Dataset schema (column name, dtype, observed min/max length or class count):

column                 dtype            min        max
blob_id                stringlengths    40         40
directory_id           stringlengths    40         40
path                   stringlengths    2          616
content_id             stringlengths    40         40
detected_licenses      listlengths      0          69
license_type           stringclasses    2 values
repo_name              stringlengths    5          118
snapshot_id            stringlengths    40         40
revision_id            stringlengths    40         40
branch_name            stringlengths    4          63
visit_date             timestamp[us]
revision_date          timestamp[us]
committer_date         timestamp[us]
github_id              int64            2.91k      686M
star_events_count      int64            0          209k
fork_events_count      int64            0          110k
gha_license_id         stringclasses    23 values
gha_event_created_at   timestamp[us]
gha_created_at         timestamp[us]
gha_language           stringclasses    220 values
src_encoding           stringclasses    30 values
language               stringclasses    1 value
is_vendor              bool             2 classes
is_generated           bool             2 classes
length_bytes           int64            2          10.3M
extension              stringclasses    257 values
content                stringlengths    2          10.3M
authors                listlengths      1          1
author_id              stringlengths    0          212
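A minimal sketch of iterating rows with this schema using the Hugging Face datasets library; the dataset ID below is a placeholder, since this dump does not name one:

import itertools
from datasets import load_dataset

# Hypothetical dataset ID; substitute the real one.
ds = load_dataset("org/python-source-dump", split="train", streaming=True)

for row in itertools.islice(ds, 100):
    # Filter on schema columns: keep small, non-vendored, hand-written files.
    if not row["is_vendor"] and not row["is_generated"] and row["length_bytes"] < 10_000:
        print(row["repo_name"], row["path"], row["length_bytes"])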
Record 1 (repo: tthtlc/sansagraphics, path: /winding_helix.py)

blob_id: 179abd03f2ae118cfb2b85da6360707ead06748a
directory_id: 1b10b46afdf24b4ce4f2d57e315e09e17c0a9c2b
path: /winding_helix.py
content_id: 51d16cff03b2651355fadbdb7bd2a560ed49af5b
detected_licenses: []
license_type: no_license
repo_name: tthtlc/sansagraphics
snapshot_id: e6aad1541dabc85b3871e1890c9f79aa33055355
revision_id: 113e559fb128c93ed1f02155ec74e76878b86c37
branch_name: refs/heads/master
visit_date: 2021-01-15T15:52:35.126301
revision_date: 2020-03-30T16:58:57
committer_date: 2020-03-30T16:58:57
github_id: 15,507,431
star_events_count: 2
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,415
extension: py

content:

# Pygame/PyopenGL example by Bastiaan Zapf, Apr 2009
### From http://python-opengl-examples.blogspot.sg/
#
# Draw an helix, wiggle it pleasantly
#
# Keywords: Alpha Blending, Textures, Animation, Double Buffer

from OpenGL.GL import *
from OpenGL.GLU import *
from math import *  # trigonometry

import pygame  # just to get a display

# get an OpenGL surface
pygame.init()
pygame.display.set_mode((800, 600), pygame.OPENGL | pygame.DOUBLEBUF)

# How to catch errors here?

done = False
t = 0

while not done:
    t = t + 1

    # for fun comment out these two lines
    glClearColor(0.0, 0.0, 0.0, 1.0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # Get a perspective at the helix
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(90, 1, 0.01, 1000)
    gluLookAt(sin(t/200.0)*3, sin(t/500.0)*3, cos(t/200.0)*3,
              0, 0, 0,
              0, 1, 0)

    # Draw the helix (this ought to be a display list call)
    glMatrixMode(GL_MODELVIEW)

    # get a texture (this ought not to be inside the inner loop)
    texture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, texture)
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)

    # set sane defaults for a plethora of potentially uninitialized
    # variables
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)

    # a texture
    # pulse = sin(t/30)*0.5+0.5  # try this one
    pulse = 0

    texdata = [[[0.0, 0, 1, 1], [0.0, 0, 0, 0], [0.0, 1, 0, 1], [0.0, 0, 0, 0]],
               [[0.0, 0, 0, 0], [pulse, pulse, pulse, 1], [pulse, pulse, pulse, 1], [0.0, 0, 0, 0]],
               [[0.0, 1, 0, 1], [1, pulse, pulse, 1], [pulse, pulse, 0, 1], [0.0, 0, 0, 0]],
               [[0.0, 0, 0, 0], [0.0, 0, 0, 0], [0.0, 0, 0, 0], [0.0, 0, 0, 0]]]

    glTexImage2Df(GL_TEXTURE_2D, 0, 4, 0, GL_RGBA, texdata)

    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE)  # XXX Why GL_ONE?
    # alternatively:
    # glEnable(GL_DEPTH_TEST)

    glEnable(GL_TEXTURE_2D)

    # use the texture
    glBindTexture(GL_TEXTURE_2D, texture)

    # vertices & texture data
    glBegin(GL_TRIANGLE_STRIP)
    # pulse2 = 0.5
    for i in range(0, 100):
        r = 5.0   # try other values - integers as well
        R = 10.0
        d = 1     # try other values
        j = i
        # pulse2 += 0.5
        if i % 3 == 0:
            glTexCoord2f(0, i)
            glVertex3f(cos(i/r)*cos(j/R) + (-2.5+i*0.05)*sin(j/R),
                       (-2.5+i*0.05)*cos(j/R) - cos(i/r)*sin(j/R),
                       sin(i/r))
        elif i % 3 == 1:
            glTexCoord2f(1, i)
            glVertex3f(cos(i/r + 3.14/2)*cos(j/R) + (-2.5+i*0.05)*sin(j/R),
                       (-2.5+i*0.05)*cos(j/R) - cos(i/r)*sin(j/R),
                       sin(i/r + 3.14/1))
        else:
            glTexCoord2f(2, i)
            glVertex3f(cos(i/r + 3.14/1)*cos(j/R) + (-2.5+i*0.05)*sin(j/R),
                       (-2.5+i*0.05)*cos(j/R) - cos(i/r)*sin(j/R),
                       sin(i/r + 3.14/1))
        # glVertex3f(cos(i/r+3.14)*pulse2, -2.5+i*0.05+d+pulse2*1, sin(i/r+3.14)*pulse2)
    glEnd()

    glFlush()
    glDeleteTextures(texture)
    pygame.display.flip()

authors: ["htmldeveloper@gmail.com"]
author_id: htmldeveloper@gmail.com
Record 2 (repo: MrMacchew/LOL_CTS, path: /pushups/migrations/0005_auto_20181011_1118.py)

blob_id: 80830d2c4527373672b28a60f6897f9622dbb64d
directory_id: 4b2df7b62246133fd3c8af2529f6544dcf2b4350
path: /pushups/migrations/0005_auto_20181011_1118.py
content_id: b591fed36590ad62240012ec288faed71fe7cbe2
detected_licenses: []
license_type: no_license
repo_name: MrMacchew/LOL_CTS
snapshot_id: 24b0904f2a4b2934d0c386511c269684cfe3b3ca
revision_id: cf9c7e434d73365aded766ac8703cb02ddd06104
branch_name: refs/heads/master
visit_date: 2020-04-01T03:02:19.267620
revision_date: 2018-10-20T07:51:37
committer_date: 2018-10-20T07:51:37
github_id: 152,806,859
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 390
extension: py

content:

# Generated by Django 2.1.1 on 2018-10-11 17:18

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('pushups', '0004_auto_20181011_1113'),
    ]

    operations = [
        migrations.AlterField(
            model_name='match',
            name='accountId',
            field=models.IntegerField(default=None),
        ),
    ]

authors: ["mattcain@weber.edu"]
author_id: mattcain@weber.edu
Record 3 (repo: constance-scherer/PLDAC_Recommandation_analyse_sous_titres, path: /utils/affichage.py)

blob_id: 0dd2cef3dc56c4a4f8d361b8c08ba8662f40f907
directory_id: 1fd5f886a0cf83d30e95792036ffbafc2d3d12fe
path: /utils/affichage.py
content_id: a6992e28990667e0b213ae69265871a69cc8ca05
detected_licenses: []
license_type: no_license
repo_name: constance-scherer/PLDAC_Recommandation_analyse_sous_titres
snapshot_id: 9a2358bdf4b564bceccedd9588f7f4d2cb8e8e67
revision_id: 92106d497ffceb65df35d3884dec1072913ce8d1
branch_name: refs/heads/master
visit_date: 2020-04-20T06:59:08.606057
revision_date: 2019-05-29T10:50:21
committer_date: 2019-05-29T10:50:21
github_id: 168,699,380
star_events_count: 4
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2019-02-10T16:42:26
gha_created_at: 2019-02-01T13:11:40
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 524
extension: py

content:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import pandas as pd


def get_hist(df, x_axis, y_axis, titre, colour, font_size=None, horizontal=False):
    if horizontal:
        hist = df.plot.barh(x=x_axis, y=y_axis, color=colour, title=titre,
                            fontsize=font_size, edgecolor="none").get_figure()
    else:
        hist = df.plot.bar(x=x_axis, y=y_axis, color=colour, title=titre,
                           fontsize=font_size, edgecolor="none").get_figure()
    path_fig = "img/" + titre + '.png'
    hist.savefig(path_fig, bbox_inches="tight")

authors: ["amina.djelloul@hotmail.fr"]
author_id: amina.djelloul@hotmail.fr
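A quick usage sketch for get_hist above; the DataFrame contents are invented for illustration, and note that the function saves to img/<titre>.png, so an img/ directory must already exist:

import pandas as pd

df = pd.DataFrame({"film": ["A", "B", "C"], "notes": [120, 75, 40]})
# Saves a vertical bar chart to img/demo.png
get_hist(df, x_axis="film", y_axis="notes", titre="demo", colour="steelblue")
# Horizontal variant of the same data, saved to img/demo_h.png
get_hist(df, x_axis="film", y_axis="notes", titre="demo_h", colour="steelblue", horizontal=True)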
Record 4 (repo: AreRex14/netprog-assignment, path: /assignment4_4.py)

blob_id: d77e5c51f77650cf17fab3e34a6d2b3c30310516
directory_id: 7672706c2d285a6eef5689381eef56dc3d6e779c
path: /assignment4_4.py
content_id: 26bd85ba5395ef34dd76a3c0e7795f494e26d6ae
detected_licenses: []
license_type: no_license
repo_name: AreRex14/netprog-assignment
snapshot_id: 3fbf2f949d774f3a957297d5cb11f18b94e00815
revision_id: 1057bc2485a98c260320dc45c01c91a3e3a6ef18
branch_name: refs/heads/master
visit_date: 2020-12-26T21:27:11.902369
revision_date: 2020-02-01T17:29:00
committer_date: 2020-02-01T17:29:00
github_id: 237,649,985
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 656
extension: py

content:

import dns.resolver
import json


def MX_lookup(host):
    answers = dns.resolver.query(host, 'MX')
    servers = []
    for rdata in answers:
        servers.append((rdata.preference, rdata.exchange))
    servers_pref_ascend = sorted(servers, key=lambda server: server[0])
    return servers_pref_ascend


def JSON_lookup(host):
    answers = dns.resolver.query(host, 'MX')
    servers = []
    for rdata in answers:
        # rdata.exchange is a dns.name.Name; convert to str so it is JSON-serializable
        servers.append((rdata.preference, str(rdata.exchange)))
    # json.dump/json.load operate on file objects; json.dumps is the right
    # call for serializing an in-memory list to a JSON string
    data = json.dumps(servers, indent=4)
    return data


if __name__ == '__main__':
    host = input("Enter a domain name to look up: ")
    mail_servers = MX_lookup(host)
    for s in mail_servers:
        print(s)

authors: ["arerifxynwa@gmail.com"]
author_id: arerifxynwa@gmail.com
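The __main__ block above only exercises MX_lookup; a sketch of calling JSON_lookup as well (the domain is just an example, and output depends on live DNS at query time):

# Example only: the domain is arbitrary and results depend on live DNS.
print(JSON_lookup("gmail.com"))
# Prints a JSON array of [preference, "exchange."] pairs, e.g.
# [[5, "gmail-smtp-in.l.google.com."], ...]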
Record 5 (repo: georgiee/lip-sync-lpc, path: /sources/scipy-scipy-414c1ab/scipy/io/tests/test_wavfile.py)

blob_id: c4bbebeeaa1fede9542e856ca68e24409905d33f
directory_id: c0f808504dd3d7fd27c39f1503fbc14c1d37bf9f
path: /sources/scipy-scipy-414c1ab/scipy/io/tests/test_wavfile.py
content_id: 266775ecd99e28e8010c480d95ff5fce9e266339
detected_licenses: []
license_type: no_license
repo_name: georgiee/lip-sync-lpc
snapshot_id: 7662102d4715e4985c693b316a02d11026ffb117
revision_id: e931cc14fe4e741edabd12471713bf84d53a4250
branch_name: refs/heads/master
visit_date: 2018-09-16T08:47:26.368491
revision_date: 2018-06-05T17:01:08
committer_date: 2018-06-05T17:01:08
github_id: 5,779,592
star_events_count: 17
fork_events_count: 4
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,146
extension: py

content:

import os
import tempfile
import warnings

import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises, assert_array_equal
from numpy.testing.utils import WarningManager

from scipy.io import wavfile


def datafile(fn):
    return os.path.join(os.path.dirname(__file__), 'data', fn)


def test_read_1():
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        warnings.simplefilter('ignore', wavfile.WavFileWarning)
        rate, data = wavfile.read(datafile('test-44100-le-1ch-4bytes.wav'))
    finally:
        warn_ctx.__exit__()

    assert_equal(rate, 44100)
    assert_(np.issubdtype(data.dtype, np.int32))
    assert_equal(data.shape, (4410,))


def test_read_2():
    rate, data = wavfile.read(datafile('test-8000-le-2ch-1byteu.wav'))
    assert_equal(rate, 8000)
    assert_(np.issubdtype(data.dtype, np.uint8))
    assert_equal(data.shape, (800, 2))


def test_read_fail():
    fp = open(datafile('example_1.nc'))
    assert_raises(ValueError, wavfile.read, fp)
    fp.close()


def _check_roundtrip(rate, dtype, channels):
    fd, tmpfile = tempfile.mkstemp(suffix='.wav')
    try:
        os.close(fd)

        data = np.random.rand(100, channels)
        if channels == 1:
            data = data[:, 0]
        data = (data*128).astype(dtype)

        wavfile.write(tmpfile, rate, data)
        rate2, data2 = wavfile.read(tmpfile)

        assert_equal(rate, rate2)
        assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype)
        assert_array_equal(data, data2)
    finally:
        os.unlink(tmpfile)


def test_write_roundtrip():
    for signed in ('i', 'u'):
        for size in (1, 2, 4, 8):
            if size == 1 and signed == 'i':
                # signed 8-bit integer PCM is not allowed
                continue
            for endianness in ('>', '<'):
                if size == 1 and endianness == '<':
                    continue
                for rate in (8000, 32000):
                    for channels in (1, 2, 5):
                        dt = np.dtype('%s%s%d' % (endianness, signed, size))
                        yield _check_roundtrip, rate, dt, channels

authors: ["georgios@kaleadis.de"]
author_id: georgios@kaleadis.de
Record 6 (repo: Altkaka/Altkaka-Web, path: /www/orm.py)

blob_id: 30bf23cbb12bb828a340c74a0d91fa08a504b30e
directory_id: 777e23a382d7dd84232795a929c4004c768d1837
path: /www/orm.py
content_id: beb91d24c8bdbfe88c890b3f0be0725751427fef
detected_licenses: []
license_type: no_license
repo_name: Altkaka/Altkaka-Web
snapshot_id: 542126c2ec72453fb1ca8495892ef1bd4282f2e7
revision_id: 64773d579aa3097a1b3af2d071358105f388cf04
branch_name: refs/heads/master
visit_date: 2021-05-04T19:14:23.648750
revision_date: 2017-10-12T08:40:16
committer_date: 2017-10-12T08:40:16
github_id: 106,657,692
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 13,659
extension: py

content:

import logging
logging.basicConfig(level=logging.INFO)

import asyncio
import aiomysql

from myapis import APIError
from myapis import *  # also provides has_orders, has_limit, str_to_where


async def create_pool(loop, **kw):
    logging.info('create database connection pool...')
    global __pool
    __pool = await aiomysql.create_pool(
        host=kw.get('host', 'localhost'),
        port=kw.get('port', 3306),
        user=kw['user'],
        password=kw['password'],
        db=kw['db'],
        charset=kw.get('charset', 'utf8'),
        autocommit=kw.get('autocommit', True),
        maxsize=kw.get('maxsize', 10),
        minsize=kw.get('minsize', 1),
        loop=loop
    )


async def select(sql, args, size=None):
    logging.info('select : SQL: %s', sql)
    logging.info('select : args: %s', args)
    global __pool
    async with __pool.get() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(sql.replace('?', '%s'), args or ())
            if size:
                rs = await cur.fetchmany(size)
            else:
                rs = await cur.fetchall()
        logging.info('row returned: %s' % len(rs))
        return rs


async def execute(sql, args, autocommit=True):
    # logging.info('execute:SQL:', sql, 'args:', args)
    # global __pool
    # with (yield from __pool) as conn:
    #     try:
    #         cur = yield from conn.cursor()
    #         yield from cur.execute(sql.replace('?', '%s'), args)
    #         affected = cur.rowcount
    #         yield from cur.close()
    #     except BaseException as e:
    #         raise
    #         logging.ERROR(e.__context__)
    #     return affected
    logging.info('execute : SQL: %s', sql)
    logging.info('execute : args: %s', args)
    global __pool
    async with __pool.get() as conn:
        if not autocommit:
            await conn.begin()
        try:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(sql.replace('?', '%s'), args)
                affected = cur.rowcount
                await cur.close()
            if not autocommit:
                await conn.commit()
                logging.info('commit success!')
        except BaseException as e:
            if not autocommit:
                await conn.rollback()
            raise
        finally:
            conn.close()
        logging.info('rows returned: %s ' % affected)
        return affected


def create_args_string(num):
    # num placeholders: '?,?,...,?'
    return '?' + ',?' * (num - 1)


class ModelMetaclass(type):

    def __new__(cls, name, bases, attrs):
        if name == 'Model':
            return type.__new__(cls, name, bases, attrs)
        tableName = attrs.get('__table__', None) or name
        logging.info('found model: %s (table: %s)' % (name, tableName))
        mappings = dict()
        fields = []
        primaryKey = None
        for k, v in attrs.items():
            if isinstance(v, Field):
                logging.info('found mapping: %s ==> %s' % (k, v))
                mappings[k] = v
                if v.primary_key:
                    if primaryKey:
                        raise APIError('Duplicate primary key for field: %s' % k)
                    primaryKey = k
                else:
                    fields.append(k)
        if not primaryKey:
            raise APIError('Primary key not found.')
        for k in mappings.keys():
            attrs.pop(k)
        escaped_fields = list(map(lambda f: '`%s`' % f, fields))
        attrs['__mappings__'] = mappings
        attrs['__table__'] = tableName
        attrs['__primary_key__'] = primaryKey
        attrs['__fields__'] = fields
        # The backticks in the SQL statements below guard against errors when a
        # field name happens to be a reserved word; when writing MySQL statements
        # by hand, field names are usually quoted for the same reason.
        attrs['__select__'] = 'select `%s`, %s from `%s`' % (primaryKey, ','.join(escaped_fields), tableName)
        attrs['__insert__'] = 'insert into `%s` (%s, `%s`) values (%s)' % (
            tableName, ','.join(escaped_fields), primaryKey, create_args_string(len(escaped_fields) + 1))
        attrs['__update__'] = 'update `%s` set %s where `%s`=?' % (
            tableName, ','.join(map(lambda f: '`%s`=?' % (mappings.get(f).name or f), fields)), primaryKey)
        attrs['__delete__'] = 'delete from `%s` where `%s` = ?' % (tableName, primaryKey)
        return type.__new__(cls, name, bases, attrs)


class Model(dict, metaclass=ModelMetaclass):

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Model' object has no attribute '%s' " % key)

    def __setattr__(self, key, value):
        self[key] = value

    def getValue(self, key):
        return getattr(self, key, None)

    def getValueOrDefault(self, key):
        value = getattr(self, key, None)
        if value is None:
            field = self.__mappings__[key]
            if field.default is not None:
                value = field.default() if callable(field.default) else field.default
                logging.debug('using default value for %s:%s' % (key, str(value)))
                setattr(self, key, value)
        return value

    @classmethod
    # find a record by primary key
    async def find(cls, pk):
        rs = await select('%s where `%s` = ?' % (cls.__select__, cls.__primary_key__), [pk], 1)
        if len(rs) == 0:
            return None
        logging.info('find rs:%s', rs[0])
        return cls(**rs[0])

    async def save(self):
        args = list(map(self.getValueOrDefault, self.__fields__))
        args.append(self.getValueOrDefault(self.__primary_key__))
        rows = await execute(self.__insert__, args)
        if rows != 1:
            logging.warning('faild to insert record: affected rows: %s' % rows)

    @classmethod
    # findAll() - query by WHERE conditions
    async def findAll(cls, **kw):
        order_flag = False
        order_values = ''
        limit_flag = False
        limit_values = ()
        logging.info('find-all begin')
        logging.info('find-all: %s-%d', kw, len(kw))
        if has_orders(kw):
            order_flag = True
            order_values = kw[has_orders(kw)]
            kw.pop(has_orders(kw))
        if has_limit(kw):
            limit_flag = True
            limit_values = kw[has_limit(kw)]
            kw.pop(has_limit(kw))
        values = list(kw.values())
        if limit_flag:
            # only bind limit parameters when a limit was actually supplied
            values.append(limit_values[0])
            values.append(limit_values[1])
        if len(kw) == 0:
            if order_flag and limit_flag:
                rs = await select('%s order by %s limit ? , ?' % (cls.__select__, order_values), values)
            elif order_flag and not limit_flag:
                rs = await select('%s order by %s' % (cls.__select__, order_values), list(kw.values()))
            elif not order_flag and limit_flag:
                rs = await select('%s limit ? , ?' % cls.__select__, values)
            else:
                rs = await select('%s ' % cls.__select__, args=None)
        else:
            if order_flag and limit_flag:
                rs = await select('%s where %s order by %s limit ? , ?' % (cls.__select__, str_to_where(kw), order_values), values)
            elif order_flag and not limit_flag:
                rs = await select('%s where %s order by %s' % (cls.__select__, str_to_where(kw), order_values), list(kw.values()))
            elif not order_flag and limit_flag:
                rs = await select('%s where %s limit ? , ?' % (cls.__select__, str_to_where(kw)), values)
            else:
                rs = await select('%s where %s' % (cls.__select__, str_to_where(kw)), list(kw.values()))
        if len(rs) == 0:
            return None
        logging.info('find-all end results: %s', rs)
        return [cls(**r) for r in rs]

    @classmethod
    # findNumber() - query by WHERE conditions, but returns an integer;
    # suited to select count(*) style SQL
    async def findNumber(cls, **kw):
        if len(kw) == 0:
            logging.info('%s' % cls.__select__)
            rs = await select('select count(*) from %s' % cls.__table__, args=None)
        else:
            rs = await select('select count(*) from %s where %s' % (cls.__table__, str_to_where(kw)), list(kw.values()))
        logging.info('findnumber:%s', rs[0]['count(*)'])
        if len(rs) == 0:
            return None
        return rs[0]['count(*)']

    # update by primary key
    async def update(self):
        args = list(map(self.getValueOrDefault, self.__fields__))
        args.append(self.getValueOrDefault(self.__primary_key__))
        rows = await execute(self.__update__, args)
        return rows

    # delete by primary key
    async def remove(self):
        args = []
        args.append(self.getValueOrDefault(self.__primary_key__))
        rows = await execute(self.__delete__, args)
        return rows


class Field(object):

    def __init__(self, name, column_type, primary_key, default):
        self.name = name
        self.column_type = column_type
        self.primary_key = primary_key
        self.default = default

    def __str__(self):
        return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)


class StringField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(100)'):
        super().__init__(name, ddl, primary_key, default)

class TinyIntField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='tinyint'):
        super().__init__(name, ddl, primary_key, default)

class SmallIntField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='smallint'):
        super().__init__(name, ddl, primary_key, default)

class MediumIntField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='mediumint'):
        super().__init__(name, ddl, primary_key, default)

class IntField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='int'):
        super().__init__(name, ddl, primary_key, default)

class BigIntField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='bigint'):
        super().__init__(name, ddl, primary_key, default)

class FloatField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='float'):
        super().__init__(name, ddl, primary_key, default)

class DoubleField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='double'):
        super().__init__(name, ddl, primary_key, default)

class DecimalField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='decimal(19,2)'):
        super().__init__(name, ddl, primary_key, default)

class CharStringField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='char(100)'):
        super().__init__(name, ddl, primary_key, default)

class TinyBlobField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='tinyblob'):
        super().__init__(name, ddl, primary_key, default)

class TinyTextField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='tinytext'):
        super().__init__(name, ddl, primary_key, default)

class BlobField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='blob'):
        super().__init__(name, ddl, primary_key, default)

class TextField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='text'):
        super().__init__(name, ddl, primary_key, default)

class MediumBlobField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='mediumblob'):
        super().__init__(name, ddl, primary_key, default)

class MediumTextField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='mediumtext'):
        super().__init__(name, ddl, primary_key, default)

class LongBlobField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='longblob'):
        super().__init__(name, ddl, primary_key, default)

class longTextField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='longtext'):
        super().__init__(name, ddl, primary_key, default)

class VarBinaryField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='varbinary(100)'):
        super().__init__(name, ddl, primary_key, default)

class BinaryField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='binary(100)'):
        super().__init__(name, ddl, primary_key, default)

class DateField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='date'):
        super().__init__(name, ddl, primary_key, default)

class TimeField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='time'):
        super().__init__(name, ddl, primary_key, default)

class YearField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='year'):
        super().__init__(name, ddl, primary_key, default)

class DateTimeField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='datetime'):
        super().__init__(name, ddl, primary_key, default)

class TimeStampField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='timestamp'):
        super().__init__(name, ddl, primary_key, default)

class BooleanField(Field):
    def __init__(self, name=None, primary_key=False, default=None, ddl='boolean'):
        super().__init__(name, ddl, primary_key, default)

authors: ["dongjiwukl@163.com"]
author_id: dongjiwukl@163.com
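ModelMetaclass above derives the table name, field list, and the four CRUD statements from class attributes at class-creation time. A minimal usage sketch; the model, credentials, and event-loop wiring are invented for illustration:

# Hypothetical model; table and column names are for illustration only.
class User(Model):
    __table__ = 'users'
    id = IntField(name='id', primary_key=True)
    name = StringField(name='name')

async def demo(loop):
    await create_pool(loop, user='root', password='secret', db='test')
    await User(id=1, name='Alice').save()  # runs the generated __insert__
    u = await User.find(1)                 # "select ... where `id` = ?"
    u.name = 'Bob'
    await u.update()                       # runs the generated __update__

loop = asyncio.get_event_loop()
loop.run_until_complete(demo(loop))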
Record 7 (repo: npcardoso/PhDThesis, path: /libcrowbar/configs/cpplint.py)

blob_id: 4698d40ef6e587e24e8f464698a390215afa3948
directory_id: 115bf3b584b489f34167e5e9d98eb53a6c03044b
path: /libcrowbar/configs/cpplint.py
content_id: e60e94019c192b30e58b7b1c196a6d53fa8cb4a4
detected_licenses: []
license_type: no_license
repo_name: npcardoso/PhDThesis
snapshot_id: fe094414d695cf64db100d23997c0e347e2dea14
revision_id: 16062572cfe6e234856325c2a8c9e4c88a335236
branch_name: refs/heads/master
visit_date: 2021-01-16T21:29:21.888085
revision_date: 2018-10-26T18:22:32
committer_date: 2018-10-26T18:22:32
github_id: 63,144,416
star_events_count: 37
fork_events_count: 15
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: true
is_generated: false
length_bytes: 234,786
extension: py

content:
#!/usr/bin/python # # Copyright (c) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Does google-lint on c++ files. The goal of this script is to identify places in the code that *may* be in non-compliance with google style. It does not attempt to fix up these problems -- the point is to educate. It does also not attempt to find all problems, or to ensure that everything it does find is legitimately a problem. In particular, we can get very confused by /* and // inside strings! We do a small hack, which is to ignore //'s with "'s after them on the same line, but it is far from perfect (in either direction). """ import codecs import copy import getopt import math # for log import os import re import sre_compile import string import sys import unicodedata _USAGE = """ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] [--counting=total|toplevel|detailed] [--root=subdir] [--linelength=digits] <file> [file] ... The style guidelines this tries to follow are those in http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml Every problem is given a confidence score from 1-5, with 5 meaning we are certain of the problem, and 1 meaning it could be a legitimate construct. This will miss some errors, and is not a substitute for a code review. To suppress false-positive errors of a certain category, add a 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) suppresses errors of all categories on that line. The files passed in will be linted; at least one file must be provided. Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the extensions with the --extensions flag. Flags: output=vs7 By default, the output is formatted to ease emacs parsing. Visual Studio compatible output (vs7) may also be used. Other formats are unsupported. verbose=# Specify a number 0-5 to restrict errors to certain verbosity levels. filter=-x,+y,... Specify a comma-separated list of category-filters to apply: only error messages whose category names pass the filters will be printed. 
(Category names are printed with the message and look like "[whitespace/indent]".) Filters are evaluated left to right. "-FOO" and "FOO" means "do not print categories that start with FOO". "+FOO" means "do print categories that start with FOO". Examples: --filter=-whitespace,+whitespace/braces --filter=whitespace,runtime/printf,+runtime/printf_format --filter=-,+build/include_what_you_use To see a list of all the categories used in cpplint, pass no arg: --filter= counting=total|toplevel|detailed The total number of errors found is always printed. If 'toplevel' is provided, then the count of errors in each of the top-level categories like 'build' and 'whitespace' will also be printed. If 'detailed' is provided, then a count is provided for each category like 'build/class'. root=subdir The root directory used for deriving header guard CPP variable. By default, the header guard CPP variable is calculated as the relative path to the directory that contains .git, .hg, or .svn. When this flag is specified, the relative path is calculated from the specified directory. If the specified directory does not exist, this flag is ignored. Examples: Assuming that src/.git exists, the header guard CPP variables for src/chrome/browser/ui/browser.h are: No flag => CHROME_BROWSER_UI_BROWSER_H_ --root=chrome => BROWSER_UI_BROWSER_H_ --root=chrome/browser => UI_BROWSER_H_ linelength=digits This is the allowed line length for the project. The default value is 80 characters. Examples: --linelength=120 extensions=extension,extension,... The allowed file extensions that cpplint will check Examples: --extensions=hpp,cpp cpplint.py supports per-directory configurations specified in CPPLINT.cfg files. CPPLINT.cfg file can contain a number of key=value pairs. Currently the following options are supported: set noparent filter=+filter1,-filter2,... exclude_files=regex "set noparent" option prevents cpplint from traversing directory tree upwards looking for more .cfg files in parent directories. This option is usually placed in the top-level project directory. The "filter" option is similar in function to --filter flag. It specifies message filters in addition to the |_DEFAULT_FILTERS| and those specified through --filter command-line flag. "exclude_files" allows to specify a regular expression to be matched against a file name. If the expression matches, the file is skipped and not run through liner. CPPLINT.cfg has an effect on files in the same directory and all sub-directories, unless overridden by a nested configuration file. Example file: filter=-build/include_order,+build/include_alpha exclude_files=.*\.cc The above example disables build/include_order warning and enables build/include_alpha as well as excludes all .cc from being processed by linter, in the current directory (where the .cfg file is located) and all sub-directories. """ # We categorize each error message we print. Here are the categories. # We want an explicit list so we can list them all in cpplint --filter=. # If you add a new error message with a new category, add it to the list # here! cpplint_unittest.py should tell you if you forget to do this. 
_ERROR_CATEGORIES = [ 'build/class', 'build/c++11', 'build/deprecated', 'build/endif_comment', 'build/explicit_make_pair', 'build/forward_decl', 'build/header_guard', 'build/include', 'build/include_alpha', 'build/include_order', 'build/include_what_you_use', 'build/namespaces', 'build/printf_format', 'build/storage_class', 'legal/copyright', 'readability/alt_tokens', 'readability/braces', 'readability/casting', 'readability/check', 'readability/constructors', 'readability/fn_size', 'readability/function', 'readability/inheritance', 'readability/multiline_comment', 'readability/multiline_string', 'readability/namespace', 'readability/nolint', 'readability/nul', 'readability/streams', 'readability/todo', 'readability/utf8', 'runtime/arrays', 'runtime/casting', 'runtime/explicit', 'runtime/int', 'runtime/init', 'runtime/invalid_increment', 'runtime/member_string_references', 'runtime/memset', 'runtime/indentation_namespace', 'runtime/operator', 'runtime/printf', 'runtime/printf_format', 'runtime/references', 'runtime/string', 'runtime/threadsafe_fn', 'runtime/vlog', 'whitespace/blank_line', 'whitespace/braces', 'whitespace/comma', 'whitespace/comments', 'whitespace/empty_conditional_body', 'whitespace/empty_loop_body', 'whitespace/end_of_line', 'whitespace/ending_newline', 'whitespace/forcolon', 'whitespace/indent', 'whitespace/line_length', 'whitespace/newline', 'whitespace/operators', 'whitespace/parens', 'whitespace/semicolon', 'whitespace/tab', 'whitespace/todo' ] # The default state of the category filter. This is overridden by the --filter= # flag. By default all errors are on, so only add here categories that should be # off by default (i.e., categories that must be enabled by the --filter= flags). # All entries here should start with a '-' or '+', as in the --filter= flag. _DEFAULT_FILTERS = ['-build/include_alpha'] # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. 
# C++ headers _CPP_HEADERS = frozenset([ # Legacy 'algobase.h', 'algo.h', 'alloc.h', 'builtinbuf.h', 'bvector.h', 'complex.h', 'defalloc.h', 'deque.h', 'editbuf.h', 'fstream.h', 'function.h', 'hash_map', 'hash_map.h', 'hash_set', 'hash_set.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip.h', 'iostream.h', 'istream.h', 'iterator.h', 'list.h', 'map.h', 'multimap.h', 'multiset.h', 'ostream.h', 'pair.h', 'parsestream.h', 'pfstream.h', 'procbuf.h', 'pthread_alloc', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', 'set.h', 'slist', 'slist.h', 'stack.h', 'stdiostream.h', 'stl_alloc.h', 'stl_relops.h', 'streambuf.h', 'stream.h', 'strfile.h', 'strstream.h', 'tempbuf.h', 'tree.h', 'type_traits.h', 'vector.h', # 17.6.1.2 C++ library headers 'algorithm', 'array', 'atomic', 'bitset', 'chrono', 'codecvt', 'complex', 'condition_variable', 'deque', 'exception', 'forward_list', 'fstream', 'functional', 'future', 'initializer_list', 'iomanip', 'ios', 'iosfwd', 'iostream', 'istream', 'iterator', 'limits', 'list', 'locale', 'map', 'memory', 'mutex', 'new', 'numeric', 'ostream', 'queue', 'random', 'ratio', 'regex', 'set', 'sstream', 'stack', 'stdexcept', 'streambuf', 'string', 'strstream', 'system_error', 'thread', 'tuple', 'typeindex', 'typeinfo', 'type_traits', 'unordered_map', 'unordered_set', 'utility', 'valarray', 'vector', # 17.6.1.2 C++ headers for C library facilities 'cassert', 'ccomplex', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes', 'ciso646', 'climits', 'clocale', 'cmath', 'csetjmp', 'csignal', 'cstdalign', 'cstdarg', 'cstdbool', 'cstddef', 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctgmath', 'ctime', 'cuchar', 'cwchar', 'cwctype', ]) # These headers are excluded from [build/include] and [build/include_order] # checks: # - Anything not following google file name conventions (containing an # uppercase character, such as Python.h or nsStringAPI.h, for example). # - Lua headers. _THIRD_PARTY_HEADERS_PATTERN = re.compile( r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') # Assertion macros. These are defined in base/logging.h and # testing/base/gunit.h. Note that the _M versions need to come first # for substring matching to work. _CHECK_MACROS = [ 'DCHECK', 'CHECK', 'EXPECT_TRUE_M', 'EXPECT_TRUE', 'ASSERT_TRUE_M', 'ASSERT_TRUE', 'EXPECT_FALSE_M', 'EXPECT_FALSE', 'ASSERT_FALSE_M', 'ASSERT_FALSE', ] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'), ('<=', 'LE'), ('<', 'LT')]: _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]: _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. 
# # Digraphs (such as '%:') are not included here since it's a mess to # match those on a word boundary. _ALT_TOKEN_REPLACEMENT = { 'and': '&&', 'bitor': '|', 'or': '||', 'xor': '^', 'compl': '~', 'bitand': '&', 'and_eq': '&=', 'or_eq': '|=', 'xor_eq': '^=', 'not': '!', 'not_eq': '!=' } # Compile regular expression that matches all the above keywords. The "[ =()]" # bit is meant to avoid matching these keywords outside of boolean expressions. # # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') # These constants define types of headers for use with # _IncludeState.CheckNextIncludeOrder(). _C_SYS_HEADER = 1 _CPP_SYS_HEADER = 2 _LIKELY_MY_HEADER = 3 _POSSIBLE_MY_HEADER = 4 _OTHER_HEADER = 5 # These constants define the current inline assembly state _NO_ASM = 0 # Outside of inline assembly block _INSIDE_ASM = 1 # Inside inline assembly block _END_ASM = 2 # Last line of inline assembly block _BLOCK_ASM = 3 # The whole block is an inline assembly block # Match start of assembly blocks _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' r'(?:\s+(volatile|__volatile__))?' r'\s*[{(]') _regexp_compile_cache = {} # {str, set(int)}: a map from error categories to sets of linenumbers # on which those errors are expected and should be suppressed. _error_suppressions = {} # The root directory used for deriving header guard CPP variable. # This is set by --root flag. _root = None # The allowed line length of files. # This is set by --linelength flag. _line_length = 80 # The allowed extensions for file names # This is set by --extensions flag. _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) def ParseNolintSuppressions(filename, raw_line, linenum, error): """Updates the global list of error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler. """ matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) if matched: if matched.group(1): suppressed_line = linenum + 1 else: suppressed_line = linenum category = matched.group(2) if category in (None, '(*)'): # => "suppress all" _error_suppressions.setdefault(None, set()).add(suppressed_line) else: if category.startswith('(') and category.endswith(')'): category = category[1:-1] if category in _ERROR_CATEGORIES: _error_suppressions.setdefault(category, set()).add(suppressed_line) else: error(filename, linenum, 'readability/nolint', 5, 'Unknown NOLINT error category: %s' % category) def ResetNolintSuppressions(): """Resets the set of NOLINT suppressions to empty.""" _error_suppressions.clear() def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment. 
""" return (linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set())) def Match(pattern, s): """Matches the string with the pattern, caching the compiled regexp.""" # The regexp compilation caching is inlined in both Match and Search for # performance reasons; factoring it out into a separate function turns out # to be noticeably expensive. if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].match(s) def ReplaceAll(pattern, rep, s): """Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. Args: pattern: regex pattern rep: replacement text s: search string Returns: string with replacements made (or original string if no replacements) """ if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].sub(rep, s) def Search(pattern, s): """Searches the string for the pattern, caching the compiled regexp.""" if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].search(s) class _IncludeState(object): """Tracks line numbers for includes, and the order in which includes appear. include_list contains list of lists of (header, line number) pairs. It's a lists of lists rather than just one flat list to make it easier to update across preprocessor boundaries. Call CheckNextIncludeOrder() once for each header in the file, passing in the type constants defined above. Calls in an illegal order will raise an _IncludeError with an appropriate error message. """ # self._section will move monotonically through this set. If it ever # needs to move backwards, CheckNextIncludeOrder will raise an error. _INITIAL_SECTION = 0 _MY_H_SECTION = 1 _C_SECTION = 2 _CPP_SECTION = 3 _OTHER_H_SECTION = 4 _TYPE_NAMES = { _C_SYS_HEADER: 'C system header', _CPP_SYS_HEADER: 'C++ system header', _LIKELY_MY_HEADER: 'header this file implements', _POSSIBLE_MY_HEADER: 'header this file may implement', _OTHER_HEADER: 'other header', } _SECTION_NAMES = { _INITIAL_SECTION: "... nothing. (This can't be an error.)", _MY_H_SECTION: 'a header this file implements', _C_SECTION: 'C system header', _CPP_SECTION: 'C++ system header', _OTHER_H_SECTION: 'other header', } def __init__(self): self.include_list = [[]] self.ResetSection('') def FindHeader(self, header): """Check if a header has already been included. Args: header: header to check. Returns: Line number of previous occurrence, or -1 if the header has not been seen before. """ for section_list in self.include_list: for f in section_list: if f[0] == header: return f[1] return -1 def ResetSection(self, directive): """Reset section checking for preprocessor directive. Args: directive: preprocessor directive (e.g. "if", "else"). """ # The name of the current section. self._section = self._INITIAL_SECTION # The path of last found header. self._last_header = '' # Update list of includes. Note that we never pop from the # include list. if directive in ('if', 'ifdef', 'ifndef'): self.include_list.append([]) elif directive in ('else', 'elif'): self.include_list[-1] = [] def SetLastHeader(self, header_path): self._last_header = header_path def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. 
- removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. """ return header_path.replace('-inl.h', '.h').replace('-', '_').lower() def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): """Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order. """ # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and not Match(r'^\s*$', clean_lines.elided[linenum - 1])): return False return True def CheckNextIncludeOrder(self, header_type): """Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong. """ error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return '' class _CppLintState(object): """Maintains module-wide state..""" def __init__(self): self.verbose_level = 1 # global setting. self.error_count = 0 # global count of reported errors # filters to apply when emitting error messages self.filters = _DEFAULT_FILTERS[:] # backup of filter list. Used to restore the state after each file. self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? 
self.errors_by_category = {} # string to int dict storing error counts # output format: # "emacs" - format that emacs can parse (default) # "vs7" - format that Microsoft Visual Studio 7 can parse self.output_format = 'emacs' def SetOutputFormat(self, output_format): """Sets the output format for errors.""" self.output_format = output_format def SetVerboseLevel(self, level): """Sets the module's verbosity, and returns the previous setting.""" last_verbose_level = self.verbose_level self.verbose_level = level return last_verbose_level def SetCountingStyle(self, counting_style): """Sets the module's counting options.""" self.counting = counting_style def SetFilters(self, filters): """Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" """ # Default filters always have less priority than the flag ones. self.filters = _DEFAULT_FILTERS[:] self.AddFilters(filters) def AddFilters(self, filters): """ Adds more filters to the existing list of error-message filters. """ for filt in filters.split(','): clean_filt = filt.strip() if clean_filt: self.filters.append(clean_filt) for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' ' (%s does not)' % filt) def BackupFilters(self): """ Saves the current filter list to backup storage.""" self._filters_backup = self.filters[:] def RestoreFilters(self): """ Restores filters previously backed up.""" self.filters = self._filters_backup[:] def ResetErrorCounts(self): """Sets the module's error statistic back to zero.""" self.error_count = 0 self.errors_by_category = {} def IncrementErrorCount(self, category): """Bumps the module's error statistic.""" self.error_count += 1 if self.counting in ('toplevel', 'detailed'): if self.counting != 'detailed': category = category.split('/')[0] if category not in self.errors_by_category: self.errors_by_category[category] = 0 self.errors_by_category[category] += 1 def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count) _cpplint_state = _CppLintState() def _OutputFormat(): """Gets the module's output format.""" return _cpplint_state.output_format def _SetOutputFormat(output_format): """Sets the module's output format.""" _cpplint_state.SetOutputFormat(output_format) def _VerboseLevel(): """Returns the module's verbosity setting.""" return _cpplint_state.verbose_level def _SetVerboseLevel(level): """Sets the module's verbosity, and returns the previous setting.""" return _cpplint_state.SetVerboseLevel(level) def _SetCountingStyle(level): """Sets the module's counting options.""" _cpplint_state.SetCountingStyle(level) def _Filters(): """Returns the module's list of output filters, as a list.""" return _cpplint_state.filters def _SetFilters(filters): """Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). 
Each filter should start with + or -; else we die. """ _cpplint_state.SetFilters(filters) def _AddFilters(filters): """Adds more filter overrides. Unlike _SetFilters, this function does not reset the current list of filters available. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.AddFilters(filters) def _BackupFilters(): """ Saves the current filter list to backup storage.""" _cpplint_state.BackupFilters() def _RestoreFilters(): """ Restores filters previously backed up.""" _cpplint_state.RestoreFilters() class _FunctionState(object): """Tracks current function name and the number of lines in its body.""" _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. def __init__(self): self.in_a_function = False self.lines_in_function = 0 self.current_function = '' def Begin(self, function_name): """Start analyzing function body. Args: function_name: The name of the function being tracked. """ self.in_a_function = True self.lines_in_function = 0 self.current_function = function_name def Count(self): """Count line in current function body.""" if self.in_a_function: self.lines_in_function += 1 def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger)) def End(self): """Stop analyzing function body.""" self.in_a_function = False class _IncludeError(Exception): """Indicates a problem with the include order in a file.""" pass class FileInfo(object): """Provides utility functions for filenames. FileInfo provides easy access to the components of a file's path relative to the project root. """ def __init__(self, filename): self._filename = filename def FullName(self): """Make Windows paths like Unix.""" return os.path.abspath(self._filename).replace('\\', '/') def RepositoryName(self): """FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. 
""" fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... return fullname def Split(self): """Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension). """ googlename = self.RepositoryName() project, rest = os.path.split(googlename) return (project,) + os.path.splitext(rest) def BaseName(self): """File base name - text after the final slash, before the final period.""" return self.Split()[1] def Extension(self): """File extension - text following the final period.""" return self.Split()[2] def NoExtension(self): """File has no source file extension.""" return '/'.join(self.Split()[0:2]) def IsSource(self): """File has a source file extension.""" return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') def _ShouldPrintError(category, confidence, linenum): """If confidence >= verbose, category passes filter and is not suppressed.""" # There are three ways we might decide not to print an error message: # a "NOLINT(category)" comment appears in the source, # the verbosity level isn't high enough, or the filters filter it out. if IsErrorSuppressedByNolint(category, linenum): return False if confidence < _cpplint_state.verbose_level: return False is_filtered = False for one_filter in _Filters(): if one_filter.startswith('-'): if category.startswith(one_filter[1:]): is_filtered = True elif one_filter.startswith('+'): if category.startswith(one_filter[1:]): is_filtered = False else: assert False # should have been checked for in SetFilter. if is_filtered: return False return True def Error(filename, linenum, category, confidence, message): """Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". 
Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message. """ if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) else: sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') # Match a single C style comment on the same line. _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/' # Matches multi-line C style comments. # This RE is a little bit more complicated than one might expect, because we # have to take care of space removals tools so we can handle comments inside # statements better. # The current rule is: We only clear spaces from both sides when we're at the # end of the line. Otherwise, we try to remove spaces from the right side, # if this doesn't work we try on left side but only if there's a non-character # on the right. _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' + _RE_PATTERN_C_COMMENTS + r'\s+|' + r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' + _RE_PATTERN_C_COMMENTS + r')') def IsCppString(line): """Does line terminate so, that the next symbol is in string constant. This function does not consider single-line nor multi-line comments. Args: line: is a partial line of code starting from the 0..n. Returns: True, if next character appended to 'line' is inside a string constant. """ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 def CleanseRawStrings(raw_lines): """Removes C++11 raw strings from lines. Before: static const char kData[] = R"( multi-line string )"; After: static const char kData[] = "" (replaced by blank line) ""; Args: raw_lines: list of raw lines. Returns: list of lines with C++11 raw strings replaced by empty strings. """ delimiter = None lines_without_raw_strings = [] for line in raw_lines: if delimiter: # Inside a raw string, look for the end end = line.find(delimiter) if end >= 0: # Found the end of the string, match leading space for this # line and resume copying the original lines, and also insert # a "" on the last line. leading_space = Match(r'^(\s*)\S', line) line = leading_space.group(1) + '""' + line[end + len(delimiter):] delimiter = None else: # Haven't found the end yet, append a blank line. line = '""' # Look for beginning of a raw string, and replace them with # empty strings. This is done in a loop to handle multiple raw # strings on the same line. while delimiter is None: # Look for beginning of a raw string. # See 2.14.15 [lex.string] for syntax. 
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) if matched: delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) if end >= 0: # Raw string ended on same line line = (matched.group(1) + '""' + matched.group(3)[end + len(delimiter):]) delimiter = None else: # Start of a multi-line raw string line = matched.group(1) + '""' else: break lines_without_raw_strings.append(line) # TODO(unknown): if delimiter is not None here, we might want to # emit a warning for unterminated string. return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): """Find the beginning marker for a multiline comment.""" while lineix < len(lines): if lines[lineix].strip().startswith('/*'): # Only return this marker if the comment goes beyond this line if lines[lineix].strip().find('*/', 2) < 0: return lineix lineix += 1 return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '// dummy' def RemoveMultiLineComments(filename, lines, error): """Removes multiline (c-style) comments from lines.""" lineix = 0 while lineix < len(lines): lineix_begin = FindNextMultiLineCommentStart(lines, lineix) if lineix_begin >= len(lines): return lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) if lineix_end >= len(lines): error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) lineix = lineix_end + 1 def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) class CleansedLines(object): """Holds 3 copies of all lines with different preprocessing applied to them. 1) elided member contains lines without strings and comments, 2) lines member contains lines without comments, and 3) raw_lines member contains all the lines without processing. All these three members are of <type 'list'>, and of the same length. """ def __init__(self, lines): self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) for linenum in range(len(self.lines_without_raw_strings)): self.lines.append(CleanseComments( self.lines_without_raw_strings[linenum])) elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) self.elided.append(CleanseComments(elided)) def NumLines(self): """Returns the number of lines represented.""" return self.num_lines @staticmethod def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. 
""" if _RE_PATTERN_INCLUDE.match(elided): return elided # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) # Replace quoted strings and digit separators. Both single quotes # and double quotes are processed in the same loop, otherwise # nested quotes wouldn't work. collapsed = '' while True: # Find the first quote character match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break head, quote, tail = match.groups() if quote == '"': # Collapse double quoted strings second_quote = tail.find('"') if second_quote >= 0: collapsed += head + '""' elided = tail[second_quote + 1:] else: # Unmatched double quote, don't bother processing the rest # of the line since this is probably a multiline string. collapsed += elided break else: # Found single quote, check nearby text to eliminate digit separators. # # There is no special handling for floating point here, because # the integer/fractional/exponent parts would all be parsed # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: second_quote = tail.find('\'') if second_quote >= 0: collapsed += head + "''" elided = tail[second_quote + 1:] else: # Unmatched single quote collapsed += elided break return collapsed def FindEndOfExpressionInLine(line, startpos, stack): """Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line) """ for i in xrange(startpos, len(line)): char = line[i] if char in '([{': # Found start of parenthesized expression, push to expression stack stack.append(char) elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator if stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) elif i > 0 and Search(r'\boperator\s*$', line[0:i]): # operator<, don't add to stack continue else: # Tentative start of template argument list stack.append('<') elif char in ')]}': # Found end of parenthesized expression. # # If we are currently expecting a matching '>', the pending '<' # must have been an operator. Remove them from expression stack. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) if ((stack[-1] == '(' and char == ')') or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}')): stack.pop() if not stack: return (i + 1, None) else: # Mismatched parentheses return (-1, None) elif char == '>': # Found potential end of template argument list. # Ignore "->" and operator functions if (i > 0 and (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): continue # Pop the stack if there is a matching '<'. Otherwise, ignore # this '>' since it must be an operator. 
if stack: if stack[-1] == '<': stack.pop() if not stack: return (i + 1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '>', the matching '<' must have been an operator, since # template argument list should not contain statements. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) # Did not find end of expression or unbalanced parentheses on this line return (-1, stack) def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) # Check first line (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) if end_pos > -1: return (line, linenum, end_pos) # Continue scanning forward while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) if end_pos > -1: return (line, linenum, end_pos) # Did not find end of expression before end of file, give up return (line, clean_lines.NumLines(), -1) def FindStartOfExpressionInLine(line, endpos, stack): """Find position at the matching start of current expression. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. stack: nesting stack at endpos. Returns: On finding matching start: (index at matching start, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at beginning of this line) """ i = endpos while i >= 0: char = line[i] if char in ')]}': # Found end of expression, push to expression stack stack.append(char) elif char == '>': # Found potential end of template argument list. # # Ignore it if it's a "->" or ">=" or "operator>" if (i > 0 and (line[i - 1] == '-' or Match(r'\s>=\s', line[i - 1:]) or Search(r'\boperator\s*$', line[0:i]))): i -= 1 else: stack.append('>') elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator i -= 1 else: # If there is a matching '>', we can pop the expression stack. # Otherwise, ignore this '<' since it must be an operator. if stack and stack[-1] == '>': stack.pop() if not stack: return (i, None) elif char in '([{': # Found start of expression. # # If there are any unmatched '>' on the stack, they must be # operators. Remove those. 
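      # Illustrative (not part of cpplint): scanning '(a > b)' backwards
      # from the ')', the '>' was pushed tentatively; reaching '(' proves
      # it was a comparison operator, so it is dropped here before the
      # parentheses are matched.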
while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) if ((char == '(' and stack[-1] == ')') or (char == '[' and stack[-1] == ']') or (char == '{' and stack[-1] == '}')): stack.pop() if not stack: return (i, None) else: # Mismatched parentheses return (-1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '<', the matching '>' must have been an operator, since # template argument list should not contain statements. while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) i -= 1 return (-1, stack) def ReverseCloseExpression(clean_lines, linenum, pos): """If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if line[pos] not in ')}]>': return (line, 0, -1) # Check last line (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) if start_pos > -1: return (line, linenum, start_pos) # Continue scanning backward while stack and linenum > 0: linenum -= 1 line = clean_lines.elided[linenum] (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) if start_pos > -1: return (line, linenum, start_pos) # Did not find start of expression before beginning of file, give up return (line, 0, -1) def CheckForCopyright(filename, lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a # dummy line at the front. for line in xrange(1, min(len(lines), 11)): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found error(filename, 0, 'legal/copyright', 5, 'No copyright message found. ' 'You should have a line: "Copyright [year] <Copyright Owner>"') def GetIndentLevel(line): """Return the number of leading spaces in line. Args: line: A string to check. Returns: An integer count of leading spaces, possibly zero. """ indent = Match(r'^( *)\S', line) if indent: return len(indent.group(1)) else: return 0 def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores original filename in case that cpplint is invoked from Emacs's # flymake. filename = re.sub(r'_flymake\.h$', '.h', filename) filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' def CheckForHeaderGuard(filename, lines, error): """Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. lines: An array of strings, each representing a line of the file. 
error: The function to call with any errors found. """ # Don't check for header guards if there are error suppression # comments somewhere in this file. # # Because this is silencing a warning for a nonexistent line, we # only support the very specific NOLINT(build/header_guard) syntax, # and not the general NOLINT or NOLINT(*) syntax. for i in lines: if Search(r'//\s*NOLINT\(build/header_guard\)', i): return cppvar = GetHeaderGuardCPPVariable(filename) ifndef = None ifndef_linenum = 0 define = None endif = None endif_linenum = 0 for linenum, line in enumerate(lines): linesplit = line.split() if len(linesplit) >= 2: # find the first occurrence of #ifndef and #define, save arg if not ifndef and linesplit[0] == '#ifndef': # set ifndef to the header guard presented on the #ifndef line. ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == '#define': define = linesplit[1] # find the last occurrence of #endif, save entire line if line.startswith('#endif'): endif = line endif_linenum = linenum if not ifndef: error(filename, 0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return if not define: error(filename, 0, 'build/header_guard', 5, 'No #define header guard found, suggested CPP variable is: %s' % cppvar) return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ # for backward compatibility. if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, '#ifndef header guard has wrong style, please use: %s' % cppvar) if define != ifndef: error(filename, 0, 'build/header_guard', 5, '#ifndef and #define don\'t match, suggested CPP variable is: %s' % cppvar) return if endif != ('#endif // %s' % cppvar): error_level = 0 if endif != ('#endif // %s' % (cppvar + '_')): error_level = 5 ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, error) error(filename, endif_linenum, 'build/header_guard', error_level, '#endif line should be "#endif // %s"' % cppvar) def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') def CheckForNewlineAtEOF(filename, lines, error): """Logs an error if there is no newline char at the end of the file. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ # The array lines() was created by adding two newlines to the # original file (go figure), then splitting on \n. 
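  # Illustrative (assumed, given the marker line appended at the end):
  # a file ending 'int a;\n' splits into [..., 'int a;', ''], so
  # lines[-2] == ''; a file ending 'int a;' without a newline leaves
  # lines[-2] == 'int a;', which is non-empty and gets flagged.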
# To verify that the file ends in \n, we just have to make sure the # last-but-two element of lines() exists and is empty. if len(lines) < 3 or lines[-2]: error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.') def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. ' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.') # (non-threadsafe name, thread-safe alternative, validation pattern) # # The validation pattern is used to eliminate false positives such as: # _rand(); // false positive due to substring match. # ->rand(); // some member function rand(). # ACMRandom rand(seed); // some variable named rand. # ISAACRandom rand(); // another variable named rand. # # Basically we require the return value of these functions to be used # in some expression context on the same line by matching on some # operator before the function name. This eliminates constructors and # member function calls. _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' _THREADING_LIST = ( ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), ) def CheckPosixThreading(filename, clean_lines, linenum, error): """Checks for calls to thread-unsafe functions. Much code has been originally written without consideration of multi-threading. 
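  For example (illustrative): a line such as 'int r = rand();' matches
  the validation pattern for rand() and is flagged, with rand_r()
  suggested as the thread-safe replacement.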
Also, engineers are relying on their old experience; they have learned posix before threading extensions were added. These tests guide the engineers to use thread-safe functions (when using posix directly). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: # Additional pattern matching check to confirm that this is the # function we are looking for if Search(pattern, line): error(filename, linenum, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_func + '...) instead of ' + single_thread_func + '...) for improved thread safety.') def CheckVlogArguments(filename, clean_lines, linenum, error): """Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.') # Matches invalid increment: *count++, which moves pointer instead of # incrementing a value. _RE_PATTERN_INVALID_INCREMENT = re.compile( r'^\s*\*\w+(\+\+|--);') def CheckInvalidIncrement(filename, clean_lines, linenum, error): """Checks for invalid increment *count++. For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if _RE_PATTERN_INVALID_INCREMENT.match(line): error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).') def IsMacroDefinition(clean_lines, linenum): if Search(r'^#define', clean_lines[linenum]): return True if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): return True return False def IsForwardClassDeclaration(clean_lines, linenum): return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) class _BlockInfo(object): """Stores information about a generic block of code.""" def __init__(self, seen_open_brace): self.seen_open_brace = seen_open_brace self.open_parentheses = 0 self.inline_asm = _NO_ASM self.check_namespace_indentation = False def CheckBegin(self, filename, clean_lines, linenum, error): """Run checks that applies to text up to the opening brace. This is mostly for checking the text after the class identifier and the "{", usually where the base class is specified. For other blocks, there isn't much to check, so we always pass. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. 
""" pass def CheckEnd(self, filename, clean_lines, linenum, error): """Run checks that applies to text after the closing brace. This is mostly used for checking end of namespace comments. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def IsBlockInfo(self): """Returns true if this block is a _BlockInfo. This is convenient for verifying that an object is an instance of a _BlockInfo, but not an instance of any of the derived classes. Returns: True for this class, False for derived classes. """ return self.__class__ == _BlockInfo class _ExternCInfo(_BlockInfo): """Stores information about an 'extern "C"' block.""" def __init__(self): _BlockInfo.__init__(self, True) class _ClassInfo(_BlockInfo): """Stores information about a class.""" def __init__(self, name, class_or_struct, clean_lines, linenum): _BlockInfo.__init__(self, False) self.name = name self.starting_linenum = linenum self.is_derived = False self.check_namespace_indentation = True if class_or_struct == 'struct': self.access = 'public' self.is_struct = True else: self.access = 'private' self.is_struct = False # Remember initial indentation level for this class. Using raw_lines here # instead of elided to account for leading comments. self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) # Try to find the end of the class. This will be confused by things like: # class A { # } *x = { ... # # But it's still good enough for CheckSectionSpacing. self.last_line = 0 depth = 0 for i in range(linenum, clean_lines.NumLines()): line = clean_lines.elided[i] depth += line.count('{') - line.count('}') if not depth: self.last_line = i break def CheckBegin(self, filename, clean_lines, linenum, error): # Look for a bare ':' if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): self.is_derived = True def CheckEnd(self, filename, clean_lines, linenum, error): # Check that closing brace is aligned with beginning of the class. # Only do this if the closing brace is indented by only whitespaces. # This means we will not check single-line class definitions. indent = Match(r'^( *)\}', clean_lines.elided[linenum]) if indent and len(indent.group(1)) != self.class_indent: if self.is_struct: parent = 'struct ' + self.name else: parent = 'class ' + self.name error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent) class _NamespaceInfo(_BlockInfo): """Stores information about a namespace.""" def __init__(self, name, linenum): _BlockInfo.__init__(self, False) self.name = name or '' self.starting_linenum = linenum self.check_namespace_indentation = True def CheckEnd(self, filename, clean_lines, linenum, error): """Check end of namespace comments.""" line = clean_lines.raw_lines[linenum] # Check how many lines is enclosed in this namespace. Don't issue # warning for missing namespace comments if there aren't enough # lines. However, do apply checks if there is already an end of # namespace comment and it's incorrect. # # TODO(unknown): We always want to check end of namespace comments # if a namespace is large, but sometimes we also want to apply the # check if a short namespace contained nontrivial things (something # other than forward declarations). There is currently no logic on # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. 
if (linenum - self.starting_linenum < 10 and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. # # Note that we accept C style "/* */" comments for terminating # namespaces, so that code that terminate namespaces inside # preprocessor macros can be cpplint clean. # # We also accept stuff like "// end of namespace <name>." with the # period at the end. # # Besides these, we don't accept anything else, otherwise we might # get false negatives when existing comment is a substring of the # expected namespace. if self.name: # Named namespace if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace %s"' % self.name) else: # Anonymous namespace if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): # If "// namespace anonymous" or "// anonymous namespace (more text)", # mention "// anonymous namespace" as an acceptable form if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line): error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"' ' or "// anonymous namespace"') else: error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"') class _PreprocessorInfo(object): """Stores checkpoints of nesting stacks when #if/#else is seen.""" def __init__(self, stack_before_if): # The entire nesting stack before #if self.stack_before_if = stack_before_if # The entire nesting stack up to #else self.stack_before_else = [] # Whether we have already seen #else or #elif self.seen_else = False class NestingState(object): """Holds states related to parsing braces.""" def __init__(self): # Stack for tracking all braces. An object is pushed whenever we # see a "{", and popped when we see a "}". Only 3 types of # objects are possible: # - _ClassInfo: a class or struct. # - _NamespaceInfo: a namespace. # - _BlockInfo: some other type of block. self.stack = [] # Top of the previous stack before each Update(). # # Because the nesting_stack is updated at the end of each line, we # had to do some convoluted checks to find out what is the current # scope at the beginning of the line. This check is simplified by # saving the previous top of nesting stack. # # We could save the full stack, but we only need the top. Copying # the full nesting stack would slow down cpplint by ~10%. self.previous_stack_top = [] # Stack of _PreprocessorInfo objects. self.pp_stack = [] def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace def InNamespaceBody(self): """Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _NamespaceInfo) def InExternC(self): """Check if we are currently one level inside an 'extern "C"' block. Returns: True if top of the stack is an extern block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ExternCInfo) def InClassDeclaration(self): """Check if we are currently one level inside a class or struct declaration. Returns: True if top of the stack is a class/struct, False otherwise. 
""" return self.stack and isinstance(self.stack[-1], _ClassInfo) def InAsmBlock(self): """Check if we are currently one level inside an inline ASM block. Returns: True if the top of the stack is a block containing inline ASM. """ return self.stack and self.stack[-1].inline_asm != _NO_ASM def InTemplateArgumentList(self, clean_lines, linenum, pos): """Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. Returns: True if (linenum, pos) is inside template arguments. """ while linenum < clean_lines.NumLines(): # Find the earliest character that might indicate a template argument line = clean_lines.elided[linenum] match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) if not match: linenum += 1 pos = 0 continue token = match.group(1) pos += len(match.group(0)) # These things do not look like template argument list: # class Suspect { # class Suspect x; } if token in ('{', '}', ';'): return False # These things look like template argument list: # template <class Suspect> # template <class Suspect = default_value> # template <class Suspect[]> # template <class Suspect...> if token in ('>', '=', '[', ']', '.'): return True # Check if token is an unmatched '<'. # If not, move on to the next character. if token != '<': pos += 1 if pos >= len(line): linenum += 1 pos = 0 continue # We can't be sure if we just find a single '<', and need to # find the matching '>'. (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) if end_pos < 0: # Not sure if template argument list or syntax error in file return False linenum = end_line pos = end_pos return False def UpdatePreprocessor(self, line): """Update preprocessor stack. We need to handle preprocessors due to classes like this: #ifdef SWIG struct ResultDetailsPageElementExtensionPoint { #else struct ResultDetailsPageElementExtensionPoint : public Extension { #endif We make the following assumptions (good enough for most files): - Preprocessor condition evaluates to true from #if up to first #else/#elif/#endif. - Preprocessor condition evaluates to false from #else/#elif up to #endif. We still perform lint checks on these lines, but these do not affect nesting stack. Args: line: current line to check. """ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): # Beginning of #if block, save the nesting stack here. The saved # stack will allow us to restore the parsing state in the #else case. self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) elif Match(r'^\s*#\s*(else|elif)\b', line): # Beginning of #else block if self.pp_stack: if not self.pp_stack[-1].seen_else: # This is the first #else or #elif block. Remember the # whole nesting stack up to this point. This is what we # keep after the #endif. self.pp_stack[-1].seen_else = True self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) # Restore the stack to how it was before the #if self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) else: # TODO(unknown): unexpected #else, issue warning? pass elif Match(r'^\s*#\s*endif\b', line): # End of #if or #else blocks. if self.pp_stack: # If we saw an #else, we will need to restore the nesting # stack to its former state before the #else, otherwise we # will just continue from where we left off. if self.pp_stack[-1].seen_else: # Here we can just use a shallow copy since we are the last # reference to it. 
self.stack = self.pp_stack[-1].stack_before_else # Drop the corresponding #if self.pp_stack.pop() else: # TODO(unknown): unexpected #endif, issue warning? pass # TODO(unknown): Update() is too long, but we will refactor later. def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remember top of the previous nesting stack. # # The stack is always pushed/popped and not modified in place, so # we can just do a shallow copy instead of copy.deepcopy. Using # deepcopy would slow down cpplint by ~28%. if self.stack: self.previous_stack_top = self.stack[-1] else: self.previous_stack_top = None # Update pp_stack self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; class_decl_match = Match( r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' r'(.*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): # We do not want to accept classes that are actually template arguments: # template <class Ignore1, # class Ignore2 = Default<Args>, # template <Args> class Ignore3> # void Function() {}; # # To avoid template argument cases, we scan forward and look for # an unmatched '>'. If we see one, assume we are inside a # template argument list. 
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
        line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)', line)
      if access_match:
        classinfo.access = access_match.group(2)

        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM
      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.

    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching.  See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
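    # A sketch (assumed, mirroring the unittest reference above) of a
    # construct that trips this check:
    #   #ifdef LANG_CXX11
    #   class Foo {
    #   #endif
    # The '{' seen on one preprocessor branch may never be closed, so a
    # _ClassInfo can remain on the stack at end of file.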
for obj in self.stack: if isinstance(obj, _ClassInfo): error(filename, obj.starting_linenum, 'build/class', 5, 'Failed to find complete declaration of class %s' % obj.name) elif isinstance(obj, _NamespaceInfo): error(filename, obj.starting_linenum, 'build/namespaces', 5, 'Failed to find complete declaration of namespace %s' % obj.name) def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. Complain about several constructs which gcc-2 accepts, but which are not standard C++. Warning about these in lint is one way to ease the transition to new compilers. - put storage class first (e.g. "static const" instead of "const static"). - "%lld" instead of %qd" in printf-type functions. - "%1$d" is non-standard in printf-type functions. - "\%" is an undefined character escape sequence. - text after #endif is not allowed. - invalid inner-style forward declaration. - >? and <? operators, and their >?= and <?= cousins. Additionally, check for constructor/destructor style violations and reference members, as it is very convenient to do so while checking for gcc-2 compliance. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message """ # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[linenum] if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') if Search(r'printf\s*\(.*".*%\d+\$', line): error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') if Search(r'("|\').*\\(%|\[|\(|{)', line): error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. line = clean_lines.elided[linenum] if Search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line): error(filename, linenum, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.') if Match(r'\s*#\s*endif\s*[^/\s]+', line): error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.') if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): # TODO(unknown): Could it be expanded safely to arbitrary references, # without triggering too many false positives? The first # attempt triggered 5 warnings for mostly benign code in the regtest, hence # the restriction. # Here's the original regexp, for the reference: # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' 
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.') # Everything else in this function operates on class declarations. # Return early if the top of the nesting stack is not a class, or if # the class head is not completed yet. classinfo = nesting_state.InnermostClass() if not classinfo or not classinfo.seen_open_brace: return # The class may have been declared with namespace or classname qualifiers. # The constructor and destructor will not have those qualifiers. base_classname = classinfo.name.split('::')[-1] # Look for single-argument constructors that aren't marked explicit. # Technically a valid construct, but against style. Also look for # non-single-argument constructors which are also technically valid, but # strongly suggest something is wrong. explicit_constructor_match = Match( r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line) if explicit_constructor_match: is_marked_explicit = explicit_constructor_match.group(1) if not explicit_constructor_match.group(2): constructor_args = [] else: constructor_args = explicit_constructor_match.group(2).split(',') # collapse arguments so that commas in template parameter lists and function # argument parameter lists don't split arguments in two i = 0 while i < len(constructor_args): constructor_arg = constructor_args[i] while (constructor_arg.count('<') > constructor_arg.count('>') or constructor_arg.count('(') > constructor_arg.count(')')): constructor_arg += ',' + constructor_args[i + 1] del constructor_args[i + 1] constructor_args[i] = constructor_arg i += 1 defaulted_args = [arg for arg in constructor_args if '=' in arg] noarg_constructor = (not constructor_args or # empty arg list # 'void' arg specifier (len(constructor_args) == 1 and constructor_args[0].strip() == 'void')) onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg not noarg_constructor) or # all but at most one arg defaulted (len(constructor_args) >= 1 and not noarg_constructor and len(defaulted_args) >= len(constructor_args) - 1)) initializer_list_constructor = bool( onearg_constructor and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) copy_constructor = bool( onearg_constructor and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), constructor_args[0].strip())) if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor): if defaulted_args: error(filename, linenum, 'runtime/explicit', 5, 'Constructors callable with one argument ' 'should be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 5, 'Single-parameter constructors should be marked explicit.') elif is_marked_explicit and not onearg_constructor: if noarg_constructor: error(filename, linenum, 'runtime/explicit', 5, 'Zero-parameter constructors should not be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 0, 'Constructors that require multiple arguments ' 'should not be marked explicit.') def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): """Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. 
error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Since function calls often occur inside if/for/while/switch # expressions - which have their own, more liberal conventions - we # first see if we should be looking inside such an expression for a # function call, to which we can apply more strict standards. fncall = line # if there's no control flow construct, look at whole line for pattern in (r'\bif\s*\((.*)\)\s*{', r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break # Except in if/for/while/switch, there should never be space # immediately inside parens (eg "f( 3, 4 )"). We make an exception # for nested parens ( (a+b) + c ). Likewise, there should never be # a space before a ( when it's a function argument. I assume it's a # function argument when the char before the whitespace is legal in # a function name (alnum + _) and we're not starting a macro. Also ignore # pointers and references to arrays and functions coz they're too tricky: # we use a very simple way to recognize these: # " (something)(maybe-something)" or # " (something)(maybe-something," or # " (something)[something]" # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if (Search(r'\w\s+\(', fncall) and not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)): # TODO(unknown): Space after an operator function seem to be a common # error, silence those for now by restricting them to highest verbosity. if Search(r'\boperator_*\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain if Search(r'[^)]\s+\)\s*[^{\s]', fncall): # If the closing parenthesis is preceded by only whitespaces, # try to give a more descriptive error message. if Search(r'^\s+\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )') def IsBlankLine(line): """Returns true if the given line is blank. We consider a line to be blank if the line is empty or consists of only white spaces. Args: line: A line of a string. Returns: True, if the given line is blank. 
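  Examples (illustrative):
    IsBlankLine('')       -> True
    IsBlankLine('   \t')  -> True
    IsBlankLine('  ;')    -> False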
""" return not line or line.isspace() def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error): is_namespace_indent_item = ( len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and nesting_state.previous_stack_top == nesting_state.stack[-2]) if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, clean_lines.elided, line): CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error) def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): """Reports for long function bodies. For an overview why this is done, see: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. Only checks unindented functions, so class members are unchecked. Trivial bodies are unchecked, so constructors with huge initializer lists may be missed. Blank/comment lines are not counted so as to avoid encouraging the removal of vertical space and comments just to get through a lint check. NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ lines = clean_lines.lines line = lines[linenum] joined_line = '' starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... match_result = Match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( not Match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False for start_linenum in xrange(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() if Search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore elif Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count() # Count non-blank/non-comment lines. _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') def CheckComment(line, filename, linenum, next_line_start, error): """Checks for common mistakes in comments. Args: line: The line in question. filename: The name of the current file. linenum: The number of the line to check. next_line_start: The first non-whitespace column of the next line. error: The function to call with any errors found. """ commentpos = line.find('//') if commentpos != -1: # Check if the // may be in quotes. 
If so, ignore it # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison if (line.count('"', 0, commentpos) - line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes # Allow one space for new scopes, two spaces otherwise: if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and ((commentpos >= 1 and line[commentpos-1] not in string.whitespace) or (commentpos >= 2 and line[commentpos-2] not in string.whitespace))): error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments') # Checks for common mistakes in TODO comments. comment = line[commentpos:] match = _RE_PATTERN_TODO.match(comment) if match: # One whitespace is correct; zero whitespace is handled elsewhere. leading_whitespace = match.group(1) if len(leading_whitespace) > 1: error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if not username: error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like ' '"// TODO(my_username): Stuff."') middle_whitespace = match.group(3) # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison if middle_whitespace != ' ' and middle_whitespace != '': error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') # If the comment contains an alphanumeric character, there # should be a space somewhere between it and the //. if Match(r'//[^ ]*\w', comment): error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment') def CheckAccess(filename, clean_lines, linenum, nesting_state, error): """Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: # Found DISALLOW* macro outside a class declaration, or perhaps it # was used inside a function when it should have been part of the # class declaration. We could issue a warning here, but it # probably resulted in a compiler error already. pass def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for the correctness of various spacing issues in the code. Things we check for: spaces around operators, spaces after if/for/while/switch, no spaces around parens in function calls, two spaces between code and comment, don't start a block with a blank line, don't end a function with a blank line, don't add a blank line after public/protected/private, don't have too many blank lines in a row. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. 
""" # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw = clean_lines.lines_without_raw_strings line = raw[linenum] # Before nixing comments, check if the line is blank for no good # reason. This includes the first line after a block is opened, and # blank lines at the end of a function (ie, right before a line like '}' # # Skip all the blank line checks if we are immediately inside a # namespace body. In other words, don't issue blank line warnings # for this block: # namespace { # # } # # A warning about missing end of namespace comments will be issued instead. # # Also skip blank line checks for 'extern "C"' blocks, which are formatted # like namespaces. if (IsBlankLine(line) and not nesting_state.InNamespaceBody() and not nesting_state.InExternC()): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') # TODO(unknown): Don't complain if line before blank line, and line after, # both start with alnums and are indented the same amount. # This ignores whitespace at the start of a namespace block # because those are not usually indented. if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: # OK, we have a blank line at the start of a code block. Before we # complain, we check if it is an exception to the rule: The previous # non-empty line has the parameters of a function header that are indented # 4 spaces (because they did not fit in a 80 column line when placed on # the same line as the function name). We also check for the case where # the previous line is indented 6 spaces, which may happen when the # initializers of a constructor do not fit into a 80 column line. exception = False if Match(r' {6}\w', prev_line): # Initializer list? # We are looking for the opening column of initializer list, which # should be indented 4 spaces to cause 6 space indentation afterwards. search_position = linenum-2 while (search_position >= 0 and Match(r' {6}\w', elided[search_position])): search_position -= 1 exception = (search_position >= 0 and elided[search_position][:5] == ' :') else: # Search for the function arguments or an initializer list. We use a # simple heuristic here: If the line is indented 4 spaces; and we have a # closing paren, without the opening paren, followed by an opening brace # or colon (for initializer lists) we assume that it is the last line of # a function header. If we have a colon indented 4 spaces, it is an # initializer list. 
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', prev_line) or Match(r' {4}:', prev_line)) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block ' 'should be deleted.') # Ignore blank lines at the end of a block in a long if-else # chain, like this: # if (condition1) { # // Something followed by a blank line # # } else if (condition2) { # // Something else # } if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if (next_line and Match(r'\s*}', next_line) and next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block ' 'should be deleted.') matched = Match(r'\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) # Next, check comments next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] next_line_start = len(next_line) - len(next_line.lstrip()) CheckComment(line, filename, linenum, next_line_start, error) # get rid of comments and strings line = clean_lines.elided[linenum] # You shouldn't have spaces before your brackets, except maybe after # 'delete []' or 'return []() {};' if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') # In range-based for, we wanted spaces before and after the colon, but # not around "::" tokens that might appear. if (Search(r'for *\(.*[^:]:[^: ]', line) or Search(r'for *\(.*[^: ]:[^:]', line)): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') def CheckOperatorSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') # It's ok not to have spaces around binary operators like + - * /, but if # there's too little whitespace, we get concerned. It's hard to tell, # though, so we punt on this one for now. TODO. # You should always have whitespace around binary operators. # # Check <= and >= first to avoid false positives with < and >, then # check non-include lines for spacing around < and >. # # If the operator is followed by a comma, assume it's be used in a # macro context and don't do any checks. This avoids false # positives. # # Note that && is not included here. 
Those are checked separately # in CheckRValueReference match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though # technically should should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. match = Match(r'^(.*[^\s<])<[^\s=<,]', line) if match: (_, _, end_pos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') # Look for > that is not surrounded by spaces. Similar to the # above, we only trigger if both sides are missing spaces to avoid # false positives with shifts. match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression( clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') # We allow no-spaces around << when used like this: 10<<20, but # not otherwise (particularly, not when used as streams) # # We also allow operators following an opening parenthesis, since # those tend to be macros that deal with operators. match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line) if (match and match.group(1) != '(' and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') # We allow no-spaces around >> for almost anything. This is because # C++11 allows ">>" to close nested templates, which accounts for # most cases when ">>" is not followed by a space. # # We still warn on ">>" followed by alpha character, because that is # likely due to ">>" being used for right shifts, e.g.: # value >> alpha # # When ">>" is used to close templates, the alphanumeric letter that # follows would be part of an identifier, and there should still be # a space separating the template type and the identifier. # type<type<type>> alpha match = Search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1)) def CheckParenthesisSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around parentheses. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # No spaces after an if, while, switch, or for match = Search(r' (if\(|for\(|while\(|switch\()', line) if match: error(filename, linenum, 'whitespace/parens', 5, 'Missing space before ( in %s' % match.group(1)) # For if/for/while/switch, the left and right parens should be # consistent about how many spaces are inside the parens, and # there should either be zero or one spaces inside the parens. # We don't want: "if ( foo)" or "if ( foo )". # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. 
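  # For example (illustrative), a line like "if ( foo) {" yields
  # group(2) == ' ' and group(4) == '', so the spaces mismatch and
  # 'Mismatching spaces inside () in if' is reported below.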
match = Search(r'\b(if|for|while|switch)\s*' r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): error(filename, linenum, 'whitespace/parens', 5, 'Mismatching spaces inside () in %s' % match.group(1)) if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, 'Should have zero or one spaces inside ( and ) in %s' % match.group(1)) def CheckCommaSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ raw = clean_lines.lines_without_raw_strings line = clean_lines.elided[linenum] # You should always have a space after a comma (either as fn arg or operator) # # This does not apply when the non-space character following the # comma is another comma, since the only time when that happens is # for empty macro arguments. # # We run this check in two passes: first pass on elided lines to # verify that lines contain missing whitespaces, second pass on raw # lines to confirm that those missing whitespaces are not due to # elided comments. if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and Search(r',[^,\s]', raw[linenum])): error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,') # You should always have a space after a semicolon # except for few corner cases # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more # space after ; if Search(r';[^\s};\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') def CheckBracesSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing near commas. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Except after an opening paren, or after another opening brace (in case of # an initializer list, for instance), you should have spaces before your # braces. And since you should never have braces at the beginning of a line, # this is an easy test. match = Match(r'^(.*[^ ({]){', line) if match: # Try a bit harder to check for brace initialization. This # happens in one of the following forms: # Constructor() : initializer_list_{} { ... } # Constructor{}.MemberFunction() # Type variable{}; # FunctionCall(type{}, ...); # LastArgument(..., type{}); # LOG(INFO) << type{} << " ..."; # map_of_type[{...}] = ...; # ternary = expr ? new type{} : nullptr; # OuterTemplate<InnerTemplateConstructor<Type>{}> # # We check for the character following the closing brace, and # silence the warning if it's one of those listed above, i.e. # "{.;,)<>]:". # # To account for nested initializer list, we allow any number of # closing braces up to "{;,)<". We can't simply silence the # warning on first sight of closing brace, because that would # cause false negatives for things that are not initializer lists. # Silence this: But not this: # Outer{ if (...) { # Inner{...} if (...){ // Missing space before { # }; } # # There is a false negative with this approach if people inserted # spurious semicolons, e.g. 
"if (cond){};", but we will catch the # spurious semicolon with a separate check. (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) trailing_text = '' if endpos > -1: trailing_text = endline[endpos:] for offset in xrange(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)): trailing_text += clean_lines.elided[offset] if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {') # Make sure '} else {' has spaces. if Search(r'}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. if Search(r':\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search(r'^\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.') elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.') def IsDecltype(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise. """ (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if start_col < 0: return False if Search(r'\bdecltype\s*$', text[0:start_col]): return True return False def IsTemplateParameterList(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is the end of template<>. Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is end of a template parameter list, False otherwise. """ (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, column) if (startpos > -1 and Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): return True return False def IsRValueType(clean_lines, nesting_state, linenum, column): """Check if the token ending on (linenum, column) is a type. Assumes that text to the right of the column is "&&" or a function name. Args: clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is a type, False if we are not sure. """ prefix = clean_lines.elided[linenum][0:column] # Get one word to the left. If we failed to do so, this is most # likely not a type, since it's unlikely that the type name and "&&" # would be split across multiple lines. match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix) if not match: return False # Check text following the token. If it's "&&>" or "&&," or "&&...", it's # most likely a rvalue reference used inside a template. 
suffix = clean_lines.elided[linenum][column:] if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix): return True # Check for simple type and end of templates: # int&& variable # vector<int>&& variable # # Because this function is called recursively, we also need to # recognize pointer and reference types: # int* Function() # int& Function() if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool', 'short', 'int', 'long', 'signed', 'unsigned', 'float', 'double', 'void', 'auto', '>', '*', '&']: return True # If we see a close parenthesis, look for decltype on the other side. # decltype would unambiguously identify a type, anything else is # probably a parenthesized expression and not a type. if match.group(2) == ')': return IsDecltype( clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1) # Check for casts and cv-qualifiers. # match.group(1) remainder # -------------- --------- # const_cast< type&& # const type&& # type const&& if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|' r'reinterpret_cast\s*<|\w+\s)\s*$', match.group(1)): return True # Look for a preceding symbol that might help differentiate the context. # These are the cases that would be ambiguous: # match.group(1) remainder # -------------- --------- # Call ( expression && # Declaration ( type&& # sizeof ( type&& # if ( expression && # while ( expression && # for ( type&& # for( ; expression && # statement ; type&& # block { type&& # constructor { expression && start = linenum line = match.group(1) match_symbol = None while start >= 0: # We want to skip over identifiers and commas to get to a symbol. # Commas are skipped so that we can find the opening parenthesis # for function parameter lists. match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line) if match_symbol: break start -= 1 line = clean_lines.elided[start] if not match_symbol: # Probably the first statement in the file is an rvalue reference return True if match_symbol.group(2) == '}': # Found closing brace, probably an indicate of this: # block{} type&& return True if match_symbol.group(2) == ';': # Found semicolon, probably one of these: # for(; expression && # statement; type&& # Look for the previous 'for(' in the previous lines. before_text = match_symbol.group(1) for i in xrange(start - 1, max(start - 6, 0), -1): before_text = clean_lines.elided[i] + before_text if Search(r'for\s*\([^{};]*$', before_text): # This is the condition inside a for-loop return False # Did not find a for-init-statement before this semicolon, so this # is probably a new statement and not a condition. return True if match_symbol.group(2) == '{': # Found opening brace, probably one of these: # block{ type&& = ... ; } # constructor{ expression && expression } # Look for a closing brace or a semicolon. If we see a semicolon # first, this is probably a rvalue reference. line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1] end = start depth = 1 while True: for ch in line: if ch == ';': return True elif ch == '{': depth += 1 elif ch == '}': depth -= 1 if depth == 0: return False end += 1 if end >= clean_lines.NumLines(): break line = clean_lines.elided[end] # Incomplete program? return False if match_symbol.group(2) == '(': # Opening parenthesis. Need to check what's to the left of the # parenthesis. Look back one extra line for additional context. 
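    # For example (illustrative), one line of look-back is what lets us
    # classify the "&&" in declarations that wrap after the function name:
    #   void Frobnicate(
    #       Type&& param);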
    before_text = match_symbol.group(1)
    if linenum > 1:
      before_text = clean_lines.elided[linenum - 1] + before_text

    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True

    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False

    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      return IsRValueType(clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))

    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())

  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))

  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False


def IsDeletedOrDefault(clean_lines, linenum):
  """Check if current constructor or operator is deleted or default.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if this is a deleted or default constructor.
  """
  open_paren = clean_lines.elided[linenum].find('(')
  if open_paren < 0:
    return False
  (close_line, _, close_paren) = CloseExpression(
      clean_lines, linenum, open_paren)
  if close_paren < 0:
    return False
  return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])


def IsRValueAllowed(clean_lines, linenum):
  """Check if RValue reference is allowed on a particular line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if line is within the region where RValue references are allowed.
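
  For example (illustrative), rvalue references on lines like these are
  allowed:

    GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH
    void Process(string&& s);
    GOOGLE_ALLOW_RVALUE_REFERENCES_POP

    Foo& operator=(Foo&& other) = default;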
""" # Allow region marked by PUSH/POP macros for i in xrange(linenum, 0, -1): line = clean_lines.elided[i] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): if not line.endswith('PUSH'): return False for j in xrange(linenum, clean_lines.NumLines(), 1): line = clean_lines.elided[j] if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): return line.endswith('POP') # Allow operator= line = clean_lines.elided[linenum] if Search(r'\boperator\s*=\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) # Allow constructors match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) if match and match.group(1) == match.group(2): return IsDeletedOrDefault(clean_lines, linenum) if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): return IsDeletedOrDefault(clean_lines, linenum) if Match(r'\s*[\w<>]+\s*\(', line): previous_line = 'ReturnType' if linenum > 0: previous_line = clean_lines.elided[linenum - 1] if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): return IsDeletedOrDefault(clean_lines, linenum) return False def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): """Check for rvalue references. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Find lines missing spaces around &&. # TODO(unknown): currently we don't check for rvalue references # with spaces surrounding the && to avoid false positives with # boolean expressions. line = clean_lines.elided[linenum] match = Match(r'^(.*\S)&&', line) if not match: match = Match(r'(.*)&&\S', line) if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): return # Either poorly formed && or an rvalue reference, check the context # to get a more accurate error message. Mostly we want to determine # if what's to the left of "&&" is a type or not. and_pos = len(match.group(1)) if IsRValueType(clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&') def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): """Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found. """ # Skip checks if the class is small, where small means 25 lines or less. # 25 lines seems like a good cutoff since that's the usual height of # terminals, and any class that can't fit in one screen can't really # be considered "small". # # Also skip checks if we are on the first line. This accounts for # classes that look like # class Foo { public: ... }; # # If we didn't find the end of the class, last_line would be zero, # and the check will be skipped by the first condition. 
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):',
                  clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    # Also ignores cases where the previous line ends with a backslash as can
    # be common when defining classes in C macros.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line) and
        not Search(r'\\$', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))


def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  prevlinenum = linenum - 1
  while prevlinenum >= 0:
    prevline = clean_lines.elided[prevlinenum]
    if not IsBlankLine(prevline):     # if not a blank line...
      return (prevline, prevlinenum)
    prevlinenum -= 1
  return ('', -1)


def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]        # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is
    # using braces in a block to explicitly create a new scope, which is
    # commonly used to control the lifetime of stack-allocated variables.
    # Braces are also used for brace initializers inside function calls.
    # We don't detect this perfectly: we just don't complain if the last
    # non-whitespace character on the previous non-blank line is ',', ';',
    # ':', '(', '{', or '}', or if the previous line starts a preprocessor
    # block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
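  # For example (illustrative), this is reported as whitespace/newline:
  #   }
  #   else if (bar) {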
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match(r'\s*}\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') # If braces come on one side of an else, they should be on both. # However, we have to worry about "else if" that spans multiple lines! if Search(r'else if\s*\(', line): # could be multi-line if brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') # Likewise, an else should never have the else clause on the same line if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if Match(r'\s*do [^\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Check single-line if/else bodies. The style guide says 'curly braces are not # required for single-line statements'. We additionally allow multi-line, # single statements, but we reject anything with more than one semicolon in # it. This means that the first semicolon after the if should be at the end of # its line, and the line after that should have an indent level equal to or # lower than the if. We also check for ambiguous if/else nesting without # braces. if_else_match = Search(r'\b(if\s*\(|else\b)', line) if if_else_match and not Match(r'\s*#', line): if_indent = GetIndentLevel(line) endline, endlinenum, endpos = line, linenum, if_else_match.end() if_match = Search(r'\bif\s*\(', line) if if_match: # This could be a multiline if condition, so find the end first. pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) # Check for an opening brace, either directly after the if or on the next # line. If found, this isn't a single-statement conditional. if (not Match(r'\s*{', endline[endpos:]) and not (Match(r'\s*$', endline[endpos:]) and endlinenum < (len(clean_lines.elided) - 1) and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): while (endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]): endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] # We allow a mix of whitespace and closing braces (e.g. for one-liner # methods) and a single \ after the semicolon (for macros) endpos = endline.find(';') if not Match(r';[\s}]*(\\?)$', endline[endpos:]): # Semicolon isn't the last character, there's something trailing. # Output a warning if the semicolon is not contained inside # a lambda expression. 
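          # For example (illustrative), this one-liner is not warned,
          # because the text after the first ';' is a lambda body:
          #   if (done) callback_ = [this] { Reset(); };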
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: # Make sure the next line is dedented next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) # With ambiguous nested if statements, this will error out on the # if that *doesn't* match the else, regardless of whether it's the # inner one or outer one. if (if_match and Match(r'\s*else\b', next_line) and next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. ' 'Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. # # In addition to macros, we also don't want to warn on compound # literals and lambdas. 
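    # For example (illustrative):
    #   TEST_F(FooTest, Bar) {};   // safe-listed macro: the ';' is reported
    #   DEFINE_BASE_CLASS(X) {};   // unknown macro: deliberately not warned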
closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") def CheckEmptyBlockBody(filename, clean_lines, linenum, error): """Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Search for loop keywords at the beginning of the line. Because only # whitespaces are allowed before the keywords, this will also ignore most # do-while-loops, since those lines should start with closing brace. # # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: # Find the end of the conditional expression (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') def FindCheckMacro(line): """Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found. 
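
  For example (illustrative, assuming 'CHECK' is listed in _CHECK_MACROS):

    >>> FindCheckMacro('  CHECK(x == 42);')
    ('CHECK', 7)
    >>> FindCheckMacro('  RECHECK(x);')
    (None, -1)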
""" for macro in _CHECK_MACROS: i = line.find(macro) if i >= 0: # Find opening parenthesis. Do a regular expression match here # to make sure that we are matching the expected CHECK macro, as # opposed to some other macro that happens to contain the CHECK # substring. matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) if not matched: continue return (macro, len(matched.group(1))) return (None, -1) def CheckCheck(filename, clean_lines, linenum, error): """Checks the use of CHECK and EXPECT macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Decide the set of replacement macros that should be suggested lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return # Find end of the boolean expression by matching parentheses (last_line, end_line, end_pos) = CloseExpression( clean_lines, linenum, start_pos) if end_pos < 0: return # If the check macro is followed by something other than a # semicolon, assume users will log their own custom error messages # and don't suggest any replacements. if not Match(r'\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] # Parse expression so that we can take parentheses into account. # This avoids false positives for inputs like "CHECK((a < 4) == b)", # which is not replaceable by CHECK_LE. lhs = '' rhs = '' operator = None while expression: matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': # Parenthesized operand expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return # Unmatched parenthesis lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): # Logical and/or operators. This means the expression # contains more than one term, for example: # CHECK(42 < a && a < b); # # These are not replaceable with CHECK_LE, so bail out early. return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): # Non-relational operator lhs += token expression = matched.group(2) else: # Relational operator operator = token rhs = matched.group(2) break else: # Unparenthesized operand. Instead of appending to lhs one character # at a time, we do another regular expression match to consume several # characters at once if possible. Trivial benchmark shows that this # is more efficient when the operands are longer than a single # character, which is generally the case. matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) # Only apply checks if we got all parts of the boolean expression if not (lhs and operator and rhs): return # Check that rhs do not contain logical operators. We already know # that lhs is fine since the loop above parses out && and ||. if rhs.find('&&') > -1 or rhs.find('||') > -1: return # At least one of the operands must be a constant literal. 
This is # to avoid suggesting replacements for unprintable things like # CHECK(variable != iterator) # # The following pattern matches decimal, hex integers, strings, and # characters (in that order). lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) # Instead of: # Consider using CHECK_EQ instead of CHECK(a == b) # # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) def CheckAltTokens(filename, clean_lines, linenum, error): """Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) def GetLineWidth(line): """Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line) def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. 
# Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. # NOTE: here are the conditions rob pike used for his tests. Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. elif ((initial_spaces == 1 or initial_spaces == 3) and not Match(scope_or_label_pattern, cleansed_line) and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') # Check if the line is a header guard. is_header_guard = False if file_extension == 'h': cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. # # The "$Id:...$" comment may also get very long without it being the # developers fault. if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): line_width = GetLineWidth(line) extended_length = int((_line_length * 1.25)) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # for loops are allowed two ;'s (and may run over two lines). 
cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) CheckBracesSpacing(filename, clean_lines, linenum, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') # Matches the first component of a filename delimited by -s and _s. That is: # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0] def _IsTestFilename(filename): """Determines if the given filename has a suffix that identifies it as a test. Args: filename: The input filename. Returns: True if 'filename' looks like a test, False otherwise. """ if (filename.endswith('_test.cc') or filename.endswith('_unittest.cc') or filename.endswith('_regtest.cc')): return True else: return False def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 
'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. is_cpp_h = include in _CPP_HEADERS if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) if target_base == include_base and ( include_dir == target_dir or include_dir == os.path.normpath(target_dir + '/../public')): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): """Check rules that are applicable to #include lines. Strings on #include lines are NOT removed from elided line, to make certain tasks easier. However, to prevent false positives, checks applicable to #include lines in CheckLanguage must be put here. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. include_state: An _IncludeState instance in which the headers are inserted. error: The function to call with any errors found. """ fileinfo = FileInfo(filename) line = clean_lines.lines[linenum] # "include" should use the new style "foo/bar.h" instead of just "bar.h" # Only do this check if the included header follows google naming # conventions. If not, assume that it's a 3rd party API that # requires special include conventions. # # We also make an exception for Lua headers, which follow google # naming convention but not the include convention. match = Match(r'#include\s*"([^/]+\.h)"', line) if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): error(filename, linenum, 'build/include', 4, 'Include the directory when naming .h files') # we shouldn't include a file more than once. actually, there are a # handful of instances where doing so is okay, but in general it's # not. match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) is_system = (match.group(1) == '<') duplicate_line = include_state.FindHeader(include) if duplicate_line >= 0: error(filename, linenum, 'build/include', 4, '"%s" already included at %s:%s' % (include, filename, duplicate_line)) elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): include_state.include_list[-1].append((include, linenum)) # We want to ensure that headers appear in the right order: # 1) for foo.cc, foo.h (preferred location) # 2) c system files # 3) cpp system files # 4) for foo.cc, foo.h (deprecated location) # 5) other google headers # # We classify each include statement as one of those 5 types # using a number of techniques. 
The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)

  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        # Suggest a different header for ostream
        if include == 'ostream':
          error(filename, linenum, 'readability/streams', 3,
                'For logging, include "base/logging.h" instead of <ostream>.')
        else:
          error(filename, linenum, 'readability/streams', 3,
                'Streams are highly discouraged.')


def _GetTextInside(text, start_pattern):
  r"""Retrieves all the text between matching open and close parentheses.

  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuations, so for the text like
    printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string having an open punctuation symbol at the
  end.

  Args:
    text: The lines to extract text. Its comments and strings must be elided.
          It can be single line and can span multiple lines.
    start_pattern: The regexp string indicating where to start extracting
                   the text.
  Returns:
    The extracted text.
    None if either the opening string or ending punctuation could not be found.
  """
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and use inferior regexp matching today).

  # Give opening punctuations to get the matching close-punctuations.
  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
  closing_punctuation = set(matching_punctuation.itervalues())

  # Find the position to start extracting text.
  match = re.search(start_pattern, text, re.M)
  if not match:  # start_pattern not found in text.
    return None
  start_position = match.end(0)

  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
  # Stack of closing punctuations we expect to have in text after position.
  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
  position = start_position
  while punctuation_stack and position < len(text):
    if text[position] == punctuation_stack[-1]:
      punctuation_stack.pop()
    elif text[position] in closing_punctuation:
      # A closing punctuation without matching opening punctuations.
      return None
    elif text[position] in matching_punctuation:
      punctuation_stack.append(matching_punctuation[text[position]])
    position += 1
  if punctuation_stack:
    # Opening punctuations left without matching close-punctuations.
    return None
  # punctuations match.
  return text[start_position:position - 1]


# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           |   [^<>] )*
#         >
#     |   [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')


def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives.  This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types.  The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous.  Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives.  '
          'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays.  Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # If DISALLOW_COPY_AND_ASSIGN or DISALLOW_IMPLICIT_CONSTRUCTORS is present,
  # then it should be the last thing in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class.  The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays.  It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')

  # Check for use of unnamed namespaces in header files.  Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files.  See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')


def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators.  These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' % (match.group(1), match.group(2)))

  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')


def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # When snprintf is used, the second argument shouldn't be a literal.
  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if match and match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (match.group(1), match.group(2)))

  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % match.group(1))


def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines for start of current function
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
    if match:
      # Look for "override" after the matching closing parenthesis
      line, _, closing_paren = CloseExpression(
          clean_lines, i, len(match.group(1)))
      return (closing_paren >= 0 and
              Search(r'\boverride\b', line[closing_paren:]))
  return False


def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)

    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list.  It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False

  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False


def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive.  If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters.  A single '&' may
  # be found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists.  We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>".  Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line.  Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list.  Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))


def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using a conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces.  This is a fast way to
    # silence the common case where the function type is the first
    # template argument.  False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)>   // bracket + no space = false positive
    #   value < double(42)         // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style.  '
            'Use static_cast<%s>(...) instead' % matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases.  Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast.  This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast.  These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)]+)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match and match.group(1) != '*':
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast?  Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast?  '
             'This is dangerous: could be a temp var.  '
             'Take the address before doing the cast, rather than after'))


def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we are one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
    return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old
  # style cast.  If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly requires all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
           remainder):
    # Looks like an unnamed parameter.
    #
    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False

    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions.  Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False

    # Don't warn on function pointer declarations.  For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False

    # Don't warn if the parameter is named with block comments, e.g.:
    #  Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False

    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast.  Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True


def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  line = clean_lines.elided[linenum]
  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
          (linenum >= 2 and
           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
                  clean_lines.elided[linenum - 1]) or
            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                  clean_lines.elided[linenum - 2]) or
            Search(r'\bstd::m?function\s*\<\s*$',
                   clean_lines.elided[linenum - 1]))))


_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )

_RE_PATTERN_STRING = re.compile(r'\bstring\b')

_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))


def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file.  We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation.  Because of this, this function gives
  some false positives.  This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """

  if not filename_cc.endswith('.cc'):
    return (False, '')
  filename_cc = filename_cc[:-len('.cc')]
  if filename_cc.endswith('_unittest'):
    filename_cc = filename_cc[:-len('_unittest')]
  elif filename_cc.endswith('_test'):
    filename_cc = filename_cc[:-len('_test')]
  filename_cc = filename_cc.replace('/public/', '/')
  filename_cc = filename_cc.replace('/internal/', '/')

  if not filename_h.endswith('.h'):
    return (False, '')
  filename_h = filename_h[:-len('.h')]
  if filename_h.endswith('-inl'):
    filename_h = filename_h[:-len('-inl')]
  filename_h = filename_h.replace('/public/', '/')
  filename_h = filename_h.replace('/internal/', '/')

  files_belong_to_same_module = filename_cc.endswith(filename_h)
  common_path = ''
  if files_belong_to_same_module:
    common_path = filename_cc[:-len(filename_h)]
  return files_belong_to_same_module, common_path


def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  headerfile = None
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  linenum = 0
  for line in headerfile:
    linenum += 1
    clean_line = CleanseComments(line)
    match = _RE_PATTERN_INCLUDE.search(clean_line)
    if match:
      include = match.group(2)
      include_dict.setdefault(include, linenum)
  return True


def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports missing STL includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can
  # be found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = include_dict.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)


_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
  if match:
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')


def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Check that default lambda captures are not used.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A lambda introducer specifies a default capture if it starts with "[="
  # or if it starts with "[&" _not_ followed by an identifier.
  match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
  if match:
    # Found a potential error, check what comes after the lambda-introducer.
    # If it's not open parenthesis (for lambda-declarator) or open brace
    # (for compound-statement), it's not a lambda.
    line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
    if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
      error(filename, linenum, 'build/c++11',
            4,  # 4 = high confidence
            'Default lambda captures are an unapproved C++ feature.')


def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*\bvirtual\b)', line)
  if not virtual: return

  # Look for the next opening parenthesis.  This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(1))
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    if Search(r'[^\w]\s*$', line):
      break


def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Check that at most one of "override" or "final" is present, not both
  line = clean_lines.elided[linenum]
  if Search(r'\boverride\b', line) and Search(r'\bfinal\b', line):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))


# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  if is_forward_declaration:
    if len(nesting_state.stack) >= 1 and (
        isinstance(nesting_state.stack[-1], _NamespaceInfo)):
      return True
    else:
      return False

  return (len(nesting_state.stack) > 1 and
          nesting_state.stack[-1].check_namespace_indentation and
          isinstance(nesting_state.stack[-2], _NamespaceInfo))


def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """

  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                     linenum)

  if not (is_namespace_indent_item or is_forward_declaration):
    return False

  # If we are in a macro, we do not want to check the namespace indentation.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, is_forward_declaration)


# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  line = raw_lines_no_comments[linenum]
  if Match(r'^\s+', line):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')


def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=[]):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)


def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Flag unapproved C++11 headers.
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include and include.group(1) in ('cfenv',
                                      'condition_variable',
                                      'fenv.h',
                                      'future',
                                      'mutex',
                                      'thread',
                                      'chrono',
                                      'ratio',
                                      'regex',
                                      'system_error',
                                     ):
    error(filename, linenum, 'build/c++11', 5,
          ('<%s> is an unapproved C++11 header.') % include.group(1))

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return

  # These are classes and free functions.  The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL.  They're alphabetical by header.
  for top_name in (
      # type_traits
      'alignment_of',
      'aligned_union',

      # utility
      'forward',
      ):
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function.  Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)


def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=[]):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file,
           with the last element being empty if the file is terminated
           with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)

  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)

  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)


def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """

  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  for filter in reversed(cfg_filters):
    _AddFilters(filter)

  return True


def ProcessFile(filename, vlevel, extra_check_functions=[]):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
    >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """

  _SetVerboseLevel(vlevel)
  _BackupFilters()

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  sys.stderr.write('Done processing %s\n' % filename)
  _RestoreFilters()


def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  else:
    sys.exit(1)


def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
  sys.exit(0)


def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=',
                                                 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and '
                   'eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        PrintUsage('Extensions must be comma separated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames


def main():
  filenames = ParseArguments(sys.argv[1:])

  # Change stderr to write with replacement characters so we don't die
  # if we try to print something containing non-ASCII characters.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')

  _cpplint_state.ResetErrorCounts()
  for filename in filenames:
    ProcessFile(filename, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()

  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()
[ "alexandrecperez@gmail.com" ]
alexandrecperez@gmail.com
db93795161562c704ef128162efea62145d2f060
0b80b985d83f9999658f0039472af20eec97f60d
/dl_code.py
b7742e3308dd4d4b5a68b20cf86e523350536631
[]
no_license
sahilm142/imdb-reviews-analysis
83955edc362fea056b5b01270f0936118d9d6da5
0f19fd0d02c3b734936b14f569d85f5a47e16c53
refs/heads/master
2020-05-15T12:18:33.109597
2019-04-19T20:38:03
2019-04-19T20:38:03
182,245,570
0
0
null
null
null
null
UTF-8
Python
false
false
2,094
py
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 11:30:32 2019

@author: Sahil
"""

import numpy as np
import pandas as pd
import csv


def create_dataset(folder_name, type_rev):
    '''
    Column names
    0: Type of review from top 250s 1: TV 2: Movies
    1: Serial no of type 0 in top 250
    2: Rating of review
    3: Review
    4: Sentiment Score (1-4: Negative->0 and 7-10: Positive->1)
    '''
    for j in range(1, 251):
        for i in [1, 2, 3, 4, 7, 8, 9, 10]:
            try:
                datas = open(folder_name + "/" + str(j) + "/" + str(i) + ".txt", "r")
                df = pd.read_csv(datas, sep='\n', header=None)
                # datas = open("Data/" + str(j) + "/summary/" + str(i) + ".txt", "r")
                # df_summ = pd.read_csv(datas, sep='\n', header=None)
            except:
                print("Token {0}:{1}".format(j, i))
                continue
            with open(folder_name + '.csv', 'a') as csvfile:
                k = 0
                while k < len(df):
                    try:
                        csv_writer = csv.writer(csvfile, delimiter=',')
                        # Ratings 1-4 are labeled negative (0), 7-10 positive (1).
                        if i < 5:
                            csv_writer.writerow([type_rev, j, i, df[0][k], 0])
                        else:
                            csv_writer.writerow([type_rev, j, i, df[0][k], 1])
                        k += 1
                    except:
                        print("{0} {1} {2} ".format(j, i, len(df)))
                        break


# Review type 1: TV 2: MOVIES
create_dataset("tv_250", 1)
create_dataset("movies_250", 2)

data_tv = pd.read_csv("tv_250.csv", header=None, encoding="latin-1")
data_movies = pd.read_csv("movies_250.csv", header=None, encoding="latin-1")
data = pd.concat([data_tv, data_movies])

# Reviews
reviews = data.iloc[:, 3].values
for i in range(len(reviews)):
    with open("final_data/reviews.txt", "a", encoding="latin-1") as f:
        f.writelines(reviews[i] + "\n")

# Labels
labels = data.iloc[:, 4].values
for i in range(len(labels)):
    with open("final_data/labels.txt", "a") as f:
        # Labels are read back as integers, so convert before concatenating.
        f.writelines(str(labels[i]) + "\n")
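# A sketch of the directory layout create_dataset above assumes, inferred
# from its open() calls (titles and counts here are illustrative):
#
#   tv_250/
#     1/          # the #1 ranked title
#       1.txt     # reviews rated 1 (negative bucket)
#       10.txt    # reviews rated 10 (positive bucket)
#     2/
#       ...
#   movies_250/
#     ...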
[ "sahil.mansoori.143@gmail.com" ]
sahil.mansoori.143@gmail.com
117fc293b953a162050b93cb0bc575cb49d741c8
30846dedeb87be7ba9894427122f6263fc99e67f
/courseSelection/urls.py
32ccc9fee82c3d18a272e7fc0c3b235ddc065d70
[]
no_license
nslam/jwsys
003f2f6e5e4942182f6c2f9c35237a6127bc8015
c6958e128109cdffd830d69fc3a9d0bae0fac3d3
refs/heads/master
2021-01-23T05:56:05.059336
2019-03-09T15:25:54
2019-03-09T15:25:54
93,001,593
4
8
null
2017-07-05T02:10:46
2017-06-01T00:50:34
HTML
UTF-8
Python
false
false
1,825
py
from django.conf.urls import url
from django.views.generic import RedirectView

from .views import student_views, instructor_views, manager_views, index_views

urlpatterns = [
    url(r'^$', index_views.index),

    # manager
    url(r'^manager$', RedirectView.as_view(url='manager/index')),
    url(r'^manager/index$', manager_views.show_manager),
    url(r'^manager/curriculum$', manager_views.set_curriculum_demand),
    url(r'^manager/curriculum/result$', manager_views.curriculum_demand_result),
    url(r'^manager/selectiontime$', manager_views.set_time),
    url(r'^manager/selectiontime/timeresult$', manager_views.time_result),
    url(r'^manager/selectiontime/confirmresult$', manager_views.confirm_result),
    url(r'^manager/setting$', manager_views.other_setting),
    url(r'^manager/setting/result$', manager_views.other_setting_result),
    url(r'^manager/manualselection$', manager_views.manual_selection),
    url(r'^manager/manualselection/result$', manager_views.selection_result),

    # instructor
    url(r'^instructor$', RedirectView.as_view(url='instructor/index')),
    url(r'^instructor/index$', instructor_views.index),
    url(r'^instructor/studentlist$', instructor_views.studentlist),

    # student
    url(r'^student$', RedirectView.as_view(url='student/index')),
    url(r'^student/index$', student_views.index),
    url(r'^student/curriculum$', student_views.curriculum),
    url(r'^student/selection$', student_views.selection),
    url(r'^student/selection/drop$', student_views.dropcourse),
    url(r'^student/selection/coursedetails$', student_views.coursedetails),
    url(r'^student/selection/priority$', student_views.selectionpriority),
    url(r'^student/selection/result$', student_views.selectionresult),
    url(r'^student/schedule$', student_views.schedule),
]
[ "hanfei.ren@foxmail.com" ]
hanfei.ren@foxmail.com
41cc8cb8ec10ccb8c7eb432e8f3cc4602df5f651
d043a51ff0ca2f9fb3943c3f0ea21c61055358e9
/python3网络爬虫开发实战/数据存储/MySQL实验/删除数据2.py
7af2d45b23cc102f658c4407ee7362981f7f0c80
[]
no_license
lj1064201288/dell_python
2f7fd9dbcd91174d66a2107c7b7f7a47dff4a4d5
529985e0e04b9bde2c9e0873ea7593e338b0a295
refs/heads/master
2020-03-30T03:51:51.263975
2018-12-11T13:21:13
2018-12-11T13:21:13
150,707,725
0
0
null
null
null
null
UTF-8
Python
false
false
382
py
import pymysql

db = pymysql.connect(host="localhost", user='root', password='123456',
                     port=3306, db='django')
cursor = db.cursor()

table = "friends"
age = "age > 30"
sql = 'DELETE FROM {table} WHERE {age}'.format(table=table, age=age)

try:
    cursor.execute(sql)
    print("Successful...")
    db.commit()
except:
    print("Failed...")
    db.rollback()
finally:
    db.close()
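# A minimal sketch of the same delete as a parameterized query, which avoids
# building SQL via string formatting for the value part (identifiers such as
# the table name still cannot be bound as parameters):
#
#   cursor.execute('DELETE FROM friends WHERE age > %s', (30,))
#   db.commit()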
[ "1064201288@qq.com" ]
1064201288@qq.com
8dfab12c043371b1ac8d6e3cf94c374f2d82fae4
bff707c5c0046350cc5a8f3d76b37c8403059380
/mysite/blog/migrations/0015_auto_20180831_2354.py
afb7c4fe8e7fdba1178521619a9d5e686c9ae0e2
[]
no_license
0xArt/PersonalSite
4c54259e72e3ef5971ad85490ea536e45b7603da
02b092477fa69b78aa813398c6d18a79b94a7f97
refs/heads/master
2020-04-05T16:24:21.044320
2019-04-04T00:51:54
2019-04-04T00:51:54
157,010,755
0
0
null
null
null
null
UTF-8
Python
false
false
383
py
# Generated by Django 2.0.6 on 2018-09-01 06:54

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0014_auto_20180831_2352'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='summary',
            field=models.CharField(max_length=400),
        ),
    ]
[ "artinisagholian@gmail.com" ]
artinisagholian@gmail.com
84eeb4e216661d1b5592535c6727d2131a0709a8
5c84c379978ac4c663d6193ea2e4e156f1fc0922
/hard/149_maxpoints_on_a_line.py
ed5bf69070d34a3ddba151cc7ec4590f1eb836f8
[]
no_license
nraghuveer/leetcode
a46483a9fd7f990410d6b9132c618e5d54baf9a7
ca045ce2c6d23fb8f92ea9871565b21cbdbeef19
refs/heads/master
2021-07-01T15:43:43.249587
2020-10-15T17:47:38
2020-10-15T17:47:38
180,434,748
0
0
null
null
null
null
UTF-8
Python
false
false
1,806
py
# https://leetcode.com/problems/max-points-on-a-line/
# using slope?
# if two points have the same slope => collinear
# calculate slope for n points with n - 1 points => O(pow(n,2)) => not good

from collections import defaultdict
from typing import List


def gcd(x, y):
    while y:
        x, y = y, x % y
    return x


class Solution:
    def maxPoints(self, points: List[List[int]]) -> int:
        """Get max points from given points that are on given lines"""
        if not points:
            return 0
        slope_map = defaultdict(int)
        l = len(points)
        max_count = 0
        for i in range(l):
            curmax = overlap = vertical = 0
            for j in range(i + 1, l):
                # if same point, track this to update count
                if points[i] == points[j]:
                    overlap += 1
                # to avoid ZeroDivisionError
                elif points[i][0] == points[j][0]:
                    vertical += 1
                else:
                    x = (points[j][1] - points[i][1])
                    y = (points[j][0] - points[i][0])
                    g = gcd(x, y)
                    x = x / g
                    y = y / g
                    slope_map[(x, y)] += 1
                    curmax = max(curmax, slope_map[(x, y)])
            # in case the ZeroDivisionError cases dominate => consider vertical
            curmax = max(curmax, vertical)
            # clear the dict, important:
            # these slopes are relative to points[i]
            slope_map.clear()
            # update the global count
            max_count = max(max_count, curmax + overlap + 1)
        return max_count


if __name__ == "__main__":
    solution = Solution()
    assert solution.maxPoints([[1,1],[2,2],[3,3]]) == 3
    print('done')
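One hedged caveat on the solution above: true division turns the slope keys into floats, which can collide or drift for large coordinates. A sketch of an exact integer normalisation; the helper name `slope_key` is mine, and it assumes duplicate and vertical pairs were already filtered out, as in the loop above:

from math import gcd


def slope_key(p, q):
    dy, dx = q[1] - p[1], q[0] - p[0]
    g = gcd(dy, dx)          # math.gcd works on the absolute values, so g > 0
    dy, dx = dy // g, dx // g
    if dx < 0:               # normalise sign so (1, -2) and (-1, 2) hash identically
        dy, dx = -dy, -dx
    return (dy, dx)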
[ "raghuveernaraharisetti@gmail.com" ]
raghuveernaraharisetti@gmail.com
92ed6a36ac6f7be76144f403a841125f2a79c943
633c18a9e1931f937f7f91f05ce9749a4ac169f6
/work_with_pythest/tests/test_math.py
05d5b8bf6daeef827b40a6d56148b1075e179af4
[]
no_license
borko81/python_scripts
fb3ff79377f19233e18d20f4f150735cdbe52c29
4e8ed38550f3b90bc00c07605d7e92822b079206
refs/heads/master
2022-07-07T19:26:52.467714
2022-06-24T15:46:57
2022-06-24T15:46:57
224,904,971
0
0
null
null
null
null
UTF-8
Python
false
false
280
py
import pytest


def test_one_plus_one():
    assert 1 + 1 == 2


def test_one_plus_two():
    a = 1
    b = 2
    c = 3
    assert a + b == c


def test_division_by_zero():
    with pytest.raises(ZeroDivisionError) as e:
        num = 1 / 0
    assert 'division' in str(e.value)
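The two addition tests above could also be folded into one with pytest's stock parametrize marker; a small sketch:

import pytest


@pytest.mark.parametrize("a, b, expected", [(1, 1, 2), (1, 2, 3)])
def test_addition(a, b, expected):
    assert a + b == expected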
[ "bstoilov81@gmail.com" ]
bstoilov81@gmail.com
c9830ab4f029b375f6bd3a3f24a0a151fc6d831a
0454d50b12960ef3a4a1f101f6d3bee585c7cfe9
/tests/parser/test_lieshu.py
99032d9cec5ead06d147eba826352df9a8959c42
[]
no_license
Syhen/hmqf_crawler_hy
7a99c05d1ac87bc293872aeb5efec450db3fb689
80508040340d1c5a9fd5192e2f5f623fd77cac08
refs/heads/master
2021-09-19T23:31:38.730466
2018-08-01T09:51:40
2018-08-01T09:51:40
111,872,551
1
3
null
2018-01-05T10:07:00
2017-11-24T03:36:46
Python
UTF-8
Python
false
false
1,586
py
# -*- coding: utf-8 -*-
"""
created on 2017-11-27 at 11:15 AM
author @heyao
"""

import json

from nose.tools import assert_list_equal, assert_is_instance, assert_dict_equal, assert_equal

from content_market.parser.lieshu import Lieshu


class TestLieshu(object):
    def setUp(self):
        self.lieshu = Lieshu()

    def tear_down(self):
        pass

    def test_chapter_list(self):
        with open('parser/data/lieshu/book_detail.html', 'r') as f:
            content = f.read().decode('utf-8')
        with open('parser/data/lieshu/chapters.json', 'r') as f:
            real_chapters = json.load(f)
        url = 'http://www.lieshu.cc'
        chapters = self.lieshu.parse_chapter_list(content, url)
        assert_is_instance(chapters, type((i for i in (1,))))
        assert_list_equal(list(chapters), real_chapters)

    def test_chapter_content(self):
        with open('parser/data/lieshu/chapter_content.html', 'r') as f:
            content_page = f.read().decode('utf-8')
        with open('parser/data/lieshu/chapter_content.txt', 'r') as f:
            content = f.read().decode('utf-8')
        assert_equal(content, self.lieshu.parse_content(content_page))

    def test_book_detail(self):
        with open('parser/data/lieshu/book_detail.html', 'r') as f:
            content = f.read().decode('utf-8')
        with open('parser/data/lieshu/book_detail.json', 'r') as f:
            book_detail = json.load(f)
        url = 'http://www.lieshu.cc/2/2732/'
        info = self.lieshu.parse_detail(content, url)
        assert_dict_equal(book_detail, dict(info))
[ "lushangkun1228@hotmail.com" ]
lushangkun1228@hotmail.com
21f4eac2a5d60a2dfe080bd75652381d18460ec0
d37189d84ee0fe11969fb4b591899035a5533352
/fun2.py
1e86b8658e999401565ccd3b3f43d478390d1109
[]
no_license
KebadSew/scratch_python
5654e1fe2e13f88b630b26ace21e96bac3278da2
aa460807200a6eb3b64ba17549769c4b0d023572
refs/heads/master
2023-02-16T15:34:42.924669
2021-01-19T00:58:07
2021-01-19T00:58:07
293,111,352
0
0
null
null
null
null
UTF-8
Python
false
false
570
py
# create a function which prints sum of two input number parameters
'''
def sum(x,y):
    print("Sum is ",x+y)
sum(5, 7)

# subtract
def mekenes(x,y):
    print("Mekenes of x-y is ",x-y)
mekenes(5, 7)
'''


def sum(x, y, z):
    return x + y + z


s = sum(8, 6, 2)
print("The sum of 8+6+2 is ", s)
[ "lingering.quest@gmail.com" ]
lingering.quest@gmail.com
af01032059305357b2406966e9ed3d432d2a7f77
0be6bb93eda9c8fb1798bd99f15ef4acb04fc504
/src/pe0026.py
61df838294b1fd130e79832d9c2ff856ba97bc98
[]
no_license
neysene/project-euler
d7f9ec8c3a46fd7fd61eec4044632e6166146337
79f9170482000328dcddb4a34701b75ab8209638
refs/heads/master
2021-01-10T07:03:55.054443
2016-01-29T05:30:40
2016-01-29T05:30:40
49,287,376
0
0
null
null
null
null
UTF-8
Python
false
false
461
py
if __name__ == '__main__':
    maxx, keep = 1, 3
    for i in xrange(2, 1000):
        num, denom, flag = 10, i, True
        a = []
        while flag:
            k = num % denom
            if k == 0:
                break
            elif k in a:
                if len(a) > maxx:
                    maxx = len(a)
                    keep = i
                break
            else:
                a.append(k)
                num = (k) * 10
    print keep
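For readers of the record above: the loop estimates the length of the recurring cycle of 1/i by tracking remainders (Project Euler 26). A hedged Python 3 restatement of the same idea; the function name is mine:

def cycle_length(d):
    # walk remainders of the long division 1/d until one repeats
    seen, r = {}, 1
    while r and r not in seen:
        seen[r] = len(seen)
        r = r * 10 % d
    return len(seen) - seen[r] if r else 0


assert cycle_length(7) == 6   # 1/7 = 0.(142857)
assert cycle_length(2) == 0   # terminating decimal, no cycle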
[ "ismailgonul@gmail.com" ]
ismailgonul@gmail.com
1c90d3231346ed0d9f466ab115158842a74a22cb
af73bf48ac21f0cdbfe1dffc9fba09172dbcfd4a
/youtube_parser.py
2a9ae0a5a08ce1e77239d36f9b8adb55521b33c4
[ "MIT" ]
permissive
cborao/youtube-xml-parser
2d92e57d3d23339f9da74d90cfd7505dc75eacf3
0ed6377cf39ba59ec762589cb1f6399cb5786081
refs/heads/master
2023-06-05T10:10:45.294835
2021-06-22T16:55:12
2021-06-22T16:55:12
379,341,177
0
0
null
null
null
null
UTF-8
Python
false
false
2,324
py
#!/usr/bin/python3

#
# Simple XML parser for YouTube XML channels
# César Borao Moratinos
#
# Based on "ytparser.py" code:
#
# Jesus M. Gonzalez-Barahona
# jgb @ gsyc.es
# SARO and SAT subjects (Universidad Rey Juan Carlos)
# 2020
#
# The input is a valid channel ID. The parser produces a HTML document in standard output, with
# the list of videos on the channel
#

from urllib.error import URLError
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
import sys
import urllib.request

videos = ""


class YTHandler(ContentHandler):

    def __init__(self):
        self.inEntry = False
        self.inContent = False
        self.content = ""
        self.title = ""
        self.link = ""

    def startElement(self, name, attrs):
        if name == 'entry':
            self.inEntry = True
        elif self.inEntry:
            if name == 'title':
                self.inContent = True
            elif name == 'link':
                self.link = attrs.get('href')

    def endElement(self, name):
        global videos
        if name == 'entry':
            self.inEntry = False
            videos = videos \
                + "        <li><a href='" + self.link + "'>" \
                + self.title + "</a></li>\n"
        elif self.inEntry:
            if name == 'title':
                self.title = self.content
                self.content = ""
                self.inContent = False

    def characters(self, chars):
        if self.inContent:
            self.content = self.content + chars


# Loading parser and driver
Parser = make_parser()
Parser.setContentHandler(YTHandler())

# --- Main prog

if __name__ == "__main__":

    PAGE = """
<!DOCTYPE html>
<html lang="en">
  <body>
    <h1>Youtube channel contents:</h1>
    <ul>
{videos}
    </ul>
  </body>
</html>
"""

    if len(sys.argv) < 2:
        print("Usage: python youtube_parser.py <channel id>")
        print(" <channel id>: The unique ID of a youtube channel")
        sys.exit(1)

    # Reading the channel's xml file
    try:
        xmlFile = urllib.request.urlopen('https://www.youtube.com/feeds/videos.xml?channel_id=' + sys.argv[1])
        Parser.parse(xmlFile)
        page = PAGE.format(videos=videos)
        print(page)
    except URLError:
        print("Introduce a valid channel Id")
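A hedged alternative to the SAX handler above: the same Atom feed can be walked with xml.etree from the standard library. The namespace URI is the standard Atom one; `channel_id` is a placeholder to be filled in:

import urllib.request
import xml.etree.ElementTree as ET

NS = {'atom': 'http://www.w3.org/2005/Atom'}
channel_id = 'CHANNEL_ID_HERE'  # placeholder, pass a real channel id

feed = urllib.request.urlopen('https://www.youtube.com/feeds/videos.xml?channel_id=' + channel_id)
root = ET.parse(feed).getroot()
for entry in root.findall('atom:entry', NS):
    title = entry.find('atom:title', NS).text
    link = entry.find('atom:link', NS).get('href')
    print(title, link)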
[ "c.borao.2017@alumnos.urjc.es" ]
c.borao.2017@alumnos.urjc.es
7373cab884ab98deb78bcd0b60f131314c4adecb
42a5c898a3a750c54dc746429e306b9f40a8638e
/pizza/orders/admin.py
bd3ee529187b49a87581f033cfc17e3d0e95696a
[]
no_license
selbieh/Pizza
16f4198714b88ad93f354e6c0eb98d92a19e364b
c10bd78b1318d7e81128e66fa67d09241618e00d
refs/heads/master
2022-05-18T04:25:46.431748
2020-01-13T13:45:59
2020-01-13T13:45:59
233,557,658
0
0
null
2022-04-22T22:59:33
2020-01-13T09:24:18
Python
UTF-8
Python
false
false
120
py
from django.contrib import admin

from .models import orderPizzaItem, order

admin.site.register([orderPizzaItem, order])
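Equivalent registration in decorator form, for reference; the `list_display` column is a hypothetical customisation, though `id` exists on any default Django model:

from django.contrib import admin

from .models import order


@admin.register(order)
class OrderAdmin(admin.ModelAdmin):
    list_display = ('id',)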
[ "selbieh@gmail.com" ]
selbieh@gmail.com
f7c3fccd2351d12f60914ebd2d253e3434834656
48a29c558eba558cff4c40171d14ae92a29bccaa
/matrix/zero_matrix.py
d8982f5c0e6ab831d934c5118283c2e7cef71fb4
[]
no_license
gammaseeker/DSA_Python
ea0a3cb526d7f71136c9a6134be0947c9be65ab0
70633cb7b53dbe628e7edd0fb2b6973872f90e50
refs/heads/master
2023-07-07T02:25:50.548688
2021-08-10T20:00:56
2021-08-10T20:00:56
196,867,646
0
0
null
null
null
null
UTF-8
Python
false
false
1,237
py
def zero_matrix(matrix):
    # Check if top row has 0
    row_zero = False
    for col in range(0, len(matrix[0])):
        if matrix[0][col] == 0:
            row_zero = True

    # Check if first col has 0
    col_zero = False
    for row in range(0, len(matrix)):
        if matrix[row][0] == 0:
            col_zero = True

    # Look for zeros and mark them in first row,col
    if len(matrix) > 1 and len(matrix[0]) > 1:
        for row in range(1, len(matrix)):
            for col in range(1, len(matrix[0])):
                if matrix[row][col] == 0:
                    matrix[0][col] = 0
                    matrix[row][0] = 0

    # Insert the zeros
    if len(matrix) > 1 and len(matrix[0]) > 1:
        for row in range(1, len(matrix)):
            for col in range(1, len(matrix[0])):
                if matrix[0][col] == 0 or matrix[row][0] == 0:
                    matrix[row][col] = 0

    if row_zero:
        for col in range(0, len(matrix[0])):
            matrix[0][col] = 0

    if col_zero:
        for row in range(0, len(matrix)):
            matrix[row][0] = 0


test1 = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
test2 = [[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]]
zero_matrix(test1)
print(test1)
zero_matrix(test2)
print(test2)
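The function above is the constant-extra-space variant of the problem; a hedged, simpler alternative that trades O(m+n) memory for readability (the function name is mine):

def zero_matrix_sets(matrix):
    # record every row and column that contains a zero, then wipe them
    rows = {r for r, row in enumerate(matrix) for v in row if v == 0}
    cols = {c for row in matrix for c, v in enumerate(row) if v == 0}
    for r, row in enumerate(matrix):
        for c in range(len(row)):
            if r in rows or c in cols:
                row[c] = 0


m = [[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]]
zero_matrix_sets(m)
print(m)  # [[0, 0, 0, 0], [0, 4, 5, 0], [0, 3, 1, 0]]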
[ "jjiemjitpolchai9540@bths.edu" ]
jjiemjitpolchai9540@bths.edu
40c19d84fb25f6fed0b4af8ac7f99c567eff0950
0d86664dd973242fdf895e515fe8df5847c03980
/analyze/extensions/com.castsoftware.html5.2.0.8-funcrel/js_file_filters.py
977bcb983f010c69e2264b2241d0ef05da06bb99
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-public-domain" ]
permissive
neel7h/engineering
2de1ed914be924aabf7d3133c28efd250fc08e13
4afd87d1700a34d662453860526aef5ba1201268
refs/heads/master
2022-02-18T06:32:43.532951
2019-10-03T08:41:39
2019-10-03T08:41:39
212,519,749
0
0
null
null
null
null
UTF-8
Python
false
false
9,701
py
'''
Created on 26 nov. 2014

@author: iboillon
'''
import os
import json
import re
import cast.analysers
from cast.application import open_source_file  # @UnresolvedImport
import traceback


class FileFilter:

    def __init__(self):
        jsonPath = os.path.abspath(os.path.join(os.path.dirname(__file__), 'filters.json'))
        self.filters = json.loads(open_source_file(jsonPath).read())
        self.last_matches_result = None

    def get_last_result(self):
        return self.last_matches_result if self.last_matches_result else ''

    def matches(self, filename, css=False):
        self.last_matches_result = None
        fname = filename.replace(os.sep, '/')
        for _filter in [_filter for _filter in self.filters if _filter['type'] == 'FilePath']:
            pattern = _filter['value'].upper()
            if css and pattern.endswith(".JS"):
                pattern = pattern[0:-3] + '.CSS'
            if self.match_string(pattern, fname.upper()):
                self.last_matches_result = 'filepath matches pattern ' + pattern
                return True
        if filename.endswith('.cshtml.html'):
            # we skip .cshtml.html files because they are generated from .cshtml files
            cshtmlFilepath = filename[:-5]
            if os.path.isfile(cshtmlFilepath):
                self.last_matches_result = 'generated from .cshtml file'
                return True
        return False

    # matches a pattern token containing one or several stars with a string
    # A pattern token does not contain /.
    # Example: **/*toto*/** contains 3 pattern tokens: **, *toto* and **
    def matches_token_with_star(self, patternToken, fnameToken):
        vals = patternToken.split('*')
        valsFound = []
        oneValueNotFound = False
        l = len(vals)
        cmpt = 0
        for val in vals:
            if val:
                if cmpt == 0:
                    if not fnameToken.startswith(val):
                        valsFound.append(False)
                        oneValueNotFound = True
                    else:
                        valsFound.append(True)
                elif cmpt == l - 1:
                    if not fnameToken.endswith(val):
                        valsFound.append(False)
                        oneValueNotFound = True
                    else:
                        valsFound.append(True)
                else:
                    if not val in fnameToken:
                        valsFound.append(False)
                        oneValueNotFound = True
                    else:
                        valsFound.append(True)
            else:
                valsFound.append(True)
            cmpt += 1
        if not oneValueNotFound:
            # check that there are no / between matches
            i = 0
            ok = True
            while i < l - 1:
                middle = fnameToken[len(vals[i]):len(fnameToken) - len(vals[i + 1])]
                if '/' in middle:
                    ok = False
                i += 1
            if ok:
                return True
        return False

    # matches a pattern corresponding to a file path with a string
    # Example: **/*toto*/**
    def match_string(self, pattern, fname):
        patternTokens = pattern.split('/')
        fnameTokens = fname.split('/')
        cmptFname = len(fnameTokens) - 1
        doubleStarJustPassed = False
        for patternToken in reversed(patternTokens):
            if patternToken == '**':
                doubleStarJustPassed = True
                continue
            starPresent = False
            if '*' in patternToken:
                starPresent = True
            if doubleStarJustPassed:
                ok = False
                while cmptFname >= 0:
                    fnameToken = fnameTokens[cmptFname]
                    cmptFname -= 1
                    if not starPresent:
                        if fnameToken == patternToken:
                            ok = True
                            break
                    else:
                        if self.matches_token_with_star(patternToken, fnameToken):
                            ok = True
                            break
                if not ok and cmptFname < 0:
                    return False
            else:
                fnameToken = fnameTokens[cmptFname]
                if not starPresent:
                    if not fnameToken == patternToken:
                        return False
                else:
                    if not self.matches_token_with_star(patternToken, fnameToken):
                        return False
                cmptFname -= 1
            doubleStarJustPassed = False
        if cmptFname >= 0 and patternTokens[0] != '**':
            return False
        return True


class JSFileFilter(FileFilter):

    def __init__(self):
        FileFilter.__init__(self)

    def match_file(self, filename, bUTF8):
        nbLongLines = 0
        maxLine = 0
        nLine = 0
        try:
            with open_source_file(filename) as f:
                for line in f:
                    if nLine <= 15:
                        for _filter in [_filter for _filter in self.filters if _filter['type'] == 'FileContent']:
                            try:
                                if re.search(_filter['value'], line):
                                    self.last_matches_result = 'pattern found in file : ' + _filter['value']
                                    return True
                            except:
                                cast.analysers.log.debug('Internal issue when filtering file: ' + str(filename) + ' line ' + str(nLine))
                                cast.analysers.log.debug(str(traceback.format_exc()))
                    nLine += 1
                    l = len(line)
                    if l > 400:
                        nbLongLines += 1
                    if l > maxLine:
                        maxLine = l
        except:
            cast.analysers.log.debug('Internal issue when filtering file: ' + str(filename))
            cast.analysers.log.debug(str(traceback.format_exc()))
        # we check if the file can be a minified file
        if nLine == 0 or nbLongLines / nLine > 0.5 or (nbLongLines / nLine > 0.2 and maxLine > 10000):
            self.last_matches_result = 'minified file'
            return True
        return False

    def matches(self, filename):
        if FileFilter.matches(self, filename):
            return True
        try:
            return self.match_file(filename, True)
        except UnicodeDecodeError:
            return self.match_file(filename, False)
        return False


class CssFileFilter(FileFilter):

    def __init__(self):
        FileFilter.__init__(self)

    def match_file(self, filename, bUTF8):
        nLine = 0
        try:
            with open_source_file(filename) as f:
                for line in f:
                    if nLine <= 15:
                        for _filter in [_filter for _filter in self.filters if _filter['type'] == 'CssFileContent']:
                            try:
                                if re.search(_filter['value'], line):
                                    self.last_matches_result = 'pattern found in file : ' + _filter['value']
                                    return True
                            except:
                                pass
                    else:
                        break
        except:
            cast.analysers.log.debug('Internal issue when reading file: ' + str(filename))
            cast.analysers.log.debug(str(traceback.format_exc()))
        return False

    def matches(self, filename):
        if FileFilter.matches(self, filename, True):
            return True
        try:
            return self.match_file(filename, True)
        except UnicodeDecodeError:
            return self.match_file(filename, False)
        return False


class HtmlFileFilter(FileFilter):

    def __init__(self):
        FileFilter.__init__(self)

    def match_file(self, filename, bUTF8):
        nLine = 0
        try:
            with open_source_file(filename) as f:
                for line in f:
                    if nLine <= 15:
                        for _filter in [_filter for _filter in self.filters if _filter['type'] == 'HtmlFileContent']:
                            try:
                                if re.search(_filter['value'], line):
                                    self.last_matches_result = 'pattern found in file : ' + _filter['value']
                                    return True
                            except:
                                pass
                    else:
                        break
        except:
            cast.analysers.log.debug('Internal issue when reading file: ' + str(filename))
            cast.analysers.log.debug(str(traceback.format_exc()))
        return False

    def matches(self, filename):
        if FileFilter.matches(self, filename):
            return True
        try:
            return self.match_file(filename, True)
        except UnicodeDecodeError:
            return self.match_file(filename, False)
        return False
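A dependency-free sketch of the minification heuristic JSFileFilter.match_file applies, with the thresholds (400-char lines, 0.5/0.2 ratios, 10000-char longest line) copied from the class and `open_source_file` replaced by plain `open`; the function name is mine:

def looks_minified(path, long_line=400, huge_line=10000):
    # count lines, over-long lines, and the longest line, as the analyzer does
    n_lines = n_long = max_len = 0
    with open(path, encoding="utf-8", errors="replace") as f:
        for line in f:
            n_lines += 1
            max_len = max(max_len, len(line))
            if len(line) > long_line:
                n_long += 1
    if n_lines == 0:
        return True
    ratio = n_long / n_lines
    return ratio > 0.5 or (ratio > 0.2 and max_len > huge_line)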
[ "a.kumar3@castsoftware.com" ]
a.kumar3@castsoftware.com
f2ebf591f742eb1433a9072d3c9826170e1cb8cd
2f73a3d4daac2aa2c38c3443b4f5555c49faa1c8
/Data.py
d8e917bf4fa96358299cdd241123799362a03919
[]
no_license
18021009/project
656b6c8f9a0120c1185493d04405660895db93e9
0133f412e50e3dadd13bd0028832babf846070e5
refs/heads/main
2023-05-07T17:08:41.529766
2021-06-01T04:06:38
2021-06-01T04:06:38
372,696,937
0
0
null
null
null
null
UTF-8
Python
false
false
4,718
py
from math import nan
from os import name
from Station import station
import numpy as np
import datetime
import pandas as pd
from Map import map
from Point import point

# standardline date data.csv to college.csv
# ds = pd.read_csv('data.csv')


def changeToDate(output_file):
    ds = pd.read_csv('data.csv')
    day_delta = datetime.timedelta(days=1)
    start_date = datetime.date(2019, 1, 1)
    end_date = datetime.date(2020, 1, 1)
    for i in range((end_date - start_date).days):
        day = start_date + i*day_delta
        _day = day.strftime('X%m/X%d/%Y').replace('X0', 'X').replace('X', '')
        ds['time'] = ds['time'].replace({_day: day})
    ds.to_csv(output_file, index=False)


def buffer_data(input_file, buffer):
    dataStation = pd.read_csv(input_file)
    dataStation['wind_speed'] = nan
    dataStation['temperature'] = nan
    dataStation['satellite_NO2'] = nan
    dataStation["road_density"] = nan
    dataStation["relative_humidity"] = nan
    dataStation["pressure"] = nan
    dataStation["population_density"] = nan
    dataStation["pblh"] = nan
    dataStation["NDVI"] = nan
    dataStation["dpt"] = nan
    dataStationArray = dataStation.values
    dataStation = pd.DataFrame(dataStationArray,
                               columns=['time', 'lat', 'long', 'NO2', 'name',
                                        'wind_speed' + str(buffer),
                                        'temperature' + str(buffer),
                                        'satellite_NO2' + str(buffer),
                                        'road_density' + str(buffer),
                                        'relative_humidity' + str(buffer),
                                        'pressure' + str(buffer),
                                        'population_density' + str(buffer),
                                        'pblh' + str(buffer),
                                        'NDVI' + str(buffer),
                                        'dpt' + str(buffer)])
    dataStation.to_csv(input_file, float_format='{:f}'.format, index=False)


changeToDate('buffer_1_data.csv')
buffer_data('buffer_1_data.csv', 1)
changeToDate('buffer_2_data.csv')
buffer_data('buffer_2_data.csv', 2)
changeToDate('buffer_3_data.csv')
buffer_data('buffer_3_data.csv', 3)

# a = pd.read_csv("buffer_1_data.csv")
# b = pd.read_csv("buffer_2_data.csv")
# merged = a.merge(b, on=['time', 'lat', 'long', 'name'], how='inner')
# merged.to_csv('merge.csv', index=False)
# c = pd.read_csv("merge.csv")
# d = pd.read_csv("buffer_3_data.csv")
# merged = c.merge(d, on=['time', 'lat', 'long', 'name'], how='inner')
# merged.to_csv('merge.csv', index=False)

# buffer_radius
# _buffer_radius = 1

# dataStation = pd.read_csv('college.csv')
# dataStation['wind_speed'] = -999.0
# dataStation["road_dens"] = -999.0
# dataStation["pp_dens"] = -999.0
# dataStation["earth_no2"] = -999.0
# dataStationArray = dataStation.values

# # add wind speed to dataStationArray
# start_date = datetime.date(2019, 1, 1)
# end_date = datetime.date(2020, 1, 1)
# day_delta = datetime.timedelta(days=1)
# for i in range((end_date - start_date).days):
#     fileName = "WSPDCombine_"
#     day = start_date + i*day_delta
#     file = "map/wind_speed/" + fileName + day.strftime('%Y%m%d') + ".tif"
#     _map = map()
#     _map.setMap(file)
#     for data in dataStationArray:
#         if((data[0] == day.strftime('%Y-%m-%d'))):
#             _point = point(data[2], data[1])
#             _point.set_position_on_matrix(_map)
#             _station = station(_point, _buffer_radius)
#             _station.setBufferValue(_map)
#             data[5] = np.float64(_station.bufferValue)

# # add road to college.csv
# _map = map()
# _map.setMap('map/road_density/road_dens.tif')
# for data in dataStationArray:
#     _point = point(data[2], data[1])
#     _point.set_position_on_matrix(_map)
#     _station = station(_point, _buffer_radius)
#     _station.setBufferValue(_map)
#     data[6] = _station.bufferValue

# # add population_density
# _map = map()
# _map.setMap('map/population_density/ppd.tif')
# for data in dataStationArray:
#     _point = point(data[2], data[1])
#     _point.set_position_on_matrix(_map)
#     _station = station(_point, _buffer_radius)
#     _station.setBufferValue(_map)
#     data[7] = _station.bufferValue

# # add earth_no2
# for i in range((end_date - start_date).days):
#     fileName = "NO2_"
#     day = start_date + i*day_delta
#     file = "map/NO2/" + fileName + day.strftime('%Y%m%d') + ".tif"
#     _map = map()
#     _map.setMap(file)
#     for data in dataStationArray:
#         if((data[0] == day.strftime('%Y-%m-%d'))):
#             _point = point(data[2], data[1])
#             _point.set_position_on_matrix(_map)
#             _station = station(_point, _buffer_radius)
#             _station.setBufferValue(_map)
#             data[8] = _station.bufferValue

# newDataStation = pd.DataFrame(dataStationArray, columns=['time', 'lat', 'long', 'NO2', 'name', 'wind_speed', 'road_dens', 'pp_dens', 'earth_no2'])
# newDataStation.to_csv('college_2.csv', float_format='{:f}'.format, index=False)
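The commented-out pairwise merges near the end of the script can be collapsed into a single reduce over the three buffer files; a hedged sketch, with file names and join keys taken from the script itself:

from functools import reduce

import pandas as pd

frames = [pd.read_csv("buffer_{}_data.csv".format(i)) for i in (1, 2, 3)]
keys = ['time', 'lat', 'long', 'name']
# inner-join the three buffer tables on the shared station/time columns
merged = reduce(lambda a, b: a.merge(b, on=keys, how='inner'), frames)
merged.to_csv('merge.csv', index=False)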
[ "myEmail@example.com" ]
myEmail@example.com
a7ac6aca6ae6303875db1502f4c7a1f188290a7d
bead792530ab007addd60ce777e9ce19bc45cc74
/inception-google/utils.py
b797d03ecbf9e46c79fdd3249d8fbd5b928d25c1
[]
no_license
knowmefly/Youth-AI-SelfImprovement
aefb47bf13284509372cfd6c1ea14a81e2be21ce
bb15cdc07dc6c231b5d44acae088f98a44f97761
refs/heads/master
2020-04-25T04:26:20.997249
2019-03-06T20:33:08
2019-03-06T20:33:08
172,510,073
2
1
null
null
null
null
UTF-8
Python
false
false
1,371
py
# -*- coding: utf-8 -*-

import tensorflow as tf

slim = tf.contrib.slim


# Define the default arg scope
def inception_arg_scope(weight_decay=0.00004,
                        use_batch_norm=True,
                        batch_norm_decay=0.9997,
                        batch_norm_epsilon=0.001,
                        activation_fn=tf.nn.relu,
                        batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
    # Specify the parameters of the normalization function
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'updates_collections': batch_norm_updates_collections,
        'fused': None,
    }
    if use_batch_norm:
        normalizer_fn = slim.batch_norm
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}

    # Set weight_decay for the weights of the conv and fully connected layers
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=slim.variance_scaling_initializer(),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params) as sc:
            return sc
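A hedged usage sketch for the scope above, assuming the TF 1.x / tf.contrib.slim stack this file targets; the layer shape and scope name are made up, and `inception_arg_scope` is taken from the module above:

import tensorflow as tf

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_arg_scope(weight_decay=4e-5)):
    # conv layers opened here inherit the regularizer, initializer and batch norm settings
    net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')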
[ "knowmefly@qq.com" ]
knowmefly@qq.com
181a7dc33b61cdc418e9314d9e6ba8faa6a0d378
0d7d344edf0dc4b905b12a96a004a773191aa26f
/visas/admin.py
b00da55229665e711a24d095008554baee723958
[]
no_license
BoughezalaMohamedAimen/Amine
ae615ca64c5d0c8977e26aee2906e606439250d5
6060d48ab1308c217fe1bd8bd419369f83cb733a
refs/heads/master
2020-06-27T11:57:30.682966
2019-08-04T22:56:41
2019-08-04T22:56:41
199,948,247
0
0
null
null
null
null
UTF-8
Python
false
false
110
py
from django.contrib import admin

from .models import *

# Register your models here.
admin.site.register(Visa)
[ "mamoumou121@gmail.com" ]
mamoumou121@gmail.com
649d3305c8a94ba9233b0341f2e5877d71f30475
79ea04b61afc43231dfdc76f290356af46598914
/FRW/manager.py
284daa87f29a5c755671f1ad31ca4ef1eac95ffb
[ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause" ]
permissive
PearseT/Maya_scripts
21b3abd78b7c132e5b28182f23d181050ec2b112
037fe39b1b4928dce6f967c710ecc0d1d087502d
refs/heads/master
2020-06-03T04:23:17.990529
2019-12-03T13:46:30
2019-12-03T13:46:30
191,436,434
0
1
null
null
null
null
UTF-8
Python
false
false
26,731
py
# TODO: # importDeformerWeights to perform auto-binding for some of the more common deformers like skinCluster, cluster, etc. # quadruped import sys, os, imp, inspect, shutil, glob, platform, __main__ from functools import partial import maya.cmds as mc THIS_DIR, THIS_FILE = os.path.split(__file__) sys.path.append(THIS_DIR) THIS_FILE_NAME = os.path.splitext(THIS_FILE)[0] def __initialize(): global STAGING_DIR, ASSET_TYPES, EDITOR, CACHE STAGING_DIR = ASSET_TYPES = EDITOR = None CACHE = {} LIB_CACHE = {} def main(force=False): if force: if mc.dockControl("dc_FRW", ex=True) == True: mc.deleteUI("dc_FRW") if mc.window("w_FRW", ex=True): mc.deleteUI("w_FRW") if not mc.window("w_FRW", ex=True): a = mc.window("w_FRW", t="the Fastest Rig in the West") tl = mc.tabLayout() tab1 = mc.paneLayout(cn="horizontal3", st=1, shp=1, ps=[(1,1,1),(2,1,99),(3,1,1)]) mc.columnLayout(adj=True) mc.rowLayout(nc=5, adj=4) mc.iconTextButton(st="iconOnly", i1="QR_add.png", ann="create new asset", c=__createAsset_ui) mc.iconTextButton(st="iconOnly", i1="QR_delete.png", ann="delete selected asset", c=__deleteAsset) mc.iconTextButton(st="iconOnly", i1="CN_refresh.png", ann="update assets list", c=__update) mc.text(l="") mc.iconTextButton(st="iconOnly", i1="UVEditorSnapshot.png", ann="update icon", c=__icon) mc.setParent("..") mc.rowLayout(nc=3, adj=2) mc.textScrollList("tsl_type_FRW", w=100, h=200, sc=__updateNames) mc.textScrollList("tsl_name_FRW", w=170, h=200, sc=__updateIconAndPath) mc.image("img_FRW", w=200, h=200) mc.setParent("..") mc.rowLayout(nc=2, adj=1) mc.textField("tf_path_FRW", ed=False) mc.iconTextButton(st="iconOnly", i1="passSetRelationEditor.png", ann="edit", c=__edit) mc.setParent("..") mc.setParent("..") mc.scrollLayout("sl_inspector_FRW", bv=True) mc.setParent("..") mc.button("b_execute_FRW", l="execute", c=__execute) mc.setParent("..") tab2 = mc.scrollLayout(bv=True) mc.columnLayout("cl_library_FRW", adj=True, rs=5) mc.setParent("..") mc.setParent("..") tab3 = mc.scrollLayout(bv=True) mc.columnLayout("cl_extensions_FRW", adj=True, rs=5) mc.setParent("..") mc.setParent("..") mc.tabLayout(tl, e=True, tl=[(tab1, "builder"), (tab2, "library"), (tab3, "extensions")]) if not mc.dockControl("dc_FRW", ex=True): mc.dockControl("dc_FRW", l="the Fastest Rig in the West", con="w_FRW", aa=["left","right"], a="left", w=1) mc.dockControl("dc_FRW", e=True, fl=True) else: mc.dockControl("dc_FRW", e=True, vis=True) __initialize() __update() __library() __extensions() def __update(*arg): __config() si = None if mc.textScrollList("tsl_type_FRW", q=True, nsi=True): si = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0] mc.textScrollList("tsl_type_FRW", e=True, ra=True) if os.path.isdir(STAGING_DIR): for d in os.listdir(STAGING_DIR): mc.textScrollList("tsl_type_FRW", e=True, a=d) if d == si: mc.textScrollList("tsl_type_FRW", e=True, si=d) __updateNames() def __config(): if not os.path.isfile(THIS_DIR+"/"+THIS_FILE_NAME+".cfg"): return f = open(THIS_DIR+"/"+THIS_FILE_NAME+".cfg") l = f.readlines() f.close() for line in l: line = line.strip() if "=" not in line: continue line = line.split("=") if len(line) != 2: continue key = line[0].strip() if key == "STAGING_DIR": global STAGING_DIR STAGING_DIR = THIS_DIR+"/staging/" value = eval(line[1].strip()) if type(value) == str or type(value) == unicode: if value[-1] != "/": value += "/" STAGING_DIR = value elif key == "ASSET_TYPES": global ASSET_TYPES ASSET_TYPES = eval(line[1].strip()) elif key == "EDITOR": global EDITOR EDITOR = line[1].strip() def __updateNames(): si = 
None if mc.textScrollList("tsl_name_FRW", q=True, nsi=True): si = mc.textScrollList("tsl_name_FRW", q=True, si=True)[0] mc.textScrollList("tsl_name_FRW", e=True, ra=True) if mc.textScrollList("tsl_type_FRW", q=True, nsi=True): t = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0] if os.path.isdir(STAGING_DIR): for d in os.listdir(STAGING_DIR+"/"+t): mc.textScrollList("tsl_name_FRW", e=True, a=d) if d == si: mc.textScrollList("tsl_name_FRW", e=True, si=d) __updateIconAndPath() def __updateIconAndPath(): mc.textField("tf_path_FRW", e=True, tx="") mc.image("img_FRW", e=True, i=THIS_DIR+"/frw.png") if mc.textScrollList("tsl_type_FRW", q=True, nsi=True): t = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0] if mc.textScrollList("tsl_name_FRW", q=True, nsi=True): n = mc.textScrollList("tsl_name_FRW", q=True, si=True)[0] f = STAGING_DIR+"/"+t+"/"+n+"/"+n+".py" if os.path.isfile(f): mc.textField("tf_path_FRW", e=True, tx=f) f = f[:-3]+".png" if os.path.isfile(f): mc.image("img_FRW", e=True, i=f) __updateInspector() # Updates the inspector according to the contents (functions and signatures) of the template script. # Stores useful information in a global cache, accessible from everywhere in the code. def __updateInspector(): global CACHE CACHE = {"index":{}, "function":{}, "execute":{}} mc.button("b_execute_FRW", e=True, en=False) l = mc.scrollLayout("sl_inspector_FRW", q=True, ca=True) or [] if len(l): mc.deleteUI(l) if mc.textScrollList("tsl_type_FRW", q=True, nsi=True): t = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0] if mc.textScrollList("tsl_name_FRW", q=True, nsi=True): CACHE["name"] = mc.textScrollList("tsl_name_FRW", q=True, si=True)[0] CACHE["file"] = STAGING_DIR+t+"/"+CACHE["name"]+"/"+CACHE["name"]+".py" if os.path.isfile(CACHE["file"]): m = imp.load_source(CACHE["name"], CACHE["file"]) for n, o in inspect.getmembers(m, inspect.isfunction): CACHE["index"][o.__code__.co_firstlineno] = [n, inspect.getargspec(o)] ids = sorted(CACHE["index"].viewkeys()); c = len(ids) for i in range(c): if i == 0: mc.button("b_execute_FRW", e=True, en=True) fn = CACHE["index"][ids[i]][0] CACHE["function"][fn] = {"checkbox":None, "arguments":{}, "presets":{}} mc.rowLayout(nc=10, adj=2, p="sl_inspector_FRW") cb = mc.iconTextCheckBox(i="checkboxOff.png", si="checkboxOn.png", v=__loadStatePreset(fn), cc=partial(__saveStatePreset, ids[i])) CACHE["function"][fn]["checkbox"] = cb mc.text(l=CACHE["index"][ids[i]][0], w=250, al="left", fn="fixedWidthFont") ab = mc.iconTextButton(st="iconOnly", i1="fileOpen.png", ann="load preset", vis=False, c=partial(__loadAllArgPresets, ids[i])) eb = mc.iconTextButton(st="iconOnly", i1="fileSave.png", ann="save preset", vis=False, c=partial(__saveAllArgPresets, ids[i])) db = mc.iconTextButton(st="iconOnly", i1="QR_delete.png", ann="delete preset", vis=False, c=partial(__deleteAllArgPresets, ids[i])) rv = mc.iconTextButton(st="iconOnly", i1="RS_disable.png", ann="reset value", vis=False, c=partial(__resetAllArgValues, ids[i])) mc.text(l="", w=5) CACHE["function"][fn]["error"] = mc.image(i="RS_WarningOldCollection", vis=False) e = mc.iconTextButton(st="iconOnly", i1="timeplay.png", c=partial(__execute, ids[i])) CACHE["execute"][e] = CACHE["index"][ids[i]][0] mc.setParent("..") arg_nms = CACHE["index"][ids[i]][1][0]; c_nms = len(arg_nms) arg_val = CACHE["index"][ids[i]][1][3] or []; c_val = len(arg_val) offset = c_nms - c_val # arguments for j in range(offset): if j == 0: for s in [ab, eb, db, rv]: mc.iconTextButton(s, e=True, vis=True) tfg, img = __argumentWidget(j, 
ids[i], CACHE["index"][ids[i]][0], arg_nms[j], None) CACHE["function"][fn]["arguments"][arg_nms[j]] = tfg CACHE["function"][fn]["presets"][arg_nms[j]] = img # keyword arguments for j in range(c_val): if j == 0: for s in [ab, eb, db, rv]: mc.iconTextButton(s, e=True, vis=True) jj = j+offset tfg, img = __argumentWidget(jj, ids[i], CACHE["index"][ids[i]][0], arg_nms[jj], arg_val[j]) CACHE["function"][fn]["arguments"][arg_nms[jj]] = tfg CACHE["function"][fn]["presets"][arg_nms[jj]] = img if i < c-1: mc.separator(st="in", w=435, h=10, p="sl_inspector_FRW") # Load at once any available presets for the arguments of the inspected function. __loadArgPreset(ids[i], arg_nms) def __argumentWidget(i, idx, fn, arg_nam, arg_val, presets=True): mc.rowLayout(nc=2, adj=True) tfg = mc.textFieldGrp(l=arg_nam, tx=str(arg_val)) if presets: mc.popupMenu() mc.menuItem("load preset", i="folder-open.png", c=partial(__loadArgPreset, idx, [arg_nam])) mc.menuItem("save preset", i="UVTkSaveValue.png", c=partial(__saveArgPreset, idx, fn+"."+arg_nam)) mc.menuItem("delete preset", i="RS_delete.png", c=partial(__deleteArgPreset, idx, fn+"."+arg_nam)) mc.menuItem(d=True) mc.menuItem("reset value", i="RS_disable.png", c=partial(__resetArgValue, idx, arg_nam)) img = mc.image(i="Bookmark.png", vis=False) else: img = None mc.setParent("..") return tfg, img def __icon(*arg): if "file" not in CACHE.viewkeys(): return mc.select(cl=True) for e in mc.lsUI(ed=True): try: mc.viewFit(p=e) except: pass f = CACHE["file"][:-3]+".png" if os.path.isfile(f): os.remove(f) fmt = mc.getAttr("defaultRenderGlobals.imageFormat") mc.setAttr("defaultRenderGlobals.imageFormat", 32) i = mc.playblast(cf=f, fmt="image", cc=False, fr=1, v=False, orn=False, os=True, p=100, wh=[200,200], qlt=100) mc.setAttr("defaultRenderGlobals.imageFormat", fmt) mc.image("img_FRW", e=True, i=f) # edit build template def __edit(*arg): if "file" not in CACHE.viewkeys(): return if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return if platform.system() == "Windows": os.system("start "+EDITOR+" "+CACHE["file"]) else: os.system(EDITOR+" "+CACHE["file"]+"&") # edit library def __edit2(*arg): if not os.path.isfile(arg[0]): mc.confirmDialog(t=" ", m="File not found: "+arg[0], b="ok") return if platform.system() == "Windows": os.system("start "+EDITOR+" "+arg[0]) else: os.system(EDITOR+" "+arg[0]+"&") def __extensions(): l = mc.columnLayout("cl_extensions_FRW", q=True, ca=True) or [] if len(l) > 0: mc.deleteUI(l) mc.columnLayout(p="cl_extensions_FRW") mc.iconTextButton(st="iconOnly", i1="CN_refresh.png", ann="update", c=__extensions) mc.setParent("..") __main__.FRW_DIR = THIS_DIR d = THIS_DIR+"/extensions/" if not os.path.isdir(d): return for f in glob.glob(d+"*.py"): try: n = os.path.splitext(os.path.split(f)[1])[0] m = imp.load_source(n, f) fl = mc.frameLayout(l=n, bv=True, cll=True, mw=5, mh=5, p="cl_extensions_FRW") m.main() mc.setParent("..") mc.frameLayout(fl, e=True, cl=False) mc.frameLayout(fl, e=True, cl=True) except Exception as e: print("Extension: "+f) print(" Error: "+str(e)) def __library(): l = mc.columnLayout("cl_library_FRW", q=True, ca=True) or [] if len(l) > 0: mc.deleteUI(l) mc.columnLayout(p="cl_library_FRW") mc.iconTextButton(st="iconOnly", i1="CN_refresh.png", ann="update", c=__library) mc.setParent("..") if not os.path.isdir(THIS_DIR): return global LIB_CACHE LIB_CACHE = {} for f in glob.glob(THIS_DIR+"/*.py"): f = f.replace("\\", "/") n = os.path.splitext(os.path.split(f)[1])[0] try: m = 
imp.load_source(n, f) except Exception as e: print("Library: "+f) print(" Error: "+str(e)) continue fl = mc.frameLayout(l=n, bv=True, cll=True, cl=True, mw=15, mh=15, p="cl_library_FRW") mc.rowLayout(nc=2, adj=1) mc.textField(tx=f, ed=False) mc.iconTextButton(st="iconOnly", i1="passSetRelationEditor.png", ann="edit", c=partial(__edit2, f)) mc.setParent("..") mc.separator(st="in", w=420, h=10) LIB_CACHE[f] = {} for n, o in inspect.getmembers(m, inspect.isfunction): LIB_CACHE[f][o.__code__.co_firstlineno] = [n, inspect.getargspec(o)] ids = sorted(LIB_CACHE[f].viewkeys()); c = len(ids) for i in range(c): fn = LIB_CACHE[f][ids[i]][0] arg_nms = LIB_CACHE[f][ids[i]][1][0]; c_nms = len(arg_nms) arg_val = LIB_CACHE[f][ids[i]][1][3] or []; c_val = len(arg_val) mc.frameLayout(l=fn, bv=True, cll=True, cl=True, mw=5, mh=5, fn="smallPlainLabelFont") mc.rowLayout(nc=2, adj=1) mc.text(l="")#fn, al="left", fn="fixedWidthFont") e = mc.iconTextButton(st="iconOnly", i1="timeplay.png", c=partial(__execute2, f, ids[i])) mc.setParent("..") if c_nms > 0: LIB_CACHE[f][ids[i]].append({}) offset = c_nms - c_val # arguments for j in range(offset): LIB_CACHE[f][ids[i]][2][arg_nms[j]] = __argumentWidget(j, ids[i], LIB_CACHE[f][ids[i]][1][0], arg_nms[j], None, presets=False)[0] # keyword arguments for j in range(c_val): jj = j+offset LIB_CACHE[f][ids[i]][2][arg_nms[jj]] = __argumentWidget(jj, ids[i], LIB_CACHE[f][ids[i]][1][0], arg_nms[jj], arg_val[j], presets=False)[0] # if i < c-1: mc.separator(st="in", h=10) mc.setParent("..") mc.frameLayout(fl, e=True, cl=False) mc.frameLayout(fl, e=True, cl=True) # # argument presets # def __loadAllArgPresets(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return f = CACHE["file"][:-3]+".pre" if os.path.isfile(f): fn = CACHE["index"][arg[0]][0] f = open(f); lines = f.readlines(); f.close() for line in lines: line = line.strip() if "=" not in line: continue l = line.split("=") if not "." in l[0]: continue fn2, arg2 = l[0].strip().split(".") if fn != fn2: continue for arg in CACHE["function"][fn]["arguments"].viewkeys(): if arg != arg2: continue tfg = CACHE["function"][fn]["arguments"][arg] mc.textFieldGrp(tfg, e=True, tx=l[1].strip()) img = CACHE["function"][fn]["presets"][arg] mc.image(img, e=True, vis=True) def __saveAllArgPresets(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return fn = CACHE["index"][arg[0]][0] filepath = CACHE["file"][:-3]+".pre" if os.path.isfile(filepath): f = open(filepath); l = f.readlines(); f.close() for arg in CACHE["function"][fn]["arguments"].viewkeys(): add = False for i in range(len(l)): s = l[i].strip() if "=" not in s: continue l2 = s.split("=") if "." 
not in l2[0]: continue if fn+"."+arg != l2[0].strip(): continue add = True tfg = CACHE["function"][fn]["arguments"][arg] val = mc.textFieldGrp(tfg, q=True, tx=True) l[i] = fn+"."+arg+" = "+str(val)+"\n" img = CACHE["function"][fn]["presets"][arg] mc.image(img, e=True, vis=True) break if not add: tfg = CACHE["function"][fn]["arguments"][arg] val = mc.textFieldGrp(tfg, q=True, tx=True) l.append(fn+"."+arg+" = "+str(val)+"\n") img = CACHE["function"][fn]["presets"][arg] mc.image(img, e=True, vis=True) else: l = [] for arg in CACHE["function"][fn]["arguments"].viewkeys(): tfg = CACHE["function"][fn]["arguments"][arg] val = mc.textFieldGrp(tfg, q=True, tx=True) l.append(fn+"."+arg+" = "+str(val)+"\n") img = CACHE["function"][fn]["presets"][arg] mc.image(img, e=True, vis=True) f = open(filepath, "w"); f.writelines(l); f.close() def __deleteAllArgPresets(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return idx = arg[0] fn = CACHE["index"][idx][0] filepath = CACHE["file"][:-3]+".pre" if os.path.isfile(filepath): f = open(filepath); l = f.readlines(); f.close() for arg in CACHE["function"][fn]["arguments"].viewkeys(): for i in range(len(l)): s = l[i].strip() if "=" not in s: continue l2 = s.split("=") if "." not in l2[0]: continue if fn+"."+arg != l2[0].strip(): continue __resetArgValue(idx, arg) l.pop(i) img = CACHE["function"][fn]["presets"][arg] mc.image(img, e=True, vis=False) break f = open(filepath, "w"); f.writelines(l); f.close() def __resetAllArgValues(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return for arg2 in CACHE["function"][CACHE["index"][arg[0]][0]]["arguments"].viewkeys(): __resetArgValue(arg[0], arg2) def __loadStatePreset(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return True f = CACHE["file"][:-3]+".pre" if os.path.isfile(f): f = open(f); lines = f.readlines(); f.close() for line in lines: line = line.strip() if "=" not in line: continue l = line.split("=") if "." in l[0]: continue if arg[0] != l[0].strip(): continue return eval(l[1]) return True def __saveStatePreset(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return cb = CACHE["function"][CACHE["index"][arg[0]][0]]["checkbox"] val = str(mc.iconTextCheckBox(cb, q=True, v=True)) fn = CACHE["index"][arg[0]][0] filepath = CACHE["file"][:-3]+".pre" if os.path.isfile(filepath): add = False f = open(filepath); l = f.readlines(); f.close() for i in range(len(l)): s = l[i].strip() if "=" not in s: continue l2 = s.split("=") if "." in l2[0]: continue if fn != l2[0].strip(): continue add = True l[i] = fn+" = "+val+"\n" break if not add: l.append(fn+" = "+val+"\n") f = open(filepath, "w"); f.writelines(l); f.close() else: s = fn+" = "+val+"\n" f = open(filepath, "w"); f.write(s); f.close() def __loadArgPreset(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return f = CACHE["file"][:-3]+".pre" if not os.path.isfile(f): lines = [] else: f = open(f); lines = f.readlines(); f.close() idx = arg[0] fn = CACHE["index"][idx][0] args = arg[1] for arg in args: img = CACHE["function"][CACHE["index"][idx][0]]["presets"][arg] mc.image(img, e=True, vis=False) for line in lines: line = line.strip() if "=" not in line: continue l = line.split("=") if "." 
not in l[0]: continue fn2, arg2 = l[0].strip().split(".") if fn != fn2 or arg != arg2: continue tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg] mc.textFieldGrp(tfg, e=True, tx=("=".join(s for s in l[1:])).strip()) mc.image(img, e=True, vis=True) def __saveArgPreset(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return idx = arg[0] fn, arg = arg[1].split(".") filepath = CACHE["file"][:-3]+".pre" if os.path.isfile(filepath): add = False f = open(filepath); l = f.readlines(); f.close() for i in range(len(l)): s = l[i].strip() if "=" not in s: continue l2 = s.split("=") if "." not in l2[0]: continue if fn+"."+arg != l2[0].strip(): continue add = True tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg] val = mc.textFieldGrp(tfg, q=True, tx=True) l[i] = fn+"."+arg+" = "+str(val)+"\n" break if not add: tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg] val = mc.textFieldGrp(tfg, q=True, tx=True) l.append(fn+"."+arg+" = "+str(val)+"\n") f = open(filepath, "w"); f.writelines(l); f.close() else: tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg] val = mc.textFieldGrp(tfg, q=True, tx=True) s = fn+"."+arg+" = "+str(val)+"\n" f = open(filepath, "w"); f.write(s); f.close() img = CACHE["function"][CACHE["index"][idx][0]]["presets"][arg] mc.image(img, e=True, vis=True) def __deleteArgPreset(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return idx = arg[0] fn, arg = arg[1].split(".") filepath = CACHE["file"][:-3]+".pre" if os.path.isfile(filepath): f = open(filepath); l = f.readlines(); f.close() for i in range(len(l)): s = l[i].strip() if "=" not in s: continue l2 = s.split("=") if "." 
not in l2[0]: continue if fn+"."+arg == l2[0].strip(): l.pop(i) f = open(filepath, "w"); f.writelines(l); f.close() __resetArgValue(idx, arg) break img = CACHE["function"][CACHE["index"][idx][0]]["presets"][arg] mc.image(img, e=True, vis=False) def __resetArgValue(*arg): nms = CACHE["index"][arg[0]][1][0]; c_nms = len(nms) val = CACHE["index"][arg[0]][1][3] or []; c_val = len(val) offset = c_nms - c_val for i in range(c_nms): if arg[1] == nms[i]: break tfg = CACHE["function"][CACHE["index"][arg[0]][0]]["arguments"][arg[1]] if c_nms != c_val: if i < offset: mc.textFieldGrp(tfg, e=True, tx="None") else: mc.textFieldGrp(tfg, e=True, tx=str(val[i-offset])) else: mc.textFieldGrp(tfg, e=True, tx=str(val[i])) # # execute code from inspector # def __execute(*arg): if not os.path.isfile(CACHE["file"]): mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok") return cmd = CACHE["name"]+'=imp.load_source("'+CACHE["name"]+'", "'+CACHE["file"]+'")' print("import imp\n"+cmd); exec(cmd) for idx in sorted(CACHE["index"].viewkeys()): fn = CACHE["index"][idx][0] mc.image(CACHE["function"][fn]["error"], e=True, vis=False) if type(arg[0]) != int: if not mc.iconTextCheckBox(CACHE["function"][fn]["checkbox"], q=True, v=True): continue elif idx != arg[0]: continue cmd = CACHE["name"]+"."+fn+"("+__arguments(idx)+")" print(cmd) try: exec(cmd) except Exception as e: mc.image(CACHE["function"][fn]["error"], e=True, vis=True) raise Exception(e) if type(arg[0]) == bool: __icon() def __arguments(idx): arg = "" nms = CACHE["index"][idx][1][0]; cnt_nms = len(nms) val = CACHE["index"][idx][1][3] or []; cnt_val = len(val) off = cnt_nms-cnt_val for i in range(off): tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][nms[i]] val = mc.textFieldGrp(tfg, q=True, tx=True) try: val = eval(val) except: val = '"'+val+'"' arg += str(val) if cnt_nms != cnt_val: arg += ", " for i in range(cnt_val): tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][nms[i+off]] val = mc.textFieldGrp(tfg, q=True, tx=True) try: val = eval(val) except: val = '"'+val+'"' arg += nms[i+off]+"="+str(val) if i < cnt_val-1: arg += ", " return arg # # execute code from library # def __execute2(*arg): if not os.path.isfile(arg[0]): mc.confirmDialog(t=" ", m="File not found: "+arg[0], b="ok") return n = os.path.split(os.path.splitext(arg[0])[0])[1] cmd = n+'=imp.load_source("'+n+'", "'+arg[0]+'")' print("import imp\n"+cmd); exec(cmd) cmd = n+"."+LIB_CACHE[arg[0]][arg[1]][0]+"("+__arguments2(arg[0], arg[1])+")" print(cmd); exec(cmd) def __arguments2(f, idx): arg = "" nms = LIB_CACHE[f][idx][1][0]; cnt_nms = len(nms) val = LIB_CACHE[f][idx][1][3] or []; cnt_val = len(val) off = cnt_nms-cnt_val for i in range(off): tfg = LIB_CACHE[f][idx][2][nms[i]] val = mc.textFieldGrp(tfg, q=True, tx=True) try: val = eval(val) except: val = '"'+val+'"' arg += str(val) if cnt_nms != cnt_val: arg += ", " for i in range(cnt_val): tfg = LIB_CACHE[f][idx][2][nms[i+off]] val = mc.textFieldGrp(tfg, q=True, tx=True) try: val = eval(val) except: val = '"'+val+'"' arg += nms[i+off]+"="+str(val) if i < cnt_val-1: arg += ", " return arg # # create/delete assets # def __createAsset_ui(*arg): mc.layoutDialog(ui=__createAsset_dlg, t="create new asset") def __createAsset_dlg(): mc.columnLayout(adj=True) mc.rowLayout(nc=2, adj=2) mc.text(l="rig type", al="right", w=80) mc.optionMenu("om_rigType_FRW") for f in glob.glob(THIS_DIR+"/*.ma"): mc.menuItem(l=os.path.splitext(os.path.split(f)[1])[0]) mc.setParent("..") mc.rowLayout(nc=2, adj=2) mc.text(l="asset type", 
al="right", w=80) mc.optionMenu("om_assetType_FRW") for t in ASSET_TYPES: mc.menuItem(l=t) mc.setParent("..") mc.rowLayout(nc=2, adj=2) mc.text(l="asset name", al="right", w=80) mc.textField("tf_assetName_FRW") mc.setParent("..") mc.text(l="") mc.rowColumnLayout(nc=2, cw=[(1,148),(2,148)]) mc.button(l="create", c=__createAsset_stage) mc.button(l="cancel", c=__createAsset_cancel) mc.setParent("..") mc.setParent("..") def __createAsset_cancel(*arg): mc.layoutDialog(dis="cancel") def __createAsset_stage(*arg): n = mc.textField("tf_assetName_FRW", q=True, tx=True).strip() if not n: mc.confirmDialog(t=" ", m="Incorrect asset name.", b="ok") return rt = mc.optionMenu("om_rigType_FRW", q=True, v=True) at = mc.optionMenu("om_assetType_FRW", q=True, v=True) f = STAGING_DIR+at+"/"+n+"/"+n+".py" if os.path.isfile(f): result = mc.confirmDialog(t="overwrite existing asset", m="Asset with this name already exists. Do you want to overwrite it ?", b=["yes","no"], cb="no", ds="no", db="no") if result == "no": return createAsset(rt, at, n) mc.layoutDialog(dis="cancel") __update() mc.textScrollList("tsl_type_FRW", e=True, si=at) __updateNames() mc.textScrollList("tsl_name_FRW", e=True, si=n) __update() def createAsset(rigType, assetType, assetName): directory = STAGING_DIR+assetType+"/"+assetName+"/" try: os.makedirs(directory) except: pass if not os.path.isdir(directory): raise Exception("Cannot create directory: "+directory) filepath = directory+assetName+".py" try: shutil.copy(THIS_DIR+"/template.py", filepath) except: raise Exception("Cannot create file: "+filepath) try: os.makedirs(directory+"/weights") except: pass try: os.remove(filepath[:-3]+".pre") except: pass if not os.path.isfile(THIS_DIR+"/"+rigType+".py"): rigType = "generic" for f in glob.glob(THIS_DIR+"/"+rigType+".*"): ext = os.path.splitext(f)[1] if ext == ".py" or ext == ".pyc": continue shutil.copy(f, directory+assetName+ext) m = imp.load_source(rigType, THIS_DIR+"/"+rigType+".py") for n, o in inspect.getmembers(m, inspect.isfunction): if n == "main": args = inspect.getargspec(o) args1 = ", ".join((a+"="+str(b), a+'="'+str(b)+'"')[type(b) == str] for a,b in zip(args[0],args[3])) args2 = ", ".join(a+"="+a for a in args[0]) f = open(filepath); s = f.read(); f.close() s = s.replace("FRW_DIR", THIS_DIR).replace("FRW_RIG", rigType) s = s.replace("FRW_ARG2", args2).replace("FRW_ARG", args1) f = open(filepath, "w"); f.write(s); f.close() break print("Result: "+filepath) return filepath def __deleteAsset(*arg): if "file" not in CACHE.viewkeys(): return d = os.path.split(CACHE["file"])[0] if not os.path.isdir(d): mc.confirmDialog(t=" ", m="Invalid asset.", b="ok") return result = mc.confirmDialog(t="delete asset", m="Do you want to delete the selected asset ?", b=["yes","no"], cb="no", ds="no", db="no") if result == "no": return try: shutil.rmtree(d) except: raise Exception("Cannot delete directory: "+d) __update()
[ "pearsetoomey@gmail.com" ]
pearsetoomey@gmail.com
2d3d1b442af9336be133c309201d7efd2fff5c15
19692e21e740eca07b493cf4ebf22ad833ce827d
/lawsite_nogit/lawsite/wsgi.py
11149264db289cadfe32f7a73806afab1794e05b
[]
no_license
reedharder/bending_the_law
6033082d78175285983e98dc8cda0c9da72b97b2
bd85f6a3f91c3f9bb28da87177a5578a7fffb9c6
refs/heads/master
2020-04-09T11:55:49.036953
2016-08-05T15:58:47
2016-08-05T15:58:47
40,094,227
0
0
null
null
null
null
UTF-8
Python
false
false
565
py
""" WSGI config for lawsite project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lawsite.settings") application = get_wsgi_application() ''' from whitenoise.django import DjangoWhiteNoise from dj_static import Cling application = Cling(get_wsgi_application()) application = DjangoWhiteNoise(application) '''
[ "reedharder@gmail.com" ]
reedharder@gmail.com
1ce65bae1f1abca5f6f1b6dcf3dd5b53a58ec9b5
a87ed28a5217101f57f387c8003ed73e4bb873d3
/cracking-the-code-interview/queue.py
deb62c3fb3c1dda57e89705ad3572c070f678842
[]
no_license
snahor/chicharron
82f65836258462a900f2dba6b4192a436e16e7d0
710e7114d8768965cd50556cbbeeed0e3604cf92
refs/heads/master
2021-01-24T14:11:32.235253
2017-07-19T06:06:42
2017-07-19T06:06:42
41,190,005
0
0
null
null
null
null
UTF-8
Python
false
false
1,003
py
from linked_list import Node


class Queue:
    '''
    >>> q = Queue()
    >>> q.enqueue(1)
    >>> q.enqueue(2)
    >>> q.enqueue(3)
    >>> q.dequeue()
    1
    >>> q.dequeue()
    2
    >>> q.enqueue(4)
    >>> q.enqueue(5)
    >>> q.dequeue()
    3
    >>> q.dequeue()
    4
    >>> q.dequeue()
    5
    >>> q.dequeue()
    '''
    def __init__(self):
        self.head = None
        self.last = None

    def enqueue(self, value):
        node = Node(value)
        if not self.head:
            self.head = node
            self.last = node
        else:
            self.last.next = node
            self.last = node

    def dequeue(self):
        if not self.head:
            return None
        node = self.head
        self.head = node.next
        if self.last == node:
            self.last = node.next
        return node.value

    def is_empty(self):
        return self.head is None


if __name__ == '__main__':
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
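For comparison, the standard library already ships the same FIFO behaviour; a short sketch with collections.deque:

from collections import deque

q = deque()
q.append(1)          # enqueue
q.append(2)
print(q.popleft())   # dequeue -> prints 1
print(len(q) == 0)   # is_empty equivalent -> False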
[ "hans.r.69@gmail.com" ]
hans.r.69@gmail.com
236f08e901aa2811beb0f5bc228a88b8b65cf996
caf1d3bd64bbece382fcad9c38da28f8bfd7b6ea
/rules.py
7788b1ed5ac7c146bbc9bc09e96d0aaab5aa965a
[]
no_license
PavelPylypenko/kz_tagging
bf5dc192f7a3d552d9edda97ec141050204e33df
a057d3e8c26ba914bf59bc7063519e4be4090f28
refs/heads/master
2022-11-25T16:23:01.810210
2020-08-06T09:33:06
2020-08-06T09:33:06
285,529,922
0
0
null
null
null
null
UTF-8
Python
false
false
11,701
py
NNATENDS = ['шык', 'шы', 'пыр', 'мпыр', 'алар', 'ашыщ', 'лар', 'елер', 'ды', 'рдан', 'рлан', 'рсақ', 'қтар', 'ылар', 'ылык', 'нші', 'лік', 'сшы', 'пша', 'хана', 'ашы', 'ші', 'паз', 'лық', 'йлар', 'қсы', 'ылық', 'ндық', 'ім', 'ар', 'ас', 'кер', 'уші', 'шілер', 'рік', 'ктер', 'қша', 'пан', 'лшы', 'дыр', 'тыр', 'рған', 'қай', 'алар', 'ылар', 'ңғы', 'ылар', 'ырақ', 'тік', 'ңдар', 'лын', 'ншақ', 'най', 'қтар', 'гер', 'рлер', 'ылар', 'ңіз', 'зші', 'шлер', 'гер', 'рлер', 'пкер', 'рлер', 'лігі', 'тур', 'турлер', 'ші', 'ілер', 'ншық', 'ын', 'шілік', 'ылық', 'дар', 'лық', 'ылар', 'шы', 'тар', 'гер', 'герлер', 'лер', 'ханалар', 'ілеп', 'паз', 'ік', 'іктер', 'керткіш', 'ту', 'ірткі', 'еп', 'ептер', 'сіз', 'уас', 'керу', 'ім', 'імде', 'башы', 'елер', 'пенділер', 'бек', 'кқор', 'шіл', 'ктер', 'ағасы', 'сы', 'лар', 'улар', 'тау']
NNILENDS = ['мның', 'енің', 'рдың', 'дың']
NNBAENDS = ['да', 'те', 'та', 'нда', 'нде', 'ға', 'ге', 'қа', 'ке', 'на', 'не', 'а', 'е', 'тік', 'еге ырға', 'рға', 'йға', 'ыға', 'аға', 'шаға', 'сіз', 'мға', 'ға']
NNTAENDS = ['мды', 'ені', 'рды', 'ырды', 'тты', 'ңды', 'керту', 'қы']
NNDJAENDS = ['да', 'зға', 'рда', 'еде']
NNSHIENDS = ['дан', 'ден', 'тан', 'тен', 'нан', 'нен', 'здан', 'зда', 'еден', 'рдан']
NNKOENDS = ['бен', 'здар', 'ммен', 'емен', 'рдан', 'мен', 'лармен', 'пен', 'нен', 'рмен', 'тпен', 'ңге', 'менен', 'ммен', 'мен', 'тармен', 'ілермен', 'герлермен', 'басшылық', 'іпқону', 'пенен']
SUB_ONE_SUF = ('тар', 'тер', 'дар', 'дер', 'лар', 'лер')
SUB_PLURAL_SUFFS = ('ның', 'нің', 'дың', 'дін', 'тың', 'тің', 'ға', 'ге', 'қа', 'ке', 'а', 'е', 'на', 'не', 'ны', 'н', 'ні', 'ды', 'ді', 'ты', 'ті', 'да', 'де', 'нда', 'нде', 'та', 'те', 'дан', ' ден', ' тан', ' тен', ' нан', ' нен', 'мен', ' менен', ' бен', ' бенен', ' пен', ' пенен')
SUB_SUFFIXES = ('ғай', 'гей', 'гер', 'ғи', 'ғой', 'дас', 'дес', 'дік', 'дық', 'кер', 'кес', 'қай', 'қар', 'қи', 'қой', 'қор', 'лас', 'лес', 'ліқ', 'лық', 'ман', 'паз', 'пана', 'сақ', 'тас', 'тес', 'тік', 'тық', 'хана', 'ша', 'шақ', 'ше', 'шек', 'ші', 'шік', 'шы', 'шық', 'ақ', 'ба', 'бе', 'ғақ', 'ғаш', 'гек', 'гі', 'ғіш', 'ғы', 'ғыш', 'дақ', 'дек', 'ек', 'ік', 'ім', 'іс', 'іш', 'к', 'кі', 'кіш', 'қ', 'қаш', 'қы', 'қыш', 'лақ', 'лек', 'м', 'ма', 'мақ', 'ме', 'мек', 'па', 'пақ', 'пе', 'пек', 'с', 'тақ', 'тек', 'уік', 'уық', 'ш', 'ық', 'ым', 'ыс', 'ыш', 'герлік', 'гіштік', 'ғыштық', 'дастық', 'дестік', 'ділік', 'дылық', 'кеәтік', 'қорлық', 'ластық', 'лестік', 'лілік', 'лылық', 'паздық', 'сақтық', 'сіздік', 'сыздық', 'тастық', 'тестік', 'тілік', 'тылық', 'шақтық', 'шілдік', 'шілік', 'шылдық', 'шылық', 'жан', 'ке', 'қан', 'сымақ', 'тай', 'ш', 'ша', 'шақ', 'ше', 'шік', 'шық')
SUB_ONE_l1 = ['а', 'у', 'н']
SUB_ONE_l2 = ['ға', 'ге', 'қа', 'ке', 'на', 'не', 'ны', 'ні', 'ды', 'ді', 'ты', 'ті', 'да', 'де', 'та', 'те', ]
SUB_ONE_l3 = ['ның', 'нің', 'дың', 'дін', 'тың', 'тің', 'нда', 'нде', 'дан', 'ден', 'тан', 'тен', 'нан', 'нен', 'мен', 'бен', 'пен']
SUB_ONE_l5 = ['менен', 'бенен', 'пенен']
OBJ_SUFFIXES = ('ға', 'ге', 'қа', 'ке', 'а', 'е', 'на', 'не', 'ны', 'н', 'ні', 'ды', 'ді', 'ты', 'ті')
OBJ_ENDS = ('тар', 'тер', 'дар', 'дер', 'лар', 'лер')
PRED_A_STARTS = ('тұр', 'отыр', 'жатыр', 'жүр')
PRED_A_SUFFIXES = ('біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_A_ENDS = ('п', 'ып', 'іп')
PRED_B_SUFFIXES = ('ap', 'ер', 'ыр', 'ір', 'а', 'е', 'й', 'и')
PRED_B_SUFFIX_ENDS = ('біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_C_SUFFIXES = ('ap', 'ep', 'ыр', 'ір')
PRED_C_POSS_SUFFIXES = ('', 'біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_C_ADD = ('еді', 'е')
PRED_D_ADD = ('еді', 'екен')
PRED_D_POSS_SUFFIXES = ('біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_D_SUFFIXES = ('ді', 'дік', 'діқ', 'дім', 'дің', 'ды', 'дык', 'дық', 'дым', 'дың', 'қ', 'ті', 'тік', 'тім', 'тің', 'ты', 'тык', 'тық', 'тым', 'тың', 'а', 'ай', 'ал', 'ан', 'ар', 'арыс', 'ға', 'ғал', 'ғар', 'ғе', 'ге', 'гер', 'гі', 'гіз', 'гіздір', 'гіле', 'гір', 'гіт', 'ғы', 'ғыз', 'ғыздыр', 'ғызыл', 'ғыла', 'ғыр', 'ғыт', 'да', 'дан', 'дар', 'дас', 'дастыр', 'де', 'ден', 'дендір', 'дес', 'діг', 'дік', 'дір', 'діргіз', 'дық', 'дыр', 'дырғыз', 'дырыл', 'е', 'ей', 'ел', 'ен', 'ер', 'й', 'іг', 'іғ', 'ік', 'ікіс', 'іл', 'іла', 'ілде', 'ілу', 'імсіре', 'ін', 'індір', 'ініс', 'іну', 'іңкіре', 'ір', 'ірде', 'іре', 'ірей', 'іріс', 'ірке', 'іркен', 'ірқе', 'іс', 'ісу', 'іт', 'ке', 'кер', 'кіз', 'кіле', 'кір', 'қа', 'қал', 'қан', 'қар', 'қе', 'қур', 'қыз', 'қыла', 'қыла', 'қыр', 'л', 'ла', 'лан', 'ландыр', 'лас', 'ластыр', 'лат', 'ле', 'лен', 'лендір', 'лес', 'лестір', 'лет', 'ліг', 'лік', 'лікіс', 'лін', 'ліс', 'лқа', 'лу', 'лығ', 'лық', 'лын', 'лыс', 'мала', 'меле', 'мсіре', 'мсыра', 'н', 'ні', 'ніл', 'ніс', 'ныл', 'ныс', 'ңгіре', 'ңғыра', 'ңкіре', 'ңқыра', 'ңра', 'ңре', 'р', 'ра', 'ре', 'с', 'са', 'сан', 'се', 'сен', 'сет', 'сетіл', 'сі', 'сін', 'сіре', 'стір', 'стыр', 'сы', 'сын', 'сыра', 'т', 'та', 'тан', 'тандыр', 'тас', 'те', 'тен', 'тендір', 'тес', 'тік', 'ттыр', 'тығ', 'тығс', 'тығыс', 'тық', 'тыр', 'тырыл', 'ура', 'ші', 'шы', 'ығ', 'ығыс', 'ық', 'ықыс', 'ыл', 'ыла', 'ылда', 'ылу', 'ылыс', 'ымсыра', 'ын', 'ындыр', 'ыну', 'ыныс', 'ыр', 'ыра', 'ырай', 'ырқа', 'ырқан', 'ырла', 'ыс', 'ысу', 'ыт', 'азы', 'ақта', 'ал', 'ала', 'аңғыра', 'аура', 'бала', 'бе', 'беле', 'би', 'бі', 'бы', 'дала', 'ди', 'ді', 'ды', 'екте', 'ел', 'еңгіре', 'еуре', 'жи', 'жіре', 'жыра', 'зы', 'і', 'ін', 'ірей', 'іс', 'іт', 'қи', 'лі', 'лы', 'ма', 'мала', 'меле', 'ми', 'мсіре', 'мсыра', 'ңра', 'ңре', 'палапеле', 'пи', 'пі', 'пы', 'ра', 'ре', 'си', 'сіре', 'сый', 'сыра', 'т', 'ти', 'ті', 'ты', 'усіре', 'усыра', 'ши', 'ші', 'шы', 'ы', 'ын', 'ыра', 'ырай', 'ыс', 'ыт')
PRED_SUFFIXES = ('ді', 'дік', 'діқ', 'дім', 'дің', 'ды', 'дык', 'дық', 'дым', 'дың', 'қ', 'ті', 'тік', 'тім', 'тің', 'ты', 'тык', 'тық', 'тым', 'тың', 'а', 'ай', 'ал', 'ан', 'ар', 'арыс', 'ға', 'ғал', 'ғар', 'ғе', 'ге', 'гер', 'гі', 'гіз', 'гіздір', 'гіле', 'гір', 'гіт', 'ғы', 'ғыз', 'ғыздыр', 'ғызыл', 'ғыла', 'ғыр', 'ғыт', 'да', 'дан', 'дар', 'дас', 'дастыр', 'де', 'ден', 'дендір', 'дес', 'діг', 'дік', 'дір', 'діргіз', 'дық', 'дыр', 'дырғыз', 'дырыл', 'е', 'ей', 'ел', 'ен', 'ер', 'й', 'іг', 'іғ', 'ік', 'ікіс', 'іл', 'іла', 'ілде', 'ілу', 'імсіре', 'ін', 'індір', 'ініс', 'іну', 'іңкіре', 'ір', 'ірде', 'іре', 'ірей', 'іріс', 'ірке', 'іркен', 'ірқе', 'іс', 'ісу', 'іт', 'ке', 'кер', 'кіз', 'кіле', 'кір', 'қа', 'қал', 'қан', 'қар', 'қе', 'қур', 'қыз', 'қыла', 'қыла', 'қыр', 'л', 'ла', 'лан', 'ландыр', 'лас', 'ластыр', 'лат', 'ле', 'лен', 'лендір', 'лес', 'лестір', 'лет', 'ліг', 'лік', 'лікіс', 'лін', 'ліс', 'лқа', 'лу', 'лығ', 'лық', 'лын', 'лыс', 'мала', 'меле', 'мсіре', 'мсыра', 'н', 'ні', 'ніл', 'ніс', 'ныл', 'ныс', 'ңгіре', 'ңғыра', 'ңкіре', 'ңқыра', 'ңра', 'ңре', 'р', 'ра', 'ре', 'с', 'са', 'сан', 'се', 'сен', 'сет', 'сетіл', 'сі', 'сін', 'сіре', 'стір', 'стыр', 'сы', 'сын', 'сыра', 'т', 'та', 'тан', 'тандыр', 'тас', 'те', 'тен', 'тендір', 'тес', 'тік', 'ттыр', 'тығ', 'тығс', 'тығыс', 'тық', 'тыр', 'тырыл', 'ура', 'ші', 'шы', 'ығ', 'ығыс', 'ық', 'ықыс', 'ыл', 'ыла', 'ылда', 'ылу', 'ылыс', 'ымсыра', 'ын', 'ындыр', 'ыну', 'ыныс', 'ыр', 'ыра', 'ырай', 'ырқа', 'ырқан', 'ырла', 'ыс', 'ысу', 'ыт', 'азы', 'ақта', 'ал', 'ала', 'аңғыра', 'аура', 'бала', 'бе', 'беле', 'би', 'бі', 'бы', 'дала', 'ди', 'ді', 'ды', 'екте', 'ел', 'еңгіре', 'еуре', 'жи', 'жіре', 'жыра', 'зы', 'і', 'ін', 'ірей', 'іс', 'іт', 'қи', 'лі', 'лы', 'ма', 'мала', 'меле', 'ми', 'мсіре', 'мсыра', 'ңра', 'ңре', 'палапеле', 'пи', 'пі', 'пы', 'ра', 'ре', 'си', 'сіре', 'сый', 'сыра', 'т', 'ти', 'ті', 'ты', 'усіре', 'усыра', 'ши', 'ші', 'шы', 'ы', 'ын', 'ыра', 'ырай', 'ыс', 'ыт', 'ған', 'ген', 'қан', 'кен', 'қон', 'ға', 'ге', 'қа', 'ке', 'атын', 'етін', 'йтын', 'йтін')
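# Illustrative sketch (hypothetical, not part of this module): one way these
# tables could drive a naive suffix stripper. `strip_noun_suffix` is an
# invented helper name; real Kazakh morphology would also need vowel-harmony
# and stem-validity checks that this sketch skips.
# def strip_noun_suffix(word):
#     # try the longest matching case/plural ending first
#     for suf in sorted(set(SUB_PLURAL_SUFFS + SUB_ONE_SUF), key=len, reverse=True):
#         s = suf.strip()
#         if s and word.endswith(s) and len(word) > len(s) + 1:
#             return word[:-len(s)]
#     return word
#
# strip_noun_suffix('кітаптар')  # -> 'кітап' ("books" -> "book"), via 'тар'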
[ "pavlo.pylypenko@anvileight.com" ]
pavlo.pylypenko@anvileight.com
31377b78b9aa2c2a50c21500d418eb84e8d65b07
ae5bc58aea259f9e633398b99e9705c89a0cea3d
/tasks/viewpoint_select/utils_data.py
15883db86b9b6068ef4ef746b53f5f631cafb115
[ "MIT-0" ]
permissive
ayshrv/visitron
3bacefd4cf62c66864cfcdba4e24af7a576590dd
2f30e6c002ed021d2be209a94a5e77c2d7e2117f
refs/heads/main
2023-06-03T17:47:06.905510
2021-06-30T22:18:55
2021-06-30T22:18:55
302,179,557
1
0
NOASSERTION
2021-06-30T22:59:18
2020-10-07T22:56:49
Python
UTF-8
Python
false
false
20,796
py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

import base64
import csv
import json
import logging
import math
import os
import pickle
import re
import sys
import time
from itertools import chain

import lmdb
import networkx as nx
import numpy as np
from tqdm import tqdm

csv.field_size_limit(sys.maxsize)

logger = logging.getLogger(__name__)


def load_nav_graphs(scans):
    """ Load connectivity graph for each scan """

    def distance(pose1, pose2):
        """ Euclidean distance between two graph poses """
        return (
            (pose1["pose"][3] - pose2["pose"][3]) ** 2
            + (pose1["pose"][7] - pose2["pose"][7]) ** 2
            + (pose1["pose"][11] - pose2["pose"][11]) ** 2
        ) ** 0.5

    graphs = {}
    for scan in scans:
        with open("connectivity/%s_connectivity.json" % scan) as f:
            G = nx.Graph()
            positions = {}
            data = json.load(f)
            for i, item in enumerate(data):
                if item["included"]:
                    for j, conn in enumerate(item["unobstructed"]):
                        if conn and data[j]["included"]:
                            positions[item["image_id"]] = np.array(
                                [item["pose"][3], item["pose"][7], item["pose"][11]]
                            )
                            assert data[j]["unobstructed"][
                                i
                            ], "Graph should be undirected"
                            G.add_edge(
                                item["image_id"],
                                data[j]["image_id"],
                                weight=distance(item, data[j]),
                            )
            nx.set_node_attributes(G, values=positions, name="position")
            graphs[scan] = G
    return graphs


def get_data_root(dataset_type="NDH"):
    if dataset_type == "NDH":
        data_root = "srv/task_data/NDH/data/"
    elif dataset_type == "CVDN":
        data_root = "srv/task_data/CVDN/data/"
    elif dataset_type == "R2R":
        data_root = "srv/task_data/R2R/data/R2R_"
    elif dataset_type == "R4R":
        data_root = "srv/task_data/R4R/data/R4R_"
    elif dataset_type == "RxR":
        data_root = "srv/task_data/RxR/data"
    elif dataset_type == "PretrainNDH":
        data_root = "srv/task_data/pretrain_data/NDH_"
    elif dataset_type == "PretrainR2R":
        data_root = "srv/task_data/pretrain_data/R2R_"
    elif dataset_type == "PretrainR4R":
        data_root = "srv/task_data/pretrain_data/R4R_"
    elif dataset_type == "PretrainRxR":
        data_root = "srv/task_data/pretrain_data/RxR_"
    else:
        raise NotImplementedError
    return data_root


def load_datasets(splits, dataset_type="NDH"):
    data = []
    data_root = get_data_root(dataset_type)
    if dataset_type == "RxR":
        import jsonlines

        assert splits == ["train"]
        with jsonlines.open(f"{data_root}/rxr_train_guide.jsonl") as f:
            for line in f.iter():
                data.append(line)
        return data
    for split in splits:
        assert split in ["train", "val_seen", "val_unseen", "test"]
        with open(data_root + "%s.json" % split) as f:
            data += json.load(f)
    return data


def load_classifier_data(splits):
    data = []
    raw_data = []
    data_root = get_data_root("CVDN")
    for split in splits:
        assert split in ["train", "val_seen", "val_unseen", "test"]
        data_path = data_root + "%s.json" % split
        with open(data_path) as f:
            items = json.load(f)
            raw_data.extend(items)
    for item in raw_data:
        item["inst_idx"] = str(item["idx"])
        item["planner_path"] = item["planner_nav_steps"]
        item["player_path"] = item["nav_steps"]
        item["nav_history"] = item["player_path"]
        heading, elevation = 2.0, 17.5
        if "nav_camera" in item and len(item["nav_camera"]) > 0:
            nav_camera = item["nav_camera"][0]
            if "message" in nav_camera:
                heading = nav_camera["message"][-1]["heading"]
                elevation = nav_camera["message"][-1]["elevation"]
        item["start_pano"] = {
            "heading": heading,
            "elevation": elevation,
            "pano": item["planner_nav_steps"][0],
        }
        dialog = {0: []}
        last_timestep = 0
        for index, turn in enumerate(item["dialog_history"]):
            if index % 2 == 0:
                assert turn["role"] == "navigator"
                timestep = turn["nav_idx"]
                message = turn["message"]
                dialog_history = dialog[last_timestep]
                dialog_history.append(message)
                dialog[timestep] = dialog_history
                last_timestep = timestep
            else:
                if timestep != turn["nav_idx"]:
                    logger.info(
                        "Timestep for oracle and navigator mismatch, correcting it. "
                        f"Timestep: {timestep} turn['nav_idx']: {turn['nav_idx']}"
                    )
                assert turn["role"] == "oracle"
                message = turn["message"]
                dialog_history = dialog[timestep]
                dialog_history.append(message)
                dialog[timestep] = dialog_history
        item["dialog_history"] = dialog
        item["request_locations"] = list(dialog.keys())
        data.append(item)
    return data


def load_gameplay_data(splits):
    data = []
    data_root = get_data_root("CVDN")
    for split in splits:
        assert split in ["train", "val_seen", "val_unseen", "test"]
        logger.info("Using CVDN for " + split + "!\n\n\n")
        data_source = data_root + split + ".json"
        with open(data_source) as f:
            items = json.load(f)
        new_items = []
        for item in items:
            item["inst_idx"] = item["idx"]
            item["planner_path"] = item["planner_nav_steps"]
            item["player_path"] = item["nav_steps"]
            item["nav_history"] = item["player_path"]
            heading, elevation = 2.0, 17.5
            if "nav_camera" in item and len(item["nav_camera"]) > 0:
                nav_camera = item["nav_camera"][0]
                if "message" in nav_camera:
                    heading = nav_camera["message"][-1]["heading"]
                    elevation = nav_camera["message"][-1]["elevation"]
            item["start_pano"] = {
                "heading": heading,
                "elevation": elevation,
                "pano": item["planner_nav_steps"][0],
            }
            nav_ins, ora_ins, request_locations, nav_seen, ora_seen, nav_idx = (
                [],
                [],
                {},
                [],
                [],
                0,
            )
            for index, turn in enumerate(item["dialog_history"]):
                if turn["role"] == "navigator":
                    nav_ins.append(turn["message"])
                    if len(ora_seen) > 0:
                        request_locations[nav_idx] = [
                            " ".join(nav_seen),
                            " ".join(ora_seen),
                            index,
                        ]
                        ora_seen = []
                        nav_seen = []
                    nav_seen.append(turn["message"])
                else:
                    ora_ins.append(turn["message"])
                    if len(nav_seen) > 0:
                        nav_idx = int(turn["nav_idx"])
                    ora_seen.append(turn["message"])
            if len(ora_seen) > 0:
                request_locations[nav_idx] = [
                    nav_seen[-1],
                    ora_seen[-1],
                    len(item["dialog_history"]),
                ]  # [' '.join(nav_seen), ' '.join(ora_seen), len(item['dialog_history'])]
            item["nav_instructions"] = " ".join(nav_ins)
            item["ora_instructions"] = " ".join(ora_ins)
            if (
                len(item["nav_instructions"]) == 0
                or len(item["ora_instructions"]) == 0
            ):
                continue
            item["request_locations"] = request_locations
            item["inst_idx"] = str(item["inst_idx"])
            assert len(item["player_path"]) > 1, item["player_path"]
            new_items.append(item)
        data += new_items
    return data


def save_preprocessed_data(data, splits, version, dataset_type="NDH"):
    data_root = get_data_root(dataset_type)
    combined_split = "_".join(splits)
    path = f"{data_root}{combined_split}_preprocessed_{version}.pickle"
    logger.info(f"Saving preprocessed data to {path}")
    with open(path, "wb") as handle:
        pickle.dump(data, handle, protocol=-1)


def check_and_load_preprocessed_data(splits, version, dataset_type="NDH"):
    if dataset_type == "NDH":
        data_root = "srv/task_data/NDH/data/"
    elif dataset_type == "R2R":
        data_root = "srv/task_data/R2R/data/R2R_"
    elif dataset_type == "R4R":
        data_root = "srv/task_data/R4R/data/R4R_"
    elif dataset_type == "RxR":
        data_root = "srv/task_data/RxR/data/RxR_"
    elif dataset_type == "PretrainNDH":
        data_root = "srv/task_data/pretrain_data/NDH_"
    elif dataset_type == "PretrainR2R":
        data_root = "srv/task_data/pretrain_data/R2R_"
    elif dataset_type == "PretrainR4R":
        data_root = "srv/task_data/pretrain_data/R4R_"
    elif dataset_type == "PretrainRxR":
        data_root = "srv/task_data/pretrain_data/RxR_"
    else:
        raise NotImplementedError
    combined_split = "_".join(splits)
    path = f"{data_root}{combined_split}_preprocessed_{version}.pickle"
    if os.path.exists(path) and os.path.isfile(path):
        logger.info(f"Loading preprocessed data from {path}")
        t_s = time.time()
        with open(path, "rb") as handle:
            data = pickle.load(handle)
        t_e = time.time()
        logger.info(
            "Loaded Image Features from {} in time: {:0.2f} mins".format(
                path, (t_e - t_s) / 60.0
            )
        )
        return data
    return False


def truncate_dialogs(sentences, amount, left=True):
    """
    Truncate `dialogs` at a token-level TO the specified `amount` FROM the direction specified by `left`
    Consider length of each dialog to be len(dialog) + 1 as `[QUES]` or `[ANS]` tag needs to be counted as well.
    """
    if amount is None:
        return sentences
    if (len(list(chain(*sentences))) + len(sentences)) <= amount:
        return sentences
    if left:
        reversed_sentences = sentences[::-1]
        reversed_truncated_sentences = []
        amount_appended = 0
        for turn in reversed_sentences:
            if amount_appended < amount:
                remaining_amount = amount - amount_appended
                if (len(turn) + 1) <= remaining_amount:
                    reversed_truncated_sentences.append(turn)
                    amount_appended += len(turn) + 1
                else:
                    reversed_truncated_sentences.append(turn[-remaining_amount + 1 :])
                    amount_appended += len(turn[-remaining_amount + 1 :]) + 1
                    break  # can break out of the loop at this point
        truncated_sentences = reversed_truncated_sentences[::-1]
        return truncated_sentences
    else:
        truncated_sentences = []
        amount_appended = 0
        for turn in sentences:
            if amount_appended < amount:
                remaining_amount = amount - amount_appended
                if (len(turn) + 1) <= remaining_amount:
                    truncated_sentences.append(turn)
                    amount_appended += len(turn) + 1
                else:
                    truncated_sentences.append(turn[: remaining_amount - 1])
                    amount_appended += len(turn[: remaining_amount - 1]) + 1
                    break  # can break out of the loop at this point
        return truncated_sentences


def read_tsv_img_features(path=None, feature_size=2048, blind=False):
    if path:
        logger.info("Loading image features from %s" % path)
        if blind:
            logger.info("... and zeroing them out for 'blind' evaluation")
        tsv_fieldnames = [
            "scanId",
            "viewpointId",
            "image_w",
            "image_h",
            "vfov",
            "features",
        ]
        features = {}
        with open(path, "rt") as tsv_in_file:
            reader = csv.DictReader(
                tsv_in_file, delimiter="\t", fieldnames=tsv_fieldnames
            )
            for item in reader:
                image_h = int(item["image_h"])
                image_w = int(item["image_w"])
                vfov = int(item["vfov"])
                long_id = item["scanId"] + "_" + item["viewpointId"]
                if not blind:
                    features[long_id] = np.frombuffer(
                        base64.b64decode(item["features"]), dtype=np.float32
                    ).reshape((36, feature_size))
                else:
                    features[long_id] = np.zeros((36, feature_size), dtype=np.float32)
    else:
        logger.info("Image features not provided")
        features = None
        image_w = 640
        image_h = 480
        vfov = 60
    dictionary = {
        "features": features,
        "image_w": image_w,
        "image_h": image_h,
        "vfov": vfov,
    }
    return dictionary


def timeSince(since, percent):
    now = time.time()
    s = now - since
    es = s / percent
    rs = es - s
    return "%s (- %s)" % (asMinutes(s), asMinutes(rs))


def asMinutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return "%dm %ds" % (m, s)


def load_detector_classes(UPDOWN_DATA="srv/detector_classes_attributes"):
    classes = ["__background__"]
    with open(os.path.join(UPDOWN_DATA, "objects_vocab.txt")) as f:
        for object in f.readlines():
            classes.append(object.split(",")[0].lower().strip())
    return classes


class FeaturesReader:
    def __init__(self, path, use_lmdb=True, in_memory=False):
        self.use_lmdb = use_lmdb
        if not self.use_lmdb:
            (
                self.keys,
                self.features,
                self.region_tokens,
                self.image_w,
                self.image_h,
                self.vfov,
            ) = self.load_features_from_pickle(path)
        else:
            img_feature_path = path + ".lmdb"
            logger.info(f"Loading lmdb features from {img_feature_path}")
            # open database
            self.env = lmdb.open(
                img_feature_path,
                readonly=True,
                readahead=False,
                max_readers=1,
                lock=False,
            )
            # get keys
            with self.env.begin(write=False) as txn:
                self.keys = pickle.loads(txn.get("keys".encode()))
            key = self.keys[0]
            with self.env.begin(write=False) as txn:
                item = pickle.loads(txn.get(key))
                self.image_w = item["image_w"]
                self.image_h = item["image_h"]
                self.vfov = item["vfov"]
            region_labels_path = path + "-region_labels.pickle"
            with open(region_labels_path, "rb") as handle:
                self.region_tokens = pickle.load(handle)
            logger.info(f"Loaded region labels from {region_labels_path}")
        # get viewpoints
        self.viewpoints = {}
        for key in self.keys:
            scan_id, viewpoint_id, feature_view_index = key.decode().split("_")
            if scan_id not in self.viewpoints:
                self.viewpoints[scan_id] = set()
            self.viewpoints[scan_id].add(viewpoint_id)

    def load_features_from_pickle(self, path):
        t_s = time.time()
        img_feature_path = path + ".pickle"
        logger.info(f"Loading Image Features from {img_feature_path}")
        with open(img_feature_path, "rb") as f:
            loaded_feature_data = pickle.load(f)
        image_w = loaded_feature_data[0]["image_w"]
        image_h = loaded_feature_data[0]["image_h"]
        vfov = loaded_feature_data[0]["vfov"]
        keys = []
        features = {}
        region_tokens = {}
        for item in loaded_feature_data:
            long_id = (
                f"{item['scanId']}_{item['viewpointId']}_{item['featureViewIndex']}"
            ).encode()
            features[long_id] = item["features"]
            region_tokens[long_id] = item["region_tokens"]
            keys.append(long_id)
        t_e = time.time()
        logger.info(
            "Loaded Image Features from {} in time: {:0.2f} mins".format(
                img_feature_path, (t_e - t_s) / 60.0
            )
        )
        return keys, features, region_tokens, image_w, image_h, vfov

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, key):
        if key not in self.keys:
            raise TypeError(f"invalid key: {key}")
        if self.use_lmdb:
            # load from disk
            with self.env.begin(write=False) as txn:
                item = pickle.loads(txn.get(key))
                return item["features"]
        else:
            return self.features[key]

    def get_region_tokens(self, key):
        if key not in self.keys:
            raise TypeError(f"invalid key: {key}")
        return self.region_tokens[key]


def get_encoding_for_oscar(tokenizer, obs):
    truncate_dialog = True
    use_oscar_settings = True
    TAR_BACK = False

    pad_token_id = 0
    cls_token_segment_id = 0
    pad_token_segment_id = 0
    sep_token_segment_id = 0
    tar_token_segment_id = 1
    ques_token_segment_id = 2
    ans_token_segment_id = 3

    MAX_SEQ_LENGTH = 512
    MAX_DIALOG_LEN = 512 - 4  # including [QUES]s and [ANS]s
    MAX_TARGET_LENGTH = 4 - 2  # [CLS], [TAR], [SEP] after QA and before Action # # TOTAL 768

    new_obs = []
    for item in obs:
        instruction = item["instructions"]
        target = instruction.split("<TAR>")[1]
        rest = instruction.split("<TAR>")[0]
        dialog_history = re.split("<NAV>|<ORA>", rest)
        dialog_history = [item for item in dialog_history if item != ""]

        token_target = tokenizer.tokenize(target)
        token_target = token_target[:MAX_TARGET_LENGTH]

        token_dialog_history = []
        for turn in dialog_history:
            token_turn = tokenizer.tokenize(turn)
            token_dialog_history.append(token_turn)

        if truncate_dialog:
            # max_seq_length - 4 as accounting for [CLS], [TAR], Target, [SEP]
            token_dialog_history = truncate_dialogs(
                token_dialog_history, amount=MAX_DIALOG_LEN, left=True
            )

        tokens = [tokenizer.cls_token]
        segment_ids = [cls_token_segment_id]

        if not TAR_BACK:
            if use_oscar_settings:
                sep_token = tokenizer.sep_token
            else:
                sep_token = tokenizer.tar_token
            tokens += [sep_token] + token_target
            segment_ids += [tar_token_segment_id] * (len(token_target) + 1)

        for i, turn in enumerate(token_dialog_history):
            if use_oscar_settings:
                sep_token = tokenizer.sep_token
                segment_id = sep_token_segment_id
            else:
                if i % 2 == 0:
                    sep_token = tokenizer.ques_token
                    segment_id = ques_token_segment_id
                else:
                    sep_token = tokenizer.ans_token
                    segment_id = ans_token_segment_id
            tokens += [sep_token] + turn
            segment_ids += [segment_id] * (len(turn) + 1)

        if TAR_BACK:
            if use_oscar_settings:
                sep_token = tokenizer.sep_token
            else:
                sep_token = tokenizer.tar_token
            tokens += [sep_token] + token_target
            segment_ids += [tar_token_segment_id] * (len(token_target) + 1)

        tokens += [tokenizer.sep_token]
        segment_ids += [sep_token_segment_id]

        tokens += [pad_token_id] * (MAX_SEQ_LENGTH - len(tokens) - 1)
        segment_ids += [pad_token_segment_id] * (MAX_SEQ_LENGTH - len(segment_ids) - 1)

        token_ids = tokenizer.convert_tokens_to_ids(tokens)

        new_obs.append({"instr_encoding": token_ids, "segment_ids": segment_ids})  # "tokens": tokens
    return new_obs
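# Illustrative example (added sketch, not from this repo): truncate_dialogs
# keeps the most recent turns when truncating from the left; each turn costs
# len(turn) + 1 tokens to account for its [QUES]/[ANS] tag.
# turns = [["go", "left"], ["which", "door", "?"], ["the", "red", "one"]]
# truncate_dialogs(turns, amount=6, left=True)
# # -> [["?"], ["the", "red", "one"]]   (cost: 1+1 for ["?"], 3+1 for the last turn)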
[ "shrivastava.ayush1996@gmail.com" ]
shrivastava.ayush1996@gmail.com
3a2925faeb0eaad7e3a73932dba72170f81fdccb
26629871a6c7eaa82dcf1d7f1adf8cae2ab24991
/DressitUp/Home/views.py
fccfbe31e43ed74692670d210e631723d6a742cb
[]
no_license
RonakNandanwar26/DressitUp
2421fb62ad5e47be36f66dc3920cafe49ee43eb9
4e7ac01a9411ad2b767efb2a80ad5dc6344449ab
refs/heads/master
2022-11-30T23:14:38.989536
2020-07-11T12:17:08
2020-07-11T12:17:08
278,849,777
0
0
null
2022-11-18T10:56:10
2020-07-11T11:39:51
Python
UTF-8
Python
false
false
2,563
py
from django.shortcuts import render, redirect, get_object_or_404
from .forms import ContactForm, ProfileForm, UserForm
from django.contrib import messages
from django.core.mail import send_mail
from DressitUp import settings
from products.forms import ProductForm

# Create your views here.


def home(request):
    template = 'Home/index.html'
    return render(request, template, {})


def list(request):
    template = 'Home/list.html'
    return render(request, template, {})


def about(request):
    template = 'Home/about.html'
    return render(request, template, {})


def shop(request):
    template = 'Home/shop.html'
    return render(request, template, {})


def contact(request):
    if request.method == "POST":
        form = ContactForm(request.POST or None)
        if form.is_valid():
            contact_name = form.cleaned_data['name']
            contact_email = form.cleaned_data['email']
            sub = form.cleaned_data['subject']
            content = form.cleaned_data['message']
            print(contact_name)
            form.save()
            subject = 'Hello ' + contact_name + ' from DressitUp!'
            message = 'Stay Connected. We would love to hear you!'
            email_from = settings.EMAIL_HOST_USER
            email_to = [contact_email, ]
            send_mail(subject, message, email_from, email_to)
            messages.success(request, 'Form submitted successfully.')
            return redirect('Home:home')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = ContactForm()
    template = 'Home/contact.html'
    return render(request, template, {'form': form})


def profile(request):
    template = 'Home/profile.html'
    if request.method == 'POST':
        user_form = UserForm(request.POST or None, request.FILES or None, instance=request.user)
        profile_form = ProfileForm(request.POST or None, request.FILES or None, instance=request.user.profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, "Your Profile is Updated Successfully..")
            return redirect('Home:home')
        else:
            messages.error(request, 'Please Correct the error below')
    else:
        user_form = UserForm(instance=request.user)
        profile_form = ProfileForm(instance=request.user.profile)
    return render(request, template, {'user_form': user_form, 'profile_form': profile_form})
[ "ronaknandanwar1999@gmail.com" ]
ronaknandanwar1999@gmail.com
30295c60432b3dc86a5982db72a44530415d66b1
893577de9978f7868e7a3608ab697a320adf55f1
/python/Day1/problem1_3.py
9c71c686b36cf77b1e2c9ff80693415d699a73b8
[]
no_license
zealfory/xiyu-NLPTrainee
0d8c6ab80cfc7b3a00e886f340f34e5ed4650fc2
3e63bad5d53b478563003d0c78fa1cab63fcefb4
refs/heads/master
2020-06-13T15:24:30.589485
2019-08-26T08:15:22
2019-08-26T08:15:22
194,693,706
1
0
null
null
null
null
UTF-8
Python
false
false
1,079
py
def longestValidParentheses(s):
    """
    :param s: str -- the input string
    :return: int -- length of the longest valid parentheses substring
    """
    s_length = len(s)
    stack = []
    start = 0
    maxlen = 0
    for i in range(s_length):
        # push the index of every left parenthesis
        if s[i] == '(':
            stack.append(i)
        # right parenthesis
        else:
            # empty stack: move the start point past this position
            if len(stack) == 0:
                start = i + 1
                continue
            # non-empty stack: pop the matching left parenthesis
            else:
                a = stack.pop()
                # update the maximum length
                if len(stack) == 0:
                    maxlen = max(i - start + 1, maxlen)
                else:
                    maxlen = max(i - stack[-1], maxlen)
    return maxlen


# test
def main():
    print(longestValidParentheses("(()"))
    print(longestValidParentheses(")()())"))


if __name__ == "__main__":
    main()
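# Worked trace (illustrative, not part of the original solution): for
# s = ")()())", the ')' at index 0 finds an empty stack and moves start to 1;
# indices 1..4 then match as "()()", so maxlen = 4 - 1 + 1 = 4.
# print(longestValidParentheses("()(()"))  # 2 -- only the trailing "()" pairs up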
[ "noreply@github.com" ]
zealfory.noreply@github.com
46cda83c4132a39c6286332ab4240e378fc2e4e7
e4ab9d29abcadd76e4f540d3ea5487aff4259004
/lab_7.1.py
0255a13ca5b7f91bd5fb38e43f948cf43ecf9a42
[]
no_license
swyatik/python-KPI
83332ed2fa3a49acd6c521416a08c005f4be78d2
10adac7d76790256ebe72339455a0a081433d4f6
refs/heads/master
2020-06-04T22:27:10.463697
2019-06-16T16:52:02
2019-06-16T16:52:02
192,215,205
0
0
null
null
null
null
UTF-8
Python
false
false
1,796
py
class Sphere(object):
    def __init__(self, radius=1.0, x=0.0, y=0.0, z=0.0):
        self.radius = float(radius)
        self.x = float(x)
        self.y = float(y)
        self.z = float(z)

    def get_volume(self):
        v = 4 / 3 * 3.1415926535897932384626433 * self.radius ** 3
        return v

    def get_square(self):
        s = 4 * 3.1415926535897932384626433 * (self.radius ** 2)
        return s

    def get_radius(self):
        return self.radius

    def get_center(self):
        return (self.x, self.y, self.z,)

    def set_radius(self, r):
        self.r = float(r)
        self.radius = r

    def set_center(self, x_new, y_new, z_new):
        self.x = float(x_new)
        self.y = float(y_new)
        self.z = float(z_new)

    def is_point_inside(self, x_1, y_1, z_1):
        self.x_1 = x_1
        self.y_1 = y_1
        self.z_1 = z_1
        rn = ((self.x_1 - self.x) ** 2 + (self.y_1 - self.y) ** 2 + (self.z_1 - self.z) ** 2) ** 0.5
        if rn > self.radius:
            return False
        else:
            return True


s1 = Sphere()
print(s1.radius, s1.x, s1.y, s1.z)
print('V1 =', s1.get_volume())
print('S1 =', s1.get_square())
print('R =', s1.get_radius())
print('coordinates = ', s1.get_center())
s1.set_radius(5)
print('R= %s' % (s1.get_radius()))
s1.set_center(1025, 1026, 1027)
print('coordinates=', s1.get_center())
print(s1.is_point_inside(1000, 1000, 1000), '\n')

s0 = Sphere(0.5)  # test sphere creation with radius and default center
print(s0.get_center())  # (0.0, 0.0, 0.0)
print(s0.get_volume())  # 0.523598775598
print(s0.is_point_inside(0, -1.5, 0))  # False
s0.set_radius(1.6)
print(s0.is_point_inside(0, -1.5, 0))  # True
print(s0.get_radius())  # 1.6
[ "noreply@github.com" ]
swyatik.noreply@github.com
c47eb54349cc1aaf6624d4dd8dda17bbcb9f3a10
e1c4b32f23d8622be21db1445c9877f0de1680f1
/backend/app/controllers/home.py
c6b483330ecfcd0750b79fd1d46b35e43bca8be4
[]
no_license
AngCosmin/api-flask
8d212f0393b9a7590eeafd1b704f1a2b51bfe0a3
7c09d78cda9160b60a162ac15761ad5817c17917
refs/heads/master
2022-12-15T04:36:43.692837
2019-04-05T20:30:15
2019-04-05T20:30:15
179,749,615
0
0
null
2022-09-16T17:58:57
2019-04-05T20:24:59
Python
UTF-8
Python
false
false
133
py
from flask import Blueprint

blueprint = Blueprint('home', __name__)


@blueprint.route('/')
def index():
    return 'Hello World'
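# Illustrative usage sketch (added; the app-factory wiring is an assumption,
# not shown in this file): the blueprint becomes active once registered.
# from flask import Flask
#
# app = Flask(__name__)
# app.register_blueprint(blueprint)   # serves GET / -> 'Hello World'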
[ "cosminzorr@gmail.com" ]
cosminzorr@gmail.com
3b5eb65cc24ada0602641c43bd8365025a109f61
43bd7dce16d5dd856d9755ee44b89316ab4dcfbd
/BakeryManagement/asgi.py
7aa89e87c3a4ee6687a9cd753db29c46f5d449c9
[]
no_license
rishabh-22/BakeryManagement
86bc0e478ed954c46e734afc0ee2f9261d46b2a7
0b75306f1db2f42e047d1e65a1baeaf62c29919e
refs/heads/master
2023-03-26T01:24:18.087439
2021-03-11T19:43:35
2021-03-11T19:43:35
344,091,099
1
0
null
null
null
null
UTF-8
Python
false
false
409
py
""" ASGI config for BakeryManagement project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BakeryManagement.settings') application = get_asgi_application()
[ "rishabh.bh22@gmail.com" ]
rishabh.bh22@gmail.com
b749d4bcecade6a4c865a8f3a69ebd30954dfe41
f09dc121f213f2881df3572288b7ee5b39246d73
/aliyun-python-sdk-config/aliyunsdkconfig/request/v20190108/GetSupportedResourceTypesRequest.py
8fb02d120fe982b0df0cc395179ce63061909e27
[ "Apache-2.0" ]
permissive
hetw/aliyun-openapi-python-sdk
2f31378ad6be0896fb8090423f607e9c7d3ae774
7443eacee9fbbaa93c7975c6dbec92d3c364c577
refs/heads/master
2023-01-19T22:42:36.214770
2020-12-04T10:55:14
2020-12-04T10:55:14
318,689,093
1
0
NOASSERTION
2020-12-05T03:03:03
2020-12-05T03:03:03
null
UTF-8
Python
false
false
1,296
py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkconfig.endpoint import endpoint_data


class GetSupportedResourceTypesRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'Config', '2019-01-08', 'GetSupportedResourceTypes', 'Config')
        self.set_method('GET')
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
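# Illustrative usage sketch (added; credentials and region are placeholders,
# and this dispatch pattern is the standard aliyunsdkcore flow rather than
# anything defined in this generated file):
# from aliyunsdkcore.client import AcsClient
#
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = GetSupportedResourceTypesRequest()
# response = client.do_action_with_exception(request)  # raw JSON bytes
# print(response)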
[ "sdk-team@alibabacloud.com" ]
sdk-team@alibabacloud.com
a6b9a81031ca5ebba259e3bfd9015c0ce85b1d1f
3e0abf5d310edec9ac8cd939b83518d5f1cb753c
/feature-a.py
e0ef5294caaa47e7af55eaf6dd68035d8175d3a2
[]
no_license
anushkhasingh30/git-1
ebc13f9974bee04650e7a6aa0e8313d1ebe5eaac
4516ce4a2ac811246c50a7b8012ff4a028959695
refs/heads/master
2023-06-25T00:04:15.593702
2021-07-27T10:38:00
2021-07-27T10:38:00
389,939,561
0
0
null
null
null
null
UTF-8
Python
false
false
19
py
print('feature a ')
[ "anushkhasingh30@gmail.com" ]
anushkhasingh30@gmail.com
5b82ca7833330ee0646d306a6cef65cb5c33df37
762742b3c5cb5706e93e12dbdc3f8c46fc65f0db
/Packs/GreyNoise/Integrations/GreyNoise/GreyNoise_test.py
bc42620c600a89f5bf9c62f42f621c88f3b2320f
[ "MIT" ]
permissive
EmersonElectricCo/content
018f95f7fe7de13819e093a3661587a18407e348
82c82bbee7d428f0b14991a88c67672e2c02f5af
refs/heads/master
2021-06-17T04:54:22.938033
2021-05-06T16:39:59
2021-05-06T16:39:59
161,693,191
2
0
MIT
2018-12-18T15:16:49
2018-12-13T20:47:26
Python
UTF-8
Python
false
false
8,763
py
import pytest
import json
import GreyNoise
from test_data.input_data import (  # type: ignore
    parse_code_and_body_data,
    get_ip_reputation_score_data,
    test_module_data,
    ip_reputation_command_data,
    ip_quick_check_command_data,
    generate_advanced_query_data,
    query_command_data,
    get_ip_context_data_data,
    stats_command_data,
    riot_command_response_data
)


class DummyResponse:
    """
    Dummy Response object of requests.response for unit testing.
    """

    def __init__(self, headers, text, status_code):
        self.headers = headers
        self.text = text
        self.status_code = status_code

    def json(self):
        """
        Dummy json method.
        """
        return json.loads(self.text)


@pytest.mark.parametrize("input_data, expected_output", parse_code_and_body_data)
def test_parse_code_and_body(input_data, expected_output):
    """
    Tests various combinations of error codes and messages.
    """
    response = GreyNoise.parse_code_and_body(input_data)
    assert response == expected_output


@pytest.mark.parametrize("input_data, expected_output", get_ip_reputation_score_data)
def test_get_ip_reputation_score(input_data, expected_output):
    """
    Tests various combinations of GreyNoise classification data.
    """
    response = GreyNoise.get_ip_reputation_score(input_data)
    assert response == expected_output


@pytest.mark.parametrize("api_key, api_response, status_code, expected_output", test_module_data)
def test_test_module(api_key, api_response, status_code, expected_output, mocker):
    """
    Tests test_module for GreyNoise integration.
    """
    client = GreyNoise.Client(api_key, "dummy_server", 10, "proxy", False, "dummy_integration")
    if isinstance(api_key, str) and api_key == "true_key":
        mocker.patch('greynoise.GreyNoise._request', return_value=api_response)
        response = GreyNoise.test_module(client)
        assert response == expected_output
    else:
        dummy_response = DummyResponse({}, api_response, status_code)
        mocker.patch('requests.Session.get', return_value=dummy_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.test_module(client)
        assert str(err.value) == expected_output


@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", ip_reputation_command_data)
def test_ip_reputation_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Tests various combinations of valid and invalid responses for IPReputation command.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    dummy_response = DummyResponse(
        {
            "Content-Type": "application/json"
        },
        json.dumps(api_response),
        status_code
    )
    if test_scenario == "positive":
        mocker.patch('requests.Session.get', return_value=dummy_response)
        response = GreyNoise.ip_reputation_command(client, args)
        assert response[0].outputs == expected_output
    else:
        mocker.patch('requests.Session.get', return_value=dummy_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.ip_reputation_command(client, args)
        assert str(err.value) == expected_output


@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", ip_quick_check_command_data)
def test_ip_quick_check_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Tests various combinations of valid and invalid responses for ip-quick-check command.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    dummy_response = DummyResponse(
        {
            "Content-Type": "application/json"
        },
        json.dumps(api_response),
        status_code
    )
    if test_scenario == "positive":
        mocker.patch('requests.Session.get', return_value=dummy_response)
        response = GreyNoise.ip_quick_check_command(client, args)
        assert response.outputs == expected_output
    elif test_scenario == "negative" and status_code == 200:
        mocker.patch('requests.Session.get', return_value=dummy_response)
        response = GreyNoise.ip_quick_check_command(client, args)
        with open('test_data/quick_check.md') as f:
            expected_hr = f.read()
        assert response.readable_output == expected_hr
    elif test_scenario == "negative":
        mocker.patch('requests.Session.get', return_value=dummy_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.ip_quick_check_command(client, args)
        assert str(err.value) == expected_output
    elif test_scenario == "custom":
        mocker.patch('greynoise.GreyNoise.quick', return_value=api_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.ip_quick_check_command(client, args)
        assert str(err.value) == expected_output


@pytest.mark.parametrize("args, expected_output", generate_advanced_query_data)
def test_generate_advanced_query(args, expected_output):
    """
    Tests various combinations of command arguments to generate GreyNoise
    advanced_query for query/stats command.
    """
    response = GreyNoise.generate_advanced_query(args)
    assert response == expected_output


@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", query_command_data)
def test_query_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Tests various combinations of valid and invalid responses for query command.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    dummy_response = DummyResponse(
        {
            "Content-Type": "application/json"
        },
        json.dumps(api_response),
        status_code
    )
    mocker.patch('requests.Session.get', return_value=dummy_response)
    if test_scenario == "positive":
        response = GreyNoise.query_command(client, args)
        assert response.outputs[GreyNoise.QUERY_OUTPUT_PREFIX['IP']] == expected_output['data']
    else:
        with pytest.raises(Exception) as err:
            _ = GreyNoise.query_command(client, args)
        assert str(err.value) == expected_output


@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", stats_command_data)
def test_stats_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Tests various combinations of valid and invalid responses for stats command.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    dummy_response = DummyResponse(
        {
            "Content-Type": "application/json"
        },
        json.dumps(api_response),
        status_code
    )
    mocker.patch('requests.Session.get', return_value=dummy_response)
    if test_scenario == "positive":
        response = GreyNoise.stats_command(client, args)
        assert response.outputs == expected_output
    else:
        with pytest.raises(Exception) as err:
            _ = GreyNoise.stats_command(client, args)
        assert str(err.value) == expected_output


@pytest.mark.parametrize("input_data, expected_output", get_ip_context_data_data)
def test_get_ip_context_data(input_data, expected_output):
    """
    Tests various combinations for converting ip-context and query command
    responses from sdk to Human Readable format.
    """
    response = GreyNoise.get_ip_context_data(input_data)
    assert response == expected_output


@pytest.mark.parametrize("test_scenario, status_code, input_data, expected", riot_command_response_data)
def test_riot_command(mocker, test_scenario, status_code, input_data, expected):
    """
    Test various inputs for riot command
    """
    client = GreyNoise.Client(api_key="true_api_key", api_server="dummy_server", timeout=10,
                              proxy="proxy", use_cache=False, integration_name="dummy_integration")
    dummy_response = DummyResponse(
        {
            "Content-Type": "application/json"
        },
        json.dumps(expected["raw_data"]),
        status_code
    )
    mocker.patch('requests.Session.get', return_value=dummy_response)
    if test_scenario == "positive":
        response = GreyNoise.riot_command(client, input_data)
        assert response.outputs == expected["raw_data"]
    else:
        with pytest.raises(Exception) as err:
            _ = GreyNoise.riot_command(client, input_data)
        assert str(err.value) == expected["error_message"].format(input_data["ip"])
[ "noreply@github.com" ]
EmersonElectricCo.noreply@github.com
6fe04aaf0e701031982130a0f867b59e8d83e3ec
42d18b5dba342099dae032ab2aa2bb19b995f9be
/ch/ch1/wxpy/helper/sendHelper.py
836277903d0cc0bfb05cfdad56a0430e3bb0d0a0
[]
no_license
wenyaodong777/python-workshop
4e38ee7f3c96e8cdac3804c980b735db304ffb18
5f7bb9aa227ec46c89f793f592f3c90e9cd50603
refs/heads/master
2020-05-26T18:14:58.354116
2019-05-24T00:52:32
2019-05-24T00:52:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
158
py
#!/usr/bin/python
# -*- coding: UTF-8 -*-


class WXSender():
    def send(self, groups, content):
        for group in groups:
            group.send(content)
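# Illustrative usage sketch (added; assumes the `groups` passed in are wxpy
# Group objects, which expose .send() -- the group name is a placeholder):
# from wxpy import Bot
#
# bot = Bot()  # logs in via QR-code scan
# groups = bot.groups().search('my-group-name')
# WXSender().send(groups, 'hello from the bot')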
[ "wuchenbao@odc.cmbchina.cn" ]
wuchenbao@odc.cmbchina.cn
04da31593f90e147dd4899029a36daa0fe6f87e5
867d946a375a3ad9808af45c15e8b2ea3274da0f
/Scripts/Linux/SQLTimeBasedColumns.py
4bf2980aab49ca6f4911ed4cfa193946ca4187bf
[]
no_license
AmatheraeWorld/AmatheraeWorld
5985a6731221d375750d30ca22c59fe3aed52a1f
1b32d2d2fed410c9c486c1cbc21dc8fa0ac8d1a7
refs/heads/master
2023-06-16T13:41:32.443881
2021-07-11T20:26:51
2021-07-11T20:26:51
265,905,582
1
1
null
null
null
null
UTF-8
Python
false
false
1,147
py
#!/usr/bin/python3

import requests, time, sys, signal
from pwn import *


def def_handler(sig, frame):
    log.failure("Exiting")
    sys.exit(1)


signal.signal(signal.SIGINT, def_handler)

url = 'http://admin.cronos.htb/index.php'
burp = {'http': 'http://127.0.0.1:8080'}

s = r'0123456789abcdefghijklmnopqrstuvwxyz'
result = ''


def check(payload):
    data_post = {
        'username': '%s' % payload,
        'password': 'test'
    }

    time_start = time.time()
    content = requests.post(url, data=data_post)
    time_end = time.time()

    if time_end - time_start > 5:
        return 1


p2 = log.progress("Payload")

for j in range(0, 5):
    p1 = log.progress("Columns [%d]" % j)
    for i in range(1, 10):
        for c in s:
            payload = "' or if(substr((select column_name from information_schema.columns where table_schema='admin' and table_name='users' limit %d,1),%d,1)='%c',sleep(5),1)-- -" % (j, i, c)
            p2.status("%s" % payload)
            if check(payload):
                result += c
                p1.status("%s" % result)
                break
    p1.success("%s" % result)
    result = ''
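# Explanatory note (added sketch, not in the original script): each guess
# injects IF(substr(...)='c', sleep(5), 1), so a response slower than the 5 s
# threshold in check() marks that character of the column name as correct;
# the outer loops walk column index j and character position i. A shorter
# sleep (e.g. 2 s) with a matching threshold would speed extraction up, at
# the cost of more false positives on a jittery network.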
[ "noreply@github.com" ]
AmatheraeWorld.noreply@github.com
c0fc1c5fe7e20c2b73669f3d38ce4eff71b3fa44
400f4a13784f93029dbe035392ba62f0956f1c1f
/sampler.py
8ec1187020e92427b9f63246e08ea5c919a22c2b
[ "MIT" ]
permissive
ussama-azizun/Masked_Face_Recognition
5e3516ec0d99380ef22decdd06c536bfe79a6cd1
2dc572573ebd9ac208314690b529ed69addf0913
refs/heads/master
2023-07-13T03:49:27.553385
2021-08-03T07:51:35
2021-08-03T07:51:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,331
py
from torch.utils.data.sampler import Sampler
import itertools
import numpy as np


def samples(df):
    label_to_samples = []
    samples = []
    label = 0
    for index, row in df.iterrows():
        if index == 0:
            samples.append(index)
            label = row['target']
        else:
            if row['target'] != label:
                label_to_samples.append(samples)
                samples = []
                label = row['target']
            samples.append(index)
    return label_to_samples


class PKSampler(Sampler):
    def __init__(self, data_source, p=15, k=20):
        super().__init__(data_source)
        self.p = p
        self.k = k
        self.data_source = data_source

    def __iter__(self):
        pk_count = len(self) // (self.p * self.k)
        for _ in range(pk_count):
            labels = np.random.choice(np.arange(len(self.data_source.label_to_samples)),
                                      self.p,
                                      replace=False)
            for l in labels:
                indices = self.data_source.label_to_samples[l]
                replace = True if len(indices) < self.k else False
                for i in np.random.choice(indices, self.k, replace=replace):
                    yield i

    def __len__(self):
        pk = self.p * self.k
        samples = ((len(self.data_source) - 1) // pk + 1) * pk
        return samples


def grouper(iterable, n):
    it = itertools.cycle(iter(iterable))
    for _ in range((len(iterable) - 1) // n + 1):
        yield list(itertools.islice(it, n))


# full label coverage per 'epoch'
class PKSampler2(Sampler):
    def __init__(self, data_source, p=15, k=20):
        super().__init__(data_source)
        self.p = p
        self.k = k
        self.data_source = data_source

    def __iter__(self):
        rand_labels = np.random.permutation(np.arange(len(self.data_source.label_to_samples)))
        for labels in grouper(rand_labels, self.p):
            for l in labels:
                indices = self.data_source.label_to_samples[l]
                replace = True if len(indices) < self.k else False
                for j in np.random.choice(indices, self.k, replace=replace):
                    yield j

    def __len__(self):
        num_labels = len(self.data_source.label_to_samples)
        samples = ((num_labels - 1) // self.p + 1) * self.p * self.k
        return samples
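# Illustrative usage sketch (added; assumes `dataset` is a map-style Dataset
# that exposes a `label_to_samples` attribute, e.g. built with samples(df)
# above from a DataFrame sorted by 'target'). Batch size p*k keeps each batch
# at p identities x k images, the layout triplet-style losses expect.
# from torch.utils.data import DataLoader
#
# dataset.label_to_samples = samples(df)
# sampler = PKSampler(dataset, p=15, k=20)
# loader = DataLoader(dataset, batch_size=15 * 20, sampler=sampler)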
[ "samyuan101234@gmail.com" ]
samyuan101234@gmail.com
45bb0f11373a3220f0f4387907cff7b0eee4e3f3
dc72589c38ba179524c2ee2e408c4f37b77cabf3
/backend/lizz_mob_jul15_dev_7685/urls.py
8827abab37592bfacf837a0176cebcca38cae754
[]
no_license
crowdbotics-apps/lizz-mob-jul15-dev-7685
cbcab97908bd568acc68b606d4c5becdb160364c
a41e88b463169443bcfdf12cf356a958c44f3400
refs/heads/master
2022-11-17T22:30:15.286209
2020-07-16T17:54:10
2020-07-16T17:54:10
280,012,855
0
0
null
null
null
null
UTF-8
Python
false
false
1,958
py
"""lizz_mob_jul15_dev_7685 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from allauth.account.views import confirm_email from rest_framework import permissions from drf_yasg.views import get_schema_view from drf_yasg import openapi urlpatterns = [ path("", include("home.urls")), path("accounts/", include("allauth.urls")), path("api/v1/", include("home.api.v1.urls")), path("admin/", admin.site.urls), path("users/", include("users.urls", namespace="users")), path("rest-auth/", include("rest_auth.urls")), # Override email confirm to use allauth's HTML view instead of rest_auth's API view path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email), path("rest-auth/registration/", include("rest_auth.registration.urls")), ] admin.site.site_header = "lizz mob jul15" admin.site.site_title = "lizz mob jul15 Admin Portal" admin.site.index_title = "lizz mob jul15 Admin" # swagger api_info = openapi.Info( title="lizz mob jul15 API", default_version="v1", description="API documentation for lizz mob jul15 App", ) schema_view = get_schema_view( api_info, public=True, permission_classes=(permissions.IsAuthenticated,), ) urlpatterns += [ path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs") ]
[ "team@crowdbotics.com" ]
team@crowdbotics.com
72f601d574e8d581120332421e096abfc29920f1
3280dd107a70e7d3637bfcfc2819f9b1477ed99a
/Myquizproject/Myquizproject/urls.py
55bd04fcaf038ecb464df39accb26f55b4cbb00c
[]
no_license
pooja666-v/pv_Quiz_repo
80b9cc3cb1c45a48e30f9e4f15392bbdf02bf22b
fc59c7fb7a92549b8cea3fac0f4cb451bdbcc2f7
refs/heads/master
2023-05-26T23:17:45.797984
2021-06-15T13:01:31
2021-06-15T13:01:31
376,889,813
0
0
null
null
null
null
UTF-8
Python
false
false
3,568
py
"""Myquizproject URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from quiz import views from django.contrib.auth.views import LogoutView,LoginView urlpatterns = [ path('admin/', admin.site.urls), path('teacher/',include('teacher.urls')), path('student/',include('student.urls')), path('',views.home_view,name=''), path('logout', LogoutView.as_view(template_name='quiz/logout.html'),name='logout'), path('aboutus', views.aboutus_view), path('contactus', views.contactus_view), path('afterlogin', views.afterlogin_view,name='afterlogin'), path('adminclick', views.adminclick_view), path('adminlogin', LoginView.as_view(template_name='quiz/adminlogin.html'),name='adminlogin'), path('admin-dashboard', views.admin_dashboard_view,name='admin-dashboard'), path('admin-teacher', views.admin_teacher_view,name='admin-teacher'), path('admin-view-teacher', views.admin_view_teacher_view,name='admin-view-teacher'), path('update-teacher/<int:pk>', views.update_teacher_view,name='update-teacher'), path('delete-teacher/<int:pk>', views.delete_teacher_view,name='delete-teacher'), path('admin-view-pending-teacher', views.admin_view_pending_teacher_view,name='admin-view-pending-teacher'), path('admin-view-teacher-salary', views.admin_view_teacher_salary_view,name='admin-view-teacher-salary'), path('approve-teacher/<int:pk>', views.approve_teacher_view,name='approve-teacher'), path('reject-teacher/<int:pk>', views.reject_teacher_view,name='reject-teacher'), path('admin-student', views.admin_student_view,name='admin-student'), path('admin-view-student', views.admin_view_student_view,name='admin-view-student'), path('admin-view-student-marks', views.admin_view_student_marks_view,name='admin-view-student-marks'), path('admin-view-marks/<int:pk>', views.admin_view_marks_view,name='admin-view-marks'), path('admin-check-marks/<int:pk>', views.admin_check_marks_view,name='admin-check-marks'), path('update-student/<int:pk>', views.update_student_view,name='update-student'), path('delete-student/<int:pk>', views.delete_student_view,name='delete-student'), path('admin-course', views.admin_course_view,name='admin-course'), path('admin-add-course', views.admin_add_course_view,name='admin-add-course'), path('admin-view-course', views.admin_view_course_view,name='admin-view-course'), path('delete-course/<int:pk>', views.delete_course_view,name='delete-course'), path('admin-question', views.admin_question_view,name='admin-question'), path('admin-add-question', views.admin_add_question_view,name='admin-add-question'), path('admin-view-question', views.admin_view_question_view,name='admin-view-question'), path('view-question/<int:pk>', views.view_question_view,name='view-question'), path('delete-question/<int:pk>', views.delete_question_view,name='delete-question'), ]
[ "poojavasawade98@gmail.com" ]
poojavasawade98@gmail.com
233d511497513d1530a9e8ff0eb47948dc21f5d0
d33065f76aa1a59142794364bb65526771ee71b3
/PDS/tcpstats
0bf7c29658d2db29425445bd767ee5578cad4f0f
[]
no_license
annaostroukh/University-projects
31e300aa0674e13a6d3cb83dc4ccc0161e98bb02
d16e64030f4230107130d770e3c15e472aed7319
refs/heads/master
2021-01-11T14:14:20.619238
2017-02-07T14:50:46
2017-02-07T14:50:46
81,216,867
0
1
null
null
null
null
UTF-8
Python
false
false
10,103
#!/usr/bin/env python2.7

import datetime
from decimal import Decimal
import socket
import struct
import sys
import webbrowser

import dpkt
from dpkt.tcp import TCP
from dpkt.ethernet import Ethernet
from dpkt.ip import IP, IP_PROTO_TCP
import json


def main():
    computeTCPStat()


# function parses a packet capture
# @filename is a name of the file which we parse
# returns file_entries - list of dictionaries with parsed tcp data
def fileParse(filename):
    f = open(filename, 'rb')    # opening a packet file
    pcap = dpkt.pcap.Reader(f)  # reading a packet file
    packet_counter = 0          # counter of packets in a file
    file_entries = []           # list of dictionaries of tcp data entries
    keys = ('packet #', 'time', 'len', 'seq', 'ack', 'window', 'scale factor', 'payload', 'sum',
            'flags', 'source', 'source ip', 'destination', 'destination ip')  # keys for each value in dictionary

    for timestamp, buf in pcap:
        packet_counter += 1
        eth = Ethernet(buf)
        if eth.type != dpkt.ethernet.ETH_TYPE_IP:   # if not an IP packet
            continue
        ip = eth.data
        dst_ip = socket.inet_ntoa(ip.dst)
        src_ip = socket.inet_ntoa(ip.src)
        if ip.p != IP_PROTO_TCP:                    # if not a TCP packet
            print("No TCP packet found")
            continue
        tcp = ip.data

        # Allow to decode flags
        fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0
        syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0
        rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0
        psh_flag = (tcp.flags & dpkt.tcp.TH_PUSH) != 0
        ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0
        urg_flag = (tcp.flags & dpkt.tcp.TH_URG) != 0
        ece_flag = (tcp.flags & dpkt.tcp.TH_ECE) != 0
        cwr_flag = (tcp.flags & dpkt.tcp.TH_CWR) != 0

        # human-readable definitions of flags
        flags = (("FIN " if fin_flag else "") +
                 ("SYN " if syn_flag else "") +
                 ("RST " if rst_flag else "") +
                 ("PSH " if psh_flag else "") +
                 ("ACK " if ack_flag else "") +
                 ("URG " if urg_flag else "") +
                 ("ECE " if ece_flag else "") +
                 ("CWR " if cwr_flag else ""))

        # define window scale factor
        WSCALE = (0,)  # bug fix: default when the window-scale option is absent
        option_list = dpkt.tcp.parse_opts(tcp.opts)
        for option in option_list:
            if option[0] == 3:
                WSCALE = struct.unpack(">b", option[1])

        time = Decimal(datetime.datetime.utcfromtimestamp(timestamp).strftime('%S.%f'))

        # tuple with values of each packet in dictionary
        data = (packet_counter, time, len(buf), tcp.seq, tcp.ack, tcp.win, WSCALE[0], len(tcp.data),
                tcp.sum, flags, tcp.sport, src_ip, tcp.dport, dst_ip)
        tcp_data = dict(zip(keys, data))    # matching keys with values and adding entries to the dictionary
        file_entries.append(tcp_data)       # creating a list of dictionaries with parsed tcp data
    f.close()
    return file_entries


def computeTCPStat():
    print("Parsing a file...")
    file_entries = fileParse(filename)
    timeVal = []        # list of dictionaries with time values
    outputDict = []     # output dictionary
    keysTime = ('packet #', 'time', 'delta')
    curTime = 0         # current time for speed measurements

    print("Analysing statistics...")
    for i in range(len(file_entries)):
        # Setting up reference packet according to SYN flag (TCP connection initiated)
        if file_entries[i]['flags'] == 'SYN ':
            refPacket = file_entries[i]['packet #']
            # Setting up reference time and adding reference packet to dictionary
            timeVal.append({'packet #': refPacket, 'time': Decimal(0.000000), 'delta': Decimal(0.000000)})

    # Computing delta and reference time
    for refPacket in range(len(file_entries) - refPacket):
        delta = file_entries[refPacket + 1]['time'] - file_entries[refPacket]['time']   # Time delta from reference packet
        time = delta + Decimal(timeVal[refPacket]['time'])                              # Time since reference packet
        packet = refPacket + 2
        # Saving time statistics to dictionary
        dataTime = (packet, time, abs(delta))
        timeVal.append(dict(zip(keysTime, dataTime)))

    # Getting the receiver and sender parameters of a TCP connection
    for i in range(len(file_entries)):
        if file_entries[i]['flags'] == 'SYN ':
            receiverIP = file_entries[i]['destination ip']
            receiverWinScale = pow(2, file_entries[i]['scale factor'])
        if file_entries[i]['flags'] == 'SYN ACK ':
            senderIP = file_entries[i]['destination ip']
            senderWinScale = pow(2, file_entries[i]['scale factor'])

    # Receiver window
    for i in range(len(file_entries)):
        if (file_entries[i]['flags'] != 'SYN ' and file_entries[i]['flags'] != 'SYN ACK ') and file_entries[i]['destination ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
            recWin = file_entries[i]['window'] * receiverWinScale
            timeRecWin = timeVal[i]['time']
            dataRec = (str(timeRecWin), str(recWin))
            keysRec = ('ReceiverTime', 'RecWin')
            outputDict.append(dict(zip(keysRec, dataRec)))

    # Sender window
    for i in range(len(file_entries)):
        if (file_entries[i]['flags'] != 'SYN ' and file_entries[i]['flags'] != 'SYN ACK ') and file_entries[i]['destination ip'] == senderIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
            sendWin = file_entries[i]['window'] * senderWinScale
            timeSendWin = timeVal[i]['time']
            dataSend = (str(timeSendWin), str(sendWin))
            keysSend = ('SenderTime', 'SenderWin')
            outputDict.append(dict(zip(keysSend, dataSend)))

    # Round trip time
    for i in range(len(file_entries)):
        # Receiver RTT
        if file_entries[i]['flags'] == 'ACK ' and file_entries[i]['destination ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
            if file_entries[i]['seq'] == file_entries[i - 1]['ack'] and file_entries[i - 1]['flags'] != 'SYN ACK ' and file_entries[i - 1]['flags'] != 'FIN ACK ':
                rtt = timeVal[i - 1]['delta']
                seqNumber = file_entries[i - 1]['seq']
                dataRtt = (str(rtt * 1000), str(seqNumber))     # Saving rtt converted to [ms]
                keysRtt = ('RTT', 'Sequence')
                outputDict.append(dict(zip(keysRtt, dataRtt)))

    # Slow start
    for i in range(len(file_entries)):
        # Receiver SS (packets from server)
        if (file_entries[i]['flags'] == 'SYN ACK ' or file_entries[i]['flags'] == 'ACK ' or file_entries[i]['flags'] == 'FIN ACK ') and file_entries[i]['source ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
            if file_entries[i]['flags'] == 'SYN ACK ':
                time = 0    # setting reference time according to the first SYN ACK packet
            if file_entries[i]['flags'] == 'ACK ' or file_entries[i]['flags'] == 'FIN ACK ':
                time = time + timeVal[i]['delta']   # time on X-axis will show how RTT is changing since time reference
                seqNumberSS = file_entries[i]['seq']
                dataSS = (str(time), str(seqNumberSS))
                keysSS = ('TimeSS', 'SequenceSS')
                outputDict.append(dict(zip(keysSS, dataSS)))
        # Sender SS (packets from client)
        if (file_entries[i]['flags'] == 'SYN ' or file_entries[i]['flags'] == 'ACK ' or file_entries[i]['flags'] == 'FIN ACK ') and file_entries[i]['source ip'] == senderIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
            seqNumberSS = file_entries[i]['seq']
            timeSS = timeVal[i]['time']
            dataSS = (str(timeSS), str(seqNumberSS))
            keysSS = ('TimeSSsen', 'SequenceSSsen')
            outputDict.append(dict(zip(keysSS, dataSS)))

    # Speed of TCP connection in interval 0.01 s
    timerRange = int((int(timeVal[-1]['time']) + 1) / 0.01)     # setting up a required amount of steps to look through packets
    for timer in range(timerRange):
        lastTime = curTime
        curTime = curTime + 0.01
        byte = 0        # bytes of receiver
        byteSen = 0     # bytes of sender
        bytes = []
        bytesSen = []
        for i in range(len(timeVal)):
            # Receiver speed
            if lastTime <= timeVal[i]['time'] <= curTime and file_entries[i]['source ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
                byte = byte + file_entries[i]['len']
                time = lastTime
                bytes.append(byte)
            # Sender speed
            elif lastTime <= timeVal[i]['time'] <= curTime and file_entries[i]['source ip'] == senderIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
                byteSen = byteSen + file_entries[i]['len']
                timeSen = lastTime
                bytesSen.append(byteSen)
        # computing receiver bytes
        if bytes:
            bytessum = max(bytes)
        else:
            time = lastTime
            bytessum = 0
        dataSp = (str(time), str(bytessum))
        keysSp = ('Time', 'BytesRec')
        outputDict.append(dict(zip(keysSp, dataSp)))
        # computing sender bytes
        if bytesSen:
            bytessumSen = max(bytesSen)
        else:
            timeSen = lastTime
            bytessumSen = 0
        dataSpSen = (str(timeSen), str(bytessumSen))
        keysSpSen = ('TimeSen', 'BytesSen')
        outputDict.append(dict(zip(keysSpSen, dataSpSen)))

    # Exporting statistics to JSON log file
    print("Exporting statistics...")
    file = open("log.json", "w")
    json.dump(outputDict, file, indent=1)
    file.close()


if __name__ == "__main__":
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        main()
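# Usage note (added sketch, not in the original): the script expects a pcap
# containing a single TCP connection and writes the computed series (receiver
# and sender windows, RTT per ACKed segment, slow-start sequence numbers, and
# per-10 ms throughput) to log.json:
#     ./tcpstats capture.pcap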
[ "annaostroukh@gmail.com" ]
annaostroukh@gmail.com
c08ce6dd49ab813444d35c3c9349c72f052e228b
5e255ad1360c90478393744586663741a9569c21
/linebot/models/base.py
164fca9d9e9240236cfe90b9b6b2b37ba835814f
[ "Apache-2.0" ]
permissive
line/line-bot-sdk-python
d76268e8b542060d6eccbacc5dbfab16960ecc35
cffd35948238ae24982173e30b1ea1e595bbefd9
refs/heads/master
2023-08-31T22:12:31.698183
2023-08-28T01:10:09
2023-08-28T01:10:09
70,553,423
1,898
1,181
Apache-2.0
2023-09-11T05:14:07
2016-10-11T03:42:26
Python
UTF-8
Python
false
false
4,121
py
# -*- coding: utf-8 -*-

#  Licensed under the Apache License, Version 2.0 (the "License"); you may
#  not use this file except in compliance with the License. You may obtain
#  a copy of the License at
#
#       https://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#  License for the specific language governing permissions and limitations
#  under the License.

"""linebot.models.base module."""

import json

from .. import utils


class Base(object):
    """Base class of model.

    Suitable for JSON base data.
    """

    def __init__(self, **kwargs):
        """__init__ method.

        :param kwargs:
        """
        pass

    def __str__(self):
        """__str__ method."""
        return self.as_json_string()

    def __repr__(self):
        """__repr__ method."""
        return str(self)

    def __eq__(self, other):
        """__eq__ method.

        :param other:
        """
        return other and self.as_json_dict() == other.as_json_dict()

    def __ne__(self, other):
        """__ne__ method.

        :param other:
        """
        return not self.__eq__(other)

    def as_json_string(self):
        """Return JSON string from this object.

        :rtype: str
        """
        return json.dumps(self.as_json_dict(), sort_keys=True)

    def as_json_dict(self):
        """Return dictionary from this object.

        :return: dict
        """
        data = {}
        for key, value in self.__dict__.items():
            camel_key = utils.to_camel_case(key)
            if isinstance(value, (list, tuple, set)):
                data[camel_key] = list()
                for item in value:
                    if hasattr(item, 'as_json_dict'):
                        data[camel_key].append(item.as_json_dict())
                    else:
                        data[camel_key].append(item)
            elif hasattr(value, 'as_json_dict'):
                data[camel_key] = value.as_json_dict()
            elif value is not None:
                data[camel_key] = value
        return data

    @classmethod
    def new_from_json_dict(cls, data, use_raw_message=False):
        """Create a new instance from a dict.

        :param data: JSON dict
        :param bool use_raw_message: Using original Message key as attribute
        """
        if use_raw_message:
            return cls(use_raw_message=use_raw_message, **data)
        new_data = {utils.to_snake_case(key): value
                    for key, value in data.items()}
        return cls(**new_data)

    @staticmethod
    def get_or_new_from_json_dict(data, cls):
        """Get `cls` object w/ deserialization from json if needed.

        If data is instance of cls, return data.
        Else if data is instance of dict, create instance from dict.
        Else, return None.

        :param data:
        :param cls:
        :rtype: object
        """
        if isinstance(data, cls):
            return data
        elif isinstance(data, dict):
            return cls.new_from_json_dict(data)
        return None

    @staticmethod
    def get_or_new_from_json_dict_with_types(
            data, cls_map, type_key='type', use_raw_message=False
    ):
        """Get `cls` object w/ deserialization from json by using type key hint if needed.

        If data is instance of one of cls, return data.
        Else if data is instance of dict, create instance from dict.
        Else, return None.

        :param data:
        :param cls_map:
        :param type_key:
        :rtype: object
        :param bool use_raw_message: Using original Message key as attribute
        """
        if isinstance(data, tuple(cls_map.values())):
            return data
        elif isinstance(data, dict):
            type_val = data[type_key]
            if type_val in cls_map:
                return cls_map[type_val].new_from_json_dict(data, use_raw_message=use_raw_message)
        return None
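# Illustrative round-trip sketch (added; `Thing` is a hypothetical subclass,
# not part of the SDK): Base camel-cases attribute names on serialization and
# snake-cases JSON keys on deserialization.
# class Thing(Base):
#     def __init__(self, display_name=None, **kwargs):
#         super(Thing, self).__init__(**kwargs)
#         self.display_name = display_name
#
# t = Thing(display_name='hi')
# t.as_json_string()                               # '{"displayName": "hi"}'
# Thing.new_from_json_dict({'displayName': 'hi'})  # == t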
[ "noreply@github.com" ]
line.noreply@github.com
f9de853a23a36e10aefcbfd18bf0dfcea6055cfa
19d47d47c9614dddcf2f8d744d883a90ade0ce82
/pynsxt/swagger_client/models/ns_service_group_list_result.py
bbaee722d7f2d1956d8eea75ec65fa8637b79b2e
[]
no_license
darshanhuang1/pynsxt-1
9ed7c0da9b3a64e837a26cbbd8b228e811cee823
fb1091dff1af7f8b8f01aec715682dea60765eb8
refs/heads/master
2020-05-25T14:51:09.932853
2018-05-16T12:43:48
2018-05-16T12:43:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,282
py
# coding: utf-8

"""
    NSX API

    VMware NSX REST API  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


import pprint
import re  # noqa: F401

import six

from swagger_client.models.list_result import ListResult  # noqa: F401,E501
from swagger_client.models.ns_service_group import NSServiceGroup  # noqa: F401,E501
from swagger_client.models.resource_link import ResourceLink  # noqa: F401,E501
from swagger_client.models.self_resource_link import SelfResourceLink  # noqa: F401,E501


class NSServiceGroupListResult(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        '_self': 'SelfResourceLink',
        'links': 'list[ResourceLink]',
        'schema': 'str',
        'cursor': 'str',
        'sort_ascending': 'bool',
        'sort_by': 'str',
        'result_count': 'int',
        'results': 'list[NSServiceGroup]'
    }

    attribute_map = {
        '_self': '_self',
        'links': '_links',
        'schema': '_schema',
        'cursor': 'cursor',
        'sort_ascending': 'sort_ascending',
        'sort_by': 'sort_by',
        'result_count': 'result_count',
        'results': 'results'
    }

    def __init__(self, _self=None, links=None, schema=None, cursor=None, sort_ascending=None, sort_by=None, result_count=None, results=None):  # noqa: E501
        """NSServiceGroupListResult - a model defined in Swagger"""  # noqa: E501

        self.__self = None
        self._links = None
        self._schema = None
        self._cursor = None
        self._sort_ascending = None
        self._sort_by = None
        self._result_count = None
        self._results = None
        self.discriminator = None

        if _self is not None:
            self._self = _self
        if links is not None:
            self.links = links
        if schema is not None:
            self.schema = schema
        if cursor is not None:
            self.cursor = cursor
        if sort_ascending is not None:
            self.sort_ascending = sort_ascending
        if sort_by is not None:
            self.sort_by = sort_by
        if result_count is not None:
            self.result_count = result_count
        self.results = results

    @property
    def _self(self):
        """Gets the _self of this NSServiceGroupListResult.  # noqa: E501


        :return: The _self of this NSServiceGroupListResult.  # noqa: E501
        :rtype: SelfResourceLink
        """
        return self.__self

    @_self.setter
    def _self(self, _self):
        """Sets the _self of this NSServiceGroupListResult.


        :param _self: The _self of this NSServiceGroupListResult.  # noqa: E501
        :type: SelfResourceLink
        """

        self.__self = _self

    @property
    def links(self):
        """Gets the links of this NSServiceGroupListResult.  # noqa: E501

        The server will populate this field when returning the resource. Ignored on PUT and POST.  # noqa: E501

        :return: The links of this NSServiceGroupListResult.  # noqa: E501
        :rtype: list[ResourceLink]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this NSServiceGroupListResult.

        The server will populate this field when returning the resource. Ignored on PUT and POST.  # noqa: E501

        :param links: The links of this NSServiceGroupListResult.  # noqa: E501
        :type: list[ResourceLink]
        """

        self._links = links

    @property
    def schema(self):
        """Gets the schema of this NSServiceGroupListResult.  # noqa: E501


        :return: The schema of this NSServiceGroupListResult.  # noqa: E501
        :rtype: str
        """
        return self._schema

    @schema.setter
    def schema(self, schema):
        """Sets the schema of this NSServiceGroupListResult.


        :param schema: The schema of this NSServiceGroupListResult.  # noqa: E501
        :type: str
        """

        self._schema = schema

    @property
    def cursor(self):
        """Gets the cursor of this NSServiceGroupListResult.  # noqa: E501

        Opaque cursor to be used for getting next page of records (supplied by current result page)  # noqa: E501

        :return: The cursor of this NSServiceGroupListResult.  # noqa: E501
        :rtype: str
        """
        return self._cursor

    @cursor.setter
    def cursor(self, cursor):
        """Sets the cursor of this NSServiceGroupListResult.

        Opaque cursor to be used for getting next page of records (supplied by current result page)  # noqa: E501

        :param cursor: The cursor of this NSServiceGroupListResult.  # noqa: E501
        :type: str
        """

        self._cursor = cursor

    @property
    def sort_ascending(self):
        """Gets the sort_ascending of this NSServiceGroupListResult.  # noqa: E501


        :return: The sort_ascending of this NSServiceGroupListResult.  # noqa: E501
        :rtype: bool
        """
        return self._sort_ascending

    @sort_ascending.setter
    def sort_ascending(self, sort_ascending):
        """Sets the sort_ascending of this NSServiceGroupListResult.


        :param sort_ascending: The sort_ascending of this NSServiceGroupListResult.  # noqa: E501
        :type: bool
        """

        self._sort_ascending = sort_ascending

    @property
    def sort_by(self):
        """Gets the sort_by of this NSServiceGroupListResult.  # noqa: E501

        Field by which records are sorted  # noqa: E501

        :return: The sort_by of this NSServiceGroupListResult.  # noqa: E501
        :rtype: str
        """
        return self._sort_by

    @sort_by.setter
    def sort_by(self, sort_by):
        """Sets the sort_by of this NSServiceGroupListResult.

        Field by which records are sorted  # noqa: E501

        :param sort_by: The sort_by of this NSServiceGroupListResult.  # noqa: E501
        :type: str
        """

        self._sort_by = sort_by

    @property
    def result_count(self):
        """Gets the result_count of this NSServiceGroupListResult.  # noqa: E501

        Count of results found (across all pages), set only on first page  # noqa: E501

        :return: The result_count of this NSServiceGroupListResult.  # noqa: E501
        :rtype: int
        """
        return self._result_count

    @result_count.setter
    def result_count(self, result_count):
        """Sets the result_count of this NSServiceGroupListResult.

        Count of results found (across all pages), set only on first page  # noqa: E501

        :param result_count: The result_count of this NSServiceGroupListResult.  # noqa: E501
        :type: int
        """

        self._result_count = result_count

    @property
    def results(self):
        """Gets the results of this NSServiceGroupListResult.  # noqa: E501

        Paged collection of NSServiceGroups  # noqa: E501

        :return: The results of this NSServiceGroupListResult.  # noqa: E501
        :rtype: list[NSServiceGroup]
        """
        return self._results

    @results.setter
    def results(self, results):
        """Sets the results of this NSServiceGroupListResult.

        Paged collection of NSServiceGroups  # noqa: E501

        :param results: The results of this NSServiceGroupListResult.  # noqa: E501
        :type: list[NSServiceGroup]
        """
        if results is None:
            raise ValueError("Invalid value for `results`, must not be `None`")  # noqa: E501

        self._results = results

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NSServiceGroupListResult):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
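# ----------------------------------------------------------------------------
# Illustrative usage sketch (added note, not part of the generated module).
# The field values below are made up; the snippet only assumes the generated
# swagger_client package is importable.
# ----------------------------------------------------------------------------
from swagger_client.models.ns_service_group_list_result import NSServiceGroupListResult

# `results` is the one required field; everything else is optional.
page = NSServiceGroupListResult(results=[], cursor='00360002', result_count=0)
print(page.to_dict())   # plain-dict view of the model, ready for json.dumps
print(page)             # __repr__ pretty-prints the same dict via pprint
# Per the field docstrings, the opaque `cursor` from one page is what a caller
# passes back to the list API to fetch the next page, and `result_count` is
# only populated on the first page.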
[ "tcraft@pivotal.io" ]
tcraft@pivotal.io
34659a2890a4b19d6a7a1abb7a98dd6fbe5adce9
0e1e643e864bcb96cf06f14f4cb559b034e114d0
/Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_4s/L6/step10_a.py
2202753791e6d77741009c3408d45023e128a019
[]
no_license
KongBOy/kong_model2
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
1af20b168ffccf0d5293a393a40a9fa9519410b2
refs/heads/master
2022-10-14T03:09:22.543998
2022-10-06T11:33:42
2022-10-06T11:33:42
242,080,692
3
0
null
null
null
null
UTF-8
Python
false
false
140,332
py
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                                   ### path of the step10_b.py currently being executed
code_exe_path_element = code_exe_path.split("\\")                            ### split the path; we will look up below at which level kong_model2 sits
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")                      ### find at which level kong_model2 sits
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])          ### locate the kong_model2 dir
import sys                                                                   ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("   code_exe_path:", code_exe_path)
# print("   code_exe_path_element:", code_exe_path_element)
# print("   code_dir:", code_dir)
# print("   kong_layer:", kong_layer)
# print("   kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer               ### the -1 in the middle converts the length into an index
# print("   kong_to_py_layer:", kong_to_py_layer)
if   (kong_to_py_layer == 0): template_dir = ""
elif (kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the "step1x_" prefix; later I felt a meaningful name is fine to keep, so it became 0
elif (kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip the "mask_" prefix, which was only added because a Python module name cannot start with a digit; later the auto-sorted order seemed acceptable too, so it became 0
elif (kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("   template_dir:", template_dir)                                    ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_4side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder

rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)

rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]

import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_4s.L6.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder one level "above" result_dir! A nested exp_dir is fine too~
For example, with exp_dir = "6_mask_unet/a_name_you_pick", every result_dir then lives under:
    6_mask_unet/a_name_you_pick/result_a
    6_mask_unet/a_name_you_pick/result_b
    6_mask_unet/a_name_you_pick/...
'''
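### Worked example (added note, not in the original file): plugging this file's own
### path into the logic above, code_exe_path ends in
###   kong_model2\Exps_7_v3\doc3d\Ablation4_ch016_ep003_7_10\Gather2_W_fixGood_C_change\train\pyr_4s\L6\step10_a.py
### so kong_to_py_layer is 8 (the > 3 branch) and template_dir, and therefore exp_dir, resolve to
###   "Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_4s/L6"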
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()]  ### the z, y, x order follows the mapping in step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that resul_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
# "1" 3 6 10 15 21 28 36 45 55
# side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")

# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
ch032_1side_2__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir,
code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_2__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") # 1 3 "6" 10 15 21 28 36 45 55 # side3 OK 10 ch032_1side_3__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_3__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") # 1 3 6 "10" 15 21 28 36 45 55 # side4 OK 20 ch032_1side_4__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_4__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") # 1 3 6 10 "15" 21 28 36 45 55 # side5 OK 35 ch032_1side_5__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, 
ch032_pyramid_1side_5__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_5__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") # 1 3 6 10 15 "21" 28 36 45 55 # side6 OK 56 ch032_1side_6__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_6_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_6_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_6_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_6_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_6_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_6__2side_6__3side_6_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") # 1 3 6 10 15 21 "28" 36 45 55 # side7 OK 84 ch032_1side_7__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_6_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_6_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_6_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_6_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_6_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_6__3side_6_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_6_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_6_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_6_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_6_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_6_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, 
code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_6_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_7_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_7_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_7_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="") ch032_1side_7__2side_7__3side_7_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 
1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s5").set_train_args(epochs=1).set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s6").set_train_args(epochs=1).set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s7").set_train_args(epochs=1).set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Press F5 directly, or run `python step10_b1_exp_obj_load_and_train_and_test.py` with nothing after it!
        ### That way the code below, which is meant for step10_b_subprocess.py, is never reached~~~
        ch032_1side_1__2side_1__3side_1_4side_1.build().run()
        # print('no argument')
        sys.exit()

    ### The code below is for step10_b_subprocess.py; it is equivalent to typing
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` at the cmd.
    eval(sys.argv[1])
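# A hedged usage sketch (editorial, not part of the original file): the
# eval(sys.argv[1]) hook above lets a driver such as step10_b_subprocess.py run
# one experiment per process. Roughly, with a builder name taken from this file:
#
#     import subprocess
#     subprocess.run(["python", "step10_b1_exp_obj_load_and_train_and_test.py",
#                     "ch032_1side_7__2side_7__3side_7_4side_7.build().run()"])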
[ "s89334roy@yahoo.com.tw" ]
s89334roy@yahoo.com.tw
e65631e729c8549976a81aa1a8b98e467e8c7c78
228dd278c875b9539908afffefcfe5b130a28a62
/v2/src/code/measure_service2.py
44431c1f5d47dc6869ce58091850202aa7bda78f
[]
no_license
sheriefvt/MARS-services
57d9ca81f1f606ca8a8938e75b41fb806a36f8b9
a5f6b6140b0c8a30cd083a41a465f0bc999017a1
refs/heads/master
2021-04-30T12:50:53.156082
2018-02-13T02:06:11
2018-02-13T02:06:11
121,283,295
0
0
null
null
null
null
UTF-8
Python
false
false
5,415
py
__author__ = 'Sherif Abdelhamid'

# Measure Service Version 1.0 Beta

from bottle import get, post, request, run  # or route
import os
import threading
import time
import networkx as nx
import sqlite3
import json
import datetime
import ConfigParser, io

with open('mars.config', "r") as myfile:
    data = myfile.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(data))

server = config.get("MARS_configuration", "server")
host = config.get("MARS_configuration", "host")
port = config.get("MARS_configuration", "port")
port2 = config.get("MARS_configuration", "port2")
port3 = config.get("MARS_configuration", "port3")
database_path = config.get("MARS_configuration", "database")
index_path1 = config.get("MARS_configuration", "index1")
index_path2 = config.get("MARS_configuration", "index2")
file_path = config.get("MARS_configuration", "uploadfile")
qsub_path = config.get("MARS_configuration", "qsub")
graph_path = config.get("MARS_configuration", "graph")
code_path = config.get("MARS_configuration", "code")
output_path = config.get("MARS_configuration", "output")
qlog_path = config.get("MARS_configuration", "qlog")


class MyError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


## End point to call the measure service to compute a specific measure on a network.
@get('/graphservice/measure/compute')
def do_compute():
    gname = request.query.get('graph')
    mid = request.query.get('measure')
    p = getmeasureinfo(mid)
    if p[3] == 'None':
        par = ''
    else:
        par = p[3]
    ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d--%H:%M:%S')
    if p[1] == "networkx":
        tmp = networkx_qsub(gname, p[0], ts, par)
    elif p[1] == "galib":
        tmp = galib_qsub(gname, p[2], ts, par, p[0])
    elif p[1] == "standalone":
        tmp = cplus_qsub(gname, p[2], ts, par, p[0])
    elif p[1] == "sql":
        # NOTE: p[5] expects a sixth column that getmeasureinfo's five-column SELECT does not return.
        tmp = sql_qsub(gname, ts, p[5], p[2], p[4], p[0])
    name = qsub_path + gname + "-" + mid + '.qsub'
    f1 = open(name, "w")
    f1.write(tmp)
    f1.close()
    if os.path.exists(name):
        # Pass the callable and its argument separately; the original wrote
        # target=qsub_worker(name), which submitted the job synchronously and
        # handed Thread the function's None return value instead of a callable.
        qb = threading.Thread(name='qsub_worker', target=qsub_worker, args=(name,))
        qb.start()
    return


## Function to create qsub file for calculating degree within DBMS
def sql_qsub(gname, ts, dbname, sqlstmt, target, m):
    tmp2 = sqlstmt.format(g=gname)
    # Note: gname/graph_name were missing from the original format() call, which raised KeyError.
    tmp = '''#!/bin/bash
#PBS -lwalltime=10:00:00
#PBS -W group_list=sipcinet
#PBS -q sfx_q
#PBS -N {gname}-{measure}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
cd $PBS_O_WORKDIR
sqlite3 {dp} 'update {g}_{tr} set {mn} = ({sq})'
'''.format(g=gname, gname=gname, graph_name=gname, mn=dbname, ti=ts, tr=target,
           sq=tmp2, dp=database_path, measure=m, qp=qlog_path)
    return tmp


## Function to create qsub file for calculating different measures using networkx library. Currently, it calculates the degree,
## betweeness_centrality, clustering, load_centrality, node_clique_number, and closeness_centrality.
def networkx_qsub(gname, command, ts, parameter):
    tmp = '''#!/bin/bash
#PBS -lwalltime=10:00:00
#PBS -W group_list=sipcinet
#PBS -q sfx_q
#PBS -N {graph_name}-{measure}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
cd $PBS_O_WORKDIR
export PATH=/home/sipcinet/edison/python-2.7.9/bin:$PATH
python {cp}measure.py {gp}{graph_name} {op}{graph_name}_{measure}.out {measure} {graph_name} {pr}
'''.format(graph_name=gname, measure=command, ti=ts, cp=code_path, op=output_path,
           pr=parameter, gp=graph_path, qp=qlog_path)
    return tmp


## Function to create qsub file for calculating keshell using code provided by Chris Kulhman. Code is an executable.
def cplus_qsub(gname, mname, ts, parameter, command):
    tmp = '''#!/bin/bash
#PBS -lwalltime=10:00:00
#PBS -W group_list=sipcinet
#PBS -q sfx_q
#PBS -N {graph_name}-{cmd}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
cd $PBS_O_WORKDIR
export PATH=/home/sipcinet/edison/python-2.7.9/bin:$PATH
{cp}{measure} {gp}{graph_name}.uel {pr} {op}{graph_name}_{cmd}.out
'''.format(graph_name=gname, measure=mname, ti=ts, cp=code_path, gp=graph_path,
           pr=parameter, cmd=command, op=output_path, qp=qlog_path)
    return tmp


## Function to create qsub file for calculating clustering coef. using code provided by Maliq. Code is an executable
def galib_qsub(gname, mname, ts, parameter, cmd2):
    tmp = '''#!/bin/sh
#PBS -l walltime=10:00:00
#PBS -l nodes=10:ppn=1
#PBS -W group_list=ndssl
#PBS -q ndssl_q
#PBS -A ndssl
#PBS -N {graph_name}-{cmd}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
#PBS -j oe
. /etc/profile.d/modules.sh
module add mvapich2/gcc
#module add mpiexec
time mpiexec -f $PBS_NODEFILE {cp}{command} {gp}{graph_name}.gph {op}{graph_name}_{cmd}.out {pr}
exit;
'''.format(graph_name=gname, command=mname, cp=code_path, ti=ts, pr=parameter,
           cmd=cmd2, gp=graph_path, op=output_path, qp=qlog_path)
    return tmp


## Load measure information from DB
def getmeasureinfo(m):
    db = sqlite3.connect(database_path)
    c = db.cursor()
    sqlt = ("select id,package,command,parameter,target from measure where id='{c}'").format(c=m)
    c.execute(sqlt)
    data = c.fetchone()
    return data


## Submit qsub job request
def qsub_worker(name):
    os.system('qsub {filename}'.format(filename=name))


run(server=server, host=host, port=port3, debug=True)
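# A hedged usage sketch (editorial, not from the repo): the compute endpoint is
# driven with plain GET query parameters; the host, the port3 value, and the
# graph and measure names below are hypothetical.
#
#     import requests
#     requests.get("http://localhost:8083/graphservice/measure/compute",
#                  params={"graph": "mygraph", "measure": "degree"})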
[ "sherif@cos.io" ]
sherif@cos.io
dcbe927e6b4d7e84a65d80d415af4b07cbf43625
7e64b95e39d9a0a95e25eae82b01bfef2b6e550c
/benchmarking/model_one_job_batched.py
03714abbeeabd1ced60e64916c8e2a06568264bb
[ "MIT" ]
permissive
yuGithuuub/scCODA_reproducibility
a927075d9cbca2b0f1ff4d9ad74b872e286591dc
1f2565eca4bc9e6ccd16aa6885ccde6c19caa196
refs/heads/main
2023-01-23T03:23:04.559234
2020-12-10T15:14:19
2020-12-10T15:14:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,832
py
# Only relevant for server execution
import pickle as pkl
import sys
import os

import benchmark_utils as add

dataset_path = sys.argv[1]
save_path = sys.argv[2]
model_name = sys.argv[3]
count = int(sys.argv[4])
if sys.argv[5] == "True":
    keep_sccoda_results = True
else:
    keep_sccoda_results = False

print("model name:", model_name)

file_name = os.listdir(dataset_path)[count]

if model_name == "ALDEx2_alr":
    kwargs = {"server": True, "method": "we.eBH", "mc_samples": 128, "denom": [5], "alpha": 0.05, "fdr_correct": False}
elif model_name == "ALDEx2":
    kwargs = {"server": True, "method": "we.eBH", "mc_samples": 128, "alpha": 0.05, "fdr_correct": False}
elif model_name in ["simple_dm", "scCODA"]:
    kwargs = {"num_results": 20000, "n_burnin": 5000, "num_adapt_steps": 4000, "keep_sccoda_results": keep_sccoda_results}
elif model_name in ["alr_ttest", "alr_wilcoxon"]:
    kwargs = {"reference_index": 4, "alpha": 0.05, "fdr_correct": True}
elif model_name in ["Haber", "ttest", "clr_ttest", "dirichreg"]:
    kwargs = {"alpha": 0.05, "fdr_correct": True}
elif model_name == "scdc":
    kwargs = {"server": True}
else:
    kwargs = {}

if keep_sccoda_results:
    results, effects = add.model_on_one_datafile(dataset_path + file_name, model_name, **kwargs)
    results = add.get_scores(results)
    save = {"results": results, "effects": effects}
else:
    results = add.model_on_one_datafile(dataset_path + file_name, model_name, **kwargs)
    results = add.get_scores(results)
    save = results

with open(save_path + model_name + "_results_" + str(count) + ".pkl", "wb") as f:
    pkl.dump(save, f)
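# A hedged invocation sketch (editorial, not part of the repo): the script reads
# its five positional arguments as dataset dir, save dir, model name, file
# index, and a keep-results flag; the paths below are hypothetical.
#
#     python model_one_job_batched.py data/generated/ results/ scCODA 0 True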
[ "johannes.ostner@online.de" ]
johannes.ostner@online.de
241b062d29b2a2e895a396fb385dd2ffb44bab96
3ff9821b1984417a83a75c7d186da9228e13ead9
/No_1410_HTML Entity Parser/by_re_replacement.py
c017682935944a4f3a73df684c4c097a91d80e6d
[ "MIT" ]
permissive
brianchiang-tw/leetcode
fd4df1917daef403c48cb5a3f5834579526ad0c2
6978acfb8cb767002cb953d02be68999845425f3
refs/heads/master
2023-06-11T00:44:01.423772
2023-06-01T03:52:00
2023-06-01T03:52:00
222,939,709
41
12
null
null
null
null
UTF-8
Python
false
false
3,076
py
'''
Description:

HTML entity parser is the parser that takes HTML code as input and replace all the entities of the special characters by the characters itself.

The special characters and their entities for HTML are:

Quotation Mark: the entity is &quot; and symbol character is ".
Single Quote Mark: the entity is &apos; and symbol character is '.
Ampersand: the entity is &amp; and symbol character is &.
Greater Than Sign: the entity is &gt; and symbol character is >.
Less Than Sign: the entity is &lt; and symbol character is <.
Slash: the entity is &frasl; and symbol character is /.

Given the input text string to the HTML parser, you have to implement the entity parser.

Return the text after replacing the entities by the special characters.

Example 1:
Input: text = "&amp; is an HTML entity but &ambassador; is not."
Output: "& is an HTML entity but &ambassador; is not."
Explanation: The parser will replace the &amp; entity by &

Example 2:
Input: text = "and I quote: &quot;...&quot;"
Output: "and I quote: \"...\""

Example 3:
Input: text = "Stay home! Practice on Leetcode :)"
Output: "Stay home! Practice on Leetcode :)"

Example 4:
Input: text = "x &gt; y &amp;&amp; x &lt; y is always false"
Output: "x > y && x < y is always false"

Example 5:
Input: text = "leetcode.com&frasl;problemset&frasl;all"
Output: "leetcode.com/problemset/all"

Constraints:
1 <= text.length <= 10^5
The string may contain any possible characters out of all the 256 ASCII characters.
'''

import re

class Solution:
    def entityParser(self, text: str) -> str:
        # &amp; is kept last so that, e.g., "&amp;quot;" decodes to "&quot;"
        # in a single pass instead of being double-decoded.
        html_symbol = ['&quot;', '&apos;', '&gt;', '&lt;', '&frasl;', '&amp;']
        formal_symbol = ['"', "'", '>', '<', '/', '&']

        for html_sym, formal_sym in zip(html_symbol, formal_symbol):
            text = re.sub(html_sym, formal_sym, text)

        return text

# n : the character length of input, text.

## Time Complexity: O( n )
#
# The overhead in time is the cost of string replacement, which is of O( n ).

## Space Complexity: O( n )
#
# The overhead in space is the storage for output string, which is of O( n ).

from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'text')

def test_bench():
    test_data = [
        TestEntry(text="&amp; is an HTML entity but &ambassador; is not."),
        TestEntry(text="and I quote: &quot;...&quot;"),
        TestEntry(text="Stay home! Practice on Leetcode :)"),
        TestEntry(text="x &gt; y &amp;&amp; x &lt; y is always false"),
        TestEntry(text="leetcode.com&frasl;problemset&frasl;all"),
    ]

    # expected output:
    '''
    & is an HTML entity but &ambassador; is not.
    and I quote: "..."
    Stay home! Practice on Leetcode :)
    x > y && x < y is always false
    leetcode.com/problemset/all
    '''

    for t in test_data:
        print(Solution().entityParser(text=t.text))

    return

if __name__ == '__main__':
    test_bench()
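# A hedged alternative sketch (editorial, not part of the original solution):
# a single regex pass with a replacement callback matches each entity in the
# input at most once, so no replacement-order reasoning is needed at all.
#
#     import re
#
#     ENTITIES = {"&quot;": '"', "&apos;": "'", "&gt;": ">",
#                 "&lt;": "<", "&frasl;": "/", "&amp;": "&"}
#
#     def entity_parser(text):
#         return re.sub(r"&(?:quot|apos|gt|lt|frasl|amp);",
#                       lambda m: ENTITIES[m.group(0)], text)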
[ "brianchiang1988@icloud.com" ]
brianchiang1988@icloud.com
ab0c75ec1d40b45a73b6375c1400520dda72e2cd
1b9c4798836f7c38782995422efcdbe7b48ed459
/fashion/urls.py
96ad507ae51d691eb26decda0c9b1a42c8cd09c4
[]
no_license
risa4an/fashion-blog
44ccff27dbe330665f1eeae460d636f6e8fe2d0d
aa46877246aa61b28dfdaea2e495c8e887a24f06
refs/heads/master
2022-12-22T10:38:48.718814
2020-09-16T19:56:55
2020-09-16T19:56:55
260,867,007
0
0
null
null
null
null
UTF-8
Python
false
false
1,138
py
"""fashion URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from django.conf.urls import include, url from fashion import settings from fashion.apps.accounts import views urlpatterns = [ path('articles/', include('articles.urls'), name = 'home'), path('admin/', admin.site.urls), path('', include('accounts.urls')), path('photographers/', include('photographers.urls')) ] static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "katya.risunova@gmail.com" ]
katya.risunova@gmail.com
499cc74e02d9c9125a27b10ac84169ce55f05be3
7998125d3b2d3d1427f0715e9d7283b3108625c1
/wcics/server/forms/forms/admin/topics/create.py
f6fdf0312231da71bbd9d63bced8582cbde4eebd
[ "MIT" ]
permissive
CS-Center/CS-Center
042b74e1c1b829a241260159ee40bf9ffa5a7027
3cd09f29d214406e6618fc67b9faf59a18f3f11b
refs/heads/master
2021-07-03T00:12:38.569492
2020-09-16T20:38:39
2020-09-16T20:38:39
209,430,258
0
0
null
null
null
null
UTF-8
Python
false
false
418
py
# -*- coding: utf-8 -*-

from wcics.server.forms.validator_sets import *

from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField

class TopicSudoCreateForm(FlaskForm):
  tid = StringField("ID", admin_topic_create_tid)
  name = StringField("Name", admin_topic_create_name)
  description = TextAreaField("Description", admin_topic_create_description)
  submit = SubmitField("Create")
[ "keenan@cscenter.ca" ]
keenan@cscenter.ca
a0805ca69c9facac7d1aacfd3d9aa666d1741f76
94a27c14e31aeac3fe16980240e19634837679a8
/max-frequency.py
a9828419afb5ee165a3274267b910be6b95e359b
[]
no_license
EEmery/anomaly-detection
73762045eb317f0dc565f1199b28c61ce8fe1756
3fbb098ea483c85a7f0bec46ca33c3b3b417dbbf
refs/heads/master
2020-04-05T23:28:35.698159
2017-09-10T00:35:39
2017-09-10T00:35:39
83,551,646
0
0
null
null
null
null
UTF-8
Python
false
false
2,148
py
# Imports necessary libraries
print "Importing packages\n"
import pandas as pd
import matplotlib.pyplot as plt
from warnings import filterwarnings
from numpy import nan

# Ignores warnings
filterwarnings("ignore")

input_file_path = "Data/preprocessed_v2/not-normalized/"
periods = ['WEEK', 'MONTH', 'QUARTER', 'SEMESTER']
periods_amounts = [53, 12, 4, 2]
file_names = ['weekly', 'monthly', 'quarterly', 'semesterly']

# Iterates over all periods
for period, period_amount, file_name in zip(periods, periods_amounts, file_names):
    print "Making " + file_name + " analysis"

    # Opens file (related to the period)
    periodic_analysis = pd.read_csv(input_file_path + file_name + '_analysis.csv')

    # Removes the YEAR column's necessity by extending the period limits
    periodic_analysis[period] = periodic_analysis[period] + (periodic_analysis['YEAR'] - 2015) * period_amount

    # Slices data frame to get only necessary columns
    periodic_analysis = periodic_analysis[['ID', period, 'FREQUENCY', 'GE_MEAN', 'GNV_MEAN', 'GP_MEAN', 'DO_MEAN']]

    # Reshapes data frame to desired shape
    periodic_analysis = periodic_analysis.set_index(['ID', period])
    periodic_analysis = periodic_analysis.stack().unstack(1)

    # Gets the period of highest frequency
    max_frequencies = periodic_analysis.loc(axis=0)[:, 'FREQUENCY'].idxmax(axis=1).reset_index().rename(columns={0: 'STAMP'})

    # Creates a data frame for final results
    result_df = pd.DataFrame(columns=['ID', 'GE_MEAN', 'GNV_MEAN', 'GP_MEAN', 'DO_MEAN'])

    # Iterates over the ID's
    for i, ID, STAMP in zip(range(len(max_frequencies)), max_frequencies['ID'], max_frequencies['STAMP']):
        data = periodic_analysis.loc(axis=0)[ID].ix[1:, STAMP]  # Gets the means from the highest frequency period
        row = [ID]
        for mean in ['GE_MEAN', 'GNV_MEAN', 'GP_MEAN', 'DO_MEAN']:  # Iterates over all fuel type means
            try:
                row.append(data[mean])  # Appends to final result
            except KeyError:
                row.append(nan)
        result_df.loc[i] = row  # Appends to result data frame

    # Shows some data
    if period == 'MONTH':
        print len(result_df)
        periodic_analysis.loc['741NKH'].T.plot.bar()
        plt.show()
[ "emeryecs@gmail.com" ]
emeryecs@gmail.com
1db2de63ecb9a8bd60c598385a262b6e2b0785b9
a20f8ae0c129927318792ec4547d534dbe38871a
/model/pcnn_att_adam.py
053182fb9d612e4235924547884f5d4eb3d5a2cb
[ "MIT" ]
permissive
zhaohuiqiang/ONRE
dbc82abc31271c690252d3f2f5cf799c5f871f83
736fc5ff6f12be590d02cb66abb82c1616e1327c
refs/heads/master
2020-03-31T17:14:35.197909
2018-11-20T01:00:47
2018-11-20T01:00:47
152,414,099
0
0
null
null
null
null
UTF-8
Python
false
false
1,028
py
from framework import Framework
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

def pcnn_att_adam(is_training):
    if is_training:
        framework = Framework(is_training=True)
    else:
        framework = Framework(is_training=False)

    word_embedding = framework.embedding.word_embedding()
    pos_embedding = framework.embedding.pos_embedding()
    embedding = framework.embedding.concat_embedding(word_embedding, pos_embedding)
    x = framework.encoder.pcnn(embedding, FLAGS.hidden_size, framework.mask, activation=tf.nn.relu)
    logit, repre = framework.selector.attention(x, framework.scope, framework.label_for_select)

    if is_training:
        loss = framework.classifier.softmax_cross_entropy(logit)
        # The original read `output = output(logit)`, which calls an undefined
        # name; taking the argmax over the logits is one plausible intent, but
        # it is an assumption, not the repo's confirmed behavior.
        output = tf.argmax(logit, axis=-1)
        framework.init_train_model(loss, output, optimizer=tf.train.AdamOptimizer)
        framework.load_train_data()
        framework.train()
    else:
        framework.init_test_model(tf.nn.softmax(logit))
        framework.load_test_data()
        framework.test()
[ "346091714@qq.com" ]
346091714@qq.com
7f083320d95a03e5d1511f9b21545afc2344cbca
3f0a0ee646326530c4cd6996276e8c819dfab65c
/battleship.py
5bcc8530ea64006f289500c7ec2e3a8ebc74e6fa
[]
no_license
kidisty/Python-1
bde878603283a6b3b966296ad2d4b0f89f70000f
203cd35b07633610b3773c0100c342386e636935
refs/heads/master
2022-11-12T07:11:15.072699
2020-07-06T09:59:33
2020-07-06T09:59:33
277,503,399
0
0
null
null
null
null
UTF-8
Python
false
false
1,215
py
import random

board = []

for x in range(5):
    board.append(["O"] * 5)

def print_board(board):
    for row in board:
        print " ".join(row)

print "Let's play Battleship!"
print_board(board)

def random_row(board):
    return random.randint(0, len(board) - 1)

def random_col(board):
    return random.randint(0, len(board[0]) - 1)

ship_row = random_row(board)
ship_col = random_col(board)
# print ship_row
# print ship_col

# Everything from here on should go in your for loop!
# Be sure to indent four spaces!
for turn in range(4):
    guess_row = input("Guess Row:")
    guess_col = input("Guess Col:")

    if guess_row == ship_row and guess_col == ship_col:
        print "Congratulations! You sunk my battleship!"
        break
    else:
        if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
            print "Oops, that's not even in the ocean."
        elif board[guess_row][guess_col] == "X":
            print "You guessed that one already."
        else:
            print "You missed my battleship!"
            board[guess_row][guess_col] = "X"
        # Print (turn + 1) here!
        # turn runs 0..3, so the final turn is 3; the original tested
        # `turn > 4`, which could never be true and so "Game Over" never printed.
        if turn == 3:
            print "Game Over"
        print turn + 1
        print_board(board)
[ "kidistyohannes@kidists-MacBook-Pro.local" ]
kidistyohannes@kidists-MacBook-Pro.local
56b5c5e6c185d48ef5ff9fdb8aa7c49f34eb9f35
055b8c0176f9036061c9abd56e18db28eb69111f
/venv/bin/pip2.7
ff662bae4ff3be2065482ea43c0d8cb0caa9a617
[]
no_license
patientplatypus/pythonsimpleskeleton
0b56cc589db7ca91ecd7160b28f3556c3e3e153b
8bb70d549b8c367fe39ebe73685fd765032e13f7
refs/heads/master
2021-05-12T08:38:20.330403
2018-01-13T21:30:34
2018-01-13T21:30:34
117,291,647
0
0
null
null
null
null
UTF-8
Python
false
false
253
7
#!/Users/patientplatypus/Documents/python_play/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys

from pip import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "pweyand@gmail.com" ]
pweyand@gmail.com
0e0fbfff7cc2527f4cf77872685c1eba72a8441b
368f19de1a70535f3938da92d7278c0296a1b142
/fb/manage.py
fa01778819c491bc93750de929e949753879bf33
[]
no_license
viveksoundrapandi/chrome-aldown
82a5f3c679108a146f0c9fd76d8809b868a2933b
7a1915534ed90bc647e9775178a6f16a7be8e7a1
refs/heads/master
2022-11-05T02:12:32.272354
2022-10-07T11:29:36
2022-10-07T11:29:36
5,950,897
0
1
null
2022-10-07T11:29:37
2012-09-25T14:03:40
Python
UTF-8
Python
false
false
245
py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fb.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
[ "vivekhas3@gmail.com" ]
vivekhas3@gmail.com
f76d5f3aec244f5d33fcd7e2887d2eb61bb5658a
0b25b1c2ea3e3f05ea388e1105cd2fab50e7ba54
/mysite/blog/forms.py
68ba9afdb13950be95db2f366aa5aebf783e6d1c
[]
no_license
webclinic017/Django-project
f8337aeb296d12760143951635d0297c13313a50
e757aef633c63aaf857afd1f274d42d16703ca0c
refs/heads/master
2022-12-25T17:30:14.503627
2020-10-12T08:47:08
2020-10-12T08:47:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
397
py
from django import forms
from .models import Comment

class EmailPostForm(forms.Form):
    name = forms.CharField()
    email = forms.EmailField()
    to = forms.EmailField()
    comments = forms.CharField(required=False, widget=forms.Textarea)

class CommentForm(forms.ModelForm):
    class Meta:
        model = Comment
        fields = ('name', 'email', 'body')
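# A hedged usage sketch (editorial, not part of the original app): a minimal
# view handling CommentForm; the `post` argument is hypothetical, and a
# Comment.post foreign key is assumed.
#
#     def post_comment(request, post):
#         form = CommentForm(data=request.POST)
#         if form.is_valid():
#             comment = form.save(commit=False)  # build the Comment without saving yet
#             comment.post = post                # attach the related post (assumed FK)
#             comment.save()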
[ "moreshubham203@gmail.com" ]
moreshubham203@gmail.com
241ccb6c7c4ae0c34b892c9d317fbd849d3ea4ef
7fba01da6426480612d7cef9ceb2e15f3df6d01c
/PYTHON/pythonDesafios/venv/lib/python3.9/site-packages/santos/santos.py
b862f2661c3fd15467cd45185f3ff2200ba50eaa
[ "MIT" ]
permissive
Santos1000/Curso-Python
f320fec1e7ced4c133ade69acaa798d431e14113
549223a1633f6f619c87554dd8078cf7841bb1df
refs/heads/main
2023-05-26T12:01:23.868814
2021-05-26T13:22:58
2021-05-26T13:22:58
371,039,290
0
0
null
null
null
null
UTF-8
Python
false
false
9,153
py
__author__ = 'anderson'
# -*- coding: utf-8 -*-

from threading import Thread
from datetime import datetime
from exceptions import TaskException
import logging

log = logging.getLogger(__name__)


class ControlJobs:
    __jobs = []

    def stop(self, jobname):
        log.debug("Job name %s" % jobname)
        log.debug(self.__jobs)
        for idx, th in enumerate(self.__jobs):
            if jobname in th:
                th[jobname]._stop()
                del self.__jobs[idx]
                break

    def addjob(self, job):
        self.__jobs.append(job)
        log.debug(self.__jobs)


stopjobs = ControlJobs()


class TaskScheduling(Thread):
    """
    Accepted parameters: seconds, minutes, hour, time_of_the_day, day_of_the_week, day_of_the_month

    Description:
    The seconds parameter runs the function repeatedly at the given frequency in seconds,
    e.g. seconds="20" runs it every 20 seconds.
    The minutes parameter runs the function repeatedly at the given frequency in minutes,
    e.g. minutes="20" runs it every 20 minutes.
    The hour parameter runs the function repeatedly at the given frequency in hours,
    e.g. hour="2" runs it every 2 hours.
    Note: these three parameters cannot be combined, neither with each other nor with the two below.

    The time_of_the_day parameter runs the function every day at a specific time, given in the
    format hh:mm:ss (hh: 0..23; mm: 0..59; ss: 0..59),
    e.g. time_of_the_day="14:15:00" runs it every day at fourteen fifteen.

    The day_of_the_week parameter runs the function on the given day of the week. Possible values
    are Su (Sunday), M (Monday), Tu (Tuesday), W (Wednesday), Th (Thursday), F (Friday),
    Sa (Saturday), capitalized. It must be combined with time_of_the_day to pin down the hour,
    minute and second of that weekday,
    e.g. day_of_the_week="W" time_of_the_day="22:00:00" runs it every Wednesday at twenty-two hundred.

    Usage examples: just decorate the function or class method you want to schedule.

    @TaskScheduling(seconds="30")
    def do_something(a):
        print("Print do_something: %s" % a)
        import time
        time.sleep(6)
        print("do_something finished")

    do_something()

    *****************************************

    class Teste(object):
        @TaskScheduling(time_of_the_day="08:30:00")
        def some_function(self, a):
            print("Print some_function: %s" % a)
            import time
            print("Function some_function")
            time.sleep(10)
            print("some_function finished")

    obj = Teste()
    obj.some_function("b")
    """

    days = {"M": 0, "Tu": 1, "W": 2, "Th": 3, "F": 4, "Sa": 5, "Su": 6}

    # Receives the decorator parameters
    def __init__(self, *arguments, **argumentsMap):
        Thread.__init__(self)
        self.args = arguments
        self.argumentsMap = argumentsMap
        self.threadname = argumentsMap["name"]
        self.execute = False
        log.debug("Arguments: %r:" % self.argumentsMap)

    # The actual decorator: it receives the decorated function; since this is a class, __call__ is implemented
    def __call__(self, function):
        self.function = function

        # Receives the decorated function's arguments
        def task(*functionargs, **functionArgumentsMap):
            self.functionargs = functionargs
            self.functionArgumentsMap = functionArgumentsMap
            stopjobs.addjob({self.threadname: self})
            self.start()

        return task

    def run(self):
        try:
            log.debug("JOB RUNNING")
            import time
            self.execute = True
            while self.execute:
                interval = self.calculateInterval()
                log.debug("Interval: %r in seconds" % interval)
                time.sleep(interval)
                self.function(*self.functionargs, **self.functionArgumentsMap)
        except TaskException as t:
            log.debug(t)

    def _stop(self):
        log.debug("STOP")
        self.execute = False
        return self.execute

    def calculateInterval(self):
        """
        Determines the time in seconds until the next task run. When the parameter that sets
        the next run time is time_of_the_day, the auxCalculate method is called to compute it.
        :return:
        """
        if "day_of_the_week" in self.argumentsMap:
            if "hour" in self.argumentsMap or "minutes" in self.argumentsMap or "seconds" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            if "time_of_the_day" in self.argumentsMap:
                return self.calculateDayOfTheWeek(self.argumentsMap["day_of_the_week"], self.argumentsMap["time_of_the_day"])
            else:
                raise TaskException("Parameter time_of_the_day is missing")
        elif "time_of_the_day" in self.argumentsMap:
            if "hour" in self.argumentsMap or "minutes" in self.argumentsMap or "seconds" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            return self.auxCalculate(self.argumentsMap["time_of_the_day"])[0]
        elif "hour" in self.argumentsMap:
            if "seconds" in self.argumentsMap or "minutes" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            return int(self.argumentsMap["hour"]) * 3600
        elif "minutes" in self.argumentsMap:
            if "seconds" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            else:
                return int(self.argumentsMap["minutes"]) * 60
        elif "seconds" in self.argumentsMap:
            log.debug("seconds")
            return int(self.argumentsMap["seconds"])
        else:
            raise TaskException("Invalid parameter(s): %r" % self.argumentsMap)

    def calculateDayOfTheWeek(self, day_of_the_week, time_of_the_day):
        entrada = day_of_the_week
        weekday = datetime.now().weekday()
        dif = self.days[entrada] - weekday
        sleep, diference = self.auxCalculate(time_of_the_day)
        if self.days[entrada] == weekday:
            if diference > 0:
                return sleep
            else:
                return sleep + (6 * (24 * 3600))  # 24 hours in seconds
        elif self.days[entrada] > weekday:
            if diference > 0:
                return sleep + (dif * (24 * 3600))
            else:
                # If the target is already the next day, just return sleep, since the time up to
                # the other day's run is already accounted for.
                if dif == 1:
                    return sleep
                else:
                    return sleep + ((dif - 1) * (24 * 3600))  # 24 hours in seconds
        else:
            # Number of days of difference
            resp = 7 - abs(dif)
            if diference > 0:
                return sleep + (resp * (24 * 3600))
            else:
                # If the target is already the next day, just return sleep, since the time up to
                # the other day's run is already accounted for.
                if resp == 1:
                    return sleep
                else:
                    return sleep + ((resp - 1) * (24 * 3600))  # 24 hours in seconds

    def auxCalculate(self, time_of_the_day):
        """
        Returns the time in seconds so that the task always runs at the chosen time.
        :param time_of_the_day:
        :return: sleep_time
        """
        try:
            times = [3600, 60, 1]
            one_day_has = '24:00:00'.split(":")
            time_day = sum([a * b for a, b in zip(times, [int(i) for i in one_day_has])])
            aux_time = time_of_the_day.split(":")
            time_want = sum([a * b for a, b in zip(times, [int(i) for i in aux_time])])
            # Converts the current time to seconds
            hjf = datetime.now().strftime("%H:%M:%S").split(":")
            now = sum([a * b for a, b in zip(times, [int(i) for i in hjf])])
            # Difference between the current time and the desired time, in seconds
            diference = time_want - now
            sleep_time = None
            if diference < 0:
                # Will only run on the next day
                sleep_time = time_day - (diference * (-1))
            else:
                # Will still run on the same day
                sleep_time = diference
        except TaskException as t:
            log.debug(t)
        return sleep_time, diference
83990871+Santos1000@users.noreply.github.com
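The interval arithmetic in auxCalculate above reduces to "seconds from now until the next occurrence of HH:MM:SS". A minimal, self-contained sketch of that same computation, with nothing assumed from the module (the helper name seconds_until is hypothetical, not part of the original code):

from datetime import datetime

def seconds_until(time_of_the_day):
    """Return seconds from now until the next occurrence of 'HH:MM:SS'."""
    h, m, s = (int(p) for p in time_of_the_day.split(":"))
    want = h * 3600 + m * 60 + s
    now_t = datetime.now()
    now = now_t.hour * 3600 + now_t.minute * 60 + now_t.second
    diff = want - now
    # If the target time has already passed today, wait until tomorrow.
    return diff if diff >= 0 else 24 * 3600 + diff

print(seconds_until("08:30:00"))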
038676f976925f63daee208e21804295dadcdee2
0581b790cf9feda6638084fca19a03137baa3ce1
/Tron_niezainstalowany/Setup.py
e06af08aadc6917f538a6f807b07a7b5e67e0520
[]
no_license
Frax123/TRON-GAME-PYTHON
127eb4878093eb6419aaa3a7b051f5f54caf1343
2524efe0c9718d13f1cd8e74882fe316179d6d4e
refs/heads/master
2020-05-04T21:54:46.401725
2019-04-04T12:35:33
2019-04-04T12:35:33
179,493,063
0
0
null
null
null
null
UTF-8
Python
false
false
374
py
# -*- coding: utf-8 -*-
import cx_Freeze

executables = [cx_Freeze.Executable('Tron.py')]

cx_Freeze.setup(name='Tron',
                options={'build_exe': {'packages': ['pygame'],
                                       'include_files': ['Red_player.png', 'Blue_player.png',
                                                         'Icon.png', 'Wybuch.png']}},
                description='Tron: First Chapter',
                executables=executables)
[ "noreply@github.com" ]
Frax123.noreply@github.com
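A cx_Freeze setup script like the one above is normally invoked as `python Setup.py build`, which produces a build/ directory with the frozen executable. A hypothetical variant (not from the repository) that additionally hides the console window on Windows, a common tweak for pygame games:

import sys
import cx_Freeze

# base="Win32GUI" suppresses the console window on Windows; elsewhere use the default.
base = "Win32GUI" if sys.platform == "win32" else None
executables = [cx_Freeze.Executable('Tron.py', base=base)]

cx_Freeze.setup(name='Tron',
                options={'build_exe': {'packages': ['pygame']}},
                description='Tron: First Chapter',
                executables=executables)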
dd169cb46861b1832b662aacb471b515ebccff0e
afeed161a0bd0e92cdcfcf3c580db8f78719bae8
/Milestone_One/bs4_scrape.py
9fd1671a4ae3045c68b4492451484a6c4a36dccb
[]
no_license
changjung1995/WQD7005_Data_Mining
07046b0c8088123c29c0a0eb65126570515b291d
3e214835ff527b6e3d2fefa442efae4c60ae527a
refs/heads/master
2023-05-27T15:10:29.562769
2020-06-20T06:46:01
2020-06-20T06:46:01
246,978,227
0
1
null
2023-05-22T22:45:37
2020-03-13T03:11:35
Jupyter Notebook
UTF-8
Python
false
false
2,317
py
# -*- coding: utf-8 -*- """ @author: Tan Chang Jung & Tan Sia Hong """ #%% import requests from bs4 import BeautifulSoup from datetime import date import time import pandas as pd #%% headers = {'User-Agent' : 'Chrome/74.0.3729.169'} # select the top 20 from the ranking of cryptocurrencies cryptocurrency = ['bitcoin','ethereum','xrp','bitcoin-cash','tether', 'bitcoin-sv','litecoin','eos','binance-coin','neo', 'chainlink','cardano','stellar','tron','unus-sed-leo', 'monero','huobi-token','ethereum-classic','crypto-com-coin', 'dash'] #%% # capture today date today = date.today().strftime("%Y%m%d") # format the base url link base_url = 'https://coinmarketcap.com/currencies/{}/historical-data/?start=20100101&end=' + today # header heading = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Market Capacity'] for cc in cryptocurrency: url = base_url.format(cc) response = requests.get(url, headers = headers) soup = BeautifulSoup(response.content, 'html.parser') #find html code for table table = soup.find_all('div', class_='cmc-table__table-wrapper-outer') table = table[2] data = [] for rows in table.find_all('tr'): row = {} for cols, head in zip(rows.find_all('td'), heading): row[head] = cols.text.replace('\n','').strip() data.append(row) time.sleep(5) df = pd.DataFrame(data) df = df.drop(df.index[0]) # remove empty row df["Date"] = pd.to_datetime(df["Date"]).dt.strftime('%Y-%m-%d') df['Open'] = df['Open'].str.replace(',','') df['Open'] = df['Open'].astype('float64').round(2) df['High'] = df['High'].str.replace(',','') df['High'] = df['High'].astype('float64').round(2) df['Low'] = df['Low'].str.replace(',','') df['Low'] = df['Low'].astype('float64').round(2) df['Close'] = df['Close'].str.replace(',','') df['Close'] = df['Close'].astype('float64').round(2) df['Volume'] = df['Volume'].str.replace(',','') df['Volume'] = df['Volume'].astype('float64').round(2) df['Market Capacity'] = df['Market Capacity'].str.replace(',','') df['Market Capacity'] = df['Market Capacity'].astype('float64').round(2) # save to csv df.to_csv(cc + '.csv', index = False)
[ "wqd190008@siswa.um.edu.my" ]
wqd190008@siswa.um.edu.my
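A short sketch of how the scraper's output can be read back for analysis, assuming the loop above has produced bitcoin.csv (the file name follows the cc + '.csv' pattern in the script); here a 30-day rolling average of the closing price:

import pandas as pd

df = pd.read_csv('bitcoin.csv', parse_dates=['Date'])
df = df.sort_values('Date').set_index('Date')
print(df['Close'].rolling(30).mean().tail())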
c19d323dd79ed8200a36279528eb8fd77c5d502e
b3122fec6858e1b4474889bc0b58cbdbec40ac34
/DZ5/DZ5_5.py
bb40fae277a8a97cfcacfb80f5a0604210a86293
[]
no_license
Mehalich/git-geekbrains
edb05ceb7643e02d1334d118d6e304720a266066
e6833de99b5c2c37a4d16b7856fa85ba1f17117a
refs/heads/main
2023-04-30T00:54:10.232243
2021-05-16T16:51:52
2021-05-16T16:51:52
359,473,465
1
0
null
2021-05-16T16:54:21
2021-04-19T13:40:34
Python
UTF-8
Python
false
false
536
py
""" 5. Создать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами. Программа должна подсчитывать сумму чисел в файле и выводить ее на экран. """ f = open("DZ5_5.txt", "w+") f.write("1 2 3 4 5 6 7 8 9 0") f.seek(0) line = f.readlines() result = 0 line = str(line[0]) line = line.split() for step in line: result += int(step) print(result) f.close()
[ "yakovlev_mv@mail.ru" ]
yakovlev_mv@mail.ru
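For contrast, a more idiomatic sketch of the same exercise using a context manager and the built-in sum(); the behavior is unchanged:

# write the numbers, rewind, and sum them in one pass
with open("DZ5_5.txt", "w+") as f:
    f.write("1 2 3 4 5 6 7 8 9 0")
    f.seek(0)
    print(sum(int(tok) for tok in f.read().split()))  # 45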
90fcefb5891a9d1a1d6662ad1e564dc05696dc70
3eadfce7b2238f7d25256976e939b4d0e6c49ebb
/switchhello_pd.py
d744975e787c98bdb695c2c9c35d6bbe9cb98a3e
[ "BSD-2-Clause" ]
permissive
kyab/kazu
591d3fc40a9845c9e8d6a22ae1fb1aef8a4441ba
9ae0c3cec8ba09770b039825d99cdc53a627b6fd
refs/heads/master
2021-01-19T04:02:07.279051
2016-06-24T23:40:40
2016-06-24T23:40:40
61,918,052
0
0
null
null
null
null
UTF-8
Python
false
false
704
py
import RPi.GPIO as G
import time
import os
import signal
import sys

print "switchhello_pd started."

COUNT = 5
PIN_LED = 17
PIN_SWITCH = 27

def signal_handler(signal, frame):
    G.cleanup()
    print "GPIO cleanup done."
    sys.exit(0)

def wait_and_shout():
    G.wait_for_edge(PIN_SWITCH, G.RISING)
    print "SWITCH PUSHED"
    if G.input(PIN_SWITCH):
        print "HIGH"
    else:
        print "LOW"
    G.output(PIN_LED, True)
    os.system("aplay -q -D hw:0 ./one.wav &")
    time.sleep(0.1)
    G.output(PIN_LED, False)

G.setmode(G.BCM)
G.setup(PIN_LED, G.OUT)
G.setup(PIN_SWITCH, G.IN, pull_up_down = G.PUD_DOWN)
signal.signal(signal.SIGINT, signal_handler)

while True:
    try:
        wait_and_shout()
    except:
        pass

G.cleanup()
[ "kyossi212@gmail.com" ]
kyossi212@gmail.com
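The script above blocks in wait_for_edge; RPi.GPIO also offers a non-blocking style via add_event_detect, which fires a callback on a separate thread. A minimal sketch of that alternative (pin number mirrors the script; bouncetime debounces the switch; runs only on a Raspberry Pi):

import time
import RPi.GPIO as G

PIN_SWITCH = 27

def on_press(channel):
    print("SWITCH PUSHED on pin %d" % channel)

G.setmode(G.BCM)
G.setup(PIN_SWITCH, G.IN, pull_up_down=G.PUD_DOWN)
G.add_event_detect(PIN_SWITCH, G.RISING, callback=on_press, bouncetime=200)
try:
    while True:
        time.sleep(1)  # main thread idles; the callback fires on edges
finally:
    G.cleanup()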
bd0794483225cac132025003cb18438963984fcf
178998aecae2aa9d52e43b702abd52fd6ba58b2b
/0517/defT.py
6a4a7b45c745ddf7f69c664c45c1027a1ac5f75a
[]
no_license
www111111/git-new
34d664cad5084d9d016f25131eb0d5a8f719d7d1
3fd96e79b3c3a67afbc6227db1c69294f75848c0
refs/heads/master
2020-03-17T10:30:24.560482
2018-05-28T12:24:20
2018-05-28T12:24:20
133,514,309
0
0
null
null
null
null
UTF-8
Python
false
false
182
py
# Note: this function shadows the built-in sum().
def sum():
    # Reads three integers; the user is expected to type '+' at each prompt in between.
    a = int(input('a'))
    b = input('+')
    c = int(input('c'))
    e = input('+')
    d = int(input('d'))
    if b == '+' and e == '+':
        total = a + c + d
        return total
    # implicitly returns None when the separators are not '+'
[ "1337872746@qq.com" ]
1337872746@qq.com
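A sturdier sketch of the same idea: read one expression like "1+2+3" and sum its parts, instead of prompting for every token separately (the helper name sum_plus_expression is hypothetical):

def sum_plus_expression(text):
    # split on '+' and add up the integer parts
    return sum(int(p) for p in text.split('+'))

print(sum_plus_expression("1+2+3"))  # 6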
cc1984673c8ca18f83d4bb875b6a0454c72c78f0
d7bd5d5fd6114ceec28b190434958c5d3e8d0b8a
/install_nltk.py
046937c12c7ca4cddcc380ea6f28482a1457f08d
[]
no_license
jay-cleveland/reddit_data_acquisition
1c2529c6063a47c163aa73449004df433f7811b0
c1e78b3328c87c7bff034cc63723076a7da89354
refs/heads/master
2022-10-16T08:52:12.591232
2022-09-30T02:08:33
2022-09-30T02:08:33
105,435,874
0
0
null
null
null
null
UTF-8
Python
false
false
89
py
import nltk

def main():
    nltk.download('punkt')
    nltk.download('all-corpora')

main()
[ "clevelanjk18@uww.edu" ]
clevelanjk18@uww.edu
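Once the 'punkt' model has been downloaded by the script above, tokenization works out of the box; a quick usage sketch:

import nltk

# assumes install_nltk.py has already fetched 'punkt'
print(nltk.word_tokenize("Reddit data acquisition needs tokenized text."))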
1e84c539079a73cab67e9517c9c96f370c7348f8
4b8cde0ef35b67618eea421c20a7cf0c6882b75b
/motor-surprise-rage.py
1b1a3ca04300174a8af7d846534b8936bad235e5
[]
no_license
MeRuslan/thesis_work
57aa2006711e33db33d47b576a0cce047045fa66
935b15c611c65f77eae26c5d768ad3f363873832
refs/heads/master
2021-01-21T21:06:58.803983
2017-06-19T13:35:12
2017-06-19T13:35:12
94,780,166
0
0
null
null
null
null
UTF-8
Python
false
false
26,085
py
from func import * # ATTTENTION! Maybe there are some mistakes in neuron parameters! logger = logging.getLogger('neuromodulation') startbuild = datetime.datetime.now() nest.ResetKernel() nest.SetKernelStatus({'overwrite_files': True, 'local_num_threads': 8, 'resolution': 0.1}) generate_neurons(1000) # Init parameters of our synapse models DOPA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0] DOPA_synparams_in['vt'] = nest.Create('volume_transmitter')[0] SERO_synparams_in['vt'] = nest.Create('volume_transmitter')[0] SERO_synparams_ex['vt'] = nest.Create('volume_transmitter')[0] NORA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0] nest.CopyModel('static_synapse', gen_static_syn, static_syn) nest.CopyModel('stdp_synapse', glu_synapse, STDP_synparams_Glu) nest.CopyModel('stdp_synapse', gaba_synapse, STDP_synparams_GABA) nest.CopyModel('stdp_synapse', ach_synapse, STDP_synparams_ACh) nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_ex, DOPA_synparams_ex) nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_in, DOPA_synparams_in) nest.CopyModel('stdp_serotonin_synapse', sero_synapse_ex, SERO_synparams_ex) nest.CopyModel('stdp_serotonin_synapse', sero_synapse_in, SERO_synparams_in) nest.CopyModel('stdp_noradrenaline_synapse', nora_synapse_ex, NORA_synparams_ex) ## - my .50 logger.debug("* * * Start connection initialisation") #################################################################### # * * * ventral pathway * * * connect(ldt[ldt_Ach], thalamus[thalamus_Glu], syn_type=ACh, weight_coef=0.005) connect(ldt[ldt_Ach], bnst[bnst_Ach], syn_type=ACh, weight_coef=0.005) connect(ldt[ldt_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005) connect(ldt[ldt_Ach], prefrontal[pfc_Glu0], syn_type=ACh, weight_coef=0.005) connect(thalamus[thalamus_Glu], motor[motor_Glu0], syn_type=Glu, weight_coef=0.005) connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu, weight_coef=0.005) connect(thalamus[thalamus_Glu], motor[motor_5HT], syn_type=Glu, weight_coef=0.005) connect(motor[motor_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005) connect(motor[motor_Glu1], lc[lc_N0], syn_type=Glu, weight_coef=0.005) connect(prefrontal[pfc_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005) connect(prefrontal[pfc_Glu1], bnst[bnst_Glu], syn_type=Glu, weight_coef=0.005) connect(bnst[bnst_Glu], bnst[bnst_GABA], syn_type=Glu, weight_coef=0.005) connect(bnst[bnst_Ach], amygdala[amygdala_Ach], syn_type=ACh, weight_coef=0.005) connect(bnst[bnst_GABA], hypothalamus[hypothalamus_pvn_GABA], syn_type=GABA, weight_coef=0.005) connect(amygdala[amygdala_Ach], lc[lc_Ach], syn_type=ACh, weight_coef=0.005) connect(amygdala[amygdala_GABA], bnst[bnst_GABA], syn_type=GABA, weight_coef=0.005) connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu, 
weight_coef=0.005) connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu, weight_coef=0.005) connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu, weight_coef=0.005) connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu0], syn_type=GABA, weight_coef=0.005) connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu1], syn_type=GABA, weight_coef=0.005) connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_5HT], syn_type=GABA, weight_coef=0.005) # inside LC connect(lc[lc_Ach], lc[lc_GABA], syn_type=ACh, weight_coef=0.005) connect(lc[lc_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005) connect(lc[lc_Ach], lc[lc_N1], syn_type=ACh, weight_coef=0.005) connect(lc[lc_D1], lc[lc_N0], syn_type=DA_ex, weight_coef=0.005) connect(lc[lc_D2], lc[lc_N1], syn_type=DA_in, weight_coef=0.005) connect(lc[lc_GABA], lc[lc_N0], syn_type=GABA, weight_coef=0.005) # * * * dorsal pathway * * * connect(pgi[pgi_Glu], lc[lc_N0], syn_type=Glu, weight_coef=0.005) connect(pgi[pgi_Glu], lc[lc_N1], syn_type=Glu, weight_coef=0.005) connect(pgi[pgi_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005) connect(prh[prh_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005) connect(striatum[striatum_tan], lc[lc_GABA], syn_type=GABA, weight_coef=0.005) connect(vta[vta_DA0], lc[lc_D1], syn_type=DA_ex, weight_coef=0.005) connect(vta[vta_DA0], lc[lc_D2], syn_type=DA_in, weight_coef=0.005) connect(vta[vta_DA1], striatum[striatum_tan], syn_type=DA_ex, weight_coef=0.005) connect(vta[vta_DA1], striatum[striatum_GABA], syn_type=DA_ex, weight_coef=0.005) wse = 0.001 wsi = 0.5 # # * * * NIGROSTRIATAL PATHWAY* * * connect(motor[motor_Glu0], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005) connect(motor[motor_Glu0], snc[snc_DA], syn_type=Glu, weight_coef=0.005) connect(motor[motor_Glu0], striatum[striatum_D2], syn_type=Glu, weight_coef=0.05) connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu, weight_coef=0.003) # 0.0008 connect(motor[motor_Glu0], prefrontal[pfc_5HT], syn_type=Glu, weight_coef=0.003) ######not in the diagram connect(motor[motor_Glu0], motor[motor_5HT], syn_type=Glu, weight_coef=0.003) ######not in the diagram connect(motor[motor_Glu0], stn[stn_Glu], syn_type=Glu, weight_coef=7) connect(motor[motor_Glu1], striatum[striatum_D1], syn_type=Glu) connect(motor[motor_Glu1], striatum[striatum_D2], syn_type=Glu) connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu) connect(motor[motor_Glu1], stn[stn_Glu], syn_type=Glu) connect(motor[motor_Glu1], nac[nac_GABA0], syn_type=GABA) connect(striatum[striatum_tan], striatum[striatum_D1], syn_type=GABA) connect(striatum[striatum_tan], striatum[striatum_D2], syn_type=Glu) connect(striatum[striatum_D1], snr[snr_GABA], syn_type=GABA, weight_coef=0.001) connect(striatum[striatum_D1], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.001) connect(striatum[striatum_D1], gpe[gpe_GABA], syn_type=GABA, weight_coef=0.005) connect(striatum[striatum_D2], gpe[gpe_GABA], syn_type=GABA, weight_coef=1) connect(gpe[gpe_GABA], stn[stn_Glu], syn_type=GABA, weight_coef=0.0001) connect(gpe[gpe_GABA], striatum[striatum_D1], syn_type=GABA, weight_coef=0.001) connect(gpe[gpe_GABA], striatum[striatum_D2], syn_type=GABA, weight_coef=0.3) connect(gpe[gpe_GABA], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.0001) connect(gpe[gpe_GABA], snr[snr_GABA], syn_type=GABA, weight_coef=0.0001) connect(stn[stn_Glu], snr[snr_GABA], syn_type=Glu, weight_coef=0.2) connect(stn[stn_Glu], gpi[gpi_GABA], syn_type=Glu, weight_coef=0.2) connect(stn[stn_Glu], gpe[gpe_GABA], syn_type=Glu, 
weight_coef=0.3) connect(stn[stn_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.01) connect(gpi[gpi_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=1) # weight_coef=3) connect(snr[snr_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=1) # weight_coef=3) connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu) connect(thalamus[thalamus_Glu], stn[stn_Glu], syn_type=Glu, weight_coef=1) # 005 connect(thalamus[thalamus_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.001) connect(thalamus[thalamus_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.001) connect(thalamus[thalamus_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.001) connect(thalamus[thalamus_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.001) connect(thalamus[thalamus_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.001) connect(thalamus[thalamus_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.001) connect(thalamus[thalamus_Glu], nac[nac_GABA0], syn_type=Glu) connect(thalamus[thalamus_Glu], nac[nac_GABA1], syn_type=Glu) connect(thalamus[thalamus_Glu], nac[nac_Ach], syn_type=Glu) connect(thalamus[thalamus_Glu], nac[nac_DA], syn_type=Glu) connect(thalamus[thalamus_Glu], nac[nac_5HT], syn_type=Glu) connect(thalamus[thalamus_Glu], nac[nac_NA], syn_type=Glu) # * * * INTEGRATED PATHWAY * * * connect(prefrontal[pfc_Glu0], vta[vta_DA0], syn_type=Glu) connect(prefrontal[pfc_Glu0], nac[nac_GABA1], syn_type=Glu) connect(prefrontal[pfc_Glu1], vta[vta_GABA2], syn_type=Glu) connect(prefrontal[pfc_Glu1], nac[nac_GABA1], syn_type=Glu) connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu) connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu) connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu) connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu) connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu) connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu) connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.3) connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.3) connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.3) connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.3) connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.3) connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.3) # * * * MESOCORTICOLIMBIC PATHWAY * * * connect(nac[nac_Ach], nac[nac_GABA1], syn_type=ACh) connect(nac[nac_GABA0], nac[nac_GABA1], syn_type=GABA, ) connect(nac[nac_GABA1], vta[vta_GABA2], syn_type=GABA, ) connect(vta[vta_GABA0], prefrontal[pfc_Glu0], syn_type=GABA, ) connect(vta[vta_GABA0], pptg[pptg_GABA], syn_type=GABA, ) connect(vta[vta_GABA1], vta[vta_DA0], syn_type=GABA, ) connect(vta[vta_GABA1], vta[vta_DA1], syn_type=GABA, ) connect(vta[vta_GABA2], nac[nac_GABA1], syn_type=GABA, ) connect(pptg[pptg_GABA], vta[vta_GABA0], syn_type=GABA, ) connect(pptg[pptg_GABA], snc[snc_GABA], syn_type=GABA, weight_coef=0.005) connect(pptg[pptg_ACh], vta[vta_GABA0], syn_type=ACh) connect(pptg[pptg_ACh], vta[vta_DA1], syn_type=ACh) connect(pptg[pptg_Glu], vta[vta_GABA0], syn_type=Glu) connect(pptg[pptg_Glu], vta[vta_DA1], syn_type=Glu) connect(pptg[pptg_ACh], striatum[striatum_D1], syn_type=ACh, weight_coef=0.3) connect(pptg[pptg_ACh], snc[snc_GABA], syn_type=ACh, weight_coef=0.005) connect(pptg[pptg_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.005) if noradrenaline_flag: 
logger.debug("* * * Making neuromodulating connections...") # vt_ex = nest.Create('volume_transmitter') # vt_in = nest.Create('volume_transmitter') # NORA_synparams_ex['vt'] = vt_ex[0] # NORA_synparams_in['vt'] = vt_in[0] connect(nts[nts_a1], lc[lc_N0], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a1], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], lc[lc_N1], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], amygdala[amygdala_Glu], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], amygdala[amygdala_Ach], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], amygdala[amygdala_GABA], syn_type=NA_ex, weight_coef=0.005) connect(nts[nts_a2], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N0], motor[motor_Glu0], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N0], motor[motor_Glu1], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N0], prefrontal[pfc_Glu1], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N0], vta[vta_a1], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N0], ldt[ldt_a1], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N0], ldt[ldt_a2], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N1], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N1], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N1], rn[rn_a1], syn_type=NA_ex, weight_coef=0.005) connect(lc[lc_N1], rn[rn_a2], syn_type=NA_ex, weight_coef=0.005) connect(rn[rn_a1], rn[rn_dr], syn_type=NA_ex, weight_coef=0.005) connect(rn[rn_a2], rn[rn_mnr], syn_type=NA_ex, weight_coef=0.005) connect(rn[rn_a2], rn[rn_rpa], syn_type=NA_ex, weight_coef=0.005) connect(rn[rn_a2], rn[rn_rmg], syn_type=NA_ex, weight_coef=0.005) # connect(vta[vta_a1], vta[vta_DA1], syn_type=NA_in, weight_coef=0.005) if serotonin_flag: # * * * AFFERENT PROJECTIONS * * connect(vta[vta_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse) connect(septum[septum_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse) connect(septum[septum_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse) connect(prefrontal[pfc_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse) connect(prefrontal[pfc_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse) connect(hypothalamus[hypothalamus_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse) connect(hypothalamus[hypothalamus_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse) connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse) connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse) connect(bnst[bnst_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse) connect(amygdala[amygdala_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse) connect(amygdala[amygdala_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse) connect(hippocampus[hippocampus_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse) # * * * EFFERENT PROJECTIONS * * * connect(rn[rn_dr], striatum[striatum_5HT], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], striatum[striatum_D2], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], striatum[striatum_GABA], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], striatum[striatum_Ach], syn_type=SERO_in, weight_coef=wsi) # !!! 
connect(rn[rn_dr], nac[nac_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], nac[nac_GABA0], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], nac[nac_GABA1], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], nac[nac_Ach], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], nac[nac_DA], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], snr[snr_GABA], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], septum[septum_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # ? tune weights connect(rn[rn_dr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) # ? tune weights connect(rn[rn_dr], lateral_cortex[lateral_cortex_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], entorhinal_cortex[entorhinal_cortex_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], prefrontal[pfc_DA], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], prefrontal[pfc_NA], syn_type=SERO_in, weight_coef=wsi) # !!! connect(rn[rn_dr], lateral_tegmental_area[lateral_tegmental_area_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], lc[lc_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], lc[lc_N0], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], bnst[bnst_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], bnst[bnst_Glu], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], bnst[bnst_GABA], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], bnst[bnst_Ach], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], amygdala[amygdala_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], amygdala[amygdala_Glu], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], amygdala[amygdala_GABA], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_dr], amygdala[amygdala_Ach], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], vta[vta_5HT], syn_type=SERO_in, weight_coef=wsi) # !!! 0.005 connect(rn[rn_mnr], vta[vta_a1], syn_type=SERO_in, weight_coef=wsi) # !!! 0.005 connect(rn[rn_mnr], vta[vta_DA1], syn_type=SERO_in, weight_coef=wsi) # !!! 0.005 connect(rn[rn_mnr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # ? connect(rn[rn_mnr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) # ? 
tune weights 0.005 connect(rn[rn_mnr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], motor[motor_Glu0], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], motor[motor_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], insular_cortex[insular_cortex_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], medial_cortex[medial_cortex_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], neocortex[neocortex_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], hypothalamus[hypothalamus_5HT], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], hypothalamus[hypothalamus_pvn_GABA], syn_type=SERO_in, weight_coef=wsi) connect(rn[rn_mnr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi) # * * * THALAMOCORTICAL PATHWAY * * * connect(thalamus[thalamus_5HT], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wse) connect(thalamus[thalamus_5HT], motor[motor_5HT], syn_type=SERO_ex, weight_coef=wse) connect(thalamus[thalamus_5HT], motor[motor_Glu0], syn_type=SERO_ex, weight_coef=wse) connect(prefrontal[pfc_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005 connect(motor[motor_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005 if dopamine_flag: logger.debug("* * * Making neuromodulating connections...") # NIGROSTRIATAL connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_ex) connect(snc[snc_DA], gpe[gpe_GABA], syn_type=DA_ex) connect(snc[snc_DA], stn[stn_Glu], syn_type=DA_ex) connect(snc[snc_DA], nac[nac_GABA0], syn_type=DA_ex) connect(snc[snc_DA], nac[nac_GABA1], syn_type=DA_ex) connect(snc[snc_DA], striatum[striatum_D2], syn_type=DA_in) connect(snc[snc_DA], striatum[striatum_tan], syn_type=DA_in) # MESOCORTICOLIMBIC connect(vta[vta_DA0], striatum[striatum_D1], syn_type=DA_ex) connect(vta[vta_DA0], striatum[striatum_D2], syn_type=DA_in) connect(vta[vta_DA0], prefrontal[pfc_Glu0], syn_type=DA_ex) connect(vta[vta_DA0], prefrontal[pfc_Glu1], syn_type=DA_ex) connect(vta[vta_DA1], nac[nac_GABA0], syn_type=DA_ex) connect(vta[vta_DA1], nac[nac_GABA1], syn_type=DA_ex) if dopamine_flag and serotonin_flag and noradrenaline_flag: # * * * DOPAMINE INTERACTION * * * connect(prefrontal[pfc_5HT], prefrontal[pfc_DA], syn_type=SERO_ex, weight_coef=wse) connect(prefrontal[pfc_DA], vta[vta_5HT], syn_type=DA_in, weight_coef=0.005) connect(prefrontal[pfc_DA], vta[vta_DA1], syn_type=DA_in, weight_coef=0.005) # connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_in, weight_coef=0.005) connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_ex, weight_coef=wse) connect(vta[vta_DA1], prefrontal[pfc_5HT], syn_type=DA_ex, weight_coef=0.005) connect(vta[vta_DA1], prefrontal[pfc_DA], syn_type=DA_ex, weight_coef=0.005) # connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DOPA_in, weight_coef=0.005) connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DA_ex, weight_coef=0.005) # connect(vta[vta_DA1], striatum[striatum_DA], syn_type=DOPA_in, weight_coef=0.005) connect(vta[vta_DA1], striatum[striatum_D1], syn_type=DA_ex, weight_coef=0.005) # connect(vta[vta_DA1], nac[nac_5HT], syn_type=DOPA_in, weight_coef=0.005) connect(vta[vta_DA1], nac[nac_5HT], syn_type=DA_ex, weight_coef=0.005) # connect(vta[vta_DA1], nac[nac_DA], syn_type=DOPA_in, weight_coef=0.005) connect(vta[vta_DA1], nac[nac_DA], syn_type=DA_ex, weight_coef=0.005) # 
connect(striatum[striatum_5HT], striatum[striatum_DA], syn_type=SERO_in, weight_coef=0.005) connect(striatum[striatum_5HT], striatum[striatum_D1], syn_type=SERO_ex, weight_coef=wse) # ??????????????????????????????????? D1, D2? # connect(striatum[striatum_DA], snr[snr_GABA], syn_type=DOPA_in, weight_coef=0.005) connect(striatum[striatum_D1], snr[snr_GABA], syn_type=DA_ex, weight_coef=0.005) # connect(striatum[striatum_DA], snc[snc_DA], syn_type=DOPA_in, weight_coef=0.005) connect(striatum[striatum_D1], snc[snc_GABA], syn_type=DA_ex, weight_coef=0.005) connect(striatum[striatum_D1], snc[snc_DA], syn_type=DA_ex, weight_coef=0.005) connect(nac[nac_5HT], nac[nac_DA], syn_type=SERO_ex, weight_coef=wse) connect(snr[snr_GABA], snc[snc_DA], syn_type=SERO_in, weight_coef=wsi) connect(snc[snc_GABA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005) # ? connect(snc[snc_DA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005) connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_in, weight_coef=0.005) connect(snc[snc_DA], nac[nac_5HT], syn_type=DA_in, weight_coef=0.005) connect(snc[snc_DA], nac[nac_DA], syn_type=DA_in, weight_coef=0.005) connect(lc[lc_5HT], lc[lc_D1], syn_type=SERO_ex, weight_coef=0.005) connect(lc[lc_D1], rn[rn_dr], syn_type=DA_ex, weight_coef=0.005) # * * * NORADRENALINE INTERACTION * * * connect(lc[lc_5HT], lc[lc_N0], syn_type=SERO_in, weight_coef=0.005) connect(lc[lc_5HT], lc[lc_N1], syn_type=SERO_in, weight_coef=0.005) # * * * EFFERENT * * * # * * * CORTICOSPINAL TRACT * * * # connect(motor[motor_Glu1], medulla[medulla_GABA], syn_type=GABA, weight_coef=0.01) connectIn(motor[motor_Glu1], spine[spine_Glu1], syn_type=Glu) connectIn(spine[spine_Glu1], nmj[nmj_Glu], syn_type=Glu) # # * * * CORTICOBULBAR TRACT * * * connect(motor[motor_Glu0], medulla[medulla_GABA], syn_type=Glu) # # * * * RETICULOSPINAL TRACT * * * connect(pons[pons_Glu], spine[spine_GABA], syn_type=Glu) connect(medulla[medulla_GABA], spine[spine_GABA], syn_type=GABA) # * * * AFFERENT * * * # * * * SPINOTHALAMIC TRACT * * * connect(cellBodies[cellBodies_Glu], spine[spine_Glu2], syn_type=Glu) connect(spine[spine_Glu2], thalamus[thalamus_Glu], syn_type=Glu) logger.debug("* * * Attaching spike generators...") # #################################surprise connect_generator(nts[nts_a1], 0., 250., rate=250, coef_part=1) connect_generator(nts[nts_a2], 0., 250., rate=250, coef_part=1) connect_generator(prh[prh_GABA], 0., 250., rate=250, coef_part=1) connect_generator(pgi[pgi_GABA], 0., 250., rate=250, coef_part=1) connect_generator(pgi[pgi_Glu], 0., 250., rate=250, coef_part=1) connect_generator(ldt[ldt_a1], 0., 250., rate=250, coef_part=1) connect_generator(ldt[ldt_a2], 0., 250., rate=250, coef_part=1) connect_generator(ldt[ldt_Ach], 0., 250., rate=250, coef_part=1) connect_generator(lc[lc_N0], 0., 250., rate=250, coef_part=1) connect_generator(lc[lc_N1], 0., 250., rate=250, coef_part=1) connect_generator(prefrontal[pfc_5HT], 0., 250., rate=250, coef_part=1) connect_generator(motor[motor_5HT], 0., 250., rate=250, coef_part=1) connect_generator(rn[rn_dr], 0., 250., rate=250, coef_part=1) connect_generator(rn[rn_mnr], 0., 250., rate=250, coef_part=1) connect_generator(cellBodies[cellBodies_Glu], 200., 500., rate=250, coef_part=1) # # ############################anger/rage # connect_generator(nts[nts_a1], 400., 600., rate=250, coef_part=1) # connect_generator(nts[nts_a2], 400., 600., rate=250, coef_part=1) # connect_generator(prh[prh_GABA], 400., 600., rate=250, coef_part=1) # connect_generator(pgi[pgi_GABA], 
400., 600., rate=250, coef_part=1) # connect_generator(pgi[pgi_Glu], 400., 600., rate=250, coef_part=1) # connect_generator(ldt[ldt_a1], 400., 600., rate=250, coef_part=1) # connect_generator(ldt[ldt_a2], 400., 600., rate=250, coef_part=1) # connect_generator(ldt[ldt_Ach], 400., 600., rate=250, coef_part=1) # connect_generator(lc[lc_N0], 400., 600., rate=250, coef_part=1) # # connect_generator(lc[lc_N1], 400., 600., rate=250, coef_part=1) # # connect_generator(motor[motor_Glu0], 400., 600., rate=250, coef_part=1) # connect_generator(pptg[pptg_GABA], 400., 600., rate=250, coef_part=1) # connect_generator(pptg[pptg_Glu], 400., 600., rate=250, coef_part=1) # connect_generator(pptg[pptg_ACh], 400., 600., rate=250, coef_part=1) # connect_generator(amygdala[amygdala_Glu], 400., 600., rate=250, coef_part=1) # connect_generator(snc[snc_DA], 400., 600., rate=250, coef_part=1) # connect_generator(vta[vta_DA0], 400., 600., rate=250, coef_part=1) ##connect_generator(pons[pons_5HT], 400., 600., rate=250, coef_part=1) ##connect_generator(periaqueductal_gray[periaqueductal_gray_5HT], 400., 600., rate=250, coef_part=1) ##connect_generator(reticular_formation[reticular_formation_5HT], 400., 600., rate=250, coef_part=1) logger.debug("* * * Attaching spikes detector") for part in getAllParts(): connect_detector(part) logger.debug("* * * Attaching multimeters") for part in getAllParts(): connect_multimeter(part) del generate_neurons, connect, connect_generator, connect_detector, connect_multimeter endbuild = datetime.datetime.now() simulate() get_log(startbuild, endbuild) save(GUI=status_gui)
[ "guyfulla@gmail.com" ]
guyfulla@gmail.com
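The core wiring pattern the simulation above relies on for its neuromodulated plasticity is NEST's volume transmitter: a device pools spikes from a "dopaminergic" population and modulates every stdp_dopamine_synapse that references it via 'vt'. A minimal stand-alone sketch of that pattern, in NEST 2.x-style API as inferred from the script (population sizes and the iaf_psc_alpha stand-ins are illustrative; the custom serotonin/noradrenaline synapse models in the script require a patched NEST build):

import nest

nest.ResetKernel()
vt = nest.Create('volume_transmitter')
# register the transmitter in a copy of the dopamine-modulated STDP model
nest.CopyModel('stdp_dopamine_synapse', 'dopa', {'vt': vt[0]})

dopa_pool = nest.Create('iaf_psc_alpha', 10)  # stand-in dopaminergic neurons
pre = nest.Create('iaf_psc_alpha', 10)
post = nest.Create('iaf_psc_alpha', 10)

nest.Connect(dopa_pool, vt)                    # these spikes set the dopamine level
nest.Connect(pre, post, syn_spec={'model': 'dopa'})  # modulated plastic connections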
91c2e382f455de622a8bfb58b1df4f5bbe6b01ff
e13a79dec2668c1870b3fea05f071fe872d400f0
/pde/storage/tests/test_generic_storages.py
474649dd328980f34d7df91ecac637408b9e3bd6
[ "MIT" ]
permissive
yiweizhang1025/py-pde
b27cc0b058b50d6af921e1ea84bf59a5bb0ff370
3862a35505b9ce4d62557bc65dfedd40638a90f3
refs/heads/master
2023-03-14T17:21:07.004742
2021-03-15T15:33:47
2021-03-15T15:33:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,739
py
""" .. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de> """ import functools import numpy as np import pytest from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField from pde.tools.misc import module_available def test_storage_write(tmp_path): """ test simple memory storage """ dim = 5 grid = UnitGrid([dim]) field = ScalarField(grid) storage_classes = {"MemoryStorage": MemoryStorage} if module_available("h5py"): file_path = tmp_path / "test_storage_write.hdf5" storage_classes["FileStorage"] = functools.partial(FileStorage, file_path) for name, storage_cls in storage_classes.items(): storage = storage_cls(info={"a": 1}) storage.start_writing(field, info={"b": 2}) storage.append(field.copy(data=np.arange(dim)), 0) storage.append(field.copy(data=np.arange(dim)), 1) storage.end_writing() assert not storage.has_collection np.testing.assert_allclose(storage.times, np.arange(2)) for f in storage: np.testing.assert_array_equal(f.data, np.arange(dim)) for i in range(2): np.testing.assert_array_equal(storage[i].data, np.arange(dim)) assert {"a": 1, "b": 2}.items() <= storage.info.items() storage = storage_cls() storage.clear() for i in range(3): storage.start_writing(field) storage.append(field.copy(data=np.arange(dim) + i), i) storage.end_writing() np.testing.assert_allclose( storage.times, np.arange(3), err_msg="storage class: " + name ) def test_storage_truncation(tmp_path): """ test whether simple trackers can be used """ file = tmp_path / "test_storage_truncation.hdf5" for truncate in [True, False]: storages = [MemoryStorage()] if module_available("h5py"): storages.append(FileStorage(file)) tracker_list = [s.tracker(interval=0.01) for s in storages] grid = UnitGrid([8, 8]) state = ScalarField.random_uniform(grid, 0.2, 0.3) pde = DiffusionPDE() pde.solve(state, t_range=0.1, dt=0.001, tracker=tracker_list) if truncate: for storage in storages: storage.clear() pde.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=tracker_list) times = np.arange(0.1, 0.201, 0.01) if not truncate: times = np.r_[np.arange(0, 0.101, 0.01), times] for storage in storages: msg = f"truncate={truncate}, storage={storage}" np.testing.assert_allclose(storage.times, times, err_msg=msg) assert not storage.has_collection def test_storing_extract_range(tmp_path): """ test methods specific to FieldCollections in memory storage """ sf = ScalarField(UnitGrid([1])) storage_classes = {"MemoryStorage": MemoryStorage} if module_available("h5py"): file_path = tmp_path / "test_storage_write.hdf5" storage_classes["FileStorage"] = functools.partial(FileStorage, file_path) for storage_cls in storage_classes.values(): # store some data s1 = storage_cls() s1.start_writing(sf) s1.append(sf.copy(data=np.array([0])), 0) s1.append(sf.copy(data=np.array([2])), 1) s1.end_writing() np.testing.assert_equal(s1[0].data, 0) np.testing.assert_equal(s1[1].data, 2) np.testing.assert_equal(s1[-1].data, 2) np.testing.assert_equal(s1[-2].data, 0) with pytest.raises(IndexError): s1[2] with pytest.raises(IndexError): s1[-3] # test extraction s2 = s1.extract_time_range() assert s2.times == list(s1.times) np.testing.assert_allclose(s2.data, s1.data) s3 = s1.extract_time_range(0.5) assert s3.times == s1.times[:1] np.testing.assert_allclose(s3.data, s1.data[:1]) s4 = s1.extract_time_range((0.5, 1.5)) assert s4.times == s1.times[1:] np.testing.assert_allclose(s4.data, s1.data[1:]) def test_storing_collection(tmp_path): """ test methods specific to FieldCollections in 
memory storage """ grid = UnitGrid([2, 2]) f1 = ScalarField.random_uniform(grid, 0.1, 0.4, label="a") f2 = VectorField.random_uniform(grid, 0.1, 0.4, label="b") f3 = Tensor2Field.random_uniform(grid, 0.1, 0.4, label="c") fc = FieldCollection([f1, f2, f3]) storage_classes = {"MemoryStorage": MemoryStorage} if module_available("h5py"): file_path = tmp_path / "test_storage_write.hdf5" storage_classes["FileStorage"] = functools.partial(FileStorage, file_path) for storage_cls in storage_classes.values(): # store some data storage = storage_cls() storage.start_writing(fc) storage.append(fc, 0) storage.append(fc, 1) storage.end_writing() assert storage.has_collection assert storage.extract_field(0)[0] == f1 assert storage.extract_field(1)[0] == f2 assert storage.extract_field(2)[0] == f3 assert storage.extract_field(0)[0].label == "a" assert storage.extract_field(0, label="new label")[0].label == "new label" assert storage.extract_field(0)[0].label == "a" # do not alter label assert storage.extract_field("a")[0] == f1 assert storage.extract_field("b")[0] == f2 assert storage.extract_field("c")[0] == f3 with pytest.raises(ValueError): storage.extract_field("nonsense") def test_storage_apply(tmp_path): """ test the apply function of StorageBase """ grid = UnitGrid([2]) field = ScalarField(grid) storage_classes = {"None": None, "MemoryStorage": MemoryStorage} if module_available("h5py"): file_path = tmp_path / "test_storage_apply.hdf5" storage_classes["FileStorage"] = functools.partial(FileStorage, file_path) s1 = MemoryStorage() s1.start_writing(field, info={"b": 2}) s1.append(field.copy(data=np.array([0, 1])), 0) s1.append(field.copy(data=np.array([1, 2])), 1) s1.end_writing() for name, storage_cls in storage_classes.items(): out = None if storage_cls is None else storage_cls() s2 = s1.apply(lambda x: x + 1, out=out) assert storage_cls is None or s2 is out assert len(s2) == 2 np.testing.assert_allclose(s2.times, s1.times) assert s2[0] == ScalarField(grid, [1, 2]), name assert s2[1] == ScalarField(grid, [2, 3]), name # test empty storage s1 = MemoryStorage() s2 = s1.apply(lambda x: x + 1) assert len(s2) == 0 def test_storage_copy(tmp_path): """ test the copy function of StorageBase """ grid = UnitGrid([2]) field = ScalarField(grid) storage_classes = {"None": None, "MemoryStorage": MemoryStorage} if module_available("h5py"): file_path = tmp_path / "test_storage_apply.hdf5" storage_classes["FileStorage"] = functools.partial(FileStorage, file_path) s1 = MemoryStorage() s1.start_writing(field, info={"b": 2}) s1.append(field.copy(data=np.array([0, 1])), 0) s1.append(field.copy(data=np.array([1, 2])), 1) s1.end_writing() for name, storage_cls in storage_classes.items(): out = None if storage_cls is None else storage_cls() s2 = s1.copy(out=out) assert storage_cls is None or s2 is out assert len(s2) == 2 np.testing.assert_allclose(s2.times, s1.times) assert s2[0] == s1[0], name assert s2[1] == s1[1], name # test empty storage s1 = MemoryStorage() s2 = s1.copy() assert len(s2) == 0
[ "david.zwicker@ds.mpg.de" ]
david.zwicker@ds.mpg.de
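The usage pattern these tests exercise, condensed into a runnable sketch; imports and the tracker/solve arguments mirror test_storage_truncation above:

from pde import DiffusionPDE, MemoryStorage, UnitGrid
from pde.fields import ScalarField

grid = UnitGrid([8, 8])
state = ScalarField.random_uniform(grid, 0.2, 0.3)

# attach a storage tracker to the solver run
storage = MemoryStorage()
eq = DiffusionPDE()
eq.solve(state, t_range=0.1, dt=0.001, tracker=storage.tracker(interval=0.01))

# read the trajectory back: times plus one field per stored step
for t, field in zip(storage.times, storage):
    print(t, field.data.mean())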
4fb0d462d89a6686b3a67fd657cc1c0eb304bb7f
38d1ef01184bbdb3898b8cf495eeee48eaa1a30a
/Newton_Optimization/newton_linesearch.py
537cf2deab73ebdd2706c24882b81d41bace2821
[]
no_license
LenCewa/MasterThesis
39c86c8ab3c752fedc445fea14368bbd74ca8f3a
b2d4d67b7ae2f363dd627ecb4355de1ae6ef04a3
refs/heads/master
2020-06-16T15:47:29.971151
2020-04-15T09:29:43
2020-04-15T09:29:43
195,626,164
0
1
null
null
null
null
UTF-8
Python
false
false
3,840
py
import numpy as np
import matplotlib.pyplot as plt
from Fourier import *
from util import *

# Init Params for Fourier-Classes
N = 5
omega = 1
T = (2 * jnp.pi) / omega
step_size = 0.001
iterations = 450

fourier = Fourier(T, omega, step_size, N, iterations, [], [])
d_fourier = dFourier(T, omega, step_size, N, iterations, [], [])
dd_fourier = ddFourier(T, omega, step_size, N, iterations, [], [])

def L(x, y):
    fx = fourier.predict(fourier.coefficients, x)
    return np.abs(y - fx)**2

def dL(x, y):
    fx = fourier.predict(fourier.coefficients, x)
    dfx = d_fourier.predict(fourier.coefficients, x)
    return 2 * (y - fx) * (-dfx)

def ddL(x, y):
    fx = fourier.predict(fourier.coefficients, x)
    dfx = d_fourier.predict(fourier.coefficients, x)
    ddfx = dd_fourier.predict(fourier.coefficients, x)
    return 2 * (dfx**2 - (y - fx) * ddfx)

def newton_optimization_linesearch(y, x0, iterations, alpha0, damping0):
    res = [jnp.array([x0])]
    err = []
    alpha = alpha0
    damping = damping0
    roh = [1.2, 0.5, 1, 0.5, 0.01]
    for k in range(iterations):
        x = res[k]
        fx = fourier.predict(fourier.coefficients, x)
        dfx = dL(x, y)
        ddfx = ddL(x, y)
        i = 0
        err += [np.linalg.norm(y - fx)]
        if err[k] < 1e-3:
            break
        d = -dfx / (ddfx + damping)
        fx_alphad = fourier.predict(fourier.coefficients, x + alpha * d)
        while fx_alphad > (fx + roh[4]*dfx * alpha * d):
            print("Iteration: ", k, " while-loop: ", i)
            print("f(x + alpha * d) = ", fx_alphad, " > f(x) + r*f'(x) = ", fx + roh[4]*dfx)
            i += 1
            alpha = roh[1]*alpha
            # Optionally: damping = roh[2]*damping
            d = -dfx / (ddfx + damping)
            fx_alphad = fourier.predict(fourier.coefficients, x + alpha * d)
        x = x + alpha * d
        res += [x]
        alpha = np.min([roh[0], alpha, 1])
        # Optionally: damping = roh[3] * damping
    return res, err

t = jnp.linspace(0, 10*np.pi, num=1000)
x0 = 2
y0 = fourier.predict(fourier.coefficients, x0)
const_y0 = np.full(len(t), y0)
f = fourier.batched_predict(fourier.coefficients, t)
df = d_fourier.batched_predict(fourier.coefficients, t)
ddf = dd_fourier.batched_predict(fourier.coefficients, t)
const_0 = np.full(len(t), 0)

# Run Newton Optimization
steps = 20
x_start = 1.5
alpha0 = 1
damping0 = 0.999
res, err = newton_optimization_linesearch(y0[0], x_start, steps, alpha0, damping0)
fx_t = []
ex_t = []
for x in res:
    pred = fourier.predict(fourier.coefficients, x)[0]
    fx_t += [pred]
    ex_t += [(y0 - pred)**2]
print(res)
print(err)

L = L(t, y0)
dL = dL(t, y0)
ddL = ddL(t, y0)

fig, axs = plt.subplots(3, 2)
fig.suptitle("Newton Line Search: x* = " + str(x0) + ", y* = " + str(y0[0]) + ", x0 = " + str(x_start) + " ||| steps = " + str(steps))
axs[0, 0].plot(t, f)
axs[0, 0].plot(t, const_y0, 'tab:red')
axs[0, 0].plot(res, fx_t, 'k.-')
axs[0, 0].plot(res[-2], fx_t[-2], 'ro')
axs[0, 0].plot(res[-1], fx_t[-1], 'g*')
axs[0, 0].set_title('f and y*')
axs[1, 0].plot(t, df, 'tab:orange')
axs[1, 0].set_title('df')
axs[2, 0].plot(t, ddf, 'tab:green')
axs[2, 0].set_title('ddf')
axs[0, 1].plot(t, L)
axs[0, 1].plot(t, const_0, 'tab:red')
axs[0, 1].plot(res, ex_t, 'k.-')
axs[0, 1].plot(res[-2], ex_t[-2], 'ro')
axs[0, 1].plot(res[-1], ex_t[-1], 'g*')
axs[0, 1].set_title('L')
axs[1, 1].plot(t, dL, 'tab:orange')
axs[1, 1].plot(t, const_0, 'tab:red')
axs[1, 1].set_title('dL')
axs[2, 1].plot(t, ddL, 'tab:green')
axs[2, 1].set_title('ddL')
for ax in axs.flat:
    ax.set(xlabel='x-label', ylabel='y-label')
# Hide x labels and tick labels for top plots and y ticks for right plots.
# for ax in axs.flat:
#     ax.label_outer()
plt.show()
#plt.savefig("LineSearchFigs/Newton_LineSearch_x0=" + str(x_start) + ".png")
[ "len13@hotmail.de" ]
len13@hotmail.de
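Stripped of the project's Fourier classes, the optimizer above is damped Newton with Armijo backtracking. A dependency-free sketch of the same scheme on a quartic stand-in objective (all names and constants here are illustrative, not from the repository):

import numpy as np

f = lambda x: (x - 2.0) ** 4
df = lambda x: 4.0 * (x - 2.0) ** 3
ddf = lambda x: 12.0 * (x - 2.0) ** 2

x, damping, rho, c = 0.0, 1e-3, 0.5, 0.01
for k in range(50):
    g, h = df(x), ddf(x)
    d = -g / (h + damping)      # damped Newton direction
    alpha = 1.0
    # Armijo condition: backtrack until sufficient decrease
    while f(x + alpha * d) > f(x) + c * alpha * g * d:
        alpha *= rho
    x += alpha * d
    if abs(g) < 1e-10:
        break

print(x)  # converges to ~2.0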
52f20985a5f0c10e33313979e29aaeaca9acc59f
d806dd4a6791382813d2136283a602207fb4b43c
/sirius/blueprints/api/remote_service/tula/test/script.py
c571a235f8d6348648f5a6cb22945332ad0645a8
[]
no_license
MarsStirner/sirius
5bbf2a03dafb7248db481e13aff63ff989fabbc2
8839460726cca080ca8549bacd3a498e519c8f96
refs/heads/master
2021-03-24T12:09:14.673193
2017-06-06T16:28:53
2017-06-06T16:28:53
96,042,947
0
0
null
null
null
null
UTF-8
Python
false
false
11,263
py
#! coding:utf-8
"""
@author: BARS Group
@date: 25.10.2016
"""
from sirius.app import app
from sirius.blueprints.api.local_service.risar.active.test.test_data import \
    get_mr_appointment_data
from sirius.blueprints.api.local_service.risar.passive.test.request import \
    send_event_remote, request_local
from sirius.blueprints.api.local_service.risar.passive.test.test_data import \
    get_sch_ticket_data_required, get_send_to_mis_card_data, \
    get_send_to_mis_first_ticket25_data, get_send_to_mis_measures_data, \
    get_send_to_mis_epicrisis_data, get_send_to_mis_second_ticket25_data, \
    get_send_to_mis_pc_ticket25_data, get_send_to_mis_first_checkup_data, \
    get_send_to_mis_second_checkup_data, get_send_to_mis_pc_checkup_data
from sirius.blueprints.api.remote_service.tula.passive.checkup_first_ticket25.test.request import \
    edit_checkup_first_ticket25
from sirius.blueprints.api.remote_service.tula.passive.checkup_first_ticket25.test.test_data import \
    get_first_ticket25_data_more
from sirius.blueprints.api.remote_service.tula.passive.childbirth.test.request import \
    create_childbirth, edit_childbirth
from sirius.blueprints.api.remote_service.tula.passive.childbirth.test.test_data import \
    get_childbirth_data_required, get_childbirth_data_more
from sirius.blueprints.api.remote_service.tula.passive.client.test.request import \
    create_client, edit_client
from sirius.blueprints.api.remote_service.tula.passive.client.test.test_data import \
    get_client_data_required, get_client_data_more
from sirius.blueprints.api.remote_service.tula.passive.doctor.test.request import \
    create_doctor, edit_doctor, delete_doctor
from sirius.blueprints.api.remote_service.tula.passive.doctor.test.test_data import \
    get_doctor_data_required, get_doctor_data_more
from sirius.blueprints.api.remote_service.tula.passive.hospitalization.test.request import \
    create_hospitalization, edit_hospitalization
from sirius.blueprints.api.remote_service.tula.passive.hospitalization.test.test_data import \
    get_meas_hosp_data_required, get_meas_hosp_data_more
from sirius.blueprints.api.remote_service.tula.passive.organization.test.request import \
    create_organization, edit_organization, delete_organization
from sirius.blueprints.api.remote_service.tula.passive.organization.test.test_data import \
    get_organization_data_required, get_organization_data_more
from sirius.blueprints.api.remote_service.tula.passive.research.test.request import \
    create_research, edit_research
from sirius.blueprints.api.remote_service.tula.passive.research.test.test_data import \
    get_meas_research_data_required, get_meas_research_data_more
from sirius.blueprints.api.remote_service.tula.passive.specialists_checkup.test.request import \
    create_sp_checkup, edit_sp_checkup
from sirius.blueprints.api.remote_service.tula.passive.specialists_checkup.test.test_data import \
    get_sp_checkup_data_required, get_sp_checkup_data_more
from sirius.blueprints.api.test.connect import make_login, release_token

risar_session = None
sirius_session = (None, None)


class _TestTula:
    def test_mr_auth(self):
        global risar_session
        if risar_session:
            return
        with app.app_context():
            with make_login() as sess:
                risar_session = sess
                print 'test_risar_auth', sess

    def test_full_cycle(self, testapp):
        ext_org_id = org_id = 111
        # mis_to_mr_organisation(testapp, ext_org_id)
        ext_doctor_id = doctor_id = 112
        # mis_to_mr_doctor(testapp, ext_org_id, ext_doctor_id)
        ext_client_id = 113
        # mis_to_mr_client(testapp, ext_client_id)
        client_id = 110
        sch_ticket_id = 3928  # 09:00 23.11.16 Test User (obstetrician-gynecologist)
        # create an appointment in the web UI (http://10.1.2.13:6600/patients/search/)
        # mr_to_mis_sch_ticket(testapp, org_id, doctor_id, client_id, sch_ticket_id)
        # card_id = !mr_create_card(testapp, client_id)
        card_id = 468  # create a card in the web UI
        # 690
        ext_card_id = 222
        # mr_to_mis_card(testapp, client_id, card_id)
        # !mr_create_first_checkup(testapp, card_id)
        first_checkup_id = 4345  # create a first checkup in the web UI
        second_checkup_id = 0  # create a second checkup in the web UI
        pc_checkup_id = 0  # create a PC checkup in the web UI
        # mr_to_mis_first_checkup(testapp, card_id, first_checkup_id)
        # mr_to_mis_first_ticket25(testapp, card_id, first_checkup_id)
        ext_first_checkup_id = 222
        # mr_to_mis_second_ticket25(testapp, card_id, second_checkup_id)
        # mr_to_mis_pc_ticket25(testapp, card_id, pc_checkup_id)
        # mr_to_mis_first_checkup(testapp, card_id, first_checkup_id)
        # mr_to_mis_second_checkup(testapp, card_id, second_checkup_id)
        # mr_to_mis_pc_checkup(testapp, card_id, pc_checkup_id)
        # create referrals in the web UI - checkup, hospitalization, research
        # mr_to_mis_measures(testapp, card_id)
        # ch_event_measure_id = 6255
        # res_event_measure_id = 6258
        ext_ch_event_measure_id = 117
        ext_res_event_measure_id = 118
        ext_sp_checkup_id = 114
        # mis_to_mr_meas_sp_checkup(testapp, ext_card_id, ext_org_id, ext_doctor_id,
        #                           ext_ch_event_measure_id, ext_sp_checkup_id)
        # ext_hosp_id = 115
        # mis_to_mr_meas_hosp(testapp, card_id, ext_org_id, ext_doctor_id, ext_ch_event_measure_id, ext_hosp_id)
        ext_research_id = 116
        # mis_to_mr_meas_research(testapp, ext_card_id, ext_org_id, ext_doctor_id,
        #                         ext_res_event_measure_id, ext_research_id)
        # mis_to_mr_first_ticket25(testapp, ext_card_id, ext_org_id, ext_doctor_id, ext_first_checkup_id)
        # mis_to_mr_second_ticket25
        # mis_to_mr_pc_ticket25
        # mis_to_mr_childbirth(testapp, ext_card_id, ext_org_id, ext_doctor_id)
        # mr_to_mis_epicrisis(testapp, card_id)


def mis_to_mr_organisation(testapp, org_id):
    # create_organization(testapp, risar_session, get_organization_data_required(org_id))
    # delete_organization(testapp, risar_session, org_id)
    edit_organization(testapp, risar_session, org_id, get_organization_data_more(org_id))


def mis_to_mr_doctor(testapp, org_id, doctor_id):
    # create_doctor(testapp, risar_session, get_doctor_data_required(org_id, doctor_id))
    # delete_doctor(testapp, risar_session, org_id, doctor_id)
    edit_doctor(testapp, risar_session, org_id, doctor_id, get_doctor_data_more(org_id, doctor_id))


def mis_to_mr_client(testapp, client_id):
    # create_client(testapp, risar_session, get_client_data_required(client_id))
    edit_client(testapp, risar_session, client_id, get_client_data_more(client_id))


def mr_make_appointment(testapp, client_id, ticket_id, doctor_id):
    is_delete = False
    make_appointment(risar_session, get_mr_appointment_data(client_id, ticket_id, doctor_id, is_delete))


def mr_to_mis_sch_ticket(testapp, org_id, doctor_id, client_id, ticket_id):
    is_delete = False
    send_event_remote(testapp, risar_session, get_sch_ticket_data_required(
        is_delete, client_id, ticket_id, org_id, doctor_id
    ))


# def mr_create_card(testapp, client_id, sch_client_ticket_id=None):
#     res = create_card(risar_session, client_id, sch_client_ticket_id)
#     card_id = res['result']['card_id']
#     return card_id


def mr_to_mis_card(testapp, client_id, card_id):
    is_create = False
    request_local(testapp, risar_session, get_send_to_mis_card_data(client_id, card_id, is_create))


# def mr_create_first_checkup(testapp, card_id):
#     res = create_first_checkup(risar_session, card_id, get_first_checkup_data_required())
#     checkup_id = res['result']['checkup_id']
#     return checkup_id


def mr_to_mis_first_ticket25(testapp, card_id, checkup_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_first_ticket25_data(card_id, checkup_id, is_create))


def mr_to_mis_second_ticket25(testapp, card_id, checkup_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_second_ticket25_data(card_id, checkup_id, is_create))


def mr_to_mis_pc_ticket25(testapp, card_id, checkup_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_pc_ticket25_data(card_id, checkup_id, is_create))


def mr_to_mis_first_checkup(testapp, card_id, checkup_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_first_checkup_data(card_id, checkup_id, is_create))


def mr_to_mis_second_checkup(testapp, card_id, checkup_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_second_checkup_data(card_id, checkup_id, is_create))


def mr_to_mis_pc_checkup(testapp, card_id, checkup_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_pc_checkup_data(card_id, checkup_id, is_create))


def mr_to_mis_measures(testapp, card_id):
    is_create = True
    request_local(testapp, risar_session, get_send_to_mis_measures_data(card_id, is_create))


def mis_to_mr_meas_sp_checkup(testapp, card_id, org_id, doctor_id, event_measure_id, sp_checkup_id):
    create_sp_checkup(testapp, risar_session, card_id, get_sp_checkup_data_required(
        org_id, doctor_id, event_measure_id, sp_checkup_id))
    # edit_sp_checkup(testapp, risar_session, card_id, sp_checkup_id, get_sp_checkup_data_more(
    #     org_id, doctor_id, event_measure_id, sp_checkup_id))


def mis_to_mr_meas_hosp(testapp, card_id, org_id, doctor_id, event_measure_id, meas_hosp_id):
    create_hospitalization(testapp, risar_session, card_id, get_meas_hosp_data_required(
        org_id, doctor_id, event_measure_id, meas_hosp_id))
    edit_hospitalization(testapp, risar_session, card_id, meas_hosp_id, get_meas_hosp_data_more(
        org_id, doctor_id, event_measure_id, meas_hosp_id))


def mis_to_mr_meas_research(testapp, card_id, org_id, doctor_id, event_measure_id, meas_research_id):
    create_research(testapp, risar_session, card_id, get_meas_research_data_required(
        org_id, doctor_id, event_measure_id, meas_research_id))
    # edit_research(testapp, risar_session, card_id, meas_research_id, get_meas_research_data_more(
    #     org_id, doctor_id, event_measure_id, meas_research_id))


def mis_to_mr_first_ticket25(testapp, card_id, org_id, doctor_id, checkup_id):
    edit_checkup_first_ticket25(testapp, risar_session, card_id, checkup_id, get_first_ticket25_data_more(
        org_id, doctor_id, checkup_id))


def mis_to_mr_childbirth(testapp, card_id, org_id, doctor_id):
    # create_childbirth(testapp, risar_session, card_id, get_childbirth_data_required(org_id, doctor_id))
    edit_childbirth(testapp, risar_session, card_id, get_childbirth_data_more(org_id, doctor_id))


def mr_to_mis_epicrisis(testapp, card_id):
    is_create = False
    request_local(testapp, risar_session, get_send_to_mis_epicrisis_data(card_id, is_create))
[ "paschenko@bars-open.ru" ]
paschenko@bars-open.ru
7e9ed44cfcf4dfe7080d14f4c8a120d31b1b1584
c025d4f76f37d4792299dd7239320d3327e1f7b2
/main test2
d175c5cb1966ce95b9d57093b43581e040469229
[]
no_license
vadiz/TESTBOT
c49ac3faae4ad55e6448d1d5d0fe831e827f9d1d
6ada4413a2767077db366b8dfc95d93df533b944
refs/heads/master
2021-09-05T21:28:00.296686
2018-01-31T04:51:34
2018-01-31T04:51:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
103,997
#coding:utf-8 import telebot, config from telebot import types import datetime from datetime import date import time import os import sys import subprocess import string import re import random from collections import Counter knownUsers = [] # todo: save these in a file, userStep = {} # so they won't reset every time the bot restarts def get_user_step(uid): if uid in userStep: return userStep[uid] else: knownUsers.append(uid) userStep[uid] = 0 return 0 def listener(messages): for m in messages: if m.content_type == 'text': date = datetime.date.today() print (str(m.chat.first_name) + " [" + str(m.chat.id) + "]: " + m.text) vremya = time.asctime(time.localtime(time.time())) print (vremya) spisok = [str(vremya) + '-' + str(m.chat.first_name) + " [" + str(m.chat.id) + "]: " + m.text] filename = str(date) + "_" + m.chat.first_name +'.txt' spisok2 = open("/home/makar/rabotayet/Bot_working/logs/" + filename, 'a') for index in spisok: spisok2.write(index + '\n') spisok2.close bot=telebot.TeleBot(config.TOKEN) bot.set_update_listener(listener) def main(): @bot.message_handler(commands=["start"]) def handle_text(message): user_markup = telebot.types.ReplyKeyboardMarkup(True, False) priv = ('Привет;)', 'Давай работать, что-ли?:Р', 'Хочешь аккаунтов?)', 'Мамбы, вк!! Легко и просто!!!', 'Нажми на кно... Хотя нет, не нажим... Жми, короче, я согласен...Может...', 'Давай нажимать кнопочки и ломать меня))', 'Люблю, когда нажимают кнопочки))', 'Надоели хачики? Попроси картиночку!!') #orig_mamba = open('mambaorig.txt', 'r+') rab_mamba = open('mamba.txt', 'r+') mamba_list = (rab_mamba.read()) mambalist = mamba_list.split('\n') mambishche = [x for x in mambalist if x != ''] mambaresultat = str(len(mambishche)) print(mambaresultat) #omambalist = (orig_mamba.read()) #omamba_list = omambalist.split('\n') #result = [] #for index in mambishche: # if index in omamba_list: # result.append(index) #print(len(result)) #mambaresultat = str(len(result)) ############# #orig_vk = open('vkorig.txt', 'r+') rab_vk = open('vk.txt', 'r+') vk_list = (rab_vk.read()) vklist = vk_list.split('\n') vk_proverka = [x for x in vklist if x != ''] vkresultat = str(len(vk_proverka)) print(vkresultat) #ovk_list = (orig_vk.read()) #ovklist = ovk_list.split('\n') #vkresult = [] #for index in vk_proverka: # if index in ovklist: # vkresult.append(index) #print(vkresult) #print(len(vkresult)) #vkresultat = str(len(vkresult)) ############ #orig_mamba_ua = open('mambaorigua.txt', 'r+') rab_mamba_ua = open('mambaua.txt', 'r+') mamba_list_ua = (rab_mamba_ua.read()) mambalist_ua = mamba_list_ua.split('\n') mambishche_ua = [x for x in mambalist_ua if x != ''] mambauaresult = str(len(mambishche_ua)) print(mambauaresult) #omambalist_ua = (orig_mamba_ua.read()) #omamba_list_ua = omambalist_ua.split('\n') #mamba_ua_result = [] #for index in mambishche_ua: # if index in omamba_list_ua: # mamba_ua_result.append(index) #print(len(mamba_ua_result)) #mambauaresult = str(len(mamba_ua_result)) ############# #orig_vkua = open('vkorigua.txt', 'r+') rab_vkua = open('vkkiev.txt', 'r+') vk_list_ua = (rab_vkua.read()) vklist_ua = vk_list_ua.split('\n') vk_proverka_ua = [x for x in vklist_ua if x != ''] vkuaresultat = str(len(vk_proverka_ua)) print(vkuaresultat) #ovk_list_ua = (orig_vkua.read()) #ovklist_ua = ovk_list_ua.split('\n') #vkresult_ua = [] #for index in vk_proverka_ua: # if index in ovklist_ua: # vkresult_ua.append(index) #print(len(vkresult_ua)) #vkuaresultat = str(len(vkresult_ua)) user_markup.row('Нужна мамба на Киев') user_markup.row('Получить вк Киев') 
user_markup.row('Получить мамбу МСК') user_markup.row('Получить вк МСК') user_markup.row('КНОПКА') bot.send_message(message.from_user.id, random.choice(priv), reply_markup=user_markup) bot.send_message(message.chat.id, 'У меня есть в наличии много вкусностей:)\n') bot.send_message(message.chat.id, 'Mamba.ru: ' + mambaresultat + '\nVk.com: ' + vkresultat + '\nMamba.UA: ' + mambauaresult + '\nvk.com(ua): ' + vkuaresultat) @bot.message_handler(func=lambda message: message.text == "КНОПКА") def handle_text(message): user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(message.from_user.id, 'Иди ко мне, сладкая...') kartink = random.choice(os.listdir("/home/makar/rabotayet/Bot_working/kartinki/")) kartinka = "/home/makar/rabotayet/Bot_working/kartinki/" + kartink print (kartink) print (kartinka) bot.send_photo(message.from_user.id, open(kartinka, 'rb')) @bot.message_handler(commands=["help"]) def start(message): bot.send_message(message.chat.id, '') @bot.message_handler(func=lambda message: message.text == "На главную") def handle_text(message): user_markup = telebot.types.ReplyKeyboardMarkup(True, False) #orig_mamba = open('mambaorig.txt', 'r+') rab_mamba = open('mamba.txt', 'r+') mamba_list = (rab_mamba.read()) mambalist = mamba_list.split('\n') mambishche = [x for x in mambalist if x != ''] mambaresultat = str(len(mambishche)) print(mambaresultat) #omambalist = (orig_mamba.read()) #omamba_list = omambalist.split('\n') #result = [] #for index in mambishche: # if index in omamba_list: # result.append(index) #print(len(result)) #mambaresultat = str(len(result)) ############# #orig_vk = open('vkorig.txt', 'r+') rab_vk = open('vk.txt', 'r+') vk_list = (rab_vk.read()) vklist = vk_list.split('\n') vk_proverka = [x for x in vklist if x != ''] vkresultat = str(len(vk_proverka)) print(vkresultat) #ovk_list = (orig_vk.read()) #ovklist = ovk_list.split('\n') #vkresult = [] #for index in vk_proverka: # if index in ovklist: # vkresult.append(index) #print(vkresult) #print(len(vkresult)) #vkresultat = str(len(vkresult)) ############ #orig_mamba_ua = open('mambaorigua.txt', 'r+') rab_mamba_ua = open('mambaua.txt', 'r+') mamba_list_ua = (rab_mamba_ua.read()) mambalist_ua = mamba_list_ua.split('\n') mambishche_ua = [x for x in mambalist_ua if x != ''] mambauaresult = str(len(mambishche_ua)) print(mambauaresult) #omambalist_ua = (orig_mamba_ua.read()) #omamba_list_ua = omambalist_ua.split('\n') #mamba_ua_result = [] #for index in mambishche_ua: # if index in omamba_list_ua: # mamba_ua_result.append(index) #print(len(mamba_ua_result)) #mambauaresult = str(len(mamba_ua_result)) ############# #orig_vkua = open('vkorigua.txt', 'r+') rab_vkua = open('vkkiev.txt', 'r+') vk_list_ua = (rab_vkua.read()) vklist_ua = vk_list_ua.split('\n') vk_proverka_ua = [x for x in vklist_ua if x != ''] vkuaresultat = str(len(vk_proverka_ua)) print(vkuaresultat) #ovk_list_ua = (orig_vkua.read()) #ovklist_ua = ovk_list_ua.split('\n') #vkresult_ua = [] #for index in vk_proverka_ua: # if index in ovklist_ua: # vkresult_ua.append(index) #print(len(vkresult_ua)) #vkuaresultat = str(len(vkresult_ua)) user_markup.row('Нужна мамба на Киев') user_markup.row('Получить вк Киев') user_markup.row('Получить мамбу МСК') user_markup.row('Получить вк МСК') user_markup.row('КНОПКА') glavn = ('Опять мы тут, продолжим же)', 'Что-нибудь еще?', 'Продолжаем.', 'Ну, что еще?','Меня разорили...','Я снова потерял часть себя:(','Желаете еще чего-нибудь?') bot.send_message(message.from_user.id, 
random.choice(glavn), reply_markup=user_markup) bot.send_message(message.chat.id, 'Теперь у меня: \n' +'Mamba.ru: ' + mambaresultat + '\nVk.com: ' + vkresultat + '\nMamba.UA: ' + mambauaresult + '\nvk.com(ua): ' + vkuaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Нужна мамба на Киев") def handle_text(message): #orig_mamba_ua = open('mambaorigua.txt', 'r+') rab_mamba_ua = open('mambaua.txt', 'r+') mamba_list_ua = (rab_mamba_ua.read()) mambalist_ua = mamba_list_ua.split('\n') mambishche_ua = [x for x in mambalist_ua if x != ''] mambauaresult = str(len(mambishche_ua)) print(mambauaresult) #omambalist_ua = (orig_mamba_ua.read()) #omamba_list_ua = omambalist_ua.split('\n') #mamba_ua_result = [] #for index in mambishche_ua: # if index in omamba_list_ua: # mamba_ua_result.append(index) #print(len(mamba_ua_result)) #mambauaresult = str(len(mamba_ua_result)) if mambauaresult == '1' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одна мамба') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько нужно? \nОстаток: " + mambauaresult, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одна мамба") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mambaua) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() elif mambauaresult == '2' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одна мамба') user_markup.row('Две мамбы') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько нужно? 
\nОстаток: " + mambauaresult, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одна мамба") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mambaua) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Две мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua) bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup) print (mambaua) print (mambaua1) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() elif mambauaresult == '3' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одна мамба') user_markup.row('Две мамбы') user_markup.row('Три мамбы') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько нужно? 
\nОстаток: " + mambauaresult, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одна мамба") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mambaua) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Две мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua) bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup) print (mambaua) print (mambaua1) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Три мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] mambaua2 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это уже дело)) \n '+mambaua) bot.send_message(m.chat.id, mambaua1) bot.send_message(m.chat.id, mambaua2, reply_markup=user_markup) print (mambaua) print (mambaua1) print (mambaua2) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() elif mambauaresult == '4' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одна мамба') user_markup.row('Две мамбы') user_markup.row('Три мамбы') user_markup.row('Четыре мамбы') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько нужно? 
\nОстаток: " + mambauaresult, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одна мамба") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mambaua) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Две мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua) bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup) print (mambaua) print (mambaua1) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Три мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] mambaua2 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это уже дело)) \n '+mambaua) bot.send_message(m.chat.id, mambaua1) bot.send_message(m.chat.id, mambaua2, reply_markup=user_markup) print (mambaua) print (mambaua1) print (mambaua2) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Четыре мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] mambaua2 = uaaccount.pop (0) del uaaccount[0] mambaua3 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это сильно)) \n '+mambaua) bot.send_message(m.chat.id, mambaua1) bot.send_message(m.chat.id, mambaua2) bot.send_message(m.chat.id, mambaua3, reply_markup=user_markup) print (mambaua) print (mambaua1) print (mambaua2) print (mambaua3) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = 
uaspisok.split('\n') print (uaaccount) uamamba.close() elif mambauaresult == '0' : user_markup.row('На главную') bot.send_message(message.chat.id, "Не осталось совсем, реально, ждите, пока закинут. \nАкков: " + mambaresult, reply_markup=user_markup) else : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одна мамба') user_markup.row('Две мамбы') user_markup.row('Три мамбы') user_markup.row('Четыре мамбы') user_markup.row('Пять мамб') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько нужно? \nВ наличии: " + mambauaresult, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одна мамба") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mambaua) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Две мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mambaua) bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup) print (mambaua) print (mambaua1) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Три мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] mambaua2 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это уже дело)) \n '+mambaua) bot.send_message(m.chat.id, mambaua1) bot.send_message(m.chat.id, mambaua2, reply_markup=user_markup) print (mambaua) print (mambaua1) print (mambaua2) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Четыре мамбы") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] mambaua2 = uaaccount.pop (0) del uaaccount[0] mambaua3 = uaaccount.pop (0) del uaaccount[0] user_markup 
= telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это сильно)) \n '+mambaua) bot.send_message(m.chat.id, mambaua1) bot.send_message(m.chat.id, mambaua2) bot.send_message(m.chat.id, mambaua3, reply_markup=user_markup) print (mambaua) print (mambaua1) print (mambaua2) print (mambaua3) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Пять мамб") def command_text_hi(m): uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() mambaua = uaaccount.pop (0) del uaaccount[0] mambaua1 = uaaccount.pop (0) del uaaccount[0] mambaua2 = uaaccount.pop (0) del uaaccount[0] mambaua3 = uaaccount.pop (0) del uaaccount[0] mambaua4 = uaaccount.pop (0) del uaaccount[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, если ты справишься с ними... \n '+mambaua) bot.send_message(m.chat.id, mambaua1) bot.send_message(m.chat.id, mambaua2) bot.send_message(m.chat.id, mambaua3) bot.send_message(m.chat.id, mambaua4, reply_markup=user_markup) print (mambaua) print (mambaua1) print (mambaua2) print (mambaua3) print (mambaua4) print (uaaccount) uamamba = open('mambaua.txt', 'w') for index in uaaccount: uamamba.write(index + '\n') uamamba.close uamamba = open('mambaua.txt', 'r+') uaspisok = (uamamba.read()) uaaccount = uaspisok.split('\n') print (uaaccount) uamamba.close() @bot.message_handler(func=lambda message: message.text == "Получить вк Киев") def handle_text(message): #orig_vkua = open('vkorigua.txt', 'r+') rab_vkua = open('vkkiev.txt', 'r+') vk_list_ua = (rab_vkua.read()) vklist_ua = vk_list_ua.split('\n') vk_proverka_ua = [x for x in vklist_ua if x != ''] vkuaresultat = str(len(vk_proverka_ua)) print(vkuaresultat) #ovk_list_ua = (orig_vkua.read()) #ovklist_ua = ovk_list_ua.split('\n') #vkresult_ua = [] #for index in vk_proverka_ua: # if index in ovklist_ua: # vkresult_ua.append(index) #print(len(vkresult_ua)) #vkuaresultat = str(len(vkresult_ua)) if vkuaresultat == '1': user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1 акк') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == '1 акк') def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vkkiev) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() elif vkuaresultat == '2' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1 акк') user_markup.row('2 акка') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? 
\nОстаток: " + vkuaresultat, reply_markup=user_markup) bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == '1 акк') def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vkkiev) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "2 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vkkiev) bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup) print (vkkiev+' '+vkkiev1) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() elif vkuaresultat == '3' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1 акк') user_markup.row('2 акка') user_markup.row('3 акка') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == '1 акк') def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vkkiev) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "2 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. 
Мне не жалко...\n' +vkkiev) bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup) print (vkkiev+' '+vkkiev1) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "3 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev2 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vkkiev) bot.send_message(m.chat.id, vkkiev1) bot.send_message(m.chat.id, vkkiev2, reply_markup=user_markup) print (vkkiev+' '+vkkiev1+' '+vkkiev2) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() elif vkuaresultat == '4' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1 акк') user_markup.row('2 акка') user_markup.row('3 акка') user_markup.row('4 акка') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == '1 акк') def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vkkiev) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "2 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. 
Мне не жалко...\n' +vkkiev) bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup) print (vkkiev+' '+vkkiev1) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "3 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev2 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vkkiev) bot.send_message(m.chat.id, vkkiev1) bot.send_message(m.chat.id, vkkiev2, reply_markup=user_markup) print (vkkiev+' '+vkkiev1+' '+vkkiev2) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "4 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev2 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev3 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, раз так хочешь... \n' +vkkiev) bot.send_message(m.chat.id, vkkiev1) bot.send_message(m.chat.id, vkkiev2) bot.send_message(m.chat.id, vkkiev3, reply_markup=user_markup) print (vkkiev+" "+vkkiev1+" "+vkkiev2+" "+vkkiev3) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() elif vkuaresultat == '0': user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(message.chat.id, "Закончились, совсем. \nАкков: " + vkuaresultat, reply_markup=user_markup) else: user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1 акк') user_markup.row('2 акка') user_markup.row('3 акка') user_markup.row('4 акка') user_markup.row('5 акков') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? 
\nОстаток: " + vkuaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == '1 акк') def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vkkiev) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "2 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vkkiev) bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup) print (vkkiev+' '+vkkiev1) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "3 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev2 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vkkiev) bot.send_message(m.chat.id, vkkiev1) bot.send_message(m.chat.id, vkkiev2, reply_markup=user_markup) print (vkkiev+' '+vkkiev1+' '+vkkiev2) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "4 акка") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev2 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev3 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, раз так хочешь... 
\n' +vkkiev) bot.send_message(m.chat.id, vkkiev1) bot.send_message(m.chat.id, vkkiev2) bot.send_message(m.chat.id, vkkiev3, reply_markup=user_markup) print (vkkiev+" "+vkkiev1+" "+vkkiev2+" "+vkkiev3) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "5 акков") def command_text_hi(m): vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() vkkiev = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev1 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev2 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev3 = vkkiev_list.pop (0) del vkkiev_list[0] vkkiev4 = vkkiev_list.pop (0) del vkkiev_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ого, а сравишься?) \n' +vkkiev) bot.send_message(m.chat.id, vkkiev1) bot.send_message(m.chat.id, vkkiev2) bot.send_message(m.chat.id, vkkiev3) bot.send_message(m.chat.id, vkkiev4, reply_markup=user_markup) print (vkkiev+' '+vkkiev1+' '+vkkiev2+' '+vkkiev3+' '+vkkiev4) print (vkkiev_list) vkkiev = open('vkkiev.txt', 'w') for index in vkkiev_list: vkkiev.write(index + '\n') vkkiev.close vkkiev = open('vkkiev.txt', 'r+') svkkiev = (vkkiev.read()) vkkiev_list = svkkiev.split('\n') print (vkkiev_list) vkkiev.close() @bot.message_handler(func=lambda message: message.text == "Получить мамбу МСК") def handle_text(message): #orig_mamba = open('mambaorig.txt', 'r+') rab_mamba = open('mamba.txt', 'r+') mamba_list = (rab_mamba.read()) mambalist = mamba_list.split('\n') mambishche = [x for x in mambalist if x != ''] mambaresultat = str(len(mambishche)) print(mambaresultat) #omambalist = (orig_mamba.read()) #omamba_list = omambalist.split('\n') #result = [] #for index in mambishche: # if index in omamba_list: # result.append(index) #print(len(result)) #mambaresultat = str(len(result)) ############# if mambaresultat == '1': user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одну') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одну") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mamba) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() elif mambaresultat == '2' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одну') user_markup.row('Две') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? 
\nОстаток: " + mambaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одну") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mamba) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Две") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba) bot.send_message(m.chat.id, mamba1, reply_markup=user_markup) print (mamba) print (mamba1) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() elif mambaresultat == '3' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одну') user_markup.row('Две') user_markup.row('Три') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одну") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mamba) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Две") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba) bot.send_message(m.chat.id, mamba1, reply_markup=user_markup) print (mamba) print (mamba1) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Три") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] mamba2 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это уже дело)) \n '+mamba) bot.send_message(m.chat.id, mamba1) bot.send_message(m.chat.id, mamba2, reply_markup=user_markup) print (mamba) print (mamba1) print (mamba2) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') 
print (a) f.close() elif mambaresultat == '4' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одну') user_markup.row('Две') user_markup.row('Три') user_markup.row('Четыре') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одну") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mamba) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Две") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba) bot.send_message(m.chat.id, mamba1, reply_markup=user_markup) print (mamba) print (mamba1) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Три") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] mamba2 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это уже дело)) \n '+mamba) bot.send_message(m.chat.id, mamba1) bot.send_message(m.chat.id, mamba2, reply_markup=user_markup) print (mamba) print (mamba1) print (mamba2) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Четыре") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] mamba2 = a.pop (0) del a[0] mamba3 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это сильно)) \n '+mamba) bot.send_message(m.chat.id, mamba1) bot.send_message(m.chat.id, mamba2) bot.send_message(m.chat.id, mamba3, reply_markup=user_markup) print (mamba) print (mamba1) print (mamba2) print (mamba3) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) A = s.split('\n') print (a) f.close() elif mambaresultat == '0': user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(message.chat.id, "Закончились", reply_markup=user_markup) else: user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('Одну') user_markup.row('Две') user_markup.row('Три') user_markup.row('Четыре') user_markup.row('Пять') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? 
\nОстаток: " + mambaresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "Одну") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup) bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)') print (mamba) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Две") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Держи: \n '+mamba) bot.send_message(m.chat.id, mamba1, reply_markup=user_markup) print (mamba) print (mamba1) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Три") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] mamba2 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это уже дело)) \n '+mamba) bot.send_message(m.chat.id, mamba1) bot.send_message(m.chat.id, mamba2, reply_markup=user_markup) print (mamba) print (mamba1) print (mamba2) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Четыре") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] mamba2 = a.pop (0) del a[0] mamba3 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Это сильно)) \n '+mamba) bot.send_message(m.chat.id, mamba1) bot.send_message(m.chat.id, mamba2) bot.send_message(m.chat.id, mamba3, reply_markup=user_markup) print (mamba) print (mamba1) print (mamba2) print (mamba3) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) A = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Пять") def command_text_hi(m): f = open('mamba.txt', 'r+') s = (f.read()) a = s.split('\n') print (a) f.close() mamba = a.pop (0) del a[0] mamba1 = a.pop (0) del a[0] mamba2 = a.pop (0) del a[0] mamba3 = a.pop (0) del a[0] mamba4 = a.pop (0) del a[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, если ты справишься с ними... 
\n '+mamba) bot.send_message(m.chat.id, mamba1) bot.send_message(m.chat.id, mamba2) bot.send_message(m.chat.id, mamba3) bot.send_message(m.chat.id, mamba4, reply_markup=user_markup) print (mamba) print (mamba1) print (mamba2) print (mamba3) print (mamba4) print (a) f = open('mamba.txt', 'w') for index in a: f.write(index + '\n') f.close f = open('mamba.txt', 'r+') s = (f.read()) A = s.split('\n') print (a) f.close() @bot.message_handler(func=lambda message: message.text == "Получить вк МСК") def handle_text(message): #orig_vk = open('vkorig.txt', 'r+') rab_vk = open('vk.txt', 'r+') vk_list = (rab_vk.read()) vklist = vk_list.split('\n') vk_proverka = [x for x in vklist if x != ''] vkresultat = str(len(vk_proverka)) print(vkresultat) #ovk_list = (orig_vk.read()) #ovklist = ovk_list.split('\n') #vkresult = [] #for index in vk_proverka: # if index in ovklist: # vkresult.append(index) #print(vkresult) #print(len(vkresult)) #vkresultat = str(len(vkresult)) if vkresultat == '1' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1') user_markup.row('На главную') bot.send_message(message.chat.id, "Последний, заберешь? ", reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "1") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vk) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vk) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() elif vkresultat == '2' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1') user_markup.row('2') user_markup.row('На главную') bot.send_message(message.chat.id, "Только две есть \nДаже докажу: " + vkresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "1") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vk) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vk) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "2") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. 
Мне не жалко...\n' +vk) bot.send_message(m.chat.id, vk1,reply_markup=user_markup) print (vk+' '+vk1) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() elif vkresultat == '3' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1') user_markup.row('2') user_markup.row('3') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \n Это все, что есть: " + vkresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "1") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vk) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vk) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "2") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vk) bot.send_message(m.chat.id, vk1,reply_markup=user_markup) print (vk+' '+vk1) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "3") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] vk2 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vk) bot.send_message(m.chat.id, vk1) bot.send_message(m.chat.id, vk2, reply_markup=user_markup) print (vk+' '+vk1+' '+vk2) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() elif vkresultat == '4' : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1') user_markup.row('2') user_markup.row('3') user_markup.row('4') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? 
\nОсталось: " + vkresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "1") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vk) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vk) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "2") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vk) bot.send_message(m.chat.id, vk1,reply_markup=user_markup) print (vk+' '+vk1) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "3") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] vk2 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vk) bot.send_message(m.chat.id, vk1) bot.send_message(m.chat.id, vk2, reply_markup=user_markup) print (vk+' '+vk1+' '+vk2) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "4") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] vk2 = vkmsk_list.pop (0) del vkmsk_list[0] vk3 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, раз так хочешь... 
\n' +vk) bot.send_message(m.chat.id, vk1) bot.send_message(m.chat.id, vk2) bot.send_message(m.chat.id, vk3, reply_markup=user_markup) print (vk+" "+vk1+" "+vk2+" "+vk3) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() elif vkresultat == '0' : user_markup.row('На главную') bot.send_message(message.chat.id, "Больше нет, ждите, пока зальют.", reply_markup=user_markup) else : user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('1') user_markup.row('2') user_markup.row('3') user_markup.row('4') user_markup.row('5') user_markup.row('На главную') bot.send_message(message.chat.id, "Сколько? \nВ сухом остатке у нас: " + vkresultat, reply_markup=user_markup) @bot.message_handler(func=lambda message: message.text == "1") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Лови:) \n' +vk) bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup) print (vk) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "2") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, два так два. 
Мне не жалко...\n' +vk) bot.send_message(m.chat.id, vk1,reply_markup=user_markup) print (vk+' '+vk1) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "3") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] vk2 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vk) bot.send_message(m.chat.id, vk1) bot.send_message(m.chat.id, vk2, reply_markup=user_markup) print (vk+' '+vk1+' '+vk2) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "4") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] vk2 = vkmsk_list.pop (0) del vkmsk_list[0] vk3 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ну, раз так хочешь... \n' +vk) bot.send_message(m.chat.id, vk1) bot.send_message(m.chat.id, vk2) bot.send_message(m.chat.id, vk3, reply_markup=user_markup) print (vk+" "+vk1+" "+vk2+" "+vk3) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() @bot.message_handler(func=lambda message: message.text == "5") def command_text_hi(m): vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() vk = vkmsk_list.pop (0) del vkmsk_list[0] vk1 = vkmsk_list.pop (0) del vkmsk_list[0] vk2 = vkmsk_list.pop (0) del vkmsk_list[0] vk3 = vkmsk_list.pop (0) del vkmsk_list[0] vk4 = vkmsk_list.pop (0) del vkmsk_list[0] user_markup = telebot.types.ReplyKeyboardMarkup(True, False) user_markup.row('На главную') bot.send_message(m.chat.id, 'Ого, а сравишься?) \n' +vk) bot.send_message(m.chat.id, vk1) bot.send_message(m.chat.id, vk2) bot.send_message(m.chat.id, vk3) bot.send_message(m.chat.id, vk4, reply_markup=user_markup) print (vk+' '+vk1+' '+vk2+' '+vk3+' '+vk4) print (vkmsk_list) vkmsk = open('vk.txt', 'w') for index in vkmsk_list: vkmsk.write(index + '\n') vkmsk.close vkmsk = open('vk.txt', 'r+') svkmsk = (vkmsk.read()) vkmsk_list = svkmsk.split('\n') print (vkmsk_list) vkmsk.close() if __name__=="__main__": bot.polling() if __name__=="__main__": main()
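# --- Editor's sketch (hypothetical helper, not part of the original bot) ---
# The numeric handlers above all repeat the same read/pop/rewrite sequence and
# differ only in how many entries they hand out. Assuming the same vk.txt
# layout the handlers imply (two lines consumed per entry, with only the first
# line of each pair sent to the user), the logic could be factored like this:
def take_accounts(count, path='vk.txt'):
    with open(path) as f:
        lines = f.read().split('\n')
    taken = lines[:2 * count:2]                 # first line of each consumed pair
    with open(path, 'w') as f:
        f.write('\n'.join(lines[2 * count:]))   # keep the remainder
    return taken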
[ "makarishche@gmail.com" ]
makarishche@gmail.com
7b1bd474762dbf9fa0ad77e916a9a288222c806a
44494598f8edcee0319f3b4ef69b704fbf6d88f2
/code/twurtle/src/TestDCMotorRobot.py
aad26a3b8a287a62bb2e513d1e4b4b865f1e0879
[]
no_license
whaleygeek/pyws
3cebd7e88b41e14d9c1e4dbb8148de63dadbdd57
e60724646e49287f1e12af609f325ac228b31512
refs/heads/master
2021-01-02T09:01:47.644851
2014-09-02T19:47:20
2014-09-02T19:47:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
168
py
# This is mainly to test that the packaging has worked for robot correctly

import robot

r = robot.MotorRobot(robot.DCMotorDrive(a1=11, a2=12, b1=13, b2=14))
r.test()
[ "david@thinkingbinaries.com" ]
david@thinkingbinaries.com
a5f5ad934ab6b4548d185c57b55e75a4fe701d2d
75dcb56e318688499bdab789262839e7f58bd4f6
/_algorithms_challenges/pybites/bitesofpy-master/!201-300/239/test_fizzbuzz.py
374796ea04fb39da68675115964e7be47e23b93c
[]
no_license
syurskyi/Algorithms_and_Data_Structure
9a1f358577e51e89c862d0f93f373b7f20ddd261
929dde1723fb2f54870c8a9badc80fc23e8400d3
refs/heads/master
2023-02-22T17:55:55.453535
2022-12-23T03:15:00
2022-12-23T03:15:00
226,243,987
4
1
null
2023-02-07T21:01:45
2019-12-06T04:14:10
Jupyter Notebook
UTF-8
Python
false
false
483
py
from fizzbuzz import fizzbuzz

# write one or more pytest functions below, they need to start with test_


def test_fizzbuzz_base():
    assert fizzbuzz(1) == 1
    assert fizzbuzz(2) == 2


def test_fizzbuzz_fizz():
    assert fizzbuzz(3) == 'Fizz'
    assert fizzbuzz(6) == 'Fizz'


def test_fizzbuzz_buzz():
    assert fizzbuzz(5) == 'Buzz'
    assert fizzbuzz(10) == 'Buzz'


def test_fizzbuzz_fizzbuzz():
    assert fizzbuzz(15) == 'Fizz Buzz'
    assert fizzbuzz(30) == 'Fizz Buzz'
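# --- Editor's sketch: a minimal fizzbuzz.py that would satisfy these tests ---
# Hypothetical implementation; the repository's actual fizzbuzz module is not
# shown in this record. The tests pin down the contract: plain ints for
# ordinary numbers, 'Fizz'/'Buzz' for multiples of 3/5, and the two words
# separated by a space for multiples of 15.
def fizzbuzz(n):
    if n % 15 == 0:
        return 'Fizz Buzz'
    if n % 3 == 0:
        return 'Fizz'
    if n % 5 == 0:
        return 'Buzz'
    return n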
[ "sergejyurskyj@yahoo.com" ]
sergejyurskyj@yahoo.com
6a0a01d92744efe31045b17e0d9e6e64dba5448a
8b2c5420f7e331fb6e48f3efd3cfc8a714291d4d
/finances/settings.py
7a343222fcdb21fb5e62ad3f6c5589226c6c6412
[]
no_license
jjjggg092/finalproject
bc297c8b623937f28565591138534c762bf36560
1159ca8ae47b364f84586e39176b678c3feb42f9
refs/heads/master
2021-06-22T12:27:34.707772
2019-12-10T23:13:46
2019-12-10T23:13:46
227,138,704
0
0
null
2021-06-10T22:22:57
2019-12-10T14:18:49
Python
UTF-8
Python
false
false
3,124
py
""" Django settings for finances project. Generated by 'django-admin startproject' using Django 2.0.3. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'c1+b$@a%ptdh=4=5i_4*6oa@k3*8+ezwc6__c^o!fszwf1=0gq' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'money.apps.MoneyConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'finances.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'finances.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/'
[ "jhon.goyes@yahoo.com" ]
jhon.goyes@yahoo.com
7951551b827a4fe78a0de05909e4bf7b4a989c18
34dd52bca544fd483606667fd9f867d6af68ef28
/exercise2/classification05.py
5146516b8e14e60c85d27ad8f4ef7e39f92cd66d
[]
no_license
Anderbone/CS918NaturalLanguageProcessing
8e68bd4a61758c2f127a344152fc1ad74f972e4c
ef1e68685e85e76354040c7359c2f29b0da7eff6
refs/heads/master
2020-05-24T18:15:00.075093
2019-05-18T21:26:42
2019-05-18T21:26:42
187,406,366
1
0
null
null
null
null
UTF-8
Python
false
false
14,174
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import testsets
import evaluation
import twokenize
import sklearn.feature_extraction
from nltk.classify.scikitlearn import SklearnClassifier
import sklearn
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import textPreprocessor01
import nltk
from nltk.stem import *
from nltk.probability import FreqDist
from nltk.corpus import sentiwordnet as swn
from gensim.models import word2vec
# import word2vecReader
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier


# TODO: load training data
def read_training_data(training_data):
    id_gts = {}
    with open(training_data, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.split('\t')
            tweetid = fields[0]
            gt = fields[1]
            content = fields[2].strip()
            id_gts[tweetid] = gt, content
    return id_gts


# traindic = read_training_data('twitter-training-data1.txt')
# traindic = read_training_data('twitter-training-data_small.txt')
traindic = read_training_data('twitter-training-data.txt')  # input here


def perprocessing(tdic):
    new_dic = {}
    for line in tdic:
        id = line
        gt = tdic[line][0]
        raw = ' '.join(twokenize.tokenizeRawTweetText(tdic[line][1]))
        text = twokenize.normalizeTextForTagger(raw)
        text_tk = twokenize.tokenize(text)
        telist = []
        for word in text_tk:
            word = word.lower()
            ps = nltk.stem.PorterStemmer()
            word = ps.stem(word)
            telist.append(word)
        newtext = ' '.join(telist)
        # print(newtext)
        newtext = textPreprocessor01.replaceall(newtext)
        new_dic[id] = gt, newtext
    return new_dic
    # print(new_dic)


def get_train_corpus(new_dic):
    traincorpus = []
    for line in new_dic:
        traincorpus.append(new_dic[line][1])
    return traincorpus


def get_split_corpus(new_dic):
    split_traincorpus = []
    for line in new_dic:
        split_traincorpus.append(new_dic[line][1].split())
    return split_traincorpus


# tdic = read_training_data('twitter-training-data.txt')
# print(tdic)
# for i in tdic:
#     print(i)  # id
#     print(tdic[i])
#     print(tdic[i][0])  # gt. positive/negative
#     print(tdic[i][1])  # content
# print(corpus)
# print(split_corpus)


# TODO extract features
def get_vect():
    vect = CountVectorizer(stop_words='english', lowercase=True)
    # vect = CountVectorizer(stop_words='english', min_df= ,lowercase=True)
    # vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 2))
    X = vect.fit_transform(train_corpus)
    return vect, X


def get_train_ngrams():
    # vectorizer = CountVectorizer(stop_words='english')
    # vect = CountVectorizer(stop_words='english')
    # # vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 2))
    # X = vect.fit_transform(corpus)
    # print(vectorizer.vocabulary_)
    X = get_vect()[1]
    # print(vectorizer.vocabulary_.keys())
    # print('ngram----')
    # print(X.todense())
    # print(len(X.todense()))
    # X.todense()
    # print(X.toarray())
    return np.array(X.todense())


def get_test_ngrams(corpus):
    vect = get_vect()[0]
    X = vect.transform(corpus)
    b = X.todense()
    return np.array(b)


def get_tfidf(corpus):
    # vectorizer = CountVectorizer(stop_words='english')
    # vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 2))
    vect = get_vect()[0]
    tfidf = TfidfVectorizer(vocabulary=list(vect.vocabulary_.keys()), min_df=0.6,
                            lowercase=True, stop_words='english')
    tfs = tfidf.fit_transform(corpus)
    # X = vect.fit_transform(corpus)
    # print(vectorizer.vocabulary_)
    # print(vectorizer.vocabulary_.keys())
    tt = tfs.todense()
    # print('tfid..')
    # print(len(tt))
    return np.array(tt)


# maybe it's wrong
def wordembedding(split_corpus):
    # model = word2vec.Word2Vec(sentences, \
    #                           workers=num_workers, \
    #                           size=num_features, \
    #                           min_count=min_word_count, \
    #                           window=context,
    #                           sample=downsampling)
    model = word2vec.Word2Vec(split_corpus, size=50, min_count=1)
    # To make the model memory efficient
    model.init_sims(replace=True)
    # Saving the model for later use. Can be loaded using Word2Vec.load()
    model_name = "wordembedding_features"
    model.save(model_name)
    # print(model['may'])
    # print('word embedding --------------')
    # print(model.wv.syn0)
    # print(model.wv.vocab)
    # print(len(model.wv.vocab))
    # print(model.wv.index2word)
    print(len(model.wv.index2word))
    print(len(model.wv.syn0))


# right here
def word_embedding2(split_corpus):
    # print('word embedding2 --------------------')
    all = []
    for i in split_corpus:
        # print(i)
        model = word2vec.Word2Vec([i], size=300, min_count=1)
        # print(model.vocabulary)
        # print(model.wv.vocab)
        # s = model.wv.syn0
        s = model.wv.vectors
        ans = list(map(sum, zip(*s)))  # sum of them
        all.append(ans)
    return np.array(all)


def senti_bi_lexicon(split_corpus):
    def inputfile(file):
        with open(file, 'r') as my_file:
            words = [every_line.rstrip() for every_line in my_file]
        return words

    def count_p_n(mylist):
        pos_num = 0
        neg_num = 0
        positive = inputfile('positive-words.txt')
        negative = inputfile('negative-words.txt')
        p_dic = FreqDist(positive)
        n_dic = FreqDist(negative)
        for word in mylist:
            pos_num += p_dic[word]
            neg_num += n_dic[word]
        return pos_num, neg_num

    P_N = []
    for line in split_corpus:
        p_num_all = n_num_all = 0
        p_n_num = count_p_n(line)
        p_num_all += p_n_num[0]
        n_num_all += p_n_num[1]
        P_N.append([p_num_all, n_num_all])
    # print('sent..')
    # print(len(P_N))
    return np.array(P_N)


def get_url(split_corpus):
    url = []
    for i in split_corpus:
        num = i.count('URLLINK')
        url.append([num])
    # print(url)
    # print(len(url))
    return np.array(url)


def get_mention(split_corpus):
    men = []
    for i in split_corpus:
        num = i.count('USERMENTION')
        men.append([num])
    # print(url)
    # print(len(url))
    return np.array(men)


def get_face(split_corpus):
    face = []
    for i in split_corpus:
        numi = i.count('HAPPYFACE')
        numj = i.count('SADFACE')
        face.append([numi, numj])
    # print(url)
    # print(len(url))
    return np.array(face)


newdic = perprocessing(traindic)
train_corpus = get_train_corpus(newdic)
split_corpus = get_split_corpus(newdic)
# print(split_corpus)

F1 = get_train_ngrams()
F2 = get_tfidf(train_corpus)
F3 = senti_bi_lexicon(split_corpus)
# print(F3)
F4 = word_embedding2(split_corpus)
# print(F4)
F5 = get_url(split_corpus)
# print(F5)
F6 = get_mention(split_corpus)
F7 = get_face(split_corpus)
# print(F7)
# print(F7)

# X = np.concatenate((F3, F4, F5, F7), axis=1)
# X = np.concatenate((F3, F1, F5, F7), axis=1)
# print(X)

# labels_to_array = {"positive": 1, "negative": -1, "neutral": 0}
labels_to_array = {"positive": 0, "negative": 2, "neutral": 1}
labels = [labels_to_array[newdic[tweet][0]] for tweet in newdic]
# print(labels)
# print('5.Y..')
Y = np.array(labels)

# X3 = F5
# print(F3)
# X = F1
# X = F2
# X = F4
# X5 = F5
# X35 = np.concatenate((X3, X5), axis=1)
# X = F5
# X = F6
# print(F5)
# print(F6)
# X = np.concatenate((F1, F2, F3, F4, F5, F6, F7), axis=1)
# X = np.concatenate((F1, F3), axis=1)
# X = F7

for classifier in ['MNB', 'Naive Bayes', 'Decision Tree', 'Logistic Regression', 'Random Forest', 'KNN']:
# for classifier in ['Naive Bayes', 'Decision Tree', 'Logistic Regression', 'Random Forest', 'KNN']:
    # You may rename the names of the classifiers to something more descriptive
    if classifier == 'Naive Bayes':
        print('Training ' + classifier)
        # TODO: extract features for training classifier1
        # TODO: train sentiment classifier1
        # X = F1
        # Y = Y.reshape(Y.size, 1)
        X = np.concatenate((F3, F5, F4, F7), axis=1)
        model = GaussianNB()
        model.fit(X, Y)
        # vec = DictVectorizer(sparse=False)
        # svm_clf = svm.SVC(kernel='linear')
        # model = Pipeline([('vectorizer', vec), ('svm', svm_clf)])
        # model = svm.SVC()
    elif classifier == 'MNB':
        print('Training ' + classifier)
        # TODO: extract features for training classifier3
        # TODO: train sentiment classifier3
        # model = SklearnClassifier(MultinomialNB())
        # model.train(X)
        X = F1
        # base_model = MultinomialNB(alpha=1)
        # model = OnevsRestClassifier(base_model).fit(X,Y)
        model = MultinomialNB(alpha=1, class_prior=None, fit_prior=True)
        # model.fit(np.array(X), np.array(Y))
        # print(X)
        model.fit(X, Y)
        # joblib.dump(model, 'F3_and_SVM.pkl')
    elif classifier == 'Decision Tree':
        print('Training ' + classifier)
        # TODO: extract features for training classifier2
        # TODO: train sentiment classifier2
        # X = F3
        X = np.concatenate((F3, F4, F7), axis=1)
        model = tree.DecisionTreeClassifier()
        model.fit(X, Y)
        # lr = Pipeline([('sc', StandardScaler()),
        #                ('clf', LogisticRegression())])
        # y_hat = lr.predict(x_test)
        # y_hat = y_hat.reshape(x1.shape)
    elif classifier == 'Logistic Regression':
        print('Training ' + classifier)
        # TODO: extract features for training classifier3
        # TODO: train sentiment classifier3
        X = np.concatenate((F3, F4, F5, F7), axis=1)
        model = LogisticRegression()
        # model.fit(x, y.ravel())
        model.fit(X, Y)
    elif classifier == 'Random Forest':
        print('Training ' + classifier)
        # TODO: extract features for training classifier3
        # TODO: train sentiment classifier3
        model = RandomForestClassifier(n_estimators=100, random_state=0)
        # forest = RandomForestClassifier(criterion='entropy',
        #                                 n_estimators = 10,
        #                                 random_state = 1,
        #                                 n_jobs = 2)
        X = F2
        model.fit(X, Y)
    elif classifier == 'KNN':
        print('Training ' + classifier)
        # TODO: extract features for training classifier3
        # TODO: train sentiment classifier3
        model = KNeighborsClassifier(n_neighbors=5, p=2)
        # model = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
        X = F3
        model.fit(X, Y)
    # mymodel = model

    for testset in testsets.testsets:
        # TODO: classify tweets in test set
        # if testset == 'twitter-test1.txt':
        test = read_training_data(testset)
        testdic = perprocessing(test)
        t_corpus = get_train_corpus(testdic)
        ts_corpus = get_split_corpus(testdic)
        tF1 = get_test_ngrams(t_corpus)
        tF2 = get_tfidf(t_corpus)
        tF3 = senti_bi_lexicon(ts_corpus)
        tF4 = word_embedding2(ts_corpus)
        tF5 = get_url(ts_corpus)
        tF6 = get_mention(ts_corpus)
        tF7 = get_face(ts_corpus)
        if classifier == 'Naive Bayes':
            Xt = np.concatenate((tF3, tF4, tF5, tF7), axis=1)
        elif classifier == 'MNB':
            Xt = tF1
        elif classifier == 'Logistic Regression':
            Xt = np.concatenate((tF3, tF4, tF5, tF7), axis=1)
            # Xt = tF4
        elif classifier == 'KNN':
            Xt = tF3
        elif classifier == 'Decision Tree':
            Xt = np.concatenate((tF3, tF7, tF4), axis=1)
        elif classifier == 'Random Forest':
            Xt = tF2
        # ans_num = model.predict(t_F3)
        # model = joblib.load('F3_and_SVM.pkl')
        # ans_num = model.predict(t_F3)
        # ans_num = model.predict(t_F5)
        # Xt = np.concatenate((tF1, tF2, tF3, tF4, tF5, tF6), axis=1)
        # Xt = np.concatenate((tF1, tF2, tF3, tF4, tF5, tF6, tF7), axis=1)
        # Xt = np.concatenate((tF1, tF3, tF5, tF6, tF7), axis=1)
        # Xt = np.concatenate((tF3, tF1, tF5, tF7), axis=1)
        # Xt = np.concatenate((tF1), axis=1)
        # Xt = tF7
        # Xt = tF1
        ans_num = model.predict(Xt)
        # ans_num = model.predict(t_F1)
        # ans_num = model.predict(t_F2)
        # # print(ans)
        # # print(len(ans))
        array_to_labels = {0: "positive", 2: "negative", 1: "neutral"}
        labels = [array_to_labels[i] for i in ans_num]
        # # print(labels)
        # # ans_dic = {}
        predictions = dict(zip(list(testdic.keys()), labels))
        # print(ans_dictionary)
        # predictions = {'163361196206957578': 'neutral', '768006053969268950': 'neutral',
        #                '742616104384772304': 'neutral', '102313285628711403': 'neutral',
        #                '653274888624828198': 'neutral'}  # TODO: Remove this line, 'predictions' should be populated with the outputs of your classifier
        # predictions = ans_dictionary
        evaluation.evaluate(predictions, testset, classifier)
        evaluation.confusion(predictions, testset, classifier)
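# --- Editor's note (sketch, not part of the original script) ---
# word_embedding2() trains a separate Word2Vec model per tweet, so the
# resulting document vectors do not share one embedding space. A common
# alternative is to train a single model on the whole corpus and average each
# document's word vectors (gensim 3.x keyword `size`, matching the original's
# own usage; `doc_vectors` is a hypothetical name):
#
#   def doc_vectors(split_corpus, dim=300):
#       model = word2vec.Word2Vec(split_corpus, size=dim, min_count=1)
#       return np.array([
#           np.mean([model.wv[w] for w in doc], axis=0) if doc else np.zeros(dim)
#           for doc in split_corpus
#       ])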
[ "noreply@github.com" ]
Anderbone.noreply@github.com
5defb566e1ea2c00089fc94bb725b27db106978b
c092a30eb6e977cd021cb5d2670c5d3b4a3ac062
/markliu/settings.py
27f0f08471f7ec9cf0430cc6c6cdaba2053df9bd
[]
no_license
joskid/personal-django-blog
28a1789034f92091e622079b583d3aa1e33c0c8c
917a4182e9af49967280608f6a4378c8b386bb91
refs/heads/master
2020-12-25T02:30:36.280993
2012-12-14T15:52:59
2012-12-14T15:52:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,043
py
# Django settings for the blog markliu.me

import os
import socket
import sys

import dj_database_url

# Test to see if local_settings exists. If it doesn't exist then this is on the live host.
if os.path.isfile('local_settings.py'):
    LIVEHOST = False
else:
    LIVEHOST = True

USE_STATICFILES = False

PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

ADMINS = (
    ('Mark Liu', 'markwayneliu@gmail.com'),
)

MANAGERS = ADMINS

if LIVEHOST:
    DEBUG = os.environ.get('DJANGO_DEBUG', '').lower() == "true"

    # Heroku settings: https://devcenter.heroku.com/articles/django#database-settings
    DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}

    # Django storages
    AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    USE_STATICFILES = True
    S3_URL = 'https://s3.amazonaws.com/{0}/'.format(AWS_STORAGE_BUCKET_NAME)

    # URL prefix for static files.
    STATIC_URL = S3_URL

    GOOGLE_WEBMASTER_KEY = os.environ['GOOGLE_WEBMASTER_KEY']
    SECRET_KEY = os.environ['SECRET_KEY']
    DISQUS_API_KEY = os.environ['DISQUS_API_KEY']
    DELICIOUS_PASSWORD = os.environ['DELICIOUS_PASSWORD']

    MEDIA_ROOT = ''
    MEDIA_URL = ''
else:
    DEBUG = True
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(PROJECT_ROOT, 'mark-liu.db'),
            'USER': '',
            'PASSWORD': '',
            'HOST': '',
            'PORT': '',
        }
    }

    # Absolute path to the directory that holds media.
    # Example: "/home/media/media.lawrence.com/"
    MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')

    # URL that handles the media served from MEDIA_ROOT. Make sure to use a
    # trailing slash if there is a path component (optional in other cases).
    # Examples: "http://media.lawrence.com", "http://example.com/media/"
    MEDIA_URL = 'http://127.0.0.1:8000/media/'

    # URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
    # trailing slash.
    # Examples: "http://foo.com/media/", "/media/".
    ADMIN_MEDIA_PREFIX = '/media/admin/'

    STATIC_URL = '/media/'

    # Django storages
    AWS_ACCESS_KEY_ID = ''  # To use this to upload files to S3, this should be defined in local_settings.py
    AWS_SECRET_ACCESS_KEY = ''  # To use this to upload files to S3, this should be defined in local_settings.py

    if 'collectstatic' in sys.argv:
        USE_STATICFILES = True

STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, './media/'),
)

TEMPLATE_DEBUG = DEBUG

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Make this unique, and don't share it with anybody.

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
]

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Debug toolbar. This goes after any middleware that encodes the response's content.
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)

ROOT_URLCONF = 'markliu.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_ROOT, 'templates/'),
)

INSTALLED_APPS = (
    'django.contrib.staticfiles',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.markup',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'south',
    'coltrane',
    'tagging',
    'debug_toolbar',
    'disqus',
    'django_twitter_tags',
    'google_webmaster',
    'django_posterous',
)

# INTERNAL_IPS is used for django-debug-toolbar.
#INTERNAL_IPS = ('127.0.0.1',)

# For django-debug-toolbar.
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}

DELICIOUS_USER = 'mliu7'

DISQUS_WEBSITE_SHORTNAME = 'markliusblog'

DJANGO_POSTEROUS_SITE_NAME = 'wiscospike'   # The site name of your posterous site (yoursitename.posterous.com)
DJANGO_POSTEROUS_BLOG_MODULE = 'coltrane'   # The module of your django blog
DJANGO_POSTEROUS_BLOG_MODEL = 'Entry'       # The model where the blog posts are stored
DJANGO_POSTEROUS_TITLE_FIELD = 'title'      # The name of the title field within your blog model
DJANGO_POSTEROUS_BODY_FIELD = 'body_html'   # The name of the field where your post will be stored
DJANGO_POSTEROUS_DATE_FIELD = 'pub_date'    # The name of the field where the date of the post will be stored
DJANGO_POSTEROUS_AUTHOR_FIELD = 'author'    # The name of the field where the author of the post will be stored

##############################################################################
# Django-storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
if USE_STATICFILES:
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
AWS_QUERYSTRING_AUTH = False
AWS_HEADERS = {
    'Cache-Control': 'max-age=3600',
}

try:
    from local_settings import *
except ImportError:
    pass
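# --- Editor's note (illustrative, not part of the original settings) ---
# On the live host, dj_database_url.config() builds the DATABASES entry by
# parsing the DATABASE_URL environment variable. For example (made-up
# credentials):
#
#   import dj_database_url
#   db = dj_database_url.parse('postgres://user:secret@localhost:5432/markliu')
#   # -> a Django-style dict with ENGINE, NAME, USER, PASSWORD, HOST, PORT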
[ "markwayneliu@gmail.com" ]
markwayneliu@gmail.com
00c5033bfa5fe0ed63fc2a721b1cf2c87e5f7225
8acbb01acf5c69806037669868bd07062cf2f7a0
/Django_demo/settings.py
6a7d0c4495dd4313bc760832cf389b1e0c8847c1
[]
no_license
godhunter1993/Django
958c3ffe9c3bc28fbf0aa9f905a1867f52f7c4e4
e44c48f7c9e5aa1e5d484de3775d9902f5377b5f
refs/heads/master
2020-03-20T19:33:23.521774
2018-06-17T09:32:54
2018-06-17T09:32:54
137,642,783
1
0
null
null
null
null
UTF-8
Python
false
false
3,256
py
""" Django settings for Django_demo project. Generated by 'django-admin startproject' using Django 2.0.2. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '9anr*r86&2jplaj1i$$!)u1-)1x^4brr85=xcg78d68)i0pu17' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'learn', 'people', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', #'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'Django_demo.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'Django_demo.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media').replace('\\', '/') # media即为图片上传的根路径 MEDIA_URL = '/media/'
[ "15150568410@139.com" ]
15150568410@139.com
516909e27870935ab937ccd022e1ac2e00a7cc98
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
/cases/synthetic/sieve-big-2404.py
36bdabeba62a66987aa786e8dfdb76e27f414dcd
[]
no_license
Virtlink/ccbench-chocopy
c3f7f6af6349aff6503196f727ef89f210a1eac8
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
refs/heads/main
2023-04-07T15:07:12.464038
2022-02-03T15:42:39
2022-02-03T15:42:39
451,969,776
0
0
null
null
null
null
UTF-8
Python
false
false
31,752
py
# A resizable list of integers
class Vector(object):
    items: [int] = None
    size: int = 0

    def __init__(self:"Vector"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size

# A resizable list of integers
class Vector2(object):
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector2") -> int:
        return self.size

# A resizable list of integers
class Vector3(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0

    def __init__(self:"Vector3"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length3(self:"Vector3") -> int:
        return self.size

# A resizable list of integers
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0

    def __init__(self:"Vector4"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity3(self:"Vector4") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity4(self:"Vector4") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length3(self:"Vector4") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length4(self:"Vector4") -> int:
        return self.size

# A resizable list of integers
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0

    def __init__(self:"Vector5"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size

# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()

# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v

def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v

def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v

def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v

def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v

# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1

def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1

def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1

def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1

def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1

# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50

# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0

# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)

# Print
while i < v.length():
    print(v.get(i))
    i = i + 1
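# --- Editor's note (sketch, not part of the benchmark) ---
# As its own comment admits, sieve() above is "not really" the Sieve of
# Eratosthenes: it trial-divides every remaining element against every kept
# one. For contrast, a minimal genuine sieve in plain Python:
#
#   def eratosthenes(n):
#       is_prime = [True] * n
#       is_prime[0:2] = [False, False]
#       for p in range(2, int(n ** 0.5) + 1):
#           if is_prime[p]:
#               for m in range(p * p, n, p):
#                   is_prime[m] = False
#       return [i for i in range(n) if is_prime[i]]
#
#   # eratosthenes(50) yields [2, 3, 5, 7, ..., 47], matching sieve(vrange(2, 50))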
[ "647530+Virtlink@users.noreply.github.com" ]
647530+Virtlink@users.noreply.github.com
ef9b5b666e8749d77a7b64d744affbcd8a64a543
963cac9e78c4b742f7e7800200de8d1582799955
/test/veetou/parserTests.py
797c7be4f0f217a2fd7bbe13910a3ec1cd8fde32
[]
no_license
ptomulik/veetou
c79ceb3ca3d7ef7b261b2219489b6f0a7a83e1fa
b30be2a604f4426f832ec9805547ecd6cc9083fe
refs/heads/master
2021-01-22T17:28:57.271251
2019-01-05T01:46:43
2020-05-04T16:23:44
85,016,513
0
1
null
null
null
null
UTF-8
Python
false
false
3,386
py
#!/usr/bin/env python3
# -*- coding: utf8 -*-

import unittest

import veetou.parser as parser


class Test__Parser(unittest.TestCase):

    def test__funcions_symbols__1(self):
        self.assertIs(parser.dictmatcher , parser.functions_.dictmatcher)
        self.assertIs(parser.fullmatch , parser.functions_.fullmatch)
        self.assertIs(parser.fullmatchdict , parser.functions_.fullmatchdict)
        self.assertIs(parser.ifullmatch , parser.functions_.ifullmatch)
        self.assertIs(parser.imatch , parser.functions_.imatch)
        self.assertIs(parser.imatcher , parser.functions_.imatcher)
        self.assertIs(parser.match , parser.functions_.match)
        self.assertIs(parser.matchdict , parser.functions_.matchdict)
        self.assertIs(parser.matcher , parser.functions_.matcher)
        self.assertIs(parser.permutexpr , parser.functions_.permutexpr)
        self.assertIs(parser.reentrant , parser.functions_.reentrant)
        self.assertIs(parser.scatter , parser.functions_.scatter)
        self.assertIs(parser.search , parser.functions_.search)
        self.assertIs(parser.searchpd , parser.functions_.searchpd)
        self.assertIs(parser.skipemptylines , parser.functions_.skipemptylines)

    def test__parsererror_symbols__1(self):
        self.assertIs(parser.ParserError, parser.parsererror_.ParserError)

    def test__parser_symbols__1(self):
        self.assertIs(parser.Parser, parser.parser_.Parser)
        self.assertIs(parser.RootParser, parser.parser_.RootParser)

    def test__addressparser__1(self):
        self.assertIs(parser.AddressParser, parser.addressparser_.AddressParser)

    def test__contactparser__1(self):
        self.assertIs(parser.ContactParser, parser.contactparser_.ContactParser)

    def test__footerparser__1(self):
        self.assertIs(parser.FooterParser, parser.footerparser_.FooterParser)

    def test__headerparser__1(self):
        self.assertIs(parser.HeaderParser, parser.headerparser_.HeaderParser)

    def test__keymapparser__1(self):
        self.assertIs(parser.KeyMapParser, parser.keymapparser_.KeyMapParser)

    def test__pageparser__1(self):
        self.assertIs(parser.PageParser, parser.pageparser_.PageParser)

    def test__preambleparser__1(self):
        self.assertIs(parser.PreambleParser, parser.preambleparser_.PreambleParser)

    def test__reportparser__1(self):
        self.assertIs(parser.ReportParser, parser.reportparser_.ReportParser)

    def test__sheetparser__1(self):
        self.assertIs(parser.SheetParser, parser.sheetparser_.SheetParser)

    def test__summaryparser__1(self):
        self.assertIs(parser.SummaryParser, parser.summaryparser_.SummaryParser)

    def test__tableparser__1(self):
        self.assertIs(parser.TableParser, parser.tableparser_.TableParser)

    def test__tbodyparser__1(self):
        self.assertIs(parser.TbodyParser, parser.tbodyparser_.TbodyParser)

    def test__thparser__1(self):
        self.assertIs(parser.ThParser, parser.thparser_.ThParser)

    def test__trparser__1(self):
        self.assertIs(parser.TrParser, parser.trparser_.TrParser)


if __name__ == '__main__':
    unittest.main()

# Local Variables: #
# tab-width:4 #
# indent-tabs-mode:nil #
# End: #
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
[ "ptomulik@meil.pw.edu.pl" ]
ptomulik@meil.pw.edu.pl
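The suite above relies on assertIs, which checks object identity rather than equality; that is what proves veetou.parser re-exports the very same objects defined in its submodules. A small self-contained sketch of the distinction, using throwaway objects rather than veetou code:

import unittest

class ReexportDemo(unittest.TestCase):
    def test_identity(self):
        # assertIs uses the `is` operator, so it passes only when both
        # names point at the *same* object, not merely equal copies.
        a = object()
        b = a
        self.assertIs(a, b)
        self.assertIsNot(a, object())

if __name__ == '__main__':
    unittest.main()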
524246a6a70b4894f83eeaef95da242ddd7c0ae0
d5ab427f918849fabb5a6cc7efc15ebc71e3f9de
/graph.py
31f3a8d623c797c9295444433c22b6b9108a29e8
[]
no_license
wizacass/Intelektika_Proj
d189194b1f93e03614536fc272b704ad48cafb5e
76700550eaeaa5483ee37a337455014a912bcbb4
refs/heads/master
2023-05-09T06:35:53.806076
2021-05-25T15:46:13
2021-05-25T15:46:13
370,747,206
0
0
null
null
null
null
UTF-8
Python
false
false
2,682
py
import matplotlib.pyplot as plt
import numpy as np
import math
from collections import Counter


class Grapher:
    def __init__(self, dataset: str):
        self.dataset = dataset

    def histo(self, attribute, binary=False):
        if binary:
            counter = Counter(attribute.values)
            plt.bar(counter.keys(), counter.values())
        else:
            counts, bins = np.histogram(attribute.values)
            plt.hist(bins[:-1], bins, weights=counts)
        plt.xlabel(attribute.name)
        plt.ylabel("Count")
        plt.savefig(f"results/graphs/{self.dataset}/{attribute.name}.png")
        plt.clf()

    def scatter(self, attributeX, attributeY):
        plt.xlabel(attributeX.name)
        plt.ylabel(attributeY.name)
        plt.scatter(attributeX.values, attributeY.values, alpha=0.5)
        plt.savefig(
            f"results/graphs/{self.dataset}/{attributeX.name} on {attributeY.name}.png")
        plt.clf()

    def splom(self, attributes):
        count = len(attributes)
        r = range(0, count)
        for i in r:
            for j in r:
                ax = plt.subplot2grid((count, count), (i, j))
                ax.set_axis_off()
                if i != j:
                    ax.scatter(
                        attributes[i].values, attributes[j].values,
                        s=0.5, alpha=0.25
                    )
        plt.savefig(
            f"results/graphs/{self.dataset}/splom.png", dpi=1200
        )
        plt.clf()

    def bar_plot(self, attribute, label=""):
        counter = Counter(attribute.values)
        plt.bar(counter.keys(), counter.values())
        plt.xlabel(attribute.name)
        plt.ylabel("Count")
        plt.savefig(
            f"results/graphs/{self.dataset}/bar_{attribute.name} {label}.png")
        plt.clf()

    def box_plot(self, attributes: list, labelX: str, labelY: str):
        plt.boxplot(attributes)
        plt.xticks([1, 2], ["True", "False"])
        plt.xlabel(labelX)
        plt.ylabel(labelY)
        plt.savefig(
            f"results/graphs/{self.dataset}/box_{labelX} on {labelY}.png")
        plt.clf()

    def correlation_matrix(self, correlation_data: list, labels: list):
        plt.matshow(correlation_data)
        plt.colorbar()
        plt.xticks(range(0, len(correlation_data[0])), labels, rotation=45)
        plt.yticks(range(0, len(correlation_data[1])), labels, rotation=45)
        plt.savefig(
            f"results/graphs/{self.dataset}/correlation_matrix.png")
        plt.clf()

    def __column_count(self, size):
        # Sturges-style bin count. The original expression,
        # `1 + 3.22 * (math.log(math.e) ** size)`, always evaluates to 4.22
        # because log(e) == 1; the logarithm should be applied to `size`.
        count = 1 + 3.22 * math.log10(size)
        return int(round(count))
[ "visak.pet0@gmail.com" ]
visak.pet0@gmail.com
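Grapher only assumes that each attribute object exposes .name and .values, so any lightweight container works. A hypothetical driver for the class above, using a namedtuple and random data (the "demo" dataset name and the attributes are made up for illustration):

# Hypothetical usage of Grapher; savefig needs the target directory to exist.
import os
from collections import namedtuple
import numpy as np

Attribute = namedtuple("Attribute", ["name", "values"])

os.makedirs("results/graphs/demo", exist_ok=True)
g = Grapher("demo")
age = Attribute("age", np.random.normal(40, 10, 500))
income = Attribute("income", np.random.normal(50000, 12000, 500))
g.histo(age)
g.scatter(age, income)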
16d09cbbcb9e2143dfe40093700361425c5394ed
d92bad5384d80cf0f7e073bb7484b06514174f7a
/code/run_emcee_plPeak_noEvol_no190412.py
11ac3c75f9e4fae05fd3c145fe2fe6164833c64e
[]
no_license
tcallister/BBH-spin-q-correlations
2abe399dc927a4cdbb47ac92ad6005cb3450e676
63dc9bbf9ca0c84a94ec0c616f8c2b3cfcceed26
refs/heads/main
2023-06-12T00:22:12.803326
2021-06-29T20:35:45
2021-06-29T20:35:45
348,101,610
2
1
null
null
null
null
UTF-8
Python
false
false
3,874
py
import numpy as np
import glob
import emcee as mc
import h5py
import sys
from support import *
from likelihoods import *

# -- Set prior bounds --
priorDict = {
    'lmbda':(-5,4),
    'mMax':(60,100),
    'm0':(20,100),
    'sigM':(1,10),
    'fPeak':(0,1),
    'bq':(-2,10),
    'sig_kappa':6.,
    'mu':(-1,1),
    'log_sigma':(-1.5,0.5),
    'mMin':5.
    }

# Dicts with samples:
sampleDict = np.load("/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/input/sampleDict.pickle")
sampleDict.pop('S190412m')

mockDetections = h5py.File('/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/input/o3a_bbhpop_inj_info.hdf','r')
ifar_1 = mockDetections['injections']['ifar_gstlal'].value
ifar_2 = mockDetections['injections']['ifar_pycbc_bbh'].value
ifar_3 = mockDetections['injections']['ifar_pycbc_full'].value
detected = (ifar_1>1) + (ifar_2>1) + (ifar_3>1)
m1_det = mockDetections['injections']['mass1_source'].value[detected]
m2_det = mockDetections['injections']['mass2_source'].value[detected]
s1z_det = mockDetections['injections']['spin1z'].value[detected]
s2z_det = mockDetections['injections']['spin2z'].value[detected]
z_det = mockDetections['injections']['redshift'].value[detected]

mockDetectionsO1O2 = h5py.File('/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/input/injections_O1O2an_spin.h5','r')
m1_det = np.append(m1_det,mockDetectionsO1O2['mass1_source'])
m2_det = np.append(m2_det,mockDetectionsO1O2['mass2_source'])
s1z_det = np.append(s1z_det,mockDetectionsO1O2['spin1z'])
s2z_det = np.append(s2z_det,mockDetectionsO1O2['spin2z'])
z_det = np.append(z_det,mockDetectionsO1O2['redshift'])

pop_reweight = injection_weights(m1_det,m2_det,s1z_det,s2z_det,z_det,mMin=priorDict['mMin'])

injectionDict = {
    'm1':m1_det,
    'm2':m2_det,
    's1z':s1z_det,
    's2z':s2z_det,
    'z':z_det,
    'weights':pop_reweight
    }

nWalkers = 32
output = "/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/code/output/emcee_samples_plPeak_noEvol_no190412"

# Search for existing chains
old_chains = np.sort(glob.glob("{0}_r??.npy".format(output)))

# If no chain already exists, begin a new one
if len(old_chains)==0:

    run_version = 0

    # Initialize walkers from random positions in mu-sigma2 parameter space
    initial_lmbdas = np.random.random(nWalkers)*(-2.)
    initial_mMaxs = np.random.random(nWalkers)*20.+80.
    initial_m0s = np.random.random(nWalkers)*10.+30
    initial_sigMs = np.random.random(nWalkers)*4+1.
    initial_fs = np.random.random(nWalkers)
    initial_bqs = np.random.random(nWalkers)*2.
    initial_ks = np.random.normal(size=nWalkers,loc=0,scale=1)+2.
    initial_mus = np.random.random(nWalkers)*0.05
    initial_sigmas = np.random.random(nWalkers)*0.5-1.
    initial_walkers = np.transpose([initial_lmbdas,initial_mMaxs,initial_m0s,initial_sigMs,initial_fs,initial_bqs,initial_ks,initial_mus,initial_sigmas])

# Otherwise resume existing chain
else:

    # Load existing file and iterate run version
    old_chain = np.load(old_chains[-1])
    run_version = int(old_chains[-1][-6:-4])+1

    # Strip off any trailing zeros due to incomplete run
    goodInds = np.where(old_chain[0,:,0]!=0.0)[0]
    old_chain = old_chain[:,goodInds,:]

    # Initialize new walker locations to final locations from old chain
    initial_walkers = old_chain[:,-1,:]

print('Initial walkers:')
print(initial_walkers)

# Dimension of parameter space
dim = 9

# Run
nSteps = 10000
sampler = mc.EnsembleSampler(nWalkers,dim,logp_powerLawPeak_noEvol,args=[sampleDict,injectionDict,priorDict],threads=16)
for i,result in enumerate(sampler.sample(initial_walkers,iterations=nSteps)):
    if i%10==0:
        np.save("{0}_r{1:02d}.npy".format(output,run_version),sampler.chain)
np.save("{0}_r{1:02d}.npy".format(output,run_version),sampler.chain)
[ "thomas.a.callister@gmail.com" ]
thomas.a.callister@gmail.com
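The script above follows the standard emcee workflow: build an EnsembleSampler over a log-posterior, seed the walkers, and periodically checkpoint the chain so an interrupted run can resume from the last walker positions. A minimal self-contained sketch of that pattern, with a toy Gaussian log-posterior standing in for logp_powerLawPeak_noEvol:

# Toy emcee run; log_prob and the dimensions are illustrative only.
import numpy as np
import emcee as mc

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)  # standard normal in each dimension

nWalkers, dim = 32, 2
p0 = np.random.randn(nWalkers, dim) * 0.1  # small ball of initial walkers
sampler = mc.EnsembleSampler(nWalkers, dim, log_prob)
sampler.run_mcmc(p0, 1000)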
892e7b51d8d330acc1612ca799d59c9a0d25beb4
4b2450b65f5802f524ddb8701baa0e71c929889b
/listanelement.py
873b5eef153b5eefbef4658036e49176c3427331
[]
no_license
joedave1/python
21e89dd0638156a3600bfb7fbf7422c73a79fc51
ae51152a663aa2e512c5be7f6134c4b35d78e88d
refs/heads/master
2020-06-29T11:22:05.627400
2019-08-16T08:51:14
2019-08-16T08:51:14
200,520,497
0
0
null
null
null
null
UTF-8
Python
false
false
301
py
x=input("Enter a commc seperated list values: ").split(",") color=list(x) print("The first color is %s and the last color is %s"%(color[0],color[-1]))x=input("Enter a commc seperated list values: ").split(",") color=list(x) print("The first color is %s and the last color is %s"%(color[0],color[-1]))
[ "noreply@github.com" ]
joedave1.noreply@github.com
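The snippet reads one line, splits it on commas, and indexes the first and last items; str.split(",") already returns a list, so list(x) is only a defensive copy. A quick check of that behavior:

colors = "red,green,blue".split(",")
print(colors[0], colors[-1])  # red blue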
77aee12c7c33e199445d96492a54a4d8c66a7a51
3a4cc16bf5fa10feedbb26623f1df14594f05a25
/jogodedados.py
abd944c9571c6f8756414a25fd04e1fdaa173ffe
[]
no_license
edsoncpsilva/Curso-Python
12f89ae0049e7909cab5e98ff9adbf3dfa003d25
939ccccb9f3beb5ee7a72fa96dfd0240fbd185fa
refs/heads/master
2020-03-24T02:21:57.707429
2018-08-17T20:19:45
2018-08-17T20:19:45
142,372,961
0
0
null
null
null
null
UTF-8
Python
false
false
1,272
py
# ==> 6-sided dice game

# import library
import random

# variables
sair = 's'
qtd = 0

# interaction loop
while (sair == 's'):
    # interaction
    print()
    numero = int(input('Enter the number from 1 to 6 that you think will come up: '))
    nro_sorte = random.randrange(1, 7)
    if numero == nro_sorte:
        print("Correct, I'm feeling lucky!!!!")
        print()
        exit()
    if numero > nro_sorte:
        print('you entered a Higher number, try again')
        print()
    if numero < nro_sorte:
        print('you entered a Lower number, try again')
        print()
    print('-'*70)
    print('==> Drawn number:' + str(nro_sorte))
    print('-'*70)

    # validate the QUIT option
    ok = 'nok'
    while (ok == 'nok'):
        sair = input('Do you want to keep trying (s = yes / n = no): ')
        if sair == 's':
            ok = 'ok'
        if sair == 'S':
            sair = 's'
            ok = 'ok'
        if sair == 'n':
            ok = 'ok'
        if sair == 'N':
            sair = 'n'
            ok = 'ok'
        if ok == 'nok':
            print('Invalid option, type only "S" or "N"')

    # track the number of attempts
    if sair == 's':
        qtd = qtd + 1
        if qtd == 4:
            print()
            print('********************************************')
            print('***** exceeded the number of attempts  *****')
            print('********************************************')
            sair = 'n'

print()
print('Game Over !!!!')
[ "noreply@github.com" ]
edsoncpsilva.noreply@github.com
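The inner while loop above is a hand-rolled yes/no prompt validator. A compact, hypothetical helper that captures the same pattern in one reusable function, keeping the game's s/n convention:

# Hypothetical helper; returns only after a valid 's' or 'n' answer.
def ask_yes_no(prompt):
    while True:
        answer = input(prompt).strip().lower()
        if answer in ('s', 'n'):
            return answer
        print('Invalid option, type only "S" or "N"')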
832a298328bc29b34d0110a3029f906ad483a34d
37c3b81ad127c9e3cc26fa9168fda82460ca9bda
/Baekjoon/boj_20055_컨베이어 벨트 위의 로봇.py
dfdb3152402dc2cfac4c545e7cd087fba933dcf0
[]
no_license
potomatoo/TIL
5d85b69fdaed68966db7cfe2a565b7c64ed3e816
395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c
refs/heads/master
2021-07-08T16:19:40.410097
2021-04-19T02:33:40
2021-04-19T02:33:40
238,872,774
0
0
null
null
null
null
UTF-8
Python
false
false
751
py
from collections import deque


def work():
    global cnt
    while True:
        # Step 1: the belt (and any robots on it) rotates one cell;
        # a robot on the unloading position (N-1) steps off.
        board.rotate(1)
        robot.rotate(1)
        robot[N-1] = 0

        # Step 2: from the end of the belt backwards, move each robot
        # forward if the next cell is free and still has durability.
        for i in range(N-2, -1, -1):
            if robot[i] and not robot[i+1] and board[i+1] > 0:
                board[i+1] -= 1
                robot[i+1] = 1
                robot[i] = 0
        robot[N-1] = 0

        # Step 3: place a new robot on the loading position if possible.
        if not robot[0] and board[0] > 0:
            board[0] -= 1
            robot[0] = 1

        # Step 4: stop once K or more cells have zero durability.
        flag = 0
        for i in range(len(board)):
            if board[i] == 0:
                flag += 1
        if flag >= K:
            break
        cnt += 1


N, K = map(int, input().split())
board = deque(map(int, input().split()))
cnt = 1
robot = deque([0] * len(board))
work()
print(cnt)
[ "duseh73@gmail.com" ]
duseh73@gmail.com
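The solution models the conveyor belt as a deque and advances it with rotate(1) each step. A quick demonstration of the rotation semantics the code depends on:

# deque.rotate(1) shifts every element one position to the right, which is
# exactly how the solution above moves the belt one cell per step.
from collections import deque

belt = deque([1, 2, 3, 4])
belt.rotate(1)
print(belt)  # deque([4, 1, 2, 3]) -- the last cell wraps to the front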
e34b387068ca8ec0ce9a89b18f694f3e87b653fb
b220bd0c6c7fe6fcea00ac2ae5195c1887b8a37e
/database/dbconn.py
fa05f16ecbde4cfecc85dfd3b816446e8a13ae57
[]
no_license
itwastheband/AO3rdr-backend
8f624ddeefbc09995f1784c3092fc1ebcbedbff7
19c1ed8ecdeea3250a958006d260207c582cb371
refs/heads/master
2022-06-17T03:47:45.741986
2020-05-03T23:34:14
2020-05-03T23:34:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,198
py
import os

import boto
from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.table import Table
from boto.dynamodb2.exceptions import ItemNotFound
from decimal import Decimal
from flask import _app_ctx_stack
import time


class DBconn(object):

    def __init__(self):
        aws_access_key_id = os.environ['S3_KEY']  # I AM OPS U NO GET MY KEYS
        aws_secret_access_key = os.environ['S3_SECRET']  # DIS IS MY JOB
        self._conn = DynamoDBConnection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)
        self.works_table = Table('ao3rdr-works', connection=self._conn)
        self.immutable_fields = ['work_id', 'user_id']

    def get_user(self, user_id):
        res = self.works_table.query_2(
            user_id__eq=user_id, work_id__eq='settings', attributes=['user_id'])
        out = []
        for entry in res:
            out.append(self.serialize(entry)['user_id'])
        return out

    def add_user(self, user_id):
        """ Adding a user adds a special "work" which is used to store a
            user's settings.
        """
        return self.works_table.put_item(data={
            'user_id': user_id,
            'work_id': 'settings',
            'created': time.time()
        })

    def update_work(self, user_id, work_id, data):
        item = self.works_table.get_item(user_id=user_id, work_id=work_id)
        # update the item
        for key, value in data.iteritems():
            if key not in self.immutable_fields:
                item[key] = value
        item['db_updated'] = time.time()
        item.partial_save()

    def create_work(self, user_id, work_id, data):
        data['user_id'] = user_id
        data['work_id'] = work_id
        if 'created' not in data:
            data['created'] = time.time()
        self.works_table.put_item(data)

    def batch_update(self, data_list):
        with self.works_table.batch_write() as batch:
            for data in data_list:
                batch.put_item(data=data)

    def get_work(self, user_id, work_id):
        try:
            res = self.works_table.get_item(user_id=user_id, work_id=work_id)
        except ItemNotFound:
            return {}
        return self.serialize(res)

    def get_all_works(self, user_id):
        res = self.works_table.query_2(user_id__eq=user_id)
        for entry in res:
            yield self.serialize(entry)

    def close(self):
        self._conn.close()

    def serialize(self, item):
        out = serialize(dict(item))
        return out


def serialize(item):
    if isinstance(item, dict):
        out = {}
        for k, v in item.items():
            out[k] = serialize(v)
    elif isinstance(item, set) or isinstance(item, list):
        out = []
        for i in item:
            out.append(serialize(i))
    elif isinstance(item, Decimal):
        out = float(item)
    else:
        out = item
    return out


def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    top = _app_ctx_stack.top
    if not hasattr(top, 'db_conn'):
        top.__setattr__('db_conn', DBconn())
    return top.db_conn

'''
# Tips for working with DynamoDB
works_table = Table('ao3rdr-works', connection=conn)

# put_item has param overwrite=False
test_data = {
    'user_id': 'testuser',
    'work_id': '123456',
    'rating': 5
}
works_table.put_item(test_data)

# When using get item, must use both primary and secondary keys
works_table.get_item(user_id='testuser', work_id='123456')

# To get by user, query is OK
res = works_table.query_2(user_id__eq='testuser')
for entry in res:
    print entry
# entry useful fields: _data, keys(), and index like a dict, eg entry['work_id']

# Use the secondary index
res = works_table.query_2(rating__eq=5, index='rating-index')
for entry in res:
    print entry['work_id']

# get_item(table_name, key, attributes_to_get=None, consistent_read=False, object_hook=None)
# put_item(table_name, item, expected=None, return_values=None, object_hook=None)
'''
[ "darthkrallt@gmail.com" ]
darthkrallt@gmail.com
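get_db() memoizes one DBconn per Flask application context, so repeated calls during a request reuse the same connection. A hypothetical view function showing that usage (the route and handler are made up; S3_KEY and S3_SECRET must be set in the environment):

import json
from flask import Flask

app = Flask(__name__)

@app.route('/works/<user_id>')
def list_works(user_id):
    db = get_db()  # same DBconn for every get_db() call in this request
    return json.dumps({'works': list(db.get_all_works(user_id))})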
9305c3a78026026cae6e03d11b5982d9cee7f094
0617c812e9bf58a2dbc1c1fef35e497b054ed7e4
/venv/Lib/site-packages/pyrogram/raw/functions/stats/get_megagroup_stats.py
320398dd3f9fb86f271aeb14aaca77b3bc298f8c
[]
no_license
howei5163/my_framework
32cf510e19a371b6a3a7c80eab53f10a6952f7b2
492c9af4ceaebfe6e87df8425cb21534fbbb0c61
refs/heads/main
2023-01-27T14:33:56.159867
2020-12-07T10:19:33
2020-12-07T10:19:33
306,561,184
1
0
null
null
null
null
UTF-8
Python
false
false
2,553
py
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.

from io import BytesIO

from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any

# # # # # # # # # # # # # # # # # # # # # # # #
#               !!! WARNING !!!               #
#          This is a generated file!          #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #


class GetMegagroupStats(TLObject):  # type: ignore
    """Telegram API method.

    Details:
        - Layer: ``117``
        - ID: ``0xdcdf8607``

    Parameters:
        channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
        dark (optional): ``bool``

    Returns:
        :obj:`stats.MegagroupStats <pyrogram.raw.base.stats.MegagroupStats>`
    """

    __slots__: List[str] = ["channel", "dark"]

    ID = 0xdcdf8607
    QUALNAME = "pyrogram.raw.functions.stats.GetMegagroupStats"

    def __init__(self, *, channel: "raw.base.InputChannel", dark: Union[None, bool] = None) -> None:
        self.channel = channel  # InputChannel
        self.dark = dark  # flags.0?true

    @staticmethod
    def read(data: BytesIO, *args: Any) -> "GetMegagroupStats":
        flags = Int.read(data)

        dark = True if flags & (1 << 0) else False
        channel = TLObject.read(data)

        return GetMegagroupStats(channel=channel, dark=dark)

    def write(self) -> bytes:
        data = BytesIO()
        data.write(Int(self.ID, False))

        flags = 0
        flags |= (1 << 0) if self.dark is not None else 0
        data.write(Int(flags))

        data.write(self.channel.write())

        return data.getvalue()
[ "houwei5163" ]
houwei5163
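The write() method above shows the TL-schema convention for optional fields: each one occupies a bit in a flags integer, set only when the field is present. A standalone illustration of that bit packing, with a hypothetical second field added for contrast:

# Pure-Python sketch of TL-style flag packing; `other` is hypothetical.
def pack_flags(dark=None, other=None):
    flags = 0
    flags |= (1 << 0) if dark is not None else 0   # flags.0?true
    flags |= (1 << 1) if other is not None else 0  # hypothetical flags.1
    return flags

print(bin(pack_flags(dark=True)))            # 0b1
print(bin(pack_flags(dark=True, other=1)))   # 0b11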
b645ed1a0ad19262304bef16a69381cbb05cbc2c
4a211e279ec89239033c5fe2d6d8d3e49b48d369
/salvo/src/lib/job_control_loader.py
d179d460ec8b996e850b26e0c4f04fbb774d9d79
[ "Apache-2.0" ]
permissive
envoyproxy/envoy-perf
cfb1e8f7af806600f11ebc235c1a72939420b087
d131bc2f1a7f8ae4f640da30fd30c027735d9788
refs/heads/main
2023-08-31T14:02:50.891888
2023-08-24T16:19:26
2023-08-24T16:19:26
94,845,161
109
29
Apache-2.0
2023-08-24T16:19:28
2017-06-20T03:20:02
Python
UTF-8
Python
false
false
3,111
py
"""This object abstracts the loading of json strings into protobuf objects.""" import json import logging import yaml from google.protobuf import json_format import api.control_pb2 as proto_control log = logging.getLogger(__name__) def _load_json_doc(filename: str) -> proto_control.JobControl: """Load a disk file as JSON. This function reads the specified filename and parses the contents as JSON. Args: filename: The file whose contents are to be read as JSON data Returns: A JobControl object populated with the contents from the specified JSON file """ contents = None log.debug(f"Opening JSON file {filename}") try: with open(filename, 'r') as json_doc: contents = json_format.Parse(json_doc.read(), proto_control.JobControl()) except FileNotFoundError as file_not_found: log.exception(f"Unable to load {filename}: {file_not_found}") except json_format.Error as json_parse_error: log.exception(f"Unable to parse JSON contents {filename}: {json_parse_error}") return contents def _load_yaml_doc(filename: str) -> proto_control.JobControl: """Load a disk file as YAML. This function reads the specified filename and parses the contents as YAML. Args: filename: The file whose contents are to be read as YAML data Returns: A JobControl object populated with the contents from the specified YAML file """ log.debug(f"Opening YAML file {filename}") contents = None try: with open(filename, 'r') as yaml_doc: contents = yaml.safe_load(yaml_doc.read()) contents = json_format.Parse(json.dumps(contents), proto_control.JobControl()) except FileNotFoundError as file_not_found: log.exception(f"Unable to load {filename}: {file_not_found}") except json_format.Error as yaml_parse_error: log.exception(f"Unable to parse YAML contents {filename}: {yaml_parse_error}") return contents def load_control_doc(filename: str) -> proto_control.JobControl: """Return a JobControl object from the identified filename. This function uses the extension of the specified file to read its contents as YAML or JSON Args: filename: The file whose contents are to be read and parsed as a Job Control object. Returns: A JobControl object populated with the contents from the specified filename """ contents = None # Try loading the contents based on the file extension if filename.endswith('.json'): log.debug(f"Loading JSON file {filename}") return _load_json_doc(filename) elif filename.endswith('.yaml'): log.debug(f"Loading YAML file {filename}") return _load_yaml_doc(filename) else: log.debug(f"Auto-detecting contents of {filename}") # Attempt to autodetect the contents try: contents = _load_json_doc(filename) except json_format.Error: log.info(f"Parsing {filename} as JSON failed. Trying YAML") if not contents: try: contents = _load_yaml_doc(filename) except json_format.Error: log.info(f"Parsing {filename} as YAML failed.") return contents
[ "noreply@github.com" ]
envoyproxy.noreply@github.com
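load_control_doc picks the parser from the file extension and falls back to JSON-then-YAML auto-detection for anything else. A hypothetical driver for the module above (the input filename is made up; the function returns None when neither parser succeeds):

if __name__ == '__main__':
    # 'job_control.yaml' is an illustrative input path, not part of the module.
    control = load_control_doc('job_control.yaml')
    if control is None:
        raise SystemExit('could not parse job control document')
    print(control)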
1a701ab367bd7353d683543ba01c68dafb9c47e1
3536b829b5733807ffca9849e7ad463c43979c09
/sc2bot/agents/battle_agent.py
22e9a7bbc1b8d9e4a1550ce87856884d6aaf1e26
[]
no_license
alanxzhou/sc2bot
9b8d33dacc32074a70b8b4007f60801d6ff8037c
0eb2a3f733ea31250e29a123213b407ad9189a40
refs/heads/master
2020-09-04T17:40:32.608263
2020-03-16T23:32:59
2020-03-16T23:32:59
219,835,624
1
0
null
null
null
null
UTF-8
Python
false
false
12,381
py
from abc import ABC, abstractmethod
import copy
from collections import deque
import pickle
import matplotlib.pyplot as plt
import numpy as np
import os
import time

from pysc2.agents.scripted_agent import _xy_locs
from pysc2.agents.base_agent import BaseAgent
from pysc2.lib import actions
from pysc2.lib import features

from sc2bot.utils.epsilon import Epsilon
from sc2bot.utils.replay_memory import ReplayMemory, Transition
from sc2bot.models.nn_models import FeatureCNN, FeatureCNNFCLimited, FeatureCNNFCBig, BeaconCNN2
from sc2bot.agents.rl_agent import BaseRLAgent

import torch
import torch.nn as nn
import torch.optim as optim

_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3  # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_UNIT_TYPE = 6
_SELECTED = 7
_UNIT_HIT_POINTS = 8

FUNCTIONS = actions.FUNCTIONS
_PLAYER_ENEMY = features.PlayerRelative.ENEMY


class BattleAgent(BaseRLAgent):
    """
    Agent where the entire army is selected
    """

    def __init__(self, save_name=None, load_name=None):
        super(BattleAgent, self).__init__(save_name=save_name, load_name=load_name)
        self.initialize_model(FeatureCNNFCBig(3, screen_size=self._screen_size))
        self.steps_before_training = 5000
        self.obs = None
        self.features = [_PLAYER_RELATIVE, _UNIT_TYPE, _UNIT_HIT_POINTS]
        self.train_q_per_step = 1

    def run_loop(self, env, max_frames=0, max_episodes=10000, save_checkpoints=500, evaluate_checkpoints=10):
        """A run loop to have agents and an environment interact."""
        total_frames = 0
        start_time = time.time()

        action_spec = env.action_spec()
        observation_spec = env.observation_spec()

        self.setup(observation_spec, action_spec)

        try:
            while self.n_episodes < max_episodes:

                obs = env.reset()[0]
                # remove unit selection from the equation by selecting the
                # entire army on every new game.
                select_army = actions.FunctionCall(_SELECT_ARMY, [[False]])
                obs = env.step([select_army])[0]
                self.reset()
                episode_reward = 0

                while True:
                    total_frames += 1

                    self.obs = obs.observation["feature_screen"][self.features]
                    s = np.expand_dims(self.obs, 0)

                    if max_frames and total_frames >= max_frames:
                        print("max frames reached")
                        return

                    if obs.last():
                        print(f"Episode {self.n_episodes + 1}:\t total frames: {total_frames} Epsilon: {self._epsilon.value()}")
                        self._epsilon.increment()
                        break

                    action = self.get_action(s, unsqueeze=False)
                    env_actions = self.get_env_action(action, obs, command=_ATTACK_SCREEN)
                    try:
                        obs = env.step([env_actions])[0]
                        r = obs.reward - 10
                    except ValueError as e:
                        print(e)
                        obs = env.step([actions.FunctionCall(_NO_OP, [])])[0]
                        r = obs.reward - 1000

                    episode_reward += r
                    s1 = np.expand_dims(obs.observation["feature_screen"][self.features], 0)
                    done = r > 0

                    if self._epsilon.isTraining:
                        transition = Transition(s, action, s1, r, done)
                        self._memory.push(transition)

                    if total_frames % self.train_q_per_step == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        self.train_q(squeeze=True)

                    if total_frames % self.target_q_update_frequency == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        self._Qt = copy.deepcopy(self._Q)

                if evaluate_checkpoints > 0 and ((self.n_episodes % evaluate_checkpoints) - (evaluate_checkpoints - 1) == 0 or self.n_episodes == 0):
                    print('Evaluating...')
                    self._epsilon.isTraining = False  # we need to make sure that we act greedily when we evaluate
                    self.run_loop(env, max_episodes=max_episodes, evaluate_checkpoints=0)
                    self._epsilon.isTraining = True

                if evaluate_checkpoints == 0:
                    # this should only activate when we're inside the evaluation loop
                    self.reward.append(episode_reward)
                    print(f'Evaluation Complete: Episode reward = {episode_reward}')
                    break

                self.n_episodes += 1
                if len(self._loss) > 0:
                    self.loss.append(self._loss[-1])
                    self.max_q.append(self._max_q[-1])
                if self.n_episodes % save_checkpoints == 0:
                    if self.n_episodes > 0:
                        self.save_data(episodes_done=self.n_episodes)

        except KeyboardInterrupt:
            pass
        finally:
            print("finished")
            elapsed_time = time.time() - start_time
            try:
                print("Took %.3f seconds for %s steps: %.3f fps" % (
                    elapsed_time, total_frames, total_frames / elapsed_time))
            except:
                print("Took %.3f seconds for %s steps" % (elapsed_time, total_frames))


class BattleAgentBeacon(BattleAgent):

    def __init__(self, save_name=None, load_name=None):
        super(BattleAgentBeacon, self).__init__(save_name=save_name, load_name=load_name)
        self.initialize_model(BeaconCNN2())
        self.features = _PLAYER_RELATIVE

    def run_loop(self, env, max_frames=0, max_episodes=10000, save_checkpoints=500, evaluate_checkpoints=10):
        """A run loop to have agents and an environment interact."""
        total_frames = 0
        start_time = time.time()

        action_spec = env.action_spec()
        observation_spec = env.observation_spec()

        self.setup(observation_spec, action_spec)

        try:
            while self.n_episodes < max_episodes:

                obs = env.reset()[0]
                # remove unit selection from the equation by selecting the
                # entire army on every new game.
                select_army = actions.FunctionCall(_SELECT_ARMY, [[False]])
                obs = env.step([select_army])[0]
                self.reset()
                episode_reward = 0

                while True:
                    total_frames += 1

                    self.obs = obs.observation["feature_screen"][self.features]
                    s = np.expand_dims(self.obs, 0)

                    if max_frames and total_frames >= max_frames:
                        print("max frames reached")
                        return

                    if obs.last():
                        print(f"Episode {self.n_episodes + 1}:\t total frames: {total_frames} Epsilon: {self._epsilon.value()}")
                        self._epsilon.increment()
                        break

                    action = self.get_action(s, unsqueeze=True)
                    env_actions = self.get_env_action(action, obs, command=_ATTACK_SCREEN)
                    try:
                        obs = env.step([env_actions])[0]
                        r = obs.reward - 10
                    except ValueError as e:
                        print(e)
                        obs = env.step([actions.FunctionCall(_NO_OP, [])])[0]
                        r = obs.reward - 1000

                    episode_reward += r
                    s1 = np.expand_dims(obs.observation["feature_screen"][self.features], 0)
                    done = r > 0

                    if self._epsilon.isTraining:
                        transition = Transition(s, action, s1, r, done)
                        self._memory.push(transition)

                    if total_frames % self.train_q_per_step == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        self.train_q(squeeze=False)

                    if total_frames % self.target_q_update_frequency == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
                        self._Qt = copy.deepcopy(self._Q)

                if evaluate_checkpoints > 0 and ((self.n_episodes % evaluate_checkpoints) - (evaluate_checkpoints - 1) == 0 or self.n_episodes == 0):
                    print('Evaluating...')
                    self._epsilon.isTraining = False  # we need to make sure that we act greedily when we evaluate
                    self.run_loop(env, max_episodes=max_episodes, evaluate_checkpoints=0)
                    self._epsilon.isTraining = True

                if evaluate_checkpoints == 0:
                    # this should only activate when we're inside the evaluation loop
                    self.reward.append(episode_reward)
                    print(f'Evaluation Complete: Episode reward = {episode_reward}')
                    break

                self.n_episodes += 1
                if len(self._loss) > 0:
                    self.loss.append(self._loss[-1])
                    self.max_q.append(self._max_q[-1])
                if self.n_episodes % save_checkpoints == 0:
                    if self.n_episodes > 0:
                        self.save_data(episodes_done=self.n_episodes)

        except KeyboardInterrupt:
            pass
        finally:
            print("finished")
            elapsed_time = time.time() - start_time
            try:
                print("Took %.3f seconds for %s steps: %.3f fps" % (
                    elapsed_time, total_frames, total_frames / elapsed_time))
            except:
                print("Took %.3f seconds for %s steps" % (elapsed_time, total_frames))


class BattleAgentLimited(BattleAgent):

    def __init__(self, save_name=None, load_name=None):
        super(BattleAgentLimited, self).__init__(save_name=save_name, load_name=load_name)
        self.steps_before_training = 256
        self.features = [_PLAYER_RELATIVE, _UNIT_TYPE, _UNIT_HIT_POINTS]
        self.radius = 15
        self._screen_size = 64
        self.initialize_model(FeatureCNNFCLimited(len(self.features), self.radius, screen_size=64))

    def get_action(self, s, unsqueeze=True):
        # greedy
        if np.random.rand() > self._epsilon.value():
            s = torch.from_numpy(s).to(self.device)
            if unsqueeze:
                s = s.unsqueeze(0).float()
            else:
                s = s.float()
            with torch.no_grad():
                self._action = self._Q(s).squeeze().cpu().data.numpy()
            return self._action.argmax()
        # explore
        else:
            action = np.random.randint(0, self.radius ** 2)
            return action

    def get_env_action(self, action, obs, command=_MOVE_SCREEN):
        relative_action = np.unravel_index(action, [self.radius, self.radius])
        y_friendly, x_friendly = (obs.observation["feature_screen"][_PLAYER_RELATIVE] == _PLAYER_FRIENDLY).nonzero()
        # y_enemy, x_enemy = (obs.observation["feature_screen"][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
        if len(x_friendly) > 0:
            action = [int(relative_action[1] - self.radius/2 + round(x_friendly.mean())),
                      int(relative_action[0] - self.radius/2 + round(y_friendly.mean()))]
            friendly_coordinates = np.vstack((x_friendly, y_friendly)).T
            if bool(np.sum(np.all(action == friendly_coordinates, axis=1))):
                command = _MOVE_SCREEN
            elif abs(sum(action)) < 2:
                command = _MOVE_SCREEN
        else:
            # action = [int(relative_action[1] - self.radius/2), int(relative_action[0] - self.radius/2)]
            return actions.FunctionCall(_NO_OP, [])

        if command in obs.observation["available_actions"]:
            return actions.FunctionCall(command, [[0], action])
        else:
            return actions.FunctionCall(_NO_OP, [])
[ "alanzhou93@gmail.com" ]
alanzhou93@gmail.com
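BattleAgentLimited decodes a flat Q-value index into screen coordinates with np.unravel_index, then recentres the result on the friendly units' mean position. A small standalone check of that decoding (the index value is arbitrary):

import numpy as np

radius = 15
flat_action = 107
row, col = np.unravel_index(flat_action, [radius, radius])
print(row, col)  # 7 2, i.e. the cell at row 7, column 2 of the 15x15 window
assert row * radius + col == flat_action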
6b09cc57289aebfadf3badeff4f9bef7c017e0dc
04cd6250630b3aad49219acbae0b7682f4263afb
/sbaas/analysis/analysis_stage02_isotopomer/stage02_isotopomer_dependencies.py
7813c8ad014ac51fbf424a16b962f14cfd089746
[ "Apache-2.0" ]
permissive
SBRG/sbaas
ec04bd3a82248600328c053bc798d7d302fbaf9d
9df76bbffdd620cf8566744a2b0503935998fbe0
refs/heads/master
2021-01-21T23:29:26.713889
2015-06-24T17:16:59
2015-06-24T17:16:59
28,518,590
1
2
null
null
null
null
UTF-8
Python
false
false
297,680
py
'''isotopomer metabolomics analysis class'''

from sbaas.analysis.analysis_base import *
from .stage02_isotopomer_query import *
from .stage02_isotopomer_io import *

# Dependencies
import operator, json, csv
from copy import copy

# Dependencies from 3rd party
import scipy.io
from numpy import histogram, mean, std, loadtxt
import matplotlib as mpl
import matplotlib.pyplot as plt
import h5py

from sbaas.resources.molmass import Formula

# Dependencies from cobra
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.io.sbml import write_cobra_model_to_sbml_file
from cobra.io.mat import save_matlab_model
from cobra.manipulation.modify import convert_to_irreversible, revert_to_reversible
from cobra.flux_analysis.objective import update_objective
from cobra.flux_analysis.variability import flux_variability_analysis
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
from cobra.flux_analysis import flux_variability_analysis, single_deletion
from cobra.core.Reaction import Reaction
from cobra.core.Metabolite import Metabolite

class stage02_isotopomer_dependencies():
    def __init__(self):
        self.calculate = base_calculate();
        #variables:
        self.isotopomer_rxns_net_irreversible = {
            'ptrc_to_4abut_1':{'reactions':['PTRCTA','ABUTD'],'stoichiometry':[1,1]},
            'ptrc_to_4abut_2':{'reactions':['GGPTRCS','GGPTRCO','GGGABADr','GGGABAH'],'stoichiometry':[1,1,1,1]},
            'glu_DASH_L_to_acg5p':{'reactions':['ACGS','ACGK'],'stoichiometry':[1,1]},
            '2obut_and_pyr_to_3mop':{'reactions':['ACHBS','KARA2','DHAD2'],'stoichiometry':[1,1,1]},
            'pyr_to_23dhmb':{'reactions':['ACLS','KARA1_reverse'],'stoichiometry':[1,1]},
            #'met_DASH_L_and_ptrc_to_spmd_and_5mta':{'reactions':['METAT','ADMDC','SPMS'],
            #    'stoichiometry':[1,1,1]}, #cannot be lumped
            'chor_and_prpp_to_3ig3p':{'reactions':['ANS','ANPRT','PRAIi','IGPS'],'stoichiometry':[1,1,1,1]},
            'hom_DASH_L_and_cyst_DASH_L_to_pyr_hcys_DASH_L':{'reactions':['HSST','SHSL1','CYSTL'],'stoichiometry':[1,1,1]},
            'e4p_and_pep_to_3dhq':{'reactions':['DDPA','DHQS'],'stoichiometry':[1,1]},
            'aspsa_to_sl2a6o':{'reactions':['DHDPS','DHDPRy','THDPS'],'stoichiometry':[1,1,1]},
            'glu_DASH_L_to_glu5sa':{'reactions':['GLU5K','G5SD'],'stoichiometry':[1,1]},
            'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],'stoichiometry':[1,1]},
            'thr_DASH_L_to_gly':{'reactions':['THRD','GLYAT_reverse'],'stoichiometry':[1,1]},
            #need to remove deadend mets: athr-L: ATHRDHr, ATHRDHr_reverse; aact: AACTOOR, AOBUTDs
            'dhap_to_lac_DASH_D':{'reactions':['MGSA','LGTHL','GLYOX'],'stoichiometry':[1,1,1]},
            'hom_DASH_L_to_thr_DASH_L':{'reactions':['HSK','THRS'],'stoichiometry':[1,1]},
            '3pg_to_ser_DASH_L':{'reactions':['PGCD','PSERT','PSP_L'],'stoichiometry':[1,1,1]},
            'prpp_to_his_DASH_L':{'reactions':['ATPPRT','PRATPP','PRAMPC','PRMICI','IG3PS','IGPDH','HSTPT','HISTP','HISTD'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'UMPSYN_aerobic':{'reactions':['ASPCT','DHORTS_reverse','DHORD2','ORPT_reverse','OMPDC'],'stoichiometry':[1,1,1,1,1]},
            #'UMPSYN_anaerobic':{'reactions':['ASPCT','DHORTS_reverse','DHORD5','ORPT_reverse','OMPDC'],
            #    'stoichiometry':[1,1,1,1,1]},
            'IMPSYN_1':{'reactions':['GLUPRT','PRAGSr','PRFGS','PRAIS'],'stoichiometry':[1,1,1,1]},
            'IMPSYN_2':{'reactions':['AIRC2','AIRC3_reverse','PRASCSi','ADSL2r'],'stoichiometry':[1,1,1,1]},
            'IMPSYN_3':{'reactions':['AICART','IMPC_reverse'],'stoichiometry':[1,1]},
            'imp_to_gmp':{'reactions':['IMPD','GMPS2'],'stoichiometry':[1,1]},
            'imp_to_amp':{'reactions':['ADSS','ADSL1r'],'stoichiometry':[1,1]},
            #'utp_to_dump_anaerobic':{'reactions':['RNTR4c2','DUTPDP'],
            #    'stoichiometry':[1,1]},
            'udp_to_dump_aerobic':{'reactions':['RNDR4','NDPK6','DUTPDP'],'stoichiometry':[1,1,1]},
            #'dtmp_to_dttp':{'reactions':['DTMPK','NDPK4'],
            #    'stoichiometry':[1,1]}, #cannot be lumped
            'COASYN':{'reactions':['ASP1DC','MOHMT','DPR','PANTS','PNTK','PPNCL2','PPCDC','PTPATi','DPCOAK'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'FADSYN_1':{'reactions':['GTPCII2','DHPPDA2','APRAUR','PMDPHT','RBFSb'],'stoichiometry':[1,1,1,1,1]},
            'FADSYN_2':{'reactions':['RBFSa','DB4PS'],'stoichiometry':[1,1]},
            'FADSYN_3':{'reactions':['RBFK','FMNAT'],'stoichiometry':[1,1]},
            'NADSYN_aerobic':{'reactions':['ASPO6','QULNS','NNDPR','NNATr','NADS1','NADK'],'stoichiometry':[1,1,1,1,1,1]},
            #'NADSYN_anaerobic':{'reactions':['ASPO5','QULNS','NNDPR','NNATr','NADS1','NADK'],
            #    'stoichiometry':[1,1,1,1,1,1]},
            #'NADSALVAGE':{'reactions':['NADPPPS','NADN','NNAM','NAMNPP','NMNN','NMNDA','NMNAT','NADDP','ADPRDP'],
            #    'stoichiometry':[1,1,1,1,1,1,1,1,1]}, #cannot be lumped
            'THFSYN':{'reactions':['GTPCI','DNTPPA','DNMPPA','DHNPA2r','HPPK2','ADCS','ADCL','DHPS2','DHFS'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'GTHSYN':{'reactions':['GLUCYS','GTHS'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_1':{'reactions':['DASYN181','AGPAT181','G3PAT181'],'stoichiometry':[1,1,1]},
            'GLYCPHOSPHOLIPID_2':{'reactions':['PSSA181','PSD181'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_3':{'reactions':['PGSA160','PGPP160'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_4':{'reactions':['DASYN161','AGPAT161','G3PAT161'],'stoichiometry':[1,1,1]},
            'GLYCPHOSPHOLIPID_5':{'reactions':['PGSA181','PGPP181'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_6':{'reactions':['PSD161','PSSA161'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_7':{'reactions':['PSSA160','PSD160'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_8':{'reactions':['DASYN160','AGPAT160','G3PAT160'],'stoichiometry':[1,1,1]},
            'GLYCPHOSPHOLIPID_9':{'reactions':['PGSA161','PGPP161'],'stoichiometry':[1,1]},
            'MOLYBDOPTERIN_1':{'reactions':['MPTAT','MPTS','CPMPS'],'stoichiometry':[1,1,1]},
            'MOLYBDOPTERIN_2':{'reactions':['MOCDS','MOGDS'],'stoichiometry':[1,1]},
            'MOLYBDOPTERIN_3':{'reactions':['MOADSUx','MPTSS'],'stoichiometry':[1,1]},
            'COFACTOR_1':{'reactions':['GLUTRR','G1SAT','GLUTRS'],'stoichiometry':[1,1,1]},
            'COFACTOR_2':{'reactions':['DHNAOT4','UPPDC1','DHNCOAT','DHNCOAS','SEPHCHCS','SUCBZS','SUCBZL','PPPGO3','FCLT','CPPPGO','SHCHCS3'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
            'COFACTOR_3':{'reactions':['TYRL','AMMQLT8','HEMEOS','UPP3MT','SHCHD2','SHCHF','ENTCS','CBLAT'],'stoichiometry':[1,1,1,1,1,1,1,1]},
            'VITB6':{'reactions':['E4PD','PERD','OHPBAT','PDX5PS','PDX5PO2'],'stoichiometry':[1,1,1,1,1]},
            #'THIAMIN':{'reactions':['AMPMS2','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, # original pathway without correction
            'THIAMIN':{'reactions':['AMPMS3','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]},
            'COFACTOR_4':{'reactions':['I4FE4ST','I4FE4SR','I2FE2SS2'],'stoichiometry':[1,1,1]},
            'COFACTOR_5':{'reactions':['BMOGDS1','BMOGDS2','BMOCOS'],'stoichiometry':[1,1,1]},
            'COFACTOR_6':{'reactions':['DMPPS','GRTT','DMATT'],'stoichiometry':[1,1,1]},
            'COFACTOR_7':{'reactions':['MECDPS','DXPRIi','MEPCT','CDPMEK','MECDPDH5'],'stoichiometry':[1,1,1,1,1]},
            'COFACTOR_8':{'reactions':['LIPOS','LIPOCT'],'stoichiometry':[1,1]},
            'COFACTOR_9':{'reactions':['OMMBLHX','OMPHHX','OPHHX','HBZOPT','DMQMT','CHRPL','OMBZLM','OPHBDC','OHPHM'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'COFACTOR_10':{'reactions':['SERASr','DHBD','UPP3S','HMBS','ICHORT','DHBS'],'stoichiometry':[1,1,1,1,1,1]},
            'COFACTOR_11':{'reactions':['PMEACPE','EGMEACPR','DBTS','AOXSr2','I2FE2SR','OPMEACPD','MALCOAMT','AMAOTr','OPMEACPS','OPMEACPR','OGMEACPD','OGMEACPR','OGMEACPS','EPMEACPR','BTS5'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_1':{'reactions':['UAMAGS','UAPGR','UAGPT3','PAPPT3','GLUR_reverse','UAGCVT','UAMAS','UDCPDP','UGMDDS','UAAGDS'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_2':{'reactions':['3HAD181','3OAR181','3OAS181','EAR181x'],'stoichiometry':[1,1,1,1]},
            'CELLENV_3':{'reactions':['3HAD160','3OAR160','EAR160x','3OAS160'],'stoichiometry':[1,1,1,1]},
            'CELLENV_4':{'reactions':['EAR120x','3OAR120','3HAD120','3OAS120','EAR100x'],'stoichiometry':[1,1,1,1,1]},
            'CELLENV_5':{'reactions':['G1PACT','UAGDP','PGAMT_reverse','GF6PTA'],'stoichiometry':[1,1,1,1]},
            'CELLENV_6':{'reactions':['3OAR40','EAR40x','3OAS60','3OAR60','3HAD80','3OAS80','3OAR80','EAR60x','3HAD60','EAR80x','3HAD40'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_7':{'reactions':['3HAD161','EAR161x','3OAS161','3OAR161','3OAS141','3HAD141','3OAR121','EAR121x','3HAD121','EAR141x','T2DECAI','3OAR141','3OAS121'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_8':{'reactions':['TDPGDH','TDPDRR','TDPDRE','G1PTT'],'stoichiometry':[1,1,1,1]},
            'CELLENV_9':{'reactions':['3OAS140','3OAR140'],'stoichiometry':[1,1]},
            'CELLENV_10':{'reactions':['3HAD140','EAR140x'],'stoichiometry':[1,1]},
            'CELLENV_11':{'reactions':['3OAR100','3HAD100','3OAS100'],'stoichiometry':[1,1,1]},
            'LIPOPOLYSACCHARIDE_1':{'reactions':['COLIPAabcpp','COLIPAabctex','EDTXS1','EDTXS2','GALT1','GLCTR1','GLCTR2','GLCTR3','HEPK1','HEPK2','HEPT1','HEPT2','HEPT3','HEPT4','LPADSS','MOAT','MOAT2','MOAT3C','RHAT1','TDSK','USHD'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
            'LIPOPOLYSACCHARIDE_2':{'reactions':['AGMHE','GMHEPAT','GMHEPK','GMHEPPA','S7PI'],'stoichiometry':[1,1,1,1,1]},
            'LIPOPOLYSACCHARIDE_3':{'reactions':['U23GAAT','UHGADA','UAGAAT'],'stoichiometry':[1,1,1]},
            'LIPOPOLYSACCHARIDE_4':{'reactions':['KDOPP','KDOCT2','KDOPS'],'stoichiometry':[1,1,1]},
            'ASTPathway':{'reactions':['AST','SADH','SGDS','SGSAD','SOTA'],'stoichiometry':[1,1,1,1,1]}
            };

    #model reduction functions
    def load_ALEWt(self,anoxic = False, oxic = True, update_ampms2 = True, convert2irreversible = False):
        '''load iJO1366 with the following changes:
        1. update to AMPMS2 to account for carbon monoxide
        2. changes to uptake bounds for glucose M9 media
        3. constrain the model to use 'PFK' instead of 'F6PA', 'DHAPT' when grown on glucose
        4. constrain the model to use the physiologically preferred glutamate synthesis enzymes
        5. depending on oxygen availability, constrain the model to use the correct RNR enzymes
        6. depending on oxygen availability, constrain the model to use the correct Dihydroorotate dehydrogenase (PyrD) enzymes
        7. constrain fatty acid biosynthesis to use the physiologically preferred enzymes'''
        ijo1366_sbml = settings.workspace_data+"/models/iJO1366.xml"
        # Read in the sbml file and define the model conditions
        cobra_model = create_cobra_model_from_sbml_file(ijo1366_sbml, print_time=True)
        if update_ampms2:
            # Update AMPMS2
            coc = Metabolite('co_c','CO','carbon monoxide','c');
            cop = Metabolite('co_p','CO','carbon monoxide','p');
            coe = Metabolite('co_e','CO','carbon monoxide','e');
            cobra_model.add_metabolites([coc,cop,coe])
            ampms2_mets = {};
            ampms2_mets[cobra_model.metabolites.get_by_id('air_c')] = -1;
            ampms2_mets[cobra_model.metabolites.get_by_id('amet_c')] = -1;
            ampms2_mets[cobra_model.metabolites.get_by_id('dad_DASH_5_c')] = 1;
            ampms2_mets[cobra_model.metabolites.get_by_id('met_DASH_L_c')] = 1;
            ampms2_mets[cobra_model.metabolites.get_by_id('4ampm_c')] = 1;
            ampms2_mets[cobra_model.metabolites.get_by_id('h_c')] = 3;
            ampms2_mets[cobra_model.metabolites.get_by_id('for_c')] = 1;
            ampms2_mets[cobra_model.metabolites.get_by_id('co_c')] = 1;
            ampms2 = Reaction('AMPMS3');
            ampms2.add_metabolites(ampms2_mets);
            copp_mets = {};
            copp_mets[cobra_model.metabolites.get_by_id('co_c')] = -1;
            copp_mets[cobra_model.metabolites.get_by_id('co_p')] = 1;
            copp = Reaction('COtpp');
            copp.add_metabolites(copp_mets);
            coex_mets = {};
            coex_mets[cobra_model.metabolites.get_by_id('co_p')] = -1;
            coex_mets[cobra_model.metabolites.get_by_id('co_e')] = 1;
            coex = Reaction('COtex');
            coex.add_metabolites(coex_mets);
            cotrans_mets = {};
            cotrans_mets[cobra_model.metabolites.get_by_id('co_e')] = -1;
            cotrans = Reaction('EX_co_LPAREN_e_RPAREN_');
            cotrans.add_metabolites(cotrans_mets);
            cobra_model.add_reactions([ampms2,copp,coex,cotrans]);
            cobra_model.remove_reactions(['AMPMS2']);
        # Define the model conditions:
        system_boundaries = [x.id for x in cobra_model.reactions if x.boundary == 'system_boundary'];
        for b in system_boundaries:
            cobra_model.reactions.get_by_id(b).lower_bound = 0.0;
            cobra_model.reactions.get_by_id(b).upper_bound = 0.0;
        # Reset demand reactions
        demand = ['DM_4CRSOL',
                  'DM_5DRIB',
                  'DM_AACALD',
                  'DM_AMOB',
                  'DM_MTHTHF',
                  'DM_OXAM'];
        for d in demand:
            cobra_model.reactions.get_by_id(d).lower_bound = 0.0;
            cobra_model.reactions.get_by_id(d).upper_bound = 1000.0;
        # Change the objective
        update_objective(cobra_model,{'Ec_biomass_iJO1366_WT_53p95M':1.0})
        # Assign KOs
        # Specify media composition (M9 glucose):
        cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -10.0;
        cobra_model.reactions.get_by_id('EX_o2_LPAREN_e_RPAREN_').lower_bound = -18.0;
        #uptake = ['EX_cl_LPAREN_e_RPAREN_',
        #          'EX_so4_LPAREN_e_RPAREN_',
        #          'EX_ca2_LPAREN_e_RPAREN_',
        #          'EX_pi_LPAREN_e_RPAREN_',
        #          'EX_fe2_LPAREN_e_RPAREN_',
        #          'EX_cu2_LPAREN_e_RPAREN_',
        #          'EX_zn2_LPAREN_e_RPAREN_',
        #          'EX_cbl1_LPAREN_e_RPAREN_',
        #          'EX_mobd_LPAREN_e_RPAREN_',
        #          'EX_ni2_LPAREN_e_RPAREN_',
        #          'EX_mn2_LPAREN_e_RPAREN_',
        #          'EX_k_LPAREN_e_RPAREN_',
        #          'EX_nh4_LPAREN_e_RPAREN_',
        #          'EX_cobalt2_LPAREN_e_RPAREN_',
        #          'EX_mg2_LPAREN_e_RPAREN_'];
        uptake = ['EX_ca2_LPAREN_e_RPAREN_',
                  'EX_cbl1_LPAREN_e_RPAREN_',
                  'EX_cl_LPAREN_e_RPAREN_',
                  'EX_co2_LPAREN_e_RPAREN_',
                  'EX_cobalt2_LPAREN_e_RPAREN_',
                  'EX_cu2_LPAREN_e_RPAREN_',
                  'EX_fe2_LPAREN_e_RPAREN_',
                  'EX_fe3_LPAREN_e_RPAREN_',
                  'EX_h_LPAREN_e_RPAREN_',
                  'EX_h2o_LPAREN_e_RPAREN_',
                  'EX_k_LPAREN_e_RPAREN_',
                  'EX_mg2_LPAREN_e_RPAREN_',
                  'EX_mn2_LPAREN_e_RPAREN_',
                  'EX_mobd_LPAREN_e_RPAREN_',
                  'EX_na1_LPAREN_e_RPAREN_',
                  'EX_nh4_LPAREN_e_RPAREN_',
                  'EX_ni2_LPAREN_e_RPAREN_',
                  'EX_pi_LPAREN_e_RPAREN_',
                  'EX_sel_LPAREN_e_RPAREN_',
                  'EX_slnt_LPAREN_e_RPAREN_',
                  'EX_so4_LPAREN_e_RPAREN_',
                  'EX_tungs_LPAREN_e_RPAREN_',
                  'EX_zn2_LPAREN_e_RPAREN_'];
        for u in uptake:
            cobra_model.reactions.get_by_id(u).lower_bound = -1000.0;
        # Specify allowed secretion products
        secrete = ['EX_meoh_LPAREN_e_RPAREN_',
                   'EX_5mtr_LPAREN_e_RPAREN_',
                   'EX_h_LPAREN_e_RPAREN_',
                   'EX_co2_LPAREN_e_RPAREN_',
                   'EX_co_LPAREN_e_RPAREN_',
                   'EX_h2o_LPAREN_e_RPAREN_',
                   'EX_ac_LPAREN_e_RPAREN_',
                   'EX_fum_LPAREN_e_RPAREN_',
                   'EX_for_LPAREN_e_RPAREN_',
                   'EX_etoh_LPAREN_e_RPAREN_',
                   'EX_lac_DASH_L_LPAREN_e_RPAREN_',
                   'EX_pyr_LPAREN_e_RPAREN_',
                   'EX_succ_LPAREN_e_RPAREN_'];
        for s in secrete:
            cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
        # Constrain specific reactions
        noFlux = ['F6PA', 'DHAPT'];
        ammoniaExcess = ['GLUDy']; # PMCID: 196288
        # RNR control (DOI:10.1111/j.1365-2958.2006.05493.x)
        # Dihydroorotate dehydrogenase (PyrD) (DOI:10.1016/S0076-6879(78)51010-0, PMID: 199252, DOI:S0969212602008316 [pii])
        aerobic = ['RNDR1', 'RNDR2', 'RNDR3', 'RNDR4', 'DHORD2', 'ASPO6','LCARR','PFL','FRD2','FRD3']; # see DOI:10.1111/j.1365-2958.2011.07593.x; see DOI:10.1089/ars.2006.8.773 for a review
        anaerobic = ['RNTR1c2', 'RNTR2c2', 'RNTR3c2', 'RNTR4c2', 'DHORD5', 'ASPO5','PDH','SUCDi']; # see DOI:10.1074/jbc.274.44.31291, DOI:10.1128/JB.00440-07
        if anoxic:
            rxnList = noFlux + ammoniaExcess + anaerobic;
            for rxn in rxnList:
                cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
                cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
        elif oxic:
            rxnList = noFlux + ammoniaExcess + aerobic;
            for rxn in rxnList:
                cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
                cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
        else:
            rxnList = noFlux + ammoniaExcess;
            for rxn in rxnList:
                cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
                cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
        # Set the direction for specific reactions
        # Fatty acid biosynthesis: DOI: 10.1016/j.ymben.2010.10.007, PMCID: 372925
        fattyAcidSynthesis = ['ACCOAC', 'ACOATA', 'HACD1', 'HACD2', 'HACD3', 'HACD4', 'HACD5', 'HACD6', 'HACD7', 'HACD8', 'KAS14', 'KAS15', 'MACPD', 'MCOATA', '3OAR100', '3OAR120', '3OAR121', '3OAR140', '3OAR141', '3OAR160', '3OAR161', '3OAR180', '3OAR181', '3OAR40', '3OAR60', '3OAR80']
        fattyAcidOxidation = ['ACACT1r', 'ACACT2r', 'ACACT3r', 'ACACT4r', 'ACACT5r', 'ACACT6r', 'ACACT7r', 'ACACT8r', 'ACOAD1f', 'ACOAD2f', 'ACOAD3f', 'ACOAD4f', 'ACOAD5f', 'ACOAD6f', 'ACOAD7f', 'ACOAD8f', 'CTECOAI6', 'CTECOAI7', 'CTECOAI8', 'ECOAH1', 'ECOAH2', 'ECOAH3', 'ECOAH4', 'ECOAH5', 'ECOAH6', 'ECOAH7', 'ECOAH8']
        ndpk = ['NDPK1','NDPK2','NDPK3','NDPK4','NDPK5','NDPK7','NDPK8'];
        rxnList = fattyAcidSynthesis + fattyAcidOxidation;
        for rxn in rxnList:
            cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
            cobra_model.reactions.get_by_id(rxn).upper_bound = 1000.0;
        # convert to irreversible
        if convert2irreversible:
            convert_to_irreversible(cobra_model);
        return cobra_model;

    def reduce_model(self,cobra_model,cobra_model_outFileName=None):
        '''reduce model'''
        # Input: cobra_model
        # Output: cobra_model
        #         the lower and upper bounds have been set to 0.0
        #         for all reactions that cannot carry a flux
        cobra_model.optimize()
        sol_f = cobra_model.solution.f

        fva_data = flux_variability_analysis(cobra_model, fraction_of_optimum=0.9,
                                             objective_sense='maximize', the_reactions=None,
                                             allow_loops=True, solver='gurobi',
                                             the_problem='return', tolerance_optimality=1e-6,
                                             tolerance_feasibility=1e-6, tolerance_barrier=1e-8,
                                             lp_method=1, lp_parallel=0, new_objective=None,
                                             relax_b=None, error_reporting=None,
                                             number_of_processes=1, copy_model=False);
        #with open("data/ijo1366_irrev_fva.json", 'w') as outfile:
        #    json.dump(data, outfile, indent=4);
        #fva_data = json.load(open("data/ijo1366_irrev_fva.json"));

        # Reduce model
        rxns_noflux = [];
        for k,v in fva_data.items():
            if v['minimum'] == 0.0 and v['maximum'] == 0.0:
                cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
                cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
                rxns_noflux.append(k);

        if cobra_model_outFileName:
            write_cobra_model_to_sbml_file(cobra_model,cobra_model_outFileName)

        cobra_model.optimize()
        sol_reduced_f = cobra_model.solution.f

        # Check that the reduced model is consistent with the original model
        if not sol_f == sol_reduced_f:
            print('reduced model is inconsistent with the original model')
            print('original model solution: ' + str(sol_f))
            print('reduced model solution: ' + str(sol_reduced_f))

    def reduce_model_pfba(self,cobra_model,cobra_model_outFileName=None,pfba_outFileName=None,subs=[]):
        '''reduce model using pfba'''
        # Input: cobra_model
        #        cobra_model_outFileName
        #        subs = string of specific subsystems to reduce
        # Output: cobra_model
        #         the lower and upper bounds have been set to 0.0
        #         for all reactions that cannot carry a flux
        cobra_model.optimize()
        sol_f = cobra_model.solution.f

        # Find minimal flux solution:
        pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');

        # Reduce model
        rxns_noflux = [];
        # set lb and ub for all reactions with 0 flux to 0;
        for k,v in cobra_model.solution.x_dict.items():
            if (v < 0.0 or v == 0.0) and cobra_model.reactions.get_by_id(k).subsystem in subs:
                cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
                cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
                rxns_noflux.append(k);

        if cobra_model_outFileName:
            write_cobra_model_to_sbml_file(cobra_model,cobra_model_outFileName)

        if pfba_outFileName:
            # Write pfba solution to file
            with open(pfba_outFileName,mode='wb') as outfile:
                writer = csv.writer(outfile)
                writer.writerow(['Reaction','Flux'])
                for k,v in cobra_model.solution.x_dict.items():
                    writer.writerow([k,v]);

        cobra_model.optimize()
        sol_reduced_f = cobra_model.solution.f

        # Check that the reduced model is consistent with the original model
        if not sol_f == sol_reduced_f:
            print('reduced model is inconsistent with the original model')
            print('original model solution: ' + str(sol_f))
            print('reduced model solution: ' + str(sol_reduced_f))

    def add_net_reaction(self,cobra_model_IO, rxn_dict_I,remove_reverse=False):
        '''add a net reaction to the model after removing the individual reactions'''
        # input: rxn_dict_I = dictionary of net reaction ids and
        #                     corresponding list of individual reaction ids
        # output: cobra_model_IO = individual reactions replaced with a
        #                          net reaction
        cobra_model_IO.optimize();
        sol_orig = cobra_model_IO.solution.f;
        print("original model solution", sol_orig)

        try:
            cobra_model_tmp = cobra_model_IO.copy2();
        except KeyError as e:
            print(e);
        # make net reactions:
        rxn_dict_net = {};
        for k,v in rxn_dict_I.items():
            rxn_net = self.make_net_reaction(cobra_model_tmp, k, v['reactions'],v['stoichiometry']);
            if rxn_net:
                rxn_net.lower_bound = 0.0;
                rxn_net.upper_bound = 1000.0;
                rxn_net.objective_coefficient = 0.0;
            else:
                print('an error occurred in add_net_reaction')
                exit(-1)
            #rxn_net.reversibility = False;
            rxn_dict_net[k] = (v['reactions'],rxn_net);
        # add replace individual reactions with net reaction
        for k,v in rxn_dict_net.items():
            cobra_model_IO.remove_reactions(v[0]);
            # remove the reverse reaction if it exists for irreversible models
            if remove_reverse:
                for rxn in v[0]:
                    if '_reverse' in rxn:
                        rxn_rev = rxn.replace('_reverse','')
                        if cobra_model_IO.reactions.has_id(rxn_rev):
                            cobra_model_IO.remove_reactions(rxn_rev);
                    else:
                        rxn_rev = rxn+'_reverse';
                        if cobra_model_IO.reactions.has_id(rxn_rev):
                            cobra_model_IO.remove_reactions(rxn_rev);
            cobra_model_IO.add_reaction(v[1]);
            cobra_model_IO.optimize();
            sol_new = cobra_model_IO.solution.f;
            print(k, sol_new)

    def make_net_reaction(self,cobra_model_I, rxn_id_I, rxn_list_I,stoich_list_I):
        '''generate a net reaction from a list of individual reactions'''
        # input: rxn_list_I = list of reaction IDs
        # output: rxn_net_O = net reaction (cobra Reaction object)
        from cobra.core.Reaction import Reaction
        #rxn_net_O = cobra_model_I.reactions.get_by_id(rxn_list_I[0]);
        #for r in rxn_list_I[1:]:
        #    if cobra_model_I.reactions.get_by_id(r).reversibility:
        #        print r + " is reversible!";
        #        print "continue?"
        #    rxn_net_O += cobra_model_I.reactions.get_by_id(r);

        # check input:
        if not len(stoich_list_I) == len(rxn_list_I):
            print("error in " + rxn_id_I + ": there are " + str(len(rxn_list_I)) + " rxn ids and " + str(len(stoich_list_I)) + " coefficients");
            exit(-1);

        rxn_net_O = Reaction(rxn_id_I);
        for i,r in enumerate(rxn_list_I):
            mets = {};
            metlist = [];
            metlist = cobra_model_I.reactions.get_by_id(r).products + cobra_model_I.reactions.get_by_id(r).reactants;
            for met in metlist:
                mets[met] = cobra_model_I.reactions.get_by_id(r).get_coefficient(met)*stoich_list_I[i];
            rxn_net_O.add_metabolites(mets);
            rxn_net_O.subsystem = cobra_model_I.reactions.get_by_id(r).subsystem; #copy over the subsystem
        # check net reaction
        #if not rxn_net_O.check_mass_balance():
            #print "error: " + rxn_id_I + " is not elementally balanced";
        #print rxn_net_O.id;
        #print rxn_net_O.build_reaction_string();
        return rxn_net_O;

    def get_solBySub(self,cobra_model_I,sol_I,sub_I):
        sol_O = {};
        for k,v in sol_I.items():
            try:
                if cobra_model_I.reactions.get_by_id(k).subsystem == sub_I:
                    sol_O[k] = v;
            except:
                print(k + ' reaction not found')
        return sol_O;

    def groupBySameFlux(self,cobra_model_I,sol_I):
        flux_list = [];
        for r,f in sol_I.items():
            if not f in flux_list and float(f)>0.0:
                flux_list.append(f)
        sameFlux_O = {};
        for f in flux_list:
            rxn_list = [];
            for r,v in sol_I.items():
                if v==f:
                    rxn_list.append(r);
            stoich = [1]*len(rxn_list)
            rxnName = '';
            for rxn in rxn_list:
                rxnName = rxnName + rxn + '_';
            rxnName = rxnName[:-1];
            # check that the reaction name is less than 225 characters
            if len(rxnName)>224:
                rxnName = rxnName[:224];
            sameFlux_O[rxnName] = {'reactions':rxn_list,
                                   'stoichiometry':stoich,
                                   'flux':f};
            #netRxn = make_net_reaction(cobra_model_copy,rxnName,rxn_list,stoich)
            #sameFlux_O[rxnName] = {'reactions':rxn_list,
            #                       'stoichiometry':stoich,
            #                       'flux':f,
            #                       'net':netRxn};
        return sameFlux_O

    def add_net_reaction_subsystem(self,cobra_model_IO,sol_I,subs_I):
        '''make net reactions for specific subsystems grouped by
        reactions that have the same flux from pfba'''
        #input: cobra_model
        #       sol_I = pfba solution
        #       sub_I = list of model subsystems
        #output: cobra_model

        # convert model to irreversible
        # convert_to_irreversible(cobra_model_IO);

        # Make net reactions for pathways outside of the scope
        # of the isotopomer model
        for s in subs_I:
            sol = self.get_solBySub(cobra_model_IO,sol_I,s)
            sameFlux = self.groupBySameFlux(cobra_model_IO,sol)
            netRxns = {};
            for k,v in sameFlux.items():
                if len(v['reactions'])>1:
                    netRxns[k] = v;
            self.add_net_reaction(cobra_model_IO,netRxns);
            # add subsystem information back in
            for k in sameFlux.keys():
                cobra_model_IO.reactions.get_by_id(k).subsystem = s
        self.remove_noflux_reactions(cobra_model_IO,sol_I,subs_I)

        # convert model back to reversible
        # revert_to_reversible(cobra_model_IO);

    def remove_noflux_reactions(self,cobra_model,sol=None,subs=[]):
        '''remove noflux reactions'''
        # Input: cobra_model
        #        sol = pfba solution
        #        subs = string of specific subsystems to reduce
        # Output: cobra_model
        #         if the lower and upper bounds are zero, the reactions
        #         are removed
        cobra_model.optimize()
        sol_f = cobra_model.solution.f

        # Reduce model
        rxns_noflux = [];
        # set lb and ub for all reactions with 0 flux to 0;
        if sol:
            if subs:
                for k,v in sol.items():
                    try:
                        if (float(v) < 0.0 or float(v) == 0.0) and cobra_model.reactions.get_by_id(k).subsystem in subs:
                            cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
                            cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
                            cobra_model.remove_reactions(k)
                            rxns_noflux.append(k);
                    except:
                        print('reaction is not in model: ' + k)
            else:
                for k,v in sol.items():
                    try:
                        if (float(v) < 0.0 or float(v) == 0.0):
                            cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
                            cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
                            cobra_model.remove_reactions(k)
                            rxns_noflux.append(k);
                    except:
                        print('reaction is not in model: ' + k)
        else:
            if subs:
                for r in cobra_model.reactions:
                    if r.lower_bound == 0.0 and r.upper_bound == 0.0 and cobra_model.reactions.get_by_id(r.id).subsystem in subs:
                        cobra_model.remove_reactions(r.id)
            else:
                for r in cobra_model.reactions:
                    if r.lower_bound == 0.0 and r.upper_bound == 0.0:
                        cobra_model.remove_reactions(r.id)

        cobra_model.optimize()
        sol_reduced_f = cobra_model.solution.f

        # Check that the reduced model is consistent with the original model
        if not sol_f == sol_reduced_f:
            print('reduced model is inconsistent with the original model')
            print('original model solution: ' + str(sol_f))
            print('reduced model solution: ' + str(sol_reduced_f))

    def get_reactionsInfo(self,cobra_model):
        '''return the number of reactions and the number of reactions
        that cannot carry a flux (i.e. lb and ub of 0.0)'''
        nrxn_O = len(cobra_model.reactions);
        nrxn_noflux_O = 0;
        for r in cobra_model.reactions:
            if r.lower_bound == 0.0 and r.upper_bound == 0.0:
                nrxn_noflux_O += 1;
        return nrxn_O, nrxn_noflux_O

    #model reduction iteration functions
    def makeIsotopomerModel_iteration01(self,pfba_file,netrxn_irreversible_model_filename,fva_reduced_model_filename,reduced_lbub_filename):
        '''iteration 1: identification of reactions that can be lumped in
        pathways outside the model scope'''
        cobra_model = self.load_ALEWt();
        # Make the model irreversible for downstream manipulations:
        convert_to_irreversible(cobra_model);
        # Add lumped isotopomer reactions
        self.add_net_reaction(cobra_model,self.isotopomer_rxns_net_irreversible);
        # Find minimal flux solution:
        pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
        # Write pfba solution to file
        with open(pfba_file,mode='wb') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Reaction','Flux'])
            for k,v in cobra_model.solution.x_dict.items():
                writer.writerow([k,v]);
        # Read in pfba solution
        pfba_sol = {};
        with open(pfba_file,mode='r') as infile:
            dictreader = csv.DictReader(infile)
            for r in dictreader:
                pfba_sol[r['Reaction']] = r['Flux'];
        # Make net reactions for pathways outside of the scope
        # of the isotopomer model
        subs = ['Cell Envelope Biosynthesis',
                'Glycerophospholipid Metabolism',
                'Lipopolysaccharide Biosynthesis / Recycling',
                'Membrane Lipid Metabolism',
                'Murein Biosynthesis',
                'Murein Recycling',
                'Cofactor and Prosthetic Group Biosynthesis',
                #'Transport, Inner Membrane',
                #'Transport, Outer Membrane',
                #'Transport, Outer Membrane Porin',
                'tRNA Charging',
                'Unassigned',
                'Exchange',
                'Inorganic Ion Transport and Metabolism',
                'Nitrogen Metabolism'];
        self.add_net_reaction_subsystem(cobra_model,pfba_sol,subs);
        self.remove_noflux_reactions(cobra_model,pfba_sol,['Transport, Outer Membrane Porin','Transport, Inner Membrane','Transport, Outer Membrane'])
        revert_to_reversible(cobra_model);
        # write model to sbml
        write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
        # Reduce model using FVA:
        self.reduce_model(cobra_model,fva_reduced_model_filename)
        # Remove all reactions with 0 flux
        self.remove_noflux_reactions(cobra_model);
        with open(reduced_lbub_filename,mode='wb') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
            for r in cobra_model.reactions:
                writer.writerow([r.id,
                                 r.build_reaction_string(),
                                 r.lower_bound,
                                 r.upper_bound,
                                 r.subsystem]);

    def makeIsotopomerModel_iteration02(self,pfba_filename,fva_reduced_model_filename,netrxn_irreversible_model_filename,reduced_lbub_filename):
        '''iteration 2: addition of finalized lumped reactions that are
        in pathways that are within the scope of the model and reduction
        by removing reactions with zero optimal minimal flux outside the
        scope of the model'''
        cobra_model = self.load_ALEWt();
        # Make the model irreversible for downstream manipulations:
        convert_to_irreversible(cobra_model);
        cobra_model.optimize();
        # Add lumped isotopomer reactions
        self.add_net_reaction(cobra_model,self.isotopomer_rxns_net_irreversible,True);
        cobra_model.optimize();
        # Find minimal flux solution:
        pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
        # Write pfba solution to file
        with open(pfba_filename,mode='wb') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Reaction','Flux','Subsystem'])
            for k,v in cobra_model.solution.x_dict.items():
                writer.writerow([k,v,cobra_model.reactions.get_by_id(k).subsystem]);
        # Read in pfba solution
        pfba_sol = {};
        with open(pfba_filename,mode='r') as infile:
            dictreader = csv.DictReader(infile)
            for r in dictreader:
                pfba_sol[r['Reaction']] = r['Flux'];
        # remove noflux reactions for pathways outside of the scope
        # of the isotopomer model
        subs = ['Cell Envelope Biosynthesis',
                'Glycerophospholipid Metabolism',
                'Lipopolysaccharide Biosynthesis / Recycling',
                'Membrane Lipid Metabolism',
                'Murein Biosynthesis',
                'Murein Recycling',
                'Cofactor and Prosthetic Group Biosynthesis',
                'Transport, Inner Membrane',
                'Transport, Outer Membrane',
                'Transport, Outer Membrane Porin',
                'tRNA Charging',
                'Unassigned',
                #'Exchange',
                'Inorganic Ion Transport and Metabolism',
                'Nitrogen Metabolism',
                'Alternate Carbon Metabolism'];
        self.remove_noflux_reactions(cobra_model,pfba_sol,subs)
        # Reduce model using FVA:
        self.reduce_model(cobra_model,fva_reduced_model_filename)
        # Reset secretion products that may have been turned off
        secrete = ['EX_meoh_LPAREN_e_RPAREN_',
                   'EX_5mtr_LPAREN_e_RPAREN_',
                   'EX_h_LPAREN_e_RPAREN_',
                   'EX_co2_LPAREN_e_RPAREN_',
                   'EX_co_LPAREN_e_RPAREN_',
                   'EX_h2o_LPAREN_e_RPAREN_',
                   'EX_ac_LPAREN_e_RPAREN_',
                   'EX_fum_LPAREN_e_RPAREN_',
                   'EX_for_LPAREN_e_RPAREN_',
                   'EX_etoh_LPAREN_e_RPAREN_',
                   'EX_lac_DASH_L_LPAREN_e_RPAREN_',
                   'EX_pyr_LPAREN_e_RPAREN_',
                   'EX_succ_LPAREN_e_RPAREN_'];
        for s in secrete:
            cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
        # Remove all reactions with 0 flux
        r1,r2 = self.get_reactionsInfo(cobra_model);
        while r2 !=0:
            self.remove_noflux_reactions(cobra_model);
            r1,r2 = self.get_reactionsInfo(cobra_model);
            print(r1,r2);
        # write model to sbml
        write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
        with open(reduced_lbub_filename,mode='wb') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
            for r in cobra_model.reactions:
                writer.writerow([r.id,
                                 r.build_reaction_string(),
                                 r.lower_bound,
                                 r.upper_bound,
                                 r.subsystem]);

    def makeIsotopomerModel_cobraMAT(self,model_filename,xml_filename,mat_filename,csv_filename,isotopomer_mapping_filename,ko_list=[],flux_dict={},description=None):
        '''iteration 3: Remove reactions that are thermodynamically
        unfavorable and add isotopomer data'''
        # Read in the sbml file and define the model conditions
        cobra_model = create_cobra_model_from_sbml_file(model_filename, print_time=True)
        # Modify glucose uptake:
        if cobra_model.reactions.has_id('EX_glc_LPAREN_e_RPAREN__reverse'):
            lb,ub = cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').lower_bound,cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').upper_bound;
            EX_glc_mets = {};
            EX_glc_mets[cobra_model.metabolites.get_by_id('glc_DASH_D_e')] = -1;
            EX_glc = Reaction('EX_glc_LPAREN_e_RPAREN_');
            EX_glc.add_metabolites(EX_glc_mets);
            cobra_model.add_reaction(EX_glc)
            cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -ub;
            cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').upper_bound = lb;
            cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN__reverse'])
        ## Remove thermodynamically infeasible reactions:
        #infeasible = [];
        #loops = [];
        #cobra_model.remove_reactions(infeasible + loops);
        # Apply KOs, if any:
        for ko in ko_list:
            cobra_model.reactions.get_by_id(ko).lower_bound = 0.0;
            cobra_model.reactions.get_by_id(ko).upper_bound = 0.0;
        # Apply flux constraints, if any:
        for rxn,flux in flux_dict.items():
            cobra_model.reactions.get_by_id(rxn).lower_bound = flux['lb'];
            cobra_model.reactions.get_by_id(rxn).upper_bound = flux['ub'];
        # Change description, if any:
        if description:
            cobra_model.description = description;
        # Read
in isotopomer model isotopomer_mapping = self.read_isotopomer_mapping_csv(isotopomer_mapping_filename); #broken isotopomer_str = self.build_isotopomer_str(isotopomer_mapping); # write model to sbml write_cobra_model_to_sbml_file(cobra_model,xml_filename) # Add isotopomer field to model for r in cobra_model.reactions: if r.id in isotopomer_str: cobra_model.reactions.get_by_id(r.id).isotopomer = isotopomer_str[r.id]; else: cobra_model.reactions.get_by_id(r.id).isotopomer = ''; # Add null basis: cobra_model_array = cobra_model.to_array_based_model(); N = self.calculate.null(cobra_model_array.S.todense()) #convert S from sparse to full and compute the nullspace cobra_model.N = N; # solve and save pFBA for later use: optimize_minimal_flux(cobra_model,True,solver='gurobi'); # add match field: match = numpy.zeros(len(cobra_model.reactions)); cobra_model.match = match; # write model to mat save_matlab_model_isotopomer(cobra_model,mat_filename); with open(csv_filename,mode='wb') as outfile: writer = csv.writer(outfile) writer.writerow(['Reaction','Formula','LB','UB','Genes','Subsystem','Isotopomer']) for r in cobra_model.reactions: writer.writerow([r.id, r.build_reaction_string(), r.lower_bound, r.upper_bound, r.gene_reaction_rule, r.subsystem, r.isotopomer]); #ecoli_INCA modifications def expand_ecoliINCA01(self,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O): '''expand the INCA Ecoli model to account for additional metabolites''' query = stage02_isotopomer_query() # get the xml model cobra_model_sbml = '' cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I); # load the model if cobra_model_sbml: if cobra_model_sbml['file_type'] == 'sbml': with open('data/cobra_model_tmp.xml','wb') as file: file.write(cobra_model_sbml['model_file']); file.close() cobra_model = None; cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True); elif cobra_model_sbml['file_type'] == 'json': with open('data/cobra_model_tmp.json','wb') as file: file.write(cobra_model_sbml['model_file']); file.close() cobra_model = None; cobra_model = load_json_model('data/cobra_model_tmp.json'); else: print('file_type not supported') #get the atomMapping_reactions atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I); #change the mapping_id for cnt,row in enumerate(atomMappingReactions): atomMappingReactions[cnt]['mapping_id']=mapping_id_O; #expand the model to include glyoxylate shunt: #get metabolites not in the model met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','glx_c'); glx = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') glx.charge = met_row['charge'] #get metabolites in the model icit = cobra_model.metabolites.get_by_id('icit_c') succ = cobra_model.metabolites.get_by_id('succ_c') accoa = cobra_model.metabolites.get_by_id('accoa_c') mal = cobra_model.metabolites.get_by_id('mal_DASH_L_c') #make ICL rxn_mets = {}; rxn_mets[icit] = -1; rxn_mets[succ] = 1; rxn_mets[glx] = 1; rxn = Reaction('ICL'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='ICL'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] 
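        # The `self.calculate.null(...)` call earlier in this method densifies S and
        # computes a null-space basis for the stoichiometric matrix. A minimal
        # SVD-based sketch of such a helper (an assumption about its behavior,
        # not the actual implementation):
        def null_basis(S, atol=1e-13):
            import numpy
            # right-singular vectors whose singular values are ~0 span null(S)
            u, s, vh = numpy.linalg.svd(S)
            rank = int((s > atol).sum())
            return vh[rank:].conj().T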
row_tmp['products_stoichiometry_tracked']=[1,1] row_tmp['reactants_ids_tracked']=['icit_c'] row_tmp['products_ids_tracked']=['glx_c','succ_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C"], ["C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1], [0, 1, 2, 3]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['ab','fcde'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make MALS rxn_mets = {}; rxn_mets[glx] = -1; rxn_mets[accoa] = -1; rxn_mets[mal] = 1; rxn = Reaction('MALS'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='MALS'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1,-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['accoa_c','glx_c'] row_tmp['products_ids_tracked']=['mal_DASH_L_c'] row_tmp['reactants_elements_tracked']=[["C", "C"], ["C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1], [0, 1]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3]] row_tmp['reactants_mapping']=['ab','cd'] row_tmp['products_mapping']=['cdba'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #add in glucose transporters and intracellular glc #get metabolites not in the model met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c"); glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') glc_c.charge = met_row['charge'] met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e"); glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e') glc_e.charge = met_row['charge'] glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e') glcext.charge = met_row['charge'] glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e') glcpre.charge = met_row['charge'] #get metabolites in the model pep = cobra_model.metabolites.get_by_id('pep_c') pyr = cobra_model.metabolites.get_by_id('pyr_c') g6p = cobra_model.metabolites.get_by_id('g6p_c') #make EX_glc_LPAREN_e_RPAREN_ rxn_mets = {}; rxn_mets[glcext] = -1; rxn_mets[glc_e] = 1; rxn = Reaction('EX_glc_LPAREN_e_RPAREN_'); cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext'] row_tmp['products_ids_tracked']=['glc_DASH_D_e'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] 
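        # The *_elements_tracked / *_positions_tracked entries written out by hand
        # in these rows are fully determined by the mapping strings; a hedged helper
        # that derives them (mirroring the enumerate(mapping) loops used later in
        # this module):
        def tracked_from_mapping(mapping):
            positions = list(range(len(mapping)))
            elements = ['C'] * len(mapping)   # carbon-only tracking, as elsewhere here
            return elements, positions
        # e.g. tracked_from_mapping('abcdef') -> (['C']*6, [0, 1, 2, 3, 4, 5])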
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make EX_glc_LPAREN_e_RPAREN__pre rxn_mets = {}; rxn_mets[glcpre] = -1; rxn_mets[glc_e] = 1; rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre'); cobra_model.remove_reactions(['v60']); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre'] row_tmp['products_ids_tracked']=['glc_DASH_D_e'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c" rxn_mets = {}; rxn_mets[glc_e] = -1; rxn_mets[pep] = -1; rxn_mets[g6p] = 1; rxn_mets[pyr] = 1; rxn = Reaction('GLCptspp'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='GLCptspp'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1,-1] row_tmp['products_stoichiometry_tracked']=[1,1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c'] row_tmp['products_ids_tracked']=['g6p_c','pyr_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]] row_tmp['reactants_mapping']=['abcdef','ghi'] row_tmp['products_mapping']=['abcdef','ghi'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c" rxn_mets = {}; rxn_mets[glc_e] = -1; rxn_mets[glc_c] = 1; rxn = Reaction('GLCt2pp'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='GLCt2pp'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e'] row_tmp['products_ids_tracked']=['glc_DASH_D_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] 
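        # Tracked atoms must be conserved by a mapping: every reactant carbon letter
        # should appear exactly once across the product strings (ICL above maps
        # 'abcdef' -> ['ab','fcde']). A small sanity check one could run on each row:
        def mapping_is_balanced(reactants_mapping, products_mapping):
            return sorted(''.join(reactants_mapping)) == sorted(''.join(products_mapping))
        assert mapping_is_balanced(['abcdef'], ['ab', 'fcde'])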
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c" rxn_mets = {}; rxn_mets[glc_c] = -1; rxn_mets[g6p] = 1; rxn = Reaction('HEX1'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='HEX1'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_c'] row_tmp['products_ids_tracked']=['g6p_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); ##expand the model #acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c'); #cit = cobra_model.metabolites.get_by_id('cit_c') #icit = cobra_model.metabolites.get_by_id('icit_c') #e4p = cobra_model.metabolites.get_by_id('e4p_c') #r5p = cobra_model.metabolites.get_by_id('r5p_c') #phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c') #his = cobra_model.metabolites.get_by_id('his_DASH_L_c') #phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c'); #prpp = Metabolite('prpp_c','C5H8O14P3','5-Phospho-alpha-D-ribose 1-diphosphate','c'); ## update selected reactions to account for new metabolites #for rxn,row in enumerate(atomMappingReactions): # if row['rxn_id'] == 'ACONTa_ACONTb': # #split ACONTa_ACONTb # aconta_mets = {}; # aconta_mets[cit] = -1; # aconta_mets[acon] = 1; # aconta = Reaction('ACONTa'); # aconta.add_metabolites(aconta_mets); # cobra_model.remove_reactions(['ACONTa_ACONTb']); # cobra_model.add_reactions([aconta]); # cobra_model.repair(); # # Update the mapping ids # atomMappingReactions[rxn]['products_ids_tracked']=['acon_DASH_C_c'] # atomMappingReactions[rxn]['comment_']='updated' # elif row['rxn_id'] == 'PheSYN': # #split PheSYN to add in phpyr # # Update the mapping_ids # atomMappingReactions[rxn]['mapping_id']=mapping_id_O; # atomMappingReactions[rxn]['rxn_id']=rxn_ids[rxn]; # atomMappingReactions[rxn]['rxn_description']=''; # atomMappingReactions[rxn]['rxn_equation']=''; # atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[] # atomMappingReactions[rxn]['products_stoichiometry_tracked']=[] # atomMappingReactions[rxn]['reactants_ids_tracked']=[] # atomMappingReactions[rxn]['products_ids_tracked']=[] # atomMappingReactions[rxn]['reactants_elements_tracked']=[] # atomMappingReactions[rxn]['products_elements_tracked']=[] # atomMappingReactions[rxn]['reactants_positions_tracked']=[] # atomMappingReactions[rxn]['products_positions_tracked']=[] # atomMappingReactions[rxn]['reactants_mapping']=[] # atomMappingReactions[rxn]['products_mapping']=[] # atomMappingReactions[rxn]['used_']=True # 
atomMappingReactions[rxn]['comment_']=None # elif row['rxn_id'] == 'HisSYN': # # split HisSYN to add in prpp # #cobra_model.reactions.get_by_id(rxn_ids[rxn]) # #cobra_model.reactions.get_by_id(rxn_ids[rxn]) # # Update the mapping_ids # atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']] # # combine TKT1a and TKT1b # # combine TKT2a and TKT2b # # split PPC_PPCK # # split PTAr_ACKr_ACS ## add in ACONTb #acontb_mets = {}; #acontb_mets[acon] = -1; #acontb_mets[icit] = 1; #acontb = Reaction('ACONTb'); #acontb.add_metabolites(acontb_mets); #cobra_model.add_reactions([acontb]); #cobra_model.repair(); ## add in ACONTb mapping #row={}; #row['mapping_id']=mapping_id_O; #row['rxn_id']='ACONTb'; #row['rxn_description']=''; #row['rxn_equation']=''; #row['reactants_stoichiometry_tracked']=[-1] #row['products_stoichiometry_tracked']=[1] #row['reactants_ids_tracked']=['acon_DASH_C_c'] #row['products_ids_tracked']=['icit_c'] #row['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] #row['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] #row['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] #row['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] #row['reactants_mapping']=['abcdef'] #row['products_mapping']=['abcdef'] #row['used_']=True #row['comment_']='added' #atomMappingReactions.append(row) ## add in e4p_to_phpyr ## add in r5p_to_prp #r5p_to_prpp_mets = {}; #r5p_to_prpp_mets[e4p] = -1; #r5p_to_prpp_mets[prpp] = 1; #r5p_to_prpp = Reaction('r5p_to_prpp'); #r5p_to_prpp.add_metabolites(r5p_to_prpp_mets); #cobra_model.add_reactions([r5p_to_prpp]); #cobra_model.repair(); ## add in r5p_to_prpp mapping #row={}; #row['mapping_id']=mapping_id_O; #row['rxn_id']='r5p_to_prpp'; #row['rxn_description']=''; #row['rxn_equation']=''; #row['reactants_stoichiometry_tracked']=[-1] #row['products_stoichiometry_tracked']=[1] #row['reactants_ids_tracked']=['r5p_c'] #row['products_ids_tracked']=['prpp_c'] #row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]] #row['products_elements_tracked']=[["C", "C", "C", "C", "C"]] #row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]] #row['products_positions_tracked']=[[0, 1, 2, 3, 4]] #row['reactants_mapping']=['abcde'] #row['products_mapping']=['abcde'] #row['used_']=True #row['comment_']='added' #atomMappingReactions.append(row) # write the model to a temporary file save_json_model(cobra_model,'data/cobra_model_tmp.json') # add the model information to the database io = stage02_isotopomer_io() dataStage02IsotopomerModelRxns_data = []; dataStage02IsotopomerModelMets_data = []; dataStage02IsotopomerModels_data,\ dataStage02IsotopomerModelRxns_data,\ dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json') io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data); io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data); io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data); #add atomMappingReactions to the database io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions); def expand_ecoliINCA02(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O): '''expand the INCA Ecoli model to account for additional metabolites''' query = stage02_isotopomer_query() # get the xml model cobra_model_sbml = '' cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I); # load the model if 
cobra_model_sbml:
            if cobra_model_sbml['file_type'] == 'sbml':
                with open('data/cobra_model_tmp.xml','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    file.close()
                cobra_model = None;
                cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
            elif cobra_model_sbml['file_type'] == 'json':
                with open('data/cobra_model_tmp.json','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    file.close()
                cobra_model = None;
                cobra_model = load_json_model('data/cobra_model_tmp.json');
            else:
                print('file_type not supported')
        #get the atomMapping_reactions
        atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
        #change the mapping_id
        for cnt,row in enumerate(atomMappingReactions):
            atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
        accoa = cobra_model.metabolites.get_by_id('accoa_c')
        #expand the model to include ATPSYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','atp_c');
        atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        atp.charge = met_row['charge']
        #get metabolites in the model
        r5p = cobra_model.metabolites.get_by_id('r5p_c')
        fthf = cobra_model.metabolites.get_by_id('10fthf_c')
        gly = cobra_model.metabolites.get_by_id('gly_c')
        co2 = cobra_model.metabolites.get_by_id('co2_c')
        glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
        gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
        asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
        fum = cobra_model.metabolites.get_by_id('fum_c')
        #make ATPSYN (irreversible)
        # a dict keyed by Metabolite collapses repeated assignments (the second
        # rxn_mets[fthf] = -1 simply overwrote the first), so doubled species are
        # written with explicit coefficients here; the 2x species match the
        # tracked-compound calls for ATPSYN later in this method
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[fthf] = -2;
        rxn_mets[gly] = -1;
        rxn_mets[co2] = -1;
        rxn_mets[gln] = -1;
        rxn_mets[asp] = -2;
        rxn_mets[atp] = 1;
        rxn_mets[glu] = 1;
        rxn_mets[fum] = 2;
        rxn = Reaction('ATPSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include GTPSYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','gtp_c');
        gtp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        gtp.charge = met_row['charge']
        #get metabolites in the model (same handles as above)
        r5p = cobra_model.metabolites.get_by_id('r5p_c')
        fthf = cobra_model.metabolites.get_by_id('10fthf_c')
        gly = cobra_model.metabolites.get_by_id('gly_c')
        co2 = cobra_model.metabolites.get_by_id('co2_c')
        glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
        gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
        asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
        fum = cobra_model.metabolites.get_by_id('fum_c')
        #make GTPSYN (irreversible); doubled species use explicit coefficients,
        #matching the tracked-compound calls for GTPSYN later in this method
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[fthf] = -2;
        rxn_mets[gly] = -1;
        rxn_mets[co2] = -1;
        rxn_mets[gln] = -2;
        rxn_mets[asp] = -1;
        rxn_mets[gtp] = 1;
        rxn_mets[glu] = 2;
        rxn_mets[fum] = 1;
        rxn = Reaction('GTPSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include VPAMTr_reverse and VPAMTr
        #(ids corrected from 'VPMATr*' so they match the 'VPAMTr*' atom-mapping calls below):
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','3mob_c');
        mob3 = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        mob3.charge = met_row['charge']
        #get metabolites in the model
        val = cobra_model.metabolites.get_by_id('val_DASH_L_c')
        ala = cobra_model.metabolites.get_by_id('ala_DASH_L_c')
        pyr = cobra_model.metabolites.get_by_id('pyr_c')
        #make VPAMTr_reverse (irreversible)
        rxn_mets = {};
        rxn_mets[val] = -1;
        rxn_mets[pyr] = -1;
        rxn_mets[mob3] = 1;
        rxn_mets[ala] = 1;
        rxn = Reaction('VPAMTr_reverse');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #make VPAMTr (irreversible)
        rxn_mets = {};
        rxn_mets[mob3] = -1;
        rxn_mets[ala] = -1;
        rxn_mets[val] = 1;
        rxn_mets[pyr] = 1;
        rxn = Reaction('VPAMTr');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include COASYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','coa_c');
        coa = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        coa.charge = met_row['charge']
        #get metabolites in the model
        cys = cobra_model.metabolites.get_by_id('cys_DASH_L_c')
        mlthf = cobra_model.metabolites.get_by_id('mlthf_c')
        #make COASYN (irreversible); two CO2 are produced, hence the coefficient 2
        rxn_mets = {};
        rxn_mets[atp] = -1;
        rxn_mets[mlthf] = -1;
        rxn_mets[mob3] = -1;
        rxn_mets[asp] = -1;
        rxn_mets[cys] = -1;
        rxn_mets[coa] = 1;
        rxn_mets[co2] = 2;
        rxn = Reaction('COASYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include FADSYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','fad_c');
        fad = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        fad.charge = met_row['charge']
        #get metabolites in the model
        ru5p = cobra_model.metabolites.get_by_id('ru5p_DASH_D_c')
        #make FADSYN (irreversible); two ru5p are consumed and three CO2 produced
        rxn_mets = {};
        rxn_mets[gtp] = -1;
        rxn_mets[ru5p] = -2;
        rxn_mets[atp] = -1;
        rxn_mets[fad] = 1;
        rxn_mets[co2] = 3;
        rxn = Reaction('FADSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include CBMKr and CBMKr_reverse:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','cbp_c');
        cbp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        cbp.charge = met_row['charge']
        #make CBMKr (irreversible)
        rxn_mets = {};
        rxn_mets[co2] = -1;
        rxn_mets[cbp] = 1;
        rxn = Reaction('CBMKr');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #make CBMKr_reverse (irreversible)
        rxn_mets = {};
        rxn_mets[cbp] = -1;
        rxn_mets[co2] = 1;
        rxn = 
Reaction('CBMKr_reverse'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #expand the model to include UTPSYN: #get metabolites not in the model met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','utp_c'); utp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') utp.charge = met_row['charge'] #make UTPSYN (irreversible) rxn_mets = {}; rxn_mets[r5p] = -1; rxn_mets[cbp] = -1; rxn_mets[asp] = -1; rxn_mets[utp] = 1; rxn_mets[co2] = 1; rxn = Reaction('UTPSYN'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); # update selected reactions to account for coa_c cobra_model.reactions.get_by_id("ArgSYN").add_metabolites({coa:1}); cobra_model.reactions.get_by_id("CS").add_metabolites({coa:1}); cobra_model.reactions.get_by_id("LeuSYN").add_metabolites({coa:1}); cobra_model.reactions.get_by_id("PDH").add_metabolites({coa:-1}); cobra_model.reactions.get_by_id("PTAr_ACKr_ACS").add_metabolites({coa:1}); cobra_model.reactions.get_by_id("PTAr_ACKr_ACS_reverse").add_metabolites({coa:-1}); cobra_model.reactions.get_by_id("SERAT_CYSS").add_metabolites({coa:1}); cobra_model.reactions.get_by_id("THRD_GLYAT").add_metabolites({coa:-1}); cobra_model.reactions.get_by_id("MALS").add_metabolites({coa:1}); # update selected mappings to account for coa_c for rxn,row in enumerate(atomMappingReactions): if row['rxn_id'] == 'ArgSYN': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['glu_DASH_L_c','co2_c','gln_DASH_L_c','asp_DASH_L_c','accoa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['arg_DASH_L_c','akg_c','fum_c','ac_c','coa_c'] atomMappingReactions[rxn]['reactants_mapping']=['abcde','f','ghijk','lmno','ABCDEFGHIJKLMNOPQRSTUpq'] atomMappingReactions[rxn]['products_mapping']=['abcdef','ghijk','lmno','pq','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'CS': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['oaa_c','accoa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['cit_c','coa_c'] 
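        # The uppercase run 'ABCDEFGHIJKLMNOPQRSTU' in the mappings being updated
        # here threads the 21 carbons of the CoA moiety through each CoA-dependent
        # reaction; acetyl-CoA is that backbone plus the two acetyl carbons 'ab'.
        # A quick consistency check:
        COA_CARBONS = 'ABCDEFGHIJKLMNOPQRSTU'
        assert len(COA_CARBONS) == 21                            # CoA is C21H36N7O16P3S
        assert COA_CARBONS + 'ab' == 'ABCDEFGHIJKLMNOPQRSTUab'   # the accoa_c mapping used here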
atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTUef'] atomMappingReactions[rxn]['products_mapping']=['dcbfea','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'LeuSYN': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','pyr_c','pyr_c','glu_DASH_L_c'] atomMappingReactions[rxn]['products_ids_tracked']=['leu_DASH_L_c','co2_c','co2_c','akg_c','coa_c'] atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cde','fgh','ijklm'] atomMappingReactions[rxn]['products_mapping']=['abdghe','c','f','ijklm','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'PDH': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['pyr_c','coa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c','co2_c'] atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUbc','a'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) 
atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'PTAr_ACKr_ACS': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['ac_c','coa_c'] atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab'] atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'PTAr_ACKr_ACS_reverse': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1] atomMappingReactions[rxn]['reactants_ids_tracked']=['ac_c','coa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c'] atomMappingReactions[rxn]['reactants_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUab'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'SERAT_CYSS': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['ser_DASH_L_c','accoa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['cys_DASH_L_c','ac_c','coa_c'] 
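        # The same enumerate(mapping) loops are repeated verbatim for every reaction
        # updated in this method; they could be factored into one helper (a sketch,
        # not a replacement of the originals):
        def fill_tracked_fields(row):
            for side in ('reactants', 'products'):
                row[side + '_elements_tracked'] = [['C'] * len(m) for m in row[side + '_mapping']]
                row[side + '_positions_tracked'] = [list(range(len(m))) for m in row[side + '_mapping']]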
atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTUde'] atomMappingReactions[rxn]['products_mapping']=['abc','de','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'THRD_GLYAT': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['thr_DASH_L_c','coa_c'] atomMappingReactions[rxn]['products_ids_tracked']=['gly_c','accoa_c'] atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTUcd'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) elif row['rxn_id'] == 'MALS': atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1] atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1] atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','glx_c'] atomMappingReactions[rxn]['products_ids_tracked']=['mal_DASH_L_c','coa_c'] atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cd'] atomMappingReactions[rxn]['products_mapping']=['cdba','ABCDEFGHIJKLMNOPQRSTU'] atomMappingReactions[rxn]['reactants_elements_tracked']=[] atomMappingReactions[rxn]['products_elements_tracked']=[] atomMappingReactions[rxn]['reactants_positions_tracked']=[] atomMappingReactions[rxn]['products_positions_tracked']=[] for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements) atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions) for cnt,mapping in 
enumerate(atomMappingReactions[rxn]['products_mapping']): positions = [] elements = [] for pos,element in enumerate(mapping): positions.append(pos); elements.append('C'); atomMappingReactions[rxn]['products_elements_tracked'].append(elements) atomMappingReactions[rxn]['products_positions_tracked'].append(positions) # update BOF met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','adp_c'); adp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') adp.charge = met_row['charge'] cobra_model.reactions.get_by_id("Ec_Biomass_INCA").add_metabolites({coa:2.51, atp:-53.95,gtp:-0.20912,fad:-0.000223,utp:-0.1401}); # write the model to a temporary file save_json_model(cobra_model,'data/cobra_model_tmp.json') # add the model information to the database io = stage02_isotopomer_io() dataStage02IsotopomerModelRxns_data = []; dataStage02IsotopomerModelMets_data = []; dataStage02IsotopomerModels_data,\ dataStage02IsotopomerModelRxns_data,\ dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json') io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data); io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data); io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data); #add atomMappingReactions to the database io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions); # expand atomMappingReactions imm = stage02_isotopomer_metaboliteMapping() irm = stage02_isotopomer_reactionMapping() mappingUtilities = stage02_isotopomer_mappingUtilities() # make atomMappingMetabolites mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O], mapping_id_rxns_I=[mapping_id_O], mapping_id_mets_I=[],#mapping_id_mets_I=[mapping_id_I], mapping_id_new_I=mapping_id_O); # update symmetric metabolites imm.get_metaboliteMapping(mapping_id_O,'succ_c') imm.make_symmetric() imm.update_metaboliteMapping() imm.clear_metaboliteMapping() imm.get_metaboliteMapping(mapping_id_O,'fum_c') imm.make_symmetric() imm.update_metaboliteMapping() imm.clear_metaboliteMapping() imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c') imm.make_symmetric() imm.update_metaboliteMapping() imm.clear_metaboliteMapping() ## update _elements and _positions-_tracked #irm.get_reactionMapping(mapping_id_O,'ArgSYN') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'CS') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'LeuSYN') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'PDH') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS_reverse') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'SERAT_CYSS') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'THRD_GLYAT') #irm.checkAndCorrect_elementsAndPositions(); 
#irm.update_reactionMapping() #irm.clear_reactionMapping() #irm.get_reactionMapping(mapping_id_O,'MALS') #irm.checkAndCorrect_elementsAndPositions(); #irm.update_reactionMapping() #irm.clear_reactionMapping() #make default base metabolites imm.get_metaboliteMapping(mapping_id_O,'asp_DASH_L_c') imm.make_defaultBaseMetabolites() imm.update_metaboliteMapping() imm.clear_metaboliteMapping() imm.get_metaboliteMapping(mapping_id_O,'cys_DASH_L_c') imm.make_defaultBaseMetabolites() imm.update_metaboliteMapping() imm.clear_metaboliteMapping() imm.get_metaboliteMapping(mapping_id_O,'ru5p_DASH_D_c') imm.make_defaultBaseMetabolites() imm.update_metaboliteMapping() imm.clear_metaboliteMapping() #add in PRS to the network? #if not, substitute r5p_c for prpp_c #substitute co2_c for for_c #substitute phe_DASH_L_c for phpyr_c #ATPSYN irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN', [{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}], [], [], 'atp_c', [], []) irm.add_productMapping(['atp_c']) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN', [{'gln_DASH_L_c':'C'}], [], [], 'glu_DASH_L_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN', [{'asp_DASH_L_c':'C'}], [], [], 'fum_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN', [{'asp_DASH_L_c':'C'}], [], [], 'fum_c', [], []) irm.add_reactionMapping() irm.clear_reactionMapping() #GTPSYN irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN', [{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}], [], [], 'gtp_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN', [{'gln_DASH_L_c':'C'}], [], [], 'glu_DASH_L_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN', [{'gln_DASH_L_c':'C'}], [], [], 'glu_DASH_L_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN', [{'asp_DASH_L_c':'C'}], [], [], 'fum_c', [], []) irm.add_productMapping(['gtp_c']) irm.add_reactionMapping() irm.clear_reactionMapping() #VPAMTr_reverse irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse', [{'val_DASH_L_c':'C'}], [], [], '3mob_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse', [{'pyr_c':'C'}], [], [], 'ala_DASH_L_c', [], []) irm.add_productMapping(['3mob_c']) irm.add_reactionMapping() irm.clear_reactionMapping() #VPAMTr irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr', [{'3mob_c':'C'}], [], [], 'val_DASH_L_c', [], []) irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr', [{'ala_DASH_L_c':'C'}], [], [], 'pyr_c', [], []) irm.add_reactionMapping() irm.clear_reactionMapping() #COASYN irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'COASYN', [{'atp_c':'C'},{'mlthf_c':'C'},{'3mob_c':'C'},{'asp_DASH_L_c':'C'},{'cys_DASH_L_c':'C'}], [{'asp_DASH_L_c':3},{'cys_DASH_L_c':4}], [{'co2_c':0},{'co2_c':0}], 'coa_c', [{'co2_c':'C'},{'co2_c':'C'}], ['co2_c','co2_c']) #reverse product mapping for 3mob_c in database! 
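        # The make_symmetric() calls used on succ_c, fum_c, and 26dap_DASH_M_c in this
        # module presumably encode rotational symmetry: a label pattern and its reversal
        # are experimentally indistinguishable on a symmetric carbon backbone. A sketch
        # of the equivalence (an assumption about what make_symmetric() captures):
        def symmetric_equivalents(mapping):
            return {mapping, mapping[::-1]}   # e.g. succinate 'abcd' == 'dcba'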
irm.update_productMapping(['coa_c']) irm.add_reactionMapping() irm.clear_reactionMapping() #ACCOA_psuedo irm.make_trackedBinaryReaction('full04','140407_iDM2014','accoa_c_base_met_ids', [{'coa_c':'C'},{'ac_c':'C'}], 'accoa_c') irm.update_productMapping(['accoa_c']) irm.clear_reactionMapping() #FADSYN irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'FADSYN', [{'gtp_c':'C'},{'ru5p_DASH_D_c':'C'},{'ru5p_DASH_D_c':'C'},{'atp_c':'C'}], [{'gtp_c':0},{'ru5p_DASH_D_c':1},{'ru5p_DASH_D_c':2}], [{'10fthf_c':0},{'co2_c':0},{'co2_c':0}], 'fad_c', [{'10fthf_c':'C'},{'co2_c':'C'},{'co2_c':'C'}], ['co2_c','co2_c','co2_c']) irm.add_productMapping(['fad_c']) irm.add_reactionMapping() irm.clear_reactionMapping() #CBMKr irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr', [{'co2_c':'C'}], [], [], 'cbp_c', [], []) irm.add_productMapping(['cbp_c']) irm.add_reactionMapping() irm.clear_reactionMapping() #CBMKr_reverse irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr_reverse', [{'cbp_c':'C'}], [], [], 'co2_c', [], []) irm.add_reactionMapping() irm.clear_reactionMapping() #UTPSYN irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'UTPSYN', [{'r5p_c':'C'},{'cbp_c':'C'},{'asp_DASH_L_c':'C'}], [{'asp_DASH_L_c':2}], [{'co2_c':0}], 'utp_c', [{'co2_c':'C'}], ['co2_c']) irm.add_productMapping(['utp_c']) irm.add_reactionMapping() irm.clear_reactionMapping() #ecoli_RL2013 modifications (TODO) def expand_ecoliRL2013_01(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O): '''expand the INCA Ecoli model to account for additional metabolites''' query = stage02_isotopomer_query() # get the xml model cobra_model_sbml = '' cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I); # load the model if cobra_model_sbml: if cobra_model_sbml['file_type'] == 'sbml': with open('data/cobra_model_tmp.xml','wb') as file: file.write(cobra_model_sbml['model_file']); file.close() cobra_model = None; cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True); elif cobra_model_sbml['file_type'] == 'json': with open('data/cobra_model_tmp.json','wb') as file: file.write(cobra_model_sbml['model_file']); file.close() cobra_model = None; cobra_model = load_json_model('data/cobra_model_tmp.json'); else: print('file_type not supported') #get the atomMapping_reactions atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I); #change the mapping_id for cnt,row in enumerate(atomMappingReactions): atomMappingReactions[cnt]['mapping_id']=mapping_id_O; #add in glucose transporters and intracellular glc #get metabolites not in the model met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"atp_c"); atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') atp.charge = met_row['charge'] met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c"); glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') glc_c.charge = met_row['charge'] met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e"); glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e') glc_e.charge = met_row['charge'] glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e') glcext.charge = met_row['charge'] glcpre = 
Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e') glcpre.charge = met_row['charge'] #get metabolites in the model pep = cobra_model.metabolites.get_by_id('pep_c') pyr = cobra_model.metabolites.get_by_id('pyr_c') g6p = cobra_model.metabolites.get_by_id('g6p_c') #make EX_glc_LPAREN_e_RPAREN_ rxn_mets = {}; rxn_mets[glcext] = -1; rxn_mets[glc_e] = 1; rxn = Reaction('EX_glc_LPAREN_e_RPAREN_'); cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext'] row_tmp['products_ids_tracked']=['glc_DASH_D_e'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); ##make EX_glc_LPAREN_e_RPAREN__pre #rxn_mets = {}; #rxn_mets[glcpre] = -1; #rxn_mets[glc_e] = 1; #rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre'); #cobra_model.remove_reactions(['v60']); #rxn.add_metabolites(rxn_mets); #cobra_model.add_reactions([rxn]); #cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; #cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; #cobra_model.repair(); ##append the new atom mappings #row_tmp = {}; #row_tmp['mapping_id']=mapping_id_O; #row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre'; #row_tmp['rxn_description']=''; #row_tmp['rxn_equation']=''; #row_tmp['reactants_stoichiometry_tracked']=[-1] #row_tmp['products_stoichiometry_tracked']=[1] #row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre'] #row_tmp['products_ids_tracked']=['glc_DASH_D_e'] #row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] #row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] #row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] #row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] #row_tmp['reactants_mapping']=['abcdef'] #row_tmp['products_mapping']=['abcdef'] #row_tmp['used_']=True #row_tmp['comment_']='added' #atomMappingReactions.append(row_tmp); #make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c" rxn_mets = {}; rxn_mets[glc_e] = -1; rxn_mets[pep] = -1; rxn_mets[g6p] = 1; rxn_mets[pyr] = 1; rxn = Reaction('GLCptspp'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='GLCptspp'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1,-1] row_tmp['products_stoichiometry_tracked']=[1,1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c'] row_tmp['products_ids_tracked']=['g6p_c','pyr_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", 
"C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]] row_tmp['reactants_mapping']=['abcdef','ghi'] row_tmp['products_mapping']=['abcdef','ghi'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c" rxn_mets = {}; rxn_mets[glc_e] = -1; rxn_mets[glc_c] = 1; rxn = Reaction('GLCt2pp'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='GLCt2pp'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_e'] row_tmp['products_ids_tracked']=['glc_DASH_D_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); #make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c" rxn_mets = {}; rxn_mets[glc_c] = -1; rxn_mets[atp] = -1; rxn_mets[g6p] = 1; rxn = Reaction('HEX1'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0; cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0; cobra_model.repair(); #append the new atom mappings row_tmp = {}; row_tmp['mapping_id']=mapping_id_O; row_tmp['rxn_id']='HEX1'; row_tmp['rxn_description']=''; row_tmp['rxn_equation']=''; row_tmp['reactants_stoichiometry_tracked']=[-1] row_tmp['products_stoichiometry_tracked']=[1] row_tmp['reactants_ids_tracked']=['glc_DASH_D_c'] row_tmp['products_ids_tracked']=['g6p_c'] row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]] row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]] row_tmp['reactants_mapping']=['abcdef'] row_tmp['products_mapping']=['abcdef'] row_tmp['used_']=True row_tmp['comment_']='added' atomMappingReactions.append(row_tmp); # add in PRPPS phosphoribosylpyrophosphate synthetase atp[c] + r5p[c] <=> amp[c] + h[c] + prpp[c] #get metabolites not in the model met_row = {} met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"prpp_c"); prpp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c') prpp.charge = met_row['charge'] r5p = cobra_model.metabolites.get_by_id('r5p_c') # expand the model rxn_mets = {}; rxn_mets[r5p] = -1; rxn_mets[atp] = -1; rxn_mets[prpp] = 1; rxn = Reaction('PRPPS'); rxn.add_metabolites(rxn_mets); cobra_model.add_reactions([rxn]); cobra_model.repair(); # add in rxn mapping row={}; row['mapping_id']=mapping_id_O; row['rxn_id']='PRPPS'; row['rxn_description']=''; row['rxn_equation']=''; row['reactants_stoichiometry_tracked']=[-1] row['products_stoichiometry_tracked']=[1] 
# add in PRPPS phosphoribosylpyrophosphate synthetase atp[c] + r5p[c] <=> amp[c] + h[c] + prpp[c]
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"prpp_c");
prpp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
prpp.charge = met_row['charge']
r5p = cobra_model.metabolites.get_by_id('r5p_c')
# expand the model
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[atp] = -1;
rxn_mets[prpp] = 1;
rxn = Reaction('PRPPS');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.repair();
# add in rxn mapping
row={};
row['mapping_id']=mapping_id_O;
row['rxn_id']='PRPPS';
row['rxn_description']='';
row['rxn_equation']='';
row['reactants_stoichiometry_tracked']=[-1]
row['products_stoichiometry_tracked']=[1]
row['reactants_ids_tracked']=['r5p_c']
row['products_ids_tracked']=['prpp_c']
row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
row['reactants_mapping']=['abcde']
row['products_mapping']=['abcde']
row['used_']=True
row['comment_']='added'
atomMappingReactions.append(row)
##expand the model
#acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
#cit = cobra_model.metabolites.get_by_id('cit_c')
#icit = cobra_model.metabolites.get_by_id('icit_c')
#e4p = cobra_model.metabolites.get_by_id('e4p_c')
#phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
#phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
# update selected reactions to account for new metabolites
for rxn,row in enumerate(atomMappingReactions):
    if row['rxn_id'] == 'HisSYN':
        # split HisSYN to add in prpp
        cobra_model.reactions.get_by_id(row['rxn_id']).subtract_metabolites({atp:-1,r5p:-1})
        cobra_model.reactions.get_by_id(row['rxn_id']).add_metabolites({prpp:-1})
        # Update the mapping_ids
        atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
# write the model to a temporary file
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
    dataStage02IsotopomerModelRxns_data,\
    dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
# expand atomMappingReactions
imm = stage02_isotopomer_metaboliteMapping()
irm = stage02_isotopomer_reactionMapping()
mappingUtilities = stage02_isotopomer_mappingUtilities()
# make atomMappingMetabolites
mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
    mapping_id_rxns_I=[mapping_id_O],
    mapping_id_mets_I=[],
    mapping_id_new_I=mapping_id_O);
# update symmetric metabolites
imm.get_metaboliteMapping(mapping_id_O,'succ_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'fum_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
imm.make_symmetric()
imm.update_metaboliteMapping()
imm.clear_metaboliteMapping()
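# The three symmetric-metabolite updates above could equivalently be written
# as a loop; a sketch (behavior assumed identical to the blocks above):
#for symmetric_met in ['succ_c','fum_c','26dap_DASH_M_c']:
#    imm.get_metaboliteMapping(mapping_id_O,symmetric_met)
#    imm.make_symmetric() # default: reverses met_elements/met_atompositions (180 degree symmetry)
#    imm.update_metaboliteMapping()
#    imm.clear_metaboliteMapping()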
#analysis functions
def load_isotopomer_matlab(self,matlab_data,isotopomer_data=None):
    '''Load 13CFlux isotopomer simulation data from matlab file'''
    # load measured isotopomers from MATLAB file into numpy array
    # load names and calculated isotopomers from MATLAB file into numpy array
    names = scipy.io.loadmat(matlab_data)['output']['names'][0][0];
    calculated_ave = scipy.io.loadmat(matlab_data)['output']['ave'][0][0];
    calculated_stdev = scipy.io.loadmat(matlab_data)['output']['stdev'][0][0];
    # load residuals from MATLAB file into numpy array
    residuals = scipy.io.loadmat(matlab_data)['residuals'];
    if isotopomer_data:
        measured_dict = json.load(open(isotopomer_data,'r'));
        measured_names = [];
        measured_ave = [];
        measured_stdev = [];
        # extract data to lists
        for frag,data in measured_dict['fragments'].items():
            for name in data['data_names']:
                measured_names.append(name);
            for ave in data['data_ave']:
                measured_ave.append(ave);
            for stdev in data['data_stdev']:
                measured_stdev.append(stdev);
        # convert lists to dict
        measured_dict = {};
        for i,name in enumerate(measured_names):
            measured_dict[name]={'measured_ave':measured_ave[i],
                                 'measured_stdev':measured_stdev[i]};
        # match measured names to calculated names
        measured_ave = [];
        measured_stdev = [];
        residuals = [];
        for i,name in enumerate(names):
            if name[0][0] in measured_dict:
                measured_ave.append(measured_dict[name[0][0]]['measured_ave']);
                measured_stdev.append(measured_dict[name[0][0]]['measured_stdev']);
                residuals.append(measured_dict[name[0][0]]['measured_ave']-calculated_ave[i][0]);
            else:
                measured_ave.append(None);
                measured_stdev.append(None);
                residuals.append(None);
    else:
        measured_ave_tmp = scipy.io.loadmat(matlab_data)['toCompare'];
        measured_ave = [];
        for d in measured_ave_tmp:
            measured_ave.append(d[0]);
        measured_stdev = numpy.zeros(len(measured_ave));
    # combine into a dictionary
    isotopomer = {};
    for i in range(len(names)):
        isotopomer[names[i][0][0]] = {'measured_ave':measured_ave[i], #TODO: extract out by fragment names
                                      'measured_stdev':measured_stdev[i],
                                      'calculated_ave':calculated_ave[i][0],
                                      'calculated_stdev':calculated_stdev[i][0],
                                      'residuals':residuals[i]};
    return isotopomer;
def load_confidenceIntervals_matlab(self,matlab_data,cobra_model_matlab,cobra_model_name):
    '''Load confidence intervals from matlab file'''
    # load confidence intervals from MATLAB file into numpy array
    cimin_h5py = h5py.File(matlab_data)['ci']['minv'][0];
    cimax_h5py = h5py.File(matlab_data)['ci']['maxv'][0];
    cimin = numpy.array(cimin_h5py);
    cimax = numpy.array(cimax_h5py);
    # load cobramodel
    rxns = scipy.io.loadmat(cobra_model_matlab)[cobra_model_name]['rxns'][0][0]
    # combine cimin, cimax, and rxns into dictionary
    ci = {};
    for i in range(len(cimin)):
        ci[rxns[i][0][0]] = {'minv':cimin[i],'maxv':cimax[i]};
    return ci;
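# Usage sketch (file names are hypothetical; assumes an instance of the
# enclosing analysis class and MATLAB outputs of the expected shape):
#isotopomer = self.load_isotopomer_matlab('data/fit.mat','data/isotopomer.json');
#ci = self.load_confidenceIntervals_matlab('data/ci.mat','data/model.mat','cobra_model');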
def compare_isotopomers_calculated(self,isotopomer_1, isotopomer_2):
    '''compare two calculated isotopomer distributions'''
    # extract into lists
    absDif_list = [];
    ssr_1_list = [];
    ssr_2_list = [];
    bestFit_list = [];
    frag_list = [];
    ssr_1 = 0.0; # sum of squared residuals (threshold of 10e1, Antoniewicz poster, co-culture, Met Eng X)
    ssr_2 = 0.0;
    measured_1_list = [];
    measured_2_list = [];
    calculatedAve_1_list = [];
    calculatedAve_2_list = [];
    measuredStdev_1_list = [];
    measuredStdev_2_list = [];
    for frag,data in isotopomer_1.items():
        absDif = 0.0;
        sr_1 = 0.0;
        sr_2 = 0.0;
        bestFit = None;
        absDif = fabs(isotopomer_1[frag]['calculated_ave'] - isotopomer_2[frag]['calculated_ave']);
        sr_1 = pow(isotopomer_1[frag]['calculated_ave']-isotopomer_1[frag]['measured_ave'],2);
        sr_2 = pow(isotopomer_2[frag]['calculated_ave']-isotopomer_2[frag]['measured_ave'],2);
        if sr_1>sr_2: bestFit = '2';
        elif sr_1<sr_2: bestFit = '1';
        elif sr_1==sr_2: bestFit = None;
        absDif_list.append(absDif);
        ssr_1_list.append(sr_1);
        ssr_2_list.append(sr_2);
        bestFit_list.append(bestFit);
        frag_list.append(frag);
        ssr_1 += sr_1;
        ssr_2 += sr_2;
        measured_1_list.append(isotopomer_1[frag]['measured_ave'])
        measured_2_list.append(isotopomer_2[frag]['measured_ave'])
        calculatedAve_1_list.append(isotopomer_1[frag]['calculated_ave']);
        calculatedAve_2_list.append(isotopomer_2[frag]['calculated_ave']);
        measuredStdev_1_list.append(isotopomer_1[frag]['measured_stdev']);
        measuredStdev_2_list.append(isotopomer_2[frag]['measured_stdev']);
    # calculate the correlation coefficient
    # 1. between measured vs. calculated (1 and 2)
    # 2. between calculated 1 vs. calculated 2
    r_measuredVsCalculated_1 = None;
    r_measuredVsCalculated_2 = None;
    r_measured1VsMeasured2 = None;
    p_measuredVsCalculated_1 = None;
    p_measuredVsCalculated_2 = None;
    p_measured1VsMeasured2 = None;
    r_measuredVsCalculated_1, p_measuredVsCalculated_1 = scipy.stats.pearsonr(measured_1_list,calculatedAve_1_list);
    r_measuredVsCalculated_2, p_measuredVsCalculated_2 = scipy.stats.pearsonr(measured_2_list,calculatedAve_2_list);
    r_measured1VsMeasured2, p_measured1VsMeasured2 = scipy.stats.pearsonr(calculatedAve_1_list,calculatedAve_2_list);
    # wrap stats into a dictionary
    isotopomer_comparison_stats = {};
    isotopomer_comparison_stats = dict(list(zip(('r_measuredVsCalculated_1',
        'p_measuredVsCalculated_1',
        'r_measuredVsCalculated_2',
        'p_measuredVsCalculated_2',
        'r_measured1VsMeasured2',
        'p_measured1VsMeasured2',
        'ssr_1','ssr_2'), # keys split so that ssr_2 is not silently dropped by the zip
        (r_measuredVsCalculated_1,
        p_measuredVsCalculated_1,
        r_measuredVsCalculated_2,
        p_measuredVsCalculated_2,
        r_measured1VsMeasured2,
        p_measured1VsMeasured2,
        ssr_1,ssr_2))));
    ## zip, sort, unzip
    # does not appear to sort correctly!
    #zipped = zip(absDif_list,ssr_1_list,ssr_2_list,bestFit_list,frag_list,
    #             measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,
    #             measuredStdev_1_list,measuredStdev_2_list);
    #zipped.sort();
    #zipped.reverse();
    #absDif_list,ssr_1_list,ssr_2_list,bestFit_list,frag_list,\
    #    measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,\
    #    measuredStdev_1_list,measuredStdev_2_list = zip(*zipped);
    # restructure into a list of dictionaries for easy parsing or database viewing
    isotopomer_comparison = [];
    for i in range(len(absDif_list)):
        isotopomer_comparison.append({'isotopomer_absDif':absDif_list[i],
            'isotopomer_1_sr':ssr_1_list[i],
            'isotopomer_2_sr':ssr_2_list[i],
            'bestFit':bestFit_list[i],
            'frag':frag_list[i],
            'measured_1_ave':measured_1_list[i],
            'measured_2_ave':measured_2_list[i],
            'measured_1_stdev':measuredStdev_1_list[i],
            'measured_2_stdev':measuredStdev_2_list[i],
            'calculated_1_ave':calculatedAve_1_list[i],
            'calculated_2_ave':calculatedAve_2_list[i]});
    return isotopomer_comparison,isotopomer_comparison_stats;
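# Usage sketch (hypothetical inputs; both arguments follow the dictionary
# shape returned by load_isotopomer_matlab above):
#isotopomer_1 = load_isotopomer_matlab(self,'data/fit_1.mat','data/isotopomer.json');
#isotopomer_2 = load_isotopomer_matlab(self,'data/fit_2.mat','data/isotopomer.json');
#comparison,stats = compare_isotopomers_calculated(self,isotopomer_1,isotopomer_2);
#print(stats['ssr_1'],stats['ssr_2']); # the lower ssr indicates the better fit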
def compare_ci_calculated(self,ci_1,ci_2):
    '''compare 2 calculated confidence intervals'''
    # extract into lists
    rxns_1_list = [];
    rxns_2_list = [];
    ciminv_1_list = [];
    ciminv_2_list = [];
    cimaxv_1_list = [];
    cimaxv_2_list = [];
    cirange_1_list = [];
    cirange_2_list = [];
    cirange_1_sum = 0.0;
    cirange_2_sum = 0.0;
    # ci_1:
    for k,v in ci_1.items():
        rxns_1_list.append(k);
        ciminv_1_list.append(v['minv']);
        cimaxv_1_list.append(v['maxv']);
        cirange_1_list.append(v['maxv']-v['minv']);
        cirange_1_sum += v['maxv']-v['minv'];
    ## zip, sort, unzip
    #zipped1 = zip(rxns_1_list,ciminv_1_list,cimaxv_1_list,cirange_1_list);
    #zipped1.sort();
    #rxns_1_list,ciminv_1_list,cimaxv_1_list,cirange_1_list = zip(*zipped1);
    # ci_2:
    for k,v in ci_2.items():
        rxns_2_list.append(k);
        ciminv_2_list.append(v['minv']);
        cimaxv_2_list.append(v['maxv']);
        cirange_2_list.append(v['maxv']-v['minv']);
        cirange_2_sum += v['maxv']-v['minv'];
    ## zip, sort, unzip
    #zipped2 = zip(rxns_2_list,ciminv_2_list,cimaxv_2_list,cirange_2_list);
    #zipped2.sort();
    #rxns_2_list,ciminv_2_list,cimaxv_2_list,cirange_2_list = zip(*zipped2);
    # compare by rxn_id
    cirange_absDev_list = [];
    rxns_combined_list = [];
    ciminv_1_combined_list = [];
    ciminv_2_combined_list = [];
    cimaxv_1_combined_list = [];
    cimaxv_2_combined_list = [];
    cirange_1_combined_list = [];
    cirange_2_combined_list = [];
    cirange_1_combined_sum = 0.0;
    cirange_2_combined_sum = 0.0;
    for i in range(len(rxns_1_list)):
        for j in range(len(rxns_2_list)):
            if rxns_1_list[i] == rxns_2_list[j]:
                rxns_combined_list.append(rxns_1_list[i]);
                cirange_absDev_list.append(fabs(cirange_1_list[i]-cirange_2_list[j]));
                ciminv_1_combined_list.append(ciminv_1_list[i]);
                ciminv_2_combined_list.append(ciminv_2_list[j]);
                cimaxv_1_combined_list.append(cimaxv_1_list[i]);
                cimaxv_2_combined_list.append(cimaxv_2_list[j]);
                cirange_1_combined_list.append(cirange_1_list[i]);
                cirange_2_combined_list.append(cirange_2_list[j]);
                cirange_1_combined_sum += cirange_1_list[i]
                cirange_2_combined_sum += cirange_2_list[j]
    ## zip, sort, unzip
    #zippedCombined = zip(cirange_absDev_list,rxns_combined_list,ciminv_1_combined_list,ciminv_2_combined_list,cimaxv_1_combined_list,cimaxv_2_combined_list,cirange_1_combined_list,cirange_2_combined_list);
    #zippedCombined.sort();
    #zippedCombined.reverse();
    #cirange_absDev_list,rxns_combined_list,ciminv_1_combined_list,ciminv_2_combined_list,cimaxv_1_combined_list,cimaxv_2_combined_list,cirange_1_combined_list,cirange_2_combined_list = zip(*zippedCombined);
    # restructure into a list of dictionaries for easy parsing or database viewing
    ci_comparison = [];
    for i in range(len(cirange_absDev_list)):
        ci_comparison.append({'cirange_absDev_list':cirange_absDev_list[i],
            'rxns_combined_list':rxns_combined_list[i],
            'ciminv_1_combined_list':ciminv_1_combined_list[i],
            'ciminv_2_combined_list':ciminv_2_combined_list[i],
            'cimaxv_1_combined_list':cimaxv_1_combined_list[i],
            'cimaxv_2_combined_list':cimaxv_2_combined_list[i],
            'cirange_1_combined_list':cirange_1_combined_list[i],
            'cirange_2_combined_list':cirange_2_combined_list[i]});
    return ci_comparison,cirange_1_sum,cirange_2_sum,cirange_1_combined_sum,cirange_2_combined_sum;
def plot_compare_isotopomers_calculated(self,isotopomer_comparison,isotopomer_comparison_stats):
    '''Plot 1: isotopomer fitting comparison
    Plot 2: isotopomer residual comparison'''
    io = base_exportData(isotopomer_comparison);
    # Plot 1 and Plot 2:
    io.write_dict2tsv('data/data.tsv');
def plot_ci_calculated(self,ci):
    '''plot confidence intervals from fluxomics experiment using escher'''
    data = [];
    flux1 = {};
    flux2 = {};
    for k,v in ci.items():
        flux1[k] = v['minv'];
        flux2[k] = v['maxv'];
    data.append(flux1);
    data.append(flux2);
    io = base_exportData(data);
    io.write_dict2json('visualization/escher/ci.json');
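# Sketch of the resulting ci.json payload consumed by escher: a two-entry
# list of reaction->lower-bound and reaction->upper-bound dictionaries
# (values below are illustrative only):
#[
#  {"GLCptspp": 7.2, "HEX1": 0.4},
#  {"GLCptspp": 8.1, "HEX1": 1.1}
#]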
def export_modelWithFlux(self,cobra_model_xml_I,ci_list_I,cobra_model_xml_O):
    '''update model lower_bound/upper_bound with calculated flux confidence intervals'''
    cobra_model = create_cobra_model_from_sbml_file(cobra_model_xml_I);
    rxns_add = [];
    rxns_omitted = [];
    rxns_break = [];
    system_boundaries = [x.id for x in cobra_model.reactions if x.boundary == 'system_boundary'];
    objectives = [x.id for x in cobra_model.reactions if x.objective_coefficient == 1];
    for i,ci_I in enumerate(ci_list_I):
        print('add flux from ci ' + str(i));
        for rxn in cobra_model.reactions:
            if rxn.id in list(ci_I.keys()) and not(rxn.id in system_boundaries)\
                and not(rxn.id in objectives):
                cobra_model_copy = cobra_model.copy();
                # check for reactions that break the model:
                if ci_I[rxn.id]['minv'] > 0:
                    cobra_model_copy.reactions.get_by_id(rxn.id).lower_bound = ci_I[rxn.id]['minv'];
                if ci_I[rxn.id]['maxv'] > 0 and ci_I[rxn.id]['maxv'] > ci_I[rxn.id]['minv']:
                    cobra_model_copy.reactions.get_by_id(rxn.id).upper_bound = ci_I[rxn.id]['maxv'];
                cobra_model_copy.optimize(solver='gurobi');
                if not cobra_model_copy.solution.f:
                    print(rxn.id + ' broke the model!')
                    rxns_break.append(rxn.id);
                else:
                    if ci_I[rxn.id]['minv'] > 0:
                        cobra_model.reactions.get_by_id(rxn.id).lower_bound = ci_I[rxn.id]['minv'];
                    if ci_I[rxn.id]['maxv'] > 0 and ci_I[rxn.id]['maxv'] > ci_I[rxn.id]['minv']:
                        cobra_model.reactions.get_by_id(rxn.id).upper_bound = ci_I[rxn.id]['maxv'];
                    rxns_add.append(rxn.id);
            else:
                rxns_omitted.append(rxn.id);
    write_cobra_model_to_sbml_file(cobra_model,cobra_model_xml_O)
class stage02_isotopomer_metaboliteMapping():
    """Class to standardize metabolite mapping:
    A mapped metabolite takes the following form:
        'met_id' + 'nMet_id' + '_' + 'element' + nElement
    Input:
        met_ids_elements_I = [{met_id:element},...]
        e.g. [{'f6p_c':'C'},{'f6p_c':'C'},{'f6p_c':'H'},{'f6p_c':'H'},{'ac_c':'C'},{'utp_c':'C'}]
        NOTE: The order matters if using multiple elements!
              (will need further testing in future versions)
    Base metabolites:
        default base metabolite is co2 for carbon and oh for hydrogen
    Base reaction: co2 + oh- + h+ = ch2o + o2"""
    def __init__(self,
                 mapping_id_I=None,
                 #met_name_I=None,
                 met_id_I=None,
                 #formula_I=None,
                 met_elements_I=[],
                 met_atompositions_I=[],
                 met_symmetry_elements_I=[],
                 met_symmetry_atompositions_I=[],
                 used__I=True,
                 comment__I=None,
                 met_mapping_I=[],
                 base_met_ids_I=[],
                 base_met_elements_I=[],
                 base_met_atompositions_I=[],
                 base_met_symmetry_elements_I=[],
                 base_met_symmetry_atompositions_I=[],
                 base_met_indices_I=[]):
        #self.session = Session();
        self.stage02_isotopomer_query = stage02_isotopomer_query();
        self.calculate = base_calculate();
        self.metaboliteMapping={};
        self.metaboliteMapping['mapping_id']=mapping_id_I;
        #self.metaboliteMapping['met_name']=met_name_I;
        self.metaboliteMapping['met_id']=met_id_I;
        #self.metaboliteMapping['formula']=formula_I;
        self.metaboliteMapping['met_elements']=met_elements_I;
        self.metaboliteMapping['met_atompositions']=met_atompositions_I;
        self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
        self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
        self.metaboliteMapping['used_']=used__I;
        self.metaboliteMapping['comment_']=comment__I;
        self.metaboliteMapping['met_mapping']=met_mapping_I;
        self.metaboliteMapping['base_met_ids']=base_met_ids_I;
        self.metaboliteMapping['base_met_elements']=base_met_elements_I;
        self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_I;
        self.metaboliteMapping['base_met_symmetry_elements']=base_met_symmetry_elements_I;
        self.metaboliteMapping['base_met_symmetry_atompositions']=base_met_symmetry_atompositions_I;
        self.metaboliteMapping['base_met_indices']=base_met_indices_I;
    def make_elementsAndPositionsTracked(self,met_id_I,element_I,n_elements_I):
        #Input: met_id_I,element_I,n_elements_I
        #Output: mapping_O,positions_O,elements_O
        #E.g: make_elementsAndPositionsTracked('fdp','C',6)
        mapping_O = [];
        positions_O = [];
        elements_O = [];
        for elements_cnt in range(n_elements_I):
            mapping = '[' + met_id_I.replace('.','_') + '_' + element_I + str(elements_cnt) + ']';
            mapping_O.append(mapping);
            positions_O.append(elements_cnt);
            elements_O.append(element_I);
        return mapping_O,positions_O,elements_O;
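    # Worked example (follows directly from the loop above):
    # make_elementsAndPositionsTracked('fdp','C',6) returns
    #   mapping_O   = ['[fdp_C0]','[fdp_C1]','[fdp_C2]','[fdp_C3]','[fdp_C4]','[fdp_C5]']
    #   positions_O = [0, 1, 2, 3, 4, 5]
    #   elements_O  = ['C', 'C', 'C', 'C', 'C', 'C']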
    def make_trackedMetabolite(self,mapping_id_I,model_id_I,met_id_element_I,met_index_I=None):
        '''Make a unique atom mapping for the given metabolite and element'''
        currentElementPos = 0;
        mapping_O = [];
        positions_O = [];
        elements_O = [];
        base_met_ids_O = [];
        base_met_elements_O = [];
        base_met_atompositions_O = [];
        base_met_symmetry_elements_O = [];
        base_met_symmetry_atompositions_O = [];
        base_met_indices_O = [];
        for k,v in met_id_element_I.items():
            # check if the metabolite is already in the database
            met_data = {}
            met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,k)
            #NOTE: need to add in a constraint to make sure that the elements in the database and the elements in the input match!
            if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
                nElements = len(met_data['met_elements']);
            else:
                # get the formula for the met_id
                formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
                # get the number of elements
                if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
                if 0 in Formula(formula_I)._elements[v]:
                    nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
            # make the tracking
            nMet = 0;
            if met_index_I: nMet = met_index_I
            mapping,positions,elements = self.make_elementsAndPositionsTracked(k+str(nMet),v,nElements);
            positions_corrected = [currentElementPos+pos for pos in positions];
            currentElementPos += max(positions)+1;
            mapping_O.append(mapping);
            positions_O.extend(positions_corrected);
            elements_O.extend(elements);
            base_met_ids_O.append(k)
            base_met_elements_O.append(elements)
            base_met_atompositions_O.append(positions)
            base_met_indices_O.append(nMet)
        self.metaboliteMapping['mapping_id']=mapping_id_I
        self.metaboliteMapping['met_id']=k
        self.metaboliteMapping['met_elements']=elements_O
        self.metaboliteMapping['met_atompositions']=positions_O
        self.metaboliteMapping['met_mapping']=mapping_O
        self.metaboliteMapping['base_met_ids']=base_met_ids_O
        self.metaboliteMapping['base_met_elements']=base_met_elements_O
        self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_O
        self.metaboliteMapping['base_met_indices']=base_met_indices_O
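    # Usage sketch (the mapping and model ids are assumptions drawn from the
    # examples elsewhere in this module):
    #imm = stage02_isotopomer_metaboliteMapping();
    #imm.make_trackedMetabolite('full04','140407_iDM2014',{'f6p_c':'C'});
    #imm.metaboliteMapping['met_mapping'] # -> [['[f6p_c0_C0]',...,'[f6p_c0_C5]']]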
    def make_compoundTrackedMetabolite(self,mapping_id_I,model_id_I,met_ids_elements_I,met_id_O,met_ids_indices_I = []):
        '''Make a unique atom mapping for the given metabolite based on base metabolites and elements'''
        #Input:
        #   met_ids_elements_I = [{met_id:element},...]
        #   e.g. met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}]
        #   (a future form could be [{met_id:{'elements':[string,...],'stoichiometry':float}},...],
        #    e.g. [{'f6p_c':{'elements':['C'],'stoichiometry':1}},{'ac_c':{'elements':['C'],'stoichiometry':1}},{'utp_c':{'elements':['C'],'stoichiometry':1}}])
        #E.g. make_compoundTrackedMetabolite('full04','140407_iDM2014',met_ids_elements_I,'uacgam_c')
        currentElementPos = 0;
        mapping_O = [];
        positions_O = [];
        elements_O = [];
        base_met_ids_O = [];
        base_met_elements_O = [];
        base_met_atompositions_O = [];
        base_met_symmetry_elements_O = [];
        base_met_symmetry_atompositions_O = [];
        base_met_indices_O = [];
        # get unique met_ids
        met_ids_all = [];
        for row in met_ids_elements_I:
            for k,v in row.items():
                met_ids_all.append(k);
        met_ids_unique = list(set(met_ids_all))
        met_ids_cnt = {};
        met_ids_elements = {};
        for met_id in met_ids_unique:
            met_ids_cnt[met_id] = 0;
            met_ids_elements[met_id] = [];
        # make the compound mapping
        for row_cnt,row in enumerate(met_ids_elements_I):
            for k,v in row.items():
                # check if the metabolite is already in the database
                met_data = {}
                met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,k)
                #NOTE: need to add in a constraint to make sure that the elements in the database and the elements in the input match!
                if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
                    nElements = len(met_data['met_elements']);
                else:
                    # get the formula for the met_id
                    formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
                    # get the number of elements
                    if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
                    if 0 in Formula(formula_I)._elements[v]:
                        nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
                # determine the metabolite index
                nMets = met_ids_cnt[k];
                if met_ids_indices_I:
                    nMets = met_ids_indices_I[row_cnt]
                # make the tracking
                mapping,positions,elements = self.make_elementsAndPositionsTracked(k+str(nMets),v,nElements);
                positions_corrected = [currentElementPos+pos for pos in positions];
                currentElementPos += max(positions)+1;
                # add to the compound tracking
                mapping_O.append(mapping);
                positions_O.extend(positions_corrected);
                elements_O.extend(elements);
                base_met_ids_O.append(k)
                base_met_elements_O.append(elements)
                base_met_atompositions_O.append(positions)
                base_met_indices_O.append(nMets)
                met_ids_cnt[k] += 1; # needed to ensure a unique metabolite mapping if the same met_id is used multiple times
        self.metaboliteMapping['mapping_id']=mapping_id_I
        self.metaboliteMapping['met_id']=met_id_O
        self.metaboliteMapping['met_elements']=elements_O
        self.metaboliteMapping['met_atompositions']=positions_O
        self.metaboliteMapping['met_mapping']=mapping_O
        self.metaboliteMapping['base_met_ids']=base_met_ids_O
        self.metaboliteMapping['base_met_elements']=base_met_elements_O
        self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_O
        self.metaboliteMapping['base_met_indices']=base_met_indices_O
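    # Usage sketch, taken from the docstring example above: building the
    # compound metabolite uacgam_c from three tracked carbon backbones.
    #met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}];
    #imm.make_compoundTrackedMetabolite('full04','140407_iDM2014',met_ids_elements_I,'uacgam_c');
    #imm.metaboliteMapping['base_met_ids'] # -> ['f6p_c','ac_c','utp_c']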
    def append_baseMetabolites_toMetabolite(self,model_id_I,met_ids_elements_I,met_id_O=None):
        '''Append a base metabolite to the current metabolite'''
        #get the currentElementPos
        currentElementPos = max(self.metaboliteMapping['met_atompositions'])+1;
        # get unique met_ids
        met_ids_unique = list(set(self.metaboliteMapping['base_met_ids']))
        met_ids_cnt = {};
        met_ids_elements = {};
        for met_id in met_ids_unique:
            met_ids_cnt[met_id] = 0;
            met_ids_elements[met_id] = [];
        for met_id_cnt,met_id in enumerate(self.metaboliteMapping['base_met_ids']):
            # determine the number of met_ids
            met_ids_cnt[met_id]+=1
            # determine the unique elements
            if not self.metaboliteMapping['met_elements'][0] in met_ids_elements[met_id]:
                met_ids_elements[met_id].append(self.metaboliteMapping['met_elements'][met_id_cnt][0]);
        # add the mapping for the new metabolites
        for row in met_ids_elements_I:
            for k,v in row.items():
                # check if the metabolite is already in the database
                met_data = {}
                met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.metaboliteMapping['mapping_id'],k)
                #NOTE: need to add in a constraint to make sure that the elements in the database and the elements in the input match!
                if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
                    nElements = len(met_data['met_elements']);
                else:
                    # get the formula for the met_id
                    formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
                    # get the number of elements
                    if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
                    if 0 in Formula(formula_I)._elements[v]:
                        nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
                # adjust the metabolite number if the same metabolite already exists
                nMets = met_ids_cnt[k];
                met_id_mapping = k+str(nMets); #unique mapping id for repeated base metabolites
                # make the tracking
                mapping,positions,elements = self.make_elementsAndPositionsTracked(met_id_mapping,v,nElements);
                positions_corrected = [currentElementPos+pos for pos in positions];
                currentElementPos += max(positions)+1;
                # add to the compound tracking
                self.metaboliteMapping['met_mapping'].append(mapping);
                self.metaboliteMapping['met_atompositions'].extend(positions_corrected);
                self.metaboliteMapping['met_elements'].extend(elements);
                self.metaboliteMapping['base_met_ids'].append(k)
                self.metaboliteMapping['base_met_elements'].append(elements)
                self.metaboliteMapping['base_met_atompositions'].append(positions)
                self.metaboliteMapping['base_met_indices'].append(met_ids_cnt[k]);
                met_ids_cnt[k]+=1; #increment the counter for this base metabolite
        if met_id_O:
            self.metaboliteMapping['met_id']=met_id_O
    def pop_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_id_O=None):
        '''Remove a base metabolite from the current metabolite: metabolites are removed FILO;
        NOTE: this can lead to problems downstream when the mapping is reconstructed from the
        base metabolites if multiple elements are used'''
        #Input:
        #   met_id_element_I = {met_id:element}
        '''Unit Test:
        '''
        met_mapping = self.metaboliteMapping['met_mapping'];
        base_met_ids = self.metaboliteMapping['base_met_ids'];
        base_met_elements = self.metaboliteMapping['base_met_elements'];
        base_met_atompositions = self.metaboliteMapping['base_met_atompositions'];
        base_met_indices = self.metaboliteMapping['base_met_indices'];
        #base_met_symmetry_elements=self.metaboliteMapping['base_met_symmetry_elements'];
        #base_met_symmetry_atompositions=self.metaboliteMapping['base_met_symmetry_atompositions'];
        met_mapping.reverse();
        base_met_ids.reverse();
        base_met_elements.reverse();
        base_met_atompositions.reverse();
        base_met_indices.reverse();
        #base_met_symmetry_elements.reverse();
        #base_met_symmetry_atompositions.reverse();
        self.metaboliteMapping['met_mapping']=[]
        self.metaboliteMapping['base_met_ids']=[]
        self.metaboliteMapping['base_met_elements']=[]
        self.metaboliteMapping['base_met_atompositions']=[]
        self.metaboliteMapping['base_met_indices']=[]
        #self.metaboliteMapping['base_met_symmetry_elements']=[]
        #self.metaboliteMapping['base_met_symmetry_atompositions']=[]
        for met_id_remove,v in met_id_element_I.items():
            removed = False
            for met_cnt,met_id in enumerate(base_met_ids):
                if met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
                    removed = True;
                else:
                    self.metaboliteMapping['met_mapping'].insert(0,met_mapping[met_cnt]);
                    self.metaboliteMapping['base_met_ids'].insert(0,base_met_ids[met_cnt]);
                    self.metaboliteMapping['base_met_elements'].insert(0,base_met_elements[met_cnt]);
                    self.metaboliteMapping['base_met_atompositions'].insert(0,base_met_atompositions[met_cnt]);
                    self.metaboliteMapping['base_met_indices'].insert(0,base_met_indices[met_cnt])
                    #self.metaboliteMapping['base_met_symmetry_elements'].insert(0,base_met_symmetry_elements[met_cnt]);
                    #self.metaboliteMapping['base_met_symmetry_atompositions'].insert(0,base_met_symmetry_atompositions[met_cnt]);
        '''v1: removes ALL base metabolites that match the met_id'''
        #for met_id_remove in met_ids_I:
        #    for met_cnt,met_id in enumerate(base_met_ids):
        #        if met_id_remove != met_id:
        #            self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
        #            self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
        #            self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
        #            self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
        if met_id_O:
            self.metaboliteMapping['met_id']=met_id_O
        self.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
    def remove_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_id_O=None,met_index_I=None):
        '''Remove a base metabolite from the current metabolite:
        metabolites are removed FIFO if the index is not specified;'''
        #Input:
        #   met_id_element_I = {met_id:element}
        '''Unit Test:'''
        met_mapping = self.metaboliteMapping['met_mapping'];
        base_met_ids = self.metaboliteMapping['base_met_ids'];
        base_met_elements = self.metaboliteMapping['base_met_elements'];
        base_met_atompositions = self.metaboliteMapping['base_met_atompositions'];
        base_met_indices = self.metaboliteMapping['base_met_indices'];
        #base_met_symmetry_elements=self.metaboliteMapping['base_met_symmetry_elements'];
        #base_met_symmetry_atompositions=self.metaboliteMapping['base_met_symmetry_atompositions'];
        self.metaboliteMapping['met_mapping']=[]
        self.metaboliteMapping['base_met_ids']=[]
        self.metaboliteMapping['base_met_elements']=[]
        self.metaboliteMapping['base_met_atompositions']=[]
        self.metaboliteMapping['base_met_indices']=[]
        #self.metaboliteMapping['base_met_symmetry_elements']=[]
        #self.metaboliteMapping['base_met_symmetry_atompositions']=[]
        for met_id_remove,v in met_id_element_I.items():
            removed = False
            for met_cnt,met_id in enumerate(base_met_ids):
                if met_index_I:
                    if met_index_I == base_met_indices[met_cnt] and met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
                        removed = True
                    else:
                        self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
                        self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
                        self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
                        self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
                        self.metaboliteMapping['base_met_indices'].append(base_met_indices[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
                else:
                    if met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
                        removed = True
                    else:
                        self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
                        self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
                        self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
                        self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
                        self.metaboliteMapping['base_met_indices'].append(base_met_indices[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
        '''v1: removes ALL base metabolites that match the met_id'''
        #for met_id_remove in met_ids_I:
        #    for met_cnt,met_id in enumerate(base_met_ids):
        #        if met_id_remove != met_id:
        #            self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
        #            self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
        #            self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
        #            self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
        if met_id_O:
            self.metaboliteMapping['met_id']=met_id_O
        self.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
    def extract_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_index_I=None):
        '''Return a base metabolite from the current metabolite: metabolites are returned FIFO'''
        base_metaboliteMapping = stage02_isotopomer_metaboliteMapping();
        base_met_ids = self.metaboliteMapping['base_met_ids'];
        met_id_remove = {};
        met_index = None
        for k,v in met_id_element_I.items():
            for met_cnt,met_id in enumerate(base_met_ids):
                if met_index_I:
                    if met_index_I == self.metaboliteMapping['base_met_indices'][met_cnt] and k == met_id and v==self.metaboliteMapping['base_met_elements'][met_cnt][0]:
                        met_id_remove = {k:self.metaboliteMapping['base_met_elements'][met_cnt][0]};
                        met_index = met_index_I;
                        break;
                else:
                    if k == met_id and v==self.metaboliteMapping['base_met_elements'][met_cnt][0]:
                        met_id_remove = {k:self.metaboliteMapping['base_met_elements'][met_cnt][0]};
                        met_index = self.metaboliteMapping['base_met_indices'][met_cnt]
                        break;
        base_metaboliteMapping.make_trackedMetabolite(self.metaboliteMapping['mapping_id'],model_id_I,met_id_remove,met_index);
        return base_metaboliteMapping
    def update_trackedMetabolite_fromBaseMetabolites(self,model_id_I):
        '''update mapping, elements, and atompositions from base metabolites;
        NOTE: issues may arise in the number assigned to each metabolite if multiple elements are used'''
        # get unique met_ids
        met_ids_unique = list(set(self.metaboliteMapping['base_met_ids']))
        met_ids_cnt = {};
        met_ids_elements = {};
        for met_id in met_ids_unique:
            met_ids_cnt[met_id] = 0;
            met_ids_elements[met_id] = [];
        # make the input structure
        met_ids_elements_I = [];
        for met_id_cnt,met_id in enumerate(self.metaboliteMapping['base_met_ids']):
            met_ids_elements_I.append({met_id:self.metaboliteMapping['base_met_elements'][met_id_cnt][0]})
        self.make_compoundTrackedMetabolite(self.metaboliteMapping['mapping_id'],model_id_I,met_ids_elements_I,self.metaboliteMapping['met_id'],self.metaboliteMapping['base_met_indices'])
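    # Usage sketch of the extract/remove pairing (the same pattern used in
    # make_trackedCompoundReaction further below): pull a base metabolite out
    # of a compound metabolite, then delete it from the remaining mapping.
    #base_mm = imm.extract_baseMetabolite_fromMetabolite('140407_iDM2014',{'ac_c':'C'});
    #imm.remove_baseMetabolite_fromMetabolite('140407_iDM2014',{'ac_c':'C'},met_id_O='uacgam_c');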
    def make_newMetaboliteMapping(self):
        '''Make a new mapping for the metabolite that switches out the names of the base metabolites for the current metabolite'''
        mapping_O= [];
        elements = list(set(self.metaboliteMapping['met_elements']))
        element_cnt = {};
        for element in elements:
            element_cnt[element] = 0;
        for met_element in self.metaboliteMapping['met_elements']:
            mapping = '[' + self.metaboliteMapping['met_id'].replace('.','_') + '_' + met_element + str(element_cnt[met_element]) + ']';
            mapping_O.append(mapping);
            element_cnt[met_element]+=1
        return mapping_O
    def make_defaultBaseMetabolites(self):
        '''Add default base metabolites to the metabolite'''
        self.metaboliteMapping['base_met_ids']=[];
        self.metaboliteMapping['base_met_elements']=[];
        self.metaboliteMapping['base_met_atompositions']=[];
        self.metaboliteMapping['base_met_symmetry_elements']=[];
        self.metaboliteMapping['base_met_symmetry_atompositions']=[];
        self.metaboliteMapping['base_met_indices']=[];
        compartment = self.metaboliteMapping['met_id'].split('_')[-1]
        for cnt,element in enumerate(self.metaboliteMapping['met_elements']):
            if element == 'C':
                self.metaboliteMapping['base_met_ids'].append('co2'+'_'+compartment);
                self.metaboliteMapping['base_met_elements'].append([element]);
                self.metaboliteMapping['base_met_atompositions'].append([0]);
                self.metaboliteMapping['base_met_indices'].append(cnt);
            elif element == 'H':
                self.metaboliteMapping['base_met_ids'].append('oh'+'_'+compartment); #default base metabolite for hydrogen is oh (see the class docstring)
                self.metaboliteMapping['base_met_elements'].append([element]);
                self.metaboliteMapping['base_met_atompositions'].append([0]);
                self.metaboliteMapping['base_met_indices'].append(cnt);
            else:
                print("element not yet supported")
    def convert_arrayMapping2StringMapping(self):
        '''Convert an array representation of a mapping to a string representation'''
        arrayMapping = self.metaboliteMapping['met_mapping']
        stringMapping = ''
        for mapping in self.metaboliteMapping['met_mapping']:
            stringMapping+=''.join(mapping)
        return stringMapping;
    def convert_stringMapping2ArrayMapping(self):
        '''Convert a string representation of a mapping to an array representation'''
        stringMapping = self.metaboliteMapping['met_mapping']
        if '[' in self.metaboliteMapping['met_mapping']:
            stringMapping = self.metaboliteMapping['met_mapping'].split('][');
            stringMapping = [m.replace('[','') for m in stringMapping];
            stringMapping = [m.replace(']','') for m in stringMapping];
        else:
            stringMapping = [m for m in stringMapping];
        # add in '[]'
        arrayMapping = [];
        for m in stringMapping:
            arrayMapping.append('['+m+']')
        return arrayMapping;
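    # Round-trip example of the two converters above: with
    # met_mapping = ['[fdp0_C0]','[fdp0_C1]'], convert_arrayMapping2StringMapping()
    # yields '[fdp0_C0][fdp0_C1]', and convert_stringMapping2ArrayMapping()
    # recovers ['[fdp0_C0]','[fdp0_C1]'] from that string.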
    def add_metaboliteMapping(self,
                              mapping_id_I=None,
                              met_id_I=None,
                              met_elements_I=None,
                              met_atompositions_I=None,
                              met_symmetry_elements_I=None,
                              met_symmetry_atompositions_I=None,
                              used__I=True,
                              comment__I=None):
        '''Add tracked metabolite to the database'''
        if mapping_id_I: self.metaboliteMapping['mapping_id']=mapping_id_I;
        if met_id_I: self.metaboliteMapping['met_id']=met_id_I;
        if met_elements_I: self.metaboliteMapping['met_elements']=met_elements_I;
        if met_atompositions_I: self.metaboliteMapping['met_atompositions']=met_atompositions_I;
        if met_symmetry_elements_I: self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
        if met_symmetry_atompositions_I: self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
        if used__I: self.metaboliteMapping['used_']=used__I;
        if comment__I: self.metaboliteMapping['comment_']=comment__I;
        #add data to the database
        #row = None;
        #row = data_stage02_isotopomer_atomMappingMetabolites(self.metaboliteMapping['mapping_id'],
        #    self.metaboliteMapping['met_id'],
        #    self.metaboliteMapping['met_elements'],
        #    self.metaboliteMapping['met_atompositions'],
        #    self.metaboliteMapping['met_symmetry_elements'],
        #    self.metaboliteMapping['met_symmetry_atompositions'],
        #    self.metaboliteMapping['used_'],
        #    self.metaboliteMapping['comment_'],
        #    self.make_newMetaboliteMapping(),
        #    self.metaboliteMapping['base_met_ids'],
        #    self.metaboliteMapping['base_met_elements'],
        #    self.metaboliteMapping['base_met_atompositions'],
        #    self.metaboliteMapping['base_met_symmetry_elements'],
        #    self.metaboliteMapping['base_met_symmetry_atompositions'],
        #    self.metaboliteMapping['base_met_indices']);
        #self.session.add(row);
        #self.session.commit();
        data = self.metaboliteMapping;
        data['met_mapping'] = self.make_newMetaboliteMapping();
        self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites([data]);
    def update_metaboliteMapping(self,
                                 mapping_id_I=None,
                                 met_id_I=None,
                                 met_elements_I=None,
                                 met_atompositions_I=None,
                                 met_symmetry_elements_I=None,
                                 met_symmetry_atompositions_I=None,
                                 used__I=True,
                                 comment__I=None):
        '''Update tracked metabolite in the database'''
        if mapping_id_I: self.metaboliteMapping['mapping_id']=mapping_id_I;
        if met_id_I: self.metaboliteMapping['met_id']=met_id_I;
        if met_elements_I: self.metaboliteMapping['met_elements']=met_elements_I;
        if met_atompositions_I: self.metaboliteMapping['met_atompositions']=met_atompositions_I;
        if met_symmetry_elements_I: self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
        if met_symmetry_atompositions_I: self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
        if used__I: self.metaboliteMapping['used_']=used__I;
        if comment__I: self.metaboliteMapping['comment_']=comment__I;
        self.metaboliteMapping['met_mapping']=self.make_newMetaboliteMapping()
        #update the data in the database
        self.stage02_isotopomer_query.update_rows_dataStage02IsotopomerAtomMappingMetabolites([self.metaboliteMapping]);
    def get_metaboliteMapping(self,mapping_id_I,met_id_I):
        '''Get tracked metabolite from the database'''
        row = {}
        row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,met_id_I);
        self.metaboliteMapping=row;
    def get_baseMetabolites(self):
        '''Get base metabolites from the database for the current metabolite'''
        row = {}
        row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.metaboliteMapping['mapping_id'],self.metaboliteMapping['met_id']);
        self.metaboliteMapping['base_met_ids']=row['base_met_ids'];
        self.metaboliteMapping['base_met_elements']=row['base_met_elements']
        self.metaboliteMapping['base_met_atompositions']=row['base_met_atompositions']
        self.metaboliteMapping['base_met_symmetry_elements']=row['base_met_symmetry_elements']
        self.metaboliteMapping['base_met_symmetry_atompositions']=row['base_met_symmetry_atompositions']
        ## if the current base_met_indices are already set, add to them
        ## NOTE: works only if the base metabolite is also the current metabolite
        #if len(self.metaboliteMapping['base_met_indices'])==1:
        #    currentIndex = self.metaboliteMapping['base_met_indices'][0]
        #    self.metaboliteMapping['base_met_indices'] = [currentIndex + i for i in row['base_met_indices']];
        ## else ensure that all met_id/base_met_index pairs are unique
        #else:
        #    self.metaboliteMapping['base_met_indices']=row['base_met_indices']
        self.metaboliteMapping['base_met_indices']=row['base_met_indices']
    def clear_metaboliteMapping(self):
        self.metaboliteMapping={};
        self.metaboliteMapping['mapping_id']=None;
        #self.metaboliteMapping['met_name']=None;
        self.metaboliteMapping['met_id']=None;
        #self.metaboliteMapping['formula']=None;
        self.metaboliteMapping['met_elements']=None;
        self.metaboliteMapping['met_atompositions']=None;
        self.metaboliteMapping['met_symmetry_elements']=None;
        self.metaboliteMapping['met_symmetry_atompositions']=None;
        self.metaboliteMapping['used_']=True;
        self.metaboliteMapping['comment_']=None;
        self.metaboliteMapping['met_mapping']=None;
        self.metaboliteMapping['base_met_ids']=None;
        self.metaboliteMapping['base_met_elements']=None;
        self.metaboliteMapping['base_met_atompositions']=None;
        self.metaboliteMapping['base_met_symmetry_elements']=None;
        self.metaboliteMapping['base_met_symmetry_atompositions']=None;
        self.metaboliteMapping['base_met_indices']=None;
    def make_symmetric(self,met_symmetry_elements_I=[],met_symmetry_atompositions_I=[]):
        '''Make the current metabolite symmetric
        default = 180 degree symmetry'''
        if met_symmetry_elements_I and met_symmetry_atompositions_I:
            self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
            self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
        else:
            self.metaboliteMapping['met_symmetry_elements']=[m for m in reversed(self.metaboliteMapping['met_elements'])];
            self.metaboliteMapping['met_symmetry_atompositions']=[m for m in reversed(self.metaboliteMapping['met_atompositions'])];
    def copy_metaboliteMappingDict(self):
        '''Copy the current metabolite mapping to a new dictionary'''
        copy_metaboliteMapping = {};
        copy_metaboliteMapping['mapping_id']=self.metaboliteMapping['mapping_id']
        #copy_metaboliteMapping['met_name']=self.metaboliteMapping['met_name']
        copy_metaboliteMapping['met_id']=self.metaboliteMapping['met_id']
        #copy_metaboliteMapping['formula']=self.metaboliteMapping['formula']
        copy_metaboliteMapping['met_elements']=self.metaboliteMapping['met_elements']
        copy_metaboliteMapping['met_atompositions']=self.metaboliteMapping['met_atompositions']
        copy_metaboliteMapping['met_symmetry_elements']=self.metaboliteMapping['met_symmetry_elements']
        copy_metaboliteMapping['met_symmetry_atompositions']=self.metaboliteMapping['met_symmetry_atompositions']
        copy_metaboliteMapping['used_']=self.metaboliteMapping['used_']
        copy_metaboliteMapping['comment_']=self.metaboliteMapping['comment_']
        copy_metaboliteMapping['met_mapping']=self.metaboliteMapping['met_mapping']
        copy_metaboliteMapping['base_met_ids']=self.metaboliteMapping['base_met_ids']
        copy_metaboliteMapping['base_met_elements']=self.metaboliteMapping['base_met_elements']
        copy_metaboliteMapping['base_met_atompositions']=self.metaboliteMapping['base_met_atompositions']
        copy_metaboliteMapping['base_met_symmetry_elements']=self.metaboliteMapping['base_met_symmetry_elements']
        copy_metaboliteMapping['base_met_symmetry_atompositions']=self.metaboliteMapping['base_met_symmetry_atompositions']
        copy_metaboliteMapping['base_met_indices']=self.metaboliteMapping['base_met_indices'];
        return copy_metaboliteMapping
    def copy_metaboliteMapping(self):
        '''Copy the current metabolite mapping'''
        #NOTE: returns self (a reference), not a copy; callers wrap the result in copy() to obtain a shallow copy
        return self;
class stage02_isotopomer_reactionMapping():
    def __init__(self,
                 mapping_id_I=None,
                 rxn_id_I=None,
                 rxn_description_I=None,
                 reactants_stoichiometry_tracked_I=[],
                 products_stoichiometry_tracked_I=[],
                 reactants_ids_tracked_I=[],
                 products_ids_tracked_I=[],
                 reactants_elements_tracked_I=[],
                 products_elements_tracked_I=[],
                 reactants_positions_tracked_I=[],
                 products_positions_tracked_I=[],
                 reactants_mapping_I=[],
                 products_mapping_I=[],
                 rxn_equation_I=None,
                 used__I=None,
                 comment__I=None,
                 reactants_metaboliteMappings_I=[],
                 products_metaboliteMappings_I=[]):
        #self.session = Session();
        self.stage02_isotopomer_query = stage02_isotopomer_query();
        self.calculate = base_calculate();
        self.reactionMapping={}
        self.reactionMapping['mapping_id']=mapping_id_I
        self.reactionMapping['rxn_id']=rxn_id_I
        self.reactionMapping['rxn_description']=rxn_description_I
        self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
        self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
        self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
        self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
        self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
        self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
        self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
        self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
        self.reactionMapping['reactants_mapping']=reactants_mapping_I
        self.reactionMapping['products_mapping']=products_mapping_I
        self.reactionMapping['rxn_equation']=rxn_equation_I
        self.reactionMapping['used_']=used__I
        self.reactionMapping['comment_']=comment__I
        self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_I
        self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_I
        self.reactants_base_met_ids=[];
        self.reactants_base_met_elements=[];
        self.reactants_base_met_atompositions=[];
        self.reactants_base_met_symmetry_elements=[];
        self.reactants_base_met_symmetry_atompositions=[];
        self.reactants_base_met_indices=[];
        self.products_base_met_ids=[];
        self.products_base_met_elements=[];
        self.products_base_met_atompositions=[];
        self.products_base_met_symmetry_elements=[];
        self.products_base_met_symmetry_atompositions=[];
        self.products_base_met_indices=[];
    def make_trackedCompoundReaction_fromRow(self,mapping_id_I,model_id_I,rxn_id_I,
                                             rxn_description_I=None,
                                             reactants_stoichiometry_tracked_I=[],
                                             products_stoichiometry_tracked_I=[],
                                             reactants_ids_tracked_I=[],
                                             products_ids_tracked_I=[],
                                             reactants_mapping_I=[],
                                             products_mapping_I=[],
                                             rxn_equation_I=None,
                                             used__I=True,
                                             comment__I=None):
        irm = stage02_isotopomer_reactionMapping(
            mapping_id_I=mapping_id_I,
            rxn_id_I=rxn_id_I,
            rxn_description_I=rxn_id_I, #NOTE: the rxn_id is used as the description; the rxn_description_I argument is currently unused
            reactants_stoichiometry_tracked_I=reactants_stoichiometry_tracked_I,
            products_stoichiometry_tracked_I=products_stoichiometry_tracked_I,
            reactants_ids_tracked_I=reactants_ids_tracked_I,
            products_ids_tracked_I=products_ids_tracked_I,
            reactants_mapping_I=reactants_mapping_I,
            products_mapping_I=products_mapping_I,
            rxn_equation_I=rxn_equation_I,
            used__I=used__I,
            comment__I=comment__I);
        irm.reactionMapping['reactants_elements_tracked']=None;
        irm.reactionMapping['reactants_positions_tracked']=None;
        irm.reactionMapping['products_elements_tracked']=None;
        irm.reactionMapping['products_positions_tracked']=None;
        irm.checkAndCorrect_elementsAndPositions();
        self.reactionMapping['mapping_id']=irm.reactionMapping['mapping_id']
        self.reactionMapping['rxn_id']=irm.reactionMapping['rxn_id']
        self.reactionMapping['rxn_description']=irm.reactionMapping['rxn_description']
        self.reactionMapping['rxn_equation']=irm.reactionMapping['rxn_equation']
        self.reactionMapping['used_']=irm.reactionMapping['used_']
        self.reactionMapping['comment_']=irm.reactionMapping['comment_']
        for reactant_id_cnt,reactant_id in enumerate(irm.reactionMapping['reactants_ids_tracked']):
            self.reactionMapping['reactants_stoichiometry_tracked'].append(irm.reactionMapping['reactants_stoichiometry_tracked'][reactant_id_cnt])
            self.reactionMapping['reactants_ids_tracked'].append(irm.reactionMapping['reactants_ids_tracked'][reactant_id_cnt])
            self.reactionMapping['reactants_elements_tracked'].append(irm.reactionMapping['reactants_elements_tracked'][reactant_id_cnt])
            self.reactionMapping['reactants_positions_tracked'].append(irm.reactionMapping['reactants_positions_tracked'][reactant_id_cnt])
            self.reactionMapping['reactants_mapping'].append(irm.reactionMapping['reactants_mapping'][reactant_id_cnt])
        for product_id_cnt,product_id in enumerate(irm.reactionMapping['products_ids_tracked']):
            self.reactionMapping['products_stoichiometry_tracked'].append(irm.reactionMapping['products_stoichiometry_tracked'][product_id_cnt])
            self.reactionMapping['products_ids_tracked'].append(irm.reactionMapping['products_ids_tracked'][product_id_cnt])
            self.reactionMapping['products_elements_tracked'].append(irm.reactionMapping['products_elements_tracked'][product_id_cnt])
            self.reactionMapping['products_positions_tracked'].append(irm.reactionMapping['products_positions_tracked'][product_id_cnt])
            self.reactionMapping['products_mapping'].append(irm.reactionMapping['products_mapping'][product_id_cnt])
        self.make_reactantsAndProductsMetaboliteMappings(reactionMapping_I=irm.reactionMapping);
    def make_trackedBinaryReaction(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,product_id_I):
        '''Make a binary reaction of the form A + B + ... = C'''
        #Input:
        #   reactant_ids_elements_I = [{met_id:element},...]
        #   (a future form could be [{met_id:{'elements':[string,...],'stoichiometry':float}},...])
        #   product_id_I = met_id
        #   e.g. met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}]
        #   e.g. irm.make_trackedBinaryReaction('full04','140407_iDM2014','rxn01',met_ids_elements_I,'uacgam_c')
        imm = stage02_isotopomer_metaboliteMapping();
        # get unique met_ids
        reactant_ids_all = [];
        for row in reactant_ids_elements_I:
            for k,v in row.items():
                reactant_ids_all.append(k);
        reactant_ids_unique = list(set(reactant_ids_all))
        reactant_ids_cnt = {};
        for reactant_id in reactant_ids_unique:
            reactant_ids_cnt[reactant_id] = 0;
        # make the reactants mapping
        reactants_stoichiometry_tracked_O = [];
        reactants_ids_tracked_O = [];
        reactants_elements_tracked_O = [];
        reactants_positions_tracked_O = [];
        reactants_mapping_O = [];
        reactants_metaboliteMappings_O = [];
        for row in reactant_ids_elements_I:
            for k,v in row.items():
                imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
                reactants_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
                reactants_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
                reactants_mapping_O.append(imm.convert_arrayMapping2StringMapping());
                reactants_stoichiometry_tracked_O.append(-1.0);
                reactants_ids_tracked_O.append(k);
                reactants_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
                imm.clear_metaboliteMapping()
                reactant_ids_cnt[k]+=1
        # make the products mapping
        products_stoichiometry_tracked_O = [];
        products_ids_tracked_O = [];
        products_elements_tracked_O = [];
        products_positions_tracked_O = [];
        products_mapping_O = [];
        products_metaboliteMappings_O = [];
        if product_id_I:
            imm.make_compoundTrackedMetabolite(mapping_id_I,model_id_I,reactant_ids_elements_I,product_id_I);
            products_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
            products_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
            products_mapping_O.append(imm.convert_arrayMapping2StringMapping());
            products_stoichiometry_tracked_O.append(1.0);
            products_ids_tracked_O.append(product_id_I);
            products_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
        # save the reaction
        self.reactionMapping['mapping_id']=mapping_id_I
        self.reactionMapping['rxn_id']=rxn_id_I
        self.reactionMapping['rxn_description']=None
        self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_O
        self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_O
        self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_O
        self.reactionMapping['products_ids_tracked']=products_ids_tracked_O
        self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_O
        self.reactionMapping['products_elements_tracked']=products_elements_tracked_O
        self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_O
        self.reactionMapping['products_positions_tracked']=products_positions_tracked_O
        self.reactionMapping['reactants_mapping']=reactants_mapping_O
        self.reactionMapping['products_mapping']=products_mapping_O
        self.reactionMapping['rxn_equation']=None
        self.reactionMapping['used_']=True
        self.reactionMapping['comment_']=None
        self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_O
        self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_O
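    # Usage sketch (from the docstring example above): a lumped A + B + ... = C
    # reaction whose product inherits the reactant atoms in order.
    #irm = stage02_isotopomer_reactionMapping();
    #met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}];
    #irm.make_trackedBinaryReaction('full04','140407_iDM2014','rxn01',met_ids_elements_I,'uacgam_c');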
    def make_trackedCompoundReaction(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O):
        '''Make a compound tracked reaction
        1. make compound product
        2. remove specified base products from compound product
        3. update the compound product
        4. rename the base products
        5. append base products to products list'''
        #Input:
        #   reactant_ids_elements_I = [{met_id:elements},...]
        #   base_reactant_positions_I = [{met_id_reactant:position},...] #Note: must be listed in order (positions of the reactant to be partitioned)
        #   base_reactant_indices_I = [{met_id_product:position in base_reactants_ids},...] #Note: must be listed in order (positions of the reactant to be partitioned)
        #       the index refers to the position of the base met_id in the reactant to be partitioned
        #   compound_product_id_I = met_id
        #   base_product_ids_elements_I = [{met_id:elements},...] #Note: must be listed in order
        #   base_product_ids_O = [met_id_new,...] #Note: must be listed in order
        imm = stage02_isotopomer_metaboliteMapping();
        imm_product = stage02_isotopomer_metaboliteMapping();
        # initialize the structure to track the base_met_ids
        reactant_ids_all = [];
        for k in self.reactionMapping['reactants_ids_tracked']:
            reactant_ids_all.append(k);
        reactant_ids_unique = list(set(reactant_ids_all))
        reactant_ids_cnt = {};
        for reactant_id in reactant_ids_unique:
            reactant_ids_cnt[reactant_id] = 0;
        for reactant_id in reactant_ids_all:
            reactant_ids_cnt[reactant_id]+=1;
        # initialize the count for unique base_met_ids
        reactants_base_met_ids = [];
        reactants_base_indices = [];
        for cnt,mm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
            reactants_base_met_ids.extend(mm.metaboliteMapping['base_met_ids'])
            reactants_base_indices.extend(self.reactionMapping['reactants_metaboliteMappings'][cnt].metaboliteMapping['base_met_indices'])
        reactants_base_met_ids_I = [];
        # get unique reactants_base_met_ids
        reactants_base_met_ids_unique = list(set(reactants_base_met_ids));
        reactants_base_met_ids_cnt = {};
        for base_met_id in reactants_base_met_ids_unique:
            reactants_base_met_ids_cnt[base_met_id]=0;
        for cnt,base_met_id in enumerate(reactants_base_met_ids):
            reactants_base_met_ids_cnt[base_met_id]=reactants_base_indices[cnt]+1
        # make the reactants mapping
        imm_product.metaboliteMapping['mapping_id'] = mapping_id_I
        imm_product.metaboliteMapping['base_met_ids']=[];
        imm_product.metaboliteMapping['base_met_elements']=[];
        imm_product.metaboliteMapping['base_met_atompositions']=[];
        imm_product.metaboliteMapping['base_met_symmetry_elements']=[];
        imm_product.metaboliteMapping['base_met_symmetry_atompositions']=[];
        imm_product.metaboliteMapping['base_met_indices']=[];
        # initialize the counter for the input
        matched_cnt = 0;
        for row_cnt,row in enumerate(reactant_ids_elements_I):
            for k,v in row.items():
                # initialize new metabolites
                if not k in list(reactant_ids_cnt.keys()):
                    reactant_ids_cnt[k]=0
                # make the metabolite mapping
                imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
                #update the counter for unique met_ids
                reactant_ids_cnt[k]+=1
                # update base_metabolites from the database for the reactant that will be partitioned
                base_found = False;
                if matched_cnt < len(base_reactant_positions_I):
                    for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
                        if k1 == k and row_cnt == v1:
                            imm.get_baseMetabolites();
                            imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
                            base_found = True;
                            break;
                # assign new indices for each base metabolite based on the current indices in the reactants
                base_met_indices_tmp = copy(imm.metaboliteMapping['base_met_indices']);
                for cnt1,met_id1 in enumerate(imm.metaboliteMapping['base_met_ids']):
                    # initialize new base metabolites
                    if not met_id1 in list(reactants_base_met_ids_cnt.keys()):
                        reactants_base_met_ids_cnt[met_id1]=0;
                    # assign the next current base_metabolite_index
base_metabolite_index imm.metaboliteMapping['base_met_indices'][cnt1]=reactants_base_met_ids_cnt[met_id1] # update the base_reactant_indices_I if the corresponding base_met_index was changed if matched_cnt < len(base_reactant_positions_I): for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair if k1 == k and row_cnt == v1: # does the met_id and position in the reactant list match? for k2,v2 in base_reactant_indices_I[matched_cnt].items(): if k2==met_id1 and v2==base_met_indices_tmp[cnt1]: # does the base_met_id and previous index match? base_reactant_indices_I[matched_cnt][k2]=imm.metaboliteMapping['base_met_indices'][cnt1]; reactants_base_met_ids_cnt[met_id1]+=1; # update counter for matched input if base_found: matched_cnt+=1; # update met_mapping imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I); # add in the new metaboliteMapping information self.reactionMapping['reactants_elements_tracked'].append(imm.metaboliteMapping['met_elements']); self.reactionMapping['reactants_positions_tracked'].append(imm.metaboliteMapping['met_atompositions']); self.reactionMapping['reactants_mapping'].append(imm.convert_arrayMapping2StringMapping()); self.reactionMapping['reactants_stoichiometry_tracked'].append(-1.0); self.reactionMapping['reactants_ids_tracked'].append(k); self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping())); self.reactants_base_met_ids.extend(imm.metaboliteMapping['base_met_ids']); self.reactants_base_met_elements.extend(imm.metaboliteMapping['base_met_elements']); self.reactants_base_met_atompositions.extend(imm.metaboliteMapping['base_met_atompositions']); #self.reactants_base_met_symmetry_elements.extend(imm.metaboliteMapping['base_met_symmetry_elements']); #self.reactants_base_met_symmetry_atompositions.extend(imm.metaboliteMapping['base_met_symmetry_atompositions']); self.reactants_base_met_indices.extend(imm.metaboliteMapping['base_met_indices']); # copy out all of the base information for the product imm_product.metaboliteMapping['base_met_ids'].extend(imm.metaboliteMapping['base_met_ids']); imm_product.metaboliteMapping['base_met_elements'].extend(imm.metaboliteMapping['base_met_elements']); imm_product.metaboliteMapping['base_met_atompositions'].extend(imm.metaboliteMapping['base_met_atompositions']); #imm_product.metaboliteMapping['base_met_symmetry_elements'].extend(imm.metaboliteMapping['base_met_symmetry_elements']); #imm_product.metaboliteMapping['base_met_symmetry_atompositions'].extend(imm.metaboliteMapping['base_met_symmetry_atompositions']); imm_product.metaboliteMapping['base_met_indices'].extend(imm.metaboliteMapping['base_met_indices']); # imm.clear_metaboliteMapping() # make the initial compound product mapping imm_product.update_trackedMetabolite_fromBaseMetabolites(model_id_I) imm_product.metaboliteMapping['met_id']=compound_product_id_I; # extract out the products from the compound product base_products = []; for cnt,row in enumerate(base_product_ids_elements_I): for k,v in row.items(): base_products.append(imm_product.extract_baseMetabolite_fromMetabolite(model_id_I,{k:v},base_reactant_indices_I[cnt][k])); # remove the base_products from the compound product for cnt,row in enumerate(base_product_ids_elements_I): for k,v in row.items(): imm_product.remove_baseMetabolite_fromMetabolite(model_id_I,{k:v},met_id_O=compound_product_id_I,met_index_I=base_reactant_indices_I[cnt][k]); # make the final products if compound_product_id_I: imm_final_products = [imm_product]; 
else: imm_final_products = []; for d in base_products: imm_final_products.append(d); if compound_product_id_I: imm_final_products_ids = [compound_product_id_I]; else: imm_final_products_ids = []; for id in base_product_ids_O: imm_final_products_ids.append(id); for cnt,d in enumerate(imm_final_products): self.reactionMapping['products_elements_tracked'].append(d.metaboliteMapping['met_elements']); self.reactionMapping['products_positions_tracked'].append(d.metaboliteMapping['met_atompositions']); self.reactionMapping['products_mapping'].append(d.convert_arrayMapping2StringMapping()); self.reactionMapping['products_stoichiometry_tracked'].append(1.0); self.reactionMapping['products_ids_tracked'].append(imm_final_products_ids[cnt]); self.reactionMapping['products_metaboliteMappings'].append(copy(d.copy_metaboliteMapping())); # save the reaction self.reactionMapping['mapping_id']=mapping_id_I self.reactionMapping['rxn_id']=rxn_id_I self.reactionMapping['rxn_description']=None self.reactionMapping['rxn_equation']=None self.reactionMapping['used_']=True self.reactionMapping['comment_']=None def make_trackedCompoundReaction_fromMetaboliteMappings(self,mapping_id_I,model_id_I,rxn_id_I,reactant_metaboliteMappings_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O): '''Make a compound tracked reaction 1. make compound product 2. remove specified base products from compound product 3. update the compound product 4. rename the base products 5. append base products to products list''' #Input # reactant_metaboliteMappings_I = [mm_1,mm_2,...] # base_reactant_positions_I = [{met_id_reactant:position},...] #Note: must be listed in order (positions of the reactant to be partitioned) # base_reactant_indices_I = [{met_id_product:position in base_reactants_ids},...] #Note: must be listed in order (positions of the reactant to be partitioned) # index referes to the position of the base met_id in the reactant to be partitioned # compound_product_id_I = met_id # base_product_ids_elements_I = [{met_id:elements},...] #Note: must be listed in order # base_product_ids_O = [met_id_new,...] 
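    #A minimal usage sketch (all ids and element lists below are hypothetical, not taken
    #from the database): two tracked reactants are condensed into a compound product,
    #and one base product is split back out and renamed. 'irm' is an instance of this
    #reaction-mapping class, as in the make_trackedBinaryReaction example above.
    #e.g. irm.make_trackedCompoundReaction(
    #         'full04','140407_iDM2014','rxn02',
    #         [{'f6p_c':'C'},{'gln__L_c':'C'}],   # reactant_ids_elements_I
    #         [{'f6p_c':0}],                      # base_reactant_positions_I
    #         [{'f6p_c':0}],                      # base_reactant_indices_I
    #         'gam6p_c',                          # compound_product_id_I
    #         [{'f6p_c':'C'}],                    # base_product_ids_elements_I
    #         ['glu__L_c'])                       # base_product_ids_O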
    def make_trackedCompoundReaction_fromMetaboliteMappings(self,mapping_id_I,model_id_I,rxn_id_I,reactant_metaboliteMappings_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O):
        '''Make a compound tracked reaction
        1. make compound product
        2. remove specified base products from compound product
        3. update the compound product
        4. rename the base products
        5. append base products to products list'''
        #Input:
        # reactant_metaboliteMappings_I = [mm_1,mm_2,...]
        # base_reactant_positions_I = [{met_id_reactant:position},...]
        #   Note: must be listed in order (positions of the reactant to be partitioned)
        # base_reactant_indices_I = [{met_id_product:position in base_reactants_ids},...]
        #   Note: must be listed in order (positions of the reactant to be partitioned)
        #   index refers to the position of the base met_id in the reactant to be partitioned
        # compound_product_id_I = met_id
        # base_product_ids_elements_I = [{met_id:elements},...]
        #   Note: must be listed in order
        # base_product_ids_O = [met_id_new,...]
        #   Note: must be listed in order
        imm_product = stage02_isotopomer_metaboliteMapping();
        # initialize the structure to track the base_met_ids
        reactant_ids_all = [];
        for k in self.reactionMapping['reactants_ids_tracked']:
            reactant_ids_all.append(k);
        reactant_ids_unique = list(set(reactant_ids_all))
        reactant_ids_cnt = {};
        for reactant_id in reactant_ids_unique:
            reactant_ids_cnt[reactant_id] = 0;
        for reactant_id in reactant_ids_all:
            reactant_ids_cnt[reactant_id]+=1;
        # initialize the count for unique base_met_ids
        reactants_base_met_ids = [];
        reactants_base_indices = [];
        for cnt,mm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
            reactants_base_met_ids.extend(mm.metaboliteMapping['base_met_ids'])
            reactants_base_indices.extend(self.reactionMapping['reactants_metaboliteMappings'][cnt].metaboliteMapping['base_met_indices'])
        reactants_base_met_ids_I = [];
        # get unique reactants_base_met_ids
        reactants_base_met_ids_unique = list(set(reactants_base_met_ids));
        reactants_base_met_ids_cnt = {};
        for base_met_id in reactants_base_met_ids_unique:
            reactants_base_met_ids_cnt[base_met_id]=0;
        for cnt,base_met_id in enumerate(reactants_base_met_ids):
            reactants_base_met_ids_cnt[base_met_id]=reactants_base_indices[cnt]+1
        # make the reactants mapping
        imm_product.metaboliteMapping['mapping_id'] = mapping_id_I
        imm_product.metaboliteMapping['base_met_ids']=[];
        imm_product.metaboliteMapping['base_met_elements']=[];
        imm_product.metaboliteMapping['base_met_atompositions']=[];
        imm_product.metaboliteMapping['base_met_symmetry_elements']=[];
        imm_product.metaboliteMapping['base_met_symmetry_atompositions']=[];
        imm_product.metaboliteMapping['base_met_indices']=[];
        # initialize the counter for the input
        matched_cnt = 0;
        for row_cnt,imm in enumerate(reactant_metaboliteMappings_I):
            # initialize new metabolites
            if not imm.metaboliteMapping['met_id'] in list(reactant_ids_cnt.keys()):
                reactant_ids_cnt[imm.metaboliteMapping['met_id']]=0
            # make the metabolite mapping
            # update the counter for unique met_ids
            reactant_ids_cnt[imm.metaboliteMapping['met_id']]+=1
            # update base_metabolites from the database for the reactant that will be partitioned
            base_found = False;
            if matched_cnt < len(base_reactant_positions_I):
                for k1,v1 in base_reactant_positions_I[matched_cnt].items(): # there will be only 1 key-value pair
                    if k1 == imm.metaboliteMapping['met_id'] and row_cnt == v1:
                        base_found = True;
                        break;
            # assign new indices for each base metabolite based on the current indices in the reactants
            base_met_indices_tmp = copy(imm.metaboliteMapping['base_met_indices']);
            for cnt1,met_id1 in enumerate(imm.metaboliteMapping['base_met_ids']):
                # initialize new base metabolites
                if not met_id1 in list(reactants_base_met_ids_cnt.keys()):
                    reactants_base_met_ids_cnt[met_id1]=0;
                # assign the next current base_metabolite_index
                imm.metaboliteMapping['base_met_indices'][cnt1]=reactants_base_met_ids_cnt[met_id1]
                # update the base_reactant_indices_I if the corresponding base_met_index was changed
                if matched_cnt < len(base_reactant_positions_I):
                    for k1,v1 in base_reactant_positions_I[matched_cnt].items(): # there will be only 1 key-value pair
                        if k1 == imm.metaboliteMapping['met_id'] and row_cnt == v1: # does the met_id and position in the reactant list match?
                            for k2,v2 in base_reactant_indices_I[matched_cnt].items():
                                if k2==met_id1 and v2==base_met_indices_tmp[cnt1]: # does the base_met_id and previous index match?
                                    base_reactant_indices_I[matched_cnt][k2]=imm.metaboliteMapping['base_met_indices'][cnt1];
                reactants_base_met_ids_cnt[met_id1]+=1;
            # update counter for matched input
            if base_found:
                matched_cnt+=1;
            # update met_mapping
            imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
            # add in the new metaboliteMapping information
            self.reactionMapping['reactants_elements_tracked'].append(imm.metaboliteMapping['met_elements']);
            self.reactionMapping['reactants_positions_tracked'].append(imm.metaboliteMapping['met_atompositions']);
            self.reactionMapping['reactants_mapping'].append(imm.convert_arrayMapping2StringMapping());
            self.reactionMapping['reactants_stoichiometry_tracked'].append(-1.0);
            self.reactionMapping['reactants_ids_tracked'].append(imm.metaboliteMapping['met_id']);
            self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
            self.reactants_base_met_ids.extend(imm.metaboliteMapping['base_met_ids']);
            self.reactants_base_met_elements.extend(imm.metaboliteMapping['base_met_elements']);
            self.reactants_base_met_atompositions.extend(imm.metaboliteMapping['base_met_atompositions']);
            #self.reactants_base_met_symmetry_elements.extend(imm.metaboliteMapping['base_met_symmetry_elements']);
            #self.reactants_base_met_symmetry_atompositions.extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
            self.reactants_base_met_indices.extend(imm.metaboliteMapping['base_met_indices']);
            # copy out all of the base information for the product
            imm_product.metaboliteMapping['base_met_ids'].extend(imm.metaboliteMapping['base_met_ids']);
            imm_product.metaboliteMapping['base_met_elements'].extend(imm.metaboliteMapping['base_met_elements']);
            imm_product.metaboliteMapping['base_met_atompositions'].extend(imm.metaboliteMapping['base_met_atompositions']);
            #imm_product.metaboliteMapping['base_met_symmetry_elements'].extend(imm.metaboliteMapping['base_met_symmetry_elements']);
            #imm_product.metaboliteMapping['base_met_symmetry_atompositions'].extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
            imm_product.metaboliteMapping['base_met_indices'].extend(imm.metaboliteMapping['base_met_indices']);
        # make the initial compound product mapping
        imm_product.update_trackedMetabolite_fromBaseMetabolites(model_id_I)
        imm_product.metaboliteMapping['met_id']=compound_product_id_I;
        # extract out the products from the compound product
        base_products = [];
        for cnt,row in enumerate(base_product_ids_elements_I):
            for k,v in row.items():
                base_products.append(imm_product.extract_baseMetabolite_fromMetabolite(model_id_I,{k:v},base_reactant_indices_I[cnt][k]));
        # remove the base_products from the compound product
        for cnt,row in enumerate(base_product_ids_elements_I):
            for k,v in row.items():
                imm_product.remove_baseMetabolite_fromMetabolite(model_id_I,{k:v},met_id_O=compound_product_id_I,met_index_I=base_reactant_indices_I[cnt][k]);
        # make the final products
        if compound_product_id_I:
            imm_final_products = [imm_product];
        else:
            imm_final_products = [];
        for d in base_products:
            imm_final_products.append(d);
        if compound_product_id_I:
            imm_final_products_ids = [compound_product_id_I];
        else:
            imm_final_products_ids = [];
        for id in base_product_ids_O:
            imm_final_products_ids.append(id);
        for cnt,d in enumerate(imm_final_products):
            self.reactionMapping['products_elements_tracked'].append(d.metaboliteMapping['met_elements']);
            self.reactionMapping['products_positions_tracked'].append(d.metaboliteMapping['met_atompositions']);
            self.reactionMapping['products_mapping'].append(d.convert_arrayMapping2StringMapping());
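    #A minimal usage sketch (hypothetical ids; argument shapes follow the docstring above):
    #the same partitioning as make_trackedCompoundReaction, but starting from pre-built
    #metaboliteMapping objects, e.g. the products of a previously constructed reaction:
    #e.g. mms = irm_prev.reactionMapping['products_metaboliteMappings'];
    #     irm.make_trackedCompoundReaction_fromMetaboliteMappings(
    #         'full04','140407_iDM2014','rxn03',mms,
    #         [{'gam6p_c':0}],[{'f6p_c':0}],'uacgam_c',[{'f6p_c':'C'}],['f6p_c'])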
            self.reactionMapping['products_stoichiometry_tracked'].append(1.0);
            self.reactionMapping['products_ids_tracked'].append(imm_final_products_ids[cnt]);
            self.reactionMapping['products_metaboliteMappings'].append(copy(d.copy_metaboliteMapping()));
        # save the reaction
        self.reactionMapping['mapping_id']=mapping_id_I
        self.reactionMapping['rxn_id']=rxn_id_I
        self.reactionMapping['rxn_description']=None
        self.reactionMapping['rxn_equation']=None
        self.reactionMapping['used_']=True
        self.reactionMapping['comment_']=None

    def make_trackedUnitaryReactions(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,product_ids_I):
        '''Make a unitary reaction of the form aA = bB where the coefficient a = b'''
        #Input:
        # reactant_ids_elements_I = [{met_id:elements},...]
        # product_ids_I = [met_id,...]
        # check input
        if len(reactant_ids_elements_I)!=len(product_ids_I):
            print("length of reactants_ids does not match the length of products_ids");
            return;
        imm = stage02_isotopomer_metaboliteMapping();
        # get unique met_ids
        reactant_ids_all = [];
        for row in reactant_ids_elements_I:
            for k,v in row.items():
                reactant_ids_all.append(k);
        reactant_ids_unique = list(set(reactant_ids_all))
        reactant_ids_cnt = {};
        for reactant_id in reactant_ids_unique:
            reactant_ids_cnt[reactant_id] = 0;
        # make the reactants mapping
        reactants_stoichiometry_tracked_O = [];
        reactants_ids_tracked_O = [];
        reactants_elements_tracked_O = [];
        reactants_positions_tracked_O = [];
        reactants_mapping_O = [];
        reactants_metaboliteMappings_O = [];
        for row in reactant_ids_elements_I:
            for k,v in row.items():
                imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
                reactants_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
                reactants_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
                reactants_mapping_O.append(imm.convert_arrayMapping2StringMapping());
                reactants_stoichiometry_tracked_O.append(-abs(1));
                reactants_ids_tracked_O.append(k);
                reactants_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
                imm.clear_metaboliteMapping()
                reactant_ids_cnt[k]+=1
        # make the products mapping
        products_stoichiometry_tracked_O = [];
        products_ids_tracked_O = [];
        products_elements_tracked_O = [];
        products_positions_tracked_O = [];
        products_mapping_O = [];
        products_metaboliteMappings_O = [];
        for product_cnt,product in enumerate(product_ids_I):
            products_elements_tracked_O.append(reactants_elements_tracked_O[product_cnt]);
            products_positions_tracked_O.append(reactants_positions_tracked_O[product_cnt]);
            products_mapping_O.append(reactants_mapping_O[product_cnt]);
            products_stoichiometry_tracked_O.append(abs(reactants_stoichiometry_tracked_O[product_cnt]));
            products_ids_tracked_O.append(product);
            imm_tmp = copy(reactants_metaboliteMappings_O[product_cnt].copy_metaboliteMapping());
            imm_tmp.metaboliteMapping['met_id']=product; # change the name
            products_metaboliteMappings_O.append(imm_tmp);
        # save the reaction
        self.reactionMapping['mapping_id']=mapping_id_I
        self.reactionMapping['rxn_id']=rxn_id_I
        self.reactionMapping['rxn_description']=None
        self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_O
        self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_O
        self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_O
        self.reactionMapping['products_ids_tracked']=products_ids_tracked_O
        self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_O
        self.reactionMapping['products_elements_tracked']=products_elements_tracked_O
        self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_O
        self.reactionMapping['products_positions_tracked']=products_positions_tracked_O
        self.reactionMapping['reactants_mapping']=reactants_mapping_O
        self.reactionMapping['products_mapping']=products_mapping_O
        self.reactionMapping['rxn_equation']=None
        self.reactionMapping['used_']=True
        self.reactionMapping['comment_']=None
        self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_O
        self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_O
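    #A minimal usage sketch (hypothetical ids): a transport- or isomerization-style
    #reaction where each reactant maps 1:1 onto the product at the same list position:
    #e.g. irm.make_trackedUnitaryReactions('full04','140407_iDM2014','G6P_transport',
    #         [{'g6p_e':'C'}],['g6p_c'])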
    def make_reverseReaction(self,rxn_id_I=None):
        '''Make the reverse of the current reaction'''
        forward_reactionMapping = {}
        forward_reactionMapping['mapping_id']=self.reactionMapping['mapping_id']
        forward_reactionMapping['rxn_id']=self.reactionMapping['rxn_id']
        forward_reactionMapping['rxn_description']=self.reactionMapping['rxn_description']
        forward_reactionMapping['reactants_stoichiometry_tracked']=self.reactionMapping['reactants_stoichiometry_tracked']
        forward_reactionMapping['products_stoichiometry_tracked']=self.reactionMapping['products_stoichiometry_tracked']
        forward_reactionMapping['reactants_ids_tracked']=self.reactionMapping['reactants_ids_tracked']
        forward_reactionMapping['products_ids_tracked']=self.reactionMapping['products_ids_tracked']
        forward_reactionMapping['reactants_elements_tracked']=self.reactionMapping['reactants_elements_tracked']
        forward_reactionMapping['products_elements_tracked']=self.reactionMapping['products_elements_tracked']
        forward_reactionMapping['reactants_positions_tracked']=self.reactionMapping['reactants_positions_tracked']
        forward_reactionMapping['products_positions_tracked']=self.reactionMapping['products_positions_tracked']
        forward_reactionMapping['reactants_mapping']=self.reactionMapping['reactants_mapping']
        forward_reactionMapping['products_mapping']=self.reactionMapping['products_mapping']
        forward_reactionMapping['rxn_equation']=self.reactionMapping['rxn_equation']
        forward_reactionMapping['used_']=self.reactionMapping['used_']
        forward_reactionMapping['comment_']=self.reactionMapping['comment_']
        forward_reactionMapping['reactants_metaboliteMappings']=self.reactionMapping['reactants_metaboliteMappings']
        forward_reactionMapping['products_metaboliteMappings']=self.reactionMapping['products_metaboliteMappings']
        reverse_reactionMapping = {}
        reverse_reactionMapping['mapping_id']=self.reactionMapping['mapping_id']
        if rxn_id_I:
            reverse_reactionMapping['rxn_id']=rxn_id_I
        else:
            reverse_reactionMapping['rxn_id']=self.reactionMapping['rxn_id']
        reverse_reactionMapping['rxn_description']=self.reactionMapping['rxn_description']
        reverse_reactionMapping['reactants_stoichiometry_tracked']=[-s for s in self.reactionMapping['products_stoichiometry_tracked']]
        reverse_reactionMapping['products_stoichiometry_tracked']=[-s for s in self.reactionMapping['reactants_stoichiometry_tracked']]
        reverse_reactionMapping['reactants_ids_tracked']=self.reactionMapping['products_ids_tracked']
        reverse_reactionMapping['products_ids_tracked']=self.reactionMapping['reactants_ids_tracked']
        reverse_reactionMapping['reactants_elements_tracked']=self.reactionMapping['products_elements_tracked']
        reverse_reactionMapping['products_elements_tracked']=self.reactionMapping['reactants_elements_tracked']
        reverse_reactionMapping['reactants_positions_tracked']=self.reactionMapping['products_positions_tracked']
        reverse_reactionMapping['products_positions_tracked']=self.reactionMapping['reactants_positions_tracked']
        reverse_reactionMapping['reactants_mapping']=self.reactionMapping['products_mapping']
        reverse_reactionMapping['products_mapping']=self.reactionMapping['reactants_mapping']
        reverse_reactionMapping['rxn_equation']=self.reactionMapping['rxn_equation']
        reverse_reactionMapping['used_']=self.reactionMapping['used_']
        reverse_reactionMapping['comment_']=self.reactionMapping['comment_']
        reverse_reactionMapping['reactants_metaboliteMappings']=self.reactionMapping['products_metaboliteMappings']
        reverse_reactionMapping['products_metaboliteMappings']=self.reactionMapping['reactants_metaboliteMappings']
        self.reactionMapping = reverse_reactionMapping;
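    #A minimal usage sketch (hypothetical ids): load a stored mapping, reverse the
    #reactants and products, and keep the result under a new rxn_id:
    #e.g. irm.get_reactionMapping('full04','PGI');
    #     irm.make_reverseReaction(rxn_id_I='PGI_reverse');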
    def add_reactionMapping(self, mapping_id_I=None, rxn_id_I=None, rxn_description_I=None,
            reactants_stoichiometry_tracked_I=[], products_stoichiometry_tracked_I=[],
            reactants_ids_tracked_I=[], products_ids_tracked_I=[],
            reactants_elements_tracked_I=[], products_elements_tracked_I=[],
            reactants_positions_tracked_I=[], products_positions_tracked_I=[],
            reactants_mapping_I=[], products_mapping_I=[],
            rxn_equation_I=None, used__I=None, comment__I=None):
        if mapping_id_I:
            self.reactionMapping['mapping_id']=mapping_id_I
        if rxn_id_I:
            self.reactionMapping['rxn_id']=rxn_id_I
        if rxn_description_I:
            self.reactionMapping['rxn_description']=rxn_description_I
        if reactants_stoichiometry_tracked_I:
            self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
        if products_stoichiometry_tracked_I:
            self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
        if reactants_ids_tracked_I:
            self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
        if products_ids_tracked_I:
            self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
        if reactants_elements_tracked_I:
            self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
        if products_elements_tracked_I:
            self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
        if reactants_positions_tracked_I:
            self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
        if products_positions_tracked_I:
            self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
        if reactants_mapping_I:
            self.reactionMapping['reactants_mapping']=reactants_mapping_I
        if products_mapping_I:
            self.reactionMapping['products_mapping']=products_mapping_I
        if rxn_equation_I:
            self.reactionMapping['rxn_equation']=rxn_equation_I
        if used__I:
            self.reactionMapping['used_']=used__I
        if comment__I:
            self.reactionMapping['comment_']=comment__I
        # add data to the database
        self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions([self.reactionMapping])

    def add_productMapping(self,product_ids_I):
        '''Add newly made products to the atomMappingMetabolite table for future use'''
        for product in self.reactionMapping['products_metaboliteMappings']:
            if product.metaboliteMapping['met_id'] in product_ids_I:
                product.add_metaboliteMapping();

    def update_productMapping(self,product_ids_I):
        '''Update newly made products to the atomMappingMetabolite table for future use'''
        for product in self.reactionMapping['products_metaboliteMappings']:
            if product.metaboliteMapping['met_id'] in product_ids_I:
                product.update_metaboliteMapping();
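    #A minimal usage sketch: after building self.reactionMapping in memory, persist the
    #reaction row and register any newly created product metabolites (ids hypothetical):
    #e.g. irm.add_reactionMapping();             # writes self.reactionMapping as-is
    #     irm.add_productMapping(['uacgam_c']);  # registers the new product(s)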
    def update_reactionMapping(self, mapping_id_I=None, rxn_id_I=None, rxn_description_I=None,
            reactants_stoichiometry_tracked_I=[], products_stoichiometry_tracked_I=[],
            reactants_ids_tracked_I=[], products_ids_tracked_I=[],
            reactants_elements_tracked_I=[], products_elements_tracked_I=[],
            reactants_positions_tracked_I=[], products_positions_tracked_I=[],
            reactants_mapping_I=[], products_mapping_I=[],
            rxn_equation_I=None, used__I=None, comment__I=None):
        if mapping_id_I:
            self.reactionMapping['mapping_id']=mapping_id_I
        if rxn_id_I:
            self.reactionMapping['rxn_id']=rxn_id_I
        if rxn_description_I:
            self.reactionMapping['rxn_description']=rxn_description_I
        if reactants_stoichiometry_tracked_I:
            self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
        if products_stoichiometry_tracked_I:
            self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
        if reactants_ids_tracked_I:
            self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
        if products_ids_tracked_I:
            self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
        if reactants_elements_tracked_I:
            self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
        if products_elements_tracked_I:
            self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
        if reactants_positions_tracked_I:
            self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
        if products_positions_tracked_I:
            self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
        if reactants_mapping_I:
            self.reactionMapping['reactants_mapping']=reactants_mapping_I
        if products_mapping_I:
            self.reactionMapping['products_mapping']=products_mapping_I
        if rxn_equation_I:
            self.reactionMapping['rxn_equation']=rxn_equation_I
        if used__I:
            self.reactionMapping['used_']=used__I
        if comment__I:
            self.reactionMapping['comment_']=comment__I
        self.stage02_isotopomer_query.update_rows_dataStage02IsotopomerAtomMappingReactions([self.reactionMapping]);

    def get_reactionMapping(self,mapping_id_I,rxn_id_I):
        row = {};
        row = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I,rxn_id_I);
        self.reactionMapping = row;
        self.reactionMapping['reactants_metaboliteMappings']=[]
        self.reactionMapping['products_metaboliteMappings']=[]
        self.make_reactantsAndProductsMetaboliteMappings();
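    #A minimal round-trip sketch (hypothetical ids): fetch a stored reaction mapping,
    #edit it in memory, and write the changes back:
    #e.g. irm.get_reactionMapping('full04','ENO');
    #     irm.update_reactionMapping(comment__I='curated');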
    def make_reactantsAndProductsMetaboliteMappings(self,reactionMapping_I=None):
        '''Make reactants and products metabolite mapping from atomMappingReaction information'''
        #Input:
        # reactionMapping_I = row of atomMappingReactions
        #                     default: None, use the current self
        if reactionMapping_I:
            reactionMapping_tmp = reactionMapping_I;
        else:
            reactionMapping_tmp = self.reactionMapping;
        for cnt,met in enumerate(reactionMapping_tmp['reactants_ids_tracked']):
            imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=reactionMapping_tmp['mapping_id'],
                met_id_I=met,
                met_elements_I=reactionMapping_tmp['reactants_elements_tracked'][cnt],
                met_atompositions_I=reactionMapping_tmp['reactants_positions_tracked'][cnt],
                met_symmetry_elements_I=[],
                met_symmetry_atompositions_I=[],
                used__I=True,
                comment__I=None,
                met_mapping_I=reactionMapping_tmp['reactants_mapping'][cnt],
                base_met_ids_I=[],
                base_met_elements_I=[],
                base_met_atompositions_I=[],
                base_met_symmetry_elements_I=[],
                base_met_symmetry_atompositions_I=[],
                base_met_indices_I=[]);
            self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
        for cnt,met in enumerate(reactionMapping_tmp['products_ids_tracked']):
            imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=reactionMapping_tmp['mapping_id'],
                met_id_I=met,
                met_elements_I=reactionMapping_tmp['products_elements_tracked'][cnt],
                met_atompositions_I=reactionMapping_tmp['products_positions_tracked'][cnt],
                met_symmetry_elements_I=[],
                met_symmetry_atompositions_I=[],
                used__I=True,
                comment__I=None,
                met_mapping_I=reactionMapping_tmp['products_mapping'][cnt],
                base_met_ids_I=[],
                base_met_elements_I=[],
                base_met_atompositions_I=[],
                base_met_symmetry_elements_I=[],
                base_met_symmetry_atompositions_I=[],
                base_met_indices_I=[]);
            self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));

    def clear_reactionMapping(self):
        self.reactionMapping={}
        self.reactionMapping['mapping_id']=None
        self.reactionMapping['rxn_id']=None
        self.reactionMapping['rxn_description']=None
        self.reactionMapping['reactants_stoichiometry_tracked']=[]
        self.reactionMapping['products_stoichiometry_tracked']=[]
        self.reactionMapping['reactants_ids_tracked']=[]
        self.reactionMapping['products_ids_tracked']=[]
        self.reactionMapping['reactants_elements_tracked']=[]
        self.reactionMapping['products_elements_tracked']=[]
        self.reactionMapping['reactants_positions_tracked']=[]
        self.reactionMapping['products_positions_tracked']=[]
        self.reactionMapping['reactants_mapping']=[]
        self.reactionMapping['products_mapping']=[]
        self.reactionMapping['rxn_equation']=None
        self.reactionMapping['used_']=True
        self.reactionMapping['comment_']=None
        self.reactionMapping['reactants_metaboliteMappings']=[]
        self.reactionMapping['products_metaboliteMappings']=[]
        self.reactants_base_met_ids=[];
        self.reactants_base_met_elements=[];
        self.reactants_base_met_atompositions=[];
        self.reactants_base_met_symmetry_elements=[];
        self.reactants_base_met_symmetry_atompositions=[];
        self.reactants_base_met_indices=[];
        self.products_base_met_ids=[];
        self.products_base_met_elements=[];
        self.products_base_met_atompositions=[];
        self.products_base_met_symmetry_elements=[];
        self.products_base_met_symmetry_atompositions=[];
        self.products_base_met_indices=[];
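    #For reference, a sketch of the parallel-list layout that clear_reactionMapping
    #resets; one entry per tracked reactant/product (values below are illustrative,
    #and the string mapping format is whatever convert_arrayMapping2StringMapping emits):
    #  reactionMapping['reactants_ids_tracked']           = ['g6p_c']
    #  reactionMapping['reactants_stoichiometry_tracked'] = [-1.0]
    #  reactionMapping['reactants_elements_tracked']      = [['C','C','C','C','C','C']]
    #  reactionMapping['reactants_positions_tracked']     = [[0,1,2,3,4,5]]
    #  reactionMapping['reactants_mapping']               = ['<string mapping>']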
    def checkAndCorrect_elementsAndPositions(self):
        '''Check that the reactant/product elements/positions are consistent with the reactants/products ids_tracked;
        if they are not, correct them'''
        # check that elements/positions are initialized
        if not self.reactionMapping['reactants_elements_tracked']:
            self.reactionMapping['reactants_elements_tracked']=[];
            for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
                self.reactionMapping['reactants_elements_tracked'].append([]);
        if not self.reactionMapping['reactants_positions_tracked']:
            self.reactionMapping['reactants_positions_tracked']=[];
            for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
                self.reactionMapping['reactants_positions_tracked'].append([]);
        # check that the length of the elements/positions match the length of the ids_tracked
        #TODO...
        # check each elements/positions
        for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
            # get the metabolite data from the database
            met_data = {}
            met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.reactionMapping['mapping_id'],reactant_id);
            if len(met_data['met_elements'])!=len(self.reactionMapping['reactants_elements_tracked'][cnt]):
                self.reactionMapping['reactants_elements_tracked'][cnt]=met_data['met_elements'];
            if len(met_data['met_atompositions'])!=len(self.reactionMapping['reactants_positions_tracked'][cnt]):
                self.reactionMapping['reactants_positions_tracked'][cnt]=met_data['met_atompositions'];
        # check that elements/positions are initialized
        if not self.reactionMapping['products_elements_tracked']:
            self.reactionMapping['products_elements_tracked']=[];
            for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
                self.reactionMapping['products_elements_tracked'].append([]);
        if not self.reactionMapping['products_positions_tracked']:
            self.reactionMapping['products_positions_tracked']=[];
            for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
                self.reactionMapping['products_positions_tracked'].append([]);
        # check that the length of the elements/positions match the length of the ids_tracked
        #TODO...
        # check each elements/positions
        for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
            # get the metabolite data from the database
            met_data = {}
            met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.reactionMapping['mapping_id'],product_id);
            if len(met_data['met_elements'])!=len(self.reactionMapping['products_elements_tracked'][cnt]):
                self.reactionMapping['products_elements_tracked'][cnt]=met_data['met_elements'];
            if len(met_data['met_atompositions'])!=len(self.reactionMapping['products_positions_tracked'][cnt]):
                self.reactionMapping['products_positions_tracked'][cnt]=met_data['met_atompositions'];
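    #A minimal usage sketch: after hand-editing a reaction mapping, re-sync its
    #elements/positions with the atomMappingMetabolites table (ids hypothetical):
    #e.g. irm.get_reactionMapping('full04','ENO');
    #     irm.checkAndCorrect_elementsAndPositions();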
    def add_balanceProducts(self,unbalanced_met_I=None,unbalanced_met_position_I=None,unbalanced_met_positions_tracked_I=[],make_lumped_unbalanced_met_I=False,make_unique_unbalanced_mets_I=True):
        '''Add pseudo metabolites to the product in order to elementally balance the tracked reaction'''
        #Input:
        # unbalanced_met_I = reactant_id that is not elementally balanced
        # unbalanced_met_position_I = position of the reactant_id in the reactants_list
        # unbalanced_met_positions_tracked_I = positions of the elements that are not elementally balanced
        # make_lumped_unbalanced_met_I = boolean,
        #   automatically detect mappings that are not elementally balanced and make an unbalanced product
        #   metabolite to balance all elementally unbalanced reactants
        #   NOTE: does not work if the stoichiometry of all unbalanced reactants is not 1
        # make_unique_unbalanced_mets_I = boolean,
        #   automatically detect mappings/metabolites that are not elementally balanced and make unbalanced product
        #   mappings/metabolites to balance each elementally unbalanced reactant mapping/metabolite
        if make_lumped_unbalanced_met_I:
            #TODO: check that all unbalanced reactants have a stoichiometry of 1
            balance_met = self.reactionMapping['rxn_id'] + '_' + 'balance_c' + '.balance';
            reactants_mappings = []; #list of a list
            products_mappings = []; #list
            # extract out reactants and products mappings
            for imm in self.reactionMapping['reactants_metaboliteMappings']:
                reactant_mapping=[];
                reactant_mapping = imm.convert_stringMapping2ArrayMapping();
                reactants_mappings.append(reactant_mapping);
            for imm in self.reactionMapping['products_metaboliteMappings']:
                product_mapping=[];
                product_mapping = imm.convert_stringMapping2ArrayMapping();
                products_mappings.extend(product_mapping);
            # find unbalanced reactant_mappings and
            # make the product mapping, positions, and elements
            product_mapping = [];
            product_positions_tracked = [];
            product_elements_tracked = [];
            product_cnt = 0;
            for reactant_cnt,reactants_mapping in enumerate(reactants_mappings):
                for element_cnt,reactant_mapping in enumerate(reactants_mapping):
                    if not reactant_mapping in products_mappings:
                        product_mapping.append(reactant_mapping);
                        product_elements_tracked.append(self.reactionMapping['reactants_elements_tracked'][reactant_cnt][element_cnt]);
                        product_positions_tracked.append(product_cnt);
                        product_cnt += 1;
            imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
                met_id_I=balance_met,
                met_elements_I=product_elements_tracked,
                met_atompositions_I=product_positions_tracked,
                met_symmetry_elements_I=[],
                met_symmetry_atompositions_I=[],
                used__I=True,
                comment__I=None,
                met_mapping_I=product_mapping,
                base_met_ids_I=[],
                base_met_elements_I=[],
                base_met_atompositions_I=[],
                base_met_symmetry_elements_I=[],
                base_met_symmetry_atompositions_I=[],
                base_met_indices_I=[]);
            # add balance metabolite to the products
            self.reactionMapping['products_ids_tracked'].append(balance_met);
            self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
            self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
            self.reactionMapping['products_stoichiometry_tracked'].append(1);
            self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
            self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
        elif make_unique_unbalanced_mets_I:
            products_mappings = []; #list
            # extract out products mappings
            for imm in self.reactionMapping['products_metaboliteMappings']:
                product_mapping=[];
                product_mapping = imm.convert_stringMapping2ArrayMapping();
                products_mappings.extend(product_mapping);
            # check each reactant mapping/metabolite
            for reactant_pos,imm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
                reactant_mapping=[];
                reactant_mapping = imm.convert_stringMapping2ArrayMapping();
                # find missing mappings
                product_mapping = [];
                product_positions_tracked = [];
                product_elements_tracked = [];
                balance_met = None;
                product_cnt = 0;
                for mapping_pos,mapping in enumerate(reactant_mapping):
                    if mapping not in products_mappings:
                        balance_met = self.reactionMapping['rxn_id'] + '_' + self.reactionMapping['reactants_ids_tracked'][reactant_pos] + '_' + str(reactant_pos) + '.balance';
                        product_mapping.append(mapping);
                        #product_positions_tracked.append(self.reactionMapping['reactants_positions_tracked'][reactant_pos][mapping_pos]);
                        product_positions_tracked.append(product_cnt);
                        product_elements_tracked.append(self.reactionMapping['reactants_elements_tracked'][reactant_pos][mapping_pos]);
                        product_cnt += 1;
                if balance_met:
                    imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
                        met_id_I=balance_met,
                        met_elements_I=product_elements_tracked,
                        met_atompositions_I=product_positions_tracked,
                        met_symmetry_elements_I=[],
                        met_symmetry_atompositions_I=[],
                        used__I=True,
                        comment__I=None,
                        met_mapping_I=product_mapping,
                        base_met_ids_I=[],
                        base_met_elements_I=[],
                        base_met_atompositions_I=[],
                        base_met_symmetry_elements_I=[],
                        base_met_symmetry_atompositions_I=[],
                        base_met_indices_I=[]);
                    # add balance metabolite to the products
                    self.reactionMapping['products_ids_tracked'].append(balance_met);
                    self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
                    self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
                    self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
                    self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
                    self.reactionMapping['products_stoichiometry_tracked'].append(abs(self.reactionMapping['reactants_stoichiometry_tracked'][reactant_pos]));
        # use user specifications
        else:
            # find the position of the tracked metabolite
            if unbalanced_met_I in self.reactionMapping['reactants_ids_tracked']:
                if unbalanced_met_position_I:
                    unbalanced_met_pos = unbalanced_met_position_I;
                else:
                    unbalanced_met_pos = self.reactionMapping['reactants_ids_tracked'].index(unbalanced_met_I);
                balance_met = self.reactionMapping['rxn_id'] + '_' + unbalanced_met_I + '_' + str(unbalanced_met_pos) + '.balance';
                # extract out mapping, positions, and elements
                reactant_mapping = self.reactionMapping['reactants_metaboliteMappings'][unbalanced_met_pos].convert_stringMapping2ArrayMapping();
                reactant_positions_tracked = self.reactionMapping['reactants_positions_tracked'][unbalanced_met_pos];
                reactant_elements_tracked = self.reactionMapping['reactants_elements_tracked'][unbalanced_met_pos];
                # make the product mapping, positions, and elements
                product_mapping = [];
                product_positions_tracked = [];
                product_elements_tracked = [];
                if unbalanced_met_positions_tracked_I:
                    for pos_cnt,pos in enumerate(unbalanced_met_positions_tracked_I):
                        product_mapping.append(reactant_mapping[pos]);
                        product_positions_tracked.append(pos_cnt);
                        product_elements_tracked.append(reactant_elements_tracked[pos]);
                else:
                    product_mapping=reactant_mapping
                    product_positions_tracked=reactant_positions_tracked
                    product_elements_tracked=reactant_elements_tracked
                imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
                    met_id_I=balance_met,
                    met_elements_I=product_elements_tracked,
                    met_atompositions_I=product_positions_tracked,
                    met_symmetry_elements_I=[],
                    met_symmetry_atompositions_I=[],
                    used__I=True,
                    comment__I=None,
                    met_mapping_I=product_mapping,
                    base_met_ids_I=[],
                    base_met_elements_I=[],
                    base_met_atompositions_I=[],
                    base_met_symmetry_elements_I=[],
                    base_met_symmetry_atompositions_I=[],
                    base_met_indices_I=[]);
                # add balance metabolite to the products
                self.reactionMapping['products_ids_tracked'].append(balance_met);
                self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
                self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
                self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
                self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
                self.reactionMapping['products_stoichiometry_tracked'].append(1);
            else:
                print('unbalanced metabolite not found!')
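    #A minimal usage sketch: three ways to balance a reaction that loses tracked atoms
    #(ids hypothetical):
    #e.g. irm.add_balanceProducts();  # default: one .balance met per unbalanced reactant
    #     irm.add_balanceProducts(make_lumped_unbalanced_met_I=True,
    #         make_unique_unbalanced_mets_I=False);  # single lumped .balance met
    #     irm.add_balanceProducts(unbalanced_met_I='co2_c',
    #         unbalanced_met_positions_tracked_I=[0],
    #         make_unique_unbalanced_mets_I=False);  # manual specification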
    def check_elementalBalance(self):
        '''
        1. Check that the number of elements tracked in the reactants matches the number of elements tracked in the products
        2. Check that the reactant positions tracked match the reactant elements tracked'''
        #Output:
        # reactants_positions_tracked_cnt
        # products_positions_tracked_cnt
        element_balance = True;
        #check reactants
        reactants_positions_tracked_cnt = 0;
        for reactant_cnt,reactant in enumerate(self.reactionMapping['reactants_ids_tracked']):
            print('checking reactant ' + reactant);
            # check that the reactant positions == reactant elements
            if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]):
                print('inconsistent reactants_positions and reactants_elements');
                continue;
            reactants_positions_tracked_cnt += len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt]);
        #check products
        products_positions_tracked_cnt = 0;
        for product_cnt,product in enumerate(self.reactionMapping['products_ids_tracked']):
            print('checking product ' + product);
            # check that the product positions == product elements
            if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
                print('inconsistent products_positions and products_elements');
                continue;
            products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
        #record (the two counts should be equal if the reaction is elementally balanced)
        return reactants_positions_tracked_cnt,products_positions_tracked_cnt;
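    #A minimal usage sketch: the two returned counts should be equal for a balanced mapping:
    #e.g. n_reactants,n_products = irm.check_elementalBalance();
    #     if n_reactants != n_products: print('reaction is not elementally balanced');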
    def check_reactionMapping(self):
        '''
        1. Check that the number of elements tracked in the reactants matches the number of elements tracked in the products
        2. Check that the reactant positions tracked match the reactant elements tracked
        3. Check that the mappings are 1-to-1
        4. Check that the elements/positions/mappings are of the same length
        5. Check that the stoichiometry and ids tracked are of the same length'''
        #Output:
        # reactants_positions_tracked_cnt
        # products_positions_tracked_cnt
        #checks:
        reactants_ids_stoichiometry_check = True;
        reactants_elements_positions_check = True;
        reactants_elements_mapping_check = True;
        reactants_positions_mapping_check = True;
        products_ids_stoichiometry_check = True;
        products_elements_positions_check = True;
        products_elements_mapping_check = True;
        products_positions_mapping_check = True;
        element_balance_check = True;
        mapping_check = True;
        #check reactants
        reactants_positions_tracked_cnt = 0;
        reactants_elements_tracked_cnt = 0;
        reactants_mappings_cnt = 0;
        reactants_stoichiometry_cnt = 0;
        reactants_ids_cnt = 0;
        reactants_mappings = [];
        # check that the reactant stoichiometry == reactant ids
        if len(self.reactionMapping['reactants_ids_tracked'])!=len(self.reactionMapping['reactants_stoichiometry_tracked']):
            print('inconsistent reactants_stoichiometry_tracked and reactants_ids_tracked');
            reactants_ids_stoichiometry_check = False;
        reactants_ids_cnt += len(self.reactionMapping['reactants_ids_tracked']);
        reactants_stoichiometry_cnt += len(self.reactionMapping['reactants_stoichiometry_tracked']);
        # check elemental balance
        for reactant_cnt,reactant in enumerate(self.reactionMapping['reactants_ids_tracked']):
            print('checking reactant elemental balance ' + reactant);
            reactant_mapping=[];
            reactant_mapping = self.reactionMapping['reactants_metaboliteMappings'][reactant_cnt].convert_stringMapping2ArrayMapping();
            # check that the reactant positions == reactant elements
            if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]):
                print('inconsistent reactants_positions and reactants_elements');
                reactants_elements_positions_check = False;
            # check that the reactant positions == reactant mapping
            if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(reactant_mapping):
                print('inconsistent reactants_positions and reactants_mapping');
                reactants_elements_mapping_check = False;
            # check that the reactant elements == reactant mapping
            if len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt])!=len(reactant_mapping):
                print('inconsistent reactants_elements and reactants_mapping');
                reactants_positions_mapping_check = False;
            reactants_positions_tracked_cnt += len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt]);
            reactants_elements_tracked_cnt += len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]);
            reactants_mappings_cnt += len(reactant_mapping);
            reactants_mappings.append(reactant_mapping);
        #check products
        products_positions_tracked_cnt = 0;
        products_elements_tracked_cnt = 0;
        products_mappings_cnt = 0;
        products_stoichiometry_cnt = 0;
        products_ids_cnt = 0;
        products_mappings = [];
        # check that the product stoichiometry == product ids
        if len(self.reactionMapping['products_ids_tracked'])!=len(self.reactionMapping['products_stoichiometry_tracked']):
            print('inconsistent products_stoichiometry_tracked and products_ids_tracked');
            products_ids_stoichiometry_check = False;
        products_ids_cnt += len(self.reactionMapping['products_ids_tracked']);
        products_stoichiometry_cnt += len(self.reactionMapping['products_stoichiometry_tracked']);
        # check elemental balance
        for product_cnt,product in enumerate(self.reactionMapping['products_ids_tracked']):
            print('checking product elemental balance ' + product);
            product_mapping=[];
            product_mapping = self.reactionMapping['products_metaboliteMappings'][product_cnt].convert_stringMapping2ArrayMapping();
            # check that the product positions == product elements
            if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
                print('inconsistent products_positions and products_elements');
                products_elements_positions_check = False;
            # check that the product positions == product mapping
            if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(product_mapping):
                print('inconsistent products_positions and products_mapping');
                products_elements_mapping_check = False;
            # check that the product elements == product mapping
            if len(self.reactionMapping['products_elements_tracked'][product_cnt])!=len(product_mapping):
                print('inconsistent products_elements and products_mapping');
                products_positions_mapping_check = False;
            products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
            products_elements_tracked_cnt += len(self.reactionMapping['products_elements_tracked'][product_cnt]);
            products_mappings_cnt += len(product_mapping);
            products_mappings.append(product_mapping);
        #check elemental balance
        if reactants_positions_tracked_cnt != products_positions_tracked_cnt:
            print('the length of reactants_positions_tracked does not match the length of products_positions_tracked');
            element_balance_check = False;
        if reactants_elements_tracked_cnt != products_elements_tracked_cnt:
            print('the length of reactants_elements_tracked does not match the length of products_elements_tracked');
            element_balance_check = False;
        if reactants_mappings_cnt != products_mappings_cnt:
            print('the length of reactants_mapping does not match the length of products_mapping');
            element_balance_check = False;
        #check 1-to-1 mapping
        reactants_mappings_list = [];
        for reactants_mapping in reactants_mappings:
            reactants_mappings_list.extend(reactants_mapping);
        # check for duplicate reactant mappings
        reactants_mappings_unique = list(set(reactants_mappings_list));
        if len(reactants_mappings_list)!=len(reactants_mappings_unique):
            print('duplicate reactants_mappings found');
            mapping_check = False;
        products_mappings_list = [];
        for products_mapping in products_mappings:
            products_mappings_list.extend(products_mapping);
        # check for duplicate product mappings
        products_mappings_unique = list(set(products_mappings_list));
        if len(products_mappings_list)!=len(products_mappings_unique):
            print('duplicate products_mappings found');
            mapping_check = False;
        # check that each product mapping has a matching reactant mapping, and vice versa
        for reactant_cnt,reactant in enumerate(reactants_mappings):
            print('checking reactant mapping ' + self.reactionMapping['reactants_ids_tracked'][reactant_cnt]);
            for mapping_cnt,mapping in enumerate(reactant):
                if not mapping in products_mappings_list:
                    print('no mapping found for reactant mapping ' + mapping + ' and position ' + str(mapping_cnt));
                    mapping_check = False;
        for product_cnt,product in enumerate(products_mappings):
            print('checking product mapping ' + self.reactionMapping['products_ids_tracked'][product_cnt]);
            for mapping_cnt,mapping in enumerate(product):
                if not mapping in reactants_mappings_list:
                    print('no mapping found for product mapping ' + mapping + ' and position ' + str(mapping_cnt));
                    mapping_check = False;
        if not element_balance_check or not mapping_check:
            print('check reaction mapping');
        return reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\
            products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\
            element_balance_check,mapping_check;

    def clear_elementsAndPositions(self):
        '''Clear the reactants/products elements/positions'''
        self.reactionMapping['reactants_elements_tracked']=None;
        self.reactionMapping['reactants_positions_tracked']=None;
        self.reactionMapping['products_elements_tracked']=None;
        self.reactionMapping['products_positions_tracked']=None;
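    #A minimal usage sketch: check_reactionMapping returns ten booleans; all True means
    #the mapping passed every consistency check:
    #e.g. checks = irm.check_reactionMapping();
    #     if not all(checks): print('curation required');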
class stage02_isotopomer_mappingUtilities():
    def __init__(self):
        self.stage02_isotopomer_query = stage02_isotopomer_query();

    def make_missingMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
        '''Make atom mapping metabolites from atom mapping reactions, QC atom mapping reactions;
        and create a new set of metabolite mappings that correspond to the current reaction mappings that need to be QC/QA'd'''
        #Input:
        # experiment_id_I = experiment_id
        # model_id_I = model_id
        # mapping_id_rxns_I = reaction mapping id (default atomMappingMetabolite mapping id to add new metabolites to)
        # mapping_id_mets_I = existing metabolite mappings to use when making the new metabolite mappings
        # mapping_id_new_I = name of mapping id for the new metabolite mappings
        #Output:
        # default: new metabolite mappings will be added for the mapping id of the reactions;
        #          existing metabolite mappings will not be added
        # mapping_id_new_I != None: new metabolite mappings will be added for the mapping id specified
        #get model ids:
        if model_id_I:
            model_ids = model_id_I;
        else:
            model_ids = [];
            model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
        for model_id in model_ids:
            #get mapping ids
            if mapping_id_rxns_I and mapping_id_mets_I:
                mapping_ids_rxns=mapping_id_rxns_I;
                mapping_ids_mets=mapping_id_mets_I;
            elif mapping_id_rxns_I:
                mapping_ids_rxns=mapping_id_rxns_I;
            else:
                mapping_ids_rxns=[];
                mapping_ids_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
            for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
                # get the metabolite mappings
                if mapping_id_rxns_I and mapping_id_mets_I:
                    mappings=self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactionsAndAtomMappingMetabolites(mapping_id_rxns,mapping_ids_mets[mapping_cnt]);
                else:
                    mappings = self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns);
                # remove duplicates
                duplicate_ind = [];
                for d1_cnt,d1 in enumerate(mappings):
                    for d2_cnt in range(d1_cnt+1,len(mappings)):
                        if d1['mapping_id'] == mappings[d2_cnt]['mapping_id'] and \
                            d1['met_id'] == mappings[d2_cnt]['met_id'] and \
                            d1['met_elements'] == mappings[d2_cnt]['met_elements'] and \
                            d1['met_atompositions'] == mappings[d2_cnt]['met_atompositions'] and \
                            d1['met_symmetry_elements'] == mappings[d2_cnt]['met_symmetry_elements'] and \
                            d1['met_symmetry_atompositions'] == mappings[d2_cnt]['met_symmetry_atompositions']:
                            duplicate_ind.append(d2_cnt);
                duplicate_ind_unique=list(set(duplicate_ind));
                # copy out unique metabolites
                data_O = [];
                for d1_cnt,d1 in enumerate(mappings):
                    if d1_cnt in duplicate_ind_unique:
                        continue;
                    else:
                        if mapping_id_new_I:
                            d1['mapping_id']=mapping_id_new_I; # change to the new mapping
                        data_O.append(d1);
                met_ids = [x['met_id'] for x in data_O];
                met_ids_unique = list(set(met_ids));
                data_mets_cnt = {};
                for met in met_ids_unique:
                    data_mets_cnt[met] = 0;
                for d in data_O:
                    data_mets_cnt[d['met_id']] += 1;
                # add data to the database
                if mapping_id_new_I:
                    self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_O);
                else:
                    data_add_O = [];
                    for d in data_O:
                        # check to see if the metabolite is already in the database
                        mapping_row = {};
                        mapping_row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,d['met_id']);
                        if not mapping_row:
                            data_add_O.append(d);
                    self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_add_O);
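    #A minimal usage sketch (experiment/model/mapping ids are hypothetical): populate
    #atomMappingMetabolites for all metabolites referenced by the 'full04' reaction
    #mappings, writing the result under a new mapping id:
    #e.g. imu = stage02_isotopomer_mappingUtilities();
    #     imu.make_missingMetaboliteMappings('chemostat_O2',model_id_I=['140407_iDM2014'],
    #         mapping_id_rxns_I=['full04'],mapping_id_new_I='full05');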
    def make_missingReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
        '''Update missing or incomplete reaction mappings for the current mapping from the matching metabolite mappings,
        and optionally, from the previous reaction mappings'''
        #Note: prior to running, remove all reaction mappings that are not used.
        imm = stage02_isotopomer_metaboliteMapping();
        data_O = [];
        #get model ids:
        if model_id_I:
            model_ids = model_id_I;
        else:
            model_ids = [];
            model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
        for model_id in model_ids:
            #get all reactions in the model:
            reactions = [];
            reactions = self.stage02_isotopomer_query.get_rows_modelID_dataStage02IsotopomerModelReactions(model_id);
            #get mapping ids
            if mapping_id_rxns_I and mapping_id_mets_I:
                mapping_ids_rxns=mapping_id_rxns_I;
                mapping_ids_mets=mapping_id_mets_I;
            elif mapping_id_rxns_I:
                mapping_ids_rxns=mapping_id_rxns_I;
            else:
                mapping_ids_rxns=[];
                mapping_ids_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
            for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
                missing_reactions_O = [];
                missing_metabolites_O = [];
                for reaction_cnt,reaction in enumerate(reactions):
                    #get the current reaction mappings
                    mapping_rxns = [];
                    mapping_rxns = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
                    #if mapping_rxns: # atom mapping for the reaction already exists and is used
                    #    continue;
                    if mapping_id_new_I:
                        mapping_id_current = mapping_id_new_I;
                    else:
                        mapping_id_current = mapping_id_rxns;
                    data_tmp={'mapping_id':mapping_id_current,
                        'rxn_id':reaction['rxn_id'],
                        'rxn_description':None,
                        'reactants_stoichiometry_tracked':[],
                        'products_stoichiometry_tracked':[],
                        'reactants_ids_tracked':[],
                        'products_ids_tracked':[],
                        'reactants_mapping':[],
                        'products_mapping':[],
                        'rxn_equation':reaction['equation'],
                        'products_elements_tracked':[],
                        'products_positions_tracked':[],
                        'reactants_elements_tracked':[],
                        'reactants_positions_tracked':[],
                        'used_':True,
                        'comment_':''};
                    #check if the reactants or products are tracked
                    tracked_reactants = [];
                    for reactant in reaction['reactants_ids']:
                        tracked_reactant = {};
                        if mapping_id_mets_I:
                            tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],reactant);
                        else:
                            tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,reactant);
                        if tracked_reactant:
                            tracked_reactants.append(tracked_reactant);
                    tracked_products = [];
                    for product in reaction['products_ids']:
                        tracked_product = {};
                        if mapping_id_mets_I:
                            tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],product);
                        else:
                            tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,product);
                        if tracked_product:
                            tracked_products.append(tracked_product);
                    if tracked_reactants or tracked_products:
                        #check if the reaction is missing or is missing a tracked metabolite
                        tracked_reaction = {};
                        tracked_reaction = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
                        if tracked_reaction:
                            missing_reactants = [];
                            # get the stoichiometry for each reactant
                            tracked_reaction_reactant_ids_stoich = {};
                            for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
                                tracked_reaction_reactant_ids_stoich[tracked_reactant_id] = 0;
                            for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
                                tracked_reaction_reactant_ids_stoich[tracked_reactant_id] += abs(tracked_reaction['reactants_stoichiometry_tracked'][tracked_reactant_id_cnt]);
                            #copy existing data
                            data_tmp['reactants_ids_tracked'].extend(tracked_reaction['reactants_ids_tracked']);
                            data_tmp['reactants_stoichiometry_tracked'].extend(tracked_reaction['reactants_stoichiometry_tracked']);
                            data_tmp['reactants_mapping'].extend(tracked_reaction['reactants_mapping']);
                            data_tmp['reactants_elements_tracked'].extend(tracked_reaction['reactants_elements_tracked']);
                            data_tmp['reactants_positions_tracked'].extend(tracked_reaction['reactants_positions_tracked']);
                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                            for tracked_reactant in tracked_reactants:
                                if tracked_reactant['met_id'] in tracked_reaction['reactants_ids_tracked']:
                                    # check for matching stoichiometry
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
                                        if met_id == tracked_reactant['met_id']:
                                            reaction_stoich = abs(reaction['reactants_stoichiometry'][met_id_cnt]);
                                            break;
                                    unbalanced_stoich = reaction_stoich - tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']];
                                    if tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']] != reaction_stoich:
                                        for stoich_cnt in range(int(unbalanced_stoich)):
                                            missing_reactants.append(tracked_reactant);
                                            #add missing data
                                            data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
                                            data_tmp['reactants_stoichiometry_tracked'].append(0);
                                            imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},stoich_cnt)
                                            new_mapping = imm.convert_arrayMapping2StringMapping();
                                            imm.clear_metaboliteMapping();
                                            data_tmp['reactants_mapping'].append(new_mapping);
                                            #data_tmp['reactants_mapping'].append('');
                                            data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
                                            data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
                                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                            data_tmp['used_']=False;
                                            data_tmp['comment_']+=tracked_reactant['met_id']+',';
                                else:
                                    missing_reactants.append(tracked_reactant);
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
                                        if met_id == tracked_reactant['met_id']:
                                            reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
                                            break;
                                    #add missing data
                                    data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
                                    data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
                                    imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
                                    new_mapping = imm.convert_arrayMapping2StringMapping();
                                    imm.clear_metaboliteMapping();
                                    data_tmp['reactants_mapping'].append(new_mapping);
                                    #data_tmp['reactants_mapping'].append('');
                                    data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
                                    data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
                                    data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                    data_tmp['used_']=False;
                                    data_tmp['comment_']+=tracked_reactant['met_id']+',';
                            missing_products = [];
                            # get the stoichiometry for each product
                            tracked_reaction_product_ids_stoich = {};
                            for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
                                tracked_reaction_product_ids_stoich[tracked_product_id] = 0;
                            for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
                                tracked_reaction_product_ids_stoich[tracked_product_id] += abs(tracked_reaction['products_stoichiometry_tracked'][tracked_product_id_cnt]);
                            #copy existing data
                            data_tmp['products_ids_tracked'].extend(tracked_reaction['products_ids_tracked']);
                            data_tmp['products_stoichiometry_tracked'].extend(tracked_reaction['products_stoichiometry_tracked']);
                            data_tmp['products_mapping'].extend(tracked_reaction['products_mapping']);
                            data_tmp['products_elements_tracked'].extend(tracked_reaction['products_elements_tracked']);
                            data_tmp['products_positions_tracked'].extend(tracked_reaction['products_positions_tracked']);
                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                            for tracked_product in tracked_products:
                                if tracked_product['met_id'] in tracked_reaction['products_ids_tracked']:
                                    # check for matching stoichiometry
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['products_ids']):
                                        if met_id == tracked_product['met_id']:
                                            reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
                                            break;
                                    unbalanced_stoich = reaction_stoich - tracked_reaction_product_ids_stoich[tracked_product['met_id']];
                                    if tracked_reaction_product_ids_stoich[tracked_product['met_id']] != reaction_stoich:
                                        for stoich_cnt in range(int(unbalanced_stoich)):
                                            missing_products.append(tracked_product);
                                            #add missing data
                                            data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
                                            data_tmp['products_stoichiometry_tracked'].append(0);
                                            imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},stoich_cnt)
                                            new_mapping = imm.convert_arrayMapping2StringMapping();
                                            imm.clear_metaboliteMapping();
                                            data_tmp['products_mapping'].append(new_mapping);
                                            #data_tmp['products_mapping'].append('');
                                            data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
                                            data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
                                            data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                            data_tmp['used_']=False;
                                            data_tmp['comment_']+=tracked_product['met_id']+',';
                                else:
                                    missing_products.append(tracked_product);
                                    reaction_stoich = 0;
                                    for met_id_cnt,met_id in enumerate(reaction['products_ids']):
                                        if met_id == tracked_product['met_id']:
                                            reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
                                            break;
                                    #add missing data
                                    data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
                                    data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
                                    imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
                                    new_mapping = imm.convert_arrayMapping2StringMapping();
                                    imm.clear_metaboliteMapping();
                                    data_tmp['products_mapping'].append(new_mapping);
                                    #data_tmp['products_mapping'].append('');
                                    data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
                                    data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
                                    data_tmp['rxn_description']=tracked_reaction['rxn_description'];
                                    data_tmp['used_']=False;
                                    data_tmp['comment_']+=tracked_product['met_id']+',';
                            if missing_reactants or missing_products:
                                tmp = {};
                                tmp = tracked_reaction;
                                tmp.update({'missing_reactants':missing_reactants});
                                tmp.update({'missing_products':missing_products});
                                tmp.update({'equation':reaction['equation']})
                                missing_metabolites_O.append(tmp);
                        else:
                            tmp = {};
                            tmp = reaction;
                            tmp.update({'tracked_reactants':tracked_reactants});
                            tmp.update({'tracked_products':tracked_products});
                            missing_reactions_O.append(reaction);
                            for tracked_reactant in tracked_reactants:
                                reaction_stoich = 0;
                                for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
                                    if met_id == tracked_reactant['met_id']:
                                        reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
                                        break;
                                #add missing data
                                data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
                                data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
                                imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
                                new_mapping = imm.convert_arrayMapping2StringMapping();
                                imm.clear_metaboliteMapping();
                                data_tmp['reactants_mapping'].append(new_mapping);
                                #data_tmp['reactants_mapping'].append('');
                                data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
                                data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
                                data_tmp['rxn_description']=None;
                                data_tmp['used_']=False;
                                data_tmp['comment_']=reaction['rxn_id'];
                            for tracked_product in tracked_products:
                                reaction_stoich = 0;
                                for met_id_cnt,met_id in enumerate(reaction['products_ids']):
                                    if met_id == tracked_product['met_id']:
                                        reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
                                        break;
                                #add missing data
                                data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
                                data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
                                imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
                                new_mapping = imm.convert_arrayMapping2StringMapping();
                                imm.clear_metaboliteMapping();
                                data_tmp['products_mapping'].append(new_mapping);
                                #data_tmp['products_mapping'].append('');
                                data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
                                data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
                                data_tmp['rxn_description']=None;
                                data_tmp['used_']=False;
                                data_tmp['comment_']=reaction['rxn_id'];
                        data_O.append(data_tmp);
                #self.print_missingReactionMappings(missing_reactions_O,missing_metabolites_O);
                return missing_reactions_O,missing_metabolites_O;
        #add data to the database:
        self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions(data_O);

    def print_missingReactionMappings(self,missing_reactions_I,missing_metabolites_I):
        '''print missing reaction mappings to the screen'''
        #missing reactions
        script = '';
        for missing_reaction in missing_reactions_I:
            script+=
missing_reaction['rxn_id']+'\t'+missing_reaction['equation']+'\t'+str(missing_reaction['reactants_ids'])+'\t'+str(missing_reaction['products_ids'])+'\t'; for tracked_reactant in missing_reaction['tracked_reactants']: script+= tracked_reactant['met_id']+','; script+= '\t' for tracked_product in missing_reaction['tracked_products']: script+= tracked_product['met_id']+','; script+='\n' print(script) #missing metabolites script = ''; for missing_metabolite in missing_metabolites_I: script+= missing_metabolite['rxn_id']+'\t'+missing_metabolite['equation']+'\t'+str(missing_metabolite['reactants_ids_tracked'])+'\t'+str(missing_metabolite['products_ids_tracked'])+'\t'; for tracked_reactant in missing_metabolite['missing_reactants']: script+= tracked_reactant['met_id']+','; script+= '\t' for tracked_product in missing_metabolite['missing_products']: script+= tracked_product['met_id']+','; script+='\n' print(script) def find_inconsistentMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]): '''Find inconsistencies in the atom mapping by comparing the metabolite information in atomMappingMetabolites table to the atom mapping in the atomMappingReactions table''' #Output: # data_O = row of atomMappingReactions filled only with the inconsistent metabolite mapping information # missing_mets_O = metabolites that are tracked in atomMappingReactions, but are not present in atomMappingMetabolites data_O = []; missing_mets_O = []; #get model ids: if model_id_I: model_ids = model_id_I; else: model_ids = []; model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I); for model_id in model_ids: print('checking model_id ' + model_id); #get mapping ids if mapping_id_I: mapping_ids=mapping_id_I; else: mapping_ids=[]; mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id); for mapping_cnt,mapping_id in enumerate(mapping_ids): print('checking mapping_id ' + mapping_id); # get the reaction mapping reaction_mappings = []; reaction_mappings = self.stage02_isotopomer_query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id); for reaction_cnt,reaction_mapping in enumerate(reaction_mappings): print('checking reaction ' + reaction_mapping['rxn_id']); #debug: if reaction_mapping['rxn_id'] == 'COFACTOR_3': print('check'); #check reactants rxn_tmp = {}; rxn_tmp['mapping_id']=mapping_id rxn_tmp['rxn_id']=reaction_mapping['rxn_id'] rxn_tmp['rxn_description']=reaction_mapping['rxn_description'] rxn_tmp['reactants_stoichiometry_tracked']=[] rxn_tmp['products_stoichiometry_tracked']=[] rxn_tmp['reactants_ids_tracked']=[] rxn_tmp['products_ids_tracked']=[] rxn_tmp['reactants_elements_tracked']=[] rxn_tmp['products_elements_tracked']=[] rxn_tmp['reactants_positions_tracked']=[] rxn_tmp['products_positions_tracked']=[] rxn_tmp['reactants_mapping']=[] rxn_tmp['products_mapping']=[] rxn_tmp['rxn_equation']=None rxn_tmp['used_']=True rxn_tmp['comment_']='Inconsistent metabolites found'; rxn_tmp['reactants_metaboliteMappings']=[] rxn_tmp['products_metaboliteMappings']=[] bad_reactant = False; for reactant_cnt,reactant in enumerate(reaction_mapping['reactants_ids_tracked']): print('checking reactant ' + reactant); # get the metabolite mapping metabolite_mapping = {}; metabolite_mapping = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id,reactant); if not metabolite_mapping: print('metabolite mapping not 
found') missing_mets_O.append(reactant); continue; # check the reaction mapping reactants_mapping = reaction_mapping['reactants_mapping'][reactant_cnt]; if '[' in reaction_mapping['reactants_mapping'][reactant_cnt]: reactants_mapping = reaction_mapping['reactants_mapping'][reactant_cnt].split(']['); reactants_mapping = [m.replace('[','') for m in reactants_mapping]; reactants_mapping = [m.replace(']','') for m in reactants_mapping]; if len(metabolite_mapping['met_atompositions']) != len(reactants_mapping): rxn_tmp['reactants_metaboliteMappings'].append(reaction_mapping['reactants_mapping'][reactant_cnt]); print('bad reactants_metaboliteMappings'); bad_reactant = True; # check the reaction elements tracked if metabolite_mapping['met_atompositions'] != reaction_mapping['reactants_positions_tracked'][reactant_cnt]: rxn_tmp['reactants_positions_tracked'].append(reaction_mapping['reactants_positions_tracked'][reactant_cnt]); print('bad reactants_positions_tracked'); bad_reactant = True; # check the reaction positions tracked if metabolite_mapping['met_elements'] != reaction_mapping['reactants_elements_tracked'][reactant_cnt]: rxn_tmp['reactants_elements_tracked'].append(reaction_mapping['reactants_elements_tracked'][reactant_cnt]); print('bad reactants_elements_tracked'); bad_reactant = True; if bad_reactant: rxn_tmp['reactants_ids_tracked'].append(reactant); rxn_tmp['reactants_stoichiometry_tracked'].append(reaction_mapping['reactants_stoichiometry_tracked'][reactant_cnt]); #check products bad_product = False; for product_cnt,product in enumerate(reaction_mapping['products_ids_tracked']): print('checking product ' + product); # get the metabolite mapping metabolite_mapping = {}; metabolite_mapping = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id,product); if not metabolite_mapping: print('metabolite mapping not found') missing_mets_O.append(product); continue; # check the reaction mapping products_mapping = reaction_mapping['products_mapping'][product_cnt]; if '[' in reaction_mapping['products_mapping'][product_cnt]: products_mapping = reaction_mapping['products_mapping'][product_cnt].split(']['); products_mapping = [m.replace('[','') for m in products_mapping]; products_mapping = [m.replace(']','') for m in products_mapping]; if len(metabolite_mapping['met_atompositions']) != len(products_mapping): rxn_tmp['products_metaboliteMappings'].append(reaction_mapping['products_mapping'][product_cnt]); print('bad products_metaboliteMappings'); bad_product = True; # check the reaction elements tracked if metabolite_mapping['met_atompositions'] != reaction_mapping['products_positions_tracked'][product_cnt]: rxn_tmp['products_positions_tracked'].append(reaction_mapping['products_positions_tracked'][product_cnt]); print('bad products_positions_tracked'); bad_product = True; # check the reaction positions tracked if metabolite_mapping['met_elements'] != reaction_mapping['products_elements_tracked'][product_cnt]: rxn_tmp['products_elements_tracked'].append(reaction_mapping['products_elements_tracked'][product_cnt]); print('bad products_elements_tracked'); bad_product = True; if bad_product: rxn_tmp['products_ids_tracked'].append(product); rxn_tmp['products_stoichiometry_tracked'].append(reaction_mapping['products_stoichiometry_tracked'][product_cnt]); #record if bad_reactant or bad_product: data_O.append(rxn_tmp); return data_O,missing_mets_O; def find_unbalancedReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]): '''Find 
reactions mappings that are not elementally balanced''' #Output: # unbalanced_rxns_O = {rxn_id:{'n_products_elements_tracked':products_positions_tracked_cnt, # 'n_reactants_elements_tracked':reactants_positions_tracked_cnt},...} unbalanced_rxns_O = {}; #get model ids: if model_id_I: model_ids = model_id_I; else: model_ids = []; model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I); for model_id in model_ids: print('checking model_id ' + model_id); #get mapping ids if mapping_id_I: mapping_ids=mapping_id_I; else: mapping_ids=[]; mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id); for mapping_cnt,mapping_id in enumerate(mapping_ids): print('checking mapping_id ' + mapping_id); # get the reaction mapping reaction_mappings = []; reaction_mappings = self.stage02_isotopomer_query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id); for reaction_cnt,reaction_mapping in enumerate(reaction_mappings): print('checking reaction ' + reaction_mapping['rxn_id']); #check reactants reactants_positions_tracked_cnt = 0; for reactant_cnt,reactant in enumerate(reaction_mapping['reactants_ids_tracked']): print('checking reactant ' + reactant); # check that the reactant positions == reactant elements if len(reaction_mapping['reactants_positions_tracked'][reactant_cnt])!=len(reaction_mapping['reactants_elements_tracked'][reactant_cnt]): print('inconsistent reactants_positions and reactants_elements'); continue; reactants_positions_tracked_cnt += len(reaction_mapping['reactants_positions_tracked'][reactant_cnt]); #check products products_positions_tracked_cnt = 0; for product_cnt,product in enumerate(reaction_mapping['products_ids_tracked']): print('checking product ' + product); # check that the product positions == product elements if len(reaction_mapping['products_positions_tracked'][product_cnt])!=len(reaction_mapping['products_elements_tracked'][product_cnt]): print('inconsistent products_positions and products_elements'); continue; products_positions_tracked_cnt += len(reaction_mapping['products_positions_tracked'][product_cnt]); #record if reactants_positions_tracked_cnt!=products_positions_tracked_cnt: unbalanced_rxns_O[reaction_mapping['rxn_id']] = {'n_products_elements_tracked':products_positions_tracked_cnt, 'n_reactants_elements_tracked':reactants_positions_tracked_cnt}; #unbalanced_rxns_O.append(reaction_mapping); return unbalanced_rxns_O; def find_inconsistentReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]): '''Find inconsistencies in the reaction mapping''' #Output: # unbalanced_rxns_O = {rxn_id:{'n_products_elements_tracked':products_positions_tracked_cnt, # 'n_reactants_elements_tracked':reactants_positions_tracked_cnt},...} irm = stage02_isotopomer_reactionMapping(); #get model ids: if model_id_I: model_ids = model_id_I; else: model_ids = []; model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I); for model_id in model_ids: print('checking model_id ' + model_id); #get mapping ids if mapping_id_I: mapping_ids=mapping_id_I; else: mapping_ids=[]; mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id); for mapping_cnt,mapping_id in enumerate(mapping_ids): print('checking mapping_id ' + mapping_id); # get the reaction ids reaction_ids = []; reaction_ids = 
self.stage02_isotopomer_query.get_rxnIDs_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id); for reaction_cnt,reaction_id in enumerate(reaction_ids): print('checking reaction ' + reaction_id); #check each reaction irm.get_reactionMapping(mapping_id,reaction_id); reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\ products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\ element_balance_check,mapping_check = irm.check_reactionMapping(); #clear reaction irm.clear_reactionMapping(); class isotopomer_netRxns(): def __init__(self): self.isotopomer_rxns_net = {}; self.isotopomer_rxns_net = self.define_netRxns(); def define_netRxns(self): isotopomer_rxns_net = {}; isotopomer_rxns_net.update(self.define_netRxns_iDM2014_reversible()); isotopomer_rxns_net.update(self.define_netRxns_RL2013_reversible()); return isotopomer_rxns_net def define_netRxns_iDM2014_reversible(self): isotopomer_rxns_net = { 'ptrc_to_4abut_1':{'reactions':['PTRCTA','ABUTD'], 'stoichiometry':[1,1]}, 'ptrc_to_4abut_2':{'reactions':['GGPTRCS','GGPTRCO','GGGABADr','GGGABAH'], 'stoichiometry':[1,1,1,1]}, 'glu_DASH_L_to_acg5p':{'reactions':['ACGS','ACGK'], 'stoichiometry':[1,1]}, '2obut_and_pyr_to_3mop':{'reactions':['ACHBS','KARA2','DHAD2'], 'stoichiometry':[1,1,1]}, 'pyr_to_23dhmb':{'reactions':['ACLS','KARA1'], 'stoichiometry':[1,-1]}, #'met_DASH_L_and_ptrc_to_spmd_and_5mta':{'reactions':['METAT','ADMDC','SPMS'], # 'stoichiometry':[1,1,1]}, #cannot be lumped 'chor_and_prpp_to_3ig3p':{'reactions':['ANS','ANPRT','PRAIi','IGPS'], 'stoichiometry':[1,1,1,1]}, 'hom_DASH_L_and_cyst_DASH_L_to_pyr_hcys_DASH_L':{'reactions':['HSST','SHSL1','CYSTL'], 'stoichiometry':[1,1,1]}, 'e4p_and_pep_to_3dhq':{'reactions':['DDPA','DHQS'], 'stoichiometry':[1,1]}, 'aspsa_to_sl2a6o':{'reactions':['DHDPS','DHDPRy','THDPS'], 'stoichiometry':[1,1,1]}, 'glu_DASH_L_to_glu5sa':{'reactions':['GLU5K','G5SD'], 'stoichiometry':[1,1]}, 'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'], 'stoichiometry':[1,1]}, 'thr_DASH_L_to_gly':{'reactions':['THRD','GLYAT'], 'stoichiometry':[1,-1]}, #need to remove deadend mets: athr-L: ATHRDHr, ATHRDHr_reverse; aact: AACTOOR, AOBUTDs 'dhap_to_lac_DASH_D':{'reactions':['MGSA','LGTHL','GLYOX'], 'stoichiometry':[1,1,1]}, 'hom_DASH_L_to_thr_DASH_L':{'reactions':['HSK','THRS'], 'stoichiometry':[1,1]}, '3pg_to_ser_DASH_L':{'reactions':['PGCD','PSERT','PSP_L'], 'stoichiometry':[1,1,1]}, 'prpp_to_his_DASH_L':{'reactions':['ATPPRT','PRATPP','PRAMPC','PRMICI','IG3PS','IGPDH','HSTPT','HISTP','HISTD'], 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, 'UMPSYN_aerobic':{'reactions':['ASPCT','DHORTS','DHORD2','ORPT','OMPDC'], 'stoichiometry':[1,-1,1,-1,1]}, #'UMPSYN_anaerobic':{'reactions':['ASPCT','DHORTS','DHORD5','ORPT','OMPDC'], # 'stoichiometry':[1,-1,1,-1,1]}, 'IMPSYN_1':{'reactions':['GLUPRT','PRAGSr','PRFGS','PRAIS'], 'stoichiometry':[1,1,1,1]}, 'IMPSYN_2':{'reactions':['AIRC2','AIRC3','PRASCSi','ADSL2r'], 'stoichiometry':[1,-1,1,1]}, 'IMPSYN_3':{'reactions':['AICART','IMPC'], 'stoichiometry':[1,-1]}, 'imp_to_gmp':{'reactions':['IMPD','GMPS2'], 'stoichiometry':[1,1]}, 'imp_to_amp':{'reactions':['ADSS','ADSL1r'], 'stoichiometry':[1,1]}, #'utp_to_dump_anaerobic':{'reactions':['RNTR4c2','DUTPDP'], # 'stoichiometry':[1,1]}, 'udp_to_dump_aerobic':{'reactions':['RNDR4','NDPK6','DUTPDP'], 'stoichiometry':[1,1,1]}, #'dtmp_to_dttp':{'reactions':['DTMPK','NDPK4'], # 
'stoichiometry':[1,1]}, #cannot be lumped 'COASYN':{'reactions':['ASP1DC','MOHMT','DPR','PANTS','PNTK','PPNCL2','PPCDC','PTPATi','DPCOAK'], 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, 'FADSYN_1':{'reactions':['GTPCII2','DHPPDA2','APRAUR','PMDPHT','RBFSb'], 'stoichiometry':[1,1,1,1,1]}, 'FADSYN_2':{'reactions':['RBFSa','DB4PS'], 'stoichiometry':[1,1]}, 'FADSYN_3':{'reactions':['RBFK','FMNAT'], 'stoichiometry':[1,1]}, 'NADSYN_aerobic':{'reactions':['ASPO6','QULNS','NNDPR','NNATr','NADS1','NADK'], 'stoichiometry':[1,1,1,1,1,1]}, 'NADSYN_anaerobic':{'reactions':['ASPO5','QULNS','NNDPR','NNATr','NADS1','NADK'], 'stoichiometry':[1,1,1,1,1,1]}, #'NADSALVAGE':{'reactions':['NADPPPS','NADN','NNAM','NAMNPP','NMNN','NMNDA','NMNAT','NADDP','ADPRDP'], # 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, #cannot be lumped 'THFSYN':{'reactions':['GTPCI','DNTPPA','DNMPPA','DHNPA2r','HPPK2','ADCS','ADCL','DHPS2','DHFS'], 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, 'GTHSYN':{'reactions':['GLUCYS','GTHS'], 'stoichiometry':[1,1]}, 'GLYCPHOSPHOLIPID_1':{'reactions':['DASYN181','AGPAT181','G3PAT181'],'stoichiometry':[1,1,1]}, 'GLYCPHOSPHOLIPID_2':{'reactions':['PSSA181','PSD181'],'stoichiometry':[1,1]}, 'GLYCPHOSPHOLIPID_3':{'reactions':['PGSA160','PGPP160'],'stoichiometry':[1,1]}, 'GLYCPHOSPHOLIPID_4':{'reactions':['DASYN161','AGPAT161','G3PAT161'],'stoichiometry':[1,1,1]}, 'GLYCPHOSPHOLIPID_5':{'reactions':['PGSA181','PGPP181'],'stoichiometry':[1,1]}, 'GLYCPHOSPHOLIPID_6':{'reactions':['PSD161','PSSA161'],'stoichiometry':[1,1]}, 'GLYCPHOSPHOLIPID_7':{'reactions':['PSSA160','PSD160'],'stoichiometry':[1,1]}, 'GLYCPHOSPHOLIPID_8':{'reactions':['DASYN160','AGPAT160','G3PAT160'],'stoichiometry':[1,1,1]}, 'GLYCPHOSPHOLIPID_9':{'reactions':['PGSA161','PGPP161'],'stoichiometry':[1,1]}, 'MOLYBDOPTERIN_1':{'reactions':['MPTAT','MPTS','CPMPS'],'stoichiometry':[1,1,1]}, 'MOLYBDOPTERIN_2':{'reactions':['MOCDS','MOGDS'],'stoichiometry':[1,1]}, 'MOLYBDOPTERIN_3':{'reactions':['MOADSUx','MPTSS'],'stoichiometry':[1,1]}, 'COFACTOR_1':{'reactions':['GLUTRR','G1SAT','GLUTRS'],'stoichiometry':[1,1,1]}, 'COFACTOR_2':{'reactions':['DHNAOT4','UPPDC1','DHNCOAT','DHNCOAS','SEPHCHCS','SUCBZS','SUCBZL','PPPGO3','FCLT','CPPPGO','SHCHCS3'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]}, 'COFACTOR_3':{'reactions':['TYRL','AMMQLT8','HEMEOS','UPP3MT','SHCHD2','SHCHF','ENTCS','CBLAT'],'stoichiometry':[1,1,1,1,1,1,1,1]}, 'VITB6':{'reactions':['E4PD','PERD','OHPBAT','PDX5PS','PDX5PO2'],'stoichiometry':[1,1,1,1,1]}, #'THIAMIN':{'reactions':['AMPMS2','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, # original pathway without correction 'THIAMIN':{'reactions':['AMPMS3','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, 'COFACTOR_4':{'reactions':['I4FE4ST','I4FE4SR','I2FE2SS2'],'stoichiometry':[1,1,1]}, 'COFACTOR_5':{'reactions':['BMOGDS1','BMOGDS2','BMOCOS'],'stoichiometry':[1,1,1]}, 'COFACTOR_6':{'reactions':['DMPPS','GRTT','DMATT'],'stoichiometry':[1,1,1]}, 'COFACTOR_7':{'reactions':['MECDPS','DXPRIi','MEPCT','CDPMEK','MECDPDH5'],'stoichiometry':[1,1,1,1,1]}, 'COFACTOR_8':{'reactions':['LIPOS','LIPOCT'],'stoichiometry':[1,1]}, 'COFACTOR_9':{'reactions':['OMMBLHX','OMPHHX','OPHHX','HBZOPT','DMQMT','CHRPL','OMBZLM','OPHBDC','OHPHM'],'stoichiometry':[1,1,1,1,1,1,1,1,1]}, 'COFACTOR_10':{'reactions':['SERASr','DHBD','UPP3S','HMBS','ICHORT','DHBS'],'stoichiometry':[1,1,1,1,1,1]}, 
'COFACTOR_11':{'reactions':['PMEACPE','EGMEACPR','DBTS','AOXSr2','I2FE2SR','OPMEACPD','MALCOAMT','AMAOTr','OPMEACPS','OPMEACPR','OGMEACPD','OGMEACPR','OGMEACPS','EPMEACPR','BTS5'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]}, 'CELLENV_1':{'reactions':['UAMAGS','UAPGR','UAGPT3','PAPPT3','GLUR','UAGCVT','UAMAS','UDCPDP','UGMDDS','UAAGDS'],'stoichiometry':[1,1,1,1,-1,1,1,1,1,1]}, 'CELLENV_2':{'reactions':['3HAD181','3OAR181','3OAS181','EAR181x'],'stoichiometry':[1,1,1,1]}, 'CELLENV_3':{'reactions':['3HAD160','3OAR160','EAR160x','3OAS160'],'stoichiometry':[1,1,1,1]}, 'CELLENV_4':{'reactions':['EAR120x','3OAR120','3HAD120','3OAS120','EAR100x'],'stoichiometry':[1,1,1,1,1]}, 'CELLENV_5':{'reactions':['G1PACT','UAGDP','PGAMT','GF6PTA'],'stoichiometry':[1,1,-1,1]}, 'CELLENV_6':{'reactions':['3OAR40','EAR40x','3OAS60','3OAR60','3HAD80','3OAS80','3OAR80','EAR60x','3HAD60','EAR80x','3HAD40'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]}, 'CELLENV_7':{'reactions':['3HAD161','EAR161x','3OAS161','3OAR161','3OAS141','3HAD141','3OAR121','EAR121x','3HAD121','EAR141x','T2DECAI','3OAR141','3OAS121'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1]}, 'CELLENV_8':{'reactions':['TDPGDH','TDPDRR','TDPDRE','G1PTT'],'stoichiometry':[1,1,1,1]}, 'CELLENV_9':{'reactions':['3OAS140','3OAR140'],'stoichiometry':[1,1]}, 'CELLENV_10':{'reactions':['3HAD140','EAR140x'],'stoichiometry':[1,1]}, 'CELLENV_11':{'reactions':['3OAR100','3HAD100','3OAS100'],'stoichiometry':[1,1,1]}, 'LIPOPOLYSACCHARIDE_1':{'reactions':['COLIPAabcpp','COLIPAabctex','EDTXS1','EDTXS2','GALT1','GLCTR1','GLCTR2','GLCTR3','HEPK1','HEPK2','HEPT1','HEPT2','HEPT3','HEPT4','LPADSS','MOAT','MOAT2','MOAT3C','RHAT1','TDSK','USHD'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]}, 'LIPOPOLYSACCHARIDE_2':{'reactions':['AGMHE','GMHEPAT','GMHEPK','GMHEPPA','S7PI'],'stoichiometry':[1,1,1,1,1]}, 'LIPOPOLYSACCHARIDE_3':{'reactions':['U23GAAT','UHGADA','UAGAAT'],'stoichiometry':[1,1,1]}, 'LIPOPOLYSACCHARIDE_4':{'reactions':['KDOPP','KDOCT2','KDOPS'],'stoichiometry':[1,1,1]}, 'ASTPathway':{'reactions':['AST','SADH','SGDS','SGSAD','SOTA'],'stoichiometry':[1,1,1,1,1]} }; return isotopomer_rxns_net def define_netRxns_RL2013_reversible(self): isotopomer_rxns_net = { 'PTAr_ACKr_ACS':{'reactions':['PTAr','ACKr','ACS'], 'stoichiometry':[1,-1,-1]}, #acetate secretion 'ACONTa_ACONTb':{'reactions':['ACONTa','ACONTb'], 'stoichiometry':[1,1]}, 'G6PDH2r_PGL':{'reactions':['G6PDH2r','PGL'], 'stoichiometry':[1,1]}, 'GAPD_PGK':{'reactions':['GAPD','PGK'], #glycolysis 'stoichiometry':[1,-1]}, 'PGM':{'reactions':['PGM','ENO'], #glycolysis 'stoichiometry':[-1,1]}, 'SUCCOAS':{'reactions':['SUCOAS'], #mispelling 'stoichiometry':[1]} #TODO: amino acid synthesis reactions }; return isotopomer_rxns_net; class isotopomer_fluxSplits(): def __init__(self): self.isotopomer_splits = {}; self.isotopomer_splits = self.define_fluxSplits(); def define_fluxSplits(self): isotopomer_splits = {}; isotopomer_splits['g6p_2_f6p_or_6pgc']=['PGI','G6PDH2r']; isotopomer_splits['6pgc_2_2ddg6p_or_ru5p-D']=['EDD','GND']; isotopomer_splits['pep_2_oaa_or_pyr']=['PPC','PYK','GLCptspp']; isotopomer_splits['accoa_2_ac_or_cit']=['PTAr','CS']; isotopomer_splits['icit_2_akg_or_glx']=['ICDHyr','ICL']; isotopomer_splits['glc-D_2_g6p']=['HEX1','GLCptspp']; isotopomer_splits['mal-L_2_oaa_or_pyr']=['ME1','ME2','MDH']; return isotopomer_splits
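# A minimal sketch (not part of the source above) of how the
# {'reactions': [...], 'stoichiometry': [...]} net-reaction convention can be
# consumed: each lumped net flux is a stoichiometry-weighted component flux,
# where a stoichiometry of -1 means the component reaction runs in reverse
# relative to the lump. The flux values below are hypothetical.
def net_flux(net_rxn, fluxes):
    # At steady state, the weighted components of one lump should agree.
    components = [s * fluxes[r]
                  for r, s in zip(net_rxn['reactions'], net_rxn['stoichiometry'])]
    return components[0]

example_net = {'reactions': ['GAPD', 'PGK'], 'stoichiometry': [1, -1]}
example_fluxes = {'GAPD': 10.0, 'PGK': -10.0}  # hypothetical values
print(net_flux(example_net, example_fluxes))  # 10.0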
[ "dmccloskey87@gmail.com" ]
dmccloskey87@gmail.com
572850f5eb177b3a39baee3d35b40e3eda54643a
4e879398eaecdc19f056ee538d0732b2e92aa84f
/SistemaDiscusiones/urls.py
7c2bedce1ec87129887c060f542029d86a8b4848
[]
no_license
acamposruiz/localdevask
9311566ab2526e2b6966374e43e7d198fe24045a
867cfafff33fc214d68c499bd7e97b4f77dcd3b0
refs/heads/master
2021-01-25T04:01:16.308722
2014-04-28T03:53:46
2014-04-28T03:53:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
530
py
from django.conf.urls import patterns, include, url
from django.contrib import admin

admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'SistemaDiscusiones.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^', include('apps.home.urls', namespace="home")),
    url(r'^', include('apps.users.urls', namespace="users")),

    # PYTHON SOCIAL AUTH
    url('', include('social.apps.django_app.urls', namespace="social")),

    url(r'^admin/', include(admin.site.urls)),
)
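# A minimal companion sketch (not part of the source file above): with the
# namespaces registered in this urlconf, other code resolves URLs by
# namespaced name. The pattern name 'index' inside apps.home.urls is an
# assumption made for illustration.
from django.core.urlresolvers import reverse

def home_url():
    # Resolves the root pattern of the 'home' namespace, assuming
    # apps.home.urls defines a pattern named 'index'.
    return reverse('home:index')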
[ "acamposruiz@gmail.com" ]
acamposruiz@gmail.com
b14adaf5a89b66b23c4ea53b5a93cd242caca777
0f16edb46a48f9b5a125abb56fc0545ede1d65aa
/test_utilities/src/d1_test/mock_api/tests/test_get.py
d1eaef95d18355fd89576cc41c693343b6516ba0
[ "Apache-2.0" ]
permissive
DataONEorg/d1_python
5e685f1af0c356190f2d6df45d1ac849e2f56972
d72a9461894d9be7d71178fb7310101b8ef9066a
refs/heads/master
2023-08-29T03:16:38.131760
2023-06-27T21:59:37
2023-06-27T21:59:37
60,103,877
15
12
Apache-2.0
2023-09-06T18:27:53
2016-05-31T16:01:00
Python
UTF-8
Python
false
false
2,721
py
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
#   Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import responses

import d1_test.d1_test_case
import d1_test.mock_api.get


class TestMockGet(d1_test.d1_test_case.D1TestCase):
    @responses.activate
    def test_1000(self, mn_client_v1_v2):
        """mock_api.get() returns a Requests Response object."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        assert isinstance(mn_client_v1_v2.get("test_pid_1"), requests.Response)

    @responses.activate
    def test_1010(self, mn_client_v1_v2):
        """mock_api.get() returns the same content each time for a given PID."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        obj_1a_str = mn_client_v1_v2.get("test_pid_1").content
        obj_2a_str = mn_client_v1_v2.get("test_pid_2").content
        obj_1b_str = mn_client_v1_v2.get("test_pid_1").content
        obj_2b_str = mn_client_v1_v2.get("test_pid_2").content
        assert obj_1a_str == obj_1b_str
        assert obj_2a_str == obj_2b_str

    @responses.activate
    def test_1020(self, mn_client_v1_v2):
        """mock_api.get(): Redirects."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        direct_sciobj_bytes = mn_client_v1_v2.get("test_pid_1").content
        redirect_sciobj_bytes = mn_client_v1_v2.get(
            "<REDIRECT:303:3>test_pid_1"
        ).content
        assert direct_sciobj_bytes == redirect_sciobj_bytes

    # @responses.activate
    # def test_0012(self):
    #   """mock_api.get() returns 1024 bytes"""
    #   obj_str = self.client.get('test_pid_1').content
    #   self.assertEqual(len(obj_str), 1024)

    # @responses.activate
    # def test_0013(self):
    #   """mock_api.get(): Passing a trigger header triggers a DataONEException"""
    #   self.assertRaises(
    #     d1_common.types.exceptions.NotAuthorized, self.client.get, 'test_pid',
    #     vendorSpecific={'trigger': '401'}
    #   )
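# For context, a minimal sketch (not part of the source above) of the
# `responses` callback mechanism that d1_test.mock_api.get.add_callback()
# builds on. The URL pattern and payload here are hypothetical, not DataONE's
# actual wire format.
import re

import requests
import responses

def _get_callback(request):
    # responses callbacks return a (status, headers, body) tuple
    return 200, {'Content-Type': 'application/octet-stream'}, b'mock object bytes'

@responses.activate
def demo_mocked_get():
    responses.add_callback(
        responses.GET,
        re.compile(r'http://mock/mn/v[12]/object/.*'),
        callback=_get_callback,
    )
    assert requests.get('http://mock/mn/v2/object/test_pid_1').content == b'mock object bytes'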
[ "git@dahlsys.com" ]
git@dahlsys.com
3e43c121fa98f0c8fd7478f5ac8cd4cfe08fcd43
f576f0ea3725d54bd2551883901b25b863fe6688
/sdk/sql/azure-mgmt-sql/generated_samples/transparent_data_encryption_list.py
3e2275f884eabc284c7627538174b4de0a236e32
[ "MIT", "LicenseRef-scancode-generic-cla", "LGPL-2.1-or-later" ]
permissive
Azure/azure-sdk-for-python
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
c2ca191e736bb06bfbbbc9493e8325763ba990bb
refs/heads/main
2023-09-06T09:30:13.135012
2023-09-06T01:08:06
2023-09-06T01:08:06
4,127,088
4,046
2,755
MIT
2023-09-14T21:48:49
2012-04-24T16:46:12
Python
UTF-8
Python
false
false
1,661
py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-sql
# USAGE
    python transparent_data_encryption_list.py

    Before running the sample, please set the values of the client ID, tenant ID and
    client secret of the AAD application as environment variables: AZURE_CLIENT_ID,
    AZURE_TENANT_ID, AZURE_CLIENT_SECRET. For more info about how to get the value,
    please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )

    response = client.transparent_data_encryptions.list_by_database(
        resource_group_name="security-tde-resourcegroup",
        server_name="securitytde",
        database_name="testdb",
    )
    for item in response:
        print(item)


# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2022-08-01-preview/examples/TransparentDataEncryptionList.json
if __name__ == "__main__":
    main()
[ "noreply@github.com" ]
Azure.noreply@github.com
fe69d824ce277807f6d3e0d5eaaff8a66490ae4b
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
/otp/src/level/ModelEntity.py
5850215d12244dd9e104ca4eebaf6cf5fd012828
[]
no_license
satire6/Anesidora
da3a44e2a49b85252b87b612b435fb4970469583
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
refs/heads/master
2022-12-16T20:05:13.167119
2020-09-11T16:58:04
2020-09-11T17:02:06
294,751,966
89
32
null
null
null
null
UTF-8
Python
false
false
4,052
py
from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
import BasicEntities

class ModelEntity(BasicEntities.NodePathEntity):
    LoadFuncs = {
        'loadModelCopy': loader.loadModelCopy,
        'loadModel': loader.loadModel,
        'loadModelOnce': loader.loadModelOnce,
        }

    def __init__(self, level, entId):
        # TODO: fill in default values automatically for missing attribs
        self.collisionsOnly = False
        self.loadType = 'loadModelCopy'
        self.flattenType = 'light'
        self.goonHatType = 'none'
        self.entInitialized = False
        BasicEntities.NodePathEntity.__init__(self, level, entId)
        self.entInitialized = True

        self.model = None
        self.loadModel()

    def destroy(self):
        if self.model:
            self.model.removeNode()
        del self.model
        BasicEntities.NodePathEntity.destroy(self)

    def loadModel(self):
        if self.model:
            self.model.removeNode()
            self.model = None
        if self.modelPath is None:
            return
        self.model = ModelEntity.LoadFuncs[self.loadType](self.modelPath)
        if self.model:
            self.model.reparentTo(self)
            # hide/show as appropriate
            if self.collisionsOnly:
                if __dev__:
                    self.model.setTransparency(1)
                    self.model.setColorScale(1, 1, 1, .1)
                else:
                    self.model.hide()
            else:
                self.model.show()

            # HACK SDN: special code for moving crate wall collisions down
            if self.modelPath in ("phase_9/models/cogHQ/woodCrateB.bam",
                                  "phase_9/models/cogHQ/metal_crateB.bam",
                                  "phase_10/models/cashbotHQ/CBMetalCrate.bam",
                                  "phase_10/models/cogHQ/CBMetalCrate2.bam",
                                  "phase_10/models/cashbotHQ/CBWoodCrate.bam",
                                  "phase_11/models/lawbotHQ/LB_metal_crate.bam",
                                  "phase_11/models/lawbotHQ/LB_metal_crate2.bam",
                                  ):
                # get rid of any scales
                #self.model.flattenLight()
                # move walls down
                cNode = self.find("**/wall")
                cNode.setZ(cNode, -.75)

                # duplicate the floor and move it down to create a
                # catch effect for low-hopped toons
                colNode = self.find("**/collision")
                floor = colNode.find("**/floor")
                floor2 = floor.copyTo(colNode)
                floor2.setZ(floor2, -.75)

            """
            # incorporate the entity's overall scale
            self.model.setScale(self.getScale())
            self.setScale(1)
            self.model.flattenLight()
            """

            if self.goonHatType != 'none':
                self.goonType = {'hardhat': 'pg', 'security': 'sg'}[self.goonHatType]
                self.hat = self.model
                ### this was copied from Goon.createHead
                if self.goonType == "pg":
                    self.hat.find("**/security_hat").hide()
                elif self.goonType == "sg":
                    self.hat.find("**/hard_hat").hide()
                ###
                del self.hat
                del self.goonType

            if self.flattenType == 'light':
                self.model.flattenLight()
            elif self.flattenType == 'medium':
                self.model.flattenMedium()
            elif self.flattenType == 'strong':
                self.model.flattenStrong()

    def setModelPath(self, path):
        self.modelPath = path
        self.loadModel()

    def setCollisionsOnly(self, collisionsOnly):
        self.collisionsOnly = collisionsOnly
        self.loadModel()

    def setGoonHatType(self, goonHatType):
        self.goonHatType = goonHatType
        self.loadModel()
[ "66761962+satire6@users.noreply.github.com" ]
66761962+satire6@users.noreply.github.com
7477820069e7127b7679f7bebbb2f0d9efd1638d
3c5044c77a6c01e1a70b1722e8a860851056f28c
/16-1.py
97b5250c34e13ea99567b0f0574dcb5660117bae
[]
no_license
MANAkudo/pyhton
2f8c10bbf0b98babb5fea2ecdc4c2c430668e6fd
90fa56fb44e7e02d05250543375a292dfef28eca
refs/heads/master
2023-08-03T19:57:48.436313
2021-09-22T01:50:32
2021-09-22T01:50:32
409,027,207
0
0
null
null
null
null
UTF-8
Python
false
false
116
py
f = open("16_1_read.txt",'w') f.write("1\n") f.write("2\n") f.write("3\n") f.write("4\n") f.write("5\n") f.close()
[ "ykh2135248@stu.o-hara.ac.jp" ]
ykh2135248@stu.o-hara.ac.jp
f7cfc720c7204254c708dca38c4f7baee6ae12b1
dd126d6b82eb47d90950a355d4948047ae119f9c
/fixture/db.py
92f71b61a9231f7c5f9209c0af7120114c85c768
[]
no_license
Korinsky/Python4QA_B24
9382c178a7e564272e2628426946ae087ec4ccdc
0c5d5f812a6cb858a3bf59e45745a7fce206fd7e
refs/heads/main
2023-07-14T05:36:57.307809
2021-08-18T13:05:14
2021-08-18T13:05:14
377,419,480
0
0
null
null
null
null
UTF-8
Python
false
false
2,270
py
import pymysql
from model.group import Group
from model.contact import Contact


class DbFixture:

    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = pymysql.connect(host=host, database=name, user=user,
                                          password=password, autocommit=True)

    def get_groups_list(self):
        list = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for row in cursor:
                (id, name, header, footer) = row
                list.append(Group(id=str(id), name=name, header=header, footer=footer))
        finally:
            cursor.close()
        return list

    def get_contacts_list(self):
        list = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, firstname, lastname, address, email, email2, email3, home, mobile, work, phone2 from addressbook where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (id, firstname, lastname, address, email, email2, email3, homephone, mobilephone, workphone, secondaryphone) = row
                list.append(Contact(id=str(id), firstname=firstname, lastname=lastname, address=address,
                                    email=email, email2=email2, email3=email3, homephone=homephone,
                                    mobilephone=mobilephone, workphone=workphone, secondaryphone=secondaryphone))
        finally:
            cursor.close()
        return list

    def destroy(self):
        self.connection.close()

    def get_contact_in_group(self):
        dict = {}
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, group_id from address_in_groups where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (id, group_id) = row
                if id in dict.keys():
                    value = dict.get(id)
                    value.append(group_id)
                else:
                    value = []
                    value.append(group_id)
                    dict[id] = value
        finally:
            cursor.close()
        return dict
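# A minimal usage sketch (not part of the source above); the connection
# parameters are placeholders, not values from the project.
if __name__ == "__main__":
    db = DbFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    try:
        for group in db.get_groups_list():
            print(group.id, group.name)
        # get_contact_in_group() returns {contact_id: [group_id, ...]}
        print(len(db.get_contact_in_group()), "contacts are assigned to groups")
    finally:
        db.destroy()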
[ "72462941+Korinsky@users.noreply.github.com" ]
72462941+Korinsky@users.noreply.github.com
98afb32b4a54532746127c0a78d01a693fc7d98a
21899ea0e94cb58f8ac99b7c731f59e0232839eb
/src/python/T0/WMBS/Oracle/Subscriptions/HaveJobGroup.py
59ece2561182f2a6ec7589262150c04280d86513
[ "Apache-2.0" ]
permissive
dmwm/T0
a6ee9d61abc05876fc24f8af69fe932a2f542d21
1af91d0b1971b7d45ea7378e754f2218ff9a8474
refs/heads/master
2023-08-16T10:55:27.493160
2023-08-11T09:38:03
2023-08-11T09:38:03
4,423,801
9
54
Apache-2.0
2023-09-14T11:43:30
2012-05-23T18:33:56
Python
UTF-8
Python
false
false
687
py
""" _HaveJobGroup_ Oracle implementation of HaveJobGroup For a given subscription check if there is an existing job group """ from WMCore.Database.DBFormatter import DBFormatter class HaveJobGroup(DBFormatter): sql = """SELECT 1 FROM wmbs_jobgroup WHERE wmbs_jobgroup.subscription = :subscription AND ROWNUM = 1 """ def execute(self, subscription, conn = None, transaction = False): results = self.dbi.processData(self.sql, { 'subscription' : subscription }, conn = conn, transaction = transaction)[0].fetchall() return ( len(results) > 0 and results[0][0] == 1 )
[ "Dirk.Hufnagel@cern.ch" ]
Dirk.Hufnagel@cern.ch
22e5a66e84c47b3691015f299972b4f9e43427f4
71c331e4b1e00fa3be03b7f711fcb05a793cf2af
/QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/firestore/v1/firestore_v1_client.py
ac370070865d488484aa602c2024b65bf41079fa
[ "LicenseRef-scancode-unknown-license-reference", "Apache-2.0" ]
permissive
iofh/QA-System
568228bb0c0adf9ec23b45cd144d61049e720002
af4a8f1b5f442ddf4905740ae49ed23d69afb0f6
refs/heads/master
2022-11-27T23:04:16.385021
2020-08-12T10:11:44
2020-08-12T10:11:44
286,980,492
0
0
null
null
null
null
UTF-8
Python
false
false
39,606
py
"""Generated client library for firestore version v1.""" # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.py import base_api from googlecloudsdk.third_party.apis.firestore.v1 import firestore_v1_messages as messages class FirestoreV1(base_api.BaseApiClient): """Generated client library for service firestore version v1.""" MESSAGES_MODULE = messages BASE_URL = 'https://firestore.googleapis.com/' MTLS_BASE_URL = 'https://firestore.mtls.googleapis.com/' _PACKAGE = 'firestore' _SCOPES = ['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/datastore'] _VERSION = 'v1' _CLIENT_ID = '1042881264118.apps.googleusercontent.com' _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b' _USER_AGENT = 'google-cloud-sdk' _CLIENT_CLASS_NAME = 'FirestoreV1' _URL_VERSION = 'v1' _API_KEY = None def __init__(self, url='', credentials=None, get_credentials=True, http=None, model=None, log_request=False, log_response=False, credentials_args=None, default_global_params=None, additional_http_headers=None, response_encoding=None): """Create a new firestore handle.""" url = url or self.BASE_URL super(FirestoreV1, self).__init__( url, credentials=credentials, get_credentials=get_credentials, http=http, model=model, log_request=log_request, log_response=log_response, credentials_args=credentials_args, default_global_params=default_global_params, additional_http_headers=additional_http_headers, response_encoding=response_encoding) self.projects_databases_collectionGroups_fields = self.ProjectsDatabasesCollectionGroupsFieldsService(self) self.projects_databases_collectionGroups_indexes = self.ProjectsDatabasesCollectionGroupsIndexesService(self) self.projects_databases_collectionGroups = self.ProjectsDatabasesCollectionGroupsService(self) self.projects_databases_documents = self.ProjectsDatabasesDocumentsService(self) self.projects_databases_operations = self.ProjectsDatabasesOperationsService(self) self.projects_databases = self.ProjectsDatabasesService(self) self.projects_locations = self.ProjectsLocationsService(self) self.projects = self.ProjectsService(self) class ProjectsDatabasesCollectionGroupsFieldsService(base_api.BaseApiService): """Service class for the projects_databases_collectionGroups_fields resource.""" _NAME = 'projects_databases_collectionGroups_fields' def __init__(self, client): super(FirestoreV1.ProjectsDatabasesCollectionGroupsFieldsService, self).__init__(client) self._upload_configs = { } def Get(self, request, global_params=None): r"""Gets the metadata and configuration for a Field. Args: request: (FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleFirestoreAdminV1Field) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}', http_method='GET', method_id='firestore.projects.databases.collectionGroups.fields.get', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest', response_type_name='GoogleFirestoreAdminV1Field', supports_download=False, ) def List(self, request, global_params=None): r"""Lists the field configuration and metadata for this database. 
Currently, FirestoreAdmin.ListFields only supports listing fields that have been explicitly overridden. To issue this query, call FirestoreAdmin.ListFields with the filter set to `indexConfig.usesAncestorConfig:false`. Args: request: (FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleFirestoreAdminV1ListFieldsResponse) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields', http_method='GET', method_id='firestore.projects.databases.collectionGroups.fields.list', ordered_params=['parent'], path_params=['parent'], query_params=['filter', 'pageSize', 'pageToken'], relative_path='v1/{+parent}/fields', request_field='', request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest', response_type_name='GoogleFirestoreAdminV1ListFieldsResponse', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates a field configuration. Currently, field updates apply only to. single field index configuration. However, calls to FirestoreAdmin.UpdateField should provide a field mask to avoid changing any configuration that the caller isn't aware of. The field mask should be specified as: `{ paths: "index_config" }`. This call returns a google.longrunning.Operation which may be used to track the status of the field update. The metadata for the operation will be the type FieldOperationMetadata. To configure the default field settings for the database, use the special `Field` with resource name: `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. Args: request: (FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}', http_method='PATCH', method_id='firestore.projects.databases.collectionGroups.fields.patch', ordered_params=['name'], path_params=['name'], query_params=['updateMask'], relative_path='v1/{+name}', request_field='googleFirestoreAdminV1Field', request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest', response_type_name='GoogleLongrunningOperation', supports_download=False, ) class ProjectsDatabasesCollectionGroupsIndexesService(base_api.BaseApiService): """Service class for the projects_databases_collectionGroups_indexes resource.""" _NAME = 'projects_databases_collectionGroups_indexes' def __init__(self, client): super(FirestoreV1.ProjectsDatabasesCollectionGroupsIndexesService, self).__init__(client) self._upload_configs = { } def Create(self, request, global_params=None): r"""Creates a composite index. This returns a google.longrunning.Operation. which may be used to track the status of the creation. The metadata for the operation will be the type IndexOperationMetadata. 
Args: request: (FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('Create') return self._RunMethod( config, request, global_params=global_params) Create.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes', http_method='POST', method_id='firestore.projects.databases.collectionGroups.indexes.create', ordered_params=['parent'], path_params=['parent'], query_params=[], relative_path='v1/{+parent}/indexes', request_field='googleFirestoreAdminV1Index', request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest', response_type_name='GoogleLongrunningOperation', supports_download=False, ) def Delete(self, request, global_params=None): r"""Deletes a composite index. Args: request: (FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}', http_method='DELETE', method_id='firestore.projects.databases.collectionGroups.indexes.delete', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest', response_type_name='Empty', supports_download=False, ) def Get(self, request, global_params=None): r"""Gets a composite index. Args: request: (FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleFirestoreAdminV1Index) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}', http_method='GET', method_id='firestore.projects.databases.collectionGroups.indexes.get', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest', response_type_name='GoogleFirestoreAdminV1Index', supports_download=False, ) def List(self, request, global_params=None): r"""Lists composite indexes. Args: request: (FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleFirestoreAdminV1ListIndexesResponse) The response message. 
""" config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes', http_method='GET', method_id='firestore.projects.databases.collectionGroups.indexes.list', ordered_params=['parent'], path_params=['parent'], query_params=['filter', 'pageSize', 'pageToken'], relative_path='v1/{+parent}/indexes', request_field='', request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest', response_type_name='GoogleFirestoreAdminV1ListIndexesResponse', supports_download=False, ) class ProjectsDatabasesCollectionGroupsService(base_api.BaseApiService): """Service class for the projects_databases_collectionGroups resource.""" _NAME = 'projects_databases_collectionGroups' def __init__(self, client): super(FirestoreV1.ProjectsDatabasesCollectionGroupsService, self).__init__(client) self._upload_configs = { } class ProjectsDatabasesDocumentsService(base_api.BaseApiService): """Service class for the projects_databases_documents resource.""" _NAME = 'projects_databases_documents' def __init__(self, client): super(FirestoreV1.ProjectsDatabasesDocumentsService, self).__init__(client) self._upload_configs = { } def BatchGet(self, request, global_params=None): r"""Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Args: request: (FirestoreProjectsDatabasesDocumentsBatchGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BatchGetDocumentsResponse) The response message. """ config = self.GetMethodConfig('BatchGet') return self._RunMethod( config, request, global_params=global_params) BatchGet.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:batchGet', http_method='POST', method_id='firestore.projects.databases.documents.batchGet', ordered_params=['database'], path_params=['database'], query_params=[], relative_path='v1/{+database}/documents:batchGet', request_field='batchGetDocumentsRequest', request_type_name='FirestoreProjectsDatabasesDocumentsBatchGetRequest', response_type_name='BatchGetDocumentsResponse', supports_download=False, ) def BeginTransaction(self, request, global_params=None): r"""Starts a new transaction. Args: request: (FirestoreProjectsDatabasesDocumentsBeginTransactionRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BeginTransactionResponse) The response message. """ config = self.GetMethodConfig('BeginTransaction') return self._RunMethod( config, request, global_params=global_params) BeginTransaction.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction', http_method='POST', method_id='firestore.projects.databases.documents.beginTransaction', ordered_params=['database'], path_params=['database'], query_params=[], relative_path='v1/{+database}/documents:beginTransaction', request_field='beginTransactionRequest', request_type_name='FirestoreProjectsDatabasesDocumentsBeginTransactionRequest', response_type_name='BeginTransactionResponse', supports_download=False, ) def Commit(self, request, global_params=None): r"""Commits a transaction, while optionally updating documents. 
Args: request: (FirestoreProjectsDatabasesDocumentsCommitRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (CommitResponse) The response message. """ config = self.GetMethodConfig('Commit') return self._RunMethod( config, request, global_params=global_params) Commit.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:commit', http_method='POST', method_id='firestore.projects.databases.documents.commit', ordered_params=['database'], path_params=['database'], query_params=[], relative_path='v1/{+database}/documents:commit', request_field='commitRequest', request_type_name='FirestoreProjectsDatabasesDocumentsCommitRequest', response_type_name='CommitResponse', supports_download=False, ) def CreateDocument(self, request, global_params=None): r"""Creates a new document. Args: request: (FirestoreProjectsDatabasesDocumentsCreateDocumentRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Document) The response message. """ config = self.GetMethodConfig('CreateDocument') return self._RunMethod( config, request, global_params=global_params) CreateDocument.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}', http_method='POST', method_id='firestore.projects.databases.documents.createDocument', ordered_params=['parent', 'collectionId'], path_params=['collectionId', 'parent'], query_params=['documentId', 'mask_fieldPaths'], relative_path='v1/{+parent}/{collectionId}', request_field='document', request_type_name='FirestoreProjectsDatabasesDocumentsCreateDocumentRequest', response_type_name='Document', supports_download=False, ) def Delete(self, request, global_params=None): r"""Deletes a document. Args: request: (FirestoreProjectsDatabasesDocumentsDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}', http_method='DELETE', method_id='firestore.projects.databases.documents.delete', ordered_params=['name'], path_params=['name'], query_params=['currentDocument_exists', 'currentDocument_updateTime'], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesDocumentsDeleteRequest', response_type_name='Empty', supports_download=False, ) def Get(self, request, global_params=None): r"""Gets a single document. Args: request: (FirestoreProjectsDatabasesDocumentsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Document) The response message. 
""" config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}', http_method='GET', method_id='firestore.projects.databases.documents.get', ordered_params=['name'], path_params=['name'], query_params=['mask_fieldPaths', 'readTime', 'transaction'], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesDocumentsGetRequest', response_type_name='Document', supports_download=False, ) def List(self, request, global_params=None): r"""Lists documents. Args: request: (FirestoreProjectsDatabasesDocumentsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListDocumentsResponse) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}', http_method='GET', method_id='firestore.projects.databases.documents.list', ordered_params=['parent', 'collectionId'], path_params=['collectionId', 'parent'], query_params=['mask_fieldPaths', 'orderBy', 'pageSize', 'pageToken', 'readTime', 'showMissing', 'transaction'], relative_path='v1/{+parent}/{collectionId}', request_field='', request_type_name='FirestoreProjectsDatabasesDocumentsListRequest', response_type_name='ListDocumentsResponse', supports_download=False, ) def ListCollectionIds(self, request, global_params=None): r"""Lists all the collection IDs underneath a document. Args: request: (FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListCollectionIdsResponse) The response message. """ config = self.GetMethodConfig('ListCollectionIds') return self._RunMethod( config, request, global_params=global_params) ListCollectionIds.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds', http_method='POST', method_id='firestore.projects.databases.documents.listCollectionIds', ordered_params=['parent'], path_params=['parent'], query_params=[], relative_path='v1/{+parent}:listCollectionIds', request_field='listCollectionIdsRequest', request_type_name='FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest', response_type_name='ListCollectionIdsResponse', supports_download=False, ) def Listen(self, request, global_params=None): r"""Listens to changes. Args: request: (FirestoreProjectsDatabasesDocumentsListenRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListenResponse) The response message. 
""" config = self.GetMethodConfig('Listen') return self._RunMethod( config, request, global_params=global_params) Listen.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:listen', http_method='POST', method_id='firestore.projects.databases.documents.listen', ordered_params=['database'], path_params=['database'], query_params=[], relative_path='v1/{+database}/documents:listen', request_field='listenRequest', request_type_name='FirestoreProjectsDatabasesDocumentsListenRequest', response_type_name='ListenResponse', supports_download=False, ) def Patch(self, request, global_params=None): r"""Updates or inserts a document. Args: request: (FirestoreProjectsDatabasesDocumentsPatchRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Document) The response message. """ config = self.GetMethodConfig('Patch') return self._RunMethod( config, request, global_params=global_params) Patch.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}', http_method='PATCH', method_id='firestore.projects.databases.documents.patch', ordered_params=['name'], path_params=['name'], query_params=['currentDocument_exists', 'currentDocument_updateTime', 'mask_fieldPaths', 'updateMask_fieldPaths'], relative_path='v1/{+name}', request_field='document', request_type_name='FirestoreProjectsDatabasesDocumentsPatchRequest', response_type_name='Document', supports_download=False, ) def Rollback(self, request, global_params=None): r"""Rolls back a transaction. Args: request: (FirestoreProjectsDatabasesDocumentsRollbackRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Rollback') return self._RunMethod( config, request, global_params=global_params) Rollback.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:rollback', http_method='POST', method_id='firestore.projects.databases.documents.rollback', ordered_params=['database'], path_params=['database'], query_params=[], relative_path='v1/{+database}/documents:rollback', request_field='rollbackRequest', request_type_name='FirestoreProjectsDatabasesDocumentsRollbackRequest', response_type_name='Empty', supports_download=False, ) def RunQuery(self, request, global_params=None): r"""Runs a query. Args: request: (FirestoreProjectsDatabasesDocumentsRunQueryRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (RunQueryResponse) The response message. """ config = self.GetMethodConfig('RunQuery') return self._RunMethod( config, request, global_params=global_params) RunQuery.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery', http_method='POST', method_id='firestore.projects.databases.documents.runQuery', ordered_params=['parent'], path_params=['parent'], query_params=[], relative_path='v1/{+parent}:runQuery', request_field='runQueryRequest', request_type_name='FirestoreProjectsDatabasesDocumentsRunQueryRequest', response_type_name='RunQueryResponse', supports_download=False, ) def Write(self, request, global_params=None): r"""Streams batches of document updates and deletes, in order. 
Args: request: (FirestoreProjectsDatabasesDocumentsWriteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (WriteResponse) The response message. """ config = self.GetMethodConfig('Write') return self._RunMethod( config, request, global_params=global_params) Write.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:write', http_method='POST', method_id='firestore.projects.databases.documents.write', ordered_params=['database'], path_params=['database'], query_params=[], relative_path='v1/{+database}/documents:write', request_field='writeRequest', request_type_name='FirestoreProjectsDatabasesDocumentsWriteRequest', response_type_name='WriteResponse', supports_download=False, ) class ProjectsDatabasesOperationsService(base_api.BaseApiService): """Service class for the projects_databases_operations resource.""" _NAME = 'projects_databases_operations' def __init__(self, client): super(FirestoreV1.ProjectsDatabasesOperationsService, self).__init__(client) self._upload_configs = { } def Cancel(self, request, global_params=None): r"""Starts asynchronous cancellation on a long-running operation. The server. makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`. Args: request: (FirestoreProjectsDatabasesOperationsCancelRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. """ config = self.GetMethodConfig('Cancel') return self._RunMethod( config, request, global_params=global_params) Cancel.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}:cancel', http_method='POST', method_id='firestore.projects.databases.operations.cancel', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}:cancel', request_field='googleLongrunningCancelOperationRequest', request_type_name='FirestoreProjectsDatabasesOperationsCancelRequest', response_type_name='Empty', supports_download=False, ) def Delete(self, request, global_params=None): r"""Deletes a long-running operation. This method indicates that the client is. no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Args: request: (FirestoreProjectsDatabasesOperationsDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Empty) The response message. 
""" config = self.GetMethodConfig('Delete') return self._RunMethod( config, request, global_params=global_params) Delete.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}', http_method='DELETE', method_id='firestore.projects.databases.operations.delete', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesOperationsDeleteRequest', response_type_name='Empty', supports_download=False, ) def Get(self, request, global_params=None): r"""Gets the latest state of a long-running operation. Clients can use this. method to poll the operation result at intervals as recommended by the API service. Args: request: (FirestoreProjectsDatabasesOperationsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}', http_method='GET', method_id='firestore.projects.databases.operations.get', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsDatabasesOperationsGetRequest', response_type_name='GoogleLongrunningOperation', supports_download=False, ) def List(self, request, global_params=None): r"""Lists operations that match the specified filter in the request. If the. server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id. Args: request: (FirestoreProjectsDatabasesOperationsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningListOperationsResponse) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations', http_method='GET', method_id='firestore.projects.databases.operations.list', ordered_params=['name'], path_params=['name'], query_params=['filter', 'pageSize', 'pageToken'], relative_path='v1/{+name}/operations', request_field='', request_type_name='FirestoreProjectsDatabasesOperationsListRequest', response_type_name='GoogleLongrunningListOperationsResponse', supports_download=False, ) class ProjectsDatabasesService(base_api.BaseApiService): """Service class for the projects_databases resource.""" _NAME = 'projects_databases' def __init__(self, client): super(FirestoreV1.ProjectsDatabasesService, self).__init__(client) self._upload_configs = { } def ExportDocuments(self, request, global_params=None): r"""Exports a copy of all or a subset of documents from Google Cloud Firestore. to another storage system, such as Google Cloud Storage. 
Recent updates to documents may not be reflected in the export. The export occurs in the background and its progress can be monitored and managed via the Operation resource that is created. The output of an export may only be used once the associated operation is done. If an export operation is cancelled before completion it may leave partial data behind in Google Cloud Storage. Args: request: (FirestoreProjectsDatabasesExportDocumentsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('ExportDocuments') return self._RunMethod( config, request, global_params=global_params) ExportDocuments.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}:exportDocuments', http_method='POST', method_id='firestore.projects.databases.exportDocuments', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}:exportDocuments', request_field='googleFirestoreAdminV1ExportDocumentsRequest', request_type_name='FirestoreProjectsDatabasesExportDocumentsRequest', response_type_name='GoogleLongrunningOperation', supports_download=False, ) def ImportDocuments(self, request, global_params=None): r"""Imports documents into Google Cloud Firestore. Existing documents with the. same name are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportDocuments operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Firestore. Args: request: (FirestoreProjectsDatabasesImportDocumentsRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (GoogleLongrunningOperation) The response message. """ config = self.GetMethodConfig('ImportDocuments') return self._RunMethod( config, request, global_params=global_params) ImportDocuments.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/databases/{databasesId}:importDocuments', http_method='POST', method_id='firestore.projects.databases.importDocuments', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}:importDocuments', request_field='googleFirestoreAdminV1ImportDocumentsRequest', request_type_name='FirestoreProjectsDatabasesImportDocumentsRequest', response_type_name='GoogleLongrunningOperation', supports_download=False, ) class ProjectsLocationsService(base_api.BaseApiService): """Service class for the projects_locations resource.""" _NAME = 'projects_locations' def __init__(self, client): super(FirestoreV1.ProjectsLocationsService, self).__init__(client) self._upload_configs = { } def Get(self, request, global_params=None): r"""Gets information about a location. Args: request: (FirestoreProjectsLocationsGetRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Location) The response message. 
""" config = self.GetMethodConfig('Get') return self._RunMethod( config, request, global_params=global_params) Get.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/locations/{locationsId}', http_method='GET', method_id='firestore.projects.locations.get', ordered_params=['name'], path_params=['name'], query_params=[], relative_path='v1/{+name}', request_field='', request_type_name='FirestoreProjectsLocationsGetRequest', response_type_name='Location', supports_download=False, ) def List(self, request, global_params=None): r"""Lists information about the supported locations for this service. Args: request: (FirestoreProjectsLocationsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListLocationsResponse) The response message. """ config = self.GetMethodConfig('List') return self._RunMethod( config, request, global_params=global_params) List.method_config = lambda: base_api.ApiMethodInfo( flat_path='v1/projects/{projectsId}/locations', http_method='GET', method_id='firestore.projects.locations.list', ordered_params=['name'], path_params=['name'], query_params=['filter', 'pageSize', 'pageToken'], relative_path='v1/{+name}/locations', request_field='', request_type_name='FirestoreProjectsLocationsListRequest', response_type_name='ListLocationsResponse', supports_download=False, ) class ProjectsService(base_api.BaseApiService): """Service class for the projects resource.""" _NAME = 'projects' def __init__(self, client): super(FirestoreV1.ProjectsService, self).__init__(client) self._upload_configs = { }
[ "ige-public@hotmail.com" ]
ige-public@hotmail.com
9cc95780a34d3bb2c8acb0cde93d72a744ba1ce1
5f596cf8fc95e72caa87fcd51aa2446f9e6fc0d4
/tasks.py
01c26b63c44c8cec31f1ad19c349b4ea31ffa67d
[ "MIT" ]
permissive
jakobzeitler/causalinfo
265f34f79a13c6ee9ce1173aae202e960766327f
a8e6b6e9dae8dfd4d2e18010908c4905089538a1
refs/heads/master
2020-03-23T17:24:25.087306
2017-01-05T08:07:40
2017-01-05T08:07:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,699
py
# -*- coding: utf-8 -*-
from invoke import task, run

import os
import sys


@task
def test(cover=False):
    """Run tests (use --cover for coverage tests)"""
    if cover:
        run('py.test --cov-report term-missing --cov=causalinfo tests', pty=True)
    else:
        run('py.test -v', pty=True)


@task
def clean():
    """Clean all build and cruft files"""
    print("Removing python cruft ...")
    run("find . -name '*.pyc' -exec rm -f {} +")
    run("find . -name '*.pyo' -exec rm -f {} +")
    run("find . -name '*~' -exec rm -f {} +")
    run("find . -name '__pycache__' -exec rm -fr {} +")

    print("Removing build ...")
    run("rm -rf build")
    run("rm -rf dist")
    run("rm -rf *.egg-info")

    print("Removing IPython Notebook checkpoints...")
    # Jupyter/IPython writes checkpoints to '.ipynb_checkpoints' directories
    run("find . -name '.ipynb_checkpoints' -exec rm -fr {} +")

    print("Removing generated html ...")
    run("rm -f README.html")


@task
def build():
    """Build the distribution"""
    print("Building sdist ...")
    run('python setup.py sdist', hide='out')

    print("Building bdist_wheel ...")
    run('python setup.py bdist_wheel', hide='out')


@task
def publish(release=False):
    """Publish to the cheeseshop."""
    if release:
        run('python setup.py register')
        run('twine upload dist/*.tar.gz')
        run('twine upload dist/*.whl')
    else:
        # '-r test' is an option of the register command, so it must follow it
        run('python setup.py register -r test')
        run('twine upload -r test dist/*.tar.gz')
        run('twine upload -r test dist/*.whl')


@task
def readme(browse=True):
    run('rst2html.py README.rst > README.html')
    if browse:
        run('open README.html')


@task
def notebook():
    from IPython.terminal.ipapp import launch_new_instance
    from socket import gethostname
    import warnings

    print('Installing in develop mode')
    run('python setup.py develop', hide='out')

    print('Changing to notebooks folder')
    here = os.path.dirname(__file__)
    os.chdir(os.path.join(here, 'notebooks'))

    old_argv = sys.argv[:]
    # Taken from here:
    # http://stackoverflow.com/questions/
    # 26338688/start-ipython-notebook-with-python-file
    try:
        warnings.filterwarnings("ignore", module="zmq.*")
        sys.argv = ['ipython', 'notebook']
        sys.argv.append("--IPKernelApp.pylab='inline'")
        sys.argv.append("--NotebookApp.ip=" + gethostname())
        sys.argv.append("--NotebookApp.open_browser=True")
        print('Invoking "' + ' '.join(sys.argv) + '"')
        launch_new_instance()
    finally:
        # Not sure this is strictly necessary...
        sys.argv = old_argv
        os.chdir(here)
        print('Removing development package...')
        run('python setup.py develop -u', hide='out')
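Every task above shells out through invoke's module-level run helper. A minimal sketch of how it behaves per modern invoke releases (the echoed command is invented for illustration):

# Minimal sketch of invoke's run() as used throughout tasks.py.
# The command string here is invented for illustration.
from invoke import run

result = run('echo building', hide='out')  # hide='out' suppresses stdout, as in build()
print(result.ok, result.stdout.strip())    # -> True building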
[ "brett.calcott@gmail.com" ]
brett.calcott@gmail.com
39ef41ca372b8c23e5a544cffabddd8ade50fad0
bb462a56300aff06f6265e500804a4ecc7e290c4
/mod_int.py
74edcf9d02e8596531719f955e0156a7cf5b6c2b
[ "CC0-1.0" ]
permissive
nohtaray/competitive-programming.py
6d4f0b5b6dde3dfee5a12674a1d0143d760b3644
7d38884007541061ddd69d617a69a0d9bc6176fa
refs/heads/master
2023-06-15T01:17:41.744771
2023-05-27T14:37:04
2023-05-27T14:37:04
180,506,267
1
0
null
null
null
null
UTF-8
Python
false
false
1,170
py
def ModInt(mod):
    class _ModInt:
        def __init__(self, value):
            self.value = value % mod

        def __add__(self, other):
            if isinstance(other, _ModInt):
                return _ModInt(self.value + other.value)
            else:
                return _ModInt(self.value + other)

        def __sub__(self, other):
            if isinstance(other, _ModInt):
                return _ModInt(self.value - other.value)
            else:
                return _ModInt(self.value - other)

        def __radd__(self, other):
            return self.__add__(other)

        def __mul__(self, other):
            if isinstance(other, _ModInt):
                return _ModInt(self.value * other.value)
            else:
                return _ModInt(self.value * other)

        def __truediv__(self, other):
            raise NotImplementedError()

        def __int__(self):
            return self.value

        def __repr__(self):
            return str(self.value)

    return _ModInt


if __name__ == '__main__':
    MI7 = ModInt(mod=7)
    assert int(MI7(1) + MI7(8)) == 2
    assert int(MI7(1) + 8) == 2
    assert int(8 + MI7(1)) == 2
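A small usage sketch (not part of the original file): because _ModInt.__mul__ accepts plain ints, a factorial table under a large prime modulus needs only the operators defined above.

# Usage sketch for ModInt: a factorial table modulo a large prime.
MOD = 10 ** 9 + 7
MI = ModInt(MOD)

def factorials(n):
    """Return [0!, 1!, ..., n!] as _ModInt values modulo MOD."""
    facts = [MI(1)]
    for i in range(1, n + 1):
        facts.append(facts[-1] * i)  # _ModInt.__mul__ with a plain int
    return facts

assert int(factorials(5)[5]) == 120 % MOD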
[ "ydt.hran2@gmail.com" ]
ydt.hran2@gmail.com
f64548cc59fb2b2294373d25879cdab04e508e9f
d121775327c0c2e1d7210eab0f52d1818c56aa0c
/Wikipedia_Scraper/venv/bin/wheel
12e896c57139377e445ecb2d018a31e72715bb96
[]
no_license
shmoss/Python-Backend-TownSounds
f396d8fbd55b08730286109dc27c1e948a33c9c8
ba38bed2894ac45eb344c8fa2a23a49daa6fd3f0
refs/heads/master
2021-07-15T07:52:05.267561
2021-07-08T21:28:37
2021-07-08T21:28:37
180,048,120
1
0
null
null
null
null
UTF-8
Python
false
false
265
#!/Users/starrmoss/PycharmProjects/hi/Wikipedia_Scraper/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from wheel.cli import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
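The only logic in this launcher is the argv[0] rewrite before delegating to wheel's CLI; a quick illustration of what the re.sub strips (example names invented):

# Illustration of the argv[0] normalization performed above.
import re

for name in ['wheel-script.py', 'wheel.exe', 'wheel']:
    print(re.sub(r'(-script\.pyw?|\.exe)?$', '', name))  # -> 'wheel' each time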
[ "shmoss@wisc.edu" ]
shmoss@wisc.edu
2a2f1f71c97c0e8e03c4f0bcc38faa88410be7f6
7e2aefac7b540f4d4bad0fa6dd94dbcdad34d6a3
/modular/app.py
f7b681f53f3b0400a266359154603d66d9fc1cbf
[]
no_license
imajaydwivedi/Python-BootCamp
ade3a3557d51b7b25a7b2ba3b79952a622896b29
2f5e1629a160a33017c9ab548b9d7c88ad57c917
refs/heads/master
2023-06-29T04:48:20.744027
2023-06-13T15:11:36
2023-06-13T15:11:36
250,841,367
0
0
null
null
null
null
UTF-8
Python
false
false
405
py
import services.directory as directory

if __name__ == "__main__":
    directory.add({"name": "Krishna", "phone": 1234})
    directory.add({"name": "Mohan", "phone": 2345})
    directory.add({"name": "Koyya", "phone": 3456})

    print(directory.list())
    print(directory.count())
    print(directory.find_by(1))
    print(directory.search_by("Koyya"))

    directory.remove_by(1)
    print(directory.list())
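app.py imports services.directory, which is not included in this excerpt. Below is a minimal, hypothetical sketch of that module, inferred purely from the calls above; the function semantics (integer ids starting at 1, name-based search) are assumptions, not the repository's actual implementation.

# services/directory.py -- hypothetical sketch, inferred from the calls above.
_entries = {}
_next_id = 1

def add(entry):
    global _next_id
    _entries[_next_id] = entry
    _next_id += 1

def list():  # shadows the builtin, mirroring the directory.list() call in app.py
    return [dict(e, id=i) for i, e in _entries.items()]

def count():
    return len(_entries)

def find_by(entry_id):
    return _entries.get(entry_id)

def search_by(name):
    return [e for e in _entries.values() if e.get("name") == name]

def remove_by(entry_id):
    _entries.pop(entry_id, None)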
[ "dwivedaj@arcesium.com" ]
dwivedaj@arcesium.com
5c59103c775af199dd22c783d6c67d60fb97d5d3
49e0b6094a6841efd74ba57cd01913b465223333
/data_structures_and_algorithms_python/challenges/tree_fizz_buzz/tree_fizz_buzz.py
5883a22406f14bb3defa4c58189abd1927c6c06e
[]
no_license
HamzaQahoush/data-structures-and-algorithms--Python
1c2fdfc8b90efc190108ed139372591741d5acc7
81bc4424065bc6b7ef99ab4dbba60524a75058a4
refs/heads/master
2023-07-15T04:03:05.158576
2021-08-05T17:34:47
2021-08-05T17:34:47
376,792,369
0
1
null
2021-08-05T17:29:16
2021-06-14T11:00:05
Python
UTF-8
Python
false
false
1,647
py
class Node:
    def __init__(self, value):
        self.value = value
        self.child = []

    def __str__(self):
        return str(self.value)


class KAryTree:
    def __init__(self):
        self.root = None


"""This code was done with help from Ahmad Zatar"""


def fizz_Buzz_Tree(tree):
    # Replace every node's value with "Fizz Buzz" / "Buzz" / "Fizz", or its
    # string form, depending on divisibility by 15, 5 or 3.
    def traverse(node):
        if node.child:
            for i in range(len(node.child)):
                traverse(node.child[i])
                if node.child[i].value % 5 == 0 and \
                        node.child[i].value % 3 == 0:
                    node.child[i].value = "Fizz Buzz"
                elif node.child[i].value % 5 == 0:
                    node.child[i].value = "Buzz"
                elif node.child[i].value % 3 == 0:
                    node.child[i].value = "Fizz"
                else:
                    node.child[i].value = str(node.child[i].value)

    traverse(tree.root)
    # elif (rather than repeated if) so a root divisible by 15 is not tested
    # again after its value has already been replaced with a string
    if tree.root.value % 5 == 0 and tree.root.value % 3 == 0:
        tree.root.value = "Fizz Buzz"
    elif tree.root.value % 5 == 0:
        tree.root.value = "Buzz"
    elif tree.root.value % 3 == 0:
        tree.root.value = "Fizz"
    else:
        tree.root.value = str(tree.root.value)
    return tree


if __name__ == "__main__":
    kAryTree = KAryTree()
    kAryTree.root = Node(1)                    # root
    kAryTree.root.child += [Node(2)]           # child 0
    kAryTree.root.child += [Node(3)]           # child 1
    kAryTree.root.child += [Node(5)]           # child 2
    kAryTree.root.child[0].child += [Node(5)]  # child[0,0]
    fizz_Buzz_Tree(kAryTree)
    print(kAryTree.root.child[0].value)           # 2 -> 2
    print(kAryTree.root.child[1].value)           # 3 -> Fizz
    print(kAryTree.root.child[0].child[0].value)  # 5 -> Buzz
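For reference, here is the divisibility ladder applied to each node, shown in isolation (an illustrative helper, not part of the original file):

# Illustrative helper: the FizzBuzz mapping for a single value.
def fizz_buzz_value(value):
    if value % 15 == 0:
        return "Fizz Buzz"
    if value % 5 == 0:
        return "Buzz"
    if value % 3 == 0:
        return "Fizz"
    return str(value)

assert fizz_buzz_value(15) == "Fizz Buzz"
assert fizz_buzz_value(9) == "Fizz"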
[ "hamza.qah@gmail.com" ]
hamza.qah@gmail.com
7a4a7b2829526271df0ee298213f785025e1cafc
a473fdce56e422137f0f14514081bf7c10e9aa90
/source/data_processing.py
f58589cc42ed19b0dfa0cd0c580dc9171fd81ec2
[]
no_license
mrandic/Bike-Rental-Case
bc264d02d115db178ff35d67b0c98d4644dbf954
79d29a5a5a1a914e5936e688d2b1ed850373b301
refs/heads/main
2023-08-15T02:10:52.483478
2021-09-21T23:34:44
2021-09-21T23:34:44
409,000,045
0
0
null
null
null
null
UTF-8
Python
false
false
9,767
py
import pandas as pd
import numpy as np
from dateutil.parser import parse


def processHubwayTripsData(hubway_trips_df):
    """
    Create initial features from hubway trips data
    :param hubway_trips_df: Hubway trips data
    :return: Feature engineered dataframe
    """
    hubway_trips_df['start_date'] = hubway_trips_df['start_date'].apply(lambda x: parse(x))
    hubway_trips_df['year_start'] = hubway_trips_df['start_date'].apply(lambda x: x.year)
    hubway_trips_df['month_start'] = hubway_trips_df['start_date'].apply(lambda x: x.month)
    hubway_trips_df['weekday_start'] = hubway_trips_df['start_date'].apply(lambda x: x.dayofweek)
    hubway_trips_df['day_start'] = hubway_trips_df['start_date'].apply(lambda x: x.day)
    hubway_trips_df['hour_start'] = hubway_trips_df['start_date'].apply(lambda x: x.hour)
    hubway_trips_df = hubway_trips_df.rename(columns={'status': 'trip_status'})
    return hubway_trips_df


def mapFrequentPostalCodeToGPSData():
    """
    Manually map approximate postal code GPS locations obtained from OpenStreetMap service
    :return: Dataframe mapping zip codes to approximate GPS coordinates
    """
    data = {'zip_code': ["'02118", "'02139", "'02215", "'02116", "'02115",
                         "'02138", "'02114", "'02143", "'02113", "'02134"],
            'zip_code_lat': [42.3407, 42.3643, 42.3476, 42.3514, 42.3480,
                             42.34733, 42.36033, 42.38371, 42.36285, 42.35595],
            'zip_code_lng': [-71.0708, -71.1022, -71.1009, -71.0776, -71.0885,
                             -71.16867, -71.06732, -71.10213, -71.05518, -71.13411]
            }
    return pd.DataFrame(data=data)


def createMasterDataSet(hubway_trips_df, hubway_stations_df, weather_df, zip_code_gps_df):
    """
    Create master dataset from all available datasets
    :param hubway_trips_df: Hubway trips data
    :param hubway_stations_df: Hubway stations data
    :param weather_df: Weather data for Boston (additional added data source)
    :param zip_code_gps_df: Zip code GPS locations (additional added data source)
    :return: Master dataset
    """
    hubway_trips_df = processHubwayTripsData(hubway_trips_df)
    master_df = pd.merge(hubway_trips_df, hubway_stations_df, how='left',
                         left_on='strt_statn', right_on='id')
    master_df = master_df.rename(columns={'id': 'id_start', 'terminal': 'terminal_start',
                                          'station': 'station_start', 'municipal': 'municipal_start',
                                          'lat': 'lat_start', 'lng': 'lng_start',
                                          'status': 'status_start'})
    master_df = pd.merge(master_df, hubway_stations_df, how='left',
                         left_on='end_statn', right_on='id')
    master_df = master_df.rename(columns={'id': 'id_end', 'terminal': 'terminal_end',
                                          'station': 'station_end', 'municipal': 'municipal_end',
                                          'lat': 'lat_end', 'lng': 'lng_end',
                                          'status': 'status_end'})
    master_df = pd.merge(master_df, weather_df, how='left',
                         left_on=['year_start', 'month_start', 'day_start'],
                         right_on=['Year', 'Month', 'Day'])
    master_df = pd.merge(master_df, zip_code_gps_df, how='left',
                         left_on=['zip_code'], right_on=['zip_code'])
    return master_df


def importData():
    """
    Imports all datasets into working memory using pandas
    :return: Pandas dataframes for further analysis
    """
    hubway_stations_df = pd.read_csv('hubway_stations.csv', sep=',').sort_values(['station'], ascending=True)
    hubway_trips_df = pd.read_csv('hubway_trips.csv', sep=',')
    weather_df = pd.read_csv('boston_weather.csv', sep=',')
    zip_code_gps_df = mapFrequentPostalCodeToGPSData()
    return hubway_trips_df, hubway_stations_df, weather_df, zip_code_gps_df


def createFeatures(master_df):
    """
    Create initial set of features to be used in the project
    :param master_df: Master dataframe
    :return: Master dataframe
    """
    # flag whether user has started and finished bike ride on the same station
    master_df['same_st_flg'] = np.where(master_df['strt_statn'] == master_df['end_statn'], 1, 0)

    # age feature
    master_df['age'] = master_df[(master_df['subsc_type'] == 'Registered')]['year_start'] - \
                       master_df[(master_df['subsc_type'] == 'Registered')]['birth_date']

    # Binned Visibility feature
    bins = [0, 2, 4, 6, 8, np.inf]
    names = ['0-2', '2-4', '4-6', '6-8', '8+']
    master_df['Avg Visibility Range (mi)'] = pd.cut(master_df['Avg Visibility (mi)'], bins, labels=names)

    # Binned Temperature feature
    bins = [20, 40, 60, 80, np.inf]
    names = ['20-40', '40-60', '60-80', '80+']
    master_df['Avg Temp Range (F)'] = pd.cut(master_df['Avg Temp (F)'], bins, labels=names)

    # Binned Humidity feature
    bins = [20, 40, 60, 80, np.inf]
    names = ['20-40', '40-60', '60-80', '80+']
    master_df['Avg Humidity Range (%)'] = pd.cut(master_df['Avg Humidity (%)'], bins, labels=names)

    # Binned Wind Range feature
    bins = [0, 5, 10, 15, np.inf]
    names = ['0-5', '5-10', '10-15', '15+']
    master_df['Avg Wind Range (mph)'] = pd.cut(master_df['Avg Wind (mph)'], bins, labels=names)

    # Binned Dew Point feature
    bins = [0, 20, 40, 60, np.inf]
    names = ['0-20', '20-40', '40-60', '60+']
    master_df['Avg Dew Point Range (F)'] = pd.cut(master_df['Avg Dew Point (F)'], bins, labels=names)

    # Binned Age feature
    bins = [0, 20, 40, 60, np.inf]
    names = ['0-20', '20-40', '40-60', '60+']
    master_df['Age Range'] = pd.cut(master_df[(master_df['subsc_type'] == 'Registered')]['age'], bins, labels=names)

    bike_agg = master_df[['bike_nr', 'seq_id', 'duration']].groupby(by=['bike_nr']).agg(
        bike_use_cnt=('seq_id', 'count'),
        bike_ride_duration_avg=('duration', 'mean')).sort_values(["bike_use_cnt"], ascending=False).reset_index()
    master_df = pd.merge(master_df, bike_agg, how='left', left_on=['bike_nr'], right_on=['bike_nr'])

    # Binned bike use frequency range
    bins = [0, 500, 1000, 1500, np.inf]
    names = ['0-500', '500-1000', '1000-1500', '1500+']
    master_df['Bike Use Range'] = pd.cut(master_df['bike_use_cnt'], bins, labels=names)

    # Binned bike time usage range
    bins = [500, 1000, 1500, np.inf]
    names = ['500-1000', '1000-1500', '1500+']
    master_df['Bike Avg Time Use Range'] = pd.cut(master_df['bike_ride_duration_avg'], bins, labels=names)

    # Clear dataset of outliers (keep durations between 0 and 3000s)
    master_df = master_df[(master_df["duration"] > 0) & (master_df["duration"] <= 3000)]
    return master_df


def renameColumns(feature_set):
    """
    Rename columns to standardized style
    :param feature_set: Feature dataframe
    :return: Feature dataframe with renamed columns
    """
    feature_set = feature_set.rename(
        columns={'lat_start': 'latitude',
                 'lng_start': 'longitude',
                 'year_start': 'year',
                 'month_start': 'month',
                 'weekday_start': 'weekday',
                 'day_start': 'day',
                 'hour_start': 'hour',
                 'municipal_start': 'station_municipality',
                 'status_start': 'station_status',
                 'Bike Use Range': 'bike_freq_use_range',
                 'Bike Avg Time Use Range': 'bike_avg_dur_range',
                 'Avg Temp (F)': 'avg_tmp_f',
                 'Avg Dew Point (F)': 'avg_dew_point_f',
                 'Avg Humidity (%)': 'avg_humidity_pct',
                 'Avg Sea Level Press (in)': 'avg_sea_level_press_in',
                 'Avg Visibility (mi)': 'avg_visibility_mi',
                 'Avg Wind (mph)': 'avg_wind_mph',
                 'Snowfall (in)': 'snowfall_in',
                 'Precip (in)': 'precip_in',
                 'Events': 'weather_event'
                 })
    return feature_set


def featureSubset(master_df):
    """
    Create initial feature subset

    The rest of the variables are excluded because they showed weak variable
    importance while building the model.
    :param master_df: Master dataframe
    :return: Master dataframe with filtered columns
    """
    feature_set = master_df[[
        'municipal_start',
        'lat_start',
        'lng_start',
        'status_start',
        'trip_status',
        'year_start',
        'month_start',
        'weekday_start',
        'day_start',
        'hour_start',
        'subsc_type',
        'zip_code',
        'gender',
        'age',
        'Bike Use Range',
        'Bike Avg Time Use Range',
        'Avg Temp (F)',
        'Avg Dew Point (F)',
        'Avg Humidity (%)',
        'Avg Sea Level Press (in)',
        'Avg Visibility (mi)',
        'Avg Wind (mph)',
        'Snowfall (in)',
        'Precip (in)',
        'Events',
        'duration'
    ]]
    return feature_set


def setFeatureCategoryType(feature_set):
    """
    Cast feature data type to a category type
    This is needed for proper One Hot Encoding process
    :param feature_set: Feature dataframe
    :return: Feature dataframe with column types set as categorized
    """
    feature_set["bike_freq_use_range"] = feature_set["bike_freq_use_range"].astype('category')
    feature_set["bike_avg_dur_range"] = feature_set["bike_avg_dur_range"].astype('category')
    feature_set["station_municipality"] = feature_set["station_municipality"].astype('category')
    feature_set["station_status"] = feature_set["station_status"].astype('category')
    feature_set["trip_status"] = feature_set["trip_status"].astype('category')
    feature_set["subsc_type"] = feature_set["subsc_type"].astype('category')
    feature_set["zip_code"] = feature_set["zip_code"].astype('category')
    feature_set["gender"] = feature_set["gender"].astype('category')
    feature_set["weather_event"] = feature_set["weather_event"].astype('category')
    return feature_set
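createFeatures leans on pd.cut for all of its binned features; a standalone sketch of the pattern, with input values invented for illustration:

# Standalone illustration of the pd.cut binning pattern used in createFeatures.
import numpy as np
import pandas as pd

temps = pd.Series([25.0, 55.0, 72.5, 90.1])
bins = [20, 40, 60, 80, np.inf]
names = ['20-40', '40-60', '60-80', '80+']
binned = pd.cut(temps, bins, labels=names)
print(binned.tolist())  # ['20-40', '40-60', '60-80', '80+']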
[ "milos.randic@telenor.no" ]
milos.randic@telenor.no