Column                  Type             Range / distinct values
blob_id                 stringlengths    40 - 40
directory_id            stringlengths    40 - 40
path                    stringlengths    2 - 616
content_id              stringlengths    40 - 40
detected_licenses       listlengths      0 - 69
license_type            stringclasses    2 values
repo_name               stringlengths    5 - 118
snapshot_id             stringlengths    40 - 40
revision_id             stringlengths    40 - 40
branch_name             stringlengths    4 - 63
visit_date              timestamp[us]
revision_date           timestamp[us]
committer_date          timestamp[us]
github_id               int64            2.91k - 686M
star_events_count       int64            0 - 209k
fork_events_count       int64            0 - 110k
gha_license_id          stringclasses    23 values
gha_event_created_at    timestamp[us]
gha_created_at          timestamp[us]
gha_language            stringclasses    220 values
src_encoding            stringclasses    30 values
language                stringclasses    1 value
is_vendor               bool             2 classes
is_generated            bool             2 classes
length_bytes            int64            2 - 10.3M
extension               stringclasses    257 values
content                 stringlengths    2 - 10.3M
authors                 listlengths      1 - 1
author_id               stringlengths    0 - 212
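The records below list one value per line, in the same column order as the table above. As a minimal sketch, the shard could be inspected with pyarrow, assuming the dump is stored as Parquet; the file name "shard.parquet" is hypothetical and not named anywhere in this dump:

    # Sketch only: read a Parquet shard that follows the schema above and peek at a few columns.
    # "shard.parquet" is a hypothetical file name chosen for illustration.
    import pyarrow.parquet as pq

    table = pq.read_table(
        "shard.parquet",
        columns=["blob_id", "path", "license_type", "length_bytes", "content"],
    )
    print(table.schema)      # Arrow types for the selected columns
    print(table.num_rows)    # number of records in the shard

    # Turn the first few records into plain dicts for a quick look.
    for row in table.slice(0, 3).to_pylist():
        print(row["path"], row["license_type"], row["length_bytes"])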
c9d87460c9daf44323f8c8e853dd25cd21cb8670
35b96d09ff3b74e7f05cc0085dde129456d70ad9
/tornado/Day5/tornado_sqlalchemy.py
10d41141ef829b75b22f49e22e4892636e6990f9
[]
no_license
yanghongfei/Python
ef0e54f98bc390ffd908d27f2ed306952b3bba46
f1103754e2752d38bcfd4357aa4b1a2318b33e31
refs/heads/master
2020-07-01T20:06:52.870910
2018-11-01T09:15:34
2018-11-01T09:15:34
74,260,335
0
1
null
null
null
null
UTF-8
Python
false
false
1,238
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time   : 2018/9/30 10:49
# @Author : Fred Yang
# @File   : tornado_sqlalchemy.py
# @Role   : Sqlalchemy 增删改查

# 导入
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from settings import DB_INFO

HOST=DB_INFO['host']
USER=DB_INFO['user']
PORT=DB_INFO['port']
PASSWD=DB_INFO['password']
DB_NAME= DB_INFO['db_name']

# 创建对象的基类:
Base = declarative_base()


#定义User对象:
class User(Base):
    # 表的名字:
    __tablename__ = 'user'

    # 表的结构:
    id = Column(String(100), primary_key=True)
    name = Column(String(200))


class Weibo(Base):
    __tablename__ = 'weibo'

    id = Column(String(100), primary_key=True)
    username = Column(String(100))   #用户名
    content = Column(String(1000))   #内容


# 初始化数据库连接:
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(USER, PASSWD, HOST, PORT, DB_NAME))
#print(engine)

# 创建DBSession类型:
DBSession = sessionmaker(bind=engine)
Base.metadata.create_all(engine)  #创建表的语句 第一次使用
[ "yanghongfei@shinezone.com" ]
yanghongfei@shinezone.com
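Each record carries enough metadata to rebuild the file on disk (repo_name, path, src_encoding, content). A sketch under that assumption; the `record` dict, the `materialize` helper, and the "restored" output root are illustrative names taken from the schema above, not tooling shipped with the dump:

    # Sketch: write one record's content back to a repository-relative path.
    # `record` is assumed to be a dict keyed by the column names above.
    import os

    def materialize(record, root="restored"):
        rel_path = record["path"].lstrip("/")          # paths in the dump start with "/"
        dest = os.path.join(root, record["repo_name"], rel_path)
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with open(dest, "w", encoding=record["src_encoding"]) as f:
            f.write(record["content"])
        return dest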
03cc688115e56b3caacc8b1bcb0a2acf97cca126
89eec81430daea547822c26cf637bcd9db5e57ad
/pols/migrations/0005_question_number.py
4d1e7c4c89dfaeeeb276ec140daf28cdb8c5dd7a
[]
no_license
sanlem/teston
5bd2f01ef4dc4f3cfef8189d6ea259af78fe4388
89c21ea745b1b517c589caf5688c7a856548d904
refs/heads/master
2020-12-11T22:17:23.943699
2015-06-18T15:44:08
2015-06-18T15:44:08
36,315,768
0
0
null
null
null
null
UTF-8
Python
false
false
398
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('pols', '0004_auto_20150525_1549'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='number',
            field=models.IntegerField(default=1),
        ),
    ]
[ "vfranchook@gmail.com" ]
vfranchook@gmail.com
df199d45bbed5b2eb57ed382acd03991cfdeaff4
1ce4c43907ec04f1e797c317871a23bcec46c3c8
/scripts/utilities/toggle_spout_leds.py
85bc4fc4c907348d12eef83f0aa5ecb56a414877
[]
no_license
m-col/reach
d9709593e1f0ec25786a4c4e601b14b26419ce96
8fabb4ce30ddb39260039ebea2d46a919dfbba14
refs/heads/master
2022-05-31T08:51:31.270970
2022-05-21T19:46:12
2022-05-21T19:46:12
169,552,311
1
2
null
2021-06-25T14:18:21
2019-02-07T10:12:30
Python
UTF-8
Python
false
false
155
py
#!/usr/bin/env python3
"""
Toggle the LEDs.
"""

from reach.backends.raspberrypi import Utilities

rpi = Utilities()
rpi.toggle_spout_leds()
rpi.cleanup()
[ "mcol@posteo.net" ]
mcol@posteo.net
d0fae8b7c4d33afb588c1fd017fe389b750b6135
547ac7b09add2e24146f59fa4377188cd59419fb
/reprozip/pack/vt_workflow/workflow_utils.py
316ba09a4b52958588d151e1ded15d6b8c4f1937
[ "BSD-3-Clause" ]
permissive
fchirigati/reprozip
44b274fec6d9558a97c85e7eb0678730702ccfe0
fb7b4e18a6938fdb10b6fe8e0fcd042ce4547375
refs/heads/master
2020-05-18T05:47:17.156691
2018-06-19T22:39:27
2018-06-19T22:39:27
10,867,693
0
2
null
null
null
null
UTF-8
Python
false
false
10,245
py
############################################################################### ## ## Copyright (C) 2012-2013, NYU-Poly. ## All rights reserved. ## Contact: fchirigati@nyu.edu ## ## This file is part of ReproZip. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of NYU-Poly nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### class WfObject: """ WfObject represents a VisTrails workflow object. """ def __init__(self): """ Init method for WfObject """ self.__sub_element = None def get_sub_element(self): return self.__sub_element def set_sub_element(self, value): self.__sub_element = value sub_element = property(get_sub_element, set_sub_element, None, None) class Module(WfObject): """ Module represents a module in the VisTrails workflow. """ def __init__(self, id, cache, name, namespace, package, version): """ Init method for Module. -> id is the unique id of the object; -> cache indicates whether the module is cacheable or not; -> name is the name of the module; -> namespace is the namespace of the module; -> package is the package that contains the module; -> version is the version of the package """ WfObject.__init__(self) self.__id = id self.__cache = cache self.__name = name self.__namespace = namespace self.__package = package self.__version = version def get_id(self): return self.__id def get_cache(self): return self.__cache def get_name(self): return self.__name def get_namespace(self): return self.__namespace def get_package(self): return self.__package def get_version(self): return self.__version id = property(get_id, None, None, None) cache = property(get_cache, None, None, None) name = property(get_name, None, None, None) namespace = property(get_namespace, None, None, None) package = property(get_package, None, None, None) version = property(get_version, None, None, None) class Annotation(WfObject): """ Annotation represents an annotation in an object of the VisTrails workflow. """ def __init__(self, id, wf_object, key, value): """ Init method for Annotation. 
-> id is the unique id of the annotation; -> wf_object is the object from the workflow with which the annotation is associated; -> key is the key of the annotation; -> value is the value of the annotation """ WfObject.__init__(self) self.__id = id self.__wf_object = wf_object self.__key = key self.__value = value def get_id(self): return self.__id def get_wf_object(self): return self.__wf_object def get_key(self): return self.__key def get_value(self): return self.__value id = property(get_id, None, None, None) wf_object = property(get_wf_object, None, None, None) key = property(get_key, None, None, None) value = property(get_value, None, None, None) class Location(WfObject): """ Location represents the location of a VisTrails module. """ def __init__(self, id, module, x, y): """ Init method for Location. -> id is the unique id of the object; -> module is the module with which the location is associated; -> x is the position in the x axis; -> y is the position in the y axis """ WfObject.__init__(self) self.__id = id self.__module = module self.__x = x self.__y = y def get_id(self): return self.__id def get_module(self): return self.__module def get_x(self): return self.__x def get_y(self): return self.__y id = property(get_id, None, None, None) module = property(get_module, None, None, None) x = property(get_x, None, None, None) y = property(get_y, None, None, None) class Function(WfObject): """ Function represents a function of a VisTrails module. """ def __init__(self, id, module, name, pos): """ Init method for Function. -> id is the unique id of the object; -> module is the module with which the function is associated; -> name is the name of the function; -> pos is... well, pos :-) """ WfObject.__init__(self) self.__id = id self.__module = module self.__name = name self.__pos = pos def get_id(self): return self.__id def get_module(self): return self.__module def get_name(self): return self.__name def get_pos(self): return self.__pos id = property(get_id, None, None, None) module = property(get_module, None, None, None) name = property(get_name, None, None, None) pos = property(get_pos, None, None, None) class Parameter(WfObject): """ Parameter represents the parameter for a function in a VisTrails workflow. """ def __init__(self, id, function, alias, name, pos, type, value): """ Init method for Parameter. -> id is the unique id of the object; -> function is the function with which the parameter is associated; -> alias is an alias for the parameter; -> name is the name of the parameter; -> pos is, well... pos :-) -> type represents the type of the parameter; -> value is the value of the parameter, respecting the type """ WfObject.__init__(self) self.__id = id self.__function = function self.__alias = alias self.__name = name self.__pos = pos self.__type = type self.__value = value def get_id(self): return self.__id def get_function(self): return self.__function def get_alias(self): return self.__alias def get_name(self): return self.__name def get_pos(self): return self.__pos def get_type(self): return self.__type def get_value(self): return self.__value id = property(get_id, None, None, None) function = property(get_function, None, None, None) alias = property(get_alias, None, None, None) name = property(get_name, None, None, None) pos = property(get_pos, None, None, None) type = property(get_type, None, None, None) value = property(get_value, None, None, None) class Connection(WfObject): """ Connection represents a connection in a VisTrails workflow. 
""" def __init__(self, id, source, dst): """ Init method for Connection. -> id is the unique id of the object; -> source is the source port of the connection; -> dst is the destination port of the connection """ WfObject.__init__(self) self.__id = id self.__source = source self.__dst = dst def get_id(self): return self.__id def get_source(self): return self.__source def get_dst(self): return self.__dst id = property(get_id, None, None, None) source = property(get_source, None, None, None) dst = property(get_dst, None, None, None) class Port(WfObject): """ Port represents a port in a VisTrails connection. """ def __init__(self, id, module, name, signature): """ Init method for Port. -> id is the unique id of the object; -> module is the module with which the port is associated; -> name is the name of the port; -> signature is the signature of the port """ WfObject.__init__(self) self.__id = id self.__module = module self.__name = name self.__signature = signature def get_id(self): return self.__id def get_module(self): return self.__module def get_name(self): return self.__name def get_signature(self): return self.__signature id = property(get_id, None, None, None) module = property(get_module, None, None, None) name = property(get_name, None, None, None) signature = property(get_signature, None, None, None)
[ "fernando.chirigati@gmail.com" ]
fernando.chirigati@gmail.com
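license_type separates permissive records, such as the BSD-3-Clause file above, from no_license ones. A hedged sketch of filtering on that column with the Hugging Face datasets library, assuming the shards are available locally as Parquet (the "parquet" loader choice and the data_files glob are assumptions for illustration):

    # Sketch: keep only permissively licensed records.
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files="shards/*.parquet", split="train")
    permissive = ds.filter(lambda r: r["license_type"] == "permissive")
    print(len(permissive), "of", len(ds), "records are permissive")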
c0d41c468fe46eae60304a6e4170b7feb432e5cd
973eed0d6173ab95b3cea9508bd20516ef84a56d
/services/gmaps.py
3a4b1b96e5fbcc5a7452e5331952955d404e7893
[ "Apache-2.0" ]
permissive
FenrirUnbound/kessel-run
213a71d94b74a518a6a92b3fb5929e1ae0e71997
0b39ec4aead0ee1397f46a0893166c433fe4f85b
refs/heads/master
2020-12-07T15:24:13.924077
2017-07-02T02:56:58
2017-07-02T02:56:58
95,517,943
0
0
null
2017-07-02T02:56:59
2017-06-27T04:36:54
Python
UTF-8
Python
false
false
774
py
import googlemaps
import time

from map_formatter import MapFormatter
from models.route import Route
from models.secret import Secret


class Gmaps(object):
    def __init__(self):
        self.gmaps = googlemaps.Client(key=Secret.token())
        self.route_data = Route()
        self.formatter = MapFormatter()

    def lookup_travel_time(self, route_id):
        desired_route = self.route_data.get(route_id)
        now = int(time.time())

        map_data = self.gmaps.directions(
            alternatives=True,
            departure_time=now,
            destination=desired_route['destination'],
            mode='driving',
            origin=desired_route['origin'],
            units='imperial'
        )

        return self.formatter.format(content=map_data)
[ "aeneascorrupt@gmail.com" ]
aeneascorrupt@gmail.com
616c58d38ba1341e31e3110eb28caf617d8d0c51
7f045311cf07d86c21c3e32649a0d3343351d7b5
/sky/c3.py
d661310b92f5e4c9a50397604a8e3141cb7587a1
[ "BSD-2-Clause" ]
permissive
amititash/sky
2c757ec572902f12a6a550597044101d6e7cdbc1
ffaf33e46825522bb87654593a0ca77c095c98b0
refs/heads/master
2020-04-14T22:50:55.264011
2019-02-06T05:11:22
2019-02-06T05:11:22
164,180,289
0
0
NOASSERTION
2019-01-05T04:02:40
2019-01-05T04:02:40
null
UTF-8
Python
false
false
2,190
py
#!/usr/bin/env python3

# --------- 1. Setup ----------------------------------------------
PROJECT_NAME = 'sophonone'

import os, base64, re, logging
from elasticsearch import Elasticsearch
from sky.crawler_services import CrawlElasticSearchService
from sky.crawler_plugins import CrawlElasticSearchPluginNews
import json, sys
import json
import pika

'''
# Parse the auth and host from env:
bonsai = 'https://5bgygw52r4:637c8qay66@cj-test-9194042377.us-west-2.bonsaisearch.net'  #os.environ['BONSAI_URL']
auth = re.search('https\:\/\/(.*)\@', bonsai).group(1).split(':')
host = bonsai.replace('https://%s:%s@' % (auth[0], auth[1]), '')

# Connect to cluster over SSL using auth for best security:
es_header = [{
    'host': host,
    'port': 443,
    'use_ssl': True,
    'http_auth': (auth[0], auth[1])
}]
es = Elasticsearch(es_header)
'''

es = Elasticsearch([{'host': '886f099c.ngrok.io', 'port': 80}])

# Instantiate the new Elasticsearch connection:
cs = CrawlElasticSearchService(PROJECT_NAME, es, CrawlElasticSearchPluginNews)

connection = pika.BlockingConnection(pika.URLParameters('amqp://titash:test123@54.175.53.47/paays_products_cj'))
channel = connection.channel()
channel.queue_declare(queue='crawl')


#code starts here
def goCrawl(ch, method, properties, msg):
    item = msg.decode('utf8')
    item = json.loads(item)
    print(item)

    #es = Elasticsearch([{'host': '886f099c.ngrok.io', 'port': 80}])
    # Instantiate the new Elasticsearch connection:
    #cs = CrawlElasticSearchService(PROJECT_NAME, es, CrawlElasticSearchPluginNews)

    # --------- 4. Start crawling --------------------------------------
    #from sky.configs import PRODUCTION_CRAWL_CONFIG
    #default = cs.get_crawl_plugin('default')
    #default.save_config(PRODUCTION_CRAWL_CONFIG)
    print("****crawling...", item["sku"])
    #one_config = json.load(item)
    #configname = item['sku']
    four = cs['testcrawl']
    four.save_config(item)
    four.run()


#Execution starts from here
channel.basic_consume(goCrawl, queue='crawl', no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
[ "amititash@gmail.com" ]
amititash@gmail.com
1f778b04e332c6fb1e5a8be955cd628bea529f50
36c546160a70228e28f216e841453a55a4b665bb
/cli_common.py
32a2ebcb79b2c99a1bb4fc6b64bbe49c2839a7ee
[]
no_license
tpietruszka/ulmfit_experiments
b4718df389478a12d920f72cdca476797d4397fc
9385cd7d4285f93a2f220bc9fd5095051879a49a
refs/heads/master
2020-04-21T18:18:01.633887
2020-04-07T17:56:19
2020-04-07T17:56:19
169,764,201
0
0
null
null
null
null
UTF-8
Python
false
false
416
py
import pathlib
import os
import sys

os.environ['QT_QPA_PLATFORM'] = 'offscreen'  # prevents some fastai imports from causing a crash

try:
    from ulmfit_experiments import experiments
except ModuleNotFoundError:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    from ulmfit_experiments import experiments

results_dir = (pathlib.Path(__file__).parent / 'trained_models').resolve()
[ "tomek.pietruszka@gmail.com" ]
tomek.pietruszka@gmail.com
57c22cd8876ae6bdf928f7d58919d905f86c43a5
d343b6f47b9241f3822845c6627b82c9f98b95c4
/core/apps.py
ab6efcf7bc66b12c7822555e95a5755b283f13ad
[]
no_license
ivan371/kiber
11f23171bd51b29d210c44db0784b6caea31bdd6
39d7834c5e4e5497061748bd66232936300adda4
refs/heads/master
2021-05-05T06:30:55.836149
2020-06-16T20:37:59
2020-06-16T20:37:59
118,800,832
0
0
null
2020-06-16T20:38:00
2018-01-24T17:51:29
JavaScript
UTF-8
Python
false
false
144
py
from django.apps import AppConfig


class CoreConfig(AppConfig):
    name = 'core'

    def ready(self):
        from .views import UserViewSet
[ "ivan@DESKTOP-GM6Q430.localdomain" ]
ivan@DESKTOP-GM6Q430.localdomain
e79dca9531ee613ea930b7be4c7871b1eac88c18
d608c2b9fbfcd142fa82875f01f70e1db95cecef
/FlaskAppVenv/Lib/site-packages/pymysql/tests/test_connection.py
c626a0d39468fc0249dbdd719881a28872564b48
[ "MIT" ]
permissive
nidheekamble/SponsCentral
9b30918006b98f242de86920a550f8e072ba093f
b8189993cb87cc2d83e36c9d72df7a3b7d620bd7
refs/heads/master
2022-12-21T11:14:36.565494
2021-01-31T16:15:33
2021-01-31T16:15:33
135,418,522
1
2
MIT
2022-12-08T07:57:59
2018-05-30T09:16:30
Python
UTF-8
Python
false
false
24,709
py
import datetime import sys import time import unittest2 import pymysql from pymysql.tests import base from pymysql._compat import text_type from pymysql.constants import CLIENT class TempUser: def __init__(self, c, user, db, auth=None, authdata=None, password=None): self._c = c self._user = user self._db = db create = "CREATE USER " + user if password is not None: create += " IDENTIFIED BY '%s'" % password elif auth is not None: create += " IDENTIFIED WITH %s" % auth if authdata is not None: create += " AS '%s'" % authdata try: c.execute(create) self._created = True except pymysql.err.InternalError: # already exists - TODO need to check the same plugin applies self._created = False try: c.execute("GRANT SELECT ON %s.* TO %s" % (db, user)) self._grant = True except pymysql.err.InternalError: self._grant = False def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if self._grant: self._c.execute("REVOKE SELECT ON %s.* FROM %s" % (self._db, self._user)) if self._created: self._c.execute("DROP USER %s" % self._user) class TestAuthentication(base.PyMySQLTestCase): socket_auth = False socket_found = False two_questions_found = False three_attempts_found = False pam_found = False mysql_old_password_found = False sha256_password_found = False import os osuser = os.environ.get('USER') # socket auth requires the current user and for the connection to be a socket # rest do grants @localhost due to incomplete logic - TODO change to @% then db = base.PyMySQLTestCase.databases[0].copy() socket_auth = db.get('unix_socket') is not None \ and db.get('host') in ('localhost', '127.0.0.1') cur = pymysql.connect(**db).cursor() del db['user'] cur.execute("SHOW PLUGINS") for r in cur: if (r[1], r[2]) != (u'ACTIVE', u'AUTHENTICATION'): continue if r[3] == u'auth_socket.so': socket_plugin_name = r[0] socket_found = True elif r[3] == u'dialog_examples.so': if r[0] == 'two_questions': two_questions_found = True elif r[0] == 'three_attempts': three_attempts_found = True elif r[0] == u'pam': pam_found = True pam_plugin_name = r[3].split('.')[0] if pam_plugin_name == 'auth_pam': pam_plugin_name = 'pam' # MySQL: authentication_pam # https://dev.mysql.com/doc/refman/5.5/en/pam-authentication-plugin.html # MariaDB: pam # https://mariadb.com/kb/en/mariadb/pam-authentication-plugin/ # Names differ but functionality is close elif r[0] == u'mysql_old_password': mysql_old_password_found = True elif r[0] == u'sha256_password': sha256_password_found = True #else: # print("plugin: %r" % r[0]) def test_plugin(self): if not self.mysql_server_is(self.connections[0], (5, 5, 0)): raise unittest2.SkipTest("MySQL-5.5 required for plugins") cur = self.connections[0].cursor() cur.execute("select plugin from mysql.user where concat(user, '@', host)=current_user()") for r in cur: self.assertIn(self.connections[0]._auth_plugin_name, (r[0], 'mysql_native_password')) @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipIf(socket_found, "socket plugin already installed") def testSocketAuthInstallPlugin(self): # needs plugin. lets install it. 
cur = self.connections[0].cursor() try: cur.execute("install plugin auth_socket soname 'auth_socket.so'") TestAuthentication.socket_found = True self.socket_plugin_name = 'auth_socket' self.realtestSocketAuth() except pymysql.err.InternalError: try: cur.execute("install soname 'auth_socket'") TestAuthentication.socket_found = True self.socket_plugin_name = 'unix_socket' self.realtestSocketAuth() except pymysql.err.InternalError: TestAuthentication.socket_found = False raise unittest2.SkipTest('we couldn\'t install the socket plugin') finally: if TestAuthentication.socket_found: cur.execute("uninstall plugin %s" % self.socket_plugin_name) @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipUnless(socket_found, "no socket plugin") def testSocketAuth(self): self.realtestSocketAuth() def realtestSocketAuth(self): with TempUser(self.connections[0].cursor(), TestAuthentication.osuser + '@localhost', self.databases[0]['db'], self.socket_plugin_name) as u: c = pymysql.connect(user=TestAuthentication.osuser, **self.db) class Dialog(object): fail=False def __init__(self, con): self.fail=TestAuthentication.Dialog.fail pass def prompt(self, echo, prompt): if self.fail: self.fail=False return b'bad guess at a password' return self.m.get(prompt) class DialogHandler(object): def __init__(self, con): self.con=con def authenticate(self, pkt): while True: flag = pkt.read_uint8() echo = (flag & 0x06) == 0x02 last = (flag & 0x01) == 0x01 prompt = pkt.read_all() if prompt == b'Password, please:': self.con.write_packet(b'stillnotverysecret\0') else: self.con.write_packet(b'no idea what to do with this prompt\0') pkt = self.con._read_packet() pkt.check_error() if pkt.is_ok_packet() or last: break return pkt class DefectiveHandler(object): def __init__(self, con): self.con=con @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipIf(two_questions_found, "two_questions plugin already installed") def testDialogAuthTwoQuestionsInstallPlugin(self): # needs plugin. lets install it. cur = self.connections[0].cursor() try: cur.execute("install plugin two_questions soname 'dialog_examples.so'") TestAuthentication.two_questions_found = True self.realTestDialogAuthTwoQuestions() except pymysql.err.InternalError: raise unittest2.SkipTest('we couldn\'t install the two_questions plugin') finally: if TestAuthentication.two_questions_found: cur.execute("uninstall plugin two_questions") @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipUnless(two_questions_found, "no two questions auth plugin") def testDialogAuthTwoQuestions(self): self.realTestDialogAuthTwoQuestions() def realTestDialogAuthTwoQuestions(self): TestAuthentication.Dialog.fail=False TestAuthentication.Dialog.m = {b'Password, please:': b'notverysecret', b'Are you sure ?': b'yes, of course'} with TempUser(self.connections[0].cursor(), 'pymysql_2q@localhost', self.databases[0]['db'], 'two_questions', 'notverysecret') as u: with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_2q', **self.db) pymysql.connect(user='pymysql_2q', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db) @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipIf(three_attempts_found, "three_attempts plugin already installed") def testDialogAuthThreeAttemptsQuestionsInstallPlugin(self): # needs plugin. lets install it. 
cur = self.connections[0].cursor() try: cur.execute("install plugin three_attempts soname 'dialog_examples.so'") TestAuthentication.three_attempts_found = True self.realTestDialogAuthThreeAttempts() except pymysql.err.InternalError: raise unittest2.SkipTest('we couldn\'t install the three_attempts plugin') finally: if TestAuthentication.three_attempts_found: cur.execute("uninstall plugin three_attempts") @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipUnless(three_attempts_found, "no three attempts plugin") def testDialogAuthThreeAttempts(self): self.realTestDialogAuthThreeAttempts() def realTestDialogAuthThreeAttempts(self): TestAuthentication.Dialog.m = {b'Password, please:': b'stillnotverysecret'} TestAuthentication.Dialog.fail=True # fail just once. We've got three attempts after all with TempUser(self.connections[0].cursor(), 'pymysql_3a@localhost', self.databases[0]['db'], 'three_attempts', 'stillnotverysecret') as u: pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db) pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DialogHandler}, **self.db) with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': object}, **self.db) with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DefectiveHandler}, **self.db) with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_3a', auth_plugin_map={b'notdialogplugin': TestAuthentication.Dialog}, **self.db) TestAuthentication.Dialog.m = {b'Password, please:': b'I do not know'} with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db) TestAuthentication.Dialog.m = {b'Password, please:': None} with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db) @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipIf(pam_found, "pam plugin already installed") @unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required") @unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required") def testPamAuthInstallPlugin(self): # needs plugin. lets install it. 
cur = self.connections[0].cursor() try: cur.execute("install plugin pam soname 'auth_pam.so'") TestAuthentication.pam_found = True self.realTestPamAuth() except pymysql.err.InternalError: raise unittest2.SkipTest('we couldn\'t install the auth_pam plugin') finally: if TestAuthentication.pam_found: cur.execute("uninstall plugin pam") @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipUnless(pam_found, "no pam plugin") @unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required") @unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required") def testPamAuth(self): self.realTestPamAuth() def realTestPamAuth(self): db = self.db.copy() import os db['password'] = os.environ.get('PASSWORD') cur = self.connections[0].cursor() try: cur.execute('show grants for ' + TestAuthentication.osuser + '@localhost') grants = cur.fetchone()[0] cur.execute('drop user ' + TestAuthentication.osuser + '@localhost') except pymysql.OperationalError as e: # assuming the user doesn't exist which is ok too self.assertEqual(1045, e.args[0]) grants = None with TempUser(cur, TestAuthentication.osuser + '@localhost', self.databases[0]['db'], 'pam', os.environ.get('PAMSERVICE')) as u: try: c = pymysql.connect(user=TestAuthentication.osuser, **db) db['password'] = 'very bad guess at password' with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user=TestAuthentication.osuser, auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler}, **self.db) except pymysql.OperationalError as e: self.assertEqual(1045, e.args[0]) # we had 'bad guess at password' work with pam. Well at least we get a permission denied here with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user=TestAuthentication.osuser, auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler}, **self.db) if grants: # recreate the user cur.execute(grants) # select old_password("crummy p\tassword"); #| old_password("crummy p\tassword") | #| 2a01785203b08770 | @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipUnless(mysql_old_password_found, "no mysql_old_password plugin") def testMySQLOldPasswordAuth(self): if self.mysql_server_is(self.connections[0], (5, 7, 0)): raise unittest2.SkipTest('Old passwords aren\'t supported in 5.7') # pymysql.err.OperationalError: (1045, "Access denied for user 'old_pass_user'@'localhost' (using password: YES)") # from login in MySQL-5.6 if self.mysql_server_is(self.connections[0], (5, 6, 0)): raise unittest2.SkipTest('Old passwords don\'t authenticate in 5.6') db = self.db.copy() db['password'] = "crummy p\tassword" with self.connections[0] as c: # deprecated in 5.6 if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)): with self.assertWarns(pymysql.err.Warning) as cm: c.execute("SELECT OLD_PASSWORD('%s')" % db['password']) else: c.execute("SELECT OLD_PASSWORD('%s')" % db['password']) v = c.fetchone()[0] self.assertEqual(v, '2a01785203b08770') # only works in MariaDB and MySQL-5.6 - can't separate out by version #if self.mysql_server_is(self.connections[0], (5, 5, 0)): # with TempUser(c, 'old_pass_user@localhost', # self.databases[0]['db'], 'mysql_old_password', '2a01785203b08770') as u: # cur = pymysql.connect(user='old_pass_user', **db).cursor() # cur.execute("SELECT VERSION()") c.execute("SELECT @@secure_auth") secure_auth_setting = c.fetchone()[0] c.execute('set old_passwords=1') # 
pymysql.err.Warning: 'pre-4.1 password hash' is deprecated and will be removed in a future release. Please use post-4.1 password hash instead if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)): with self.assertWarns(pymysql.err.Warning) as cm: c.execute('set global secure_auth=0') else: c.execute('set global secure_auth=0') with TempUser(c, 'old_pass_user@localhost', self.databases[0]['db'], password=db['password']) as u: cur = pymysql.connect(user='old_pass_user', **db).cursor() cur.execute("SELECT VERSION()") c.execute('set global secure_auth=%r' % secure_auth_setting) @unittest2.skipUnless(socket_auth, "connection to unix_socket required") @unittest2.skipUnless(sha256_password_found, "no sha256 password authentication plugin found") def testAuthSHA256(self): c = self.connections[0].cursor() with TempUser(c, 'pymysql_sha256@localhost', self.databases[0]['db'], 'sha256_password') as u: if self.mysql_server_is(self.connections[0], (5, 7, 0)): c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' ='Sh@256Pa33'") else: c.execute('SET old_passwords = 2') c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' = PASSWORD('Sh@256Pa33')") db = self.db.copy() db['password'] = "Sh@256Pa33" # not implemented yet so thows error with self.assertRaises(pymysql.err.OperationalError): pymysql.connect(user='pymysql_256', **db) class TestConnection(base.PyMySQLTestCase): def test_utf8mb4(self): """This test requires MySQL >= 5.5""" arg = self.databases[0].copy() arg['charset'] = 'utf8mb4' conn = pymysql.connect(**arg) def test_largedata(self): """Large query and response (>=16MB)""" cur = self.connections[0].cursor() cur.execute("SELECT @@max_allowed_packet") if cur.fetchone()[0] < 16*1024*1024 + 10: print("Set max_allowed_packet to bigger than 17MB") return t = 'a' * (16*1024*1024) cur.execute("SELECT '" + t + "'") assert cur.fetchone()[0] == t def test_autocommit(self): con = self.connections[0] self.assertFalse(con.get_autocommit()) cur = con.cursor() cur.execute("SET AUTOCOMMIT=1") self.assertTrue(con.get_autocommit()) con.autocommit(False) self.assertFalse(con.get_autocommit()) cur.execute("SELECT @@AUTOCOMMIT") self.assertEqual(cur.fetchone()[0], 0) def test_select_db(self): con = self.connections[0] current_db = self.databases[0]['db'] other_db = self.databases[1]['db'] cur = con.cursor() cur.execute('SELECT database()') self.assertEqual(cur.fetchone()[0], current_db) con.select_db(other_db) cur.execute('SELECT database()') self.assertEqual(cur.fetchone()[0], other_db) def test_connection_gone_away(self): """ http://dev.mysql.com/doc/refman/5.0/en/gone-away.html http://dev.mysql.com/doc/refman/5.0/en/error-messages-client.html#error_cr_server_gone_error """ con = self.connect() cur = con.cursor() cur.execute("SET wait_timeout=1") time.sleep(2) with self.assertRaises(pymysql.OperationalError) as cm: cur.execute("SELECT 1+1") # error occures while reading, not writing because of socket buffer. 
#self.assertEqual(cm.exception.args[0], 2006) self.assertIn(cm.exception.args[0], (2006, 2013)) def test_init_command(self): conn = self.connect( init_command='SELECT "bar"; SELECT "baz"', client_flag=CLIENT.MULTI_STATEMENTS) c = conn.cursor() c.execute('select "foobar";') self.assertEqual(('foobar',), c.fetchone()) conn.close() with self.assertRaises(pymysql.err.Error): conn.ping(reconnect=False) def test_read_default_group(self): conn = self.connect( read_default_group='client', ) self.assertTrue(conn.open) def test_context(self): with self.assertRaises(ValueError): c = self.connect() with c as cur: cur.execute('create table test ( a int ) ENGINE=InnoDB') c.begin() cur.execute('insert into test values ((1))') raise ValueError('pseudo abort') c.commit() c = self.connect() with c as cur: cur.execute('select count(*) from test') self.assertEqual(0, cur.fetchone()[0]) cur.execute('insert into test values ((1))') with c as cur: cur.execute('select count(*) from test') self.assertEqual(1,cur.fetchone()[0]) cur.execute('drop table test') def test_set_charset(self): c = self.connect() c.set_charset('utf8mb4') # TODO validate setting here def test_defer_connect(self): import socket d = self.databases[0].copy() try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(d['unix_socket']) except KeyError: sock.close() sock = socket.create_connection( (d.get('host', 'localhost'), d.get('port', 3306))) for k in ['unix_socket', 'host', 'port']: try: del d[k] except KeyError: pass c = pymysql.connect(defer_connect=True, **d) self.assertFalse(c.open) c.connect(sock) c.close() sock.close() @unittest2.skipUnless(sys.version_info[0:2] >= (3,2), "required py-3.2") def test_no_delay_warning(self): current_db = self.databases[0].copy() current_db['no_delay'] = True with self.assertWarns(DeprecationWarning) as cm: conn = pymysql.connect(**current_db) # A custom type and function to escape it class Foo(object): value = "bar" def escape_foo(x, d): return x.value class TestEscape(base.PyMySQLTestCase): def test_escape_string(self): con = self.connections[0] cur = con.cursor() self.assertEqual(con.escape("foo'bar"), "'foo\\'bar'") # added NO_AUTO_CREATE_USER as not including it in 5.7 generates warnings # mysql-8.0 removes the option however if self.mysql_server_is(con, (8, 0, 0)): cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES'") else: cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'") self.assertEqual(con.escape("foo'bar"), "'foo''bar'") def test_escape_builtin_encoders(self): con = self.connections[0] cur = con.cursor() val = datetime.datetime(2012, 3, 4, 5, 6) self.assertEqual(con.escape(val, con.encoders), "'2012-03-04 05:06:00'") def test_escape_custom_object(self): con = self.connections[0] cur = con.cursor() mapping = {Foo: escape_foo} self.assertEqual(con.escape(Foo(), mapping), "bar") def test_escape_fallback_encoder(self): con = self.connections[0] cur = con.cursor() class Custom(str): pass mapping = {text_type: pymysql.escape_string} self.assertEqual(con.escape(Custom('foobar'), mapping), "'foobar'") def test_escape_no_default(self): con = self.connections[0] cur = con.cursor() self.assertRaises(TypeError, con.escape, 42, {}) def test_escape_dict_value(self): con = self.connections[0] cur = con.cursor() mapping = con.encoders.copy() mapping[Foo] = escape_foo self.assertEqual(con.escape({'foo': Foo()}, mapping), {'foo': "bar"}) def test_escape_list_item(self): con = self.connections[0] cur = con.cursor() mapping = con.encoders.copy() mapping[Foo] = escape_foo 
self.assertEqual(con.escape([Foo()], mapping), "(bar)") def test_previous_cursor_not_closed(self): con = self.connect( init_command='SELECT "bar"; SELECT "baz"', client_flag=CLIENT.MULTI_STATEMENTS) cur1 = con.cursor() cur1.execute("SELECT 1; SELECT 2") cur2 = con.cursor() cur2.execute("SELECT 3") self.assertEqual(cur2.fetchone()[0], 3) def test_commit_during_multi_result(self): con = self.connect(client_flag=CLIENT.MULTI_STATEMENTS) cur = con.cursor() cur.execute("SELECT 1; SELECT 2") con.commit() cur.execute("SELECT 3") self.assertEqual(cur.fetchone()[0], 3)
[ "shreyansh.chheda@gmail.com" ]
shreyansh.chheda@gmail.com
ba543c69df6097031a5e3dfae710324c39db3560
43671eb2be91782ac2096b9ec64e3a02b5aa9559
/elComandante/lowVoltage_agente.py
cab98c5f206c5377ec93e1596070cdea46f3e0d2
[]
no_license
psi46/elComandante
fba87eda9c404de0ceed0ab9b81f5258f172325b
052066eda34d1e335128af214e55e330f9b6e855
refs/heads/master
2020-04-06T12:37:01.024183
2016-11-28T14:23:31
2016-11-28T14:23:31
28,905,523
1
4
null
2015-08-08T11:06:20
2015-01-07T08:25:52
FORTRAN
UTF-8
Python
false
false
7,695
py
## @file ## Implements the agente class lowVoltage_agente ## @ingroup elComandante ## @ingroup elAgente import os import subprocess from myutils import process import el_agente def preexec(): os.setpgrp() ## Agente class that communicates with the lowVoltageClient ## ## This is the agente whose job is to communicte with the lowVoltageClient. It ## has a very simple task: To turn on and off the low voltage for the test ## setup. ## ## The low voltage device normally has to operate only before the test ## (lowVoltage_agente.prepare_test) and nothing has to be done during the ## actual testing, except for monitoring the device state. ## ## The action performed is normally only a power cycle which serves as a hard ## reset for the test hardware. ## ## The lowVoltag agente sends very high level commands to the lowVoltageClient such ## as SET OUTPUT ON, SET OUTPUT OFF, or EXEC POWERCYCLE and it does not have to know about the ## details of these operations. It expects that the client handles these ## things and that when it if finished, it will answer the FINISHED command ## with giving back FINISHED. Therefore, the agente waits for the operations ## of the client to finish. Since the client is a separate process, elComandante ## (of which this agente is a part) may continue to start or monitor other ## processes through other agentes. ## ## The configuration of the lowVoltag agente is made in the elComandante.conf ## and the elComandante.ini files. The elComandante.conf file contains information ## about the setup such as low voltage device type and device file name: ## @code ## lowVoltageSubscription: /lowVoltage ## ## [lowVoltageClient] ## lowVoltageType: yoctorelay ## @endcode ## ## The initialization only holds the parameter ## @code ## LowVoltageUse: True ## @endcode ## which enables or disables the lowVoltageAgente. ## @ingroup elComandante ## @ingroup elAgente class lowVoltage_agente(el_agente.el_agente): ## Initializes the agente ## @param timestamp Timestamp from elComandante ## @param log Log handler ## @param sclient Subsystem client handle def __init__(self, timestamp, log, sclient): el_agente.el_agente.__init__(self, timestamp, log, sclient) self.agente_name = "lowVoltageAgente" self.client_name = "lowVoltageClient" ## Sets up the permanent configuration of the agente ## ## Determines settings such as low voltage device type ## from elComandante's permanent configuration. ## @param conf Configuration handle ## @return Boolean for success def setup_configuration(self, conf): ## Type of the low voltage device, to be passed to the client self.device_type = conf.get("lowVoltageClient", "lowVoltageType") self.subscription = conf.get("subsystem", "lowVoltageSubscription") ## Directory for the log files self.logdir = conf.get("Directories", "dataDir") + "/logfiles/" return True ## Sets up the initialization of the agente ## ## Determines settings such as whether the low voltage device is used ## for this run from elComandante's run time configuration ## (initialization) ## @param init Initialization handle ## @return Boolean for success def setup_initialization(self, init): self.active = init.getboolean("LowVoltage", "LowVoltageUse") return True ## Checks whether the lowVoltageClient is running ## ## Checks whether the lowVoltageClient is running by finding ## the PID file and checking the process. 
## @return Boolean, whether the client is running or not def check_client_running(self): if not self.active: return False if process.check_process_running(self.client_name + ".py"): raise Exception("Another %s is already running. Please close this client first." % self.client_name) return True return False ## Starts the lowVoltageClient ## ## If enabled, starts the lowVoltageClient with the parameters read from the ## configuration. ## @param Timestamp ## @return Boolean for success def start_client(self, timestamp): if not self.active: return True command = "xterm +sb -geometry 120x20-0+300 -fs 10 -fa 'Mono' -e '" command += "cd ../lowVoltageClient && python ../lowVoltageClient/lowVoltageClient.py " command += "--timestamp {0:d} ".format(timestamp) command += "--directory {0:s} ".format(self.logdir) command += "--device-type {0:s}'".format(self.device_type) self.log << "Starting " + self.client_name + " ..." ## Child process handle for the lowVoltageClient self.child = subprocess.Popen(command, shell = True, preexec_fn = preexec) return True ## Subscribes to the subsystem channel where the lowVoltageClient listening ## ## Enables listening to the subsystem channel that the lowVoltageClient is ## receiving commands on ## @return None def subscribe(self): if (self.active): self.sclient.subscribe(self.subscription) ## Checks whether the subsystem channel is open and the server is responding ## @return Boolean, whether it is responding or not def check_subscription(self): if (self.active): return self.sclient.checkSubscription(self.subscription) return True ## Asks the lowVoltageClient to exit by sending it a command through the subsystem ## @return Boolean for success def request_client_exit(self): if not self.active: return True self.sclient.send(self.subscription, ":EXIT\n") return False ## Tries to kill the lowVoltageClient by sending the SIGTERM signal ## @return Boolean for success def kill_client(self): if not self.active: return True try: self.child.kill() except: pass return True ## Prepares a test with a given environment ## ## Powercycles the low voltage of the test setup to hard reset ## all devices ## @param test The current test ## @param environment The environment the test should run in ## @return Boolean for success def prepare_test(self, test, environment): # Run before a test is executed if not self.active: return True self.sclient.send(self.subscription, ":EXEC:POWERCYCLE\n") self.set_pending() return True ## Function to execute the test which is disregarded by this agente ## @return Always returns True def execute_test(self): # Runs a test if not self.active: return True return True ## Function to clean up the test which is disregarded by this agente ## ## Turns of the beam. This may change in the future. ## @return Boolean for success def cleanup_test(self): # Run after a test has executed if not self.active: return True return True ## Final test cleanup ## @return Boolean for success def final_test_cleanup(self): # Run after a test has executed if not self.active: return True self.sclient.send(self.subscription, ":EXEC:POWERCYCLE\n") self.set_pending() return True ## Checks whether the client is finished or has an error ## ## Checks whether the client is finished or has an error. Even if ## no action is pending from the client it may happen that the state ## of the low voltage device changes. An error is received in this case ## and an exception is thrown. 
## @return Boolean, whether the client has finished or not def check_finished(self): if not self.active: return True while True: packet = self.sclient.getFirstPacket(self.subscription) if packet.isEmpty(): break if self.pending and "FINISHED" in packet.data.upper(): self.pending = False elif "ERROR" in packet.data.upper(): self.pending = False raise Exception("Error from %s!" % self.client_name) return not self.pending ## Asks whether the client is finished and sets the agente state ## to pending ## @return None def set_pending(self): self.sclient.send(self.subscription, ":FINISHED\n") self.pending = True
[ "mrossini@phys.ethz.ch" ]
mrossini@phys.ethz.ch
c52152bc18b44d48c909e1256ce9ae3b6d37647f
310a141e68d730f2e3a0dee21b14cca65883e521
/courses/migrations/0008_course_passed.py
1ec04deca6b315eb29f330e1aa93eb3abec9e6b9
[]
no_license
sokogfb/edu_fcih
5c2eb883b88d70a34c7f21487527f18a8f6a26b2
c480b448350226a1727f1d155e99dbe1ca6d30e7
refs/heads/master
2021-09-12T14:53:38.484104
2018-04-17T23:13:10
2018-04-17T23:13:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
380
py
# Generated by Django 2.0.3 on 2018-04-02 19:28

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('courses', '0007_term_active'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='passed',
            field=models.BooleanField(default=False),
        ),
    ]
[ "abdelrahman.sico_931@fci.helwan.edu.eg" ]
abdelrahman.sico_931@fci.helwan.edu.eg
43d6dc559a18868fb2fe56aa0c08b57bada0fce3
f80b0891fbd9bbda3532327ed8129406d00947b7
/IP/Lista-6/Comando de Repetição (while) – Roteiro Laboratório/3.py
49086875a6382fccd9177714b3408668a594bca2
[]
no_license
viniciuspolux/UFPB
d7e7cd7101e90b008391605832404ba2ae6d2001
445fc953d9499e41e753c1c3e5c57937d93b2d59
refs/heads/master
2021-01-19T20:44:51.869087
2017-08-30T17:16:07
2017-08-30T17:16:07
101,222,709
0
0
null
null
null
null
UTF-8
Python
false
false
326
py
numeroa, numerob = map(int, input("Digite dois números com um espaço entre eles =").split())
x = 1
while x < 2:
    if (numeroa > 0 and numerob > 0):
        soma = numeroa + numerob
        prod = numeroa * numerob
        print("{} {}".format(soma, prod))
    else:
        print("Você digitou um número inválido")
    x += 1
[ "30605184+viniciuspolux@users.noreply.github.com" ]
30605184+viniciuspolux@users.noreply.github.com
cafe602ff007a80036f57b301bc84dd23e3e5581
eabe529cbf8a6ae6b0ae476961d69182a1827842
/parlai/tasks/tasks.py
421987f23ab2036c478bf84fb049b4e23a1fb35f
[]
no_license
JiaQiSJTU/ResponseSelection
b3ce8a15129e23830ba3a7311d0b2eb831217163
660732f7cc9c0c419a3cf26c85430eb258e5f1f0
refs/heads/master
2023-07-04T18:59:47.498626
2021-09-01T08:04:54
2021-09-01T08:04:54
297,997,764
28
2
null
null
null
null
UTF-8
Python
false
false
1,619
py
#!/usr/bin/env python3
"""
Helper functions for defining the set of tasks in ParlAI.

The actual task list and definitions are in the file task_list.py
"""
from .task_list import task_list

from collections import defaultdict


def _preprocess(name):
    return name.lower().replace('-', '')


def _build(task_list):
    tasks = {}
    tags = defaultdict(list)

    for t in task_list:
        task = _preprocess(t['id'])
        tasks[task] = [t]
        for j in t['tags']:
            tag = _preprocess(j)
            if tag in tasks:
                raise RuntimeError('tag ' + tag + ' is the same as a task name')
            tags[tag].append(t)
    return tasks, tags


def _id_to_task_data(t_id):
    t_id = _preprocess(t_id)
    if t_id in tasks:
        # return the task assoicated with this task id
        return tasks[t_id]
    elif t_id in tags:
        # return the list of tasks for this tag
        return tags[t_id]
    else:
        # should already be in task form
        raise RuntimeError('could not find tag/task id')


def _id_to_task(t_id):
    if t_id[0] == '#':
        # this is a tag, so return all the tasks for this tag
        return ','.join((d['task'] for d in _id_to_task_data(t_id[1:])))
    else:
        # this should already be in task form
        return t_id


def ids_to_tasks(ids):
    if ids is None:
        raise RuntimeError(
            'No task specified. Please select a task with ' + '--task {task_name}.'
        )
    return ','.join((_id_to_task(i) for i in ids.split(',') if len(i) > 0))


# Build the task list from the json file.
tasks, tags = _build(task_list)
[ "Jia_qi_0217@163.com" ]
Jia_qi_0217@163.com
37494b82fc1bfeefecdc11791dc68f84c757fca1
d4184f2468852c5312e3e7a7e2033f1700534130
/workflow/scripts/write_qc_metadata.py
ff0bfbcb948de8f68f439f1276ce7b4d6f9d7863
[]
no_license
austintwang/ENCODE_scATAC_bingren_raw
d85b529cecd55e596c1d96ea370ade57cf418e81
9c1a6b23615a9d0f6d28f7e9ea6937755016b086
refs/heads/master
2023-08-17T04:03:46.276522
2021-09-19T00:40:32
2021-09-19T00:40:32
407,326,407
0
0
null
null
null
null
UTF-8
Python
false
false
13,760
py
""" Adapted from https://github.com/ENCODE-DCC/atac-seq-pipeline/blob/master/src/encode_lib_log_parser.py """ from collections import OrderedDict import json import os def to_int(var): try: return int(var) except ValueError: return None def to_float(var): try: return float(var) except ValueError: return None def to_bool(var): return var.lower() in set(['true', 't', 'ok', 'yes', '1']) def parse_frac_mito_qc(txt): result = OrderedDict() with open(txt, 'r') as fp: for line in fp.read().strip('\n').split('\n'): k, v = line.split('\t') if k.startswith('frac_'): result[k] = float(v) else: result[k] = int(v) return result def parse_flagstat_qc(txt): result = OrderedDict() if not txt: return result total = '' total_qc_failed = '' duplicates = '' duplicates_qc_failed = '' mapped = '' mapped_qc_failed = '' mapped_pct = '' paired = '' paired_qc_failed = '' read1 = '' read1_qc_failed = '' read2 = '' read2_qc_failed = '' paired_properly = '' paired_properly_qc_failed = '' paired_properly_pct = '' with_itself = '' with_itself_qc_failed = '' singletons = '' singletons_qc_failed = '' singletons_pct = '' diff_chroms = '' diff_chroms_qc_failed = '' delimiter_pass_fail = ' + ' with open(txt, 'r') as f: for line in f: if ' total ' in line: if ' in total ' in line: tmp1 = line.split(' in total ') else: tmp1 = line.split(' total ') line1 = tmp1[0] tmp1 = line1.split(delimiter_pass_fail) total = tmp1[0] total_qc_failed = tmp1[1] if ' duplicates' in line: tmp2 = line.split(' duplicates') line2 = tmp2[0] tmp2 = line2.split(delimiter_pass_fail) duplicates = tmp2[0] duplicates_qc_failed = tmp2[1] if ' mapped (' in line: tmp3 = line.split(' mapped (') line3_1 = tmp3[0] tmp3_1 = line3_1.split(delimiter_pass_fail) mapped = tmp3_1[0] mapped_qc_failed = tmp3_1[1] line3_2 = tmp3[1] tmp3_2 = line3_2.split(':') mapped_pct = tmp3_2[0] # .replace('%','') if ' paired in sequencing' in line: tmp2 = line.split(' paired in sequencing') line2 = tmp2[0] tmp2 = line2.split(delimiter_pass_fail) paired = tmp2[0] paired_qc_failed = tmp2[1] if ' read1' in line: tmp2 = line.split(' read1') line2 = tmp2[0] tmp2 = line2.split(delimiter_pass_fail) read1 = tmp2[0] read1_qc_failed = tmp2[1] if ' read2' in line: tmp2 = line.split(' read2') line2 = tmp2[0] tmp2 = line2.split(delimiter_pass_fail) read2 = tmp2[0] read2_qc_failed = tmp2[1] if ' properly paired (' in line: tmp3 = line.split(' properly paired (') line3_1 = tmp3[0] tmp3_1 = line3_1.split(delimiter_pass_fail) paired_properly = tmp3_1[0] paired_properly_qc_failed = tmp3_1[1] line3_2 = tmp3[1] tmp3_2 = line3_2.split(':') paired_properly_pct = tmp3_2[0] # .replace('%','') if ' with itself and mate mapped' in line: tmp3 = line.split(' with itself and mate mapped') line3_1 = tmp3[0] tmp3_1 = line3_1.split(delimiter_pass_fail) with_itself = tmp3_1[0] with_itself_qc_failed = tmp3_1[1] if ' singletons (' in line: tmp3 = line.split(' singletons (') line3_1 = tmp3[0] tmp3_1 = line3_1.split(delimiter_pass_fail) singletons = tmp3_1[0] singletons_qc_failed = tmp3_1[1] line3_2 = tmp3[1] tmp3_2 = line3_2.split(':') singletons_pct = tmp3_2[0] # .replace('%','') if ' with mate mapped to a different chr' in line: tmp3 = line.split(' with mate mapped to a different chr') line3_1 = tmp3[0] tmp3_1 = line3_1.split(delimiter_pass_fail) diff_chroms = tmp3_1[0] diff_chroms_qc_failed = tmp3_1[1] if total: result['total_reads'] = int(total) if total_qc_failed: result['total_reads_qc_failed'] = int(total_qc_failed) if duplicates: result['duplicate_reads'] = int(duplicates) if duplicates_qc_failed: 
result['duplicate_reads_qc_failed'] = int(duplicates_qc_failed) if mapped: result['mapped_reads'] = int(mapped) if mapped_qc_failed: result['mapped_reads_qc_failed'] = int(mapped_qc_failed) if mapped_pct: if 'nan' not in mapped_pct and 'N/A' not in mapped_pct \ and 'NA' not in mapped_pct: if '%' in mapped_pct: mapped_pct = mapped_pct.replace('%', '') result['pct_mapped_reads'] = float(mapped_pct) else: result['pct_mapped_reads'] = 100.0 * float(mapped_pct) else: result['pct_mapped_reads'] = 0.0 if paired: result['paired_reads'] = int(paired) if paired_qc_failed: result['paired_reads_qc_failed'] = int(paired_qc_failed) if read1: result['read1'] = int(read1) if read1_qc_failed: result['read1_qc_failed'] = int(read1_qc_failed) if read2: result['read2'] = int(read2) if read2_qc_failed: result['read2_qc_failed'] = int(read2_qc_failed) if paired_properly: result['properly_paired_reads'] = int(paired_properly) if paired_properly_qc_failed: result['properly_paired_reads_qc_failed'] = int( paired_properly_qc_failed) if paired_properly_pct: if 'nan' not in paired_properly_pct and \ 'N/A' not in paired_properly_pct \ and 'NA' not in paired_properly_pct: if '%' in paired_properly_pct: paired_properly_pct = paired_properly_pct.replace('%', '') result['pct_properly_paired_reads'] = float( paired_properly_pct) else: result['pct_properly_paired_reads'] = 100.0 * \ float(paired_properly_pct) else: result['pct_properly_paired_reads'] = 0.0 if with_itself: result['with_itself'] = int(with_itself) if with_itself_qc_failed: result['with_itself_qc_failed'] = int(with_itself_qc_failed) if singletons: result['singletons'] = int(singletons) if singletons_qc_failed: result['singletons_qc_failed'] = int(singletons_qc_failed) if singletons_pct: if 'nan' not in singletons_pct and 'N/A' not in singletons_pct \ and 'NA' not in singletons_pct: if '%' in singletons_pct: singletons_pct = singletons_pct.replace('%', '') result['pct_singletons'] = float(singletons_pct) else: result['pct_singletons'] = 100.0 * float(singletons_pct) else: result['pct_singletons'] = 0.0 if diff_chroms: result['diff_chroms'] = int(diff_chroms) if diff_chroms_qc_failed: result['diff_chroms_qc_failed'] = int(diff_chroms_qc_failed) return result def parse_dup_qc(txt): result = OrderedDict() if not txt: return result paired_reads = '' unpaired_reads = '' unmapped_reads = '' unpaired_dupes = '' paired_dupes = '' paired_opt_dupes = '' dupes_pct = '' picard_log_found = False # picard markdup with open(txt, 'r') as f: header = '' # if 'UNPAIRED_READS_EXAMINED' in header content = '' for line in f: if header: content = line.replace(',', '.') picard_log_found = True break if 'UNPAIRED_READS_EXAMINED' in line: header = line if picard_log_found: header_items = header.split('\t') content_items = content.split('\t') m = dict(zip(header_items, content_items)) unpaired_reads = m['UNPAIRED_READS_EXAMINED'] paired_reads = m['READ_PAIRS_EXAMINED'] unmapped_reads = m['UNMAPPED_READS'] unpaired_dupes = m['UNPAIRED_READ_DUPLICATES'] paired_dupes = m['READ_PAIR_DUPLICATES'] paired_opt_dupes = m['READ_PAIR_OPTICAL_DUPLICATES'] if 'PERCENT_DUPLICATION' in m: dupes_pct = m['PERCENT_DUPLICATION'] else: dupes_pct = '0' else: # sambamba markdup with open(txt, 'r') as f: for line in f: if ' end pairs' in line: tmp1 = line.strip().split(' ') paired_reads = tmp1[1] if ' single ends ' in line: tmp1 = line.strip().split(' ') unpaired_reads = tmp1[1] unmapped_reads = tmp1[6] if 'found ' in line: tmp1 = line.strip().split(' ') if paired_reads == '0': unpaired_dupes = tmp1[1] # SE 
paired_dupes = 0 else: unpaired_dupes = 0 paired_dupes = str(int(tmp1[1]) // 2) # PE if paired_reads == '0': # SE dupes_pct = '{0:.2f}'.format( float(unpaired_dupes)/float(unpaired_reads)) elif paired_reads: dupes_pct = '{0:.2f}'.format( float(paired_dupes)/float(paired_reads)) if unpaired_reads: result['unpaired_reads'] = int(unpaired_reads) if paired_reads: result['paired_reads'] = int(paired_reads) if unmapped_reads: result['unmapped_reads'] = int(unmapped_reads) if unpaired_dupes: result['unpaired_duplicate_reads'] = int(unpaired_dupes) if paired_dupes: result['paired_duplicate_reads'] = int(paired_dupes) if paired_opt_dupes: result['paired_optical_duplicate_reads'] = int(paired_opt_dupes) if dupes_pct: result['pct_duplicate_reads'] = float(dupes_pct)*100.0 return result def parse_lib_complexity_qc(txt): result = OrderedDict() if not txt: return result with open(txt, 'r') as f: for line in f: arr = line.strip().split('\t') break result['total_fragments'] = to_int(arr[0]) result['distinct_fragments'] = to_int(arr[1]) result['positions_with_one_read'] = to_int(arr[2]) result['positions_with_two_reads'] = to_int(arr[3]) result['NRF'] = to_float(arr[4]) result['PBC1'] = to_float(arr[5]) result['PBC2'] = to_float(arr[6]) return result def parse_picard_est_lib_size_qc(txt): result = OrderedDict() if not txt: return result with open(txt, 'r') as f: val = f.readlines()[0].strip() result['picard_est_lib_size'] = float(val) return result def build_quality_metric_header(sample_data, config, data_path, out_path): lab = config["dcc_lab"] experiment = sample_data["experiment"] replicate = sample_data["replicate_num"] data_alias = f"{lab}:{experiment}${replicate}${os.path.basename(data_path)}" alias = f"{lab}:{experiment}${replicate}${os.path.basename(out_path)}" h = OrderedDict({ "lab": lab, "award": config["dcc_award"], "quality_metric_of": data_alias, "aliases": [alias], }) return h def write_json(data, out_path): with open(out_path, "w") as f: json.dump(data, f, indent=4) try: out_group = snakemake.params['out_group'] sample_data = snakemake.params['sample_data'] data_path = snakemake.input['data_file'] config = snakemake.config if out_group == "fastqs": pass elif out_group == "mapping": alignment_stats_out = snakemake.output['alignment_stats'] samstats_raw = snakemake.input['samstats_raw'] a = parse_flagstat_qc(samstats_raw) h = build_quality_metric_header(sample_data, config, data_path, alignment_stats_out) alignment_stats = h | a write_json(alignment_stats, alignment_stats_out) elif out_group == "filtering": alignment_stats_out = snakemake.output['alignment_stats'] lib_comp_stats_out = snakemake.output['lib_comp_stats'] samstats_filtered = snakemake.input['samstats_filtered'] picard_markdup = snakemake.input['picard_markdup'] pbc_stats = snakemake.input['pbc_stats'] frac_mito = snakemake.input['frac_mito'] s = parse_flagstat_qc(samstats_filtered) p = parse_picard_est_lib_size_qc(picard_markdup) l = parse_lib_complexity_qc(pbc_stats) m = parse_frac_mito_qc(frac_mito) h_align = build_quality_metric_header(sample_data, config, data_path, alignment_stats_out) h_lib = build_quality_metric_header(sample_data, config, data_path, lib_comp_stats_out) alignment_stats = h_align | s | m lib_comp_stats = h_lib | p | l write_json(alignment_stats, alignment_stats_out) write_json(lib_comp_stats, lib_comp_stats_out) elif out_group == "fragments": pass elif out_group == "archr": pass except NameError: pass
[ "austin.wang1357@gmail.com" ]
austin.wang1357@gmail.com
461f9252ada4badc3896e5dda3754393969d3ce1
42e9810116a4c726f2fb60a0133fc3b81670c0e1
/setup.py
41ff4cdca9f78be739cc42a2b42a8886a90aca79
[ "BSD-3-Clause" ]
permissive
pinjasec/binarypinja
247e6a13f3b4f58fb16aab00a3649f575b428db6
106bb2c68ea530cbf99079749f1a7184cf21d480
refs/heads/master
2020-07-24T19:57:50.921387
2019-09-12T11:15:21
2019-09-12T11:15:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
324
py
from setuptools import find_packages, setup setup( name='pinja', packages=find_packages(), version='0.1.0', entry_points={ 'console_scripts': 'pinja = pinja.main:main' }, description='A short description of the project.', author='*pinja_sec', license='BSD-3', )
[ "poo_eix@protonmail.com" ]
poo_eix@protonmail.com
2eb829c87ae5849e5b0d7bf0a4c9e93efc347ecc
32d934cabb1eac917bb583a1428b87f78b335a4e
/code_per_day/day_47_to_48.py
a8d229a9b40eae3b056c893a2848e2fd3d553e8c
[]
no_license
zotroneneis/magical_universe
7339fefcfdf47e21e5ebcc6f56e3f1949230932a
c5da3367b7854c4cf9625c45e03742dba3a6d63c
refs/heads/master
2022-12-07T20:21:25.427333
2022-11-13T14:33:01
2022-11-13T14:33:01
141,951,821
414
58
null
null
null
null
UTF-8
Python
false
false
1,266
py
from collections import defaultdict class CastleKilmereMember: """ Creates a member of the Castle Kilmere School of Magic """ def __init__(self, name: str, birthyear: int, sex: str): self.name = name self.birthyear = birthyear self.sex = sex self._traits = defaultdict(lambda: False) def add_trait(self, trait, value=True): self._traits[trait] = value def exhibits_trait(self, trait: str) -> bool: value = self._traits[trait] return value def print_traits(self): true_traits = [trait for trait, value in self._traits.items() if value] false_traits = [trait for trait, value in self._traits.items() if not value] if true_traits: print(f"{self.name} is {', '.join(true_traits)}") if false_traits: print(f"{self.name} is not {', '.join(false_traits)}") if (not true_traits and not false_traits): print(f"{self.name} does not have traits yet") if __name__ == "__main__": bromley = CastleKilmereMember('Bromley Huckabee', 1959, 'male') bromley.add_trait('tidy-minded') bromley.add_trait('kind') bromley.exhibits_trait('kind') bromley.exhibits_trait('mean') bromley.print_traits()
[ "popkes@gmx.net" ]
popkes@gmx.net
58530dd0f15e00fa4623a19b9378cc34b6dd4111
e5937e1305b6f1a68c98bf85d479f2cc46271f6d
/First.py
8dccbe53121fefa238ea688a09fb13622b1be489
[]
no_license
sishen123258/python
14b974cc078e9b2f6e0a15561a071da7acbccd91
3e1fde3289f018979f9b67799fa2daee8920beaa
refs/heads/master
2021-04-09T16:51:28.129461
2015-05-29T07:38:29
2015-05-29T07:38:29
35,621,952
0
0
null
null
null
null
UTF-8
Python
false
false
876
py
__author__ = 'Yue' # # class MyFirstPyClass: # "first py class" # # def _init_(self, pm, ph): # self.name = pm # self.phone = ph # print("self created:", self.name); # # def updatePhone(self, ph): # self.phone = ph # print("self phone changed:", self.phone); # class Person: def __init__(self, first_name): self.first_name = first_name # Getter function @property def first_name(self): return self._first_name # Setter function @first_name.setter def first_name(self, value): if not isinstance(value, str): raise TypeError('Expected a string') self._first_name = value # Deleter function (optional) @first_name.deleter def first_name(self): raise AttributeError("Can't delete attribute") p=Person("tong") print(p.first_name)
[ "1144299328@qq.com" ]
1144299328@qq.com
0ae601e2d21d74e13bbdd78607d416c058eed97a
5d4def230bad7174e2a2352d277d391dfa118694
/vocab.py
3ae126b0cb1ca0de8128bb2c779c875720f8c902
[]
no_license
hitercs/biLSTM-SlotFilling
a733b2df0e65834b6c6a91d609daa60c73c596ca
8a0c1baed51e668e7fc4119f69ca6491e7328e7c
refs/heads/master
2020-03-14T07:20:50.869860
2018-05-01T10:35:18
2018-05-01T10:35:18
131,502,870
3
0
null
null
null
null
UTF-8
Python
false
false
1,673
py
#-*- encoding: utf-8 -*- import codecs import settings from util import Util class BiVocab(object): def __init__(self, src_vocab, trg_vocab): self.src_vocab = src_vocab self.trg_vocab = trg_vocab self.src_vocab_size = src_vocab.vocab_size self.trg_vocab_size = trg_vocab.vocab_size self.pad_id = self.trg_vocab.get_idx(settings.PAD) self.unk_id = self.trg_vocab.get_idx(settings.UNK) def get_src_word(self, idx): return self.src_vocab.get_word(idx) def get_trg_word(self, idx): return self.trg_vocab.get_word(idx) def get_src_idx(self, w): return self.src_vocab.get_idx(w) def get_trg_idx(self, w): return self.trg_vocab.get_idx(w) class Vocab(object): def __init__(self, vocab_size, vocab_fn): self.word2idx = dict() self.idx2word = dict() self.vocab_size = vocab_size self.build_vocab(vocab_fn) def build_vocab(self, vocab_fn): with codecs.open(vocab_fn, encoding='utf-8', mode='r', buffering=settings.read_buffer_size) as fp: for line in fp: word, idx, _ = line.strip().split() Util.add_vocab(self.word2idx, word, int(idx)) Util.add_vocab(self.idx2word, int(idx), word) def get_idx(self, word): if not word in self.word2idx: return self.word2idx[settings.UNK] if self.word2idx[word] > self.vocab_size - 1: return self.word2idx[settings.UNK] return self.word2idx[word] def get_word(self, idx): if idx > self.vocab_size - 1: return settings.UNK return self.idx2word[idx]
[ "schen@ir.hit.edu.cn" ]
schen@ir.hit.edu.cn
ec3ee36ac1ce3cea82d7bfe1563d5a76ade5968f
7a583c534559ad08950e6e1564d4a59095ce9669
/autoclient/src/plugins/memory.py
bc4df39f71e332e172e24144a790ef9c8973a5a3
[]
no_license
wyyalt/cmdb
67fbeabda2035e11c1933ab84b75c9c3feac7d92
c43c17db7c6fb9f63b2387b7054a89a54bee199a
refs/heads/master
2021-05-05T12:05:38.026134
2017-09-25T14:58:24
2017-09-25T14:58:24
104,718,572
0
0
null
null
null
null
UTF-8
Python
false
false
1,812
py
import os from lib import convert from lib.conf.config import settings class Memory(object): def __init__(self): pass @classmethod def initial(cls): return cls() def process(self, command_func, debug): if debug: output = open(os.path.join(settings.BASEDIR, 'files/memory.out'), 'r', encoding='utf-8').read() else: output = command_func("sudo dmidecode -q -t 17 2>/dev/null") return self.parse(output) def parse(self, content): """ Parse the output of the shell command :param content: raw shell command output :return: parsed result """ ram_dict = {} key_map = { 'Size': 'capacity', 'Locator': 'slot', 'Type': 'model', 'Speed': 'speed', 'Manufacturer': 'manufacturer', 'Serial Number': 'sn', } devices = content.split('Memory Device') for item in devices: item = item.strip() if not item: continue if item.startswith('#'): continue segment = {} lines = item.split('\n\t') for line in lines: if not line.strip(): continue if len(line.split(':')) == 2: key, value = line.split(':') else: key = line.split(':')[0] value = "" if key in key_map: if key == 'Size': segment[key_map['Size']] = convert.convert_mb_to_gb(value, 0) else: segment[key_map[key.strip()]] = value.strip() ram_dict[segment['slot']] = segment return ram_dict
[ "wyyalt@live.com" ]
wyyalt@live.com
5f620ca66ea5f22f98da060905725de7b1622114
e8c0513bce6ba781d6d55c48330c54edbd20cc23
/manage.py
d66549db189c8fe3cddba1a8a34913eaa50627a3
[]
no_license
BohnSix/myblog
aad06969026e5e0059e83d3c8bedab66eab3a5d2
3961bd813c8d706b15e66cd55dff2edeb992ca3c
refs/heads/master
2022-09-25T00:47:35.509766
2019-11-21T08:33:20
2019-11-21T08:33:20
185,914,940
2
1
null
2022-09-16T18:13:43
2019-05-10T03:49:23
JavaScript
UTF-8
Python
false
false
721
py
from flask_migrate import Migrate, MigrateCommand from flask_script import Manager, Shell from app import create_app, db from app.models import * app = create_app(config_name="develop") @app.template_filter() def countTime(content): return len(content) // 200 + 1 manager = Manager(app) migrate = Migrate(app, db) manager.add_command('db', MigrateCommand) def make_shell_context(): return dict(db=db, Article=Article, User=User, Category=Category) manager.add_command("shell", Shell(make_context=make_shell_context)) if __name__ == "__main__": # db.drop_all(app=app) # db.create_all(app=app) app.run(host="0.0.0.0", port=8080, debug=True)
[ "bohn_six@163.com" ]
bohn_six@163.com
c770c4a0ef473e599ea32a354884f2360f88218a
365051fefddc9d549201225915122cb413168919
/final CNN data aug/data_aug_v03.py
1360161c08ef375089f3a624dd6d9ccfb8841482
[]
no_license
PauloAxcel/SERS-EHD-pillars
1623f5141a3d6fcd6b6f13e83afe1dac08cb893a
89c029be9f3cb435103f497644d30e75ce3ae3ad
refs/heads/main
2023-07-19T10:04:44.237797
2021-09-04T21:05:14
2021-09-04T21:05:14
375,076,169
1
0
null
null
null
null
UTF-8
Python
false
false
3,140
py
# -*- coding: utf-8 -*- """ Created on Tue Oct 15 13:05:02 2019 @author: paulo """ #DATA AUGMENTATION import os import cv2 import random import numpy as np import matplotlib.pyplot as plt from keras.preprocessing.image import ImageDataGenerator #get the folders SERS_train_dir = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Original Data/SERS/' NOENH_train_dir = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Original Data/nonSERS/' gen_dir_tra = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Training/' gen_dir_val = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Validation/' #num is the total number of samples that we want to generate. num = 5000 #get the files inside the folders SERS_train = os.listdir(SERS_train_dir) NOENH_train = os.listdir(NOENH_train_dir) all_dir = [SERS_train_dir, NOENH_train_dir] all_data = [SERS_train, NOENH_train] for dire,file in zip(all_dir, all_data): #for i in range(len(all_data)): for j in range(num): #generate a rand to select a random file in the folder # rand = random.randint(0,len(all_data[i])-1) rand = random.randint(0,len(file)-1) if len(file[rand].split('_'))>1: continue else: # im = cv2.imread(all_dir[i]+all_data[i][rand]) im = cv2.imread(dire+file[rand]) # plt.imshow(im) #datagen.flow needs a rank 4 matrix, hence we use np.expand_dims to increase the dimention of the image image = np.expand_dims(im,0) # word_label = all_data[i][rand].split('.')[0] word_label = file[rand].split('.')[0] #Generate new image process datagen = ImageDataGenerator(featurewise_center=0, samplewise_center=0, rotation_range=180, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') #label files based on the train/validation by employing a rand function lab = dire.split('/')[-2] if random.random() < 0.8: aug_iter = datagen.flow(image,save_to_dir = gen_dir_tra , save_prefix = lab+'_train_' + word_label +'_gen_' + str(random.randint(0,num))) else: aug_iter = datagen.flow(image,save_to_dir = gen_dir_val ,save_prefix = lab+'_val_' + word_label +'_gen_' + str(random.randint(0,num))) #next function produces the result from the datagen flow. collapses the function. # plt.imshow(next(aug_iter)[0].astype(np.uint8)) aug_images = [next(aug_iter)[0].astype(np.uint8) for m in range(1)]
[ "noreply@github.com" ]
PauloAxcel.noreply@github.com
62c20ca9fb15d381b187ac793e03b1b5242e6d37
495b0b8de3ecc341511cdb10f11368b35b585bea
/SoftLayer/CLI/modules/filters.py
1e4274ac04ae064468c5d1d0736b540b8f35416c
[]
no_license
hugomatic/softlayer-api-python-client
cf6c1e6bfa32e559e72f8b0b069339ae8edd2ede
9c115f0912ee62763b805941593f6dd50de37068
refs/heads/master
2021-01-18T11:09:19.122162
2013-04-09T01:44:51
2013-04-09T01:44:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
996
py
""" usage: sl help filters Filters are used to limit the amount of results. Some commands will accept a filter operation for certain fields. Filters can be applied across multiple fields in most cases. Available Operations: Case Insensitive 'value' Exact value match 'value*' Begins with value '*value' Ends with value '*value*' Contains value Case Sensitive '~ value' Exact value match '> value' Greater than value '< value' Less than value '>= value' Greater than or equal to value '<= value' Less than or equal to value Examples: sl cci list --datacenter=dal05 sl cci list --hostname='prod*' sl cci list --network=100 --cpu=2 sl cci list --network='< 100' --cpu=2 sl cci list --memory='>= 2048' Note: Comparison operators (>, <, >=, <=) can be used with integers, floats, and strings. """ # :copyright: (c) 2013, SoftLayer Technologies, Inc. All rights reserved. # :license: BSD, see LICENSE for more details.
[ "k3vinmcdonald@gmail.com" ]
k3vinmcdonald@gmail.com
cd04729dafc1306355807963c87d375bbfa6c2a7
6b096e1074479b13dc9d28cec7e5220d2ecc5c13
/Python/q34.py
af3b5a71997a6c98c126bd1f89d3957a291886a6
[]
no_license
wzb1005/leetcode
ed3684e580b4dae37dce0af8314da10c89b557f7
4ba73ac913993ba5bb7deab5971aaeaaa16ed4d7
refs/heads/master
2023-03-19T06:37:30.274467
2021-03-09T03:02:57
2021-03-09T03:02:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
112
py
from bisect import bisect_left, bisect_right from typing import List class Solution: def searchRange(self, nums: List[int], target: int) -> List[int]: lo, hi = bisect_left(nums, target), bisect_right(nums, target) return [lo, hi - 1] if lo < hi else [-1, -1]
[ "chiyexiao@icloud.com" ]
chiyexiao@icloud.com
853e8cd7d44015eb9bfbe2e8d913ffb2d35fe27c
35c75ed0ca9850a6dd62d0e19b7e2ab472c5f292
/shop/migrations/0002_auto_20171109_0354.py
f417d1eaa71b14f9d43a38cb507c9d803779dad6
[]
no_license
HyeriChang/tuanh
1314b270d7b8d44424c5b6b82361b20397d30f4b
38546afde0a4fa6a54727b4595b7cfa7c8baec1e
refs/heads/master
2021-05-07T06:32:31.060921
2017-12-06T09:36:48
2017-12-06T09:36:48
111,763,554
1
0
null
null
null
null
UTF-8
Python
false
false
1,656
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.6 on 2017-11-09 03:54 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shop', '0001_initial'), ] operations = [ migrations.AddField( model_name='product', name='brand', field=models.CharField(default='', max_length=50), preserve_default=False, ), migrations.AddField( model_name='product', name='color', field=models.CharField(default='x', max_length=50), preserve_default=False, ), migrations.AddField( model_name='product', name='condition', field=models.CharField(default='', max_length=50), preserve_default=False, ), migrations.AddField( model_name='product', name='detail', field=models.TextField(default=''), preserve_default=False, ), migrations.AddField( model_name='product', name='material', field=models.CharField(default='', max_length=50), preserve_default=False, ), migrations.AddField( model_name='product', name='rating', field=models.FloatField(default=1), preserve_default=False, ), migrations.AddField( model_name='product', name='size', field=models.CharField(default='', max_length=50), preserve_default=False, ), ]
[ "anh.nguyentu3110@gmail.com" ]
anh.nguyentu3110@gmail.com
10fe64c7113e565bb25b1d2565fa28e8ea3cfdcd
40711c0546644d1bb8709ee348211d294c0a48d2
/Rapport/Benchmarking/state_plot.py
48111f01a271055f269de89702c8ec49040ddac4
[]
no_license
smaibom/bach_2015
aaefa2a33cc0d5bb06761d72a3820ee0f2cfe290
4177977b81bb6f6c945e5e8a1956dbd4ca4b43f2
refs/heads/master
2021-01-20T21:53:03.374855
2015-06-07T21:54:47
2015-06-07T21:54:47
31,496,008
1
0
null
null
null
null
UTF-8
Python
false
false
1,459
py
""" Demo of a simple plot with a custom dashed line. A Line object's ``set_dashes`` method allows you to specify dashes with a series of on/off lengths (in points). """ import numpy as np import matplotlib.pyplot as plt #x = np.linspace(0, 22, 22) grosses = [62242364,123246883,122562228,122562228,183556657,184105105,184105105,243618278,245304681,245304681,305119181,305119181,305967853,977490276] labels = ['0 0 0', '0 0 1', '0 1 0', '1 0 0', '0 0 2', '0 2 0', '2 0 0 ', '0 0 3', '0 3 0', '3 0 0 ', '1 0 1', '0 1 1', '1 1 0', '1 1 1'] fig = plt.figure() fig.subplots_adjust(bottom=0.2) # Remark 1 ax = fig.add_subplot(111) ax.ticklabel_format(style='plain') # Remark 2 ax.set_xticks(range(len(labels))) ax.set_xticklabels(labels, rotation=80) ax.bar(range(len(grosses)), grosses) plt.xlabel('alterations, deletions and insertions\n(In order)') plt.ylabel('states processed') plt.show() #0 = 62242364 0 hits #1ins = 123246883 0 hits #1del = 122562228 0 hits #1mut = 122562228 0 hits #2ins = 183556657 3 hits #2del = 184105105 117 hits #2mut = 184105105 117 hits #3ins = 243618278 28 hits #3del = 245304681 2066 hits #3mut = 245304681 2066 hits #1ins 1mut = 305119181 41 hits #1ins 1del = 305119181 41 hits #1del 1mut = 305967853 234 hits #1 1 1 = 977490276 3275 hits
[ "kullax@feral.dk" ]
kullax@feral.dk
61c91a5a98307bf6308fc87306a01cc429275024
83dc2a8d80a0614c66016efba9630cd60538d4b8
/spider_traffic/test.py
5a9cc750354cadebbffc1e99a03d3901ab54aea1
[]
no_license
hanxianzhe1116/Python_Spider
5095297e6071842aef95d0264b2024d5a0e81ce5
ba3757acf2ed133ab76720a146d380eafe69a092
refs/heads/master
2021-01-04T09:29:13.616769
2020-10-19T02:16:17
2020-10-19T02:16:17
240,488,992
0
0
null
null
null
null
UTF-8
Python
false
false
1,906
py
import requests import re import json import csv from urllib.parse import urlencode import datetime import time ''' Function description: build the request url from its parameters params: baseUrl: the base url cityCode: city code; Chongqing (code 132) is used here roadType: road type callBack: jsonp callback name ''' def getPage(baseUrl,cityCode,roadType,callBack): # request parameters params = { 'cityCode' : cityCode, 'roadtype' : roadType, 'callback' : callBack } url = baseUrl + urlencode(params) # build the url with its query string # print(requests.get(url).text) try: response = requests.get(url) if response.status_code == 200: # request succeeded return response except requests.ConnectionError as e: print('url error',e.args) if __name__ == '__main__': url = 'https://jiaotong.baidu.com/trafficindex/city/roadrank?' with open('transformData.csv','w',newline='') as f: f_csv = csv.writer(f) # save fifty minutes of data for i in range(10): response = getPage(url,132,0,'jsonp_1553486162746_179718') # print(type(response.text)) transformData = json.loads(re.findall(r'^\w+\((.*)\)$',response.text)[0]) transformData = transformData.get('data').get('list') dateTime = datetime.datetime.now().strftime('%Y-%m-%d') f_csv.writerow([dateTime]) dataList = [] for item in transformData: # print(item) row = [] row.append(item.get('roadname')) row.append(item.get('index')) row.append(item.get('speed')) dataList.append(row) # print(datetime.datetime.now().strftime('%Y-%m-%d')) f_csv.writerows(dataList) print(dataList) time.sleep(5) # f_csv.close()
[ "876605943@qq.com" ]
876605943@qq.com
af585888517df64c46a62653fa6ff3912e6b9f0d
508c5e01aa7dce530093d5796250eff8d74ba06c
/code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_get.py
d39692be0008269bf1791e585f1e0e92b09181fa
[ "MIT", "PostgreSQL" ]
permissive
jhkuang11/UniTrade
f220b0d84db06ff17626b3daa18d4cb8b72a5d3f
5f68b853926e167936b58c8543b8f95ebd6f5211
refs/heads/master
2022-12-12T15:58:30.013516
2019-02-01T21:07:15
2019-02-01T21:07:15
166,479,655
0
0
MIT
2022-12-07T03:59:47
2019-01-18T22:19:45
Python
UTF-8
Python
false
false
2,132
py
########################################################################## # # pgAdmin 4 - PostgreSQL Tools # # Copyright (C) 2013 - 2017, The pgAdmin Development Team # This software is released under the PostgreSQL Licence # ########################################################################## from pgadmin.browser.server_groups.servers.databases.tests import utils as \ database_utils from pgadmin.browser.server_groups.servers.tests import utils as server_utils from pgadmin.utils.route import BaseTestGenerator from regression import parent_node_dict from regression.python_test_utils import test_utils as utils class SchemaGetTestCase(BaseTestGenerator): """ This class will add new schema under database node. """ scenarios = [ # Fetching default URL for extension node. ('Check Schema Node URL', dict(url='/browser/schema/obj/')) ] def runTest(self): """ This function will delete schema under database node. """ schema = parent_node_dict["schema"][-1] db_id = schema["db_id"] server_id = schema["server_id"] server_response = server_utils.connect_server(self, server_id) if not server_response["data"]["connected"]: raise Exception("Could not connect to server to connect the" " database.") db_con = database_utils.connect_database(self, utils.SERVER_GROUP, server_id, db_id) if not db_con["info"] == "Database connected.": raise Exception("Could not connect to database to get the schema.") schema_id = schema["schema_id"] schema_response = self.tester.get( self.url + str(utils.SERVER_GROUP) + '/' + str(server_id) + '/' + str(db_id) + '/' + str(schema_id), content_type='html/json') self.assertEquals(schema_response.status_code, 200) # Disconnect the database database_utils.disconnect_database(self, server_id, db_id)
[ "jhkuang11@gmail.com" ]
jhkuang11@gmail.com
11aa915574de5fc4f11f5c7671205cfbaa964fe2
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
/venv/Lib/site-packages/cobra/modelimpl/copp/lacpallowhist5min.py
2d5afaedb106d24fcc43463d8548e0ce36b681e4
[]
no_license
bkhoward/aciDOM
91b0406f00da7aac413a81c8db2129b4bfc5497b
f2674456ecb19cf7299ef0c5a0887560b8b315d0
refs/heads/master
2023-03-27T23:37:02.836904
2021-03-26T22:07:54
2021-03-26T22:07:54
351,855,399
0
0
null
null
null
null
UTF-8
Python
false
false
31,598
py
# coding=UTF-8 # ********************************************************************** # Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved # written by zen warriors, do not modify! # ********************************************************************** from cobra.mit.meta import ClassMeta from cobra.mit.meta import StatsClassMeta from cobra.mit.meta import CounterMeta from cobra.mit.meta import PropMeta from cobra.mit.meta import Category from cobra.mit.meta import SourceRelationMeta from cobra.mit.meta import NamedSourceRelationMeta from cobra.mit.meta import TargetRelationMeta from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory from cobra.model.category import MoCategory, PropCategory, CounterCategory from cobra.mit.mo import Mo # ################################################## class LacpAllowHist5min(Mo): """ Mo doc not defined in techpub!!! """ meta = StatsClassMeta("cobra.model.copp.LacpAllowHist5min", "Per Interface Allow Counters for Lacp") counter = CounterMeta("bytesRate", CounterCategory.GAUGE, "bytes-per-second", "LacpAllowed Bytes rate") counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesRateMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesRateMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesRateAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesRateSpct" counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesRateThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesRateTr" meta._counters.append(counter) counter = CounterMeta("bytes", CounterCategory.COUNTER, "bytes", "LacpAllowed Bytes") counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bytesCum" counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bytesPer" counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesSpct" counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesTr" counter._propRefs[PropCategory.IMPLICIT_RATE] = "bytesRate" meta._counters.append(counter) counter = CounterMeta("pktsRate", CounterCategory.GAUGE, "packets-per-second", "LacpAllowed Packets rate") counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsRateMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsRateMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsRateAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsRateSpct" counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsRateThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsRateTr" meta._counters.append(counter) counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "LacpAllowed Packets") counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum" counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer" counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin" counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax" counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg" counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct" counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr" counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr" counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate" meta._counters.append(counter) meta.moClassName = "coppLacpAllowHist5min" meta.rnFormat = "HDcoppLacpAllow5min-%(index)s" meta.category = MoCategory.STATS_HISTORY meta.label = "historical Per Interface Allow Counters for 
Lacp stats in 5 minute" meta.writeAccessMask = 0x1 meta.readAccessMask = 0x1 meta.isDomainable = False meta.isReadOnly = True meta.isConfigurable = False meta.isDeletable = False meta.isContextRoot = False meta.parentClasses.add("cobra.model.svi.If") meta.parentClasses.add("cobra.model.pc.AggrIf") meta.parentClasses.add("cobra.model.l1.PhysIf") meta.parentClasses.add("cobra.model.l3.RtdIf") meta.parentClasses.add("cobra.model.l3.EncRtdIf") meta.superClasses.add("cobra.model.copp.LacpAllowHist") meta.superClasses.add("cobra.model.stats.Item") meta.superClasses.add("cobra.model.stats.Hist") meta.rnPrefixes = [ ('HDcoppLacpAllow5min-', True), ] prop = PropMeta("str", "bytesAvg", "bytesAvg", 32068, PropCategory.IMPLICIT_AVG) prop.label = "LacpAllowed Bytes average value" prop.isOper = True prop.isStats = True meta.props.add("bytesAvg", prop) prop = PropMeta("str", "bytesCum", "bytesCum", 32064, PropCategory.IMPLICIT_CUMULATIVE) prop.label = "LacpAllowed Bytes cumulative" prop.isOper = True prop.isStats = True meta.props.add("bytesCum", prop) prop = PropMeta("str", "bytesMax", "bytesMax", 32067, PropCategory.IMPLICIT_MAX) prop.label = "LacpAllowed Bytes maximum value" prop.isOper = True prop.isStats = True meta.props.add("bytesMax", prop) prop = PropMeta("str", "bytesMin", "bytesMin", 32066, PropCategory.IMPLICIT_MIN) prop.label = "LacpAllowed Bytes minimum value" prop.isOper = True prop.isStats = True meta.props.add("bytesMin", prop) prop = PropMeta("str", "bytesPer", "bytesPer", 32065, PropCategory.IMPLICIT_PERIODIC) prop.label = "LacpAllowed Bytes periodic" prop.isOper = True prop.isStats = True meta.props.add("bytesPer", prop) prop = PropMeta("str", "bytesRate", "bytesRate", 32072, PropCategory.IMPLICIT_RATE) prop.label = "LacpAllowed Bytes rate" prop.isOper = True prop.isStats = True meta.props.add("bytesRate", prop) prop = PropMeta("str", "bytesRateAvg", "bytesRateAvg", 32084, PropCategory.IMPLICIT_AVG) prop.label = "LacpAllowed Bytes rate average value" prop.isOper = True prop.isStats = True meta.props.add("bytesRateAvg", prop) prop = PropMeta("str", "bytesRateMax", "bytesRateMax", 32083, PropCategory.IMPLICIT_MAX) prop.label = "LacpAllowed Bytes rate maximum value" prop.isOper = True prop.isStats = True meta.props.add("bytesRateMax", prop) prop = PropMeta("str", "bytesRateMin", "bytesRateMin", 32082, PropCategory.IMPLICIT_MIN) prop.label = "LacpAllowed Bytes rate minimum value" prop.isOper = True prop.isStats = True meta.props.add("bytesRateMin", prop) prop = PropMeta("str", "bytesRateSpct", "bytesRateSpct", 32085, PropCategory.IMPLICIT_SUSPECT) prop.label = "LacpAllowed Bytes rate suspect count" prop.isOper = True prop.isStats = True meta.props.add("bytesRateSpct", prop) prop = PropMeta("str", "bytesRateThr", "bytesRateThr", 32086, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "LacpAllowed Bytes rate thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) 
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("bytesRateThr", prop) prop = PropMeta("str", "bytesRateTr", "bytesRateTr", 32087, 
PropCategory.IMPLICIT_TREND) prop.label = "LacpAllowed Bytes rate trend" prop.isOper = True prop.isStats = True meta.props.add("bytesRateTr", prop) prop = PropMeta("str", "bytesSpct", "bytesSpct", 32069, PropCategory.IMPLICIT_SUSPECT) prop.label = "LacpAllowed Bytes suspect count" prop.isOper = True prop.isStats = True meta.props.add("bytesSpct", prop) prop = PropMeta("str", "bytesThr", "bytesThr", 32070, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "LacpAllowed Bytes thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", 
"rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("bytesThr", prop) prop = PropMeta("str", "bytesTr", "bytesTr", 32071, PropCategory.IMPLICIT_TREND) prop.label = "LacpAllowed Bytes trend" prop.isOper = True prop.isStats = True meta.props.add("bytesTr", prop) prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("deleteAll", "deleteall", 16384) prop._addConstant("deleteNonPresent", "deletenonpresent", 8192) prop._addConstant("ignore", "ignore", 4096) meta.props.add("childAction", prop) prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR) prop.label = "Number of Collections During this Interval" prop.isImplicit = True prop.isAdmin = True meta.props.add("cnt", prop) prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN) prop.label = "None" prop.isDn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("dn", prop) prop = PropMeta("str", "index", "index", 31203, PropCategory.REGULAR) prop.label = "History Index" prop.isConfig = True prop.isAdmin = True prop.isCreateOnly = True prop.isNaming = True meta.props.add("index", prop) prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR) prop.label = "Collection Length" prop.isImplicit = True prop.isAdmin = True meta.props.add("lastCollOffset", prop) prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop.defaultValue = 0 prop.defaultValueStr = "never" prop._addConstant("never", "never", 0) meta.props.add("modTs", prop) prop = PropMeta("str", "pktsAvg", "pktsAvg", 32104, PropCategory.IMPLICIT_AVG) prop.label = "LacpAllowed Packets average value" prop.isOper = True prop.isStats = True meta.props.add("pktsAvg", prop) prop = PropMeta("str", "pktsCum", "pktsCum", 32100, PropCategory.IMPLICIT_CUMULATIVE) prop.label = "LacpAllowed Packets cumulative" prop.isOper = True prop.isStats = True meta.props.add("pktsCum", prop) prop = PropMeta("str", "pktsMax", "pktsMax", 32103, PropCategory.IMPLICIT_MAX) prop.label = "LacpAllowed Packets maximum value" prop.isOper = True prop.isStats = True meta.props.add("pktsMax", prop) prop = PropMeta("str", "pktsMin", "pktsMin", 32102, PropCategory.IMPLICIT_MIN) prop.label = "LacpAllowed Packets minimum value" prop.isOper = True prop.isStats = True meta.props.add("pktsMin", prop) prop = PropMeta("str", "pktsPer", "pktsPer", 32101, PropCategory.IMPLICIT_PERIODIC) 
prop.label = "LacpAllowed Packets periodic" prop.isOper = True prop.isStats = True meta.props.add("pktsPer", prop) prop = PropMeta("str", "pktsRate", "pktsRate", 32108, PropCategory.IMPLICIT_RATE) prop.label = "LacpAllowed Packets rate" prop.isOper = True prop.isStats = True meta.props.add("pktsRate", prop) prop = PropMeta("str", "pktsRateAvg", "pktsRateAvg", 32120, PropCategory.IMPLICIT_AVG) prop.label = "LacpAllowed Packets rate average value" prop.isOper = True prop.isStats = True meta.props.add("pktsRateAvg", prop) prop = PropMeta("str", "pktsRateMax", "pktsRateMax", 32119, PropCategory.IMPLICIT_MAX) prop.label = "LacpAllowed Packets rate maximum value" prop.isOper = True prop.isStats = True meta.props.add("pktsRateMax", prop) prop = PropMeta("str", "pktsRateMin", "pktsRateMin", 32118, PropCategory.IMPLICIT_MIN) prop.label = "LacpAllowed Packets rate minimum value" prop.isOper = True prop.isStats = True meta.props.add("pktsRateMin", prop) prop = PropMeta("str", "pktsRateSpct", "pktsRateSpct", 32121, PropCategory.IMPLICIT_SUSPECT) prop.label = "LacpAllowed Packets rate suspect count" prop.isOper = True prop.isStats = True meta.props.add("pktsRateSpct", prop) prop = PropMeta("str", "pktsRateThr", "pktsRateThr", 32122, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "LacpAllowed Packets rate thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) 
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("pktsRateThr", prop) prop = PropMeta("str", "pktsRateTr", "pktsRateTr", 32123, PropCategory.IMPLICIT_TREND) prop.label = "LacpAllowed Packets rate trend" prop.isOper = True prop.isStats = True meta.props.add("pktsRateTr", prop) prop = PropMeta("str", "pktsSpct", "pktsSpct", 32105, PropCategory.IMPLICIT_SUSPECT) prop.label = "LacpAllowed Packets suspect count" prop.isOper = True prop.isStats = True meta.props.add("pktsSpct", prop) prop = PropMeta("str", "pktsThr", "pktsThr", 32106, PropCategory.IMPLICIT_THRESHOLDED) prop.label = "LacpAllowed Packets thresholded flags" prop.isOper = True prop.isStats = True prop.defaultValue = 0 prop.defaultValueStr = "unspecified" prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552) prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736) prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472) prop._addConstant("avgMajor", "avg-severity-major", 1099511627776) prop._addConstant("avgMinor", "avg-severity-minor", 549755813888) prop._addConstant("avgRecovering", "avg-recovering", 34359738368) prop._addConstant("avgWarn", "avg-severity-warning", 274877906944) prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192) prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256) prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512) prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096) prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048) prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128) prop._addConstant("cumulativeWarn", 
"cumulative-severity-warning", 1024) prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64) prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2) prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4) prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32) prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16) prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1) prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8) prop._addConstant("maxCrit", "max-severity-critical", 17179869184) prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912) prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824) prop._addConstant("maxMajor", "max-severity-major", 8589934592) prop._addConstant("maxMinor", "max-severity-minor", 4294967296) prop._addConstant("maxRecovering", "max-recovering", 268435456) prop._addConstant("maxWarn", "max-severity-warning", 2147483648) prop._addConstant("minCrit", "min-severity-critical", 134217728) prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304) prop._addConstant("minLow", "min-crossed-low-threshold", 8388608) prop._addConstant("minMajor", "min-severity-major", 67108864) prop._addConstant("minMinor", "min-severity-minor", 33554432) prop._addConstant("minRecovering", "min-recovering", 2097152) prop._addConstant("minWarn", "min-severity-warning", 16777216) prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576) prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768) prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536) prop._addConstant("periodicMajor", "periodic-severity-major", 524288) prop._addConstant("periodicMinor", "periodic-severity-minor", 262144) prop._addConstant("periodicRecovering", "periodic-recovering", 16384) prop._addConstant("periodicWarn", "periodic-severity-warning", 131072) prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968) prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624) prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248) prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984) prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992) prop._addConstant("rateRecovering", "rate-recovering", 562949953421312) prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496) prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656) prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208) prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416) prop._addConstant("trendMajor", "trend-severity-major", 140737488355328) prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664) prop._addConstant("trendRecovering", "trend-recovering", 4398046511104) prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832) prop._addConstant("unspecified", None, 0) meta.props.add("pktsThr", prop) prop = PropMeta("str", "pktsTr", "pktsTr", 32107, PropCategory.IMPLICIT_TREND) prop.label = "LacpAllowed Packets trend" prop.isOper = True prop.isStats = True meta.props.add("pktsTr", prop) prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR) prop.label = "Reporting End Time" prop.isImplicit = True prop.isAdmin = True meta.props.add("repIntvEnd", prop) prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR) 
prop.label = "Reporting Start Time" prop.isImplicit = True prop.isAdmin = True meta.props.add("repIntvStart", prop) prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN) prop.label = "None" prop.isRn = True prop.isImplicit = True prop.isAdmin = True prop.isCreateOnly = True meta.props.add("rn", prop) prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS) prop.label = "None" prop.isImplicit = True prop.isAdmin = True prop._addConstant("created", "created", 2) prop._addConstant("deleted", "deleted", 8) prop._addConstant("modified", "modified", 4) meta.props.add("status", prop) meta.namingProps.append(getattr(meta.props, "index")) # Deployment Meta meta.deploymentQuery = True meta.deploymentType = "Ancestor" meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg")) def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps): namingVals = [index] Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps) # End of package file # ##################################################
[ "bkhoward@live.com" ]
bkhoward@live.com
ac19567d2c9ed4b4c9852a5ca2ad7fbd7ab5185d
7fdf9c9b4f9601f1f24414da887acb03018e99f1
/gym/gym/settings.py
80b1a8a45b3e88065e60982b4b7505f389a7c7e7
[]
no_license
sid-ncet/fitnesss
d96653022664ec2b1c9a5811fc3f7048a122e3ed
fc62c77abe30659131f0befc93424f8bb9333a0b
refs/heads/master
2023-04-25T19:02:12.719223
2021-06-05T06:53:17
2021-06-05T06:53:17
374,042,630
0
0
null
null
null
null
UTF-8
Python
false
false
3,318
py
""" Django settings for gym project. Generated by 'django-admin startproject' using Django 3.2.3. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import os from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure--v@m2t(6erzobw08yshw&tbm9s47$n)99#t8+o87ib22l7$-dj' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'fitness' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'gym.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'gym.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT= BASE_DIR MEDIA_URL= '/images/download/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
[ "siddharthsinghcs11@gmail.com" ]
siddharthsinghcs11@gmail.com
5d5b6258a717833464801f98683c23cb6435e4f2
25ec545186596ea20ade231e1fa2a83faac0aa33
/penncycle/app/models.py
f5690000a90aa27fd65ef1dfff8d9f99576c6dfa
[]
no_license
rattrayalex/PennCycle
a0f43ef7a1390fea3016ed5ac96cca5ab431e8e1
dbcfa68c7bf9c928c559ba310e23be12e01ad998
refs/heads/master
2020-04-28T22:40:43.747751
2013-05-14T22:01:18
2013-05-14T22:01:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
12,559
py
import datetime from django.core.mail import send_mail from django_localflavor_us.models import PhoneNumberField from django.template.defaultfilters import slugify from django.db import models from django.core.validators import RegexValidator from south.modelsinspector import add_introspection_rules # Necessary because South hasn't been updated since localflavors was broken up. add_introspection_rules([], ['django_localflavor_us\.models\.PhoneNumberField']) GENDER_CHOICES = ( ('M', 'Male'), ('F', 'Female'), ) GRAD_YEAR_CHOICES = ( ('2016', '2016'), ('2015', '2015'), ('2014', '2014'), ('2013', '2013'), ('2012', '2012'), ('grad', 'grad student'), ('faculty', 'faculty'), ('staff', 'staff'), ('guest', 'guest'), ) LIVING_LOCATIONS = ( ('Hill', 'Hill'), ('KCECH', 'KCECH'), ('Riepe', 'Riepe'), ('Fisher', 'Fisher'), ('Ware', 'Ware'), ('Harrison', 'Harrison'), ('Harnwell', 'Harnwell'), ('Rodin', 'Rodin'), ('Stouffer', 'Stouffer'), ('Mayer', 'Mayer'), ('Du Bois', 'Du Bois'), ('Gregory', 'Gregory'), ('Sansom', 'Sansom'), ('Off Campus', 'Off Campus'), ) SCHOOL_CHOICES = ( ('C', 'College'), ('W', 'Wharton'), ('E', 'SEAS'), ('N', 'Nursing'), ('ANN', 'Annenberg'), ('DEN', 'Dental'), ('DES', 'Design'), ('GSE', 'Education'), ('LAW', 'Law'), ('MED', 'Medicine'), ('SPP', 'Social Policy & Practice'), ('VET', 'Veterinary'), ('O', 'Other or N/A'), ) PAYMENT_CHOICES = ( ('cash', 'cash'), ('penncash', 'penncash'), ('bursar', 'bursar'), ('credit', 'credit'), ('group', 'group'), ('stouffer', 'stouffer'), ('free', 'free'), ('other', 'other'), ('fisher', 'fisher') ) class Plan(models.Model): name = models.CharField(max_length=100) cost = models.IntegerField() start_date = models.DateField() end_date = models.DateField() description = models.TextField(max_length=150, default="Details coming soon!") banner = models.CharField(max_length=50, default="") def __unicode__(self): return self.name + ': $' + str(self.cost) class Payment(models.Model): amount = models.DecimalField(decimal_places=2, max_digits=6) plan = models.ForeignKey( Plan, default=1, limit_choices_to={ 'end_date__gte': datetime.date.today(), } ) student = models.ForeignKey('Student', related_name="payments") date = models.DateField(auto_now_add=True) satisfied = models.BooleanField(default=False) payment_type = models.CharField(max_length=100, choices=PAYMENT_CHOICES, blank=True, null=True) status = models.CharField(max_length=100, default='available') def save(self): super(Payment, self).save() self.student.paid = self.student.paid_now def __unicode__(self): return str(self.student) + ' for ' + str(self.plan) class Manufacturer(models.Model): name = models.CharField(max_length=30) address = models.CharField(max_length=50, blank=True) city = models.CharField(max_length=60, blank=True) country = models.CharField(max_length=50, blank=True) website = models.URLField(blank=True) email = models.EmailField(blank=True) def __unicode__(self): return self.name class Student(models.Model): name = models.CharField(max_length=100) email = models.EmailField() phone = PhoneNumberField() penncard = models.CharField(max_length=8, validators=[RegexValidator('\d{8}')], unique=True) last_two = models.CharField(max_length=2, validators=[RegexValidator('\d{2}')], blank=True, null=True) gender = models.CharField(max_length=1, choices=GENDER_CHOICES) grad_year = models.CharField(max_length=50, choices=GRAD_YEAR_CHOICES) join_date = models.DateField(default=datetime.date.today()) school = models.CharField(max_length=100, choices=SCHOOL_CHOICES) major = 
models.CharField(max_length=50, blank=True) living_location = models.CharField(max_length=100, choices=LIVING_LOCATIONS) waiver_signed = models.BooleanField(default=False) paid = models.BooleanField(default=False) payment_type = models.CharField(max_length=100, choices=PAYMENT_CHOICES, blank=True, null=True) staff = models.NullBooleanField(default=False) plan = models.ManyToManyField('Plan', blank=True, null=True) @property def paid_now(self): return len(self.current_payments) > 0 @property def current_payments(self): today = datetime.date.today() return self.payments.filter( satisfied=True, plan__start_date__lte=today, plan__end_date__gte=today, ) @property def can_ride(self): if len(self.current_payments.filter(status='available')) > 0 and self.waiver_signed: return True else: return False def __unicode__(self): return u'%s %s' % (self.name, self.penncard) class Bike(models.Model): bike_name = models.CharField(max_length=100, unique=True) manufacturer = models.ForeignKey(Manufacturer) purchase_date = models.DateField() color = models.CharField(max_length=30, blank=True) status = models.CharField(max_length=100, default='available') serial_number = models.CharField(max_length=100, blank=True) tag_id = models.CharField(max_length=100, blank=True) key_serial_number = models.CharField(max_length=100, blank=True) combo = models.CharField(max_length=4, blank=True) combo_update = models.DateField() @property def knows_combo(self): rides = self.rides.filter(checkout_time__gt=self.combo_update) return list(set([ride.rider for ride in rides])) @property def location(self): last_ride = self.rides.filter(checkin_station__isnull=False).order_by('-checkin_time') try: last_ride = last_ride[0] location = last_ride.checkin_station except: location = Station.objects.get(name__contains="PSA") return location def __unicode__(self): return '#%s. Location: %s' % (self.bike_name, self.location.name) days = { "Monday": 0, "Tuesday": 1, "Wednesday": 2, "Thursday": 3, "Friday": 4, "Saturday": 5, "Sunday": 6, } strings = dict([v, k] for k, v in days.items()) def decimal(time): if len(time) <= 2: return int(time) else: hours, minutes = time.split(":") return int(hours) + float(minutes) / 60 def hour(time): return decimal(time[0]) if (time[1] == "am" or time[0] == "12") else decimal(time[0])+12 def enter_hours(interval, info, day): # print(info) start_time = hour(info[0:2]) end_time = hour(info[3:5]) if day in interval: interval[day].append((start_time, end_time)) else: interval[day] = [(start_time, end_time)] def get_hours(description): intervals = {} day = 0 if not description: # empty station return {} for line in description.split("\n"): # assumes to be in order if line.split()[1] == "-": # there is a range of days # print("range of days") start = days[line.split()[0]] end = days[line.split()[2][:-1]] for i in range(end-start+1): that_day = strings[day] if "and" in line: # multiple ranges enter_hours(intervals, line.split()[3:8], that_day) enter_hours(intervals, line.split()[9:14], that_day) else: enter_hours(intervals, line.split()[3:8], that_day) day += 1 elif line.split()[0][-1] == ":": # print("matched :") that_day = strings[day] if "and" in line: # multiple ranges enter_hours(intervals, line.split()[1:6], that_day) enter_hours(intervals, line.split()[7:12], that_day) else: enter_hours(intervals, line.split()[1:6], that_day) day += 1 else: # 7 days a week. 
for day in range(7): enter_hours(intervals, line.split()[2:7], strings[day]) return intervals class Station(models.Model): name = models.CharField(max_length=100) latitude = models.FloatField(default=39.9529399) longitude = models.FloatField(default=-75.1905607) address = models.CharField(max_length=300, blank=True) notes = models.TextField(max_length=100, blank=True) hours = models.TextField(max_length=100, blank=True) picture = models.ImageField(upload_to='img/stations', blank=True) capacity = models.IntegerField(default=15) full_name = models.CharField(max_length=100, default="") def __unicode__(self): return self.name @property def is_open(self): ranges = get_hours(self.hours) today = datetime.datetime.today().weekday() this_hour = datetime.datetime.today().hour if strings[today] in ranges: hours = ranges[strings[today]] for opening in hours: if this_hour > opening[0] and this_hour < opening[1]: return True return False @property def comma_name(self): return ", ".join(self.hours.split("\n")) class Ride(models.Model): rider = models.ForeignKey( Student, limit_choices_to={ 'payments__status': 'available', 'waiver_signed': True, 'payments__satisfied': True, 'payments__plan__end_date__gte': datetime.date.today(), 'payments__plan__start_date__lte': datetime.date.today(), }, ) bike = models.ForeignKey('Bike', limit_choices_to={'status': 'available'}, related_name='rides') checkout_time = models.DateTimeField(auto_now_add=True) checkin_time = models.DateTimeField(null=True, blank=True) checkout_station = models.ForeignKey(Station, default=1, related_name='checkouts') checkin_station = models.ForeignKey(Station, blank=True, null=True, related_name='checkins') num_users = models.IntegerField() @property def ride_duration_days(self): if self.checkin_time is None: end = datetime.datetime.now() else: end = self.checkin_time duration = end - self.checkout_time duration_days = duration.days return duration_days @property def status(self): if self.checkin_time is None: return 'out' else: return 'in' def save(self): print 'in Ride save method' if not self.num_users: self.num_users = len(Student.objects.all()) super(Ride, self).save() print 'super saved!' if self.checkin_time is None: self.bike.status = 'out' payment = self.rider.current_payments.filter(status='available')[0] payment.status = 'out' else: self.bike.status = 'available' payment = self.rider.current_payments.filter(status='out')[0] payment.status = 'available' self.bike.save() payment.save() def __unicode__(self): return u'%s on %s' % (self.rider, self.checkout_time) class Page(models.Model): content = models.TextField() name = models.CharField(max_length=100) slug = models.SlugField() def save(self): self.slug = slugify(self.name) super(Page, self).save() def __unicode__(self): return self.name class Comment(models.Model): comment = models.TextField() time = models.DateTimeField(auto_now_add=True) student = models.ForeignKey(Student, blank=True, null=True) ride = models.ForeignKey(Ride, blank=True, null=True) is_problem = models.BooleanField(default=False) def save(self): super(Comment, self).save() message = ''' Comment: \n %s \n \n Time: \n %s \n \n Student: \n %s \n \n Ride: \n %s \n \n Marked as problem? 
\n %s \n \n ''' % (self.comment, self.time, self.student, self.ride, self.is_problem) send_mail('PennCycle: Comment Submitted', message, 'messenger@penncycle.org', ['messenger@penncycle.org']) def __unicode__(self): return self.comment[:30] class Info(models.Model): message = models.TextField() date = models.DateField(auto_now_add=True) def __unicode__(self): return self.message + " on " + self.date.isoformat()
[ "razzi53@gmail.com" ]
razzi53@gmail.com
fc0d28850895dd119c8a2b4afc9f5481bb7779fe
b9eef16211d4a5f2e5b51c0ddfb7dc0a9608db86
/Chap2InprovingDNN/week2/optimization/optimization.py
e2e8cee8f4713b5cd9743e543af41771e71a40f0
[]
no_license
vinares/DeepLearning
905f44655c0b72c9ba6d52bf1c15146b0d07fc92
c307c3c1063a101dcfa192bc3b8671c2781e31f3
refs/heads/main
2023-06-22T05:45:46.716091
2021-07-21T09:03:02
2021-07-21T09:03:02
369,407,380
0
0
null
null
null
null
UTF-8
Python
false
false
19,039
py
import numpy as np import matplotlib.pyplot as plt import scipy.io import math import sklearn import sklearn.datasets from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset from testCases import * plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # GRADED FUNCTION: update_parameters_with_gd def update_parameters_with_gd(parameters, grads, learning_rate): """ Update parameters using one step of gradient descent Arguments: parameters -- python dictionary containing your parameters to be updated: parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl grads -- python dictionary containing your gradients to update each parameters: grads['dW' + str(l)] = dWl grads['db' + str(l)] = dbl learning_rate -- the learning rate, scalar. Returns: parameters -- python dictionary containing your updated parameters """ L = len(parameters) // 2 # number of layers in the neural networks # Update rule for each parameter for l in range(L): ### START CODE HERE ### (approx. 2 lines) parameters["W" + str(l + 1)] = parameters['W' + str(l + 1)] - learning_rate * grads['dW' + str(l + 1)] parameters["b" + str(l + 1)] = parameters['b' + str(l + 1)] - learning_rate * grads['db' + str(l + 1)] ### END CODE HERE ### return parameters parameters, grads, learning_rate = update_parameters_with_gd_test_case() parameters = update_parameters_with_gd(parameters, grads, learning_rate) print("W1 =\n" + str(parameters["W1"])) print("b1 =\n" + str(parameters["b1"])) print("W2 =\n" + str(parameters["W2"])) print("b2 =\n" + str(parameters["b2"])) # GRADED FUNCTION: random_mini_batches def random_mini_batches(X, Y, mini_batch_size=64, seed=0): """ Creates a list of random minibatches from (X, Y) Arguments: X -- input data, of shape (input size, number of examples) Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples) mini_batch_size -- size of the mini-batches, integer Returns: mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y) """ np.random.seed(seed) # To make your "random" minibatches the same as ours m = X.shape[1] # number of training examples mini_batches = [] # Step 1: Shuffle (X, Y) permutation = list(np.random.permutation(m)) shuffled_X = X[:, permutation] shuffled_Y = Y[:, permutation].reshape((1, m)) # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case. num_complete_minibatches = math.floor( m / mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning for k in range(0, num_complete_minibatches): ### START CODE HERE ### (approx. 2 lines) mini_batch_X = shuffled_X[:, k * mini_batch_size: (k + 1) * mini_batch_size] mini_batch_Y = shuffled_Y[:, k * mini_batch_size: (k + 1) * mini_batch_size] ### END CODE HERE ### mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) # Handling the end case (last mini-batch < mini_batch_size) if m % mini_batch_size != 0: ### START CODE HERE ### (approx. 
2 lines) mini_batch_X = shuffled_X[:, (num_complete_minibatches ) * mini_batch_size:m] mini_batch_Y = shuffled_Y[:, (num_complete_minibatches ) * mini_batch_size:m] ### END CODE HERE ### mini_batch = (mini_batch_X, mini_batch_Y) mini_batches.append(mini_batch) return mini_batches X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case() mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size) print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape)) print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape)) print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape)) print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape)) print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape)) print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape)) print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3])) # GRADED FUNCTION: initialize_velocity def initialize_velocity(parameters): """ Initializes the velocity as a python dictionary with: - keys: "dW1", "db1", ..., "dWL", "dbL" - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters. Arguments: parameters -- python dictionary containing your parameters. parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl Returns: v -- python dictionary containing the current velocity. v['dW' + str(l)] = velocity of dWl v['db' + str(l)] = velocity of dbl """ L = len(parameters) // 2 # number of layers in the neural networks v = {} # Initialize velocity for l in range(L): ### START CODE HERE ### (approx. 2 lines) v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape) v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape) ### END CODE HERE ### return v parameters = initialize_velocity_test_case() v = initialize_velocity(parameters) print("v[\"dW1\"] =\n" + str(v["dW1"])) print("v[\"db1\"] =\n" + str(v["db1"])) print("v[\"dW2\"] =\n" + str(v["dW2"])) print("v[\"db2\"] =\n" + str(v["db2"])) # GRADED FUNCTION: update_parameters_with_momentum def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate): """ Update parameters using Momentum Arguments: parameters -- python dictionary containing your parameters: parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl grads -- python dictionary containing your gradients for each parameters: grads['dW' + str(l)] = dWl grads['db' + str(l)] = dbl v -- python dictionary containing the current velocity: v['dW' + str(l)] = ... v['db' + str(l)] = ... beta -- the momentum hyperparameter, scalar learning_rate -- the learning rate, scalar Returns: parameters -- python dictionary containing your updated parameters v -- python dictionary containing your updated velocities """ L = len(parameters) // 2 # number of layers in the neural networks # Momentum update for each parameter for l in range(L): ### START CODE HERE ### (approx. 
4 lines) # compute velocities v["dW" + str(l + 1)] = beta * v["dW" + str(l + 1)] + (1 - beta) * grads["dW" + str(l + 1)] v["db" + str(l + 1)] = beta * v["db" + str(l + 1)] + (1 - beta) * grads["db" + str(l + 1)] # update parameters parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v["dW" + str(l + 1)] parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v["db" + str(l + 1)] ### END CODE HERE ### return parameters, v parameters, grads, v = update_parameters_with_momentum_test_case() parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01) print("W1 = \n" + str(parameters["W1"])) print("b1 = \n" + str(parameters["b1"])) print("W2 = \n" + str(parameters["W2"])) print("b2 = \n" + str(parameters["b2"])) print("v[\"dW1\"] = \n" + str(v["dW1"])) print("v[\"db1\"] = \n" + str(v["db1"])) print("v[\"dW2\"] = \n" + str(v["dW2"])) print("v[\"db2\"] = v" + str(v["db2"])) # GRADED FUNCTION: initialize_adam def initialize_adam(parameters): """ Initializes v and s as two python dictionaries with: - keys: "dW1", "db1", ..., "dWL", "dbL" - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters. Arguments: parameters -- python dictionary containing your parameters. parameters["W" + str(l)] = Wl parameters["b" + str(l)] = bl Returns: v -- python dictionary that will contain the exponentially weighted average of the gradient. v["dW" + str(l)] = ... v["db" + str(l)] = ... s -- python dictionary that will contain the exponentially weighted average of the squared gradient. s["dW" + str(l)] = ... s["db" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural networks v = {} s = {} # Initialize v, s. Input: "parameters". Outputs: "v, s". for l in range(L): ### START CODE HERE ### (approx. 4 lines) v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape) v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape) s["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape) s["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape) ### END CODE HERE ### return v, s parameters = initialize_adam_test_case() v, s = initialize_adam(parameters) print("v[\"dW1\"] = \n" + str(v["dW1"])) print("v[\"db1\"] = \n" + str(v["db1"])) print("v[\"dW2\"] = \n" + str(v["dW2"])) print("v[\"db2\"] = \n" + str(v["db2"])) print("s[\"dW1\"] = \n" + str(s["dW1"])) print("s[\"db1\"] = \n" + str(s["db1"])) print("s[\"dW2\"] = \n" + str(s["dW2"])) print("s[\"db2\"] = \n" + str(s["db2"])) # GRADED FUNCTION: update_parameters_with_adam def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8): """ Update parameters using Adam Arguments: parameters -- python dictionary containing your parameters: parameters['W' + str(l)] = Wl parameters['b' + str(l)] = bl grads -- python dictionary containing your gradients for each parameters: grads['dW' + str(l)] = dWl grads['db' + str(l)] = dbl v -- Adam variable, moving average of the first gradient, python dictionary s -- Adam variable, mo v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape) v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape) s["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape) learning_rate -- the learning rate, scalar. 
beta1 -- Exponential decay hyperparameter for the first moment estimates beta2 -- Exponential decay hyperparameter for the second moment estimates epsilon -- hyperparameter preventing division by zero in Adam updates Returns: parameters -- python dictionary containing your updated parameters v -- Adam variable, moving average of the first gradient, python dictionary s -- Adam variable, moving average of the squared gradient, python dictionary """ L = len(parameters) // 2 # number of layers in the neural networks v_corrected = {} # Initializing first moment estimate, python dictionary s_corrected = {} # Initializing second moment estimate, python dictionary # Perform Adam update on all parameters for l in range(L): # Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v". ### START CODE HERE ### (approx. 2 lines) v["dW" + str(l + 1)] = beta1 * v["dW" + str(l + 1)] + (1 - beta1) * grads["dW" + str(l + 1)] v["db" + str(l + 1)] = beta1 * v["db" + str(l + 1)] + (1 - beta1) * grads["db" + str(l + 1)] ### END CODE HERE ### # Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected". ### START CODE HERE ### (approx. 2 lines) v_corrected["dW" + str(l + 1)] = v["dW" + str(l + 1)] / (1 - beta1 ** t) v_corrected["db" + str(l + 1)] = v["db" + str(l + 1)] / (1 - beta1 ** t) ### END CODE HERE ### # Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s". ### START CODE HERE ### (approx. 2 lines) s["dW" + str(l + 1)] = beta2 * s["dW" + str(l + 1)] + (1 - beta2) * (grads["dW" + str(l + 1)] ** 2) s["db" + str(l + 1)] = beta2 * s["db" + str(l + 1)] + (1 - beta2) * (grads["db" + str(l + 1)] ** 2) ### END CODE HERE ### # Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected". ### START CODE HERE ### (approx. 2 lines) s_corrected["dW" + str(l + 1)] = s["dW" + str(l + 1)] / (1 - beta2 ** t) s_corrected["db" + str(l + 1)] = s["db" + str(l + 1)] / (1 - beta2 ** t) ### END CODE HERE ### # Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters". ### START CODE HERE ### (approx. 2 lines) parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v_corrected["dW" + str(l + 1)] / (np.sqrt(s_corrected["dW" + str(l + 1)]) + epsilon) parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v_corrected["db" + str(l + 1)] / (np.sqrt(s_corrected["db" + str(l + 1)]) + epsilon) ### END CODE HERE ### return parameters, v, s parameters, grads, v, s = update_parameters_with_adam_test_case() parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2) print("W1 = \n" + str(parameters["W1"])) print("b1 = \n" + str(parameters["b1"])) print("W2 = \n" + str(parameters["W2"])) print("b2 = \n" + str(parameters["b2"])) print("v[\"dW1\"] = \n" + str(v["dW1"])) print("v[\"db1\"] = \n" + str(v["db1"])) print("v[\"dW2\"] = \n" + str(v["dW2"])) print("v[\"db2\"] = \n" + str(v["db2"])) print("s[\"dW1\"] = \n" + str(s["dW1"])) print("s[\"db1\"] = \n" + str(s["db1"])) print("s[\"dW2\"] = \n" + str(s["dW2"])) print("s[\"db2\"] = \n" + str(s["db2"])) train_X, train_Y = load_dataset() plt.show() def model(X, Y, layers_dims, optimizer, learning_rate=0.0007, mini_batch_size=64, beta=0.9, beta1=0.9, beta2=0.999, epsilon=1e-8, num_epochs=10000, print_cost=True): """ 3-layer neural network model which can be run in different optimizer modes. 
Arguments: X -- input data, of shape (2, number of examples) Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples) layers_dims -- python list, containing the size of each layer learning_rate -- the learning rate, scalar. mini_batch_size -- the size of a mini batch beta -- Momentum hyperparameter beta1 -- Exponential decay hyperparameter for the past gradients estimates beta2 -- Exponential decay hyperparameter for the past squared gradients estimates epsilon -- hyperparameter preventing division by zero in Adam updates num_epochs -- number of epochs print_cost -- True to print the cost every 1000 epochs Returns: parameters -- python dictionary containing your updated parameters """ L = len(layers_dims) # number of layers in the neural networks costs = [] # to keep track of the cost t = 0 # initializing the counter required for Adam update seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours m = X.shape[1] # number of training examples # Initialize parameters parameters = initialize_parameters(layers_dims) # Initialize the optimizer if optimizer == "gd": pass # no initialization required for gradient descent elif optimizer == "momentum": v = initialize_velocity(parameters) elif optimizer == "adam": v, s = initialize_adam(parameters) # Optimization loop for i in range(num_epochs): # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch seed = seed + 1 minibatches = random_mini_batches(X, Y, mini_batch_size, seed) cost_total = 0 for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # Forward propagation a3, caches = forward_propagation(minibatch_X, parameters) # Compute cost and add to the cost total cost_total += compute_cost(a3, minibatch_Y) # Backward propagation grads = backward_propagation(minibatch_X, minibatch_Y, caches) # Update parameters if optimizer == "gd": parameters = update_parameters_with_gd(parameters, grads, learning_rate) elif optimizer == "momentum": parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate) elif optimizer == "adam": t = t + 1 # Adam counter parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t, learning_rate, beta1, beta2, epsilon) cost_avg = cost_total / m # Print the cost every 1000 epoch if print_cost and i % 1000 == 0: print("Cost after epoch %i: %f" % (i, cost_avg)) if print_cost and i % 100 == 0: costs.append(cost_avg) # plot the cost plt.plot(costs) plt.ylabel('cost') plt.xlabel('epochs (per 100)') plt.title("Learning rate = " + str(learning_rate)) plt.show() return parameters # train 3-layer model layers_dims = [train_X.shape[0], 5, 2, 1] parameters = model(train_X, train_Y, layers_dims, optimizer = "gd") # Predict predictions = predict(train_X, train_Y, parameters) # Plot decision boundary plt.title("Model with Gradient Descent optimization") axes = plt.gca() axes.set_xlim([-1.5,2.5]) axes.set_ylim([-1,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # train 3-layer model layers_dims = [train_X.shape[0], 5, 2, 1] parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum") # Predict predictions = predict(train_X, train_Y, parameters) # Plot decision boundary plt.title("Model with Momentum optimization") axes = plt.gca() axes.set_xlim([-1.5,2.5]) axes.set_ylim([-1,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y) # train 3-layer model 
layers_dims = [train_X.shape[0], 5, 2, 1] parameters = model(train_X, train_Y, layers_dims, optimizer = "adam") # Predict predictions = predict(train_X, train_Y, parameters) # Plot decision boundary plt.title("Model with Adam optimization") axes = plt.gca() axes.set_xlim([-1.5,2.5]) axes.set_ylim([-1,1.5]) plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
[ "877649518@qq.com" ]
877649518@qq.com
76f0bfb3491090f86e4d11cf509c6a61dde62e2f
1f9d8381f111ee34be61a82cdf2038afc1a44079
/sequenceToLine.py
848d936908c046a2428b7f84395126b3190bd404
[ "BSD-3-Clause" ]
permissive
el-mat/ectools
f35d305c8fd558436cd7534c5fe4db66fffead24
031eb0300c82392915d8393a5fedb4d3452b15bf
refs/heads/master
2021-01-23T21:10:42.869782
2014-12-01T14:07:00
2014-12-01T14:07:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
430
py
#!/usr/bin/env python
# Flatten each sequence record (FASTA/FASTQ) onto a single tab-separated line.
import sys

from seqio import iteratorFromExtension
from nucio import fileIterator

if not len(sys.argv) == 2:
    sys.exit("sequenceToLine.py in.{fa.fq}\n")

it = iteratorFromExtension(sys.argv[1])
for record in fileIterator(sys.argv[1], it):
    if hasattr(record, "desc"):
        print "\t".join([record.name, record.seq, record.desc, record.qual])
    else:
        print "\t".join([record.name, record.seq])
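# Added note (not part of the original script): each output row is
# tab-separated. A FASTQ-style record that carries a description would print
# "<name>\t<seq>\t<desc>\t<qual>", while a plain FASTA record prints
# "<name>\t<seq>".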
[ "gurtowsk@mshadoop1.cshl.edu" ]
gurtowsk@mshadoop1.cshl.edu
661b9aa3fceb522de6be34632dd648f5060f74c3
a69b96621abef181606fd3d68eebaa5b655ed529
/Lesson3problem2.py
baf178f6055c80d571cdd52706d09817b15fd3d5
[]
no_license
agonzalez33/Lesson3
3724be5e602a9befafe72be8570a5e3cc6ab2ec0
cc7dfe7a43b3691141a1d9f81d5939d5134a23e3
refs/heads/master
2020-05-03T04:00:58.346507
2019-03-29T13:35:45
2019-03-29T13:35:45
178,411,421
0
0
null
null
null
null
UTF-8
Python
false
false
69
py
for integer in range(1000,0,-1):
    print(integer)
print('Blast off!')
[ "noreply@github.com" ]
agonzalez33.noreply@github.com
849b16fbf6169f6d56be1d9b19ad76d20f75fe68
92dbb16f383754fd9fd8d35c87b68977ec42a586
/Geogria/20200514-graph/map_world.py
847c835b947d72027a50300d73ea3ed4aef2713e
[]
no_license
YWJL/pchong
c0c1bfa4695ac3b143430fd2291b197b4fdab884
eaa98c5ed3daad60e8ac0560634ba631e665f00e
refs/heads/master
2022-11-11T00:01:55.550199
2020-07-01T06:11:56
2020-07-01T06:11:56
276,290,019
0
0
null
null
null
null
UTF-8
Python
false
false
4,565
py
import pandas from pyecharts import options as opts from pyecharts.charts import Bar, Grid, Line from pyecharts import options as opts from pyecharts.charts import Map from pyecharts.faker import Collector, Faker from pyecharts.datasets import register_url import pandas as pd import asyncio from pyecharts.commons.utils import JsCode import math from aiohttp import TCPConnector, ClientSession import pyecharts.options as opts from pyecharts.charts import Map import pyecharts.options as opts from pyecharts.charts import Line from pyecharts import options as opts from pyecharts.charts import Bar, Timeline from pyecharts import options as opts from pyecharts.charts import Grid, Line, Scatter from pyecharts.faker import Faker from pyecharts.commons.utils import JsCode from pyecharts.faker import Faker from pyecharts.charts import Geo import json US_name='daily.csv' death_name='202005014-world-death-data.json.csv' positive_name='202005014-world-confirm-data.json.csv' recovered_name='202005014-world-cover-data.json.csv' RECOVERED=pd.read_csv(recovered_name) DEATH=pd.read_csv(death_name) POSITIVE=pd.read_csv(positive_name) US=pd.read_csv(US_name) US_pos=[] US_Dea=[] US_Rec=[] for i in range(1,56): if math.isnan(US.iloc[i,2]): US.iloc[i, 2]=0 if math.isnan(US.iloc[i, 16]): US.iloc[i, 16]=0 if math.isnan(US.iloc[i, 11]): US.iloc[i,11]=0 US_pos.append(US.iloc[i, 2]) US_Dea.append(US.iloc[i, 16]) US_Rec.append(US.iloc[i, 11]) print('US_pos:',US_pos) print('US_Dea:',US_Dea) print('US_Rec:',US_Rec) country_number_pos=int((POSITIVE.shape[1])/2-1) country_number_dea=int((DEATH.shape[1])/2-1) country_number_rec=int((RECOVERED.shape[1])/2-2) print(country_number_dea) print(RECOVERED.iloc[1,country_number_rec]) day=len(POSITIVE)-1 print(day) country_pos=[] country_dea=[] country_rec=[] positive=[] death=[] recovered=[] print('sum(US_dea):',sum(US_Dea)) print('sum(US_pos):',sum(US_pos)) print('sum(US_rec):',sum(US_Rec)) time="截止至{}全球疫情数据".format(POSITIVE.iloc[-1,0]) for i in range(1,country_number_dea): country_dea.append(DEATH.iloc[1,i]) death.append(DEATH.iloc[day-1,i]) country_dea.append('United States') death.append(sum(US_Dea)) MAP_data_dea=[list(z) for z in zip(country_dea, death)] print('MAP_data_dea:',MAP_data_dea) for i in range(1,country_number_pos): country_pos.append(POSITIVE.iloc[1,i]) positive.append(POSITIVE.iloc[day,i]) country_pos.append('United States') positive.append(sum(US_pos)) MAP_data_pos=[list(z) for z in zip(country_pos, positive)] print(len(positive)) MAP_data_rec=[] # print(type(RECOVERED.iloc[2,0])) for i in range(1,day-6): for j in range(1,day-6): if type(RECOVERED.iloc[i,j])!=str and math.isnan(RECOVERED.iloc[i,j]): RECOVERED.iloc[i,j]=0 for i in range(1,day-6): country_rec.append(RECOVERED.iloc[1,i]) recovered.append(RECOVERED.iloc[day-6,i]) MAP_data_rec=[list(z) for z in zip(country_rec, recovered)] country_rec.append('United States') recovered.append(sum(US_Rec)) print('MAP_data_pos:',MAP_data_pos) # for i in range(1,country_number_dea-1): # for j in range(1,country_number_dea-1): # if country_pos[i]==country_dea[j]: # map1=[list(z) for z in zip(country_dea, country_pos)] # print(map1) # print(country) # print(data.iloc[day,1]) # print(day) NAME_MAP_DATA = { # "key": "value" # "name on the hong kong map": "name in the MAP DATA", } c = ( Map(init_opts=opts.InitOpts(width="1400px", height="800px")) .add( series_name="Positive_number", data_pair=MAP_data_pos, maptype="world", name_map=NAME_MAP_DATA, is_map_symbol_show=False) .add( series_name="Death_number", 
data_pair=MAP_data_dea, maptype="world", name_map=NAME_MAP_DATA, is_map_symbol_show=False) .add( series_name="Recovered_number", data_pair=MAP_data_rec, maptype="world", name_map=NAME_MAP_DATA, is_map_symbol_show=False) .set_series_opts(label_opts=opts.LabelOpts(is_show=False)) .set_global_opts( title_opts=opts.TitleOpts( title="Map-世界地图", subtitle=time), # subtitle=time, visualmap_opts=opts.VisualMapOpts(max_=sum(US_pos)), tooltip_opts=opts.TooltipOpts( trigger="item", formatter="{b0}<br/>(number:{c}) " ), ) .render("map_world.html") ) # print([list(z) for z in zip(Faker.country, Faker.values())]) print(max(US_pos))
[ "201256153@qq.com" ]
201256153@qq.com
8fcae2a12359d68896b1d9e5d7db84dacd86f151
3f453e74ae03c777d4ca803623cf9f69b70ace87
/mappanel.py
3d92e74b1764f1483756c88d1f68dbe828478608
[]
no_license
acidtobi/weewar_clone
5a348ece62ff22f3a0812867a93ac5f5a370f782
5b0575ee7534278d49df446a852e33d3f232d6e7
refs/heads/master
2021-05-04T10:41:44.741431
2016-02-11T20:47:23
2016-02-11T20:47:23
50,377,074
0
0
null
null
null
null
UTF-8
Python
false
false
3,916
py
from __future__ import division import wx import wx.lib.scrolledpanel class MapPanel(wx.lib.scrolledpanel.ScrolledPanel): def __init__(self, parent, background_tile, size, innerbitmap=None): self.background_tile = background_tile self.InnerSize = size self.innerbitmap = innerbitmap self._Buffer = None self.virtual_x = 0 self.virtual_y = 0 screen_width, screen_height = wx.DisplaySize() self.background = wx.EmptyBitmap(screen_width, screen_height) dc = wx.MemoryDC() dc.SelectObject(self.background) tile_width, tile_height = self.background_tile.Size for rownum in range(int(screen_height / tile_height)): for colnum in range(int(screen_width / tile_width)): dc.DrawBitmap(self.background_tile, colnum * tile_width, rownum * tile_height, True) width_px, height_px = size wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, size=(width_px, height_px)) self.SetupScrolling() self.SetScrollRate(1, 1) self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnResize) #self.OnResize(None) def setInnerBitmap(self, bitmap): self.innerbitmap = bitmap def GetVirtualPosition(self, (x, y)): scrolled_x, scrolled_y = self.CalcScrolledPosition((self.virtual_x, self.virtual_y)) return x - scrolled_x, y - scrolled_y def UpdateDrawing(self): dc = wx.MemoryDC() dc.SelectObject(self._Buffer) self_width, self_height = self.InnerSize sizer_width, sizer_height = self.GetSize() self.virtual_x = max(0, (sizer_width - self_width) / 2) self.virtual_y = max(0, (sizer_height - self_height) / 2) tile_width, tile_height = self.background_tile.Size offset_x, offset_y = self.virtual_x % tile_width, self.virtual_y % tile_height dc.DrawBitmap(self.background, offset_x - tile_width, offset_y - tile_height) if self.innerbitmap: dc.DrawBitmap(self.innerbitmap, self.virtual_x, self.virtual_y, True) del dc self.Refresh(eraseBackground=False) self.Update() def OnPaint(self, e): dc = wx.PaintDC(self) x, y = self.CalcScrolledPosition((0, 0)) dc.DrawBitmap(self._Buffer, x, y) def OnResize(self, e): width, height = e.GetSize() inner_width, inner_height = self.InnerSize self.SetSize((width, height)) self.SetVirtualSize((inner_width, inner_height)) self._Buffer = wx.EmptyBitmap(max(width, inner_width), max(height, inner_height)) self.UpdateDrawing() # ============================================================================================== # tests # ============================================================================================== if __name__ == "__main__": class MainFrame(wx.Frame): def __init__(self, parent, title): background = wx.Bitmap("tiles_background.jpg") background_tile = wx.Bitmap("logo_background_repeating.png") self.foreground = wx.Bitmap("rubberducky.png") wx.Frame.__init__(self, parent, title=title, size=background.Size) self.mappanel = MapPanel(self, background_tile, size=self.foreground.Size, innerbitmap=self.foreground) leftpanel = wx.Panel(self, -1, size=(100, -1)) self.box = wx.BoxSizer(wx.HORIZONTAL) self.box.Add(leftpanel, 0, wx.EXPAND) self.box.Add(self.mappanel, 2, wx.EXPAND) self.SetAutoLayout(True) self.SetSizer(self.box) self.Layout() #self.Bind(wx.EVT_PAINT, self.OnPaint) self.mappanel.setInnerBitmap(self.foreground) app = wx.App() mainframe = MainFrame(None, "Map Panel") mainframe.Show() app.MainLoop()
[ "acidtobi@gmail.com" ]
acidtobi@gmail.com
1f276f5a21289f070e9ebfcc655a747a3d1cd3b1
0104f7736632084592cd6ced20de0be9fb9e24ac
/剑指offer/构建乘积数组.py
09d398118fb634a3669c2c9da9a75f98947ad262
[]
no_license
longkun-uestc/examination
9eb63b6e8ffdb503a90a6be3d049ad2fdb85e46c
ef1d29a769f2fd6d517497f8b42121c02f8307cc
refs/heads/master
2021-06-25T23:11:24.460680
2021-06-23T03:28:55
2021-06-23T03:28:55
228,847,479
1
0
null
null
null
null
UTF-8
Python
false
false
745
py
class Solution:
    def multiply(self, A):
        left_to_right = [-1] * len(A)
        right_to_left = [-1] * len(A)
        left_to_right[0] = 1
        right_to_left[-1] = 1
        for i in range(1, len(left_to_right)):
            left_to_right[i] = left_to_right[i-1] * A[i-1]
        for i in range(len(right_to_left)-2, -1, -1):
            right_to_left[i] = right_to_left[i+1] * A[i+1]
        # B = [1]*len(A)
        # for i in range(len(B)):
        #     B[i] = left_to_right[i] * right_to_left[i]
        B = [a*b for a, b in zip(left_to_right, right_to_left)]
        # print(left_to_right)
        # print(right_to_left)
        # print(B)
        return B


if __name__ == '__main__':
    s = Solution()
    s.multiply([2, 3, 4, 5, 6])
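# Added worked example (not part of the original solution): B[i] is the product
# of all elements of A except A[i], assembled from the prefix products
# (left_to_right) and the suffix products (right_to_left). For A = [1, 2, 3, 4]
# the expected result is B = [24, 12, 8, 6].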
[ "1256904448@qq.com" ]
1256904448@qq.com
9c49c4755281a3c8a9b671df5099d752953dc5ec
b4ef8fcaf8e8818215add4402efadfef9bda45ee
/sample_code/python/vessels_v2_graphql/run.py
e393f1cdfdf590796dd7068877f0fcb1a4f2fb1a
[]
no_license
ykparkwixon/maritime
aebbbfe8e2f1ebb1bf2dbb01a94127977251285c
378834c9b521ff538395c36e377117c87760fe22
refs/heads/main
2023-07-17T07:00:17.651206
2021-08-30T22:18:14
2021-08-30T22:18:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,062
py
import yaml import json import csv from loguru import logger from utilities import paging, helpers from gql import gql logger.add('demo_client.log', rotation="500 MB", retention="10 days", level='DEBUG') rows_written_to_raw_log: int = 0 rows_written_to_csv: int = 0 pages_processed: int = 0 wrote_csv_header = False def get_settings(): """Reads the settings.yaml file and returns the variables and values :returns data: setting variables and values :rtype data: dict """ with open('settings.yaml') as f: data: dict = yaml.load(f, Loader=yaml.FullLoader) return data def read_query_file(): settings = get_settings() file_name = settings['name_of_gql_query_file'] with open(file_name, 'r') as f: return f.read() def write_raw(data: dict): settings = get_settings() name_of_raw_output_file = settings['name_of_raw_output_file'] if not name_of_raw_output_file: return with open(name_of_raw_output_file, 'a+') as f: f.write(json.dumps(data, indent=4)) def write_csv(data: dict): global rows_written_to_csv, wrote_csv_header settings = get_settings() name_of_csv_file = settings['name_of_csv_file'] if not name_of_csv_file: return members = helpers.get_vessels_v2_members() # get just the keys csv_columns: list = [i[0] for i in members] try: with open(name_of_csv_file, 'a+') as f: writer = csv.DictWriter(f, fieldnames=csv_columns) logger.debug(f"WROTE HEADER: {wrote_csv_header}") if not wrote_csv_header: writer.writeheader() wrote_csv_header = True item: dict for item in data: writer.writerow(item) rows_written_to_csv += 1 except Exception: raise def get_info(): info = f""" TOTAL PAGES WRITTEN TO RAW LOG: {rows_written_to_raw_log} TOTAL ROWS WRITTEN TO CSV: {rows_written_to_csv} TOTAL PAGES PROCESSED: {pages_processed}""" return info def run(): global pages_processed settings = get_settings() test_name = settings['test_name'] pages_to_process = settings['pages_to_process'] # make a client connection client = helpers.get_gql_client() # read file query = read_query_file() if not "pageInfo" or not "endCursor" or not "hasNextPage" in query: logger.error("Please include pageInfo in the query, it is required for paging. See the README.md") return response: dict = dict() try: response = client.execute(gql(query)) except BaseException as e: logger.error(e) raise # initialize paging pg = paging.Paging(response=response) schema_members = helpers.get_vessels_v2_members() # page, write, util complete logger.info("Paging started") while True: response, hasNextPage = pg.page_and_get_response(client, query) logger.debug(f"hasNextPage: {hasNextPage}") if response: write_raw(response) csv_data = helpers.transform_response_for_loading(response=response, schema=schema_members, test_name=test_name) if csv_data: write_csv(csv_data) pages_processed += 1 logger.info(f"Page: {pages_processed}") if pages_to_process == 1: break elif pages_to_process: if not hasNextPage or not response: break if pages_processed >= pages_to_process: break elif not hasNextPage or not response: break else: logger.info("Did not get data for csv, either because there are no more pages, or did not get a response") break else: logger.info("No response or no more responses") break logger.info(get_info()) if __name__ == '__main__': run() logger.info("Done")
[ "78374623+brucebookman@users.noreply.github.com" ]
78374623+brucebookman@users.noreply.github.com
3774ffc4fdcb9c86ca755421da7b371e9f1e7d2c
6f61a105f85f9e4b6b98494b45e96d3099402449
/kapool/settings.py
7993700b0e3d7208e963c459ba82c5a60ebc6863
[ "MIT" ]
permissive
Marah-uwase/carpool
00ca3b230fbe2bfabb4660cbf8974a902dadc85b
6ee69e1ad48352a4d1f59f372b41a2891fc58ec7
refs/heads/models
2023-02-25T19:52:44.180676
2021-02-02T08:22:05
2021-02-02T08:22:05
334,284,598
0
0
null
2021-02-01T13:21:49
2021-01-29T23:14:54
Python
UTF-8
Python
false
false
3,767
py
""" Django settings for kapool project. Generated by 'django-admin startproject' using Django 3.1.5. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path import os import cloudinary import cloudinary.uploader import cloudinary.api from decouple import config # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '(c94#zgkxgtwila5*$=yss0nngan+b9l9&r1+#nrd=cd849p76' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'carpool', 'app', 'tinymce', 'bootstrap4', 'rest_framework', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'kapool.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, "templates")], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'kapool.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'karpool', 'USER': 'maranatha', 'PASSWORD':'maman', } } cloudinary.config( api_key = 'AIzaSyCv9Yc1eQAYKqm3qXBpUBfEa-CYW9CVoTQ', ) # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'),) MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') LOGIN_REDIRECT_URL='/' LOGOUT_REDIRECT_URL = '/' REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', ) }
[ "maranahuwase12@gmail.com" ]
maranahuwase12@gmail.com
0dca2f890e85ab82a477f193ca5d7b13bb4452f4
5310aad336ad7cdc304a7204d4bd91b4fa754f1e
/Lab3/homework/serious_ex9.py
1c3bf0845b2d742f8ca01016425fdb34f9ee6da7
[]
no_license
dattran1997/trandat-fundamental-c4e17
329e294f68bde1fc04d53c0acd0f9a7e87d7d444
fd2f0648f28e78769f7fbf3e40e9973bf211f1de
refs/heads/master
2020-03-09T22:49:56.228853
2018-05-29T04:22:39
2018-05-29T04:22:39
129,044,126
0
0
null
null
null
null
UTF-8
Python
false
false
244
py
def get_even_list(list):
    new_list = []
    for i in list:
        if (i % 2 == 0):
            new_list.append(i)
    return new_list


if __name__ == "__main__":
    list = [1, 4, 5, -1, 10]
    newlist = get_even_list(list)
    print(newlist)
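# Added note (not part of the original file): for the sample input above,
# get_even_list([1, 4, 5, -1, 10]) returns [4, 10], so the script prints [4, 10].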
[ "dattran1997@gmail.com" ]
dattran1997@gmail.com
6763b1340462fabc349debc7f52e0774a21e430f
7c59004e0165c9b32dc5b786b96fc4d81f565daf
/predictor_ker.py
db39a01a8c2f10a576acb2f85d032224fae85302
[]
no_license
LeonHardt427/mayi
f04d7d7bca68e0a3a57ca2ef2de14af7db28d2e7
679f688a971075794dd3d4ed0a7cbc50931a422f
refs/heads/master
2020-03-20T05:34:58.677201
2018-07-08T03:23:23
2018-07-08T03:23:23
137,219,188
0
0
null
null
null
null
UTF-8
Python
false
false
1,562
py
# -*- coding: utf-8 -*-
# @Time    : 2018/5/29 10:11
# @Author  : LeonHardt
# @File    : predictor_ker.py

import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer, OneHotEncoder
import tensorflow as tf
from keras import Sequential
from keras.layers import Dense, Activation

# data_path = os.getcwd()+"/data_error/"
# x_train = np.loadtxt(data_path+"x_train_error93.txt", delimiter=',')
# y_train = np.loadtxt(data_path+"y_train_error93.txt", delimiter=',')
# x_test = np.loadtxt(data_path+"x_test_error93.txt", delimiter=',')
#
# print("ready")
# im = Imputer(strategy="most_frequent")
# x_train = im.fit_transform(x_train)
# x_test = im.transform(x_test)

data_path = os.getcwd()+"/data/"
x_train = np.loadtxt(data_path+"x_train_most.txt", delimiter=',')
y_train = np.loadtxt(data_path+"y_train_filter.txt", delimiter=',')
x_test = np.loadtxt(data_path+"x_test_a_most.txt", delimiter=',')

enc = OneHotEncoder()
y_train = enc.fit_transform(y_train.reshape(-1, 1))
print(y_train)
print(y_train.shape)

model = Sequential()
model.add(Dense(input_dim=297, units=297, activation='relu'))
# model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# cw = {0: 1, 1: 100}
model.fit(x_train, y_train, epochs=20, batch_size=10000)
prob = model.predict_proba(x_test)
np.savetxt(os.getcwd()+"/prediction/ker200_1_error93_1.txt", prob, delimiter=',')
# model.save("merge_model")
[ "leonhardt427@126.com" ]
leonhardt427@126.com
be3571f979caec5427a8a79884e69c6e57bd6ecf
27c1ba6ed6c1586a348cdcfe26d17be13ae38b72
/scripts/hello_world_pallavisavant.py
c4a681787dd9cfab9a8e69eddfd8f12183f68509
[]
no_license
codewithgauri/HacktoberfestPR2020
4299f2ae8f44b31c6ecbeaefa058fde26327a253
335310f3d81029938d119e15d3f1a131d745d3f2
refs/heads/master
2022-12-30T20:28:41.937632
2020-10-26T06:47:24
2020-10-26T06:47:24
307,281,958
3
1
null
null
null
null
UTF-8
Python
false
false
60
py
#python code to print 'Hello World" :)
print("Hello World")
[ "pallavisavant02@gmail.com" ]
pallavisavant02@gmail.com
b101ab8c3181c1392886b3ff9ddf5ba9a39dd257
7ff1ebbaaccd65665bb0fae19746569c043a8f40
/readcsv.py
28ea6bc771aee30752a72b42cf17a39c26ce3f94
[]
no_license
Mutugiii/bot
1cbc8e8493de5a31b831722c698fd8c7b1f60bf5
3d4ccbf8dbe981e2c363ad6f2774deb2d34ac110
refs/heads/master
2022-12-11T22:04:53.040939
2020-05-20T11:09:54
2020-05-20T11:09:54
247,986,918
0
0
null
2022-12-08T09:33:14
2020-03-17T14:11:54
Python
UTF-8
Python
false
false
77
py
import pandas

dataformat = pandas.read_csv('csv/data.csv')
print(dataformat)
[ "mutugimutuma@gmail.com" ]
mutugimutuma@gmail.com
eeaa16fb67a5f6fb3382537928469d161d2ee20e
985be2d2d979c1d5ffbd6cd73d9da711951e4f1c
/chat/consumers.py
a2ffba5a9b3687a82d2473cd12f884a487b2d806
[]
no_license
sreesh-mallya/django-channels-demo
6a1492c2ffe3a8f37782ced19562c629fa65ee8f
8a3ac7d3e04ecd8c5053009f760d84e3b9415882
refs/heads/master
2021-01-23T22:16:04.353634
2017-09-19T03:44:00
2017-09-19T03:44:00
102,924,295
2
0
null
null
null
null
UTF-8
Python
false
false
1,740
py
import re
import json
import logging

from channels import Group
from channels.sessions import channel_session

from .models import Room


@channel_session
def ws_connect(message):
    # Could use more error handling here
    prefix, label = message['path'].strip('/').split('/')
    try:
        room = Room.objects.get(label=label)
        message.reply_channel.send({"accept": True})  # Accept connection
        print('Room : %s' % room.label)
    except Room.DoesNotExist:
        print('Room with label %s does not exist!' % label)
        return
    Group('chat-' + label).add(message.reply_channel)
    message.channel_session['room'] = room.label
    print(message.keys())


@channel_session
def ws_receive(message):
    # Could use more error handling here
    label = message.channel_session['room']
    try:
        room = Room.objects.get(label=label)
    except Room.DoesNotExist:  # model lookup errors are raised as DoesNotExist
        print('Room with label %s does not exist!' % label)
        return
    # Get text message, and parse to json; throw any errors if any
    try:
        data = json.loads(message['text'])
    except ValueError:
        print('Oops! Your message isn\'t in json!')
        return
    # Make sure data is in proper format, i.e, { 'handle': ... , 'message': ... }
    if set(data.keys()) != {'handle', 'message'}:
        print('Improper message format : %s ', data)
        return
    msg = room.messages.create(handle=data['handle'], message=data['message'])
    response = json.dumps(msg.as_dict())
    Group('chat-' + label).send({'text': response})


@channel_session
def ws_disconnect(message):
    print('disconnecting')
    label = message.channel_session['room']
    Group('chat-' + label).discard(message.reply_channel)
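# Added wiring sketch (not part of the original module; names are assumptions):
# with Channels 1.x, consumers like these are typically registered in a routing
# module along the lines of
#
#     from channels.routing import route
#     channel_routing = [
#         route("websocket.connect", ws_connect),
#         route("websocket.receive", ws_receive),
#         route("websocket.disconnect", ws_disconnect),
#     ]
#
# The actual routing file and any path filters used by this project are not
# shown in this record.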
[ "sreeshsmallya@gmail.com" ]
sreeshsmallya@gmail.com
08d2b69ed9e737d8ee5c1f6d6389ece08b8737c4
74dd16cb3d4181d4b7b0d1bcfa3aa0c3a617548b
/src/utilities/video_metadata.py
1545ef9a99282ca5b7d66e4802fd2bebed2c4ba0
[]
no_license
eliasnieminen/vgs-data-annotation
4b87a6ece64eb83b6d5d43d34825539a1ff27fc6
a6569cb52017b88beffa8c1a1332acd9b340646f
refs/heads/main
2023-07-20T05:13:57.492399
2021-08-31T12:16:19
2021-08-31T12:16:19
398,779,632
0
0
null
null
null
null
UTF-8
Python
false
false
927
py
import math
from typing import Optional, Union, Dict


class VideoMetadata:
    """Metadata object for videos.
    """

    def __init__(self,
                 dur: Optional[Union[None, float]] = None,
                 fps: Optional[Union[None, float]] = None,
                 metadata: Optional[Union[None, Dict]] = None):
        self.dur = dur
        self.framerate = fps
        self.video_metadata = metadata

    def set_duration(self, duration):
        self.dur = duration

    def set_fps(self, fps):
        self.framerate = fps

    def set_video_metadata(self, metadata):
        self.video_metadata = metadata

    @property
    def duration(self):
        return self.dur

    @property
    def fps(self):
        return self.framerate

    @property
    def metadata(self):
        return self.video_metadata

    @property
    def frame_count(self):
        return math.floor(self.framerate * self.duration)
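# Added usage sketch (not part of the original module; the values are made up):
#
#     meta = VideoMetadata(dur=10.0, fps=25.0, metadata={"codec": "h264"})
#     meta.frame_count   # -> 250, i.e. math.floor(25.0 * 10.0)
#     meta.set_fps(30.0)
#     meta.frame_count   # -> 300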
[ "elias.nieminen@tuni.fi" ]
elias.nieminen@tuni.fi
e0aa72eb56790380371681952975423a0c147795
1a856152b3ab65a8a0cc5cbedf0492d1c3716d27
/dropout_acnd_pe_noprior_nochans.py
2eda907f0b1b6f9a40b45eb2cb3e98dbdb1b24e5
[]
no_license
stablum/thesis
272f7f23ad1ad454c9310775b969bb54c84c9ea0
5c06d78322ddd6e1b8c214261ea6e4464a094bad
refs/heads/master
2021-07-23T04:29:28.438657
2018-08-18T18:59:11
2018-08-18T18:59:11
60,299,071
0
0
null
null
null
null
UTF-8
Python
false
false
8,460
py
#!/usr/bin/env python3 import scipy import ipdb import theano from theano import tensor as T import lasagne import numpy as np import random import sys from tqdm import tqdm tqdm.monitor_interval = 0 import ipdb # local imports import movielens import cftools import config import numutils as nu import augmented_types as at import activation_functions import update_algorithms import model_build update =update_algorithms.get_func() adam_shared = lasagne.updates.adam # FIXME: generalize like the 'update' placeholder #g = lambda x:x g_in = activation_functions.get(config.g_in) g_rij = activation_functions.get(config.g_rij) sigma = 1. sigma_u = 100. sigma_v = 1000. chan_out_dim = config.chan_out_dim hid_dim = config.hid_dim #log = print log = lambda *args: print(*args)#None def main(): dataset = movielens.load(config.movielens_which) U,V = cftools.UV_vectors_np(dataset) U_t, U_m, U_v = update_algorithms.adam_for(U) V_t, V_m, V_v = update_algorithms.adam_for(V) def make_predict_to_1(ui,vj): #o_ui,net_ui_params = make_net(ui,config.K,hid_dim,chan_out_dim,"net_u",g_in,g_in) #o_ui.name = "o_ui" #o_vj,net_vj_params = make_net(vj,config.K,hid_dim,chan_out_dim,"net_v",g_in,g_in) #o_vj.name = "o_vj" comb = T.concatenate([ui,vj],axis=1) comb.name = "comb" prediction_det, prediction_lea, net_comb_params, regularizer_term = model_build.make_net(comb,2*chan_out_dim,hid_dim,1,"net_comb",g_in,g_rij) prediction_det.name = "prediction_det" prediction_lea.name = "prediction_lea" return prediction_det, prediction_lea, net_comb_params, regularizer_term def make_predict_to_5(predict_to_1_sym): ret = (predict_to_1_sym * (config.max_rating - 1. )) + 1. return ret def make_objective_term(ui_mb,vj_mb,Rij_mb,predict_to_1_sym,regularizer_term): eij = ( Rij_mb - predict_to_1_sym ) ** 2 ret = 0.5 * 1./(sigma**2) * eij # error term (gaussian centered in the prediction) # 0-mean gaussian prior on the latent feature vector. 
# since this term refers to a specific <ui,vj> tuple, then # the update following the prior quantity has to be divided # by how many terms (error term) contain that vector #coef_u = T.constant(0.5/(dataset.N_compressed * sigma_u),"coef_u") #sqsum_u = T.sum(ui_mb**2,axis=1,keepdims=True) #sqsum_u.name = "sqsum_u" #term_u = coef_u * sqsum_u #term_u.name = "term_u" #ret = ret + term_u #coef_v = T.constant(0.5/(dataset.M_compressed * sigma_v),"coef_v") #sqsum_v = T.sum(vj_mb**2,axis=1,keepdims=True) #sqsum_v.name = "sqsum_v" #term_v = coef_v * sqsum_v #term_v.name = "term_v" #ret = ret + term_v #ret.name = "obj_before_sum" ret = T.sum(ret) # on all axes: cost needs to be a scalar ret.name = "obj_after_sum" if config.regularization_lambda > 0: ret = ret + config.regularization_lambda * regularizer_term ret.name = "obj_with_regularizer" return ret print("creating update functions..") ui_mb_sym = T.fmatrix('ui_mb') vj_mb_sym = T.fmatrix('vj_mb') Rij_mb_sym = T.fmatrix('Rij_mb') t_mb_prev_sym = T.fmatrix('t_mb_prev') t_mb_prev_sym = T.addbroadcast(t_mb_prev_sym,1) m_mb_prev_sym = T.fmatrix('m_mb_prev') v_mb_prev_sym = T.fmatrix('v_mb_prev') predict_to_1_sym_det, predict_to_1_sym_lea, params, regularizer_term = make_predict_to_1(ui_mb_sym,vj_mb_sym) # instead of calculating a different count of latent vectors of each # (other side) latent vector, a global estimate (average) is performed obj_term = make_objective_term(ui_mb_sym,vj_mb_sym,Rij_mb_sym,predict_to_1_sym_lea, regularizer_term) grads_ui = T.grad(obj_term, ui_mb_sym) grads_vj = T.grad(obj_term, vj_mb_sym) grads_params = [ T.grad(obj_term,curr) for curr in params ] updates_kwargs = dict(t_prev=t_mb_prev_sym,m_prev=m_mb_prev_sym,v_prev=v_mb_prev_sym) new_for_ui = list(update(ui_mb_sym,grads_ui,**updates_kwargs)) new_for_vj = list(update(vj_mb_sym,grads_vj,**updates_kwargs)) params_updates = adam_shared(grads_params,params,learning_rate=config.lr_begin) common = [ t_mb_prev_sym,m_mb_prev_sym,v_mb_prev_sym,Rij_mb_sym,ui_mb_sym,vj_mb_sym ] ui_update_fn = theano.function(common,new_for_ui) ui_update_fn.name="ui_update_fn" vj_update_fn = theano.function(common,new_for_vj) vj_update_fn.name="vj_update_fn" params_update_fn = theano.function([Rij_mb_sym,ui_mb_sym,vj_mb_sym],[], updates=params_updates) params_update_fn.name = "params_update_fn" predict_to_5_fn = theano.function([ui_mb_sym,vj_mb_sym], [make_predict_to_5(predict_to_1_sym_det)]) predict_to_5_fn.name="predict_to_5_fn" predict_to_1_fn = theano.function([ui_mb_sym,vj_mb_sym], [predict_to_1_sym_det]) predict_to_1_fn.name="predict_to_1_fn" ui_mb_l = [] vj_mb_l = [] Rij_mb_l = [] U_t_mb_l = [] U_m_mb_l = [] U_v_mb_l = [] V_t_mb_l = [] V_m_mb_l = [] V_v_mb_l = [] indices_mb_l = [] def train_with_datapoint(i,j,Rij,lr): nonlocal indices_mb_l nonlocal ui_mb_l nonlocal vj_mb_l nonlocal Rij_mb_l nonlocal U_t_mb_l nonlocal U_m_mb_l nonlocal U_v_mb_l nonlocal V_t_mb_l nonlocal V_m_mb_l nonlocal V_v_mb_l indices_mb_l.append((i,j)) ui_mb_l.append(U[i]) vj_mb_l.append(V[j]) Rij_mb_l.append(Rij) U_t_mb_l.append(U_t[i]) U_m_mb_l.append(U_m[i]) U_v_mb_l.append(U_v[i]) V_t_mb_l.append(V_t[j]) V_m_mb_l.append(V_m[j]) V_v_mb_l.append(V_v[j]) if len(ui_mb_l) >= config.minibatch_size: ui_mb = np.vstack(ui_mb_l).astype('float32') #print('ui_mb.shape',ui_mb.shape) vj_mb = np.vstack(vj_mb_l).astype('float32') #print('vj_mb.shape',vj_mb.shape) Rij_mb = np.vstack(Rij_mb_l).astype('float32') #print('Rij_mb.shape',Rij_mb.shape) U_t_mb = np.vstack(U_t_mb_l ).astype('float32') #print('U_t_mb.shape',U_t_mb.shape) U_m_mb = 
np.vstack(U_m_mb_l ).astype('float32') #print('U_m_mb.shape',U_m_mb.shape) U_v_mb = np.vstack(U_v_mb_l ).astype('float32') #print('U_v_mb.shape',U_v_mb.shape) V_t_mb = np.vstack(V_t_mb_l ).astype('float32') V_m_mb = np.vstack(V_m_mb_l ).astype('float32') V_v_mb = np.vstack(V_v_mb_l ).astype('float32') Rij_mb = (Rij_mb - 1.) / (config.max_rating - 1.) #log("Rij_mb",Rij_mb) #log("predict_to_1_fn",predict_to_1_fn(ui_mb,vj_mb)) #log("predict_to_5_fn",predict_to_5_fn(ui_mb,vj_mb)) #print("before ui_update_fn, vj_mb.shape=",vj_mb.shape) #print("before ui_update_fn, ui_mb.shape=",ui_mb.shape) new_ui_mb, new_U_t_mb, new_U_m_mb, new_U_v_mb = ui_update_fn( U_t_mb,U_m_mb,U_v_mb,Rij_mb,ui_mb,vj_mb ) #log("ui_mb",ui_mb,"new_ui_mb",new_ui_mb,"diff",ui_mb-new_ui_mb) #print("before vj_update_fn, vj_mb.shape=",vj_mb.shape) #print("before vj_update_fn, ui_mb.shape=",ui_mb.shape) new_vj_mb, new_V_t_mb, new_V_m_mb, new_V_v_mb = vj_update_fn( V_t_mb,V_m_mb,V_v_mb,Rij_mb,ui_mb,vj_mb ) #log("vj_mb",vj_mb,"new_vj_mb",new_vj_mb,"diff",vj_mb-new_vj_mb) for pos,(i,j) in enumerate(indices_mb_l): U[i] = new_ui_mb[pos,:] V[j] = new_vj_mb[pos,:] U_t[i] = new_U_t_mb[pos,:] U_m[i] = new_U_m_mb[pos,:] U_v[i] = new_U_v_mb[pos,:] V_t[j] = new_V_t_mb[pos,:] V_m[j] = new_V_m_mb[pos,:] V_v[j] = new_V_v_mb[pos,:] params_update_fn(Rij_mb,ui_mb,vj_mb) ui_mb_l = [] vj_mb_l = [] Rij_mb_l = [] U_t_mb_l = [] U_m_mb_l = [] U_v_mb_l = [] V_t_mb_l = [] V_m_mb_l = [] V_v_mb_l = [] indices_mb_l = [] print("training pmf...") cftools.mainloop(train_with_datapoint,dataset,U,V,predict_to_5_fn) if __name__=="__main__": main()
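The training loop above rescales ratings from the 1..max_rating scale into [0, 1] (Rij_mb = (Rij_mb - 1.) / (config.max_rating - 1.)) before evaluating the objective, and presumably maps predictions back with make_predict_to_5. A minimal numpy sketch of that rescaling, assuming max_rating = 5; the helper names below are illustrative, not part of the original module:

import numpy as np

MAX_RATING = 5.0  # assumed; the script reads config.max_rating

def to_unit_interval(r):
    # same normalization applied to Rij_mb before the objective is evaluated
    return (np.asarray(r, dtype='float32') - 1.0) / (MAX_RATING - 1.0)

def to_rating_scale(p):
    # inverse mapping back onto the 1..5 rating scale
    return np.asarray(p, dtype='float32') * (MAX_RATING - 1.0) + 1.0

print(to_unit_interval([1, 3, 5]))       # [0.  0.5 1. ]
print(to_rating_scale([0.0, 0.5, 1.0]))  # [1. 3. 5.]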
[ "stablum@gmail.com" ]
stablum@gmail.com
367ec183a847084b29dd59bd79ca5db7e7418f61
fb46511d2fa968e6a2e74a20a67ace59819e15dd
/ProcessedData/trial.py
781f802ca1ea3ca3e8a244cc236a45615f979a0c
[]
no_license
PushA308/QPQCT
6505f18907f8af3d9be24ebf21a01cc6603d657a
6bd99690df213860a4af83f142423e64fa57c34a
refs/heads/master
2020-04-27T05:03:55.061701
2019-03-06T05:02:51
2019-03-06T05:02:51
174,071,885
0
0
null
null
null
null
UTF-8
Python
false
false
8,423
py
import csv import nltk import shutil import os import sys import traceback import win32com.client #pip install pywin32 import nltk #pip install nltk #nltk.download('punkt') #nltk.download('averaged_perceptron_tagger') #include other ntlk packages, if asked for. ########################################## #initialized variables ########################################## question_no_column = 7 start_index = 16 def process_question_paper(ques_paper_path) : fd = open(os.path.join(os.getcwd(),"processed_data.csv"),'w') headers = "Marks, CO_Type, Module No, Question Type, Question No, SUb Que No., Question" fd.write(headers + '\n') for root, dirs,files in os.walk(ques_paper_path) : for file in files: if file.endswith('.xls') : file_path = os.path.join(root,file) try: excel = win32com.client.Dispatch('Excel.Application') workbook = excel.Workbooks.open(file_path) sheet = workbook.WorkSheets('QuestionPaper') for start_row in range(start_index, 50): try: row, col = start_row, question_no_column question = sheet.Cells(row, col).value if question is not None: row, col = start_row, question_no_column + 1 marks = str(sheet.Cells(row, col).value) row, col = start_row, question_no_column + 2 co_type = str(sheet.Cells(row, col).value) row, col = start_row, question_no_column + 4 module_no = str(sheet.Cells(row, col).value) row, col = start_row, question_no_column - 5 question_type = sheet.Cells(row, col).value row, col = start_row, question_no_column - 2 question_no = sheet.Cells(row, col).value row, col = start_row, question_no_column - 1 sub_question_no = sheet.Cells(row, col).value row, col = start_row, question_no_column - 2 question_no = sheet.Cells(row, col).value row, col = start_row, question_no_column question = sheet.Cells(row, col).value print (question+'\n') fd.write(marks + ','+co_type + ',' + module_no + ',' +question_type + ','+ question_no + ',' + sub_question_no + ',' +question + '\n') else: pass except Exception as e: print ("hhj") pass workbook.Close(True) except Exception as e: print ("ERROR") pass fd.close() def extract_verb(sentence): helping_verbs = ['am','are','is','was','were','be','being','been','have','has','had','shall','will','do','does','did','may','might','must','can','could','would','should'] #sentence = "what is meant by data structure. it has been long time. 
i didn't do that" tokens = nltk.word_tokenize(sentence) tagged = nltk.pos_tag(tokens) #print("tagged tokens:-") #print(tagged) length = len(tagged) - 1 #print(length) a = list() i=0 flg=0 for item in tagged: if item[1][0] == 'V': if((item[0] in helping_verbs)!=1): a.append(item[0]) #print(item[0]) flg=1 if flg==0: a.append("N.A") #print(a) with open("File/question_verb.csv","a",newline='') as csvfile: spamwriter = csv.writer(csvfile) spamwriter.writerow(a) return a; #analysis of question paper using verbs def final(k): fs = open("File/level.csv","r") reader = csv.reader(fs) i=0 a=list() b=0 d=0 for row in reader: with open("File/Value_level.csv","a",newline='') as csvfile: spamwriter = csv.writer(csvfile) spamwriter.writerow(max(row)) a.append(max(row)) i=i+1 fs.close() if k==1: res=list(map(int,a)) print(res) p=sum(res) print(i) print(p) l=int(input("input the level from 1-6: ")) k=(p/(i*l))*100 print("Average quality per question is {}".format(k)) fs.close() #calculating verb's level using bloom's taxonomy def calculate_level(line): fs = open("File/bloom verbs.csv","r") reader = csv.reader(fs) #included_cols = [1] a=list() b=list() #row = next(reader) #print(line) flg=0 for word in line: i=1 for row in reader: if word.lower() in row: a.append(i) flg=1 i=i+1 if flg==0: a.append(0) #print(line,a,max(a)) with open("File/level.csv","a",newline='') as csvfile: spamwriter = csv.writer(csvfile) spamwriter.writerow(a) fs.close() def view_table(): f=open("File/your_csv1.csv","r") #reader=csv.reader(f) f1=open("File/question_verb.csv","r") reader1=csv.reader(f1) rows1 = list(reader1) print("-------------------------------------------/n") print(rows1) #for row in reader1: # print(row) included_cols=[0] included_cols1=[1] included_cols2=[2] i=1 def compare_Type(): f1=open("File/bloom_type.csv","r") f2=open("File/Value_level.csv","r") r1 = list(f1) length = len(r1) r2 = list(f2) sum=0 for i in range(length): if r1[i]==r2[i]: k=abs(int(r1[i])-int(r2[i])) sum=sum+k print(chr(ord('A') + k)) else: k=abs(int(r1[i])-int(r2[i])) sum=sum+k print(chr(ord('A') + k)) print("Avg quality per question: "+chr(ord('A')+int(sum/length))) #Start: if __name__ == "__main__" : arg_cnt = len(sys.argv) if arg_cnt > 1: ques_paper_path = sys.argv[1] process_question_paper(ques_paper_path) else: print ("Please provide question paper directory path !") f = open("processed_data.csv","r") reader = csv.reader(f) #out_file = open("File\solution1.csv", "w") #writer = csv.writer(out_file) add=0 included_cols = [2] included_cols1=[0] row = next(reader) for row in reader: content = list(row[i] for i in included_cols) #selecting question content1 = list(row[i] for i in included_cols1) #selecting question type with open("File/bloom_type.csv","a",newline='') as csvfile: spamwriter = csv.writer(csvfile) if content1[0]=="remembering": spamwriter.writerow("1") elif content1[0]=="understanding": spamwriter.writerow("2") elif content1[0]=="applying": spamwriter.writerow("3") elif content1[0]=="analyzing": spamwriter.writerow("4") elif content1[0]=="evaluating": spamwriter.writerow("5") elif content1[0]=="creating": spamwriter.writerow("6") a=extract_verb(content[0]) print(a) calculate_level(a) k=int(input("Select the option for Analysis of Question Paper:\n1.Verbs\n2.Question Type\n3.Course Outcome")) if k==1: final(k) elif k==2: final(k) compare_Type() v=int(input("View Information:\n1.Question Paper\n2.Verbs\n3.Bloom's Level\n")) if v==1: f = open("File/UOS_paper.csv","r") reader = csv.reader(f) for row in reader: print(row) f.close() elif 
v==2: f = open("File/question_verb.csv","r") reader = csv.reader(f) for row in reader: print(row) f.close() elif v==3: f = open("File/Value_level.csv","r") reader = csv.reader(f) for row in reader: print(row) f.close() #print(a) f.close()
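calculate_level above looks each extracted verb up in File/bloom verbs.csv (one row per Bloom level) and final() keeps the maximum level found per question. Those CSVs are not part of this listing, so the sketch below uses a small hard-coded stand-in table purely to illustrate the lookup; the verb lists and names are assumptions, not the project's actual data:

# illustrative stand-in for File/bloom verbs.csv, rows 1..6 = Bloom levels
BLOOM_LEVELS = {
    1: {"define", "list", "state"},              # remembering
    2: {"explain", "describe", "discuss"},       # understanding
    3: {"apply", "solve", "demonstrate"},        # applying
    4: {"analyze", "compare", "differentiate"},  # analyzing
    5: {"evaluate", "justify", "assess"},        # evaluating
    6: {"design", "create", "formulate"},        # creating
}

def question_level(verbs):
    # mirrors the original logic: 0 when no verb matches, otherwise the highest level hit
    hits = [level for level, words in BLOOM_LEVELS.items()
            for verb in verbs if verb.lower() in words]
    return max(hits) if hits else 0

print(question_level(["Explain", "compare"]))  # 4
print(question_level(["N.A"]))                 # 0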
[ "noreply@github.com" ]
PushA308.noreply@github.com
b4430e26ab1dde9f74b12f200a1896351cd2722b
4d65f85fb8fba5a3d6582ccbf9d38042ec1ec422
/代码1/hotdog_war.py
f177dd62bdbf8984d14a17f21671d5aee76aa8df
[]
no_license
qsten/game
f50756d001116f41cfdf7715ee061a3dfa3f9400
86164c3dcec869b85aaa777105c7faf738dd8e1f
refs/heads/master
2020-04-28T10:56:36.871862
2019-05-19T13:18:46
2019-05-19T13:18:46
175,218,974
0
0
null
null
null
null
UTF-8
Python
false
false
2,001
py
import pygame from player import Player from settings import Settings import game_functions as gf from pygame.sprite import Group from game_stats import GameStats from button import Button from scoreboard import Scoreboard from lifeboard import Lifeboard from music_button import Music_button from stop_button import Stop_button from statistics_board import Statistics_board from Restart_button import Restart_button from return_button import Return_button from rank_button import Rank_button def run_game(): #初始化游戏并创建一个屏幕对象 ai_settings=Settings() screen=pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height)) pygame.display.set_caption('hotdog_invasion') play_button=Button(screen) player=Player(ai_settings,screen) hotdogs=Group() bombs=Group() stats=GameStats(ai_settings) sb=Scoreboard(screen,stats) life=Lifeboard(screen,stats) music_button = Music_button(screen) stop_button=Stop_button(screen,stats) restart_button=Restart_button(screen,stats) statistics_board=Statistics_board(screen, stats) return_button=Return_button(screen,stats) rank_button=Rank_button(screen) #开始游戏的主循环 while True: gf.check_events(ai_settings,screen,stats,play_button,player,hotdogs,bombs,music_button,stop_button,restart_button,return_button,rank_button) if stats.game_active: gf.create_hotdogs(ai_settings, screen, hotdogs) gf.create_bombs(ai_settings, screen, bombs) player.update(stats) gf.update_hotdog(ai_settings,stats,sb,player,hotdogs) gf.update_bomb(screen,stats, player, bombs,statistics_board,rank_button) music_button.music_play() gf.update_screen(screen,stats,sb,life,player,hotdogs,bombs,play_button,music_button,stop_button,restart_button,return_button,rank_button) if __name__=='__main__': run_game()
[ "noreply@github.com" ]
qsten.noreply@github.com
0a185392606cd314acb4f13f45994b76855c9a6c
500e5426adf70162cc75ae99be0743129639e4c7
/gathering_server/gathering/apps.py
56de823fb91dd525090a6cd2e82d8a62295c84ae
[]
no_license
L3oNav/gathering_server
8b06ff4f176c6dfe3bc7f5c27bce0c9b4dfae8cb
b708fa831b6b5b227bafebd3ea302bcfa35adc46
refs/heads/main
2023-04-01T02:09:11.083248
2021-02-18T01:54:23
2021-02-18T01:54:23
339,837,793
0
0
null
null
null
null
UTF-8
Python
false
false
345
py
from django.apps import AppConfig from django.utils.translation import gettext_lazy as _ class GatheringConfig(AppConfig): name = "gathering_server.gathering" verbose_name = _("Gathering") def ready(self): try: import gathering_server.gathering.signals # noqa F401 except ImportError: pass
[ "L3oNav@outlook.com" ]
L3oNav@outlook.com
bd21bc1c8fa779e7d91a63e97ee2f3b07852e152
756504caae02535f359baa1bd232038979f5b3b5
/AIA/scanm/apps.py
f211b8310e63fa9572dfb5eceffc97ba91744cd3
[]
no_license
dante993/scantit
b75aac717e68cea25e17a40c44e719c95d0f0376
9a447f02af9f23b433bafdd02de852bd1c4e4d9e
refs/heads/master
2021-01-13T15:04:49.853328
2017-05-29T12:52:54
2017-05-29T12:52:54
79,124,612
0
0
null
null
null
null
UTF-8
Python
false
false
126
py
from __future__ import unicode_literals from django.apps import AppConfig class ScanmConfig(AppConfig): name = 'scanm'
[ "danteejmg@gmail.com" ]
danteejmg@gmail.com
111d16939c63cebf88383cf5a24501665490bbc1
0f7b8d2ae2c0e81941d5ca5fa4c8313cec8d1544
/endApi/migrations/0008_auto_20200904_0734.py
89d799dc142fa35893d751810280051c1fd1bddf
[]
no_license
rajielijah/endpoint
5c061972cb8ab9fc089046dd9e71f194ee6e5aca
6db1d6c92d57fc143446d2c4df13664ffa5b1f2d
refs/heads/master
2022-12-27T23:58:22.232298
2020-10-01T11:46:45
2020-10-01T11:46:45
298,791,670
0
0
null
null
null
null
UTF-8
Python
false
false
407
py
# Generated by Django 3.0.7 on 2020-09-04 07:34 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('endApi', '0007_post_image'), ] operations = [ migrations.AlterField( model_name='post', name='image', field=models.ImageField(blank=True, null=True, upload_to='feedupload'), ), ]
[ "rajielijah@gmail.com" ]
rajielijah@gmail.com
f84e7e892f22dcef23a66020fb69487611bee303
b37769515f7e078e2215be27a76a0ba199f7676e
/home/migrations/0003_remove_blog_slug.py
c37321351241594e4fb6b823fb4bc8ea1c54e86c
[]
no_license
roxna/eproc
15e532a401291505adec086d2c60c78843c9afc6
f22506e2afd005538c21d7bb678649a3736b6feb
refs/heads/master
2022-12-02T20:38:49.674344
2017-03-28T09:44:03
2017-03-28T09:44:03
72,560,527
0
0
null
2022-11-22T01:20:51
2016-11-01T17:38:57
HTML
UTF-8
Python
false
false
377
py
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-11-23 17:51 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('home', '0002_blog_slug'), ] operations = [ migrations.RemoveField( model_name='blog', name='slug', ), ]
[ "roxna.irani@gmail.com" ]
roxna.irani@gmail.com
b588764a31f012d092aa5fbb402b4f34eead4552
1071b46a6ea054a186ab5c270dfdba48362adf70
/Python/Examples/juego_adivina_el_numero.py
5231d40690c74099e97d77b93aef36975ff2fe0d
[]
no_license
diegoldsv/technotes
5aaed2d6ef5037217a0c071b6f7b48b04d89d4fd
6cb0b90001c52438b74da72c02c664164938d7e9
refs/heads/main
2023-05-10T22:08:21.189916
2021-05-31T14:14:13
2021-05-31T14:14:13
351,212,081
0
0
null
null
null
null
UTF-8
Python
false
false
557
py
import random def run(): numero_aleatorio = random.randint(1,100) numero_ingresado = input("Elige un número del 1 al 100: ") numero_ingresado = int(numero_ingresado) while numero_ingresado != numero_aleatorio: if numero_ingresado < numero_aleatorio: print("Busca un número más grande") else: print("Busca un número más pequeño") numero_ingresado = input("Elige otro número: ") numero_ingresado = int(numero_ingresado) print("Ganaste!") if __name__ == "__main__": run()
[ "disalvatorediego@gmail.com" ]
disalvatorediego@gmail.com
ce3bfb840d3411bd2a1255ab453499c357ba459b
f407b21811c8eebbf1c32d6aadc502403d83d048
/problem19.py
edb8fdb3cd28c83eb54faa6fca3eb45a9fee0301
[]
no_license
gnikesh/project-euler
37e95cbc0c82ff54ddb23b89f4f38067ec69d5c8
0d39c7b78fc2e11d2f863e7ae40fb27f93a18fbc
refs/heads/master
2021-08-20T04:53:54.266867
2021-01-20T23:21:00
2021-01-20T23:21:00
87,681,964
0
0
null
null
null
null
UTF-8
Python
false
false
1,484
py
# 1 Jan 1900 was a Monday. # Thirty days has September, # April, June and November. # All the rest have thirty-one, # Saving February alone, # Which has twenty-eight, rain or shine. # And on leap years, twenty-nine. # A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400. # How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? def get_days(): week_days = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"] week_days_num = [i for i in range(1, 8)] months = [i for i in range(1, 13)] month_30_days = [4, 6, 9, 11] month_31_days = [1, 3, 5, 7, 8, 10, 12] month_28_days = [2] cur_day = 1 # 1 Jan 1900 was Monday sundays = 0 for year in range(1900, 2001): for month in range(1, 13): if month in month_30_days: days = 30 elif month in month_31_days: days = 31 elif month in month_28_days: if year % 4 == 0 and not year == 1900: days = 29 else: days = 28 for day in range(1, days + 1): today = week_days[cur_day] if today == "Sun" and day == 1 and year != 1900: sundays += 1 print("Year: ", year, "Month: ", month, "Day: ", day, today) cur_day += 1 cur_day = cur_day % 7 print(sundays) if __name__ == "__main__": get_days()
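The comment block restates the leap-year rule the loop relies on; the result can be cross-checked with Python's own date arithmetic, which already encodes the full Gregorian rule, in a few lines that are independent of the hand-rolled loop above:

from datetime import date

# Sundays falling on the 1st of a month, 1 Jan 1901 .. 31 Dec 2000
sundays = sum(1 for year in range(1901, 2001)
                for month in range(1, 13)
                if date(year, month, 1).weekday() == 6)  # Monday=0 .. Sunday=6
print(sundays)  # 171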
[ "gnikesh03@gmail.com" ]
gnikesh03@gmail.com
cc4189ead66a7efb115d15670bd7e27b82860536
3874a909e3152fda6a87dbb0ef05b18d6908807c
/la/parse_tabs.py
0de026ab72ba5ab67f99b19a08939f52599c51dc
[ "MIT" ]
permissive
FranchuFranchu/la
f5ef3f8d43aec67d84030018278640d91a77dd05
7afa25d3d102f5a0316f5084a46a04e62976991b
refs/heads/master
2020-07-24T07:50:01.756324
2020-04-18T15:49:03
2020-04-18T15:49:03
207,853,053
0
0
null
null
null
null
UTF-8
Python
false
false
1,332
py
# Converts python-style code into JS-style code def tabs_to_codeblocks(d): list_index = 0 code = list(d) at_newline = True current_indentation = 0 this_line_indentation = 0 while list_index < len(code): if at_newline: if code[list_index] in (" ", "\t"): this_line_indentation += 1 else: at_newline = False difference = this_line_indentation - current_indentation if difference > 0: for i in range(difference): code.insert(list_index,"{") list_index += 1 elif difference < 0: for i in range(-difference): code.insert(list_index,"}") list_index += 1 code.insert(list_index, ";") current_indentation = this_line_indentation if not at_newline: if code[list_index] == "\n": at_newline = True this_line_indentation = 0 code.insert(list_index,";") list_index += 1 list_index += 1 # Close indentation again for i in range(current_indentation): code.insert(list_index,"}") list_index += 1 return "".join(code)
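A quick usage example for tabs_to_codeblocks: each increase in leading whitespace opens a '{', each decrease closes the matching '}', and line ends become ';'. The sample source is invented, and the import path is assumed from the repo layout:

from la.parse_tabs import tabs_to_codeblocks  # import path assumed

source = (
    "if x\n"
    "  print(x)\n"
    "  if y\n"
    "    print(y)\n"
    "done\n"
)
print(tabs_to_codeblocks(source))  # same statements, but brace/semicolon delimited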
[ "fff999abc999@gmail.com" ]
fff999abc999@gmail.com
6c16d977d5da188d8203250fd478cfac76c891cc
85c9d6fdff58b9cb40f5fdb9f01ff1a0dd386113
/bot_tests/reminder.py
ef7aa772e1bbf39b40113c0d3d7e94d3036748d1
[]
no_license
jmccormac01/karmafleet
5874644c496b0bbcb2037404ad7ed43a1e4caaae
57ebefbbc6ec3aae634cd9196950f103d48eae95
refs/heads/master
2020-03-25T17:24:39.187176
2019-04-20T18:17:05
2019-04-20T18:17:05
143,976,406
0
0
null
null
null
null
UTF-8
Python
false
false
1,922
py
""" Bot for converting EVE times to local timezones """ from datetime import datetime from pytz import timezone import discord from discord.ext import commands import asyncio # pylint: disable=invalid-name Client = discord.Client() client = commands.Bot(command_prefix="!") reminders = {} async def reminder_handler(reminders): await client.wait_until_ready() while not client.is_closed: broke = False print('Checking reminders...') print(reminders) now = datetime.utcnow() for a in reminders: print('Checking for author {}'.format(a)) for t in reminders[a]: if now > t: print(a, reminders[a][t]) await client.send_message(a, reminders[a][t]) # remove the reminder from the list del reminders[a][t] broke = True break if broke: break await asyncio.sleep(10) @client.event async def on_ready(): """ Simple print to say we're ready """ print('Ready for remembering stuff...') @client.event async def on_message(message): """ Handle incoming messages and convert time requests """ sp = message.content.split() return_message = "" error_count = 0 # check we want time conversion from eve time if len(sp) >= 3 and sp[0].lower() == '!reminder': author = message.author await client.delete_message(message) # split the command up reminder_time = datetime.strptime(sp[1], '%Y-%m-%dT%H:%M') note = ' '.join(sp[2:]) if author not in reminders.keys(): reminders[author] = {} reminders[author][reminder_time] = note print(reminders) client.loop.create_task(reminder_handler(reminders)) client.run('NDk0OTQ2Mzg3ODM5MDI1MTYz.Do66Yw.nsleHS3S8UvbWdBugiDtPWHrIKY')
[ "jmccormac001@gmail.com" ]
jmccormac001@gmail.com
0dca7a66a1da77d96fed23a3f91e8168a80f5e26
0ee64034518898893d495639cb01aa9523789f77
/2018 Materials/Resources/Week 4/RaspberryPi.py
63b9c98778493a0ca93bb145474cbfe01bd4c169
[ "MIT" ]
permissive
Phangster/digital-world-for-normal-humans
31187b47e16d4359fce2ecac2ce7b5c1aa88d909
29a479af2e380bdf691f6487167d0d8edf0ba5ed
refs/heads/master
2020-05-07T19:52:37.655375
2018-12-29T14:32:35
2018-12-29T14:32:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,039
py
import RPi.GPIO as GPIO from time import sleep from firebase import firebase url = "https://internet-of-things-c572e.firebaseio.com/" token = 'qcHr20bWwg1ziqik58l39JD8UlcLLIGa8HJ0DaSC' firebase = firebase.FirebaseApplication(url, token) # Use the BCM GPIO numbers as the numbering scheme. GPIO.setmode(GPIO.BCM) # Use GPIO12, 16, 20 and 21 for the buttons. s1 = 12 s2 = 16 s3 = 20 s4 = 21 switch_list = [12, 16, 20, 21] # Set GPIO numbers in the list: [12, 16, 20, 21] as input with pull-down resistor. movement_list = [] GPIO.setup(switch_list, GPIO.IN, GPIO.PUD_DOWN) done = False while done == False: if GPIO.input(12) == GPIO.HIGH: movement_list.append('left') print('Left added.') sleep(0.1) elif GPIO.input(16) == GPIO.HIGH: movement_list.append('right') print('Right added.') sleep(0.1) elif GPIO.input(20) == GPIO.HIGH: movement_list.append('up') print('Up added.') sleep(0.1) elif GPIO.input(21) == GPIO.HIGH: movement_list.append('done') print('Terminating control, uploading sequence to Firebase.') firebase.put('/','movement_list', movement_list) done = True break while done==True: a=firebase.get('/movement_list') # get the value from node age if a == None: done=False sleep(0.5) # Write your code here ''' We loop through the key (button name), value (gpio number) pair of the buttons dictionary and check whether the button at the corresponding GPIO is being pressed. When the OK button is pressed, we will exit the while loop and write the list of movements (movement_list) to the database. Any other button press would be stored in the movement_list. Since there may be debouncing issue due to the mechanical nature of the buttons, we can address it by putting a short delay between each iteration after a key press has been detected. ''' # Write to database once the OK button is pressed
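The closing docstring describes the intended control loop (iterate over a button-name -> GPIO-number dictionary, store each press, stop and upload on OK, with a short delay as a debounce), but the body is left as "# Write your code here". A minimal sketch of that loop, meant to slot in at that point so it reuses the GPIO, sleep and firebase objects already set up above; the buttons mapping is an assumption, not part of the original file:

buttons = {'left': 12, 'right': 16, 'up': 20, 'ok': 21}  # assumed name -> pin mapping
movements = []
recording = True
while recording:
    for name, pin in buttons.items():
        if GPIO.input(pin) == GPIO.HIGH:
            if name == 'ok':
                firebase.put('/', 'movement_list', movements)  # write once OK is pressed
                recording = False
            else:
                movements.append(name)
            sleep(0.2)  # crude debounce between accepted presses
            break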
[ "thaddeus.phua@gmail.com" ]
thaddeus.phua@gmail.com
a07905b07cfcf4e19974315b9839310a2d8f725c
d4a88b3b102e20e727cae8fbd4167dcb4b57d1ec
/additional_examples/py2exe_setup__basic_test.py
746c5610f04ff4682414ded9ad60376e1f9e3b2d
[ "MIT" ]
permissive
viblo/pymunk
ca64888e45706db431788368ff8464edf2912d5f
20ac14f665fb38b4ef1bef5acea36a3d612dd0d5
refs/heads/master
2023-08-27T16:37:14.740653
2023-08-16T19:26:16
2023-08-16T19:26:16
13,273,472
855
255
MIT
2023-01-13T10:13:47
2013-10-02T14:36:46
Python
UTF-8
Python
false
false
218
py
"""Simple example of py2exe to create a exe of the no_dependencies example. Tested on py2exe 0.13.0.0 on python 3.11 """ import py2exe py2exe.freeze(console=["no_dependencies.py"], options={"includes": ["pymunk"]})
[ "vb@viblo.se" ]
vb@viblo.se
98239088c3b4a53c50df2bc9f8bf239942107bf9
a36d54fb56bc2898089d6ad407bc2039a55271d4
/zdevicemanager/base/tools.py
8385f630bed268e1b477abec92e22fe0662faa58
[]
no_license
zerynth/core-zerynth-toolchain
443e5180d87b3b783c2b3ec69f24918761715b63
d27b0d6ee47b9c4f320f518705074f1032fedf8a
refs/heads/master
2021-07-25T00:28:00.192322
2021-05-17T14:53:20
2021-05-17T14:53:20
122,219,458
0
1
null
null
null
null
UTF-8
Python
false
false
11,153
py
from .base import * from .fs import * from .cfg import * from .pygtrie import * __all__ = ["tools"] class Tools(): def __init__(self): self.tools = {} self.installed = {} def init(self): #register platform tools if env.is_windows(): self.tools["stty"]="mode" elif env.is_linux(): self.tools["stty"]="/bin/stty -F" else: self.tools["stty"]="/bin/stty -f" for tooldir in fs.dirs(env.sys): self.add_tool(tooldir) for tooldir in fs.dirs(fs.path(env.dist,"sys")): self.add_tool(tooldir) ifile = fs.path(env.dist,"installed.json") self.installed = fs.get_json(ifile) def get_package(self,fullname): return env.repo["packs"][env.repo["byname"][fullname]] def get_packages_by_tag(self,tag): idx = env.repo["bytag"][tag] res = set() for i in idx: pack = env.repo["packs"][i] if pack.get("sys") and pack.get("sys")!=env.platform: # skip other platforms continue res.add(pack["fullname"]) return sorted(list(res)) def get_package_deps(self,fullname): try: pack = self.get_package(fullname) except: pack = {} res = [] for dep in pack.get("deps",[]): res.extend(self.get_packages_by_tag(dep)) res = sorted(list(set(res))) return res def has_all_deps(self,fullname): deps = self.get_package_deps(fullname) for fname in deps: if fname not in self.installed: return False return True def get_pack_info(self,packdir): pfiles = [fs.path(packdir,"z.yml"), fs.path(packdir,"package.json")] for pfile in pfiles: if fs.exists(pfile): pkg = fs.get_yaml_or_json(pfile) return pkg return None def add_tool(self,tooldir): if fs.basename(tooldir) in ["browser","newbrowser","newpython"]: # ignore some sys packages return try: pkg = self.get_pack_info(tooldir) if pkg is None: warning("Can't load tool package",tooldir) return else: fullname = pkg["fullname"] toolname = pkg.get("tool") pkg = pkg["sys"] except Exception as e: warning("Can't load tool",tooldir,e) return if toolname: self.tools[toolname]={} addto = self.tools[toolname] else: addto = self.tools if isinstance(pkg,dict): for k,v in pkg.items(): addto[k]=fs.path(env.sys,tooldir,v) elif isinstance(pkg,list) or isinstance(pkg,tuple): for k,v in pkg: addto[k]=fs.path(env.sys,tooldir,v) else: warning("Can't load tool info",tooldir,err=True) #print(self.tools) def get_tool_dir(self,toolname): for tooldir in fs.dirs(env.sys): if fs.basename(tooldir)==toolname: return tooldir for tooldir in fs.dirs(fs.path(env.dist,"sys")): if fs.basename(tooldir)==toolname: return tooldir return None def __getattr__(self,attr): if attr in self.tools: return self.tools[attr] raise AttributeError def __getitem__(self,attr): if attr in self.tools: return self.tools[attr] raise KeyError def get_vm(self,vmuid,version,chipid,target): vmpath = fs.path(env.vms,target,chipid) vmfs = fs.glob(vmpath,"*.vm") vm = None for vmf in vmfs: vmm = fs.basename(vmf) if vmm.startswith(vmuid+"_"+version+"_"): vm=vmf return vm def get_vm_by_uid(self,vmuid): #for root,dirnames,files in os.walk(fs.path(env.vms)): for target in fs.dirs(env.vms): for chid in fs.dirs(fs.path(env.vms,target)): for ff in fs.files(fs.path(env.vms,target,chid)): path_splitted = ff.split('/') ff_ = fs.basename(ff) if ff_.startswith(vmuid+"_"): return fs.path(ff) return None def get_vms(self,target,chipid=None,full_info=False): vms = {} targetpath = fs.path(env.vms,target) if not fs.exists(targetpath): return vms for chid in fs.dirs(targetpath): chid=fs.basename(chid) if chipid and chipid!=chid: continue vmfs = fs.glob(fs.path(targetpath,chid),"*.vm") for vmf in vmfs: vmbf = fs.basename(vmf) rpos = vmbf.rfind("_") #rtos hpos = vmbf.rfind("_",0,rpos-1) #hash 
vpos = vmbf.rfind("_",0,hpos-1) #version vmrtos = vmbf[rpos+1:-3] vmhash = vmbf[hpos+1:rpos] vmversion = vmbf[vpos+1:hpos] vmuid = vmbf[0:vpos] #TODO: add check if full_info: vms[vmuid]=(vmf,vmversion,vmrtos,vmhash) else: vms[vmuid]=vmf return vms def get_vm_by_prefix(self,vmuid): #for root,dirnames,files in os.walk(fs.path(env.vms)): res = [] for target in fs.dirs(env.vms): for chid in fs.dirs(fs.path(env.vms,target)): for ff in fs.files(fs.path(env.vms,target,chid)): path_splitted = ff.split('/') ff_ = fs.basename(ff) if ff_.startswith(vmuid): res.append(fs.path(ff)) return res def _parse_order(self,path): try: order = fs.readfile(fs.path(path,"order.txt")) debug("Can't open order.txt at",path) except: return [] lines = order.split("\n") stack = [] rs = [] for line in lines: line = line.strip() if not line or len(line)<4 or line.startswith(";"): continue pos = line.count("#") if pos>0: label = line[pos:] while (len(stack)>=(pos)): stack.pop() stack.append(label) else: try: ex = { "tag":list(stack), "name":line.replace("_"," "), "path":fs.path(path,line), "desc":fs.readfile(fs.path(path,line,"project.md")), "code":fs.readfile(fs.path(path,line,"main.py")), } rs.append(ex) except: pass return rs def _get_examples(self,path): return self._parse_order(path) def get_examples(self): exs = {} exr = [] srcs = [(fs.path(env.stdlib,"examples"),"core.zerynth.stdlib")] repos = fs.dirs(env.libs) if "official" in repos: #put official on top repos.remove("official") repos = ["official"]+repos for repo in repos: nms = fs.dirs(repo) for nm in nms: libs = fs.dirs(nm) for lib in libs: srcs.append((fs.path(lib,"examples"),"lib."+fs.basename(nm)+"."+fs.basename(lib))) for exlib,lib in srcs: if fs.exists(exlib): ee = self._get_examples(exlib) for eee in ee: eee["lib"]=lib exr.extend(ee) return exr def get_devices(self): bdirs = fs.dirs(env.devices) for bdir in bdirs: try: pkg = self.get_pack_info(bdir) if pkg is None: continue bj = fs.get_json(fs.path(bdir,"device.json")) bj["path"] = bdir bj["deps"] = self.get_package_deps(pkg["fullname"]) bj["has_all_deps"] = self.has_all_deps(pkg["fullname"]) bj["fullname"] = pkg["fullname"] yield bj except Exception as e: warning(e) #load custom devices cdirs = fs.dirs(env.cvm) for cdir in cdirs: if not fs.exists(fs.path(cdir,"active")): #not compiled yet, skip continue try: pkg = self.get_pack_info(bdir) if pkg is None: continue bj = fs.get_json(fs.path(cdir,"device.json")) bj["path"] = cdir bj["deps"] = self.get_package_deps(pkg["fullname"]) bj["has_all_deps"] = self.has_all_deps(pkg["fullname"]) bj["fullname"] = pkg["fullname"] yield bj except Exception as e: warning(e) def get_specs(self,specs): options = {} for spec in specs: pc = spec.find(":") if pc<0: fatal("invalid spec format. 
Give key:value") thespec = spec[pc+1:] if thespec=="null": thespec=None options[spec[:pc]]=thespec return options def get_target(self,target,options={}): import devices _dsc = devices.Discover() return _dsc.get_target(target,options) def get_modules(self): res = {} # libraries rdirs = fs.dirs(env.libs) for r in rdirs: repo = fs.basename(r) nsdirs = fs.dirs(r) for ns in nsdirs: namespace = fs.basename(ns) lbdirs = fs.dirs(ns) for l in lbdirs: lib = fs.basename(l) if repo=="official": if namespace=="zerynth": module = lib else: module = namespace+"."+lib else: module = repo+"."+namespace+"."+lib imports = [] for f in fs.files(l): fl = fs.basename(f) if fl.endswith(".py") and fl!="main.py": imports.append(fl[0:-3]) res[module]=imports return res def get_vhal(self): vhal = {} arch_dirs = fs.dirs(env.vhal) for ad in arch_dirs: fmdirs = fs.dirs(ad) for fm in fmdirs: vhal_file = fs.path(fm,"vhal.json") if fs.exists(vhal_file): vj = fs.get_json(vhal_file) vhal.update(vj) return vhal def disk_usage(self): bytes = fs.dir_size(env.home) return bytes #fs.set_json(rj["data"], fs.path(vmpath,uid+"_"+version+"_"+rj["data"]["hash_features"]+"_"+rj["data"]["rtos"]+".vm")) tools = Tools() # add_init(tools.init)
[ "dev@zerynth.com" ]
dev@zerynth.com
24c2e84b37749a34542141af25758a0b77c195ba
e5ee01bde67fed16b890023cdc33b3294e7acb6d
/python/path_search_stripped/a_star.py
dc833d9ba96cf55f58739839921c943c153c83a2
[]
no_license
dragonfi/a_star_examples
f8ca1494d49abf5170d52408e9efa6179b36b002
a6c43ca4b5f135bbaa848fcc45e74922dc174286
refs/heads/master
2020-06-19T15:07:09.298508
2019-08-06T15:02:56
2019-08-06T15:03:06
196,756,037
0
0
null
null
null
null
UTF-8
Python
false
false
2,063
py
from collections import OrderedDict, namedtuple from .graph import Graph class Path(): def __init__(self, weight, nodes): self.weight = weight self.nodes = nodes @property def dest(self): return self.nodes[-1] @property def source(self): return self.nodes[0] AStarResult = namedtuple("AStarResult", "path explored candidates") class AStar(): def __init__(self, graph , heuristic ) : self._graph = graph self._heuristic = heuristic def shortest_path(self, source , dest ) : return self.shortest_path_with_metadata(source, dest).path def shortest_path_with_metadata(self, source , dest ) : def candidate_sorting_key(candidate ) : node, path = candidate node_data = self._graph.get_node_data(node) dest_data = self._graph.get_node_data(dest) return path.weight + self._heuristic(node_data, dest_data) explored = {} candidates = OrderedDict({source: Path(0, [source])}) while candidates: candidates = OrderedDict(sorted(candidates.items(), key=candidate_sorting_key)) node, path = candidates.popitem(last=False) if node == dest: return AStarResult(path, explored, candidates) if node not in explored.keys() or explored[node].weight > path.weight: explored[node] = path new_candidates = { edge.dest: Path(path.weight + edge.weight, path.nodes + [edge.dest]) for edge in self._graph.edges_from(node) if edge.dest not in explored.keys()} for key, value in new_candidates.items(): if key not in candidates.keys() or candidates[key].weight > value.weight: candidates[key] = value return AStarResult(None, explored, candidates)
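A small usage sketch for AStar. The real Graph comes from the sibling graph module whose constructor isn't shown here, so this stub fakes the two methods the search relies on (edges_from and get_node_data); the edges, node names and the zero heuristic (which makes A* behave like Dijkstra) are all invented for illustration:

from collections import namedtuple
# from path_search_stripped.a_star import AStar  # import path depends on package layout

Edge = namedtuple('Edge', 'dest weight')

class FakeGraph:
    # minimal stand-in exposing only what AStar calls
    def __init__(self, edges):
        self._edges = edges
    def edges_from(self, node):
        return self._edges.get(node, [])
    def get_node_data(self, node):
        return node  # no coordinates, so the heuristic below ignores it

graph = FakeGraph({
    'A': [Edge('B', 1), Edge('C', 4)],
    'B': [Edge('C', 1), Edge('D', 5)],
    'C': [Edge('D', 1)],
})

astar = AStar(graph, heuristic=lambda a, b: 0)  # zero heuristic == Dijkstra
path = astar.shortest_path('A', 'D')
print(path.weight, path.nodes)  # 3 ['A', 'B', 'C', 'D']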
[ "david.gabor.bodr@gmail.com" ]
david.gabor.bodr@gmail.com
4c2ce9f4572cc0369d582cfe65ef86a9f3d7106a
e9530da3f17f990a3fade9c8c442ad3fbb4befc4
/test.py
3b60dca7cebd68376caafb431dc6465cd28133bf
[]
no_license
mattyhempstead/syncs-hack-2020
9796565c03560c76f0a4402ded1a536f0f3f7fc8
6e3d69070dad3228ed8bed3eb805dc090d52b56f
refs/heads/master
2022-12-08T10:23:49.395788
2020-08-30T01:00:35
2020-08-30T01:00:35
290,966,904
5
0
null
2020-08-29T11:27:44
2020-08-28T06:17:23
JavaScript
UTF-8
Python
false
false
490
py
import binascii text = "https://www.google.com/" binary_conversion = bin(int.from_bytes(text.encode(), 'big')) binary_conversion = binary_conversion[2:] time = 0.5 sound_array = [] for count,i in enumerate(binary_conversion): if count%8 == 0: sound_array.append(0) base_one = 220 base_two = 440 else: base_one = 320 base_two = 550 if i == '0': sound_array.append(base_one) else: sound_array.append(base_two)
[ "pranav.alavandi" ]
pranav.alavandi
9881b96519fce86f61a5ee3cb7a611005b646983
0d2af397b900fddad3d532a9f772f70473886cf5
/tickets/urls.py
0e2d8c65e60ed82fb02ab25f58af4e4c1d190634
[]
no_license
RobertUJ/Omaha
cc779b06e42c08ebadae0b8df4e006ad67d504d1
650d5e1e5550bf772f1817e16505c574f361bae0
refs/heads/master
2016-08-12T13:51:32.262876
2016-02-12T00:51:52
2016-02-12T00:51:52
49,794,851
0
0
null
2016-01-22T00:04:29
2016-01-16T23:12:39
Python
UTF-8
Python
false
false
264
py
from django.conf.urls import patterns, url from tickets.views import TicketsIndexView, AddTicketView urlpatterns = [ url(r'^tickets/$', TicketsIndexView.as_view(), name='TicketsView'), url(r'^addticket/$', AddTicketView.as_view(), name='AddTicketView'), ]
[ "erickhp12@gmail.com" ]
erickhp12@gmail.com
5a1a67ef9e36c7013d262a0ec9e876fcec96d9c0
75a0e169a7b45a95b5d0de639b12ae2b601af236
/worker.py
94d496b3e2f01dd00dc3ff71faa400d71db56822
[]
no_license
john-peterson/goodreads
65722ef88f66c1ff00a22f308b2497c03cf44a5e
0cf6d294cef6d7d4e1e4526ae02777d206f19ca3
refs/heads/master
2021-01-20T21:31:49.690764
2012-12-22T06:50:56
2012-12-22T06:50:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
18,895
py
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2011, Grant Drake <grant.drake@gmail.com>' __docformat__ = 'restructuredtext en' import socket, re, datetime from collections import OrderedDict from threading import Thread from lxml.html import fromstring, tostring from calibre.ebooks.metadata.book.base import Metadata from calibre.library.comments import sanitize_comments_html from calibre.utils.cleantext import clean_ascii_chars import calibre_plugins.goodreads.config as cfg class Worker(Thread): # Get details ''' Get book details from Goodreads book page in a separate thread ''' def __init__(self, url, result_queue, browser, log, relevance, plugin, timeout=20): Thread.__init__(self) self.daemon = True self.url, self.result_queue = url, result_queue self.log, self.timeout = log, timeout self.relevance, self.plugin = relevance, plugin self.browser = browser.clone_browser() self.cover_url = self.goodreads_id = self.isbn = None def run(self): try: self.get_details() except: self.log.exception('get_details failed for url: %r'%self.url) def get_details(self): try: self.log.info('Goodreads book url: %r'%self.url) raw = self.browser.open_novisit(self.url, timeout=self.timeout).read().strip() except Exception as e: if callable(getattr(e, 'getcode', None)) and \ e.getcode() == 404: self.log.error('URL malformed: %r'%self.url) return attr = getattr(e, 'args', [None]) attr = attr if attr else [None] if isinstance(attr[0], socket.timeout): msg = 'Goodreads timed out. Try again later.' self.log.error(msg) else: msg = 'Failed to make details query: %r'%self.url self.log.exception(msg) return raw = raw.decode('utf-8', errors='replace') #open('c:\\goodreads.html', 'wb').write(raw) if '<title>404 - ' in raw: self.log.error('URL malformed: %r'%self.url) return try: root = fromstring(clean_ascii_chars(raw)) except: msg = 'Failed to parse goodreads details page: %r'%self.url self.log.exception(msg) return try: # Look at the <title> attribute for page to make sure that we were actually returned # a details page for a book. If the user had specified an invalid ISBN, then the results # page will just do a textual search. 
title_node = root.xpath('//title') if title_node: page_title = title_node[0].text_content().strip() if page_title is None or page_title.find('search results for') != -1: self.log.error('Failed to see search results in page title: %r'%self.url) return except: msg = 'Failed to read goodreads page title: %r'%self.url self.log.exception(msg) return errmsg = root.xpath('//*[@id="errorMessage"]') if errmsg: msg = 'Failed to parse goodreads details page: %r'%self.url msg += tostring(errmsg, method='text', encoding=unicode).strip() self.log.error(msg) return self.parse_details(root) def parse_details(self, root): try: goodreads_id = self.parse_goodreads_id(self.url) except: self.log.exception('Error parsing goodreads id for url: %r'%self.url) goodreads_id = None try: (title, series, series_index) = self.parse_title_series(root) except: self.log.exception('Error parsing title and series for url: %r'%self.url) title = series = series_index = None try: authors = self.parse_authors(root) except: self.log.exception('Error parsing authors for url: %r'%self.url) authors = [] if not title or not authors or not goodreads_id: self.log.error('Could not find title/authors/goodreads id for %r'%self.url) self.log.error('Goodreads: %r Title: %r Authors: %r'%(goodreads_id, title, authors)) return mi = Metadata(title, authors) if series: mi.series = series mi.series_index = series_index mi.set_identifier('goodreads', goodreads_id) self.goodreads_id = goodreads_id try: isbn = self.parse_isbn(root) if isbn: self.isbn = mi.isbn = isbn except: self.log.exception('Error parsing ISBN for url: %r'%self.url) try: mi.rating = self.parse_rating(root) except: self.log.exception('Error parsing ratings for url: %r'%self.url) try: mi.comments = self.parse_comments(root) except: self.log.exception('Error parsing comments for url: %r'%self.url) try: self.cover_url = self.parse_cover(root) except: self.log.exception('Error parsing cover for url: %r'%self.url) mi.has_cover = bool(self.cover_url) try: tags = self.parse_tags(root) if tags: mi.tags = tags except: self.log.exception('Error parsing tags for url: %r'%self.url) try: mi.publisher, mi.pubdate = self.parse_publisher_and_date(root) except: self.log.exception('Error parsing publisher and date for url: %r'%self.url) mi.source_relevance = self.relevance if self.goodreads_id: if self.isbn: self.plugin.cache_isbn_to_identifier(self.isbn, self.goodreads_id) if self.cover_url: self.plugin.cache_identifier_to_cover_url(self.goodreads_id, self.cover_url) self.plugin.clean_downloaded_metadata(mi) self.result_queue.put(mi) def parse_goodreads_id(self, url): return re.search('/show/(\d+)', url).groups(0)[0] def parse_title_series(self, root): title_node = root.xpath('//div[@id="metacol"]/h1[@id="bookTitle"]') if not title_node: return (None, None, None) title_text = title_node[0].text_content().strip() if title_text.find('(') == -1: return (title_text, None, None) # Contains a Title and possibly a series. 
Possible values currently handled: # "Some title (Omnibus)" # "Some title (#1-3)" # "Some title (Series #1)" # "Some title (Series (digital) #1)" # "Some title (Series #1-5)" # "Some title (NotSeries #2008 Jan)" # "Some title (Omnibus) (Series #1)" # "Some title (Omnibus) (Series (digital) #1)" # "Some title (Omnibus) (Series (digital) #1-5)" text_split = title_text.rpartition('(') title = text_split[0] series_info = text_split[2] hash_pos = series_info.find('#') if hash_pos <= 0: # Cannot find the series # in expression or at start like (#1-7) # so consider whole thing just as title title = title_text series_info = '' else: # Check to make sure we have got all of the series information series_info = series_info[:len(series_info)-1] #Strip off trailing ')' while series_info.count(')') != series_info.count('('): title_split = title.rpartition('(') title = title_split[0].strip() series_info = title_split[2] + '(' + series_info if series_info: series_partition = series_info.rpartition('#') series_name = series_partition[0].strip() if series_name.endswith(','): series_name = series_name[:-1] series_index = series_partition[2].strip() if series_index.find('-'): # The series is specified as 1-3, 1-7 etc. # In future we may offer config options to decide what to do, # such as "Use start number", "Use value xxx" like 0 etc. # For now will just take the start number and use that series_index = series_index.partition('-')[0].strip() try: return (title.strip(), series_name, float(series_index)) except ValueError: # We have a series index which isn't really a series index title = title_text return (title.strip(), None, None) def parse_authors(self, root): get_all_authors = cfg.plugin_prefs[cfg.STORE_NAME][cfg.KEY_GET_ALL_AUTHORS] if get_all_authors: author_node = root.xpath('//div[@id="metacol"]/div[@id="bookAuthors"]/a[@class="authorName"]/span[@itemprop="name"]') if author_node: authors = [] for author_value in author_node: author = tostring(author_value, method='text', encoding=unicode).strip() # If multiple authors with some as editors can result in a trailing , to remove if author[-1:] == ',': author = author[:len(author)-1] authors.append(author) return authors else: # We need to more carefully look at the authors to only bring them in if: # 1. They have no author type specified # 2. They have an author type of 'Goodreads Author' # 3. 
There are no authors from 1&2 and they have an author type of 'Editor' div_authors = root.xpath('//div[@id="metacol"]/div[@id="bookAuthors"]') if not div_authors: return authors_html = tostring(div_authors[0], method='text', encoding=unicode).replace('\n','').strip() if authors_html.startswith('by'): authors_html = authors_html[2:] authors_type_map = OrderedDict() for a in authors_html.split(','): author = a.strip() if author.startswith('more...'): author = author[7:] elif author.endswith('...less'): author = author[:-7] author_parts = author.strip().split('(') if len(author_parts) == 1: authors_type_map[author_parts[0]] = '' else: authors_type_map[author_parts[0]] = author_parts[1][:-1] # At this point we have a dict of authors with their contribution if any in values authors = [] valid_contrib = None for a, contrib in authors_type_map.iteritems(): if not contrib or contrib == 'Goodreads Author': authors.append(a) elif len(authors) == 0: authors.append(a) valid_contrib = contrib elif contrib == valid_contrib: authors.append(a) else: break return authors def parse_rating(self, root): rating_node = root.xpath('//div[@id="metacol"]/div[@id="bookMeta"]/span[@class="value rating"]/span') if rating_node: rating_text = tostring(rating_node[0], method='text', encoding=unicode) rating_text = re.sub('[^0-9]', '', rating_text) rating_value = float(rating_text) if rating_value >= 100: return rating_value / 100 return rating_value def parse_comments(self, root): # Look for description in a second span that gets expanded when interactively displayed [@id="display:none"] description_node = root.xpath('//div[@id="metacol"]/div[@id="description"]/span') if description_node: desc = description_node[0] if len(description_node) == 1 else description_node[1] less_link = desc.xpath('a[@class="actionLinkLite"]') if less_link is not None and len(less_link): desc.remove(less_link[0]) comments = tostring(desc, method='html', encoding=unicode).strip() while comments.find(' ') >= 0: comments = comments.replace(' ',' ') comments = sanitize_comments_html(comments) return comments def parse_cover(self, root): imgcol_node = root.xpath('//div[@id="imagecol"]/a/img/@src') if imgcol_node: img_url = imgcol_node[0] # Unfortunately Goodreads sometimes have broken links so we need to do # an additional request to see if the URL actually exists info = self.browser.open_novisit(img_url, timeout=self.timeout).info() if int(info.getheader('Content-Length')) > 1000: return img_url else: self.log.warning('Broken image for url: %s'%img_url) def parse_isbn(self, root): isbn_node = root.xpath('//div[@id="metacol"]/div[@id="details"]/div[@class="buttons"]/div[@id="bookDataBox"]/div/div') if isbn_node: id_type = tostring(isbn_node[0], method='text', encoding=unicode).strip() if id_type == 'ISBN': isbn10_data = tostring(isbn_node[1], method='text', encoding=unicode).strip() isbn13_pos = isbn10_data.find('ISBN13:') if isbn13_pos == -1: return isbn10_data[:10] else: return isbn10_data[isbn13_pos+8:isbn13_pos+21] elif id_type == 'ISBN13': # We have just an ISBN13, without an ISBN10 return tostring(isbn_node[1], method='text', encoding=unicode).strip() def parse_publisher_and_date(self, root): publisher = None pub_date = None publisher_node = root.xpath('//div[@id="metacol"]/div[@id="details"]/div[2]') if publisher_node: # Publisher is specified within the div above with variations of: # Published December 2003 by Books On Tape <nobr class="greyText">(first published 1982)</nobr> # Published June 30th 2010 # Note that the date could be 
"2003", "December 2003" or "December 10th 2003" publisher_node_text = tostring(publisher_node[0], method='text', encoding=unicode) # See if we can find the publisher name pub_text_parts = publisher_node_text.partition(' by ') if pub_text_parts[2]: publisher = pub_text_parts[2].strip() if '(first' in publisher: # The publisher name is followed by (first published xxx) so strip that off publisher = publisher.rpartition('(first')[0].strip() # Now look for the pubdate. There should always be one at start of the string pubdate_text_match = re.search('Published[\n\s]*([\w\s]+)', pub_text_parts[0].strip()) pubdate_text = None if pubdate_text_match is not None: pubdate_text = pubdate_text_match.groups(0)[0] # If we have a first published section of text use that for the date. if '(first' in publisher_node_text: # For the publication date we will use first published date # Note this date could be just a year, or it could be monthname year pubdate_text_match = re.search('.*\(first published ([\w\s]+)', publisher_node_text) if pubdate_text_match is not None: first_pubdate_text = pubdate_text_match.groups(0)[0] if pubdate_text and first_pubdate_text[-4:] == pubdate_text[-4:]: # We have same years, use the first date as it could be more accurate pass else: pubdate_text = first_pubdate_text if pubdate_text: pub_date = self._convert_date_text(pubdate_text) return (publisher, pub_date) def parse_tags(self, root): # Goodreads does not have "tags", but it does have Genres (wrapper around popular shelves) # We will use those as tags (with a bit of massaging) genres_node = root.xpath('//div[@class="stacked"]/div/div/div[contains(@class, "bigBoxContent")]/div/div') if genres_node: genre_tags = list() for genre_node in genres_node: sub_genre_nodes = genre_node.xpath('a') genre_tags_list = [sgn.text_content().strip() for sgn in sub_genre_nodes] if genre_tags_list: genre_tags.append(' > '.join(genre_tags_list)) calibre_tags = self._convert_genres_to_calibre_tags(genre_tags) if len(calibre_tags) > 0: return calibre_tags def _convert_genres_to_calibre_tags(self, genre_tags): # for each tag, add if we have a dictionary lookup calibre_tag_lookup = cfg.plugin_prefs[cfg.STORE_NAME][cfg.KEY_GENRE_MAPPINGS] calibre_tag_map = dict((k.lower(),v) for (k,v) in calibre_tag_lookup.iteritems()) tags_to_add = list() for genre_tag in genre_tags: tags = calibre_tag_map.get(genre_tag.lower(), None) if tags: for tag in tags: if tag not in tags_to_add: tags_to_add.append(tag) return list(tags_to_add) def _convert_date_text(self, date_text): # Note that the date text could be "2003", "December 2003" or "December 10th 2003" year = int(date_text[-4:]) month = 1 day = 1 if len(date_text) > 4: text_parts = date_text[:len(date_text)-5].partition(' ') month_name = text_parts[0] # Need to convert the month name into a numeric value # For now I am "assuming" the Goodreads website only displays in English # If it doesn't will just fallback to assuming January month_dict = {"January":1, "February":2, "March":3, "April":4, "May":5, "June":6, "July":7, "August":8, "September":9, "October":10, "November":11, "December":12} month = month_dict.get(month_name, 1) if len(text_parts[2]) > 0: day = int(re.match('([0-9]+)', text_parts[2]).groups(0)[0]) from calibre.utils.date import utc_tz return datetime.datetime(year, month, day, tzinfo=utc_tz)
[ "john.peterson3@hotmail.com" ]
john.peterson3@hotmail.com
359f9c86575cbc6401fa831c42183d3cd110679b
9d278285f2bc899ac93ec887b1c31880ed39bf56
/ondoc/account/migrations/0103_merge_20190905_1609.py
16fb7e5557b47e3db32cd6549c06ffb2218de131
[]
no_license
ronit29/docprime
945c21f8787387b99e4916cb3ba1618bc2a85034
60d4caf6c52a8b70174a1f654bc792d825ba1054
refs/heads/master
2023-04-01T14:54:10.811765
2020-04-07T18:57:34
2020-04-07T18:57:34
353,953,576
0
0
null
null
null
null
UTF-8
Python
false
false
273
py
# Generated by Django 2.0.5 on 2019-09-05 10:39 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('account', '0100_auto_20190902_1653'), ('account', '0102_auto_20190903_1950'), ] operations = [ ]
[ "ankit.s@policybazaar.com" ]
ankit.s@policybazaar.com
41dfb043debbb31d564d9bdcdda0dd997a4a98a5
dca5705c291da76cbfaf3897680eb0ae2eb56e2b
/aayushg_assgn/myauth/views.py
face35c4566395dead6248d30c8430cf8b2fedf8
[]
no_license
gadia-aayush/Django-API-1
41a40598653009def8ca5bda9a578a26b8bf9115
307202ad0aa4357408e756cd74f3723e74fca253
refs/heads/master
2022-12-13T23:09:45.960562
2020-08-30T19:36:16
2020-08-30T19:36:16
273,763,155
0
0
null
null
null
null
UTF-8
Python
false
false
5,723
py
from django.shortcuts import render from django.contrib.auth.models import User from django.shortcuts import redirect from django.contrib.auth import authenticate, login, logout from django.http import JsonResponse from rest_framework.response import Response from rest_framework.views import APIView from rest_framework.decorators import api_view from rest_framework import views from rest_framework.authtoken.models import Token from rest_framework.permissions import IsAuthenticated import re def user_login(request): if request.method == 'POST': username = request.POST.get('phone') password = request.POST.get('password') user = authenticate(username = username, password = password) if user : if user.is_active: login(request,user) data = {"code" : 200, "status" : "OK", "message" : "LogIn Successfull"} return JsonResponse(data) else: data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"} return JsonResponse(data) else: data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"} return JsonResponse(data) else: return render(request,'login.html') # Django Rest Framework used class logout(APIView): permission_classes = (IsAuthenticated,) def get(self, request): user = request.user token = Token.objects.get(user=user) if token: token.delete() data = {"code" : 200, "status" : "OK", "message" : "Log Out Successfull"} return Response(data) def user_signup(request): if request.method == 'POST': username = request.POST.get('phone') password = request.POST.get('password') name = request.POST.get('name') email = request.POST.get('email') #validate whether the phone number is registered or not try: if User.objects.get(username = username): data = {"code" : 403, "status" : "Forbidden", "message" : "Entered Mobile Number is already registered. Try loggin-in"} return JsonResponse(data) except: pass #validate mobile number [must be 10 digits. 
assumed that all are of India, so ignored prefixed country codes] phoneregex = re.compile(r'^[1-9]\d{9}$') if phoneregex.search(str(username)): pass else: data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Mobile Number should be of 10 digits- ^[1-9]\d{9}$"} return JsonResponse(data) #validate name, making sure it is not empty firstregex = re.compile(r"^[A-Za-z][A-Za-z,.'].*$") if firstregex.search(str(name)): pass else: data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Name should start with an alphabet- ^[A-Za-z][A-Za-z,.']*$"} return JsonResponse(data) #validate email address emailregex = re.compile(r"^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$") if str(email) != "": if emailregex.search(str(email)): pass else: data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Enter a valid email address- ^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$"} return JsonResponse(data) #validate password passregex = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$") if passregex.search(str(password)): pass else: data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Password should be between 8 to 15 characters which contain at least one lowercase letter, one uppercase letter, one numeric digit, and one special character- ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$"} return JsonResponse(data) authobj = User.objects.create_user(username = username, password = password, first_name = name, email = email) authobj.save() data = {"code" : 201, "status" : "Created", "message" : "Sign-Up Successfull"} return JsonResponse(data) else: return render(request,'user_signup.html') # Django Rest Framework used @api_view(['POST', ]) def get_token(request): if request.method == 'POST': username = request.data.get('phone') password = request.data.get('password') user = authenticate(username = username, password = password) if user : if user.is_active: tokened = Token.objects.filter(user=user) data = {} if tokened.count()>0: data["code"] = 200 data["status"] = "OK" data["message"] = "Token already Exists" data["phone"] = username data["Token"] = tokened[0].key return Response(data) else: token = Token.objects.create(user=user) data["code"] = 201 data["status"] = "Created" data["message"] = "Token Created" data["Token"] = token.key data["phone"] = username return Response(data) else: data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"} return Response(data) else: data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"} return Response(data)
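The sign-up view's rules live in the regexes echoed back in its error messages. A quick standalone check of the password rule (8-15 characters with at least one lowercase letter, one uppercase letter, one digit and one special character, and no whitespace) against a few invented sample values:

import re

password_re = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$")

for pw in ("Abcdef1!", "abcdefg1", "Abc 1!de", "A1!a"):
    print(pw, bool(password_re.search(pw)))
# Abcdef1! True   (meets every rule)
# abcdefg1 False  (no uppercase letter, no special character)
# Abc 1!de False  (contains whitespace)
# A1!a     False  (shorter than 8 characters)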
[ "gadia.aayush@gmail.com" ]
gadia.aayush@gmail.com
0c57b23ce2e57693a0fa07b8ddd2d25521f90145
c6a101547c2b7f36fe83a725974a8a7f02cf176d
/data_structures/binary_trees/flip_tree.py
20c8cbf5f563689f2b9a252bd664a6b22b2a1b23
[ "MIT" ]
permissive
prabhupant/python-ds
737cc35574de5c2ece0f0813cf00775324a8dbe7
f7d6d78fedaf84b7527965bb1798b7a8da989474
refs/heads/master
2023-08-22T05:04:22.937675
2022-10-04T01:29:39
2022-10-04T01:29:39
199,366,418
2,325
704
MIT
2022-10-10T13:01:10
2019-07-29T02:48:57
Python
UTF-8
Python
false
false
643
py
# Flip a tree such like here # https://www.geeksforgeeks.org/flip-binary-tree/ # Flipping subtree algorithm # 1. root->left->left = root->right # 2. root->left->right = root # 3. root->left = NULL # 4. root->right = NULL class Node: def __init__(self, val): self.val = val self.left = None self.right = None def flip_tree(root): if root is None: return root if root.left is None and root.right is None: return root flipped_root = flip_tree(root.left) root.left.left = root.right root.left.right = root root.left = None root.right = None return flipped_root
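A small usage example, assuming Node and flip_tree from this module are in scope: after the flip the old left child becomes the new root, the old right child becomes its left child, and the old root becomes its right child, matching the algorithm steps above:

root = Node(1)
root.left = Node(2)
root.right = Node(3)

new_root = flip_tree(root)
print(new_root.val)                           # 2 (the old left child is the new root)
print(new_root.left.val, new_root.right.val)  # 3 1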
[ "noreply@github.com" ]
prabhupant.noreply@github.com
ab04985a81690a29fc99f93e08d4a4ec4e364ad5
847273de4b1d814fab8b19dc651c651c2d342ede
/.history/Sudoku_II_004_20180618143456.py
c999da2e6ae97112548cc81b5e4e3de4c117dc62
[]
no_license
Los4U/sudoku_in_python
0ba55850afcffeac4170321651620f3c89448b45
7d470604962a43da3fc3e5edce6f718076197d32
refs/heads/master
2020-03-22T08:10:13.939424
2018-07-04T17:21:13
2018-07-04T17:21:13
139,749,483
0
1
null
null
null
null
UTF-8
Python
false
false
3,415
py
from random import randint # Sudoku1 almost solved sudoku1 = [ [5, 9, 8, 6, 1, 2, 3, 4, 7], [2, 1, 7, 9, 3, 4, 8, 6, 5], [6, 4, 3, 5, 8, 7, 1, 2, 9], [1, 6, 5, 4, 9, 8, 2, 7, 3], [3, 2, 9, 7, 6, 5, 4, 1, 8], [7, 8, 4, 3, 2, 1, 5, 9, 6], [8, 3, 1, 2, 7, 6, 9, 5, 4], [4, 7, 2, 8, 5, 9, 6, 3, 1], [9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2] ] # Sudoku 2 almost solved # row1 = [9,8,7,4,3,2,5,6,1] # row2 = [2,4,3,5,1,6,8,7,9] # row3 = [5,6,1,7,9,8,4,3,2] # row4 = [3,9,5,6,4,7,2,1,8] # row5 = [8,2,4,3,5,1,6,9,7] # row6 = [1,7,6,2,8,9,3,4,5] # row7 = [7,1,2,8,6,3,9,5,4] # row8 = [4,3,8,9,7,5,1,2,6] # row9 = [' ',5,' ',' ',2,' ',7,' ',' '] def printSudoku(): i = 0 while i < 10: if i == 0: print(" 1 2 3 4 5 6 7 8 9") print(" -------------------------") elif i == 3 or i == 6 or i == 9: print(" -------------------------") spaceBar = "|" if i < 9: print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1)) i = i + 1 while True: # prints Sudoku until is solved print("Your sudoku to solve:") printSudoku() print("Input 3 numbers in format a b c, np. 4 5 8") print(" a - row number") print(" b - column number ") print(" c - value") # vprint(" r - reset chart to start\n ") x = input("Input a b c: ") print("") numbers = " 0123456789" # conditions of entering the numbers ! if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or ( str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "): if x == "r": # reset print(" Function reset() will be ready in Next Week") else: print("Error - wrong number format \n ") continue sudoku1[int(x[0])-1][int(x[2])-1] = x[4] try: i = 0 for item in sudoku1: if sum(item) == 45: i = i + 1 if i == 9: print("YOU WIN") break except TypeError: print() ''' print(" ") print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ") print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ") print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ") print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ") print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ") print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ") print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ") print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ") print("To start game input:") print(" r - to load random puzzle:") print(" 1 - to load chart nr 1:") print(" 2 - to load chart nr 2:") print(" 3 - to load chart nr 3:") choice = input("Input here: ") if choice == "R" or choice == "r": sudoku_number = randint(0, 1) rows_fill(sudoku_number) elif int(choice) == 1: rows_fill(0) elif int(choice) == 2: rows_fill(1) elif int(choice) == 3: rows_fill(0) '''
[ "inz.kamil.wos@gmail.com" ]
inz.kamil.wos@gmail.com
ac9c2a9ef0b1cf9f39976b219335f1e2257893fc
d4c2846af2194e8463bff02a9ad49eedc97539eb
/src/RPConfig1.py
77c0a56cf43effb39d46c064b268de9169bf6a08
[]
no_license
rbulha/pytimeclock
8eda6a41ecbe0e5f94238885a4d70e6d5f7e385f
a1cda1edce3d69fa504f55c40e78db9ecb2d837b
refs/heads/master
2021-01-15T22:28:57.382733
2012-08-10T17:58:52
2012-08-10T17:58:52
40,454,915
0
0
null
null
null
null
UTF-8
Python
false
false
2,680
py
import sys
import os
import time
import shelve
import dbhash  #incluidos apenas para que o instalador encontre os requisitos
import anydbm  #incluidos apenas para que o instalador encontre os requisitos

CONFIGURATION_FILE = 'configuration1.dat'


class CRPConfig:
    global CONFIGURATION_FILE

    print '[CRPConfig] LOAD CONFIGURATION'
    sys_path = sys.path[0]
    if os.path.splitext(sys_path)[1] == '':
        base = sys.path[0]
    else:
        base = os.path.dirname(sys.path[0])

    DB_BASE_PATH = os.path.dirname(base) + '\\data\\'
    caminho = DB_BASE_PATH + CONFIGURATION_FILE
    DB = shelve.open(caminho)

    print '[CRPConfig] DB=',len(DB)

    if (len(DB) != 0) and DB.has_key('C_H_NORMAL') and DB.has_key('H_E_ALMOCO'):
        C_H_NORMAL = DB['C_H_NORMAL']
        C_H_SEXTA = DB['C_H_SEXTA']
        T_ALMOCO = DB['T_ALMOCO']
        H_E_OFICIAL = DB['H_E_OFICIAL']
        H_S_OFICIAL = DB['H_S_OFICIAL']
        H_S_OFICIAL_SEXTA = DB['H_S_OFICIAL_SEXTA']
        H_S_ALMOCO = DB['H_S_ALMOCO']
        H_E_ALMOCO = DB['H_E_ALMOCO']
        START_REPORT_DAY = DB['START_REPORT_DAY']
    else:
        H_E_OFICIAL = 7.0
        DB['H_E_OFICIAL']=H_E_OFICIAL
        H_S_OFICIAL = 17.0
        DB['H_S_OFICIAL']=H_S_OFICIAL
        T_ALMOCO = 1.0
        DB['T_ALMOCO']=T_ALMOCO
        H_S_OFICIAL_SEXTA = 16.0
        DB['H_S_OFICIAL_SEXTA']=H_S_OFICIAL_SEXTA
        H_S_ALMOCO = 12.0
        DB['H_S_ALMOCO']=H_S_ALMOCO
        H_E_ALMOCO = 13.0
        DB['H_E_ALMOCO']=H_E_ALMOCO
        #total working day hours
        C_H_NORMAL = (H_S_OFICIAL - H_E_OFICIAL) - T_ALMOCO#9.1
        DB['C_H_NORMAL']=C_H_NORMAL
        C_H_SEXTA = (H_S_OFICIAL_SEXTA - H_E_OFICIAL) - T_ALMOCO#7.6
        DB['C_H_SEXTA']=C_H_SEXTA
        START_REPORT_DAY = 21
        DB['START_REPORT_DAY']=START_REPORT_DAY

    DB.sync()

    @staticmethod
    def GetJorneyInSeconds():
        nowtime = time.localtime()
        if nowtime.tm_wday == 4: #Sexta-feira
            return CRPConfig.C_H_SEXTA*3600
        else:
            return CRPConfig.C_H_NORMAL*3600

    @staticmethod
    def GetLanchTimeInSeconds():
        return CRPConfig.T_ALMOCO*3600

    @staticmethod
    def Get_H_S_OFICIAL():
        nowtime = time.localtime()
        if nowtime.tm_wday == 4: #Sexta-feira
            return CRPConfig.H_S_OFICIAL_SEXTA
        else:
            return CRPConfig.H_S_OFICIAL


def main():
    config = CRPConfig()


if __name__ == '__main__':
    main()
[ "rbulha@3db46129-f7cc-561c-f858-d950435ae609" ]
rbulha@3db46129-f7cc-561c-f858-d950435ae609
7aade3ac2d090d75cb7eb785668927ac61e0d212
297b6b2a030a0d665fd12780da80bc64a9016f59
/Assignment2/Assignment/makeChange.py
5d9e807a700003f2aa560de428e99a25f0a3393e
[]
no_license
z0t0b/COMP5703
133ed9a90ba2024616a7ad5480937b89a9f70072
bd89faa66f726c9675d4e58855577e2fda1075c4
refs/heads/master
2022-04-21T15:50:39.272916
2020-04-15T02:40:13
2020-04-15T02:40:13
255,782,341
2
0
null
null
null
null
UTF-8
Python
false
false
1,310
py
import decimal

changeList = [0, 0, 0, 0, 0, 0, 0, 0]


def chop_to_n_decimals(x, n):
    # rounds x to n decimals (works better for inputs like 0.005 than standard round func)
    d = decimal.Decimal(repr(x))
    targetdigit = decimal.Decimal("1e%d" % -n)
    chopped = d.quantize(targetdigit, decimal.ROUND_HALF_UP)
    return float(chopped)


def makingChange(inputVal, index, amount):
    num = int(inputVal / amount)
    changeList[index] = num
    inputVal -= (num * amount)
    if(amount < 1):
        inputVal = chop_to_n_decimals(inputVal, 2)
    return inputVal


def makeChange(amount = []):
    if((isinstance(amount, int) or isinstance(amount, float)) and (amount < 99.995 and amount >= 0.0)):
        roundedAmount = chop_to_n_decimals(amount, 2)
        roundedAmount = makingChange(roundedAmount, 0, 20)
        roundedAmount = makingChange(roundedAmount, 1, 10)
        roundedAmount = makingChange(roundedAmount, 2, 5)
        roundedAmount = makingChange(roundedAmount, 3, 1)
        roundedAmount = makingChange(roundedAmount, 4, 0.25)
        roundedAmount = makingChange(roundedAmount, 5, 0.10)
        roundedAmount = makingChange(roundedAmount, 6, 0.05)
        roundedAmount = makingChange(roundedAmount, 7, 0.01)
        return changeList
    return None
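A small usage sketch, not part of the original module, showing the greedy denomination order the list positions encode (twenties, tens, fives, ones, quarters, dimes, nickels, pennies); the sample amount is arbitrary.

# Hypothetical call: 17.31 breaks down as
# 0 twenties, 1 ten, 1 five, 2 ones, 1 quarter, 0 dimes, 1 nickel, 1 penny
result = makeChange(17.31)
print(result)   # expected: [0, 1, 1, 2, 1, 0, 1, 1]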
[ "noreply@github.com" ]
z0t0b.noreply@github.com
c75ea51b954cef8081502d553948e07b0487abe9
bf813d2b877fb8ba62feb4263484db3d0f26d5cd
/early-phd/map_to_flux.py
1c2d0eab20e2c6fa5e1fe3228a8f9507a9b7ba48
[]
no_license
9217392354A/astro-scripts
1e8e8c827097a877518d1f3e10870a5c2609417c
cd7a175bd504b4e291020b551db3077b067bc632
refs/heads/master
2021-01-13T00:40:57.481755
2016-03-25T17:04:28
2016-03-25T17:04:28
54,730,096
0
0
null
null
null
null
UTF-8
Python
false
false
406
py
#Program created by Chris Fuller to test a function for extracting flux's from a fits file using appature photomotry

#import stuff
from numpy import *
import numpy
import scipy
import math
import sys
import os
from os.path import join as pj

#File stuff
cat = "bigcoma.csv"
catfolder = "/Users/chrisfuller/Dropbox/coma/Catalogues"
catout ="comaTEST.csv"
folder = "/Users/chrisfuller/Dropbox/coma/flux2/"
[ "chrisfuller@Chriss-MBP.lan" ]
chrisfuller@Chriss-MBP.lan
abb40cfd7886a6089a10fff801f6ff4840838feb
a5a99f646e371b45974a6fb6ccc06b0a674818f2
/RecoBTag/ONNXRuntime/python/pfParticleNetFromMiniAODAK4DiscriminatorsJetTags_cfi.py
b09fabc5e9632fe7d6cba6adb353d5a7f3afbfa9
[ "Apache-2.0" ]
permissive
cms-sw/cmssw
4ecd2c1105d59c66d385551230542c6615b9ab58
19c178740257eb48367778593da55dcad08b7a4f
refs/heads/master
2023-08-23T21:57:42.491143
2023-08-22T20:22:40
2023-08-22T20:22:40
10,969,551
1,006
3,696
Apache-2.0
2023-09-14T19:14:28
2013-06-26T14:09:07
C++
UTF-8
Python
false
false
18,375
py
import FWCore.ParameterSet.Config as cms pfParticleNetFromMiniAODAK4PuppiCentralDiscriminatorsJetTags = cms.EDProducer( 'BTagProbabilityToDiscriminator', discriminators = cms.VPSet( cms.PSet( name = cms.string('BvsAll'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'), ), denominator=cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'), ), ), cms.PSet( name = cms.string('CvsL'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'), ), ), cms.PSet( name = cms.string('CvsB'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'), ), ), cms.PSet( name = cms.string('QvsG'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'), ), ), cms.PSet( name = cms.string('TauVsJet'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'), 
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'), ), ), cms.PSet( name = cms.string('TauVsEle'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probele'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'), ), ), cms.PSet( name = cms.string('TauVsMu'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probmu'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 
'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'), ), ), ) ) pfParticleNetFromMiniAODAK4PuppiForwardDiscriminatorsJetTags = cms.EDProducer( 'BTagProbabilityToDiscriminator', discriminators = cms.VPSet( cms.PSet( name = cms.string('QvsG'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probq'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probq'), cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probg'), ), ), ) ) pfParticleNetFromMiniAODAK4CHSCentralDiscriminatorsJetTags = cms.EDProducer( 'BTagProbabilityToDiscriminator', discriminators = cms.VPSet( cms.PSet( name = cms.string('BvsAll'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'), ), denominator=cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'), ), ), cms.PSet( name = cms.string('CvsL'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'), ), ), cms.PSet( name = cms.string('CvsB'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'), ), ), cms.PSet( name = cms.string('QvsG'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'), ), ), cms.PSet( name = cms.string('TauVsJet'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'), 
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'), ), ), cms.PSet( name = cms.string('TauVsEle'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probele'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'), ), ), cms.PSet( name = cms.string('TauVsMu'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probmu'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'), 
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'), ), ), ) ) pfParticleNetFromMiniAODAK4CHSForwardDiscriminatorsJetTags = cms.EDProducer( 'BTagProbabilityToDiscriminator', discriminators = cms.VPSet( cms.PSet( name = cms.string('QvsG'), numerator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probq'), ), denominator = cms.VInputTag( cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probq'), cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probg'), ), ), ) )
[ "stephane.b.cooperstein@cern.ch" ]
stephane.b.cooperstein@cern.ch
658da1160eb4755901ebedf82b585ce6ddcd99da
1b83b79fcd58878cad8c683f7c2fb048abdc9b6c
/magnum/conf/kubernetes.py
2de9370e2bc59fb73dcbfd6a2ef6f75e558c8313
[ "Apache-2.0" ]
permissive
ititandev/magnum
88f7ab8d93e6913fa085d34577827d11aead1790
16ea8b6397f2bafc01e6d4ec474c1ae97f15a484
refs/heads/master
2020-12-28T19:07:02.905485
2020-02-03T17:53:15
2020-02-03T17:53:15
238,458,066
1
0
Apache-2.0
2020-02-05T13:35:13
2020-02-05T13:35:12
null
UTF-8
Python
false
false
1,333
py
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg

kubernetes_group = cfg.OptGroup(name='kubernetes',
                                title='Options for the Kubernetes addons')

kubernetes_opts = [
    cfg.StrOpt('keystone_auth_default_policy',
               default="/etc/magnum/keystone_auth_default_policy.json",
               help='Explicitly specify the path to the file defined default '
                    'Keystone auth policy for Kubernetes cluster when '
                    'the Keystone auth is enabled. Vendors can put their '
                    'specific default policy here'),
]


def register_opts(conf):
    conf.register_group(kubernetes_group)
    conf.register_opts(kubernetes_opts, group=kubernetes_group)


def list_opts():
    return {
        kubernetes_group: kubernetes_opts
    }
[ "flwang@catalyst.net.nz" ]
flwang@catalyst.net.nz
8d8b46573115c470483434c30bc2fd15efceb159
73785aea08895d0fc15e914ce329716712f057ec
/recipes/errorAnal.py
9208c6a48ac906004212b9520360e38dbc9b8806
[]
no_license
Peder2911/ModelComp
5e93e6db7fbc809e7444448729a91ff7a762b0cc
91ee3835ddc560adeb4af457953905aaeca79cd6
refs/heads/master
2020-05-20T05:09:01.877547
2019-05-18T13:37:34
2019-05-18T13:37:34
185,397,609
0
0
null
null
null
null
UTF-8
Python
false
false
188
py
def ppSentences(errorSents, prediction, actual, err):
    # Pretty-print each error sentence with its predicted and actual labels
    for i, s in enumerate(errorSents):
        print('#' * 38)
        print(f'{s} - pred: {prediction[err][i]} | actual: {actual[err][i]}')
        print('\n')
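A hypothetical call of the function above; the sentences and label containers are invented for illustration (they are not from the original repository) and assume prediction and actual are indexable by err and aligned with errorSents.

errorSents = ["the deal collapsed", "markets rallied"]
prediction = [["positive", "negative"]]
actual = [["negative", "positive"]]
ppSentences(errorSents, prediction, actual, 0)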
[ "pglandsverk@gmail.com" ]
pglandsverk@gmail.com
fe588b211aefbc83d08eca506d88db9be266716c
0d7247b52044d5bfc498610fe33725c4ca0a2076
/MDD-SG-SD.py
235989f1e5607b3a6d8c9407160ab862c37b7b9d
[]
no_license
SivaArwin/Scraping---Uswitch.com
1ebde73978ce7912d164e8965a47fd79106b5026
f33e3d9b05b9ba23065c5b2ac9073e16174a0585
refs/heads/main
2023-03-03T14:06:52.455351
2021-02-13T18:11:34
2021-02-13T18:11:34
338,635,431
1
0
null
null
null
null
UTF-8
Python
false
false
15,392
py
from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.common.exceptions import TimeoutException from selenium.webdriver.support.ui import Select import pandas as pd import xlsxwriter import time import _Custom_Exception as CE import _Config as config import _EHl_urls overlapXpath = "/html/body/div[@id='loaderDiv']" web_driv = config._WebDriv() _Mdd_links = config._read_MDD_Urls() _savePath = config.savePath() #_regionList = ["Eastern", "East Midlands","London", "MANWEB", "Midlands", "Northern", "NORWEB", "Scottish Hydro", "Scottish Power", "Seeboard", "Southern", "Swalec", "SWEB", "Yorkshire"] """ #MainPage #Postcode postcode = web_driv.find_element_by_xpath("/html/body/main[@class='main']/div/div/div/div[@id='postCodeEntry']/form/fieldset[@class='homepage-cta-container']/div[@class='form-group homepage-cta-input-container']/input[@id='PostCode']") postcode.send_keys("SS26LU") CE._Time_to_delay(1) #Mainpage #Submit button submit = web_driv.find_element_by_xpath("/html/body/main[@class='main']/div/div/div/div[@id='postCodeEntry']/form/fieldset[@class='homepage-cta-container']/button") submit.click() CE._Time_to_delay(10) #Select Both Gas & Elec gas_elec_elementXpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='questions-intro']/div[@id='field-compare-type']/div[@class='field-input stacked-radio-buttons']/div/input[@id='compare-type-gas-elec']" CE._Pass_Through_Me(web_driv,overlapXpath,gas_elec_elementXpath) CE._Time_to_delay(1) #Select both same supplier sameSupplier_elementxpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='questions-intro']/div[@id='field-same-supplier']/div[@class='field-input stacked-radio-buttons']/div/input[@id='comparison-type-same-supplier']" CE._Pass_Through_Me(web_driv,overlapXpath,sameSupplier_elementxpath) CE._Time_to_delay(1) #select tariff tariffname = web_driv.find_element_by_xpath("/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='section-supply']/span[@id='section-supply-dual']/div[@class='funnel-section question-group-container ng-isolate-scope ng-valid']/div[3]/div[@class='field-input single-radio-button']/select[@id='elecSupplierTariff']") Select(tariffname).select_by_value("string:44") CE._Time_to_delay(1) #select payment method payment_Method_Xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='section-supply']/span[@id='section-supply-dual']/div[@class='funnel-section question-group-container ng-isolate-scope ng-valid ng-dirty ng-valid-parse']/div[@id='field-energy-payment-type']/div[@class='field-input stacked-radio-buttons']/div[@class='ng-scope']/input[@id='elec-payment-type-1']" CE._Pass_Through_Me(web_driv,overlapXpath,payment_Method_Xpath) CE._Time_to_delay(1) #Select gas usage radio button gas_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions 
ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='gas-usage']/div[@class='field-input expand']/div[@class='radio-gas-usage']/input[@id='gasKWhUsage']" CE._Pass_Through_Me(web_driv,overlapXpath,gas_button_xpath) CE._Time_to_delay(3) #Passing Gas usage gas_usage_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='gas-usage']/div[@class='field-input expand']/div[@class='radio-gas-usage']/div[@class='input-error-container-inline']/input[@id='gasKWhUsage-usageAsKWh']" gs_usage_res = web_driv.find_element_by_xpath(gas_usage_xpath) gs_usage_res.send_keys("12000") CE._Time_to_delay(1) #select Elec usage radio button elec_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='electricity-usage']/div[@class='field-input expand']/div[@class='radio-elec-usage']/input[@id='elecKWhUsage']" CE._Pass_Through_Me(web_driv,overlapXpath,elec_button_xpath) CE._Pass_Through_Me(web_driv,overlapXpath,elec_button_xpath) #running this code twice because the elec button is not clicked CE._Time_to_delay(3) #Passing Elec usage elec_usage_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='electricity-usage']/div[@class='field-input expand']/div[@class='radio-elec-usage']/div[@class='input-error-container-inline']/input[@id='elecKWhUsage-usageAsKWh']" elec_usage_res = web_driv.find_element_by_xpath(elec_usage_xpath) elec_usage_res.send_keys("3100") CE._Time_to_delay(1) #Click Submit button #Page2 show_results_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@id='section-spending']/div[2]/div[@id='usageSummary']/div[@class='spending-text ng-scope']/button[@id='show-results']" CE._Pass_Through_Me(web_driv,overlapXpath,show_results_button_xpath) CE._Pass_Through_Me(web_driv,overlapXpath,show_results_button_xpath) #running this code twice because the elec button is not clicked CE._Time_to_delay(10) #Page 3 #Select Show all results .. 
#Whole of market #show_all_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[2]/li[@class='left-column']/input[@id='Show me all generally available plans']" #CE._Pass_Through_Me(web_driv,overlapXpath,show_all_tariffs_xpath) #CE._Time_to_delay(3) """ writer = pd.ExcelWriter(_savePath+'MDD-SG-SD.xlsx', engine='xlsxwriter') try: if(_Mdd_links): for driver in range(len(_Mdd_links)): web_driv.delete_all_cookies() web_driv.get(_Mdd_links[driver]) CE._Time_to_delay(15) Tariff_Name = {} #Result Table output who = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-results-container']/div[@class='funnel-section ng-isolate-scope']/div[@id='section-compare-table']/div[@class='compare-table']/div[@class='compare-table-body']" who_res_final_res = web_driv.find_element_by_xpath(who) gas = '//*[@id="Gas only"]' CE._Pass_Through_Me(web_driv,overlapXpath,gas) CE._Time_to_delay(3) ''' Ele = '//*[@id="Electricity only"]' CE._Pass_Through_Me(web_driv,overlapXpath,Ele) CE._Time_to_delay(3) ''' ## ENQUIRY TARIFFS #Supplier Name on Enquiry for _supplierName_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[1]/div[@class='supplier']/p[contains(@class, 'ng-binding') and contains(@class, 'ng-scope')]"): Tariff_Name.setdefault('SupplierName', []).append(_supplierName_enquiry.text) #print("Supplier Name ->", _supplierName_enquiry.text ) print("Fetched Supplier Name Enquire..") #Tariff Name on Enquiry for _tarifName_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[1]/div[@class='supplier']/p[@class='ng-binding']"): #print("tariff name ->", _tarifName_enquiry.text) Tariff_Name.setdefault('TariffName',[]).append(_tarifName_enquiry.text) print("Fetched Tariff Name....") #Cancellation fees yes or no on apply for cancellation_fees in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[2]/p/span[1]/span"): Tariff_Name.setdefault('Cancellationstatus',[]).append(cancellation_fees.text) #print("Cancellation >", cancellation_fees.text) print("Fetched Cancellation status...!!!") #Tariff expiry for tariff_expiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[2]/p/span[2]/span"): Tariff_Name.setdefault('Tariffexpiry',[]).append(tariff_expiry.text) #print("Expiry >", tariff_expiry.text) print("Fetched Tariff expiry...!!!") #annual bill value on apply for annual_bill in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[3]/p/span[@class='ng-binding']"): Tariff_Name.setdefault('annual_bill',[]).append(annual_bill.text) #print("Annual Bills >",annual_bill.text) print("Fetched Annual 
values ...!!!") #On Enquiry for on_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[6]/p[@class='ng-binding']"): if (on_enquiry.text == "This supplier has not made this plan available through us" ): Tariff_Name.setdefault('Status',[]).append("Enquiry") #print("#", on_enquiry.text) print("Fetched on Enquiry ...!!!") #show Apply only show_apply_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[1]/li[@class='left-column']/input[@id='Show plans you can switch me to']" CE._Pass_Through_Me(web_driv,overlapXpath,show_apply_tariffs_xpath) CE._Time_to_delay(3) ### APPLY TARIFFS print("Fetching on apply tariffs now.......") #Supplier Name On Apply #img[@class='supplier-logo ng-scope'] for SA in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[1]/div[@class='supplier']/img[@class='supplier-logo ng-scope']"): Tariff_Name.setdefault('SupplierName',[]).append(SA.get_attribute('alt')) #print("Supplier Name >", SA.get_attribute('alt')) print("Fetched Supplier Name....!!!") #Tariff Name on Apply for TA in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[1]/div[@class='supplier']/p[@class='ng-binding']"): Tariff_Name.setdefault('TariffName',[]).append(TA.text) #print("Tariff Name >",TA.text) print("Fetched Tariff Name....!!!") #Cancellation fees yes or no on apply for cancellation_fees in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[2]/p/span[1]/span"): Tariff_Name.setdefault('Cancellationstatus',[]).append(cancellation_fees.text) #print("Cancellation fees >", cancellation_fees.text) print("Fetched Cancellation status...!!!") #Tariff expiry for tariff_expiry in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[2]/p/span[2]/span"): Tariff_Name.setdefault('Tariffexpiry',[]).append(tariff_expiry.text) #print("Expiry >", tariff_expiry.text) print("Fetched Tariff expiry...!!!") #annual bill value on apply for annual_bill in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[3]/p/span[@class='ng-binding']"): Tariff_Name.setdefault('annual_bill',[]).append(annual_bill.text) #print("Annual Bills >",annual_bill.text) print("Fetched Annual values ...!!!") #On Apply for on_apply in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[6]/button"): if (on_apply.text == "I WANT THIS PLAN"): Tariff_Name.setdefault('Status',[]).append("Apply") #print("#", on_apply.text) print("Fetched on Apply ...!!!") ''' #Page 3 #Select Show all results .. 
#Whole of market show_all_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[2]/li[@class='left-column']/input[@id='Show me all generally available plans']" CE._Pass_Through_Me(web_driv,overlapXpath,show_all_tariffs_xpath) CE._Time_to_delay(3) ''' _df = pd.DataFrame.from_dict(Tariff_Name) #for _region in driver: _df.to_excel(writer, sheet_name=str(driver+1), index=False) print("Region %d complete" %(driver+1)) #tn.to_csv('EHL.csv', index=False, sep=',', encoding='utf-8') #print(tn) writer.save() print("File is ready to use!!!") web_driv.close() except TimeoutException: print("Link is broken... Replace new url") web_driv.close()
[ "noreply@github.com" ]
SivaArwin.noreply@github.com
e495a6da64e3b39072332ee3934ad2f8318bb290
b7bb0a3ea2078dbdaa17947fd841fe1c9b5e356b
/oschown/workflows.py
a5b712d06344ce7e3a3a514e1831195fb2f2557f
[ "Apache-2.0" ]
permissive
epim/oschown
48d4a7528ed38fb12cae408baad7a6e370ba86f1
a50d3ad3769dad8d1f56dfe171d5345b3bee517f
refs/heads/master
2021-02-13T21:21:01.223901
2018-09-17T15:28:41
2018-09-17T15:28:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,690
py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import mock import oslo_config.cfg nova_conf = oslo_config.cfg.ConfigOpts() cinder_conf = oslo_config.cfg.ConfigOpts() # NOTE(danms): This is a crazy hack to import these project modules # but with separated global oslo.config objects. Hopefully I can # replace this with something that isn't quite as crazy (and at least # doesn't use mock), but this works for testing. with mock.patch('oslo_config.cfg.CONF', new=cinder_conf): from oschown import chown_cinder with mock.patch('oslo_config.cfg.CONF', new=nova_conf): from oschown import chown_nova from oschown import chown_neutron from oschown import exception LOG = logging.getLogger(__name__) def parse_resource_id(resource_id): return resource_id.split(':', 1) class ResourceCollection(object): """A collection of resources across projects. Collects resources that must be resolved and chown'ed together. """ RESOURCE_TYPES = { 'cinder': chown_cinder.CinderProject(), 'nova': chown_nova.NovaProject(), 'neutron': chown_neutron.NeutronProject(), } def __init__(self, context): self._collected_resources = {} self._context = context def need_resource(self, resource_id): """Mark a resource id like project:id as needed for resolution. Needed resources must be chown'ed with the other resources in the collection. """ if resource_id not in self._collected_resources: self._collected_resources[resource_id] = None @property def resolved_resources(self): """A list of ChownableResource objects that have been resolved.""" return [res for res in self._collected_resources.values() if res is not None] @property def unresolved_resources(self): """A list of resource identifiers that are yet unresolved.""" return [r_id for r_id, r_res in self._collected_resources.items() if r_res is None] @property def have_all_resources(self): """Return whether or not all known resources have been resolved.""" return len(self.unresolved_resources) == 0 def resolve_missing_resources_one(self): """One pass of resource resolution. Make one pass through the list of unresolved resources and try to resolve them (collecting any additional dependencies. """ for resource_id in self.unresolved_resources: project_id, local_id = parse_resource_id(resource_id) if project_id not in self.RESOURCE_TYPES: raise exception.UnknownResourceType() project = self.RESOURCE_TYPES[project_id] resource = project.collect_resource_by_id(self._context, local_id) self._collected_resources[resource_id] = resource for dep in resource.dependencies: self.need_resource(dep) def resolve_missing_resources(self): """Resolve all resources. Attempt to repeatedly resolve all resources in the list of needed ones. This runs until we have resolved all resources or we stop making progress. 
:raises: exception.UnableToResolveResources if some resources are not resolvable """ last_unresolved = None while not self.have_all_resources: self.resolve_missing_resources_one() now_unresolved = self.unresolved_resources if now_unresolved == last_unresolved: raise exception.UnableToResolveResources() last_unresolved = now_unresolved def chown_resources(self): """Actually change ownership of all resources in the collection. Does not actually change ownership if the context indicates a dry run should be performed. """ for resource in self.resolved_resources: if self._context.dry_run: LOG.info('Would chown resource %s' % resource.identifier) else: LOG.info('Chowning resource %s' % resource.identifier) resource.chown(self._context) def _workflow_main(context, collection): try: collection.resolve_missing_resources() except exception.ChownException as e: LOG.error('Unable to resolve resources: %s' % e) return LOG.info('Resolved %i resources to be chowned: %s' % ( len(collection.resolved_resources), ','.join([r.identifier for r in collection.resolved_resources]))) collection.chown_resources() def workflow_nova(context, instance_id): """Resolve and change ownership of an instance and dependent resources.""" collection = ResourceCollection(context) collection.need_resource('nova:%s' % instance_id) _workflow_main(context, collection) def workflow_cinder(context, volume_id): """Resolve and change ownership of a volume and dependent resources.""" collection = ResourceCollection(context) collection.need_resource('cinder:%s' % volume_id) _workflow_main(context, collection)
[ "dansmith@redhat.com" ]
dansmith@redhat.com
99d5656ae432b56eb9438da7a8014adeca443e39
ee2c15d82ff596f4ca9eda408f8e096b787f0d48
/Python/4 Dictionaries_Sets/4 dictionary/sets_challenge.py
7a56065963a00863f02685fa85a6c29210e88624
[]
no_license
sainimohit23/algorithms
1bbfee3bd4d1049b18425bf0d86ecaacd4c43ea0
911986abe015f7518ef169a5866b1058c7d41d4f
refs/heads/master
2022-11-13T17:40:06.128838
2020-06-30T17:35:35
2020-06-30T17:35:35
268,071,412
1
0
null
null
null
null
UTF-8
Python
false
false
190
py
my_str = input("enter some text please ")

my_set = set(my_str)
vowels = set("aeiou")
finalset = my_set.difference(vowels)
finallist = sorted(finalset)

for num in finallist:
    print(num)
[ "sainimohit23@gmail.com" ]
sainimohit23@gmail.com
784e7a40abe66b769c8b6ffca8fcf4ff447532c1
88ff86b95b377a4fd10474d2b215b0cf0b32143c
/src/ralph/scan/plugins/ssh_proxmox.py
5627cf11be6d296a44bcf87c00dae5afd8551d1c
[ "Apache-2.0" ]
permissive
fossabot/ralph
f00fbfd9e64ae779633e0ea1faeb7fbe8f35353f
9eb82955adf6b662bc460112b3d9b2d574ef0d70
refs/heads/master
2020-07-04T15:27:38.758147
2014-04-28T15:08:59
2014-04-28T15:08:59
202,324,100
0
0
NOASSERTION
2019-08-14T09:59:42
2019-08-14T09:59:41
null
UTF-8
Python
false
false
9,507
py
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging import os import json from django.conf import settings from ralph.discovery.hardware import get_disk_shares from ralph.discovery.models import DeviceType from ralph.scan.errors import ConnectionError, NoMatchError, NoLanError from ralph.scan.plugins import get_base_result_template from ralph.util import network SETTINGS = settings.SCAN_PLUGINS.get(__name__, {}) logger = logging.getLogger("SCAN") def _connect_ssh(ip_address, user, password): if not network.check_tcp_port(ip_address, 22): raise ConnectionError('Port 22 closed on a Proxmox server.') return network.connect_ssh(ip_address, user, password) def _get_master_ip_address(ssh, ip_address, cluster_cfg=None): if not cluster_cfg: stdin, stdout, stderr = ssh.exec_command("cat /etc/pve/cluster.cfg") data = stdout.read() else: data = cluster_cfg if not data: stdin, stdout, stderr = ssh.exec_command("pvesh get /nodes") data = stdout.read() if data: for node in json.loads(data): stdin, stdout, stderr = ssh.exec_command( 'pvesh get "/nodes/%s/dns"' % node['node'], ) dns_data = stdout.read() if not dns_data: return ip_address ip_address = json.loads(dns_data)['dns1'] break else: return ip_address nodes = {} current_node = None for line in data.splitlines(): line = line.strip() if line.endswith('{'): current_node = line.replace('{', '').strip() nodes[current_node] = {} elif line.endswith('}'): current_node = None elif ':' in line and current_node: key, value = (v.strip() for v in line.split(':', 1)) nodes[current_node][key] = value for node, pairs in nodes.iteritems(): is_master = node.startswith('master') try: ip_address = pairs['IP'] except KeyError: continue if is_master: return ip_address return ip_address def _get_cluster_member(ssh, ip_address): stdin, stdout, stderr = ssh.exec_command("ifconfig eth0 | head -n 1") mac = stdout.readline().split()[-1] return { 'model_name': 'Proxmox', 'mac_addresses': [mac], 'installed_software': [{ 'model_name': 'Proxmox', 'path': 'proxmox', }], 'system_ip_addresses': [ip_address], } def _get_local_disk_size(ssh, disk): """Return the size of a disk image file, in bytes""" path = os.path.join('/var/lib/vz/images', disk) stdin, stdout, stderr = ssh.exec_command("du -m '%s'" % path) line = stdout.read().strip() if not line: return 0 size = int(line.split(None, 1)[0]) return size def _get_virtual_machine_info( ssh, vmid, master_ip_address, storages, hypervisor_ip_address, ): stdin, stdout, stderr = ssh.exec_command( "cat /etc/qemu-server/%d.conf" % vmid, ) lines = stdout.readlines() if not lines: # Proxmox 2 uses a different directory structure stdin, stdout, stderr = ssh.exec_command( "cat /etc/pve/nodes/*/qemu-server/%d.conf" % vmid, ) lines = stdout.readlines() disks = {} lan_model = None name = 'unknown' for line in lines: line = line.strip() if line.startswith('#') or ':' not in line: continue key, value = line.split(':', 1) if key.startswith('vlan'): lan_model, lan_mac = value.split('=', 1) elif key.startswith('net'): lan_model, lan_mac = value.split('=', 1) if ',' in lan_mac: lan_mac = lan_mac.split(',', 1)[0] elif key == 'name': name = value.strip() elif key == 'sockets': cpu_count = int(value.strip()) elif key.startswith('ide') or key.startswith('virtio'): disks[key] = value.strip() if lan_model is None: raise NoLanError( "No LAN for virtual server %s. 
Hypervisor IP: %s" % ( vmid, hypervisor_ip_address, ), ) device_info = { 'model_name': 'Proxmox qemu kvm', 'type': DeviceType.virtual_server.raw, 'mac_addresses': [lan_mac], 'management': master_ip_address, # ? 'hostname': name, } detected_disks = [] detected_shares = [] for slot, disk in disks.iteritems(): params = {} if ',' in disk: disk, rawparams = disk.split(',', 1) for kv in rawparams.split(','): if not kv.strip(): continue k, v = kv.split('=', 1) params[k] = v.strip() if ':' in disk: vg, lv = disk.split(':', 1) else: vg = '' lv = disk if vg == 'local': size = _get_local_disk_size(ssh, lv) if not size > 0: continue detected_disks.append({ 'family': 'QEMU disk image', 'size': size, 'label': slot, 'mount_point': lv, }) continue if vg in ('', 'local', 'pve-local'): continue vol = '%s:%s' % (vg, lv) try: wwn, size = storages[lv] except KeyError: logger.warning( 'Volume %s does not exist. Hypervisor IP: %s' % ( lv, hypervisor_ip_address, ), ) continue detected_shares.append({ 'serial_number': wwn, 'is_virtual': True, 'size': size, 'volume': vol, }) if detected_disks: device_info['disks'] = detected_disks if detected_shares: device_info['disk_shares'] = detected_shares detected_cpus = [ { 'family': 'QEMU Virtual', 'model_name': 'QEMU Virtual CPU', 'label': 'CPU {}'.format(i + 1), 'index': i + 1, 'cores': 1, } for i in range(cpu_count) ] if detected_cpus: device_info['processors'] = detected_cpus return device_info def _get_virtual_machines(ssh, master_ip_address, hypervisor_ip_address): detected_machines = [] storages = get_disk_shares(ssh) stdin, stdout, stderr = ssh.exec_command("qm list") for line in stdout: line = line.strip() if line.startswith('VMID'): continue vmid, name, status, mem, bootdisk, pid = ( v.strip() for v in line.split() ) if status != 'running': continue vmid = int(vmid) try: device_info = _get_virtual_machine_info( ssh, vmid, master_ip_address, storages, hypervisor_ip_address, ) except NoLanError as e: logger.warning(unicode(e)) else: detected_machines.append(device_info) return detected_machines def _ssh_proxmox(ip_address, user, password): ssh = _connect_ssh(ip_address, user, password) try: cluster_cfg = None for command in ( 'cat /etc/pve/cluster.cfg', 'cat /etc/pve/cluster.conf', 'cat /etc/pve/storage.cfg', 'pvecm help', ): stdin, stdout, stderr = ssh.exec_command(command) data = stdout.read() if data != '': if command == 'cat /etc/pve/cluster.cfg': cluster_cfg = data break else: raise NoMatchError('This is not a PROXMOX server.') master_ip_address = _get_master_ip_address( ssh, ip_address, cluster_cfg, ) cluster_member = _get_cluster_member(ssh, ip_address) subdevices = _get_virtual_machines( ssh, master_ip_address, ip_address, ) if subdevices: cluster_member['subdevices'] = subdevices finally: ssh.close() return cluster_member def scan_address(ip_address, **kwargs): if 'nx-os' in (kwargs.get('snmp_name') or '').lower(): raise NoMatchError('Incompatible Nexus found.') if kwargs.get('http_family') not in ('Proxmox',): raise NoMatchError('It is not Proxmox.') user = SETTINGS.get('user') password = SETTINGS.get('password') messages = [] result = get_base_result_template('ssh_proxmox', messages) if not user or not password: result['status'] = 'error' messages.append( 'Not configured. 
Set SSH_USER and SSH_PASSWORD in your ' 'configuration file.', ) else: try: device_info = _ssh_proxmox(ip_address, user, password) except (ConnectionError, NoMatchError) as e: result['status'] = 'error' messages.append(unicode(e)) else: result.update({ 'status': 'success', 'device': device_info, }) return result
[ "andrew.jankowski@gmail.com" ]
andrew.jankowski@gmail.com
a921a15b368f2785bb530b40113b34630061be52
0974dd03a2c169c9186d74bb9c4f80ea68802331
/bin/f2py2
3f101a41698295cedc62c9ebc308c351e8e37718
[]
no_license
devashah7/instameme
edb4a0cf8e80560eef54e0aa40a19fd4deb0c99c
8ba27800dc5624f80672fae3f727ece5fcd779a2
refs/heads/master
2020-08-09T06:25:03.011987
2019-10-09T20:42:57
2019-10-09T20:42:57
214,018,886
0
0
null
null
null
null
UTF-8
Python
false
false
246
#!/home/dshah/Desktop/insta/insta/bin/python2
# -*- coding: utf-8 -*-
import re
import sys

from numpy.f2py.f2py2e import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
[ "dshah@hcn-inc.com" ]
dshah@hcn-inc.com
cc8c69ab62120ec4784513c836d1a7756d9b1a0d
2814757215ea599c47817315902a1642459970df
/object-dev/student-info/two_version/step5.py
1cdf392ac433075bcca876a89264a944d9d516a2
[]
no_license
legolas999/Python-learning
caadf31e60b973864f365c4f27eb9589bc1cdcd2
1a828595bc9596e737cc997bfad1f245b3314e8b
refs/heads/master
2020-05-15T04:11:13.328995
2019-06-08T16:17:04
2019-06-08T16:17:04
182,081,867
1
0
null
null
null
null
UTF-8
Python
false
false
5,281
py
#!/usr/bin/python3.6 #定义全局变量存储学生信息 student_info = [] def print_menu(): '''实现打印功能提示菜单功能''' #1.打印功能提示菜单 print('=' * 40) print('\t{:<40}'.format('学生信息管理系统V1.0')) print('\t{:<40}'.format('1.查询学员信息')) print('\t{:<40}'.format('2.增加学员信息')) print('\t{:<40}'.format('3.修改学员信息')) print('\t{:<40}'.format('4.删除学员信息')) print('\t{:<40}'.format('5.显示学员信息')) print('\t{:<40}'.format('6.保存学员信息')) print('\t{:<40}'.format('7.退出系统')) print('=' * 40) def add_stu_info(): '''实现添加一个新的学生信息功能''' global student_info #获取用户输入的信息 new_number = input('请输入你的学号:') new_name = input('请输入你的姓名:') new_id = input('请输入你的身份证号码:') new_phone = input('请输入你的电话号码:') new_dormitory = input('请输入你的宿舍号码:') new_addr = input('请输入你的籍贯地址:') #定义一个新的字典,来存储新的学生信息 new_info = {} new_info['number'] = new_number new_info['name'] = new_name new_info['id'] = new_id new_info['phone'] = new_phone new_info['dormitory'] = new_dormitory new_info['address'] = new_addr #将新的学生信息,添加到学生整体列表中 student_info.append(new_info) #print(student_info) # for test def find_stu_info(): '''实现查找学员信息功能''' global student_info #获取要查询的学员姓名 find_name = input('请输入要查找的学员姓名:') find_flag = 0 #默认表示没有找到的标志 for item in student_info: if find_name == item['name']: print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format('学号','姓名','身份证','电话','宿舍','籍贯')) print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format(item['number'],item['name'],item['id'],item['phone'],\ item['dormitory'],item['address'])) find_flag = 1 #表示已经找到了学员信息 break #找到后打印退出 #判断是否找到了学员信息 if find_flag == 0: print("查无此人") def show_stu_info(): '''实现显示所有学生信息功能''' global student_info print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format('学号','姓名','身份证','电话','宿舍','籍贯')) for item in student_info: print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format(item['number'],item['name'],item['id'],item['phone'],\ item['dormitory'],item['address'])) def save_stu_info(): '''实现将学员信息保存到文件中''' global student_info f = open('stu_info.data','w') f.write(str(student_info)) f.close() def load_stu_info(): '''实现加载学生信息功能''' global student_info try: f = open('stu_info.data') student_info = eval(f.read()) f.close() except Exception: pass def modify_stu_info(): '''实现修改学生信息功能''' global student_info find_flag = 0 modify_name = input('请输入需要修改的学生名字:') for item in student_info: if modify_name == item['name']: modify_number = input('请输入你的新的学号:') modify_id = input('请输入你的新的身份证号码:') modify_phone = input('请输入你的新的电话号码:') modify_dormitory = input('请输入你的新的宿舍号码:') modify_addr = input('请输入你的新的籍贯地址:') item['number'] = modify_number item['id'] = modify_id item['phone'] = modify_phone item['dormitory'] = modify_dormitory item['address'] = modify_addr find_flag = 1 break if find_flag == 0: print('输入的名字不正确,重新输入') def delete_stu_info(): '''实现删除学生信息功能''' global student_info find_flag = 0 del_name = input('请输入要删除的学生名字:') for item in student_info: if del_name == item['name']: del student_info[student_info.index(item)] find_flag = 1 break if find_flag == 0: print('此学生不存在,请重新输入') def main(): #恢复以前数据到程序中 load_stu_info() #打印功能提示菜单 print_menu() while True: #2. 获取用户的输入 num = int(input('请输入操作序号:')) #3. 根据用户的数据执行相应的功能 if num==1: find_stu_info() elif num==2: add_stu_info() elif num==3: modify_stu_info() elif num==4: delete_stu_info() elif num==5: show_stu_info() elif num==6: save_stu_info() elif num==7: break else: print('输入有误,请重新输入') print('-'*50) print('') if __name__ == '__main__': main()
[ "lqr888888@aliyun.com" ]
lqr888888@aliyun.com
715c52973d9758a579026ef80e34afbd30905a12
32134ac2fa760ba7285d9bc844fa4db0be76352a
/perceptron.py
dcc4b74eaf87eec1c00054f55b18a839728e6999
[]
no_license
NJCinnamond/NLPAssignment-1
8143bf8bce8de1044b757de28e2b0afce4169ce1
4204dcf64cf0864e6be2c5ce645f3e1ea810762f
refs/heads/master
2020-12-31T10:45:22.029690
2020-02-25T01:14:43
2020-02-25T01:14:43
239,006,956
0
0
null
null
null
null
UTF-8
Python
false
false
4,636
py
""" Maximum entropy model for Assignment 1: Starter code. You can change this code however you like. This is just for inspiration. """ import os import sys import numpy as np from util import evaluate, load_data from sklearn.metrics import confusion_matrix class PerceptronModel(): """ Maximum entropy model for classification. Attributes: (float) weights (float) bias (int) num_dim (bool) add_bias """ def __init__(self, label_to_index, lr=0.02): self.W = None self.bias = None self.lr = lr self.num_dim = 0 self.num_class = len(label_to_index) self.label_to_index = label_to_index self.index_to_label = {v: k for k, v in label_to_index.items()} def train(self, training_data): """ Trains the maximum entropy model. Inputs: training_data: Suggested type is (list of pair), where each item is a training example represented as an (input, label) pair. """ self.num_dim = len(training_data[0][0]) self.num_epochs = 5 self.W = {c: np.array([0.0 for _ in range(self.num_dim)]) for c in self.label_to_index.keys()} epoch = 0 change_over_epoch = True while change_over_epoch and epoch < self.num_epochs: print("Epoch: ", epoch) epoch += 1 correct = 0 change_over_epoch = False for sample in training_data: #Get numerical value of label label = sample[1] if sample[1] not in self.label_to_index.keys(): label = self.index_to_label[0] # Initialize arg_max value, predicted class. arg_max, predicted_label = 0, self.index_to_label[0] # Multi-Class Decision Rule: for c in self.label_to_index.keys(): current_activation = np.dot(sample[0], self.W[c]) if current_activation >= arg_max: arg_max, predicted_label = current_activation, c # Update Rule: if not (label == predicted_label): change_over_epoch = True self.W[label] += np.dot(self.lr, sample[0]) self.W[predicted_label] -= np.dot(self.lr, sample[0]) else: correct += 1 acc = correct / len(training_data) print("Accuracy: ", str(acc)) def predict(self, model_input): """ Predicts a label for an input. Inputs: model_input (features): Input data for an example, represented as a feature vector. Returns: The predicted class. """ # Initialize predicted label to UNK token arg_max, predicted_label = 0, self.index_to_label[0] # Multi-Class Decision Rule: for c in self.label_to_index.keys(): current_activation = np.dot(model_input, self.W[c]) if current_activation >= arg_max: arg_max, predicted_label = current_activation, c return predicted_label def create_dummy_bias(data): for sample in data: sample[0].append(1) return data if __name__ == "__main__": print("Getting data") train_data, dev_data, test_data, data_type, label_dict = load_data(sys.argv) print("Got data") train_data = create_dummy_bias(train_data) dev_data = create_dummy_bias(dev_data) test_data = create_dummy_bias(test_data) print(len(train_data)) print(len(dev_data)) print(len(test_data)) # Train the model using the training data. model = PerceptronModel(label_to_index=label_dict) model.train(train_data) # Predict on the development set. ''' dev_accuracy = evaluate(model, dev_data, os.path.join("results", "perceptron_" + data_type + "_dev_predictions.csv")) print("Dev accuracy: ", dev_accuracy) ''' pred_label = [model.predict(example[0]) for example in dev_data] true_label = [example[1] for example in dev_data] conf_mat = confusion_matrix(true_label, pred_label, labels=np.sort(np.unique(true_label))) print(conf_mat) print(np.sort(np.unique(true_label))) # Predict on the test set. # Note: We don't provide labels for test, so the returned value from this # call shouldn't make sense. 
#evaluate(model, # test_data, # os.path.join("results", "perceptron_" + data_type + "_test_predictions.csv"))
[ "54274991+NJCinnamond@users.noreply.github.com" ]
54274991+NJCinnamond@users.noreply.github.com
342f10e5e1c17b196563987f7720df7d1de0ef8e
1361f56a3dc2205455054d144fa30d9cebb9704f
/week-07/project/get_data.py
b6f5c0ca6d65a95f039ea83f0e9e44f705ff9f35
[]
no_license
green-fox-academy/TemExile
31b240f58a0d56364e3b888cd9610b176f244d5e
040882ebb07d10c65b98cd3dc12814f10fa52dc0
refs/heads/master
2020-05-19T18:17:58.468807
2019-06-21T06:22:51
2019-06-21T06:22:51
185,149,461
0
0
null
null
null
null
UTF-8
Python
false
false
1,689
py
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from function import get_info

city_list = {
    'Bath': '116',
    'Bridgwater': '212',
    'Burnham-On-Sea': '251',
    'Chard': '301',
    'Cheddar': '306',
    'Clevedon': '337',
    'Crewkerne': '381',
    'Frome': '536',
    'Glastonbury': '551',
    'Ilminster': '678',
    'Minehead': '942',
    'Radstock': '1109',
    'Shepton+Mallet': '1198',
    'Street': '1287',
    'Taunton': '1317',
    'Wellington': '1414',
    'Wells': '1415',
    'Weston-Super-Mare': '1437',
    'Wincanton': '1458',
    'Yeovil': '1497'
}

# 'https://www.rightmove.co.uk/house-prices/detail.html?'
# 'country=england&locationIdentifier=REGION%5E1198&'
# 'searchLocation=Shepton+Mallet&referrer=listChangeCriteria&index=0'

page_list = [x * 25 for x in range(40)]
base_url = r'https://www.rightmove.co.uk/house-prices/detail.html?country=england&locationIdentifier=REGION%5E'

raw_data_list = []
for key, value in city_list.items():
    for n in page_list:
        url = base_url + value + r'&searchLocation=' + key + '&&referrer=listChangeCriteria&index=' + str(n)
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        data = soup.find_all('li', 'soldUnit')
        for item in data:
            dic = {}
            result = get_info(item)
            dic['Price'] = result[0]
            dic['HomeType'] = result[1]
            dic['HoldType'] = result[2]
            dic['Type'] = result[3]
            dic['SoldDate'] = result[4]
            dic['Bedroom'] = result[5]
            dic['areaCode'] = result[6]
            dic['City'] = key
            raw_data_list.append(dic)

df = pd.DataFrame(raw_data_list)
df.to_csv('Raw_data.csv')
[ "hxwengl@163.com" ]
hxwengl@163.com
99cd43a8c940db281d4db4d33d06b1cee795bc61
c5291e50a3c72c885922378573a0ad423fcedf05
/analysis/data/urls.py
e7638f31b2b04491d30e6f29d5a4d9826f2a05c3
[]
no_license
raghurammanyam/django-projects
bcc3ed6285882af437a2995514cef33760fb063e
dd20ae354f7f111a0176a1cc047c099bd23e9f05
refs/heads/master
2022-12-12T19:22:31.698114
2018-12-09T09:41:45
2018-12-09T09:41:45
137,443,359
0
0
null
2022-11-22T03:01:07
2018-06-15T05:08:15
Python
UTF-8
Python
false
false
196
py
from django.conf.urls import url
from django.urls import path
from .views import test, get
from django.http import HttpResponse

urlpatterns = [
    url(r'^date/', test),
    url(r'^get/', get)
]
[ "manyamraghuram@gmail.com" ]
manyamraghuram@gmail.com
09e93c96494e49a2d86292c5fba4b0ca5dd942cb
bda539dedce550a3242cd9a2a4bb61782a924c12
/git/training/runTraining.py
26cb6e270bf3928f7bc110871628979131099a59
[]
no_license
joohwankim/deepgazekickoff
37222c937faca070a848bf97b1d85df518659d48
b30cc51de247be75cee510240868e6a31e08a815
refs/heads/master
2020-03-18T10:17:21.847000
2018-05-29T14:31:21
2018-05-29T14:31:21
134,605,399
0
0
null
null
null
null
UTF-8
Python
false
false
1,400
py
""" Copyright (C) 2017 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-ND 4.0 license (https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode). """ import argparse, logging, os, dlcore.train, sys if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-j', '--job', required=True, help='Which network to train. Specify a folder containing configuration file') parser.add_argument('-v', '--var', nargs='*', action='append', help='A varaible and value pair') parser.add_argument('-r', '--resume', default=None, help='Address to a checkpoint file. If given, resume training from the checkpoint file.') args = parser.parse_args() #config = dlcore.train.loadModule(os.path.join(args.job,'config.py')) config = dlcore.train.loadModule(args.job) if args.var: for var in args.var: dtype = type(getattr(config, var[0])) if len(var) == 2: setattr(config, var[0], dtype(var[1])) if os.path.abspath(config.result_dir) == os.path.abspath('./'): config.result_dir = os.path.normpath(args.job) logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) #logging.basicConfig(stream=sys.stdout, level=logging.INFO) #logging.basicConfig(filename=os.path.join(config.result_dir,config.log), level=config.log_level) dlcore.train.main(config, args.resume)
[ "sckim@nvidia.com" ]
sckim@nvidia.com
5a18ee6526a8d5b5735523e7efe503c9224f57c1
35631053e6c1e7d01d31c27e10388204ab59b8f2
/Streaming Media Player/pop_up_message.py
aaa1ee198e1f91f7dbfde98f2f21ab3e38f033da
[]
no_license
vanduan/DichVuMang
7cf442498820c6c39362cc69e1fd10b503fca704
c569cf52265356ed67eb703f50ddc65e6ce9e846
refs/heads/master
2021-01-21T13:03:15.699281
2016-04-22T10:41:27
2016-04-22T10:41:27
55,877,543
0
0
null
null
null
null
UTF-8
Python
false
false
900
py
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *


def window():
    app = QApplication(sys.argv)
    w = QWidget()
    b = QPushButton(w)
    b.setText("Show message!")
    b.move(50, 50)
    b.clicked.connect(showdialog)
    w.setWindowTitle("PyQt Dialog demo")
    w.show()
    sys.exit(app.exec_())


def showdialog():
    msg = QMessageBox()
    msg.setIcon(QMessageBox.Information)
    msg.setText("This is a message box")
    msg.setInformativeText("This is additional information")
    msg.setWindowTitle("MessageBox demo")
    msg.setDetailedText("The details are as follows:")
    msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
    msg.buttonClicked.connect(msgbtn)
    retval = msg.exec_()
    print "value of pressed message box button:", retval


def msgbtn(i):
    print "Button pressed is:", i.text()


if __name__ == '__main__':
    window()
[ "vanduan95.dvp@gmail.com" ]
vanduan95.dvp@gmail.com
f231f73dec833a474cefcee2707d8742f92f9d51
125bc51efb95f383257e7bdb50ae74e5dc05b7f7
/src/belajarIntegerString.py
f28765c84ddfefc5911c0710cd851199053fcd21
[]
no_license
frestea09/learn_ch1_python
f9688fffda5f0fa312b82bd25081b986fa0779e9
510ea59bf85ec024ebc473db2533e92becaefbf3
refs/heads/master
2020-05-26T18:22:31.171688
2019-05-26T05:42:08
2019-05-26T05:42:08
188,334,119
0
0
null
null
null
null
UTF-8
Python
false
false
239
py
from __future__ import print_function


def main():
    variabelNama = input('nama : ')
    variabelInteger = int(input('Umur'))
    print('Nama anda %s dan umur anda %d' % (variabelNama, variabelInteger))


if __name__ == "__main__":
    main()
[ "ilmanfrasetya@gmail.com" ]
ilmanfrasetya@gmail.com
8f4ca0a46c8c2f2b477ecfa59a36f08c988916bb
c220ac95ee13465d549b721700fe482ed490a2ac
/itty/__init__.py
6363a9929b9908c6a4220c4143cefa060e4c3310
[ "BSD-3-Clause" ]
permissive
ruthenium/itty
66587621e1f36fc66202bf3a24509438d04b48d1
5d4219909c88d21af8b5548d366888accace68f6
refs/heads/master
2021-01-18T08:46:24.315865
2011-08-17T03:10:47
2011-08-17T03:10:47
2,151,905
0
0
null
null
null
null
UTF-8
Python
false
false
1,591
py
from .base import (HTTP_MAPPINGS, Callback as _Callback, Error as _Error,
                   Request, Response, static_file, EnvironmentError,
                   Forbidden, NotFound, AppError, Redirect, App, run_app)

APP_METHODS = { }


class Callback(_Callback):
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return self._func

    @classmethod
    def decorator(cls, pattern):
        def wrapper(func):
            res = cls(pattern, func)
            APP_METHODS[func.func_name] = res
            return func
        return wrapper


class GetCallback(Callback):
    method = 'GET'

get = GetCallback.decorator


class PostCallback(Callback):
    method = 'POST'

post = PostCallback.decorator


class PutCallback(Callback):
    method = 'PUT'

put = PutCallback.decorator


class DeleteCallback(Callback):
    method = 'DELETE'

delete = DeleteCallback.decorator


class Error(_Error):
    def __get__(self, instance, owner):
        if instance is None:
            return self
        return self._func

    @classmethod
    def decorator(cls, status):
        def wrapper(func):
            res = cls(func, status)
            APP_METHODS[func.func_name] = res
            return func
        return wrapper

error = Error.decorator


def run_itty(host='localhost', port=8080, adapter='wsgiref'):
    return run_app(type('IttyMainApplication', (base.App, ), APP_METHODS),
                   host, port, adapter)
[ "bubucektop@gmail.com" ]
bubucektop@gmail.com
9da01c5fe4850d89d6df0c28383d6624f962e764
83179abbad0032fd3c8c38a54260ac4239ba9df3
/2021/python/day15/day15.py
24a8f0e5bd6154fc5d2140e760a2d5b58031e146
[]
no_license
yulrizka/adventofcode
448ac89ae543c8a7ee46bb5f86abc62887e3a9ee
74b89528e07ae6282763968d5bb3d8eea38e07ba
refs/heads/master
2023-01-13T03:57:20.688851
2022-12-22T11:11:59
2022-12-22T11:11:59
225,181,497
0
1
null
null
null
null
UTF-8
Python
false
false
1,966
py
import queue
import unittest

# with open("../../input/day15-sample") as f:
with open("../../input/day15") as f:
    raw = [[int(x) for x in y] for y in f.read().strip().split("\n")]


def wrap(x):
    while x > 9:
        x = x - 9
    return x


data2 = raw.copy()
for i in range(4):
    row = list(map(lambda x: list(map(lambda y: wrap(y + (i + 1)), x)), raw))
    data2 += row

for i, current_row in enumerate(data2):
    rr = current_row.copy()
    for j in range(4):
        row = list(map(lambda y: wrap(y + (j + 1)), current_row))
        rr += row
    data2[i] = rr

nr = [-1, 0, 1, 0]
nc = [0, 1, 0, -1]


def solve(raw):
    R = len(raw)
    C = len(raw[0])

    # build vertices
    D = {}
    G = {}
    for r in range(R):
        for c in range(C):
            D[(r, c)] = float('inf')
            for dd in range(4):
                rr = r + nr[dd]
                cc = c + nc[dd]
                if 0 <= rr < R and 0 <= cc < C:
                    G[((r, c), (rr, cc))] = int(raw[rr][cc])

    D[(0, 0)] = 0

    # dijkstra
    pq = queue.PriorityQueue()
    pq.put((0, (0, 0)))
    while not pq.empty():
        (dist, current_vertex) = pq.get()
        for dd in range(4):
            rr = current_vertex[0] + nr[dd]
            cc = current_vertex[1] + nc[dd]
            if 0 <= rr < R and 0 <= cc < C:
                neighbor = (rr, cc)
                distance = G[(current_vertex, neighbor)]
                old_cost = D[neighbor]
                new_cost = D[current_vertex] + distance
                if new_cost < old_cost:
                    D[neighbor] = new_cost
                    pq.put((new_cost, neighbor))

    return D[(R - 1, C - 1)]


def part1():
    return solve(raw)


def part2():
    return solve(data2)


class TestSum(unittest.TestCase):
    def test1(self):
        ans = part1()
        print(ans)
        assert ans == 498

    def test2(self):
        ans = part2()
        print(ans)
        assert ans == 2901
[ "yulrizka@users.noreply.github.com" ]
yulrizka@users.noreply.github.com
d1963e7cc009082ee066bf6e7b3db7e2a3f62383
6472cd640341f4bcc3867c3579a87ee8b763ae15
/conventionalAI/venv/lib/python3.6/site-packages/rivescript/rivescript.py
95fdae4013df4e458c3448a8cef0dfdd83a16f8e
[]
no_license
iafjayoza/Machine_Learning
7e9664cb6da6e0521e3475c9f80acd3ff15d1fc8
aba8fd939194b839da03f4c1ebd9eac8331d0b90
refs/heads/master
2023-06-01T14:28:59.391643
2021-06-25T17:12:57
2021-06-25T17:12:57
380,303,445
0
0
null
null
null
null
UTF-8
Python
false
false
103,656
py
#!/usr/bin/env python # The MIT License (MIT) # # Copyright (c) 2016 Noah Petherbridge # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import unicode_literals from six import text_type import sys import os import re import string import random import pprint import copy import codecs from . import __version__ from . import python # Common regular expressions. class RE(object): equals = re.compile('\s*=\s*') ws = re.compile('\s+') objend = re.compile('^\s*<\s*object') weight = re.compile('\{weight=(\d+)\}') inherit = re.compile('\{inherits=(\d+)\}') wilds = re.compile('[\s\*\#\_]+') nasties = re.compile('[^A-Za-z0-9 ]') crlf = re.compile('<crlf>') literal_w = re.compile(r'\\w') array = re.compile(r'\@(.+?)\b') def_syntax = re.compile(r'^.+(?:\s+.+|)\s*=\s*.+?$') name_syntax = re.compile(r'[^a-z0-9_\-\s]') utf8_trig = re.compile(r'[A-Z\\.]') trig_syntax = re.compile(r'[^a-z0-9(\|)\[\]*_#@{}<>=\s]') cond_syntax = re.compile(r'^.+?\s*(?:==|eq|!=|ne|<>|<|<=|>|>=)\s*.+?=>.+?$') utf8_meta = re.compile(r'[\\<>]') utf8_punct = re.compile(r'[.?,!;:@#$%^&*()]') cond_split = re.compile(r'\s*=>\s*') cond_parse = re.compile(r'^(.+?)\s+(==|eq|!=|ne|<>|<|<=|>|>=)\s+(.+?)$') topic_tag = re.compile(r'\{topic=(.+?)\}') set_tag = re.compile(r'<set (.+?)=(.+?)>') bot_tag = re.compile(r'<bot (.+?)>') get_tag = re.compile(r'<get (.+?)>') star_tags = re.compile(r'<star(\d+)>') botstars = re.compile(r'<botstar(\d+)>') input_tags = re.compile(r'<input([1-9])>') reply_tags = re.compile(r'<reply([1-9])>') random_tags = re.compile(r'\{random\}(.+?)\{/random\}') redir_tag = re.compile(r'\{@(.+?)\}') tag_search = re.compile(r'<([^<]+?)>') placeholder = re.compile(r'\x00(\d+)\x00') zero_star = re.compile(r'^\*$') optionals = re.compile(r'\[(.+?)\]') # Version of RiveScript we support. rs_version = 2.0 # Exportable constants. RS_ERR_MATCH = "[ERR: No reply matched]" RS_ERR_REPLY = "[ERR: No reply found]" RS_ERR_DEEP_RECURSION = "[ERR: Deep recursion detected]" RS_ERR_OBJECT = "[ERR: Error when executing Python object]" RS_ERR_OBJECT_HANDLER = "[ERR: No Object Handler]" RS_ERR_OBJECT_MISSING = "[ERR: Object Not Found]" class RiveScript(object): """A RiveScript interpreter for Python 2 and 3.""" # Concatenation mode characters. 
_concat_modes = dict( none="", space=" ", newline="\n", ) ############################################################################ # Initialization and Utility Methods # ############################################################################ def __init__(self, debug=False, strict=True, depth=50, log="", utf8=False): """Initialize a new RiveScript interpreter. bool debug: Specify a debug mode. bool strict: Strict mode (RS syntax errors are fatal) str log: Specify a log file for debug output to go to (instead of STDOUT). int depth: Specify the recursion depth limit. bool utf8: Enable UTF-8 support.""" ### # User configurable fields. ### # Debugging self._debug = debug # Debug mode self._log = log # Debug log file # Unicode stuff self._utf8 = utf8 # UTF-8 mode self.unicode_punctuation = re.compile(r'[.,!?;:]') # Misc. self._strict = strict # Strict mode self._depth = depth # Recursion depth limit ### # Internal fields. ### self._gvars = {} # 'global' variables self._bvars = {} # 'bot' variables self._subs = {} # 'sub' variables self._person = {} # 'person' variables self._arrays = {} # 'array' variables self._users = {} # 'user' variables self._freeze = {} # frozen 'user' variables self._includes = {} # included topics self._lineage = {} # inherited topics self._handlers = {} # Object handlers self._objlangs = {} # Languages of objects used self._topics = {} # Main reply structure self._thats = {} # %Previous reply structure self._sorted = {} # Sorted buffers self._syntax = {} # Syntax tracking (filenames & line no.'s) self._regexc = { # Precomputed regexes for speed optimizations. "trigger": {}, "subs": {}, "person": {}, } # "Current request" variables. self._current_user = None # The current user ID. # Define the default Python language handler. self._handlers["python"] = python.PyRiveObjects() self._say("Interpreter initialized.") @classmethod def VERSION(self=None): """Return the version number of the RiveScript library. This may be called as either a class method or a method of a RiveScript object.""" return __version__ def _say(self, message): if self._debug: print("[RS] {}".format(message)) if self._log: # Log it to the file. fh = open(self._log, 'a') fh.write("[RS] " + message + "\n") fh.close() def _warn(self, message, fname='', lineno=0): header = "[RS]" if self._debug: header = "[RS::Warning]" if len(fname) and lineno > 0: print(header, message, "at", fname, "line", lineno) else: print(header, message) ############################################################################ # Loading and Parsing Methods # ############################################################################ def load_directory(self, directory, ext=None): """Load RiveScript documents from a directory. Provide `ext` as a list of extensions to search for. The default list is `.rive`, `.rs`""" self._say("Loading from directory: " + directory) if ext is None: # Use the default extensions - .rive is preferable. ext = ['.rive', '.rs'] elif type(ext) == str: # Backwards compatibility for ext being a string value. ext = [ext] if not os.path.isdir(directory): self._warn("Error: " + directory + " is not a directory.") return for item in os.listdir(directory): for extension in ext: if item.lower().endswith(extension): # Load this file. 
self.load_file(os.path.join(directory, item)) break def load_file(self, filename): """Load and parse a RiveScript document.""" self._say("Loading file: " + filename) fh = codecs.open(filename, 'r', 'utf-8') lines = fh.readlines() fh.close() self._say("Parsing " + str(len(lines)) + " lines of code from " + filename) self._parse(filename, lines) def stream(self, code): """Stream in RiveScript source code dynamically. `code` can either be a string containing RiveScript code or an array of lines of RiveScript code.""" self._say("Streaming code.") if type(code) in [str, text_type]: code = code.split("\n") self._parse("stream()", code) def _parse(self, fname, code): """Parse RiveScript code into memory.""" self._say("Parsing code") # Track temporary variables. topic = 'random' # Default topic=random lineno = 0 # Line numbers for syntax tracking comment = False # In a multi-line comment inobj = False # In an object objname = '' # The name of the object we're in objlang = '' # The programming language of the object objbuf = [] # Object contents buffer ontrig = '' # The current trigger repcnt = 0 # Reply counter concnt = 0 # Condition counter isThat = '' # Is a %Previous trigger # Local (file scoped) parser options. local_options = dict( concat="none", # Concat mode for ^Continue command ) # Read each line. for lp, line in enumerate(code): lineno += 1 self._say("Line: " + line + " (topic: " + topic + ") incomment: " + str(inobj)) if len(line.strip()) == 0: # Skip blank lines continue # In an object? if inobj: if re.match(RE.objend, line): # End the object. if len(objname): # Call the object's handler. if objlang in self._handlers: self._objlangs[objname] = objlang self._handlers[objlang].load(objname, objbuf) else: self._warn("Object creation failed: no handler for " + objlang, fname, lineno) objname = '' objlang = '' objbuf = [] inobj = False else: objbuf.append(line) continue line = line.strip() # Trim excess space. We do it down here so we # don't mess up python objects! # Look for comments. if line[:2] == '//': # A single-line comment. continue elif line[0] == '#': self._warn("Using the # symbol for comments is deprecated", fname, lineno) elif line[:2] == '/*': # Start of a multi-line comment. if '*/' not in line: # Cancel if the end is here too. comment = True continue elif '*/' in line: comment = False continue if comment: continue # Separate the command from the data. if len(line) < 2: self._warn("Weird single-character line '" + line + "' found.", fname, lineno) continue cmd = line[0] line = line[1:].strip() # Ignore inline comments if there's a space before and after # the // symbols. if " // " in line: line = line.split(" // ")[0].strip() # Run a syntax check on this line. syntax_error = self.check_syntax(cmd, line) if syntax_error: # There was a syntax error! Are we enforcing strict mode? syntax_error = "Syntax error in " + fname + " line " + str(lineno) + ": " \ + syntax_error + " (near: " + cmd + " " + line + ")" if self._strict: raise Exception(syntax_error) else: self._warn(syntax_error) return # Don't try to continue # Reset the %Previous state if this is a new +Trigger. if cmd == '+': isThat = '' # Do a lookahead for ^Continue and %Previous commands. for i in range(lp + 1, len(code)): lookahead = code[i].strip() if len(lookahead) < 2: continue lookCmd = lookahead[0] lookahead = lookahead[1:].strip() # Only continue if the lookahead line has any data. if len(lookahead) != 0: # The lookahead command has to be either a % or a ^. 
if lookCmd != '^' and lookCmd != '%': break # If the current command is a +, see if the following is # a %. if cmd == '+': if lookCmd == '%': isThat = lookahead break else: isThat = '' # If the current command is a ! and the next command(s) are # ^, we'll tack each extension on as a line break (which is # useful information for arrays). if cmd == '!': if lookCmd == '^': line += "<crlf>" + lookahead continue # If the current command is not a ^ and the line after is # not a %, but the line after IS a ^, then tack it on to the # end of the current line. if cmd != '^' and lookCmd != '%': if lookCmd == '^': line += self._concat_modes.get( local_options["concat"], "" ) + lookahead else: break self._say("Command: " + cmd + "; line: " + line) # Handle the types of RiveScript commands. if cmd == '!': # ! DEFINE halves = re.split(RE.equals, line, 2) left = re.split(RE.ws, halves[0].strip(), 2) value, type, var = '', '', '' if len(halves) == 2: value = halves[1].strip() if len(left) >= 1: type = left[0].strip() if len(left) >= 2: var = ' '.join(left[1:]).strip() # Remove 'fake' line breaks unless this is an array. if type != 'array': value = re.sub(RE.crlf, '', value) # Handle version numbers. if type == 'version': # Verify we support it. try: if float(value) > rs_version: self._warn("Unsupported RiveScript version. We only support " + rs_version, fname, lineno) return except: self._warn("Error parsing RiveScript version number: not a number", fname, lineno) continue # All other types of defines require a variable and value name. if len(var) == 0: self._warn("Undefined variable name", fname, lineno) continue elif len(value) == 0: self._warn("Undefined variable value", fname, lineno) continue # Handle the rest of the types. if type == 'local': # Local file-scoped parser options. self._say("\tSet parser option " + var + " = " + value) local_options[var] = value elif type == 'global': # 'Global' variables self._say("\tSet global " + var + " = " + value) if value == '<undef>': try: del(self._gvars[var]) except: self._warn("Failed to delete missing global variable", fname, lineno) else: self._gvars[var] = value # Handle flipping debug and depth vars. if var == 'debug': if value.lower() == 'true': value = True else: value = False self._debug = value elif var == 'depth': try: self._depth = int(value) except: self._warn("Failed to set 'depth' because the value isn't a number!", fname, lineno) elif var == 'strict': if value.lower() == 'true': self._strict = True else: self._strict = False elif type == 'var': # Bot variables self._say("\tSet bot variable " + var + " = " + value) if value == '<undef>': try: del(self._bvars[var]) except: self._warn("Failed to delete missing bot variable", fname, lineno) else: self._bvars[var] = value elif type == 'array': # Arrays self._say("\tArray " + var + " = " + value) if value == '<undef>': try: del(self._arrays[var]) except: self._warn("Failed to delete missing array", fname, lineno) continue # Did this have multiple parts? parts = value.split("<crlf>") # Process each line of array data. fields = [] for val in parts: if '|' in val: fields.extend(val.split('|')) else: fields.extend(re.split(RE.ws, val)) # Convert any remaining '\s' escape codes into spaces. 
for f in fields: f = f.replace('\s', ' ') self._arrays[var] = fields elif type == 'sub': # Substitutions self._say("\tSubstitution " + var + " => " + value) if value == '<undef>': try: del(self._subs[var]) except: self._warn("Failed to delete missing substitution", fname, lineno) else: self._subs[var] = value # Precompile the regexp. self._precompile_substitution("subs", var) elif type == 'person': # Person Substitutions self._say("\tPerson Substitution " + var + " => " + value) if value == '<undef>': try: del(self._person[var]) except: self._warn("Failed to delete missing person substitution", fname, lineno) else: self._person[var] = value # Precompile the regexp. self._precompile_substitution("person", var) else: self._warn("Unknown definition type '" + type + "'", fname, lineno) elif cmd == '>': # > LABEL temp = re.split(RE.ws, line) type = temp[0] name = '' fields = [] if len(temp) >= 2: name = temp[1] if len(temp) >= 3: fields = temp[2:] # Handle the label types. if type == 'begin': # The BEGIN block. self._say("\tFound the BEGIN block.") type = 'topic' name = '__begin__' if type == 'topic': # Starting a new topic. self._say("\tSet topic to " + name) ontrig = '' topic = name # Does this topic include or inherit another one? mode = '' # or 'inherits' or 'includes' if len(fields) >= 2: for field in fields: if field == 'includes': mode = 'includes' elif field == 'inherits': mode = 'inherits' elif mode != '': # This topic is either inherited or included. if mode == 'includes': if name not in self._includes: self._includes[name] = {} self._includes[name][field] = 1 else: if name not in self._lineage: self._lineage[name] = {} self._lineage[name][field] = 1 elif type == 'object': # If a field was provided, it should be the programming # language. lang = None if len(fields) > 0: lang = fields[0].lower() # Only try to parse a language we support. ontrig = '' if lang is None: self._warn("Trying to parse unknown programming language", fname, lineno) lang = 'python' # Assume it's Python. # See if we have a defined handler for this language. if lang in self._handlers: # We have a handler, so start loading the code. objname = name objlang = lang objbuf = [] inobj = True else: # We don't have a handler, just ignore it. objname = '' objlang = '' objbuf = [] inobj = True else: self._warn("Unknown label type '" + type + "'", fname, lineno) elif cmd == '<': # < LABEL type = line if type == 'begin' or type == 'topic': self._say("\tEnd topic label.") topic = 'random' elif type == 'object': self._say("\tEnd object label.") inobj = False elif cmd == '+': # + TRIGGER self._say("\tTrigger pattern: " + line) if len(isThat): self._initTT('thats', topic, isThat, line) self._initTT('syntax', topic, line, 'thats') self._syntax['thats'][topic][line]['trigger'] = (fname, lineno) else: self._initTT('topics', topic, line) self._initTT('syntax', topic, line, 'topic') self._syntax['topic'][topic][line]['trigger'] = (fname, lineno) ontrig = line repcnt = 0 concnt = 0 # Pre-compile the trigger's regexp if possible. 
self._precompile_regexp(ontrig) elif cmd == '-': # - REPLY if ontrig == '': self._warn("Response found before trigger", fname, lineno) continue self._say("\tResponse: " + line) if len(isThat): self._thats[topic][isThat][ontrig]['reply'][repcnt] = line self._syntax['thats'][topic][ontrig]['reply'][repcnt] = (fname, lineno) else: self._topics[topic][ontrig]['reply'][repcnt] = line self._syntax['topic'][topic][ontrig]['reply'][repcnt] = (fname, lineno) repcnt += 1 elif cmd == '%': # % PREVIOUS pass # This was handled above. elif cmd == '^': # ^ CONTINUE pass # This was handled above. elif cmd == '@': # @ REDIRECT self._say("\tRedirect response to " + line) if len(isThat): self._thats[topic][isThat][ontrig]['redirect'] = line self._syntax['thats'][topic][ontrig]['redirect'] = (fname, lineno) else: self._topics[topic][ontrig]['redirect'] = line self._syntax['topic'][topic][ontrig]['redirect'] = (fname, lineno) elif cmd == '*': # * CONDITION self._say("\tAdding condition: " + line) if len(isThat): self._thats[topic][isThat][ontrig]['condition'][concnt] = line self._syntax['thats'][topic][ontrig]['condition'][concnt] = (fname, lineno) else: self._topics[topic][ontrig]['condition'][concnt] = line self._syntax['topic'][topic][ontrig]['condition'][concnt] = (fname, lineno) concnt += 1 else: self._warn("Unrecognized command \"" + cmd + "\"", fname, lineno) continue def check_syntax(self, cmd, line): """Syntax check a RiveScript command and line. Returns a syntax error string on error; None otherwise.""" # Run syntax checks based on the type of command. if cmd == '!': # ! Definition # - Must be formatted like this: # ! type name = value # OR # ! type = value match = re.match(RE.def_syntax, line) if not match: return "Invalid format for !Definition line: must be '! type name = value' OR '! type = value'" elif cmd == '>': # > Label # - The "begin" label must have only one argument ("begin") # - "topic" labels must be lowercased but can inherit other topics (a-z0-9_\s) # - "object" labels must follow the same rules as "topic", but don't need to be lowercase parts = re.split(" ", line, 2) if parts[0] == "begin" and len(parts) > 1: return "The 'begin' label takes no additional arguments, should be verbatim '> begin'" elif parts[0] == "topic": match = re.match(RE.name_syntax, line) if match: return "Topics should be lowercased and contain only numbers and letters" elif parts[0] == "object": match = re.match(RE.name_syntax, line) if match: return "Objects can only contain numbers and letters" elif cmd == '+' or cmd == '%' or cmd == '@': # + Trigger, % Previous, @ Redirect # This one is strict. The triggers are to be run through the regexp engine, # therefore it should be acceptable for the regexp engine. # - Entirely lowercase # - No symbols except: ( | ) [ ] * _ # @ { } < > = # - All brackets should be matched parens = 0 # Open parenthesis square = 0 # Open square brackets curly = 0 # Open curly brackets angle = 0 # Open angled brackets # Count brackets. for char in line: if char == '(': parens += 1 elif char == ')': parens -= 1 elif char == '[': square += 1 elif char == ']': square -= 1 elif char == '{': curly += 1 elif char == '}': curly -= 1 elif char == '<': angle += 1 elif char == '>': angle -= 1 # Any mismatches? if parens != 0: return "Unmatched parenthesis brackets" elif square != 0: return "Unmatched square brackets" elif curly != 0: return "Unmatched curly brackets" elif angle != 0: return "Unmatched angle brackets" # In UTF-8 mode, most symbols are allowed. 
if self._utf8: match = re.match(RE.utf8_trig, line) if match: return "Triggers can't contain uppercase letters, backslashes or dots in UTF-8 mode." else: match = re.match(RE.trig_syntax, line) if match: return "Triggers may only contain lowercase letters, numbers, and these symbols: ( | ) [ ] * _ # @ { } < > =" elif cmd == '-' or cmd == '^' or cmd == '/': # - Trigger, ^ Continue, / Comment # These commands take verbatim arguments, so their syntax is loose. pass elif cmd == '*': # * Condition # Syntax for a conditional is as follows: # * value symbol value => response match = re.match(RE.cond_syntax, line) if not match: return "Invalid format for !Condition: should be like '* value symbol value => response'" return None def deparse(self): """Return the in-memory RiveScript document as a Python data structure. This would be useful for developing a user interface for editing RiveScript replies without having to edit the RiveScript code manually.""" # Data to return. result = { "begin": { "global": {}, "var": {}, "sub": {}, "person": {}, "array": {}, "triggers": {}, "that": {}, }, "topic": {}, "that": {}, "inherit": {}, "include": {}, } # Populate the config fields. if self._debug: result["begin"]["global"]["debug"] = self._debug if self._depth != 50: result["begin"]["global"]["depth"] = 50 # Definitions result["begin"]["var"] = self._bvars.copy() result["begin"]["sub"] = self._subs.copy() result["begin"]["person"] = self._person.copy() result["begin"]["array"] = self._arrays.copy() result["begin"]["global"].update(self._gvars.copy()) # Topic Triggers. for topic in self._topics: dest = {} # Where to place the topic info if topic == "__begin__": # Begin block. dest = result["begin"]["triggers"] else: # Normal topic. if topic not in result["topic"]: result["topic"][topic] = {} dest = result["topic"][topic] # Copy the triggers. for trig, data in self._topics[topic].iteritems(): dest[trig] = self._copy_trigger(trig, data) # %Previous's. for topic in self._thats: dest = {} # Where to place the topic info if topic == "__begin__": # Begin block. dest = result["begin"]["that"] else: # Normal topic. if topic not in result["that"]: result["that"][topic] = {} dest = result["that"][topic] # The "that" structure is backwards: bot reply, then trigger, then info. for previous, pdata in self._thats[topic].iteritems(): for trig, data in pdata.iteritems(): dest[trig] = self._copy_trigger(trig, data, previous) # Inherits/Includes. for topic, data in self._lineage.iteritems(): result["inherit"][topic] = [] for inherit in data: result["inherit"][topic].append(inherit) for topic, data in self._includes.iteritems(): result["include"][topic] = [] for include in data: result["include"][topic].append(include) return result def write(self, fh, deparsed=None): """Write the currently parsed RiveScript data into a file. Pass either a file name (string) or a file handle object. This uses `deparse()` to dump a representation of the loaded data and writes it to the destination file. If you provide your own data as the `deparsed` argument, it will use that data instead of calling `deparse()` itself. This way you can use `deparse()`, edit the data, and use that to write the RiveScript document (for example, to be used by a user interface for editing RiveScript without writing the code directly).""" # Passed a string instead of a file handle? if type(fh) is str: fh = codecs.open(fh, "w", "utf-8") # Deparse the loaded data. if deparsed is None: deparsed = self.deparse() # Start at the beginning. 
fh.write("// Written by rivescript.deparse()\n") fh.write("! version = 2.0\n\n") # Variables of all sorts! for kind in ["global", "var", "sub", "person", "array"]: if len(deparsed["begin"][kind].keys()) == 0: continue for var in sorted(deparsed["begin"][kind].keys()): # Array types need to be separated by either spaces or pipes. data = deparsed["begin"][kind][var] if type(data) not in [str, text_type]: needs_pipes = False for test in data: if " " in test: needs_pipes = True break # Word-wrap the result, target width is 78 chars minus the # kind, var, and spaces and equals sign. width = 78 - len(kind) - len(var) - 4 if needs_pipes: data = self._write_wrapped("|".join(data), sep="|") else: data = " ".join(data) fh.write("! {kind} {var} = {data}\n".format( kind=kind, var=var, data=data, )) fh.write("\n") # Begin block. if len(deparsed["begin"]["triggers"].keys()): fh.write("> begin\n\n") self._write_triggers(fh, deparsed["begin"]["triggers"], indent="\t") fh.write("< begin\n\n") # The topics. Random first! topics = ["random"] topics.extend(sorted(deparsed["topic"].keys())) done_random = False for topic in topics: if topic not in deparsed["topic"]: continue if topic == "random" and done_random: continue if topic == "random": done_random = True tagged = False # Used > topic tag if topic != "random" or topic in deparsed["include"] or topic in deparsed["inherit"]: tagged = True fh.write("> topic " + topic) if topic in deparsed["inherit"]: fh.write(" inherits " + " ".join(deparsed["inherit"][topic])) if topic in deparsed["include"]: fh.write(" includes " + " ".join(deparsed["include"][topic])) fh.write("\n\n") indent = "\t" if tagged else "" self._write_triggers(fh, deparsed["topic"][topic], indent=indent) # Any %Previous's? if topic in deparsed["that"]: self._write_triggers(fh, deparsed["that"][topic], indent=indent) if tagged: fh.write("< topic\n\n") return True def _copy_trigger(self, trig, data, previous=None): """Make copies of all data below a trigger.""" # Copied data. dest = {} if previous: dest["previous"] = previous if "redirect" in data and data["redirect"]: # @Redirect dest["redirect"] = data["redirect"] if "condition" in data and len(data["condition"].keys()): # *Condition dest["condition"] = [] for i in sorted(data["condition"].keys()): dest["condition"].append(data["condition"][i]) if "reply" in data and len(data["reply"].keys()): # -Reply dest["reply"] = [] for i in sorted(data["reply"].keys()): dest["reply"].append(data["reply"][i]) return dest def _write_triggers(self, fh, triggers, indent=""): """Write triggers to a file handle.""" for trig in sorted(triggers.keys()): fh.write(indent + "+ " + self._write_wrapped(trig, indent=indent) + "\n") d = triggers[trig] if "previous" in d: fh.write(indent + "% " + self._write_wrapped(d["previous"], indent=indent) + "\n") if "condition" in d: for cond in d["condition"]: fh.write(indent + "* " + self._write_wrapped(cond, indent=indent) + "\n") if "redirect" in d: fh.write(indent + "@ " + self._write_wrapped(d["redirect"], indent=indent) + "\n") if "reply" in d: for reply in d["reply"]: fh.write(indent + "- " + self._write_wrapped(reply, indent=indent) + "\n") fh.write("\n") def _write_wrapped(self, line, sep=" ", indent="", width=78): """Word-wrap a line of RiveScript code for being written to a file.""" words = line.split(sep) lines = [] line = "" buf = [] while len(words): buf.append(words.pop(0)) line = sep.join(buf) if len(line) > width: # Need to word wrap! 
words.insert(0, buf.pop()) # Undo lines.append(sep.join(buf)) buf = [] line = "" # Straggler? if line: lines.append(line) # Returned output result = lines.pop(0) if len(lines): eol = "" if sep == " ": eol = "\s" for item in lines: result += eol + "\n" + indent + "^ " + item return result def _initTT(self, toplevel, topic, trigger, what=''): """Initialize a Topic Tree data structure.""" if toplevel == 'topics': if topic not in self._topics: self._topics[topic] = {} if trigger not in self._topics[topic]: self._topics[topic][trigger] = {} self._topics[topic][trigger]['reply'] = {} self._topics[topic][trigger]['condition'] = {} self._topics[topic][trigger]['redirect'] = None elif toplevel == 'thats': if topic not in self._thats: self._thats[topic] = {} if trigger not in self._thats[topic]: self._thats[topic][trigger] = {} if what not in self._thats[topic][trigger]: self._thats[topic][trigger][what] = {} self._thats[topic][trigger][what]['reply'] = {} self._thats[topic][trigger][what]['condition'] = {} self._thats[topic][trigger][what]['redirect'] = {} elif toplevel == 'syntax': if what not in self._syntax: self._syntax[what] = {} if topic not in self._syntax[what]: self._syntax[what][topic] = {} if trigger not in self._syntax[what][topic]: self._syntax[what][topic][trigger] = {} self._syntax[what][topic][trigger]['reply'] = {} self._syntax[what][topic][trigger]['condition'] = {} self._syntax[what][topic][trigger]['redirect'] = {} ############################################################################ # Sorting Methods # ############################################################################ def sort_replies(self, thats=False): """Sort the loaded triggers.""" # This method can sort both triggers and that's. triglvl = None sortlvl = None if thats: triglvl = self._thats sortlvl = 'thats' else: triglvl = self._topics sortlvl = 'topics' # (Re)Initialize the sort cache. self._sorted[sortlvl] = {} self._say("Sorting triggers...") # Loop through all the topics. for topic in triglvl: self._say("Analyzing topic " + topic) # Collect a list of all the triggers we're going to need to worry # about. If this topic inherits another topic, we need to # recursively add those to the list. alltrig = self._topic_triggers(topic, triglvl) # Keep in mind here that there is a difference between 'includes' # and 'inherits' -- topics that inherit other topics are able to # OVERRIDE triggers that appear in the inherited topic. This means # that if the top topic has a trigger of simply '*', then *NO* # triggers are capable of matching in ANY inherited topic, because # even though * has the lowest sorting priority, it has an automatic # priority over all inherited topics. # # The _topic_triggers method takes this into account. All topics # that inherit other topics will have their triggers prefixed with # a fictional {inherits} tag, which would start at {inherits=0} and # increment if the topic tree has other inheriting topics. So we can # use this tag to make sure topics that inherit things will have # their triggers always be on top of the stack, from inherits=0 to # inherits=n. # Sort these triggers. running = self._sort_trigger_set(alltrig) # Save this topic's sorted list. if sortlvl not in self._sorted: self._sorted[sortlvl] = {} self._sorted[sortlvl][topic] = running # And do it all again for %Previous! if not thats: # This will sort the %Previous lines to best match the bot's last reply. 
self.sort_replies(True) # If any of those %Previous's had more than one +trigger for them, # this will sort all those +triggers to pair back the best human # interaction. self._sort_that_triggers() # Also sort both kinds of substitutions. self._sort_list('subs', self._subs) self._sort_list('person', self._person) def _sort_that_triggers(self): """Make a sorted list of triggers that correspond to %Previous groups.""" self._say("Sorting reverse triggers for %Previous groups...") if "that_trig" not in self._sorted: self._sorted["that_trig"] = {} for topic in self._thats: if topic not in self._sorted["that_trig"]: self._sorted["that_trig"][topic] = {} for bottrig in self._thats[topic]: if bottrig not in self._sorted["that_trig"][topic]: self._sorted["that_trig"][topic][bottrig] = [] triggers = self._sort_trigger_set(self._thats[topic][bottrig].keys()) self._sorted["that_trig"][topic][bottrig] = triggers def _sort_trigger_set(self, triggers): """Sort a group of triggers in optimal sorting order.""" # Create a priority map. prior = { 0: [] # Default priority=0 } for trig in triggers: match, weight = re.search(RE.weight, trig), 0 if match: weight = int(match.group(1)) if weight not in prior: prior[weight] = [] prior[weight].append(trig) # Keep a running list of sorted triggers for this topic. running = [] # Sort them by priority. for p in sorted(prior.keys(), reverse=True): self._say("\tSorting triggers with priority " + str(p)) # So, some of these triggers may include {inherits} tags, if they # came form a topic which inherits another topic. Lower inherits # values mean higher priority on the stack. inherits = -1 # -1 means no {inherits} tag highest_inherits = -1 # highest inheritance number seen # Loop through and categorize these triggers. track = { inherits: self._init_sort_track() } for trig in prior[p]: self._say("\t\tLooking at trigger: " + trig) # See if it has an inherits tag. match = re.search(RE.inherit, trig) if match: inherits = int(match.group(1)) if inherits > highest_inherits: highest_inherits = inherits self._say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherits)) trig = re.sub(RE.inherit, "", trig) else: inherits = -1 # If this is the first time we've seen this inheritance level, # initialize its track structure. if inherits not in track: track[inherits] = self._init_sort_track() # Start inspecting the trigger's contents. if '_' in trig: # Alphabetic wildcard included. cnt = self._word_count(trig) self._say("\t\t\tHas a _ wildcard with " + str(cnt) + " words.") if cnt > 1: if cnt not in track[inherits]['alpha']: track[inherits]['alpha'][cnt] = [] track[inherits]['alpha'][cnt].append(trig) else: track[inherits]['under'].append(trig) elif '#' in trig: # Numeric wildcard included. cnt = self._word_count(trig) self._say("\t\t\tHas a # wildcard with " + str(cnt) + " words.") if cnt > 1: if cnt not in track[inherits]['number']: track[inherits]['number'][cnt] = [] track[inherits]['number'][cnt].append(trig) else: track[inherits]['pound'].append(trig) elif '*' in trig: # Wildcard included. cnt = self._word_count(trig) self._say("\t\t\tHas a * wildcard with " + str(cnt) + " words.") if cnt > 1: if cnt not in track[inherits]['wild']: track[inherits]['wild'][cnt] = [] track[inherits]['wild'][cnt].append(trig) else: track[inherits]['star'].append(trig) elif '[' in trig: # Optionals included. 
cnt = self._word_count(trig) self._say("\t\t\tHas optionals and " + str(cnt) + " words.") if cnt not in track[inherits]['option']: track[inherits]['option'][cnt] = [] track[inherits]['option'][cnt].append(trig) else: # Totally atomic. cnt = self._word_count(trig) self._say("\t\t\tTotally atomic and " + str(cnt) + " words.") if cnt not in track[inherits]['atomic']: track[inherits]['atomic'][cnt] = [] track[inherits]['atomic'][cnt].append(trig) # Move the no-{inherits} triggers to the bottom of the stack. track[highest_inherits + 1] = track[-1] del(track[-1]) # Add this group to the sort list. for ip in sorted(track.keys()): self._say("ip=" + str(ip)) for kind in ['atomic', 'option', 'alpha', 'number', 'wild']: for wordcnt in sorted(track[ip][kind], reverse=True): # Triggers with a matching word count should be sorted # by length, descending. running.extend(sorted(track[ip][kind][wordcnt], key=len, reverse=True)) running.extend(sorted(track[ip]['under'], key=len, reverse=True)) running.extend(sorted(track[ip]['pound'], key=len, reverse=True)) running.extend(sorted(track[ip]['star'], key=len, reverse=True)) return running def _sort_list(self, name, items): """Sort a simple list by number of words and length.""" def by_length(word1, word2): return len(word2) - len(word1) # Initialize the list sort buffer. if "lists" not in self._sorted: self._sorted["lists"] = {} self._sorted["lists"][name] = [] # Track by number of words. track = {} # Loop through each item. for item in items: # Count the words. cword = self._word_count(item, all=True) if cword not in track: track[cword] = [] track[cword].append(item) # Sort them. output = [] for count in sorted(track.keys(), reverse=True): sort = sorted(track[count], key=len, reverse=True) output.extend(sort) self._sorted["lists"][name] = output def _init_sort_track(self): """Returns a new dict for keeping track of triggers for sorting.""" return { 'atomic': {}, # Sort by number of whole words 'option': {}, # Sort optionals by number of words 'alpha': {}, # Sort alpha wildcards by no. of words 'number': {}, # Sort number wildcards by no. of words 'wild': {}, # Sort wildcards by no. of words 'pound': [], # Triggers of just # 'under': [], # Triggers of just _ 'star': [] # Triggers of just * } ############################################################################ # Public Configuration Methods # ############################################################################ def set_handler(self, language, obj): """Define a custom language handler for RiveScript objects. language: The lowercased name of the programming language, e.g. python, javascript, perl obj: An instance of a class object that provides the following interface: class MyObjectHandler: def __init__(self): pass def load(self, name, code): # name = the name of the object from the RiveScript code # code = the source code of the object def call(self, rs, name, fields): # rs = the current RiveScript interpreter object # name = the name of the object being called # fields = array of arguments passed to the object return reply Pass in a None value for the object to delete an existing handler (for example, to prevent Python code from being able to be run by default). Look in the `eg` folder of the rivescript-python distribution for an example script that sets up a JavaScript language handler.""" # Allow them to delete a handler too. 
if obj is None: if language in self._handlers: del self._handlers[language] else: self._handlers[language] = obj def set_subroutine(self, name, code): """Define a Python object from your program. This is equivalent to having an object defined in the RiveScript code, except your Python code is defining it instead. `name` is the name of the object, and `code` is a Python function (a `def`) that accepts rs,args as its parameters. This method is only available if there is a Python handler set up (which there is by default, unless you've called set_handler("python", None)).""" # Do we have a Python handler? if 'python' in self._handlers: self._handlers['python']._objects[name] = code self._objlangs[name] = 'python' else: self._warn("Can't set_subroutine: no Python object handler!") def set_global(self, name, value): """Set a global variable. Equivalent to `! global` in RiveScript code. Set to None to delete.""" if value is None: # Unset the variable. if name in self._gvars: del self._gvars[name] self._gvars[name] = value def set_variable(self, name, value): """Set a bot variable. Equivalent to `! var` in RiveScript code. Set to None to delete.""" if value is None: # Unset the variable. if name in self._bvars: del self._bvars[name] self._bvars[name] = value def set_substitution(self, what, rep): """Set a substitution. Equivalent to `! sub` in RiveScript code. Set to None to delete.""" if rep is None: # Unset the variable. if what in self._subs: del self._subs[what] self._subs[what] = rep def set_person(self, what, rep): """Set a person substitution. Equivalent to `! person` in RiveScript code. Set to None to delete.""" if rep is None: # Unset the variable. if what in self._person: del self._person[what] self._person[what] = rep def set_uservar(self, user, name, value): """Set a variable for a user.""" if user not in self._users: self._users[user] = {"topic": "random"} self._users[user][name] = value def get_uservar(self, user, name): """Get a variable about a user. If the user has no data at all, returns None. If the user doesn't have a value set for the variable you want, returns the string 'undefined'.""" if user in self._users: if name in self._users[user]: return self._users[user][name] else: return "undefined" else: return None def get_uservars(self, user=None): """Get all variables about a user (or all users). If no username is passed, returns the entire user database structure. Otherwise, only returns the variables for the given user, or None if none exist.""" if user is None: # All the users! return self._users elif user in self._users: # Just this one! return self._users[user] else: # No info. return None def clear_uservars(self, user=None): """Delete all variables about a user (or all users). If no username is passed, deletes all variables about all users. Otherwise, only deletes all variables for the given user.""" if user is None: # All the users! self._users = {} elif user in self._users: # Just this one. self._users[user] = {} def freeze_uservars(self, user): """Freeze the variable state for a user. This will clone and preserve a user's entire variable state, so that it can be restored later with `thaw_uservars`.""" if user in self._users: # Clone the user's data. self._freeze[user] = copy.deepcopy(self._users[user]) else: self._warn("Can't freeze vars for user " + user + ": not found!") def thaw_uservars(self, user, action="thaw"): """Thaw a user's frozen variables. 
The `action` can be one of the following options: discard: Don't restore the user's variables, just delete the frozen copy. keep: Keep the frozen copy after restoring the variables. thaw: Restore the variables, then delete the frozen copy (default).""" if user in self._freeze: # What are we doing? if action == "thaw": # Thawing them out. self.clear_uservars(user) self._users[user] = copy.deepcopy(self._freeze[user]) del self._freeze[user] elif action == "discard": # Just discard the frozen copy. del self._freeze[user] elif action == "keep": # Keep the frozen copy afterward. self.clear_uservars(user) self._users[user] = copy.deepcopy(self._freeze[user]) else: self._warn("Unsupported thaw action") else: self._warn("Can't thaw vars for user " + user + ": not found!") def last_match(self, user): """Get the last trigger matched for the user. This will return the raw trigger text that the user's last message matched. If there was no match, this will return None.""" return self.get_uservar(user, "__lastmatch__") def trigger_info(self, trigger=None, dump=False): """Get information about a trigger. Pass in a raw trigger to find out what file name and line number it appeared at. This is useful for e.g. tracking down the location of the trigger last matched by the user via last_match(). Returns a list of matching triggers, containing their topics, filenames and line numbers. Returns None if there weren't any matches found. The keys in the trigger info is as follows: * category: Either 'topic' (for normal) or 'thats' (for %Previous triggers) * topic: The topic name * trigger: The raw trigger text * filename: The filename the trigger was found in. * lineno: The line number the trigger was found on. Pass in a true value for `dump`, and the entire syntax tracking tree is returned.""" if dump: return self._syntax response = None # Search the syntax tree for the trigger. for category in self._syntax: for topic in self._syntax[category]: if trigger in self._syntax[category][topic]: # We got a match! if response is None: response = list() fname, lineno = self._syntax[category][topic][trigger]['trigger'] response.append(dict( category=category, topic=topic, trigger=trigger, filename=fname, line=lineno, )) return response def current_user(self): """Retrieve the user ID of the current user talking to your bot. This is mostly useful inside of a Python object macro to get the user ID of the person who caused the object macro to be invoked (i.e. to set a variable for that user from within the object). This will return None if used outside of the context of getting a reply (i.e. the value is unset at the end of the `reply()` method).""" if self._current_user is None: # They're doing it wrong. self._warn("current_user() is meant to be used from within a Python object macro!") return self._current_user ############################################################################ # Reply Fetching Methods # ############################################################################ def reply(self, user, msg, errors_as_replies=True): """Fetch a reply from the RiveScript brain.""" self._say("Get reply to [" + user + "] " + msg) # Store the current user in case an object macro needs it. self._current_user = user # Format their message. msg = self._format_message(msg) reply = '' # If the BEGIN block exists, consult it first. if "__begin__" in self._topics: begin = self._getreply(user, 'request', context='begin', ignore_object_errors=errors_as_replies) # Okay to continue? 
if '{ok}' in begin: try: reply = self._getreply(user, msg, ignore_object_errors=errors_as_replies) except RiveScriptError as e: if not errors_as_replies: raise reply = e.error_message begin = begin.replace('{ok}', reply) reply = begin # Run more tag substitutions. reply = self._process_tags(user, msg, reply, ignore_object_errors=errors_as_replies) else: # Just continue then. try: reply = self._getreply(user, msg, ignore_object_errors=errors_as_replies) except RiveScriptError as e: if not errors_as_replies: raise reply = e.error_message # Save their reply history. oldInput = self._users[user]['__history__']['input'][:8] self._users[user]['__history__']['input'] = [msg] self._users[user]['__history__']['input'].extend(oldInput) oldReply = self._users[user]['__history__']['reply'][:8] self._users[user]['__history__']['reply'] = [reply] self._users[user]['__history__']['reply'].extend(oldReply) # Unset the current user. self._current_user = None return reply def _format_message(self, msg, botreply=False): """Format a user's message for safe processing.""" # Make sure the string is Unicode for Python 2. if sys.version_info[0] < 3 and isinstance(msg, str): msg = msg.decode('utf8') # Lowercase it. msg = msg.lower() # Run substitutions on it. msg = self._substitute(msg, "subs") # In UTF-8 mode, only strip metacharacters and HTML brackets # (to protect from obvious XSS attacks). if self._utf8: msg = re.sub(RE.utf8_meta, '', msg) msg = re.sub(self.unicode_punctuation, '', msg) # For the bot's reply, also strip common punctuation. if botreply: msg = re.sub(RE.utf8_punct, '', msg) else: # For everything else, strip all non-alphanumerics. msg = self._strip_nasties(msg) return msg def _getreply(self, user, msg, context='normal', step=0, ignore_object_errors=True): # Needed to sort replies? if 'topics' not in self._sorted: raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents") # Initialize the user's profile? if user not in self._users: self._users[user] = {'topic': 'random'} # Collect data on the user. topic = self._users[user]['topic'] stars = [] thatstars = [] # For %Previous's. reply = '' # Avoid letting them fall into a missing topic. if topic not in self._topics: self._warn("User " + user + " was in an empty topic named '" + topic + "'") topic = self._users[user]['topic'] = 'random' # Avoid deep recursion. if step > self._depth: raise DeepRecursionError # Are we in the BEGIN statement? if context == 'begin': topic = '__begin__' # Initialize this user's history. if '__history__' not in self._users[user]: self._users[user]['__history__'] = { 'input': [ 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined' ], 'reply': [ 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined' ] } # More topic sanity checking. if topic not in self._topics: # This was handled before, which would mean topic=random and # it doesn't exist. Serious issue! raise NoDefaultRandomTopicError("no default topic 'random' was found") # Create a pointer for the matched data when we find it. matched = None matchedTrigger = None foundMatch = False # See if there were any %Previous's in this topic, or any topic related # to it. This should only be done the first time -- not during a # recursive redirection. This is because in a redirection, "lastreply" # is still gonna be the same as it was the first time, causing an # infinite loop! 
if step == 0: allTopics = [topic] if topic in self._includes or topic in self._lineage: # Get all the topics! allTopics = self._get_topic_tree(topic) # Scan them all! for top in allTopics: self._say("Checking topic " + top + " for any %Previous's.") if top in self._sorted["thats"]: self._say("There is a %Previous in this topic!") # Do we have history yet? lastReply = self._users[user]["__history__"]["reply"][0] # Format the bot's last reply the same way as the human's. lastReply = self._format_message(lastReply, botreply=True) self._say("lastReply: " + lastReply) # See if it's a match. for trig in self._sorted["thats"][top]: botside = self._reply_regexp(user, trig) self._say("Try to match lastReply (" + lastReply + ") to " + trig) # Match?? match = re.match(botside, lastReply) if match: # Huzzah! See if OUR message is right too. self._say("Bot side matched!") thatstars = match.groups() for subtrig in self._sorted["that_trig"][top][trig]: humanside = self._reply_regexp(user, subtrig) self._say("Now try to match " + msg + " to " + subtrig) match = re.match(humanside, msg) if match: self._say("Found a match!") matched = self._thats[top][trig][subtrig] matchedTrigger = subtrig foundMatch = True # Get the stars! stars = match.groups() break # Break if we found a match. if foundMatch: break # Break if we found a match. if foundMatch: break # Search their topic for a match to their trigger. if not foundMatch: for trig in self._sorted["topics"][topic]: # Process the triggers. regexp = self._reply_regexp(user, trig) self._say("Try to match %r against %r (%r)" % (msg, trig, regexp)) # Python's regular expression engine is slow. Try a verbatim # match if this is an atomic trigger. isAtomic = self._is_atomic(trig) isMatch = False if isAtomic: # Only look for exact matches, no sense running atomic triggers # through the regexp engine. if msg == trig: isMatch = True else: # Non-atomic triggers always need the regexp. match = re.match(regexp, msg) if match: # The regexp matched! isMatch = True # Collect the stars. stars = match.groups() if isMatch: self._say("Found a match!") # We found a match, but what if the trigger we've matched # doesn't belong to our topic? Find it! if trig not in self._topics[topic]: # We have to find it. matched = self._find_trigger_by_inheritance(topic, trig) else: # We do have it! matched = self._topics[topic][trig] foundMatch = True matchedTrigger = trig break # Store what trigger they matched on. If their matched trigger is None, # this will be too, which is great. self._users[user]["__lastmatch__"] = matchedTrigger if matched: for nil in [1]: # See if there are any hard redirects. if matched["redirect"]: self._say("Redirecting us to " + matched["redirect"]) redirect = self._process_tags(user, msg, matched["redirect"], stars, thatstars, step, ignore_object_errors) self._say("Pretend user said: " + redirect) reply = self._getreply(user, redirect, step=(step + 1), ignore_object_errors=ignore_object_errors) break # Check the conditionals. for con in sorted(matched["condition"]): halves = re.split(RE.cond_split, matched["condition"][con]) if halves and len(halves) == 2: condition = re.match(RE.cond_parse, halves[0]) if condition: left = condition.group(1) eq = condition.group(2) right = condition.group(3) potreply = halves[1] self._say("Left: " + left + "; eq: " + eq + "; right: " + right + " => " + potreply) # Process tags all around. 
left = self._process_tags(user, msg, left, stars, thatstars, step, ignore_object_errors) right = self._process_tags(user, msg, right, stars, thatstars, step, ignore_object_errors) # Defaults? if len(left) == 0: left = 'undefined' if len(right) == 0: right = 'undefined' self._say("Check if " + left + " " + eq + " " + right) # Validate it. passed = False if eq == 'eq' or eq == '==': if left == right: passed = True elif eq == 'ne' or eq == '!=' or eq == '<>': if left != right: passed = True else: # Gasp, dealing with numbers here... try: left, right = int(left), int(right) if eq == '<': if left < right: passed = True elif eq == '<=': if left <= right: passed = True elif eq == '>': if left > right: passed = True elif eq == '>=': if left >= right: passed = True except: self._warn("Failed to evaluate numeric condition!") # How truthful? if passed: reply = potreply break # Have our reply yet? if len(reply) > 0: break # Process weights in the replies. bucket = [] for rep in sorted(matched["reply"]): text = matched["reply"][rep] weight = 1 match = re.match(RE.weight, text) if match: weight = int(match.group(1)) if weight <= 0: self._warn("Can't have a weight <= 0!") weight = 1 for i in range(0, weight): bucket.append(text) # Get a random reply. reply = random.choice(bucket) break # Still no reply? if not foundMatch: raise NoMatchError elif len(reply) == 0: raise NoReplyError self._say("Reply: " + reply) # Process tags for the BEGIN block. if context == "begin": # BEGIN blocks can only set topics and uservars. The rest happen # later! reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self._say("Setting user's topic to " + match) self._users[user]["topic"] = match reply = reply.replace('{{topic={match}}}'.format(match=match), '') reSet = re.findall(RE.set_tag, reply) for match in reSet: self._say("Set uservar " + str(match[0]) + "=" + str(match[1])) self._users[user][match[0]] = match[1] reply = reply.replace('<set {key}={value}>'.format(key=match[0], value=match[1]), '') else: # Process more tags if not in BEGIN. reply = self._process_tags(user, msg, reply, stars, thatstars, step, ignore_object_errors) return reply def _substitute(self, msg, kind): """Run a kind of substitution on a message.""" # Safety checking. if 'lists' not in self._sorted: raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents") if kind not in self._sorted["lists"]: raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents") # Get the substitution map. subs = None if kind == 'subs': subs = self._subs else: subs = self._person # Make placeholders each time we substitute something. ph = [] i = 0 for pattern in self._sorted["lists"][kind]: result = subs[pattern] # Make a placeholder. ph.append(result) placeholder = "\x00%d\x00" % i i += 1 cache = self._regexc[kind][pattern] msg = re.sub(cache["sub1"], placeholder, msg) msg = re.sub(cache["sub2"], placeholder + r'\1', msg) msg = re.sub(cache["sub3"], r'\1' + placeholder + r'\2', msg) msg = re.sub(cache["sub4"], r'\1' + placeholder, msg) placeholders = re.findall(RE.placeholder, msg) for match in placeholders: i = int(match) result = ph[i] msg = msg.replace('\x00' + match + '\x00', result) # Strip & return. return msg.strip() def _precompile_substitution(self, kind, pattern): """Pre-compile the regexp for a substitution pattern. This will speed up the substitutions that happen at the beginning of the reply fetching process. 
With the default brain, this took the time for _substitute down from 0.08s to 0.02s""" if pattern not in self._regexc[kind]: qm = re.escape(pattern) self._regexc[kind][pattern] = { "qm": qm, "sub1": re.compile(r'^' + qm + r'$'), "sub2": re.compile(r'^' + qm + r'(\W+)'), "sub3": re.compile(r'(\W+)' + qm + r'(\W+)'), "sub4": re.compile(r'(\W+)' + qm + r'$'), } def _do_expand_array(self, array_name, depth=0): """ Do recurrent array expansion, returning a set of keywords. Exception is thrown when there are cyclical dependencies between arrays or if the @array name references an undefined array.""" if depth > self._depth: raise Exception("deep recursion detected") if not array_name in self._arrays: raise Exception("array '%s' not defined" % (array_name)) ret = list(self._arrays[array_name]) for array in self._arrays[array_name]: if array.startswith('@'): ret.remove(array) expanded = self._do_expand_array(array[1:], depth+1) ret.extend(expanded) return set(ret) def _expand_array(self, array_name): """ Expand variables and return a set of keywords. Warning is issued when exceptions occur.""" ret = self._arrays[array_name] if array_name in self._arrays else [] try: ret = self._do_expand_array(array_name) except Exception as e: self._warn("Error expanding array '%s': %s" % (array_name, str(e))) return ret def _reply_regexp(self, user, regexp): """Prepares a trigger for the regular expression engine.""" if regexp in self._regexc["trigger"]: # Already compiled this one! return self._regexc["trigger"][regexp] # If the trigger is simply '*' then the * there needs to become (.*?) # to match the blank string too. regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp) # Simple replacements. regexp = regexp.replace('*', '(.+?)') # Convert * into (.+?) regexp = regexp.replace('#', '(\d+?)') # Convert # into (\d+?) regexp = regexp.replace('_', '(\w+?)') # Convert _ into (\w+?) regexp = re.sub(r'\{weight=\d+\}', '', regexp) # Remove {weight} tags regexp = regexp.replace('<zerowidthstar>', r'(.*?)') # Optionals. optionals = re.findall(RE.optionals, regexp) for match in optionals: parts = match.split("|") new = [] for p in parts: p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p) new.append(p) # If this optional had a star or anything in it, make it # non-matching. pipes = '|'.join(new) pipes = re.sub(re.escape('(.+?)'), '(?:.+?)', pipes) pipes = re.sub(re.escape('(\d+?)'), '(?:\d+?)', pipes) pipes = re.sub(re.escape('([A-Za-z]+?)'), '(?:[A-Za-z]+?)', pipes) regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*', '(?:' + pipes + r'|(?:\\s|\\b))', regexp) # _ wildcards can't match numbers! regexp = re.sub(RE.literal_w, r'[A-Za-z]', regexp) # Filter in arrays. arrays = re.findall(RE.array, regexp) for array in arrays: rep = '' if array in self._arrays: rep = r'(?:' + '|'.join(self._expand_array(array)) + ')' regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp) # Filter in bot variables. bvars = re.findall(RE.bot_tag, regexp) for var in bvars: rep = '' if var in self._bvars: rep = self._strip_nasties(self._bvars[var]) regexp = regexp.replace('<bot {var}>'.format(var=var), rep) # Filter in user variables. uvars = re.findall(RE.get_tag, regexp) for var in uvars: rep = '' if var in self._users[user]: rep = self._strip_nasties(self._users[user][var]) regexp = regexp.replace('<get {var}>'.format(var=var), rep) # Filter in <input> and <reply> tags. This is a slow process, so only # do it if we have to! 
if '<input' in regexp or '<reply' in regexp: for type in ['input', 'reply']: tags = re.findall(r'<' + type + r'([0-9])>', regexp) for index in tags: rep = self._format_message(self._users[user]['__history__'][type][int(index) - 1]) regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep) regexp = regexp.replace('<{type}>'.format(type=type), self._format_message(self._users[user]['__history__'][type][0])) # TODO: the Perl version doesn't do just <input>/<reply> in trigs! return re.compile(r'^' + regexp + r'$') def _precompile_regexp(self, trigger): """Precompile the regex for most triggers. If the trigger is non-atomic, and doesn't include dynamic tags like `<bot>`, `<get>`, `<input>/<reply>` or arrays, it can be precompiled and save time when matching.""" if self._is_atomic(trigger): return # Don't need a regexp for atomic triggers. # Check for dynamic tags. for tag in ["@", "<bot", "<get", "<input", "<reply"]: if tag in trigger: return # Can't precompile this trigger. self._regexc["trigger"][trigger] = self._reply_regexp(None, trigger) def _process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True): """Post process tags in a message.""" stars = [''] stars.extend(st) botstars = [''] botstars.extend(bst) if len(stars) == 1: stars.append("undefined") if len(botstars) == 1: botstars.append("undefined") # Tag shortcuts. reply = reply.replace('<person>', '{person}<star>{/person}') reply = reply.replace('<@>', '{@<star>}') reply = reply.replace('<formal>', '{formal}<star>{/formal}') reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}') reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}') reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}') # Weight and <star> tags. reply = re.sub(RE.weight, '', reply) # Leftover {weight}s if len(stars) > 0: reply = reply.replace('<star>', stars[1]) reStars = re.findall(RE.star_tags, reply) for match in reStars: if int(match) < len(stars): reply = reply.replace('<star{match}>'.format(match=match), stars[int(match)]) if len(botstars) > 0: reply = reply.replace('<botstar>', botstars[1]) reStars = re.findall(RE.botstars, reply) for match in reStars: if int(match) < len(botstars): reply = reply.replace('<botstar{match}>'.format(match=match), botstars[int(match)]) # <input> and <reply> reply = reply.replace('<input>', self._users[user]['__history__']['input'][0]) reply = reply.replace('<reply>', self._users[user]['__history__']['reply'][0]) reInput = re.findall(RE.input_tags, reply) for match in reInput: reply = reply.replace('<input{match}>'.format(match=match), self._users[user]['__history__']['input'][int(match) - 1]) reReply = re.findall(RE.reply_tags, reply) for match in reReply: reply = reply.replace('<reply{match}>'.format(match=match), self._users[user]['__history__']['reply'][int(match) - 1]) # <id> and escape codes. reply = reply.replace('<id>', user) reply = reply.replace('\\s', ' ') reply = reply.replace('\\n', "\n") reply = reply.replace('\\#', '#') # Random bits. reRandom = re.findall(RE.random_tags, reply) for match in reRandom: output = '' if '|' in match: output = random.choice(match.split('|')) else: output = random.choice(match.split(' ')) reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output) # Person Substitutions and String Formatting. 
for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']: matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply) for match in matcher: output = None if item == 'person': # Person substitutions. output = self._substitute(match, "person") else: output = self._string_format(match, item) reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output) # Handle all variable-related tags with an iterative regex approach, # to allow for nesting of tags in arbitrary ways (think <set a=<get b>>) # Dummy out the <call> tags first, because we don't handle them right # here. reply = reply.replace("<call>", "{__call__}") reply = reply.replace("</call>", "{/__call__}") while True: # This regex will match a <tag> which contains no other tag inside # it, i.e. in the case of <set a=<get b>> it will match <get b> but # not the <set> tag, on the first pass. The second pass will get the # <set> tag, and so on. match = re.search(RE.tag_search, reply) if not match: break # No remaining tags! match = match.group(1) parts = match.split(" ", 1) tag = parts[0].lower() data = parts[1] if len(parts) > 1 else "" insert = "" # Result of the tag evaluation # Handle the tags. if tag == "bot" or tag == "env": # <bot> and <env> tags are similar. target = self._bvars if tag == "bot" else self._gvars if "=" in data: # Setting a bot/env variable. parts = data.split("=") self._say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1])) target[parts[0]] = parts[1] else: # Getting a bot/env variable. insert = target.get(data, "undefined") elif tag == "set": # <set> user vars. parts = data.split("=") self._say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1])) self._users[user][parts[0]] = parts[1] elif tag in ["add", "sub", "mult", "div"]: # Math operator tags. parts = data.split("=") var = parts[0] value = parts[1] # Sanity check the value. try: value = int(value) if var not in self._users[user]: # Initialize it. self._users[user][var] = 0 except: insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value) # Attempt the operation. try: orig = int(self._users[user][var]) new = 0 if tag == "add": new = orig + value elif tag == "sub": new = orig - value elif tag == "mult": new = orig * value elif tag == "div": new = orig / value self._users[user][var] = new except: insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, self._users[user][var]) elif tag == "get": insert = self._users[user].get(data, "undefined") else: # Unrecognized tag. insert = "\x00{}\x01".format(match) reply = reply.replace("<{}>".format(match), insert) # Restore unrecognized tags. reply = reply.replace("\x00", "<").replace("\x01", ">") # Streaming code. DEPRECATED! if '{!' in reply: self._warn("Use of the {!...} tag is deprecated and not supported here.") # Topic setter. reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self._say("Setting user's topic to " + match) self._users[user]["topic"] = match reply = reply.replace('{{topic={match}}}'.format(match=match), '') # Inline redirecter. reRedir = re.findall(RE.redir_tag, reply) for match in reRedir: self._say("Redirect to " + match) at = match.strip() subreply = self._getreply(user, at, step=(depth + 1)) reply = reply.replace('{{@{match}}}'.format(match=match), subreply) # Object caller. 
reply = reply.replace("{__call__}", "<call>") reply = reply.replace("{/__call__}", "</call>") reCall = re.findall(r'<call>(.+?)</call>', reply) for match in reCall: parts = re.split(RE.ws, match) output = '' obj = parts[0] args = [] if len(parts) > 1: args = parts[1:] # Do we know this object? if obj in self._objlangs: # We do, but do we have a handler for that language? lang = self._objlangs[obj] if lang in self._handlers: # We do. try: output = self._handlers[lang].call(self, obj, user, args) except python.PythonObjectError as e: self._warn(str(e)) if not ignore_object_errors: raise ObjectError(str(e)) output = RS_ERR_OBJECT else: if not ignore_object_errors: raise ObjectError(RS_ERR_OBJECT_HANDLER) output = RS_ERR_OBJECT_HANDLER else: if not ignore_object_errors: raise ObjectError(RS_ERR_OBJECT_MISSING) output = RS_ERR_OBJECT_MISSING reply = reply.replace('<call>{match}</call>'.format(match=match), output) return reply def _string_format(self, msg, method): """Format a string (upper, lower, formal, sentence).""" if method == "uppercase": return msg.upper() elif method == "lowercase": return msg.lower() elif method == "sentence": return msg.capitalize() elif method == "formal": return string.capwords(msg) ############################################################################ # Topic inheritance Utility Methods # ############################################################################ def _topic_triggers(self, topic, triglvl, depth=0, inheritance=0, inherited=False): """Recursively scan a topic and return a list of all triggers.""" # Break if we're in too deep. if depth > self._depth: self._warn("Deep recursion while scanning topic inheritance") # Important info about the depth vs inheritance params to this function: # depth increments by 1 each time this function recursively calls itself. # inheritance increments by 1 only when this topic inherits another # topic. # # This way, '> topic alpha includes beta inherits gamma' will have this # effect: # alpha and beta's triggers are combined together into one matching # pool, and then those triggers have higher matching priority than # gamma's. # # The inherited option is True if this is a recursive call, from a topic # that inherits other topics. This forces the {inherits} tag to be added # to the triggers. This only applies when the top topic 'includes' # another topic. self._say("\tCollecting trigger list for topic " + topic + "(depth=" + str(depth) + "; inheritance=" + str(inheritance) + "; " + "inherited=" + str(inherited) + ")") # topic: the name of the topic # triglvl: reference to self._topics or self._thats # depth: starts at 0 and ++'s with each recursion # Collect an array of triggers to return. triggers = [] # Get those that exist in this topic directly. inThisTopic = [] if topic in triglvl: for trigger in triglvl[topic]: inThisTopic.append(trigger) # Does this topic include others? if topic in self._includes: # Check every included topic. for includes in self._includes[topic]: self._say("\t\tTopic " + topic + " includes " + includes) triggers.extend(self._topic_triggers(includes, triglvl, (depth + 1), inheritance, True)) # Does this topic inherit others? if topic in self._lineage: # Check every inherited topic. for inherits in self._lineage[topic]: self._say("\t\tTopic " + topic + " inherits " + inherits) triggers.extend(self._topic_triggers(inherits, triglvl, (depth + 1), (inheritance + 1), False)) # Collect the triggers for *this* topic. 
If this topic inherits any # other topics, it means that this topic's triggers have higher # priority than those in any inherited topics. Enforce this with an # {inherits} tag. if topic in self._lineage or inherited: for trigger in inThisTopic: self._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger) triggers.append("{inherits=" + str(inheritance) + "}" + trigger) else: triggers.extend(inThisTopic) return triggers def _find_trigger_by_inheritance(self, topic, trig, depth=0): """Locate the replies for a trigger in an inherited/included topic.""" # This sub was called because the user matched a trigger from the sorted # array, but the trigger doesn't belong to their topic, and is instead # in an inherited or included topic. This is to search for it. # Prevent recursion. if depth > self._depth: self._warn("Deep recursion detected while following an inheritance trail!") return None # inheritance is more important than inclusion: triggers in one topic can # override those in an inherited topic. if topic in self._lineage: for inherits in sorted(self._lineage[topic]): # See if this inherited topic has our trigger. if trig in self._topics[inherits]: # Great! return self._topics[inherits][trig] else: # Check what THAT topic inherits from. match = self._find_trigger_by_inheritance( inherits, trig, (depth + 1) ) if match: # Found it! return match # See if this topic has an "includes" if topic in self._includes: for includes in sorted(self._includes[topic]): # See if this included topic has our trigger. if trig in self._topics[includes]: # Great! return self._topics[includes][trig] else: # Check what THAT topic inherits from. match = self._find_trigger_by_inheritance( includes, trig, (depth + 1) ) if match: # Found it! return match # Don't know what else to do! return None def _get_topic_tree(self, topic, depth=0): """Given one topic, get the list of all included/inherited topics.""" # Break if we're in too deep. if depth > self._depth: self._warn("Deep recursion while scanning topic trees!") return [] # Collect an array of all topics. topics = [topic] # Does this topic include others? if topic in self._includes: # Try each of these. for includes in sorted(self._includes[topic]): topics.extend(self._get_topic_tree(includes, depth + 1)) # Does this topic inherit others? if topic in self._lineage: # Try each of these. for inherits in sorted(self._lineage[topic]): topics.extend(self._get_topic_tree(inherits, depth + 1)) return topics ############################################################################ # Miscellaneous Private Methods # ############################################################################ def _is_atomic(self, trigger): """Determine if a trigger is atomic or not.""" # Atomic triggers don't contain any wildcards or parenthesis or anything # of the sort. We don't need to test the full character set, just left # brackets will do. 
        special = ['*', '#', '_', '(', '[', '<', '@']
        for char in special:
            if char in trigger:
                return False
        return True

    def _word_count(self, trigger, all=False):
        """Count the words that aren't wildcards in a trigger."""
        words = []
        if all:
            words = re.split(RE.ws, trigger)
        else:
            words = re.split(RE.wilds, trigger)

        wc = 0  # Word count
        for word in words:
            if len(word) > 0:
                wc += 1

        return wc

    def _strip_nasties(self, s):
        """Formats a string for ASCII regex matching."""
        s = re.sub(RE.nasties, '', s)
        return s

    def _dump(self):
        """For debugging, dump the entire data structure."""
        pp = pprint.PrettyPrinter(indent=4)

        print("=== Variables ===")
        print("-- Globals --")
        pp.pprint(self._gvars)
        print("-- Bot vars --")
        pp.pprint(self._bvars)
        print("-- Substitutions --")
        pp.pprint(self._subs)
        print("-- Person Substitutions --")
        pp.pprint(self._person)
        print("-- Arrays --")
        pp.pprint(self._arrays)

        print("=== Topic Structure ===")
        pp.pprint(self._topics)
        print("=== %Previous Structure ===")
        pp.pprint(self._thats)

        print("=== Includes ===")
        pp.pprint(self._includes)

        print("=== Inherits ===")
        pp.pprint(self._lineage)

        print("=== Sort Buffer ===")
        pp.pprint(self._sorted)

        print("=== Syntax Tree ===")
        pp.pprint(self._syntax)


################################################################################
# Exception Classes                                                            #
################################################################################

class RiveScriptError(Exception):
    """RiveScript base exception class"""
    def __init__(self, error_message=None):
        super(RiveScriptError, self).__init__(error_message)
        self.error_message = error_message


class NoMatchError(RiveScriptError):
    """No reply could be matched"""
    def __init__(self):
        super(NoMatchError, self).__init__(RS_ERR_MATCH)


class NoReplyError(RiveScriptError):
    """No reply could be found"""
    def __init__(self):
        super(NoReplyError, self).__init__(RS_ERR_REPLY)


class ObjectError(RiveScriptError):
    """An error occurred when executing a Python object"""
    def __init__(self, error_message=RS_ERR_OBJECT):
        super(ObjectError, self).__init__(error_message)


class DeepRecursionError(RiveScriptError):
    """Prevented an infinite loop / deep recursion, unable to retrieve a reply for this message"""
    def __init__(self):
        super(DeepRecursionError, self).__init__(RS_ERR_DEEP_RECURSION)


class NoDefaultRandomTopicError(Exception):
    """No default topic 'random' could be found, critical error"""
    pass


class RepliesNotSortedError(Exception):
    """sort_replies() was not called after the RiveScript documents were loaded, critical error"""
    pass


################################################################################
# Interactive Mode                                                             #
################################################################################

if __name__ == "__main__":
    from interactive import interactive_mode
    interactive_mode()

# vim:expandtab
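# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the library itself).
# It assumes the main class defined in this module is named ``RiveScript`` and
# that a ``load_directory()`` loader exists as in the public rivescript-python
# API; the "./brain" path and the user id are made-up example values.
def _example_usage():
    bot = RiveScript()
    bot.load_directory("./brain")   # load the *.rive documents (example path)
    bot.sort_replies()              # required first, otherwise RepliesNotSortedError
    print(bot.reply("localuser", "Hello, bot!"))
    # Debugging helpers defined above:
    print(bot.last_match("localuser"))                    # raw trigger text that matched
    print(bot.trigger_info(bot.last_match("localuser")))  # topic/file/line of that trigger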
[ "jaykumar.oza@jeppesen.com" ]
jaykumar.oza@jeppesen.com
5827494e28c8324f3fe91b182ec76744a95c029b
aef02ad0a2b36e763af4b6de84399fcbfb788faf
/LPHW/ex6.py
4d116c7b7f3dba9e3e1cb77c6d4b06c35e1b0fbb
[]
no_license
kanishkd4/Python_Learning_code
98cf74cbbeef34f594804b515438f24775feddbf
62a6b1745f4c8624ed4207ab38c83f0a7ead99c9
refs/heads/master
2020-04-15T12:44:52.828258
2018-04-05T09:56:35
2018-04-05T09:56:35
61,795,436
0
1
null
null
null
null
UTF-8
Python
false
false
410
py
x = "there are %d types of people." %10 binary = "binary" do_not = "don't" y = "those who know %s and those who %s." % (binary, do_not) print x print y print "I said %r" % x print "I also said: '%s'" % y hilarious = False joke_evaluation = "Isn't that joke so funny?! %r" print joke_evaluation % hilarious w = "this is the left side of.." e = "a string with a right side." print w + e
[ "noreply@github.com" ]
kanishkd4.noreply@github.com
a6d6d50572836ba4614154dce36cf5e2c21f9c51
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02679/s613915096.py
fec86a56bc93ae2efcf62264eb570f7a448a4ed4
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
667
py
import math, collections

N = int(input())
AB = [[int(_) for _ in input().split()] for _ in range(N)]
mod = 10**9 + 7
C = collections.Counter()
gcd = math.gcd
a0 = 0
for a, b in AB:
    if a == b == 0:
        a0 += 1
    elif a == 0:
        C[0, -1] += 1
    else:
        g = gcd(a, b)
        a //= g
        b //= g
        if a < 0:
            a *= -1
            b *= -1
        C[a, b] += 1

ans = 1
for a, b in C:
    if C[b, -a]:
        continue
    elif C[-b, a]:
        ans *= (pow(2, C[a, b], mod) + pow(2, C[-b, a], mod) - 1) % mod
        ans %= mod
    else:
        ans *= pow(2, C[a, b], mod)
        ans %= mod

ans += a0 - 1
ans %= mod
print(ans)
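# Added for illustration only (not part of the submitted solution): a tiny
# brute-force counter for sanity-checking the closed-form answer above on
# small hand-made inputs. Two pairs (a1, b1) and (a2, b2) conflict exactly
# when a1*a2 + b1*b2 == 0, and we count the non-empty subsets that contain
# no conflicting pair. The function name is made up for this sketch.
def _brute_force_count(pairs, mod=10**9 + 7):
    from itertools import combinations
    total = 0
    for mask in range(1, 1 << len(pairs)):
        chosen = [pairs[i] for i in range(len(pairs)) if mask >> i & 1]
        if all(a1 * a2 + b1 * b2 != 0
               for (a1, b1), (a2, b2) in combinations(chosen, 2)):
            total += 1
    return total % mod

# e.g. _brute_force_count([(1, 2), (-1, 1), (2, -1)]) for a quick comparison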
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
8951afe2b51d654fd469ed7fd936879e3610aa30
35894bca47cf0c9a51a05caf7b56a0d69c05b033
/04_subrotinas_numpy/25_fibonacci.py
1067f8b8abc1c15bc44a985e9b4f892471d34f46
[]
no_license
alcebytes/Phyton-Estudo
0a2d33f5f3e668e6ab2f99e5e4499545a3bc1273
a3f9a0b3e0a91d71a9359480d6ec17e692572694
refs/heads/master
2023-01-14T17:24:16.486956
2020-10-08T02:02:02
2020-10-08T02:02:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
942
py
import time as time

num_iter = int(input("Enter the maximum number of terms for the Fibonacci sequence = "))

tempo_inicio = time.time()
#tempo_inicio_CPU = time.clock()  # OBSOLETE
tempo_inicio_CPU = time.process_time()
tempo_inicio_CPU_2 = time.perf_counter()

# f(0)
f = []
f.append(0)
print(f)

# f(1)
f.append(1)
print(f)

"""
f(n + 2) = f(n) + f(n + 1)
for n in range(0, num_iter - 2, 1)
    f.append(f[n] + f[n + 1])
"""
n = 0
while n <= num_iter - 3:
    f.append(f[n] + f[n + 1])
    n = n + 1
print(f)

# Print the last term of f
print(f[-1])
# Another way:
print(f[len(f) - 1])

tempo_fim = time.time() - tempo_inicio
print("The application execution time is", tempo_fim, "s")

tempo_fim_CPU_2 = time.perf_counter() - tempo_inicio_CPU_2
print("The CPU execution time is", tempo_fim_CPU_2)

tempo_fim_CPU = time.process_time() - tempo_inicio_CPU
print("The CPU execution time is", tempo_fim_CPU)
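# Added illustration (not part of the original exercise): the same recurrence
# packaged as a small helper, mirroring the for-loop sketched in the
# triple-quoted block above. The function name is made up for this sketch.
def fibonacci_list(n_terms):
    """Return the first n_terms Fibonacci numbers as a list (n_terms >= 2)."""
    seq = [0, 1]
    for i in range(n_terms - 2):
        seq.append(seq[i] + seq[i + 1])
    return seq

# e.g. fibonacci_list(8) -> [0, 1, 1, 2, 3, 5, 8, 13]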
[ "x_kata@hotmail.com" ]
x_kata@hotmail.com
0d5612bcf83e90343b35f237bfbb6536fe5a32fc
99dbc0388a1396d9d0f636ba6ad4e7ce6b646637
/app/frontend/views.py
4ae6ddaba7060c91bb981e1d735c5289f1895cb6
[]
no_license
thefedoration/tracker-widgets
9469f27a023cc6c4f3cb1161f39452deb58ce282
47bd08030a8ced3b6ddf2c48cc41f8f0b705aa79
refs/heads/master
2021-06-22T04:44:47.565674
2017-05-15T13:32:27
2017-05-15T13:32:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
448
py
from django.shortcuts import render, redirect


# serves up frontend bundle
def index(request):
    # if user is logged in, fire up the frontend app
    if request.user.is_authenticated():
        return render(request, 'frontend/index.html')

    # otherwise not logged in, send them to login screen
    path = request.path
    if path[0] == '/':
        path = path[1:]
    return redirect('/accounts/login/?next=%s' % path)
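# Hypothetical wiring (added for illustration; not taken from this repository):
# a view like index() is usually mapped as a catch-all so client-side routes
# fall through to the frontend bundle. The module path and URL pattern below
# are assumptions for an older (pre-2.0) Django URLconf.
#
# # app/urls.py
# from django.conf.urls import url
# from frontend import views
#
# urlpatterns = [
#     url(r'^', views.index, name='index'),
# ]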
[ "fedor@pymetrics.com" ]
fedor@pymetrics.com
ebb0ee33e3d8bde61a40935c59eb8b4e2c250d40
9e8a90e8c9bc90d9ea34b79e7553a7ba2fd4e6bf
/models/networkSwitch.py
bded10111bfacc3b5285280e628f5a076988367a
[]
no_license
lwyanne/CPAE
ddae51affcca8db0266bf66f091f165d95bd7837
e155dfecf3f38ed7121a8a446dc4eeb4067b7e46
refs/heads/master
2023-07-28T13:12:22.372796
2021-08-27T15:09:58
2021-08-27T15:09:58
353,564,787
1
0
null
null
null
null
UTF-8
Python
false
false
185,346
py
from __future__ import print_function from torch.nn.utils.rnn import pack_padded_sequence import inspect import os, sys import logging # add the top-level directory of this project to sys.path so that we can import modules without error from models.loss import Chimera_loss, record_loss, mask_where, mapping_where, mask_mapping_M sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) logger = logging.getLogger("cpc") import numpy as np import torch import torch.nn as nn import math from models.utils import * from models.datareader import * from sklearn.metrics import roc_auc_score from fastai.callbacks import * from fastai.tabular import * from fastai import tabular from models.optimizer import ScheduledOptim from sklearn.metrics import cohen_kappa_score as kappa, mean_absolute_error as mad, roc_auc_score as auroc torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def auroc_score(input, target): input, target = input.cpu().numpy()[:, 1], target.cpu().numpy() return roc_auc_score(target, input) class AUROC(tabular.Callback): """ This is for output AUROC as a metric in fastai training process. This has a small but acceptable issue. #TODO """ _order = -20 # Needs to run before the recorder def __init__(self, learn, **kwargs): self.learn = learn def on_train_begin(self, **kwargs): self.learn.recorder.add_metric_names(['AUROC']) def on_epoch_begin(self, **kwargs): self.output, self.target = [], [] def on_batch_end(self, last_target, last_output, train, **kwargs): if not train: try: self.output.append(last_output) except AttributeError: self.output = [] try: self.target.append(last_target) except AttributeError: self.target = [] def on_epoch_end(self, last_metrics, **kwargs): if len(self.output) > 0: output = torch.cat(self.output).cpu() target = torch.cat(self.target).cpu() preds = F.softmax(output, dim=1) metric = roc_auc_score(target, preds, multi_class='ovo') return add_metrics(last_metrics, [metric]) class biAUROC(tabular.Callback): """ This is for output AUROC as a metric in fastai training process. This has a small but acceptable issue. 
#TODO """ _order = -20 # Needs to run before the recorder def __init__(self, learn, **kwargs): self.learn = learn def on_train_begin(self, **kwargs): self.learn.recorder.add_metric_names(['AUROC']) def on_epoch_begin(self, **kwargs): self.output, self.target = [], [] def on_batch_end(self, last_target, last_output, train, **kwargs): if not train: try: self.output.append(last_output) except AttributeError: self.output = [] try: self.target.append(last_target) except AttributeError: self.target = [] def on_epoch_end(self, last_metrics, **kwargs): if len(self.output) > 0: output = torch.cat(self.output).cpu() target = torch.cat(self.target).cpu() preds = F.softmax(output, dim=1) metric = auroc_score(preds, target) return add_metrics(last_metrics, [metric]) class MAD(tabular.Callback): _order = -20 def __init__(self, learn, **kwargs): self.learn = learn def on_train_begin(self, **kwargs): self.learn.recorder.add_metric_names(['MAD']) def on_epoch_begin(self, **kwargs): self.output, self.target = [], [] def on_batch_end(self, last_target, last_output, train, **kwargs): if not train: try: self.output.append(last_output) except AttributeError: self.output = [] try: self.target.append(last_target) except AttributeError: self.target = [] def on_epoch_end(self, last_metrics, **kwargs): if len(self.output) > 0: output = torch.cat(self.output) target = torch.cat(self.target) preds = torch.argmax(F.softmax(output, dim=1), dim=1, keepdim=False) metric = mean_absolute_error(preds, target) return add_metrics(last_metrics, [metric]) class CPclassifier(nn.Module): """ Combine the CPC and MLP, to make it possible to fine-tune on the downstream task Note: Fine-tune is implemented via fastai learner. """ def __init__(self, CPmodel, MLP, freeze=False): super(CPclassifier, self).__init__() self.CPmodel = CPmodel self.MLP = MLP if freeze: for param in self.CPmodel.parameters(): param.requires_grad = False def forward(self, x): if 'CP' in self.CPmodel.__class__.__name__ or 'AE_LSTM' in self.CPmodel.__class__.__name__: x = self.CPmodel.get_reg_out(x) else: x = self.CPmodel.get_encode(x) x = self.MLP(x) return x class CPAE1_S(nn.Module): def __init__( self, embedded_features, gru_out, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], time_step=30, n_points=192, n_features=76, ): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CPAE1_S, self).__init__() self.n_features = n_features # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.decode_channels = self.channels[::-1] self.decoder = nn.ModuleList( [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! 
self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.GRU( self.embedded_features, gru_out, num_layers=1, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) # def relevant_points(n): def add_fcs(self, hidden=None): """ This function will add FC layers to the embedded features and then compare the features after FC transformations. See NOTION for illustration. :param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set as [n_embedded_features,n_embedded_features] :return: None """ n = self.embedded_features if hidden is None: self.fcs = nn.Sequential( nn.Linear(n, n), nn.ReLU(inplace=True), nn.Linear(n, n) ) else: if type(hidden) != list: hidden = list(hidden) layers = [] for i, j in zip([n] + hidden, hidden + [n]): layers.append(nn.Linear(i, j)) layers.append(nn.ReLU(inplace=True)) layers.pop() # We do not want Relu at the last layer self.fcs = nn.Sequential(*layers).to(device) self.beforeNCE = True def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def init_hidden(self, batch_size, use_gpu=True): return torch.zeros(1, batch_size, self.gru_out).to(device) def encode(self, x): for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) return x # output shape: (N,n_features=8,n_points=192) def decode(self, x): for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192) x = self.decoder[i](x) return x # output shape: (N,n_points=192,n_features=76) def recurrent(self, zt): ''' GRU RNN ''' batch_size = self.batch_size # output shape: (N, n_frames, features,1) hidden = self.init_hidden(batch_size) output, hidden = self.gru(zt, hidden) return output, hidden def gru_to_ct(self, zt): ''' return the last time_step of GRU result ''' output, hidden = self.recurrent(zt) c_t = output[:, -1, :].view(self.batch_size, self.gru_out) return c_t, hidden def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. 
* correct.item() / self.batch_size return nce, accuracy def get_reg_out(self, x): self.batch_size = x.shape[0] x = x.squeeze(1).transpose(1, 2) self.n_frames = x.shape[2] z = self.encode(x).transpose(1, 2) z = self.linear(z) forward_seq = z[:, :, :] c_t, hidden = self.gru_to_ct(forward_seq) return c_t def forward(self, x): x = x.transpose(1, 2) z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1]) d = self.decode(z.transpose(1, 2)) self.batch_size = x.shape[0] self.n_frames = x.shape[2] # make change to here # t_samples should at least start from 30 t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to( device) # encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to( device) # e.g. # size z = self.linear(z) for i in np.arange(1, self.time_step + 1): encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :] forward_seq = z[:, :int(t_samples) + 1, :] c_t, hidden = self.gru_to_ct(forward_seq) pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) if self.beforeNCE: # ADD FC layers pred = self.fcs(pred) encode_samples = self.fcs(encode_samples) # d = self.decode(pred.transpose(1,2).transpose(0,2)) nce, accuracy = self.compute_nce(encode_samples, pred) return d, nce, accuracy class CPAE1_NO_BN(nn.Module): def __init__( self, embedded_features, gru_out, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], time_step=30, n_points=192, n_features=76, ): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CPAE1_NO_BN, self).__init__() self.n_features = n_features # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), # nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.decode_channels = self.channels[::-1] self.decoder = nn.ModuleList( [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.GRU( self.embedded_features, gru_out, num_layers=1, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) # def relevant_points(n): def add_fcs(self, hidden=None): """ This function will add FC layers to the embedded features and then compare the features after FC transformations. See NOTION for illustration. 
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set as [n_embedded_features,n_embedded_features] :return: None """ n = self.embedded_features if hidden is None: self.fcs = nn.Sequential( nn.Linear(n, n), nn.ReLU(inplace=True), nn.Linear(n, n) ) else: if type(hidden) != list: hidden = list(hidden) layers = [] for i, j in zip([n] + hidden, hidden + [n]): layers.append(nn.Linear(i, j)) layers.append(nn.ReLU(inplace=True)) layers.pop() # We do not want Relu at the last layer self.fcs = nn.Sequential(*layers).to(device) self.beforeNCE = True def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def init_hidden(self, batch_size, use_gpu=True): return torch.zeros(1, batch_size, self.gru_out).to(device) def encode(self, x): for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) return x # output shape: (N,n_features=8,n_points=192) def decode(self, x): for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192) x = self.decoder[i](x) return x # output shape: (N,n_points=192,n_features=76) def recurrent(self, zt): ''' GRU RNN ''' batch_size = self.batch_size # output shape: (N, n_frames, features,1) hidden = self.init_hidden(batch_size) output, hidden = self.gru(zt, hidden) return output, hidden def gru_to_ct(self, zt): ''' return the last time_step of GRU result ''' output, hidden = self.recurrent(zt) c_t = output[:, -1, :].view(self.batch_size, self.gru_out) return c_t, hidden def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy def get_reg_out(self, x): self.batch_size = x.shape[0] x = x.squeeze(1).transpose(1, 2) self.n_frames = x.shape[2] z = self.encode(x).transpose(1, 2) z = self.linear(z) forward_seq = z[:, :, :] c_t, hidden = self.gru_to_ct(forward_seq) return c_t def forward(self, x): x = x.transpose(1, 2) z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1]) d = self.decode(z.transpose(1, 2)) self.batch_size = x.shape[0] self.n_frames = x.shape[2] # make change to here # t_samples should at least start from 30 t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to( device) # encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to( device) # e.g. 
# size z = self.linear(z) for i in np.arange(1, self.time_step + 1): encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :] forward_seq = z[:, :int(t_samples) + 1, :] c_t, hidden = self.gru_to_ct(forward_seq) pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) # d = self.decode(pred.transpose(1,2).transpose(0,2)) nce, accuracy = self.compute_nce(encode_samples, pred) return d, nce, accuracy class CPAE1_LSTM(nn.Module): def __init__( self, embedded_features, gru_out, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], time_step=30, n_points=192, n_features=76, ): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CPAE1_LSTM, self).__init__() self.n_features = n_features # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.decode_channels = self.channels[::-1] self.decoder = nn.ModuleList( [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.LSTM( self.embedded_features, hidden_size=gru_out, num_layers=2, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) # def relevant_points(n): def add_fcs(self, hidden=None): """ This function will add FC layers to the embedded features and then compare the features after FC transformations. See NOTION for illustration. :param hidden: a list of hidden sizes per layer. For example:[100,100]. 
If no value is passed, it will be set as [n_embedded_features,n_embedded_features] :return: None """ n = self.embedded_features if hidden is None: self.fcs = nn.Sequential( nn.Linear(n, n), nn.ReLU(inplace=True), nn.Linear(n, n) ) else: if type(hidden) != list: hidden = list(hidden) layers = [] for i, j in zip([n] + hidden, hidden + [n]): layers.append(nn.Linear(i, j)) layers.append(nn.ReLU(inplace=True)) layers.pop() # We do not want Relu at the last layer self.fcs = nn.Sequential(*layers).to(device) self.beforeNCE = True def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def init_hidden(self, batch_size, use_gpu=True): return torch.zeros(1, batch_size, self.gru_out).to(device) def encode(self, x): for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) return x # output shape: (N,n_features=8,n_points=192) def decode(self, x): for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192) x = self.decoder[i](x) return x # output shape: (N,n_points=192,n_features=76) def recurrent(self, zt): ''' GRU RNN ''' batch_size = self.batch_size # output shape: (N, n_frames, features,1) hidden = self.init_hidden(batch_size) hidden = torch.cat((hidden, hidden), dim=0) hidden = (hidden, hidden) output, hidden = self.gru(zt, hidden) return output, hidden def gru_to_ct(self, zt): ''' return the last time_step of GRU result ''' output, hidden = self.recurrent(zt) c_t = output[:, -1, :].view(self.batch_size, self.gru_out) return c_t, hidden def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy def get_reg_out(self, x): self.batch_size = x.shape[0] x = x.squeeze(1).transpose(1, 2) self.n_frames = x.shape[2] z = self.encode(x).transpose(1, 2) z = self.linear(z) forward_seq = z[:, :, :] c_t, hidden = self.gru_to_ct(forward_seq) return c_t def forward(self, x): x = x.transpose(1, 2) z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1]) d = self.decode(z.transpose(1, 2)) self.batch_size = x.shape[0] self.n_frames = x.shape[2] # make change to here # t_samples should at least start from 30 t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to( device) # encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to( device) # e.g. 
# size z = self.linear(z) for i in np.arange(1, self.time_step + 1): encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :] forward_seq = z[:, :int(t_samples) + 1, :] c_t, hidden = self.gru_to_ct(forward_seq) pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) # d = self.decode(pred.transpose(1,2).transpose(0,2)) nce, accuracy = self.compute_nce(encode_samples, pred) return d, nce, accuracy class CPAE1_LSTM_NO_BN(nn.Module): def __init__( self, embedded_features, gru_out, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], time_step=30, n_points=192, n_features=76, ): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CPAE1_LSTM_NO_BN, self).__init__() self.n_features = n_features # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), # nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.decode_channels = self.channels[::-1] self.decoder = nn.ModuleList( [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.LSTM( self.embedded_features, hidden_size=gru_out, num_layers=2, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) # def relevant_points(n): def add_fcs(self, hidden=None): """ This function will add FC layers to the embedded features and then compare the features after FC transformations. See NOTION for illustration. :param hidden: a list of hidden sizes per layer. For example:[100,100]. 
If no value is passed, it will be set as [n_embedded_features,n_embedded_features] :return: None """ n = self.embedded_features if hidden is None: self.fcs = nn.Sequential( nn.Linear(n, n), nn.ReLU(inplace=True), nn.Linear(n, n) ) else: if type(hidden) != list: hidden = list(hidden) layers = [] for i, j in zip([n] + hidden, hidden + [n]): layers.append(nn.Linear(i, j)) layers.append(nn.ReLU(inplace=True)) layers.pop() # We do not want Relu at the last layer self.fcs = nn.Sequential(*layers).to(device) self.beforeNCE = True def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def init_hidden(self, batch_size, use_gpu=True): return torch.zeros(1, batch_size, self.gru_out).to(device) def encode(self, x): for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) return x # output shape: (N,n_features=8,n_points=192) def decode(self, x): for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192) x = self.decoder[i](x) return x # output shape: (N,n_points=192,n_features=76) def recurrent(self, zt): ''' GRU RNN ''' batch_size = self.batch_size # output shape: (N, n_frames, features,1) hidden = self.init_hidden(batch_size) hidden = torch.cat((hidden, hidden), dim=0) hidden = (hidden, hidden) output, hidden = self.gru(zt, hidden) return output, hidden def gru_to_ct(self, zt): ''' return the last time_step of GRU result ''' output, hidden = self.recurrent(zt) c_t = output[:, -1, :].view(self.batch_size, self.gru_out) return c_t, hidden def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy def get_reg_out(self, x): self.batch_size = x.shape[0] x = x.squeeze(1).transpose(1, 2) self.n_frames = x.shape[2] z = self.encode(x).transpose(1, 2) z = self.linear(z) forward_seq = z[:, :, :] c_t, hidden = self.gru_to_ct(forward_seq) return c_t def forward(self, x): x = x.transpose(1, 2) z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1]) d = self.decode(z.transpose(1, 2)) self.batch_size = x.shape[0] self.n_frames = x.shape[2] # make change to here # t_samples should at least start from 30 t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to( device) # encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to( device) # e.g. 
# size z = self.linear(z) for i in np.arange(1, self.time_step + 1): encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :] forward_seq = z[:, :int(t_samples) + 1, :] c_t, hidden = self.gru_to_ct(forward_seq) pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) # d = self.decode(pred.transpose(1,2).transpose(0,2)) nce, accuracy = self.compute_nce(encode_samples, pred) return d, nce, accuracy class CPAE2_S(CPAE1_S): """ Use conv1dtranspose in CPAE1 """ def __init__( self, embedded_features, gru_out, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], time_step=30, n_points=192, n_features=76, ): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CPAE2_S, self).__init__() # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.enSequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) self.deSequential = lambda inChannel, outChannel: nn.Sequential( nn.ConvTranspose1d(inChannel, outChannel, kernel_size=3, padding=1), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.enSequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ) self.decode_channels = self.channels[::-1] self.decoder = nn.ModuleList( [self.deSequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] ) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.GRU( self.embedded_features, gru_out, num_layers=1, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) # def relevant_points(n): # deconvolution nn. unMaxPool class CPAE3_S(CPAE2_S): """ Use conv1dtranspose in CPAE1 & Maxpooling & unpooing """ def __init__( self, embedded_features, gru_out, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], time_step=30, n_points=192, n_features=76, ): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CPAE3_S, self).__init__() self.n_features = n_features # . If is int, uses the same padding in all boundaries. 
# If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes self.decode_channels = self.channels[::-1] encodelist = [] count = 0 for i, j in zip(self.channels[:-1], self.channels[1:]): encodelist.append(nn.ReflectionPad1d((0, 1))) encodelist.append(nn.Conv1d(i, j, kernel_size=2, padding=0)) encodelist.append(nn.BatchNorm1d(j)) encodelist.append(nn.ReLU(inplace=True)) if count < 2: encodelist.append(nn.ReflectionPad1d((0, 1))) encodelist.append(nn.MaxPool1d(2, stride=1)) count += 1 self.encoder = nn.Sequential(*encodelist) decodelist = [] count = 0 for i, j in zip(self.decode_channels[:-1], self.decode_channels[1:]): decodelist.append(nn.ConvTranspose1d(i, j, kernel_size=3, padding=1)) decodelist.append(nn.BatchNorm1d(j)) decodelist.append(nn.ReLU(inplace=True)) self.decoder = nn.Sequential(*decodelist) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.GRU( self.embedded_features, gru_out, num_layers=1, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) # def relevant_points(n): # deconvolution nn. unMaxPool class CPAE4_S(CPAE1_S): def __int__(self): super(CPAE4_S, self).__init__() def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) self.batch_size = x.shape[0] self.n_frames = x.shape[2] x = x.transpose(1, 2) z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1]) z = self.linear(z) x = x.transpose(1, 2) # make change to here # t_samples should at least start from 30 t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to( device) forward_seq = z[:, :int(t_samples) + 1, :] c_t, hidden = self.gru_to_ct(forward_seq) pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) # x_samples = torch.empty((self.time_step, self.batch_size, self.n_features)).float().to( device) # e.g. # size for i in np.arange(1, self.time_step + 1): x_samples[i - 1, :, :] = x[:, int(t_samples) + i, :] reconstruct_samples = self.decode(pred.transpose(1, 2)).transpose(1, 2) # d = self.decode(pred.transpose(1,2).transpose(0,2)) nce, accuracy = self.compute_nce(x_samples, reconstruct_samples) return accuracy, nce, x def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. 
* self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy class CPAE4_NO_BN(CPAE1_NO_BN): def __int__(self): super(CPAE4_NO_BN, self).__init__() def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) self.batch_size = x.shape[0] self.n_frames = x.shape[2] x = x.transpose(1, 2) z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1]) z = self.linear(z) x = x.transpose(1, 2) # make change to here # t_samples should at least start from 30 t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to( device) forward_seq = z[:, :int(t_samples) + 1, :] c_t, hidden = self.gru_to_ct(forward_seq) pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) # x_samples = torch.empty((self.time_step, self.batch_size, self.n_features)).float().to( device) # e.g. # size for i in np.arange(1, self.time_step + 1): x_samples[i - 1, :, :] = x[:, int(t_samples) + i, :] reconstruct_samples = self.decode(pred.transpose(1, 2)).transpose(1, 2) # d = self.decode(pred.transpose(1,2).transpose(0,2)) nce, accuracy = self.compute_nce(x_samples, reconstruct_samples) return accuracy, nce, x def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy class CPAE7_S(CPAE4_S): """ this CPAE simply make `f_i(x_i,x_j)` the chimera_loss function """ def __init__(self, embedded_featrues=8, gru_out=8, Lambda=[1, 1, 3]): super(CPAE7_S, self).__init__(embedded_featrues, gru_out) # to initiate the CPAE4 with embedded_featrues = 8, gru_out = 8 self.Lambda = torch.tensor(Lambda).float().cuda() self.Lambda = self.Lambda / sum(self.Lambda) * 10 def weighted_mask(self, x): """ similar to chimera loss """ # x = x.transpose(0,1) # d = d.transpose(0,1) assert (x.shape[1] == 76) mse_m = torch.ones(x.shape).to(device) mask_m, mapping_m = mask_mapping_M(x) return self.Lambda[0] * mse_m + self.Lambda[1] * mask_m + self.Lambda[2] * mapping_m def compute_nce(self, x, d): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......x : x_samples , ( time_step, batch_size, conv_sizes[-1] ) ......d : reconstruct_samples , the same shape as x, self.decode(z_hat) ''' nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): x_w = self.weighted_mask(x[i]) * x[i] total = torch.mm(x_w, torch.transpose(d[i], 0, 1)) # e.g. 
size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy class CPLSTM(nn.Module): """ Bi-directional LSTM """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5): # Smart way to filter the args self.dim = dim self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim super(CPLSTM, self).__init__() self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim // 2, dropout=self.drop, bidirectional=True, batch_first=True ) self.lstm2 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, dropout=self.drop, bidirectional=False, batch_first=True ) self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) def encodeRegress(self, x): x, _ = self.lstm1(x) x, state = self.lstm2(x) ht, ct = state return x, ht, ct def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_preds = [0] * self.time_step c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t, :]) h, c = ht, ct for i in range(1, self.time_step + 1): c_preds[i - 1] = self.Wk[i - 1](ht) _, h, c = self.encodeRegress(x[:, t + i, :]) c_latent.append(c) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. 
* correct.item() / self.bs return accuracy, nce, c def get_reg_out(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :]) # print(ht.shape) return xt.reshape((x.shape[0], -1)) class CPLSTM2(nn.Module): """ LSTM """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5): # Smart way to filter the args self.dim = dim self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim super(CPLSTM2, self).__init__() self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, dropout=self.drop, bidirectional=False, batch_first=True ) self.lstm2 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, dropout=self.drop, bidirectional=False, batch_first=True ) self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) def encodeRegress(self, x): x, _ = self.lstm1(x) x, state = self.lstm2(x) ht, ct = state return x, ht, ct def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_preds = [0] * self.time_step c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) h, c = ht, ct for i in range(1, self.time_step + 1): c_preds[i - 1] = self.Wk[i - 1](ht) _, h, c = self.encodeRegress(x[:, t + i, :]) c_latent.append(c) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. * correct.item() / self.bs return accuracy, nce, c def get_reg_out(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :]) print(xt.shape) return xt # return xt.reshape((x.shape[0],-1)) class CPLSTM3(nn.Module): """ CPLSTM2 with dropout in non-recurrent layers and FC added. 
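A shared two-layer FC head (self.fcs) projects both the Wk-predicted context and the encoded cell states before the InfoNCE comparison; linear layers use Xavier initialization and recurrent hidden-to-hidden weights are initialized orthogonally.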
""" def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5): # Smart way to filter the args self.dim = dim self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim super(CPLSTM3, self).__init__() self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm2 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.dropout = nn.Dropout(self.drop) self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.fcs = nn.Sequential( nn.Linear(self.dim, self.dim), nn.ReLU(inplace=True), nn.Linear(self.dim, self.dim) ) for model in [self.lstm1, self.lstm2, self.fcs]: self.initialize_weights(model) for model in self.Wk: self.initialize_weights(model) def init_hidden(self, bs, dim): cell_states = torch.zeros(1, bs, dim).to(device) hidden_states = torch.zeros(1, bs, dim).to(device) return (hidden_states, cell_states) def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def encodeRegress(self, x, warm=False): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.dim) if warm: x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0)) _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0)) # print([i.shape for i in state1],h0.shape,c0.shape) x, state1 = self.lstm1(x[:, :, :], state1) x, state2 = self.lstm2(x[:, :, :], state2) ht, ct = state2 else: x, state1 = self.lstm1(x[:, :, :], (h0, c0)) x, state2 = self.lstm2(x[:, :, :], (h0, c0)) ht, ct = state2 return x, ht, ct # # def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_preds = [0] * self.time_step c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t, :]) h, c = ht, ct for i in range(1, self.time_step + 1): c_preds[i - 1] = self.fcs(self.Wk[i - 1](ht)) _, h, c = self.encodeRegress(x[:, t + i, :]) c_latent.append(self.fcs(c)) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. * correct.item() / self.bs return accuracy, nce, c def get_reg_out(self, x, stack=False, warm=False, conti=False): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :], warm) # print(ht.shape) # return xt.reshape((x.shape[0],-1)) if stack: return torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1) return xt[:, -1, :].squeeze(1) class CPLSTM4(nn.Module): """ CPLSTM4------use lstm as Wk mode=1 use hidden states when predict. 
else use cell states """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, noct=False, switch=True): self.dim = dim self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim self.mode = mode self.noct = noct super(CPLSTM4, self).__init__() self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm2 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm3 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True ) if self.noct: self.stack_dim = self.dim * 192 else: self.stack_dim = self.dim * 193 self.dropout = nn.Dropout(self.drop) # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.switch = switch if self.switch == False: self.softmax = nn.Softmax(dim=1) self.lsoftmax = nn.LogSoftmax(dim=1) else: self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.fcs = nn.Sequential( nn.Linear(self.dim, self.dim), nn.ReLU(inplace=True), nn.Linear(self.dim, self.dim) ) for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]: self.initialize_weights(model) def init_hidden(self, bs, dim): cell_states = torch.zeros(1, bs, dim).to(device) hidden_states = torch.zeros(1, bs, dim).to(device) return (hidden_states, cell_states) def freeze_encode(self): for param in self.lstm1.parameters(): param.requires_grad = False def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def encodeRegress(self, x, warm=False, conti=False): bs = x.shape[0] x = self.dropout(x) if conti: x, state1 = self.lstm1(x) x, state2 = self.lstm2(x) ht, ct = state2 return x, ht, ct (h0, c0) = self.init_hidden(bs, self.dim) if warm: x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0)) _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0)) # print([i.shape for i in state1],h0.shape,c0.shape) x, state1 = self.lstm1(x[:, :, :], state1) x, state2 = self.lstm2(x[:, :, :], state2) ht, ct = state2 else: x, state1 = self.lstm1(x[:, :, :], (h0, c0)) x, state2 = self.lstm2(x[:, :, :], (h0, c0)) ht, ct = state2 return x, ht, ct # def predict(self, z, hz, cz, ts, mode=1): """" if mode==1: return hidden states; else return cell states""" h, c = hz, cz x_previous = z c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device) for i in range(ts): x_pred, (h, c) = self.lstm3(x_previous, (h, c)) if mode: c_preds[i, :, :] = h else: c_preds[i, :, :] = c # mode = 0 x_previous = x_pred return c_preds def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)) for i in range(1, self.time_step + 1): _, h, c = self.encodeRegress(x[:, t + i, :]) # init with zeros c_latent.append(self.fcs(c)) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(c_latent[i].squeeze(0), 
torch.transpose(c_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. * correct.item() / self.bs return accuracy, nce, c def get_reg_out(self, x, stack=False, warm=False, conti=False): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti) # print(ht.shape) # return xt.reshape((x.shape[0],-1)) if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1))) if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1)) return xt[:, -1, :].squeeze(1) class CPLSTM4C(nn.Module): """ re-init hidden at time point t mode=1 use hidden states when predict. else use cell states """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, noct=False): self.dim = dim self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim self.mode = mode self.noct = noct super(CPLSTM4C, self).__init__() self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm2 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm3 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True ) if self.noct: self.stack_dim = self.dim * 192 else: self.stack_dim = self.dim * 193 self.dropout = nn.Dropout(self.drop) # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.fcs = nn.Sequential( nn.Linear(self.dim, self.dim), nn.ReLU(inplace=True), nn.Linear(self.dim, self.dim) ) for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]: self.initialize_weights(model) def init_hidden(self, bs, dim): cell_states = torch.zeros(1, bs, dim).to(device) hidden_states = torch.zeros(1, bs, dim).to(device) return (hidden_states, cell_states) def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def encodeRegress(self, x, warm=False, conti=False): bs = x.shape[0] x = self.dropout(x) if conti: x, state1 = self.lstm1(x) x, state2 = self.lstm2(x) ht, ct = state2 return x, ht, ct (h0, c0) = self.init_hidden(bs, self.dim) if warm: x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0)) _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0)) # print([i.shape for i in state1],h0.shape,c0.shape) x, state1 = self.lstm1(x[:, :, :], state1) x, state2 = self.lstm2(x[:, :, :], state2) ht, ct = state2 else: x, state1 = self.lstm1(x[:, :, :], (h0, c0)) x, state2 = self.lstm2(x[:, :, :], (h0, c0)) ht, ct = state2 return x, ht, ct # def predict(self, z, hz, cz, ts, mode=1): """" if mode==1: return hidden states; else return cell states""" h, c = hz, cz x_previous = z c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device) for i in range(ts): x_pred, (h, c) = self.lstm3(x_previous, (h, c)) if mode: c_preds[i, :, :] = h else: c_preds[i, :, 
:] = c x_previous = x_pred return c_preds def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)) (h0, c0) = self.init_hidden(self.bs, self.dim) h1, c1 = h0, c0 h2, c2 = h0, c0 for i in range(1, self.time_step + 1): # BUG : self.time_step ? i tmp, (h1, c1) = self.lstm1(x[:, t + i, :], (h1, c1)) _, (h2, c2) = self.lstm2(tmp, (h2, c2)) c_latent.append(self.fcs(c2)) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. * correct.item() / self.bs return accuracy, nce, None def get_reg_out(self, x, stack=False, warm=False, conti=False): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti) # print(ht.shape) # return xt.reshape((x.shape[0],-1)) if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1))) if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1)) return xt[:, -1, :].squeeze(1) class CPLSTM3H(CPLSTM3): def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5): super(CPLSTM3H, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step) def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_preds = [0] * self.time_step c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t, :]) h, c = ht, ct for i in range(1, self.time_step + 1): c_preds[i - 1] = self.fcs(self.Wk[i - 1](ht)) _, h, c = self.encodeRegress(x[:, t + i, :]) c_latent.append(self.fcs(h)) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. 
* correct.item() / self.bs return accuracy, nce, c class CPLSTM4H(CPLSTM4): def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1): super(CPLSTM4H, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode) def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] # xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) # c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)) # # # for i in range(1, self.time_step + 1): # x, (h, c) = self.lstm1(x[:, t + 1:t+self.time_step+1, :]) # c_latent=self.fcs(x) z_embeds, _ = self.lstm1(x) _, (hidden_ct, cell_ct) = self.lstm2(z_embeds[:, :t + 1, :]) z_preds_time_step = self.fcs( self.predict(hidden_ct.transpose(0, 1), hidden_ct, cell_ct, self.time_step, self.mode)) z_embeds_time_step = z_embeds[:, t + 1:t + self.time_step + 1, :] nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(z_embeds_time_step[:, i, :].squeeze(0), torch.transpose(z_preds_time_step[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. * correct.item() / self.bs return accuracy, nce, None class CPAELSTM41(nn.Module): """ CPLSTM4------use lstm as Wk mode=1 use hidden states when predict. else use cell states """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, noct=False): self.dim = dim self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim self.mode = mode self.noct = noct super(CPAELSTM41, self).__init__() self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm2 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.lstm3 = nn.LSTM( input_size=dim, hidden_size=dim, bidirectional=False, batch_first=True) self.dropout = nn.Dropout(self.drop) # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.de_fc = nn.Sequential(nn.Linear(self.dim, self.input_dim), nn.ReLU(inplace=True), nn.Linear(self.input_dim, self.input_dim), nn.ReLU(inplace=True), ) self.fcs = nn.Sequential( nn.Linear(self.input_dim, self.input_dim), nn.ReLU(inplace=True), nn.Linear(self.input_dim, self.input_dim) ) for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]: self.initialize_weights(model) def init_hidden(self, bs, dim): cell_states = torch.zeros(1, bs, dim).to(device) hidden_states = torch.zeros(1, bs, dim).to(device) return (hidden_states, cell_states) def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def encodeRegress(self, x, warm=False, conti=False): bs = x.shape[0] x = self.dropout(x) if conti: x, state1 = self.lstm1(x) x, state2 = self.lstm2(x) ht, ct = 
state2 return x, ht, ct (h0, c0) = self.init_hidden(bs, self.dim) if warm: x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0)) _, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0)) # print([i.shape for i in state1],h0.shape,c0.shape) x, state1 = self.lstm1(x[:, :, :], state1) x, state2 = self.lstm2(x[:, :, :], state2) ht, ct = state2 else: x, state1 = self.lstm1(x[:, :, :], (h0, c0)) x, state2 = self.lstm2(x[:, :, :], (h0, c0)) ht, ct = state2 return x, ht, ct # # def predict(self, z, hz, cz, ts, mode=1): """" if mode==1: return hidden states; else return cell states""" h, c = hz, cz x_previous = z c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device) for i in range(ts): x_pred, (h, c) = self.lstm3(x_previous, (h, c)) if mode: c_preds[i, :, :] = h else: c_preds[i, :, :] = c x_previous = x_pred return c_preds def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) x_preds = self.fcs(self.de_fc(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(self.fcs(x[:, t + i + 1, :]).squeeze(1), torch.transpose(x_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. * correct.item() / self.bs return accuracy, nce, None def get_reg_out(self, x, stack=False, warm=False, conti=False): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti) # print(ht.shape) # return xt.reshape((x.shape[0],-1)) if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1))) if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1)) return xt[:, -1, :].squeeze(1) class CPAELSTM42(CPAELSTM41): """ two layer lstm as decoder to reconstruct x. 
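The decoder LSTM (lstm3, re-sized to input_dim) is rolled forward from the de_fc-projected regressor state, and the InfoNCE contrast is computed directly against the fcs-projected inputs x[:, t+1 : t+time_step+1, :].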
""" def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1): super(CPAELSTM42, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode) self.lstm3 = nn.LSTM( input_size=self.input_dim, num_layers=1, hidden_size=self.input_dim, bidirectional=False, batch_first=True) # # self.dropout=nn.Dropout(self.drop) # # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) # self.softmax = nn.Softmax(dim=0) # self.lsoftmax = nn.LogSoftmax(dim=0) self.de_fc = nn.Sequential( nn.Linear(self.dim, self.input_dim), nn.ReLU(inplace=True) ) # self.fcs=nn.Sequential( # nn.Linear(self.input_dim,self.input_dim), # nn.ReLU(inplace=True), # # nn.Linear(self.input_dim,self.input_dim) # ) # for model in [self.lstm1,self.lstm2,self.lstm3,self.fcs]: # self.initialize_weights(model) def init_hidden(self, bs, dim): cell_states = torch.zeros(1, bs, dim).to(device) hidden_states = torch.zeros(1, bs, dim).to(device) return (hidden_states, cell_states) # BUG def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def encodeRegress(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.dim) x, _ = self.lstm1(x, (h0, c0)) x, state = self.lstm2(x, (h0, c0)) ht, ct = state return x, ht, ct # # def predict(self, z, hz, cz, ts, mode=1): """" if mode==1: return hidden states; else return cell states""" h, c = self.de_fc(hz), self.de_fc(cz) x_previous = self.de_fc(z) x_preds = torch.empty((self.time_step, self.bs, self.input_dim)).to(device) for i in range(ts): x_pred, (h, c) = self.lstm3(x_previous, (h, c)) if mode: x_preds[i, :, :] = h else: x_preds[i, :, :] = c x_previous = x_pred return x_preds def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) x_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)) # for i in range(1,self.time_step+1): # _, h,c=self.encodeRegress(x[:,t+i,:]) # c_latent.append(self.fcs(c)) nce = 0 for i in np.arange(0, self.time_step): total = torch.mm(self.fcs(x[:, t + i + 1, :]).squeeze(1), torch.transpose(x_preds[i].squeeze(0), 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.bs).to(device))) nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.bs * self.time_step accuracy = 1. 
* correct.item() / self.bs return accuracy, nce, None def get_reg_out(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) # print('reshape x to ',x.shape) xt, ht, ct = self.encodeRegress(x[:, :, :]) # print(ht.shape) # return xt.reshape((x.shape[0],-1)) return xt[:, -1, :].squeeze(1) class CPAELSTM43(CPLSTM4H): """ add decoder constraint in loss function """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1): super(CPAELSTM43, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode) self.lstm4 = nn.LSTM( input_size=self.dim, hidden_size=self.input_dim, bidirectional=False, batch_first=True) def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch self.batch_size = self.bs for i in np.arange(0, self.time_step): try: total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 except IndexError: print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) raise AssertionError # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size return nce, accuracy def encode(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.dim) x, _ = self.lstm1(x, (h0, c0)) return x def decode(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.input_dim) x, _ = self.lstm4(x, (h0, c0)) return x def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) x_ori = x t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)) # for i in range(1, self.time_step + 1): # x, h, c = self.encodeRegress(x[:, t + 1:t+self.time_step+1, :]) z, (h, c) = self.lstm1(x) c_latent = self.fcs(z[:, t + 1:t + self.time_step + 1, :]) # with memory x_hat = self.decode(z) nce, acc = self.compute_nce(c_latent.transpose(0, 1), c_preds) return x_hat, nce, acc class CPAELSTM44(CPLSTM4): """ add decoder constraint in loss function sim: similarity function. 
'dot' for dot product, 'cosine' for cosine similarity """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, t_range=None,mode=1,sym=False, sim='dot',temperature=1,pred_mode='step'): super(CPAELSTM44, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode) self.lstm4 = nn.LSTM( input_size=self.dim, hidden_size=self.input_dim, bidirectional=False, batch_first=True) self.sym=sym self.sim=sim self.temperature=temperature self.t_range=t_range self.pred_mode = pred_mode if self.pred_mode=='future': self.W_pred = nn.Linear(self.dim, self.dim) def sim_func(self,a,b): if self.sim=='cosine': print('use cosine') a=a/a.norm(dim=-1,keepdim=True) b=b/b.norm(dim=-1,keepdim=True) a=self.temperature*a b=self.temperature*b return torch.mm(a,b.T) elif self.sim=='dot': print('use dot') return torch.mm(a,b.T) def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch self.batch_size = self.bs for i in np.arange(0, self.time_step): try: total = self.sim_func(encode_samples[i], pred[i]) # e.g. size 8*8 except IndexError: print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) raise AssertionError # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor if self.sym: nce += 1/2*(torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total)))) + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))# nce is a tensor else: nce += torch.sum(torch.diag(self.lsoftmax(total))) nce /= -1. * self.batch_size * self.time_step accuracy = 1. 
* correct.item() / self.batch_size return nce, accuracy def encode(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.dim) x, _ = self.lstm1(x, (h0, c0)) return x def decode(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.input_dim) x, _ = self.lstm4(x, (h0, c0)) return x def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) x_ori = x t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) self.bs = x.shape[0] c_latent = [] xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)) # for i in range(1, self.time_step + 1): # x, h, c = self.encodeRegress(x[:, t + 1:t+self.time_step+1, :]) z_after_t = [] for i in range(1, self.time_step + 1): _, h, c = self.encodeRegress(x[:, t + i, :]) z_after_t.append(self.fcs(c)) z_after_t = torch.cat(z_after_t, 0) c_embeds = self.fcs(z_after_t) z_all = torch.cat((xt, z_after_t.transpose(0, 1)), 1) x_hat = self.decode(z_all) nce, acc = self.compute_nce(c_embeds, c_preds) return x_hat, nce, acc def pred_future(self, x): x=self.check_input(x) # print(self.t_range) # print(self.max_len) t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1]) # print(t_range) # t_range = (self.max_len *2// 3, 4 * self.max_len // 5) # print(x.shape) x_ori = x if self.max_len>192: t=192 else: t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series # print('t is %s'%t) # self.bs = x.shape[0] latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :]) latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :]) del x hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past)) latent_all = torch.cat((latent_past, latent_future), 1) del latent_future,latent_past latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all)) del latent_all x_hat = self.decode(latent_all_attention) nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred) return x_hat, nce, acc class CPAELSTM44_AT(CPLSTM4): """ add decoder constraint in loss function pred_mode: 'step' for timestep prediction 'future' for using past to predict future """ def __init__(self, dim, bn, dropout, task,t_range=None, depth=2, num_classes=1, input_dim=76, flat_attention=False,time_step=5, sim='dot',temperature=1,mode=1, switch=True, pred_mode='step',sym=False): super(CPAELSTM44_AT, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode, switch) self.lstm4 = nn.LSTM( input_size=self.dim, hidden_size=self.input_dim, bidirectional=False, batch_first=True) self.att1 = nn.Linear(self.dim, self.dim) self.att2 = nn.Linear(self.dim, self.dim) self.flat_attention=flat_attention self.sim=sim self.temperature=temperature self.t_range=t_range self.pred_mode = pred_mode if self.pred_mode=='future': self.W_pred = nn.Linear(self.dim, self.dim) self.sym=sym #whether use symmetric loss def cal_att1(self,x): if self.flat_attention: x=self.att1(x) assert x.shape[-1]==self.dim # x=torch.transpose(x,1,2) # torch.nn.BatchNorm1d(self.dim) # x=torch.transpose(x,1,2) nn.Softmax(dim=-1) else: x=self.att1(x) return x def cal_att2(self,x): if self.flat_attention: x=self.att2(x) # x=torch.transpose(x,1,2) # torch.nn.BatchNorm1d(self.dim) # x=torch.transpose(x,1,2) 
nn.Softmax(dim=-1) else: x=self.att2(x) return x def sim_func(self,a,b): if self.sim=='cosine': a=a/a.norm(dim=-1,keepdim=True) b=b/b.norm(dim=-1,keepdim=True) a=self.temperature*a b=self.temperature*b print('using cosine') return torch.mm(a,b.T) elif self.sim=='dot': print('using dot') return torch.mm(a,b.T) def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch self.batch_size = self.bs if self.pred_mode=='step': for i in np.arange(0, self.time_step): try: print('self.sim is ',self.sim) total = self.sim_func(encode_samples[i], pred[i]) # e.g. size 8*8 except IndexError: print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) raise AssertionError # print(total) if self.sym: nce += 1/2*(torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total)))) + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))# nce is a tensor else: nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size elif self.pred_mode=='future': total=self.sim_func(encode_samples[0],pred[0]) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor # correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), # torch.arange(0, self.batch_size).cuda())) # correct is a tensor # correct_2=torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=1), # torch.arange(0, self.batch_size).cuda())) # print(correct,correct_2) # print(total) if self.sym: nce += 1/2*(torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total)))) + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))# nce is a tensor else: nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size accuracy =1. 
* correct.item() / self.batch_size return nce, accuracy def encodeRegress(self, x, warm=False, conti=False): bs = x.shape[0] x = self.dropout(x) # print(x.shape) latents, state1 = self.lstm1(x) del x latents_to_pred = torch.mul(latents, self.cal_att1(latents)) regs, state2 = self.lstm2(latents_to_pred) del latents_to_pred ht, ct = state2 return latents, regs, ht, ct def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False): bs = x.shape[0] x = self.dropout(x) latents, state1 = self.lstm1(x) # latents_to_pred = torch.mul(latents, self.att1(latents)) regs, state2 = self.lstm2(latents) ht, ct = state2 return regs[:, -1, :].squeeze(1) def encode(self, x): bs = x.shape[0] x = self.dropout(x) x, (h, c) = self.lstm1(x) return x, h, c def decode(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.input_dim) x, _ = self.lstm4(x, (h0, c0)) return x def check_input(self, x): if type(x) == dict: dic = x x = dic['data'].squeeze(0) self.max_len = min(dic['length']) self.bs = x.shape[0] elif len(x.shape) == 4: x = x.squeeze(1) self.bs = x.shape[0] self.max_len = x.shape[1] elif x.shape[1] == 76: x = x.transpose(1, 2) self.bs = x.shape[0] self.max_len = x.shape[1] else: self.max_len=x.shape[1] self.bs=x.shape[0] return x def pred_future(self, x): x=self.check_input(x) # print(self.t_range) # print(self.max_len) t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1]) # print(t_range) # t_range = (self.max_len *2// 3, 4 * self.max_len // 5) # print(x.shape) x_ori = x if self.max_len>192: t=192 else: t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series # print('t is %s'%t) # self.bs = x.shape[0] latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :]) latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :]) del x hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past)) latent_all = torch.cat((latent_past, latent_future), 1) del latent_future,latent_past latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all)) del latent_all x_hat = self.decode(latent_all_attention) nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred) return x_hat, nce, acc def pred_timestep(self, x): x=self.check_input(x) t = torch.randint(low=20, high=self.max_len - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) # self.bs = x.shape[0] latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :]) latent_preds = self.fcs( self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode)) latent_future = [] for i in range(1, self.time_step + 1): _, h, c = self.encode(x[:, t + i, :]) latent_future.append(self.fcs(c[-1])) latent_future = torch.stack(latent_future, 0) latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1) latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all)) x_hat = self.decode(latent_all_attention) nce, acc = self.compute_nce(latent_future, latent_preds) return x_hat, nce, acc def forward(self, x): if self.pred_mode == 'future': x_hat, nce, acc = self.pred_future(x) else: x_hat, nce, acc = self.pred_timestep(x) return x_hat, nce, acc class SelfAttention(nn.Module): def __init__(self, in_dim): super(SelfAttention,self).__init__() self.chanel_in = in_dim self.Wq = nn.Linear(in_dim , in_dim) self.Wk = nn.Linear(in_dim , in_dim) self.Wv = nn.Linear(in_dim , in_dim) self.gamma = in_dim 
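# in_dim doubles as the attention scale: forward() divides the query-key energies by sqrt(self.gamma), i.e. scaled dot-product attention over the feature dimension.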
self.softmax = nn.Softmax(dim=-1) def forward(self, x): """ inputs : x : input feature maps( B X C X W X H) (batch_size X C X 76 X 192) returns : out : self attention value + input feature attention: B X N X N (N is Width*Height) """ # x: (48, 144, 256) m_batchsize, width, height = x.size() proj_query = self.Wq(x) proj_key = self.Wk(x) energy = torch.matmul(proj_key.transpose(1,2),proj_query) / (self.gamma**0.5) attention = self.softmax(energy) proj_value = self.Wv(x) out = torch.matmul(proj_value, attention) return out class CPAELSTM44_selfAT(CPLSTM4): """ add decoder constraint in loss function pred_mode: 'step' for timestep prediction 'future' for using past to predict future """ def __init__(self, dim, bn, dropout, task,t_range=None, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, switch=True, pred_mode='step'): super(CPAELSTM44_selfAT, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode, switch) self.lstm4 = nn.LSTM( input_size=self.dim, hidden_size=self.input_dim, bidirectional=False, batch_first=True) self.att1 = SelfAttention(self.dim) self.att2 = SelfAttention(self.dim) self.t_range=t_range self.pred_mode = pred_mode def compute_nce(self, encode_samples, pred): ''' ----------------------------------------------------------------------------------- --------------Calculate NCE loss-------------- ----------------------------------------------------------------------------------- ...argument: ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) ......pred : Wk[i]( C_t ) ''' nce = 0 # average over time_step and batch self.batch_size = self.bs if self.pred_mode=='step': for i in np.arange(0, self.time_step): try: total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 except IndexError: print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) raise AssertionError # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size * self.time_step accuracy = 1. * correct.item() / self.batch_size elif self.pred_mode=='future': total=torch.mm(encode_samples[0],torch.transpose(pred[0], 0, 1)) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, self.batch_size).cuda())) # correct is a tensor nce = torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * self.batch_size accuracy =1. 
* correct.item() / self.batch_size return nce, accuracy def encodeRegress(self, x, warm=False, conti=False): bs = x.shape[0] x = self.dropout(x) latents, state1 = self.lstm1(x) del x # latents (48,144,256) latents_to_pred = self.att1(latents) regs, state2 = self.lstm2(latents_to_pred) del latents_to_pred ht, ct = state2 return latents, regs, ht, ct def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False): bs = x.shape[0] x = self.dropout(x) latents, state1 = self.lstm1(x) # latents_to_pred = torch.mul(latents, self.att1(latents)) regs, state2 = self.lstm2(latents) ht, ct = state2 return regs[:, -1, :].squeeze(1) def encode(self, x): bs = x.shape[0] x = self.dropout(x) x, (h, c) = self.lstm1(x) return x, h, c def decode(self, x): bs = x.shape[0] x = self.dropout(x) (h0, c0) = self.init_hidden(bs, self.input_dim) x, _ = self.lstm4(x, (h0, c0)) return x def check_input(self, x): if type(x) == dict: dic = x x = dic['data'].squeeze(0) self.max_len = min(dic['length']) self.bs = x.shape[0] elif len(x.shape) == 4: x = x.squeeze(1) self.bs = x.shape[0] self.max_len = x.shape[1] elif x.shape[1] == 76: x = x.transpose(1, 2) self.bs = x.shape[0] self.max_len = x.shape[1] else: self.max_len=x.shape[1] self.bs=x.shape[0] return x def pred_future(self, x): x=self.check_input(x) # print(self.t_range) # print(self.max_len) t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1]) # print(t_range) # t_range = (self.max_len *2// 3, 4 * self.max_len // 5) # print(x.shape) x_ori = x if self.max_len>192: t=192 else: t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series # print('t is %s'%t) # self.bs = x.shape[0] latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :]) latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :]) del x hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past)) latent_all = torch.cat((latent_past, latent_future), 1) del latent_future,latent_past latent_all_attention = self.att2(latent_all) del latent_all x_hat = self.decode(latent_all_attention) nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred) return x_hat, nce, acc def pred_timestep(self, x): # x (48,192,76) x=self.check_input(x) # x (48,192,76) t = torch.randint(low=20, high=self.max_len - self.time_step - 1, size=(1,)).long() # print('reshape x to ',x.shape) # self.bs = x.shape[0] latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :]) latent_preds = self.fcs( self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode)) latent_future = [] for i in range(1, self.time_step + 1): _, h, c = self.encode(x[:, t + i, :]) latent_future.append(self.fcs(c[-1])) latent_future = torch.stack(latent_future, 0) latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1) latent_all_attention = self.att2(latent_all) x_hat = self.decode(latent_all_attention) nce, acc = self.compute_nce(latent_future, latent_preds) return x_hat, nce, acc def forward(self, x): if self.pred_mode == 'future': x_hat, nce, acc = self.pred_future(x) else: x_hat, nce, acc = self.pred_timestep(x) return x_hat, nce, acc # class CPAELSTM45(CPLSTM4): # """ # CPLSTM4+ CPAE4 # """ # # def __init__(self, dim, bn, dropout, task, # depth=2, num_classes=1, # input_dim=76, time_step=5, mode=1): # super(CPAELSTM45, self).__init__(dim, bn, dropout, task, # depth, num_classes, # input_dim, time_step, mode) # # self.fcs3 
= nn.Sequential( # nn.Linear(self.input_dim, self.input_dim), # nn.ReLU(inplace=True), # nn.Linear(self.input_dim, self.input_dim) # ) # self.lstm4 = nn.LSTM( # input_size=self.dim, # hidden_size=self.input_dim, # bidirectional=False, # batch_first=True) # # def encode(self, x): # bs = x.shape[0] # x = self.dropout(x) # (h0, c0) = self.init_hidden(bs, self.dim) # x, _ = self.lstm1(x, (h0, c0)) # return x # # def decode(self, x): # bs = x.shape[0] # x = self.dropout(x) # (h0, c0) = self.init_hidden(bs, self.input_dim) # x, _ = self.lstm4(x, (h0, c0)) # return x # # def compute_nce(self, encode_samples, pred): # ''' # ----------------------------------------------------------------------------------- # --------------Calculate NCE loss-------------- # ----------------------------------------------------------------------------------- # ...argument: # ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) # ......pred : Wk[i]( C_t ) # ''' # nce = 0 # average over time_step and batch # self.batch_size = self.bs # for i in np.arange(0, self.time_step): # try: # total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # except IndexError: # print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) # raise AssertionError # # print(total) # correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), # torch.arange(0, self.batch_size).cuda())) # correct is a tensor # nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor # nce /= -1. * self.batch_size * self.time_step # accuracy = 1. * correct.item() / self.batch_size # # return nce, accuracy # # def forward(self, x): # if len(x.shape) == 4: x = x.squeeze(1) # if x.shape[1] == 76: x = x.transpose(1, 2) # x_ori = x # t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # self.bs = x.shape[0] # xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) # # z_after_t = [] # # for i in range(1, self.time_step + 1): # _, h, c = self.encodeRegress(x[:, t + i, :]) # z_after_t.append(c) # z_after_t = torch.cat(z_after_t, 0) # # x_hat = self.decode(z_after_t) # nce, acc = self.compute_nce(self.fcs3(x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), # self.fcs3(x_hat)) # nce2, acc2 = self.compute_nce((x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), x_hat) # print('acc after fc', acc) # print('acc before fc', acc2) # return acc, nce, None # # # class CPAELSTM46(CPLSTM4): # """ # CPLSTM4+ CPAE4 # """ # # def __init__(self, dim, bn, dropout, task, # depth=2, num_classes=1, # input_dim=76, time_step=5, mode=1): # super(CPAELSTM46, self).__init__(dim, bn, dropout, task, # depth, num_classes, # input_dim, time_step, mode) # # self.lstm4 = nn.LSTM( # input_size=self.dim, # hidden_size=self.input_dim, # bidirectional=False, # batch_first=True) # # def encode(self, x): # bs = x.shape[0] # x = self.dropout(x) # (h0, c0) = self.init_hidden(bs, self.dim) # x, _ = self.lstm1(x, (h0, c0)) # return x # # def decode(self, x): # bs = x.shape[0] # x = self.dropout(x) # (h0, c0) = self.init_hidden(bs, self.input_dim) # x, _ = self.lstm4(x, (h0, c0)) # return x # # def compute_nce(self, encode_samples, pred): # ''' # ----------------------------------------------------------------------------------- # --------------Calculate NCE loss-------------- # ----------------------------------------------------------------------------------- # ...argument: # ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) # ......pred : Wk[i]( C_t ) # 
''' # nce = 0 # average over time_step and batch # self.batch_size = self.bs # for i in np.arange(0, self.time_step): # try: # total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # except IndexError: # print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) # raise AssertionError # # print(total) # correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), # torch.arange(0, self.batch_size).cuda())) # correct is a tensor # nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor # nce /= -1. * self.batch_size * self.time_step # accuracy = 1. * correct.item() / self.batch_size # # return nce, accuracy # # def forward(self, x): # if len(x.shape) == 4: x = x.squeeze(1) # if x.shape[1] == 76: x = x.transpose(1, 2) # x_ori = x # t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # self.bs = x.shape[0] # xt, ht, ct = self.encodeRegress(x[:, :t + 1, :]) # # z_after_t = [] # # for i in range(1, self.time_step + 1): # _, h, c = self.encodeRegress(x[:, t + i, :]) # z_after_t.append(c) # z_after_t = torch.cat(z_after_t, 0) # # x_hat = self.decode(z_after_t) # nce, acc = self.compute_nce((x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), x_hat) # return acc, nce, None # # # class CPAELSTM4_AT(CPLSTM4): # def __init__(self, dim, bn, dropout, task, # depth=2, num_classes=1, # input_dim=76, time_step=5, mode=1, switch=True): # super(CPAELSTM4_AT, self).__init__(dim, bn, dropout, task, # depth, num_classes, # input_dim, time_step, mode) # # self.lstm1 = nn.LSTM( # input_size=self.input_dim, # hidden_size=self.dim, # num_layers=3, # bidirectional=False, # batch_first=True # ) # self.lstm4 = nn.LSTM( # input_size=self.dim, # hidden_size=self.input_dim, # bidirectional=False, # batch_first=True) # self.switch = switch # if self.switch == False: # self.softmax = nn.Softmax(dim=1) # self.lsoftmax = nn.LogSoftmax(dim=1) # self.att1 = nn.Linear(self.dim, self.dim) # attend to decoder # self.att2 = nn.Linear(self.dim, self.dim) # attend to predictor # # def encodeRegress(self, x, warm=False, conti=False): # bs = x.shape[0] # x = self.dropout(x) # latents, state1 = self.lstm1(x) # latents_to_pred = torch.mul(latents, self.att1(latents)) # regs, state2 = self.lstm2(latents_to_pred) # ht, ct = state2 # return latents, regs, ht, ct # # def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False): # # TODO: # bs = x.shape[0] # x = self.dropout(x) # latents, state1 = self.lstm1(x) # # # latents_to_pred = torch.mul(latents, self.att1(latents)) # regs, state2 = self.lstm2(latents) # ht, ct = state2 # return regs[:, -1, :].squeeze(1) # # def compute_nce(self, encode_samples, pred): # ''' # ----------------------------------------------------------------------------------- # --------------Calculate NCE loss-------------- # ----------------------------------------------------------------------------------- # ...argument: # ......encode_samples : ( time_step, batch_size, conv_sizes[-1] ) # ......pred : Wk[i]( C_t ) # ''' # nce = 0 # average over time_step and batch # self.batch_size = self.bs # for i in np.arange(0, self.time_step): # try: # total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. 
size 8*8 # except IndexError: # print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape)) # raise AssertionError # # print(total) # correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), # torch.arange(0, self.batch_size).cuda())) # correct is a tensor # nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor # nce /= -1. * self.batch_size * self.time_step # accuracy = 1. * correct.item() / self.batch_size # # return nce, accuracy # # def encode(self, x): # bs = x.shape[0] # x = self.dropout(x) # x, (h, c) = self.lstm1(x) # return x, h, c # # def decode(self, x): # bs = x.shape[0] # x = self.dropout(x) # (h0, c0) = self.init_hidden(bs, self.input_dim) # x, _ = self.lstm4(x, (h0, c0)) # return x # # def forward(self, x): # # check shape # if len(x.shape) == 4: x = x.squeeze(1) # if x.shape[1] == 76: x = x.transpose(1, 2) # self.bs = x.shape[0] # # # randomly choose a time point # t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long() # # # encode the past and put into regressor # latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :]) # latent_preds = self.fcs( # self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode)) # # latent_future = [] # for i in range(1, self.time_step + 1): # _, h, c = self.encode(x[:, t + i, :]) # latent_future.append(self.fcs(c[-1])) # # latent_future = torch.stack(latent_future, 0) # # latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1) # latent_all_attention = torch.mul(latent_all, self.att2(latent_all)) # x_hat = self.decode(latent_all_attention) # nce, acc = self.compute_nce(latent_future, latent_preds) # # return x_hat, nce, acc class CDCK3_S(nn.Module): def __init__( self, embedded_features, gru_out, n_points=192, n_features=76, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], kernel_sizes=[(2, i) for i in [76, 32, 64, 64, 128, 256, 512, 1024, 512, 128, 64]], time_step=30): self.embedded_features = embedded_features self.gru_out = gru_out self.conv_sizes = conv_sizes self.time_step = time_step # kernel_sizes=get_kernel_sizes() #TODO super(CDCK3_S, self).__init__() self.n_features = n_features # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ).to(device) # self.decode_channels = self.channels[::-1] # self.decoder = nn.ModuleList( # [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] # ).to(device) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # dim = 1 !!! 
self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) self.gru = nn.GRU( self.embedded_features, gru_out, num_layers=1, bidirectional=False, batch_first=True).to(device) self.beforeNCE = None # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) def add_fcs(self, hidden=None): """ This function will add FC layers to the embedded features and then compare the features after FC transformations. See NOTION for illustration. :param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set as [n_embedded_features,n_embedded_features] :return: None """ n = self.embedded_features if hidden is None: self.fcs = nn.Sequential( nn.Linear(n, n), nn.ReLU(inplace=True), nn.Linear(n, n) ) else: if type(hidden) != list: hidden = list(hidden) layers = [] for i, j in zip([n] + hidden, hidden + [n]): layers.append(nn.Linear(i, j)) layers.append(nn.ReLU(inplace=True)) layers.pop() # We do not want Relu at the last layer self.fcs = nn.Sequential(*layers).to(device) self.beforeNCE = True def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def init_hidden(self, batch_size, use_gpu=True): return torch.zeros(1, batch_size, self.gru_out).to(device) def forward(self, x): batch_size = x.shape[0] # input shape: (N,C=1,n_points=192,n_features=76) if len(x.shape) == 4: x = x.squeeze(1) if x.shape[1] == 192: x = x.transpose(1, 2) for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) # output shape: (N, C=conv_sizes[-1], n_frames,1) # output shape: (N, C=conv_sizes[-1], n_frames,1) self.n_frames = x.shape[2] t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long() encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to( device) # e.g. 
size c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device) hidden = self.init_hidden(batch_size, use_gpu=True) init_hidden = hidden # reshape for gru x = x.view(batch_size, self.n_frames, self.conv_sizes[-1]) # output shape: (N, n_frames, conv_sizes[-1]) x = self.linear(x) # output shape: (N, n_frames, embedded_features) for i in np.arange(1, self.time_step + 1): hidden = init_hidden encode_samples[i - 1, :, :] = x[:, int(t_samples) + i, :] forward_seq = x[:, :int(t_samples) + 1, :] # ----->SHAPE: (N,t_samples+1,embedded_features) output, hidden = self.gru(forward_seq, hidden) c_t = output[:, -1, :].view(batch_size, self.gru_out) pred = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) if self.beforeNCE: # ADD FC layers pred = self.fcs(pred) encode_samples = self.fcs(encode_samples) # ----------------------------------------------------------------------------------- # --------------Calculate NCE loss------------------------------------------------ # ----------------------------------------------------------------------------------- nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch_size).to(device))) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * batch_size * self.time_step accuracy = 1. * correct.item() / batch_size return accuracy, nce, hidden def sub_forward(self, x): # input shape: (N,C=1,n_points=192,n_features=76) f = iter(self.convs) g = iter(self.bns) for i in range(len(self.conv_sizes)): x = next(f)(x) x = next(g)(x) x = nn.ReLU(inplace=True)(x) x = x.transpose(1, 3) return x def get_reg_out(self, x, every=False): batch_size = x.shape[0] # input shape: (N,C=1,n_points=192,n_features=76) if len(x.shape) == 4: x = x.squeeze(1) if x.shape[1] == 192: x = x.transpose(1, 2) for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) # zt # output shape: (N, C=conv_sizes[-1], n_frames,1) self.n_frames = x.shape[2] t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long() encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to( device) # e.g. 
size c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device) hidden = self.init_hidden(batch_size) init_hidden = hidden # reshape for gru x = x.view(batch_size, self.n_frames, self.conv_sizes[-1]) # output shape: (N, n_frames, conv_sizes[-1]) x = self.linear(x) # output shape: (N, n_frames, embedded_features) hidden = init_hidden output, hidden = self.gru(x, hidden) c_t = output[:, -1, :].view(batch_size, self.gru_out) return c_t class CDCK2(nn.Module): def __init__(self, time_step, batch_size, frame_size, fix_frame=True, n_frames=None, conv_sizes=[64, 128, 512, 128, 64, 32, 16], n_flat_features_per_frame=None, embedded_features=22, gru_out=32 ): """data should be formatted as Input: (batch size, n_frames, frame_size, features) *****If the frame_size and n_frames are identical for every batch, *****Please set fix_frame=True, and please provide n_frames :type conv_sizes: list """ super(CDCK2, self).__init__() self.beforeNCE = False self.frame_size = frame_size self.batch_size = batch_size self.time_step = time_step self.fix_frame = fix_frame self.n_frames = n_frames self.n_flat_features_per_frame = n_flat_features_per_frame self.embedded_features = embedded_features self.gru_out = gru_out if not self.fix_frame: self.encoder = nn.Sequential( nn.MaxPool2d(4, stride=1), nn.Conv2d(1, 4, kernel_size=2, stride=1, padding=1, bias=False), nn.BatchNorm2d(4), nn.ReLU(inplace=True), nn.MaxPool2d(3, stride=1), nn.Conv2d(4, 8, kernel_size=2, stride=4, padding=2, bias=False), nn.BatchNorm2d(8), nn.ReLU(inplace=True), nn.Conv2d(8, self.embedded_features, kernel_size=2, stride=2, padding=1, bias=False), nn.BatchNorm2d(self.embedded_features), nn.ReLU(inplace=True), nn.Flatten() ) if self.fix_frame: self.convs = nn.ModuleList([nn.Conv2d(self.n_frames, conv_sizes[0], kernel_size=2, stride=1, padding=2, bias=False, groups=self.n_frames)] + [ nn.Conv2d(i, j, kernel_size=2, stride=1, padding=2, bias=False, groups=self.n_frames) for i, j in zip(conv_sizes[:-1], conv_sizes[1:]) ] ) self.bns = nn.ModuleList( [nn.BatchNorm2d(i) for i in conv_sizes] ) self.maxpooling = nn.MaxPool2d(2, stride=1) self.ReLU = nn.ReLU(inplace=True) self.softmax = nn.Softmax() self.lsoftmax = nn.LogSoftmax() if n_flat_features_per_frame: self.linear = nn.Linear(self.n_flat_features_per_frame, self.embedded_features) self.gru = nn.GRU(self.embedded_features, self.gru_out, num_layers=1, bidirectional=False, batch_first=True).to(device) self.Wk = nn.ModuleList( [nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # initialize gru for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) def add_fcs(self, hidden=None): """ This function will add FC layers to the embedded features and then compare the features after FC transformations. See NOTION for illustration. :param hidden: a list of hidden sizes per layer. For example:[100,100]. 
If no value is passed, it will be set as [n_embedded_features,n_embedded_features] :return: None """ n = self.embedded_features if hidden is None: self.fcs = nn.Sequential( nn.Linear(n, n), nn.ReLU(inplace=True), nn.Linear(n, n) ) else: if type(hidden) != list: hidden = list(hidden) layers = [] for i, j in zip([n] + hidden, hidden + [n]): layers.append(nn.Linear(i, j)) layers.append(nn.ReLU(inplace=True)) layers.pop() # We do not want Relu at the last layer self.fcs = nn.Sequential(*layers) self.beforeNCE = True def update_flat_features(self, n_flat_features_per_frame): self.n_flat_features_per_frame = n_flat_features_per_frame self.linear = nn.Linear(self.n_flat_features_per_frame, self.embedded_features).to(device) self.gru = nn.GRU(self.embedded_features, self.gru_out, num_layers=1, bidirectional=False, batch_first=True).to( device) self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to( device) # initialize gru for layer_p in self.gru._all_weights: for p in layer_p: if 'weight' in p: nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu') self.apply(self._weights_init) def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def init_hidden(self, batch_size, use_gpu=True): if self.fix_frame: if use_gpu: return torch.zeros(1, batch_size, self.gru_out).to(device) else: return torch.zeros(1, batch_size, self.gru_out) if not self.fix_frame: if use_gpu: return torch.zeros(1, 1, self.gru_out).to(device) else: return torch.zeros(1, 1, self.gru_out) def forward(self, x): # Convert into frames # shape of x:(N,1,n_points,features) x, frame_ends = makeFrameDimension(x, self.frame_size, self.n_frames) # shape of x:(batch_size,n_frames,frame_size, n_features) # shape of x:(N,n_frames,points_per_frame,features) batch_size = x.shape[0] # !warning!!!!! The last batch in the dataset may have batch_size < self.batch_size. # !!!!!!!!!!!!!! 
So cannot use self.batch_size here self.n_frames = x.shape[1] # ----------------------------------------------------------------------------------- # --------------Pick a random time point------------------------------------------------ # ----------------------------------------------------------------------------------- if not self.fix_frame: t_samples = torch.empty((batch_size, 1)) for i in range(batch_size): try: t_samples[i] = torch.randint(int((frame_ends[i] - self.time_step - 1).item()), size=(1,)).long() # randomly pick time stamps except RuntimeError: # some patients have very few frames so we have to choose the first frame to start frame_ends[i] = self.time_step + 3 t_samples[i] = 1 if self.fix_frame: t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long() # ----------------------------------------------------------------------------------- # --------------DO THE EMBEDDING------------------------------------------------ # ------------------------------------------------------------------------------------ if not self.fix_frame: z = torch.empty((batch_size, self.n_frames, self.embedded_features)).float().to(device) for i in range(self.n_frames): y = (x[:, i, :, :].unsqueeze(1)).clone().to(device) y = self.encoder(y) # ------>SHAPE: (N,n_flat_features_per_frame) # calculate n_flat_features_per_frame if it is unkown if self.n_flat_features_per_frame == None: self.n_flat_features_per_frame = y.shape[1] logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame) return self.n_flat_features_per_frame y = self.linear(y) # ----->SHAPE: (N,embedded_features) z[:, i, :] = y.squeeze(1) # --->SHAPE: (N, 1, embedded_features) del x, y if self.fix_frame: # x:(8,24,8,76) (N,n_frames,points_per_frame,features) f = iter(self.convs) g = iter(self.bns) for i in range(len(self.convs)): x = next(f)(x) try: x = nn.MaxPool2d(2, stride=2)(x) except RuntimeError: pass x = next(g)(x) x = self.ReLU(x) x = nn.Flatten(start_dim=2, end_dim=-1)(x) z = x del x # z: (8,144) (N,flat_features) # calculate n_flat_features_per_frame if it is unkown if self.n_flat_features_per_frame == None: self.n_flat_features_per_frame = int(z.shape[2] * z.shape[1] / self.n_frames) logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame) return self.n_flat_features_per_frame z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame) # ---->SHAPE: (N,n_frames,n_flat_features_per_frame) z = self.linear(z) # ----->SHAPE: (N,n_frames,embedded_features) encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to( device) # e.g. 
size # ----->SHAPE: (T,N,embedded_features) c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device) # output of GRU,------>SHAPE:(N, n_gru_out) # ----------------------------------------------------------------------------------- # --------------GET GRU OUTPUT------------------------------------------------ # ----------------------------------------------------------------------------------- forward_seq = [] hidden = self.init_hidden(len(z), use_gpu=True) init_hidden = hidden if not self.fix_frame: for j in range(batch_size): hidden = init_hidden t = t_samples[j] for i in np.arange(1, self.time_step + 1): encode_samples[i - 1][j] = z[j, int(t_samples[j].item()) + i, :] forward_seq.append(z[j, :int(t_samples[j].item()) + 1, :]) output, hidden = self.gru(forward_seq[j].unsqueeze(0), hidden) c_t[j] = output[:, -1, :].view(1, self.gru_out) if self.fix_frame: for i in np.arange(1, self.time_step + 1): hidden = init_hidden encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :] forward_seq = z[:, :int(t_samples) + 1, :] # ----->SHAPE: (N,t_samples+1,embedded_features) output, hidden = self.gru(forward_seq, hidden) c_t = output[:, -1, :].view(batch_size, self.gru_out) pred = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(device) for i in np.arange(0, self.time_step): linear = self.Wk[i] pred[i] = linear(c_t) if self.beforeNCE: # ADD FC layers pred = self.fcs(pred) encode_samples = self.fcs(encode_samples) # ----------------------------------------------------------------------------------- # --------------Calculate NCE loss------------------------------------------------ # ----------------------------------------------------------------------------------- nce = 0 # average over time_step and batch for i in np.arange(0, self.time_step): total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8 # print(total) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch_size).to(device))) # correct is a tensor nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor nce /= -1. * batch_size * self.time_step accuracy = 1. * correct.item() / batch_size return accuracy, nce, hidden def get_reg_out(self, x, every=False): """ Get the output of the regression model (GRU). batch_size could be different from the batch_size used in training process This function is only applicable for the case in which the samples share the same length, which means that the self.fix_frame=True """ x, _ = makeFrameDimension(x, self.frame_size, x.shape[1]) self.n_frames = x.shape[1] batch_size = x.size()[0] if self.fix_frame: f = iter(self.convs) g = iter(self.bns) for i in range(len(self.convs)): x = next(f)(x) try: x = nn.MaxPool2d(2, stride=2)(x) except RuntimeError: pass x = next(g)(x) x = self.ReLU(x) x = nn.Flatten(start_dim=2, end_dim=-1)(x) z = x # self.n_flat_features_per_frame=z.shape[1]/self.n_frames z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame) # ---->SHAPE: (N,n_frames,embedded_features) z = self.linear(z) # ----->SHAPE: (N,n_frames,embedded_features) hidden = self.init_hidden(batch_size) output, hidden = self.gru(z, hidden) # output size e.g. 
8*128*256 # ---->SHAPE: (N,n_frames,n_gru_out) else: z = torch.empty((batch_size, self.n_frames, self.embedded_features)).float().to(device) for i in range(self.n_frames): y = (x[:, i, :, :].unsqueeze(1)).clone().to(device) y = self.encoder(y) # ------>SHAPE: (N,n_flat_features_per_frame) # calculate n_flat_features_per_frame if it is unkown if self.n_flat_features_per_frame == None: self.n_flat_features_per_frame = y.shape[1] logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame) return self.n_flat_features_per_frame y = self.linear(y) # ----->SHAPE: (N,embedded_features) z[:, i, :] = y.squeeze(1) # --->SHAPE: (N, 1, embedded_features) del x, y c = torch.zeros(size=(batch_size, self.n_frames, self.gru_out)).float().to(device) for j in range(batch_size): hidden = self.init_hidden(batch_size) output, hidden = self.gru(z[j, :, :].unsqueeze(0), hidden) c[j, :, :] = output[:, :, :].view(1, self.n_frames, self.gru_out) output = c if every: return output # return output from gru of every frame # ---->SHAPE: (N,n_frames,n_gru_out) else: return output[:, -1, :] # only return the last output # ---->SHAPE: (N,n_gru_out) def get_latent(self, x, every=True): """ Get the latent vectors of each frame """ batch_size = x.size()[0] x, _ = makeFrameDimension(x, self.frame_size, x.shape[1]) z = self.encoder(x) self.n_flat_features_per_frame = z.shape[1] / self.n_frames z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame) return z class AE1(nn.Module): """ trivial autoencoder """ def __init__( self, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], ): super(AE1, self).__init__() self.conv_sizes = conv_sizes encodelist = [] enChannels = [1] + conv_sizes count = 0 for i in range(len(enChannels) - 1): encodelist.append(nn.Conv2d(enChannels[i], enChannels[i + 1], kernel_size=2)) encodelist.append(nn.BatchNorm2d(enChannels[i + 1])) encodelist.append(nn.ReLU(inplace=True)) # if count < 2: # encodelist.append(nn.MaxPool2d(2,stride=1)) count += 1 deChannels = enChannels[::-1] decodelist = [] for i in range(len(enChannels) - 1): # if count >= len(enChannels) - 3: # decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=3)) # else: decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=2)) decodelist.append(nn.BatchNorm2d(deChannels[i + 1])) decodelist.append(nn.ReLU(inplace=True)) count += 1 self.encoder = nn.Sequential(*encodelist) self.decoder = nn.Sequential(*decodelist) def forward(self, x): y = x if len(x.shape) == 3: x.unsqueeze(1) x = self.encoder(x) # print(x.shape) torch.cuda.empty_cache() x = self.decoder(x) torch.cuda.empty_cache() # print(x.shape) if len(x.shape) == 4: x.squeeze(1) loss = nn.MSELoss(reduction='mean')(x, y) torch.cuda.empty_cache() return -1, loss, x # make sure it is consistent with other models training function class AE2_S(nn.Module): """ Auto encoder, only move via time direction. Same design in CPAE1 """ def __init__( self, embedded_features, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], n_points=192, n_features=76, ): self.conv_sizes = conv_sizes super(AE2_S, self).__init__() self.embedded_features = embedded_features # . If is int, uses the same padding in all boundaries. 
# If a 4-tuple, uses (left ,right ,top ,bottom ) self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ) self.decode_channels = self.channels[::-1] self.decoder = nn.ModuleList( [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] ) self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features) self.delinear = nn.Linear(self.embedded_features, self.conv_sizes[-1]) def forward(self, x): # input (batch,192,76) if len(x.shape) == 4: x = x.squeeze(1) y = x x = x.transpose(1, 2) # (b,76,192) x = self.encode(x).transpose(1, 2) # x: (batch, n_time, conv[-1]) x = self.linear(x) # (batch, time,embedded_features) x = nn.BatchNorm1d(self.embedded_features).to(device)(x.transpose(1, 2)).transpose(1, 2) x = nn.ReLU(inplace=True).to(device)(x) x = self.delinear(x) # (batch, time, conv[-1]) x = nn.BatchNorm1d(self.conv_sizes[-1]).to(device)(x.transpose(1, 2)).transpose(1, 2) x = nn.ReLU(inplace=True).to(device)(x) x = self.decode(x.transpose(1, 2)) # (batch,76,192) x = x.transpose(1, 2) loss = nn.MSELoss(reduction='mean')(x, y) return -1, loss, x def encode(self, x): for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) return x # output shape: (N,n_features=8,n_points=192) def decode(self, x): for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192) x = self.decoder[i](x) return x def get_encode(self, x): if len(x.shape) == 4: x = x.squeeze(1) x = x.transpose(1, 2) x = self.encode(x).transpose(1, 2) x = nn.Flatten()(x) return x # output shape: (N,192*12) class CAE1(AE1): """ Contrastive Auto-encoder based on AE1 """ def __init__(self): super(CAE1, self).__init__() self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) def forward(self, x): # get batch size bs = x.shape[0] y = x _, _, x = super().forward(x) loss, acc = self.compute_nce(x, y) del y return acc, loss, x def compute_nce(self, x_hat, x): bs = x.shape[0] assert x.shape == x_hat.shape nce = 0 x = x.view(bs, -1) x_hat = x_hat.view(bs, -1) total = torch.mm(x_hat, x.T) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, bs).cuda())) nce = torch.sum(torch.diag(self.lsoftmax(total))) nce /= -1. * bs acc = 1. 
* correct.item() / bs torch.cuda.empty_cache() del x, x_hat return nce, acc class CAE11(nn.Module): def __init__( self, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], ): super(CAE11, self).__init__() self.conv_sizes = conv_sizes encodelist = [] enChannels = [1] + conv_sizes count = 0 for i in range(len(enChannels) - 1): encodelist.append(nn.Conv2d(enChannels[i], enChannels[i + 1], kernel_size=2)) encodelist.append(nn.BatchNorm2d(enChannels[i + 1])) encodelist.append(nn.ReLU(inplace=True)) # if count < 2: # encodelist.append(nn.MaxPool2d(2,stride=1)) count += 1 deChannels = enChannels[::-1] decodelist = [] for i in range(len(enChannels) - 1): # if count >= len(enChannels) - 3: # decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=3)) # else: decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=2)) decodelist.append(nn.BatchNorm2d(deChannels[i + 1])) decodelist.append(nn.ReLU(inplace=True)) count += 1 self.encoder = nn.Sequential(*encodelist) self.decoder = nn.Sequential(*decodelist) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) def forward(self, x): y = x if len(x.shape) == 3: x.unsqueeze(1) x = self.encoder(x) # print(x.shape) torch.cuda.empty_cache() x = self.decoder(x) torch.cuda.empty_cache() # print(x.shape) if len(x.shape) == 4: x.squeeze(1) torch.cuda.empty_cache() loss, acc = self.compute_nce(x, y) del y return acc, loss, x def compute_nce(self, x_hat, x): bs = x.shape[0] assert x.shape == x_hat.shape nce = 0 x = x.view(bs, -1) x_hat = x_hat.view(bs, -1) total = torch.mm(x_hat, x.T) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, bs).cuda())) nce = torch.sum(torch.diag(self.lsoftmax(total))) nce /= -1. * bs acc = 1. * correct.item() / bs torch.cuda.empty_cache() del x, x_hat return nce, acc class CAE2_S(AE2_S): """ Contrastive auto-encoder based on AE2 """ def __init__( self, embedded_features, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], n_points=192, n_features=76, ): self.conv_sizes = conv_sizes self.embedded_features = embedded_features super(CAE2_S, self).__init__(self.embedded_features, self.conv_sizes) # # . If is int, uses the same padding in all boundaries. 
# # If a 4-tuple, uses (left ,right ,top ,bottom ) # self.channels = [n_features] + conv_sizes # # # the core part of model list # self.sequential = lambda inChannel, outChannel: nn.Sequential( # nn.ReflectionPad1d((0, 1)), # nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), # nn.BatchNorm1d(outChannel), # nn.ReLU(inplace=True) # ) # # # ** minded the length should be 1 element shorter than # of channels # self.encoder = nn.ModuleList( # [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] # ) # # self.decode_channels = self.channels[::-1] # self.decoder = nn.ModuleList( # [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))] # ) # self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features) # self.delinear = nn.Linear(self.embedded_features, self.conv_sizes[-1]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) y = x x = x.transpose(1, 2) # (b,76,192) x = self.encode(x).transpose(1, 2) # x: (batch, n_time, conv[-1]) x = self.linear(x) # (batch, time,embedded_features) x = nn.BatchNorm1d(self.embedded_features).to(device)(x.transpose(1, 2)).transpose(1, 2) x = nn.ReLU(inplace=True).to(device)(x) x = self.delinear(x) # (batch, time, conv[-1]) x = nn.BatchNorm1d(self.conv_sizes[-1]).to(device)(x.transpose(1, 2)).transpose(1, 2) x = nn.ReLU(inplace=True).to(device)(x) x = self.decode(x.transpose(1, 2)) # (batch,76,192) x = x.transpose(1, 2) loss, acc = self.compute_nce(x, y) # TODO: return acc, loss, x def compute_nce(self, x_hat, x): bs = x.shape[0] assert x.shape == x_hat.shape nce = 0 x = x.view(bs, -1) x_hat = x_hat.reshape(bs, -1) total = torch.mm(x_hat, x.T) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, bs).cuda())) nce = torch.sum(torch.diag(self.lsoftmax(total))) nce /= -1. * bs acc = 1. * correct.item() / bs return nce, acc class Basic_Cnn(nn.Module): def __init__(self, seed, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], n_features=76, out=2): random.seed(seed) torch.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) super(Basic_Cnn, self).__init__() torch.manual_seed(seed) # . If is int, uses the same padding in all boundaries. # If a 4-tuple, uses (left ,right ,top ,bottom ) self.out = out self.channels = [n_features] + conv_sizes # the core part of model list self.sequential = lambda inChannel, outChannel: nn.Sequential( nn.ReflectionPad1d((0, 1)), nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0), nn.BatchNorm1d(outChannel), nn.ReLU(inplace=True) ) # ** minded the length should be 1 element shorter than # of channels self.encoder = nn.ModuleList( [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))] ) self.fc = nn.Sequential( nn.Linear(self.channels[-1], 1024), nn.ReLU(inplace=True), nn.Linear(1024, self.out), nn.LogSoftmax(dim=1) ) # dim = 1 !!! 
self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) # input shape: (N,C=1,n_points=192,n_features=76) # output shape: (N, C=sizes[-1], ) self.apply(self._weights_init) # def relevant_points(n): def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, x): for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192) x = self.encoder[i](x) # ouput shape: (N,8,192) y = self.fc(x[:, :, -1]) return y def train(args, model, device, train_loader, optimizer, epoch, batch_size, lr=None): # turn on the training mode model.train() logger = logging.getLogger("cpc") if 'CPAE' not in args['model_type'] or 'CPAE4' in args['model_type'] or ( 'CPAE7' in args['model_type']) or 'CPAELSTM41' in args['model_type'] or 'CPAELSTM42' in args['model_type']: for batch_idx, sample in enumerate(train_loader): if sample == 1: continue sigs, labels = zip(*sample) sigs = torch.stack(sigs) labels = torch.stack(labels) data = sigs.float().unsqueeze(1).to(device) # add channel dimension data.requires_grad = True optimizer.zero_grad() # If n_flat_features_per_frame is not provided, then the forward() of the above sentence will return # n_flat_features_per_frame and the below sentence will raise TypeError. # Then get the n_flat_features_per_frame and update this to the model # DO the forward again result = model(data) try: acc, loss, hidden = result except TypeError: n_flat_features_per_frame = result return result loss.backward() optimizer.step() if lr is None: lr = optimizer.update_learning_rate() # See optimizer.py # print(lr) if batch_idx % args['log_interval'] == 0: logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tlr:{:.5f}\tAccuracy: {:.4f}\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), lr, acc, loss.item())) del sigs, labels, sample, data, hidden, acc, loss torch.cuda.empty_cache() elif 'CPAE' in args['model_type']: model.train() logger.info('\n --------------------------- epoch {} ------------------------- \n'.format(epoch)) if args.get('lambda'): logger.info('weights are %s' % args['lambda']) local_loss = [] for ii, batch in enumerate(train_loader): if batch == 1: continue X, y = zip(*batch) X = torch.stack(X).to(device) X.requires_grad = True # y = torch.tensor(y).long().to('cuda') # y is not used here in autoencoder optimizer.zero_grad() D, nce, accuracy = model(X) # decoded l = args.get('Lambda') if l: loss = Chimera_loss(D, X, nce, l) else: loss = Chimera_loss(D, X, nce) loss.backward() optimizer.step() local_loss.append(loss.item()) if ii % 100 == 0: # verbose new_lr = optimizer.update_learning_rate() logger.info('\t {:.5f} {:.5f}'.format(loss.item(), new_lr)) del X, y, batch, D, nce, accuracy, loss, ii torch.cuda.empty_cache() logger.info('\n ---------------------- mean loss : {:.5f} ---------------------- \n'.format( np.mean(local_loss))) torch.cuda.empty_cache() torch.cuda.empty_cache() def validation(model, args, device, validation_loader): logger = logging.getLogger("cpc") logger.info("Starting Validation") if 'CPAE' not in args['model_type'] or 'CPAELSTM42' in args['model_type'] or ('CPAE4' in args['model_type']) or ( 'CPAE7' in args['model_type']) or 'CPAELSTM41' in args['model_type']: model.eval() total_loss = 0 total_acc = 0 with torch.no_grad(): for _, sample in enumerate(validation_loader): if sample == 1: continue sigs, _ = zip(*sample) sigs = torch.stack(sigs) data = sigs.float().unsqueeze(1).to(device) acc, loss, hidden = model(data) total_loss += len(data) * loss total_acc += len(data) * acc torch.cuda.empty_cache() del sigs, sample return total_acc, total_loss else: model.eval() loss_ls = [] total_loss = 0 total_acc = 0 for ii, batch in enumerate(validation_loader): if batch == 1: continue X, y = zip(*batch) X = torch.stack(X).to('cuda') D, nce, accuracy = model(X) # decoded if args.get('lambda'): total_loss += Chimera_loss(D, X, nce, args['lambda']).detach().cpu().numpy() else: total_loss += Chimera_loss(D, X, nce).detach().cpu().numpy() loss_ls.append(record_loss(D, X, nce)) total_acc += len(X) * accuracy torch.cuda.empty_cache() del X, y, batch, D, nce, accuracy loss_ls = np.stack(loss_ls) logger.info('\n ------- validation ------- \n'.format(ii)) logger.info('\t NCE \t MSE \t MASK MSE \t MAPPING MSE') logger.info('\t {:.4f} \t {:.4f} \t {:.4f} \t {:.4f}'.format(*np.mean(loss_ls, axis=0))) return total_acc, total_loss def define_model(args_json, Model, train_loader): model_args = filter_args(args_json, Model) model = Model(**model_args) optimizer = eval(args_json['optimizer']) if args_json.get('n_flat_features_per_frame') is None and Model == CDCK2: args_json['n_flat_features_per_frame'] = train(args_json, model, device, train_loader, optimizer, 2, args_json['batch_size']) del model model_args = filter_args(args_json, Model) model = Model(**model_args) model.update_flat_features(args_json['n_flat_features_per_frame']) if args_json.get('fcs') is not None: model.add_fcs(args_json['fcs']) # add fc layers if required return model.to(device), optimizer def save_intermediate(Model, args_json, device): setting_name = get_setting_name(args_json['model_best']) logging_dir = args_json['logging_dir'] checkpoint_path = os.path.join( args_json['top_path'], 'logs/cpc/', args_json['model_type'], args_json['model_best'] ) 
checkpoint = torch.load(checkpoint_path, map_location='cpu') print('Starting to generate intermediate data\n') train_loader, validation_loader, test_loader = split_Structure_Inhospital( args_json, percentage=1) # BUG every data sample is the same!!! model, optimizer = define_model(args_json, Model, train_loader) model.load_state_dict(checkpoint['state_dict']) model = model.to(device) context_train = [] context_val = [] context_test = [] y_train = [] y_test = [] y_val = [] model.eval() with torch.no_grad(): for _, sample in enumerate(train_loader): if sample == 1: break x, y = zip(*sample) out = model.get_reg_out( ( torch.stack(x).float().unsqueeze(1).to(device) ) ).cpu() context_train.append(out) torch.cuda.empty_cache() y_train.append((torch.stack(y))) del sample, x, y, out context_train = torch.cat(context_train).cpu().numpy() y_train = torch.cat(y_train).cpu().numpy() np.save(os.path.join(logging_dir, setting_name + '-x_train'), context_train) np.save(os.path.join(logging_dir, setting_name + '-y_train'), y_train) print('Getting training intermediate vectors done. saved in %s' % logging_dir) torch.cuda.empty_cache() del context_train, y_train for _, sample in enumerate(validation_loader): if sample == 1: break x, y = zip(*sample) context_val.append(model.get_reg_out( ( torch.stack( x ).float().unsqueeze(1).to(device) ) ) ) y_val.append((torch.stack(y))) del sample, x, y context_val = torch.cat(context_val).cpu().numpy() y_val = torch.cat(y_val).cpu().numpy() np.save(os.path.join(logging_dir, setting_name + '-x_val'), context_val) np.save(os.path.join(logging_dir, setting_name + '-y_val'), y_val) print('Getting validation intermediate vectors done. saved in %s' % logging_dir) torch.cuda.empty_cache() del context_val, y_val for _, sample in enumerate(test_loader): if sample == 1: break x, y = zip(*sample) context_test.append(model.get_reg_out( ( torch.stack( x ).float().unsqueeze(1).to(device) ) ) ) y_test.append((torch.stack(y))) del sample, x, y context_test = torch.cat(context_test).cpu().numpy() y_test = torch.cat(y_test).cpu().numpy() np.save(os.path.join(logging_dir, setting_name + '-x_test'), context_test) np.save(os.path.join(logging_dir, setting_name + '-y_test'), y_test) print('Getting test intermediate vectors done. saved in %s' % logging_dir) torch.cuda.empty_cache() del context_test, y_test def snapshot(dir_path, run_name, state): snapshot_file = os.path.join(dir_path, run_name + '-model_best.pth') # torch.save can save any object # dict type object in our cases torch.save(state, snapshot_file) logger.info("Snapshot saved to {}\n".format(snapshot_file)) def my_collate(batch): """Add paddings to samples in one batch to make sure that they have the same length. Args: Input: Output: data(tensor): a batch of data of patients with the same length labels(tensor): the labels of the data in this batch durations(tensor): the original lengths of the patients in the batch Shape: Input: Output: data: (batch_size,length,num_features) labels: (batch_size,) durations:(batch_size,) """ if len(batch) == 1: return 1 # if batch size=1, it should be the last batch. we cannot compute the nce loss, so ignore this batch. 
if len(batch) > 1: data = [] labels = [] durations = [] batch = sorted(batch, key=lambda x: x['duration'], reverse=True) for sample in batch: data.append(sample['patient']) labels.append(sample['death']) durations.append(sample['duration']) max_len, n_feats = data[0].shape data = [np.array(s, dtype=float) for s in data] data = [torch.from_numpy(s).float() for s in data] labels = [label for label in labels] durations = [duration for duration in durations] data = [torch.cat((s, torch.zeros(max_len - s.shape[0], n_feats)), 0) if s.shape[0] != max_len else s for s in data] data = torch.stack(data, 0) # shape:[24,2844,462] labels = torch.stack(labels, 0) durations = torch.stack(durations, 0) # max:2844 return data, labels, durations class MLP(nn.Module): def __init__(self, hidden_sizes, seed, in_features=8, out=2, dropout=True): torch.manual_seed(seed) super(MLP, self).__init__() hidden_sizes = [in_features] + hidden_sizes + [out] l = [] torch.manual_seed(seed) fcs = [nn.Linear(i, j, bias=True) for i, j in zip(hidden_sizes[:-1], hidden_sizes[1:])] relu = nn.ReLU(inplace=True) drop = nn.Dropout(p=0.2) torch.manual_seed(seed) bns = [nn.BatchNorm1d(i) for i in hidden_sizes[1:]] # apply(_weights_init) for i in range(len(hidden_sizes) - 1): l.append(fcs[i]) if i != len(hidden_sizes) - 2: l.append(relu) l.append(bns[i]) if dropout: l.append(drop) self.mymodules = nn.Sequential(*l) for model in self.mymodules: self.initialize_weights(model) def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) def forward(self, x): # print(x.shape) if len(x.shape) == 4: x = x.squeeze(1) # fastai has a strange issue here. x = self.mymodules(x) # print (x) # print(x.shape) return x def _weights_init(self, m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def valid(self, data_loader, iterations='all', metrics=None): if metrics == None: metrics = self.metrics loss = [None] * len(metrics) overall_loss = [] self.model.eval() with torch.no_grad(): for i, batch in enumerate(data_loader): if iterations != 'all': if i >= iterations: return overall_loss ct, y = zip(*batch) ct = torch.stack(ct).squeeze(1).to(device) y = torch.stack(y).cpu() pred = self.model(ct).cpu() # forward for i, metric in enumerate(metrics): loss[i] = metric(pred, y) # loss overall_loss.append((loss)) del loss, ct, y, pred return overall_loss class LR(nn.Module): def __init__(self, seed, in_features=8, out=2): random.seed(seed) torch.manual_seed(seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) super(LR, self).__init__() torch.manual_seed(seed) self.linear = nn.Linear(in_features, out) def forward(self, x): return F.log_softmax(self.linear(x), dim=1) def load_intermediate(top_path, setting_name, model_type): middata_dir = os.path.join(top_path, 'logs', 'imp', model_type) x_train = np.load(os.path.join(middata_dir, setting_name + '-x_train.npy')) y_train = np.load(os.path.join(middata_dir, setting_name + '-y_train.npy')) x_val = np.load(os.path.join(middata_dir, setting_name + '-x_val.npy')) y_val = np.load(os.path.join(middata_dir, setting_name + '-y_val.npy')) x_test = np.load(os.path.join(middata_dir, setting_name + '-x_test.npy')) y_test = np.load(os.path.join(middata_dir, setting_name + '-y_test.npy')) return { 'x_train': x_train, 
'y_train': y_train, 'x_val': x_val, 'y_val': y_val, 'x_test': x_test, 'y_test': y_test } def tabular_frame(args_json): data_intermediate = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type']) x_train, y_train, x_val, y_val, x_test, y_test = data_intermediate['x_train'], data_intermediate['y_train'], \ data_intermediate['x_val'], data_intermediate['y_val'], \ data_intermediate['x_test'], data_intermediate['y_test'] train_df = pd.DataFrame(np.hstack((x_train, y_train)), columns=list(range(8)) + ['y']) val_df = pd.DataFrame(np.hstack((x_val, y_val)), columns=list(range(8)) + ['y']) test_df = pd.DataFrame(np.hstack((x_test, y_test)), columns=list(range(8)) + ['y']) return train_df, val_df, test_df def dataset_intermediate(args_json): data_intermediate = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type']) x_train, y_train, x_val, y_val, x_test, y_test = data_intermediate['x_train'], data_intermediate['y_train'], \ data_intermediate['x_val'], data_intermediate['y_val'], \ data_intermediate['x_test'], data_intermediate['y_test'] train_set, val_set, test_set = TrivialDataset(x_train, y_train), \ TrivialDataset(x_val, y_val), \ TrivialDataset(x_test, y_test) return train_set, val_set, test_set def data_loader_intermediate(args_json): data_intermediate = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type']) x_train, y_train, x_val, y_val, x_test, y_test = data_intermediate['x_train'], data_intermediate['y_train'], \ data_intermediate['x_val'], data_intermediate['y_val'], \ data_intermediate['x_test'], data_intermediate['y_test'] train_set, val_set, test_set = TrivialDataset(x_train, y_train), \ TrivialDataset(x_val, y_val), \ TrivialDataset(x_test, y_test) train_loader, val_loader, test_loader = DataLoader(train_set, shuffle=True, batch_size=args_json['batch_size'], collate_fn=my_collate_fix, num_workers=args_json['num_workers']), \ DataLoader(val_set, batch_size=args_json['batch_size'], shuffle=True, collate_fn=my_collate_fix, num_workers=args_json['num_workers']), \ DataLoader(test_set, shuffle=False, batch_size=args_json['batch_size'], collate_fn=my_collate_fix, num_workers=args_json['num_workers']) return train_loader, val_loader, test_loader def binary_acc(y_pred, y_test): y_pred_tag = torch.round(torch.sigmoid(y_pred)) correct_results_sum = (y_pred_tag == y_test).sum().float() acc = correct_results_sum / y_test.shape[0] acc = torch.round(acc * 100) return acc def fastai_dl(train_set, val_set, test_set, device, batch_size=64, num_workers=24): # fastai dataloader return tabular.DataBunch.create(train_ds=train_set, valid_ds=val_set, test_ds=test_set, bs=batch_size, num_workers=num_workers, device=device, ) def train_mlp(model, train_loader, val_loader, epoch, lr, optimizer): lossfn = nn.CrossEntropyLoss() for epoch in range(epoch): train_loss = [] train_acc = [] val_loss = [] val_acc = [] model.train() for i, batch in enumerate(train_loader): ct, y = zip(*batch) ct = torch.stack(ct).squeeze(1).to(device) y = torch.stack(y).to(device) # ---------- train mlp --------- optimizer.zero_grad() pred = model(ct) # forward loss = lossfn(pred, y) # loss acc = sum(torch.eq(torch.argmax(pred, axis=1), y)).item() / len(y) * 100 train_acc.append(acc) loss.backward() # compute loss optimizer.step() # update torch.cuda.empty_cache() train_loss.append(loss.item()) del pred, loss, acc, ct, y model.eval() with torch.no_grad(): for i, batch in enumerate(val_loader): ct, y = zip(*batch) ct = 
torch.stack(ct).squeeze(1).to(device) y = torch.stack(y).to(device) # ---------- validation predicted by mlp --------- pred = model(ct) # forward loss = lossfn(pred, y) # loss acc = sum(torch.eq(torch.argmax(pred, axis=1), y)).item() / len(y) * 100 val_acc.append(acc) val_loss.append(loss.item()) torch.cuda.empty_cache() del pred, loss, acc, ct, y # print out statistics verbose(epoch, train_loss, train_acc, val_loss, val_acc) class Basic_LSTM(nn.Module): def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, noct=False): self.out = 2 if task in ['ihm', 'dd'] else 10 super(Basic_LSTM, self).__init__() self.lstm1 = nn.LSTM( input_size=input_dim, hidden_size=dim, bidirectional=False, batch_first=True ) self.fc = nn.Sequential( nn.Linear(dim, 1024), nn.ReLU(inplace=True), nn.Linear(1024, self.out), nn.LogSoftmax(dim=1) ) for model in [self.lstm1, self.fc]: self.initialize_weights(model) def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def forward(self, x): xt, state1 = self.lstm1(x) y = self.fc(xt[:, -1, :]) return y class AE_LSTM(nn.Module): """ CPLSTM4------use lstm as Wk mode=1 use hidden states when predict. else use cell states """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, noct=False): self.dim = dim # hidden dimension self.bn = bn self.drop = dropout self.task = task self.depth = depth self.time_step = time_step self.num_classes = num_classes self.input_dim = input_dim self.mode = mode self.noct = noct super(AE_LSTM, self).__init__() # encoder self.lstm1 = nn.LSTM( input_size=self.input_dim, hidden_size=dim, bidirectional=False, batch_first=True ) # decoder # minded that hidden_size is different self.lstm2 = nn.LSTM( input_size=dim, hidden_size=self.input_dim, bidirectional=False, batch_first=True ) # not used if self.noct: self.stack_dim = self.dim * 192 else: self.stack_dim = self.dim * 193 self.dropout = nn.Dropout(self.drop) # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)]) self.softmax = nn.Softmax(dim=0) self.lsoftmax = nn.LogSoftmax(dim=0) for model in [self.lstm1, self.lstm2]: self.initialize_weights(model) def init_hidden(self, bs, dim): cell_states = torch.zeros(1, bs, dim).to(device) hidden_states = torch.zeros(1, bs, dim).to(device) return (hidden_states, cell_states) def initialize_weights(self, model): if type(model) in [nn.Linear]: nn.init.xavier_uniform_(model.weight) nn.init.zeros_(model.bias) elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]: nn.init.orthogonal_(model.weight_hh_l0) nn.init.xavier_uniform_(model.weight_ih_l0) nn.init.zeros_(model.bias_hh_l0) nn.init.zeros_(model.bias_ih_l0) def get_reg_out(self, x, stack=False, warm=False, conti=False): # check input shape if len(x.shape) == 4: x = x.squeeze(1) if x.shape[1] == 76: x = x.transpose(1, 2) xt, (ht, ct) = self.lstm1(x) if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1))) if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1)) return xt[:, -1, :].squeeze(1) def get_encode(self, x): if len(x.shape) == 4: x = x.squeeze(1) if x.shape[1] == 76: x = x.transpose(1, 2) x,_,_ = self.lstm1(x) x = nn.Flatten()(x) return x def forward(self, x): if 
len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) self.bs = x.shape[0] x_t, state1 = self.lstm1(x) # encoder part : zeros init x_hat, state2 = self.lstm2(x_t) # decoder part : zeros init loss = nn.MSELoss(reduction='mean')(x, x_hat) return -1, loss, x # make sure it is consistent with other models training function class CAE_LSTM(AE_LSTM): """ constrastive auto-encoder with LSTM backbone """ def __init__(self, dim, bn, dropout, task, depth=2, num_classes=1, input_dim=76, time_step=5, mode=1, noct=False): super(CAE_LSTM, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode, noct) # get reg out is also the same as Basic LSTM_AE def forward(self, x): if len(x.shape) == 4: x = x.squeeze(1) # print('shape of x is ' ,x.shape) if x.shape[1] == 76: x = x.transpose(1, 2) self.bs = x.shape[0] x_t, state1 = self.lstm1(x) # encoder part : zeros init x_hat, state2 = self.lstm2(x_t) # decoder part : zeros init loss, acc = self.compute_nce(x_hat, x) return acc, loss, x def compute_nce(self, x_hat, x): bs = x.shape[0] assert x.shape == x_hat.shape nce = 0 x = x.view(bs, -1) x_hat = x_hat.reshape(bs, -1) total = torch.mm(x_hat, x.T) correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, bs).cuda())) nce = torch.sum(torch.diag(self.lsoftmax(total))) nce /= -1. * bs acc = 1. * correct.item() / bs return nce, acc
[ "anonymousparti28@gmail.com" ]
anonymousparti28@gmail.com
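A minimal usage sketch for the LSTM auto-encoder defined in the file above. The hyperparameter values and batch shape are illustrative, and the sketch defines its own `device` (the original script assumes one as a module-level global); AE_LSTM expects (batch, time, features) input and returns a dummy accuracy, an MSE reconstruction loss, and the (possibly transposed) input.

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = AE_LSTM(dim=64, bn=False, dropout=0.3, task='ihm', input_dim=76).to(device)
x = torch.randn(8, 192, 76).to(device)   # (batch, time steps, features)
acc, loss, _ = model(x)                  # acc is -1 for the plain auto-encoder
print(loss.item())                       # scalar reconstruction loss

Note that CAE_LSTM shares this interface but swaps the MSE loss for the InfoNCE-style contrastive loss in compute_nce, which (as written) requires CUDA because of the torch.arange(0, bs).cuda() call.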
428efb464a06b53657e381f76bf9e07b3382ba40
6fddeb3fb4be07e4c1063a0c49d1f25606fa78c2
/WebProject1/primeiroPrograma.py
a6f6ef9bb480d32170e34644221afe8968666b18
[]
no_license
RuanNunes/Logica-de-Programa-o-com-Python
d3f663881c2e51888608d9cf5f51c0956cdd10dd
18b2b41e485d3e58ce2d5cf923e389cd146d7f18
refs/heads/master
2020-03-17T16:27:40.661667
2018-06-03T20:06:29
2018-06-03T20:06:29
133,749,488
0
0
null
null
null
null
UTF-8
Python
false
false
223
py
class primeiroPrograma(object):
    print('Meu Primeiro Programa')
    num1 = int(input('Digite Um Numero:'))
    num2 = int(input('Digite o Segundo Numero:'))
    print('A soma dos numeros é:', num1 + num2)
    pass
[ "ruan.nunes@el.com.br" ]
ruan.nunes@el.com.br
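The class body above executes its input and print statements once, at class-definition time, rather than per instance. A sketch of the same logic as a plain function (the function name is hypothetical; the prompts are kept from the original):

def soma_interativa():
    print('Meu Primeiro Programa')
    num1 = int(input('Digite Um Numero:'))
    num2 = int(input('Digite o Segundo Numero:'))
    print('A soma dos numeros é:', num1 + num2)

soma_interativa()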
9a0f0433298aaf2b0b0aa33f5a64b0273f639e93
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
/eXe/rev2669-2722/left-trunk-2722/twisted/internet/iocpreactor/udp.py
3bf7a5bba392de8252482bdf0e1ba0600cfe27fa
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
joliebig/featurehouse_fstmerge_examples
af1b963537839d13e834f829cf51f8ad5e6ffe76
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
refs/heads/master
2016-09-05T10:24:50.974902
2013-03-28T16:28:47
2013-03-28T16:28:47
9,080,611
3
2
null
null
null
null
UTF-8
Python
false
false
6,644
py
import socket

from twisted.internet import interfaces, defer, error, protocol, address
from twisted.internet.abstract import isIPAddress
from twisted.persisted import styles
from twisted.python import log, failure, reflect

from ops import ReadFileOp, WriteFileOp, WSARecvFromOp, WSASendToOp
from util import StateEventMachineType
from zope.interface import implements

ERROR_PORT_UNREACHABLE = 1234


class Port(log.Logger, styles.Ephemeral, object):
    __metaclass__ = StateEventMachineType
    implements(interfaces.IUDPTransport)
    events = ["startListening", "stopListening", "write", "readDone",
              "readErr", "writeDone", "writeErr", "connect"]
    sockinfo = (socket.AF_INET, socket.SOCK_DGRAM, 0)
    read_op_class = WSARecvFromOp
    write_op_class = WSASendToOp
    reading = False
    _realPortNumber = None

    disconnected = property(lambda self: self.state == "disconnected")

    def __init__(self, bindAddress, proto, maxPacketSize=8192):
        assert isinstance(proto, protocol.DatagramProtocol)
        self.state = "disconnected"
        from twisted.internet import reactor
        self.bindAddress = bindAddress
        self._connectedAddr = None
        self.protocol = proto
        self.maxPacketSize = maxPacketSize
        self.logstr = reflect.qual(self.protocol.__class__) + " (UDP)"
        self.read_op = self.read_op_class(self)
        self.readbuf = reactor.AllocateReadBuffer(maxPacketSize)
        self.reactor = reactor

    def __repr__(self):
        if self._realPortNumber is not None:
            return "<%s on %s>" % (self.protocol.__class__, self._realPortNumber)
        else:
            return "<%s not connected>" % (self.protocol.__class__,)

    def handle_listening_connect(self, host, port):
        if not isIPAddress(host):
            raise ValueError, "please pass only IP addresses, not domain names"
        self.state = "connecting"
        return defer.maybeDeferred(self._connectDone, host, port)

    def handle_connecting_connect(self, host, port):
        raise RuntimeError, "already connected, reconnecting is not currently supported (talk to itamar if you want this)"

    handle_connected_connect = handle_connecting_connect

    def _connectDone(self, host, port):
        self._connectedAddr = (host, port)
        self.state = "connected"
        self.socket.connect((host, port))
        return self._connectedAddr

    def handle_disconnected_startListening(self):
        self._bindSocket()
        host, port = self.bindAddress
        if isIPAddress(host):
            return defer.maybeDeferred(self._connectSocket, host)
        else:
            d = self.reactor.resolve(host)
            d.addCallback(self._connectSocket)
            return d

    def _bindSocket(self):
        try:
            skt = socket.socket(*self.sockinfo)
            skt.bind(self.bindAddress)
        except socket.error, le:
            raise error.CannotListenError, (None, None, le)
        self._realPortNumber = skt.getsockname()[1]
        log.msg("%s starting on %s"%(self.protocol.__class__, self._realPortNumber))
        self.socket = skt

    def _connectSocket(self, host):
        self.bindAddress = (host, self.bindAddress[1])
        self.protocol.makeConnection(self)
        self.startReading()
        self.state = "listening"

    def startReading(self):
        self.reading = True
        try:
            self.read_op.initiateOp(self.socket.fileno(), self.readbuf)
        except WindowsError, we:
            log.msg("initiating read failed with args %s" % (we,))

    def stopReading(self):
        self.reading = False

    def handle_listening_readDone(self, bytes, addr = None):
        if addr:
            self.protocol.datagramReceived(self.readbuf[:bytes], addr)
        else:
            self.protocol.datagramReceived(self.readbuf[:bytes])
        if self.reading:
            self.startReading()

    handle_connecting_readDone = handle_listening_readDone
    handle_connected_readDone = handle_listening_readDone

    def handle_listening_readErr(self, ret, bytes):
        log.msg("read failed with err %s" % (ret,))
        if ret == 1234: # ERROR_PORT_UNREACHABLE
            self.protocol.connectionRefused()
        if self.reading:
            self.startReading()

    handle_connecting_readErr = handle_listening_readErr
    handle_connected_readErr = handle_listening_readErr

    def handle_disconnected_readErr(self, ret, bytes):
        pass # no kicking the dead horse

    def handle_disconnected_readDone(self, bytes, addr = None):
        pass # no kicking the dead horse

    def handle_listening_write(self, data, addr):
        self.performWrite(data, addr)

    def handle_connected_write(self, data, addr = None):
        assert addr in (None, self._connectedAddr)
        self.performWrite(data, addr)

    def performWrite(self, data, addr = None):
        self.writing = True
        try:
            write_op = self.write_op_class(self)
            if not addr:
                addr = self._connectedAddr
            write_op.initiateOp(self.socket.fileno(), data, addr)
        except WindowsError, we:
            log.msg("initiating write failed with args %s" % (we,))

    def handle_listening_writeDone(self, bytes):
        log.msg("write success with bytes %s" % (bytes,))

    handle_connecting_writeDone = handle_listening_writeDone
    handle_connected_writeDone = handle_listening_writeDone

    def handle_listening_writeErr(self, ret, bytes):
        log.msg("write failed with err %s" % (ret,))
        if ret == ERROR_PORT_UNREACHABLE:
            self.protocol.connectionRefused()

    handle_connecting_writeErr = handle_listening_writeErr
    handle_connected_writeErr = handle_listening_writeErr

    def handle_disconnected_writeErr(self, ret, bytes):
        pass # no kicking the dead horse

    def handle_disconnected_writeDone(self, bytes):
        pass # no kicking the dead horse

    def writeSequence(self, seq, addr):
        self.write("".join(seq), addr)

    def handle_listening_stopListening(self):
        self.stopReading()
        self.connectionLost()

    handle_connecting_stopListening = handle_listening_stopListening
    handle_connected_stopListening = handle_listening_stopListening

    def connectionLost(self, reason=None):
        log.msg('(Port %s Closed)' % self._realPortNumber)
        self._realPortNumber = None
        self.protocol.doStop()
        self.socket.close()
        del self.socket
        self.state = "disconnected"

    def logPrefix(self):
        return self.logstr

    def getHost(self):
        return address.IPv4Address('UDP', *(self.socket.getsockname() + ('INET_UDP',)))
[ "joliebig@fim.uni-passau.de" ]
joliebig@fim.uni-passau.de
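The Port above is the IOCP-side UDP transport that drives a twisted DatagramProtocol (see the datagramReceived calls in handle_listening_readDone). A minimal protocol such a transport could carry, sketched for illustration (the Echo name is hypothetical):

from twisted.internet import protocol

class Echo(protocol.DatagramProtocol):
    def datagramReceived(self, datagram, addr):
        # echo the payload straight back to the sender
        self.transport.write(datagram, addr)

# typically hosted with reactor.listenUDP(8000, Echo()) followed by reactor.run()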
344ac01479164503dbab03b95cd598cba2744ea4
047ddbf7dc154786da48f4b5ab8968a7abcad24b
/genprimo.py
51c4484a5e8c896c48e9515a294848b3c6413007
[]
no_license
algoritmos-2019-2/clase-1-JAAD300
ecea409136bcc63e4c778b43c5b339cbb4a718de
0ab81d11a6532956636e8999a2f9ae11d75b8977
refs/heads/master
2020-04-19T20:03:19.553368
2019-04-01T05:40:22
2019-04-01T05:40:22
168,404,653
0
0
null
null
null
null
UTF-8
Python
false
false
350
py
#!/usr/bin/env python3


def checador(n):
    for i in range(2, n):
        if (n % i) == 0:
            return print(n, "no es primo")
        else:
            return print(n, "es primo")


print("ingrese número")
checador(int(input()))
[ "jorge@Jorge-PC.localdomain" ]
jorge@Jorge-PC.localdomain
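Because both branches of the loop body return immediately, checador only ever tests divisibility by 2, so odd composites such as 9 are reported as prime. For comparison, a conventional trial-division check defers the verdict until all candidate divisors have been tried (the function name here is illustrative, not from the repository):

def es_primo(n):
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True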
4ec4d34dab5f7644e361280ca777fc5fb41fdb92
782efe22f3251a701796e68e82fbce27c2ce2d8f
/Discussion/migrations/0042_auto_20200515_1217.py
0b9097e87756d35dd0f7ac9dcbb9599753393a94
[]
no_license
Escalation99/Workev
ffc10e64776bf90d206a4a7a8ef3655c22f0223b
c2312c54c152b823e991ef5955b5d2df7ff58222
refs/heads/main
2023-03-13T05:36:54.386719
2021-03-06T10:27:06
2021-03-06T10:27:06
310,613,595
1
0
null
null
null
null
UTF-8
Python
false
false
586
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-15 05:17
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('Discussion', '0041_auto_20200513_1402'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='category',
            field=models.CharField(choices=[('Announcement', 'Announcement'), ('Other', 'Other'), ('Meeting', 'Meeting'), ('Jobdesc', 'Jobdesc')], default='Jobdesc', max_length=50),
        ),
    ]
[ "raytommy1234@gmail.com" ]
raytommy1234@gmail.com
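Migrations like the one above are generated by makemigrations from a change to the model field itself. A sketch of the corresponding model declaration (this is an illustration of the field the migration alters, not the project's actual models.py):

from django.db import models

class Post(models.Model):
    CATEGORY_CHOICES = [
        ('Announcement', 'Announcement'),
        ('Other', 'Other'),
        ('Meeting', 'Meeting'),
        ('Jobdesc', 'Jobdesc'),
    ]
    category = models.CharField(choices=CATEGORY_CHOICES, default='Jobdesc', max_length=50)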
86c976754fbba24178415faffcb3f295036aef07
b4e9c9927f4839dcf2e03b26d51cc08f1ad5c362
/el7toel8/acme/actors/acmestoragemigrator/actor.py
9a312a6f0840ebb57cf8e6aa7e8d83031aa43db0
[]
no_license
shaded-enmity/isv-repositories
cbde5baacf49029a4122541987ec30f634c9c85f
8f52c44cc6c0d663c5ceef2eea92aea759d20899
refs/heads/master
2022-06-21T00:11:25.417412
2020-05-06T15:05:18
2020-05-06T15:05:18
261,727,873
0
0
null
2020-05-06T15:05:19
2020-05-06T10:39:07
null
UTF-8
Python
false
false
1,380
py
from leapp.actors import Actor
from leapp.models import Report, AcmeStorageInfo
from leapp.tags import FirstBootPhaseTag, IPUWorkflowTag
from leapp import reporting

import os


class AcmeStorageMigrator(Actor):
    """
    Migrate ACME Storage device from old location to the new one
    """

    name = 'acme_storage_migrator'
    consumes = (AcmeStorageInfo,)
    produces = (Report,)
    tags = (FirstBootPhaseTag, IPUWorkflowTag)

    def process(self):
        acme_storage_info = next(self.consume(AcmeStorageInfo), None)

        # Rename the device
        if acme_storage_info.has_device and acme_storage_info.has_kernel_module:
            os.rename('/dev/acme0', '/dev/acme')

        # Emit a report message informing the system administrator that the device
        # path has been changed
        reporting.create_report([
            reporting.Title('ACME Storage device path migrated'),
            reporting.Summary('ACME Storage device path has been changed to /dev/acme'),
            reporting.Severity(reporting.Severity.INFO),
            reporting.Tags([reporting.Tags.OS_FACTS]),
            reporting.RelatedResource('device', '/dev/acme'),
            reporting.ExternalLink(
                url='https://acme.corp/storage-rhel',
                title='ACME Storage on RHEL'
            )
        ])
[ "noreply@github.com" ]
shaded-enmity.noreply@github.com
e9811e3794478cb96fb7f9d5165286664ef1e3d4
67b8c98b89f45780b1a153b2a06ed9b76626df23
/pyparrot_modified/pyparrot/networking/bleConnection.py
95cec165f89ea2033ec44f380e408329ea22c022
[ "MIT" ]
permissive
Hollyqui/PyStalk
fe37dccb583f083b284134d0787dc9ef5dfeee5e
98abdaeb6194e79b402304a619182cec63074f2d
refs/heads/master
2020-06-02T15:51:40.106253
2020-02-11T08:00:47
2020-02-11T08:00:47
191,217,500
3
2
null
null
null
null
UTF-8
Python
false
false
27,229
py
from bluepy.btle import Peripheral, UUID, DefaultDelegate, BTLEException from pyparrot_modified.pyparrot.utils.colorPrint import color_print import struct import time from pyparrot_modified.pyparrot.commandsandsensors.DroneSensorParser import get_data_format_and_size from datetime import datetime class MinidroneDelegate(DefaultDelegate): """ Handle BLE notififications """ def __init__(self, handle_map, minidrone, ble_connection): DefaultDelegate.__init__(self) self.handle_map = handle_map self.minidrone = minidrone self.ble_connection = ble_connection color_print("initializing notification delegate", "INFO") def handleNotification(self, cHandle, data): #print "handling notificiation from channel %d" % cHandle #print "handle map is %s " % self.handle_map[cHandle] #print "channel map is %s " % self.minidrone.characteristic_receive_uuids[self.handle_map[cHandle]] #print "data is %s " % data channel = self.ble_connection.characteristic_receive_uuids[self.handle_map[cHandle]] (packet_type, packet_seq_num) = struct.unpack('<BB', data[0:2]) raw_data = data[2:] if channel == 'ACK_DRONE_DATA': # data received from drone (needs to be ack on 1e) #color_print("calling update sensors ack true", "WARN") self.minidrone.update_sensors(packet_type, None, packet_seq_num, raw_data, ack=True) elif channel == 'NO_ACK_DRONE_DATA': # data from drone (including battery and others), no ack #color_print("drone data - no ack needed") self.minidrone.update_sensors(packet_type, None, packet_seq_num, raw_data, ack=False) elif channel == 'ACK_COMMAND_SENT': # ack 0b channel, SEND_WITH_ACK #color_print("Ack! command received!") self.ble_connection._set_command_received('SEND_WITH_ACK', True) elif channel == 'ACK_HIGH_PRIORITY': # ack 0c channel, SEND_HIGH_PRIORITY #color_print("Ack! high priority received") self.ble_connection._set_command_received('SEND_HIGH_PRIORITY', True) else: color_print("unknown channel %s sending data " % channel, "ERROR") color_print(cHandle) class BLEConnection: def __init__(self, address, minidrone): """ Initialize with its BLE address - if you don't know the address, call findMinidrone and that will discover it for you. 
:param address: unique address for this minidrone :param minidrone: the Minidrone object for this minidrone (needed for callbacks for sensors) """ self.address = address self.drone_connection = Peripheral() self.minidrone = minidrone # the following UUID segments come from the Minidrone and from the documenation at # http://forum.developer.parrot.com/t/minidrone-characteristics-uuid/4686/3 # the 3rd and 4th bytes are used to identify the service self.service_uuids = { 'fa00': 'ARCOMMAND_SENDING_SERVICE', 'fb00': 'ARCOMMAND_RECEIVING_SERVICE', 'fc00': 'PERFORMANCE_COUNTER_SERVICE', 'fd21': 'NORMAL_BLE_FTP_SERVICE', 'fd51': 'UPDATE_BLE_FTP', 'fe00': 'UPDATE_RFCOMM_SERVICE', '1800': 'Device Info', '1801': 'unknown', } # the following characteristic UUID segments come from the documentation at # http://forum.developer.parrot.com/t/minidrone-characteristics-uuid/4686/3 # the 4th bytes are used to identify the characteristic # the usage of the channels are also documented here # http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2 self.characteristic_send_uuids = { '0a': 'SEND_NO_ACK', # not-ack commandsandsensors (PCMD only) '0b': 'SEND_WITH_ACK', # ack commandsandsensors (all piloting commandsandsensors) '0c': 'SEND_HIGH_PRIORITY', # emergency commandsandsensors '1e': 'ACK_COMMAND' # ack for data sent on 0e } # counters for each packet (required as part of the packet) self.characteristic_send_counter = { 'SEND_NO_ACK': 0, 'SEND_WITH_ACK': 0, 'SEND_HIGH_PRIORITY': 0, 'ACK_COMMAND': 0, 'RECEIVE_WITH_ACK': 0 } # the following characteristic UUID segments come from the documentation at # http://forum.developer.parrot.com/t/minidrone-characteristics-uuid/4686/3 # the 4th bytes are used to identify the characteristic # the types of commandsandsensors and data coming back are also documented here # http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2 self.characteristic_receive_uuids = { '0e': 'ACK_DRONE_DATA', # drone data that needs an ack (needs to be ack on 1e) '0f': 'NO_ACK_DRONE_DATA', # data from drone (including battery and others), no ack '1b': 'ACK_COMMAND_SENT', # ack 0b channel, SEND_WITH_ACK '1c': 'ACK_HIGH_PRIORITY', # ack 0c channel, SEND_HIGH_PRIORITY } # these are the FTP incoming and outcoming channels # the handling characteristic seems to be the one to send commandsandsensors to (per the SDK) # information gained from reading ARUTILS_BLEFtp.m in the SDK self.characteristic_ftp_uuids = { '22': 'NORMAL_FTP_TRANSFERRING', '23': 'NORMAL_FTP_GETTING', '24': 'NORMAL_FTP_HANDLING', '52': 'UPDATE_FTP_TRANSFERRING', '53': 'UPDATE_FTP_GETTING', '54': 'UPDATE_FTP_HANDLING', } # FTP commandsandsensors (obtained via ARUTILS_BLEFtp.m in the SDK) self.ftp_commands = { "list": "LIS", "get": "GET" } # need to save for communication (but they are initialized in connect) self.services = None self.send_characteristics = dict() self.receive_characteristics = dict() self.handshake_characteristics = dict() self.ftp_characteristics = dict() self.data_types = { 'ACK': 1, 'DATA_NO_ACK': 2, 'LOW_LATENCY_DATA': 3, 'DATA_WITH_ACK': 4 } # store whether a command was acked self.command_received = { 'SEND_WITH_ACK': False, 'SEND_HIGH_PRIORITY': False, 'ACK_COMMAND': False } # instead of parsing the XML file every time, cache the results self.command_tuple_cache = dict() self.sensor_tuple_cache = dict() # maximum number of times to try a packet before assuming it failed self.max_packet_retries = 3 def connect(self, num_retries): """ Connects to the drone and 
re-tries in case of failure the specified number of times :param: num_retries is the number of times to retry :return: True if it succeeds and False otherwise """ # first try to connect to the wifi try_num = 1 connected = False while (try_num < num_retries and not connected): try: self._connect() connected = True except BTLEException: color_print("retrying connections", "INFO") try_num += 1 # fall through, return False as something failed return connected def _reconnect(self, num_retries): """ Reconnect to the drone (assumed the BLE crashed) :param: num_retries is the number of times to retry :return: True if it succeeds and False otherwise """ try_num = 1 success = False while (try_num < num_retries and not success): try: color_print("trying to re-connect to the minidrone at address %s" % self.address, "WARN") self.drone_connection.connect(self.address, "random") color_print("connected! Asking for services and characteristics", "SUCCESS") success = True except BTLEException: color_print("retrying connections", "WARN") try_num += 1 if (success): # do the magic handshake self._perform_handshake() return success def _connect(self): """ Connect to the minidrone to prepare for flying - includes getting the services and characteristics for communication :return: throws an error if the drone connection failed. Returns void if nothing failed. """ color_print("trying to connect to the minidrone at address %s" % self.address, "INFO") self.drone_connection.connect(self.address, "random") color_print("connected! Asking for services and characteristics", "SUCCESS") # re-try until all services have been found allServicesFound = False # used for notifications handle_map = dict() while not allServicesFound: # get the services self.services = self.drone_connection.getServices() # loop through the services for s in self.services: hex_str = self._get_byte_str_from_uuid(s.uuid, 3, 4) # store the characteristics for receive & send if (self.service_uuids[hex_str] == 'ARCOMMAND_RECEIVING_SERVICE'): # only store the ones used to receive data for c in s.getCharacteristics(): hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4) if hex_str in self.characteristic_receive_uuids: self.receive_characteristics[self.characteristic_receive_uuids[hex_str]] = c handle_map[c.getHandle()] = hex_str elif (self.service_uuids[hex_str] == 'ARCOMMAND_SENDING_SERVICE'): # only store the ones used to send data for c in s.getCharacteristics(): hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4) if hex_str in self.characteristic_send_uuids: self.send_characteristics[self.characteristic_send_uuids[hex_str]] = c elif (self.service_uuids[hex_str] == 'UPDATE_BLE_FTP'): # store the FTP info for c in s.getCharacteristics(): hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4) if hex_str in self.characteristic_ftp_uuids: self.ftp_characteristics[self.characteristic_ftp_uuids[hex_str]] = c elif (self.service_uuids[hex_str] == 'NORMAL_BLE_FTP_SERVICE'): # store the FTP info for c in s.getCharacteristics(): hex_str = self._get_byte_str_from_uuid(c.uuid, 4, 4) if hex_str in self.characteristic_ftp_uuids: self.ftp_characteristics[self.characteristic_ftp_uuids[hex_str]] = c # need to register for notifications and write 0100 to the right handles # this is sort of magic (not in the docs!) 
but it shows up on the forum here # http://forum.developer.parrot.com/t/minimal-ble-commands-to-send-for-take-off/1686/2 # Note this code snippet below more or less came from the python example posted to that forum (I adapted it to my interface) for c in s.getCharacteristics(): if self._get_byte_str_from_uuid(c.uuid, 3, 4) in \ ['fb0f', 'fb0e', 'fb1b', 'fb1c', 'fd22', 'fd23', 'fd24', 'fd52', 'fd53', 'fd54']: self.handshake_characteristics[self._get_byte_str_from_uuid(c.uuid, 3, 4)] = c # check to see if all 8 characteristics were found allServicesFound = True for r_id in self.characteristic_receive_uuids.values(): if r_id not in self.receive_characteristics: color_print("setting to false in receive on %s" % r_id) allServicesFound = False for s_id in self.characteristic_send_uuids.values(): if s_id not in self.send_characteristics: color_print("setting to false in send") allServicesFound = False for f_id in self.characteristic_ftp_uuids.values(): if f_id not in self.ftp_characteristics: color_print("setting to false in ftp") allServicesFound = False # and ensure all handshake characteristics were found if len(self.handshake_characteristics.keys()) != 10: color_print("setting to false in len") allServicesFound = False # do the magic handshake self._perform_handshake() # initialize the delegate to handle notifications self.drone_connection.setDelegate(MinidroneDelegate(handle_map, self.minidrone, self)) def _perform_handshake(self): """ Magic handshake Need to register for notifications and write 0100 to the right handles This is sort of magic (not in the docs!) but it shows up on the forum here http://forum.developer.parrot.com/t/minimal-ble-commandsandsensors-to-send-for-take-off/1686/2 :return: nothing """ color_print("magic handshake to make the drone listen to our commandsandsensors") # Note this code snippet below more or less came from the python example posted to that forum (I adapted it to my interface) for c in self.handshake_characteristics.values(): # for some reason bluepy characteristic handle is two lower than what I need... # Need to write 0x0100 to the characteristics value handle (which is 2 higher) self.drone_connection.writeCharacteristic(c.handle + 2, struct.pack("<BB", 1, 0)) def disconnect(self): """ Disconnect the BLE connection. Always call this at the end of your programs to cleanly disconnect. :return: void """ self.drone_connection.disconnect() def _get_byte_str_from_uuid(self, uuid, byte_start, byte_end): """ Extract the specified byte string from the UUID btle object. This is an ugly hack but it was necessary because of the way the UUID object is represented and the documentation on the byte strings from Parrot. You give it the starting byte (counting from 1 since that is how their docs count) and the ending byte and it returns that as a string extracted from the UUID. It is assumed it happens before the first - in the UUID. 
:param uuid: btle UUID object :param byte_start: starting byte (counting from 1) :param byte_end: ending byte (counting from 1) :return: string with the requested bytes (to be used as a key in the lookup tables for services) """ uuid_str = format("%s" % uuid) idx_start = 2 * (byte_start - 1) idx_end = 2 * (byte_end) my_hex_str = uuid_str[idx_start:idx_end] return my_hex_str def send_turn_command(self, command_tuple, degrees): """ Build the packet for turning and send it :param command_tuple: command tuple from the parser :param degrees: how many degrees to turn :return: True if the command was sent and False otherwise """ self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256 packet = struct.pack("<BBBBHh", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'], command_tuple[0], command_tuple[1], command_tuple[2], degrees) return self.send_command_packet_ack(packet) def send_auto_takeoff_command(self, command_tuple): """ Build the packet for auto takeoff and send it :param command_tuple: command tuple from the parser :return: True if the command was sent and False otherwise """ # print command_tuple self.characteristic_send_counter['SEND_WITH_ACK'] = ( self.characteristic_send_counter[ 'SEND_WITH_ACK'] + 1) % 256 packet = struct.pack("<BBBBHB", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'], command_tuple[0], command_tuple[1], command_tuple[2], 1) return self.send_command_packet_ack(packet) def send_command_packet_ack(self, packet): """ Sends the actual packet on the ack channel. Internal function only. :param packet: packet constructed according to the command rules (variable size, constructed elsewhere) :return: True if the command was sent and False otherwise """ try_num = 0 self._set_command_received('SEND_WITH_ACK', False) while (try_num < self.max_packet_retries and not self.command_received['SEND_WITH_ACK']): color_print("sending command packet on try %d" % try_num, 2) self._safe_ble_write(characteristic=self.send_characteristics['SEND_WITH_ACK'], packet=packet) #self.send_characteristics['SEND_WITH_ACK'].write(packet) try_num += 1 color_print("sleeping for a notification", 2) #notify = self.drone.waitForNotifications(1.0) self.smart_sleep(0.5) #color_print("awake %s " % notify, 2) return self.command_received['SEND_WITH_ACK'] def send_pcmd_command(self, command_tuple, roll, pitch, yaw, vertical_movement, duration): """ Send the PCMD command with the specified roll, pitch, and yaw :param command_tuple: command tuple per the parser :param roll: :param pitch: :param yaw: :param vertical_movement: :param duration: """ start_time = time.time() while (time.time() - start_time < duration): self.characteristic_send_counter['SEND_NO_ACK'] = ( self.characteristic_send_counter['SEND_NO_ACK'] + 1) % 256 packet = struct.pack("<BBBBHBbbbbI", self.data_types['DATA_NO_ACK'], self.characteristic_send_counter['SEND_NO_ACK'], command_tuple[0], command_tuple[1], command_tuple[2], 1, int(roll), int(pitch), int(yaw), int(vertical_movement), 0) self._safe_ble_write(characteristic=self.send_characteristics['SEND_NO_ACK'], packet=packet) # self.send_characteristics['SEND_NO_ACK'].write(packet) notify = self.drone_connection.waitForNotifications(0.1) def send_noparam_command_packet_ack(self, command_tuple): """ Send a command on the ack channel - where all commandsandsensors except PCMD go, per http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2 the id of the 
last command sent (for use in ack) is the send counter (which is incremented before sending) Ensures the packet was received or sends it again up to a maximum number of times. :param command_tuple: 3 tuple of the command bytes. 0 padded for 4th byte :return: True if the command was sent and False otherwise """ self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256 packet = struct.pack("<BBBBH", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'], command_tuple[0], command_tuple[1], command_tuple[2]) return self.send_command_packet_ack(packet) def send_enum_command_packet_ack(self, command_tuple, enum_value, usb_id=None): """ Send a command on the ack channel with enum parameters as well (most likely a flip). All commandsandsensors except PCMD go on the ack channel per http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2 the id of the last command sent (for use in ack) is the send counter (which is incremented before sending) :param command_tuple: 3 tuple of the command bytes. 0 padded for 4th byte :param enum_value: the enum index :return: nothing """ self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256 if (usb_id is None): packet = struct.pack("<BBBBBBI", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'], command_tuple[0], command_tuple[1], command_tuple[2], 0, enum_value) else: color_print((self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'], command_tuple[0], command_tuple[1], command_tuple[2], 0, usb_id, enum_value), 1) packet = struct.pack("<BBBBHBI", self.data_types['DATA_WITH_ACK'], self.characteristic_send_counter['SEND_WITH_ACK'], command_tuple[0], command_tuple[1], command_tuple[2], usb_id, enum_value) return self.send_command_packet_ack(packet) def send_param_command_packet(self, command_tuple, param_tuple=None, param_type_tuple=0, ack=True): """ Send a command packet with parameters. Ack channel is optional for future flexibility, but currently commands are always send over the Ack channel so it defaults to True. Contributed by awm102 on github. Edited by Amy McGovern to work for BLE commands also. :param: command_tuple: the command tuple derived from command_parser.get_command_tuple() :param: param_tuple (optional): the parameter values to be sent (can be found in the XML files) :param: param_size_tuple (optional): a tuple of strings representing the data type of the parameters e.g. u8, float etc. (can be found in the XML files) :param: ack (optional): allows ack to be turned off if required :return: """ # Create lists to store the number of bytes and pack chars needed for parameters # Default them to zero so that if no params are provided the packet size is correct param_size_list = [0] * len(param_tuple) pack_char_list = [0] * len(param_tuple) if param_tuple is not None: # Fetch the parameter sizes. 
By looping over the param_tuple we only get the data # for requested parameters so a mismatch in params and types does not matter for i, param in enumerate(param_tuple): pack_char_list[i], param_size_list[i] = get_data_format_and_size(param, param_type_tuple[i]) if ack: ack_string = 'SEND_WITH_ACK' data_ack_string = 'DATA_WITH_ACK' else: ack_string = 'SEND_NO_ACK' data_ack_string = 'DATA_NO_ACK' # Construct the base packet self.characteristic_send_counter['SEND_WITH_ACK'] = (self.characteristic_send_counter['SEND_WITH_ACK'] + 1) % 256 # TODO: Amy changed this to match the BLE packet structure but needs to fully test it packet = struct.pack("<BBBBH", self.data_types[data_ack_string], self.characteristic_send_counter[ack_string], command_tuple[0], command_tuple[1], command_tuple[2]) if param_tuple is not None: # Add in the parameter values based on their sizes for i, param in enumerate(param_tuple): packet += struct.pack(pack_char_list[i], param) # TODO: Fix this to not go with ack always return self.send_command_packet_ack(packet) def _set_command_received(self, channel, val): """ Set the command received on the specified channel to the specified value (used for acks) :param channel: channel :param val: True or False :return: """ self.command_received[channel] = val def _safe_ble_write(self, characteristic, packet): """ Write to the specified BLE characteristic but first ensure the connection is valid :param characteristic: :param packet: :return: """ success = False while (not success): try: characteristic.write(packet) success = True except BTLEException: color_print("reconnecting to send packet", "WARN") self._reconnect(3) def ack_packet(self, buffer_id, packet_id): """ Ack the packet id specified by the argument on the ACK_COMMAND channel :param packet_id: the packet id to ack :return: nothing """ #color_print("ack last packet on the ACK_COMMAND channel", "INFO") self.characteristic_send_counter['ACK_COMMAND'] = (self.characteristic_send_counter['ACK_COMMAND'] + 1) % 256 packet = struct.pack("<BBB", self.data_types['ACK'], self.characteristic_send_counter['ACK_COMMAND'], packet_id) #color_print("sending packet %d %d %d" % (self.data_types['ACK'], self.characteristic_send_counter['ACK_COMMAND'], # packet_id), "INFO") self._safe_ble_write(characteristic=self.send_characteristics['ACK_COMMAND'], packet=packet) #self.send_characteristics['ACK_COMMAND'].write(packet) def smart_sleep(self, timeout): """ Sleeps the requested number of seconds but wakes up for notifications Note: NEVER use regular time.sleep! It is a blocking sleep and it will likely cause the BLE to disconnect due to dropped notifications. Always use smart_sleep instead! :param timeout: number of seconds to sleep :return: """ start_time = datetime.now() new_time = datetime.now() diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0) while (diff < timeout): try: notify = self.drone_connection.waitForNotifications(0.1) except: color_print("reconnecting to wait", "WARN") self._reconnect(3) new_time = datetime.now() diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
[ "noreply@github.com" ]
Hollyqui.noreply@github.com
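The connection class above frames every BLE command with struct.pack format strings such as "<BBBBH". A standalone sketch of that framing, using only the standard library (the field meanings are inferred from the command tuples used above, and the values are illustrative):

import struct

# data type, sequence number, project id, class id, command id
packet = struct.pack("<BBBBH", 4, 1, 2, 0, 1)
data_type, seq, project, cls, cmd = struct.unpack("<BBBBH", packet)
print(len(packet))  # 6 bytes: four unsigned bytes plus one little-endian unsigned short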
ee4ec4d60d0e2809301b28150cd9934a01d330c6
bffa0938e70732e992a5d5dc5fb30559fd0ceb7b
/Zadanie9/main.py
7be093c8309014ad5a6351c1465cc96dc7c501d2
[]
no_license
LenovoDobrynin/zadanie9.py
281d5d4850dfb374a273701f588a323f0c524b48
54d75ff2645d447b6f31a9327feaadb1fbf0f21e
refs/heads/master
2023-08-28T00:35:01.091714
2021-10-14T20:38:18
2021-10-14T20:38:18
417,274,906
0
0
null
null
null
null
UTF-8
Python
false
false
1,098
py
a1 = float(input('Введите число а1: '))
a2 = float(input('Введите число a2: '))
a3 = float(input('Введите число a3: '))
b1 = float(input('Введите число b1: '))
b2 = float(input('Введите число b2: '))
b3 = float(input('Введите число b3: '))
c1 = float(input('Введите число c1: '))
c2 = float(input('Введите число c2: '))
c3 = float(input('Введите число c3: '))
d1 = float(input('Введите число d1: '))
d2 = float(input('Введите число d2: '))
d3 = float(input('Введите число d3: '))

delta = a1*b2*c3+b1*c2*a3+c1*a2*b3-c1*b2*a3-a1*c2*b3-b1*a2*c3

if delta == 0:
    print('Главный определитель системы равен нулю')
else:
    delta1 = d1*b2*c3+b1*c2*d3+c1*d2*b3-c1*b2*d3-d1*c2*b3-b1*d2*c3
    delta2 = a1*d2*c3+d1*c2*a3+c1*a2*d3-c1*d2*a3-a1*c2*d3-d1*a2*c3
    delta3 = a1*b2*d3+b1*d2*a3+d1*a2*b3-d1*b2*a3-a1*d2*b3-b1*a2*d3
    x = delta1/delta
    y = delta2/delta
    z = delta3/delta
    print(x,y,z)
[ "noreply@github.com" ]
LenovoDobrynin.noreply@github.com
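The script above applies Cramer's rule to the 3x3 system a_i*x + b_i*y + c_i*z = d_i, with delta the determinant of the coefficient matrix and delta1..delta3 the determinants with the x, y, z columns replaced by the right-hand side. The same system can be solved with NumPy; the coefficient values in this sketch are illustrative:

import numpy as np

# rows are (a_i, b_i, c_i); d holds (d1, d2, d3)
A = np.array([[2.0, 1.0, -1.0],
              [-3.0, -1.0, 2.0],
              [-2.0, 1.0, 2.0]])
d = np.array([8.0, -11.0, -3.0])
if np.linalg.det(A) != 0:
    x, y, z = np.linalg.solve(A, d)
    print(x, y, z)  # approximately (2, 3, -1) for this example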
ed8716a26481360b16a530a1bada6c0c07139b62
b7dc309c2870431ea90710daf829fd364cf2d578
/牛客聪明编辑.py
aa96c3286b912ed9c91a11d94b2d70ef75d0fcfe
[]
no_license
KaGen1999/Leetcode-by-Python
7d65e7890279a2910aae297929a33f52001ad287
ef10b1aa9b7060f949dcd392d62ddaba5fbcdbb8
refs/heads/master
2021-05-20T21:02:44.904731
2020-09-13T16:23:50
2020-09-13T16:23:50
252,415,887
0
0
null
null
null
null
UTF-8
Python
false
false
523
py
n = int(input())
for i in range(n):
    s = input()
    b = ''
    new_s = ''
    count = 1
    state = 0
    for each in s:
        if each != b:
            if state == 1:
                new_s = new_s + each
                state = 2
            elif state == 2:
                new_s = new_s + each
                state = 0
            else:
                new_s = new_s + each
        else:
            if state == 0:
                state = 1
                new_s = new_s + each
        b = each
    print(new_s)
[ "1032336124@qq.com" ]
1032336124@qq.com
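The loop above is a small state machine that drops characters so a word never contains three identical letters in a row or two consecutive doubled pairs. The same pass, repackaged as a helper function for easier testing (the function name and the sample words are illustrative, not part of the original submission):

def fix_word(s):
    b, new_s, state = '', '', 0
    for each in s:
        if each != b:
            new_s += each
            state = 2 if state == 1 else 0
        elif state == 0:
            state = 1
            new_s += each
        b = each
    return new_s

assert fix_word('helloo') == 'hello'
assert fix_word('wooooooow') == 'woow'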