blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81542872c2cca2262b15eedcfd39f2a819780f02 | e94d6b9d8d24d71c1a3d1d515d42f2c81066899e | /neutrino/neff/plot_neff_phase_zoom.py | 73fa40adb14983e282088b02a85c796f194a1eec | [] | no_license | zhnhou/make_figures | aa6ed9c4fcc799f2d9bdcc85e31fcfed5507b3b7 | 6dda7c7cb308ecc88c8e2f57a8c12ee468b53a37 | refs/heads/master | 2020-04-16T02:20:55.464006 | 2017-01-06T21:05:47 | 2017-01-06T21:05:47 | 54,233,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,716 | py | import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import mpl_toolkits.axisartist as AA
import numpy as np
import cPickle as pickle
from hpylib.util.remote_data import *
from hpylib.cosmology.read_planck_data import *
from hpylib.cosmology.read_spt_data import *
# ---- Output file and input data locations ------------------------------
pdf_file = 'neff_phase_zoom.pdf'
planck_pspec_file = '~/data_midway/planck_data/2015/powspec/COM_PowerSpect_CMB_R2.02.fits'
# Planck 2015 TT power spectrum and SPT-SZ bandpowers; files are synced
# from the 'midway' host by sync_from_remote before reading.
plk = read_planck_pspec_fits(sync_from_remote('midway',planck_pspec_file))
spt = read_spt_bandpower('/Users/zhenhou/data_local/data_spt/bandpowers_lps12')
dl_neff_file = '~/data_midway/projects/neutrino/neff/param_sample/Dl_base_TT_lowP_lensing_const_omegab_zeq_thetas_thetad.pkl'
# Pickled model spectra; dl_lensed is indexed as [ell, sample, Neff_index].
dl_load = pickle.load(open(sync_from_remote('midway',dl_neff_file,update=False),'rb'))
num_sample = dl_load['num_sample']
dl_neff = dl_load['dl_lensed']
lmax = dl_load['lmax']
fig, ax = plt.subplots()
ax.set_position([0.1,0.1,0.85,0.65])
# Convert the measured bandpowers (and errors) to ell^2 * D_l.
plk_ell = plk['highl_TT_bands']
plk_l4cl = plk['highl_TT_dbs']*plk_ell**2
plk_err = plk['highl_TT_err']*plk_ell**2
spt_ell = spt['band']
spt_l4cl = spt['dbs_fgsub']*spt_ell**2
spt_err = spt['err']*spt_ell**2
ell = np.arange(lmax+1)
lines = []
colors = ['red','darkorange','deepskyblue','green','purple']
# One curve per Neff index 1..5; each is rescaled so it matches the
# reference model (index 0) at ell=400, making the curves agree at low ell
# and fan out in the zoomed range plotted below.
for i in np.arange(1,6):
    nm = dl_neff[400,0,0] / dl_neff[400,0,i]
    line = plt.plot(ell, dl_neff[:,0,i]*ell**2 * nm, color=colors[i-1], zorder=1, label=r'$\mathrm{N_{eff}}='+str(i)+'$')
err1 = ax.errorbar(plk_ell, plk_l4cl, yerr=plk_err, fmt='o', markersize='0', elinewidth=1.5, capsize=1.5, capthick=1.5,
                   color='black', zorder=5)
#ax.errorbar(spt_ell, spt_l4cl*1.01, yerr=spt_err, fmt='o', markersize='0', elinewidth=1.5, capsize=1.5, capthick=1.5,
#color='blue', zorder=5)
#ax.errorbar(670,6e8, yerr=0.6e8, fmt='o', markersize='0', elinewidth=1.5, capsize=1.5, capthick=1.5,
#color='black', zorder=5)
#plt.text(750, 5.35e8, 'Planck 2015', horizontalalignment='left', verticalalignment='bottom', fontsize=18)
#ax.errorbar(670,4e8, yerr=0.6e8, fmt='o', markersize='0', elinewidth=1.5, capsize=1.5, capthick=1.5,
#color='blue', zorder=5)
#plt.text(750, 3.35e8, 'SPT-SZ', horizontalalignment='left', verticalalignment='bottom', fontsize=18)
# Hide the top/right spines and keep ticks only on the bottom/left axes.
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Zoomed ell window and y range for the "phase zoom" figure.
ax.set_xlim([1250,1850])
ax.set_ylim([1e9,1.9e9])
plt.yticks([0.9e9,1.5e9],['10','15'])
plt.legend(frameon=False, prop={'size':18})
ax.set_xlabel(r'$\ell$', size=20)
ax.set_ylabel(r'$\ell^2\mathcal{D}_\ell\ \mathrm{[10^8\mu K^2]}$', size=20)
ax.set_title(r'fixing $\Omega_b h^2$, $z_{\rm EQ}$, $\theta_s$, $\theta_d$', fontsize=25, loc='left')
plt.savefig(pdf_file, format='pdf')
| [
"zhn.hou@gmail.com"
] | zhn.hou@gmail.com |
34e9bef4f38535732745fc9fc0abfb95b055de0f | 0ff2814071768f8ec012b3abccf19fda8fe44ba7 | /ex28.py | c6a451f5cba4ad9fb9111b8f57c52aee2834f85b | [] | no_license | leetea/Python_2.7 | 846b58e70362ce539c22d642572fcb39b8c17863 | 93fea523ec9be4520ca225f0e673f912983e076b | refs/heads/master | 2021-01-10T00:53:09.604241 | 2014-08-08T02:28:10 | 2014-08-08T02:28:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | print True and True #True
# Boolean-logic practice: each line prints the value of a boolean
# expression, and the trailing comment records the expected result.
# (Python 2 `print` statement syntax.)
print False and True #False
print 1 == 1 and 2 == 1 #False
print "test" == "test" #True
print 1 == 1 or 2 != 1 #True
print True and 1 == 1 #True
print False and 0 != 0 #False
print True or 1 == 1 #True
print "test" == "testing" #False
print 1 != 0 and 2 == 1 #False
print "test" != "testing" #True
print "test" == 1 #False
print not (True and False) #True
print not (1 == 1 and 0 != 1) #False
print not (10 == 1 or 1000 == 1000) #False
print not (1 != 10 or 3 ==4 ) #False
print not ("testing" == "testing" and "Zed" == "Cool Guy") #True
print 1 == 1 and not ("testing" == 1 or 1 == 0) #True
print "chunky" == "bacon" and not (3 == 4 or 3 == 3) #False
print 3 == 3 and not ("testing" == "testing" or "Python" == "Fun") #False
| [
"lktea@uci.edu"
] | lktea@uci.edu |
7213c46cafde3e70e6cfa4163b9961b53207d0fc | 349ce7dc79b24d6b003b758410b32a7e75917085 | /2 Python Programming Examples on Mathematical Expressions/6 Prime factors of an integer.py | f3a887291e522cbf88322d7e463959328fb506d1 | [] | no_license | Saifullahshaikh/-IPU-Intensive-Programming-Unit-01 | 2844706ecceacf9296b69ea92e82d64a67fce557 | b176bf0508098a0fde0fe467a3676f722b6d524f | refs/heads/main | 2023-01-12T14:42:05.575846 | 2020-11-16T20:30:56 | 2020-11-16T20:30:56 | 313,419,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | def Prime_Factors(n):
for i in range(2,n+1):
while n%i == 0:
n= n/i
print(i,end=' ')
Prime_Factors(12)
| [
"ssaifullah272@gmail.com"
] | ssaifullah272@gmail.com |
e43252b1c78b9d16a9c21784ae22ba5cd362fffa | d475a6cf49c0b2d40895ff6d48ca9b0298643a87 | /pyleecan/Classes/ImportVectorField.py | 8dbc5501c4eb5fc5d7c8a98afbad66071632c118 | [
"Apache-2.0"
] | permissive | lyhehehe/pyleecan | 6c4a52b17a083fe29fdc8dcd989a3d20feb844d9 | 421e9a843bf30d796415c77dc934546adffd1cd7 | refs/heads/master | 2021-07-05T17:42:02.813128 | 2020-09-03T14:27:03 | 2020-09-03T14:27:03 | 176,678,325 | 2 | 0 | null | 2019-03-20T07:28:06 | 2019-03-20T07:28:06 | null | UTF-8 | Python | false | false | 7,283 | py | # -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Import/ImportVectorField.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Import/ImportVectorField
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Import.ImportVectorField.get_data import get_data
except ImportError as error:
get_data = error
from ._check import InitUnKnowClassError
from .ImportData import ImportData
class ImportVectorField(FrozenClass):
    """Abstract class for Data Import/Generation"""
    VERSION = 1
    # cf Methods.Import.ImportVectorField.get_data
    # If the dynamic method import above failed, expose a property that
    # re-raises the original ImportError on first access instead of
    # failing when this module is imported.
    if isinstance(get_data, ImportError):
        get_data = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use ImportVectorField method get_data: " + str(get_data)
                )
            )
        )
    else:
        get_data = get_data
    # save method is available in all object
    save = save
    # generic copy method
    def copy(self):
        """Return a copy of the class
        """
        return type(self)(init_dict=self.as_dict())
    # get_logger method is available in all object
    get_logger = get_logger
    def __init__(
        self, components=dict(), name="", symbol="", init_dict=None, init_str=None
    ):
        """Constructor of the class. Can be use in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
            for Matrix, None will initialise the property with an empty Matrix
            for pyleecan type, None will call the default constructor
        - __init__ (init_dict = d) d must be a dictionnary with every properties as keys
        - __init__ (init_str = s) s must be a string
        s is the file path to load
        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""
        if init_str is not None:  # Initialisation by str
            from ..Functions.load import load
            assert type(init_str) is str
            # load the object from a file
            obj = load(init_str)
            assert type(obj) is type(self)
            components = obj.components
            name = obj.name
            symbol = obj.symbol
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "components" in list(init_dict.keys()):
                components = init_dict["components"]
            if "name" in list(init_dict.keys()):
                name = init_dict["name"]
            if "symbol" in list(init_dict.keys()):
                symbol = init_dict["symbol"]
        # Initialisation by argument
        self.parent = None
        # components can be None or a dict of ImportData object
        # (dict values may themselves be dicts, deserialized via ImportData)
        self.components = dict()
        if type(components) is dict:
            for key, obj in components.items():
                if isinstance(obj, dict):
                    self.components[key] = ImportData(init_dict=obj)
                else:
                    self.components[key] = obj
        elif components is None:
            self.components = dict()
        else:
            self.components = components  # Should raise an error
        self.name = name
        self.symbol = symbol
        # The class is frozen, for now it's impossible to add new properties
        self._freeze()
    def __str__(self):
        """Convert this objet in a readeable string (for print)"""
        ImportVectorField_str = ""
        if self.parent is None:
            ImportVectorField_str += "parent = None " + linesep
        else:
            ImportVectorField_str += (
                "parent = " + str(type(self.parent)) + " object" + linesep
            )
        if len(self.components) == 0:
            ImportVectorField_str += "components = dict()" + linesep
        for key, obj in self.components.items():
            # Indent the nested component representation with tabs.
            tmp = (
                self.components[key].__str__().replace(linesep, linesep + "\t")
                + linesep
            )
            ImportVectorField_str += (
                "components[" + key + "] =" + tmp + linesep + linesep
            )
        ImportVectorField_str += 'name = "' + str(self.name) + '"' + linesep
        ImportVectorField_str += 'symbol = "' + str(self.symbol) + '"' + linesep
        return ImportVectorField_str
    def __eq__(self, other):
        """Compare two objects (skip parent)"""
        if type(other) != type(self):
            return False
        if other.components != self.components:
            return False
        if other.name != self.name:
            return False
        if other.symbol != self.symbol:
            return False
        return True
    def as_dict(self):
        """Convert this objet in a json seriable dict (can be use in __init__)
        """
        ImportVectorField_dict = dict()
        ImportVectorField_dict["components"] = dict()
        for key, obj in self.components.items():
            ImportVectorField_dict["components"][key] = obj.as_dict()
        ImportVectorField_dict["name"] = self.name
        ImportVectorField_dict["symbol"] = self.symbol
        # The class name is added to the dict fordeserialisation purpose
        ImportVectorField_dict["__class__"] = "ImportVectorField"
        return ImportVectorField_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        for key, obj in self.components.items():
            obj._set_None()
        self.name = None
        self.symbol = None
    # Generated properties: each value is stored in the shadow attribute
    # _<name> and validated by check_var on assignment.
    def _get_components(self):
        """getter of components"""
        # Re-attach self as parent of every child before returning.
        for key, obj in self._components.items():
            if obj is not None:
                obj.parent = self
        return self._components
    def _set_components(self, value):
        """setter of components"""
        check_var("components", value, "{ImportData}")
        self._components = value
    components = property(
        fget=_get_components,
        fset=_set_components,
        doc=u"""Dict of components (e.g. {"radial": ImportData})
        :Type: {ImportData}
        """,
    )
    def _get_name(self):
        """getter of name"""
        return self._name
    def _set_name(self, value):
        """setter of name"""
        check_var("name", value, "str")
        self._name = value
    name = property(
        fget=_get_name,
        fset=_set_name,
        doc=u"""Name of the vector field
        :Type: str
        """,
    )
    def _get_symbol(self):
        """getter of symbol"""
        return self._symbol
    def _set_symbol(self, value):
        """setter of symbol"""
        check_var("symbol", value, "str")
        self._symbol = value
    symbol = property(
        fget=_get_symbol,
        fset=_set_symbol,
        doc=u"""Symbol of the vector field
        :Type: str
        """,
    )
| [
"sebgue@gmx.net"
] | sebgue@gmx.net |
9b25e94192465363b096c7abad7b9f5a30894dc8 | d191f4b159077af17101453ec699928e7bb59e8d | /bsddb3-6.2.6/Lib/bsddb/test/test_db.py | 320fab67e3fc3bccf4d5454d95a70c3ee426715f | [
"BSD-3-Clause",
"ZPL-2.1",
"BSD-2-Clause"
] | permissive | wllmwng1/CMPUT291_MiniProject2 | 804fccc5e453aea9c56e3be21d9b91d57475f44c | 3921c92c03763333f09d49ea51c2c524da2153bc | refs/heads/main | 2023-01-11T11:36:06.020623 | 2020-11-01T22:49:44 | 2020-11-01T22:49:44 | 309,783,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,322 | py | """
Copyright (c) 2008-2018, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import unittest
import os, glob
from test_all import db, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
class DB(unittest.TestCase):
    # Common fixture: each test gets a fresh db.DB handle bound to a fresh
    # database path; tearDown closes the handle and removes the file.
    def setUp(self):
        self.path = get_new_database_path()
        self.db = db.DB()
    def tearDown(self):
        self.db.close()
        del self.db
        test_support.unlink(self.path)
class DB_general(DB) :
    # Round-trip get/set of options available on every access method,
    # exercised on a hash database where an open handle is needed.
    def test_get_open_flags(self) :
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
        self.assertEqual(db.DB_CREATE, self.db.get_open_flags())
    def test_get_open_flags2(self) :
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE |
                db.DB_THREAD)
        self.assertEqual(db.DB_CREATE | db.DB_THREAD, self.db.get_open_flags())
    def test_get_dbname_filename(self) :
        # No dbname given: get_dbname returns (filename, None).
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
        self.assertEqual((self.path, None), self.db.get_dbname())
    def test_get_dbname_filename_database(self) :
        name = "jcea-random-name"
        self.db.open(self.path, dbname=name, dbtype=db.DB_HASH,
                flags = db.DB_CREATE)
        self.assertEqual((self.path, name), self.db.get_dbname())
    def test_bt_minkey(self) :
        for i in [17, 108, 1030] :
            self.db.set_bt_minkey(i)
            self.assertEqual(i, self.db.get_bt_minkey())
    def test_lorder(self) :
        # Byte order: only 1234 (little-endian) and 4321 (big-endian)
        # are legal values; anything else must raise.
        self.db.set_lorder(1234)
        self.assertEqual(1234, self.db.get_lorder())
        self.db.set_lorder(4321)
        self.assertEqual(4321, self.db.get_lorder())
        self.assertRaises(db.DBInvalidArgError, self.db.set_lorder, 9182)
    def test_priority(self) :
        flags = [db.DB_PRIORITY_VERY_LOW, db.DB_PRIORITY_LOW,
                db.DB_PRIORITY_DEFAULT, db.DB_PRIORITY_HIGH,
                db.DB_PRIORITY_VERY_HIGH]
        for flag in flags :
            self.db.set_priority(flag)
            self.assertEqual(flag, self.db.get_priority())
    def test_get_transactional(self) :
        # A database outside a transactional environment is never
        # transactional, open or not.
        self.assertFalse(self.db.get_transactional())
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
        self.assertFalse(self.db.get_transactional())
class DB_hash(DB) :
    # Hash-access-method specific tuning knobs (Python 2 code: uses xrange).
    def test_h_ffactor(self) :
        for ffactor in [4, 16, 256] :
            self.db.set_h_ffactor(ffactor)
            self.assertEqual(ffactor, self.db.get_h_ffactor())
    def test_h_nelem(self) :
        for nelem in [1, 2, 4] :
            nelem = nelem*1024*1024 # Millions
            self.db.set_h_nelem(nelem)
            self.assertEqual(nelem, self.db.get_h_nelem())
    def test_pagesize(self) :
        for i in xrange(9, 17) :  # From 512 to 65536
            i = 1<<i
            self.db.set_pagesize(i)
            self.assertEqual(i, self.db.get_pagesize())
        # The valid values goes from 512 to 65536
        # Test 131072 bytes...
        self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<17)
        # Test 256 bytes...
        self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<8)
class DB_txn(DB) :
    # Overrides the base fixture: these tests need a database living in a
    # transactional environment (memory pool + log + txn subsystems).
    def setUp(self) :
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
                db.DB_INIT_LOG | db.DB_INIT_TXN)
        self.db = db.DB(self.env)
    def tearDown(self) :
        self.db.close()
        del self.db
        self.env.close()
        del self.env
        test_support.rmtree(self.homeDir)
    def test_flags(self) :
        # set_flags accumulates: the second call ORs into the first.
        self.db.set_flags(db.DB_CHKSUM)
        self.assertEqual(db.DB_CHKSUM, self.db.get_flags())
        self.db.set_flags(db.DB_TXN_NOT_DURABLE)
        self.assertEqual(db.DB_TXN_NOT_DURABLE | db.DB_CHKSUM,
                self.db.get_flags())
    def test_get_transactional(self) :
        self.assertFalse(self.db.get_transactional())
        # DB_AUTO_COMMIT = Implicit transaction
        self.db.open("XXX", dbtype=db.DB_HASH,
                flags = db.DB_CREATE | db.DB_AUTO_COMMIT)
        self.assertTrue(self.db.get_transactional())
class DB_recno(DB) :
    # Recno options: pad byte, delimiter byte and backing source file.
    # The pad/delim setters accept either a 1-char string or an integer;
    # the getters always return the integer byte value (hence ord()).
    def test_re_pad(self) :
        for i in [' ', '*'] : # Check chars
            self.db.set_re_pad(i)
            self.assertEqual(ord(i), self.db.get_re_pad())
        for i in [97, 65] : # Check integers
            self.db.set_re_pad(i)
            self.assertEqual(i, self.db.get_re_pad())
    def test_re_delim(self) :
        for i in [' ', '*'] : # Check chars
            self.db.set_re_delim(i)
            self.assertEqual(ord(i), self.db.get_re_delim())
        for i in [97, 65] : # Check integers
            self.db.set_re_delim(i)
            self.assertEqual(i, self.db.get_re_delim())
    def test_re_source(self) :
        for i in ["test", "test2", "test3"] :
            self.db.set_re_source(i)
            self.assertEqual(i, self.db.get_re_source())
class DB_queue(DB) :
    # Queue options: fixed record length and extent size round-trips.
    def test_re_len(self) :
        for i in [33, 65, 300, 2000] :
            self.db.set_re_len(i)
            self.assertEqual(i, self.db.get_re_len())
    def test_q_extentsize(self) :
        for i in [1, 60, 100] :
            self.db.set_q_extentsize(i)
            self.assertEqual(i, self.db.get_q_extentsize())
def test_suite():
    """Assemble every DB option/flag TestCase into one suite, preserving
    the original registration order."""
    suite = unittest.TestSuite()
    for case in (DB_general, DB_txn, DB_hash, DB_recno, DB_queue):
        suite.addTest(unittest.makeSuite(case))
    return suite
# Allow running this module directly: executes test_suite() defined above.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| [
"wwong1@ualberta.ca"
] | wwong1@ualberta.ca |
fe23d4216942badda8423cc70f46d0f3ca805036 | 2ea66258df8c51273ded673263b77299ca630582 | /Django/serializer.py | a4a752935fd712b5dd52290f481dedcd819260e5 | [] | no_license | bharati-garde/Djangoapp | 24ccab6a3388ced1392d2193c21db590a7651598 | 5a559512016de2f91587923053fe048fba3230c2 | refs/heads/master | 2023-03-30T12:54:04.790899 | 2021-04-04T14:39:27 | 2021-04-04T14:39:27 | 354,531,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | from rest_framework import serializers
from . import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView."""
    # Plain (non-model) serializer with a single short text field.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """A serializer for our user profile objects."""
    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # write_only: accept the password on input but never include it
        # in serialized API responses.
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        """Create and return a new user."""
        user = models.UserProfile(
            email=validated_data['email'],
            name=validated_data['name']
        )
        # Use set_password (Django auth convention: stores a hashed value)
        # instead of assigning the raw password as a plain attribute.
        user.set_password(validated_data['password'])
        user.save()
        return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """A serializer for profile feed items."""
    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # read_only: clients cannot set/reassign the owning profile.
        extra_kwargs = {'user_profile': {'read_only': True}}
"gardebharati123@gmail.com"
] | gardebharati123@gmail.com |
2d1023cba9aba5f467e33f025752b84a3c793dea | 4dd55a769c58a17b9473dc111436a343ae05d4b4 | /utils/alertcheck.py | a1a9871694157233a344cc955e6238af201f209f | [] | no_license | Navu4/Hand-Detection-for-Shredder-Machine- | c54a3a6a53d5bf1f05931e53279b024b4aee54b4 | 1beed2222126cdb8443c2c59d7d8970882e46f75 | refs/heads/main | 2023-04-28T02:51:03.313323 | 2021-05-17T17:28:39 | 2021-05-17T17:28:39 | 342,700,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | import cv2
from playsound import playsound
import pandas as pd
# crossed=0
def drawboxtosafeline(image_np, p1, p2, Line_Position2, Orientation):
# global crossed
if Orientation == "bt":
bounding_mid = (int((p1[0] + p2[0]) / 2), int(p1[1]))
if bounding_mid:
cv2.line(img=image_np, pt1=bounding_mid, pt2=(bounding_mid[0], Line_Position2), color=(255, 0, 0),
thickness=1, lineType=8, shift=0)
distance_from_line = bounding_mid[1] - Line_Position2
elif Orientation == "tb":
bounding_mid = (int((p1[0] + p2[0]) / 2), int(p2[1]))
if bounding_mid:
cv2.line(img=image_np, pt1=bounding_mid, pt2=(bounding_mid[0], Line_Position2), color=(255, 0, 0),
thickness=1, lineType=8, shift=0)
distance_from_line = Line_Position2 - bounding_mid[1]
elif Orientation == "lr":
bounding_mid = (int(p2[0]), int((p1[1] + p2[1]) / 2))
if bounding_mid:
cv2.line(img=image_np, pt1=bounding_mid, pt2=(Line_Position2, bounding_mid[1]), color=(255, 0, 0),
thickness=1, lineType=8, shift=0)
distance_from_line = Line_Position2 - bounding_mid[0]
elif Orientation == "rl":
bounding_mid = (int(p1[0]), int((p1[1] + p2[1]) / 2))
if bounding_mid:
cv2.line(img=image_np, pt1=bounding_mid, pt2=(Line_Position2, bounding_mid[1]), color=(255, 0, 0),
thickness=1, lineType=8, shift=0)
distance_from_line = bounding_mid[1] - Line_Position2
if distance_from_line <= 0:
# crossed+=1
posii = int(image_np.shape[1] / 2)
cv2.putText(image_np, "ALERT", (posii, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)
# sound = os.path.join()
playsound(r"E:\Projects\shredder-machine-hosur\utils\alert.wav")
cv2.rectangle(image_np, (posii - 20, 20), (posii + 85, 60), (255, 0, 0), thickness=3, lineType=8, shift=0)
# to write into xl-sheet
return 1
else:
return 0
| [
"51900952+Navu4@users.noreply.github.com"
] | 51900952+Navu4@users.noreply.github.com |
48a5631543b1b448171a4a54d85b90249941ac1e | 1881553e6bc233a79c9ee5255786e3628b0c86d2 | /util/vocabmapping.py | d32e20b6039646bf5ca7cea2a0a88723a8cb06a5 | [] | no_license | aarthiis/Word-Sense-Disambiguation-by-learning-long-term-dependencies | 7c5fa783055804847a2b06fca185c8278ac03a23 | 3e796b2259df8bf944c151d00529f22b8ca89b07 | refs/heads/master | 2021-07-25T11:20:02.121293 | 2017-11-07T04:28:21 | 2017-11-07T04:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py |
import pickle
class VocabMapping:
    """Token/sense-to-index mappings loaded from two pickled vocabularies.

    ``util/vocab_<word>_sentences.txt`` maps sentence tokens to indices and
    ``util/vocab_<word>_senses.txt`` maps target sense labels to indices.
    Both pickled dicts are expected to contain an ``"<UNK>"`` entry, whose
    index is returned for out-of-vocabulary lookups.
    """
    def __init__(self, word):
        # Sentence-token vocabulary: token -> integer index.
        with open("util/vocab_"+word+"_sentences.txt", "rb") as handle:
            self.dic = pickle.loads(handle.read())
        # Target-sense vocabulary: sense label -> integer index.
        with open("util/vocab_"+word+"_senses.txt","rb") as handle1:
            self.dic1 = pickle.loads(handle1.read())
    def getIndex(self, token):
        """Return the index of ``token``, or the ``<UNK>`` index if absent."""
        try:
            return self.dic[token]
        except KeyError:
            # Was a bare ``except:``, which also hid unrelated bugs
            # (e.g. AttributeError); only a missing key is expected here.
            return self.dic["<UNK>"]
    def getIndex_target(self, token):
        """Return the sense index of ``token``, or the ``<UNK>`` index."""
        try:
            return self.dic1[token]
        except KeyError:
            return self.dic1["<UNK>"]
    def getSize(self):
        """Number of entries in the sentence-token vocabulary."""
        return len(self.dic)
    def getSize_target(self):
        """Number of entries in the sense vocabulary."""
        return len(self.dic1)
| [
"lalchandiitk@gmail.com"
] | lalchandiitk@gmail.com |
e94bb0b4072bf172c48f8d8cb3bfe91985a8dd3e | b2de5660d81afdf6b1fba058faee6ece6a51e462 | /amplify/agent/managers/bridge.py | 76902e4239a982a79bdc60e47f872d32cb28807d | [
"BSD-2-Clause"
] | permissive | Ferrisbane/nginx-amplify-agent | 725d8a7da7fb66e0b41cddd8139d25a084570592 | ef769934341374d4b6ede5fcf5ebff34f6cba8de | refs/heads/master | 2021-01-22T00:03:49.686169 | 2016-07-20T17:50:30 | 2016-07-20T17:50:30 | 63,801,713 | 0 | 0 | null | 2016-07-20T17:41:25 | 2016-07-20T17:41:25 | null | UTF-8 | Python | false | false | 7,064 | py | # -*- coding: utf-8 -*-
import gc
import time
from collections import deque
from amplify.agent.common.context import context
from amplify.agent.common.util.backoff import exponential_delay
from amplify.agent.managers.abstract import AbstractManager
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class Bridge(AbstractManager):
    """
    Manager that flushes object bins and stores them in deques. These deques are then sent to backend.
    """
    name = 'bridge_manager'
    def __init__(self, **kwargs):
        if 'interval' not in kwargs:
            kwargs['interval'] = context.app_config['cloud']['push_interval']
        super(Bridge, self).__init__(**kwargs)
        self.payload = {}
        self.first_run = True
        # HTTP backoff state: time of last attempt, consecutive failures,
        # and the current extra delay computed from the failure count.
        self.last_http_attempt = 0
        self.http_fail_count = 0
        self.http_delay = 0
        # Instantiate payload with appropriate keys and buckets.
        self._reset_payload()
    @staticmethod
    def look_around():
        """
        Checks everything around and make appropriate tree structure
        :return: dict of structure
        """
        # TODO check docker or OS around
        tree = {'system': ['nginx']}
        return tree
    def _run(self):
        try:
            self.flush_all()
            gc.collect()
        except:
            # Log, then re-raise so the failure is not silently swallowed.
            context.default_log.error('failed', exc_info=True)
            raise
    def flush_metrics(self):
        """
        Flushes only metrics
        """
        flush_data = self._flush_metrics()
        if flush_data:
            self.payload['metrics'].append(flush_data)
        self._send_payload()
    def flush_all(self, force=False):
        """
        Flushes all data
        """
        # Map each payload bucket to its flush callable.
        clients = {
            'meta': self._flush_meta,
            'metrics': self._flush_metrics,
            'events': self._flush_events,
            'configs': self._flush_configs
        }
        # Flush data and add to appropriate payload bucket.
        if self.first_run:
            # If this is the first run, flush meta only to ensure object creation.
            flush_data = self._flush_meta()
            if flush_data:
                self.payload['meta'].append(flush_data)
        else:
            for client_type in self.payload.keys():
                if client_type in clients:
                    flush_data = clients[client_type].__call__()
                    if flush_data:
                        self.payload[client_type].append(flush_data)
        # POST only when forced, or when the push interval (plus any
        # failure backoff delay) has elapsed since the last attempt.
        now = time.time()
        if force or now >= (self.last_http_attempt + self.interval + self.http_delay):
            self._send_payload()
    def _send_payload(self):
        """
        Sends current payload to backend
        """
        context.log.debug(
            'modified payload; current payload stats: '
            'meta - %s, metrics - %s, events - %s, configs - %s' % (
                len(self.payload['meta']),
                len(self.payload['metrics']),
                len(self.payload['events']),
                len(self.payload['configs'])
            )
        )
        # Send payload to backend.
        try:
            self.last_http_attempt = time.time()
            self._pre_process_payload()  # Convert deques to lists for encoding
            context.http_client.post('update/', data=self.payload)
            context.default_log.debug(self.payload)
            self._reset_payload()  # Clear payload after successful
            if self.first_run:
                self.first_run = False  # Set first_run to False after first successful send
            if self.http_delay:
                self.http_fail_count = 0
                self.http_delay = 0  # Reset HTTP delay on success
                context.log.debug('successful update, reset http delay')
        except Exception as e:
            # Keep the payload (back as bounded deques) and back off
            # exponentially based on the consecutive failure count.
            self._post_process_payload()  # Convert lists to deques since send failed
            self.http_fail_count += 1
            self.http_delay = exponential_delay(self.http_fail_count)
            context.log.debug('http delay set to %s (fails: %s)' % (self.http_delay, self.http_fail_count))
            exception_name = e.__class__.__name__
            context.log.error('failed to push data due to %s' % exception_name)
            context.log.debug('additional info:', exc_info=True)
        context.log.debug(
            'finished flush_all; new payload stats: '
            'meta - %s, metrics - %s, events - %s, configs - %s' % (
                len(self.payload['meta']),
                len(self.payload['metrics']),
                len(self.payload['events']),
                len(self.payload['configs'])
            )
        )
    # Thin per-client wrappers around _flush().
    def _flush_meta(self):
        return self._flush(clients=['meta'])
    def _flush_metrics(self):
        return self._flush(clients=['metrics'])
    def _flush_events(self):
        return self._flush(clients=['events'])
    def _flush_configs(self):
        return self._flush(clients=['configs'])
    def _flush(self, clients=None):
        """Flush the whole object tree for the given client types."""
        # get structure
        objects_structure = context.objects.tree()
        # recursive flush
        results = self._recursive_object_flush(objects_structure, clients=clients) if objects_structure else None
        return results
    def _recursive_object_flush(self, tree, clients=None):
        """Flush one object and recurse into its children; returns a dict
        of flush results, or None (implicitly) when nothing was flushed."""
        results = {}
        object_flush = tree['object'].flush(clients=clients)
        if object_flush:
            results.update(object_flush)
        if tree['children']:
            children_results = []
            for child_tree in tree['children']:
                child_result = self._recursive_object_flush(child_tree, clients=clients)
                if child_result:
                    children_results.append(child_result)
            if children_results:
                results['children'] = children_results
        if results:
            return results
    def _reset_payload(self):
        """
        After payload has been successfully sent, clear the queues (reset them to empty deques).
        """
        self.payload = {
            'meta': deque(maxlen=360),
            'metrics': deque(maxlen=360),
            'events': deque(maxlen=360),
            'configs': deque(maxlen=360)
        }
    def _pre_process_payload(self):
        """
        ujson.encode does not handle deque objects well. So before attempting a send, convert all the deques to lists.
        """
        for key in self.payload.keys():
            self.payload[key] = list(self.payload[key])
    def _post_process_payload(self):
        """
        If a payload is NOT reset (cannot be sent), then we should reconvert the lists to deques with maxlen to enforce
        memory management.
        """
        for key in self.payload.keys():
            self.payload[key] = deque(self.payload[key], maxlen=360)
| [
"dedm@nginx.com"
] | dedm@nginx.com |
f9512d043e688a54cf69e99f9f4b2ac850730de9 | 916b653522a6787a977692468602292e479b2c08 | /main.py | 4afab91a0e8c6794dac3e6b483225056e6cffc50 | [] | no_license | zhangming8/synth_video_text | 680c8eedc5a6c4ade6801a00f3f73e98d86de95a | 8d8076c5878d35e7f979508bb3fade53b246bdd4 | refs/heads/main | 2022-12-29T18:41:10.951406 | 2020-10-17T12:12:19 | 2020-10-17T12:12:19 | 304,783,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,671 | py | import cv2
import argparse
import glob
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import os
def read_fonts(folder, size=30):
    """Collect font files (.ttf / .otf) located directly under *folder*.

    Parameters
    ----------
    folder : str
        Directory to scan (non-recursive).
    size : int, optional
        Unused here; kept for backward compatibility (it fed a commented-out
        eager load of ImageFont objects in the original code).

    Returns
    -------
    list of str
        Paths of all matching font files.
    """
    font_extensions = ['.ttf', '.otf']
    fonts_list = []
    for ext in font_extensions:
        # os.path.join is robust to a trailing separator in `folder`
        # (the original concatenated strings with "/").
        fonts_list += glob.glob(os.path.join(folder, "*" + ext))
    print("在{}中, 找到{}个字体".format(folder, len(fonts_list)))
    return fonts_list
def read_video_list(folder):
    """Return the video files (.flv/.avi/.mkv/.mp4) found directly in *folder*."""
    extensions = ('.flv', '.avi', '.mkv', '.mp4')
    videos = [path
              for ext in extensions
              for path in glob.glob(folder + "/*" + ext)]
    print("在{}中, 找到{}个视频".format(folder, len(videos)))
    return videos
def draw_text(img, x, y, txt, font, color=(0, 255, 255)):
    """Render *txt* onto a BGR frame at (x, y) using a PIL TrueType font.

    The frame is round-tripped through PIL (BGR ndarray -> RGB PIL image ->
    draw -> BGR ndarray) so an arbitrary TrueType font — e.g. the Chinese
    fonts this script uses — can be rendered.  Returns a new image; the
    input array is left unmodified.
    """
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pil_img)
    # Arguments: text origin (top-left), the text, the fill color, the font.
    draw.text((x, y), txt, color, font=font)
    # Convert the PIL image back to an OpenCV (BGR) array.
    img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
    return img
def main():
    """Overlay one line of sample Chinese text on every COCO val2017 image.

    Fonts come from ``args.fonts_folder`` (module-level ``args`` parsed in
    the __main__ block); annotated images are written to ./result/.
    """
    video_folder = args.video_folder  # read but not used in this function
    fonts_folder = args.fonts_folder
    # Fixed seed: per-image font/size choices are reproducible across runs.
    np.random.seed(100)
    fonts_list = read_fonts(fonts_folder)
    img_list = glob.glob("/media/ming/DATA1/dataset/coco2017/images/val2017/*.jpg")
    if 1:
        for img_p in img_list:
            frame = cv2.imread(img_p)
            # Random font size in [10, 60) and a random font file per image.
            size = np.random.randint(10, 60)
            font_file = fonts_list[np.random.randint(len(fonts_list))]
            print(font_file)
            txt = "大家好,这是一个示例." + font_file
            color = (0, 255, 255)
            x, y = 30, 30
            font = ImageFont.truetype(font_file, size, encoding="utf-8")
            frame = draw_text(frame, x, y, txt, font, color)
            # NOTE(review): assumes ./result/ already exists; cv2.imwrite
            # reports failure via its return value, not an exception.
            cv2.imwrite("result/"+os.path.basename(img_p), frame)
            #cv2.namedWindow("image", 0)
            #cv2.imshow('image', frame)
            #k = cv2.waitKey(0)
            #if k == 27:
            #    exit()
            #cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Genereate Synthetic Scene-Text Images')
parser.add_argument('--vis', action='store_true', dest='viz', default=False,
help='flag for turning on visualizations')
parser.add_argument('--fonts_folder', type=str, default="./fonts/chinese_fonts")
parser.add_argument('--video_folder', type=str, default='/media/ming/DATA2/video/bilibili_video/1')
args = parser.parse_args()
main()
| [
"zhangming8@github.com"
] | zhangming8@github.com |
43b5224ac800af753a26bb8e66d9830ccb58e12b | af4c313f5e83fb0e0fd22379a1ad3daf4be01771 | /manage.py | bbbbc95ae700765ac573066d788721f443710df0 | [] | no_license | wellington-carvalho/eventex | ee2c428053c6aa765955c8520c041e75b25c7b17 | 05147a744fb5b02dea527aa079359da6b723fc0d | refs/heads/master | 2020-03-20T02:08:28.787663 | 2018-08-07T10:43:32 | 2018-08-07T10:43:32 | 137,100,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eventex.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv) | [
"wellington.carvalho@armazemparaiba.com.br"
] | wellington.carvalho@armazemparaiba.com.br |
80261fed562aa68eeed3feabb91b51944742158c | 29705cfa764b8800a4f611044bb441ae2dbb517e | /ctpbee/indicator/back.py | 0149bc0d761712c1267a20af3ac2da0a1c6fc8f9 | [
"MIT"
] | permissive | ctpbee/ctpbee | 98c720a54999e9c4bb242848a9cd4363f96ea2e1 | 217b73da65931213c1af4733741014d05b3a8bac | refs/heads/master | 2023-03-16T12:47:01.260983 | 2023-03-13T05:49:51 | 2023-03-13T05:49:51 | 202,876,271 | 665 | 186 | MIT | 2023-09-12T12:33:29 | 2019-08-17T12:08:53 | Python | UTF-8 | Python | false | false | 236 | py | """
这里是向量化回测内容以及可视化
主要用于快速回测结果
todo: 通过编写函数对应的参数以及需要执行的函数 来计算出最后的回测结果
"""
class VectorBackTest:
    """Placeholder for vectorized backtesting (not implemented yet).

    Defining this class deliberately fails so the module cannot be used
    before an implementation exists.
    """
    # Bug fix: `raise NotImplemented` raised a TypeError, because
    # NotImplemented is a comparison sentinel, not an exception class.
    # NotImplementedError is the intended placeholder exception.
    raise NotImplementedError("VectorBackTest is not implemented yet")
| [
"somewheve@gmail.com"
] | somewheve@gmail.com |
ba330efc4c3d51aa3b56b7900ded652503883d16 | 21303fb98c4b0a535e3f3228099ecf80d5f1d7e6 | /hw1/pinhsuan_lee_hw1/pinhsuan_lee_task2.py | 38b7fbc50bc50b1977ea971e03557a43602db3f8 | [] | no_license | winnie800821/INF553-Data-Mining-Summer-2019- | 2ecfd85c85015c51b0939046ff3d1a4a20ef3e66 | 95737d6908b247c7db831d2910ae7cc17f80f303 | refs/heads/master | 2021-02-13T23:31:34.864332 | 2020-03-03T22:47:19 | 2020-03-03T22:47:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | from pyspark import SparkContext
import sys
import json
import time
# Task 2: average Yelp review stars per US state, written as a flat Spark job.
sc = SparkContext('local[*]', 'pinhsuan_lee_task2.py')
review = sys.argv[1]
business = sys.argv[2]
# Each input file contains one JSON object per line.
Re_textRDD=sc.textFile(review)
Re_textRDD=Re_textRDD.map(lambda line:line.split('\n')).map(lambda line:json.loads(line[0]))
Bus_textRDD=sc.textFile(business)
Bus_textRDD=Bus_textRDD.map(lambda line:line.split('\n')).map(lambda line:json.loads(line[0]))
# (business_id, stars) pairs from the reviews.
# NOTE(review): sortByKey's first positional parameter is `ascending`; the lambda
# passed here only acts as a truthy value — confirm the intent.
count_avg_star=Re_textRDD.map(lambda x:(x["business_id"],x["stars"])).sortByKey(lambda a,b: (a[0]+b[0],a[1]+b[1]))
busID_state= Bus_textRDD.map(lambda a:(a["business_id"],a["state"]))
# Join stars with states on business_id, then average stars per state.
state_star=count_avg_star.join(busID_state)
compute=state_star.map(lambda x: (x[1][1],(x[1][0],1))).reduceByKey(lambda x,y:(x[0]+y[0],x[1]+y[1])).\
    sortByKey(True).map(lambda x:(x[0],x[1][0]/x[1][1])).map(lambda x:(x[1],x[0]))
avg_state_star=compute.sortByKey(False).collect()
print(avg_state_star)
# Write "state,stars" rows sorted by average stars, descending.
output_task2_1=sys.argv[3]
with open(output_task2_1, "w") as output_file1:
    output_file1.write("state,stars\n")
    for info in avg_state_star:
        output_file1.write("{},{}\n".format(info[1], info[0]))
dic_time={}
#method 1: collect everything to the driver, then slice the top 5.
start_timer1=time.time()
method1_RDD=compute.sortByKey(False).collect()
print(method1_RDD[0:5])
end_timer1=time.time()
dic_time["m1"]=end_timer1-start_timer1
#method 2: take(5) lets Spark stop after the first five results.
start_timer2=time.time()
method2_RDD=compute.sortByKey(False).take(5)
print(method2_RDD)
end_timer2=time.time()
dic_time["m2"]=end_timer2-start_timer2
dic_time["explanation"]="In the first method, we collect all the pairs and compare to get the top 5 states."\
                        "However, in the second method, we compare and get the top 5 states locally."\
                        "Therefore, the first method takes more time than the second one."
print(dic_time)
# Dump the timing comparison as JSON.
output_task2_2=sys.argv[4]
with open(output_task2_2, "w") as output_file2:
    json.dump(dic_time, output_file2,indent=1)
| [
"noreply@github.com"
] | noreply@github.com |
2a4c2e2000a7aff2f1657522ab2b84b85f99e5c7 | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/ConfigParser/ConfigParser_9.py | cd739d82a6b2776cc41204fb21e4a9bde96a1869 | [] | no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
import ConfigParser
parser = ConfigParser.SafeConfigParser()
parser.add_section('bug_tracker')
parser.set('bug_tracker','uri','http://localhost:8080/bugs')
parser.set('bug_tracker','username','Jack')
parser.set('bug_tracker','password','123456')
for section in parser.sections():
print section
for name,value in parser.items(section):
print ' %s = %r' % (name,value) | [
"zhuzhulang@126.com"
] | zhuzhulang@126.com |
d80029f6746432f306553ac45e849cce4e54f135 | 1cda54ec4bba8d3b7ff1beba4e59cd251a3bac04 | /DFS/104_maximum_depth_of_binary_tree.py | 526d2d7e692b0773c8511e7a3c128a58b70dd41b | [] | no_license | dodoyuan/leetcode_python | b7ff8798ab311b5d7116f4fa4d0e0f1ce05e752e | b7e59ef26a00ebdd3c253ca63f66ea079f9bca54 | refs/heads/master | 2021-09-15T23:55:16.515337 | 2018-06-13T06:42:40 | 2018-06-13T06:42:40 | 118,602,025 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Given a binary tree, find its maximum depth.
#
# The maximum depth is the number of nodes along
# the longest path from the root node down to the
# farthest leaf node.
# Definition for a binary tree node.
class TreeNode(object):
    """A binary tree node holding a value and two optional children."""
    def __init__(self, x):
        self.val = x       # payload value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution(object):
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path.

        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return max(left_depth, right_depth) + 1
| [
"dodoyuan"
] | dodoyuan |
70cb0e409bf571f5c9070607643820cee16901ee | 5b7a480609dfa34e181ba9f8bb696fa9d5902d7d | /model_Trainer.py | b15fe587600b96175bdc61a2ae1a029c58576a2c | [] | no_license | Majdi1994/Structural-Health-Monitring | e4d8f95acedd2b1de056e33dd8bd5d19aaf90a12 | 53ae48107ef50a452c3c9a369a34659bad6f40d6 | refs/heads/master | 2021-01-26T07:17:51.904636 | 2020-02-10T12:58:45 | 2020-02-10T12:58:45 | 243,362,093 | 0 | 0 | null | 2020-02-26T20:45:47 | 2020-02-26T20:45:46 | null | UTF-8 | Python | false | false | 2,603 | py | import torch.nn as nn
from models.models_config import get_model_config, initlize
from pre_train_test_split import trainer
import torch
from torch.utils.data import DataLoader
from utils import *
# Run everything on CPU.
device = torch.device('cpu')
# load data: a pre-saved dict with train/test tensors and labels.
my_data= torch.load('./data/train_test_dataset.pt')
# Train loader shuffles and drops the last partial batch; test loader keeps order.
train_dl = DataLoader(MyDataset(my_data['train_data'], my_data['train_labels']), batch_size=10, shuffle=True, drop_last=True)
test_dl = DataLoader(MyDataset(my_data['test_data'], my_data['test_labels']), batch_size=10, shuffle=False, drop_last=False)
class CNN_1D(nn.Module):
    """1-D CNN feature encoder followed by an MLP classifier.

    forward() returns (class logits over 11 classes, encoded features).
    The final Linear(32, hidden_dim) fixes the flattened encoder output to
    32 values (8 channels x 4 time steps), which constrains the input length.
    """
    def __init__(self, input_dim, hidden_dim, dropout):
        super(CNN_1D, self).__init__()
        self.hidden_dim = hidden_dim
        self.input_dim = input_dim
        self.dropout = dropout
        # Stem: wide kernel, then four stride-2 conv blocks with BN+ReLU.
        encoder_layers = [
            nn.Conv1d(self.input_dim, 8, kernel_size=7, stride=1, padding=1, dilation=1),
            nn.BatchNorm1d(8),
            nn.ReLU(),
        ]
        for _ in range(4):
            encoder_layers += [
                nn.Conv1d(8, 8, kernel_size=3, stride=2, padding=1, dilation=1),
                nn.BatchNorm1d(8),
                nn.ReLU(),
            ]
        # Final downsampling conv (no BN/ReLU), then flatten and project.
        encoder_layers += [
            nn.Conv1d(8, 8, kernel_size=3, stride=2, padding=1, dilation=1),
            nn.Flatten(),
            nn.Linear(32, self.hidden_dim),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        self.Classifier = nn.Sequential(
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.ReLU(),
            nn.Dropout(p=self.dropout),
            nn.Linear(self.hidden_dim, self.hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(p=self.dropout),
            nn.Linear(self.hidden_dim // 2, 11),
        )
    def forward(self, src):
        encoded = self.encoder(src)
        logits = self.Classifier(encoded)
        return logits, encoded
# Build the model: 1 input channel, 32 hidden units, 0.5 dropout.
model=CNN_1D(1,32,0.5).to(device)
# Training hyper-parameters (note: data_path points at the author's machine).
params = {'window_length': 30, 'sequence_length': 30, 'batch_size': 10, 'input_dim': 14, 'pretrain_epoch': 40,
          'data_path': r"C:/Users/mohamedr002/OneDrive - Nanyang Technological University/PhD Codes Implementation/Deep Learning for RUL/data/processed_data/cmapps_train_test_cross_domain.pt",
          'dropout': 0.5, 'lr': 1e-4}
# load the model configuration for the CNN variant
config = get_model_config('CNN')
# train and evaluate on the SHM dataset
trained_model=trainer(model, train_dl, test_dl,'SHM' ,config,params)
"42439092+mohamedr002@users.noreply.github.com"
] | 42439092+mohamedr002@users.noreply.github.com |
cbbfe6ccf482a46ef5c3e2fb4a96b0687da01563 | 859d81c87545d9ca83ea6c9a3a70b3bf17c0b3ce | /practice03_fun/practice02.py | ee84244fd02e13a474f1acc0553fac29b3586b4a | [] | no_license | bb04265/PythonStudy | cf3746a5d0d4a3d630de5fd03162c0cea35b3b02 | dd6a5b31f611506c528cea07bb1a2295c86643ef | refs/heads/master | 2020-08-14T15:06:47.218742 | 2019-10-20T18:41:01 | 2019-10-20T18:41:01 | 215,188,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # random.choice함수를 사용해서 6자리의 무작위 비밀번호 생성하는 프로그램
import random
pch = "abcdefghijklmnopqrstuvwxyz0123456789"
def generatePassword(length=6, chars="abcdefghijklmnopqrstuvwxyz0123456789"):
    """Generate a random password.

    Generalized: the previously hard-coded length (6) and character set
    (the module-level `pch` string) are now defaulted parameters, so
    existing `generatePassword()` calls behave exactly as before.

    :param length: number of characters to draw
    :param chars: alphabet to draw from
    :return: the generated password string
    """
    # str.join over a generator avoids the quadratic string-concatenation loop.
    return "".join(random.choice(chars) for _ in range(length))
# Demo: print three independently generated passwords, each built by drawing
# 6 random characters from the `pch` string with random.choice
# (translated from the original Korean comment).
print(generatePassword())
print(generatePassword())
print(generatePassword())
| [
"asdf01074469118@gmail.com"
] | asdf01074469118@gmail.com |
cb7d797a94bf4428198225fc60a9cc63d8660b24 | ca408d928664d51d02f860eeff45310360570c1f | /vi/metodo/segmentacion/contorno.py | 737d3278c1a16ba4b7029c52aa63c448e4c82392 | [] | no_license | ricardors19/urb-vi | 41fcb17346fa5fbb8f52cb74530401056b1cfb2a | 1cd42584843bbda16c98f817e681003b01efe91c | refs/heads/master | 2021-01-17T23:16:55.740818 | 2013-12-07T03:22:58 | 2013-12-07T03:22:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # encoding: utf-8
"""Segmetación por búsqueda de contornos """
from __future__ import division
import cv2
import numpy as np
import vi.metodo.segmentacion.contorno_base as contorno_base
class Segmentacion(contorno_base.Segmentacion):
    """Segmentation with a mean-area filter (translated from Spanish)."""
    def __init__(self):
        """Register the processor."""
        super(Segmentacion, self).__init__()
        self._procesadores = [self.procesador_areamedia]
    def procesador_areamedia(self, baldosa):
        """Using the average area, remove elements that are probably not letters."""
        gris, filtrados = self._filtro_area(baldosa)
        # Draw the surviving contours (1 px, value 255) onto a blank mask.
        mascara = np.zeros(gris.shape, np.uint8)
        cv2.drawContours(mascara, filtrados, -1, 255, 1)
        # Embed the mask in a frame padded by 5 px on every side.
        shape = (gris.shape[0] + 10, gris.shape[1] + 10)
        marco = np.zeros(shape, np.uint8)
        marco[5:gris.shape[0]+5, 5:gris.shape[1]+5] = mascara
        return marco
| [
"gcca@gcca.tk"
] | gcca@gcca.tk |
ce9e9df56c732065b950f993103b5b277bed9fd3 | 73b793758d0db27e4d67e6effbda40f5b550a9f4 | /clientes/servicios.py | d259696e5c55d7906912ca85379b3f91b8bd3f61 | [] | no_license | RubenMaier/python_crud_ventas | a696fb52ec4a2ac37e5983c211fc8ff9bc4fee00 | c95af4646bb8173c41e79118a6f53919308a0898 | refs/heads/master | 2020-08-12T06:38:02.986974 | 2019-12-29T13:39:30 | 2019-12-29T13:39:30 | 214,707,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,195 | py | # modulo que me permite trabajar comodo con el formato de archivo csv (comma-separated values)
import csv
# modulo que me permite manejar funcionalidades del sistema operativo
import os
from clientes.modelo import Cliente
class ServiciosClientes:
    """CRUD services for clients stored in a CSV file (the 'table').

    Comments translated from Spanish; the CSV schema comes from
    Cliente.esquema().
    """
    def __init__(self, nombre_tabla):
        self.nombre_tabla = nombre_tabla # the CSV file acts as our table
    def crear_cliente(self, cliente):
        # open the file in append mode
        with open(self.nombre_tabla, mode='a') as archivo:
            # create a CSV writer that follows the column schema declared
            # by the Cliente class: csv.DictWriter(file, schema)
            escritura = csv.DictWriter(archivo, Cliente.esquema())
            # append one client row, serialized as a dict of its attributes
            escritura.writerow(cliente.atributos_a_diccionario())
    def listar_clientes(self): # list the clients we currently have
        with open(self.nombre_tabla, mode='r') as archivo:
            lectura = csv.DictReader(archivo, Cliente.esquema())
            # the reader is an iterator, so materialize it as a list
            return list(lectura)
    def actualizar_cliente(self, cliente_actualizado):
        clientes = self.listar_clientes()
        # auxiliary list: cycle through the clients looking for the one to
        # update, ending up with the unmodified rows plus the updated one
        clientes_actualizados = []
        for cliente in clientes:
            if cliente['uid'] == cliente_actualizado.uid:
                # append only the updated client
                clientes_actualizados.append(
                    cliente_actualizado.atributos_a_diccionario())
            else:
                # append every client that was not modified
                clientes_actualizados.append(cliente)
        self._guardar_en_disco(clientes_actualizados)
    def borrar_cliente(self, cliente_a_borrar):
        clientes = self.listar_clientes()
        clientes_resultantes = []
        for cliente in clientes:
            if cliente['uid'] != cliente_a_borrar.uid:
                clientes_resultantes.append(cliente)
        self._guardar_en_disco(clientes_resultantes)
    def _guardar_en_disco(self, clientes):
        # a temporary table is used because the original file was opened in
        # read mode while listing, so we rewrite into a sibling .tmp file
        nombre_tabla_temporal = self.nombre_tabla + '.tmp'
        # write a CSV file to disk with the clients passed as argument
        with open(nombre_tabla_temporal, mode='w') as archivo:
            escritura = csv.DictWriter(archivo, Cliente.esquema())
            escritura.writerows(clientes)
        # delete the original file
        os.remove(self.nombre_tabla)
        # rename the temporary table back to the original name
        os.rename(nombre_tabla_temporal, self.nombre_tabla)
| [
"ruben@MacBook-Pro-2017-15-Inch.local"
] | ruben@MacBook-Pro-2017-15-Inch.local |
1e5487eb2f86724a1ceb955e46b3867b66aba095 | 549ccf72082fe25b8c09f553d02994f99e17b2a7 | /iw/search.py | e1efffc6e42204454554226b39aff6fe300b4b09 | [] | no_license | comex/agora-infra | 8192cb82e56fb56b692b4d16adb059a2a376bd88 | 6ca74d91aef144c91f28a67e80a0429ae9d15b75 | refs/heads/master | 2023-07-19T22:34:40.928464 | 2015-08-16T22:57:28 | 2015-08-16T22:57:28 | 11,080,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,096 | py | import re, sre_parse, sre_compile, sre_constants, collections, time, operator, itertools, sys
from collections import namedtuple
from pystuff import config, mkdir_if_absent
import pystuff, stuff
import StringIO
inf, neginf = float('inf'), float('-inf')
def intersect_iterables(iterables, is_asc):
    """Merge-intersect sorted streams of (id, ctxs) pairs.

    Each iterable must yield (id, ctxs) sorted ascending if `is_asc`,
    descending otherwise. Yields (id, combined_ctxs) for ids present in
    every stream, concatenating the ctxs lists.
    """
    subits = map(iter, iterables)
    minval = neginf if is_asc else inf
    less = -1 if is_asc else 1
    # last (id, ctxs) seen from each stream except the first (the driver).
    ids = [(minval, None)] * (len(subits) - 1)
    while True:
        first_id, ctxs = next(subits[0])
        for i, (id, ictxs) in enumerate(ids):
            # advance stream i+1 until its id catches up with first_id
            while cmp(id, first_id) == less:
                try:
                    id, ictxs = next(subits[i+1])
                except StopIteration:
                    return # there is nothing more
                ids[i] = (id, ictxs)
            c = cmp(id, first_id)
            if c == -less:
                break # not matching
            elif c == 0:
                ctxs += ictxs
        else: # all through
            yield (first_id, ctxs)
def union_iterables(iterables, is_asc):
    """Merge-union sorted streams of (id, ctxs) pairs.

    Yields pairs in the same order (asc/desc) as the inputs; when the same
    id heads several streams they all advance, but only the first stream's
    ctxs are yielded for that id.
    """
    subits = map(iter, iterables)
    subit_count = len(subits)
    if is_asc:
        minval = neginf
        maxval = inf
        _min = min
    else:
        minval = inf
        maxval = neginf
        _min = max
    # current head of each stream; maxval marks an exhausted stream
    ids = [(minval, None)] * subit_count
    while subit_count > 0:
        min_id, ctxs = _min(ids)
        if min_id != minval:
            yield (min_id, ctxs)
        for i, (id, ctxs) in enumerate(ids):
            if id == min_id:
                try:
                    ids[i] = next(subits[i])
                except StopIteration:
                    ids[i] = (maxval, None)
                    subit_count -= 1
def subtract_iterables(minuend, subtrahend, is_asc):
    """Yield (id, ctxs) pairs from `minuend` whose id does not occur in
    `subtrahend`; both streams must be sorted the same way (asc/desc)."""
    minval = neginf if is_asc else inf
    maxval = inf if is_asc else neginf
    less = -1 if is_asc else 1
    bad = minval
    for id, ctxs in minuend:
        # advance the subtrahend until its head is >= (or <=) the current id
        while cmp(bad, id) == less:
            try:
                bad, _ = next(subtrahend)
            except StopIteration:
                bad = maxval
        if bad != id:
            yield id, ctxs
def lex_query(text):
    """Tokenize a query string into dicts of named regex groups.

    Recognized pieces per token: an optional leading OR/'|', an optional
    +/- prefix, an optional `operator:` prefix, and one of: "quoted",
    /regex/flags, a parenthesis, or a bare word. Only groups that matched
    are kept in each dict.
    """
    # -bar, but foo-bar is one term
    ms = re.finditer(r'''
        (?P<or> OR (?= ["/\s()] ) \s* | \| \s* )?
        (?P<plusminus> [+-] )?
        (?: (?P<operator> [a-zA-Z-]+ ) :)?
        (?:
            " (?P<quoted> [^"]* ) "
          | / (?P<regex> (?: [^\\] | \\. )* ) / (?P<regopts> [a-zA-Z]* )
          | (?P<parens> [()] )
          | (?P<simple> [^"/\s()]+ )
        )
    ''', text, re.X)
    return [{k: v for (k, v) in m.groupdict().iteritems() if v is not None} for m in ms]
def parse_query(tokens, operators, opts):
    """Parse lexed tokens into an expression tree.

    Returns one of:
      ('ok', tree, order)   -- tree nodes: ('and'|'or'|'not'|'lit'|'regex', ...)
      ('errors', [msg], None)
      ('empty', None, None)
    `operators` maps operator names to index objects (None = default field);
    `opts` may contain 'require_trigrams' to reject un-indexable regexes.
    """
    errors = []
    stack = []
    def err_if_absent(error):
        if error not in errors:
            errors.append(error)
    # `existing` is the tree built so far at the current paren depth;
    # `was_and` remembers whether the top node is an implicit AND.
    existing, was_and = None, False
    order = None
    got_operator_parens = False
    for token in tokens:
        inverted = token.get('plusminus') == '-'
        is_or = bool(token.get('or'))
        if 'operator' in token:
            operator = token['operator'].lower()
            # order:asc / order:desc is a pseudo-operator, not a search term
            if operator == 'order':
                operand = token.get('simple', '').lower()
                if operand not in ('asc', 'desc'):
                    errors.append('Bad order (must be asc or desc)')
                else:
                    order = operand
                continue
            if operator not in operators:
                errors.append('No such operator %s, have: %s' %
                    (operator, ', '.join(o for o in operators.keys() if o is not None)))
                continue
            if 'parens' in token:
                got_operator_parens = True
        else:
            operator = None
        if 'parens' in token:
            if token['parens'] == '(':
                # save the outer context and start a fresh subexpression
                stack.append((existing, inverted, is_or, was_and))
                existing, was_and = None, False
                continue
            else: # )
                if not stack:
                    err_if_absent('Mismatched )')
                    continue
                else:
                    tree = existing
                    existing, inverted, is_or, was_and = stack.pop()
                    if tree is None: continue
        elif 'quoted' in token:
            tree = ('lit', operator, token['quoted'])
        elif 'simple' in token:
            if token['simple'] == 'OR':
                err_if_absent('Bad OR')
                continue
            tree = ('lit', operator, token['simple'])
        elif 'regex' in token:
            regex, regopts = token['regex'], token['regopts']
            flags = 0
            for opt in regopts.upper():
                if opt in 'IMSX':
                    flags |= getattr(re, opt)
                else:
                    err_if_absent('Unknown regex flags')
            try:
                p = sre_parse.parse(regex, flags)
            except re.error as e:
                errors.append('Regex error: %s' % e.message)
                continue
            db = operators[operator]
            # if the backing index supports trigram lookup, precompute the
            # trigram expression used to narrow candidate documents
            if hasattr(db, 'search_trigram'):
                trigrams = p_trigrams(p)
                if trigrams is None and opts.get('require_trigrams'):
                    errors.append('Regex un-indexable')
                    continue
                if trigrams is not None:
                    trigrams = simplify_trigrams(trigrams)
            else:
                trigrams = None
            p_fix_spaces(p)
            r = sre_compile.compile(p, flags)
            tree = ('regex', operator, r, trigrams)
        if inverted: tree = ('not', tree)
        if is_or:
            if existing is None:
                err_if_absent('Bad OR')
                continue
            if was_and:
                # OR binds tighter than the implicit AND: a b OR c => a AND (b OR c)
                existing = ('and', existing[1], ('or', existing[2], tree))
            else:
                existing = ('or', existing, tree)
            was_and = False
        else:
            if existing is not None:
                existing = ('and', existing, tree)
                was_and = True
            else:
                existing = tree
                was_and = False
    if stack:
        errors.append('Mismatched (')
    if got_operator_parens:
        errors.append('No parens with an operator')
    if errors:
        return ('errors', errors, None)
    elif not existing:
        return ('empty', None, None)
    else:
        return ('ok', existing, order)
def pprint(tree, indent=''):
    """Print an expression tree, one node kind per line, indented by depth."""
    if isinstance(tree, tuple):
        print indent + str(tree[0])
        for sub in tree[1:]:
            pprint(sub, indent + '  ')
    else:
        print indent + str(tree)
def can_nest_lit_operators(sup, sub):
    """Whether a literal node of kind `sub` may be folded under a `sup`
    ('and'/'or') combiner when batching literals per operator.

    Anything nests under 'and'; otherwise only 'not' nests (a limitation of
    the paren-less SQLite FTS MATCH syntax).
    """
    if sup == 'and':
        return True
    return sub == 'not'
def optimize_query(tree):
    """Flatten and simplify a parsed query tree.

    - Collapses single-child and/or nodes and double negations.
    - Flattens nested same-kind and/or nodes.
    - Batches plain literals (and single-word negated literals) that share an
      operator into one 'lit' node so the backing index can run them in a
      single MATCH query.
    """
    if tree[0] in ('and', 'or'):
        if len(tree) == 2:
            # avoid weird behavior below with 'not'
            return optimize_query(tree[1])
        kind = tree[0]
        args = []
        lits_by_op = {}
        def add_arg(r):
            if r[0] == kind:
                # NOTE(review): map(add_arg, r) also visits the kind tag
                # r[0]; it looks like this should be r[1:] — confirm.
                return map(add_arg, r)
            elif r[0] == 'lit' and isinstance(r[2], tuple) and r[2][0] == kind:
                # this has ugly complexity, wrapping recursively than unwrapping
                return [add_arg(('lit', r[1], n)) for n in r[2][1:]]
            if r[0] == 'lit' and (not isinstance(r[2], tuple) or can_nest_lit_operators(kind, r[2][0])):
                lits_by_op.setdefault(r[1], []).append(r[2])
            # Bug fix: isinstance(..., 'tuple') passed a *string* as the type,
            # which raises TypeError whenever a negated literal reaches this
            # branch; the intended check is against the tuple type itself.
            elif r[0] == 'not' and r[1][0] == 'lit' and not isinstance(r[1][2], tuple) and ' ' not in r[1][2]:
                # ' ' check is cause we can't do -"foo" with dumb-paren sqlite
                lits_by_op.setdefault(r[1][1], []).append(('not', r[1][2]))
            else:
                args.append(r)
        for subtree in [tree[1], tree[2]]:
            r = optimize_query(subtree)
            add_arg(r)
        for operator, lits in lits_by_op.iteritems():
            if len(lits) == 1:
                args.append(('lit', operator, lits[0]))
            else:
                args.append(('lit', operator, (kind,) + tuple(lits)))
        if len(args) == 1:
            return args[0]
        else:
            return (kind,) + tuple(args)
    elif tree[0] == 'not':
        sub = tree[1]
        negative = True
        # collapse chains of 'not'
        while sub[0] == 'not':
            negative = not negative
            sub = sub[1]
        sub = optimize_query(sub)
        if not negative: return sub
        #if sub[0] == 'lit' and isinstance(sub[2], basestring) and ' ' not in sub[2]:
        #    # can't do -"foo"
        #    return ('lit', sub[1], ('not', sub[2]))
        return ('not', sub)
    else:
        return tree
class QueryTimeoutException(Exception): pass  # raised when query evaluation exceeds its deadline
def run_query(tree, operators, deadline, limit=None, asc=False):
    """Lazily evaluate an optimized query tree into a sorted stream of
    (id, ctxs) pairs; raises QueryTimeoutException past `deadline`."""
    if time.time() > deadline:
        raise QueryTimeoutException
    if tree[0] == 'or':
        return union_iterables(
            [run_query(subtree, operators, deadline, limit, asc)
             for subtree in tree[1:]],
            asc)
    elif tree[0] == 'and':
        # split conjuncts into positive terms and negated terms
        positive = [run_query(subtree, operators, deadline, None, asc)
                    for subtree in tree[1:]
                    if subtree[0] != 'not']
        negative = [run_query(subtree[1], operators, deadline, None, asc)
                    for subtree in tree[1:]
                    if subtree[0] == 'not']
        itr = intersect_iterables(positive, asc)
        if negative:
            return subtract_iterables(itr, union_iterables(negative, asc), asc)
        else:
            return itr
    elif tree[0] == 'lit':
        op = operators[tree[1]]
        # idx.word.search
        return op.search_word(tree[2], limit=limit, asc=asc)
    elif tree[0] == 'regex':
        op = operators[tree[1]]
        r, trigrams = tree[2], tree[3]
        # first narrow candidates using the trigram index, if available
        if trigrams is None or pystuff.force_unindexed: # no index
            trigram_hits = op.search_get_all(asc=asc)
        else:
            trigram_hits = ((result, None) for result, _ in op.search_trigram(trigrams, asc=asc))
        #list(trigram_hits); import sys; sys.exit(0)
        #db.cache_keys(trigram_hits)
        #db.cache_keys_done()
        results = set()
        def func():
            # confirm each candidate by actually running the regex on its text
            for result, text in trigram_hits:
                if pystuff.print_trigram_hits: print '?', result
                if time.time() > deadline:
                    raise QueryTimeoutException
                if text is None:
                    text = op.search_get(result)
                    assert text is not None
                it = r.finditer(text)
                #it = re.finditer('^', text)
                try:
                    m = next(it)
                except StopIteration:
                    pass
                else:
                    # keep the first match and the rest of the iterator for highlighting
                    yield (result, [FoundRegex(itertools.chain([m], it))])
        return func()
    elif tree[0] == 'not':
        # bare negation: all ids minus the matches
        return subtract_iterables(operators[None].ids()[::(-1, 1)[asc]],
                                  run_query(tree[1], operators, deadline, None, asc))
    else:
        raise Exception('bad tree')
def do_query(expr, operators, start=0, limit=10, timeout=2.5, asc=False, opts=None):
    """Lex, parse, optimize and run a query expression.

    Returns ('ok', results), ('timeout', None), or the ('errors'/'empty', ...)
    tuple from parse_query. `start`/`limit` paginate the result stream;
    limit=None means "all". An `order:asc|desc` term in the query overrides
    the `asc` argument.
    """
    # avoid a mutable default argument; behavior is unchanged for callers
    if opts is None:
        opts = {}
    l = lex_query(expr)
    ok, p, order = parse_query(l, operators, opts)
    if ok != 'ok':
        return (ok, p)
    if order is not None:
        asc = order == 'asc'
    o = optimize_query(p)
    if timeout is None:
        deadline = float('inf')
    else:
        # Bug fix: the deadline previously hard-coded 2.5, silently ignoring
        # the caller-supplied timeout value.
        deadline = time.time() + timeout
    it = iter(run_query(o, operators, deadline, None if limit is None else start + limit, asc))
    try:
        # skip the first `start` results
        for i in xrange(start):
            try:
                next(it)
            except StopIteration:
                break
        if limit is None:
            results = list(it)
        else:
            results = []
            for i in xrange(limit):
                try:
                    result = next(it)
                    results.append(result)
                except StopIteration:
                    break
        return ('ok', results)
    except QueryTimeoutException:
        return ('timeout', None)
def m_to_range(m):
    """Convert a regex match object to its (start, end) offset pair."""
    return m.span()
class FoundLit:
    """Highlight context for a literal word query node.

    `query` is a ('lit'-style) tuple; every non-tuple operand becomes a word
    to highlight via a case-insensitive \\b(...)\\b alternation.
    """
    def __init__(self, query):
        self.query = query
        # Bug fix: `bits` used to be a plain local of __init__, but ranges()
        # referenced it, raising NameError at highlight time. Store it on self.
        self.bits = [re.escape(q) for q in self.query[1:] if not isinstance(q, tuple)]
        self.r = r'\b(%s)\b' % '|'.join(self.bits)
    def ranges(self, text):
        """Yield (start, end) spans of every word hit in `text`."""
        if not self.bits:
            return []
        # m.span() inlines the former m_to_range helper
        return (m.span() for m in re.finditer(self.r, text, re.I))
class FoundRegex:
    """Highlight context wrapping an already-produced iterator of regex
    match objects; the `text` argument to ranges() is ignored."""
    def __init__(self, it):
        self.it = it
    def ranges(self, text):
        return (m_to_range(m) for m in self.it)
class HighlightedString:
    """A text plus sorted, non-overlapping (start, end) highlight ranges,
    renderable as plain text, ANSI reverse-video, or HTML bold."""
    def __init__(self, text, ranges):
        self.text = text
        self.ranges = ranges
    def plain(self):
        return self.text
    def ansi(self):
        return self.output('\x1b[7m', '\x1b[27m', lambda text: text)
    def html(self):
        import web
        return self.output('<b>', '</b>', lambda text: web.websafe(text))
    def output(self, enter, exit, transform):
        """Render: wrap each range in enter/exit, passing all text pieces
        through `transform` (e.g. HTML escaping)."""
        pieces = []
        pos = 0
        for start, end in self.ranges:
            pieces.append(transform(self.text[pos:start]))
            pieces.append(enter)
            pieces.append(transform(self.text[start:end]))
            pieces.append(exit)
            pos = end
        pieces.append(transform(self.text[pos:]))
        return ''.join(pieces)
def fix_ranges(ranges):
    """Sort (start, end) ranges (in place) and merge overlapping neighbours,
    returning a new, non-overlapping list."""
    ranges.sort()
    merged = []
    for start, end in ranges:
        if merged and start < merged[-1][1]:
            # overlaps the previous range: extend it, keeping its start
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    return merged
def get_ranges(text, ctxs):
    """Collect highlight spans from every match context and normalize them
    into sorted, non-overlapping ranges."""
    collected = []
    for ctx in ctxs:
        collected.extend(ctx.ranges(text))
    return fix_ranges(collected)
def highlight_all(text, ctxs):
    """Return a HighlightedString of the whole `text` with every context
    match marked."""
    ranges = get_ranges(text, ctxs)
    return HighlightedString(text, ranges)
def highlight_snippets(text, ctxs):
    """Build a short excerpt: up to 3 matching lines of `text`, with the
    match ranges re-offset into the excerpt, as a HighlightedString."""
    ranges = get_ranges(text, ctxs)
    line_ranges = []
    bad_line_ranges = []
    for s, e in ranges:
        # expand each match to its full line
        prev_nl, next_nl = text.rfind('\n', 0, s), text.find('\n', e)
        lr = (prev_nl + 1, len(text) if next_nl == -1 else next_nl)
        # matches spanning >= 100 chars are deprioritized
        (line_ranges if e - s < 100 else bad_line_ranges).append(lr)
    if not line_ranges:
        line_ranges = bad_line_ranges
    line_ranges = fix_ranges(line_ranges)
    htext = ''
    hranges = []
    for ls, le in line_ranges[:3]:
        if htext != '':
            htext += '\n'
        # adj shifts original offsets into excerpt coordinates
        adj = + len(htext) - ls
        htext += text[ls:le]
        hranges += [(s + adj, e + adj) for (s, e) in ranges if s >= ls and e <= le]
    return HighlightedString(htext, hranges)
# http://swtch.com/~rsc/regexp/regexp4.html but simplified
# is the simplification appropriate?...
def p_is_dot_star(node):
    """True if `node` (a parsed sre element) is the form of '.*':
    a repeat of ANY with min 0 and an effectively unbounded max."""
    kind, arg = node
    # can't use == because of hidden SubPattern class
    if kind is not sre_constants.MAX_REPEAT:
        return False
    min_count, max_count, sub = arg
    if min_count != 0 or max_count < 4294967295 or len(sub) != 1:
        return False
    return sub[0] == (sre_constants.ANY, None)
# litmap[b]: 2-hex-digit encoding of ASCII byte b, lower-cased and with
# newline normalized to space — the canonical form used for trigram tokens.
litmap = []
for arg in xrange(128):
    litmap.append(chr(arg).lower().replace('\n', ' ').encode('hex'))
def _p_trigrams(p, litstr=''):
    """Extract a trigram pre-filter expression from a parsed regex.

    Walks the sre parse tree collecting runs of literal characters (as
    hex-encoded trigrams) and recursing into alternations. Returns an
    ('and', ...)/('or', ...) expression of trigram strings, or None when
    the pattern could match anything (un-indexable). Simplified variant of
    the trigram technique described in Cox's regexp4 article.
    """
    sp_stack = []
    trigrams = []
    trigrams_set = set()
    alternates = []
    it = iter(p)
    # any character
    any_kinds = (sre_constants.ANY,
                 sre_constants.NOT_LITERAL,
                 sre_constants.RANGE,
                 sre_constants.CATEGORY,
                 sre_constants.IN)
    # empty string
    empty_kinds = (sre_constants.ASSERT,
                   sre_constants.ASSERT_NOT,
                   sre_constants.AT)
    branch_kinds = (sre_constants.BRANCH,
                    sre_constants.GROUPREF_EXISTS)
    while True:
        try:
            kind, arg = next(it)
        except StopIteration:
            # resume the enclosing subpattern, if any
            if sp_stack:
                it = sp_stack.pop()
                continue
            else:
                break
        if kind in any_kinds:
            # wildcard breaks the current literal run
            litstr = ''
        elif kind in empty_kinds:
            pass
        elif kind in branch_kinds:
            if kind is sre_constants.GROUPREF_EXISTS:
                group, yes, no = arg
                arg = [yes, no]
            else:
                something, arg = arg
            # every branch contributes an alternative trigram expression;
            # one un-indexable branch poisons the whole alternation
            for sp in arg:
                alt = _p_trigrams(sp, litstr)
                if alt is not None and alternates is not None:
                    alternates.append(alt)
                else:
                    alternates = None # could be anything
            litstr = ''
        elif kind is sre_constants.MAX_REPEAT:
            min, max, sub = arg
            if min >= 1:
                # same trigrams as the string itself
                sp_stack.append(it)
                it = iter(sub)
            else:
                litstr = ''
        elif kind is sre_constants.SUBPATTERN:
            sp_stack.append(it)
            group, sub = arg
            it = iter(sub)
        elif kind is sre_constants.LITERAL:
            # non-ASCII maps to '?' (hex 3f); each literal extends the run
            c = litmap[arg] if arg < 128 else '3f' # '?'
            litstr = litstr[-4:] + c
            # 6 hex chars == one 3-character trigram
            if len(litstr) == 6 and litstr not in trigrams_set:
                trigrams_set.add(litstr)
                trigrams.append(litstr)
        else:
            raise Exception('unknown kind %s' % kind)
    if len(trigrams) > 10:
        # cap the AND list: keep the first and last five trigrams
        trigrams = trigrams[:5] + trigrams[-5:]
    texpr = ('and',) + tuple(trigrams)
    aexpr = None if alternates is None else ('or',) + tuple(alternates)
    if trigrams and alternates:
        return ('and', texpr, aexpr)
    elif trigrams:
        return texpr
    elif alternates:
        return aexpr
    else:
        return None
def p_trigrams(p):
    """Strip leading/trailing '.*' (they add no trigram information) from the
    parsed pattern `p` *in place*, then extract its trigram expression."""
    while p and p_is_dot_star(p[0]):
        p.pop(0)
    while p and p_is_dot_star(p[-1]):
        p.pop()
    return _p_trigrams(p)
def p_fix_spaces(p):
    """Rewrite the parsed pattern in place so that a literal space also
    matches '\\n' (indexed text normalizes newlines to spaces); recurses
    into repeats, groups and branches."""
    for i, (kind, arg) in enumerate(p):
        if kind is sre_constants.LITERAL and arg == 32: # ' '
            p[i] = (sre_constants.IN, [(sre_constants.LITERAL, 32), (sre_constants.LITERAL, 10)]) # add '\n'
        elif kind is sre_constants.IN and (sre_constants.LITERAL, 32) in arg and (sre_constants.LITERAL, 10) not in arg:
            arg.append((sre_constants.LITERAL, 10))
        elif kind is sre_constants.MAX_REPEAT:
            p_fix_spaces(arg[2])
        elif kind is sre_constants.SUBPATTERN:
            p_fix_spaces(arg[1])
        elif kind is sre_constants.BRANCH:
            for sub in arg[1]:
                p_fix_spaces(sub)
def simplify_trigrams(trigrams):
    """Thin an over-long trigram list down to at most 8 entries by repeatedly
    dropping every element whose index is 1 modulo 3."""
    while len(trigrams) > 8:
        kept = []
        for index, trigram in enumerate(trigrams):
            if index % 3 != 1:
                kept.append(trigram)
        trigrams = kept
    return trigrams
class Index:
    """Base class for an SQLite FTS4 full-text index over (docid, text).

    Subclasses supply `fts_opts` (tokenizer options) and an insert policy.
    """
    def __init__(self, name, db):
        self.name = name
        self.db = db
        self.cursor = pystuff.CursorWrapper(self.db.conn.cursor())
        # a freshly-created database gets its virtual table (re)built
        if db.new:
            self.clear()
        self.insert_stmt = 'INSERT INTO %s(docid, text) VALUES(?, ?)' % name
        self.search_stmt = 'SELECT docid FROM %s WHERE text MATCH ? ORDER BY docid %%s LIMIT ?' % name
    def begin(self):
        pass
    def commit(self):
        pass
    def clear(self):
        # NOTE(review): content='' here is two adjacent single quotes inside a
        # single-quoted string — the literal concatenates to "content=,";
        # verify against the intended FTS4 contentless-table DDL.
        self.cursor.execute('DROP TABLE IF EXISTS %s' % self.name)
        self.cursor.execute('CREATE VIRTUAL TABLE %s USING fts4(%s, order=desc, content='', text text);' % (self.name, self.fts_opts))
    def _insert(self, docid, text):
        self.cursor.execute(self.insert_stmt, (docid, text))
    # No SQLITE_ENABLE_FTS3_PARENTHESIS means trouble
    @staticmethod
    def to_sql(bit):
        # Translate an ('and'/'or', ...) literal expression into the
        # paren-less MATCH syntax: space = AND, 'OR' = OR, '-' = NOT.
        if not isinstance(bit, tuple): return bit
        return {'and': ' ', 'or': ' OR '}[bit[0]].join(
            ('-%s' % s[1]) if isinstance(s, tuple) and s[0] == 'not'
            else ('"%s"' % s)
            for s in bit[1:])
    def search(self, query, limit=None, asc=False):
        """Run a MATCH query; yields (docid, [FoundLit(query)]) pairs
        ordered by docid asc/desc."""
        sql = Index.to_sql(query)
        cursor = pystuff.CursorWrapper(self.db.conn.cursor())
        result = cursor.execute(self.search_stmt % ('ASC' if asc else 'DESC'), (sql, 10000000 if limit is None else limit))
        return ((docid, [FoundLit(query)]) for docid, in result)
class WordIndex(Index):
    """Word-level index: Porter-stemmed tokens, raw text stored directly."""
    fts_opts = 'tokenize=porter'
    insert = Index._insert  # plain insert, no preprocessing
def trigram_hexlify(text):
    """Canonicalize text for trigram indexing: decode to unicode if needed,
    force ASCII (non-ASCII -> '?'), lower-case, map newlines to spaces, and
    hex-encode so each character becomes exactly two hex digits."""
    if not isinstance(text, unicode):
        text = stuff.faildecode(str(text))
    return text.encode('ascii', 'replace').lower().replace('\n', ' ').encode('hex')
class TrigramIndex(Index):
    """Trigram index: stores the set of hex-encoded 3-character windows of
    the canonicalized text as space-separated tokens."""
    fts_opts = 'tokenize=simple'
    def insert(self, docid, text):
        text = trigram_hexlify(text)
        # windows of 6 hex chars (= 3 characters), stepping one character (2 hex digits)
        self._insert(docid, ' '.join(set(text[i:i+6] for i in xrange(0, len(text) - 6, 2))))
class CombinedIndex:
    """Pair of a WordIndex and a TrigramIndex over the same documents;
    every operation is delegated to both."""
    def __init__(self, name, db):
        self.word = WordIndex(name + '_word', db)
        self.trigram = TrigramIndex(name + '_trigram', db)
    def begin(self):
        self.word.begin()
        self.trigram.begin()
    def commit(self):
        self.word.commit()
        self.trigram.commit()
    def insert(self, docid, text):
        self.word.insert(docid, text)
        self.trigram.insert(docid, text)
    def clear(self):
        self.word.clear()
        self.trigram.clear()
if __name__ == '__main__':
class fake_db:
idx = None
operators = {'foo': None, None: fake_db()}
if len(sys.argv) > 1:
examples = sys.argv[1:]
else:
examples = [
'order:/bar/',
'a OR f d OR (g h)',
'+test bar -("hi"/test/+x-f) -f',
'',
'(OR)',
'(-)',
'/foo+/',
'foo:/bar/',
'/abcdefghijklmnopqrstuvwxyz123456789/',
]
for example in examples:
print repr(example)
l = lex_query(example)
print ' lex:', l
ok, p, order = parse_query(l, operators)
if ok == 'ok':
o = optimize_query(p)
print ' par:', ok, order
pprint(o, indent=' ')
else:
print ' par:'
pprint((ok, p), indent=' ')
| [
"comexk@gmail.com"
] | comexk@gmail.com |
189290317dbf654f33692762683932c1f63047b6 | 6ffd0d9c617fdf9d17e9383101a7ceb846996e22 | /checkout/signals.py | b035c9b8abefe44567490e85d7ebda171e2bb085 | [] | no_license | KrisK1978/GuitarBarn | 9ccb8bd0a60d7bd072ecbc78e9dbc105cbeda008 | 82243aa232578202ca987002d95fda1e2a1e0bec | refs/heads/master | 2023-02-27T02:46:10.237860 | 2021-02-01T01:40:51 | 2021-02-01T01:40:51 | 333,077,430 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | """ Signals for Checkout app """
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from .models import OrderLineItem
"""
This decorator executes the function any time
the signal is sent
"""
@receiver(post_save, sender=OrderLineItem)
def update_on_save(sender, instance, created, **kwargs):
    """
    Recalculate the parent order's total whenever a line item is
    created or updated (fired by Django's post_save signal).
    """
    instance.order.update_total()
@receiver(post_delete, sender=OrderLineItem)
def update_on_delete(sender, instance, **kwargs):
    """
    Recalculate the parent order's total after a line item is deleted
    (fired by Django's post_delete signal).
    """
    # Bug fix: the original referenced `instance.order.update_total` without
    # calling it, so deleting a line item never refreshed the order total.
    instance.order.update_total()
| [
"k_kempa@yahoo.co.uk"
] | k_kempa@yahoo.co.uk |
7254eb56a0fe6e70e118de114ad39641ad5db7b4 | 5a174cd92bf9a09d6cc7e0c7d171f31176abda8f | /src/tartarus.py | 7f98cc89d509855e1f3ec836614095df9a4a4c65 | [
"BSD-3-Clause"
] | permissive | Nova38/Tartarus | 42e74e92b79835a1814771b5afc31896016aa2a6 | db0548f5395bbf16c48a30acbd4a3ba82f4f5e33 | refs/heads/master | 2020-05-09T10:49:49.927046 | 2019-04-15T18:51:15 | 2019-04-15T18:51:15 | 181,059,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,665 | py | #!/usr/bin/env python3
import click
import os
import random
import shutil
import gzip
import bz2
import lzma
import zipfile
import base64
import hexrec.xxd
# Help-text fixes below: "interations" -> "iterations", the --base32 option
# previously said "base64", and "encretion" was corrected -- user-facing
# strings only, behaviour unchanged.
@click.command()
@click.argument('File', type=click.Path(exists=True, resolve_path=True))
@click.option('-o', '--output', default="", help='output file name')
@click.option('-t', default=1, help='How many iterations')
@click.option('--gzip', "gz", is_flag=True, default=False, help="Adds Gunzip to operations to be used on file")
@click.option('--bzip2', "b2", is_flag=True, default=False, help="Adds Bzip2 to operations to be used on file")
@click.option('--lzma', "lz", is_flag=True, default=False, help="Adds lzma to operations to be used on file")
@click.option('--zip', "z", is_flag=True, default=False, help="Adds Zip to operations to be used on file")
@click.option('--xxd', "x", is_flag=True, default=False, help="Adds xxd (hexdump) to operations to be used on file")
@click.option('--base64', "b64", is_flag=True, default=False, help="Adds base64 encoding to operations to be used on file")
@click.option('--base32', "b32", is_flag=True, default=False, help="Adds base32 encoding to operations to be used on file")
@click.option('--all', is_flag=True, default=False, help="Use all compression and encoding methods above")
def cli(t, file, output, all, gz, b2, lz, z, x, b64, b32):
    """
    Takes an file and randomly encodes it for obfuscation

    The selected encoders are applied -t times in a random order; the
    sequence of encoder classes used is printed afterwards so the file
    can be decoded again by reversing the steps.
    """
    # Default the output path to "out" next to the input file.
    if (output == ""):
        output = os.path.join(os.path.dirname(file), "out")
    tmp = output + ".tmp"
    # Build the pool of encoder callables from the selected flags.
    operations = []
    operationLog = []
    if (gz):
        operations.append(EGzip)
    if (b2):
        operations.append(EBZip2)
    if (lz):
        operations.append(ELzma)
    if (z):
        operations.append(EZip)
    if (x):
        operations.append(Exxd)
    if (b64):
        operations.append(EBase64)
    if (b32):
        operations.append(EBase32)
    # No flag given, or --all: use every available encoder.
    if(len(operations) == 0 or all == True):
        operations = [EGzip, EBZip2, ELzma, EZip, Exxd, EBase64, EBase32]
    copyFile(file, output)
    # Each round encodes `output` into `tmp`, then folds the result back
    # into `output` for the next round.
    for _ in range(t):
        ran = random.randint(0, len(operations) - 1)
        operationLog.append(ran)
        operations[ran](output, tmp)
        copyFile(tmp, output)
    # Report the encoder sequence so the transformation can be reversed.
    for x in operationLog:
        print(operations[x])
    # Clean up: the original truncated the temp file with open(tmp, "w")
    # (leaking the file handle) before removing it; a guarded remove is
    # sufficient and also handles a zero-iteration run.
    if os.path.exists(tmp):
        os.remove(tmp)
def copyFile(fin, fout):
    """
    Copy the file at *fin* byte-for-byte to *fout*.

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    with open(fin, 'rb') as src, open(fout, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def EGzip(fin, fout):
    """
    Gzip-compress the file at *fin* into *fout*.

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    with open(fin, 'rb') as src, gzip.open(fout, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def EBZip2(fin, fout):
    """
    Bzip2-compress the file at *fin* into *fout*.

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    with open(fin, 'rb') as src, bz2.open(fout, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def ELzma(fin, fout):
    """
    LZMA-compress the file at *fin* into *fout*.

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    with open(fin, 'rb') as src, lzma.open(fout, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def EZip(fin, fout):
    """
    Zip-archive the file at *fin* into *fout*, then mirror the archive
    back over *fin* (so both paths hold the encoded bytes, matching the
    other encoders' behaviour as used by cli()).

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    # Fix: close the archive via a context manager.  The original never
    # closed the ZipFile, leaking the handle and risking an unflushed
    # (invalid) archive being copied below.
    with zipfile.ZipFile(fout, 'w') as archive:
        archive.write(fin)
    with open(fout, 'rb') as src, open(fin, 'wb') as dst:
        shutil.copyfileobj(src, dst)
def Exxd(fin, fout):
    """
    Helper function to carry out xxd (ie Hexdump).

    Rewrites the file as an xxd-style hex dump via the third-party
    ``hexrec`` package (imported at module top).

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    hexrec.xxd.xxd(fin, fout)
def EBase64(fin, fout):
    """
    Base64-encode the file at *fin* into *fout*.

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    # Fix: the original never closed either file handle; context managers
    # close (and flush) them deterministically.
    with open(fin, 'rb') as src:
        data = src.read()
    with open(fout, 'wb') as dst:
        dst.write(base64.b64encode(data))
def EBase32(fin, fout):
    """
    Base32-encode the file at *fin* into *fout*.

    Args:
        fin (path): Path of the input file
        fout (path): Path for the out file
    """
    # Fix: the original never closed either file handle; context managers
    # close (and flush) them deterministically.
    with open(fin, 'rb') as src:
        data = src.read()
    with open(fout, 'wb') as dst:
        dst.write(base64.b32encode(data))
| [
"dev.csnova@gmail.com"
] | dev.csnova@gmail.com |
ad4e66e29bd494bd629bac9884cd7367ed7601f6 | 69526d234c01b1d33b9fb569e55fe363d96beac0 | /api/routes/payments.py | 099b50144d23882f39259fcecf2873101b650077 | [] | no_license | jzamora5/orders_creator_backend | 53b0a773fb88d99354175835cebdfc93c8e7357e | d5dd51ba39a5f549cc55fd9835b6082edd91d0a6 | refs/heads/main | 2023-03-29T11:27:08.602656 | 2021-04-05T22:49:25 | 2021-04-05T22:49:25 | 348,373,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | from api.models.order import Order
from api.models.user import User
from api.models.payment import Payment
from api.routes import app_routes
from flask import abort, jsonify, make_response, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from app import storage
@app_routes.route('/order/<order_id>/payments', methods=['POST'], strict_slashes=False)
@jwt_required()
def post_payment(order_id):
    """
    Creates a Payment attached to the given order.

    Requires a JWT; only the order's owner may attach a payment.
    Expects a JSON body with "status", "payment_type" and a numeric
    "total".  Returns the new payment as JSON with HTTP 201.
    """
    order = storage.get(Order, order_id)
    if not order:
        abort(make_response(jsonify({"error": "Order not found"}), 404))
    # Only the authenticated owner of the order may create payments for it.
    if get_jwt_identity() != order.user_id:
        abort(make_response(jsonify({"error": "forbidden"}), 403))
    if not request.get_json():
        abort(make_response(jsonify({"error": "Not a JSON"}), 400))
    needed_attributes = ["status", "payment_type", "total"]
    data = request.get_json()
    for needed in needed_attributes:
        if needed not in data:
            abort(make_response(jsonify({"error": f"Missing {needed}"}), 400))
    # Validate that the total parses as a number before persisting.
    try:
        float(data["total"])
    except ValueError:
        abort(make_response(
            jsonify({"error": "Total must be a valid number"}), 400))
    instance = Payment(**data)
    instance.order_id = order_id
    instance.save()
    return make_response(jsonify(instance.to_dict()), 201)
@app_routes.route('/order/<order_id>/payments', methods=['GET'], strict_slashes=False)
@jwt_required()
def get_payments(order_id):
    """Return all payments of the given order as a JSON list.

    Requires a JWT; only the order's owner may read its payments.
    """
    order = storage.get(Order, order_id)
    if not order:
        abort(make_response(jsonify({"error": "Order not found"}), 404))
    if get_jwt_identity() != order.user_id:
        abort(make_response(jsonify({"error": "forbidden"}), 403))
    return jsonify([payment.to_dict() for payment in order.payments])
# @app_routes.route('/order/<order_id>/payments/<payment_id>',
# methods=['GET'], strict_slashes=False)
# @jwt_required()
# def get_payment(order_id, payment_id):
# order = storage.get(Order, order_id)
# if not order:
# abort(make_response(jsonify({"error": "Order not found"}), 404))
# payment = storage.get(Payment, payment_id)
# if not payment:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if payment.order.id != order.id:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if get_jwt_identity() != order.user_id:
# abort(make_response(jsonify({"error": "forbidden"}), 403))
# payment_dict = payment.to_dict()
# del payment_dict["order"]
# return jsonify(payment_dict)
# @app_routes.route('/order/<order_id>/payments/<payment_id>',
# methods=['PUT'], strict_slashes=False)
# @jwt_required()
# def put_payment(order_id, payment_id):
# order = storage.get(Order, order_id)
# if not order:
# abort(make_response(jsonify({"error": "Order not found"}), 404))
# payment = storage.get(Payment, payment_id)
# if not payment:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if payment.order.id != order.id:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if get_jwt_identity() != order.user_id:
# abort(make_response(jsonify({"error": "forbidden"}), 403))
# ignore = ['id', 'created_at', 'updated_at']
# data = request.get_json()
# for key, value in data.items():
# if key not in ignore:
# setattr(payment, key, value)
# payment.save()
# payment_dict = payment.to_dict()
# del payment_dict["order"]
# return make_response(jsonify(payment_dict), 200)
| [
"jzamora_5@yahoo.com"
] | jzamora_5@yahoo.com |
bbe83a41e209fce4ac10e74b7b02891237b2d179 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_W_focus_Zok/woColorJ/Sob_k15_s001_EroM/pyr_Tcrop255_p60_j15/pyr_0s/L6/step10_a.py | a013a6de43fe29ab368aa4eb3c0e5c3ab7fab72d | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,998 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_W_ch_norm_v2
use_loss_obj = [G_sobel_k15_erose_M_loss_info_builder.set_loss_target("UNet_W").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### No CLI argument (e.g. run directly with F5 or `python step10_...py`):
        ### run the default experiment instead of falling through to the
        ### subprocess entry point below.
        ch032_0side.build().run()
        # print('no argument')
        sys.exit()
    ### Entry point for step10_b_subprocess.py: the first CLI argument is a
    ### Python expression such as "some_exp.build().run()".
    ### NOTE(review): eval() on argv is code injection if this script is ever
    ### invoked with untrusted arguments -- flagged, left as-is by convention.
    eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
6e50fb6fe25f4adbdb8891b82dbd2f019d367234 | 4c94f27501ff060ff0d3aecc5fdc5a3f56971664 | /atom/electron/python/test/test_create_card_client_response_vo.py | 4546858eddaede7ba9cad461d8c1459cee30eb4a | [
"Apache-2.0"
] | permissive | hydrogen-dev/SDK | 7af707b9359f169794fe27582903622a19b0e2bc | f101ce79aacf76fdfeb760f7574918f651a25e53 | refs/heads/master | 2023-08-04T00:09:59.646917 | 2022-06-19T20:39:02 | 2022-06-19T20:39:02 | 180,199,768 | 11 | 11 | Apache-2.0 | 2023-07-22T02:33:24 | 2019-04-08T17:30:24 | C# | UTF-8 | Python | false | false | 997 | py | # coding: utf-8
"""
Hydrogen Electron API
The Hydrogen Electron API # noqa: E501
OpenAPI spec version: 1.3.1
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import electron_api
from electron_api.models.create_card_client_response_vo import CreateCardClientResponseVO # noqa: E501
from electron_api.rest import ApiException
class TestCreateCardClientResponseVO(unittest.TestCase):
    """CreateCardClientResponseVO unit test stubs.

    Generated by swagger-codegen; the single test is a placeholder until
    the model can be constructed with example values (see FIXME below).
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testCreateCardClientResponseVO(self):
        """Test CreateCardClientResponseVO"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.create_card_client_response_vo.CreateCardClientResponseVO()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly with `python test_...py`.
    unittest.main()
| [
"chandrashekhar.paatni@tothenew.com"
] | chandrashekhar.paatni@tothenew.com |
84a82b96a54dcb103897e80e589c155c66bfc308 | cdfa076dbeb3cf5dca174089660aeb10599e9bd1 | /main_site/migrations/0001_initial.py | 7814e4f693985e8a778ff5cb982fbb2a94c0114c | [] | no_license | LiaMed12/LibStore | 3afa480a4995e75ac593a117f02c37d9e3d8ba8a | c690cc461369dbca2013f3b944b28f1e2cd2df75 | refs/heads/master | 2023-05-24T11:43:48.279974 | 2021-06-11T16:15:48 | 2021-06-11T16:15:48 | 366,414,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | # Generated by Django 3.1.6 on 2021-02-12 12:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); creates the initial tables
    # for tags, specifications and per-user profile info.  Do not hand-edit
    # applied migrations -- create a new migration for schema changes.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='tags_list',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='specifications',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name_specification', models.CharField(db_index=True, max_length=50)),
                ('date', models.DateField()),
                ('version', models.FloatField()),
                ('description', models.TextField()),
                ('text_specification', models.TextField()),
                ('author', models.ForeignKey(max_length=50, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username')),
                ('tags', models.ManyToManyField(to='main_site.tags_list')),
            ],
        ),
        migrations.CreateModel(
            name='info_person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('text_about_person', models.TextField(blank=True, null=True)),
                ('login', models.ForeignKey(max_length=50, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username')),
            ],
        ),
    ]
| [
"lia.medvedeva2017@yandex.ru"
] | lia.medvedeva2017@yandex.ru |
acd10b8f4a7a1c925fe17066c2dada6d620110a8 | f0b33d42741f3c470cc7f616c70a4b10a73fc012 | /scripts/ddd17_steer_export.py | 1af7dd96ef80d87062a2bd107b25ea8fa25b1c88 | [
"MIT"
] | permissive | duguyue100/ddd20-itsc20 | 1e51a7a76fe1f2759746814ae58f4e1e21c0c4e6 | 667bb5e702a06cfff30b20de669697f3271baf04 | refs/heads/master | 2021-09-17T06:34:17.545026 | 2018-06-28T16:35:39 | 2018-06-28T16:35:39 | 114,002,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,309 | py | """Steer export.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import os
from os.path import join, isfile, isdir
import cPickle as pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import spiker
from spiker.data import ddd17
from spiker.models import utils
# def find_best(exp_dir):
# """find best experiment."""
# exp_dir = os.path.join(spiker.SPIKER_EXPS+"-run-3", exp_dir)
# file_list = os.listdir(exp_dir)
# file_clean_list = []
# for item in file_list:
# if ".hdf5" in item:
# file_clean_list.append(item)
# file_list = sorted(file_clean_list)
# return file_list[-1]
def get_prediction(X_test, exp_type, model_base, sensor_type, model_file):
    """Load a trained Keras checkpoint and run it over *X_test*.

    The checkpoint is located under SPIKER_EXPS + "-run-3" in a directory
    named exp_type + model_base + sensor_type, containing *model_file*.
    Returns whatever utils.keras_predict_batch yields -- presumably a
    per-sample numpy array of predictions; TODO confirm in spiker.models.
    """
    model_file_base = exp_type+model_base+sensor_type
    model_path = os.path.join(
        spiker.SPIKER_EXPS+"-run-3", model_file_base,
        model_file)
    print ("[MESSAGE]", model_path)
    model = utils.keras_load_model(model_path)
    prediction = utils.keras_predict_batch(model, X_test, verbose=True)
    return prediction
data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
"jul28/rec1501288723-export.hdf5")
frame_cut = [500, 1000]
model_base = "-day-4-"
exp_type = "steering"
sensor_type = ["full", "dvs", "aps"]
load_prediction = os.path.join(
spiker.SPIKER_EXTRA, "pred"+model_base+"result-run-3")
if os.path.isfile(load_prediction):
print ("[MESSAGE] Prediction available")
with open(load_prediction, "r") as f:
(steer_full, steer_dvs, steer_aps) = pickle.load(f)
f.close()
else:
# export ground truth
test_frames, _ = ddd17.prepare_train_data(data_path,
y_name="steering",
frame_cut=frame_cut)
test_frames /= 255.
test_frames -= np.mean(test_frames, keepdims=True)
num_samples = test_frames.shape[0]
num_train = int(num_samples*0.7)
X_test = test_frames[num_train:]
del test_frames
# steering full
steer_full = get_prediction(
X_test, exp_type, model_base, sensor_type[0],
"steering-day-4-full-103-0.02.hdf5")
print ("[MESSAGE] Steering Full")
# steering dvs
steer_dvs = get_prediction(
X_test[:, :, :, 0][..., np.newaxis],
exp_type, model_base, sensor_type[1],
"steering-day-4-dvs-200-0.03.hdf5")
print ("[MESSAGE] Steering DVS")
# steering aps
steer_aps = get_prediction(
X_test[:, :, :, 1][..., np.newaxis],
exp_type, model_base, sensor_type[2],
"steering-day-4-aps-118-0.03.hdf5")
print ("[MESSAGE] Steering APS")
del X_test
save_prediction = os.path.join(
spiker.SPIKER_EXTRA, "pred"+model_base+"result-run-3")
with open(save_prediction, "w") as f:
pickle.dump([steer_full, steer_dvs, steer_aps], f)
origin_data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
"jul28/rec1501288723.hdf5")
num_samples = 500
frames, steering = ddd17.prepare_train_data(data_path,
target_size=None,
y_name="steering",
frame_cut=frame_cut,
data_portion="test",
data_type="uint8",
num_samples=num_samples)
steering = ddd17.prepare_train_data(data_path,
target_size=None,
y_name="steering",
only_y=True,
frame_cut=frame_cut,
data_portion="test",
data_type="uint8")
steer, steer_time = ddd17.export_data_field(
origin_data_path, ['steering_wheel_angle'], frame_cut=frame_cut,
data_portion="test")
steer_time -= steer_time[0]
# in ms
steer_time = steer_time.astype("float32")/1e6
print (steer_time)
idx = 250
fig = plt.figure(figsize=(10, 8))
outer_grid = gridspec.GridSpec(2, 1, wspace=0.1)
# plot frames
frame_grid = gridspec.GridSpecFromSubplotSpec(
1, 2, subplot_spec=outer_grid[0, 0],
hspace=0.1)
aps_frame = plt.Subplot(fig, frame_grid[0])
aps_frame.imshow(frames[idx, :, :, 1], cmap="gray")
aps_frame.axis("off")
aps_frame.set_title("APS Frame")
fig.add_subplot(aps_frame)
dvs_frame = plt.Subplot(fig, frame_grid[1])
dvs_frame.imshow(frames[idx, :, :, 0], cmap="gray")
dvs_frame.axis("off")
dvs_frame.set_title("DVS Frame")
fig.add_subplot(dvs_frame)
# plot steering curve
steering_curve = plt.Subplot(fig, outer_grid[1, 0])
min_steer = np.min(steering*180/np.pi)
max_steer = np.max(steering*180/np.pi)
steering_curve.plot(steer_time, steering*180/np.pi,
label="groundtruth",
color="#08306b",
linestyle="-",
linewidth=2)
steering_curve.plot(steer_time, steer_dvs*180/np.pi,
label="DVS",
color="#3f007d",
linestyle="-",
linewidth=1)
steering_curve.plot(steer_time, steer_aps*180/np.pi,
label="APS",
color="#00441b",
linestyle="-",
linewidth=1)
steering_curve.plot(steer_time, steer_full*180/np.pi,
label="DVS+APS",
color="#7f2704",
linestyle="-",
linewidth=1)
steering_curve.plot((steer_time[idx], steer_time[idx]),
(min_steer, max_steer), color="black",
linestyle="-", linewidth=1)
steering_curve.set_xlim(left=0, right=steer_time[-1])
steering_curve.set_title("Steering Wheel Angle Prediction")
steering_curve.grid(linestyle="-.")
steering_curve.legend(fontsize=10)
steering_curve.set_ylabel("degree")
steering_curve.set_xlabel("time (s)")
fig.add_subplot(steering_curve)
plt.savefig(join(spiker.SPIKER_EXTRA, "cvprfigs",
"vis"+model_base+"result"+".pdf"),
dpi=600, format="pdf",
bbox="tight", pad_inches=0.5)
| [
"duguyue100@gmail.com"
] | duguyue100@gmail.com |
7ba33970e027a501fb55d61aba6e69148fb818d7 | 743002f0b4c14c0ed4c0215dfb8c4110ec7d75b1 | /src/ddGeneralSpeeds.py | 06044a43540756de67e83f2f8437b73b63f51563 | [] | no_license | TavoloPerUno/SouthIndiaSHSpeeds | c3c12ce3d8d29da44c4089a4601854f69fdccb25 | 176db1f27a5c3e6509b64258d552f7fcb34afb3a | refs/heads/master | 2021-01-20T02:24:50.498024 | 2017-07-13T14:08:28 | 2017-07-13T14:08:28 | 89,401,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | import lib.interpolate as itp
import googlemaps
import pandas as pd
import numpy as np
from datetime import datetime  # NOTE(review): unused import
# SECURITY: a live Google Maps API key is hard-coded below.  It should be
# revoked and loaded from an environment variable or config file instead.
gmaps = googlemaps.Client(key='AIzaSyD3sNjKSAqK_7eBo8FNRbyapjUODTXnh_M')
# Geocoding an address
orig_geocode = gmaps.geocode('SH 20, Belagavi, Karnataka')
dest_geocode = gmaps.geocode('SH 20, Bagalkot, Karnataka')
orgLat = orig_geocode[0]['geometry']['location']['lat']
orgLng = orig_geocode[0]['geometry']['location']['lng']
destLat = dest_geocode[0]['geometry']['location']['lat']
destLng = dest_geocode[0]['geometry']['location']['lng']
# Interpolate 100 waypoints between the endpoints (itp = lib.interpolate,
# imported on the original first line of this script).
waypoints = itp.interpolateRoute(100, orgLat, orgLng, destLat, destLng)
points = pd.DataFrame(columns = ["Id", "Name", "Lat", "Lng"])
for idx, pt in enumerate(waypoints):
    if idx == 0:
        points.loc[idx] = [idx + 1, "Belgaum", pt[0], pt[1]]
    elif idx == len(waypoints) - 1:
        points.loc[idx] = [idx + 1, "Bagalkot", pt[0], pt[1]]
    else:
        points.loc[idx] = [idx + 1, "", pt[0], pt[1]]
# NOTE(review): the result of this split is discarded -- dead statement.
np.array_split(points[0:len(points)-1], 100)
rdPoints = pd.DataFrame(columns = ["Id", "Name", "Lat", "Lng"])
i=0
# Snap the interpolated points to roads in batches of 98 waypoints plus the
# two endpoints (the Roads API caps snap_to_roads at 100 points per call).
while(i < len(points)):
    if(i+99 < len(points)):
        tpts = str(points.iloc[0]['Lat']) + ',' + str(points.iloc[0]['Lng']) + '|' + \
               ''.join([str(pt['Lat']) + ',' + str(pt['Lng']) + '|' for index, pt in points[i:i+98].iterrows()]) \
               + str(points.iloc[len(points) - 1]['Lat']) + ',' + str(points.iloc[len(points) - 1]['Lng'])
        rgPts = gmaps.snap_to_roads(tpts, interpolate=False)
        if(i == 0):
            rdPoints.loc[0] = ["1", "Belgaum", rgPts[0]['location']['latitude'], rgPts[0]['location']['longitude']]
        for loc in rgPts[0:99]:
            rdPoints.loc[len(rdPoints)] = [str(len(rdPoints)+ 1), "", loc['location']['latitude'], loc['location']['longitude']]
    else:
        tpts = str(points.iloc[0]['Lat']) + ',' + str(points.iloc[0]['Lng']) + \
               ''.join(['|' + str(pt['Lat']) + ',' + str(pt['Lng']) for index, pt in points[i:len(points)].iterrows()])
        rgPts = gmaps.snap_to_roads(tpts, interpolate=False)
        for loc in rgPts[0:len(rgPts)-2]:
            rdPoints.loc[len(rdPoints)] = [str(len(rdPoints) + 1), "", loc['location']['latitude'], loc['location']['longitude']]
        # NOTE(review): this re-appends the last `loc` from the loop above,
        # duplicating one row -- looks unintentional; verify downstream use.
        rdPoints.loc[len(rdPoints)] = [str(len(rdPoints) + 1), "", loc['location']['latitude'], loc['location']['longitude']]
    i += 98
print(rdPoints)
"morelwmilliam@gmail.com"
] | morelwmilliam@gmail.com |
f4dd93672e29f0e97bad6fb6a0be8c275e83bce5 | 65ceeadf2d042d6724630644f6ba7309ab7e86c3 | /handlers/message/message_urls.py | e8659a504170d18d5e222fcf241fae88218fc8f0 | [] | no_license | jimsick/tornado_pro | c08611fedc10f39b1ae9365276e81c59760d2adc | 8ee9a17aa8c5f8d2925901f459cb1c467abd9a4f | refs/heads/master | 2021-04-12T10:09:21.358651 | 2018-03-22T07:01:49 | 2018-03-22T07:01:49 | 126,292,701 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # coding=utf-8
from message_handler import MessageHandler, MessageWebSocket, SendMessageHandler
# URL routing table for the message app -- presumably consumed by a Tornado
# Application: message page, websocket channel and send-message endpoint.
message_urls = [
    (r'/message/message', MessageHandler),
    (r'/message/message_websocket', MessageWebSocket),
    (r'/message/send_message', SendMessageHandler),
]
"877415861@qq.com"
] | 877415861@qq.com |
acaa05cb8da7ae9fe88ad5617ae580dea52be9e4 | c4a49a77665988b81a426420bddbaa8beed728dc | /PageObject/ImageUploadPage.py | 7d014f77251e7ed623c3406e7d4a7bb603c07a78 | [
"MIT"
] | permissive | zhangxiuqing007/EfUITest | 4a0e6a9a0a368ef9893898954bcb193f6b169e44 | d54c568b3996ef3e732ea335916de61cadc06e84 | refs/heads/master | 2020-09-05T20:08:23.531863 | 2019-11-11T09:33:52 | 2019-11-11T09:33:52 | 220,200,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from selenium import webdriver
class ImageUploadPage:
    """Page Object wrapping the image-upload page for Selenium UI tests."""
    def __init__(self, driver: webdriver.Firefox):
        # WebDriver session shared by all element lookups on this page.
        self.driver = driver
    def get_file_input(self):
        # The <input name="images"> file picker used to choose the upload.
        return self.driver.find_element_by_css_selector('input[name="images"]')
    def get_submit_btn(self):
        # The submit button (the value text is Chinese for "submit").
        return self.driver.find_element_by_css_selector("input[value='提交']")
| [
"690313521@qq.com"
] | 690313521@qq.com |
48d03b641514cdc7a52746b3b6dd7a2f331010af | 8a0e2144c9cef99cc1d762366a1baf8a1d365a14 | /calc/views.py | 15b8a914468e4c8b007f31db395fd7bed9c34a1c | [] | no_license | venkatesh-kulkarni/User-Authentication-using-Django | f08920964a2eb6a941e5f6a1fe382b8b1e1b29df | f5c30a50927c236c710e6c4c9c746c87a907c84a | refs/heads/master | 2023-05-12T14:59:03.483237 | 2021-05-07T22:04:39 | 2021-05-07T22:04:39 | 365,361,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request):
    # Render the calculator landing page; the hard-coded name feeds the
    # template greeting ('Venkates' looks like a typo, but it is a runtime
    # string so it is left verbatim).
    return render(request, 'home.html', {'name': 'Venkates'})
def add(request):
    """Sum the POSTed num1/num2 form fields and render the result page."""
    total = int(request.POST['num1']) + int(request.POST['num2'])
    return render(request, 'result.html', {'result': total})
| [
"venkateshkulkarni2001@gmail.com"
] | venkateshkulkarni2001@gmail.com |
f9daf2e153b85d048a26f62dfe798465984f0635 | 443e8f16fc1cea32a40211ba02cc7d07c234b495 | /plotpca.py | 60cb9e46236d49ba5a5753491609c6b18ab3834b | [] | no_license | vipinjose90/magik-mice | 0b1f1dedb7c80d3412eb30bbf4013c0916f91905 | eeafc3073e2edec49e679d799b21e8e7e7f9d0f0 | refs/heads/master | 2020-11-26T17:39:19.934861 | 2019-12-20T00:43:11 | 2019-12-20T00:43:11 | 229,162,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | #!/usr/bin/env python
from pylab import scatter, show, cm, colorbar, savefig, axis, \
figure, xlim, axes, hsv, subplots_adjust as adjust
from itertools import izip
from sys import argv, exit
import os.path as osp
import matplotlib.cm as cm
from matplotlib.mlab import PCA
import numpy
from sklearn.decomposition import PCA
def plot(val_fn, pts_fn, output_fn):
    # Python 2 code (xreadlines, print statements).  Reads 2-D points from
    # *pts_fn* and a per-point value from *val_fn*, projects the points onto
    # their first two principal components and writes a coloured scatter
    # plot to *output_fn*.
    points = []
    with open(pts_fn) as fp:
        for line in fp.xreadlines():
            points.append(map(float, line.split()))
    values = []
    with open(val_fn) as fp:
        for line in fp.xreadlines():
            # Only column 1 is used -- column 0 is presumably an id/key;
            # TODO confirm the value-file format.
            values.append(float(line.split()[1]))
    xx = [pt[0] for pt in points]
    yy = [pt[1] for pt in points]
    print "X:", min(xx), max(xx)
    print "Y:", min(yy), max(yy)
    # Shift so the minimum maps to 0, then wrap into [0, 1) for use as a
    # cyclic colour coordinate.
    m = min(values)
    values = [(v-m) % 1. for v in values]
    print "V:", min(values), max(values)
    # hsv()
    # NOTE(review): myData is computed but unused -- superseded by the
    # sklearn PCA call below.
    myData = numpy.array(points)
    #results = PCA(myData,2)
    pca = PCA(n_components=2)
    results = pca.fit_transform(points)
    fig = figure()
    scatter(results[:,0],results[:,1],s=10,c=values,cmap="spectral")
    colorbar()
    # ax = fig.add_axes([-.05,-.1,1.1,1.1])
    ax = axes()
    ax.set_axis_off()
    ax.set_aspect('equal', 'box')
    # adjust(0,0,1,1,0,0)
    fig.savefig(output_fn)
if __name__ == '__main__':
    # CLI entry: plotpca.py VALUES POINTS; the output path is VALUES with
    # its extension replaced by .pdf.
    if len(argv) < 3:
        print "Usage: %s VALUES POINTS" % argv[0]
        exit()
    val_fn = argv[1]
    pts_fn = argv[2]
    output_fn, ext = osp.splitext(val_fn)
    output_fn += '.pdf'
    plot(val_fn, pts_fn, output_fn)
| [
"u1077193@utah.edu"
] | u1077193@utah.edu |
e2b6d1479c13ce721b4cd3e16d796412e4ceafa8 | 8b2fc2aa3cb4bf2f641a542de0f625639f3e4c8c | /modelado/Mimodelo.py | c9987a7b555b8e95dcb4ac2457f1c18ec3aafcbd | [] | no_license | Programacion-Algoritmos-18-2/2bim-clase-04-paxasaval | d8777f7c24d1f3be22748a1cf69d2a7a2f9ca86b | 594acc0a26814d673b75e29ce33390ac62fabce2 | refs/heads/master | 2020-04-14T21:32:11.721660 | 2019-01-04T16:58:15 | 2019-01-04T16:58:15 | 164,131,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | class Equipo(object):
"""docstring for Equipo"""
def __init__(self, nombre,ciudad,campeonatos,numJugadores):
self.nombre=nombre
self.ciudad=ciudad
self.campeonatos=int(campeonatos)
self.numJugadores=int(numJugadores)
"""
Creamos los metodos GET y SET para cada atributo
"""
def setNombre(self,nombre):
self.nombre=nombre
def getNombre(self):
return self.nombre
def setCiudad(self,ciudad):
self.ciudad=ciudad
def getCiudad(self):
return self.ciudad
def setCampeonatos(self,campeonatos):
self.campeonatos=int(campeonatos)
def getCampeonatos(self):
return self.campeonatos
def setNumJugadores(self,numJugadores):
self.numJugadores=int(numJugadores)
def getNumJugadores(self):
return self.numJugadores
"""
Metodo __str__ y __repr__ para personalziar al rpesentacion de nuestro objeto
"""
def __str__(self):
cadena="Nombre: %s|Ciudad: %s|Partidos Ganados: %d|Partidos Jugados: %d\n"%(self.nombre,self.ciudad,self.campeonatos,self.numJugadores)
return cadena
def __repr__(self):
cadena="Nombre: %s|Ciudad: %s|Partidos Ganados: %d|Partidos Jugados: %d\n"%(self.nombre,self.ciudad,self.campeonatos,self.numJugadores)
return cadena
class Operaciones(object):
def __init__(self, listado):
self.listado_equipos=listado
def ordenar(self, opcion):
"""
https://docs.python.org/3/howto/sorting.html
>>> sorted(student_objects, key=lambda student: student.age) # sort by age
"""
if opcion=="Nombre":
return sorted(self.listado_equipos, key=lambda equipo: equipo.nombre)
elif opcion=="Campeonatos":
return sorted(self.listado_equipos, key=lambda equipo: equipo.campeonatos)
| [
"paxasaval1003@gmail.com"
] | paxasaval1003@gmail.com |
362df6b63b69bd5d5fd4eb04726056f47d873113 | 122f9bf0d996c104f541453ab35c56f6ff3fc7cd | /z수업용문제/JunminLim/2331_반복수열.py | cfed0ed0d334dc85da62587de4d10ebc079ceff3 | [] | no_license | JannaKim/PS | 1302e9b6bc529d582ecc7d7fe4f249a52311ff30 | b9c3ce6a7a47afeaa0c62d952b5936d407da129b | refs/heads/master | 2023-08-10T17:49:00.925460 | 2021-09-13T02:21:34 | 2021-09-13T02:21:34 | 312,822,458 | 0 | 0 | null | 2021-04-23T15:31:11 | 2020-11-14T13:27:34 | Python | UTF-8 | Python | false | false | 281 | py |
n=input()
L=[]
P=[]
while n not in L:
L.append(n)
a = 0
for i in n:
a+=int(i)**2
n = str(a)
while n in L:
L.remove(n)
a = 0
for i in n:
a+=int(i)**2
n = str(a)
print(len(L))
'''
for i in range (len(L)):
dic[L[i], i]
''' | [
"baradamoh@gmail.com"
] | baradamoh@gmail.com |
e258390aa13593f651e7ecf2780121ade1ffe47d | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/test/test_com_adobe_granite_acp_platform_platform_servlet_info.py | 488d29b6091afc227c8d8f0f40e8699ea4f50cdc | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 1,263 | py | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_granite_acp_platform_platform_servlet_info import ComAdobeGraniteAcpPlatformPlatformServletInfo # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeGraniteAcpPlatformPlatformServletInfo(unittest.TestCase):
"""ComAdobeGraniteAcpPlatformPlatformServletInfo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComAdobeGraniteAcpPlatformPlatformServletInfo(self):
"""Test ComAdobeGraniteAcpPlatformPlatformServletInfo"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_adobe_granite_acp_platform_platform_servlet_info.ComAdobeGraniteAcpPlatformPlatformServletInfo() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"michael.bloch@shinesolutions.com"
] | michael.bloch@shinesolutions.com |
c81bbc5f7d178a12cd37aad67f78fcc2e0903d63 | 20209d95feaab75411555d883536f741f6c58efd | /example/interfaces.py | f7ac396e4f55635d8a2a88de5828a0e9769432bc | [
"MIT"
] | permissive | tomer/di_container | 3962b35950857dfc07a5e9858ee8af2a181803be | 674c4af4ddeffd60a4dffe2bddb4f79b5a75f3f2 | refs/heads/master | 2022-12-07T07:24:28.893142 | 2020-08-11T15:15:28 | 2020-08-11T15:15:28 | 286,786,758 | 0 | 0 | MIT | 2020-08-11T15:55:31 | 2020-08-11T15:55:30 | null | UTF-8 | Python | false | false | 840 | py | from abc import ABC
class ILogger(ABC):
def __repr__(self):
return f'{self.__class__.__name__}()'
class ICommProtocol(ABC):
def __init__(self, logger: ILogger):
self._logger = logger
def __repr__(self):
return f'{self.__class__.__name__}({self._logger})'
class ICommunicator(ABC):
def __init__(self, comm_protocol: ICommProtocol, logger: ILogger):
self._comm_protocol = comm_protocol
self._logger = logger
def __repr__(self):
return f'{self.__class__.__name__}({self._comm_protocol}, {self._logger})'
class IDatabase(ABC):
def __init__(self, database_url: str, logger: ILogger):
self._database_url = database_url
self._logger = logger
def __repr__(self):
return f'{self.__class__.__name__}({self._database_url}, {self._logger})'
| [
"66797887+eyaldror@users.noreply.github.com"
] | 66797887+eyaldror@users.noreply.github.com |
d7ec2bb2519157950683aac1cb92ef4219b589b8 | 0ba5fad491c6222da85d21242eefe6aa85e6efdc | /consoleme/lib/v2/roles.py | 66b98a4e254378878741a39e47e39c84d30ecb6a | [
"Apache-2.0"
] | permissive | ManiAtGenex/consoleme | e0b680534d6ec2b1cb5e11fde653ce1b3f1022ca | 4ad693f73d49f6551ed8bf855a224dda98e316a4 | refs/heads/master | 2023-03-05T15:16:54.583837 | 2021-02-09T18:48:39 | 2021-02-09T18:48:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | from datetime import datetime, timedelta
from typing import Optional, Union
import ujson as json
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.crypto import Crypto
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler, redis_get
from consoleme.models import (
CloudTrailDetailsModel,
CloudTrailError,
CloudTrailErrorArray,
ExtendedRoleModel,
RoleModel,
S3DetailsModel,
S3Error,
S3ErrorArray,
)
stats = get_plugin_by_name(config.get("plugins.metrics", "default_metrics"))()
log = config.get_logger()
crypto = Crypto()
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))()
internal_policies = get_plugin_by_name(
config.get("plugins.internal_policies", "default_policies")
)()
red = RedisHandler().redis_sync()
async def get_config_timeline_url_for_role(role, account_id):
resource_id = role.get("resourceId")
if resource_id:
config_history_url = await get_aws_config_history_url_for_resource(
account_id, resource_id, "AWS::IAM::Role"
)
return config_history_url
async def get_cloudtrail_details_for_role(arn: str):
"""
Retrieves CT details associated with role, if they exist exists
:param arn:
:return:
"""
error_url = config.get("cloudtrail_errors.error_messages_by_role_uri", "").format(
arn=arn
)
errors_unformatted = await internal_policies.get_errors_by_role(
arn, config.get("policies.number_cloudtrail_errors_to_display", 5)
)
ct_errors = []
for event_call, value in errors_unformatted.items():
ct_errors.append(
CloudTrailError(event_call=event_call, count=value.get("count", 0))
)
return CloudTrailDetailsModel(
error_url=error_url, errors=CloudTrailErrorArray(cloudtrail_errors=ct_errors)
)
async def get_s3_details_for_role(account_id: str, role_name: str) -> S3DetailsModel:
"""
Retrieves s3 details associated with role, if it exists
:param arn:
:return:
"""
arn = f"arn:aws:iam::{account_id}:role/{role_name}"
yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y%m%d")
error_url = config.get("s3.query_url", "").format(
yesterday=yesterday, role_name=f"'{role_name}'", account_id=f"'{account_id}'"
)
query_url = config.get("s3.non_error_query_url", "").format(
yesterday=yesterday, role_name=f"'{role_name}'", account_id=f"'{account_id}'"
)
s3_error_topic = config.get("redis.s3_errors", "S3_ERRORS")
all_s3_errors = await redis_get(s3_error_topic)
s3_errors_unformatted = []
if all_s3_errors:
s3_errors_unformatted = json.loads(all_s3_errors).get(arn, [])
s3_errors_formatted = []
for error in s3_errors_unformatted:
s3_errors_formatted.append(
S3Error(
count=error.get("count", ""),
bucket_name=error.get("bucket_name", ""),
request_prefix=error.get("request_prefix", ""),
error_call=error.get("error_call", ""),
status_code=error.get("status_code", ""),
status_text=error.get("status_text", ""),
role_arn=arn,
)
)
return S3DetailsModel(
query_url=query_url,
error_url=error_url,
errors=S3ErrorArray(s3_errors=s3_errors_formatted),
)
async def get_app_details_for_role(arn: str):
"""
Retrieves applications associated with role, if they exist
:param arn:
:return:
"""
return await internal_policies.get_applications_associated_with_role(arn)
async def get_role_template(arn: str):
return await sync_to_async(red.hget)(
config.get("templated_roles.redis_key", "TEMPLATED_ROLES_v2"), arn.lower()
)
async def get_role_details(
account_id: str, role_name: str, extended: bool = False, force_refresh: bool = False
) -> Optional[Union[ExtendedRoleModel, RoleModel]]:
account_ids_to_name = await get_account_id_to_name_mapping()
arn = f"arn:aws:iam::{account_id}:role/{role_name}"
role = await aws.fetch_iam_role(account_id, arn, force_refresh=force_refresh)
# requested role doesn't exist
if not role:
return None
if extended:
template = await get_role_template(arn)
return ExtendedRoleModel(
name=role_name,
account_id=account_id,
account_name=account_ids_to_name.get(account_id, None),
arn=arn,
inline_policies=role["policy"]["RolePolicyList"],
assume_role_policy_document=role["policy"]["AssumeRolePolicyDocument"],
config_timeline_url=await get_config_timeline_url_for_role(
role, account_id
),
cloudtrail_details=await get_cloudtrail_details_for_role(arn),
s3_details=await get_s3_details_for_role(
account_id=account_id, role_name=role_name
),
apps=await get_app_details_for_role(arn),
managed_policies=role["policy"]["AttachedManagedPolicies"],
tags=role["policy"]["Tags"],
templated=True if template else False,
template_link=template,
)
else:
return RoleModel(
name=role_name,
account_id=account_id,
account_name=account_ids_to_name.get(account_id, None),
arn=arn,
)
| [
"noreply@github.com"
] | noreply@github.com |
ddba6dd707eb268a90e0934fe9859c21ef4039a1 | f1e5b7c31b3a7fb61b5aadd53d60182987974c1f | /python/model_definitions.py | f950e19291c0fb39745b9ca9e5b76771dd903a2d | [] | no_license | muratcankilic96/fault-detection-in-wind-turbines | 7f4aa316419e2c287cdc4b845bf8e97bee2eb636 | a61653918ba35565dda91355d92954657c3b32a1 | refs/heads/master | 2023-08-12T01:03:30.031015 | 2021-09-13T19:38:11 | 2021-09-13T19:38:11 | 403,759,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,652 | py |
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from tensorflow.keras.layers import (Dense, Conv2D, Flatten, MaxPooling2D, BatchNormalization, LeakyReLU,
SpatialDropout2D, RNN, SimpleRNNCell, LSTM, Dropout, Input)
class ModelDefinitions:
mfcc_length = 0
mfcc_width = 0
spectro_length = 0
spectro_width = 0
mel_spectro_length = 0
mel_spectro_width = 0
# Model 1 [Representation = MFCC | Model = CNN]
@staticmethod
def callMFCC_CNN():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.mfcc_length,ModelDefinitions.mfcc_width,1)))
model.add(Conv2D(32, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.07))
model.add(Conv2D(32, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.07))
model.add(MaxPooling2D())
model.add(SpatialDropout2D(0.07))
model.add(Conv2D(64, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.14))
model.add(Conv2D(64, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
# Model 2 [Representation = Spectrogram | Model = CNN]
@staticmethod
def callSpectrogram_CNN():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.spectro_length,ModelDefinitions.spectro_width,1)))
model.add(Conv2D(32, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(32, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D())
model.add(Conv2D(64, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(64, kernel_size=3, kernel_regularizer=regularizers.l2(0.0005)))
model.add(LeakyReLU(alpha=0.1))
model.add(Flatten())
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
# Model 3 [Representation = Mel Spectrogram | Model = CNN]
@staticmethod
def callMelSpectrogram_CNN():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.mel_spectro_length,ModelDefinitions.mel_spectro_width,1)))
model.add(Conv2D(50, kernel_size=5, activation='relu', kernel_regularizer=regularizers.l2(0.0005)))
model.add(Conv2D(100, kernel_size=3, activation='relu', kernel_regularizer=regularizers.l2(0.0005)))
model.add(MaxPooling2D())
model.add(Conv2D(200, kernel_size=3, activation='relu', kernel_regularizer=regularizers.l2(0.0005)))
model.add(Conv2D(400, kernel_size=3, activation='relu', kernel_regularizer=regularizers.l2(0.0005)))
model.add(Flatten())
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
# Model 4 [Representation = MFCC | Model = RNN]
@staticmethod
def callMFCC_RNN():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.mfcc_length, ModelDefinitions.mfcc_width)))
model.add(RNN(SimpleRNNCell(100), return_sequences=True))
model.add(RNN(SimpleRNNCell(100), return_sequences=False))
model.add(Dropout(0.1))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
# Model 5 [Representation = Spectrogram | Model = RNN]
@staticmethod
def callSpectrogram_RNN():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.spectro_length, ModelDefinitions.spectro_width)))
model.add(RNN(SimpleRNNCell(100), return_sequences=True))
model.add(RNN(SimpleRNNCell(100), return_sequences=False))
model.add(Dropout(0.1))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
# Model 6 [Representation = Mel Spectrogram | Model = RNN]
@staticmethod
def callMelSpectrogram_RNN():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.mel_spectro_length, ModelDefinitions.mel_spectro_width)))
model.add(RNN(SimpleRNNCell(256)))
model.add(Dense(64, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
return model
# Model 7 [Representation = MFCC | Model = LSTM]
@staticmethod
def callMFCC_LSTM():
model = Sequential()
model.add(Input(shape=(ModelDefinitions.mfcc_length, ModelDefinitions.mfcc_width)))
model.add(LSTM(100, return_sequences=True))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.1))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
# Model 8 [Representation = Spectrogram | Model = LSTM]
@staticmethod
def callSpectrogram_LSTM():
model = Sequential()
model.add(Input())
model.add(LSTM(512))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
# Model 9 [Representation = Mel Spectrogram | Model = LSTM]
@staticmethod
def callMelSpectrogram_LSTM():
model = Sequential()
model.add(Input())
model.add(LSTM(128, return_sequences=True))
model.add(RNN(SimpleRNNCell(128), return_sequences=True))
model.add(LSTM(128, return_sequences=True))
model.add(RNN(SimpleRNNCell(128)))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model
| [
"hakkibluoyd@gmail.com"
] | hakkibluoyd@gmail.com |
ad087cd2f2a184b6c8c7ed3614035bfa3825fbc1 | 594a306bed275997343209d73306de0d6a65091b | /main_window.py | 22986f561937506bb3f915c062922e3f7a9b7a62 | [
"MIT"
] | permissive | tomdonegan/tarkovammo | c9941792a7d18932e7dccb80e482340dcffebc5f | 151692b7ce96a5687cb2358e64bfdd6133016863 | refs/heads/master | 2023-04-02T20:25:45.013164 | 2021-04-05T09:07:35 | 2021-04-05T09:07:35 | 307,288,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,365 | py | import sys
import os
import database as db
import requests
import urllib.request
# import wget
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
_AppName_ = 'Tarkov Ammo'
__author__ = 'Tom Donegan'
__license__ = 'The MIT License (MIT)'
__version__ = 0.11
__maintainer__ = 'Tom Donegan'
__email__ = 'tomdonegan@live.co.uk'
__status__ = 'Beta'
class MainMenuUi(QWidget):
def __init__(self):
super(MainMenuUi, self).__init__()
self.setWindowTitle('Tarkov Ammo Data by ToMiSmE')
self.setWindowIcon(QIcon('tarkov.ico'))
# Stops the window from being resized
self.setFixedSize(340, 400)
self.setWindowFlags(Qt.WindowCloseButtonHint | Qt.WindowMinimizeButtonHint)
# The below is a in-script css sheet, this removes the requirement for an external css file
# and makes packing into an .exe easier
self.styleSheet = (
"""
QPushButton {
background-color: grey;
border-radius: 5px;
color: white;
font-size: 13px;
width: 140px;
height: 30px;
}
MainMenuUi {
background: rgb(241,241,241);
}
QLabel {
background-color: grey;
color: white;
height: 60px;
border-radius: 5px;
}
QGroupBox {
border: 3px solid grey;
border-radius: 5px;
}"""
)
self.setStyleSheet(self.styleSheet)
self.createGridLayout()
window_layout = QVBoxLayout()
window_layout.addWidget(self.horizontalGroupBox)
self.setLayout(window_layout)
self.show()
def createGridLayout(self):
self.horizontalGroupBox = QGroupBox()
layout = QGridLayout()
layout.setColumnStretch(0, 3)
layout.setColumnStretch(1, 3)
button_list = ['.300 Blackout', '5.56x45mm',
'.338 Lapua Magnum', '5.7x28mm',
'.366mm', '7.62x25mm',
'.45mm', '7.62x39mm',
'12 Gauge Shot', '7.62x51mm',
'12 Gauge Slugs', '7.62x54R',
'12.7x55mm', '9x18mm',
'20 Gauge', '9x19mm',
'23x75mm', '9x21mm',
'4.6x30mm', '9x39mm',
'5.45x39mm', 'Mounted Weapons',
'Other', 'Check for Updates']
for i in button_list:
btn = QPushButton(i)
if 'Updates' in btn.text():
btn.clicked.connect(self.show_update_window)
layout.addWidget(btn)
#last_row = layout.rowCount()
# Adds the "Check for Updates" button to the bottom of the table.
#layout.addWidget(btn, last_row, 0, 1, 2)
else:
btn.clicked.connect(lambda pass_ammo, param=btn.text(): self.show_ammo_data(param))
layout.addWidget(btn)
self.horizontalGroupBox.setLayout(layout)
def show_ammo_data(self, ammo_size):
self.ammo_table_window = AmmoTableWindow(ammo_size)
self.ammo_table_window.show()
def show_update_window(self):
UpdateCheck()
class UpdateCheck(QWidget):
def __init__(self):
# super(UpdateCheck, self).__init__()
# self.setWindowTitle('This is the window')
# self.progressBar = QProgressBar(self)
super().__init__()
self.update_check()
# Below function checks Github for version data. If current version number is lower,
# files will be downloaded after confirmation from the user.
def update_check(self):
msg = QMessageBox()
styleSheet = (
"""
QPushButton {
background-color: grey;
border-radius: 5px;
color: white;
font-size: 13px;
width: 100%;
height: 30px;
}
QLabel {
background-color: white;
color: black;
height: 60px;
border-radius: 5px;
}
"""
)
self.setStyleSheet(styleSheet)
try:
git_version_data = float(requests.get('https://raw.githubusercontent.com/'
'tomdonegan/tarkovammo/master/version.txt').text)
local_version = float(__version__)
if local_version < git_version_data:
update_selection = msg.information(self, 'Update Available',
f'Current Version: {local_version} '
f'\nAvailable Version: {git_version_data} '
f'\nDownload update?',
QMessageBox.Yes | QMessageBox.No)
if update_selection == msg.Yes:
self.update_downloader()
else:
msg.information(self, 'Update Check', 'You are all up to date.', QMessageBox.Ok)
except ValueError:
msg.information(self, 'Update Error', 'Could not retrieve update!\nConnection Unavailable.',
QMessageBox.Ok)
def get_download_path(self):
"""Returns the default downloads path for linux or windows"""
if os.name != 'nt':
return os.path.join(os.path.expanduser('~'), 'downloads')
import winreg
sub_key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
with winreg.OpenKey(winreg.HKEY_CURRENT_USER, sub_key) as key:
downloads_guid = '{374DE290-123F-4565-9164-39C4925E467B}'
location = winreg.QueryValueEx(key, downloads_guid)[0]
return location
def update_downloader(self):
# specify the url of the file which is to be downloaded
down_url = 'https://github.com/tomdonegan/tarkovammo/raw/master/dist/TarkovAmmo.rar' # specify download url here
# specify save location where the file is to be saved
save_loc = self.get_download_path() + '/TarkovAmmo.rar'
# Dowloading using urllib
urllib.request.urlretrieve(down_url, save_loc, self.handle_progress)
def handle_progress(self, blocknum, blocksize, totalsize):
if totalsize > 0:
## calculate the progress
read_data = blocknum * blocksize
download_percentage = read_data * 100 / totalsize
print(download_percentage)
# self.progressBar.setValue(download_percentage)
QApplication.processEvents()
class AmmoTableWindow(QWidget):
def __init__(self, ammo_size):
super(AmmoTableWindow, self).__init__()
self.setWindowTitle('Tarkov Ammo Data - ' + str(ammo_size))
self.setWindowFlags(Qt.WindowCloseButtonHint | Qt.WindowMinimizeButtonHint)
self.styleSheet = (
"""
AmmoTableWindow {
background: rgb(241,241,241);
}
#TableWidget {
background-color: rgb(241,241,241);
color: black;
border-radius: 1px;
border:1px solid grey;
gridline-color: grey;
}
QPushButton {
background-color: grey;
border-radius: 5px;
color: black;
font-weight: bold;
font-size: 13px;
height: 30px;
}
QHeaderView::section:horizontal {
background: rgb(217, 217, 217);
font-weight: bold;
color: black;
}
QGroupBox {
border: 3px solid grey;
border-radius: 5px;
}"""
)
self.setStyleSheet(self.styleSheet)
self.setWindowIcon(QIcon('tarkov.ico'))
self.ammo_size = ammo_size
self.table_widget = QTableWidget()
self.table_widget.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.table_widget.verticalHeader().setDefaultAlignment(Qt.AlignCenter)
self.reset_button = QPushButton('Reset Data')
self.reset_button.clicked.connect(self.reset_table)
self.table_widget.horizontalHeader()
self.create_table(self.ammo_size)
self.layout = QVBoxLayout()
self.layout.addWidget(self.table_widget)
self.layout.addWidget(self.reset_button)
self.setLayout(self.layout)
self.show()
self.setFixedSize(self.width(), self.height())
self.table_widget.setSortingEnabled(True)
# This section of code is fucking garbage
def reset_table(self):
self.table_widget.setRowCount(0)
self.table_widget.setColumnCount(0)
self.create_table(self.ammo_size)
def create_table(self, ammo_size):
db.ammo_data_list = []
self.ammo_size = ammo_size
self.table_widget.setColumnCount(len(db.titles_list) - 1)
self.table_widget.setObjectName('TableWidget')
row = self.table_widget.rowCount()
col = self.table_widget.columnCount()
self.table_widget.setRowCount(row)
self.table_widget.verticalHeader().hide()
self.table_widget.verticalScrollBar().hide()
self.table_widget.horizontalScrollBar().hide()
self.table_widget.setHorizontalHeaderLabels(db.titles_list[1:])
db.ammo_by_size(self.ammo_size)
for i in db.ammo_data_list:
self.add_table_row(self.table_widget, i)
self.table_widget.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
self.table_widget.resizeColumnsToContents()
def add_table_row(self, table, row_data):
row = self.table_widget.rowCount()
self.table_widget.setRowCount(row + 1)
for col, item in enumerate(row_data):
cell = QTableWidgetItem(str(item))
table.setItem(row, col, cell)
# Change color of cell depending on the value.
for row in range(row + 1):
for col in range(5, 11):
if self.table_widget.item(row, col).text() == '6':
self.table_widget.item(row, col).setBackground(QColor(75, 240, 86))
elif self.table_widget.item(row, col).text() == '5':
self.table_widget.item(row, col).setBackground(QColor(134, 212, 61))
elif self.table_widget.item(row, col).text() == '4':
self.table_widget.item(row, col).setBackground(QColor(192, 184, 37))
elif self.table_widget.item(row, col).text() == '3':
self.table_widget.item(row, col).setBackground(QColor(249, 157, 14))
elif self.table_widget.item(row, col).text() == '2':
self.table_widget.item(row, col).setBackground(QColor(234, 108, 10))
elif self.table_widget.item(row, col).text() == '1':
self.table_widget.item(row, col).setBackground(QColor(220, 59, 7))
elif self.table_widget.item(row, col).text() == '0':
self.table_widget.item(row, col).setBackground(QColor(206, 11, 4))
if __name__ == '__main__':
app = QApplication(sys.argv)
m = MainMenuUi()
m.show()
sys.exit(app.exec_())
| [
"tomdonegan@live.co.uk"
] | tomdonegan@live.co.uk |
26f2f4fa282fac3064a8d18fa75e67c517c1a09c | a1c8731a8527872042bd46340d8d3e6d47596732 | /programming-laboratory-I/70b7/seguro.py | f49bcb6be7708680dab29dab7ae4ee5f91b095ce | [
"MIT"
] | permissive | MisaelAugusto/computer-science | bbf98195b0ee954a7ffaf58e78f4a47b15069314 | d21335a2dc824b54ffe828370f0e6717fd0c7c27 | refs/heads/master | 2022-12-04T08:21:16.052628 | 2020-08-31T13:00:04 | 2020-08-31T13:00:04 | 287,621,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | # coding: utf-8
# Aluno: Misael Augusto
# Matrícula: 117110525
# Problema: Cálculo de Seguro
def calcula_seguro(valor_veiculo, lista):
dados_cliente = []
verdadeiros = [10, 20, 20, 20, 10]
falsos = [20, 10, 10, 10, 20]
pontos = 0
if lista[0] <= 21:
pontos += 20
elif 22 <= lista[0] <= 30:
pontos += 15
elif 31 <= lista[0] <= 40:
pontos += 12
elif 41 <= lista[0] <= 60:
pontos += 10
else:
pontos += 20
for i in range(1, len(lista) - 1):
if lista[i]:
pontos += verdadeiros[i - 1]
else:
pontos += falsos[i - 1]
if lista[-1] == "Lazer" or lista[-1] == "Misto":
pontos += 20
else:
pontos += 10
if pontos <= 80:
mensagem = "Risco Baixo"
valor = valor_veiculo * 0.1
elif 80 < pontos <= 100:
mensagem = "Risco Medio"
valor = valor_veiculo * 0.2
else:
mensagem = "Risco Alto"
valor = valor_veiculo * 0.3
dados_cliente.append(pontos)
dados_cliente.append(mensagem)
dados_cliente.append(valor)
return dados_cliente
print calcula_seguro(2000.0, [21, True, True, True, True, True, "Misto"])
| [
"misael.costa@ccc.ufcg.edu.br"
] | misael.costa@ccc.ufcg.edu.br |
908089328125da6d2932952090fa6f013bd47ea4 | 97dfc004ce16d097594a87b41565c4dfbe496749 | /Week_2/Day_2/app/urls.py | 2522625e8df06fb7bbac4f722019497efbc09845 | [] | no_license | instnadia/python-03-20 | 7df44f2893f51847704de082fb2cf309906eebe5 | d0713c167d6cc461cf2c39469827fd4c4befc2ca | refs/heads/master | 2021-02-11T04:45:23.261105 | 2020-03-19T17:25:43 | 2020-03-19T17:25:43 | 244,455,706 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from django.urls import path
from . import views
# NO LEADING SLASHES
urlpatterns = [
path('', views.index, name='index'),
path('new_person', views.newPerson),
path('new_pet', views.newPet)
] | [
"nnguyen@codingdojo.com"
] | nnguyen@codingdojo.com |
4dfeba5fcaac6dcaad798a1fffa238cb388cd3b7 | 774f82d7cd43aed9f7252abb2dea7b4ecfee5e12 | /thre_data.py | 73356ad43f856fe27d2f4bf0a90eb38cddf146e4 | [] | no_license | zxliu17/Python_code_for_dissertation | 0531332e65456382e9679a9c226141f737ba3302 | a24d47f7643ade019be38f7072b487f55782b907 | refs/heads/master | 2022-11-08T04:44:08.446243 | 2018-06-21T01:34:11 | 2018-06-21T01:34:11 | 132,671,131 | 0 | 1 | null | 2022-10-16T21:57:14 | 2018-05-08T22:17:47 | Python | UTF-8 | Python | false | false | 10,740 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 01:12:37 2018
@author: liuzx17
"""
# -*- coding: utf-8 -*-
"""
Created on Thur May 31 16:13:27 2018
@author: liuzx
"""
import matplotlib.pyplot as plt
import numpy as np
import random
import copy
import math
import time
'''
create a belief world which includes all possible beliefs.
'''
def create_world(propsition_number):
    """
    Enumerate every possible belief state over `propsition_number`
    propositions.

    Returns a list of 2**propsition_number singleton sets, each holding one
    0/1 tuple.  Element i encodes the binary digits of (i + 1) modulo
    2**propsition_number, least-significant bit first.
    """
    world = []
    for i in range(2 ** propsition_number):
        value = i + 1
        bits = []
        for _ in range(propsition_number):
            bits.append(value % 2)
            value //= 2
        world.append({tuple(bits)})
    return world
def random_initialise(agents_number, propsition_number,world):
    '''
    initialise the agents randomly (with a random number of beliefs)

    Each agent first receives one uniformly random belief state; then
    singleton world states (from create_world) are unioned in, in a
    per-agent shuffled order, until the agent's belief set reaches a target
    cardinality drawn uniformly from 1 .. 2**propsition_number.

    Returns a list of `agents_number` sets of 0/1 tuples.
    NOTE(review): results depend on the global `random` state; seed the RNG
    for reproducibility.
    '''
    single_agent =set()# [set()]*an # use SET as for any agents
    # agents_temp = []
    agent_tuple = [] # belief as tuple (tuples are hashable, so usable as set members)
    states = [0]*propsition_number #proposition truth values as a list
    agents = [] #A list of sets of tuple
    indexlist = list(range(2**propsition_number))  # indices into `world`, reshuffled per agent
    # print(indexlist)
    # initialise the agents: one uniformly random belief state each
    for i in range(agents_number):
        for j in range (propsition_number):
            states[j] = (random.randint(0,1))
        states_set = tuple(states)
        agent_tuple.append(states_set)
        single_agent = set (agent_tuple)
        #print (states_set)
        #print (single_agent)
        agent_tuple.clear()
        #print (single_agent)
        agents.append(single_agent)
    # grow each agent's belief set up to a random target cardinality
    for num in range(len(agents)):
        num_blf = random.randint(1,int(math.pow(2,propsition_number)))  # target size
        agt = [agents[num]]  # wrapped in a list because cal_card expects a list of sets
        random.shuffle(indexlist)
        k = 0
        while cal_card(agt) < num_blf :
            # print (agt)
            # print(cal_card(agt))
            # print (num_blf)
            # print(agents)
            agents[num] = agents[num]|world[indexlist[k]]  # union in one more world state
            k=k+1
            agt = [agents[num]]
    return agents
def initialise_agents(agents_number, propsition_number):
    """
    Create `agents_number` agents, each holding exactly one uniformly
    random belief state over `propsition_number` propositions.

    Returns a list of singleton sets, each containing a single 0/1 tuple.
    The global `random` state drives the draws (one randint(0, 1) per
    proposition per agent, in order).
    """
    agents = []
    for _ in range(agents_number):
        belief = tuple(random.randint(0, 1) for _ in range(propsition_number))
        agents.append({belief})
    return agents
'''compute the average cardinality of all agents'''
def cal_card(agents):
    """
    Return the mean cardinality (number of beliefs) across a list of
    belief sets.  Raises ZeroDivisionError for an empty list.
    """
    return sum(len(belief_set) for belief_set in agents) / len(agents)
def deleteDuplicatedElementFromList(listx):
    """
    Return a copy of `listx` with duplicates removed, preserving the order
    of first occurrence.  Uses equality (not hashing), so unhashable
    elements such as lists are supported.
    """
    unique = []
    for element in listx:
        if element not in unique:
            unique.append(element)
    return unique
# Transfer binary belief tuples to their decimal (float) codes.
def trans2dec(list_set_of_tuple):
    """Convert the binary tuples of the FIRST belief set into decimal numbers.

    Only index 0 of ``list_set_of_tuple`` is inspected -- this mirrors the
    original behaviour, where the loop over all agents was narrowed to a
    single one.  Each tuple of 0/1 bits is read most-significant-bit first,
    and the resulting float values are collected in a list.
    """
    decimals = []
    first_set = list_set_of_tuple[0]
    for bits in first_set:
        width = len(bits)
        value = 0.0
        for position, bit in enumerate(bits):
            # 2.0 ** k is the same float math.pow(2, k) produced.
            value += bit * 2.0 ** (width - position - 1)
        decimals.append(value)
    return decimals
def cal_similarity(agents):
    """Return pairwise Jaccard similarities between agents' belief sets.

    Pairs are enumerated as (i, j) with j >= i, so the self-pairs (always
    1.0 for non-empty sets) are included; callers average over this list.
    The dead ``simtotal`` accumulator of the original (it appended the same
    list reference repeatedly and was never used) has been removed.

    :param agents: list of sets of belief tuples
    :return: flat list of |A & B| / |A | B| values
    """
    similarity = []
    an = len(agents)
    for i in range(an):
        for j in range(i, an):
            num_inter = len(agents[i] & agents[j])
            num_uni = len(agents[i] | agents[j])
            similarity.append(num_inter / num_uni)
    return similarity
def simi(agent_1, agent_2):
    """Jaccard similarity of two belief sets: |A & B| / |A | B|."""
    shared = agent_1 & agent_2
    combined = agent_1 | agent_2
    return len(shared) / len(combined)
def iterationSim(agents, agent_number, iteration_times, threshold):
    """Run the pairwise belief-combination process for a fixed number of steps.

    At each step two agent indices are drawn uniformly at random (repetition
    allowed, including i == j); if the agents' Jaccard similarity reaches
    ``threshold`` they merge beliefs: both take the intersection of their
    belief sets, or the union when the intersection is empty (so no agent
    ever ends up with an empty belief set).  After every step the population
    mean cardinality and mean pairwise similarity are recorded.

    The two ``random.randint`` calls per step happen in the original order,
    so seeded runs reproduce the original trajectories.

    :param agents: list of belief sets; entries are rebound in place
    :param agent_number: number of agents (len(agents))
    :param iteration_times: number of interaction steps to simulate
    :param threshold: minimum similarity required for two agents to combine
    :return: (per-step mean similarity, per-step mean cardinality, agents)
    """
    an = agent_number
    averagesim = []
    cardinality = []
    for _ in range(iteration_times):
        index1 = random.randint(0, an - 1)
        index2 = random.randint(0, an - 1)
        if simi(agents[index1], agents[index2]) >= threshold:
            # Set operations are hoisted inside the branch: the original
            # computed them unconditionally, wasting work when the
            # threshold test failed.
            intersection = agents[index1] & agents[index2]
            if intersection:
                merged = intersection
            else:
                merged = agents[index1] | agents[index2]
            agents[index1] = merged
            agents[index2] = merged
        cardinality.append(cal_card(agents))
        similarity = cal_similarity(agents)
        averagesim.append(sum(similarity) / len(similarity))
    return averagesim, cardinality, agents
def text_save(content, filename, mode='a'):
    """Append each element of ``content`` to ``filename``, one per line.

    :param content: sequence of values; each is written as ``str(value)``
    :param filename: path of the output text file
    :param mode: file-open mode, ``'a'`` (append) by default
    """
    # ``with`` guarantees the handle is closed even if a write fails;
    # the original left the file open on error.
    with open(filename, mode) as out:
        for item in content:
            out.write(str(item) + '\n')
# --- Experiment driver -------------------------------------------------------
# Sweep the interaction threshold over [0, 1] in steps of 0.02.  For each
# threshold value run T independent simulations (an agents, sn propositions,
# N interaction steps each), then append the final mean similarity/cardinality
# and their standard deviations across runs to a results file.
start1 = time.time()
#long running
#do something other
# NOTE(review): time.clock() was removed in Python 3.8 -- this script appears
# to target an older interpreter; use time.perf_counter() on modern Pythons.
start = time.clock()
threshold = 0#(more than 0.1)
# Per-threshold result accumulators (one entry appended per threshold value).
thre_store = []
sim_store = []
card_store = []
stdsim_store = []
stdcard_store = []
while (threshold <=1):
    an = 50 #Number of agents
    sn = 5 #Number of propsitions
    N = 2000 # Times of iterations
    T = 50
    # Per-run traces for this threshold: T lists of length N each.
    sim = []
    card = []
    AVEsim=[]
    AVEcard = []
    convergePos = []
    belief_num =[]
    #averagesim = [0]*T
    #cardinality = [0]*T
    #agents = initialise_agents(an, sn);
    for i in range (T):
        '''when change the initialise method,
        Remember to change the FILENAME and FIGURENAME'''
        #agents = initialise_agents(an, sn)
        # random_initialise / create_world are defined earlier in this file;
        # deepcopy keeps the pristine population while the copy is mutated.
        agents = random_initialise(an, sn,create_world(sn))
        trans = copy.deepcopy(agents)
        #print (agents)
        (averagesim, cardinality, store) = iterationSim(trans,an,N,threshold)
        # Final distinct beliefs of this run and their decimal codes.
        store2 = deleteDuplicatedElementFromList(store)
        dec = trans2dec(store2)
        pos = copy.deepcopy(dec)
        #print (agents)
        #print (pos)
        convergePos.append(pos)
        belief_num.append(len(store2))
        sim.append(averagesim)
        card.append(cardinality)
    #print (averagesim)
    # Average the T traces element-wise, and count which belief (decimal code)
    # each run converged to.
    sumsim = [0]*len(averagesim)
    sumcard = [0]*len(cardinality)
    countagt = [0]*int(math.pow(2,sn))
    for i in range (T):
        sumsim = (np.sum([sumsim,sim[i]],axis = 0))
        sumcard =(np.sum([sumcard,card[i]],axis = 0))
        countagt[int(convergePos[i][0])]=countagt[int(convergePos[i][0])]+1
    xaxis = np.arange(1, len(countagt)+1)
    AVEsim=sumsim/T
    AVEcard=sumcard/T
    stdsim = np.std(sim,axis=0)
    stdcard = np.std(card,axis=0)
    # Subsample every 50th iteration (used for error-bar plotting elsewhere).
    stdsim_f = []
    stdcard_f= []
    AVEcard_f = []
    AVEsim_f = []
    index = []
    j=0
    while j < len(stdsim):
        index.append(j)
        stdsim_f.append(stdsim[j])
        stdcard_f.append(stdcard[j])
        AVEcard_f.append(AVEcard[j])
        AVEsim_f.append(AVEsim[j])
        j = j+50
    # Keep only the final-step statistics for this threshold value.
    thre_store.append(threshold)
    stdsim_store.append(stdsim[-1])
    stdcard_store.append(stdcard[-1])
    sim_store.append(AVEsim[-1])
    card_store.append(AVEcard[-1])
    threshold = threshold +0.02
filename ='data_similar'+str(T)
path = ''
text_save([thre_store, stdsim_store, stdcard_store,sim_store,card_store],path+filename,mode='a')
end1 = time.time()
print("Time1 used:",end1-start1)
elapsed = (time.clock() - start)
print("Time used:",elapsed)
'''
filename ='data'+str(an)+'_'+str(sn)+'_'+str(N)+'_'+str(T)+'_'+str(int(threshold*10))#+'single'
figurename = str(an)+'_'+str(sn)+'_'+str(N)+'_'+str(T)+'_'+str(int(threshold*10))#+'single'
path = 'figsSimlarity/'
plt.figure(1)
plt.plot(AVEsim)
plt.ylabel('Similarity')
plt.xlabel('Iterations')
#plt.ylim((0,1))
plt.title('Similarity-Iteration')
#plt.legend()
plt.savefig(path+'Sim'+figurename+'.png',dpi = 600)
plt.show()
plt.figure(2)
plt.plot(AVEsim,color = 'brown')
plt.errorbar(index, AVEsim_f, yerr = stdsim_f, fmt ='o',color = 'brown')
plt.ylabel('Similarity')
plt.xlabel('Iterations')
#plt.ylim((0,1))
plt.title('Similarity-Iteration with errorbar')
#plt.legend()
plt.savefig(path+'SimErr'+figurename+'.png',dpi = 600)
plt.show()
plt.figure(3)
plt.plot(AVEcard)
plt.ylabel('Cardinality')
plt.xlabel('Iterations')
#plt.ylim((0,int(math.pow(2,sn))))
plt.title('Cardinality-Iteration')
#plt.legend()
plt.savefig(path+'Card'+figurename+'.png',dpi = 600)
plt.show()
plt.figure(4)
plt.plot(AVEcard,color = 'brown')
plt.errorbar(index, AVEcard_f, yerr = stdcard_f, fmt ='o',color = 'brown')
plt.ylabel('Cardinality')
plt.xlabel('Iterations')
#plt.ylim((0,int(math.pow(2,sn))))
plt.title('Cardinality-Iteration with errorbar')
#plt.legend()
plt.savefig(path+'CardErr'+figurename+'.png',dpi = 600)
plt.show()
plt.figure(5)
plt.bar(xaxis,countagt,color = 'black',width = 0.4)
plt.xlabel('Agent Number')
plt.ylabel('Times')
plt.title("Times of covergence")
plt.savefig(path+'agt'+figurename+'.png',dpi = 600)
plt.show()
#print (belief_num)
plt.figure(6)
plt.plot(belief_num,color = 'black')
plt.ylabel('Number of Final Beliefs')
plt.xlabel('Iteration')
#plt.ylim((0,int(math.pow(2,sn))))
plt.title("Number of Final Beliefs")
plt.savefig(path+'numbef'+figurename+'.png',dpi = 600)
plt.show()
text_save([AVEsim, AVEcard, stdsim_f,stdcard_f,countagt,elapsed],path+filename+'txts',mode='a')
f= open(path+filename, 'wb')
pickle.dump([AVEsim, AVEcard, stdsim_f,stdcard_f,countagt,elapsed], f)
f.close()
''' | [
"35967051+zxliu17@users.noreply.github.com"
] | 35967051+zxliu17@users.noreply.github.com |
7321886ae0c42a4712676a31e27e7eabcdd0dd76 | 8d770eefa9c0b0e6605f99c47ac52c4d0c60f064 | /cfod/__init__.py | b42f6fb4bbd8a8bcc12edac7a0eaf93146cd4ff5 | [
"MIT"
] | permissive | chime-frb-open-data/chime-frb-open-data | a1ff270568d80686e2b705c92ce1c42fdf815af2 | fd488ed8dc1c81c9571dc9595569f096e94c5602 | refs/heads/master | 2023-08-30T00:28:47.141794 | 2023-08-24T19:10:14 | 2023-08-24T19:10:14 | 166,888,117 | 11 | 7 | MIT | 2023-08-17T14:11:09 | 2019-01-21T22:14:28 | Python | UTF-8 | Python | false | false | 923 | py | import logging
import os
from pathlib import Path
from cfod.routines import catalogs
# Package-level logger; verbosity is raised to DEBUG via the env var below.
logging.basicConfig(format="%(levelname)s:%(message)s")
log = logging.getLogger(__name__)
# Filesystem layout: the bundled data directory sits next to this module.
BASE_DIR: Path = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR: Path = BASE_DIR / "data"
CSV_CATALOG: Path = DATA_DIR / "catalog.csv"
FITS_CATALOG: Path = DATA_DIR / "catalog.fits"
# Any non-empty DEBUG environment value enables debug logging (the value is a
# string, so even DEBUG=0 is truthy here).
if os.environ.get("DEBUG", False):
    log.setLevel(logging.DEBUG)
    log.debug("Logging Level: Debug")
# First-import side effect: download the catalog files if none are present.
if not len(list(DATA_DIR.glob("catalog.*"))):
    from cfod.utilities import fetch
    log.debug("Fetching CHIME/FRB Catalogs")
    fetch.csv_catalog()
    fetch.fits_catalog()
# Expose a module-level ``catalog`` object, preferring the CSV variant.
if CSV_CATALOG.exists():
    catalog = catalogs.Catalogs(filename=CSV_CATALOG.absolute().as_posix())
elif FITS_CATALOG.exists():
    catalog = catalogs.Catalogs(filename=FITS_CATALOG.absolute().as_posix())
else:
    log.error("Unable to locate CHIME/FRB Catalog.")
| [
"charanjotbrar@gmail.com"
] | charanjotbrar@gmail.com |
19449ceb8ae2c48004179fa709a46ba407f0cf5e | e5860a008d6a6265dd92501450cace40367e9b7d | /MiniJavaCompiler/MiniJavaCompiler/bin/Debug/netcoreapp2.1/exampleC.tac | 4cc277b5c9f07ef32c95ea53ca33b5fecfc26931 | [] | no_license | jdholst/MiniJavaCompiler | fd52d5c066b720af8b41b45e573a677c753be3a3 | 9b0c79c91796076bd9a7c270e4e5a6cd155d8aaf | refs/heads/master | 2022-06-18T20:59:47.899745 | 2020-05-08T17:36:07 | 2020-05-08T17:36:07 | 237,890,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | tac | proc test1
_bp-6 = 5
_bp+4 = _bp-6
_bp-6 = 10
_bp+6 = _bp-6
_bp-6 = _bp+4 + _bp+6
_bp-6 = _bp-6
endp test1
proc main
endp main
| [
"jacob.holst@jacks.sdstate.edu"
] | jacob.holst@jacks.sdstate.edu |
7379d9371c3922d86ed73492c5400df4bd96a4b1 | fb72d7eb880c7777e414587347d54a0446e962a3 | /pycis/wrappers/base_wrapper.py | ce076d12e21a7994866c5b8b5224567e4a2ce62d | [
"MIT"
] | permissive | marcwebbie/pycis | 4ad806aeb9f257f5178dcb19741666b0f4576721 | 4c123c5805dac2e302f863c6ed51c9e2e05a67c8 | refs/heads/master | 2016-09-06T01:10:11.301029 | 2013-12-28T09:39:36 | 2013-12-28T09:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | class BaseWrapper(object):
""" BaseWrapper gives the default interface for wrappers.
It also add utility functions to be shared by sub classes.
Sub classes should override:
self.site_url:
Wrapped site base url
get_streams(self, media):
Get a list of stream for given Media
search(self, search_query, best_match=False):
Search wrapped site for Media objects. Return a list of Media.
When best_match is True it returns only one media with best
search match ratio.
index(self):
Return a list of options to be navigated by user
"""
    def __init__(self):
        # Base URL of the wrapped site; concrete wrapper subclasses are
        # expected to overwrite this (the base class leaves it as None).
        self.site_url = None
def __str__(self):
class_name = self.__class__.__name__
return "{}(name={}, site_url={})".format(class_name, self.name, self.site_url)
@property
def name(self):
class_name = self.__class__.__name__.lower().replace('wrapper', '')
return class_name
def get_streams(self, media):
raise NotImplemented("get_streams wasn't overriden by base class")
def get_children(self, media):
raise NotImplemented("get_children wasn't overriden by base class")
def search(self, search_query, best_match=False):
raise NotImplemented("search wasn't overriden by base class")
    def index(self):
        """Return a list of options to be navigated, or None when unsupported."""
        return None
| [
"marcwebbie@gmail.com"
] | marcwebbie@gmail.com |
67002af47a503c77d6428d2314e97e80f26e92a0 | a683cd2e1139e314d1473a70dc85d54c29f7d7de | /www/app.py | 3787d053e5662211cdf512aca1be0fdf00875515 | [] | no_license | narata/awesome-webapp | d8dc511215ee3588dca3e3ebf83a17986be918c8 | 752b7e5bca88deb6803be8985e600912ec3a3f21 | refs/heads/master | 2020-03-09T12:41:14.974775 | 2018-04-12T16:05:39 | 2018-04-12T16:05:39 | 128,791,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import logging
import asyncio
from aiohttp import web
logging.basicConfig(level=logging.INFO)
def index(request):
    """Handle GET / by returning a plain 'abc' body.

    :param request: aiohttp request object (unused)
    :return: ``web.Response`` with body ``b'abc'``
    """
    # Fix: aiohttp's Response ``body`` must be bytes; passing the str 'abc'
    # fails at request time (use ``text=...`` for str payloads).
    return web.Response(body=b'abc')
async def init(loop):
    """Start the HTTP server on 127.0.0.1:9000 and return the server object.

    The redundant ``@asyncio.coroutine`` decorator was dropped: ``init`` is
    already a native ``async def`` coroutine, and ``asyncio.coroutine`` was
    deprecated in Python 3.8 and removed in 3.11, so the decorated module
    failed to import on modern interpreters.

    :param loop: event loop used to create the listening server
    :return: the created asyncio server
    """
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    srv = await loop.create_server(app.make_handler(), '127.0.0.1', 9000)
    logging.info('server started at http://127.0.0.1:9000...')
    return srv
# Bootstrap: create the server on the default event loop, then serve
# requests until the process is interrupted.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
| [
"mac@narata.com"
] | mac@narata.com |
6726e26d26e7add78314772b18f26038174e56e8 | 64a80df5e23b195eaba7b15ce207743e2018b16c | /Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_onewire/device.py | 8e2dcb3c176bb555d7382ad17baa965dce45f366 | [] | no_license | aferlazzo/messageBoard | 8fb69aad3cd7816d4ed80da92eac8aa2e25572f5 | f9dd4dcc8663c9c658ec76b2060780e0da87533d | refs/heads/main | 2023-01-27T20:02:52.628508 | 2020-12-07T00:37:17 | 2020-12-07T00:37:17 | 318,548,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | # The MIT License (MIT)
#
# Copyright (c) 2017 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_onewire.device`
====================================================
Provides access to a single device on the 1-Wire bus.
* Author(s): Carter Nelson
"""
__version__ = "1.2.2"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_OneWire.git"
_MATCH_ROM = b"\x55"
class OneWireDevice:
    """A single device on the 1-Wire bus, addressed through a shared bus driver."""

    def __init__(self, bus, address):
        # Keep references to the shared bus driver and this device's ROM address.
        self._bus = bus
        self._address = address

    def __enter__(self):
        # Entering the context selects this device's ROM so subsequent bus
        # traffic is addressed to it.
        self._select_rom()
        return self

    def __exit__(self, *exc):
        # Nothing to release; never suppress exceptions.
        return False

    def readinto(self, buf, *, start=0, end=None):
        """
        Read from the device into ``buf``; ``len(buf)`` bytes are read.

        ``start``/``end`` slice the buffer as ``buf[start:end]`` would, but
        without allocating a copy.

        :param bytearray buf: buffer to write into
        :param int start: index to start writing at
        :param int end: index to write up to but not include
        """
        self._bus.readinto(buf, start=start, end=end)
        covers_whole_buffer = start == 0 and end is None
        # Only full reads of at least 8 bytes are CRC-checked (same condition
        # order as before, so crc8 is invoked in exactly the same cases).
        if covers_whole_buffer and len(buf) >= 8 and self._bus.crc8(buf):
            raise RuntimeError("CRC error.")

    def write(self, buf, *, start=0, end=None):
        """
        Write the bytes from ``buf`` to the device.

        ``start``/``end`` slice the buffer as ``buffer[start:end]`` would,
        but without allocating a copy.

        :param bytearray buf: buffer containing the bytes to write
        :param int start: index to start writing from
        :param int end: index to read up to but not include
        """
        return self._bus.write(buf, start=start, end=end)

    def _select_rom(self):
        # Reset pulse, then the MATCH ROM command, then this device's ROM id.
        self._bus.reset()
        self.write(_MATCH_ROM)
        self.write(self._address.rom)
| [
"aferlazzo@gmail.com"
] | aferlazzo@gmail.com |
1ed7c76a486eeeb7bb21f4557d9cbf587e6ff3ba | a4195b810ae2b4739f8a6a0575ca36e06f24b3fa | /server/utils.py | c29ba62541bb2a86d1de9a3636faa6afc9546869 | [
"MIT"
] | permissive | githubalvin/hi-box | e2821a02d87ed2b28881562ae0a04318dddfa011 | 4bca09ea6df9c91fd2344a4346d037f5a1be643b | refs/heads/master | 2022-12-03T08:36:14.547128 | 2020-08-24T13:13:53 | 2020-08-24T13:13:53 | 288,623,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py |
if "_SINGLE_OBJ" not in globals():
_SINGLE_OBJ = {}
class SingletonMeta(type):
    # Metaclass that wraps a singleton class's ``__init__`` so the real
    # initialiser body runs at most once per class; later instantiations
    # return early.  Initialised objects are recorded in the module-level
    # ``_SINGLE_OBJ`` registry, keyed by class name.
    def __new__(cls, name ,bases, attrs):
        # Prefer an ``__init__`` defined on the class itself; otherwise fall
        # back to the FIRST base class's ``__init__`` only.
        init_func = attrs.get("__init__", None)
        if init_func is None and bases:
            init_func = getattr(bases[0], "__init__", None)
        if init_func is not None:
            def __myinit__(obj, *args, **kwargs):
                # ``single_inited`` is set on the class after the first
                # successful __init__ call; afterwards re-init is skipped.
                if obj.__class__.single_inited:
                    return
                _SINGLE_OBJ[obj.__class__.__name__] = obj
                init_func(obj, *args, **kwargs)
                obj.__class__.single_inited = True
            attrs["__init__"] = __myinit__
        return super(SingletonMeta, cls).__new__(cls, name ,bases, attrs)
class Singleton(metaclass=SingletonMeta):
    """Singleton base class: at most one instance exists per subclass.

    Instances are cached in the module-level ``_SINGLE_OBJ`` registry keyed
    by class name; repeated construction returns the cached object and (via
    ``SingletonMeta``) skips re-running ``__init__``.
    """
    def __new__(cls, *args, **kwargs):
        if cls.__name__ not in _SINGLE_OBJ:
            # Fix: ``object.__new__`` must be called with the class only.
            # Forwarding *args/**kwargs raised
            # "object.__new__() takes exactly one argument" whenever a
            # subclass was instantiated with constructor arguments.
            obj = super(Singleton, cls).__new__(cls)
            obj.__class__.single_inited = False
            _SINGLE_OBJ[cls.__name__] = obj
        return _SINGLE_OBJ[cls.__name__]
| [
"370736605@qq.com"
] | 370736605@qq.com |
dd2814da60c91c78a52183544b9e0e11c7d2484a | 64f608bd1313f21151443970ef28bca8c8d6fcd0 | /module_reverse.py | f8d60624ceb222648c75a6235064c79b943aa0d3 | [] | no_license | AshishWilson/Python_lab_MCA | 86b194eb0585412e2db73c9a8c1a808b7a33e4b2 | d60944ee666e1411bb383f750b06672abca31e1f | refs/heads/main | 2023-02-21T05:11:00.897017 | 2021-01-25T09:33:07 | 2021-01-25T09:33:07 | 332,985,015 | 0 | 0 | null | 2021-01-26T05:46:55 | 2021-01-26T05:46:54 | null | UTF-8 | Python | false | false | 57 | py | def reverse(get_string):
return get_string[ : :-1]
| [
"noreply@github.com"
] | noreply@github.com |
49af52b275442377a1ba8e116182257f67349b56 | 4753858dabfab1f3ba9f8820680cb1ab4e30c906 | /rcdsubbot/lexer.py | c14ab28d649ee23a72d96adbae72c578c6b48b93 | [
"MIT"
] | permissive | habibutsu/tapl-py | a8077c77ca2f5ca55316bc0141c0e833db862b07 | 8c33255cd1995b75ee006c91e2a6179429843339 | refs/heads/master | 2020-05-21T13:14:50.701971 | 2015-06-29T23:44:21 | 2015-06-29T23:44:21 | 33,068,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | import ply.lex as lex
import re
class Lexer:
    # PLY-based lexer for a small lambda-calculus language (rcdsubbot).
    # NOTE: PLY derives token regexes from the DOCSTRINGS of the ``t_*``
    # methods and from ``t_<NAME>`` string attributes -- the raw-string
    # "docstrings" below are patterns, not documentation; do not edit them.
    #
    # Literal symbol tokens; ``re.escape`` is applied to each value in
    # __init__ (they are stored un-escaped here, hence the name).  The
    # commented-out entries are symbols of the fuller TAPL grammar that this
    # sub-language does not use.
    _unescape_tokens = [
        # Symbols
        ("_", "USCORE"),
        # ("'", "APOSTROPHE"),
        # ("\"", "DQUOTE"),
        # ("!", "BANG"),
        # ("#", "HASH"),
        # ("$", "TRIANGLE"),
        # ("*", "STAR"),
        # ("|", "VBAR"),
        (".", "DOT"),
        (";", "SEMI"),
        (",", "COMMA"),
        # ("/", "SLASH"),
        (":", "COLON"),
        # ("::", "COLONCOLON"),
        ("=", "EQ"),
        # ("==", "EQEQ"),
        # ("[", "LSQUARE"),
        # ("<", "LT"),
        ("{", "LCURLY"),
        ("(", "LPAREN"),
        # ("<-", "LEFTARROW"),
        # ("{|", "LCURLYBAR"),
        # ("[|", "LSQUAREBAR"),
        ("}", "RCURLY"),
        (")", "RPAREN"),
        # ("]", "RSQUARE"),
        # (">", "GT"),
        # ("|}", "BARRCURLY"),
        # ("|>", "BARGT"),
        # ("|]", "BARRSQUARE"),
        # Special compound symbols:
        # (":=", "COLONEQ"),
        ("->", "ARROW"),
        # ("=>", "DARROW"),
        # ("==>", "DDARROW"),
    ]
    # Regex-valued tokens; currently empty (identifiers are handled by the
    # ``t_createID`` method below instead).
    _tokens = (
        #(r"[a-z_][a-z_0-9]*", "LCID"),
        #(r"[A-Z_][A-Z_0-9]*", "UCID"),
    )
    # Reserved words mapped to their token types.
    reserved = {
        "lambda" : "LAMBDA",
        "Top": "TTOP",
        "Bot": "TBOT"
    }
    # Base token-name list; extended dynamically in __init__ with the symbol
    # tokens above and the reserved-word token types.
    tokens = [
        "LCID",
        "INTV"
        # "UCID"
    ]
    def __init__(self, **kwargs):
        # Copy the class-level list so instances do not share/mutate it.
        self.tokens = list(self.tokens)
        # Install each literal symbol as a ``t_<NAME>`` regex attribute.
        for value, name in self._unescape_tokens:
            self.tokens.append(name)
            setattr(self, "t_%s" % name, re.escape(value))
        # Install any regex-valued tokens, without clobbering explicit rules.
        for value, name in self._tokens:
            self.tokens.append(name)
            attr_name = "t_%s" % name
            if not hasattr(self, attr_name):
                setattr(self, attr_name, value)
        self.tokens += self.reserved.values()
        # PLY scans this object's ``t_*`` attributes to build the lexer.
        self.lexer = lex.lex(module=self, **kwargs)
    def t_createID(self, t):
        r'[A-Za-z_][A-Za-z_0-9]*'
        # Reserved words win; otherwise classify by the leading character:
        # uppercase -> UCID, else LCID.
        ttype = self.reserved.get(t.value)
        if ttype is None:
            if t.value[0].isupper():
                ttype = "UCID"
            else:
                ttype = "LCID"
        t.type = ttype
        return t
    def t_comment(self, t):
        r'(/\*(.|\n)*?\*/)|(//.*)'
        # C-style block and line comments are discarded (no token returned).
        pass
    # Define a rule so we can track line numbers
    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)
    # A string containing ignored characters (spaces and tabs)
    t_ignore = ' \t'
    # Error handling rule
    def t_error(self, t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
    def t_INTV(self, t):
        r'([0-9]+)'
        # Integer literals carry their int value on the token.
        t.value = int(t.value)
        return t
| [
"habibutsu@gmail.com"
] | habibutsu@gmail.com |
a5b28f5ed094fbac63ce8b975715abc9271525ea | 3b07655b0da227eec2db98ab9347c9e7bdbc3ffd | /scielomanager/journalmanager/migrations/0004_auto__add_articleslinkage__add_field_article_articles_linkage_are_pend.py | 17462aec38eb32090fcba388c22b5043891a505a | [
"BSD-2-Clause"
] | permissive | scieloorg/scielo-manager | a1b7cc199e5f7c4d4b34fd81d46e180028299d7d | 0945f377376de8ef0ada83c35b4e2312062cdf45 | refs/heads/beta | 2023-07-12T08:23:59.494597 | 2017-09-28T18:39:39 | 2017-09-28T18:39:39 | 1,778,118 | 9 | 5 | BSD-2-Clause | 2023-09-05T19:42:58 | 2011-05-20T19:41:53 | Python | UTF-8 | Python | false | false | 35,762 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Apply the migration: create the ArticlesLinkage link table and add
        # three new columns to the Article table.
        # Adding model 'ArticlesLinkage'
        db.create_table('journalmanager_articleslinkage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('referrer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='links_to', to=orm['journalmanager.Article'])),
            ('link_to', self.gf('django.db.models.fields.related.ForeignKey')(related_name='referrers', to=orm['journalmanager.Article'])),
            ('link_type', self.gf('django.db.models.fields.CharField')(max_length=32)),
        ))
        db.send_create_signal('journalmanager', ['ArticlesLinkage'])

        # Adding field 'Article.articles_linkage_is_pending'
        db.add_column('journalmanager_article', 'articles_linkage_is_pending',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Adding field 'Article.doi'
        db.add_column('journalmanager_article', 'doi',
                      self.gf('django.db.models.fields.CharField')(default=u'', max_length=2048, db_index=True),
                      keep_default=False)

        # Adding field 'Article.article_type'
        db.add_column('journalmanager_article', 'article_type',
                      self.gf('django.db.models.fields.CharField')(default=u'', max_length=32, db_index=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the 'ArticlesLinkage' table and the
        three 'Article' columns added by the forward step.
        """
        # Deleting model 'ArticlesLinkage'
        db.delete_table('journalmanager_articleslinkage')
        # Deleting field 'Article.articles_linkage_is_pending'
        db.delete_column('journalmanager_article', 'articles_linkage_is_pending')
        # Deleting field 'Article.doi'
        db.delete_column('journalmanager_article', 'doi')
        # Deleting field 'Article.article_type'
        db.delete_column('journalmanager_article', 'article_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.aheadpressrelease': {
'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.article': {
'Meta': {'object_name': 'Article'},
'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'article_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}),
'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}),
'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}),
'xml_version': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'journalmanager.articleslinkage': {
'Meta': {'object_name': 'ArticlesLinkage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}),
'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.datachangeevent': {
'Meta': {'object_name': 'DataChangeEvent'},
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journal': {
'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'national_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journaltimeline': {
'Meta': {'object_name': 'JournalTimeline'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''"}),
'since': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.membership': {
'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.pressreleasearticle': {
'Meta': {'object_name': 'PressReleaseArticle'},
'article_pid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"})
},
'journalmanager.pressreleasetranslation': {
'Meta': {'object_name': 'PressReleaseTranslation'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.regularpressrelease': {
'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.section': {
'Meta': {'ordering': "('id',)", 'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.studyarea': {
'Meta': {'object_name': 'StudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
| [
"gustavo@gfonseca.net"
] | gustavo@gfonseca.net |
34d2f8fdea69394296424fcca4b9210c110d4620 | 353a95c6762e9fb2522d1109a0df4a0b96670890 | /webapp/profile/forms.py | 2f9c10d156ff11fe884c36c25a4e6d4eb895092e | [] | no_license | gearbox/yandex2goods | 8caa724ba5fba4a84e9e454a530fd076bac13f59 | 918c2e9a6c7d1a0ad5309372821a39a98ab9dfd7 | refs/heads/master | 2021-01-07T09:43:14.378827 | 2020-03-22T19:00:02 | 2020-03-22T19:00:02 | 241,653,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | from flask_wtf import FlaskForm
# from flask_login import current_user
from wtforms import StringField, SubmitField # , PasswordField, TextField
# from wtforms.fields.html5 import URLField
# from wtforms.widgets.html5 import URLInput
from wtforms.validators import DataRequired, url # , URL # , Email, EqualTo, Length
def validate_url(_form, field):
    """Inline WTForms validator: ensure ``field.data`` carries a URL scheme.

    Prepends ``http://`` when the value has no explicit scheme so that the
    stock ``url()`` validator placed after it accepts bare domains such as
    ``example.com``.

    Fixes two defects of the previous ``startswith('http')`` check:
    it wrongly skipped hosts that merely begin with "http" (e.g.
    ``httpbin.org``), and it corrupted values that already had a non-http
    scheme (``ftp://x`` became ``http://ftp://x``).  Empty/None values are
    left alone for ``DataRequired`` to reject.
    """
    if field.data and '://' not in field.data:
        field.data = 'http://' + field.data
class CompanyProfile(FlaskForm):
    """Company/shop profile form.

    All field labels and validation messages are user-facing Russian text
    rendered in the UI — they are runtime strings, not comments.
    """
    # Optional contact e-mail; no validators attached.
    email = StringField('Email')
    # Required: legal company name.
    company_name = StringField('Название компании', [
        DataRequired(message='Это обязательное поле')
    ])
    # Required: shop display name.
    shop_name = StringField('Название магазина', [
        DataRequired(message='Это обязательное поле')
    ])
    # Required: shop URL.  validate_url runs before url() and prepends a
    # scheme to bare domains so the stock validator accepts them.
    shop_url = StringField('URL магазина', [
        DataRequired(message='Это обязательное поле'),
        validate_url,
        url(message='URL адрес указан с ошибкой'),
    ])
    # Fields below are disabled for now; kept verbatim for re-enabling.
    # shop_currency = StringField('Принимаемая валюта', [
    #     DataRequired(message='Это обязательное поле')
    # ])
    # currency_rate = StringField('Курс валюты к рублю', [
    #     DataRequired(message='Это обязательное поле')
    # ])
    # shop_outlet = StringField('ID Склада', [
    #     DataRequired(message='Это обязательное поле')
    # ])
    submit = SubmitField('Сохранить')
| [
"mtayrov@gmail.com"
] | mtayrov@gmail.com |
64b0ae44f2089d3323417912e1a7691351477d3c | 3ee8df34733bd96261f5e4cc51c0880c08f65a3b | /Test-Model.py | 76b286e0d6729656708daaa56178232aabad6d10 | [
"MIT"
] | permissive | skynet1010/ICoTSC2020_Example | e702345d29fb99737c2811a002f25662941d9509 | 8340ef10c06bc1e9d1097763f962e03390694ea0 | refs/heads/master | 2022-11-14T22:50:12.231774 | 2020-07-15T09:51:31 | 2020-07-15T09:51:31 | 279,590,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from Data.DataPreprocessor import DataPreprocessor
from Model.TestSession import TestSession
from Model.TestDefinition import TestDefinition
from Model.VectorOutput import create_model_vector_gru
# Build the preprocessing pipeline and a named test session, then register a
# single GRU vector-output experiment and run it.
dp = DataPreprocessor()
sess = TestSession(name='Test', resample_window=30, data_preprocessor=dp)
# NOTE(review): prediction_length=20 appears to need to match the first
# argument of create_model_vector_gru below — confirm in TestDefinition.
sess.tests.append(TestDefinition(batch_size=128, sequence_length=200, prediction_length=20, loss='MSE', optimizer='Adam', steps_epoch=200,
                                 epoch=300, layer=create_model_vector_gru(20, dp.num_sensors), model_name='VectorOutput_GRU'))
sess.run()
| [
"andreas.klos@fernuni-hagen.de"
] | andreas.klos@fernuni-hagen.de |
483b6068787fbdd1524d75258e6e39bb8c9191ec | 4a4f4f4cc7585ab27105346d2999d7eb97851a71 | /BloodTestReportOCR/pd_predict.py | 12d731dcac61dbd4d73b2450641b3163884298ce | [
"Apache-2.0"
] | permissive | sa16225193/BloodTestReportOCR | 82eeb3b3bc037f9365a1a6639a649024337bec8a | 95f25d3426e5e4ad850b0fbfb651b9645dea4c32 | refs/heads/master | 2021-07-22T15:47:47.930078 | 2017-09-29T03:33:09 | 2017-09-29T03:33:09 | 109,948,944 | 2 | 2 | null | 2017-11-08T08:41:59 | 2017-11-08T08:35:05 | Python | UTF-8 | Python | false | false | 397 | py | # -*- coding: utf-8 -*-
from py_paddle import swig_paddle
import sys
sys.path.append("..")
from PaddlePaddle import prediction_sex,prediction_age
def predict(arr):
    """Run the sex and age PaddlePaddle models on one blood-test sample.

    arr: an object exposing .tolist() (e.g. a numpy array) holding the
         nested feature row.
    Returns a (sex, age) tuple of model prediction results.
    """
    # CPU-only inference.
    swig_paddle.initPaddle("--use_gpu=0")
    sample = [arr.tolist()]
    # Pad the feature row with four zero placeholders expected by the models.
    sample[0][0].extend([0, 0, 0, 0])
    sex = prediction_sex.predict(sample)
    age = prediction_age.predict(sample)
    return sex, age
| [
"王孟之"
] | 王孟之 |
50ed86316a7f9d4f0518c79fc491aace11f5da1b | 29b240fed70f9585567fa83a627dc6a7a11e4fba | /python/ledticker/postillon.py | cc80b4f66d5939464a7d3ae4f6417b72f2fee9da | [] | no_license | hickerspace/API-Examples | 2567f582b293afdecfd5d52f9bdbbb5ab351bc81 | d4d2696c45451f4591c14e0fd1e1cc2db54921a7 | refs/heads/master | 2020-12-24T13:44:30.149829 | 2013-05-08T16:54:15 | 2013-05-08T16:54:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import feedparser, re, random, helper
def replaceAll(text, replaceDict):
    """Apply every old -> new substitution from replaceDict to text."""
    for old, new in replaceDict.items():
        text = text.replace(old, new)
    return text
def postillon():
    """Fetch the Postillon RSS feed and return a ticker string of up to five
    randomly chosen newsticker messages, joined with ' +++ '."""
    messages = []
    feed = feedparser.parse("http://feeds.feedburner.com/blogspot/rkEL")
    for entry in feed["entries"]:
        if entry["title"][:10] == "Newsticker":
            # remove html
            ticker = re.sub('<[^<]+?>', '', entry["description"])
            # get newsticker messages (raw string: '\+' is an invalid escape
            # warning on Python 3, same pattern either way)
            ticker = re.findall(r"\+\+\+\+ (.*?) \+\+\+\+", ticker)
            for t in ticker:
                messages.append(t)
    # List comprehension instead of map(...)[:15]: slicing a map object
    # breaks on Python 3, while the comprehension behaves identically on 2.
    postillon = [helper.replaceSpChars(msg.encode("utf-8")) for msg in messages][:15]

    selected = []
    try:
        for i in random.sample(range(len(postillon)), min([5, len(postillon)])):
            selected.append(postillon[i])
    except ValueError:
        # sample() raises ValueError when asked for more items than exist;
        # fall back to everything we have.
        selected = postillon

    return "Der Postillon: %s" % " +++ ".join(selected)
| [
"basti@h1747297.stratoserver.net"
] | basti@h1747297.stratoserver.net |
3f8a95e7e90757fac808d96cfcd2c317291992dc | d528235dffbe582adda5d697dafc045091b1783e | /code/visualizer.py | f86b3a2a667ad22b284197a3c659a024deb30af4 | [] | no_license | slugwarz05/proj2-code | 9bdbacb1a507e19ce56b163968a5a29bd9b3cb00 | cab17a6762c88b00a06f8c0b521360c440e45111 | refs/heads/master | 2020-12-27T07:04:32.498421 | 2016-05-01T20:01:44 | 2016-05-01T20:01:44 | 56,452,111 | 0 | 0 | null | 2016-04-17T18:36:14 | 2016-04-17T18:36:14 | null | UTF-8 | Python | false | false | 3,851 | py | import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as plticker
class Visualizer(object):
    """Interactive matplotlib scatter-plot of one subplot per data series."""

    def __init__(self, log_scale=False, series=()):
        """
        Creates a new Visualizer.

        Args:
            log_scale: Boolean. Whether to use the log scale on the y-axes.
            series: Iterable of series labels; one subplot is created per label.

        Returns:
            A new Visualizer instance.
        """
        # Set whether or not to use a log scale.
        self.log_scale = log_scale

        # Set the data series for the graph.
        self.series = series

        # Enable interactive mode.
        plt.ion()

        # Initialize the plot.
        f, self.axarr = plt.subplots(len(series), sharex=True)
        # plt.subplots() returns a bare Axes (not an array) when only one
        # subplot is requested; normalize so self.axarr is always indexable.
        self.axarr = np.atleast_1d(self.axarr)
        self._setup()

    def update(self):
        """
        Updates the scatter plot, re-rendering it for viewing by the user.

        Args:
            None.

        Returns:
            None.
        """
        # Plot the data.
        self._plot()

        # If the log scale option has been provided, plot the data using a
        # log-scale.
        if self.log_scale:
            for indx, series in enumerate(self.series):
                self.axarr[indx].set_yscale('log')

        # A short pause so Mac OS X 10.11.3 doesn't break.
        plt.pause(0.0001)

    def add_data(self, x, data=None):
        """
        Adds data for plotting.

        Args:
            x: Integer. A data point for the x-axis on the graph.
            data: Dictionary. Should be a dictionary of the form:

                  { 'series1': Integer, 'series2': Integer }

                  The provided integer data points will be appended to the
                  existing data arrays for each of the provided series.
                  Defaults to no new series data.

        Returns:
            None.
        """
        # Use None instead of a mutable default dict (shared across calls).
        if data is None:
            data = {}

        # Append the given x-axis data point.
        self.x.append(x)

        # For each provided series, append the given data point to the list
        # for the correct series.
        for label, datum in data.iteritems():
            self.y[label]['data'].append(datum)

    def savefig(self, path='results/results.png'):
        """
        Saves the current figure as an image.

        Args:
            path: Destination file path. Defaults to the original hard-coded
                location, so existing callers are unaffected.

        Returns:
            None.
        """
        plt.savefig(path, bbox_inches='tight')

    # Private methods

    def _setup(self):
        """ Sets up the plot. Initializes each series and adds a legend. """
        # Initialize the x-axis data points.
        self.x = []

        # Initialize the y-axis series: a random color and empty data per label.
        self.y = { s: { 'color': np.random.rand(3,1), 'data': [] } for s in self.series }

        # Render the plot.
        self._plot()

        # Create the legend.
        for indx, s in enumerate(self.series):
            self.axarr[indx].legend(loc='center right', bbox_to_anchor=(1.3, 0.5), fancybox=True)

    def _plot(self):
        """ Re-renders the plot. """
        # For each series, plot the current data points.
        for indx, (label, data_dict) in enumerate(self.y.iteritems()):
            data = data_dict['data']
            # Truthiness check; 'len(data) is not 0' relied on small-int
            # identity, which is an implementation detail.
            if data:
                the_min = np.min(data)
                the_max = np.max(data)
                # Aim for roughly three major ticks across the data range.
                spacing = int((the_max - the_min) / float(3.0))
                spacing = 1 if spacing == 0.0 else spacing
                spacing = self._roundup(spacing)
                loc = plticker.MultipleLocator(base=spacing)
                self.axarr[indx].yaxis.set_major_locator(loc)
                self.axarr[indx].scatter(self.x, data, c=data_dict['color'], label=label)

    def _roundup(self, x):
        """ Rounds up to a nice number (the next multiple of a power of ten
        one order of magnitude below x's scale). """
        if x < 100:
            fact = 10
        elif x < 1000:
            fact = 100
        elif x < 10000:
            fact = 1000
        elif x < 100000:
            fact = 10000
        else:
            fact = 100000
        return x if x % fact == 0 else x + fact - x % fact
| [
"matthewbentonmay@gmail.com"
] | matthewbentonmay@gmail.com |
cf7b51a007d973d5c333a9a150c50a6d598fab5d | 11066ab31dea56912363171a10c674648520120c | /node_modules/websocket/build/config.gypi | 0d53ee3dea57af4dea48d4637ec355b03a9a4cf2 | [
"Apache-2.0"
] | permissive | bhaskarpraveen/Ai-nodejs | 25570a5bd05c2e60a9f929345d33c4249872b69b | 6865c8cd3855c8ff1b5c38d1ce3b65336caf79bd | refs/heads/master | 2023-03-04T17:39:36.437079 | 2020-03-02T06:45:44 | 2020-03-02T06:45:44 | 244,299,368 | 0 | 0 | null | 2023-03-02T19:39:42 | 2020-03-02T06:42:42 | JavaScript | UTF-8 | Python | false | false | 5,081 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"arch_triplet": "x86_64-linux-gnu",
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 1,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "63",
"llvm_version": 0,
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/usr",
"node_relative_path": "lib/x86_64-linux-gnu/nodejs:share/nodejs:lib/nodejs",
"node_release_urlbase": "",
"node_shared": "true",
"node_shared_cares": "true",
"node_shared_http_parser": "false",
"node_shared_libuv": "true",
"node_shared_nghttp2": "true",
"node_shared_openssl": "true",
"node_shared_zlib": "true",
"node_tag": "",
"node_target_type": "shared_library",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.64",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/usr/include/nodejs",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.8.0 node/v10.15.2 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/praveen/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"dry_run": "",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/praveen/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/bin/node-gyp",
"prefer_offline": "",
"color": "true",
"no_proxy": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "true",
"access": "",
"also": "",
"unicode": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "10.15.2",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"globalconfig": "/etc/npmrc",
"dev": "",
"init_module": "/home/praveen/.npm-init.js",
"parseable": "",
"globalignorefile": "/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"praveennaidu264@gmail.com"
] | praveennaidu264@gmail.com |
7025d3b3f9bcfb2ad2f093578320bee7358554ef | 01752687e20be48e06a6b863a0036a2524cdf58d | /test/functional/p2p-compactblocks.py | 94c01e0a0d1cda8944145c894264e08c6d9d489d | [
"MIT"
] | permissive | BitcoinInterestOfficial/BitcoinInterest | 8930042b26665f430ff1ae6e25c86c4998d1349c | 9d4eeee6bef0b11ccc569c613daae90d23804b5d | refs/heads/master | 2021-06-21T16:48:58.797426 | 2019-02-01T07:29:49 | 2019-02-01T07:29:49 | 115,038,373 | 30 | 20 | MIT | 2019-02-01T02:55:11 | 2017-12-21T19:10:21 | C | UTF-8 | Python | false | false | 44,276 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(NodeConnCB):
    """P2P test peer that records compact-block-related traffic.

    Tracks every received sendcmpct message and remembers which blocks the
    node has announced to us (via inv, headers, or cmpctblock), so tests can
    synchronize on "block X has been announced".
    """
    def __init__(self):
        super().__init__()
        # Every sendcmpct message received, in arrival order.
        self.last_sendcmpct = []
        # Set whenever any block announcement arrives; cleared by
        # clear_block_announcement().
        self.block_announced = False
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.announced_blockhashes = set()

    def on_sendcmpct(self, conn, message):
        # Record the peer's compact-block preferences for later inspection.
        self.last_sendcmpct.append(message)

    def on_cmpctblock(self, conn, message):
        # A compact block counts as a block announcement.
        self.block_announced = True
        self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
        self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)

    def on_headers(self, conn, message):
        # Every header in a headers message counts as an announcement.
        self.block_announced = True
        for x in self.last_message["headers"].headers:
            x.calc_sha256()
            self.announced_blockhashes.add(x.sha256)

    def on_inv(self, conn, message):
        # Only MSG_BLOCK (type 2) inventory entries are block announcements.
        for x in self.last_message["inv"].inv:
            if x.type == 2:
                self.block_announced = True
                self.announced_blockhashes.add(x.hash)

    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced

    def clear_block_announcement(self):
        # Reset the announcement flag and drop any stale announcement
        # messages so the next wait observes only fresh traffic.
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
            self.last_message.pop("cmpctblock", None)

    def get_headers(self, locator, hashstop):
        # Send a getheaders request with the given locator/hashstop.
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        # Announce the given blocks to the node via a headers message.
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def request_headers_and_sync(self, locator, hashstop=0):
        # Request headers and block until the node responds with an
        # announcement, leaving the announcement state cleared afterwards.
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        assert wait_until(self.received_block_announcement, timeout=30)
        self.clear_block_announcement()

    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.announced_blockhashes)
        return wait_until(received_hash, timeout=timeout)

    def send_await_disconnect(self, message, timeout=30):
        """Send a message to the node and wait for disconnection.

        This is used when we want to send a message into the node that we expect
        will get us disconnected, eg an invalid block."""
        self.send_message(message)
        success = wait_until(lambda: not self.connected, timeout=timeout)
        if not success:
            logger.error("send_await_disconnect failed!")
            raise AssertionError("send_await_disconnect failed!")
        return success
class CompactBlocksTest(BitcoinTestFramework):
    def __init__(self):
        """Configure a two-node test on a fresh chain."""
        super().__init__()
        # Start from a clean chain so block heights are deterministic.
        self.setup_clean_chain = True
        # Node0 = pre-segwit, node1 = segwit-aware
        self.num_nodes = 2
        # node0: segwit never activates; node1: keep a transaction index.
        self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
        # Anyone-can-spend outputs created by make_utxos(), consumed by tests.
        self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
    # Create 10 more anyone-can-spend utxo's for testing.
    def make_utxos(self):
        """Mine a block, mature its coinbase, split it into 10 utxos.

        Appends [txid, vout, value] triples to self.utxos.
        """
        # Doesn't matter which node we use, just use node0.
        block = self.build_block_on_tip(self.nodes[0])
        self.test_node.send_and_ping(msg_block(block))
        assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
        # 100 confirmations so the coinbase above becomes spendable.
        self.nodes[0].generate(100)

        # Split the matured coinbase into ten equal anyone-can-spend outputs.
        total_value = block.vtx[0].vout[0].nValue
        out_value = total_value // 10
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
        for i in range(10):
            tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
        tx.rehash()

        # Mine the splitting transaction in a fresh block and confirm the
        # node accepted it as the new tip.
        block2 = self.build_block_on_tip(self.nodes[0])
        block2.vtx.append(tx)
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.solve()
        self.test_node.send_and_ping(msg_block(block2))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
        self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
        return
    # Test "sendcmpct" (between peers preferring the same version):
    # - No compact block announcements unless sendcmpct is sent.
    # - If sendcmpct is sent with version > preferred_version, the message is ignored.
    # - If sendcmpct is sent with boolean 0, then block announcements are not
    #   made with compact blocks.
    # - If sendcmpct is then sent with boolean 1, then new block announcements
    #   are made with compact blocks.
    # If old_node is passed in, request compact blocks with version=preferred-1
    # and verify that it receives block announcements via compact block.
    def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
        """Exercise the sendcmpct handshake and its effect on announcements."""
        # Make sure we get a SENDCMPCT message from our peer
        def received_sendcmpct():
            return (len(test_node.last_sendcmpct) > 0)
        got_message = wait_until(received_sendcmpct, timeout=30)
        assert(received_sendcmpct())
        assert(got_message)
        with mininode_lock:
            # Check that the first version received is the preferred one
            assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
            # And that we receive versions down to 1.
            assert_equal(test_node.last_sendcmpct[-1].version, 1)
            test_node.last_sendcmpct = []

        tip = int(node.getbestblockhash(), 16)

        # Helper: mine one block and assert `predicate` holds for how it was
        # announced to `peer`.
        def check_announcement_of_new_block(node, peer, predicate):
            peer.clear_block_announcement()
            block_hash = int(node.generate(1)[0], 16)
            peer.wait_for_block_announcement(block_hash, timeout=30)
            assert(peer.block_announced)
            assert(got_message)
            with mininode_lock:
                assert predicate(peer), (
                    "block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
                        block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))

        # We shouldn't get any block announcements via cmpctblock yet.
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Try one more time, this time after requesting headers.
        test_node.request_headers_and_sync(locator=[tip])
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)

        # Test a few ways of using sendcmpct that should NOT
        # result in compact block announcements.
        # Before each test, sync the headers chain.
        test_node.request_headers_and_sync(locator=[tip])

        # Now try a SENDCMPCT message with too-high version
        sendcmpct = msg_sendcmpct()
        sendcmpct.version = preferred_version+1
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])

        # Now try a SENDCMPCT message with valid version, but announce=False
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)

        # Headers sync before next test.
        test_node.request_headers_and_sync(locator=[tip])

        # Finally, try a SENDCMPCT message with announce=True
        sendcmpct.version = preferred_version
        sendcmpct.announce = True
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time (no headers sync should be needed!)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time, after turning on sendheaders
        test_node.send_and_ping(msg_sendheaders())
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Try one more time, after sending a version-1, announce=false message.
        sendcmpct.version = preferred_version-1
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)

        # Now turn off announcements
        sendcmpct.version = preferred_version
        sendcmpct.announce = False
        test_node.send_and_ping(sendcmpct)
        check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)

        if old_node is not None:
            # Verify that a peer using an older protocol version can receive
            # announcements from this node.
            sendcmpct.version = preferred_version-1
            sendcmpct.announce = True
            old_node.send_and_ping(sendcmpct)
            # Header sync
            old_node.request_headers_and_sync(locator=[tip])
            check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
    # Compare the generated shortids to what we expect based on BIP 152, given
    # bitcoind's choice of nonce.
    def test_compactblock_construction(self, node, test_node, version, use_witness_address):
        """Mine a block full of transactions and verify the compact block the
        node produces (both fast-announce and via getdata) matches BIP 152."""
        # Generate a bunch of transactions.
        node.generate(101)
        num_transactions = 25
        address = node.getnewaddress()
        if use_witness_address:
            # Want at least one segwit spend, so move all funds to
            # a witness address.
            address = node.addwitnessaddress(address)
            value_to_send = node.getbalance()
            node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
            node.generate(1)

        segwit_tx_generated = False
        for i in range(num_transactions):
            txid = node.sendtoaddress(address, 0.1)
            hex_tx = node.gettransaction(txid)["hex"]
            tx = FromHex(CTransaction(), hex_tx)
            if not tx.wit.is_null():
                segwit_tx_generated = True

        if use_witness_address:
            assert(segwit_tx_generated) # check that our test is not broken

        # Wait until we've seen the block announcement for the resulting tip
        tip = int(node.getbestblockhash(), 16)
        assert(test_node.wait_for_block_announcement(tip))

        # Make sure we will receive a fast-announce compact block
        self.request_cb_announcements(test_node, node, version)

        # Now mine a block, and look at the resulting compact block.
        test_node.clear_block_announcement()
        block_hash = int(node.generate(1)[0], 16)

        # Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False, True))
        [tx.calc_sha256() for tx in block.vtx]
        block.rehash()

        # Wait until the block was announced (via compact blocks)
        wait_until(test_node.received_block_announcement, timeout=30)
        assert(test_node.received_block_announcement())

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)

        # Now fetch the compact block using a normal non-announce getdata
        with mininode_lock:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash) # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

        wait_until(test_node.received_block_announcement, timeout=30)
        assert(test_node.received_block_announcement())

        # Now fetch and check the compact block
        header_and_shortids = None
        with mininode_lock:
            assert("cmpctblock" in test_node.last_message)
            # Convert the on-the-wire representation to absolute indexes
            header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
        self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
    def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
        """Verify a received compact block against the full block it encodes.

        Checks the header hash, the prefilled coinbase, witness handling for
        the given compact-block version (2 == wtxids), and that every
        non-prefilled transaction's shortid matches the BIP 152 siphash.
        """
        # Check that we got the right block!
        header_and_shortids.header.calc_sha256()
        assert_equal(header_and_shortids.header.sha256, block_hash)

        # Make sure the prefilled_txn appears to have included the coinbase
        assert(len(header_and_shortids.prefilled_txn) >= 1)
        assert_equal(header_and_shortids.prefilled_txn[0].index, 0)

        # Check that all prefilled_txn entries match what's in the block.
        for entry in header_and_shortids.prefilled_txn:
            entry.tx.calc_sha256()
            # This checks the non-witness parts of the tx agree
            assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)

            # And this checks the witness
            wtxid = entry.tx.calc_sha256(True)
            if version == 2:
                assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
            else:
                # Shouldn't have received a witness
                assert(entry.tx.wit.is_null())

        # Check that the cmpctblock message announced all the transactions.
        assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))

        # And now check that all the shortids are as expected as well.
        # Determine the siphash keys to use.
        [k0, k1] = header_and_shortids.get_siphash_keys()

        # Walk the block's transactions in order, consuming either the next
        # prefilled entry or the next shortid for each one.
        index = 0
        while index < len(block.vtx):
            if (len(header_and_shortids.prefilled_txn) > 0 and
                    header_and_shortids.prefilled_txn[0].index == index):
                # Already checked prefilled transactions above
                header_and_shortids.prefilled_txn.pop(0)
            else:
                tx_hash = block.vtx[index].sha256
                if version == 2:
                    # Version-2 compact blocks commit to wtxids.
                    tx_hash = block.vtx[index].calc_sha256(True)
                shortid = calculate_shortid(k0, k1, tx_hash)
                assert_equal(shortid, header_and_shortids.shortids[0])
                header_and_shortids.shortids.pop(0)
            index += 1
    # Test that bitcoind requests compact blocks when we announce new blocks
    # via header or inv, and that responding to getblocktxn causes the block
    # to be successfully reconstructed.
    # Post-segwit: upgraded nodes would only make this request of cb-version-2,
    # NODE_WITNESS peers. Unupgraded nodes would still make this request of
    # any cb-version-1-supporting peer.
    def test_compactblock_requests(self, node, test_node, version, segwit):
        """Announce a block, serve a coinbase-less compact block, answer the
        resulting getblocktxn, and check the node reconstructs the block."""
        # Try announcing a block with an inv or header, expect a compactblock
        # request
        for announce in ["inv", "header"]:
            block = self.build_block_on_tip(node, segwit=segwit)
            with mininode_lock:
                test_node.last_message.pop("getdata", None)

            if announce == "inv":
                test_node.send_message(msg_inv([CInv(2, block.sha256)]))
                # The node asks for headers before requesting the block data.
                success = wait_until(lambda: "getheaders" in test_node.last_message, timeout=30)
                assert(success)
                test_node.send_header_for_blocks([block])
            else:
                test_node.send_header_for_blocks([block])
            success = wait_until(lambda: "getdata" in test_node.last_message, timeout=30)
            assert(success)
            assert_equal(len(test_node.last_message["getdata"].inv), 1)
            assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
            assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

            # Send back a compactblock message that omits the coinbase
            comp_block = HeaderAndShortIDs()
            comp_block.header = CBlockHeader(block)
            comp_block.nonce = 0
            [k0, k1] = comp_block.get_siphash_keys()
            coinbase_hash = block.vtx[0].sha256
            if version == 2:
                coinbase_hash = block.vtx[0].calc_sha256(True)
            comp_block.shortids = [
                calculate_shortid(k0, k1, coinbase_hash) ]
            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
            # Reconstruction should fail (coinbase unknown), so tip is unchanged.
            assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
            # Expect a getblocktxn message.
            with mininode_lock:
                assert("getblocktxn" in test_node.last_message)
                absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
            assert_equal(absolute_indexes, [0])  # should be a coinbase request

            # Send the coinbase, and verify that the tip advances.
            if version == 2:
                msg = msg_witness_blocktxn()
            else:
                msg = msg_blocktxn()
            msg.block_transactions.blockhash = block.sha256
            msg.block_transactions.transactions = [block.vtx[0]]
            test_node.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
    # Test that we only receive getblocktxn requests for transactions that the
    # node needs, and that responding to them causes the block to be
    # reconstructed.
    def test_getblocktxn_requests(self, node, test_node, version):
        """Check getblocktxn asks only for missing transactions, for several
        mixes of prefilled and mempool-known transactions."""
        with_witness = (version==2)

        # Helper: send a compact block and assert the node requests exactly
        # the expected absolute transaction indexes.
        def test_getblocktxn_response(compact_block, peer, expected_result):
            msg = msg_cmpctblock(compact_block.to_p2p())
            peer.send_and_ping(msg)
            with mininode_lock:
                assert("getblocktxn" in peer.last_message)
                absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
            assert_equal(absolute_indexes, expected_result)

        # Helper: send a message and assert the tip becomes the given hash.
        def test_tip_after_message(node, peer, msg, tip):
            peer.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), tip)

        # First try announcing compactblocks that won't reconstruct, and verify
        # that we receive getblocktxn messages back.
        utxo = self.utxos.pop(0)

        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block, use_witness=with_witness)

        test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])

        msg_bt = msg_blocktxn()
        if with_witness:
            msg_bt = msg_witness_blocktxn() # serialize with witnesses
        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)

        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

        # Now try interspersing the prefilled transactions
        comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)

        # Now try giving one transaction ahead of time.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        test_node.send_and_ping(msg_tx(block.vtx[1]))
        assert(block.vtx[1].hash in node.getrawmempool())

        # Prefill 4 out of the 6 transactions, and verify that only the one
        # that was not in the mempool is requested.
        comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [5])

        msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)

        # Now provide all transactions to the node before the block is
        # announced and verify reconstruction happens immediately.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        for tx in block.vtx[1:]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:]:
            assert(tx.hash in mempool)

        # Clear out last request.
        with mininode_lock:
            test_node.last_message.pop("getblocktxn", None)

        # Send compact block
        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
        test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
        with mininode_lock:
            # Shouldn't have gotten a request for any transaction
            assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
    """Verify that a wrong blocktxn reply does not permanently mark the block as failed.

    The node should fall back to requesting the full block via getdata,
    and accept the block once it is delivered whole.
    """
    if (len(self.utxos) == 0):
        self.make_utxos()
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Relay the first 5 transactions from the block in advance
    for tx in block.vtx[1:6]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:6]:
        assert(tx.hash in mempool)
    # Send compact block
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    absolute_indexes = []
    with mininode_lock:
        assert("getblocktxn" in test_node.last_message)
        absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
    # The node already has vtx[1:6]; it should ask for the remaining five.
    assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
    # Now give an incorrect response.
    # Note that it's possible for bitcoind to be smart enough to know we're
    # lying, since it could check to see if the shortid matches what we're
    # sending, and eg disconnect us for misbehavior. If that behavior
    # change were made, we could just modify this test by having a
    # different peer provide the block further down, so that we're still
    # verifying that the block isn't marked bad permanently. This is good
    # enough for now.
    msg = msg_blocktxn()
    if version==2:
        msg = msg_witness_blocktxn()
    # Deliberately wrong: repeats vtx[5] and skips vtx[6].
    msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
    test_node.send_and_ping(msg)
    # Tip should not have updated
    assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
    # We should receive a getdata request
    success = wait_until(lambda: "getdata" in test_node.last_message, timeout=10)
    assert(success)
    assert_equal(len(test_node.last_message["getdata"].inv), 1)
    # type 2 == MSG_BLOCK; the witness flag may or may not be set.
    assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
    assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
    # Deliver the block
    if version==2:
        test_node.send_and_ping(msg_witness_block(block))
    else:
        test_node.send_and_ping(msg_block(block))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
    """Exercise the node's getblocktxn handler at various depths.

    For blocks within MAX_GETBLOCKTXN_DEPTH of the tip the node must answer
    with a blocktxn message containing exactly the requested transactions
    (witness-stripped for version-1 peers); for deeper blocks it must reply
    with a full block instead.
    """
    # bitcoind will not send blocktxn responses for blocks whose height is
    # more than 10 blocks deep.
    MAX_GETBLOCKTXN_DEPTH = 10
    chain_height = node.getblockcount()
    current_height = chain_height
    while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
        block_hash = node.getblockhash(current_height)
        block = FromHex(CBlock(), node.getblock(block_hash, False, True))
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
        # Request a random subset of the block's transactions.
        num_to_request = random.randint(1, len(block.vtx))
        msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
        test_node.send_message(msg)
        success = wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10)
        assert(success)
        [tx.calc_sha256() for tx in block.vtx]
        with mininode_lock:
            assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
            all_indices = msg.block_txn_request.to_absolute()
            for index in all_indices:
                tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
                tx.calc_sha256()
                assert_equal(tx.sha256, block.vtx[index].sha256)
                if version == 1:
                    # Witnesses should have been stripped
                    assert(tx.wit.is_null())
                else:
                    # Check that the witness matches
                    assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
            test_node.last_message.pop("blocktxn", None)
        current_height -= 1
    # Next request should send a full block response, as we're past the
    # allowed depth for a blocktxn response.
    block_hash = node.getblockhash(current_height)
    msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
    with mininode_lock:
        test_node.last_message.pop("block", None)
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
        assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
    """Verify behavior for compact blocks away from the chain tip.

    Requests for compact blocks deeper than MAX_CMPCTBLOCK_DEPTH must be
    answered with a full block; an announced compact block that builds on
    an old tip is treated as headers-only; and getblocktxn for such a block
    is silently ignored (anti-fingerprinting).
    """
    # Test that requesting old compactblocks doesn't work.
    MAX_CMPCTBLOCK_DEPTH = 5
    new_blocks = []
    for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
        test_node.clear_block_announcement()
        new_blocks.append(node.generate(1)[0])
        wait_until(test_node.received_block_announcement, timeout=30)
    test_node.clear_block_announcement()
    # inv type 4 == MSG_CMPCT_BLOCK: still within depth, expect a cmpctblock.
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    success = wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
    assert(success)
    test_node.clear_block_announcement()
    # One more block pushes new_blocks[0] past the allowed depth.
    node.generate(1)
    wait_until(test_node.received_block_announcement, timeout=30)
    test_node.clear_block_announcement()
    with mininode_lock:
        test_node.last_message.pop("block", None)
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    success = wait_until(lambda: "block" in test_node.last_message, timeout=30)
    assert(success)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
    # Generate an old compactblock, and verify that it's not accepted.
    cur_height = node.getblockcount()
    hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
    block = self.build_block_on_tip(node)
    block.hashPrevBlock = hashPrevBlock
    block.solve()
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block)
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    tips = node.getchaintips()
    found = False
    for x in tips:
        if x["hash"] == block.hash:
            assert_equal(x["status"], "headers-only")
            found = True
            break
    assert(found)
    # Requesting this block via getblocktxn should silently fail
    # (to avoid fingerprinting attacks).
    msg = msg_getblocktxn()
    msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
    with mininode_lock:
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
    """Mine three retarget periods' worth of blocks so segwit reaches 'active'."""
    blocks_needed = 144 * 3
    node.generate(blocks_needed)
    status = get_bip9_status(node, "segwit")["status"]
    assert_equal(status, 'active')
def test_end_to_end_block_relay(self, node, listeners):
    """Submit a block to *node* and check every listener gets a cmpctblock announcement."""
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    [l.clear_block_announcement() for l in listeners]
    # ToHex() won't serialize with witness, but this block has no witnesses
    # anyway. TODO: repeat this test with witness tx's to a segwit node.
    node.submitblock(ToHex(block), '', True)
    for l in listeners:
        wait_until(lambda: l.received_block_announcement(), timeout=30)
    with mininode_lock:
        for l in listeners:
            assert "cmpctblock" in l.last_message
            l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
            assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
    """Relay a compact block whose header is valid but whose tx set is broken.

    A transaction is removed without recomputing the merkle root, so the
    block is invalid; the node must reject it without advancing the tip and
    without disconnecting us.
    """
    assert(len(self.utxos))
    utxo = self.utxos[0]
    block = self.build_block_with_transactions(node, utxo, 5)
    # Dropping a tx invalidates the block (merkle root no longer matches).
    del block.vtx[3]
    block.hashMerkleRoot = block.calc_merkle_root()
    if use_segwit:
        # If we're testing with segwit, also drop the coinbase witness,
        # but include the witness commitment.
        add_witness_commitment(block)
        block.vtx[0].wit.vtxinwit = []
    block.solve()
    # Now send the compact block with all transactions prefilled, and
    # verify that we don't get disconnected.
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
    msg = msg_cmpctblock(comp_block.to_p2p())
    test_node.send_and_ping(msg)
    # Check that the tip didn't advance
    assert(int(node.getbestblockhash(), 16) is not block.sha256)
    test_node.sync_with_ping()
# Helper for enabling compact-block announcements:
# sends the sendcmpct request and syncs headers first.
def request_cb_announcements(self, peer, node, version):
    """Ask *node* (via *peer*) to announce new blocks as compact blocks."""
    best_hash = node.getbestblockhash()
    # Sync headers to the current tip before enabling announcements.
    peer.get_headers(locator=[int(best_hash, 16)], hashstop=0)
    announce_msg = msg_sendcmpct()
    announce_msg.version = version
    announce_msg.announce = True
    peer.send_and_ping(announce_msg)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
    """Check block reconstruction using transactions relayed by a second peer.

    Also checks that delivering an invalid compact block (bad coinbase
    witness) from one peer doesn't break later reconstruction via another.
    """
    assert(len(self.utxos))

    def announce_cmpct_block(node, peer):
        # Build a fresh block and announce it to *peer* as a compact block;
        # the node is expected to respond with a getblocktxn request.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert "getblocktxn" in peer.last_message
        return block, cmpct_block

    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    # Feed the block's transactions through the other peer's mempool relay.
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)
    # With all txs in the mempool, the compact block reconstructs immediately.
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Now test that delivering an invalid compact block won't break relay
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    # Corrupt the prefilled coinbase with a bogus witness stack.
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
    cmpct_block.use_witness = True
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    # The stalling peer can still complete the block via blocktxn.
    msg = msg_blocktxn()
    msg.block_transactions.blockhash = block.sha256
    msg.block_transactions.transactions = block.vtx[1:]
    stalling_peer.send_and_ping(msg)
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Main test driver: runs the compact-block test suite pre- and post-segwit.

    Sets up three p2p connections (a version-1 peer to node0, plus a
    version-2 segwit peer and a version-1 "old" peer to node1), then runs
    each sub-test before and after segwit activation on node1.
    """
    # Setup the p2p connections and start up the network thread.
    self.test_node = TestNode()
    self.segwit_node = TestNode()
    self.old_node = TestNode()  # version 1 peer <--> segwit node
    connections = []
    connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                                self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                                self.old_node, services=NODE_NETWORK))
    self.test_node.add_connection(connections[0])
    self.segwit_node.add_connection(connections[1])
    self.old_node.add_connection(connections[2])
    NetworkThread().start()  # Start up network handling in another thread
    # Test logic begins here
    self.test_node.wait_for_verack()
    # We will need UTXOs to construct transactions in later tests.
    self.make_utxos()
    self.log.info("Running tests, pre-segwit activation:")
    self.log.info("Testing SENDCMPCT p2p message... ")
    self.test_sendcmpct(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn requests...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn handler...")
    self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests/announcements not at chain tip...")
    self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
    sync_blocks(self.nodes)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
    sync_blocks(self.nodes)
    self.log.info("Testing handling of incorrect blocktxn responses...")
    self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    # End-to-end block relay tests
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
    self.log.info("Testing reconstructing compact blocks from all peers...")
    self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
    sync_blocks(self.nodes)
    # Advance to segwit activation
    self.log.info("Advancing to segwit activation")
    self.activate_segwit(self.nodes[1])
    self.log.info("Running tests, post-segwit activation...")
    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
    sync_blocks(self.nodes)
    self.log.info("Testing compactblock requests (unupgraded node)... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
    self.log.info("Testing getblocktxn requests (unupgraded node)...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    # Need to manually sync node0 and node1, because post-segwit activation,
    # node1 will not download blocks from node0.
    self.log.info("Syncing nodes...")
    assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
    while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
        block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
        self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False, True), '', True)
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
    self.log.info("Testing compactblock requests (segwit node)... ")
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
    self.log.info("Testing getblocktxn requests (segwit node)...")
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    # Test that if we submitblock to node1, we'll get a compact block
    # announcement to all peers.
    # (Post-segwit activation, blocks won't propagate from node0 to node1
    # automatically, so don't bother testing a block announced to node0.)
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
    self.log.info("Testing invalid index in cmpctblock message...")
    self.test_invalid_cmpctblock_message()
# Script entry point: run the functional test.
if __name__ == '__main__':
    CompactBlocksTest().main()
| [
"aaron.mathis@soferox.com"
] | aaron.mathis@soferox.com |
dd3317312edc41fbbf0c2373970715d2e21aa6b4 | f421edab15be07a6b66a80fff04408916f036617 | /tkinter/projet_v1.0.py | 932d236e73b3a43f3715b23f3b65503325bfec3a | [] | no_license | antoinech2/ISN-exercices-Python-Ann-e-2 | fb729cf830741fe7e22e4d42d7da1f607aac4d61 | d2018409037d8be1c055a7dcdff8bbd49cb52d54 | refs/heads/master | 2022-11-05T17:04:15.277544 | 2020-06-28T18:54:24 | 2020-06-28T18:54:24 | 275,648,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,573 | py | "ISN - Python - TKinter - Mini-projet"
"Jeu du clic-balle"
"Une balle se déplace au hasard sur un canevas, à vitesse faible. Le joueur doit essayer de cliquer sur cette balle à l'aide de la souris. S'il y arrive, il gagne un point, mais la balle se déplace plus rapidement et ainsi de suite. Arrêter le jeu au bout d'un certain nombre de clics et afficher le score atteint."
#######################################################
# Encodage: UTF-8 #
# Programme Python 3.7 #
# Auteur : Antoine Cheucle #
# Pas de licence, libre d'utilisation #
# Dépendences: module TKinter, random, math #
#######################################################
#########################################
#Importation des modules externes:
from tkinter import *
from random import randint, randrange, random
from math import sqrt, pi, cos, sin
#########################################
#########################################
# Game constants:
fenetre = (800,800)  # Window (canvas) size
ball_size = 20  # Ball radius
change_direction_chance = 150  # Odds of a direction change (1 in N per tick)
# Game state variables:
ball_speed = 0.5  # Initial ball speed
ball_position = [300, 300]  # Initial ball coordinates [x, y]
game_running = False  # Initial game state
#########################################
def __main__():
    """Main program: build the graphical window and start the event loop."""
    global canv, fen
    # Create the window, the canvas and the two control buttons.
    fen = Tk()
    canv = Canvas(fen, width=fenetre[0], height = fenetre[1])
    canv.pack(side=TOP)
    but_stop = Button(fen, text="Quitter le jeu", command=StopGame)
    but_stop.pack(side=BOTTOM)
    but_start = Button(fen, text="Démarrer le jeu", command=StartGame)
    but_start.pack(side=BOTTOM)
    # Start the Tk event loop (blocks until the window closes).
    fen.mainloop()
# Event-handler functions:
def StartGame():
    """Start the game: create the ball and begin its movement loop."""
    global game_running, ball
    if game_running == False:
        # Create the ball (oval centered on ball_position with radius ball_size).
        ball = canv.create_oval(ball_position[0]-ball_size, ball_position[1]-ball_size, ball_position[0]+ball_size, ball_position[1]+ball_size, outline="black", fill="red")
        # Mark the game as running.
        game_running = True
        # Pick an initial direction.
        ChangeDirection()
        # Start the ball's movement loop.
        MoveBall()
def StopGame():
    """Stop the game and close the window."""
    global game_running
    # Stop the movement loop (MoveBall checks this flag before rescheduling).
    game_running = False
    # Close the window.
    fen.quit()
    fen.destroy()
def MoveBall():
    """Move the ball one step, bounce off walls, and reschedule itself.

    Runs every 10 ms via Tk's after() while game_running is True.
    """
    global ball_position, vitesse
    # RNG draw deciding whether to change direction this tick
    # (roughly 1 chance in change_direction_chance+1).
    rand = randint(0, change_direction_chance)
    if rand == 0:
        ChangeDirection()
    # Apply the velocity to the position to move the ball along its direction.
    ball_position = [ball_position[0]+vitesse[0], ball_position[1]+vitesse[1]]
    # Keep the ball inside the canvas: on contact with an edge, reflect the
    # corresponding velocity component and clamp the position to the edge.
    # Left
    if ball_position[0]-ball_size < 0:
        vitesse[0] = -vitesse[0]
        ball_position[0] = ball_size
    # Right
    elif ball_position[0]+ball_size > fenetre[0]:
        vitesse[0] = -vitesse[0]
        ball_position[0] = fenetre[0]-ball_size
    # Top
    if ball_position[1]-ball_size < 0:
        vitesse[1] = -vitesse[1]
        ball_position[1] = ball_size
    # Bottom
    elif ball_position[1]+ball_size > fenetre[1]:
        vitesse[1] = -vitesse[1]
        ball_position[1] = fenetre[1]-ball_size
    # Update the ball's graphical position.
    canv.coords(ball, ball_position[0]-ball_size, ball_position[1]-ball_size, ball_position[0]+ball_size, ball_position[1]+ball_size)
    # Reschedule this function after a delay unless the game was stopped.
    if game_running:
        fen.after(10, MoveBall)
def ChangeDirection():
    """Give the ball a new random direction at the configured speed."""
    global vitesse
    # Draw a heading uniformly in [0, 2*pi) radians and project it onto x/y.
    heading = random() * 2 * pi
    vitesse = [cos(heading) * ball_speed, sin(heading) * ball_speed]
# Launch the main program.
__main__()
| [
"noreply@github.com"
] | noreply@github.com |
a33dc8e845cac335d832d9c5e3f45d970634fe61 | 8988e287cb60924c030656a93b0045c9956127f2 | /demo/my_demo/tokenizer_test.py | 50c1ee135f746d655e06a703633f724db4bccaf6 | [] | no_license | TATlong/keras_bert | 7080a1fe685da296dddb86684c5a9681e75c8579 | fa42a817b921181490d8beb3b9ad4fd04ec913c9 | refs/heads/master | 2023-04-01T14:52:17.796633 | 2020-01-17T08:06:54 | 2020-01-17T08:06:54 | 234,500,471 | 2 | 0 | null | 2023-03-24T23:21:37 | 2020-01-17T08:02:06 | Python | UTF-8 | Python | false | false | 2,371 | py | from bertTAT.bert import Tokenizer
# The dict maps tokens to ids and also contains BERT's special tokens;
# any character not present in the dict gets id 5, i.e. [UNK] (unknown).
token_dict = {
    '[CLS]': 0,
    '[SEP]': 1,
    'un': 2,
    '##aff': 3,
    '##able': 4,
    '[UNK]': 5,
    '明': 6,
    '天': 7,
}
# Tokenization: Chinese text (CJK characters) is split per character;
# English text uses greedy longest-match against the vocabulary.
tokenizer = Tokenizer(token_dict)
print(tokenizer.tokenize("上班使我快乐!", "明天还要上班!"))
print(tokenizer.tokenize("unaffable"))
print(tokenizer.tokenize('unaffable', '钢'))
# Splitting a word absent from the dict gives the result below; note that
# English out-of-vocabulary parts are split down to single letters:
# ['[CLS]', 'un', '##k', '##n', '##o', '##w', '##n', '[SEP]']
print(tokenizer.tokenize('unknown'))
# English: token indices and segment indices
strs = 'unaffable'
print("分词结果:", strs, tokenizer.tokenize(strs))
indices, segments = tokenizer.encode(strs)
print("对应token下标:{}".format(indices))
print("段落下标:", segments)
# Chinese: token indices and segment indices; a maximum length can be given
print("分词结果:", tokenizer.tokenize("上班使我快乐!", "明天还要上班!"))
indices, segments = tokenizer.encode("上班使我快乐!", "明天还要上班!")
print("对应的token下标:", indices)
print("段落下标:", segments)
# max_len: maximum index-sequence length; if the tokenized text is shorter
# than max_len, the output is padded with 0
print("分词结果:", tokenizer.tokenize(first='unaffable', second='钢'))
indices, segments = tokenizer.encode(first='unaffable', second='钢', max_len=10)
print("对应的token下标:", indices)
print("段落下标:", segments)
# Match tokenized results back to start/end offsets in the original text;
# this is plain positional matching, as the output below makes clear
print("原始文本:", tokenizer.tokenize(first="All rights reserved."))
print(Tokenizer.rematch("All rights reserved.", ["[UNK]", "righs", "[UNK]", "ser", "[UNK]", "[UNK]"]))
print("原始文本:", tokenizer.tokenize(first="嘛呢,吃了吗?"))
print("对应的下标:", tokenizer.encode(first="嘛呢,吃了吗?"))
print("匹配结果:", Tokenizer.rematch("你嘛呢,吃了吗?", ["你", "呢", "[UNK]", ",", "[UNK]", "了", "吗", "?"]))
| [
"1171942852@qq.com"
] | 1171942852@qq.com |
b83295a8369b760b60f547080929ab85c25711bb | 34652a47355a8dbe9200db229a1bbc62619de364 | /BASE SCRIPTS/Logic/__init__.py | 8ea073819416baffd947870cfc2da404aedfb85c | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | # Created by Bogdan Trif on 23-09-2018 , 10:07 PM. | [
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
c6fe12a1333b2fbeac90c730d484776d8331de04 | 6c978a74e94771c37b387b9ed7b09411dffe814d | /packageopt/services/agents/implementations/vola_agent.py | 9159885acad8cd1bfcd72937274c0d37a47de650 | [
"MIT"
] | permissive | nspostnov/for-article-optimal-position-liquidation | 23aa4f3120301cb568f8a2770bbce54ea6badbe0 | 857152b0450c39cfcdb3d329e57ed07efe344356 | refs/heads/master | 2023-04-17T04:51:07.541344 | 2021-04-27T11:47:59 | 2021-04-27T11:47:59 | 289,775,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from ..abstract_base_classes.agent import Agent
__all__ = ['VolaAgent']
class VolaAgent(Agent):
    """Agent that serves volatility values, computing and caching them on demand."""

    def __init__(self, volarepo, volasolver):
        # Repository used as a cache, and solver used on cache misses.
        self._volarepo = volarepo
        self._volasolver = volasolver

    def get(self, key):
        """Return the volatility for *key*, computing and storing it on a cache miss."""
        cached = self._volarepo.get(key)
        if cached is not None:
            return cached
        # Miss: compute, persist, then read back through the repository.
        self._volarepo.set(self._volasolver.calculate(key))
        return self._volarepo.get(key)
| [
"postnov.ns@yandex.ru"
] | postnov.ns@yandex.ru |
57ba1aad7d54dc6660b731881b3a63758ed432b9 | e279c36f869bff57f76da76ba3b75e594ad1d4af | /news/migrations/0004_auto_20170318_1320.py | 6dd91325d8cae9f4b858fbb7aa205b896b205c1c | [] | no_license | angleCV/m.roothan.com | 386d97fc1bb910e9a7fe8564f54b35353f2ec506 | e6ac25803c833d656d19209705311cd0fb33247d | refs/heads/master | 2021-01-25T04:42:03.491651 | 2017-06-06T02:18:40 | 2017-06-06T02:18:40 | 93,465,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-18 05:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a headline-image field ('pic') to each of the six news article models.

    All six fields share the same definition: an ImageField stored under
    uploads/top_images/ with a common default image; the verbose name notes
    the expected 436*200 pixel size.
    """

    dependencies = [
        ('news', '0003_auto_20170318_1319'),
    ]

    operations = [
        migrations.AddField(
            model_name='focusarticle',
            name='pic',
            field=models.ImageField(default='uploads/top_images/default.jpg', upload_to='uploads/top_images/', verbose_name='头条图片436*200'),
        ),
        migrations.AddField(
            model_name='lawarticle',
            name='pic',
            field=models.ImageField(default='uploads/top_images/default.jpg', upload_to='uploads/top_images/', verbose_name='头条图片436*200'),
        ),
        migrations.AddField(
            model_name='policyarticle',
            name='pic',
            field=models.ImageField(default='uploads/top_images/default.jpg', upload_to='uploads/top_images/', verbose_name='头条图片436*200'),
        ),
        migrations.AddField(
            model_name='popularscience',
            name='pic',
            field=models.ImageField(default='uploads/top_images/default.jpg', upload_to='uploads/top_images/', verbose_name='头条图片436*200'),
        ),
        migrations.AddField(
            model_name='talentsarticle',
            name='pic',
            field=models.ImageField(default='uploads/top_images/default.jpg', upload_to='uploads/top_images/', verbose_name='头条图片436*200'),
        ),
        migrations.AddField(
            model_name='techarticle',
            name='pic',
            field=models.ImageField(default='uploads/top_images/default.jpg', upload_to='uploads/top_images/', verbose_name='头条图片436*200'),
        ),
    ]
| [
"actanble@163.com"
] | actanble@163.com |
9e32b4bbcdbc11ec7d8f2448aa53c0eb16a438b0 | cf3e1b7c984ce9611b14a8a2475a9025e0a3c208 | /object_detection_3.py | 52327be89107fa10d5377f7e605fd378ab45dc59 | [] | no_license | ravo9/TensorFlow_Object_Detection_App | ed2753be8962f2b5a2d20a0dc02aa5269437965a | bead935cd1893bb11f93e3f49c9d3ac5fc5e934a | refs/heads/master | 2020-11-27T14:36:52.533653 | 2019-12-22T23:09:07 | 2019-12-22T23:09:07 | 229,492,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | import os
import pathlib
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
import urllib.request
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from flask import Flask, request
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from json import dumps
from flask_jsonpify import jsonify
# Patch the location of gfile (TF2 moved gfile under tf.io).
tf.gfile = tf.io.gfile
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'data/mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
# Pretrained detection model to download and load.
model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
# Flask application exposing the /analyze endpoint.
app = Flask(__name__)
api = Api(app)
@app.route('/analyze', methods=['POST'])
def api_root():
    """Receive an image URL in the POST form field 'picture' and download it.

    The URL may arrive wrapped in quotes, which are stripped before the
    download. The image is saved locally as 'receivedImage.jpg'.
    Returns the plain-text acknowledgement "POST OK".
    """
    # .get avoids a 400 error when the field is missing (form['picture'] would raise).
    picture = request.form.get('picture')
    print(picture)
    if picture is not None:
        # Original code assigned this filename but downloaded to a different
        # hard-coded path; use the named file consistently.
        filename = 'receivedImage.jpg'
        picture = picture.strip('\'"')
        urllib.request.urlretrieve(picture, filename)
    return "POST OK"
def load_model(model_name):
    """Download (if needed) and load a TF object-detection SavedModel.

    The archive is fetched from the TensorFlow model zoo, unpacked via
    keras' file cache, and the 'serving_default' signature is returned.
    """
    base_url = 'http://download.tensorflow.org/models/object_detection/'
    model_file = model_name + '.tar.gz'
    # get_file caches the download and extracts the tarball.
    model_dir = tf.keras.utils.get_file(
        fname=model_name,
        origin=base_url + model_file,
        untar=True)
    model_dir = pathlib.Path(model_dir)/"saved_model"
    model = tf.saved_model.load(str(model_dir))
    # Use the default serving signature as the callable inference function.
    model = model.signatures['serving_default']
    return model
def run_inference_for_single_image(model, image):
    """Run the detection model on one image and return a dict of numpy outputs.

    Returns a dict with keys such as 'detection_boxes', 'detection_classes',
    'detection_scores' and 'num_detections' (plus reframed masks when the
    model produces them).
    """
    image = np.asarray(image)
    # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis,...]
    # Run inference
    output_dict = model(input_tensor)
    # All outputs are batches tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key:value[0, :num_detections].numpy()
                   for key,value in output_dict.items()}
    output_dict['num_detections'] = num_detections
    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5, tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
    return output_dict
def prepareVisualization(model, output_dict, image_np):
    """Draw detection boxes/labels onto *image_np* and save it to disk.

    *image_np* is modified in place by the visualization utility, then
    written to 'savedResult.jpg'. The *model* parameter is unused here.
    """
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)
    #cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
    #if cv2.waitKey(1000) & 0xFF == ord('q'):
    #cap.release()
    #cv2.destroyAllWindows()
    # Save the result.
    filename = 'savedResult.jpg'
    cv2.imwrite(filename, image_np)
# Load the pretrained detection model at import time.
detection_model = load_model(model_name)
# NOTE(review): bare attribute accesses with unused results — presumably
# left over from notebook-style inspection; they have no effect here.
detection_model.output_dtypes
detection_model.output_shapes
# 1. Load the image.
originalImage = cv2.imread('./image.jpg')
# 2. Analyze image.
analysisResult = run_inference_for_single_image(detection_model, originalImage)
# 3. Display result.
prepareVisualization(detection_model, analysisResult, originalImage)
#image = cv2.resize(image, (800, 600))
#cv2.imshow("OpenCV Image Reading", image)
#cv2.waitKey(0)
# Start the Flask server (port passed as a string; Flask accepts this).
if __name__ == '__main__':
    app.run(port='2137')
| [
"rafalozog@gmail.com"
] | rafalozog@gmail.com |
2c8c00f24140851a86f35ee98958ad47aed4209f | 1e1b6e28104485c47d8f4d7fd8ee527c0d4a579f | /Scientific Computing with Python/boilerplate-polygon-area-calculator/shape_calculator.py | 2a7e28afb673fb99a72794665a621509d59f8a51 | [] | no_license | cdeanatx/FCC-Projects | 82f680a25b90037b526ae0529c13d4ef937ae543 | 22a0ee1875e2edb85684700881f954290f8b0ad5 | refs/heads/main | 2023-08-15T01:15:01.877153 | 2021-10-07T19:57:33 | 2021-10-07T19:57:33 | 397,318,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | import math
class Rectangle:
    """A width x height rectangle with basic geometry helpers."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def __str__(self):
        return f"Rectangle(width={self.width}, height={self.height})"

    def set_width(self, width):
        """Set the width and return the new value."""
        self.width = width
        return self.width

    def set_height(self, height):
        """Set the height and return the new value."""
        self.height = height
        return self.height

    def get_area(self):
        """Return width * height."""
        return self.height * self.width

    def get_perimeter(self):
        """Return the perimeter, 2 * (width + height)."""
        return 2 * (self.height + self.width)

    def get_diagonal(self):
        """Return the diagonal length via the Pythagorean theorem."""
        return (self.width ** 2 + self.height ** 2) ** .5

    def get_picture(self):
        """Return an ASCII-art picture ('*' grid), one row per unit of
        height, or a refusal string for shapes wider/taller than 50."""
        if self.width > 50 or self.height > 50:
            return "Too big for picture."
        picture = ""
        for _ in range(self.height):
            picture += "*" * self.width + "\n"
        return picture

    def get_amount_inside(self, shape):
        """Return how many whole copies of `shape` fit inside this
        rectangle in a simple grid (no rotation).

        Fix: the original left several debug print() calls in here.
        """
        if shape.width > self.width or shape.height > self.height:
            return 0
        return math.floor(self.width / shape.width) * math.floor(self.height / shape.height)


class Square(Rectangle):
    """A square: a Rectangle whose width and height are always equal."""

    def __init__(self, side):
        super().__init__(side, side)
        self.side = side

    def __str__(self):
        return f"Square(side={self.side})"

    def set_side(self, side):
        """Set the side, keeping side/width/height in sync, and return it.

        Fix: the original mutated Rectangle *class* attributes
        (Rectangle.width / Rectangle.height) instead of this instance.
        """
        self.side = side
        self.width = side
        self.height = side
        return self.side

    def set_width(self, side):
        # A square's width and height always change together (the original
        # only updated self.side, leaving width/height — and therefore
        # get_area()/get_perimeter() — stale).
        return self.set_side(side)

    def set_height(self, side):
        return self.set_side(side)
| [
"cdean.atx@gmail.com"
] | cdean.atx@gmail.com |
1b64fd425fcf04cf639ecf7d7de4a1fe6855afb0 | 732fe0652fbd5895a01d3e16ee12faf903cb86f1 | /data_generator/augment_data.py | eafc8a8665ce58b9487a8b4acdb41ee240bf0fc8 | [
"MIT"
] | permissive | KishanChandravanshi/forecast-cyclone-path | 0fd0e9fa6d77bb5f226ea01f99d5ed0834769744 | 766bb8440206b24d34ccd0eae488e2651d4e0467 | refs/heads/master | 2020-04-01T04:04:42.841764 | 2018-12-14T18:40:19 | 2018-12-14T18:40:19 | 152,849,144 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import cv2
import imutils
import numpy as np
import random
import glob
# Run this script to generate randomly rotated copies of every .jpg in
# `folder_name`; the rotated copies are written to `destination`.
folder_name = 'dataset'
destination = 'augmented_data'
files_path = glob.glob(folder_name + "\\*.jpg")

# Set this to one past the highest number already used in your dataset,
# e.g. with images named 1.jpg ... 1000.jpg set the count to 1001.
count = 1001
random_rotation = [50, 90, 74, 123, 350, 200, 180]
print('Augmenting data, Please wait...')
for file in files_path:
    try:
        img = cv2.imread(file)
        if img is None:
            # cv2.imread returns None (no exception) for unreadable files.
            print('skipping unreadable image: ' + file)
            continue
        # random.choice stays correct if the rotation list changes length
        # (the old randint(0, 6) index was hard-coded to 7 entries).
        rotated_img = imutils.rotate(img, random.choice(random_rotation))
        # Save it to the destination folder under the next free number.
        cv2.imwrite(destination + "\\" + str(count) + ".jpg", rotated_img)
        count += 1
    except Exception as e:
        # Best-effort batch job: report the failure and keep going.
        print(str(e))
| [
"milkyway.sagittarius.a@gmail.com"
] | milkyway.sagittarius.a@gmail.com |
e45cc02f03429c514d46d0434bc9d613598dc5fd | c202ee6f075b14d70712b1e93824a5476ac0c6f6 | /content/labs/lab10/solutions/bo2.py | 58b8e305c91bd6009d1804d17937ea188b9aed82 | [
"MIT"
] | permissive | Harvard-IACS/2021-CS109A | 0ab7797d10a04653e88722d1e597909e9c5bc443 | 0f57c3d80b7cef99d660f6a77c0166cffc1253e8 | refs/heads/master | 2022-07-31T17:57:48.205607 | 2021-12-16T16:47:36 | 2021-12-16T16:47:36 | 385,618,658 | 103 | 75 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | from tqdm.notebook import trange
#logarithmic values:
exp_powers = list(range(-6,1))
exp_vals = list(np.exp(exp_powers))
#Find Optimal Learning Rate for Ada-Boosting
staged_train_scores, staged_test_scores = {}, {}
score_train, score_test = {}, {}
for i in trange(len(exp_vals)):
model = AdaBoostClassifier(
base_estimator=DecisionTreeClassifier(max_depth=3),
n_estimators=200, learning_rate=exp_vals[i])
model.fit(x_train.values, y_train)
score_train[exp_vals[i]] = accuracy_score(y_train, model.predict(x_train.values))
score_test[exp_vals[i]] = accuracy_score(y_test, model.predict(x_test.values))
staged_train_scores[exp_vals[i]] = list(model.staged_score(x_train.values, y_train))
staged_test_scores[exp_vals[i]] = list(model.staged_score(x_test.values, y_test))
#Plot
lists1 = sorted(score_train.items())
lists2 = sorted(score_test.items())
x1, y1 = zip(*lists1)
x2, y2 = zip(*lists2)
plt.figure(figsize=(10,7))
plt.ylabel("Accuracy")
plt.xlabel("Log Learning Rate Log($\lambda$)")
plt.title('Variation of Accuracy with Depth - ADA Boost Classifier')
plt.plot(np.log(x1), y1, 'b-', label='Train')
plt.plot(np.log(x2), y2, 'g-', label='Test')
plt.legend()
plt.show()
##export this to a function or delete it.
fig, ax = plt.subplots(1,2, figsize=(10,7))
for key, val in staged_train_scores.items():
ax[0].plot(list(val),label='train')
for i, (key, val) in enumerate(staged_test_scores.items()):
ax[1].plot(list(val),label='$\lambda$=exp({})'.format(exp_powers[i]))
ax[1].set_title("h")
plt.legend(loc = 4)
sets = ["Train", "Test"]
for i in range(2):
ax[i].set_xlabel('Iteration')
ax[i].set_ylabel('Accuracy')
ax[i].set_title(sets[i] + " Set Accuracy vs Iterations - ADA Boost")
plt.show()
| [
"chrisgumb@gmail.com"
] | chrisgumb@gmail.com |
81e70233fa819d68054341510f9384b71ad6bbd7 | 50e4b03b9adae4c04db0bb9c58e9776f664c8e8e | /lotto.py | b412b4e3709605023b35f6f02c9e524976213c46 | [] | no_license | hotval/lotto | 8c80faed44e6da54178ac51e6d602519c3689536 | fcc76437229e38952933352cf5f99f450428dbce | refs/heads/master | 2021-12-10T11:18:58.982042 | 2016-08-15T20:24:22 | 2016-08-15T20:24:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | # widgets
from tkinter import *
window = Tk()
img = PhotoImage(file = "logo.gif")
imgLabel = Label(window, image = img)
label1 = Label(window, relief = "groove", width = 2)
label2 = Label(window, relief = "groove", width = 2)
label3 = Label(window, relief = "groove", width = 2)
label4 = Label(window, relief = "groove", width = 2)
label5 = Label(window, relief = "groove", width = 2)
label6 = Label(window, relief = "groove", width = 2)
getButton = Button(window)
resetButton = Button(window)
# layout
imgLabel.grid(row = 1, column = 1, rowspan = 2)
label1.grid(row = 1, column = 2, padx = 10)
label2.grid(row = 1, column = 3, padx = 10)
label3.grid(row = 1, column = 4, padx = 10)
label4.grid(row = 1, column = 5, padx = 10)
label5.grid(row = 1, column = 6, padx = 10)
label6.grid(row = 1, column = 7, padx = 10)
getButton.grid(row = 2, column = 2, columnspan = 4)
resetButton.grid(row = 2, column = 6, columnspan = 2)
# windows properties
window.title("Lotto - Random Number Generator")
window.resizable(0,0)
getButton.configure(text = "Generate Random Numbers")
resetButton.configure(text = "Reset")
# initial properties
label1.configure(text = "...")
label2.configure(text = "...")
label3.configure(text = "...")
label4.configure(text = "...")
label5.configure(text = "...")
label6.configure(text = "...")
resetButton.configure(state = DISABLED)
# generate random numbers
from random import sample
def generateRandomNumbers():
    """Draw six distinct numbers from 1-98, show them in the labels, then
    disable the draw button and enable reset."""
    drawn = sample(range(1, 99), 6)
    for box, number in zip((label1, label2, label3, label4, label5, label6), drawn):
        box.configure(text = number)
    getButton.configure(state = DISABLED)
    resetButton.configure(state = NORMAL)
def reset():
    """Restore the placeholder text and re-enable the draw button."""
    for box in (label1, label2, label3, label4, label5, label6):
        box.configure(text = "...")
    getButton.configure(state = NORMAL)
    resetButton.configure(state = DISABLED)
# Wire the buttons to their handlers.
getButton.configure(command = generateRandomNumbers)
resetButton.configure(command = reset)
# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
"noreply@github.com"
] | noreply@github.com |
8c759a8187699d7a2fe3f577cfc3b1b8d9379710 | 6661380e964a5912afad40d06722ed4485420925 | /ex2/exercise_2.ugo.py | 9e8d0a53ace709b829e4478241f78b32eaf2fcda | [] | no_license | smbadiwe/NewbiesLearningPython | d3227e7458a0b232f10200a498149fb9288b21db | 4934d2543c4dcceefc0c4b7fcf45546e5bb573b8 | refs/heads/master | 2022-12-03T23:12:49.890454 | 2020-08-18T00:46:37 | 2020-08-18T00:46:37 | 288,317,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,382 | py | import random
def check_speed(speed: int):
    """
    This function is for checking how fast a driver is driving.
    It takes one parameter: speed. Units is km/h.
    - If speed is less than 60km, it prints "OK".
    - Otherwise, for every 5km above the speed limit (60), the driver gets
      one warning point and the total is printed, e.g. speed 80 -> "Points: 4".
    - If the driver gets more than 12 points, it prints "License suspended".
    Returns the final message string (also printed) so callers can inspect it.

    Fixes vs. the original: the parameter was ignored (the function
    re-prompted via input()), the points formula was off by one
    (((speed-60)/5)+1 instead of (speed-60)//5), and the printed strings
    did not match this docstring.
    """
    speed = int(speed)  # tolerate callers that pass the raw input() string
    limit = 60
    if speed < limit:
        print("OK")
        return "OK"
    points = (speed - limit) // 5  # one warning point per full 5 km/h over
    print(f"Points: {points}")
    if points > 12:
        print("License suspended")
        return "License suspended"
    return f"Points: {points}"
def guess_number(answer: int):
    """
    - Ask the user to guess the number on your mind. This number on your mind is the 'answer' parameter.
    - If the number the user guessed is greater than 'answer', it should tell the user "Your guess is greater"
    - If the number the user guessed is less than 'answer', it should tell the user "Your guess is smaller"
    - If the number the user guessed is equal to 'answer', it should tell the user "Your guess is correct!" and end the game.

    Fixes vs. the original: the 'answer' parameter was ignored (guesses
    were compared against a hard-coded 50), an unused random number was
    generated, and the game was capped at 20 guesses, which this plain
    version does not do.
    """
    while True:
        print("Take a Guess:")
        guess = int(input())
        if guess < answer:
            print("Your guess is smaller")
        elif guess > answer:
            print("Your guess is greater")
        else:
            print("Your guess is correct!")
            break
def guess_number_advanced(answer: int):
    """
    Same game as `guess_number`, with the following modifications:
    - Keep track of the number of guesses the user is making.
    - If the user has guessed 20 times without finding the correct answer,
      report that the maximum attempts were reached, show the correct
      answer and end the game.

    Fixes vs. the original: the 'answer' parameter was ignored (guesses
    were compared against a hard-coded 50) and an unused random number
    was generated.
    """
    max_guesses = 20
    for _ in range(max_guesses):
        print("Take a Guess:")
        guess = int(input())
        if guess < answer:
            print("Your guess is smaller")
        elif guess > answer:
            print("Your guess is greater")
        else:
            print("Your guess is correct. You Won!")
            return
    print("Maximum attempt reached. The correct answer is " + str(answer))
def ex_2_qn_1():
    """Question 1: read the driver's speed and check it."""
    # Convert to int here so check_speed always receives a number.
    speed = int(input("Enter driver's speed:\t"))
    check_speed(speed)

def ex_2_qn_2():
    """Question 2: play the basic guessing game."""
    # randint is inclusive on both ends, so 1000000 matches the advertised
    # "between 0 and 1,000,000" (the original used 1000001).
    answer = random.randint(0, 1000000)
    print("Guessing Game\nGuess the number on my mind.\nIt's a number between 0 and 1,000,000 (1 million)\nYou'll get multiple attempts and I'll be giving you hints to nidge you towards the correct answer.")
    guess_number(answer)

def ex_2_qn_3():
    """Question 3: play the 20-attempt guessing game."""
    answer = random.randint(0, 1000000)
    print("Guessing Game [Advanced]\nGuess the number on my mind.\nIt's a number between 0 and 1,000,000 (1 million)\nYou'll get multiple attempts and I'll be giving you hints to nidge you towards the correct answer.")
    guess_number_advanced(answer)
guess_number_advanced(answer)
if __name__ == "__main__":
    # Simple menu loop: keep asking until a valid question number (or 'q')
    # is entered, run that question once, then exit.
    while True:
        qn = input("Which question do you want to run? (Type q to exit)\nAns: ")
        if qn == 'q':
            break
        if qn not in ['1', '2', '3']:
            print(f"Invalid: There's no question {qn}")
            continue
        print(f"Running Ex. 2 Qn. {qn}...\n")
        # At module scope locals() is globals(), so this resolves the
        # top-level ex_2_qn_* functions by name.
        locals()[f"ex_2_qn_{qn}"]()
        print(f"\nDone running Ex. 2 Qn. {qn}...\n")
        break
    print("\nBye!")
| [
"somasystemsng@gmail.com"
] | somasystemsng@gmail.com |
c501550b0527a427a4e37f01076e814d0968db8a | 376625bac2866debbb4e1771a4cb2c130400bc64 | /product/views.py | 17e4667803aeaae72c5baee19112b413dca9ce6d | [] | no_license | karthickshiva18/pyhtonshopping | 5a2f37faaeaa8a36aecdcac9debe2c0b33f00e74 | 7932d22db6829c5967e3680fc01b8657ed24a180 | refs/heads/master | 2020-09-21T02:10:36.448736 | 2019-11-29T10:34:02 | 2019-11-29T10:34:02 | 224,650,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from django.http import HttpResponse
from django.shortcuts import render
from .models import Product
def index(request):
    """Render the catalogue page with every Product.

    Fix: the original had a second, unreachable `return render(...)`
    statement after the first return; it has been removed.
    """
    products = Product.objects.all()
    return render(request, 'index.html', {'product': products})
def new(request):
    # Placeholder view: plain-text response for the "new product" page.
    return HttpResponse('new product')
# Create your views here.
| [
"karthickshekar@live.com"
] | karthickshekar@live.com |
601526ff38f7c0f7bf7b2ca8b976c36bd65d1610 | 2f43ce29b4054e7dd877b001b5ab94447275fa38 | /build/lib/bgcArgo/__init__.py | c3fd188519a00e3fe4aa74684d9de1aafb2b29fc | [
"MIT"
] | permissive | synapticarbors/BGC-QC | e9b38c094c8c01c250f45fc77a87148b085c7652 | 41da86a4e6774ffed625d5b834c4d22324e042a2 | refs/heads/master | 2022-12-07T13:11:15.913352 | 2020-08-28T17:44:53 | 2020-08-28T17:44:53 | 291,386,705 | 0 | 0 | MIT | 2020-08-30T02:40:33 | 2020-08-30T02:40:32 | null | UTF-8 | Python | false | false | 1,734 | py | """
Argo Canada BGC Quality Control
`python` library of functions for quality controlling dissolved oxygen data. Heavily based on the SOCCOM BGC Argo QC methods (https://github.com/SOCCOM-BGCArgo/ARGO_PROCESSING) program in `matlab`, uses either NCEP (https://psl.noaa.gov/data/gridded/data.ncep.reanalysis.html) or World Ocean Atlas (https://www.nodc.noaa.gov/OC5/woa18/) data to calculate oxygen gains (Johnson et al. 2015, https://doi.org/10.1175/JTECH-D-15-0101.1).
bgcArgo dependencies
- Must run on `python3`, not supported on `python2.x` (uses pathlib (https://docs.python.org/3/library/pathlib.html), introduced in python version 3.4)
- The seawater (https://pypi.org/project/seawater/) package
- netCDF4 (https://pypi.org/project/netCDF4/) module for `.nc` files
- pandas (https://pandas.pydata.org/) and seaborn (https://seaborn.pydata.org/) are recommended but not required, through there will be some reduced (non-essential) functionality without pandas
- cmocean (https://matplotlib.org/cmocean/) is also recommended for nicer plots, but not required
version history
0.1: April 20, 2020 - Initial creation
0.2: May 13, 2020 - Major change to how end user would use module, change to more object-oriented, create argo class
0.2.1: June 23, 2020 - pandas is now required, makes reading of global index significantly easier and more efficient
"""
from __future__ import absolute_import
from .core import *
from . import fplt
from . import unit
from . import io
from . import interp
from . import diagnostic
__all__ = ['fplt', 'unit', 'io', 'interp', 'diagnostic']
__author__ = ['Christopher Gordon <chris.gordon@dfo-mpo.gc.ca>']
__version__ = '0.2.1'
# check age of index file, or if it exists
io.check_index()
| [
"chris.gordon@dfo-mpo.gc.ca"
] | chris.gordon@dfo-mpo.gc.ca |
43ba37c8a93da732faad900adadf63fa7b11126f | 9fa72c7944bd8d2223e8fe95544830ea30999259 | /CargaDatos.py | f039dc0cd5e09f45446ba1ac9024a3a6fdc012af | [] | no_license | vnbl/experimento_ub | eec374a3ce5309e65c395afea7ef0bc7a7312ebf | 81aac4f225b922a4e3ad596491dde3143bef1069 | refs/heads/master | 2023-02-06T08:10:27.108308 | 2020-12-25T20:07:13 | 2020-12-25T20:07:13 | 295,383,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,023 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 10:30:49 2020
@author: fernanda
"""
import pandas as pd #Para manipular datasets
import numpy as np #Funciones matematicas de manera vectorial
import random as rand
import sklearn.metrics as metrics # Para utilizar algoritmos de machine learning. Metrics: utiliza m'etricas de desempeno, funciones de score, etc. de forma a cuantificar la calidad de predicciones
import seaborn as sns # Visualizacion de datos
# matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as patches
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.3)
# Tests (Informatics degree course IDs)
s1_info = [364288, 364289, 364290, 364291, 364292, 364293, 364294, 364298, 364299, 364301]
s1_info_tag = ['P1', 'DDB', 'IO', 'ALGE', 'CAL', 'MD', 'FIS', 'ALGO', 'P2', 'ED']
s2_info = [364297, 364300, 364303, 364305, 364302, 364296, 364295, 364306, 364304, 364307]
s2_info_tag = ['ELEC', 'AA', 'DS', 'EC', 'ICC', 'EMP', 'PIE', 'PAE', 'PIS', 'SO1']
s3_info = [364314, 364308, 364322, 364315, 364309, 364311, 364323, 364328, 364310, 364312]
s3_info_tag = ['IA', 'SO2', 'TNUI', 'VA', 'XAR', 'BD', 'FHIC', 'GiVD', 'LIL', 'SWD']
# Concatenation of the per-year data
info_ids = s1_info + s2_info + s3_info
info_tags = s1_info_tag + s2_info_tag + s3_info_tag
# Mathematics
s1_mates = [360142, 360140, 360136, 360138, 360134, 360135, 360139, 360143, 360137, 360141]
s1_mates_tag = ['ADIP', 'ELPR', 'IACD', 'LIRM', 'MAVE', 'ALLI', 'ARIT', 'FISI', 'IACI', 'PRCI']
s2_mates = [360144, 360148, 360151, 360150, 360146, 360145, 360152, 360161, 360153, 360155]
s2_mates_tag = ['CDDV', 'ESAL', 'GELI', 'GRAF', 'MNU1', 'CIDV', 'GEPR', 'HIMA', 'MMSD', 'TOPO']
s3_mates = [360158, 360149, 360156, 360147, 360162, 360159, 360154, 360163, 360160, 360157]
s3_mates_tag = ['ANMA', 'EQAL', 'GDCS', 'MNU2', 'PROB', 'ANCO', 'EQDI', 'ESTA', 'MODE', 'TGGS']
mates_ids = s1_mates + s2_mates + s3_mates
mates_tags = s1_mates_tag + s2_mates_tag + s3_mates_tag
# Law
s1_dret = [362441, 362442, 362444, 362451, 362446, 362443, 362452, 362449, 362450, 362447]
s1_dret_tag = ['TTC', 'CP', 'FDD', 'DRO', 'PIC', 'EC', 'SDL', 'FDPTD', 'HD', 'DCP']
s2_dret = [362448, 362453, 362454, 362456, 362459, 362461, 362469, 362458]
s2_dret_tag = ['OTE', 'PD', 'DOC', 'DIC', 'DFT', 'FDA', 'DPC', 'IDCE']
s3_dret = [362507, 362460, 362462, 362466, 362465, 362470, 362467, 362463]
s3_dret_tag = ['DR', 'PST', 'CAA', 'DEM', 'DTS', 'DPP', 'DS', 'BPU']
dret_ids = s1_dret + s2_dret + s3_dret
dret_tags = s1_dret_tag + s2_dret_tag + s3_dret_tag
# Education
s1_edu = [361020, 361032, 361039, 361041, 361044, 361046, 361047, 361049, 361094]
s1_edu_tag = ['PIP', 'PED', 'PDAA', 'AT', 'SOC', 'LCAT', 'LESP', 'DIDA', 'LENG']
s2_edu = [361029, 361036, 361051, 361069, 361072, 361087, 361091, 361099, 361704]
s2_edu_tag = ['INCL', 'SEOE', 'DIDC', 'DIDM', 'CINA', 'PLST', 'EDFI', 'PRAC', 'DGEO']
edu_ids = s1_edu + s2_edu
edu_tags = s1_edu_tag + s2_edu_tag
##### Load the data
# index_col=0: the first column of each CSV is the student id.
raw_grades_mates = pd.read_csv("data/grades_mates_2010-2016.csv", index_col=0)
raw_grades_info = pd.read_csv("data/grades_info_2011-2017.csv", index_col=0)
raw_grades_edu = pd.read_csv("data/grades_edu_2009-2014.csv", index_col=0)
raw_grades_dret = pd.read_csv("data/grades_dret_2009-2015.csv", index_col=0)
# NOTE(review): leftover debug output — prints the whole mates DataFrame.
print("Notas")
print(raw_grades_mates)
print("Fin Notas")
##### Filter the data
####### TODO: build a clean database once here and reuse it everywhere.
def filter_dataset(grades, t1, t2, t3, th1=8, th2=7, th3=0, gt=11, fill="row"):
    """Split a wide grades table into three course-year groups, drop
    students with too many missing grades, and impute the remaining gaps.

    grades : DataFrame with one row per student, one column per course id.
    t1, t2, t3 : column ids of first-, second- and third-year courses.
    th1, th2, th3 : dropna(thresh=...) minimums — a student is kept for a
        given year only if they have at least that many non-NaN grades
        (th3=0 keeps everyone for third year).
    gt : unused in the body — presumably a grade threshold; TODO confirm.
    fill : "row" imputes each student's missing grades with that student's
        own mean; any other value uses the per-course (column) mean.

    Returns (filtered, all, filled) — see the trailing comments below.
    """
    _grades = grades.copy()
    # Copy so the caller's DataFrame is never mutated.
    _grades_all = _grades.copy()
    # Split by year, apply the per-year missing-data threshold, rejoin.
    _grades_first = _grades[t1]
    _grades_first = _grades_first.dropna(thresh=th1)  # keep rows with >= th1 grades
    _grades_second = _grades[t2]
    _grades_second = _grades_second.dropna(thresh=th2)  # keep rows with >= th2 grades
    _grades_third = _grades[t3]
    _grades_third = _grades_third.dropna(thresh=th3)  # th3=0 drops nobody
    # NOTE(review): the thresholds above differ from what the paper states.
    # Inner join: only students present in all three year groups survive.
    _grades = _grades_first.join(_grades_second, how="inner").join(_grades_third, how="inner")
    print("all samples ", _grades_all.count().sum())
    print("cleaned samples ", _grades.count().sum())
    print("total students ", _grades_all.shape[0])
    print("sampled students ", _grades.shape[0])
    if fill != 'row':
        # Fill with the column (course) mean, rounded to one decimal.
        _grades_fill = _grades.fillna(_grades.mean().round(1))
    else:
        # Fill with the row (student) mean: build a frame in which every
        # column holds the student's own mean (axis=1 = across the row),
        # then use it to patch the NaNs.
        _row_mean = pd.DataFrame({col: _grades.mean(axis=1).round(1) for col in _grades.columns})
        _grades_fill = _grades.fillna(_row_mean)
    return _grades, _grades_all, _grades_fill
# _grades      = grades after removing students that fail the thresholds
# _grades_all  = every grade, including students that fail the thresholds
# _grades_fill = the filtered grades with missing values imputed (rounded means)
# Print the results
def combinarArchivos(df1, df2):
dfi = df1.merge(df2, how = "inner", on = df1.index) #inteserccion
sid_col = pd.DataFrame()
for s
| [
"fernanda.carles@gmail.com"
] | fernanda.carles@gmail.com |
fff0d9fe421cc561458c5bb3b740ae55d0f1b448 | ea6b2704dba7aba4e5954b3379f949bf120d3dc5 | /energy/forms.py | 51a7ee8f61ac0e9aee5ae46fb66885e5d61f0a31 | [] | no_license | damourus/photovoltaic | 87db18bedcb7794433f915bed9359c213581d6ba | 21227990a60f00527394646cd48ae593eb032c0d | refs/heads/main | 2023-07-16T17:59:05.218348 | 2021-09-05T17:36:57 | 2021-09-05T17:36:57 | 378,584,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | from .models import *
from django import forms
class PvForm(forms.ModelForm):
    """ModelForm exposing only the photovoltaic module choice."""
    class Meta:
        model = Photovoltaic
        fields = ('pv_model', )

class InverterForm(forms.ModelForm):
    """ModelForm exposing only the inverter model choice."""
    class Meta:
        model = Inverter
        fields = ('inverter_model', )

class RadiationForm(forms.ModelForm):
    """ModelForm for the radiation inputs: site location, azimuth, slope."""
    class Meta:
        model = Radiation
        fields = ('location', 'azimuth', 'slope')
class InputForm(forms.Form):
    """Plain form collecting the facility/envelope parameters for a PV
    yield calculation."""

    # Fix: these choice lists were declared as *set* literals, so the
    # dropdown options rendered in arbitrary, non-deterministic order.
    # Django choices should be an ordered sequence of (value, label) pairs.
    TYPES = (
        ('South Roof', 'South Roof'),
        ('North Roof', 'North Roof'),
        ('West Roof', 'West Roof'),
        ('East Roof', 'East Roof'),
    )
    Choices = (
        ('South', 'South'),
        ('North', 'North'),
        ('West', 'West'),
        ('East', 'East'),
    )

    facility_name1 = forms.CharField(label = 'Facility Name', max_length=10)
    envelope_selection = forms.ChoiceField(label = 'Envelope Selection', choices=TYPES)
    direction = forms.ChoiceField(label = 'Direction', choices=Choices)
    number_of_modules = forms.IntegerField(label = 'Number of Modules (EA)')
    non_vertical_surface_solar_attenuation_rate = forms.FloatField(label = 'Non-Vertical Surface Solar Attenuation Rate')
    total_equipment_cost = forms.IntegerField(label = 'Total Equipment Cost')
| [
"damourus01@gmail.com"
] | damourus01@gmail.com |
e26aa9b8fd359a2976468c02c32eb815b4ca3d67 | 6cefa769477b4d72517783e9b7e57abf1455271c | /access_pb2.py | 0b4633bb1216dcf2b201d1080f7bd42d6d0bb476 | [] | no_license | saewitz/flow-python-protocol | cbdec194e85c803ebc63b2ddafeac8f618b805bb | 1feee35c37c2076dd6c9ebc2d0cd7d547446e94d | refs/heads/master | 2023-03-08T07:53:30.594094 | 2021-02-28T09:59:42 | 2021-02-28T09:59:42 | 343,058,825 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | true | 66,266 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: access.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from flow.entities import account_pb2 as flow_dot_entities_dot_account__pb2
from flow.entities import block_header_pb2 as flow_dot_entities_dot_block__header__pb2
from flow.entities import block_pb2 as flow_dot_entities_dot_block__pb2
from flow.entities import collection_pb2 as flow_dot_entities_dot_collection__pb2
from flow.entities import event_pb2 as flow_dot_entities_dot_event__pb2
from flow.entities import transaction_pb2 as flow_dot_entities_dot_transaction__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='access.proto',
package='flow.access',
syntax='proto3',
serialized_options=b'Z\006access',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0c\x61\x63\x63\x65ss.proto\x12\x0b\x66low.access\x1a\x1b\x66low/entities/account.proto\x1a flow/entities/block_header.proto\x1a\x19\x66low/entities/block.proto\x1a\x1e\x66low/entities/collection.proto\x1a\x19\x66low/entities/event.proto\x1a\x1f\x66low/entities/transaction.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\r\n\x0bPingRequest\"\x0e\n\x0cPingResponse\"0\n\x1bGetLatestBlockHeaderRequest\x12\x11\n\tis_sealed\x18\x01 \x01(\x08\"\'\n\x19GetBlockHeaderByIDRequest\x12\n\n\x02id\x18\x01 \x01(\x0c\"/\n\x1dGetBlockHeaderByHeightRequest\x12\x0e\n\x06height\x18\x01 \x01(\x04\"@\n\x13\x42lockHeaderResponse\x12)\n\x05\x62lock\x18\x01 \x01(\x0b\x32\x1a.flow.entities.BlockHeader\"*\n\x15GetLatestBlockRequest\x12\x11\n\tis_sealed\x18\x01 \x01(\x08\"!\n\x13GetBlockByIDRequest\x12\n\n\x02id\x18\x01 \x01(\x0c\")\n\x17GetBlockByHeightRequest\x12\x0e\n\x06height\x18\x01 \x01(\x04\"4\n\rBlockResponse\x12#\n\x05\x62lock\x18\x01 \x01(\x0b\x32\x14.flow.entities.Block\"&\n\x18GetCollectionByIDRequest\x12\n\n\x02id\x18\x01 \x01(\x0c\"C\n\x12\x43ollectionResponse\x12-\n\ncollection\x18\x01 \x01(\x0b\x32\x19.flow.entities.Collection\"I\n\x16SendTransactionRequest\x12/\n\x0btransaction\x18\x01 \x01(\x0b\x32\x1a.flow.entities.Transaction\"%\n\x17SendTransactionResponse\x12\n\n\x02id\x18\x01 \x01(\x0c\"#\n\x15GetTransactionRequest\x12\n\n\x02id\x18\x01 \x01(\x0c\"F\n\x13TransactionResponse\x12/\n\x0btransaction\x18\x01 \x01(\x0b\x32\x1a.flow.entities.Transaction\"\xb1\x01\n\x19TransactionResultResponse\x12\x30\n\x06status\x18\x01 \x01(\x0e\x32 .flow.entities.TransactionStatus\x12\x13\n\x0bstatus_code\x18\x02 \x01(\r\x12\x15\n\rerror_message\x18\x03 \x01(\t\x12$\n\x06\x65vents\x18\x04 \x03(\x0b\x32\x14.flow.entities.Event\x12\x10\n\x08\x62lock_id\x18\x05 \x01(\x0c\"$\n\x11GetAccountRequest\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0c\"=\n\x12GetAccountResponse\x12\'\n\x07\x61\x63\x63ount\x18\x01 
\x01(\x0b\x32\x16.flow.entities.Account\"1\n\x1eGetAccountAtLatestBlockRequest\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0c\":\n\x0f\x41\x63\x63ountResponse\x12\'\n\x07\x61\x63\x63ount\x18\x01 \x01(\x0b\x32\x16.flow.entities.Account\"G\n\x1eGetAccountAtBlockHeightRequest\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0c\x12\x14\n\x0c\x62lock_height\x18\x02 \x01(\x04\"F\n!ExecuteScriptAtLatestBlockRequest\x12\x0e\n\x06script\x18\x01 \x01(\x0c\x12\x11\n\targuments\x18\x02 \x03(\x0c\"T\n\x1d\x45xecuteScriptAtBlockIDRequest\x12\x10\n\x08\x62lock_id\x18\x01 \x01(\x0c\x12\x0e\n\x06script\x18\x02 \x01(\x0c\x12\x11\n\targuments\x18\x03 \x03(\x0c\"\\\n!ExecuteScriptAtBlockHeightRequest\x12\x14\n\x0c\x62lock_height\x18\x01 \x01(\x04\x12\x0e\n\x06script\x18\x02 \x01(\x0c\x12\x11\n\targuments\x18\x03 \x03(\x0c\"&\n\x15\x45xecuteScriptResponse\x12\r\n\x05value\x18\x01 \x01(\x0c\"X\n\x1eGetEventsForHeightRangeRequest\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x14\n\x0cstart_height\x18\x02 \x01(\x04\x12\x12\n\nend_height\x18\x03 \x01(\x04\">\n\x1bGetEventsForBlockIDsRequest\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x11\n\tblock_ids\x18\x02 \x03(\x0c\"\xd3\x01\n\x0e\x45ventsResponse\x12\x33\n\x07results\x18\x01 \x03(\x0b\x32\".flow.access.EventsResponse.Result\x1a\x8b\x01\n\x06Result\x12\x10\n\x08\x62lock_id\x18\x01 \x01(\x0c\x12\x14\n\x0c\x62lock_height\x18\x02 \x01(\x04\x12$\n\x06\x65vents\x18\x03 \x03(\x0b\x32\x14.flow.entities.Event\x12\x33\n\x0f\x62lock_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x1d\n\x1bGetNetworkParametersRequest\"0\n\x1cGetNetworkParametersResponse\x12\x10\n\x08\x63hain_id\x18\x01 \x01(\t\"\'\n%GetLatestProtocolStateSnapshotRequest\";\n\x1dProtocolStateSnapshotResponse\x12\x1a\n\x12serializedSnapshot\x18\x01 \x01(\x0c\x32\xfe\x0f\n\tAccessAPI\x12;\n\x04Ping\x12\x18.flow.access.PingRequest\x1a\x19.flow.access.PingResponse\x12\x62\n\x14GetLatestBlockHeader\x12(.flow.access.GetLatestBlockHeaderRequest\x1a 
.flow.access.BlockHeaderResponse\x12^\n\x12GetBlockHeaderByID\x12&.flow.access.GetBlockHeaderByIDRequest\x1a .flow.access.BlockHeaderResponse\x12\x66\n\x16GetBlockHeaderByHeight\x12*.flow.access.GetBlockHeaderByHeightRequest\x1a .flow.access.BlockHeaderResponse\x12P\n\x0eGetLatestBlock\x12\".flow.access.GetLatestBlockRequest\x1a\x1a.flow.access.BlockResponse\x12L\n\x0cGetBlockByID\x12 .flow.access.GetBlockByIDRequest\x1a\x1a.flow.access.BlockResponse\x12T\n\x10GetBlockByHeight\x12$.flow.access.GetBlockByHeightRequest\x1a\x1a.flow.access.BlockResponse\x12[\n\x11GetCollectionByID\x12%.flow.access.GetCollectionByIDRequest\x1a\x1f.flow.access.CollectionResponse\x12\\\n\x0fSendTransaction\x12#.flow.access.SendTransactionRequest\x1a$.flow.access.SendTransactionResponse\x12V\n\x0eGetTransaction\x12\".flow.access.GetTransactionRequest\x1a .flow.access.TransactionResponse\x12\x62\n\x14GetTransactionResult\x12\".flow.access.GetTransactionRequest\x1a&.flow.access.TransactionResultResponse\x12M\n\nGetAccount\x12\x1e.flow.access.GetAccountRequest\x1a\x1f.flow.access.GetAccountResponse\x12\x64\n\x17GetAccountAtLatestBlock\x12+.flow.access.GetAccountAtLatestBlockRequest\x1a\x1c.flow.access.AccountResponse\x12\x64\n\x17GetAccountAtBlockHeight\x12+.flow.access.GetAccountAtBlockHeightRequest\x1a\x1c.flow.access.AccountResponse\x12p\n\x1a\x45xecuteScriptAtLatestBlock\x12..flow.access.ExecuteScriptAtLatestBlockRequest\x1a\".flow.access.ExecuteScriptResponse\x12h\n\x16\x45xecuteScriptAtBlockID\x12*.flow.access.ExecuteScriptAtBlockIDRequest\x1a\".flow.access.ExecuteScriptResponse\x12p\n\x1a\x45xecuteScriptAtBlockHeight\x12..flow.access.ExecuteScriptAtBlockHeightRequest\x1a\".flow.access.ExecuteScriptResponse\x12\x63\n\x17GetEventsForHeightRange\x12+.flow.access.GetEventsForHeightRangeRequest\x1a\x1b.flow.access.EventsResponse\x12]\n\x14GetEventsForBlockIDs\x12(.flow.access.GetEventsForBlockIDsRequest\x1a\x1b.flow.access.EventsResponse\x12k\n\x14GetNetworkParameters\x12(.flow.access.GetNe
tworkParametersRequest\x1a).flow.access.GetNetworkParametersResponse\x12\x80\x01\n\x1eGetLatestProtocolStateSnapshot\x12\x32.flow.access.GetLatestProtocolStateSnapshotRequest\x1a*.flow.access.ProtocolStateSnapshotResponseB\x08Z\x06\x61\x63\x63\x65ssb\x06proto3'
,
dependencies=[flow_dot_entities_dot_account__pb2.DESCRIPTOR,flow_dot_entities_dot_block__header__pb2.DESCRIPTOR,flow_dot_entities_dot_block__pb2.DESCRIPTOR,flow_dot_entities_dot_collection__pb2.DESCRIPTOR,flow_dot_entities_dot_event__pb2.DESCRIPTOR,flow_dot_entities_dot_transaction__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_PINGREQUEST = _descriptor.Descriptor(
name='PingRequest',
full_name='flow.access.PingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=257,
)
_PINGRESPONSE = _descriptor.Descriptor(
name='PingResponse',
full_name='flow.access.PingResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=273,
)
_GETLATESTBLOCKHEADERREQUEST = _descriptor.Descriptor(
name='GetLatestBlockHeaderRequest',
full_name='flow.access.GetLatestBlockHeaderRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='is_sealed', full_name='flow.access.GetLatestBlockHeaderRequest.is_sealed', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=323,
)
_GETBLOCKHEADERBYIDREQUEST = _descriptor.Descriptor(
name='GetBlockHeaderByIDRequest',
full_name='flow.access.GetBlockHeaderByIDRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.access.GetBlockHeaderByIDRequest.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=325,
serialized_end=364,
)
_GETBLOCKHEADERBYHEIGHTREQUEST = _descriptor.Descriptor(
name='GetBlockHeaderByHeightRequest',
full_name='flow.access.GetBlockHeaderByHeightRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='flow.access.GetBlockHeaderByHeightRequest.height', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=366,
serialized_end=413,
)
_BLOCKHEADERRESPONSE = _descriptor.Descriptor(
name='BlockHeaderResponse',
full_name='flow.access.BlockHeaderResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='block', full_name='flow.access.BlockHeaderResponse.block', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=415,
serialized_end=479,
)
_GETLATESTBLOCKREQUEST = _descriptor.Descriptor(
name='GetLatestBlockRequest',
full_name='flow.access.GetLatestBlockRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='is_sealed', full_name='flow.access.GetLatestBlockRequest.is_sealed', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=523,
)
_GETBLOCKBYIDREQUEST = _descriptor.Descriptor(
name='GetBlockByIDRequest',
full_name='flow.access.GetBlockByIDRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.access.GetBlockByIDRequest.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=525,
serialized_end=558,
)
_GETBLOCKBYHEIGHTREQUEST = _descriptor.Descriptor(
name='GetBlockByHeightRequest',
full_name='flow.access.GetBlockByHeightRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='flow.access.GetBlockByHeightRequest.height', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=560,
serialized_end=601,
)
_BLOCKRESPONSE = _descriptor.Descriptor(
name='BlockResponse',
full_name='flow.access.BlockResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='block', full_name='flow.access.BlockResponse.block', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=603,
serialized_end=655,
)
_GETCOLLECTIONBYIDREQUEST = _descriptor.Descriptor(
name='GetCollectionByIDRequest',
full_name='flow.access.GetCollectionByIDRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.access.GetCollectionByIDRequest.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=657,
serialized_end=695,
)
_COLLECTIONRESPONSE = _descriptor.Descriptor(
name='CollectionResponse',
full_name='flow.access.CollectionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='collection', full_name='flow.access.CollectionResponse.collection', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=697,
serialized_end=764,
)
_SENDTRANSACTIONREQUEST = _descriptor.Descriptor(
name='SendTransactionRequest',
full_name='flow.access.SendTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='flow.access.SendTransactionRequest.transaction', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=766,
serialized_end=839,
)
_SENDTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='SendTransactionResponse',
full_name='flow.access.SendTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.access.SendTransactionResponse.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=841,
serialized_end=878,
)
_GETTRANSACTIONREQUEST = _descriptor.Descriptor(
name='GetTransactionRequest',
full_name='flow.access.GetTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.access.GetTransactionRequest.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=880,
serialized_end=915,
)
_TRANSACTIONRESPONSE = _descriptor.Descriptor(
name='TransactionResponse',
full_name='flow.access.TransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='flow.access.TransactionResponse.transaction', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=917,
serialized_end=987,
)
_TRANSACTIONRESULTRESPONSE = _descriptor.Descriptor(
name='TransactionResultResponse',
full_name='flow.access.TransactionResultResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='flow.access.TransactionResultResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status_code', full_name='flow.access.TransactionResultResponse.status_code', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error_message', full_name='flow.access.TransactionResultResponse.error_message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='events', full_name='flow.access.TransactionResultResponse.events', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_id', full_name='flow.access.TransactionResultResponse.block_id', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=990,
serialized_end=1167,
)
_GETACCOUNTREQUEST = _descriptor.Descriptor(
name='GetAccountRequest',
full_name='flow.access.GetAccountRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='flow.access.GetAccountRequest.address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1169,
serialized_end=1205,
)
_GETACCOUNTRESPONSE = _descriptor.Descriptor(
name='GetAccountResponse',
full_name='flow.access.GetAccountResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account', full_name='flow.access.GetAccountResponse.account', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1207,
serialized_end=1268,
)
_GETACCOUNTATLATESTBLOCKREQUEST = _descriptor.Descriptor(
name='GetAccountAtLatestBlockRequest',
full_name='flow.access.GetAccountAtLatestBlockRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='flow.access.GetAccountAtLatestBlockRequest.address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1270,
serialized_end=1319,
)
_ACCOUNTRESPONSE = _descriptor.Descriptor(
name='AccountResponse',
full_name='flow.access.AccountResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account', full_name='flow.access.AccountResponse.account', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1321,
serialized_end=1379,
)
_GETACCOUNTATBLOCKHEIGHTREQUEST = _descriptor.Descriptor(
name='GetAccountAtBlockHeightRequest',
full_name='flow.access.GetAccountAtBlockHeightRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='flow.access.GetAccountAtBlockHeightRequest.address', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_height', full_name='flow.access.GetAccountAtBlockHeightRequest.block_height', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1381,
serialized_end=1452,
)
_EXECUTESCRIPTATLATESTBLOCKREQUEST = _descriptor.Descriptor(
name='ExecuteScriptAtLatestBlockRequest',
full_name='flow.access.ExecuteScriptAtLatestBlockRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='script', full_name='flow.access.ExecuteScriptAtLatestBlockRequest.script', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='arguments', full_name='flow.access.ExecuteScriptAtLatestBlockRequest.arguments', index=1,
number=2, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1454,
serialized_end=1524,
)
_EXECUTESCRIPTATBLOCKIDREQUEST = _descriptor.Descriptor(
name='ExecuteScriptAtBlockIDRequest',
full_name='flow.access.ExecuteScriptAtBlockIDRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='block_id', full_name='flow.access.ExecuteScriptAtBlockIDRequest.block_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='script', full_name='flow.access.ExecuteScriptAtBlockIDRequest.script', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='arguments', full_name='flow.access.ExecuteScriptAtBlockIDRequest.arguments', index=2,
number=3, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1526,
serialized_end=1610,
)
_EXECUTESCRIPTATBLOCKHEIGHTREQUEST = _descriptor.Descriptor(
name='ExecuteScriptAtBlockHeightRequest',
full_name='flow.access.ExecuteScriptAtBlockHeightRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='block_height', full_name='flow.access.ExecuteScriptAtBlockHeightRequest.block_height', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='script', full_name='flow.access.ExecuteScriptAtBlockHeightRequest.script', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='arguments', full_name='flow.access.ExecuteScriptAtBlockHeightRequest.arguments', index=2,
number=3, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1612,
serialized_end=1704,
)
_EXECUTESCRIPTRESPONSE = _descriptor.Descriptor(
name='ExecuteScriptResponse',
full_name='flow.access.ExecuteScriptResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='flow.access.ExecuteScriptResponse.value', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1706,
serialized_end=1744,
)
_GETEVENTSFORHEIGHTRANGEREQUEST = _descriptor.Descriptor(
name='GetEventsForHeightRangeRequest',
full_name='flow.access.GetEventsForHeightRangeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.access.GetEventsForHeightRangeRequest.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_height', full_name='flow.access.GetEventsForHeightRangeRequest.start_height', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_height', full_name='flow.access.GetEventsForHeightRangeRequest.end_height', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1746,
serialized_end=1834,
)
_GETEVENTSFORBLOCKIDSREQUEST = _descriptor.Descriptor(
name='GetEventsForBlockIDsRequest',
full_name='flow.access.GetEventsForBlockIDsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.access.GetEventsForBlockIDsRequest.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_ids', full_name='flow.access.GetEventsForBlockIDsRequest.block_ids', index=1,
number=2, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1836,
serialized_end=1898,
)
_EVENTSRESPONSE_RESULT = _descriptor.Descriptor(
name='Result',
full_name='flow.access.EventsResponse.Result',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='block_id', full_name='flow.access.EventsResponse.Result.block_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_height', full_name='flow.access.EventsResponse.Result.block_height', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='events', full_name='flow.access.EventsResponse.Result.events', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='block_timestamp', full_name='flow.access.EventsResponse.Result.block_timestamp', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1973,
serialized_end=2112,
)
_EVENTSRESPONSE = _descriptor.Descriptor(
name='EventsResponse',
full_name='flow.access.EventsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='flow.access.EventsResponse.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_EVENTSRESPONSE_RESULT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1901,
serialized_end=2112,
)
_GETNETWORKPARAMETERSREQUEST = _descriptor.Descriptor(
name='GetNetworkParametersRequest',
full_name='flow.access.GetNetworkParametersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2114,
serialized_end=2143,
)
_GETNETWORKPARAMETERSRESPONSE = _descriptor.Descriptor(
name='GetNetworkParametersResponse',
full_name='flow.access.GetNetworkParametersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='chain_id', full_name='flow.access.GetNetworkParametersResponse.chain_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2145,
serialized_end=2193,
)
_GETLATESTPROTOCOLSTATESNAPSHOTREQUEST = _descriptor.Descriptor(
name='GetLatestProtocolStateSnapshotRequest',
full_name='flow.access.GetLatestProtocolStateSnapshotRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2195,
serialized_end=2234,
)
_PROTOCOLSTATESNAPSHOTRESPONSE = _descriptor.Descriptor(
name='ProtocolStateSnapshotResponse',
full_name='flow.access.ProtocolStateSnapshotResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='serializedSnapshot', full_name='flow.access.ProtocolStateSnapshotResponse.serializedSnapshot', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2236,
serialized_end=2295,
)
_BLOCKHEADERRESPONSE.fields_by_name['block'].message_type = flow_dot_entities_dot_block__header__pb2._BLOCKHEADER
_BLOCKRESPONSE.fields_by_name['block'].message_type = flow_dot_entities_dot_block__pb2._BLOCK
_COLLECTIONRESPONSE.fields_by_name['collection'].message_type = flow_dot_entities_dot_collection__pb2._COLLECTION
_SENDTRANSACTIONREQUEST.fields_by_name['transaction'].message_type = flow_dot_entities_dot_transaction__pb2._TRANSACTION
_TRANSACTIONRESPONSE.fields_by_name['transaction'].message_type = flow_dot_entities_dot_transaction__pb2._TRANSACTION
_TRANSACTIONRESULTRESPONSE.fields_by_name['status'].enum_type = flow_dot_entities_dot_transaction__pb2._TRANSACTIONSTATUS
_TRANSACTIONRESULTRESPONSE.fields_by_name['events'].message_type = flow_dot_entities_dot_event__pb2._EVENT
_GETACCOUNTRESPONSE.fields_by_name['account'].message_type = flow_dot_entities_dot_account__pb2._ACCOUNT
_ACCOUNTRESPONSE.fields_by_name['account'].message_type = flow_dot_entities_dot_account__pb2._ACCOUNT
_EVENTSRESPONSE_RESULT.fields_by_name['events'].message_type = flow_dot_entities_dot_event__pb2._EVENT
_EVENTSRESPONSE_RESULT.fields_by_name['block_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EVENTSRESPONSE_RESULT.containing_type = _EVENTSRESPONSE
_EVENTSRESPONSE.fields_by_name['results'].message_type = _EVENTSRESPONSE_RESULT
DESCRIPTOR.message_types_by_name['PingRequest'] = _PINGREQUEST
DESCRIPTOR.message_types_by_name['PingResponse'] = _PINGRESPONSE
DESCRIPTOR.message_types_by_name['GetLatestBlockHeaderRequest'] = _GETLATESTBLOCKHEADERREQUEST
DESCRIPTOR.message_types_by_name['GetBlockHeaderByIDRequest'] = _GETBLOCKHEADERBYIDREQUEST
DESCRIPTOR.message_types_by_name['GetBlockHeaderByHeightRequest'] = _GETBLOCKHEADERBYHEIGHTREQUEST
DESCRIPTOR.message_types_by_name['BlockHeaderResponse'] = _BLOCKHEADERRESPONSE
DESCRIPTOR.message_types_by_name['GetLatestBlockRequest'] = _GETLATESTBLOCKREQUEST
DESCRIPTOR.message_types_by_name['GetBlockByIDRequest'] = _GETBLOCKBYIDREQUEST
DESCRIPTOR.message_types_by_name['GetBlockByHeightRequest'] = _GETBLOCKBYHEIGHTREQUEST
DESCRIPTOR.message_types_by_name['BlockResponse'] = _BLOCKRESPONSE
DESCRIPTOR.message_types_by_name['GetCollectionByIDRequest'] = _GETCOLLECTIONBYIDREQUEST
DESCRIPTOR.message_types_by_name['CollectionResponse'] = _COLLECTIONRESPONSE
DESCRIPTOR.message_types_by_name['SendTransactionRequest'] = _SENDTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SendTransactionResponse'] = _SENDTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetTransactionRequest'] = _GETTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['TransactionResponse'] = _TRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['TransactionResultResponse'] = _TRANSACTIONRESULTRESPONSE
DESCRIPTOR.message_types_by_name['GetAccountRequest'] = _GETACCOUNTREQUEST
DESCRIPTOR.message_types_by_name['GetAccountResponse'] = _GETACCOUNTRESPONSE
DESCRIPTOR.message_types_by_name['GetAccountAtLatestBlockRequest'] = _GETACCOUNTATLATESTBLOCKREQUEST
DESCRIPTOR.message_types_by_name['AccountResponse'] = _ACCOUNTRESPONSE
DESCRIPTOR.message_types_by_name['GetAccountAtBlockHeightRequest'] = _GETACCOUNTATBLOCKHEIGHTREQUEST
DESCRIPTOR.message_types_by_name['ExecuteScriptAtLatestBlockRequest'] = _EXECUTESCRIPTATLATESTBLOCKREQUEST
DESCRIPTOR.message_types_by_name['ExecuteScriptAtBlockIDRequest'] = _EXECUTESCRIPTATBLOCKIDREQUEST
DESCRIPTOR.message_types_by_name['ExecuteScriptAtBlockHeightRequest'] = _EXECUTESCRIPTATBLOCKHEIGHTREQUEST
DESCRIPTOR.message_types_by_name['ExecuteScriptResponse'] = _EXECUTESCRIPTRESPONSE
DESCRIPTOR.message_types_by_name['GetEventsForHeightRangeRequest'] = _GETEVENTSFORHEIGHTRANGEREQUEST
DESCRIPTOR.message_types_by_name['GetEventsForBlockIDsRequest'] = _GETEVENTSFORBLOCKIDSREQUEST
DESCRIPTOR.message_types_by_name['EventsResponse'] = _EVENTSRESPONSE
DESCRIPTOR.message_types_by_name['GetNetworkParametersRequest'] = _GETNETWORKPARAMETERSREQUEST
DESCRIPTOR.message_types_by_name['GetNetworkParametersResponse'] = _GETNETWORKPARAMETERSRESPONSE
DESCRIPTOR.message_types_by_name['GetLatestProtocolStateSnapshotRequest'] = _GETLATESTPROTOCOLSTATESNAPSHOTREQUEST
DESCRIPTOR.message_types_by_name['ProtocolStateSnapshotResponse'] = _PROTOCOLSTATESNAPSHOTRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PingRequest = _reflection.GeneratedProtocolMessageType('PingRequest', (_message.Message,), {
'DESCRIPTOR' : _PINGREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.PingRequest)
})
_sym_db.RegisterMessage(PingRequest)
PingResponse = _reflection.GeneratedProtocolMessageType('PingResponse', (_message.Message,), {
'DESCRIPTOR' : _PINGRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.PingResponse)
})
_sym_db.RegisterMessage(PingResponse)
GetLatestBlockHeaderRequest = _reflection.GeneratedProtocolMessageType('GetLatestBlockHeaderRequest', (_message.Message,), {
'DESCRIPTOR' : _GETLATESTBLOCKHEADERREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetLatestBlockHeaderRequest)
})
_sym_db.RegisterMessage(GetLatestBlockHeaderRequest)
GetBlockHeaderByIDRequest = _reflection.GeneratedProtocolMessageType('GetBlockHeaderByIDRequest', (_message.Message,), {
'DESCRIPTOR' : _GETBLOCKHEADERBYIDREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetBlockHeaderByIDRequest)
})
_sym_db.RegisterMessage(GetBlockHeaderByIDRequest)
GetBlockHeaderByHeightRequest = _reflection.GeneratedProtocolMessageType('GetBlockHeaderByHeightRequest', (_message.Message,), {
'DESCRIPTOR' : _GETBLOCKHEADERBYHEIGHTREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetBlockHeaderByHeightRequest)
})
_sym_db.RegisterMessage(GetBlockHeaderByHeightRequest)
BlockHeaderResponse = _reflection.GeneratedProtocolMessageType('BlockHeaderResponse', (_message.Message,), {
'DESCRIPTOR' : _BLOCKHEADERRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.BlockHeaderResponse)
})
_sym_db.RegisterMessage(BlockHeaderResponse)
GetLatestBlockRequest = _reflection.GeneratedProtocolMessageType('GetLatestBlockRequest', (_message.Message,), {
'DESCRIPTOR' : _GETLATESTBLOCKREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetLatestBlockRequest)
})
_sym_db.RegisterMessage(GetLatestBlockRequest)
GetBlockByIDRequest = _reflection.GeneratedProtocolMessageType('GetBlockByIDRequest', (_message.Message,), {
'DESCRIPTOR' : _GETBLOCKBYIDREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetBlockByIDRequest)
})
_sym_db.RegisterMessage(GetBlockByIDRequest)
GetBlockByHeightRequest = _reflection.GeneratedProtocolMessageType('GetBlockByHeightRequest', (_message.Message,), {
'DESCRIPTOR' : _GETBLOCKBYHEIGHTREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetBlockByHeightRequest)
})
_sym_db.RegisterMessage(GetBlockByHeightRequest)
BlockResponse = _reflection.GeneratedProtocolMessageType('BlockResponse', (_message.Message,), {
'DESCRIPTOR' : _BLOCKRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.BlockResponse)
})
_sym_db.RegisterMessage(BlockResponse)
GetCollectionByIDRequest = _reflection.GeneratedProtocolMessageType('GetCollectionByIDRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCOLLECTIONBYIDREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetCollectionByIDRequest)
})
_sym_db.RegisterMessage(GetCollectionByIDRequest)
CollectionResponse = _reflection.GeneratedProtocolMessageType('CollectionResponse', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.CollectionResponse)
})
_sym_db.RegisterMessage(CollectionResponse)
SendTransactionRequest = _reflection.GeneratedProtocolMessageType('SendTransactionRequest', (_message.Message,), {
'DESCRIPTOR' : _SENDTRANSACTIONREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.SendTransactionRequest)
})
_sym_db.RegisterMessage(SendTransactionRequest)
SendTransactionResponse = _reflection.GeneratedProtocolMessageType('SendTransactionResponse', (_message.Message,), {
'DESCRIPTOR' : _SENDTRANSACTIONRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.SendTransactionResponse)
})
_sym_db.RegisterMessage(SendTransactionResponse)
GetTransactionRequest = _reflection.GeneratedProtocolMessageType('GetTransactionRequest', (_message.Message,), {
'DESCRIPTOR' : _GETTRANSACTIONREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetTransactionRequest)
})
_sym_db.RegisterMessage(GetTransactionRequest)
TransactionResponse = _reflection.GeneratedProtocolMessageType('TransactionResponse', (_message.Message,), {
'DESCRIPTOR' : _TRANSACTIONRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.TransactionResponse)
})
_sym_db.RegisterMessage(TransactionResponse)
TransactionResultResponse = _reflection.GeneratedProtocolMessageType('TransactionResultResponse', (_message.Message,), {
'DESCRIPTOR' : _TRANSACTIONRESULTRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.TransactionResultResponse)
})
_sym_db.RegisterMessage(TransactionResultResponse)
GetAccountRequest = _reflection.GeneratedProtocolMessageType('GetAccountRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetAccountRequest)
})
_sym_db.RegisterMessage(GetAccountRequest)
GetAccountResponse = _reflection.GeneratedProtocolMessageType('GetAccountResponse', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetAccountResponse)
})
_sym_db.RegisterMessage(GetAccountResponse)
GetAccountAtLatestBlockRequest = _reflection.GeneratedProtocolMessageType('GetAccountAtLatestBlockRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTATLATESTBLOCKREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetAccountAtLatestBlockRequest)
})
_sym_db.RegisterMessage(GetAccountAtLatestBlockRequest)
AccountResponse = _reflection.GeneratedProtocolMessageType('AccountResponse', (_message.Message,), {
'DESCRIPTOR' : _ACCOUNTRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.AccountResponse)
})
_sym_db.RegisterMessage(AccountResponse)
GetAccountAtBlockHeightRequest = _reflection.GeneratedProtocolMessageType('GetAccountAtBlockHeightRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTATBLOCKHEIGHTREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetAccountAtBlockHeightRequest)
})
_sym_db.RegisterMessage(GetAccountAtBlockHeightRequest)
ExecuteScriptAtLatestBlockRequest = _reflection.GeneratedProtocolMessageType('ExecuteScriptAtLatestBlockRequest', (_message.Message,), {
'DESCRIPTOR' : _EXECUTESCRIPTATLATESTBLOCKREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.ExecuteScriptAtLatestBlockRequest)
})
_sym_db.RegisterMessage(ExecuteScriptAtLatestBlockRequest)
ExecuteScriptAtBlockIDRequest = _reflection.GeneratedProtocolMessageType('ExecuteScriptAtBlockIDRequest', (_message.Message,), {
'DESCRIPTOR' : _EXECUTESCRIPTATBLOCKIDREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.ExecuteScriptAtBlockIDRequest)
})
_sym_db.RegisterMessage(ExecuteScriptAtBlockIDRequest)
ExecuteScriptAtBlockHeightRequest = _reflection.GeneratedProtocolMessageType('ExecuteScriptAtBlockHeightRequest', (_message.Message,), {
'DESCRIPTOR' : _EXECUTESCRIPTATBLOCKHEIGHTREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.ExecuteScriptAtBlockHeightRequest)
})
_sym_db.RegisterMessage(ExecuteScriptAtBlockHeightRequest)
ExecuteScriptResponse = _reflection.GeneratedProtocolMessageType('ExecuteScriptResponse', (_message.Message,), {
'DESCRIPTOR' : _EXECUTESCRIPTRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.ExecuteScriptResponse)
})
_sym_db.RegisterMessage(ExecuteScriptResponse)
GetEventsForHeightRangeRequest = _reflection.GeneratedProtocolMessageType('GetEventsForHeightRangeRequest', (_message.Message,), {
'DESCRIPTOR' : _GETEVENTSFORHEIGHTRANGEREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetEventsForHeightRangeRequest)
})
_sym_db.RegisterMessage(GetEventsForHeightRangeRequest)
GetEventsForBlockIDsRequest = _reflection.GeneratedProtocolMessageType('GetEventsForBlockIDsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETEVENTSFORBLOCKIDSREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetEventsForBlockIDsRequest)
})
_sym_db.RegisterMessage(GetEventsForBlockIDsRequest)
EventsResponse = _reflection.GeneratedProtocolMessageType('EventsResponse', (_message.Message,), {
'Result' : _reflection.GeneratedProtocolMessageType('Result', (_message.Message,), {
'DESCRIPTOR' : _EVENTSRESPONSE_RESULT,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.EventsResponse.Result)
})
,
'DESCRIPTOR' : _EVENTSRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.EventsResponse)
})
_sym_db.RegisterMessage(EventsResponse)
_sym_db.RegisterMessage(EventsResponse.Result)
GetNetworkParametersRequest = _reflection.GeneratedProtocolMessageType('GetNetworkParametersRequest', (_message.Message,), {
'DESCRIPTOR' : _GETNETWORKPARAMETERSREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetNetworkParametersRequest)
})
_sym_db.RegisterMessage(GetNetworkParametersRequest)
GetNetworkParametersResponse = _reflection.GeneratedProtocolMessageType('GetNetworkParametersResponse', (_message.Message,), {
'DESCRIPTOR' : _GETNETWORKPARAMETERSRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetNetworkParametersResponse)
})
_sym_db.RegisterMessage(GetNetworkParametersResponse)
GetLatestProtocolStateSnapshotRequest = _reflection.GeneratedProtocolMessageType('GetLatestProtocolStateSnapshotRequest', (_message.Message,), {
'DESCRIPTOR' : _GETLATESTPROTOCOLSTATESNAPSHOTREQUEST,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.GetLatestProtocolStateSnapshotRequest)
})
_sym_db.RegisterMessage(GetLatestProtocolStateSnapshotRequest)
ProtocolStateSnapshotResponse = _reflection.GeneratedProtocolMessageType('ProtocolStateSnapshotResponse', (_message.Message,), {
'DESCRIPTOR' : _PROTOCOLSTATESNAPSHOTRESPONSE,
'__module__' : 'access_pb2'
# @@protoc_insertion_point(class_scope:flow.access.ProtocolStateSnapshotResponse)
})
_sym_db.RegisterMessage(ProtocolStateSnapshotResponse)
DESCRIPTOR._options = None
_ACCESSAPI = _descriptor.ServiceDescriptor(
name='AccessAPI',
full_name='flow.access.AccessAPI',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2298,
serialized_end=4344,
methods=[
_descriptor.MethodDescriptor(
name='Ping',
full_name='flow.access.AccessAPI.Ping',
index=0,
containing_service=None,
input_type=_PINGREQUEST,
output_type=_PINGRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetLatestBlockHeader',
full_name='flow.access.AccessAPI.GetLatestBlockHeader',
index=1,
containing_service=None,
input_type=_GETLATESTBLOCKHEADERREQUEST,
output_type=_BLOCKHEADERRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetBlockHeaderByID',
full_name='flow.access.AccessAPI.GetBlockHeaderByID',
index=2,
containing_service=None,
input_type=_GETBLOCKHEADERBYIDREQUEST,
output_type=_BLOCKHEADERRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetBlockHeaderByHeight',
full_name='flow.access.AccessAPI.GetBlockHeaderByHeight',
index=3,
containing_service=None,
input_type=_GETBLOCKHEADERBYHEIGHTREQUEST,
output_type=_BLOCKHEADERRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetLatestBlock',
full_name='flow.access.AccessAPI.GetLatestBlock',
index=4,
containing_service=None,
input_type=_GETLATESTBLOCKREQUEST,
output_type=_BLOCKRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetBlockByID',
full_name='flow.access.AccessAPI.GetBlockByID',
index=5,
containing_service=None,
input_type=_GETBLOCKBYIDREQUEST,
output_type=_BLOCKRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetBlockByHeight',
full_name='flow.access.AccessAPI.GetBlockByHeight',
index=6,
containing_service=None,
input_type=_GETBLOCKBYHEIGHTREQUEST,
output_type=_BLOCKRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetCollectionByID',
full_name='flow.access.AccessAPI.GetCollectionByID',
index=7,
containing_service=None,
input_type=_GETCOLLECTIONBYIDREQUEST,
output_type=_COLLECTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='SendTransaction',
full_name='flow.access.AccessAPI.SendTransaction',
index=8,
containing_service=None,
input_type=_SENDTRANSACTIONREQUEST,
output_type=_SENDTRANSACTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetTransaction',
full_name='flow.access.AccessAPI.GetTransaction',
index=9,
containing_service=None,
input_type=_GETTRANSACTIONREQUEST,
output_type=_TRANSACTIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetTransactionResult',
full_name='flow.access.AccessAPI.GetTransactionResult',
index=10,
containing_service=None,
input_type=_GETTRANSACTIONREQUEST,
output_type=_TRANSACTIONRESULTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetAccount',
full_name='flow.access.AccessAPI.GetAccount',
index=11,
containing_service=None,
input_type=_GETACCOUNTREQUEST,
output_type=_GETACCOUNTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetAccountAtLatestBlock',
full_name='flow.access.AccessAPI.GetAccountAtLatestBlock',
index=12,
containing_service=None,
input_type=_GETACCOUNTATLATESTBLOCKREQUEST,
output_type=_ACCOUNTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetAccountAtBlockHeight',
full_name='flow.access.AccessAPI.GetAccountAtBlockHeight',
index=13,
containing_service=None,
input_type=_GETACCOUNTATBLOCKHEIGHTREQUEST,
output_type=_ACCOUNTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ExecuteScriptAtLatestBlock',
full_name='flow.access.AccessAPI.ExecuteScriptAtLatestBlock',
index=14,
containing_service=None,
input_type=_EXECUTESCRIPTATLATESTBLOCKREQUEST,
output_type=_EXECUTESCRIPTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ExecuteScriptAtBlockID',
full_name='flow.access.AccessAPI.ExecuteScriptAtBlockID',
index=15,
containing_service=None,
input_type=_EXECUTESCRIPTATBLOCKIDREQUEST,
output_type=_EXECUTESCRIPTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ExecuteScriptAtBlockHeight',
full_name='flow.access.AccessAPI.ExecuteScriptAtBlockHeight',
index=16,
containing_service=None,
input_type=_EXECUTESCRIPTATBLOCKHEIGHTREQUEST,
output_type=_EXECUTESCRIPTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetEventsForHeightRange',
full_name='flow.access.AccessAPI.GetEventsForHeightRange',
index=17,
containing_service=None,
input_type=_GETEVENTSFORHEIGHTRANGEREQUEST,
output_type=_EVENTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetEventsForBlockIDs',
full_name='flow.access.AccessAPI.GetEventsForBlockIDs',
index=18,
containing_service=None,
input_type=_GETEVENTSFORBLOCKIDSREQUEST,
output_type=_EVENTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetNetworkParameters',
full_name='flow.access.AccessAPI.GetNetworkParameters',
index=19,
containing_service=None,
input_type=_GETNETWORKPARAMETERSREQUEST,
output_type=_GETNETWORKPARAMETERSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetLatestProtocolStateSnapshot',
full_name='flow.access.AccessAPI.GetLatestProtocolStateSnapshot',
index=20,
containing_service=None,
input_type=_GETLATESTPROTOCOLSTATESNAPSHOTREQUEST,
output_type=_PROTOCOLSTATESNAPSHOTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ACCESSAPI)
DESCRIPTOR.services_by_name['AccessAPI'] = _ACCESSAPI
# @@protoc_insertion_point(module_scope)
| [
"jonathansaewitz@gmail.com"
] | jonathansaewitz@gmail.com |
82a16cd345d6ca544ea367fa613b86c7f22ffdc1 | afafaa82a058a3ac1d3721039a11e587278bc80b | /script/plot_condition_numbers.py | b1f2c2ccefdab57544cf7d8851cff68ddbec1b06 | [
"BSD-3-Clause"
] | permissive | tonymcdaniel/sfepy | 24ec0b84bd0ee94ac3935ce01a25db5e6574110a | b7a70547515c6b0faf642dcc127841b782a51200 | refs/heads/master | 2021-01-15T20:13:28.735206 | 2012-07-23T14:33:32 | 2012-07-23T15:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,961 | py | #!/usr/bin/env python
"""
Plot conditions numbers w.r.t. polynomial approximation order of reference
element matrices for various FE polynomial spaces (bases).
"""
from optparse import OptionParser
import time
import numpy as nm
import matplotlib.pyplot as plt
from sfepy import data_dir
from sfepy.base.base import output, assert_
from sfepy.fem import Mesh, Domain, Field, FieldVariable, Material, Integral
from sfepy.terms import Term
from sfepy.solvers import eig
usage = '%prog [options]\n' + __doc__.rstrip()
help = {
'basis' :
'name of the FE basis [default: %default]',
'max_order' :
'maximum order of polynomials [default: %default]',
'matrix_type' :
'matrix type, one of "elasticity", "laplace" [default: %default]',
'geometry' :
'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"'
' [default: %default]',
}
def main():
parser = OptionParser(usage=usage, version='%prog')
parser.add_option('-b', '--basis', metavar='name',
action='store', dest='basis',
default='lagrange', help=help['basis'])
parser.add_option('-n', '--max-order', metavar='order', type=int,
action='store', dest='max_order',
default=10, help=help['max_order'])
parser.add_option('-m', '--matrix', metavar='type',
action='store', dest='matrix_type',
default='laplace', help=help['matrix_type'])
parser.add_option('-g', '--geometry', metavar='name',
action='store', dest='geometry',
default='2_4', help=help['geometry'])
options, args = parser.parse_args()
dim, n_ep = int(options.geometry[0]), int(options.geometry[2])
output('reference element geometry:')
output(' dimension: %d, vertices: %d' % (dim, n_ep))
n_c = {'laplace' : 1, 'elasticity' : dim}[options.matrix_type]
output('matrix type:', options.matrix_type)
output('number of variable components:', n_c)
output('polynomial space:', options.basis)
output('max. order:', options.max_order)
mesh = Mesh.from_file(data_dir + '/meshes/elements/%s_1.mesh'
% options.geometry)
domain = Domain('domain', mesh)
omega = domain.create_region('Omega', 'all')
orders = nm.arange(1, options.max_order + 1, dtype=nm.int)
conds = []
order_fix = 0 if options.geometry in ['2_4', '3_8'] else 1
for order in orders:
output('order:', order, '...')
field = Field('fu', nm.float64, n_c, omega,
space='H1', poly_space_base=options.basis,
approx_order=order)
to = field.approx_order
quad_order = 2 * (max(to - order_fix, 0))
output('quadrature order:', quad_order)
u = FieldVariable('u', 'unknown', field, n_c)
v = FieldVariable('v', 'test', field, n_c, primary_var_name='u')
m = Material('m', lam=1.0, mu=1.0)
integral = Integral('i', order=quad_order)
if options.matrix_type == 'laplace':
term = Term.new('dw_laplace(m.mu, v, u)',
integral, omega, m=m, v=v, u=u)
n_zero = 1
else:
assert_(options.matrix_type == 'elasticity')
term = Term.new('dw_lin_elastic_iso(m.lam, m.mu, v, u)',
integral, omega, m=m, v=v, u=u)
n_zero = (dim + 1) * dim / 2
term.setup()
output('assembling...')
tt = time.clock()
mtx, iels = term.evaluate(mode='weak', diff_var='u')
output('...done in %.2f s' % (time.clock() - tt))
mtx = mtx[0][0, 0]
try:
assert_(nm.max(nm.abs(mtx - mtx.T)) < 1e-10)
except:
from sfepy.base.base import debug; debug()
output('matrix shape:', mtx.shape)
eigs = eig(mtx, method='eig.sgscipy', eigenvectors=False)
eigs.sort()
# Zero 'true' zeros.
eigs[:n_zero] = 0.0
ii = nm.where(eigs < 0.0)[0]
if len(ii):
output('matrix is not positive semi-definite!')
ii = nm.where(eigs[n_zero:] < 1e-12)[0]
if len(ii):
output('matrix has more than %d zero eigenvalues!' % n_zero)
output('smallest eigs:\n', eigs[:10])
ii = nm.where(eigs > 0.0)[0]
emin, emax = eigs[ii[[0, -1]]]
output('min:', emin, 'max:', emax)
cond = emax / emin
conds.append(cond)
output('condition number:', cond)
output('...done')
plt.figure(1)
plt.semilogy(orders, conds)
plt.xticks(orders, orders)
plt.xlabel('polynomial order')
plt.ylabel('condition number')
plt.grid()
plt.figure(2)
plt.loglog(orders, conds)
plt.xticks(orders, orders)
plt.xlabel('polynomial order')
plt.ylabel('condition number')
plt.grid()
plt.show()
if __name__ == '__main__':
main()
| [
"cimrman3@ntc.zcu.cz"
] | cimrman3@ntc.zcu.cz |
49ba897073a99a01573464ac331ed88f9c6d4f09 | 0291e9e3abcac5fc524af184cfff7fbddb7bb548 | /project_base/contact/migrations/0001_initial.py | f3072939b984db72d96babed879d9d052ce3462e | [] | no_license | anwar03/Ecommerce | 1e34bec1fc4ac3793efb3cb7c69ab7c015c1bbb8 | 250bef57b48bc900cf572ee9a5fbaa45ac29af1a | refs/heads/master | 2022-12-14T23:26:39.844120 | 2019-08-15T13:21:13 | 2019-08-15T13:21:13 | 201,460,949 | 0 | 0 | null | 2022-12-04T07:35:29 | 2019-08-09T12:12:46 | Python | UTF-8 | Python | false | false | 2,883 | py | # Generated by Django 2.2.4 on 2019-08-09 20:12
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='email')),
('mobile', models.CharField(blank=True, max_length=20, null=True, validators=[django.core.validators.RegexValidator(message='Enter your valid contact number.', regex='(^[+0-9]{1,3})*([0-9]{10,11}$)')], verbose_name='mobile')),
('telephone', models.CharField(blank=True, max_length=20, null=True, validators=[django.core.validators.RegexValidator(message='Enter your valid contact number.', regex='(^[+0-9]{1,3})*([0-9]{10,11}$)')], verbose_name='telephone')),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='name')),
('country_code2', models.CharField(blank=True, max_length=2, null=True, verbose_name='country code')),
('dialing_code', models.CharField(blank=True, max_length=5, null=True, verbose_name='dialing code')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('line1', models.CharField(blank=True, max_length=255, null=True, verbose_name='address line1')),
('line2', models.CharField(blank=True, max_length=255, null=True, verbose_name='address line2')),
('zip', models.CharField(blank=True, max_length=20, null=True, verbose_name='zip')),
('state', models.CharField(blank=True, max_length=100, null=True, verbose_name='state')),
('city', models.CharField(blank=True, max_length=100, null=True, verbose_name='city')),
('lat', models.DecimalField(blank=True, decimal_places=6, max_digits=9, null=True, verbose_name='latitude')),
('lng', models.DecimalField(blank=True, decimal_places=6, max_digits=9, null=True, verbose_name='longitude')),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='contact.Country', verbose_name='country')),
],
),
]
| [
"anwarazim03@gmail.com"
] | anwarazim03@gmail.com |
471644494010bd6231f991eb1d71af66b413f1f8 | b3114d6f20d066d403bd9b0b290fc3ebef2999a1 | /core/Red9_Meta.py | 680db54cf41af320c40b1a10f75bdee3abf2b7a4 | [
"BSD-3-Clause"
] | permissive | MaxSteven/Red9_StudioPack | f50491fd39def2a8730c799bc88dfa8a68d82d42 | 8dbad05b1a761cfadc86b1a07696dc10839780bc | refs/heads/master | 2021-05-28T05:48:42.845756 | 2014-07-06T12:48:04 | 2014-07-06T12:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121,274 | py | '''
..
Red9 Studio Pack: Maya Pipeline Solutions
Author: Mark Jackson
email: rednineinfo@gmail.com
Red9 blog : http://red9-consultancy.blogspot.co.uk/
MarkJ blog: http://markj3d.blogspot.co.uk
This is the Core of the MetaNode implementation of the systems.
NOTE: if you're inheriting from 'MetaClass' in your own class you
need to make sure that the registerMClassInheritanceMapping() is called
such that the global RED9_META_REGISTERY is rebuilt and includes
your inherited class.
Basic MetaClass Use:
--------------------
Now moved to the examples folder for more detailed explanations
- *Red9/examples/MetaData_Getting_started.py*
- *Red9/examples/MetaRig_Morpheus.py*
Also see the unittesting folder to see what the code can do and
what each function is expected to return
- *Red9/tests*
'''
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMaya as OpenMaya
from functools import partial
from functools import wraps
import sys
import os
import Red9_General as r9General
import Red9.startup.setup as r9Setup
from Red9_AnimationUtils import MirrorHierarchy
'''
#=============================================
NOTE: we can't import anything else here that imports this
module, as doing so screws up the Class Registry and creates
cyclic imports; hence r9Anim is lazy-loaded where needed
import Red9_AnimationUtils as r9Anim
#=============================================
'''
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# json is optional: it's missing from the Python2.5 interpreter that ships
# with Maya2009, so catch only the ImportError rather than a bare except
# which would also hide unrelated boot failures.
try:
    import json as json
except ImportError:
    #Meta Fails under Maya2009 because of Python2.5 issues
    log.warning('json is not supported in Python2.5')
    #import Red9.packages.simplejson as json
'''
CRUCIAL - REGISTER INHERITED CLASSES! ==============================================
Register available MetaClass's to a global so that other modules could externally
extend the functionality and use the base MetaClass. Note we're building this up
from only those active Python classes who inherit from MetaClass
global RED9_META_REGISTERY
====================================================================================
'''
def registerMClassInheritanceMapping():
    '''
    Rebuild the global RED9_META_REGISTERY from scratch, mapping class name
    strings to every currently loaded subclass of MetaClass (plus the base
    MetaClass itself). Must be re-run whenever new subclasses are declared.
    '''
    global RED9_META_REGISTERY
    RED9_META_REGISTERY = {'MetaClass': MetaClass}
    for subcls in r9General.itersubclasses(MetaClass):
        log.debug('registering : %s' % subcls)
        RED9_META_REGISTERY[subcls.__name__] = subcls
def printSubClassRegistry():
for m in RED9_META_REGISTERY:
print m
def getMClassMetaRegistry():
    '''
    Generic get wrapper: return the global RED9_META_REGISTERY dict of
    {className : class} mappings.
    '''
    return RED9_META_REGISTERY
def getMClassInstances(mInstances):
    '''
    return a list of Registered metaClasses that are subclassed from the given
    classes. This is so in code/UI's you can group metaClasses by their
    inheritance, ie, give me all export metaClasses that are registered

    :param mInstances: given metaClass(es) to test inheritance against - cls or [cls]
    :return: list of registered class objects that subclass any of the given bases
    '''
    if not isinstance(mInstances, list):
        mInstances = [mInstances]
    subClasses = []
    for mClass in RED9_META_REGISTERY.values():
        # use any() so a class inheriting from several of the given bases
        # is only appended once (the old nested loop produced duplicates)
        if any(issubclass(mClass, instance) for instance in mInstances):
            subClasses.append(mClass)
    return subClasses
def mTypesToRegistryKey(mTypes):
    '''
    Normalize to a list of registry key strings. Accepts class objects or
    strings (or a mixed list of both) so callers can pass either 'MetaRig'
    or r9Meta.MetaRig interchangeably for type checking against the registry.

    :param mTypes: class, string, or list of either
    :return: list of class name strings
    '''
    if not isinstance(mTypes, list):
        mTypes = [mTypes]
    keys = []
    for cls in mTypes:
        try:
            keys.append(cls.__name__)
        except AttributeError:
            # a plain string key was passed in rather than a class object
            keys.append(cls)
    return keys
def getMClassDataFromNode(node):
    '''
    Resolve which mClass key a given Maya node maps to. First look for an
    explicit 'mClass' attr on the node; failing that, fall back to an
    implicit 'Meta<nodeType>' name if such a class is registered.
    '''
    if cmds.attributeQuery('mClass', exists=True, node=node):
        return cmds.getAttr('%s.%s' % (node, 'mClass'))
    implicitClass = 'Meta%s' % cmds.nodeType(node)
    if implicitClass in RED9_META_REGISTERY.keys():
        return implicitClass
# NodeType Management ---------------------------
def registerMClassNodeMapping(nodeTypes=None):
'''
Hook to allow you to extend the type of nodes included in all the
getMeta searches. Allows you to expand into using nodes of any type
as metaNodes
:param nodeTypes: allows you to expand metaData and use any nodeType
default is always 'network'
.. note::
this now validates 'nodeTypes' against Maya registered nodeTypes before being
allowed into the registry. Why, well lets say you have a new nodeType from a
plugin but that plugin isn't currently loaded, this now stops that type being
generically added by any custom boot sequence.
'''
baseTypes=['network','objectSet','HIKCharacterNode','HIKControlSetNode']
global RED9_META_NODETYPE_REGISTERY
if nodeTypes and RED9_META_NODETYPE_REGISTERY:
baseTypes = RED9_META_NODETYPE_REGISTERY
RED9_META_NODETYPE_REGISTERY = []
if nodeTypes:
if not type(nodeTypes)==list:
nodeTypes=[nodeTypes]
baseTypes.extend(nodeTypes)
try:
MayaRegisteredNodes=cmds.allNodeTypes()
for nType in baseTypes:
if not nType in RED9_META_NODETYPE_REGISTERY and nType in MayaRegisteredNodes:
log.debug('nodeType : "%s" : added to NODETYPE_REGISTRY' % nType)
RED9_META_NODETYPE_REGISTERY.append(nType)
else:
log.debug('nType: "%s" is an invalid Maya NodeType' % nType)
except:
log.warning('registerMClassNodeMapping failure - seems to have issues in Maya2009')
#raise StandardError('registerMClassNodeMapping failure - seems to have issues in Maya2009')
def printMetaTypeRegistry():
for t in RED9_META_NODETYPE_REGISTERY:
print t
def getMClassNodeTypes():
    '''
    Generic get wrapper: return the list of Maya nodeTypes registered in the
    RED9_META_NODETYPE_REGISTERY global.
    '''
    return RED9_META_NODETYPE_REGISTERY
def resetMClassNodeTypes():
    '''
    Restore the nodeType registry back to its default base types.
    '''
    registerMClassNodeMapping()
# ====================================================================================
def attributeDataType(val):
    '''
    Validate the attribute type for all the cmds handling

    :param val: value whose python type we map to an attr type key
    :return: one of 'string', 'unicode', 'bool', 'int', 'float' or 'complex';
        dict / list / tuple are all flagged 'complex' (serialized data).
        Returns None implicitly for unrecognized types.
    '''
    # NOTE: bool must be tested before int as bool is a subclass of int.
    # The log.debug format strings previously had '%s' placeholders with no
    # argument bound, so the value was never actually logged - fixed here.
    if isinstance(val, str):
        log.debug('Val : %s : is a string' % val)
        return 'string'
    if isinstance(val, unicode):
        log.debug('Val : %s : is a unicode' % val)
        return 'unicode'
    if isinstance(val, bool):
        log.debug('Val : %s : is a bool' % val)
        return 'bool'
    if isinstance(val, int):
        log.debug('Val : %s : is a int' % val)
        return 'int'
    if isinstance(val, float):
        log.debug('Val : %s : is a float' % val)
        return 'float'
    if isinstance(val, dict):
        log.debug('Val : %s : is a dict' % val)
        return 'complex'
    if isinstance(val, list):
        log.debug('Val : %s : is a list' % val)
        return 'complex'
    if isinstance(val, tuple):
        log.debug('Val : %s : is a tuple' % val)
        return 'complex'
#@pymelHandler
def isMetaNode(node, mTypes=[]):
    '''
    Simple bool, Maya Node is or isn't an mNode

    :param node: Maya node (or already-instantiated MetaClass object) to test
    :param mTypes: only match given MetaClass's - str or class accepted

    .. note::
        this does not instantiate the mClass to query it like the
        isMetaNodeInherited which has to figure the subclass mapping
    '''
    # NOTE(review): mTypes=[] is a mutable default arg - harmless here as it's
    # never mutated, but bear in mind if this signature is ever extended
    mClassInstance=False
    if not node:
        return False
    if issubclass(type(node), MetaClass):
        # already a MetaClass wrap - work from its underlying Maya node
        node=node.mNode
        mClassInstance=True
    mClass=getMClassDataFromNode(node)
    if mClass:
        if mClass in RED9_META_REGISTERY:
            if mTypes:
                # clamp the match to only the given registry keys
                if mClass in mTypesToRegistryKey(mTypes):
                    return True
                else:
                    return False
            else:
                return True
        else:
            log.debug('isMetaNode>>InValid MetaClass attr : %s' % mClass)
            return False
    else:
        if mClassInstance:
            # no mClass data on the node, but we were handed a MetaClass wrap
            # of a standard Maya node, which still counts as managed
            log.debug('isMetaNode = True : node is a Wrapped StandardMaya Node MClass instance')
            return True
        else:
            return False
#def isMetaNodeInherited(node, mInstances=[]):
# '''
# unlike isMetaNode which checks the node against a particular MetaClass,
# this expands the check to see if the node is inherited from or a subclass of
# a given Meta base class, ie, part of a system
# TODO : we COULD return the instantiated metaClass object here rather than just a bool??
# '''
# if isMetaNode(node):
# mClass=MetaClass(node) #instantiate the metaClass so we can work out subclass mapping
# for inst in mTypesToRegistryKey(mInstances):
# #log.debug('testing class inheritance: %s > %s' % ( inst, RED9_META_REGISTERY[inst],type(mClass)))
# if issubclass(type(mClass), RED9_META_REGISTERY[inst]):
# log.debug('MetaNode %s is of subclass >> %s' % (mClass,inst))
# return True
def isMetaNodeInherited(node, mInstances=[]):
    '''
    unlike isMetaNode which checks the node against a particular MetaClass,
    this expands the check to see if the node is inherited from or a subclass of
    a given Meta base class, ie, part of a system

    :param node: Maya node (or MetaClass instance) to test
    :param mInstances: base class(es) to test inheritance against - str or class

    TODO : we COULD return the instantiated metaClass object here rather than just a bool??
    '''
    if not node:
        return False
    if issubclass(type(node), MetaClass):
        # already a MetaClass wrap - work from its underlying Maya node
        node=node.mNode
    mClass=getMClassDataFromNode(node)
    if mClass and mClass in RED9_META_REGISTERY:
        # resolve both sides via the registry so the subclass test works
        # from name strings without instantiating the node itself
        for inst in mTypesToRegistryKey(mInstances):
            log.debug('testing class inheritance: %s > %s' % (inst, mClass))
            if issubclass(RED9_META_REGISTERY[mClass], RED9_META_REGISTERY[inst]):
                log.debug('MetaNode %s is of subclass >> %s' % (mClass,inst))
                return True
    return False
@r9General.Timer
def getMetaNodes(mTypes=[], mInstances=[], mAttrs=None, dataType='mClass', nTypes=None, **kws):
    '''
    Get all mClass nodes in scene and return as mClass objects if possible

    :param mTypes: only return meta nodes of a given type
    :param mInstances: idea - this will check subclass inheritance, ie, MetaRig would
        return ALL nodes who's class is inherited from MetaRig. Allows you to
        group the data more efficiently by base classes and their inheritance
    :param mAttrs: uses the FilterNode.lsSearchAttributes call to match nodes via given attrs
    :param dataType: default='mClass' return the nodes already instantiated to
        the correct class object. If not then return the Maya node itself
    :param nTypes: clamp the scene ls call to the given Maya nodeTypes rather
        than all types held in the nodeType registry
    '''
    mNodes=[]
    if not nTypes:
        nodes = cmds.ls(type=getMClassNodeTypes(), l=True)
    else:
        nodes = cmds.ls(type=nTypes, l=True)
    #if mTypes and not type(mTypes)==list:mTypes=[mTypes]
    for node in nodes:
    #for node in cmds.ls(type=getMClassNodeTypes(), l=True):
        if not mInstances:
            if isMetaNode(node, mTypes=mTypes):
                mNodes.append(node)
        else:
            # inheritance-aware matching rather than exact class match
            if isMetaNodeInherited(node,mInstances):
                mNodes.append(node)
    if not mNodes:
        return mNodes
    if mAttrs:
        #lazy to avoid cyclic imports
        import Red9_CoreUtils as r9Core
        mNodes=r9Core.FilterNode().lsSearchAttributes(mAttrs, nodes=mNodes)
    if dataType=='mClass':
        return[MetaClass(node,**kws) for node in mNodes]
    else:
        return mNodes
@r9General.Timer
def getConnectedMetaNodes(nodes, source=True, destination=True, mTypes=[], mInstances=[], \
                          mAttrs=None, dataType='mClass', nTypes=None, **kws):
    '''
    From a given set of Maya Nodes return all connected mNodes
    Default return is mClass objects

    :param nodes: nodes to inspect for connected meta data
    :param source: `bool` clamp the search to the source side of the graph
    :param destination: `bool` clamp the search to the destination side of the graph
    :param mTypes: return only given MetaClass's
    :param mInstances: idea - this will check subclass inheritance, ie, MetaRig would
        return ALL nodes who's class is inherited from MetaRig. Allows you to
        group the data more efficiently by base classes and their inheritance
    :param mAttrs: uses the FilterNode.lsSearchAttributes call to match nodes via given attrs
    :param dataType: default='mClass' return the nodes already instantiated to
        the correct class object. If not then return the Maya node
    :param nTypes: clamp the connection search to the given Maya nodeTypes,
        default is all types held in the nodeType registry
    '''
    mNodes=[]
    connections=[]
    if not nTypes:
        nTypes = getMClassNodeTypes()
    #if mTypes and not type(mTypes)==list:mTypes=[mTypes]
    for nType in nTypes:
    #for nType in getMClassNodeTypes():
        cons = cmds.listConnections(nodes, type=nType, s=source, d=destination, c=True)
        if cons:
            # NOTE we're only interested in connected nodes via message linked attrs
            # cons is a flat [plug, node, plug, node...] list as c=True was passed,
            # hence the paired slicing below
            for plug, node in zip(cons[::2], cons[1::2]):
                if cmds.getAttr(plug, type=True) == 'message':
                    if not node in connections:
                        connections.append(node)
                        log.debug(node)
    if not connections:
        return mNodes
    for node in connections:
        if not mInstances:
            if isMetaNode(node, mTypes=mTypes):
                mNodes.append(node)
        else:
            if isMetaNodeInherited(node,mInstances):
                mNodes.append(node)
    if mAttrs:
        #lazy to avoid cyclic imports
        import Red9_CoreUtils as r9Core
        mNodes=r9Core.FilterNode().lsSearchAttributes(mAttrs, nodes=mNodes)
    if dataType=='mClass':
        return [MetaClass(node,**kws) for node in set(mNodes)]
    else:
        return list(set(mNodes))
def getConnectedMetaSystemRoot(node, **kws):
    '''
    From a given node see if it's part of a MetaData system, if so
    walk up the parent tree till you get to top meta node and return the class.

    :param node: Maya node to inspect
    :return: the top mClass object of the system, or None if the node isn't
        connected to any metaData
    '''
    mNodes=getConnectedMetaNodes(node,**kws)
    if not mNodes:
        return
    else:
        mNode=mNodes[0]
    if type(mNode)==MetaRig:
        # a MetaRig is already regarded as a system root
        return mNode
    else:
        runaways=0
        # walk up the network, bailing out after 100 hops as a safety
        # guard against cyclic parent connections
        while mNode and not runaways==100:
            log.debug('walking network : %s' % mNode.mNode)
            parent=mNode.getParentMetaNode()
            if not parent:
                log.debug('node is top of tree : %s' % mNode)
                return mNode
            runaways+=1
            mNode=parent
def convertNodeToMetaData(nodes,mClass):
    '''
    pass in a node and convert it to a MetaNode, assuming that the nodeType
    is valid in the metaNodeTypesRegistry

    :param nodes: node(s) to stamp with the mClass / mNodeID attrs
    :param mClass: MetaClass (or class name string) to tag the nodes with
    :return: list of the nodes re-instantiated as mClass objects
    '''
    if not type(nodes)==list:
        nodes=[nodes]
    for node in nodes:
        mNode=MetaClass(node)
        # stamp and lock the two key attrs that mark this node as a metaNode
        mNode.addAttr('mClass', value=mTypesToRegistryKey(mClass)[0])
        mNode.addAttr('mNodeID', value=node.split('|')[-1].split(':')[-1])
        mNode.attrSetLocked('mClass', True)
        mNode.attrSetLocked('mNodeID', True)
    # re-instantiate so each node now resolves to the stamped mClass type
    return [MetaClass(node) for node in nodes]
class MClassNodeUI():
    '''
    Simple UI to display all MetaNodes in the scene
    '''
    def __init__(self, mTypes=None, mInstances=None, closeOnSelect=False, \
                       funcOnSelection=None, sortBy='byClass', allowMulti=True):
        '''
        :param mTypes: MetaNode class to search and display 'MetaRig'
        :param mInstances: MetaNode inheritance map, ie show all subclass of mType..
        :param closeOnSelect: on text select close the UI
        :param funcOnSelection: function to run where the selected mNode is expected
            as first arg, ie funcOnSelection=cmd.select so that when run the item is
            selected in the UI cmds.select(item) is run. Basically used as a dynamic callback
        :param sortBy: Sort the nodes found 'byClass' or 'byName'
        :param allowMulti: allow multiple selection in the UI
        '''
        self.mInstances=mInstances
        self.mTypes=mTypes
        self.closeOnSelect=closeOnSelect
        self.func=funcOnSelection  # Given Function to run on the selected node when UI selected
        self.sortBy=sortBy
        self.allowMulti=allowMulti
        self.win = 'MetaClassFinder'
        self.mNodes=None  # cache of the node names currently shown in the scroll list

    @classmethod
    def show(cls):
        '''
        convenience launcher : build a default instance and show its UI
        '''
        cls()._showUI()

    def _showUI(self):
        '''
        build and display the main window: menus, scroll list and buttons
        '''
        if cmds.window(self.win, exists=True):
            cmds.deleteUI(self.win, window=True)
        window = cmds.window(self.win, title=self.win)
        cmds.menuBarLayout()
        cmds.menu(l="VimeoHelp")
        cmds.menuItem(l="Vimeo Help: MetaData-Part1",
                      ann='Part1 goes through the main attribute handling inside Meta',
                      c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('https://vimeo.com/61841345')")
        cmds.menuItem(l="Vimeo Help: MetaData-Part2",
                      ann='Part2 goes through the class structures and the basic factory aspect of Meta',
                      c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('https://vimeo.com/62546103')")
        cmds.menuItem(l="Vimeo Help: MetaData-Part3",
                      ann='Part3 shows how to add metaRig to your systems, all the connectChild and addRigCtrl calls',
                      c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('https://vimeo.com/64258996')")
        cmds.menuItem(l="Vimeo Help: MetaData-Part4",
                      ann='Part4 goes through subclassing Meta and using it in your own systems',
                      c="import Red9.core.Red9_General as r9General;r9General.os_OpenFile('https://vimeo.com/72006183')")
        cmds.menuItem(divider=True)
        cmds.menuItem(divider=True)
        cmds.menuItem(l="Contact Me",c=lambda *args:(r9Setup.red9ContactInfo()))
        cmds.menu(l="Debug")
        cmds.menuItem(l="Print :Registered NodeTypes",
                      ann='Prints the currently registered nodeTypes from the Meta Registry',
                      c=self.printRegisteredNodeTypes)
        cmds.menuItem(l="Print :Registered MetaClasses",
                      ann='Prints the currently registered MetaClasses from the Meta Registry',
                      c=self.printRegisteredMetaClasses)
        # rc callback keeps the textScrollList sized to the scrollLayout
        cmds.scrollLayout('slMetaNodeScroll',rc=lambda *args:self.fitTextScrollFucker())
        cmds.columnLayout(adjustableColumn=True)
        cmds.separator(h=15, style='none')
        txt=self.mTypes
        if not self.mTypes:
            txt='All'
        cmds.text(label='Scene MetaNodes of type : %s' % txt)
        cmds.separator(h=15, style='none')
        if not self.allowMulti:
            cmds.textScrollList('slMetaNodeList',font="fixedWidthFont")
        else:
            cmds.textScrollList('slMetaNodeList',font="fixedWidthFont", allowMultiSelection=True)
        cmds.popupMenu('r9MetaNodeUI_Popup')
        cmds.menuItem(label='Select Children', ann='NOTE doubleClick on the UI also runs the selectChildren call"',
                      command=partial(self.doubleClick))
        cmds.menuItem(label='Delete Selected mNodes', ann='call self.delete() on the selected nModes',
                      command=partial(self.deleteCall))
        cmds.menuItem(divider=True)
        cmds.menuItem(label='SortBy : ClassName', command=partial(self.fillScroll,'byClass'))
        cmds.menuItem(label='SortBy : NodeName', command=partial(self.fillScroll,'byName'))
        cmds.menuItem(label='Graph Selected Networks', command=partial(self.graphNetwork))
        cmds.menuItem(divider=True)
        cmds.menuItem(label='Class : All Registered', command=partial(self.fillScroll,'byName'))
        cmds.menuItem(divider=True)
        # one filter entry per registered metaClass
        for mCls in sorted(RED9_META_REGISTERY):
            cmds.menuItem(label='Class : %s' % mCls, command=partial(self.fillScroll,'byName', mCls))
        cmds.button(label='Refresh', command=partial(self.fillScroll))
        cmds.separator(h=15,style='none')
        cmds.iconTextButton(style='iconOnly', bgc=(0.7,0,0), image1='Rocket9_buttonStrap2.bmp',
                            c=lambda *args:(r9Setup.red9ContactInfo()),h=22,w=200)
        cmds.showWindow(window)
        self.fillScroll()

    def fitTextScrollFucker(self):
        '''
        workaround to resize the textScrollList to track its parent
        scrollLayout, as the default Maya control doesn't auto-resize
        '''
        cmds.textScrollList('slMetaNodeList',e=True,h=int(cmds.scrollLayout('slMetaNodeScroll',q=True,h=True))-120)
        cmds.textScrollList('slMetaNodeList',e=True,w=int(cmds.scrollLayout('slMetaNodeScroll',q=True,w=True))-10)

    def graphNetwork(self,*args):
        '''
        graph the selected networks in the HyperGraph (pre-2013) or the
        Node Editor (2013 onwards)
        '''
        if r9Setup.mayaVersion()<2013:
            mel.eval('hyperGraphWindow( "", "DG")')
        else:
            mel.eval('NodeEditorWindow;NodeEditorGraphUpDownstream;')

    def selectCmd(self,*args):
        '''
        callback run on select in the UI, allows you to run the func passed
        in by the funcOnSelection arg
        '''
        indexes=cmds.textScrollList('slMetaNodeList',q=True,sii=True)
        if indexes:
            cmds.select(cl=True)
        # NOTE(review): textScrollList sii may return None when nothing is
        # selected - this loop presumes a selection exists; confirm in Maya
        for i in indexes:
            node=MetaClass(self.mNodes[i - 1])
            log.debug('selected : %s' % node)

            #func is a function passed into the UI via the funcOnSelection arg
            #this allows external classes to use this as a signal call on select
            if self.func:
                self.func(node.mNode)
            else:
                cmds.select(node.mNode,add=True)
        if self.closeOnSelect:
            cmds.deleteUI('MetaClassFinder',window=True)

    def deleteCall(self,*args):
        '''
        confirm then delete the selected mNodes via their mClass delete() call
        '''
        result = cmds.confirmDialog(
                title='Confirm metaNode Delete',
                button=['Yes', 'Cancel'],
                message='Confirm deletion of metaNode\nare you absolutely\n\nSURE\n\nyou meant to do this?',
                defaultButton='Cancel',
                bgc=(0.5,0.1,0.1),
                cancelButton='Cancel',
                dismissString='Cancel')
        if result == 'Yes':
            try:
                indexes=cmds.textScrollList('slMetaNodeList',q=True,sii=True)
                if indexes:
                    for i in indexes:
                        # scroll list indices are 1-based, self.mNodes is 0-based
                        MetaClass(self.mNodes[i - 1]).delete()
                self.fillScroll()
            except:
                log.warning('delete failed')

    def doubleClick(self,*args):
        '''
        run the generic meta.getChildren call and select the results
        '''
        cmds.select(cl=True)
        nodes=[]
        for i in cmds.textScrollList('slMetaNodeList',q=True,sii=True):
            nodes.extend(MetaClass(self.mNodes[i-1]).getChildren(walk=True))
        if nodes:
            cmds.select(nodes)
        else:
            log.warning('no child nodes found from given metaNode')
        #cmds.select(self.mNodes[cmds.textScrollList('slMetaNodeList',q=True,sii=True)[0]-1].getChildren(walk=True))

    def fillScroll(self, sortBy=None, mClassToShow=None, *args):
        '''
        refresh the scroll list contents

        :param sortBy: 'byClass' or 'byName', defaults to the sort set on __init__
        :param mClassToShow: clamp the listing to a single registered mClass key
        '''
        cmds.textScrollList('slMetaNodeList', edit=True, ra=True)
        if mClassToShow:
            self.mNodes=getMetaNodes(mTypes=mClassToShow,mInstances=None,dataType='node')
        else:
            mClassToShow=self.mTypes
            self.mNodes=getMetaNodes(mTypes=mClassToShow,mInstances=self.mInstances,dataType='node')
        if not sortBy:
            sortBy=self.sortBy

        if sortBy=='byClass':
            #self.mNodes=sorted(self.mNodes, key=lambda x: x.mClass.upper())
            self.mNodes=sorted(self.mNodes, key=lambda x: getMClassDataFromNode(x).upper())
        elif sortBy=='byName':
            #self.mNodes=sorted(self.mNodes, key=lambda x: x.mNode.upper())
            self.mNodes=sorted(self.mNodes, key=lambda x: x.upper())

        if self.mNodes:
            width=len(self.mNodes[0])
            #figure out the width of the first cell
            for meta in self.mNodes:
                if len(meta)>width:
                    width=len(meta)
            width+=3
            #fill the scroll list
            for meta in self.mNodes:
                cmds.textScrollList('slMetaNodeList', edit=True,
                                        append=('{0:<%i}:{1:}' % width).format(meta, getMClassDataFromNode(meta)),
                                        sc=lambda *args:self.selectCmd(),
                                        dcc=lambda *x:self.doubleClick())

    def printRegisteredNodeTypes(self,*args):
        '''
        debug print of the nodeType registry contents
        '''
        print '\nRED9_META_NODETYPE_REGISTERY:\n============================='
        print getMClassNodeTypes()

    def printRegisteredMetaClasses(self,*args):
        '''
        debug print of the mClass registry contents, sorted by class name
        '''
        data = getMClassMetaRegistry()
        print '\nRED9_META_REGISTERY:\n===================='
        for key, value in sorted(data.items()):
            print key, ' : ', value
# Decorators ==========================================================
def nodeLockManager(func):
    '''
    Simple decorator to manage metaNodes which are locked. Why lock??
    Currently just the metaRig and therefore any subclasses of that are locked.
    The reason is that the Maya 'network' node I use has issues when certain
    connections are deleted, the node itself can get deleted and cleanup, removing
    the entire network! Try it, make a metaNode and key an attr on it, then run
    cutKeys...the node will be deleted.
    This decorator is used to manage the unlocking of self for all calls that
    require change access rights to the 'network' node itself.
    '''
    @wraps(func)
    def wrapper(*args, **kws):
        res=None
        err=None
        locked=False
        try:
            locked=False
            mNode=args[0]  # args[0] is self (decorates MetaClass methods only)
            #log.debug('nodeLockManager > func : %s : metaNode / self: %s' % (func.__name__,mNode.mNode))
            if mNode.mNode and mNode._lockState:
                # temporarily unlock so the wrapped call can modify the node
                locked=True
                cmds.lockNode(mNode.mNode,lock=False)
                #log.debug( 'nodeLockManager > func : %s : node being unlocked' % func.__name__)
            res=func(*args, **kws)
        except StandardError, error:
            err=error
        finally:
            # always relock, even if the wrapped call raised
            if locked:
                #log.debug( 'nodeLockManager > func : %s : node being relocked' % func.__name__)
                cmds.lockNode(mNode.mNode, lock=True)
            if err:
                traceback = sys.exc_info()[2]  # get the full traceback
                raise StandardError(StandardError(err), traceback)
        return res
    return wrapper
def pymelHandler(func):
def wrapper(*args, **kws):
res=None
err=None
try:
#inputNodes=args[0]
#if 'pymel' in str(type(inputNodes)):
# print 'pymel Node passed in!!!!!!!!!!'
# print 'type : ', args
# #args[0]=str(inputNodes)
res=func(*args, **kws)
except StandardError, error:
err=error
finally:
if err:
traceback = sys.exc_info()[2] # get the full traceback
raise StandardError(StandardError(err), traceback)
return res
return wrapper
# Main Meta Class ==========================================================
class MetaClass(object):
    def __new__(cls, *args, **kws):
        '''
        Idea here is if a MayaNode is passed in and has the mClass attr
        we pass that into the super(__new__) such that an object of that class
        is then instantiated and returned.
        '''
        mClass=None
        mNode=None
        if args:
            mNode=args[0]
            if isMetaNode(mNode):
                mClass=getMClassDataFromNode(mNode)
        if mClass:
            log.debug("mClass derived from MayaNode Attr : %s" % mClass)
            if mClass in RED9_META_REGISTERY:
                _registeredMClass=RED9_META_REGISTERY[mClass]
                try:
                    # swap the class being instantiated for the registered one
                    # so MetaClass(node) returns the node's true subclass
                    log.debug('Instantiating existing mClass : %s >> %s' % (mClass,_registeredMClass))
                    return super(cls.__class__, cls).__new__(_registeredMClass,*args,**kws)
                except:
                    log.debug('Failed to initialize mClass : %s' % _registeredMClass)
                    pass
            else:
                raise StandardError('Node has an unRegistered mClass attr set')
        else:
            log.debug("mClass not found or Registered")
        # fall through: plain instance of the class actually requested
        return super(cls.__class__, cls).__new__(cls)
    #@pymelHandler
    def __init__(self, node=None, name=None, nodeType='network', autofill='all', **kws):
        '''
        Base Class for Meta support. This manages all the attribute and class
        management for all subsequent inherited classes. This is the core of
        the MetaData factory API

        :param node: Maya Node - if given we test it for the mClass attribute, if it exists
            we initialize a class of that type and return. If not passed in then we
            make a new network node for the type given.
        :param name: only used on create, name to set for the new Maya Node (self.mNode)
        :param nodeType: allows you to specify a node of type to create as a new mClass node.
            default is 'network', not that for any node to show up in the get
            calls that type MUST be registered in the RED9_META_NODETYPE_REGISTERY
        :param autofill: 'str' cast all the MayaNode attrs into the class dict by default.
            Updated: modes: 'all' or 'messageOnly'. all casts every attr, messageOnly
            fills the node with just message linked attrs (designed for MetaClass work
            with HIK characterNode)

        .. note::
            mNode is now a wrap on the MObject so will always be in sync even if the node is renamed/parented
        '''
        #data that will not get pushed to the Maya node
        object.__setattr__(self, '_MObject', '')
        object.__setattr__(self, '_MObjectHandle', '')
        object.__setattr__(self, 'UNMANAGED', ['mNode',
                                               '_MObject',
                                               '_MObjectHandle',
                                               '_lockState',
                                               'lockState',
                                               '_forceAsMeta'])  # note - UNMANAGED bypasses the Maya node in setattr calls
        object.__setattr__(self, '_lockState', False)
        object.__setattr__(self, '_forceAsMeta', False)  # force all getAttr calls to return mClass objects even for standard Maya nodes

        if not node:
            if not name:
                name=self.__class__.__name__
            #no MayaNode passed in so make a fresh network node (default)
            node=cmds.createNode(nodeType,name=name)
            self.mNode=node
            self.addAttr('mClass', value=str(self.__class__.__name__))  # ! MAIN ATTR !: used to know what class to instantiate.
            self.addAttr('mNodeID', value=name)  # ! MAIN NODE ID !: used by pose systems to ID the node.
            log.debug('New Meta Node Created')
            cmds.setAttr('%s.%s' % (self.mNode,'mClass'), e=True,l=True)  # lock it
            cmds.setAttr('%s.%s' % (self.mNode,'mNodeID'),e=True,l=True)  # lock it
        else:
            self.mNode=node
            if not self.hasAttr('mNodeID'):
                #for casting None MetaData, standard Maya nodes into the api
                self.mNodeID=node.split('|')[-1].split(':')[-1]
            if isMetaNode(node):
                log.debug('Meta Node Passed in : %s' % node)
            else:
                log.debug('Standard Maya Node being metaManaged')

        self.lockState=False

        #bind any default attrs up - note this should be overloaded where required
        self.__bindData__()

        #This is useful! so we have a node with a lot of attrs, or just a simple node
        #this block if activated will auto-fill the object.__dict__ with all the available
        #Maya node attrs, so you get autocomplete on ALL attrs in the script editor!
        if autofill=='all' or autofill=='messageOnly':
            self.__fillAttrCache__(autofill)
    def __bindData__(self):
        '''
        This is intended as an entry point to allow you to bind whatever attrs or extras
        you need at a class level. It's called by the __init__ ...
        Intended to be overloaded as and when needed when inheriting from MetaClass

        .. note::
            To bind a new attr and serialize it to the self.mNode (Maya node):
                self.addAttr('newDefaultAttr', attrType='string')
            To bind a new attribute to the python object only, not serialized to the Maya node:
                self.newClassAttr=None  :or:  self.__setattr__('newAttr', None)
        '''
        pass
def isValid(self):
'''
a metaNode in this case is valid if it has connections, if not it's classed invalid
'''
if not self.isValidMObject():
return False
if self.hasAttr('mClass') and not cmds.listConnections(self.mNode):
return False
return True
def isValidMObject(self):
    '''
    Validate the cached MObjectHandle; without this Maya can crash if the
    pointer is no longer valid (eg after a scene re-load).

    :return: True if the wrapped MObject is still valid, else False

    TODO: thinking of storing the dagPath when we fill in the mNode to start with and
    if this test fails, ie the scene has been reloaded, then use the dagPath to refind
    and refil the mNode property back in.... maybe??
    '''
    try:
        mobjHandle = object.__getattribute__(self, "_MObjectHandle")
        return mobjHandle.isValid()
    except StandardError:
        # _MObjectHandle is only bound once mNode has been set;
        # was a bare except with an implicit None return - now narrowed
        # to StandardError and returning an explicit False
        log.info('_MObjectHandle not yet setup')
        return False
#Cast the mNode attr to the actual MObject so it's no longer limited by string dagpaths
#yes I know Pymel does this for us but I don't want the overhead!
def __get_mNode(self):
    '''
    Getter for the mNode property: resolve the cached MObject back to its
    current string name. DagNodes are returned as FULL dag paths so the
    binding survives renames/re-parenting; other nodes via the dependNode name.
    Returns None (implicitly) if the MObjectHandle is no longer valid.
    '''
    mobjHandle=object.__getattribute__(self, "_MObjectHandle")
    if mobjHandle:
        try:
            if not mobjHandle.isValid():
                log.info('MObject is no longer valid - %s - object may have been deleted or the scene reloaded?'\
                         % object.__getattribute__(self,'mNodeID'))
                return
            #if we have an object thats a dagNode, ensure we return FULL Path
            mobj=object.__getattribute__(self, "_MObject")
            if OpenMaya.MObject.hasFn(mobj, OpenMaya.MFn.kDagNode):
                dPath = OpenMaya.MDagPath()
                OpenMaya.MDagPath.getAPathTo(mobj,dPath)
                return dPath.fullPathName()
            else:
                depNodeFunc = OpenMaya.MFnDependencyNode(mobj)
                return depNodeFunc.name()
        except StandardError,error:
            raise StandardError(error)
def __set_mNode(self, node):
    '''
    Setter for the mNode property: cast the given node name to an MObject and
    cache it on the instance, along with an MObjectHandle used to validate
    the pointer later on.
    '''
    if node:
        try:
            mobj=OpenMaya.MObject()
            selList=OpenMaya.MSelectionList()
            selList.add(node)
            selList.getDependNode(0,mobj)
            #bypass our own __setattr__ - these are internal python-only caches
            object.__setattr__(self, '_MObject', mobj)
            object.__setattr__(self, '_MObjectHandle',OpenMaya.MObjectHandle(mobj))
        except StandardError, error:
            raise StandardError(error)
#mNode : string name of the wrapped Maya node, backed by the cached MObject
mNode = property(__get_mNode, __set_mNode)
@property
def mNodeMObject(self):
    '''
    exposed wrapper to return the MObject directly, this passes via the MObjectHandle
    to ensure that the MObject cached is still valid.
    Returns None (implicitly) if the handle is dead.
    '''
    mobjHandle=object.__getattribute__(self, "_MObjectHandle")
    if mobjHandle:
        try:
            if not mobjHandle.isValid():
                log.info('MObject is no longer valid - %s - object may have been deleted or the scene reloaded?'\
                         % object.__getattribute__(self,'mNodeID'))
                return
            #handle is still valid so the raw MObject is safe to hand out
            return object.__getattribute__(self, "_MObject")
        except StandardError,error:
            raise StandardError(error)
#property managing the lockNode state of the mNode
def __get_lockState(self):
    #python-side cache of the last state we set via this property
    return self._lockState
def __set_lockState(self, state):
    try:
        cmds.lockNode(self.mNode, lock=state)
        self._lockState=state
    except:
        #best-effort: referenced or otherwise protected nodes can refuse this
        log.debug("can't set the nodeState for : %s" % self.mNode)
lockState = property(__get_lockState, __set_lockState)
def __repr__(self):
    '''
    Readable representation: include the mClass type for true metaNodes,
    otherwise flag the node as a wrapped standard Maya node.
    '''
    nodeShortName = self.mNode.split('|')[-1]
    if self.hasAttr('mClass'):
        return "%s(mClass: '%s', node: '%s')" % (self.__class__, self.mClass, nodeShortName)
    return "%s(Wrapped Standard MayaNode, node: '%s')" % (self.__class__, nodeShortName)
def __eq__(self, obj):
    '''
    Equality is handled via the MObject cache: two instances are equal only
    when both wrap a live MObject and those MObjects match.
    '''
    if not isinstance(obj, self.__class__):
        return False
    if not obj._MObject or not self._MObject:
        return False
    return obj._MObject == self._MObject
@r9General.Timer
def __fillAttrCache__(self, level):
    '''
    go through all the attributes on the given node and cast each one of them into
    the main object.__dict__ this means they all show in the scriptEditor and autocomplete!
    This is ONLY for ease of use when dot complete in Maya, nothing more

    :param level: 'messageOnly' caches just the message attrs, any other value
        caches every attr returned by cmds.listAttr
    '''
    if level=='messageOnly':
        attrs=self.listAttrsOfType(Type='message')
        #attrs=[attr for attr in cmds.listAttr(self.mNode) if cmds.getAttr('%s.%s' % (self.mNode,attr),type=True)=='message']
    else:
        #attrs=self.listAttrs()
        attrs=cmds.listAttr(self.mNode)
    for attr in attrs:
        try:
            #we only want to fill the __dict__ we don't want the overhead
            #of reading the attr data as thats done on demand.
            object.__setattr__(self, attr, None)
        except:
            #best-effort: some Maya attr names aren't valid python identifiers
            pass
# Attribuite Management block
#-----------------------------------------------------------------------------------
def __setEnumAttr__(self, attr, value):
    '''
    Set an enum attr on the mNode; the value may be given either as the
    integer index or as the enum's display text, strings being mapped back
    to their index via attributeQuery.
    '''
    if attributeDataType(value) == 'string':
        log.debug('set enum attribute by string : %s' % value)
        enumKeys = cmds.attributeQuery(attr, node=self.mNode, listEnum=True)[0].split(':')
        try:
            value = enumKeys.index(value)
        except ValueError:
            raise ValueError('Invalid enum string passed in: string is not in enum keys')
    log.debug('set enum attribute by index : %s' % value)
    cmds.setAttr('%s.%s' % (self.mNode, attr), value)
def __setMessageAttr__(self, attr, value, force=True):
    '''
    Wire nodes into a message attr. By default in __setattr__ the nodes
    passed in become the ONLY connections on that msgLink; any current
    connections are removed first (cleanCurrent=True).
    '''
    if cmds.attributeQuery(attr, node=self.mNode, multi=True):
        log.debug('set multi-message attribute connection: %s' % value)
        self.connectChildren(value, attr, cleanCurrent=True, force=force)
        return
    # singular plug: only one node may be wired in
    if attributeDataType(value) == 'complex':
        raise ValueError("You can't connect multiple nodes to a singluar message plug via __setattr__")
    log.debug('set singular message attribute connection: %s' % value)
    self.connectChild(value, attr, cleanCurrent=True, force=force)
@nodeLockManager
def __setattr__(self, attr, value, force=True, **kws):
    '''
    Overload the base setattr to manage the MayaNode itself: the value is
    always bound to the python instance first, and if the attr also exists
    on the wrapped Maya node it's serialized there too, with enum, message
    and standard attr types each handled separately.

    :param attr: attribute name to set
    :param value: value to set
    :param force: if the Maya attr is locked, temporarily unlock, set, re-lock
    '''
    object.__setattr__(self, attr, value)
    if attr not in self.UNMANAGED and not attr=='UNMANAGED':
        if cmds.attributeQuery(attr, exists=True, node=self.mNode):
            locked=False
            if self.attrIsLocked(attr) and force:
                self.attrSetLocked(attr,False)
                locked=True
            #Enums Handling
            if cmds.attributeQuery(attr, node=self.mNode, enum=True):
                self.__setEnumAttr__(attr, value)
            #Message Link handling
            elif cmds.attributeQuery(attr, node=self.mNode, message=True):
                self.__setMessageAttr__(attr, value, force)
            #Standard Attribute
            else:
                attrString='%s.%s' % (self.mNode, attr)  # mayaNode.attribute for cmds.get/set calls
                attrType=cmds.getAttr(attrString, type=True)  # the MayaNode attribute valueType
                valueType=attributeDataType(value)  # DataType passed in to be set as Value
                log.debug('valueType : %s' % valueType)
                log.debug('setting %s attribute' % attrType)
                if attrType=='string':
                    if valueType=='string' or valueType=='unicode':
                        log.debug('set string attribute: %s' % value)
                        cmds.setAttr(attrString, value, type='string')
                        return
                    elif valueType=='complex':
                        #complex data (dicts/lists) is JSON serialized into the string attr
                        log.debug('set string attribute to complex : %s' % self.__serializeComplex(value))
                        cmds.setAttr(attrString, self.__serializeComplex(value), type='string')
                        return
                elif attrType in ['double3','float3'] and valueType=='complex':
                    try:
                        #compound attrs take their 3 components as separate args
                        cmds.setAttr(attrString, value[0], value[1], value[2])
                    except ValueError, error:
                        raise ValueError(error)
                elif attrType == 'doubleArray':
                    cmds.setAttr(attrString, value, type='doubleArray')
                elif attrType == 'matrix':
                    cmds.setAttr(attrString, value, type='matrix')
                #elif attrType=='TdataCompound': #ie blendShape weights = multi data or joint.minRotLimitEnable
                #    pass
                else:
                    try:
                        cmds.setAttr(attrString, value)
                    except StandardError,error:
                        log.debug('failed to setAttr %s - might be connected' % attrString)
                        raise StandardError(error)
            if locked:
                #restore the original lock state
                self.attrSetLocked(attr,True)
        else:
            log.debug('attr : %s doesnt exist on MayaNode > class attr only' % attr)
def __getMessageAttr__(self, attr):
    '''
    separated func as it's the kind of thing that other classes may want to overload
    the behaviour of the returns etc

    :param attr: message attr on the mNode to inspect
    :return: for a singular message attr whose first connection is a metaNode,
        a single MetaClass instance; otherwise a list of longNames in which
        metaNodes (or all nodes when self._forceAsMeta) are cast to MetaClass.
        Returns [] when nothing is connected.
    '''
    msgLinks=cmds.listConnections('%s.%s' % (self.mNode,attr),destination=True,source=True)
    if msgLinks:
        msgLinks=cmds.ls(msgLinks,l=True)  # cast to longNames!
        if not cmds.attributeQuery(attr, node=self.mNode, m=True): # singular message
            if isMetaNode(msgLinks[0]):
                return MetaClass(msgLinks[0])
        for i,link in enumerate(msgLinks):
            if isMetaNode(link) or self._forceAsMeta:
                msgLinks[i]=MetaClass(link)
                log.debug('%s : Connected data is an mClass Object, returning the Class' % link)
        # if not cmds.attributeQuery(attr, node=self.mNode, m=True): # singular message
        #     #log.debug('getattr for multi-message attr: connections =[%s]' % ','.join(msgLinks))
        #     if isMetaNode(msgLinks[0]):
        #         return msgLinks[0] # MetaClass(msgLinks[0])
        return msgLinks
    else:
        log.debug('nothing connected to msgLink %s.%s' % (self.mNode,attr))
        return []
def __getattribute__(self, attr):
    '''
    Overload the method to always return the MayaNode
    attribute if it's been serialized to the MayaNode.
    Lookup order: when the mNode is dead/missing fall back to the python
    instance; otherwise message attrs return connected nodes, string attrs
    pass through the JSON decoder, and double3/float3 attrs are unpacked.
    '''
    #if callable(object.__getattribute__(self, attr)):
    #    log.debug("callable attr, bypassing tests : %s" % attr)
    #    return object.__getattribute__(self, attr)
    if callable(attr):
        #NOTE(review): attr is the attribute *name* string here, so this branch
        #looks unreachable in normal use - confirm before removing
        log.debug("callable attr, bypassing tests : %s" % attr)
        return attr
    try:
        #stops recursion, do not getAttr on mNode here
        mNode=object.__getattribute__(self, "mNode")
        if not mNode or not cmds.objExists(mNode):
            attrVal=object.__getattribute__(self, attr)
            return attrVal
        else:
            #MayaNode processing - retrieve attrVals on the MayaNode
            if cmds.attributeQuery(attr, exists=True, node=mNode):
                attrType=cmds.getAttr('%s.%s' % (mNode,attr),type=True)
                #Message Link handling
                #=====================
                if attrType=='message':
                    return self.__getMessageAttr__(attr)
                #Standard Maya Attr handling
                #===========================
                attrVal=cmds.getAttr('%s.%s' % (mNode,attr))
                if attrType=='string':
                    #for string data we pass it via the JSON decoder such that
                    #complex data can be managed and returned correctly
                    try:
                        attrVal=self.__deserializeComplex(attrVal)
                        if type(attrVal)==dict:
                            return attrVal
                        #log.debug('Making LinkedDict')
                        #return self.LinkedDict([self,attr],attrVal)
                    except:
                        #not JSON: fall through and return the raw string
                        log.debug('string is not JSON deserializable')
                elif attrType=='double3' or attrType=='float3':
                    return attrVal[0] # return (x,x,x) not [(x,x,x)] as standard Maya does
            else:
                attrVal=object.__getattribute__(self, attr)
            return attrVal
    except StandardError,error:
        raise StandardError(error)
def __serializeComplex(self, data):
    '''
    Serialize complex data such as dicts to a JSON string.

    Test the len of the string: anything over 32000 (16bit) gets screwed by the
    Maya attribute template and truncated IF you happened to select the string in the
    Attribute Editor. For long strings we need to force lock the attr here!
    big thanks to MarkJ for that as it was doing my head in!!
    http://markj3d.blogspot.co.uk/2012/11/maya-string-attr-32k-limit.html

    :param data: any JSON-serializable python object
    :return: the JSON string representation of data
    '''
    serialized = json.dumps(data)
    # BUGFIX: previously len(data) was tested, which for a dict counts the
    # keys - the 32k Maya limit applies to the *serialized* string length
    if len(serialized) > 32700:
        log.debug('Warning >> Length of string is over 16bit Maya Attr Template limit - lock this after setting it!')
    return serialized
def __deserializeComplex(self, data):
    '''
    Deserialize data from a JSON string back to it's original complex data

    :param data: str or unicode JSON string
    :return: the decoded python object; raises ValueError if not valid JSON
    '''
    #log.debug('deserializing data via JSON')
    if type(data) == unicode:
        #cast py2 unicode down to str before handing to json.loads
        return json.loads(str(data))
    return json.loads(data)
@nodeLockManager
def __delattr__(self, attr):
    '''
    Delete an attr from both the python instance and, if it exists there,
    from the wrapped Maya node itself.
    '''
    try:
        log.debug('attribute delete : %s , %s' % (self,attr))
        object.__delattr__(self, attr)
        if cmds.attributeQuery(attr, exists=True, node=self.mNode):
            cmds.deleteAttr('%s.%s' % (self.mNode, attr))
    except StandardError,error:
        raise StandardError(error)
def hasAttr(self, attr):
    '''
    Simple wrapper: True if the given attr exists on the mNode itself.
    Note this is not run in some of the core internal calls in this baseClass.
    '''
    return cmds.attributeQuery(attr, node=self.mNode, exists=True)
def attrIsLocked(self, attr):
    '''Return the current lock state of the given attr on the mNode.'''
    return cmds.getAttr('%s.%s' % (self.mNode, attr), lock=True)
@nodeLockManager
def attrSetLocked(self, attr, state):
try:
if not self.isReferenced():
cmds.setAttr('%s.%s' % (self.mNode,attr),l=state)
except StandardError,error:
log.debug(error)
@nodeLockManager
def addAttr(self, attr, value=None, attrType=None, hidden=False, **kws):
'''
Wrapped version of Maya addAttr that manages the basic type flags for you
whilst also setting the attr on the MayaNode/class object itself.
I now merge in **kws to the dict I pass to the add and set commands here so you
can specify all standard cmds.addAttr, setAttr flags in the same call.
ie min, max, l, k, cb
:param attr: attribute name to add (standard 'longName' flag)
:param value: initial value to set, if a value is given the attribute type is automatically
determined for you.
:param attrType: specify the exact type of attr to add. By default I try and resolve
this for you from the type of value passed in.
:param hidden: whether the attr is set available in the channelBox (only applies keyable attrs)
.. note::
specific attr management for given types below:
>>> double3: self.addAttr(attr='attrName', attrType='double3',value=(value1,value2,value3))
>>> float3: self.addAttr(attr='attrName', attrType='float3', value=(value1,value2,value3))
>>> enum: self.addAttr(attr='attrName', attrType='enum', value=1, enumName='Centre:Left:Right')
>>> doubleArray: self.addAttr(attr='attrName', attrType='doubleArray', value=[1.0,2.0,3.0,4.0,5.0])
>>> complex: self.addAttr('jsonDict', {'a':1.0,'b':2.0,'c':3.3,'d':['a','b','c']})
.. note::
max values for int is 2,147,483,647 (int32)
'''
DataTypeKws = {'string': {'longName':attr, 'dt':'string'}, \
'unicode': {'longName':attr, 'dt':'string'}, \
'int': {'longName':attr, 'at':'long'}, \
'bool': {'longName':attr, 'at':'bool'}, \
'float': {'longName':attr, 'at':'double'}, \
'float3': {'longName':attr, 'at':'float3'}, \
'double3': {'longName':attr, 'at':'double3'}, \
'doubleArray':{'longName':attr, 'dt':'doubleArray'}, \
'enum': {'longName':attr, 'at':'enum'}, \
'complex': {'longName':attr, 'dt':'string'}, \
'message': {'longName':attr, 'at':'message', 'm':True, 'im':True}, \
'messageSimple':{'longName':attr, 'at':'message', 'm':False}}
Keyable=['int','float','bool','enum','double3']
if attrType and attrType=='enum' and not 'enumName' in kws:
raise ValueError('enum attrType must be passed with "enumName" keyword in args')
#ATTR EXSISTS - EDIT CURRENT
#---------------------------
if cmds.attributeQuery(attr, exists=True, node=self.mNode):
#if attr exists do we force the value here?? NOOOO as I'm using this only
#to ensure that when we initialize certain classes base attrs exist with certain properties.
log.debug('"%s" : Attr already exists on the Node' % attr)
#allow some of the standard edit flags to be run even if the attr exists
addCmdEditFlags=['min','minValue','max','maxValue','defaultValue','dv',
'softMinValue','smn','softMaxValue','smx','enumName']
setCmdEditFlags=['keyable','k','lock','l','channelBox','cb']
addkwsToEdit={}
setKwsToEdit={}
if kws:
for kw,v in kws.items():
if kw in addCmdEditFlags:
addkwsToEdit[kw]=v
elif kw in setCmdEditFlags:
setKwsToEdit[kw]=v
if addkwsToEdit:
cmds.addAttr('%s.%s' % (self.mNode,attr),e=True,**addkwsToEdit)
log.debug('addAttr Edit flags run : %s = %s' % (attr, addkwsToEdit))
if setKwsToEdit:
cmds.setAttr('%s.%s' % (self.mNode,attr),**setKwsToEdit)
log.debug('setAttr Edit flags run : %s = %s' % (attr, setKwsToEdit))
return
#ATTR IS NEW, CREATE IT
#----------------------
else:
try:
if not attrType:
attrType=attributeDataType(value)
DataTypeKws[attrType].update(kws) # merge in **kws, allows you to pass in all the standard addAttr kws
log.debug('addAttr : valueType : %s > dataType kws: %s' % (attrType,DataTypeKws[attrType]))
cmds.addAttr(self.mNode, **DataTypeKws[attrType])
if attrType=='double3' or attrType=='float3':
attr1='%sX' % attr
attr2='%sY' % attr
attr3='%sZ' % attr
cmds.addAttr(self.mNode,longName=attr1,at='double',parent=attr,**kws)
cmds.addAttr(self.mNode,longName=attr2,at='double',parent=attr,**kws)
cmds.addAttr(self.mNode,longName=attr3,at='double',parent=attr,**kws)
object.__setattr__(self, attr1, None) # don't set it, just add it to the object
object.__setattr__(self, attr2, None) # don't set it, just add it to the object
object.__setattr__(self, attr3, None) # don't set it, just add it to the object
if attrType in Keyable and not hidden:
cmds.setAttr('%s.%s' % (self.mNode,attr1),e=True,keyable=True)
cmds.setAttr('%s.%s' % (self.mNode,attr2),e=True,keyable=True)
cmds.setAttr('%s.%s' % (self.mNode,attr3),e=True,keyable=True)
elif attrType=='doubleArray':
#have to initialize this type or Maya doesn't pick the attrType up!
cmds.setAttr('%s.%s' % (self.mNode, attr),[],type='doubleArray')
else:
if attrType in Keyable and not hidden:
cmds.setAttr('%s.%s' % (self.mNode, attr),e=True,keyable=True)
if value:
self.__setattr__(attr, value, force=False)
else:
#bind the attr to the python object if no value passed in
object.__setattr__(self, attr, None)
except StandardError,error:
raise StandardError(error)
def listAttrsOfType(self, Type='message'):
    '''
    this is a fast method to list all attrs of type on the mNode
    >>> [attr for attr in cmds.listAttr(self.mNode) if cmds.getAttr('%s.%s' % (self.mNode,attr),type=True)=='message']
    Simply using the above cmds calls is DOG SLOW upto this which goes via the Api.
    TODO: expand the Type support here

    :param Type: attr type to match; currently only 'message' is filtered,
        any other truthy value returns every attr unfiltered
    :return: list of short attr names found on the mNode
    '''
    depNodeFn=OpenMaya.MFnDependencyNode(self.mNodeMObject)
    attrCount = depNodeFn.attributeCount()
    ret = []
    for i in range(attrCount):
        attrObject = depNodeFn.attribute(i)
        if Type:
            if Type=='message':
                if not attrObject.hasFn(OpenMaya.MFn.kMessageAttribute):
                    continue
        #plug name comes back as 'node.attr' - keep just the attr part
        mPlug = depNodeFn.findPlug(attrObject)
        ret.append(mPlug.name().split('.')[1])
    return ret
# Utity Functions
#-------------------------------------------------------------------------------------
def select(self):
    '''Select the wrapped mNode in the current Maya scene.'''
    cmds.select(self.mNode)
@nodeLockManager
def rename(self, name):
    '''
    rename the mNode itself

    :param name: new name for the node; the mNode property re-resolves via
        the cached MObject so no rebind is needed here
    '''
    cmds.rename(self.mNode, name)
    #self.mNode=name
def delete(self):
    '''
    Delete the mNode; the node is force-unlocked first if needed. Note this
    class instance is no longer valid once the wrapped node is gone.

    WORKAROUND: Looks like there's a bug in the Network node in that deletion of a node
    will also delete all other connected networks...BIG DEAL. AD are looking into this for us
    '''
    if cmds.lockNode(self.mNode, q=True):
        cmds.lockNode(self.mNode, lock=False)
    cmds.delete(self.mNode)
    # NOTE: the old 'del(self)' was removed - it only unbound the local name
    # inside this method and had no effect on the instance or any caller refs
@nodeLockManager
def convertMClassType(self, newMClass, **kws):
    '''
    change the current mClass type of the node and re-initialize the object

    :param newMClass: registry key of the new mClass type to cast this node to
    :return: a freshly instantiated MetaClass wrapping the same mNode
    :raises StandardError: if the given class is not in the mClass registry
    '''
    if newMClass in RED9_META_REGISTERY:
        #cmds.setAttr('%s.%s' % (self.mNode,'mClass'),e=True,l=False)
        self.mClass=newMClass
        #cmds.setAttr('%s.%s' % (self.mNode,'mClass'),e=True,l=True)
        return MetaClass(self.mNode, **kws)
    else:
        raise StandardError('given class is not in the mClass Registry : %s' % newMClass)
def isReferenced(self):
    '''
    True if the wrapped mNode comes from a referenced file.
    '''
    return cmds.referenceQuery(self.mNode, isNodeReferenced=True)
def nameSpace(self):
    '''
    Return the namespace(s) of the node. If the namespace is nested this
    will return a list where [-1] is the direct namespace of the node.
    '''
    # BUGFIX: operate on the leaf name - splitting the full dag path
    # ('|parent|ns:node') on ':' would leak the parent path into the
    # first namespace entry
    return self.mNode.split('|')[-1].split(':')[:-1]
def shortName(self):
    '''Return the node name stripped of both dag path and namespace.'''
    leaf = self.mNode.split('|')[-1]
    return leaf.split(':')[-1]
# Connection Management Block
#---------------------------------------------------------------------------------
def _getNextArrayIndex(self, node, attr):
    '''
    Return the next available index in a multiMessage array: the first
    existing index with nothing connected, else one past the last used index.
    '''
    indices = cmds.getAttr('%s.%s' % (node, attr), multiIndices=True)
    if not indices:
        return 0
    for idx in indices:
        if not cmds.listConnections('%s.%s[%i]' % (node, attr, idx)):
            return idx
    return indices[-1] + 1
def isChildNode(self, node, attr=None, srcAttr=None):
    '''
    test if a node is already connected to the mNode via a given attr link.
    Why the wrap? well this gets over the issue of array index's in the connections
    cmds.isConnected('node.attr[0]','other.attr[0]')
    fails if simply asked:
    cmds.isConnected('node.attr',other.attr')

    :param node: node (or MetaClass instance) to test against the mNode's connections
    :param attr: if given, restrict the search to this attr on the mNode
    :param srcAttr: if given, also require the connection to land on this attr on node
    :return: True if connected, else False
    '''
    if issubclass(type(node), MetaClass):
        node=node.mNode
    if attr:
        cons=cmds.ls(cmds.listConnections('%s.%s' % (self.mNode,attr),s=False,d=True,p=True),l=True)
    else:
        cons=cmds.ls(cmds.listConnections(self.mNode,s=False,d=True,p=True),l=True)
    if cons:
        for con in cons:
            if srcAttr:
                #match 'longName.srcAttr' as a substring to cope with array indices
                if '%s.%s' % (cmds.ls(node,l=True)[0],srcAttr) in con:
                    return True
            else:
                if '%s.' % cmds.ls(node,l=True)[0] in con:
                    return True
    return False
@nodeLockManager
def connectChildren(self, nodes, attr, srcAttr=None, cleanCurrent=False, force=True, allowIncest=False):
    '''
    Fast method of connecting multiple nodes to the mNode via a message attr link.
    This call generates a MULTI message on both sides of the connection and is designed
    for more complex parent child relationships

    :param nodes: Maya nodes to connect to this mNode
    :param attr: Name for the message attribute
    :param srcAttr: if given this becomes the attr on the child node which connects it
        to self.mNode. If NOT given this attr is set to self.mNodeID
    :param cleanCurrent: Disconnect and clean any currently connected nodes to this attr.
        Note this is operating on the mNode side of the connection, removing
        any currently connected nodes to this attr prior to making the new ones
    :param force: Maya's default connectAttr 'force' flag, if the srcAttr is already connected
        to another node force the connection to the new attr
    :param allowIncest: Over-ride the default behaviour when dealing with child nodes that are
        standard Maya Nodes not metaNodes. Default in this case is to NOT index manage
        the plugs, this flag overloads that, allow multiple parents.

    TODO: check the attr type, if attr exists and is a non-multi messgae then don't run the indexBlock
    '''
    #make sure we have the attr on the mNode
    self.addAttr(attr, attrType='message')
    if not issubclass(type(nodes),list):
        nodes=[nodes]
    if cleanCurrent:
        self.__disconnectCurrentAttrPlugs(attr)  # disconnect/cleanup current plugs to this attr
    if not srcAttr:
        srcAttr=self.mNodeID  # attr on the nodes source side for the child connection
    if not nodes:
        #this allows 'None' to be passed into the set attr calls and in turn, allow
        #self.mymessagelink=None to clear all current connections
        return
    for node in nodes:
        ismeta=False
        if isMetaNode(node):
            ismeta=True
            if not issubclass(type(node), MetaClass):  # allows you to pass in an metaClass
                MetaClass(node).addAttr(srcAttr, attrType='message')
            else:
                node.addAttr(srcAttr, attrType='message')
                node=node.mNode
        elif not cmds.attributeQuery(srcAttr, exists=True, node=node):
            if allowIncest:
                #index-managed srcAttr so the node can have multiple parents
                MetaClass(node).addAttr(srcAttr, attrType='message')
            else:
                cmds.addAttr(node, longName=srcAttr, at='message', m=True, im=False)
        try:
            #also we need to add the self.allowIncest flag to trigger managed message links like this.
            if not self.isChildNode(node, attr, srcAttr):
                if ismeta or allowIncest:
                    if ismeta:
                        log.debug('connecting MetaData nodes via indexes :  %s.%s >> %s.%s' % (self.mNode,attr,node,srcAttr))
                    elif allowIncest:
                        log.debug('connecting Standard Maya nodes via indexes : %s.%s >> %s.%s' % (self.mNode,attr,node,srcAttr))
                    #wire free-index to free-index on both sides of the link
                    cmds.connectAttr('%s.%s[%i]' % (self.mNode, attr, self._getNextArrayIndex(self.mNode,attr)),
                                     '%s.%s[%i]' % (node, srcAttr, self._getNextArrayIndex(node,srcAttr)), f=force)
                else:
                    log.debug('connecting %s.%s >> %s.%s' % (self.mNode,attr,node,srcAttr))
                    cmds.connectAttr('%s.%s' % (self.mNode,attr),'%s.%s' % (node,srcAttr), f=force)
            else:
                raise StandardError('"%s" is already connected to metaNode "%s"' % (node,self.mNode))
        except StandardError,error:
            log.warning(error)
@nodeLockManager
def connectChild(self, node, attr, srcAttr=None, cleanCurrent=True, force=True):
    '''
    Fast method of connecting a node to the mNode via a message attr link. This call
    generates a NONE-MULTI message on both sides of the connection and is designed
    for simple parent child relationships.

    NOTE: this call by default manages the attr to only ONE CHILD to
    avoid this use cleanCurrent=False

    :param node: Maya node to connect to this mNode
    :param attr: Name for the message attribute
    :param srcAttr: If given this becomes the attr on the child node which connects it
        to self.mNode. If NOT given this attr is set to self.mNodeID
    :param cleanCurrent: Disconnect and clean any currently connected nodes to this attr.
        Note this is operating on the mNode side of the connection, removing
        any currently connected nodes to this attr prior to making the new ones
    :param force: Maya's default connectAttr 'force' flag, if the srcAttr is already connected
        to another node force the connection to the new attr

    TODO: do we move the cleanCurrent to the end so that if the connect fails you're not left
    with a half run setup?
    '''
    #make sure we have the attr on the mNode, if we already have a MULIT-message
    #should we throw a warning here???
    self.addAttr(attr, attrType='messageSimple')
    try:
        if cleanCurrent:
            self.__disconnectCurrentAttrPlugs(attr)  # disconnect/cleanup current plugs to this attr
        if not srcAttr:
            srcAttr=self.mNodeID  # attr on the nodes source side for the child connection
        if not node:
            #this allows 'None' to be passed into the set attr calls and in turn, allow
            #self.mymessagelink=None to clear all current connections
            return
        if isMetaNode(node):
            if not issubclass(type(node), MetaClass):  # allows you to pass in an metaClass
                MetaClass(node).addAttr(srcAttr,attrType='messageSimple')
            else:
                node.addAttr(srcAttr,attrType='messageSimple')
                node=node.mNode
        elif not cmds.attributeQuery(srcAttr, exists=True, node=node):
            cmds.addAttr(node, longName=srcAttr, at='message', m=False)
        if not self.isChildNode(node, attr, srcAttr):
            cmds.connectAttr('%s.%s' % (self.mNode,attr),'%s.%s' % (node,srcAttr), f=force)
        else:
            raise StandardError('%s is already connected to metaNode' % node)
    except StandardError, error:
        log.warning(error)
@nodeLockManager
def connectParent(self, node, attr, srcAttr=None, cleanCurrent=True):
    '''
    Fast method of connecting message links to the mNode as parents.
    Internally this simply runs connectChild from the parent's side.

    :param node: Maya node to connect to this mNode as parent
    :param attr: Name for the message attribute on the PARENT!
    :param srcAttr: If given this becomes the attr on the node which connects it
        to the parent. If NOT given this attr is set to parents shortName
    :param cleanCurrent: Exposed from the connectChild code which is basically what this is running in reverse

    TODO: Modify so if a metaClass is passed in use it's addAttr cmd so the new
    attr is registered in the class given
    TODO: Manage connection Index like the connectChildren call does?
    '''
    if not issubclass(type(node), MetaClass):
        node=MetaClass(node)
    if not srcAttr:
        srcAttr=node.shortName()
    #self.addAttr(srcAttr, attrType='message')
    try:
        # if not cmds.attributeQuery(attr, exists=True, node=node):
        #     #add to parent node
        #     cmds.addAttr(node,longName=attr, at='message', m=False)
        # cmds.connectAttr('%s.%s' % (node,attr),'%s.%s' % (self.mNode,srcAttr))
        node.connectChild(self, attr, srcAttr, cleanCurrent=cleanCurrent)
    except StandardError,error:
        log.warning(error)
@nodeLockManager
def __disconnectCurrentAttrPlugs(self, attr):
    '''
    from a given attr on the mNode disconnect any current connections and
    clean up the plugs by deleting the existing attributes
    '''
    currentConnects=self.__getattribute__(attr)
    if currentConnects:
        if not isinstance(currentConnects,list):
            currentConnects=[currentConnects]
        for connection in currentConnects:
            try:
                #log.debug('Disconnecting %s.%s >> from : %s' % (self.mNode,attr,connection))
                #only the source-side (child) plug is cleaned; our own attr is reused
                self.disconnectChild(connection, attr=attr, deleteSourcePlug=True, deleteDestPlug=False)
            except:
                #best-effort: carry on cleaning the remaining plugs
                log.warning('Failed to unconnect current message link')
@nodeLockManager
def disconnectChild(self, node, attr=None, deleteSourcePlug=True, deleteDestPlug=True):
'''
disconnect a given child node from the mNode. Default is to remove
the connection attribute in the process, cleaning up both sides of
the connection. Note that the attrs only get removed if nothing
else is connected to it, ie, it's safe to do so.
:param node: the Maya node to disconnect from the mNode
:param deleteSourcePlug: if True delete SOURCE side attribiute after disconnection
but ONLY if it's no longer connected to anything else.
:param deleteDestPlug: if True delete the DESTINATION side attribiute after disconnection
but ONLY if it's no longer connected to anything else.
>>> #testCode:
>>> master = r9Meta.MetaClass(name = 'master')
>>> master2 = r9Meta.MetaClass(name = 'master2')
>>> child1 = r9Meta.MetaClass(name = 'child1')
>>> child2 = r9Meta.MetaClass(name = 'child2')
>>> cube=cmds.ls(cmds.polyCube()[0],l=True)[0]
>>> master.connectChildren([child1,child2,cube],'modules','puppet')
>>> master2.connectChildren([child1.mNode,child2.mNode,cube],'time','master',force=True)
>>> master.connectChildren([child1,child2],'time','master',cleanCurrent=True)
>>>
>>> master.disconnectChild(child2,'time')
>>> #or
>>> master.disconnectChild(child2)
'''
sPlug=None
dPlug=None
sPlugMeta=None
searchConnection='%s.' % self.mNode.split('|')[-1]
if attr:
searchConnection='%s.%s' % (self.mNode.split('|')[-1],attr)
if isMetaNode(node): # and issubclass(type(node), MetaClass):
sPlugMeta=node
node=node.mNode
cons=cmds.listConnections(node,s=True,d=False,p=True,c=True)
if not cons:
raise StandardError('%s is not connected to the mNode %s' % (node,self.mNode))
for sPlug,dPlug in zip(cons[0::2],cons[1::2]):
log.debug('attr Connection inspected : %s << %s' % (sPlug,dPlug))
print 'searchCon : ', searchConnection
print 'dPlug : ', dPlug
if searchConnection in dPlug:
log.debug('Disconnecting %s >> %s as %s found in dPlug' % (dPlug,sPlug,searchConnection))
cmds.disconnectAttr(dPlug,sPlug)
if deleteSourcePlug: # child node
try:
allowDelete=True
attr=sPlug.split('[')[0] # split any multi-indexing from the plug ie node.attr[0]
if cmds.listConnections(attr):
allowDelete=False
log.debug('sourceAttr connections remaining: %s' % \
','.join(cmds.listConnections(attr)))
if allowDelete:
log.debug('Deleting deleteSourcePlug Attr %s' % (attr))
if sPlugMeta:
delattr(sPlugMeta,attr.split('.')[-1])
else:
cmds.deleteAttr(attr)
else:
log.debug('deleteSourcePlug attr aborted as node still has connections')
except StandardError,error:
log.warning('Failed to Remove mNode Connection Attr')
log.debug(error)
if deleteDestPlug: # self
try:
allowDelete=True
attr=dPlug.split('[')[0] # split any multi-indexing from the plug ie node.attr[0]
if cmds.listConnections(attr):
allowDelete=False
log.debug('sourceAttr connections remaining: %s' % \
','.join(cmds.listConnections(attr)))
if allowDelete:
log.debug('Deleting deleteDestPlug Attr %s' % (attr))
delattr(self,attr.split('.')[-1])
#cmds.deleteAttr(attr)
else:
log.debug('deleteDestPlug attr aborted as node still has connections')
except StandardError,error:
log.warning('Failed to Remove Node Connection Attr')
log.debug(error)
# Get Nodes Management Block
#---------------------------------------------------------------------------------
def addChildMetaNode(self, mClass, attr, srcAttr=None, nodeName=None, **kws):
    '''
    Generic call to add a MetaNode as a Child of self

    :param mClass: mClass to generate, given as a valid key to the
        RED9_META_REGISTERY ie 'MetaRig' OR a class object, ie r9Meta.MetaRig
    :param attr: message attribute to wire the new node too
    :param srcAttr: attr on the new child node used for the connection back to self
    :param nodeName: optional name to give the new node
    :return: the new child instance, or None (implicitly) if mClass isn't registered
    '''
    key=mTypesToRegistryKey(mClass)[0]
    if key in RED9_META_REGISTERY:
        childClass=RED9_META_REGISTERY[key]
        mChild=childClass(name=nodeName,**kws)
        self.connectChild(mChild, attr, srcAttr=srcAttr, **kws)
        return mChild
@r9General.Timer
def getChildMetaNodes(self, walk=False, mAttrs=None, **kws):
    '''
    Find any connected Child MetaNodes to this mNode

    :param walk: walk the connected network and return ALL children conntected in the tree
    :param mAttrs: only return connected nodes that pass the given attribute filter

    .. note::
        mAttrs is only searching attrs on the mNodes themselves, not all children
        and although there is no mTypes flag, you can use mAttrs to get chilnodes of type
        by going getChildMetaNodes(mAttrs='mClass=MetaRig')

    .. note::
        Because the **kws are passed directly to the getConnectedMetaNods func, it will
        also take ALL of that functions **kws functionality in the initial search:
        source=True, destination=True, mTypes=[], mInstances=[], mAttrs=None, dataType='mClass'

    :TODO: allow this to walk over nodes, at the moment if the direct child isn't of the correct
        type (if using the mTypes flag) then the walk will stop. This should continue over non matching
        nodes down the hierarchy so all children are tested.
    '''
    if not walk:
        return getConnectedMetaNodes(self.mNode, source=False, destination=True, mAttrs=mAttrs, dataType='mClass', **kws)
    else:
        metaNodes=[]
        children=getConnectedMetaNodes(self.mNode, source=False, destination=True, mAttrs=mAttrs, dataType='unicode', **kws)
        if children:
            runaways=0   # hard cap to guarantee termination on cyclic networks
            depth=0
            processed=[]
            extendedChildren=[]
            while children and runaways<=1000:
                #NOTE(review): 'children' is mutated (remove/extend) while being
                #iterated by the inner for-loop; the outer while + runaway guard
                #re-drives any skipped entries, but this pattern is fragile -
                #confirm before restructuring
                for child in children:
                    mNode=child
                    if mNode not in processed:
                        metaNodes.append(child)
                    else:
                        #print('skipping as node already processed : %s' % mNode)
                        children.remove(child)
                        continue
                    #log.info('mNode added to metaNodes : %s' % mNode)
                    children.remove(child)
                    processed.append(mNode)
                    #log.info( 'connections too : %s' % mNode)
                    extendedChildren.extend(getConnectedMetaNodes(mNode,source=False,destination=True,mAttrs=mAttrs, dataType='unicode', **kws))
                    #log.info('left to process : %s' % ','.join([c.mNode for c in children]))
                    if not children:
                        if extendedChildren:
                            log.debug('Child MetaNode depth extended %i' % depth)
                            #log.debug('Extended Depth child List: %s' % ','.join([c.mNode for c in extendedChildren]))
                            children.extend(extendedChildren)
                            extendedChildren=[]
                            depth+=1
                    runaways+=1
            return [MetaClass(node) for node in metaNodes]
    return []
def getParentMetaNode(self, **kws):
'''
Find any connected Parent MetaNode to this mNode
.. note::
Because the **kws are passed directly to the getConnectedMetaNods func, it will
also take ALL of that functions kws
source=True, destination=True, mTypes=[], mInstances=[], mAttrs=None, dataType='mClass'
TODO: implement a walk here to go upstream
'''
mNodes=getConnectedMetaNodes(self.mNode,source=True,destination=False, **kws)
if mNodes:
return mNodes[0]
# def __getChildren__(self, mNode, mAttrs=None, cAttrs=[]):
# log.debug('MetaNode : %s' % mNode)
# attrs=cmds.listAttr(mNode,ud=True,st=cAttrs)
# children=[]
# if attrs:
# for attr in attrs:
# if cmds.getAttr('%s.%s' % (mNode,attr),type=True)=='message':
# msgLinked=cmds.listConnections('%s.%s' % (mNode,attr),destination=True,source=False)
# if msgLinked:
# msgLinked=cmds.ls(msgLinked,l=True) #cast to longNames!
# children.extend(msgLinked)
# else:
# log.debug('no matching attrs : %s found on node %s' % (cAttrs,mNode))
# return children
@r9General.Timer
def getChildren(self, walk=True, mAttrs=None, cAttrs=[]):
'''
This finds all UserDefined attrs of type message and returns all connected nodes
This is now being run in the MetaUI on doubleClick. This is a generic call, implemented
and over-loaded on a case by case basis. At the moment the MetaRig class simple calls
mRig.getRigCtrls() in the call, but it means that we don't call .mRig.getRigCtrls()
in generic meta functions.
:param walk: walk all subMeta connections and include all their children too
:param mAttrs: only search connected mNodes that pass the given attribute filter (attr is at the metaSystems level)
:param cAttrs: only pass connected children whos connection to the mNode matches the given attr (accepts wildcards)
.. note::
mAttrs is only searching attrs on the mNodes themselves, not the children
cAttrs is searching the connection attr names from the mNodes, uses the cmds.listAttr 'st' flag
'''
childMetaNodes=[self]
children=[]
if walk:
childMetaNodes.extend([node for node in self.getChildMetaNodes(walk=True, mAttrs=mAttrs)])
for node in childMetaNodes:
log.debug('MetaNode getChildren : %s' % node.mNode)
attrs=cmds.listAttr(node.mNode,ud=True,st=cAttrs)
if attrs:
for attr in attrs:
if cmds.getAttr('%s.%s' % (node.mNode,attr),type=True)=='message':
msgLinked=cmds.listConnections('%s.%s' % (node.mNode,attr),destination=True,source=False)
if msgLinked:
msgLinked=cmds.ls(msgLinked,l=True) # cast to longNames!
children.extend(msgLinked)
else:
log.debug('no matching attrs : %s found on node %s' % (cAttrs,node))
if self._forceAsMeta:
return [MetaClass(node) for node in children]
return children
def getNodeConnectionMetaDataMap(self, node, mTypes=[]): # toself=False, allplugs=False):
'''
This is a generic wrapper to extract metaData connection info for any given node
used currently to build the pose dict up, and compare / match the data on load.
In the base implementation this gives you a dict of mNodeID and attr which the nodes is connected too.
:param node: node to inspect and get the connection data back from
:return: mNodes={} which is directly pushed into the PoseFile under the [metaData] key
.. note::
This is designed to be overloaded so you can craft your own metaData block in the
poseFiles, allows you to craft the data you want to store against a node.
'''
if type(node)==list:
raise StandardError("getNodeConnectionMetaDataMap: node must be a single node, not an list")
mNodes={}
#why not use the r9Meta.getConnectedMetaNodes ?? > well here we're using
#the c=True flag to get both plugs back in one go to process later
connections=[]
for nType in getMClassNodeTypes():
con=cmds.listConnections(node,type=nType,s=True,d=False,c=True,p=True)
if con:
connections.extend(con)
if not connections:
return connections
log.debug('%s : connectionMap : %s' % (node.split('|')[-1].split(':')[-1],connections[1::2]))
for con in connections[1::2]:
data = con.split('.') # attr
if isMetaNode(data[0], mTypes=mTypes):
mNodes['metaAttr'] = data[1]
try:
mNodes['metaNodeID']=cmds.getAttr('%s.mNodeID' % data[0])
except:
mNodes['metaNodeID']=node.split(':')[-1].split('|')[-1]
return mNodes
elif mTypes:
continue
if not mTypes: # if not mTypes passed bail the loop and return the first connection
return mNodes
return mNodes
def getNodeConnetionAttr(self, node):
'''
really light wrapper, designed to return the attr via which a node
is connected to this metaNode
:param node: node to test connection attr for
.. note::
This will be depricated soon and replaced by getNodeConnections which is
more flexible as it returns and filters all plugs between self and the given node.
'''
log.info('getNodeConnectionAttr will be depricated soon!!!!')
for con in cmds.listConnections(node,s=True,d=False,p=True):
if self.mNode in con.split('.')[0]:
return con.split('.')[1]
def getNodeConnections(self, node, filters=[]):
'''
really light wrapper, designed to return all connections
between a given node and the mNode
:param node: node to test connection attr for
:param filters: filter string to match for the returns
'''
cons=[]
for con in cmds.listConnections(node,s=True,d=False,p=True):
if self.mNode in con.split('.')[0]:
if filters:
for flt in filters:
if flt in con.split('.')[1]:
cons.append(con.split('.')[1])
else:
cons.append(con.split('.')[1])
return cons
def deleteEntireMetaRigStructure(searchNode=None):
    '''
    This is a hard core unplug and cleanup of all attrs added by the
    MetaRig, all connections and all nodes. Use CAREFULLY!
    :param searchNode: node to search the metaData system from; if not given
        the first currently selected node is used instead
    :raises StandardError: if the searchNode is invalid, nothing is selected,
        or no root MetaData system node could be found
    '''
    import Red9_AnimationUtils as r9Anim  # lazy to stop cyclic as anim also import meta
    if searchNode and not cmds.objExists(searchNode):
        raise StandardError('given searchNode doesnt exist')
    if not searchNode:
        # FIX: previously indexed straight into the selection and threw a raw
        # IndexError when nothing was selected
        selected = cmds.ls(sl=True)
        if not selected:
            raise StandardError('No searchNode given and nothing selected')
        searchNode = selected[0]
    mRig = getConnectedMetaSystemRoot(searchNode)
    if not mRig:
        raise StandardError('No root MetaData system node found from given searchNode')
    mNodes = []
    mNodes.append(mRig)
    mNodes.extend(mRig.getChildMetaNodes(walk=True))
    mNodes.reverse()  # children first so the system unwinds leaf -> root
    for a in mNodes:
        log.debug(a)  # FIX: was a leftover bare debug 'print' loop
    for metaChild in mNodes:
        for child in metaChild.getChildren(walk=False):
            metaChild.disconnectChild(child)
            r9Anim.MirrorHierarchy().deleteMirrorIDs(child)
            # For the time being I'm adding the OLD mirror markers to this
            # call for the sake of cleanup on old rigs
            if cmds.attributeQuery('mirrorSide', exists=True, node=child):
                cmds.deleteAttr('%s.mirrorSide' % child)
            if cmds.attributeQuery('mirrorIndex', exists=True, node=child):
                cmds.deleteAttr('%s.mirrorIndex' % child)
            if cmds.attributeQuery('mirrorAxis', exists=True, node=child):
                cmds.deleteAttr('%s.mirrorAxis' % child)
        metaChild.delete()
class MetaRig(MetaClass):
    '''
    Initial test for a MetaRig labelling system

    Root node of a rig's metaData network. Controllers get wired to this node
    (or to child subSystem nodes) via message attrs prefixed with self.CTRL_Prefix,
    support nodes via 'SUP_' attrs, and sub-systems via addMetaSubSystem.
    '''
    def __init__(self,*args,**kws):
        '''
        :param name: name of the node and in this case, the RigSystem itself
        '''
        super(MetaRig, self).__init__(*args,**kws)
        self.CTRL_Prefix='CTRL'  # prefix for all connected CTRL_ links added
        self.rigGlobalCtrlAttr='CTRL_Main'  # attribute linked to the top globalCtrl in the rig
        self.lockState = True  # lock the node to avoid accidental removal
        self.parentSwitchAttr='parent'  # attr used for parentSwitching
        self.MirrorClass = None  # capital as this binds to the MirrorClass directly
        #self.poseSkippedAttrs = []  # attributes which are to be IGNORED by the posesaver
    def __bindData__(self):
        '''
        attrs stamped onto the network node itself at creation time
        '''
        self.addAttr('version',1.0)  # ensure these are added by default
        self.addAttr('rigType', '')  # ensure these are added by default
        self.addAttr('renderMeshes', attrType='message')
        self.addAttr('exportSkeletonRoot', attrType='messageSimple')
    @property
    def characterSet(self):
        '''
        return the first connected characterSet found to children
        '''
        for node in self.getChildren(walk=True):
            chSet=cmds.listConnections(node, type='character')
            if chSet:
                return chSet[0]
    def addGenericCtrls(self, nodes):
        '''
        Pass in a list of objects to become generic, non specific
        controllers for a given setup. These are all connected to the same slot
        so don't have the search capability that the funct below gives
        '''
        self.connectChildren(nodes, 'RigCtrls')
    def addRigCtrl(self, node, ctrType, mirrorData=None, boundData=None):
        '''
        Add a single CTRL of managed type as a child of this mRig.
        :param node: Maya node to add
        :param ctrType: Attr name to assign this too
        :param mirrorData: {side:'Left', slot:int, axis:'translateX,rotateY,rotateZ'..}
        :param boundData: {} any additional attrData, set on the given node as attrs
        .. note::
            | mirrorData[slot] must NOT == 0 as it'll be handled as not set by the core.
            | ctrType >> 'Main' is the equivalent of the RootNode in the FilterNode calls.
        '''
        #import Red9_AnimationUtils as r9Anim # lazy load to avoid cyclic imports
        if isinstance(node,list):
            raise StandardError('node must be a single Maya Object')
        # wire the ctrl under '<CTRL_Prefix>_<ctrType>' so it can be found by name later
        self.connectChild(node,'%s_%s' % (self.CTRL_Prefix,ctrType))
        if mirrorData:
            mirror = MirrorHierarchy()
            axis=None
            if 'axis' in mirrorData:
                axis = mirrorData['axis']
            mirror.setMirrorIDs(node,
                                side=mirrorData['side'],
                                slot=mirrorData['slot'],
                                axis=axis)
        if boundData:
            if issubclass(type(boundData),dict):
                for key, value in boundData.iteritems():
                    log.debug('Adding boundData to node : %s:%s' %(key,value))
                    MetaClass(node).addAttr(key, value=value)
    def getRigCtrls(self, walk=False, mAttrs=None):
        '''
        Deprecated Code - use getChildren call now
        '''
        return self.getChildren(walk, mAttrs)
    def getChildren(self, walk=False, mAttrs=None, cAttrs=None):
        '''
        Massively important bit of code, this is used by most bits of code
        to find the child controllers linked to this metaRig instance.
        '''
        if not cAttrs:
            # default filter: the generic RigCtrls slot plus anything under CTRL_Prefix
            cAttrs=['RigCtrls', '%s_*' % self.CTRL_Prefix]
        return super(MetaRig, self).getChildren(walk=walk, mAttrs=mAttrs, cAttrs=cAttrs)
        #return self.getRigCtrls(walk=walk, mAttrs=mAttrs)
    def getSkeletonRoots(self):
        '''
        get the Skeleton Root, used in the poseSaver. By default this looks
        for a message link via the attr "exportSkeletonRoot" to the skeletons root jnt
        always returns a list!
        '''
        # NOTE(review): despite the docstring, the return is whatever the message
        # attr resolves to, or None when the attr is missing - verify at call sites
        if self.hasAttr('exportSkeletonRoot'):
            return self.exportSkeletonRoot
        return None
    # def getParentSwitchData(self):
    #     '''
    #     Simple func for over-loading. This returns a list of tuples [(node,attr)] for all
    #     found parentSwitch attrs on your rig. This is used by the PoseLoader to maintain
    #     parentSwitching when a pose is applied.
    #     Note: that by default I assume you use the same attr name for all parent switching
    #     on your rig. If not then you'll have to over-load this more carefully.
    #     '''
    #     parentSwitches=[]
    #     for child in self.getChildren(walk=True):
    #         if cmds.attributeQuery(self.parentSwitchAttr, exists=True,node=child):
    #             parentSwitches.append((child, self.parentSwitchAttr, cmds.getAttr('%s.%s' % (child,self.parentSwitchAttr))))
    #     return parentSwitches
    #
    #Do we supply a few generic presets?
    #---------------------------------------------------------------------------------
    def addWristCtrl(self,node,side,axis=None):
        '''preset wrapper: wire a Wrist ctrl for the given side, mirror slot 1'''
        self.addRigCtrl(node,'%s_Wrist' % side[0],
                        mirrorData={'side':side, 'slot':1,'axis':axis})
    def addElbowCtrl(self,node,side,axis=None):
        '''preset wrapper: wire an Elbow ctrl for the given side, mirror slot 2'''
        self.addRigCtrl(node,'%s_Elbow' % side[0],
                        mirrorData={'side':side, 'slot':2,'axis':axis})
    def addClavCtrl(self,node,side,axis=None):
        '''preset wrapper: wire a Clav ctrl for the given side, mirror slot 3'''
        self.addRigCtrl(node,'%s_Clav' % side[0],
                        mirrorData={'side':side, 'slot':3,'axis':axis})
    def addFootCtrl(self,node,side,axis=None):
        '''preset wrapper: wire a Foot ctrl for the given side, mirror slot 4'''
        self.addRigCtrl(node,'%s_Foot' % side[0],
                        mirrorData={'side':side, 'slot':4,'axis':axis})
    def addKneeCtrl(self,node,side,axis=None):
        '''preset wrapper: wire a Knee ctrl for the given side, mirror slot 5'''
        self.addRigCtrl(node,'%s_Knee' % side[0],
                        mirrorData={'side':side, 'slot':5,'axis':axis})
    def addPropCtrl(self,node,side,axis=None):
        '''preset wrapper: wire a Prop ctrl for the given side, mirror slot 6'''
        self.addRigCtrl(node,'%s_Prop' % side[0],
                        mirrorData={'side':side, 'slot':6,'axis':axis})
    #NOTE: Main should be the Top World Space Control for the entire rig
    #====================================================================
    def addMainCtrl(self,node,side='Centre',axis=None):
        '''preset wrapper: wire the Main (top world space) ctrl, centre slot 1'''
        self.addRigCtrl(node,'Main',
                        mirrorData={'side':side, 'slot':1,'axis':axis})
    def addRootCtrl(self,node,side='Centre',axis=None):
        '''preset wrapper: wire the Root ctrl, centre slot 2'''
        self.addRigCtrl(node,'Root',
                        mirrorData={'side':side, 'slot':2,'axis':axis})
    def addHipCtrl(self,node,side='Centre',axis=None):
        '''preset wrapper: wire the Hips ctrl, centre slot 3'''
        self.addRigCtrl(node,'Hips',
                        mirrorData={'side':side, 'slot':3,'axis':axis})
    def addChestCtrl(self,node,side='Centre',axis=None):
        '''preset wrapper: wire the Chest ctrl, centre slot 4'''
        self.addRigCtrl(node,'Chest',
                        mirrorData={'side':side, 'slot':4,'axis':axis})
    def addHeadCtrl(self,node,side='Centre',axis=None):
        '''preset wrapper: wire the Head ctrl, centre slot 5'''
        self.addRigCtrl(node,'Head',
                        mirrorData={'side':side, 'slot':5,'axis':axis})
    def addNeckCtrl(self,node,side='Centre',axis=None):
        '''preset wrapper: wire the Neck ctrl, centre slot 6'''
        self.addRigCtrl(node,'Neck',
                        mirrorData={'side':side, 'slot':6,'axis':axis})
    def addSupportMetaNode(self, attr, nodeName=None, mClass='MetaRigSupport', **kws):
        '''
        Not sure the best way to do this, but was thinking that the main mRig
        node should be able to have sub MetaClass nodes to cleanly define
        what nodes are AnimCtrls, and what nodes you want to tag as Support
        subsystems, ie, ikSolvers and construction nodes within the rig
        :param attr: Attribute used in the message link. Note this is what you use
            to traverse the Dag tree so use something sensible!
        :param nodeName: Name of the MetaClass network node created
        :param mClass: the class to be used for the support node - 'MetaRigSupport' by default
        '''
        if not nodeName:
            nodeName=attr
        return self.addChildMetaNode(mClass, attr=attr, nodeName=nodeName, **kws)
    def addSupportNode(self, node, attr, boundData=None):
        '''
        Add a single MAYA node flagged as a SUPPORT node of managed type
        Really in the MetaRig design these should be wired to a MetaRigSupport node
        :param node: Maya node to add
        :param attr: Attr name to assign this too
        :param boundData: {} Data to set on the given node as attrs
        '''
        self.connectChild(node,'SUP_%s' % attr)  # support nodes live under a 'SUP_' prefix
        if boundData:
            if issubclass(type(boundData),dict):
                for key, value in boundData.iteritems():
                    log.debug('Adding boundData to node : %s:%s' %(key,value))
                    MetaClass(node).addAttr(key, value=value)
    def addMetaSubSystem(self, systemType, side, attr=None, nodeName=None, mClass='MetaRigSubSystem'):
        '''
        Basic design of a MetaRig is that you have sub-systems hanging off an mRig
        node, managing all controllers and data for a particular system, such as an
        Arm system.
        :param systemType: type tag for the system, used to build the message attr. Note this
            is what you use to traverse the Dag tree so use something sensible!
        :param side: Side to designate the system. This is an enum: Centre,Left,Right
        :param attr: optional explicit attr name for the message link, defaults to '<side>_<systemType>_System'
        :param nodeName: Name of the MetaClass network node created
        :param mClass: the class to be used for the support node - 'MetaRigSubSystem' by default
        '''
        MirrorHierarchy()._validateMirrorEnum(side)  # ??? do we just let the enum __setattr__ handle this?
        if not attr:
            attr='%s_%s_System' % (side[0],systemType)
        if not nodeName:
            nodeName=attr
        subSystem=self.addChildMetaNode(mClass, attr=attr, nodeName=nodeName)
        #set the attrs on the newly created subSystem MetaNode
        subSystem.systemType=systemType
        subSystem.mirrorSide=side
        return subSystem
    def getMirrorData(self):
        '''
        Bind the MirrorObject to this instance of MetaRig.
        .. note::
            you must run this binding function before using any of
            the inbuilt mirror functions
        '''
        self.MirrorClass = MirrorHierarchy(nodes=self.getRigCtrls(walk=True))
        self.MirrorClass.getMirrorSets()
        log.debug('Filling the MirrorClass attr on demand')
        return self.MirrorClass
    def loadMirrorDataMap(self, mirrorMap):
        '''
        load a mirror setup onto this rig from a stored mirrorMap file
        :param mirrorMap: filepath to a previously saved mirror setup
        :raises IOError: if the given mirrorMap file doesn't exist
        '''
        if not self.MirrorClass:
            self.MirrorClass = self.getMirrorData()
        if not os.path.exists(mirrorMap):
            raise IOError('Given MirrorMap file not found : %s' % mirrorMap)
        MirrorHierarchy(self.getChildren()).loadMirrorSetups(mirrorMap)
    def getMirror_opposites(self, nodes):
        '''
        from the given nodes return a map of the opposite pairs of controllers
        so if you pass in a right controller of mirrorIndex 4 you get back the
        left[4] mirror node and visa versa. Centre controllers pass straight through
        :param nodes: nodes to fetch the opposite mirror pairs for
        '''
        if not self.MirrorClass:
            self.MirrorClass = self.getMirrorData()
        oppositeNodes=[]
        for node in nodes:
            side=self.MirrorClass.getMirrorSide(node)
            if not side:
                # node has no mirror markers - silently skipped
                continue
            if side=='Left':
                oppositeNodes.append(self.MirrorClass.mirrorDict['Right'][str(self.MirrorClass.getMirrorIndex(node))]['node'])
            if side=='Right':
                oppositeNodes.append(self.MirrorClass.mirrorDict['Left'][str(self.MirrorClass.getMirrorIndex(node))]['node'])
            if side=='Centre':
                oppositeNodes.append(node)
        return oppositeNodes
    def mirror(self, nodes=None, mode='Anim'):
        '''
        direct mapper call to the Mirror functions
        :param nodes: nodes to mirror, handled by the bound MirrorClass
        :param mode: mirror mode passed through to MirrorHierarchy.mirrorData
        '''
        if not self.MirrorClass:
            self.MirrorClass = self.getMirrorData()
        self.MirrorClass.mirrorData(nodes, mode)
    @nodeLockManager
    def poseCacheStore(self, attr=None, filepath=None, *args, **kws):
        '''
        intended as a cached pose for this mRig, if an attr is given then
        the cached pose is stored internally on the node so it can be loaded
        back from the mNode internally. If not given then the pose is cached
        on this object instance only.
        :param attr: optional - attr to store the cached pose to
        :param filepath: optional - path to store the pose too
        '''
        import Red9.core.Red9_PoseSaver as r9Pose  # lazy loaded
        self.poseCache=r9Pose.PoseData()
        self.poseCache.metaPose=True
        self.poseCache.poseSave(self.mNode, filepath=filepath, useFilter=True, *args, **kws)  # no path so cache against this pose instance
        if attr:
            if not self.hasAttr(attr):
                self.addAttr(attr, value=self.poseCache.poseDict, hidden=True)
            else:
                setattr(self, attr, self.poseCache.poseDict)
            self.attrSetLocked(attr,True)  # lock the attr so the cached pose can't be edited by hand
    def poseCacheLoad(self, nodes=None, attr=None, filepath=None, *args, **kws):
        '''
        load a cached pose back to this mRig. If attr is given then its assumed
        that that attr is a cached poseDict on the mNode. If not given then it
        will load the cached pose from this objects instance, if there is one stored.
        :param nodes: if given load only the cached pose to the given nodes
        :param attr: optional - attr in which a pose has been stored internally on the mRig
        :param filepath: optional - posefile to load back
        TODO: add relative flags so that they can pass through this call
        '''
        import Red9.core.Red9_PoseSaver as r9Pose  # lazy loaded
        if attr or filepath:
            self.poseCache=r9Pose.PoseData()
            self.poseCache.metaPose=True
            if attr:
                self.poseCache.poseDict=getattr(self,attr)
        if self.poseCache:
            if not nodes:
                self.poseCache.poseLoad(self.mNode, filepath=filepath, useFilter=True, *args, **kws)
            else:
                self.poseCache.poseLoad(nodes, filepath=filepath, useFilter=False, *args, **kws)
    def poseCompare(self, poseFile, supressWarning=False, compareDict='skeletonDict', filterMap=[], ignoreBlocks=[]):
        '''
        Integrated poseCompare, this checks the mRigs current pose against
        a given poseFile. This checks against the 'skeletonDict'
        :param poseFile: given .pose file with valid skeletonDict block
        :param supressWarning: if False raise the confirmDialogue
        :param compareDict: what block in the poseFile to compare the data against
        :param filterMap: passed through to the PoseCompare call to filter the compare
        :param ignoreBlocks: used to stop certain blocks in the compare from causing a fail eg : ['missingKeys']
        :return: returns a 'PoseCompare' class object with all the compare data in it
        '''
        import Red9.core.Red9_PoseSaver as r9Pose  # lazy loaded
        self.poseCacheStore()  # snapshot the current pose first so we compare live data
        compare=r9Pose.PoseCompare(self.poseCache,poseFile, compareDict=compareDict, filterMap=filterMap, ignoreBlocks=ignoreBlocks)
        if not compare.compare():
            info='Selected Pose is different to the rigs current pose\nsee script editor for debug details'
        else:
            info='Poses are the same'
        if not supressWarning:
            cmds.confirmDialog(title='Pose Compare Results',
                               button=['Close'],
                               message=info,
                               defaultButton='Close',
                               cancelButton='Close',
                               dismissString='Close')
        return compare
    @nodeLockManager
    def saveAttrMap(self):
        '''
        store AttrMap to the metaRig, saving the chBox state of ALL attrs for ALL nodes in the hierarchy
        '''
        import Red9_CoreUtils as r9Core  # lazy loaded
        chn = r9Core.LockChannels()
        chn.saveChannelMap(filepath=None,
                           nodes=getattr(self,'%s_Main' % self.CTRL_Prefix),
                           hierarchy=True,
                           serializeNode=self.mNode)
    def loadAttrMap(self):
        '''
        load AttrMap from the metaRig, returning the chBox state of ALL attrs for ALL nodes in the hierarchy
        '''
        import Red9_CoreUtils as r9Core  # lazy loaded
        chn = r9Core.LockChannels()
        chn.loadChannelMap(filepath=None,
                           nodes=getattr(self,'%s_Main' % self.CTRL_Prefix),
                           hierarchy=True,
                           serializeNode=self.mNode)
    @nodeLockManager
    def saveZeroPose(self):
        '''
        serialize the r9Pose file to the node itself
        '''
        self.poseCacheStore(attr='zeroPose')
    def loadZeroPose(self, nodes=None):
        '''
        load the zeroPose from the internal dict
        :param nodes: optional, load at subSystem level for given nodes
        '''
        self.poseCacheLoad(nodes=nodes, attr='zeroPose')
class MetaRigSubSystem(MetaRig):
    '''
    SubClass of the MRig, designed to organize Rig sub-systems (ie L_ArmSystem, L_LegSystem..)
    within a complex rig structure. This or MetaRig should have the Controllers wired to it
    '''
    def __init__(self,*args,**kws):
        super(MetaRigSubSystem, self).__init__(*args,**kws)
    def __bindData__(self):
        '''
        attrs stamped onto the network node at creation so the subSystem
        can be identified (systemType) and sided (mirrorSide enum)
        '''
        self.addAttr('systemType', attrType='string')
        self.addAttr('mirrorSide',enumName='Centre:Left:Right',attrType='enum')
class MetaRigSupport(MetaClass):
    '''
    SubClass of MetaClass, designed to organize support nodes, solvers and other internal
    nodes within a complex rig structure which you may need to ID at a later date.
    Controllers should NOT be wired to this node
    '''
    def __init__(self,*args,**kws):
        super(MetaRigSupport, self).__init__(*args,**kws)
    def __bindData__(self):
        '''
        over-load and blank so that the MetaRig bindData doesn't get inherited
        '''
        pass
    def addSupportNode(self, node, attr, boundData=None):
        '''
        Add a single MAYA node flagged as a SUPPORT node of managed type
        :param node: Maya node to add
        :param attr: Attr name to assign this too
        :param boundData: {} Data to set on the given node as attrs
        '''
        self.connectChild(node,'SUP_%s' % attr)  # support nodes live under a 'SUP_' prefix
        if boundData:
            if issubclass(type(boundData),dict):
                for key, value in boundData.iteritems():
                    log.debug('Adding boundData to node : %s:%s' %(key,value))
                    MetaClass(node).addAttr(key, value=value)
class MetaFacialRig(MetaRig):
    '''
    SubClass of the MetaRig, designed to manage Facial systems in the MetaData
    Dag tree for organizing Facial Controllers and support nodes
    '''
    def __init__(self,*args,**kws):
        super(MetaFacialRig, self).__init__(*args,**kws)
        self.CTRL_Prefix='FACE'  # facial ctrls get wired as FACE_* rather than CTRL_*
    def __bindData__(self):
        '''
        over-load and blank so that the MetaRig bindData doesn't get inherited
        '''
        pass
class MetaFacialRigSupport(MetaClass):
    '''
    SubClass of the MetaClass, designed to organize support nodes, solvers and other internal
    nodes within a complex rig structure which you may need to ID at a later date.
    Controllers should NOT be wired to this node
    '''
    def __init__(self,*args,**kws):
        super(MetaFacialRigSupport, self).__init__(*args,**kws)
        self.CTRL_Prefix='SUP'  # facial support nodes get wired as SUP_*
    def addSupportNode(self, node, attr, boundData=None):
        '''
        Add a single MAYA node flagged as a SUPPORT node of managed type
        :param node: Maya node to add
        :param attr: Attr name to assign this too
        :param boundData: {} Data to set on the given node as attrs
        '''
        self.connectChild(node,'%s_%s' % (self.CTRL_Prefix,attr))
        if boundData:
            if issubclass(type(boundData),dict):
                for key, value in boundData.iteritems():
                    log.debug('Adding boundData to node : %s:%s' %(key,value))
                    MetaClass(node).addAttr(key, value=value)
class MetaHIKCharacterNode(MetaRig):
    '''
    Casting HIK directly to a metaClass so it's treated as meta by default.
    Why the hell not, it's a complex character node that is default in Maya
    and useful for management in the systems
    '''
    def __init__(self, *args, **kws):
        #kws.setdefault('autofill','messageOnly')
        super(MetaHIKCharacterNode, self).__init__(*args,**kws)
    def __getMessageAttr__(self, attr):
        '''
        overloaded so that the main message wires return as single nodes
        rather than the default list return
        '''
        data = super(MetaHIKCharacterNode,self).__getMessageAttr__(attr)
        if data:
            if type(data) == list:
                return data[0]
            return data
    def getHIKControlSetNode(self):
        # return the first HIKControlSetNode wired to this character node, or None
        controlNode=cmds.listConnections(self.mNode,type='HIKControlSetNode')
        if controlNode:
            return controlNode[0]
class MetaHIKControlSetNode(MetaRig):
    '''
    Casting HIK directly to a metaClass so it's treated as meta by default.
    Why the hell not, it's a complex character node that is default in Maya
    and useful for management in the systems
    '''
    def __init__(self, *args, **kws):
        # autofill='messageOnly' - presumably exposes the nodes message plugs as
        # instance attrs so self.Reference below resolves; see MetaClass __init__
        kws.setdefault('autofill','messageOnly')
        super(MetaHIKControlSetNode, self).__init__(*args,**kws)
        self.CTRL_Main = self.Reference  # the HIK 'Reference' effector stands in as the rigs main ctrl
    def __getMessageAttr__(self, attr):
        '''
        overloaded so that the main message wires return as single nodes
        rather than the default list return
        '''
        data = super(MetaHIKControlSetNode,self).__getMessageAttr__(attr)
        if data:
            if type(data) == list:
                return data[0]
            return data
    def getHIKCharacterNode(self):
        # NOTE(review): assumes at least one HIKCharacterNode connection exists -
        # raises TypeError/IndexError otherwise; verify at call sites
        return cmds.listConnections(self.mNode,type='HIKCharacterNode')[0]
    def getChildren(self, walk=False, mAttrs=None, cAttrs=None):
        '''
        Carefully over-loaded for HIK system: returns the hikIKEffector /
        hikFKJoint nodes wired into this controlSet via message attrs
        '''
        children=[]
        attrs=cmds.listAttr(self.mNode)
        if attrs:
            for attr in attrs:
                if cmds.getAttr('%s.%s' % (self.mNode,attr),type=True)=='message':
                    effector=cmds.listConnections('%s.%s' % (self.mNode,attr),destination=False,source=True)
                    if effector:
                        for e in effector:
                            # only pass the actual HIK effector / FK joint nodes through
                            if cmds.nodeType(e) in ['hikIKEffector','hikFKJoint']:
                                children.extend(cmds.ls(e,l=True))
        return children
# EXPERIMENTAL CALLS ==========================================================
def monitorHUDaddCBAttrs():
    '''
    Adds selected attrs from the CB to a MetaHUD node for monitoring,
    if HUD node already exists this will simply add more attrs to it
    '''
    import Red9_CoreUtils as r9Core
    # FIX: previously indexed straight into the selection and threw a raw
    # IndexError when nothing was selected
    selected = cmds.ls(sl=True, l=True)
    if not selected:
        log.warning('No node selected to monitor attrs from')
        return
    node = selected[0]
    attrs = cmds.channelBox('mainChannelBox', q=True, selectedMainAttributes=True)
    # reuse an existing CBMonitorHUD metaNode if one is already in the scene
    currentHUDs = getMetaNodes(mTypes=MetaHUDNode, mAttrs='mNodeID=CBMonitorHUD')
    if not currentHUDs:
        metaHUD = MetaHUDNode(name='CBMonitorHUD')
    else:
        metaHUD = currentHUDs[0]
    if attrs:
        for attr in attrs:
            log.info('connecting cbAttr to meta: %s' % attr)
            # monitored attr name is '<shortNodeName>_<attr>' to keep it unique per node
            monitoredAttr = '%s_%s' % (r9Core.nodeNameStrip(node), attr)
            metaHUD.addMonitoredAttr(monitoredAttr,
                                     value=cmds.getAttr('%s.%s' % (node, attr)),
                                     refresh=False)
            cmds.connectAttr('%s.%s' % (node, attr), '%s.%s' % (metaHUD.mNode, monitoredAttr))
    metaHUD.refreshHud()
    cmds.select(node)
def monitorHUDManagement(func):
'''
kill any current MetaHUD headsUpDisplay blocks
'''
metaHUD=None
currentHUDs=getMetaNodes(mTypes=MetaHUDNode,mAttrs='mNodeID=CBMonitorHUD')
if currentHUDs:
metaHUD=currentHUDs[0]
if func=='delete':
if metaHUD:
metaHUD.delete()
else:
#No metaData node, scene may have been deleted but the HUD
#may still be up and active
HUDS=cmds.headsUpDisplay(lh=True)
for hud in HUDS:
if 'MetaHUDConnector' in hud:
print 'killing HUD : ',hud
cmds.headsUpDisplay(hud,remove=True)
if func=='refreshHeadsUp':
metaHUD.headsUpOnly=True
metaHUD.refreshHud()
if func=='refreshSliders':
metaHUD.headsUpOnly=False
metaHUD.refreshHud()
if func=='kill':
metaHUD.killHud()
def monitorHUDremoveCBAttrs():
    '''
    remove the currently selected ChannelBox attrs from the MetaHUD
    and redraw it
    '''
    import Red9_CoreUtils as r9Core
    currentHUDs=getMetaNodes(mTypes=MetaHUDNode,mAttrs='mNodeID=CBMonitorHUD')
    if currentHUDs:
        metaHUD=currentHUDs[0]
        # NOTE(review): raises IndexError if nothing is selected - verify callers
        node=cmds.ls(sl=True,l=True)[0]
        attrs=cmds.channelBox('mainChannelBox', q=True,selectedMainAttributes=True)
        if attrs:
            metaHUD.killHud()
            for attr in attrs:
                # monitored attr name matches the '<shortNodeName>_<attr>' convention used on add
                monitoredAttr='%s_%s' % (r9Core.nodeNameStrip(node), attr)
                print 'removing attr :',attr,monitoredAttr
                try:
                    metaHUD.removeMonitoredAttr(monitoredAttr)
                except:
                    # best-effort: the attr may never have been monitored - skip and continue
                    pass
        metaHUD.refreshHud()
# EXPERIMENTAL CALLS ==========================================================
class MetaHUDNode(MetaClass):
'''
SubClass of the MetaClass, designed as a simple interface
for HUD management in Maya. Any monitored attrs added to the MetaNode
will show in the HUD when drawn.
TODO: Look if we can link the Section and Block attrs to the refresh func
via an attrChange callback
'''
def __init__(self,*args,**kws):
super(MetaHUDNode, self).__init__(*args,**kws)
self.hudGroupActive=False
self.eventTriggers=cmds.headsUpDisplay(le=True)
self.size='small'
self.headsUpOnly=True
self.addAttr('monitorAttrCache', value='[]', attrType='string') # cache the HUD names so this runs between sessions
self.monitorAttrs=self.monitorAttrCache
self.addAttr('section', 1)
self.addAttr('block', 1)
self.addAttr('allowExpansion', True) # if a section can't contain all elements then expand to the section below
self.addAttr('eventTrigger', attrType='enum', value=0,enumName=':'.join(['attachToRefresh','timeChanged']))
HUDS=cmds.headsUpDisplay(lh=True)
for hud in HUDS:
if 'MetaHUDConnector' in hud:
self.hudGroupActive=True
def addMonitoredAttr(self, attr, value=None, attrType=None, refresh=True):
'''
wrapper that not only adds an attr to the metaNode, but also adds it
to the internal list of attributes that are monitored and added
to the HUD when drawn
'''
if not attr in self.monitorAttrs:
self.addAttr(attr, value=value, attrType=attrType)
self.monitorAttrs.append(attr)
#serialize back to the node
self.monitorAttrCache=self.monitorAttrs
if self.hudGroupActive==True and refresh:
try:
self.refreshHud()
except:
log.debug('addMonitorAttr failed')
else:
log.info('Hud attr already exists on metaHud Node')
def removeMonitoredAttr(self,attr):
'''
Remove an attr from the MetaNode and refresh the HUD to reflect the removal
'''
self.__delattr__(attr)
#def getEventTrigger(self,*args):
# return self.eventTriggers[self.eventTrigger]
def getHudDisplays(self):
'''
each line ing the HUD is actually a separate HUD in itself so we need
to carefully manage this list
'''
return ['MetaHUDConnector%s' % attr for attr in self.monitorAttrs]
def drawHUD(self):
#Attributes:
# - Section 1, block 0, represents the top second slot of the view.
# - Set the blockSize to "medium", instead of the default "small"
# - Assigned the HUD the label: "Position"
# - Defined the label font size to be large
# - Assigned the HUD a command to run on a SelectionChanged trigger
# - Attached the attributeChange node change to the SelectionChanged trigger
# to allow the update of the data on attribute changes.
for i,attr in enumerate(self.monitorAttrs):
section = self.section
block=self.block+i
if self.allowExpansion and i>17:
section = self.section+5
block = block-17
i=0
metaHudItem='MetaHUDConnector%s' % attr
if self.headsUpOnly:
if self.eventTrigger==1: # timeChanged
cmds.headsUpDisplay(metaHudItem,
section=section,
block=block,
blockSize=self.size,
label=attr,
labelFontSize=self.size,
allowOverlap=True,
command=partial(getattr,self,attr),
event='timeChanged')
else:
cmds.headsUpDisplay(metaHudItem,
section=section,
block=block,
blockSize=self.size,
label=attr,
labelFontSize=self.size,
allowOverlap=True,
attachToRefresh=True,
command=partial(getattr,self,attr))
else:
print 'node : ', self.mNode,' attrs : ', attr
connectedData=cmds.listConnections('%s.%s' % (self.mNode,attr),
connections=True,
skipConversionNodes=True,
plugs=True)[-1].split('.')
cmds.hudSliderButton(metaHudItem,
section=section,
block=block,
vis=True,
sliderLabel=attr,
sliderDragCommand=partial(self.setSlidertoAttr, metaHudItem, '%s.%s' % (connectedData[0],connectedData[1])),
value=0, type='float',
sliderLabelWidth=150,
valueWidth=60,
sliderLength=150,
bl='Reset',
bw=60, bsh='rectangle',
buttonReleaseCommand=partial(self.resetSlider, metaHudItem, '%s.%s' % (connectedData[0],connectedData[1])))
try:
attrMin=cmds.attributeQuery(connectedData[1], node=connectedData[0], min=True)
if attrMin:
cmds.hudSliderButton(metaHudItem, e=True, min=attrMin[0])
except:
cmds.hudSliderButton(metaHudItem, e=True, min=-1000)
try:
attrMax=cmds.attributeQuery(connectedData[1], node=connectedData[0], max=True)
if attrMax:
cmds.hudSliderButton(metaHudItem, e=True, max=attrMax[0])
except:
cmds.hudSliderButton(metaHudItem, e=True, max=1000)
self.hudGroupActive=True
def getConnectedAttr(self, attr):
return cmds.listConnections('%s.%s' % (self.mNode,attr),c=True,p=True)[-1]
def getConnectedNode(self, attr):
return cmds.listConnections('%s.%s' % (self.mNode,attr))[0]
def setSlidertoAttr(self, slider, attr):
cmds.setAttr(attr, cmds.hudSliderButton(slider, query=True, v=True))
    def resetSlider(self, slider, attr):
        # Reset both the HUD slider and the attribute it drives back to the
        # attribute's default value (falls back to 0 when the default can't
        # be queried).
        value=0
        try:
            # NOTE(review): addAttr is queried here without naming a node or
            # attribute, so which default value Maya returns is context
            # dependent -- confirm this really yields the default for ``attr``.
            value=cmds.addAttr(q=True,dv=True)
        except:
            # Bare except kept deliberately: any query failure means "use 0".
            pass
        cmds.setAttr(attr, value)
        cmds.hudSliderButton(slider, e=True, v=value)
def showHud(self,value):
for hud in self.getHudDisplays():
cmds.headsUpDisplay(hud, edit=True, visible=value)
def killHud(self):
for hud in self.getHudDisplays():
if cmds.headsUpDisplay(hud,exists=True):
cmds.headsUpDisplay(hud,remove=True)
self.hudGroupActive=False
def refreshHud(self):
if self.hudGroupActive==True:
self.killHud()
self.drawHUD()
    def delete(self):
        '''
        full cleanup, remove the metaNode and all HUDs in the process
        '''
        # Tear the HUD down first so no HUD command/callback is left
        # referencing a deleted metaNode, then delete the node itself.
        self.killHud()
        super(MetaHUDNode, self).delete()
def __delattr__(self, attr):
'''
delete an attr on the metaNode and remove it from the monitored list
'''
wasActive=False
if self.hudGroupActive==True:
self.killHud()
wasActive=True
self.monitorAttrs.remove(attr)
#serialize back to the node
self.monitorAttrCache=self.monitorAttrs
super(MetaHUDNode, self).__delattr__(attr)
if wasActive==True:
self.drawHUD()
'''
If we reload r9Meta on its own, the registry used while constructing the
nodes will fall out of sync and invalidate the systems. This is a catch
for that.
'''
#registerMClassInheritanceMapping()
| [
"rednineinfo@gmail.com"
] | rednineinfo@gmail.com |
6e64d297d9172f886c6b437cb286797e7c2fbf47 | 170912538f1cf46f58ed2dd5ae26506b0d885785 | /solutions/p7.py | ffdbdbb0df81aba2f9cce9c83c05bd7bc2c30164 | [] | no_license | gridl/Matasano-Crypto-Solutions | 0147329804ad89e8e8fda562ff7a421a8b48383d | 447baf78e7ee213733c023e102807e6e39407ad3 | refs/heads/master | 2020-03-23T01:57:47.481473 | 2016-01-28T04:06:19 | 2016-01-28T04:06:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | __author__ = 'Amirali Sanatinia'
import base64
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
def aes_dec_ecb(key, cipher_text):
    """Decrypt ``cipher_text`` with AES in ECB mode.

    Args:
        key: AES key (must be a valid AES key length, e.g. 16 bytes).
        cipher_text: raw ciphertext bytes (length a multiple of 16).

    Returns:
        The decrypted plaintext bytes (no padding removal is performed).
    """
    # Fix: removed the redundant ``key = key`` self-assignment.
    backend = default_backend()
    cipher = Cipher(algorithms.AES(key), modes.ECB(), backend)
    decryptor = cipher.decryptor()
    return decryptor.update(cipher_text) + decryptor.finalize()
if __name__ == '__main__':
    # Strip newlines so the whole file is one continuous base64 payload.
    cipher_text = open('7.txt').read().replace('\n', '')
    # Challenge 7: the file is AES-128-ECB encrypted under this fixed key.
    print aes_dec_ecb("YELLOW SUBMARINE", base64.b64decode(cipher_text))
"amiralis@users.noreply.github.com"
] | amiralis@users.noreply.github.com |
8a73389fe82f6d6d6ea7df89a51735607184e012 | f5d7b66802ae7e76274ac07010df76e3fb65f34c | /Learn and Practice PyThOn/chepter-7 (String)/startswith() and endswith().py | 99559c0a7ade034060602e9b6e8c0204ccbfe3c1 | [] | no_license | darkcoders321/fahim | e11ab8989ebad62534dad9ad4dad66f848384049 | e2b2d5a7c00003952f39dfbeb177abc2afcc3443 | refs/heads/master | 2020-09-09T14:41:01.221868 | 2019-11-13T14:13:11 | 2019-11-13T14:13:11 | 221,471,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | name="Fahim"
# Demonstrate str.startswith() / str.endswith().
name = "Fahim"
start, end = name.startswith("F"), name.endswith("m")
print(start, end)
# conditional example: greet formally when the name carries a title prefix
name = "Mr.Fahim"
if name.startswith("Mr."):
    print("Dear sir")
"darkcoders321@gmail.com"
] | darkcoders321@gmail.com |
71d2a13f8caafddcea1f116f0df66bf877ac26e0 | 49eaaf069da00421918dd15f66bc5a02cbc2245a | /csp_bluetooth_demo/src/csp_bluetooth_demo_node.py | abf53f59d88a50532538a4ded29cb67a071fdc6b | [] | no_license | YuryXW/csp_bluetooth_practice | 471a73eded83c93887f9776404a9a90b9949c3d4 | afbef42b66353fa6461d19b4dcea994008112042 | refs/heads/master | 2021-08-19T08:01:13.278075 | 2017-11-25T10:58:48 | 2017-11-25T10:58:48 | 110,141,487 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | #!/usr/bin/env python
## @package docstring
# This is the typical ROS template node for python.
#
import rospy
import sys
import signal
from std_msgs.msg import String
from duckietown_msgs.msg import Twist2DStamped
from duckietown_msgs.msg import BoolStamped
from std_msgs.msg import Header
#--------------------------------- Constants ----------------------------------#
TAG = "CSP Bluetooth Demo:" ## Node verbosity tag
node_name = "bluetooth_demo_node" ## ROS node name
#------------------------------------------------------------------------------#
## Application class
#
class Application:
    """ROS node that drives a Duckiebot from phone accelerometer data
    received over the bluetooth bridge, and vibrates the phone when a
    stop line is detected."""
    # Current speed and omega (class-level defaults, updated per message)
    v = 0.0
    omega = 0.0
    v_const = 0
    ## Init function: wires subscribers/publishers and blocks in rospy.spin().
    #  @param self The object pointer.
    def __init__(self):
        # Assigning the SIGINT handler
        signal.signal(signal.SIGINT, self.sigint_handler)
        # Starting the node
        rospy.init_node(node_name, anonymous=True)
        # Subscribing to bluetooth_bridge output
        rospy.Subscriber("/bluetooth/received", String, self.message_handler)
        rospy.Subscriber("stop_line_filter_node/at_stop_line", BoolStamped, self.stop_line_handler)
        # Publishers
        self.cmd_pub = rospy.Publisher("joy_mapper_node/car_cmd", Twist2DStamped, queue_size = 10)
        self.bt_pub = rospy.Publisher("/bluetooth/send", String, queue_size = 10)
        # Run the node in eternal loop (blocks until shutdown)
        rospy.spin()
    ## SIGINT Signal handler: print a farewell and exit the process.
    #  @param self The object pointer.
    #  @param signal The signal itself.
    #  @param frame The timeframe signal occured.
    def sigint_handler(self, signal, frame):
        print ""
        print TAG,"Interrupt!"
        print TAG,"Terminated"
        sys.exit(0)
    ## Handle a semicolon-separated batch of commands from bluetooth_bridge.
    #  Recognized tokens: "GO", "STOP", "Roll:<deg>", "Pitch:<deg>".
    #  NOTE(review): malformed tokens (no ':' or non-numeric value) raise
    #  IndexError/ValueError inside this ROS callback -- confirm intended.
    def message_handler(self, message):
        s = message.data
        msgs = s.split(";")
        for msg in msgs:
            # Ordering robot to GO - move forward at constant speed
            if (msg=="GO"):
                print msg
                self.v_const = 0.4
            # Ordering robot to stop
            elif (msg=="STOP"):
                print msg
                self.v_const = 0.0
                self.v = 0.0
            else:
                # Controlling robot with Roll and Pitch values from accelerometer
                c_msg = msg.split(":")
                # Try to invert these values
                if c_msg[0]=="Roll": # Roll controls omega (scaled to +/-8)
                    self.omega = (float(c_msg[1])/90.0)*8.0
                if c_msg[0]=="Pitch": # Pitch controls velocity (scaled to +/-1)
                    self.v = (float(c_msg[1])/90.0)
        # Let's send a message now via bluetooth_bridge
        h = Header()
        h.stamp = rospy.Time.now()
        topic_message = Twist2DStamped()
        topic_message.v = self.v+self.v_const #Velocity is a sum of constant speed (in case of GO)
                                              #and a variable speed from accelerometer.
        topic_message.omega = self.omega
        if topic_message.v > 1.0: #Do not let the velocity exceed -1.0..1.0 range.
            topic_message.v = 1.0
        if topic_message.v < -1.0:
            topic_message.v = -1.0
        self.cmd_pub.publish(topic_message) #Publish the topic to bluetooth_bridge
    # Handling the Stop Line detection: ask the phone to vibrate.
    def stop_line_handler(self, data):
        print "STOP LINE DETECTED!"
        self.bt_pub.publish("VIBRATE")
#------------------------------------- Main -----------------------------------#
if __name__ == '__main__':
    # Constructing Application blocks in rospy.spin() until shutdown.
    print TAG,"Started"
    app = Application()
    print TAG,"Terminated"
| [
"SoulCrash@yandex.ru"
] | SoulCrash@yandex.ru |
5e73ef0d3118c4e024fe986a11cdce3910655b65 | 01b04d980b2746b4d4db1c2be1a263f77e2a7596 | /liangsongyou.blog/blog/views.py | 95f46d736ea14fb65f41de2b0e1d859dea64a6e2 | [] | no_license | liangsongyou/quarkblob | e9763efefe91f30b6da278ca6787564770cef4ec | 5d926ab40881a5f499734bfcbcb083d8bbb5e03e | refs/heads/master | 2022-11-26T17:30:47.276314 | 2018-11-28T09:47:54 | 2018-11-28T09:47:54 | 155,494,671 | 0 | 0 | null | 2022-11-22T03:07:32 | 2018-10-31T03:42:41 | Python | UTF-8 | Python | false | false | 1,505 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import permission_required
from blog.models import Post
from blog.forms import PostForm
def post(request, slug=None):
    """Render a single blog post looked up by its slug (404 if absent)."""
    entry = get_object_or_404(Post, slug=slug)
    return render(request, 'blog/post.html', {'item': entry, 'title': entry})
@permission_required('blog.add_post')
def add_post(request):
    """Create a new blog post authored by the current user.

    On a valid POST, saves the post (author set to the requester, then
    m2m fields) and redirects to it; otherwise renders the form, bound
    with errors when the submission was invalid.
    """
    form = PostForm()
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES)
        if form.is_valid():
            new_post = form.save(commit=False)
            new_post.author = request.user
            new_post.save()
            form.save_m2m()
            return redirect(new_post.get_absolute_url())
    return render(request, 'blog/post_form.html', {'form': form,
                                                   'title': 'Add Post'})
# NOTE(review): default Django model permissions are add/change/delete;
# confirm a custom 'edit_post' permission exists, otherwise this view is
# never accessible.
@permission_required('blog.edit_post')
def edit_post(request, pk=None):
    """Edit an existing post identified by primary key.

    Renders the bound form (with errors) on an invalid POST; redirects
    to the post's absolute URL after a successful save.
    """
    item = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES, instance=item)
        if form.is_valid():
            form.save()
            return redirect(item.get_absolute_url())
    else:
        form = PostForm(instance=item)
    # Fix: page title previously read 'Eidt: ...' (typo).
    title = 'Edit: %s' % item
    return render(request, 'blog/post_form.html', {'form':form,
                                                   'item':item,
                                                   'title':title,})
| [
"yuebei58@gmail.com"
] | yuebei58@gmail.com |
80a6deb154d10fde1361416d0b9801cad698cb91 | 13de9dc09d02f8287f266ae526ee62538696e646 | /src/billing/admin.py | 75f9e57c571a045f67d2a9530e37161751f4ef37 | [
"MIT"
] | permissive | b4isty/ecommerce_udemy | 187dd981913549cfc0766dbb21e73f22802824b0 | 1c631613afe10a9f51f61cedf509fc79bface2ed | refs/heads/master | 2020-04-07T07:50:35.778308 | 2018-12-19T15:03:41 | 2018-12-19T15:03:41 | 158,191,021 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | from django.contrib import admin
from .models import BillingProfile
# Expose the billing models in the Django admin UI.
admin.site.register([BillingProfile])
| [
"baishakhi@digitalaptech.com"
] | baishakhi@digitalaptech.com |
da026e790a650039b294a25fa6452544897d4cd1 | b00ede2347d9364c2ab8dacc18aae1889db0dfa6 | /webapp/server/__init__.py | 284df315d0d786c32643f6d2ab520c700c337368 | [] | no_license | JZYoshi/AFDA | e81c0f5e10a1ff6c22c102ae050ff4d97610d2c2 | 7d19bdbcf32bdcdf795fa29beab1215796b12ff8 | refs/heads/main | 2023-03-28T12:45:37.618189 | 2021-03-18T15:09:23 | 2021-03-18T15:09:23 | 303,192,043 | 0 | 1 | null | 2021-03-18T15:09:24 | 2020-10-11T19:01:03 | Jupyter Notebook | UTF-8 | Python | false | false | 5,072 | py | from flask import Flask, render_template, current_app, request
import os
from flask.json import jsonify
import numpy as np
from scipy.stats import gaussian_kde, entropy
from . import db
def create_app(test_config=None):
    """Application factory for the flight-descriptor dashboard.

    Builds the Flask app, wires the SQLite descriptor database, and
    registers the JSON API routes consumed by the Vue front-end.

    Args:
        test_config: optional config mapping (accepted for the standard
            Flask factory signature; currently unused).

    Returns:
        flask.Flask: the configured application.
    """
    # create and configure the app
    app = Flask(__name__, template_folder="../vue/client/dist", static_folder="../vue/client/dist/static")
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, '../../data/descriptors.db'),
    )
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    db.init_app(app)
    @app.route('/')
    def index():
        # Serve the built Vue single-page app.
        return render_template("index.html")
    @app.route('/generalinfo', methods=['GET'])
    def general_info():
        # Aggregate per-airline flight counts plus dataset-wide totals.
        df = db.db_to_pandas(current_app.config['DATABASE'])
        nb_flights = df[['airline','flight_id']].groupby('airline').count().rename(columns={'flight_id':'number of flights'})
        nb_flights.sort_values('number of flights', ascending=False, inplace=True)
        fig_data = nb_flights.to_dict('list')
        nb_airlines = len(nb_flights)
        tt_flights = int(nb_flights['number of flights'].sum())
        return jsonify({
            'nb_airlines': nb_airlines,
            'tt_flights': tt_flights,
            'fig_data': fig_data
        })
    @app.route('/flightsnumbers', methods=['GET'])
    def get_flights_numbers():
        # Flight count per airline, sorted descending.
        df = db.db_to_pandas(current_app.config['DATABASE'])
        nb_flights = df[['airline','flight_id']].groupby('airline').count().rename(columns={'flight_id':'number of flights'})
        nb_flights.sort_values(by='number of flights', ascending=False, inplace=True)
        # Fix: removed stray debug call ``nb_flights.info()`` that printed
        # the frame summary to stdout on every request.
        nb_flight_dict = { 'airlines': nb_flights.index.tolist(), 'flight_number': nb_flights['number of flights'].tolist() }
        return jsonify(nb_flight_dict)
    @app.route('/airlinestat', methods=['POST'])
    def get_airline():
        # Per-descriptor value lists for one airline.
        post_data = request.get_json()
        airline = post_data.get('airline')
        df = db.db_to_pandas(current_app.config['DATABASE'])
        # Fix: build a new frame instead of ``inplace=True`` dropping on a
        # filtered view of ``df`` (avoids pandas SettingWithCopyWarning).
        my_airline = df[df['airline']==airline].drop(
            columns=['flight_id','icao','airline', 'icao_airline'])
        descriptors = my_airline.columns
        figlist=[]
        for descriptor in descriptors:
            figlist.append({ 'label': descriptor, 'values': my_airline[descriptor].dropna().tolist() })
        return jsonify(figlist)
    @app.route('/allairlines', methods=['GET', 'POST'])
    def get_all_airlines():
        # GET: every airline; POST: only airlines with >= threshold flights.
        df = db.db_to_pandas(current_app.config['DATABASE'])
        airlines = []
        if (request.method == 'GET'):
            airlines = list(set(df[df['airline'].notnull()]['airline'].tolist()))
        else:
            threshold = request.get_json().get('threshold')
            s = df['airline'].value_counts()
            s = s[s >= threshold]
            airlines = s.index.tolist()
        return jsonify({
            'airlines': airlines
        })
    @app.route('/descriptors', methods=['GET'])
    def get_all_descriptors():
        # Names of all descriptor columns (metadata columns excluded).
        df = db.db_to_pandas(current_app.config['DATABASE'])
        descriptors = list(df.drop(columns=['flight_id','icao','airline', 'icao_airline']).columns)
        return jsonify({
            'descriptors': descriptors
        })
    @app.route('/compareairlines', methods=['POST'])
    def get_airlines_compare_res():
        # KDE curves per airline/descriptor plus a pairwise distance matrix.
        df = db.db_to_pandas(current_app.config['DATABASE'])
        df.replace([np.inf, -np.inf], np.nan, inplace=True)
        df.dropna(inplace=True)
        post_data = request.get_json()
        airlines = post_data.get('airlines')
        descriptors = post_data.get('descriptors')
        figlist = []
        for descriptor in descriptors:
            x_min = min(df[descriptor])
            x_max = max(df[descriptor])
            x = np.linspace(x_min,x_max, 100)
            airline_stat_list = []
            kde_values_list = []
            for airline in airlines:
                values = df[df['airline']==airline][descriptor].dropna()
                kde = gaussian_kde(values)
                kde_values = kde(x)
                airline_stat_list.append({
                    'airline': airline,
                    'descriptor_values': values.tolist(),
                    'kde_values': list(kde_values),
                    'x_kde': list(x)
                })
                kde_values_list.append(kde_values)
            step = x[1] - x[0]
            n = len(airlines)
            mat_dist = np.zeros((n,n))
            for i in range(n):
                for j in range(n):
                    mat_dist[i, j] = np.sqrt(step*sum((kde_values_list[i] - kde_values_list[j])**2))
            # NOTE: despite the 'kde_entropy' key (kept for front-end
            # compatibility), this matrix holds pairwise L2 distances
            # between the KDE curves, not an entropy.
            mat_dist = np.where(~np.isfinite(mat_dist), None, mat_dist)
            figlist.append({
                'descriptor': descriptor,
                'airlines': airline_stat_list,
                'kde_entropy': mat_dist.tolist()
            })
        return jsonify(figlist)
    return app
"jzydsrs617@hotmail.com"
] | jzydsrs617@hotmail.com |
4555c577dc463c13fc775ebb44fbd7a8dfc4a298 | 08e847cc3f05fc5c29193f2da2f53af1cb97ad12 | /Python基础课件/代码/第四天的代码/11-函数的介绍.py | 3fef965691c9e92c8631c16d77061b66987dbdb6 | [] | no_license | MayWorldPeace/QTP | d6605191392e2075053037cc8038de8d43a85680 | 9b0b2d689ffb7ab608f3956642da22d9b6f0a132 | refs/heads/master | 2020-06-06T10:28:16.173186 | 2019-07-04T13:24:10 | 2019-07-04T13:24:10 | 192,714,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py |
# python中函数
# 函数的定义
# 打印一个佛祖镇楼 -> 一个功能点的解释
def print_info():
print("测试")
print(" _ooOoo_ ")
print(" o8888888o ")
print(" 88 . 88 ")
print(" (| -_- |) ")
print(" O\\ = /O ")
print(" ____/`---'\\____ ")
print(" . ' \\| |// `. ")
print(" / \\||| : |||// \\ ")
print(" / _||||| -:- |||||- \\ ")
print(" | | \\\\\\ - /// | | ")
print(" | \\_| ''\\---/'' | | ")
print(" \\ .-\\__ `-` ___/-. / ")
print(" ___`. .' /--.--\\ `. . __ ")
print(" ."" '< `.___\\_<|>_/___.' >'"". ")
print(" | | : `- \\`.;`\\ _ /`;.`/ - ` : | | ")
print(" \\ \\ `-. \\_ __\\ /__ _/ .-` / / ")
print(" ======`-.____`-.___\\_____/___.-`____.-'====== ")
print(" `=---=' ")
print(" ")
print(" ............................................. ")
print(" 佛祖镇楼 BUG辟易 ")
print(" 佛曰: ")
print(" 写字楼里写字间,写字间里程序员; ")
print(" 程序人员写程序,又拿程序换酒钱。 ")
print(" 酒醒只在网上坐,酒醉还来网下眠; ")
print(" 酒醉酒醒日复日,网上网下年复年。 ")
print(" 但愿老死电脑间,不愿鞠躬老板前; ")
print(" 奔驰宝马贵者趣,公交自行程序员。 ")
print(" 别人笑我忒疯癫,我笑自己命太贱; ")
print(" 不见满街漂亮妹,哪个归得程序员?")
a = 10
if a == 10:
# 占位 防止语法错误
print_info()
# 函数 -> python
num = 1
if num == 1:
print_info()
| [
"liangdoudou@example.com"
] | liangdoudou@example.com |
09ee19f59fcbf8de31c5285d7d5cfcf228701935 | de33ba7be349eed5e2a1fc3f2bd9fce5bfdb9f13 | /phenocube/lib/python3.8/site-packages/setuptools/__init__.py | 25b4679b185857fa015cb43acc5f8b34a0faf3b3 | [
"MIT"
] | permissive | SteveMHill/phenocube-py | 9bebf239e24af3f97e59b080560228605e6611c5 | cb262aef1c0925efd2e955170bacd2989da03769 | refs/heads/main | 2023-02-24T03:35:11.461869 | 2020-12-22T12:15:22 | 2020-12-22T12:15:22 | 334,703,261 | 0 | 0 | MIT | 2021-01-31T16:37:21 | 2021-01-31T16:36:47 | null | UTF-8 | Python | false | false | 7,430 | py | """Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution
from setuptools.depends import Require
from . import monkey
# Py2 compatibility: make all classes in this module new-style.
__metaclass__ = type
__all__ = [
    "setup",
    "Distribution",
    "Command",
    "Extension",
    "Require",
    "SetuptoolsDeprecationWarning",
    "find_packages",
]
if PY3:
    __all__.append("find_namespace_packages")
__version__ = setuptools.version.__version__
# NOTE(review): presumably set elsewhere to drive ez_setup-style
# bootstrapping; kept as a module-level flag (None = disabled).
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ["lib2to3.fixes"]
class PackageFinder:
    """
    Generate a list of all Python packages found within a directory
    """

    @classmethod
    def find(cls, where=".", exclude=(), include=("*",)):
        """Return a list all Python packages found within directory 'where'

        'where' is the root directory which will be searched for packages.  It
        should be supplied as a "cross-platform" (i.e. URL-style) path; it will
        be converted to the appropriate local path syntax.

        'exclude' is a sequence of package names to exclude; '*' can be used
        as a wildcard in the names, such that 'foo.*' will exclude all
        subpackages of 'foo' (but not 'foo' itself).

        'include' is a sequence of package names to include.  If it's
        specified, only the named packages will be included.  If it's not
        specified, all found packages will be included.  'include' can contain
        shell style wildcard patterns just like 'exclude'.
        """
        # 'ez_setup' and '__pycache__' directories are always excluded.
        return list(
            cls._find_packages_iter(
                convert_path(where),
                cls._build_filter("ez_setup", "*__pycache__", *exclude),
                cls._build_filter(*include),
            )
        )

    @classmethod
    def _find_packages_iter(cls, where, exclude, include):
        """
        All the packages found in 'where' that pass the 'include' filter, but
        not the 'exclude' filter.
        """
        for root, dirs, files in os.walk(where, followlinks=True):
            # Copy dirs to iterate over it, then empty dirs.
            # (Clearing dirs prunes os.walk; valid package dirs are re-added
            # below so their subdirectories are still visited.)
            all_dirs = dirs[:]
            dirs[:] = []
            for dir in all_dirs:
                full_path = os.path.join(root, dir)
                rel_path = os.path.relpath(full_path, where)
                # Package name is the path relative to the root, dotted.
                package = rel_path.replace(os.path.sep, ".")
                # Skip directory trees that are not valid packages
                if "." in dir or not cls._looks_like_package(full_path):
                    continue
                # Should this package be included?
                if include(package) and not exclude(package):
                    yield package
                # Keep searching subdirectories, as there may be more packages
                # down there, even if the parent was excluded.
                dirs.append(dir)

    @staticmethod
    def _looks_like_package(path):
        """Does a directory look like a package?"""
        # Classic packages are identified by an __init__.py file.
        return os.path.isfile(os.path.join(path, "__init__.py"))

    @staticmethod
    def _build_filter(*patterns):
        """
        Given a list of patterns, return a callable that will be true only if
        the input matches at least one of the patterns.
        """
        return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
    # PEP 420 namespace packages need no __init__.py, so every directory
    # is treated as a package.
    @staticmethod
    def _looks_like_package(path):
        return True
# Public entry points: classic packages require __init__.py; the PEP 420
# variant (Python 3 only) also discovers namespace packages.
find_packages = PackageFinder.find
if PY3:
    find_namespace_packages = PEP420PackageFinder.find
def _install_setup_requires(attrs):
    """Fetch build eggs for any ``setup_requires`` declared in *attrs*."""
    # Note: do not use `setuptools.Distribution` directly, as
    # our PEP 517 backend patch `distutils.core.Distribution`.
    relevant = {
        k: v
        for k, v in attrs.items()
        if k in ("dependency_links", "setup_requires")
    }
    dist = distutils.core.Distribution(relevant)
    # Honor setup.cfg's options.
    dist.parse_config_files(ignore_option_errors=True)
    if dist.setup_requires:
        dist.fetch_build_eggs(dist.setup_requires)
# ``setup()`` mirrors distutils.core.setup() (its docstring is copied
# below) but first ensures setup_requires dependencies are available.
def setup(**attrs):
    # Make sure we have any requirements needed to interpret 'attrs'.
    _install_setup_requires(attrs)
    return distutils.core.setup(**attrs)
setup.__doc__ = distutils.core.setup.__doc__
# Base on the original (unpatched) distutils Command class.
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
    __doc__ = _Command.__doc__
    # When True, the command consumes leftover command-line arguments.
    command_consumes_arguments = False
    def __init__(self, dist, **kw):
        """
        Construct the command for dist, updating
        vars(self) with any keyword parameters.
        """
        _Command.__init__(self, dist)
        vars(self).update(kw)
    def _ensure_stringlike(self, option, what, default=None):
        """Ensure attribute 'option' is a string (or set it to *default*
        when currently None); raise DistutilsOptionError otherwise."""
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        elif not isinstance(val, string_types):
            raise DistutilsOptionError(
                "'%s' must be a %s (got `%s`)" % (option, what, val)
            )
        return val
    def ensure_string_list(self, option):
        r"""Ensure that 'option' is a list of strings.  If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
        ["foo", "bar", "baz"].
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, string_types):
            setattr(self, option, re.split(r',\s*|\s+', val))
        else:
            if isinstance(val, list):
                ok = all(isinstance(v, string_types) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)" % (option, val)
                )
    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        """Like the distutils version, but also applies *kw* as attribute
        overrides on the reinitialized command."""
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        vars(cmd).update(kw)
        return cmd
def _find_all_simple(path):
    """
    Find all files under 'path'
    """
    # Lazily yield each regular file reachable from ``path``.
    for base, dirs, files in os.walk(path, followlinks=True):
        for file in files:
            candidate = os.path.join(base, file)
            if os.path.isfile(candidate):
                yield candidate
def findall(dir=os.curdir):
    """
    Find all files under 'dir' and return the list of full filenames.
    Unless dir is '.', return full filenames with dir prepended.
    """
    found = _find_all_simple(dir)
    if dir == os.curdir:
        # Report paths relative to the current directory.
        to_relative = functools.partial(os.path.relpath, start=dir)
        found = map(to_relative, found)
    return list(found)
class sic(str):
    """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
    # NOTE(review): marker subclass of str with no added behavior --
    # presumably checked elsewhere to opt a value out of normalization.
# Apply monkey patches
monkey.patch_all()
| [
"steven.smhill@gmail.com"
] | steven.smhill@gmail.com |
203d9a37000a582dcdc625710f4e7bbb0c159639 | 78f65f6c8be381773cc847c93da4b28eb4eeefae | /fastmri/models/__init__.py | d1d79c2627e2cc480fe53930fa89a0e984117d8d | [
"MIT"
] | permissive | soumickmj/fastMRI | af7bc3c654eda93905e19c24ab40dd255eb6c128 | 2056879fd9444c14599447af38ba0507f1222901 | refs/heads/master | 2022-11-29T22:32:26.152484 | 2022-03-09T20:50:02 | 2022-03-09T20:50:02 | 214,513,364 | 1 | 0 | MIT | 2022-11-08T08:29:57 | 2019-10-11T19:22:54 | Python | UTF-8 | Python | false | false | 270 | py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .unet import Unet
from .varnet import NormUnet, SensitivityModel, VarNet, VarNetBlock
| [
"matt.muckley@gmail.com"
] | matt.muckley@gmail.com |
046c7fadc7a3e3cdc813caf214a79d19c739ddf2 | 1a66e07fbdd333e9feee3ae06f90e11cd5c3c79e | /qiskit/providers/honeywell/honeywelljob.py | 63ad4acef11e3f90f8c5ca5bcf2bbd37118c539e | [
"Apache-2.0"
] | permissive | stjordanis/qiskit-honeywell-provider | d385588ca2452ba0e10ddb6f68b03ca41064ed98 | d9c15a0edfb95ab1d715e98711748d2008a52040 | refs/heads/master | 2023-05-05T20:13:19.586574 | 2021-05-21T22:02:11 | 2021-05-21T22:02:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,857 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# Copyright 2019-2020 Honeywell, Intl. (www.honeywell.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=arguments-differ
"""HoneywellJob module
This module is used for creating asynchronous job objects for Honeywell.
"""
import asyncio
import json
import logging
from collections import Counter
from datetime import datetime, timezone
from time import sleep
import nest_asyncio
import websockets
from qiskit.assembler.disassemble import disassemble
from qiskit.providers import BaseJob, JobError
from qiskit.providers.jobstatus import JOB_FINAL_STATES, JobStatus
from qiskit.qobj import validate_qobj_against_schema
from qiskit.result import Result
from .apiconstants import ApiJobStatus
from .api import HoneywellClient
logger = logging.getLogger(__name__)
# Because Qiskit is often used inside a Jupyter notebook, which runs its own
# asyncio event loop (via Tornado), we must be able to nest our event loop
# within it. The IBMQ provider does the same.
nest_asyncio.apply()
class HoneywellJob(BaseJob):
"""Representation of a job that will be execute on a Honeywell backend.
Represent the jobs that will be executed on Honeywell devices. Jobs are
intended to be created calling ``run()`` on a particular backend.
Currently jobs that are created using a qobj can only have one experiment
in the qobj. If more that one experiment exists in the qobj only the first
experiment will be run and the rest will be ignored.
Creating a ``Job`` instance does not imply running it. You need to do it in
separate steps::
job = HoneywellJob(...)
job.submit()
An error while submitting a job will cause the next call to ``status()`` to
    raise. If submitting the job succeeds, you can inspect the job's status by
using ``status()``. Status can be one of ``JobStatus`` members::
from qiskit.backends.jobstatus import JobStatus
job = HoneywellJob(...)
job.submit()
try:
job_status = job.status() # It will query the backend API.
if job_status is JobStatus.RUNNING:
print('The job is still running')
except JobError as ex:
print("Something wrong happened!: {}".format(ex))
A call to ``status()`` can raise if something happens at the API level that
prevents Qiskit from determining the status of the job. An example of this
        is a temporary connection loss or a network failure.
``Job`` instances also have `id()` and ``result()`` methods which will
block::
job = HoneywellJob(...)
job.submit()
try:
job_id = job.id()
print('The job {} was successfully submitted'.format(job_id))
job_result = job.result() # It will block until finishing.
print('The job finished with result {}'.format(job_result))
except JobError as ex:
print("Something wrong happened!: {}".format(ex))
Both methods can raise if something at the API level happens that prevent
Qiskit from determining the status of the job.
Note:
When querying the API for getting the status, two kinds of errors are
possible. The most severe is the one preventing Qiskit from getting a
response from the backend. This can be caused by a network failure or a
temporary system break. In these cases, calling ``status()`` will raise.
If Qiskit successfully retrieves the status of a job, it could be it
finished with errors. In that case, ``status()`` will simply return
``JobStatus.ERROR`` and you can call ``error_message()`` to get more
info.
"""
def __init__(self, backend, job_id, api=None, qobj=None):
"""HoneywellJob init function.
We can instantiate jobs from two sources: A QObj, and an already submitted job returned by
the API servers.
Args:
backend (BaseBackend): The backend instance used to run this job.
job_id (str or None): The job ID of an already submitted job.
Pass `None` if you are creating a new job.
api (HoneywellClient): Honeywell api client.
qobj (Qobj): The Quantum Object. See notes below
Notes:
It is mandatory to pass either ``qobj`` or ``job_id``. Passing a ``qobj``
will ignore ``job_id`` and will create an instance to be submitted to the
API server for job creation. Passing only a `job_id` will create an instance
representing an already-created job retrieved from the API server.
"""
super().__init__(backend, job_id)
if api:
self._api = api
else:
self._api = HoneywellClient(backend.provider().credentials)
print(backend.provider().credentials.api_url)
self._creation_date = datetime.utcnow().replace(tzinfo=timezone.utc).isoformat()
# Properties used for caching.
self._cancelled = False
self._api_error_msg = None
self._result = None
self._job_ids = []
self._experiment_results = []
self._qobj_payload = {}
if qobj:
validate_qobj_against_schema(qobj)
self._qobj_payload = qobj.to_dict()
# Extract individual experiments
# if we want user qobj headers, the third argument contains it
self._experiments, self._qobj_config, _ = disassemble(qobj)
self._status = JobStatus.INITIALIZING
else:
self._status = JobStatus.INITIALIZING
self._job_ids.append(job_id)
    def submit(self):
        """Submit every disassembled experiment to the backend.

        Each experiment becomes its own remote job; the resulting ids are
        collected in ``self._job_ids`` and the metadata of the *last*
        submission provides this job's creation date, status and id.

        NOTE(review): relies on ``self._experiments`` (only set when the job
        was constructed from a qobj) being non-empty -- with an empty list,
        ``submit_info`` below would be unbound.
        """
        backend_name = self.backend().name()
        for exp in self._experiments:
            submit_info = self._api.job_submit(backend_name, self._qobj_config, exp.qasm())
            # Error in job after submission:
            # Transition to the `ERROR` final state.
            if 'error' in submit_info:
                self._status = JobStatus.ERROR
                self._api_error_msg = str(submit_info['error'])
                # Don't continue
                return
            self._job_ids.append(submit_info['job'])
        # Take the last submitted job's info
        self._creation_date = submit_info.get('submit-date')
        self._status = submit_info.get('status')
        self._job_id = submit_info.get('job')
    def result(self, timeout=300):
        """Return the result of the job.

        Blocks until every submitted sub-job reaches a terminal state (or
        the per-job ``timeout`` elapses), then assembles a single Result.
        The assembled Result is cached, so subsequent calls return the
        cached object without hitting the API again.

        Args:
            timeout (float): number of seconds to wait for each sub-job

        Returns:
            qiskit.Result: Result object

        Raises:
            JobError: if attempted to recover a result on a failed job.

        Notes:
            Currently when calling get_counts() on a result returned by a Honeywell
            backend, since Honeywell backends currently support only running one
            experiment per job, do not supply an argument to the get_counts() function.
            Doing so may raise an exception.
        """
        if self._result:
            return self._result
        # Wait for results sequentially
        for job_id in self._job_ids:
            self._experiment_results.append(
                asyncio.get_event_loop().run_until_complete(self._get_status(job_id, timeout))
            )
        # Process results
        self._result = self._process_results()
        # Only DONE/CANCELLED jobs have a meaningful result.
        if not (self._status is JobStatus.DONE or self._status is JobStatus.CANCELLED):
            raise JobError('Invalid job state. The job should be DONE or CANCELLED but '
                           'it is {}'.format(str(self._status)))
        if not self._result:
            raise JobError('Server did not return result')
        return self._result
    def cancel(self):
        """Attempt to cancel job.

        NOTE(review): cancellation is not implemented -- this is currently a
        no-op and ``self._cancelled`` is never updated.
        """
        pass
    async def _get_status(self, job_id, timeout=300):
        """Wait for one sub-job to reach a terminal state and return its
        final API response.

        Prefers the server-provided websocket channel; when the status
        payload carries no websocket info (e.g. behind a proxy), falls back
        to polling with an exponentially growing delay capped at 10s.

        Args:
            job_id (str): id of the sub-job to watch.
            timeout (float): how long to wait before giving up.

        Returns:
            dict: the last API status payload for the job (or the cached
            ``self._status`` when the job is already final / id is None).

        Raises:
            JobError: any underlying API/websocket error, re-raised wrapped.
        """
        if job_id is None or self._status in JOB_FINAL_STATES:
            return self._status
        try:
            api_response = self._api.job_status(job_id)
            if 'websocket' in api_response:
                # Open the push channel advertised by the server and wait
                # for a single (terminal) status message.
                task_token = api_response['websocket']['task_token']
                execution_arn = api_response['websocket']['executionArn']
                credentials = self.backend().provider().credentials
                websocket_uri = credentials.url.replace('https://', 'wss://ws.')
                async with websockets.connect(
                        websocket_uri, extra_headers={
                            'Authorization': credentials.access_token}) as websocket:
                    body = {
                        "action": "OpenConnection",
                        "task_token": task_token,
                        "executionArn": execution_arn
                    }
                    await websocket.send(json.dumps(body))
                    api_response = await asyncio.wait_for(websocket.recv(), timeout=timeout)
                    api_response = json.loads(api_response)
            else:
                logger.warning('Websockets via proxy not supported. Falling-back to polling.')
                # NOTE(review): ``timeout`` is used as seconds elsewhere
                # (default 300), so dividing by 1000 here ("us -> s") looks
                # inconsistent -- confirm the intended units.
                residual_delay = timeout/1000  # convert us -> s
                request_delay = min(1.0, residual_delay)
                while api_response['status'] not in ['failed', 'completed', 'canceled']:
                    sleep(request_delay)
                    api_response = self._api.job_status(job_id)
                    residual_delay = residual_delay - request_delay
                    if residual_delay <= 0:
                        # break if we have exceeded timeout
                        break
                    # Max-out at 10 second delay
                    request_delay = min(min(request_delay*1.5, 10), residual_delay)
        except Exception as err:
            raise JobError(str(err))
        return api_response
    def status(self, timeout=300):
        """Query the API to update the status.

        Returns:
            qiskit.providers.JobStatus: The status of the job, once updated.
        Raises:
            JobError: if there was an exception in the future being executed
                or the server sent an unknown answer.
        """
        # NOTE(review): every call appends to self._experiment_results and
        # reprocesses them; repeated status() calls look like they would
        # accumulate duplicate entries -- confirm intended usage.
        # Wait for results sequentially
        for job_id in self._job_ids:
            self._experiment_results.append(
                asyncio.get_event_loop().run_until_complete(self._get_status(job_id, timeout))
            )
        # Process results
        self._result = self._process_results()
        return self._status
    def error_message(self):
        """Provide details about the reason of failure.

        Returns:
            str: An error report if the job errored or ``None`` otherwise.
        """
        # NOTE(review): status() takes ``timeout`` as its only parameter, so
        # ``self.status(job_id)`` passes a job id where a timeout is expected
        # -- likely a bug; confirm against the intended API.
        for job_id in self._job_ids:
            if self.status(job_id) is not JobStatus.ERROR:
                return None
        if not self._api_error_msg:
            self._api_error_msg = 'An unknown error occurred.'
        return self._api_error_msg
    def _process_results(self):
        """Convert Honeywell job result to qiskit.Result

        Side effect: sets ``self._status`` to DONE, or to ERROR if any
        experiment response reports a 'failed' status.
        """
        results = []
        self._status = JobStatus.DONE
        for i, res_resp in enumerate(self._experiment_results):
            status = res_resp.get('status', 'failed')
            if status == 'failed':
                self._status = JobStatus.ERROR
            res = res_resp['results']
            # Transpose the per-key bit lists via zip(*...) into per-shot
            # strings, then tally each bitstring keyed by its hex value
            # (presumably keys are per-qubit readouts -- confirm).
            counts = dict(Counter(hex(int("".join(r), 2)) for r in [*zip(*list(res.values()))]))
            experiment_result = {
                'shots': self._qobj_payload.get('config', {}).get('shots', 1),
                'success': ApiJobStatus(status) is ApiJobStatus.COMPLETED,
                'data': {'counts': counts},
                'header': self._qobj_payload[
                    'experiments'][i]['header'] if self._qobj_payload else {},
                'job_id': self._job_ids[i]
            }
            results.append(experiment_result)
        result = {
            'success': self._status is JobStatus.DONE,
            'job_id': self._job_id,
            'results': results,
            'backend_name': self._backend.name(),
            'backend_version': self._backend.status().backend_version,
            'qobj_id': self._job_id
        }
        return Result.from_dict(result)
    def creation_date(self):
        """Return the submit date of the last submitted job, or None."""
        return self._creation_date
    def job_ids(self):
        """ Return all the job_ids associated with this experiment """
        return self._job_ids
| [
"noreply@github.com"
] | noreply@github.com |
bc2488d589a8001efc0e25e3dab4c87c1764de6c | 7859860eeff25f715c970b8fb5dfd2751f3c4264 | /time_problem_5.py | ed7c6c129fc6e79d14b64a2b9f4729b52d6819ee | [] | no_license | dakuapraveen/python_problem_adhoc | d3c3a82b0b7b07f8516fa8ff645772acc5f24bf2 | d79fdebda11908b9a474a912e466719cd0590746 | refs/heads/master | 2020-06-02T17:51:47.462892 | 2019-06-17T06:08:39 | 2019-06-17T06:08:39 | 191,255,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | import datetime
# Greet the user based on the current local hour.
name = input("Enter your name:")
current_time = datetime.datetime.now()
# datetime.hour is always in 0..23, so these four ranges cover every hour.
if 0 <= current_time.hour < 12:
    print("good morning", name)
elif 12 <= current_time.hour < 17:
    print("good afternoon", name)  # fixed typo: was "goodafter noon"
elif 17 <= current_time.hour < 20:
    print("good evening", name)
elif 20 <= current_time.hour <= 23:
    print("good night", name)
"noreply@github.com"
] | noreply@github.com |
0dcf83210fc7b8fd635949e29c1c1f76450b0d2b | 98161af1d00f76fee3c6634ecd82badad10a3287 | /PG/pg_mcts.py | fb91bed10d53e6fc23794cf6157e135a158e37de | [] | no_license | joeleung00/DRL-for-Crack-Attack | 6e18e6c7a91fc66f485a9eb0d628e5d6daf09f3f | 1e5aabdc4c48dda0b7a05c4af98206c2608ca9b2 | refs/heads/master | 2022-09-16T21:14:37.296442 | 2020-05-28T07:11:32 | 2020-05-28T07:11:32 | 216,518,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,599 | py | import random
import sys
import math
from copy import copy, deepcopy
import sys
sys.path.insert(1, '../game_simulation')
sys.path.insert(1, '../cnn')
from GameBoard import GameBoard
from GameCLI import Game
from parameters import Parameter
from policy_net import Net
import pickle
from collections import deque
import numpy as np
import torch
import multiprocessing
# Directory layout for network checkpoints and pickled replay buffers.
network_path = "./network/"
train_data_path = './train_data/'
# Board geometry shared with the game simulation.
NUM_OF_COLOR = Parameter.NUM_OF_COLOR
ROW_DIM = Parameter.ROW_DIM
COLUMN_DIM = Parameter.COLUMN_DIM
# Exploration constant in the PUCT-style selection score (see best_child).
C = 500
MAX_ROUND_NUMBER = Parameter.MAX_ROUND_NUMBER
# Visit-count temperature used when sampling the final move.
TAU = 0.5 ## cannot be zero
# Depth of the greedy policy-net rollout in default_policy.
MAX_ROLLOUT_ROUND_NUMBER = 3
# Discount factor applied when propagating rewards up the tree.
GAMMA_RATE = 0.8
## train an episode per iteration
TRAIN_ITERATION = 11
EPISODE_PER_ITERATION = 150
SAVE_MODEL_PERIOD = 10
DATA_SIZE_PER_TRAIN = 50
NUM_OF_PROCESSES = Parameter.NUM_OF_PROCESSES
# Number of MCTS simulations per move.
COMPUTATION_BUDGET = 150
# Global replay buffer and module-level policy network.
replay_memory = deque(maxlen = 100000)
net = Net()
class Node:
    """A node in the Monte-Carlo search tree."""
    def __init__(self, state, parent = None):
        """
        Args:
            state: State wrapped by this node.
            parent: parent Node, or None for the root.
        """
        self.parent = parent
        # Maps flattened action id -> child Node.
        self.child = {}
        self.visited_times = 0
        ## Q-value is the expected reward of the next actions, not counting reward come from this state
        self.quality_value = 0.0
        self.state = state
        # Maps flattened action id -> prior probability from the policy net.
        self.policy_probi = {}
    def is_all_expanded(self):
        """True when every legal action from this state has a child node."""
        return len(self.child) == self.state.num_available_choices
    def add_child(self, node, child_id):
        """Register ``node`` as the child reached via action ``child_id``."""
        self.child[child_id] = node
class State:
    """A snapshot of the game used by the MCTS: board, round counter, the
    choices taken so far, and the immediate reward of the last action."""
    def __init__(self, board, round_index, cumulative_choices, num_available_choices = 0, action_reward = 0):
        # Deep copies so later board mutations do not alias this snapshot.
        self.current_board = deepcopy(board)
        self.current_round_index = round_index
        self.cumulative_choices = deepcopy(cumulative_choices)
        self.num_available_choices = num_available_choices
        self.action_reward = action_reward
    def is_terminal(self, rollout = False):
        """True when the round budget is exhausted or no moves remain.

        Args:
            rollout: use the shorter rollout budget instead of the game one.
        """
        ## Add one more case - check is there any possible move?
        if not rollout:
            max_round = MAX_ROUND_NUMBER
        else:
            max_round = MAX_ROLLOUT_ROUND_NUMBER
        if self.current_round_index == max_round:
            return True
        elif self.num_available_choices == 0:
            return True
        else:
            return False
    def compute_reward(self, simulation_board):
        """Reward of a finished simulation is the board's accumulated score."""
        return simulation_board.score
    def get_next_state_with_random_choice(self, simulation_board, exclude=None):
        """Play one random legal move on ``simulation_board``.

        Args:
            simulation_board: GameBoard advanced in place by the chosen move.
            exclude: optional set of flattened action ids to avoid
                (already-expanded children).
        Returns:
            (next State, flattened action id of the chosen move)
        """
        ## AVAILABLE_CHOICES is a double integer tupple list
        available_choices = simulation_board.get_available_choices()
        random_choice = random.choice(available_choices)
        child_id = flatten_action(random_choice)
        if exclude != None:
            while flatten_action(random_choice) in exclude:
                random_choice = random.choice(available_choices)
                child_id = flatten_action(random_choice)
        ## going to create new state
        action_reward = simulation_board.proceed_next_state(random_choice[0], random_choice[1])
        available_choices = simulation_board.get_available_choices()
        next_state = State(simulation_board.board, self.current_round_index + 1,
                    self.cumulative_choices + [random_choice], len(available_choices), action_reward)
        return next_state, child_id
    def get_next_best_state(self, simulation_board):
        """Greedy one-step lookahead: try every legal move on a scratch board
        and play the one with the highest immediate score."""
        available_choices = simulation_board.get_available_choices()
        best_choice = available_choices[0]
        best_score = 0
        for choice in available_choices:
            tmp_board = GameBoard(simulation_board.board, simulation = True)
            score = tmp_board.proceed_next_state(choice[0], choice[1])
            if score > best_score:
                best_score = score
                best_choice = choice
        action_reward = simulation_board.proceed_next_state(best_choice[0], best_choice[1])
        available_choices = simulation_board.get_available_choices()
        next_state = State(simulation_board.board, self.current_round_index + 1,
                    self.cumulative_choices + [best_choice], len(available_choices), action_reward)
        return next_state
    def get_choice(self):
        """Return the most recent (row, col) choice that led to this state."""
        return self.cumulative_choices[-1]
def net_index2action_index(index):
    """Map a policy-network output index to a flattened board-action index.

    The network enumerates only COLUMN_DIM - 1 actions per row, so every full
    group of (COLUMN_DIM - 1) indices shifts the action index forward by one.
    """
    return index + index // (COLUMN_DIM - 1)
def action_index2net_index(index):
    """Inverse of net_index2action_index: drop one index per board row."""
    return index - index // COLUMN_DIM
def pre_process_features(raw_board):
    """One-hot encode a board of color ids.

    NOTE: this definition is redefined identically further down in the file,
    so this copy is shadowed and effectively dead.

    Args:
        raw_board: ROW_DIM x COLUMN_DIM grid of color ids.
    Returns:
        np.ndarray of shape (NUM_OF_COLOR, ROW_DIM, COLUMN_DIM).
    """
    onehot = np.zeros((NUM_OF_COLOR, ROW_DIM, COLUMN_DIM))
    for row in range(ROW_DIM):
        for col in range(COLUMN_DIM):
            color = raw_board[row][col]
            onehot[int(color), row, col] = 1
    return onehot
def flatten_action(action):
    """Collapse a (row, col) pair into a single row-major index."""
    row, col = action
    return row * COLUMN_DIM + col
def deflatten_action(action):
    """Expand a row-major index back into a (row, col) pair."""
    return divmod(action, COLUMN_DIM)
def pre_process_features(raw_board):
    """One-hot encode a board of color ids.

    Args:
        raw_board: ROW_DIM x COLUMN_DIM grid of color ids.
    Returns:
        np.ndarray of shape (NUM_OF_COLOR, ROW_DIM, COLUMN_DIM) with a 1 at
        [color, row, col] for every board cell.
    """
    encoded = np.zeros((NUM_OF_COLOR, ROW_DIM, COLUMN_DIM))
    for r in range(ROW_DIM):
        for c in range(COLUMN_DIM):
            encoded[int(raw_board[r][c]), r, c] = 1
    return encoded
def get_action(current_state):
    """Pick the greedy action for a raw board using the module-level net.

    Args:
        current_state: raw ROW_DIM x COLUMN_DIM board of color ids.
    Returns:
        int: flattened (row * COLUMN_DIM + col) action index.
    """
    onehot_current_state = pre_process_features(current_state)
    onehot_current_state = torch.from_numpy(onehot_current_state).type(torch.float32)
    with torch.no_grad():
        probi = net(onehot_current_state.unsqueeze(0)) ## output is a qvalue tensor for all actions (size of 72)
        value, index = torch.max(probi[0], 0)
    #print(value)
    return net_index2action_index(index.item())
def tree_policy(node):
    """MCTS selection/expansion phase.

    Descend via best_child (exploration mode) through fully-expanded nodes;
    stop at the first node with an untried action and expand it, or at a
    terminal node.
    """
    # Check if the current node is the leaf node
    while node.state.is_terminal() == False:
        if node.is_all_expanded():
            node = best_child(node, True)
        else:
            # Return the new sub node
            sub_node = expand(node)
            #print(node.state.current_board)
            return sub_node
    # Return the leaf node
    return node
def default_policy(node):
    """Evaluate a freshly expanded node.

    Side effect: fills ``node.policy_probi`` with the policy-network priors
    for the node's board. Then runs a short rollout that plays the network's
    greedy action for MAX_ROLLOUT_ROUND_NUMBER rounds on a scratch board.

    Returns:
        The scratch board's score after the rollout.
    """
    # Get the state of the game
    current_state = node.state
    ## pre_process_features:
    onehot_board = pre_process_features(current_state.current_board)
    onehot_current_state = torch.from_numpy(onehot_board).type(torch.float32)
    with torch.no_grad():
        probi = net(onehot_current_state.unsqueeze(0))
        probi = probi[0]
    for i, value in enumerate(probi):
        action_index = net_index2action_index(i)
        node.policy_probi[action_index] = value.item()
    ## return a rollout value
    simulation_board = GameBoard(current_state.current_board)
    for i in range(MAX_ROLLOUT_ROUND_NUMBER):
        ##pick an action
        choice = get_action(simulation_board.board)
        choice2d = deflatten_action(choice)
        simulation_board.proceed_next_state(choice2d[0], choice2d[1])
    return simulation_board.score
def expand(node):
    """Add one new child to ``node`` via a random not-yet-tried action.

    Returns:
        The newly created child Node.
    """
    # Collect the action ids already expanded so the random choice avoids them.
    child_node_state_set = set()
    for key in node.child:
        child_node_state_set.add(key)
    simulation_board = GameBoard(node.state.current_board)
    new_state, child_id = node.state.get_next_state_with_random_choice(simulation_board, exclude=child_node_state_set)
    # Check until get the new state which has the different action from others
    # while new_state in tried_sub_node_states:
    # new_state = node.state.get_next_state_with_random_choice()
    sub_node = Node(parent=node, state=new_state)
    node.add_child(sub_node, child_id)
    return sub_node
def best_child(node, is_exploration):
    """Choose a child of ``node``.

    With is_exploration=True (tree descent), pick the child maximizing the
    PUCT-style score Q + C * P(a|s) / (1 + N(a|s)).  With False (final move
    selection), sample a child with probability proportional to
    visited_times ** (1/TAU).
    """
    # TODO: Use the min float value
    best_score = -sys.maxsize
    best_sub_node = None
    probi = np.zeros(len(node.child))
    child_list_index = np.zeros(len(node.child))
    sum = 0
    # Travel all sub nodes to find the best one
    for i, key in enumerate(node.child):
        sub_node = node.child[key]
        child_list_index[i] = key
        # Ignore exploration for sinference
        if is_exploration:
            # old: UCB = quality / times + C * sqrt(2 * ln(total_times) / times)
            # C = 1 / math.sqrt(2.0)
            # left = sub_node.quality_value / sub_node.visited_times
            # right = 2.0 * math.log(node.visited_times) / sub_node.visited_times
            # score = left + C * math.sqrt(right)
            ## new: a = argmax(a) quality + C * P(a|s) / (1 + N(a|s))
            left = sub_node.quality_value
            right = C * node.policy_probi[key] / (1 + sub_node.visited_times)
            score = left + right
            if score > best_score:
                best_sub_node = sub_node
                best_score = score
        else:
            #score = sub_node.visited_times ** (1/TAU)
            probi[i] = sub_node.visited_times ** (1/TAU)
            sum += probi[i]
    # if not is_exploration:
    #     probi /= sum
    #     cu_sum = np.cumsum(probi)
    #     rand = random.random()
    #     for i, value in enumerate(cu_sum):
    #         if rand <= value:
    #             key = child_list_index[i]
    #             best_sub_node = node.child[key]
    #             best_i = i
    #             break
    # if not is_exploration:
    #     best_sub_node = get_best_child(node)
    if is_exploration:
        #print(best_score)
        # if right > 1:
        #     print("left: " + str(left))
        #
        #     print("right: " + str(right))
        return best_sub_node
    else:
        # Normalize the tempered visit counts and sample one child.
        probi /= sum
        best_i = np.random.choice(range(len(probi)), p=probi)
        key = child_list_index[best_i]
        best_sub_node = node.child[key]
        #print(probi[best_i])
        return best_sub_node
def backup(node, reward):
    """Propagate a rollout reward from a leaf up to the root.

    The leaf's Q-value is set directly; ancestors update their Q-value as a
    running average of the discounted reward plus their own action reward.
    """
    node.quality_value = reward + node.state.action_reward
    node.visited_times = 1
    ## this reward in the respective of parent node
    reward = GAMMA_RATE * node.quality_value
    node = node.parent
    # Update util the root node
    while node != None:
        # Update the visit times
        node.visited_times += 1
        # Update the quality value
        node.quality_value += (1/node.visited_times) * (reward + node.state.action_reward - node.quality_value)
        ## this reward in the respective of parent node
        reward = GAMMA_RATE * reward
        # Change the node to the parent node
        node = node.parent
# def backup(node, reward):
#
# node.quality_value = reward + node.state.action_reward
# node.visited_times = 1
# reward = node.quality_value
# node = node.parent
#
# # Update util the root node
# while node != None:
# # Update the visit times
# node.visited_times += 1
#
# # Update the quality value
# new_quality = reward + node.state.action_reward
# if new_quality > node.quality_value:
# node.quality_value = new_quality
#
# reward = GAMMA_RATE * node.quality_value
# # Change the node to the parent node
# node = node.parent
def monte_carlo_tree_search(node):
    """Run COMPUTATION_BUDGET simulations from ``node`` and pick a move.

    Returns:
        The child node sampled by best_child in selection (non-exploration)
        mode.
    """
    computation_budget = COMPUTATION_BUDGET
    # Run as much as possible under the computation budget
    for i in range(computation_budget):
        # 1. Find the best node to expand
        expand_node = tree_policy(node)
        # 2. Random run to add node and get reward
        reward = default_policy(expand_node)
        # 3. Update all passing nodes with reward
        backup(expand_node, reward)
    # N. Get the best next node
    best_next_node = best_child(node, False)
    #print("my quality_value :" + str(node.quality_value))
    return best_next_node
def get_best_child(node):
    """Return the child of ``node`` with the highest quality value."""
    best = None
    best_value = -sys.maxsize
    for candidate in node.child.values():
        if candidate.quality_value > best_value:
            best_value = candidate.quality_value
            best = candidate
    return best
def load_net(number):
    """Load saved policy-network weights into the module-level ``net``.

    Args:
        number: checkpoint identifier. Accepts int or str; ``__main__`` passes
            an int, which crashed the previous str concatenation.
    """
    # NOTE(review): save_net() writes "net<N>.pth" but this reads
    # "network<N>.pth" -- confirm which filename scheme is intended.
    net.load_state_dict(torch.load(network_path + "network" + str(number) + ".pth"))
def save_net(net, number):
    """Save the network's state dict as net<number>.pth under network_path."""
    net_name = "net" + str(number) + ".pth"
    torch.save(net.state_dict(), network_path + net_name)
def save_train_data(train_data, number, name=None):
    """Pickle a replay buffer into the train-data directory.

    Args:
        train_data: picklable object (deque of [state, action, reward]).
        number: checkpoint identifier appended to the filename.
        name: optional tag inserted into the filename.
    """
    if name is None:
        fullpathname = train_data_path + "data" + str(number)
    else:
        fullpathname = train_data_path + "data_" + name + "_" + str(number)
    # Context manager guarantees the file is flushed and closed
    # (the original left the descriptor open).
    with open(fullpathname, 'wb') as fd:
        pickle.dump(train_data, fd)
def load_train_data(number):
    """Restore the module-level ``replay_memory`` from a pickled checkpoint.

    Args:
        number: checkpoint identifier used to build the filename.
    """
    global replay_memory
    fullpathname = train_data_path + "data" + str(number)
    # Context manager closes the file (the original leaked the descriptor).
    with open(fullpathname, 'rb') as fd:
        replay_memory = pickle.load(fd)
def get_batch_from_memory():
    """Sample a training batch from the global replay memory.

    Returns:
        (states, actions, rewards): states is an int ndarray of shape
        (DATA_SIZE_PER_TRAIN, ROW_DIM, COLUMN_DIM); actions and rewards are
        plain Python lists.
    """
    ## min_batch are all python data type (state, action, reward)
    train_data = random.sample(replay_memory, DATA_SIZE_PER_TRAIN)
    ## they are batch states
    states = np.zeros((DATA_SIZE_PER_TRAIN, ROW_DIM, COLUMN_DIM)).astype(int)
    actions = []
    rewards = []
    for i, value in enumerate(train_data):
        states[i] = value[0]
        actions.append(value[1])
        rewards.append(value[2])
    ## return data are all ten
    return (states, actions, rewards)
def init_first_node(gameboard):
    """Build the MCTS root node for the given game board.

    Seeds the root's policy priors from the policy network and gives it an
    initial visit count of 1 with zero quality value.
    """
    num_available_choices = len(gameboard.get_available_choices())
    init_state = State(gameboard.board, 0, [], num_available_choices)
    root_node = Node(state=init_state)
    ## init visited_times, quality_value, policy_probi
    onehot_board = pre_process_features(gameboard.board)
    onehot_current_state = torch.from_numpy(onehot_board).type(torch.float32)
    with torch.no_grad():
        probi = net(onehot_current_state.unsqueeze(0))
        probi = probi[0]
    for i, value in enumerate(probi):
        action_index = net_index2action_index(i)
        root_node.policy_probi[action_index] = value.item()
    root_node.visited_times = 1
    root_node.quality_value = 0
    return root_node
def policy_iteration(start_iteration=0):
    """Main training loop: gather episodes in parallel, train, checkpoint.

    Each iteration runs EPISODE_PER_ITERATION episodes across a process pool,
    extends the replay memory, trains the net on one sampled batch, and
    periodically saves the net and the replay memory.
    """
    ## list of [state, action, reward]
    pool = multiprocessing.Pool(processes = NUM_OF_PROCESSES)
    for i in range(start_iteration, TRAIN_ITERATION):
        # for j in range(EPISODE_PER_ITERATION):
        #     train_data = run_episode()
        train_data = pool.map(thread_thunk, range(NUM_OF_PROCESSES))
        for value in train_data:
            replay_memory.extend(value)
        print("size of replay_memory: " + str(len(replay_memory)))
        print("Finish " + str((i + 1) * EPISODE_PER_ITERATION) + " episode")
        if len(replay_memory) >= DATA_SIZE_PER_TRAIN:
            states, actions, rewards = get_batch_from_memory()
            net.train(states, actions, rewards)
        if i % SAVE_MODEL_PERIOD == 0 and i != 0:
            save_net(net, i)
            save_train_data(replay_memory, i)
def policy_iteration_get_data(name, start_iteration=0):
    """Collect episodes serially (no training) and checkpoint the buffer.

    Args:
        name: tag inserted into the pickled replay-memory filenames.
        start_iteration: iteration index to resume from.
    """
    for i in range(start_iteration, TRAIN_ITERATION):
        for j in range(EPISODE_PER_ITERATION):
            train_data = run_episode()
            replay_memory.extend(train_data)
        print("Finish " + str((i + 1) * EPISODE_PER_ITERATION) + " episode")
        if i % SAVE_MODEL_PERIOD == 0 and i != 0:
            save_train_data(replay_memory, i, name)
def thread_thunk(useless):
    """Worker entry point: run this process's share of the episodes.

    Args:
        useless: ignored; present only so the function fits ``Pool.map``.
    Returns:
        list: concatenated training triples from every episode run here.
    """
    collected = []
    episodes = EPISODE_PER_ITERATION // NUM_OF_PROCESSES
    for _ in range(episodes):
        collected.extend(run_episode())
    return collected
def run_episode():
    """Play one full game with MCTS and return its training triples.

    Returns:
        list of [board_snapshot, net_action_index, discounted_return].
    """
    train_data = []
    game = Game(show = False)
    current_node = init_first_node(game.gameboard)
    while not game.termination():
        current_node = monte_carlo_tree_search(current_node)
        choice = current_node.state.get_choice()
        flat_choice = flatten_action(choice)
        net_index = action_index2net_index(flat_choice)
        one_data = [deepcopy(game.gameboard.board), net_index, 0]
        state, reward = game.input_pos(choice[0], choice[1])
        one_data[2] = reward
        train_data.append(one_data)
    ## correct the reward: back up discounted returns from the end of the game
    for i in reversed(range(len(train_data) - 1)):
        train_data[i][2] += GAMMA_RATE * train_data[i + 1][2]
    return train_data
if __name__ == "__main__":
    # CLI modes: "new" starts fresh training, a digit resumes from that
    # checkpoint, "getdata <number>" only collects episodes.
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print("enter your mode:")
        print("new or continue(number) or getdata name(number)")
        exit(0)
    mode = sys.argv[1]
    if mode != "new" and not mode.isdigit() and mode != "getdata":
        print("Undefined mode!!")
        exit(0)
    if mode == "new":
        #load_net("3")
        policy_iteration()
    elif mode == "getdata":
        name = sys.argv[2]
        if not name.isdigit():
            print("Undefined name!!")
            exit(0)
        policy_iteration_get_data(name)
    else:
        # Resume: restore replay buffer and network, then continue training.
        load_train_data(int(mode))
        load_net(int(mode))
        policy_iteration(int(mode))
| [
"joeleung00@gmail.com"
] | joeleung00@gmail.com |
777934ec38c175bfce59c84dc3617ee2fb805a03 | 3a3eb6f6808070e5827acc3d4c382d59bcee8676 | /utils/position_provider.py | 12ae0128ca118014444a4ec85757e73c395011b3 | [] | no_license | tunaalaygut/the-slate-prototype | 5f0e582d06873cb8ba8f5daf49f7de1605d8b610 | 8e2b37c352479404302bfb2d12b0be4d1f29b8c4 | refs/heads/master | 2020-12-19T16:35:48.452788 | 2020-04-17T11:05:17 | 2020-04-17T11:05:17 | 235,790,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | #!/usr/bin/env python
"""
position_provider.py: Module that, provides positions to be used when
drawing rectangles in the form of a tuple (topLeft, bottomRight).
Positions are calculated based on the frame size. Difference between points is
based on the scale input.
"""
# Information
__author__ = "Tuna ALAYGUT"
__copyright__ = "Copyright 2020, The Slate Project"
__status__ = "Development"
__email__ = "alaygut@gmail.com"
def get_top_left(image, scale=0.25, padding=25):
    """Corners of a square rectangle anchored to the image's top-left.

    Args:
        image: object with a (height, width, channels) ``shape`` attribute.
        scale: rectangle side as a fraction of the image's smaller dimension.
        padding: pixels left between the rectangle and the image border.

    Returns:
        ((x1, y1), (x2, y2)) corner coordinates.
    """
    height, width, _ = image.shape
    # Side length scales against the smaller image dimension.
    side = int(min(height, width) * scale)
    return (padding, padding), (padding + side, padding + side)
def get_top_right(image, scale=0.25, padding=25):
    """Corners of a square rectangle anchored to the image's top-right.

    Args:
        image: object with a (height, width, channels) ``shape`` attribute.
        scale: rectangle side as a fraction of the image's smaller dimension.
        padding: pixels left between the rectangle and the image border.

    Returns:
        ((x1, y1), (x2, y2)) corner coordinates.
    """
    height, width, _ = image.shape
    # Side length scales against the smaller image dimension.
    side = int(min(height, width) * scale)
    left_x = width - (padding + side)
    return (left_x, padding), (width - padding, padding + side)
def get_center(image, scale=0.25):
    """Corners of a square rectangle centered on the image.

    Args:
        image: object with a (height, width, channels) ``shape`` attribute.
        scale: rectangle side as a fraction of the image's smaller dimension.

    Returns:
        ((x1, y1), (x2, y2)) corner coordinates.
    """
    height, width, _ = image.shape
    # Side length scales against the smaller image dimension.
    side = int(min(height, width) * scale)
    center_x, center_y = width // 2, height // 2
    half = side // 2
    return (center_x - half, center_y - half), (center_x + half, center_y + half)
# Scaling will be applied based on the smaller side
def get_rectangle_size(height, width, scale):
    """Side length of the square: ``scale`` applied to the smaller dimension."""
    smaller = height if height < width else width
    return int(smaller * scale)
def main():
    # Smoke-test entry point; the module is normally imported, not executed.
    print("Excuse me?")
if __name__ == "__main__":
    main()
| [
"alaygut@gmail.com"
] | alaygut@gmail.com |
eb4207d2b4d3b04a745f8b479e228557e21a8021 | 706b3537037ef3a779357ee7c747bc336ed670d1 | /hier_labels/model/old/data_utils_wo_error_analysis.py | 0ac4e96e8b35ce1c77d31100ab5c152d70a8d657 | [
"Apache-2.0"
] | permissive | medical-projects/PICO | 22dea30ab790861d403aad6ab0472cc40577292a | 5b134e32c804a73d960e28528aebf52dd02b8430 | refs/heads/master | 2021-10-01T00:10:29.411607 | 2018-11-26T06:55:12 | 2018-11-26T06:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,707 | py | import numpy as np
import os
# shared global variables to be imported from model also
UNK = "$UNK$"   # placeholder token for out-of-vocabulary words
NUM = "$NUM$"   # placeholder token substituted for numeric tokens
NONE = "None"   # default tag meaning "no entity"
# special error message
class MyIOError(Exception):
    """File-not-found error with a hint to run build_data.py first."""
    def __init__(self, filename):
        # custom error message (leading newline/indent kept deliberately
        # so the hint stands out when printed)
        message = """
        ERROR: Unable to locate file {}.
        FIX: Have you tried running python build_data.py first?
        This will build vocab file from your train, test and dev sets and
        trimm your word vectors.
        """.format(filename)
        super(MyIOError, self).__init__(message)
class CoNLLDataset(object):
    """Class that iterates over CoNLL Dataset
    __iter__ method yields a tuple (words, tags)
        words: list of raw words
        tags: list of raw tags
    If processing_word and processing_tag are not None,
    optional preprocessing is applied
    Example:
        ```python
        data = CoNLLDataset(filename)
        for sentence, tags in data:
            pass
        ```
    """
    def __init__(self, filename, processing_word=None, processing_tag=None,
                 max_iter=None):
        """
        Args:
            filename: path to the file
            processing_word: (optional) function that takes a word as input
            processing_tag: (optional) function that takes a tag as input
            max_iter: (optional) max number of sentences to yield
        """
        self.filename = filename
        self.processing_word = processing_word
        self.processing_tag = processing_tag
        self.max_iter = max_iter
        # Cached sentence count, computed lazily by __len__.
        self.length = None
    def __iter__(self):
        niter = 0
        with open(self.filename) as f:
            words, tags = [], []
            for line in f:
                line = line.strip()
                # A blank line or a -DOCSTART- marker terminates a sentence.
                if (len(line) == 0 or line.startswith("-DOCSTART-")):
                    if len(words) != 0:
                        niter += 1
                        if self.max_iter is not None and niter > self.max_iter:
                            break
                        yield words, tags
                        words, tags = [], []
                else:
                    # Columns: word, POS, ..., tag; only first and last are used.
                    ls = line.split(' ')
                    word, pos, tag = ls[0], ls[1], ls[-1]
                    if self.processing_word is not None:
                        word = self.processing_word(word)
                    if self.processing_tag is not None:
                        tag = self.processing_tag(tag)
                    words += [word]
                    tags += [tag]
    def __len__(self):
        """Iterates once over the corpus to set and store length"""
        if self.length is None:
            self.length = 0
            for _ in self:
                self.length += 1
        return self.length
def get_vocabs(datasets):
    """Collect the word and tag vocabularies across several datasets.

    Args:
        datasets: iterable of datasets, each yielding (words, tags) pairs.

    Returns:
        (vocab_words, vocab_tags): two sets of strings.
    """
    print("Building vocab...")
    vocab_words, vocab_tags = set(), set()
    for dataset in datasets:
        for words, tags in dataset:
            vocab_words |= set(words)
            vocab_tags |= set(tags)
    print("- done. {} tokens".format(len(vocab_words)))
    return vocab_words, vocab_tags
def get_char_vocab(dataset):
    """Collect every character appearing in the dataset's words.

    Args:
        dataset: iterator yielding (words, tags) tuples.

    Returns:
        set of all characters occurring in any word.
    """
    return {ch for words, _ in dataset for word in words for ch in word}
#change to w2v
def get_glove_vocab(filename):
    """Load vocab from file

    Each line of the vectors file starts with the word, followed by its
    embedding values; only the word (first field) is collected here.

    Args:
        filename: path to the glove vectors
    Returns:
        vocab: set() of strings
    """
    print("Building vocab...")
    vocab = set()
    with open(filename) as f:
        for line in f:
            word = line.strip().split(' ')[0]
            vocab.add(word)
    print("- done. {} tokens".format(len(vocab)))
    return vocab
def write_vocab(vocab, filename):
    """Writes a vocab to a file
    Writes one word per line.

    Note: ``vocab`` must be sized (len() is used), not just any iterator.
    The last word is written without a trailing newline on purpose, so
    load_vocab's enumerate() yields exactly len(vocab) entries.

    Args:
        vocab: iterable that yields word
        filename: path to vocab file
    Returns:
        write a word per line
    """
    print("Writing vocab...")
    with open(filename, "w") as f:
        for i, word in enumerate(vocab):
            if i != len(vocab) - 1:
                f.write("{}\n".format(word))
            else:
                f.write(word)
    print("- done. {} tokens".format(len(vocab)))
def load_vocab(filename):
    """Loads vocab from a file

    Args:
        filename: (string) the format of the file must be one word per line.
    Returns:
        d: dict[word] = index (line number, 0-based)
    Raises:
        MyIOError: if the file cannot be opened.
    """
    try:
        d = dict()
        with open(filename) as f:
            for idx, word in enumerate(f):
                word = word.strip()
                d[word] = idx
    except IOError:
        raise MyIOError(filename)
    return d
def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
    """Saves glove vectors in numpy array

    Only vectors for words present in ``vocab`` are kept; any vocab word not
    found in the glove file keeps a zero vector.

    Args:
        vocab: dictionary vocab[word] = index
        glove_filename: a path to a glove file
        trimmed_filename: a path where to store a matrix in npy
        dim: (int) dimension of embeddings
    """
    embeddings = np.zeros([len(vocab), dim])
    with open(glove_filename) as f:
        for line in f:
            line = line.strip().split(' ')
            word = line[0]
            embedding = [float(x) for x in line[1:]]
            if word in vocab:
                word_idx = vocab[word]
                embeddings[word_idx] = np.asarray(embedding)
    np.savez_compressed(trimmed_filename, embeddings=embeddings)
def get_trimmed_glove_vectors(filename):
    """
    Args:
        filename: path to the npz file
    Returns:
        matrix of embeddings (np array)
    Raises:
        MyIOError: if the npz file cannot be opened.
    """
    try:
        with np.load(filename) as data:
            return data["embeddings"]
    except IOError:
        raise MyIOError(filename)
def get_processing_word(vocab_words=None, vocab_chars=None,
                        lowercase=False, chars=False, allow_unk=True):
    """Return lambda function that transform a word (string) into list,
    or tuple of (list, id) of int corresponding to the ids of the word and
    its corresponding characters.

    Args:
        vocab_words: dict[word] = idx; when given, words are mapped to ids.
        vocab_chars: dict[char] = idx; used only when ``chars`` is True.
        lowercase: lowercase the word before the vocab lookup.
        chars: also return the list of character ids.
        allow_unk: map out-of-vocabulary words to UNK instead of raising.
    Returns:
        f("cat") = ([12, 4, 32], 12345)
                 = (list of char ids, word id)
    """
    def f(word):
        # 0. get chars of words
        if vocab_chars is not None and chars == True:
            char_ids = []
            for char in word:
                # ignore chars out of vocabulary
                if char in vocab_chars:
                    char_ids += [vocab_chars[char]]
        # 1. preprocess word (note: digit words collapse to the NUM token)
        if lowercase:
            word = word.lower()
        if word.isdigit():
            word = NUM
        # 2. get id of word
        if vocab_words is not None:
            if word in vocab_words:
                word = vocab_words[word]
            else:
                if allow_unk:
                    word = vocab_words[UNK]
                else:
                    raise Exception("Unknow key is not allowed. Check that "\
                                    "your vocab (tags?) is correct")
        # 3. return tuple char ids, word id
        if vocab_chars is not None and chars == True:
            return char_ids, word
        else:
            return word
    return f
def _pad_sequences(sequences, pad_tok, max_length):
"""
Args:
sequences: a generator of list or tuple
pad_tok: the char to pad with
Returns:
a list of list where each sublist has same length
"""
sequence_padded, sequence_length = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:max_length] + [pad_tok]*max(max_length - len(seq), 0)
sequence_padded += [seq_]
sequence_length += [min(len(seq), max_length)]
return sequence_padded, sequence_length
def pad_sequences(sequences, pad_tok, nlevels=1):
    """
    Args:
        sequences: a generator of list or tuple
        pad_tok: the char to pad with
        nlevels: "depth" of padding, for the case where we have characters ids
    Returns:
        a list of list where each sublist has same length
    """
    if nlevels == 1:
        # Pad every sentence to the longest sentence length.
        max_length = max(map(lambda x : len(x), sequences))
        sequence_padded, sequence_length = _pad_sequences(sequences,
                                            pad_tok, max_length)
    elif nlevels == 2:
        # First pad every word (char list) to the longest word length...
        max_length_word = max([max(map(lambda x: len(x), seq))
                               for seq in sequences])
        sequence_padded, sequence_length = [], []
        for seq in sequences:
            # all words are same length now
            sp, sl = _pad_sequences(seq, pad_tok, max_length_word)
            sequence_padded += [sp]
            sequence_length += [sl]
        # ...then pad every sentence to the longest sentence length.
        max_length_sentence = max(map(lambda x : len(x), sequences))
        sequence_padded, _ = _pad_sequences(sequence_padded,
                [pad_tok]*max_length_word, max_length_sentence)
        sequence_length, _ = _pad_sequences(sequence_length, 0,
                max_length_sentence)
    return sequence_padded, sequence_length
def minibatches(data, minibatch_size):
    """Yield (x_batch, y_batch) lists of at most ``minibatch_size`` pairs.

    When sentence elements are (char_ids, word_id) tuples, each sentence is
    transposed with zip(*x) into (all_char_ids, all_word_ids) form.

    Args:
        data: generator of (sentence, tags) tuples
        minibatch_size: (int)
    Yields:
        list of tuples
    """
    x_batch, y_batch = [], []
    for x, y in data:
        if len(x_batch) == minibatch_size:
            yield x_batch, y_batch
            x_batch, y_batch = [], []
        if type(x[0]) == tuple:
            x = zip(*x)
        x_batch.append(x)
        y_batch.append(y)
    if x_batch:
        yield x_batch, y_batch
def get_chunk_type(tok, idx_to_tag):
    """Split a tag id into its (prefix, entity-type) parts.

    Args:
        tok: id of token, ex 4
        idx_to_tag: dictionary {4: "B-PER", ...}
    Returns:
        tuple: "B", "PER"
    """
    tag_name = idx_to_tag[tok]
    prefix, _, _ = tag_name.partition('-')
    entity = tag_name.split('-')[-1]
    return prefix, entity
def get_chunks(seq, tags):
    """Given a sequence of tags, group entities and their position

    Args:
        seq: [4, 4, 0, 0, ...] sequence of labels
        tags: dict["O"] = 4
    Returns:
        list of (chunk_type, chunk_start, chunk_end)
    Example:
        seq = [4, 5, 0, 3]
        tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
        result = [("PER", 0, 2), ("LOC", 3, 4)]
    """
    default = tags[NONE]
    idx_to_tag = {idx: tag for tag, idx in tags.items()}
    chunks = []
    chunk_type, chunk_start = None, None
    for i, tok in enumerate(seq):
        # End of a chunk 1: the default (NONE) tag closes any open chunk.
        if tok == default and chunk_type is not None:
            # Add a chunk.
            chunk = (chunk_type, chunk_start, i)
            chunks.append(chunk)
            chunk_type, chunk_start = None, None
        # End of a chunk + start of a chunk!
        elif tok != default:
            tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)
            if chunk_type is None:
                chunk_type, chunk_start = tok_chunk_type, i
            elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
                # A new entity type, or an explicit B- prefix, closes the
                # open chunk and starts a new one.
                chunk = (chunk_type, chunk_start, i)
                chunks.append(chunk)
                chunk_type, chunk_start = tok_chunk_type, i
        else:
            pass
    # end condition: close a chunk still open at the end of the sequence.
    if chunk_type is not None:
        chunk = (chunk_type, chunk_start, len(seq))
        chunks.append(chunk)
    return chunks
| [
"maxaalexeeva@gmail.com"
] | maxaalexeeva@gmail.com |
f5c9a73d8b790153413ae5e3b3afae4b84702db1 | 3d30cf9c7837f0570dec13da02564fad0671f7ae | /src/multiprocessed/error_investigation/this_gives_fnf_error_withOrWithout_manager.py | 9497bccad7fae4955d8cedf61afc6cc5c32ec88f | [] | no_license | phil-shenk/erosion | 6045cb902698aa89ff472ae1d0253a320b93a3fc | e8bf8580aa5452b20b93b4513dd73e5c074bfd7c | refs/heads/master | 2023-02-16T18:56:53.582280 | 2021-01-11T06:49:03 | 2021-01-11T06:49:03 | 287,793,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | import numpy as np
import multiprocessing
#from multiprocessing import shared_memory
from multiprocessing.shared_memory import SharedMemory
from multiprocessing.managers import SharedMemoryManager
import matplotlib.pyplot as plt
def initialize_shared_ndarray_for_reading(shape):
    """Create a shared-memory copy of a random normal grid.

    Returns:
        (shm_name, shape, dtype) needed to re-attach to the segment.

    NOTE(review): returning from inside the ``with SharedMemoryManager()``
    block shuts the manager down on exit, which unlinks the segment -- a
    later attach by name then raises FileNotFoundError. This is presumably
    the error this investigation file exists to demonstrate.
    """
    # make an array to store terrain
    init_grid = np.random.normal(0, 1, shape)
    #print(init_grid)
    # create a section of shared memory of the same size as the grid array
    with SharedMemoryManager() as smm:
        #shm = shared_memory.SharedMemory(create=True, size=init_grid.nbytes)
        shm = smm.SharedMemory(size=init_grid.nbytes)
        # create another ndarray of the same shape & type as grid, backed by shared memory
        prev_grid = np.ndarray(init_grid.shape, dtype=init_grid.dtype, buffer=shm.buf)
        np.copyto(prev_grid, init_grid)
        print("shared array",shm.name,"has been initialized")
        return shm.name, init_grid.shape, init_grid.dtype
def read_from_shared(x,y,shm_name, shm_shape, shm_dtype):
    """Worker: attach to the named shared segment and reduce its contents.

    Parameters x and y are unused on the live code path — they only appear
    in the unreachable debug block below the return statement.
    """
    print("SWONEEE")
    # Locate the shared memory by its name
    shm = SharedMemory(shm_name)
    # Create the np.recarray from the buffer of the shared memory
    # NOTE(review): the producer filled the segment with a plain float
    # array, so accessing a '.val' field presumably fails unless the dtype
    # actually defines such a field — confirm against the writer side.
    np_array = np.recarray(shape=shm_shape, dtype=shm_dtype, buf=shm.buf)
    return np.nansum(np_array.val)
    # Everything below this point is unreachable dead debug code, kept as-is.
    """
    print("swine",x,y,shared_ndarray_name)
    print("attempting to get reference to shared memory",shared_ndarray_name)
    shared_arr = shared_memory.SharedMemory(name=shared_ndarray_name)
    print('swane AA!!!')
    for i in range(-5,6):
        for j in range(-5,6):
            print("guzzle",end="")
            print(shared_arr[(x+i)%400][(y+j)%400])
    print('read from',x,y)
    """
def pool_read(name, shm_shape, shm_dtype):
    """Read the shared array from several worker processes.

    Parameters mirror the tuple returned by
    initialize_shared_ndarray_for_reading: segment name, shape, dtype.
    """
    # Create a multiprocessing Pool
    pool = multiprocessing.Pool(2)
    # read multiple times with specified args
    args = [(19,53,name, shm_shape, shm_dtype),(35,52,name, shm_shape, shm_dtype),(24,63,name, shm_shape, shm_dtype),(7,86,name, shm_shape, shm_dtype)]
    pool.starmap(read_from_shared, args)
    # Bug fix: calling join() on a pool that is still in the RUN state
    # raises ValueError ("Pool is still running"); close() must come first
    # so join() can wait for the workers to exit.
    print("closing pool...")
    pool.close()
    pool.join()
    print("pool joined")
def main():
    # Demo driver: build the shared 400x400 grid, then fan reads out to
    # a small worker pool (which is where the FileNotFoundError shows up).
    shm_name, shm_shape, shm_dtype = initialize_shared_ndarray_for_reading((400, 400))
    print("initialized ndarray memory named", shm_name)
    # A single direct read (bypassing the pool) would look like:
    #   read_from_shared(51, 25, shm_name, shm_shape, shm_dtype)
    pool_read(shm_name, shm_shape, shm_dtype)

if __name__ == '__main__':
    main()
"shenk.philip@gmail.com"
] | shenk.philip@gmail.com |
4ab09766bdc8e293d8f7f6f4e76a8bc6a5df8c02 | fdc17d049336b24a645dcd585c070e4f7b1a3208 | /components/lifter.py | 440f7c4d7ce5eb5353dee4ca8544c96a8d2aa728 | [] | no_license | team1225/2018-CubeKong-py | 9c5e668719a6da50c53ea7295285649041d5b040 | 1652c4e8a648747e14583ab989bdc35973f21c45 | refs/heads/master | 2020-03-12T08:08:51.137977 | 2018-08-06T17:46:17 | 2018-08-06T17:46:17 | 130,521,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import wpilib
class Lifter:
    """Pneumatic lifting mechanism component (MagicBot-style).

    The ``lifter`` double solenoid is injected by the robot framework in
    robot.py; ``position`` selects which way it is driven on every
    control-loop iteration (see execute()).
    """

    # The lifting mechanism, will be injected in robot.py
    lifter: wpilib.DoubleSolenoid
    position: bool

    def set_lift(self, new_position):
        """Set the lift to an explicit position.

        Raises:
            TypeError: if new_position is not a bool.
        """
        if not isinstance(new_position, bool):
            # Bug fix: the original raised the undefined name ExpectedBool,
            # which surfaced as a NameError; raise a standard exception.
            raise TypeError("new_position must be a bool")
        self.position = new_position

    def toggle_position(self):
        """Flip the lift to the opposite position.

        Raises:
            TypeError: if the current position is not a bool.
        """
        if not isinstance(self.position, bool):
            raise TypeError("position must be a bool")
        self.position = not self.position

    def on_enable(self):
        # Called when the robot enters teleop or autonomous mode
        pass

    def execute(self):
        # Executes during robot runtime: drive the solenoid according to
        # the currently requested position.
        if self.position:
            self.lifter.set(wpilib.DoubleSolenoid.Value.kReverse)
        else:
            self.lifter.set(wpilib.DoubleSolenoid.Value.kForward)
| [
"hfiantaca@gmail.com"
] | hfiantaca@gmail.com |
466abeb5379b00a0e982d6cb3a40a44efd641980 | 6e0286c37278848f635b68fc675758cf44fe5837 | /money/admin.py | c82b45b2c9d92ba006a62a61422094f69bbaab84 | [] | no_license | davidbonnaud/monthly-money-tracker | 98846f8702edc2c88db57ba0b26bf9c2e9f97383 | ebf7464d7ba5cbd221d914034da8cda557e06509 | refs/heads/master | 2023-04-20T11:44:58.858901 | 2021-05-04T12:03:56 | 2021-05-04T12:03:56 | 364,242,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
from .models import Transaction, Balance
admin.site.register(Balance)
admin.site.register(Transaction)
# Register your models here.
| [
"david@davidbonnaud.com"
] | david@davidbonnaud.com |
987e31305b1cc7f071b646a09fda38217bed64cd | 7a40b3a8d6e3ff8c7beb7f07459ade7e828696df | /mailer.py | a0309208527394c7fe5dc0973cbd5d4a3ee94d95 | [] | no_license | kpj/WAM | 02ec3bab643252488557f559b368308aa6ec109c | 47e1a5e7eefdd8a34464d232108897665a5eef5a | refs/heads/master | 2021-01-10T20:36:01.338988 | 2011-12-14T13:39:21 | 2011-12-14T13:39:21 | 2,919,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,446 | py | ###############################################
# #
# Send SIGUSR1 to update the subscribe-list #
# example: killall -USR1 python #
# #
###############################################
import smtplib, imaplib, getpass, logging, signal
import email, email.mime.text, email.Iterators, email.header
import time, sys, random, math, re
# Enable logging
level = logging.INFO
log = logging.getLogger(sys.argv[0])
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter("%(asctime)s - %(message)s")
ch.setFormatter(formatter)
log.addHandler(ch)
class signalHandler(object):
    """Bind a callback to a POSIX signal.

    The callback is stored on the instance and invoked (with a log line)
    every time the registered signal is delivered to the process.
    """
    def __init__(self, sig, func):
        # Store the callback before registering, so a signal arriving
        # immediately after registration already finds it.
        self.func = func
        signal.signal(sig, self.handler)
    def handler(self, signum, frame):
        # Standard signal-handler signature; both arguments are unused.
        log.info("Executed function...")
        self.func()
class getData(object):
    """Reads the subscriber e-mail addresses from a plain-text file.

    The file ('mails.txt', one address per line) is opened in 'a+' mode so
    it is created on first use and can be appended to later.
    """
    def __init__(self):
        self.fileWithMails = 'mails.txt'
        self.fd = open(self.fileWithMails, 'a+')
    def genMailList(self):
        """Return all non-empty lines of the mail file as a list.

        Fix: under Python 3 io semantics 'a+' positions the stream at EOF,
        so the original read() always returned '' — rewind to the start
        first (harmless under C-stdio/Python 2 'a+' as well).
        """
        self.fd.seek(0)
        lines = self.fd.read()
        # Drop blanks caused by empty lines and the trailing newline.
        return [addr for addr in lines.split('\n') if addr != '']
class mailer(object):
    """Thin wrapper around a Gmail account: SMTP for sending and IMAP for
    receiving.

    NOTE(review): this module targets Python 2 (``unicode``,
    ``email.Iterators``, print statements elsewhere in the file); under
    Python 3 several bytes/str boundaries here would need fixing.
    """
    def __init__(self, address, passwd):
        # Name-mangled attributes keep the credentials nominally private.
        self.__address = address
        self.__passwd = passwd
        # Outgoing: SMTP with STARTTLS on the submission port 587.
        self.send_server = smtplib.SMTP('smtp.gmail.com', 587)
        self.send_server.ehlo()
        self.send_server.starttls()
        self.send_server.ehlo()
        self.send_server.login(self.__address, self.__passwd)
        # Incoming: IMAP over SSL; select() opens the default mailbox.
        self.recv_server = imaplib.IMAP4_SSL("imap.googlemail.com")
        self.recv_server.login(self.__address, self.__passwd)
        self.recv_server.select()
    def sendMail(self, subject, content, target):
        """Send a plain-text UTF-8 mail to a single recipient."""
        msg = email.mime.text.MIMEText(content, 'plain', 'utf-8')
        msg['Subject'] = subject
        msg['From'] = self.__address
        msg['To'] = target
        self.send_server.sendmail(self.__address, target, msg.as_string())
    def getMail(self, subject, delete = True):
        """Return the bodies of all inbox mails matching the given subject.

        When ``delete`` is true each fetched message is flagged \\Deleted.
        """
        self.recv_server.select()
        typ, data = self.recv_server.search(None, 'SUBJECT', '"%s"' % (subject))
        output = []
        # data[0] is a whitespace-separated list of message sequence
        # numbers.  The loop re-binds 'data' below; this is safe because
        # split() has already materialised the list being iterated.
        for num in data[0].split():
            # m.get_body(email.message_from_string(m.g(1)[0][1]))
            typ, data = self.recv_server.fetch(num, '(RFC822)')
            mail = email.message_from_string(data[0][1])
            content = self.get_body(mail)
            output.append(content)
            if delete:
                self.recv_server.store(num, '+FLAGS', r'(\Deleted)')
        return output
    def getHeader(self, num):
        """Fetch and parse the header of one message.

        NOTE(review): 'header' is an undefined name — parsestr() is
        presumably meant to receive header_text; as written this method
        raises NameError when called.
        """
        h = self.recv_server.fetch(num, '(BODY[HEADER])')
        header_text = h[1][0][1]
        parser = email.parser.HeaderParser()
        head = parser.parsestr(header)
        return head
    def get_charset(self, message, default="ascii"):
        """Best-effort charset lookup: content charset, then message
        charset, then the supplied default."""
        if message.get_content_charset():
            return message.get_content_charset()
        if message.get_charset():
            return message.get_charset()
        return default
    def get_body(self, message):
        """Extract the text/plain payload(s) of a message as unicode."""
        if message.is_multipart():
            # Collect every text/plain sub-part and join them.
            text_parts = [part for part in email.Iterators.typed_subpart_iterator(message, 'text', 'plain')]
            body = []
            for part in text_parts:
                # Fall back to the envelope charset when a part has none.
                charset = self.get_charset(part, self.get_charset(message))
                body.append(unicode(part.get_payload(decode=True), charset, "replace"))
            return u"\n".join(body).strip()
        else:
            body = unicode(message.get_payload(decode=True), self.get_charset(message), "replace")
            return body.strip()
    def __del__(self):
        # Best-effort cleanup of both connections at garbage collection.
        self.send_server.close()
        self.recv_server.close()
class useful(object):
    """Grab bag of small helpers used by the mail loop."""
    def genRandID(self, fromInt, toInt = -1):
        """Return a random integer built from random decimal digits.

        The digit count is drawn uniformly from [fromInt, toInt]; with a
        single argument the count is exactly fromInt.  Leading zeros are
        collapsed by the final int() conversion.
        """
        if toInt == -1:
            toInt = fromInt
        digit_count = random.randint(fromInt, toInt)
        digits = [str(random.randint(0, 9)) for _ in range(digit_count)]
        return int("".join(digits))
class story(object):
    """Persists the collaborative story: an in-memory list of sentences
    mirrored to 'story.txt' (UTF-8, one sentence per line)."""
    def __init__(self, identity):
        self.openFile = 'story.txt'
        self.story = []
        # 'a+' creates the file if missing and always appends on write.
        self.fd = open(self.openFile, 'a+')
        # Load previously stored sentences.  NOTE(review): this relies on
        # Python 2 / C-stdio 'a+' semantics where reads start at the
        # beginning of the file; under Python 3 the position would be at
        # EOF and nothing would be read — confirm the target interpreter.
        self.story = [line.replace('\n','').decode('utf-8') for line in self.fd.readlines()]
        self.identity = identity
    def setID(self, ID):
        # Replace the story's random round identifier.
        self.identity = ID
    def append(self, text):
        """Add one sentence to the story (in memory and on disk)."""
        text = text.replace('\n', '')
        self.story.append(text)
        text = text.encode('utf-8')
        # Python 2 "print to file" syntax; flush keeps the file current
        # even if the process dies between rounds.
        print >> self.fd, text
        self.fd.flush()
    def printStory(self):
        # Dump the whole story to stdout (Python 2 print statement).
        print '\n'.join(self.story)
    def lastPhrase(self):
        """Return the most recent sentence, or the German placeholder
        'Kein letzter Satz.' ('No last sentence.') when still empty."""
        try:
            return self.story[-1]
        except IndexError:
            return 'Kein letzter Satz.'
    def __del__(self):
        # Print the final story and release the file handle on teardown.
        self.printStory()
        self.fd.close()
class looper(object):
    """Drives the 'collaborative story via e-mail' game.

    Each round: mail the story's last sentence to a random subset of
    subscribers, wait for the first reply, strip the quoted parts, append
    the reply to the story, and start the next round with a fresh subject
    identifier.
    """
    def __init__(self):
        self.ownMail = 'kpjkpjkpjkpjkpjkpj@googlemail.com'
        self.u = useful()
        # getpass prompts for the account password on stdin.
        self.m = mailer('kpjkpjkpjkpjkpjkpj+WAM@googlemail.com', getpass.getpass())
        self.g = getData()
        self.runInterval = 2 # in seconds
        self.subject = 'WAM - Write and Mail'
        # German invitation text; '%s' is filled with the last sentence.
        self.content = '\n'.join([
            'Hey,',
            'schoen, dass du mitspielst!',
            'Antworte einfach und schreibe entweder ueber,',
            'oder unter diesem Abschnitt.',
            '(Am Besten mit ein paar Leerzeilen ;))',
            'Der vorhergehende Satz war:',
            '',
            '%s',
            '',
            'Viel Spasz wuenscht kpj',
        ])
        self.genSubscribers()
        # SIGUSR1 reloads the subscriber list at runtime (see file header).
        self.s = signalHandler(signal.SIGUSR1, self.genSubscribers)
        self.gotThisMail = []
        self.gotLastMail = []
        self.story = story(self.u.genRandID(5,10))
        self.currentSubject = '%s (%i)' % (self.subject, self.story.identity)
    def genSubscribers(self):
        """(Re)load the subscriber list; each round mails ~1/3 of them."""
        self.subscriber = self.g.genMailList()
        self.num2Send = int(math.ceil(float(len(self.subscriber))/3))
    def getRecipient(self):
        """Pick num2Send random subscribers, avoiding repeats from the
        previous round where possible, and remember them for next time."""
        for x in range(self.num2Send):
            recipient = random.choice(self.subscriber)
            while recipient in self.gotLastMail or recipient in self.gotThisMail:
                # Give up avoiding repeats once everyone has been used.
                if len(self.gotLastMail) == len(self.subscriber):
                    log.debug("Only one email-address ?!")
                    break
                recipient = random.choice(self.subscriber)
            self.gotThisMail.append(recipient)
        self.gotLastMail = self.gotThisMail[:] # Really create new list
        self.gotThisMail = []
        return self.gotLastMail
    def getStory(self, mail):
        """Strip quoting artifacts from a reply and return the new text.

        Removes the line containing our own address, the attribution line
        before the first quoted ('>') line, all quoted lines, and blank
        lines.  NOTE(review): the unconditional m.remove('\\r') raises
        ValueError if no lone '\\r' element is present — confirm replies
        always contain one.
        """
        m = mail.split('\n')
        comment_pattern = '>'
        # Just always delete my own address:
        for i in range(len(m)):
            if self.ownMail in m[i]:
                m.remove(m[i])
                break
        pos = -1
        hadComment = False
        # Find the first quoted line; the line before it is the
        # "On <date>, <sender> wrote:" attribution.
        for p in m:
            pos += 1
            if comment_pattern in p:
                hadComment = True
                break
        if hadComment:
            m.pop(pos-1)
        # Collect and then remove every quoted line.
        toDel = []
        for i in range(len(m)):
            if comment_pattern in m[i]:
                toDel.append(m[i])
        for i in toDel:
            m.remove(i)
        # Remove blank entries (one remove('') attempt per original line).
        for i in range(len(m)):
            try:
                m.remove('')
            except ValueError:
                pass
        m.remove('\r')
        mail = '\n'.join(m)
        return mail
    def sendMails(self):
        """Mail the current last sentence to this round's recipients."""
        for recipient in self.getRecipient():
            log.info("Sending mail to %s" % recipient)
            self.m.sendMail(self.currentSubject,
                    self.content % (
                    self.story.lastPhrase()),
                    recipient)
    def start(self):
        """Run the game loop forever: send, poll for a reply, append."""
        self.sendMails()
        while True:
            mails = []
            # Poll the inbox until at least one reply with the current
            # subject arrives.
            while True:
                log.debug("Receiving mails (%s)" % repr(self.currentSubject))
                mails = self.m.getMail(self.currentSubject)
                if mails:
                    log.info("Received 1 mail" if len(mails) == 1 else "Received %i mails"%len(mails))
                    break
                else:
                    log.debug("Received 0 mails")
                    time.sleep(self.runInterval)
            # New round: fresh identifier so stale replies are ignored.
            self.story.setID(self.u.genRandID(5,10))
            self.currentSubject = '%s (%i)' % (self.subject, self.story.identity)
            # Only the first reply wins; later ones are discarded.
            content = ''.join(self.getStory(mails[0]))
            self.story.append(content)
            self.sendMails()
    def __del__(self):
        pass
# Script entry point: building the looper prompts for the account
# password via getpass, then the mail loop runs forever.
looper().start()
#getData().genMailList()
#m=mailer('kpjkpjkpjkpjkpjkpj+WAM@googlemail.com', getpass.getpass())
#l = looper()
# vim: autoindent:
| [
"kpjkpjkpjkpjkpjkpj@googlemail.com"
] | kpjkpjkpjkpjkpjkpj@googlemail.com |
494e005bf465f0567890224eb9e0fc3ebf7b085f | 595ff50f59cc29ee8ff8d5780971b81cdd768d21 | /manage.py | e4d1f918800657b994e5d21218941857761272e9 | [] | no_license | shermanxiong/scam-detection | 62a3c20dd08fa443ad91bf82e4062339a8fb6c45 | d8f33024cd0331f78bf8afe39ac82400464b5c08 | refs/heads/master | 2020-06-06T06:12:05.735943 | 2019-06-21T01:17:46 | 2019-06-21T01:17:46 | 192,660,704 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scamDetectionTask.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original as cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"sherman.xiong@gmail.com"
] | sherman.xiong@gmail.com |
b789dcc8c2c8b5c5cc7429535c32875a9f690efc | 8cfee59143ecd307fe7d7a27986c3346aa8ce60c | /AI/1. Machine Learning/163_mnist-tocsv.py | cf18d04e18f98b2720fada6f34a867fd43f3f5a4 | [] | no_license | kiminhan/Python | daafc1fde804f172ebfb1385ab9d6205c7a45970 | dc6af486aaf7d25dbe13bcee4e115207f37d4696 | refs/heads/master | 2020-03-08T19:18:10.173346 | 2018-09-06T06:11:40 | 2018-09-06T06:11:40 | 128,288,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | import struct
def to_csv(name, maxdata):
    """Convert MNIST idx label/image files into a CSV (label,pixels...) file.

    Parameters:
        name: dataset prefix under ./mnist_/ ("train" or "t10k").
        maxdata: index cutoff; rows 0..maxdata inclusive are written
                 (note: that is maxdata+1 rows, matching the original).

    The first 10 images are also dumped as ASCII PGM files so the
    conversion can be spot-checked visually.
    """
    # Open the label file, the image file and the output CSV.
    # 'with' guarantees the files are closed even if parsing fails
    # (the original leaked the handles on error).
    with open("./mnist_/"+name+"-labels-idx1-ubyte", "rb") as lbl_f, \
         open("./mnist_/"+name+"-images-idx3-ubyte", "rb") as img_f, \
         open("./mnist_/"+name+".csv", "w", encoding="utf-8") as csv_f:
        # Read the idx headers: magic number, item count, image geometry.
        mag, lbl_count = struct.unpack(">II", lbl_f.read(8))
        mag, img_count = struct.unpack(">II", img_f.read(8))
        rows, cols = struct.unpack(">II", img_f.read(8))
        pixels = rows * cols
        # Read each image and append it to the CSV as "label,p0,p1,...".
        for idx in range(lbl_count):
            if idx > maxdata: break
            label = struct.unpack("B", lbl_f.read(1))[0]
            bdata = img_f.read(pixels)
            sdata = [str(b) for b in bdata]
            csv_f.write(str(label)+",")
            csv_f.write(",".join(sdata)+"\r\n")
            # Dump the first few images as PGM for a visual sanity check.
            if idx < 10:
                s = "P2 28 28 255\n"
                s += " ".join(sdata)
                iname = "./mnist_/{0}-{1}-{2}.pgm".format(name,idx,label)
                with open(iname, "w", encoding="utf-8") as f:
                    f.write(s)
# Write the results out as files --- (※4)
to_csv("train", 1000)
to_csv("t10k", 500)
"rladlsgks4@naver.com"
] | rladlsgks4@naver.com |
2c4bb73122378d72b0354014879f4c1218f974a8 | 92d1de8f4b010b6f29621e95961f2d3682ad6ef9 | /blackwidow/error.py | c7fbaa022e864a9bc8396c403b5979b470077742 | [
"MIT"
] | permissive | tdstarkisoc/BlackWidow | 3495c66ecc14368e9374357a1f8e6163ed700f78 | b03353f75104fbc9a3854214524006ae586dba2f | refs/heads/master | 2022-11-27T19:53:47.896259 | 2020-08-13T10:48:48 | 2020-08-13T10:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | class BlackWidowError(Exception):
pass
class BlackWidowIOError(BlackWidowError, IOError):
    """I/O failure raised by BlackWidow; also catchable as IOError."""
    pass
class BlackWidowValueError(BlackWidowError, ValueError):
    """Invalid-value error raised by BlackWidow; also catchable as ValueError."""
    pass
| [
"madison@indico.io"
] | madison@indico.io |
2d08f717b2851b1e9f4fc9e7217b39fbf022aadd | 88f25352d2c8b39d149187736bb260cb84097d93 | /lista4/rootNewton.py | 1ce24e0f01a47ba61667893f8bb07406bc59e0c3 | [] | no_license | ducast/alc | 0af83200aa73dc7f00657b3d6fdfad58ab517029 | dccc7426b0722b192a21226a9df534495f37c34d | refs/heads/master | 2021-01-23T14:46:58.863020 | 2017-06-11T21:49:39 | 2017-06-11T21:49:39 | 93,260,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from math import *
import numdifftools as nd
def rootNewton(f,x0,max_it,tol):
    """Find a root of f by Newton's method (x_{k+1} = x_k - f(x_k)/f'(x_k)).

    Parameters:
        f: callable whose root is sought.
        x0: initial guess.
        max_it: maximum number of iterations (assumed >= 1).
        tol: stop when successive iterates differ by less than this.

    Returns the approximate root on convergence; implicitly returns None
    (after printing a warning) when max_it iterations are exhausted —
    callers should check for that.  Progress is printed every iteration,
    matching the original behaviour.
    """
    print ("==== Método de Newton para Raízes ====")
    # Performance fix: build the numerical-derivative operator once.
    # The original constructed nd.Derivative(f) on every iteration even
    # though f never changes inside the loop.
    df = nd.Derivative(f)
    for it in range(max_it):
        x = x0 - f(x0) / df(x0)
        tolk = abs(x - x0)
        print (int(it),round(x,2))
        if tolk < tol:
            print ("Convergiu para %d iterações.\nXi = "%(it+1),x)
            return x
        x0 = x
    print ("Não convergiu para %d iterações.\nXk = "%(it+1),x)
# def f1(x):
# return x**2-4*cos(x)
#
# f=f1
# x0 = 10
# max_it = 10000
# tol = 10**(-4)
#
# rootNewton(f1,x0,max_it,tol)
| [
"eduardo-castanho@hotmail.com"
] | eduardo-castanho@hotmail.com |
b8137ddbd4d31ee1e675044996c2784fc45b202a | 28c1c3afaf5e70c0530b864ead16fa8762ef1ca4 | /ch05_Array/list_size.py | 78660ba7e14c0f9ebf85b4bc2b7a1d1726f1190f | [] | no_license | luoshao23/Data_Structure_and_Algorithm_in_Python | 8059381c21580e3e4f1276089b9fe4f96de385f8 | 051754963ca2eb818b981ba72583314a043e5df4 | refs/heads/master | 2020-04-29T06:29:02.148886 | 2019-05-15T02:46:48 | 2019-05-15T02:46:48 | 175,917,337 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import sys
# Empirically show how a CPython list over-allocates as it grows: print
# the element count and the byte size of the list before each append.
data = []
n = 27
for _ in range(n):
    length = len(data)
    size_bytes = sys.getsizeof(data)
    print('Length: {0:3d}; Size in bytes: {1:4d}'.format(length, size_bytes))
    data.append(None)
| [
"luoshao23@gmail.com"
] | luoshao23@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.