Dataset schema (each record below is one row; fields are separated by "|"):
blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 2-616) | content_id (string, length 40) | detected_licenses (list, length 0-69) | license_type (string, 2 classes) | repo_name (string, length 5-118) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, length 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, length 2-10.3M) | authors (list, length 1) | author_id (string, length 0-212)
fda194aff772871c7c4b2ea781497dc72cf05c8a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_cryings.py
|
62b48a889fcdd5f58f940b8aca110dd0c8ff2b83
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from xai.brain.wordbase.verbs._cry import _CRY
# class header
class _CRYINGS(_CRY):
def __init__(self):
_CRY.__init__(self)
self.name = "CRYINGS"
self.specie = 'verbs'
self.basic = "cry"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
240ee78594f9cde78e782abfe63180952dadd898
|
440b918affee34f5ae09f9389e10212eb00738d4
|
/pir.py
|
205cb238cc331fa40307c6183c3ad1b3dbda6cca
|
[] |
no_license
|
wildtangent/raspberry-pi-examples
|
4cd0deaf2b8bba7d6e08ba58f3a1f991b96fdde4
|
4f74f9cfb6008bfa953c8b9ee3b5bfbd30662d35
|
refs/heads/master
| 2021-01-10T02:14:54.855946
| 2015-11-03T01:51:14
| 2015-11-03T01:51:14
| 45,083,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
import RPi.GPIO as GPIO
class MyPiPIR:
# Default GPIO pin
DEFAULT = 7
# States
INACTIVE = 0
ACTIVE = 1
ACTIVATED = 2
DEACTIVATED = 3
def __init__(self, pin):
self.pin = pin
self.current_state = 0
self.previous_state = 0
self.setup()
def setup(self):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.pin, GPIO.IN)
# Wait for PIR to settle down
while GPIO.input(self.pin) == 1:
self.ready = False
self.ready = True
self.current_state = self.__state()
def __state(self):
return GPIO.input(self.pin)
def state(self):
self.current_state = self.__state()
if self.current_state == 1 and self.previous_state == 0:
self.previous_state = 1
return self.__class__.ACTIVATED
elif self.current_state == 0 and self.previous_state == 1:
self.previous_state = 0
return self.__class__.DEACTIVATED
elif self.current_state == 1 and self.previous_state == 1:
return self.__class__.ACTIVE
elif self.current_state == 0 and self.previous_state == 0:
return self.__class__.INACTIVE
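# A minimal usage sketch (not part of the original file; the pin choice and
# poll interval are assumptions):
#
#   import time
#   pir = MyPiPIR(MyPiPIR.DEFAULT)
#   while True:
#       if pir.state() == MyPiPIR.ACTIVATED:
#           print("Motion detected")
#       time.sleep(0.1)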
|
[
"joe@authomedia.co.uk"
] |
joe@authomedia.co.uk
|
30df20c459875066299b819277787ea6cd268ad7
|
0fd506e2651fde07ff65ae80c12226a18d7778a2
|
/wildfire/pandas_cut_test.py
|
d3bbc09ac55c3f2995b8e3a3325024673de427c4
|
[] |
no_license
|
MillerWu2014/remote_project
|
af69e20d84809ea3d1e121e5cac57715073f70d6
|
7458ec6571b5b046f07ce2f89dcb393e0dd2e478
|
refs/heads/master
| 2020-08-03T15:43:36.028810
| 2019-09-30T07:45:06
| 2019-09-30T07:45:06
| 211,804,058
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
import pandas as pd
x = pd.Series(range(100))
print(pd.cut(x, 50, labels=range(50)))
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
cd846f89d90d6f2f5ce61fa895e49409d4e39009
|
604ffaf79c5f9c816bb1a2151ae33fbf29bca52b
|
/cloudstoragetui/keypress.py
|
6eaf37d5320d02c08c43e2c2b52b735c33eabb6f
|
[
"MIT"
] |
permissive
|
joeyism/cloud-storage-tui
|
1069092b51f1d11daa033ea5896b625e42e55691
|
8fda9bc8551756e88db706944489f1bbcc95a52c
|
refs/heads/master
| 2023-05-31T00:33:07.979555
| 2021-06-07T17:19:26
| 2021-06-07T17:19:26
| 352,346,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
import curses
from typing import List
from cloudstoragetui.constants import KEY_QUIT, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT, KEY_ENTER, ESC, UP, DOWN, LEFT, RIGHT
from cloudstoragetui.draw import DrawnBox
from cloudstoragetui.cursor_state import CursorState
from cloudstoragetui.debug import log
def _extract_min_max(box):
min_y = box.top_left_y + 1
min_x = box.top_left_x + 1
max_y = box.length_y + box.top_left_y - 2
max_x = (box.index + 1) * box.length_x - 1
return (min_y, min_x, max_y, max_x)
def _eval_keypress(screen, key, boxes, cursor_state):
curs_y, curs_x = curses.getsyx()
box = boxes[cursor_state.column]
min_y, min_x, max_y, max_x = _extract_min_max(box)
action = None
if key in KEY_QUIT:
action = ESC
elif key in KEY_UP:
cursor_state.move_row_up(min_y)
screen.move(max(curs_y - 1, min_y), curs_x)
action = UP
elif key in KEY_DOWN:
cursor_state.move_row_down(max_y)
screen.move(min(curs_y + 1, max_y), curs_x)
action = DOWN
elif key in KEY_LEFT:
if curs_x == min_x:
cursor_state.move_column_left()
box = boxes[cursor_state.column]
min_y, min_x, max_y, max_x = _extract_min_max(box)
screen.move(min_y, min_x)
else:
screen.move(curs_y, max(curs_x - 1, min_x))
action = LEFT
elif key in KEY_RIGHT + KEY_ENTER:
cursor_state.move_column_right()
box = boxes[cursor_state.column]
screen.move(box.top_left_y + 1, box.top_left_x + 1)
action = RIGHT
screen.refresh()
return action
def eval_keypress(screen, key: int, boxes: List[DrawnBox], cursor_state: CursorState):
return _eval_keypress(screen, key, boxes, cursor_state)
|
[
"sham.joey@gmail.com"
] |
sham.joey@gmail.com
|
17b06bd2a43cb53c3dac057bdad33d265da42666
|
3df70c9573f55311f03ad02a329b580eb8f4997f
|
/hms.py
|
dca8751cf4a45a172e85f7c24234b0d4d7634d97
|
[] |
no_license
|
sumukharm/Distributed-Systems-projects-and-Implementations
|
02b57f34dfbd5582c630d04e027033dccfb929ea
|
61fbe949614e6d7eaeda530cf6ee1c22322ba898
|
refs/heads/master
| 2020-03-29T00:40:28.160427
| 2019-03-23T05:24:30
| 2019-03-23T05:24:30
| 149,350,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,811
|
py
|
# -*- generated by 1.1.0b13 -*-
import da
PatternExpr_254 = da.pat.TuplePattern([da.pat.ConstantPattern('ack')])
PatternExpr_259 = da.pat.BoundPattern('_BoundPattern260_')
PatternExpr_298 = da.pat.TuplePattern([da.pat.ConstantPattern('push'), da.pat.FreePattern('secrets'), da.pat.FreePattern('predecessor')])
PatternExpr_346 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_351 = da.pat.SelfPattern()
PatternExpr_371 = da.pat.TuplePattern([da.pat.ConstantPattern('done')])
PatternExpr_376 = da.pat.BoundPattern('_BoundPattern378_')
PatternExpr_354 = da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.FreePattern(None), da.pat.SelfPattern()]), da.pat.TuplePattern([da.pat.ConstantPattern('done')])])
PatternExpr_379 = da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.FreePattern(None), da.pat.BoundPattern('_BoundPattern385_')]), da.pat.TuplePattern([da.pat.ConstantPattern('done')])])
PatternExpr_261 = da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.FreePattern(None), da.pat.BoundPattern('_BoundPattern267_')]), da.pat.TuplePattern([da.pat.ConstantPattern('ack')])])
_config_object = {}
import sys
import time
from random import randint
from statistics import stdev
'\nclass for Hear My Secret, where:\nAgent i calls agent j if he (agent i) does not know whether j is familiar with his secret.\nThis protocol follows push mode of communication\n'
class HMS(da.DistProcess):
def __init__(self, procimpl, forwarder, **props):
super().__init__(procimpl, forwarder, **props)
self._HMSReceivedEvent_0 = []
self._HMSReceivedEvent_2 = []
self._HMSReceivedEvent_3 = []
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_HMSReceivedEvent_0', PatternExpr_254, sources=[PatternExpr_259], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_HMSReceivedEvent_1', PatternExpr_298, sources=None, destinations=None, timestamps=None, record_history=None, handlers=[self._HMS_handler_297]), da.pat.EventPattern(da.pat.ReceivedEvent, '_HMSReceivedEvent_2', PatternExpr_346, sources=[PatternExpr_351], destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_HMSReceivedEvent_3', PatternExpr_371, sources=[PatternExpr_376], destinations=None, timestamps=None, record_history=True, handlers=[])])
def setup(self, agents, secret, tout, n, mLoss, mDelay, **rest_407):
super().setup(agents=agents, secret=secret, tout=tout, n=n, mLoss=mLoss, mDelay=mDelay, **rest_407)
self._state.agents = agents
self._state.secret = secret
self._state.tout = tout
self._state.n = n
self._state.mLoss = mLoss
self._state.mDelay = mDelay
self._state.AgentWithMySecret = set()
self._state.knownSecrets = set()
self._state.exitCondition = False
self._state.knownSecrets.add(self._state.secret)
def run(self):
while (not PatternExpr_354.match_iter(self._HMSReceivedEvent_2, SELF_ID=self._id)):
self.gossip()
super()._label('_st_label_368', block=False)
_st_label_368 = 0
while (_st_label_368 == 0):
_st_label_368 += 1
if PatternExpr_379.match_iter(self._HMSReceivedEvent_3, _BoundPattern385_=self.parent(), SELF_ID=self._id):
_st_label_368 += 1
else:
super()._label('_st_label_368', block=True)
_st_label_368 -= 1
'Correctness Test: \n\t\tIf the number of known/learned secrets is equal to the total number of agents in the network,\n\t\tthen the agent is an expert and the protocol is correct\n\t\t'
if (self._state.n == len(self._state.knownSecrets)):
print('<HMS>Testing correctness; Number of secrets learnt equals Number of different processes, Result :Verified', flush=True)
else:
print('<HMS>Failed in correctness testing', flush=True)
def gossip(self):
' \n\t\tCore function of the protocol, contains the protocol specific logic.\n\t\tAgent i calls agent j if he (agent i) does not know whether j is familiar with his secret.\n\t\tThis protocol follows push mode of communication\n\t\t'
super()._label('yield', block=False)
self._state.exitCondition = True
for agent in self._state.agents:
"check if there's atleast one agent which is not aware of his/her secret. \n\t\t\tIf not, then the exit condition remains true.\n\t\t\t"
if (not (agent in self._state.AgentWithMySecret)):
self._state.exitCondition = False
print('{} sending secret to {}'.format(self._id, agent), flush=True)
self.send(('push', self._state.knownSecrets, self._id), to=agent)
super()._label('_st_label_251', block=False)
_st_label_251 = 0
while (_st_label_251 == 0):
_st_label_251 += 1
if PatternExpr_261.match_iter(self._HMSReceivedEvent_0, _BoundPattern267_=agent, SELF_ID=self._id):
self._state.AgentWithMySecret.add(agent)
_st_label_251 += 1
elif self._timer_expired:
print('timeout', flush=True)
_st_label_251 += 1
else:
super()._label('_st_label_251', block=True, timeout=self._state.tout)
_st_label_251 -= 1
else:
if (_st_label_251 != 2):
continue
self._timer_start()
if (_st_label_251 != 2):
break
if self._state.exitCondition:
self.send(('done',), to=self._id)
self.send(('done',), to=self.parent())
def _HMS_handler_297(self, secrets, predecessor):
'function called when an agent receives secret from the caller agent.\n\t\tAll the received secrets are added to the known set of secrets\n\t\t'
print('{} receiving secret entries from {}'.format(self._id, predecessor), flush=True)
self.send(('ack',), to=predecessor)
' If the caller agent is familiar with his/her secret, \n\t\tthen add them to the set of known agents'
if (self._state.secret in secrets):
self._state.AgentWithMySecret.add(predecessor)
self._state.knownSecrets = self._state.knownSecrets.union(secrets)
_HMS_handler_297._labels = None
_HMS_handler_297._notlabels = None
|
[
"noreply@github.com"
] |
sumukharm.noreply@github.com
|
ca010878792d0bc73fec72213f7db9f251dfd0e5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_bandaged.py
|
9ef009f331d8a4fe83aa934179c391e624957156
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from xai.brain.wordbase.nouns._bandage import _BANDAGE
# class header
class _BANDAGED(_BANDAGE):
def __init__(self):
_BANDAGE.__init__(self)
self.name = "BANDAGED"
self.specie = 'nouns'
self.basic = "bandage"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
a51a6f80ca166b4e727549a24872fb0c2e79b437
|
84b35f9c88de110056ae8fc4463a8b5218219112
|
/validations/float_validations.py
|
24e9afe52009e25fb1db90d5f58945298a8a43e5
|
[] |
no_license
|
JMGalvan/python
|
7657889acba2a7eefacb62632903ad0fe3362765
|
38c6ae0b67d2e1913380f53db1b12fcffd6b922e
|
refs/heads/main
| 2023-03-25T18:09:49.989527
| 2021-03-28T12:54:51
| 2021-03-28T12:54:51
| 351,547,667
| 0
| 0
| null | 2021-03-28T12:54:51
| 2021-03-25T19:09:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,814
|
py
|
# -*- coding: utf-8 -*-
"""
@author: José Manuel Galván Díaz
@course: 08GIIN Metodología de Programación
"""
#
# isValidFloat: Checks whether the value is a real number; if it is not,
# checks whether it can be converted to one
# @param value (Integer): Value to check
# @return boolean
#
def isValidFloat(value):
if type(value) != float and type(value) != int:
try:
if float(value) or float(value) == 0 :
return True
except (TypeError, ValueError):
return False
else:
return True
#
# valueFloatIncluded: Checks whether the received value lies between the configured minimum and maximum.
# @param value (Float): Value to validate.
# @param minValue (Float): Minimum valid value; the supplied value must be greater than or equal to it.
# @param maxValue (Float): Maximum valid value; the supplied value must be strictly less than it.
# @param decimals (Integer): Maximum number of decimal places.
# @return Boolean
#
def valueFloatIncluded(value, minValue, maxValue, decimals):
if isValidFloat(value) and isValidFloat(minValue) and isValidFloat(maxValue):
return float(value) >= float(minValue) and float(value) < float(maxValue) and minDecimals(value, decimals)
else:
return False
#
# minDecimals: Checks that the received value does not exceed the maximum number of decimal places.
# @param value (Float): Value to validate.
# @param decimals (Integer): Maximum number of decimal places.
# @return Boolean
#
def minDecimals(value, decimals):
numbers = str(value).split(".")
if len(numbers) == 1:
return True
elif len(numbers) > 1:
return len(numbers[1]) <= decimals
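# A minimal usage sketch (not part of the original module; the sample values
# are assumptions):
#
#   valueFloatIncluded("3.14", 0, 10, 2)  # True: in [0, 10) with at most 2 decimals
#   valueFloatIncluded(3.141, 0, 10, 2)   # False: three decimal places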
|
[
"noreply@github.com"
] |
JMGalvan.noreply@github.com
|
0708d1f53178ef53448b79db82b98db55f6ac3f9
|
7540483a0fc5c529c00e8af2bcf0734b731c0478
|
/src/python/modules/setup.py
|
39115cf421b7a0cea8061112f70e494ca7183a86
|
[
"MIT"
] |
permissive
|
Mart1250/bluebrain
|
a10bbae853efa0ed0c70bf02bb972e7f67873453
|
69670cb43c74e38b3dece288478b0c86bb9e4e23
|
refs/heads/master
| 2021-01-21T09:33:46.713262
| 2015-06-30T07:08:33
| 2015-06-30T07:08:33
| 38,633,562
| 1
| 0
| null | 2015-07-06T17:08:57
| 2015-07-06T17:08:57
| null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
from distutils.core import setup
setup(
name='Cannybots',
version='0.1dev',
packages=['cannybots','cannybots.clients'],
license='MIT license',
long_description=open('README.txt').read(),
)
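# Hypothetical usage (not part of the original file): build a source
# distribution from the directory containing this setup.py with
#   python setup.py sdist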
|
[
"wayne@cannybots.com"
] |
wayne@cannybots.com
|
b3646cc8d6f341f85ee10da44ac42e4e9153fe19
|
12747304dbdfced3ac425870c2c0f680ae3db26e
|
/todo/migrations/0002_auto_20180904_1255.py
|
c78e8f801d0741b67e4be50ed9668ab8ee029803
|
[] |
no_license
|
Rudresh17/jarvisr
|
c0079333691092cc55171c757b6f1dfc8a3a1eb6
|
9ce0d1d96545bcd150b3533f508f1d764870e0ef
|
refs/heads/master
| 2020-03-28T18:23:46.240550
| 2018-09-15T06:58:11
| 2018-09-15T06:58:11
| 148,877,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
# Generated by Django 2.0.5 on 2018-09-04 07:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('todo', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='todo',
new_name='add',
),
]
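# Hypothetical usage (not part of the original file): apply this migration with
#   python manage.py migrate todo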
|
[
"rudreshlegend@gmail.com"
] |
rudreshlegend@gmail.com
|
8020760bc5cd0a1d148739c5991cea3a09beb85f
|
5aadc1f06bdb68a73bb003b23cc85af528d61bf4
|
/detection_network/src/rl/ppo.py
|
b71a185fbb72e8e16bc734d17634f6bdea14b165
|
[] |
no_license
|
zideajang/zi_full_self_drive_system
|
81dca2ca0541dfab7c021c6e3a0e58701bbf1693
|
fee2e4057619a19a585fbd8b9622f69c25946be1
|
refs/heads/master
| 2023-09-03T02:41:35.720600
| 2021-09-27T02:37:41
| 2021-09-27T02:37:41
| 358,083,188
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,307
|
py
|
import torch
import torch.optim as optim
class RolloutStorage:
def __init__(self,num_steps,num_processes,action_size):
pass
class PPO(object):
def __init__(
self,
controller,
clip_param,
lr,
baseline_decay,
action_size = 18,
ppo_epoch=1,
num_mini_batch=100,
max_grad_norm=2.0,
entropy_coef=0,
num_steps=100,
num_processes=1
):
self.ppo_epoch = ppo_epoch
self.controller = controller
self.optimizer = optim.Adam(controller.parameters(),lr=lr)
self.num_mini_batch = num_mini_batch
self.clip_param = clip_param
self.max_grad_norm = max_grad_norm
self.entropy_coef = entropy_coef
self.rollouts = RolloutStorage(num_steps,num_processes,action_size)
self.baseline = None
self.decay = baseline_decay
def state_dict(self):
return {
"baseline":self.baseline,
"rollouts":self.controller.state_dict(),
"optimizer:":self.optimizer.state_dict()
}
def load_state_dict(self,states):
pass
def update(self, sample, is_train=True):
reward, action, log_prob = sample
if self.baseline is None:
self.baseline = reward
else:
self.baseline = self.decay * self.baseline + (1 - self.decay) * reward
self.rollouts.insert(action, log_prob, reward)
if not is_train:
return -1,-1
advantages = self.rollouts.rewards - self.baseline
loss_epoch = 0
entropy_epoch = 0
for _ in range(self.ppo_epoch):
data_generator = self.rollouts.generator(advantages, self.num_mini_batch)
for sample in data_generator:
(
actions_batch,
reward_batch,
old_actions_log_probs_batch,
adv_targ,
) = sample
action_log_probs, entropy = self.controller.evaluate_actions(
actions_batch
)
ratio = torch.exp(
action_log_probs - torch.from_numpy(adv_targ)
)
adv_targ_th = torch.from_numpy(adv_targ).float()
|
[
"you@example.com"
] |
you@example.com
|
38c7d9d6bb8677f9a4868e805cbb65a375dc0c84
|
352d5acef4e8fca2d1306d9e749444869ed4ef2f
|
/easy/WeakestRowInAMatrix.py
|
213a28f74b1de73ce29e57445ecf4565a51837dc
|
[] |
no_license
|
rupafn/leetcode
|
d73672a5fce8fd2027d3a7d0118d9ff7b9e89c5e
|
2c75625db500d806e2a55954c85f389cfde2c3aa
|
refs/heads/master
| 2021-06-07T20:01:07.992640
| 2021-05-07T15:29:15
| 2021-05-07T15:29:15
| 128,645,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
class Solution:
def kWeakestRows(self, mat, k):
sums = []
for i in range(0,len(mat)):
sums.append(sum(mat[i]))
res = []
while(k>0):
ind = sums.index(min(sums))
sums[ind] = 100000
res.append(ind)
k-=1
return res
obj = Solution()
mat = [[1,1,0,0,0],[1,1,1,1,0],[1,0,0,0,0],[1,1,0,0,0],[1,1,1,1,1]]
k = 3
obj.kWeakestRows(mat,k)
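# Expected result for this mat and k, noted for illustration (the original
# script does not print it): [2, 0, 3] — row sums are 2, 4, 1, 2, 5 and the
# three smallest win, ties broken by index.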
|
[
"rupafn@gmail.com"
] |
rupafn@gmail.com
|
4cd13153a646bc5c96f4e50481b78f266720eb61
|
4d7f0434526a8f286e98a36379b991a1a307a6cb
|
/WikiTextsDataset/WrapperTextsSource.py
|
85b66a99e4284c66a10a0443e95af82e0643595d
|
[] |
no_license
|
PerfectStepCoder/NeuroProjects
|
464acbaa50e0067f70710d13bd0024dbd1117825
|
d385828a0e0b668760f9990a75c13cb05f5efef4
|
refs/heads/master
| 2020-03-24T21:30:14.668472
| 2018-08-09T11:13:15
| 2018-08-09T11:13:15
| 143,035,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,567
|
py
|
import os
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from src.NN.WikiTextsDataset.wiki_utils import Texts
wikitext_folder = os.path.abspath(os.path.join(os.path.curdir, 'wikitext'))
class WikitextDataset(Dataset):
"""
Dataset of Wikitext
"""
def __init__(self, path_folder, transform=None, sequence_length=10):
self.origin_corpus = Texts(path_folder)
self.transform = transform
self.sequence_length = sequence_length
def __len__(self):
return len(self.origin_corpus.valid) - self.sequence_length
def __getitem__(self, idx):
end = idx + self.sequence_length
id_symbols_x = self.origin_corpus.valid[idx:end]
id_symbols_y = self.origin_corpus.valid[end]  # the symbol immediately after the input window
if self.transform:
# Convert letters to ids or to one-hot vectors
id_symbols_x = self.transform(id_symbols_x, self.origin_corpus.dictionary)
id_symbols_y = self.transform(id_symbols_y, self.origin_corpus.dictionary)[0]
sample = {'serial': id_symbols_x, 'predict_letter': id_symbols_y}
return sample
class ToSymbols(object):
"""
id -> letter
"""
def __call__(self, x, dic):
output = []
if len(x.size()) != 0:
for item in x:
output.append(dic.idx2symbol[int(item)])
else:
output.append(dic.idx2symbol[int(x)])
return output
class ToOneHot(object):
"""
id -> one hot vector
"""
def __call__(self, x, dic):
output = []
if len(x.size()) != 0:
for item in x:
output.append([1 if i == item else 0 for i in range(len(dic))])
else:
output.append([1 if i == int(x) else 0 for i in range(len(dic))])
return output
class Nothing(object):
def __call__(self, x):
return x
if __name__ == "__main__":
#wikitextDataset = WikitextDataset('wikitext', transform=ToSymbols()) # samples made of symbols
wikitextDataset = WikitextDataset('wikitext', transform=ToOneHot()) # samples made of one-hot vectors
dataloader = DataLoader(wikitextDataset, batch_size=4, shuffle=True, num_workers=1, collate_fn=Nothing())
# Show the first 10 batches
for i_batch, sample_batched in enumerate(dataloader):
for one_batch in sample_batched:
print(one_batch)
print("-"*10)
if (i_batch + 1) % 10 == 0:
break
print("Done!")
|
[
"xfile2003@mail.ru"
] |
xfile2003@mail.ru
|
1fe3fb6fa971710011542bc58df695cb0c6d7730
|
c3082eb2adc43b311dd3c9ff16fd3ed9df85f266
|
/python/examples/fastapi/dynamic-response/main.py
|
e9ecf608f59322153963fae72ce85a28b0f05e1f
|
[] |
no_license
|
szabgab/slides
|
78818c7138331b3ba9e221c81da3678a46efe9b3
|
63bba06678554db737602f2fbcd6510c36037e8a
|
refs/heads/main
| 2023-08-31T07:13:51.536711
| 2023-08-29T13:17:59
| 2023-08-29T13:17:59
| 122,212,527
| 87
| 69
| null | 2023-05-19T06:55:11
| 2018-02-20T14:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
from fastapi import FastAPI
import datetime
app = FastAPI()
@app.get("/")
async def root():
return {"message": f"Hello World at {datetime.datetime.now()}"}
|
[
"gabor@szabgab.com"
] |
gabor@szabgab.com
|
c5248a3cae6dcafab9c6ad505abc712db1980a72
|
74ec860957869ea48af8535bf32f9fd87cc81011
|
/dna-methylation/scripts/develop/illumina450k/residuals/plot/scatter_comparison.py
|
22a26b4bf6f95cc4078b5e99f2a407784322a5a3
|
[] |
no_license
|
GillianGrayson/dna-methylation
|
f1a0878f4aa8c917bee9e5230387d6145826fb3a
|
e602ba91f3d275d92aadf0f874ac6f189adf547b
|
refs/heads/master
| 2022-02-08T03:31:22.423781
| 2022-02-01T16:50:37
| 2022-02-01T16:50:37
| 164,105,085
| 0
| 1
| null | 2020-03-20T18:08:24
| 2019-01-04T12:30:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,721
|
py
|
import pydnameth as pdm
import pandas as pd
import os.path
from scripts.develop.routines import *
max_rows = 10
fn = 'scatter_comparison_rows.xlsx'
rows_dict = {}
if os.path.isfile(fn):
df = pd.read_excel(fn)
tmp_dict = df.to_dict()
for key in tmp_dict:
curr_dict = tmp_dict[key]
rows_dict[key] = list(curr_dict.values())
fn = 'scatter_comparison_cols.xlsx'
cols_dict = {}
if os.path.isfile(fn):
df = pd.read_excel(fn)
tmp_dict = df.to_dict()
for key in tmp_dict:
curr_dict = tmp_dict[key]
cols_dict[key] = list(curr_dict.values())
data_bases = cols_dict['data_bases']
data_list = []
annotations_list = []
attributes_list = []
observables_list = []
data_params_list = []
for data_base in data_bases:
data = pdm.Data(
path='',
base=data_base
)
data_list.append(data)
annotations = pdm.Annotations(
name='annotations',
type='450k',
exclude='bad_cpgs',
select_dict={
'CHR': ['-X', '-Y']
}
)
annotations_list.append(annotations)
observables = pdm.Observables(
name='observables',
types={}
)
cells = pdm.Cells(
name='cells',
types='any'
)
target = get_target(data.base)
obs = get_observables_list(data.base)
data_params = get_data_params(data.base)
data_params['cells'] = ['Bcell', 'CD4T', 'CD8T', 'Gran', 'NK']
data_params['observables'] = ['gender']
attributes = pdm.Attributes(
target='age',
observables=observables,
cells=cells
)
attributes_list.append(attributes)
observables_list.append(obs)
data_params_list.append(data_params)
for run_id in range(0, len(rows_dict['items']), max_rows):
s_id = run_id
f_id = min(s_id + max_rows, len(rows_dict['items']))
curr_dict = {}
for key in rows_dict:
curr_dict[key] = rows_dict[key][s_id:f_id][::-1]
pdm.residuals_plot_scatter_comparison(
data_list=data_list,
annotations_list=annotations_list,
attributes_list=attributes_list,
observables_list=observables_list,
data_params_list=data_params_list,
rows_dict=curr_dict,
cols_dict=cols_dict,
method_params={
'line': 'no',
'fit': 'yes',
'semi_window': 4,
'box_b': 'Q1',
'box_t': 'Q99',
'legend_size': 1,
'add': 'none'
}
# method_params = {
# 'line': 'no',
# 'fit': 'no',
# 'semi_window': 4,
# 'box_b': 'Q1',
# 'box_t': 'Q99',
# 'legend_size': 1,
# 'add': 'none'
# }
)
|
[
"hewitt.archie@yandex.ru"
] |
hewitt.archie@yandex.ru
|
9e4a2dfbb16d5446314128083d3801b0a07f75b3
|
f2e5abc20da551051f0d1f306227b1b3838d7997
|
/NEZHA_MODEL/utils/data_utils.py
|
828586476622b66fb3540c628099fc1f1c89e1f0
|
[] |
no_license
|
chenmingwei00/bert-ner
|
3224fec5f979a5c1cfbcf5762fd3957dfded4206
|
6ca5cc34c71be879a34333c7dc4b1fbaa0870cc9
|
refs/heads/master
| 2022-11-30T04:31:42.682793
| 2020-08-11T14:08:38
| 2020-08-11T14:08:38
| 286,760,176
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,904
|
py
|
import tensorflow as tf
import os, codecs, pickle, collections, json
from NEZHA_TensorFlow.NEZHA_MODEL.nezha_bert import tokenization
from NEZHA_TensorFlow.NEZHA_MODEL.server.helper import set_logger
from NEZHA_TensorFlow.NEZHA_MODEL.nezha_bert.models import create_model, InputFeatures, InputExample
logger = set_logger('NER Training')
class DataProcessor(object):
"""Base class for NERdata converters for sequence classification NERdata sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this NERdata set."""
raise NotImplementedError()
@classmethod
def _read_data(cls, input_file):
"""Reads a BIO NERdata."""
with codecs.open(input_file, 'r', encoding='utf-8') as f:
lines = []
words = []
labels = []
for line in f:
contends = line.strip()
tokens = contends.split(' ')
if len(tokens) == 2:
words.append(tokens[0])
labels.append(tokens[1])
else:
if len(contends) == 0:
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
lines.append([l, w])
words = []
labels = []
continue
if contends.startswith("-DOCSTART-"):
words.append('')
continue
return lines
class NerProcessor(DataProcessor):
def __init__(self, output_dir):
self.labels = set()
self.output_dir = output_dir
def get_train_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "train.txt")), "train"
)
def get_dev_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
)
def get_test_examples(self, data_dir, stype='predict'):
if stype == 'predict':
return self._create_example(
self._read_test(os.path.join(data_dir, "validate_data.json")), "test")
else:
return self._create_example(
self._read_test2(os.path.join(data_dir, "predict.txt")), "test")
def get_labels(self, labels=None):
if labels is not None:
try:
# label types may be read from a file
if os.path.exists(labels) and os.path.isfile(labels):
with codecs.open(labels, 'r', encoding='utf-8') as fd:
for line in fd:
self.labels.add(line.strip())  # self.labels is a set
else:
# otherwise split the passed-in parameter on commas
self.labels = labels.split(',')
self.labels = set(self.labels) # to set
except Exception as e:
print(e)
# Deriving the labels by reading the train file carries some risk.
if os.path.exists(os.path.join(self.output_dir, 'label_list.pkl')):
with codecs.open(os.path.join(self.output_dir, 'label_list.pkl'), 'rb') as rf:
self.labels = pickle.load(rf)
else:
if len(self.labels) > 0:
self.labels = self.labels.union(set(["X", "[CLS]", "[SEP]"]))
with codecs.open(os.path.join(self.output_dir, 'label_list.pkl'), 'wb') as rf:
pickle.dump(self.labels, rf)
else:
self.labels = ["O", 'B-TIM', 'I-TIM', "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X",
"[CLS]", "[SEP]"]
return self.labels
def _create_example(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if set_type != 'test':
guid = "%s-%s" % (set_type, i)
text = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[0])
# if i == 0:
# print('label: ', label)
examples.append(InputExample(guid=guid, text=text, label=label))
else:
guid = tokenization.convert_to_unicode(line[0])
text = tokenization.convert_to_unicode(line[1])
# if i == 0:
# print('label: ', label)
examples.append(InputExample(guid=guid, text=text))
return examples
def _read_data(self, input_file):
"""Reads a BIO NERdata."""
with codecs.open(input_file, 'r', encoding='utf-8') as f:
lines = []
words = []
labels = []
for line in f:
contends = line
tokens = contends.split('\t')
if len(tokens) == 2:
words.append(tokens[0])
labels.append(tokens[-1].strip())
else:
if contends.strip() == '---' and len(words) > 0:
label = []
word = []
for l, w in zip(labels, words):
if len(l) > 0 and len(w) > 0:
label.append(l)
self.labels.add(l)
word.append(w)
assert len(label) == len(word)
lines.append(['|'.join(label), '|'.join(word)])
words = []
labels = []
continue
if contends.startswith("-DOCSTART-"):
continue
return lines
def _read_test(self, input_file):
"""Reads a BIO NERdata."""
with codecs.open(input_file, 'r', encoding='utf-8') as f:
test_data = json.load(f)
lines = []
for key_ids, value_input in test_data.items():
lines.append([key_ids, value_input])
return lines
def _read_test2(self, input_file):
"""Reads a BIO NERdata."""
with codecs.open(input_file, 'r', encoding='utf-8') as f:
test_data = f.readlines()
lines = []
for temp_text in test_data:
key_ids, value_input = temp_text.split("|-")
lines.append([str(key_ids), value_input])
return lines
def write_tokens(tokens, output_dir, mode):
"""
Write the parsed token sequence to a file.
Only enabled when mode == 'test'.
:param tokens:
:param mode:
:return:
"""
if mode == "test":
path = os.path.join(output_dir, "token_" + mode + ".txt")
wf = codecs.open(path, 'a', encoding='utf-8')
for token in tokens:
if token != "**NULL**":
wf.write(token + '\n')
wf.close()
import jieba.posseg as pseg
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, output_dir, mode,
part_speech_dcit):
"""
Analyze one example: convert its characters to ids and its labels to ids, then pack the result into an InputFeatures object.
:param ex_index: index
:param example: one example
:param label_list: list of labels
:param max_seq_length:
:param tokenizer:
:param output_dir
:param mode:
:return:
"""
label_map = {}
# enumerate from 1 so that label indexing starts at 1
for (i, label) in enumerate(label_list, 1):
label_map[label] = i
# save the label -> index map
if not os.path.exists(os.path.join(output_dir, 'label2id.pkl')):
with codecs.open(os.path.join(output_dir, 'label2id.pkl'), 'wb') as w:
pickle.dump(label_map, w)
# cut_words = pseg.cut(''.join(example.text.split('|'))) # because it contains space characters
# words_parts = []
# for w in cut_words:
# # if len(w.word.strip()) == 0: continue
# for k in range(len(w.word)):
# words_parts.append(part_speech_dcit[w.flag]) # bia
# # real_words.append(w.flag)
if mode != 'test':
labellist = example.label.split('|')
textlist = example.text.split('|')
assert len(textlist) == len(labellist)
tokens = []
labels = []
for i, word in enumerate(textlist):
# Tokenize; for Chinese this splits per character, but characters missing from BERT's vocab.txt (e.g. Chinese quotation marks) get WordPiece treatment; all of the per-character splitting could be replaced with list(input)
token = tokenizer.tokenize(word)
if len(token) > 1:
print('1111111')
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
else: # this branch is normally never taken
labels.append("X")
assert len(tokens) == len(labels)
else:
tokens_list_index = [] # maps each original character to its positions after tokenization
tokens = []
words_parts_ids=[]
assert len(words_parts)==len(list(example.text))
for i, word in enumerate(list(example.text)):
current_tokens_index = []
token = tokenizer.tokenize(word)
for toke in token:
current_tokens_index.append(len(tokens))
words_parts_ids.append(words_parts[i])
tokens.append(toke)
tokens_list_index.append(current_tokens_index)
words_parts=words_parts_ids
# tokens = tokenizer.tokenize(example.text)
try:
assert len(words_parts)==len(tokens)
except:
print('1111111111111111111111')
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)] # -2 because the sequence needs sentence-start and sentence-end markers
words_parts = words_parts[0:(max_seq_length - 2)]
if mode != 'test':
labels = labels[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
if mode != 'test':
label_ids = []
label_ids.append(label_map["[CLS]"]) # O OR CLS 没有任何影响,不过我觉得O 会减少标签个数,不过拒收和句尾使用不同的标志来标注,使用LCS 也没毛病
ntokens.append("[CLS]") # 句子开始设置CLS 标志
segment_ids.append(0)
# words_parts.insert(0, part_speech_dcit['[CLS]'])
# append("O") or append("[CLS]") not sure!
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if mode != 'test':
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]") # 句尾添加[SEP] 标志
segment_ids.append(0)
# words_parts.append(part_speech_dcit['[SEP]'])
# append("O") or append("[SEP]") not sure!
if mode != 'test':
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens) # convert the tokens in ntokens to ids
input_mask = [1] * len(input_ids)
# label_mask = [1] * len(input_ids)
# padding
# assert len(words_parts)==len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
# words_parts.append(0)
# we don't concerned about it!
if mode != 'test':
label_ids.append(0)
ntokens.append("**NULL**")
# label_mask.append(0)
# print(len(input_ids))
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# assert len(words_parts) == max_seq_length
if mode != 'test':
assert len(label_ids) == max_seq_length
# assert len(label_mask) == max_seq_length
# log a few sample examples
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
# logger.info("label_mask: %s" % " ".join([str(x) for x in label_mask]))
# pack everything into a structured object
if mode != 'test':
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
# label_mask = label_mask
)
else:
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=None,
guid=example.guid,
)
# only takes effect when mode == 'test'
write_tokens(ntokens, output_dir, mode)
return feature
def filed_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file, output_dir, mode=None, part_speech_dcit=None):
"""
Convert the data to TFRecord format to serve as model input.
:param examples: examples
:param label_list: list of labels
:param max_seq_length: preset maximum sequence length
:param tokenizer: tokenizer object
:param output_file: TFRecord output path
:param mode:
:return:
"""
writer = tf.python_io.TFRecordWriter(output_file)
# iterate over the training data
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
# for each training example,
if mode != 'test':
assert len(example.text.split('|')) == len(example.label.split('|'))
feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, output_dir, mode,
part_speech_dcit)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(
value=[value.encode('utf-8') if type(
value) == str else value]))
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if mode != 'test':
features["label_ids"] = create_int_feature(feature.label_ids)
else:
features["guid"] = create_bytes_feature(feature.guid)
# features["words_parts_ids"] = create_int_feature(feature.words_parts)
# features["label_mask"] = create_int_feature(feature.label_mask)
# tf.train.Example/Feature is a protobuf-based format that makes serialization convenient
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
if is_training:
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
# "words_parts_ids": tf.FixedLenFeature([seq_length], tf.int64),
# "label_mask": tf.FixedLenFeature([seq_length], tf.int64),
}
else:
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"guid": tf.FixedLenFeature([], tf.string),
# "words_parts_ids": tf.FixedLenFeature([seq_length], tf.int64),
# "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
# "label_ids":tf.VarLenFeature(tf.int64),
# "label_mask": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=300)
d = d.apply(tf.data.experimental.map_and_batch(lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_calls=8, # number of CPU cores used for parallel data processing; do not exceed your machine's core count
drop_remainder=drop_remainder))
d = d.prefetch(buffer_size=4)
return d
return input_fn
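# A minimal usage sketch (not part of the original module; the file name,
# sequence length, and batch size are assumptions):
#
#   input_fn = file_based_input_fn_builder("train.tf_record", seq_length=128,
#                                          is_training=True, drop_remainder=True)
#   dataset = input_fn({"batch_size": 32})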
|
[
"614489362@qq.com"
] |
614489362@qq.com
|
2e307cc53432f29673c4057851212ede07e1b46d
|
7f1830dd630a6ee7fe63a01fd0bff31c7c1979a3
|
/users/urls.py
|
3d83515c2b19584e96029f25c21881319baa06a0
|
[] |
no_license
|
kot6egemot/musicwebsite
|
d610c81a764e809b502b762b7355e74157a5d26e
|
585f7a34a3ecbcd9b6b827240b8cf7875b1955de
|
refs/heads/true_media
| 2022-12-03T00:57:38.215244
| 2019-03-15T21:22:22
| 2019-03-15T21:22:22
| 165,709,813
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from django.urls import path
from django.conf.urls import url
from django.contrib.auth.views import auth_login
from .views import logout_view,login_view,register_view
app_name = 'users'
urlpatterns = [
path('login/', login_view, name = 'login_view'),
path('logout/', logout_view, name = 'logout_view'),
path('register/', register_view, name = 'register_view'),
]
|
[
"ex-2k@yandex.ru"
] |
ex-2k@yandex.ru
|
d64dd27b9a3bc143aa7bf6611da63721f4d4da41
|
5a4539b69985e9d61162a5b1876dea244da98c07
|
/config.py
|
a7d974a5f6e917e179e1f1a9a6a6152acb1acaf3
|
[] |
no_license
|
Koperundevi/user-profile-api
|
50d5af289ae22a43eef772b6c0a26dcc4b96f645
|
4e6c9a63665e9ffb69a76a325085e33c493746e2
|
refs/heads/master
| 2023-04-01T07:14:25.872925
| 2021-04-04T11:03:31
| 2021-04-04T11:03:31
| 353,040,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
from datetime import timedelta
import sys, os
'''get config based on environment'''
def getEnvConfig():
env = os.getenv('FLASK_ENV')
return app_config[env] if env else DevelopmentConfig
class Config(object):
"""
Common configurations
"""
PROPAGATE_EXCEPTIONS = True
class DevelopmentConfig(Config):
"""
Development configurations
"""
SQLALCHEMY_ECHO = False
DEBUG = True
#github authorization URI
GITHUB_AUTH_URI = """https://github.com/login/oauth/authorize?response_type=code&client_id=5a429afe708ee411ec8f&redirect_uri=http%3A%2F%2F127.0.0.1%3A8000%2Flogin%2Fcallback&scope=user"""
#postgresql://user:pw@host:port/db
SQLALCHEMY_DATABASE_URI = """postgresql://postgres:postgres@localhost:5432/userprofileapp"""
#github access token header
GITHUB_ACCESS_TOKEN_HEADER = {"client_id": "5a429afe708ee411ec8f",
"client_secret": "a927daabebe346f36a27bf75aa0a0de99d487d34",
"code":""
}
SQLALCHEMY_TRACK_MODIFICATIONS = True
JWT_SECRET_KEY = 'a927daabebe346f36a27bf75aa0a0de99d487d34'
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=10)
FLASK_RUN_HOST = '127.0.0.1'
FLASK_RUN_PORT = 8000
LOGIN_SUCCESS_REDIRECT_URI = "http://localhost:4200/profile?token="
class ProductionConfig(Config):
"""
Production configurations
"""
SQLALCHEMY_ECHO = False
DEBUG = False
#github authorization URI
GITHUB_AUTH_URI = """https://github.com/login/oauth/authorize?response_type=code&client_id=90b8b080bb9c3ac59350&redirect_uri=http%3A%2F%2Fuser-profile-api.herokuapp.com%2Flogin%2Fcallback&scope=user"""
#postgresql://user:pw@host:port/db
SQLALCHEMY_DATABASE_URI = """postgresql://tozpjdfkrgpjjd:b1f7915e9f8574f731d58214ca6a76e5d8a95999869fd37cb8575f3889824e59@ec2-52-45-73-150.compute-1.amazonaws.com:5432/dccoog9uvjg7bg"""
#github access token header
GITHUB_ACCESS_TOKEN_HEADER = {"client_id": "90b8b080bb9c3ac59350",
"client_secret": "e84f6c8fe2ed62d024eaa13c3e743bee1653589c",
"code":""
}
SQLALCHEMY_TRACK_MODIFICATIONS = True
JWT_SECRET_KEY = 'e84f6c8fe2ed62d024eaa13c3e743bee1653589c'
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=5)
FLASK_RUN_HOST = '0.0.0.0'
FLASK_RUN_PORT=5000
LOGIN_SUCCESS_REDIRECT_URI = "https://user-profile-appln.herokuapp.com/profile?token="
app_config = {
'development': DevelopmentConfig,
'production': ProductionConfig
}
|
[
"koperundevianbu@gmail.com"
] |
koperundevianbu@gmail.com
|
a9582fe1ff3a16c1aa108f54b5ff1ae3984f5ccb
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_171/ch65_2019_06_07_01_24_18_525767.py
|
79aa0ff8a8f6334244edcac2ba434f9ec46d556f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
def acha_bigramas(string):
lista=[]
i=0
while i<len(string):
if string[i:i+2] not in lista and len(string[i:i+2])==2:
lista.append(string[i:i+2])
i+=1
return lista
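# A quick check, noted for illustration (not in the original backup):
#   acha_bigramas("banana")  # returns ['ba', 'an', 'na']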
|
[
"you@example.com"
] |
you@example.com
|
26571c6b5080ba1be22ee2d52d6fd124c821b6c7
|
b5952ec93687e66bc0fc0ee7dbd6b2a2cfc6bbaa
|
/Server/app/__init__.py
|
a4e30a7b0af85f379d2994d8e66779eb2078688e
|
[] |
no_license
|
elektrobohemian/imi-unicorns
|
32001448ea7cbc5276a9c3fb54fc7e51d0029138
|
a3dac66d8d6d1d36749473883ced8de7e2678e26
|
refs/heads/master
| 2020-03-31T08:37:48.046267
| 2019-03-16T07:06:24
| 2019-03-16T07:06:24
| 152,065,376
| 2
| 0
| null | 2018-10-08T10:59:58
| 2018-10-08T10:59:58
| null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
from flask import Flask
from pymongo import MongoClient
# Setup Flask app
app = Flask(__name__)
app.config.from_object('app.config')
# Setup MongoDB connection
client = MongoClient('localhost', 27017)
db = client['unicorns']
from app import category_controller, image_controller, book_controller, genre_controller, maps_controller, color_controller
# Instantiate controller classes so they can be used in the project
category_controller = category_controller.Category_Controller()
image_controller = image_controller.Image_Controller()
books_controller = book_controller.Book_Controller()
genre_controller = genre_controller.Genre_Controller()
maps_controller = maps_controller.Maps_Controller()
color_controller = color_controller.Color_Controller()
from app import api
if __name__ == '__main__':
app.run(debug=True)
|
[
"caglar.oezel@gmail.com"
] |
caglar.oezel@gmail.com
|
6e90658a4808635df02ace2c0dd08c4d5d2ceb3e
|
4beabdb5089e3284251dcaf046366c35d3afe02f
|
/communication/multithreading/Client.py
|
8743a1106aebdb0b5ab603d90c530f73da03cc10
|
[] |
no_license
|
AndrewFendrich/Mandelbrot
|
c3fa2b1463d6e01b91ac0a3c53ef88c8e1716641
|
074ebd9028c13a9f840c2436ab2c8c3d2275dbf6
|
refs/heads/master
| 2021-01-13T00:52:24.060863
| 2017-05-08T14:30:02
| 2017-05-08T14:30:02
| 50,623,517
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
from multiprocessing.connection import Client
from array import array
address = ('localhost', 6000)
with Client(address, authkey=b'secret password') as conn:
print(conn.recv()) # => [2.25, None, 'junk', float]
print(conn.recv_bytes()) # => 'hello'
arr = array('i', [0, 0, 0, 0, 0])
print(conn.recv_bytes_into(arr)) # => 8
print(arr) # => array('i', [42, 1729, 0, 0, 0])
print(conn.recv())
|
[
"afendrich@gmail.com"
] |
afendrich@gmail.com
|
2e3e48d88ad0126676c3194181242ceb8c20f327
|
3d110d335dc51c8f270676675c742cf406ecdeb5
|
/RESTSerializer/serializers.py
|
f8dc80dafac6c9d175cf5572054eb4ebbf4f8339
|
[] |
no_license
|
1284753334/RESTAPI
|
2de797cc931214e4c32731c71e5f0163ca00e3fb
|
f96b2a868a892912d6933cc8444e5ad7b8e91f62
|
refs/heads/master
| 2022-11-18T08:12:00.783699
| 2020-07-16T04:30:32
| 2020-07-16T04:30:32
| 280,053,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
from rest_framework import serializers
from RESTSerializer.models import Person, Student, Book
# Plain hand-written serializer: takes more code than the ModelSerializer versions below
class PersonSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
p_name = serializers.CharField(max_length=32)
p_age = serializers.IntegerField(default=1)
p_sex = serializers.BooleanField(default=False)
def create(self, validated_data):
return Person.objects.create(**validated_data)
def update(self, instance, validated_data):
# If a value is missing, fall back to the existing one
instance.p_name = validated_data.get('p_name',instance.p_name)
instance.p_age = validated_data.get('p_age',instance.p_age)
instance.p_sex = validated_data.get('p_sex',instance.p_sex)
instance.save()
return instance
# Model-based serialization with ModelSerializer
class StudentSerializer(serializers.ModelSerializer):
class Meta:
# the model class itself, without parentheses
model = Student
fields = ('s_name','s_age')
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Book
fields = ('b_price','b_name')
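# A minimal usage sketch (not part of the original module; the field values
# are assumptions):
#
#   s = PersonSerializer(data={'p_name': 'Tom', 'p_age': 3})
#   if s.is_valid():
#       person = s.save()  # dispatches to create()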
|
[
"xiaomin@169.com"
] |
xiaomin@169.com
|
8df0a5de30770486d65f5750ddf7332158529917
|
385ce240ae264a1449079c21bd0c4cbe7c0fe3b8
|
/myowntests/ifelseladder.py
|
6be8802a946907017b902d6c6c70418b5968deb2
|
[] |
no_license
|
Maxcousin123/Python-workspace
|
3ed60ae80d790b5c055bf47872ff0fdd39f4ec58
|
326b023190a12e082dcb35ae5ab8ef644c32159b
|
refs/heads/master
| 2022-11-24T11:05:08.707003
| 2020-07-29T06:32:08
| 2020-07-29T06:32:08
| 283,415,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
maths=int(input('fill your math grade'))
physics=int(input('fill your physics grade'))
chemistry=int(input('fill your chemistry grade'))
av=(maths+physics+chemistry)/3
if maths<35:
print('Exam Failed')
else:print('Exam passed')
if physics<35:
print('Exam failed')
else:print('Exam passed')
if chemistry<35:
print('Exam failed')
else:print('Exam passed')
if maths<35 and physics<35 and chemistry<35:
print('Exams failed')
elif av<=59:
print('your grade is c')
elif av<=69:
print('your grade is b')
else:
print('your grade is a')
#69
|
[
"66350396+Maxcousin123@users.noreply.github.com"
] |
66350396+Maxcousin123@users.noreply.github.com
|
1bbcca3940cb02865ed6ba250a1b7e06fe44680c
|
e72337dd656adfabe62325a472763a2836f30f8e
|
/problem19.py
|
2837b3d4e8213832c0d860e66e9fe792f7222d5a
|
[] |
no_license
|
deepachari/euler
|
1b3285b43c136122d59a418740f4ae4fc853a54f
|
6995293cd6cb391772bd883d17c12d545aa162a3
|
refs/heads/master
| 2021-07-25T05:17:36.860375
| 2017-11-05T11:01:19
| 2017-11-05T11:01:19
| 109,484,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 969
|
py
|
# You are given the following information, but you may prefer to do some research for yourself.
#
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
from datetime import date, timedelta
def date_generator(start, end):
while start <= end:
yield start
start += timedelta(days=1)
def problem19(start, end):
num_sundays = 0
for d in date_generator(start, end):
if d.day == 1 and d.weekday() == 6:
num_sundays += 1
return num_sundays
start = date(year=1901, month=1, day=1)
end = date(year=2000, month=12, day=31)
print(problem19(start, end))
|
[
"dchari@newclassrooms.org"
] |
dchari@newclassrooms.org
|
a30c882225f0729f7727634a091398bc4b341d00
|
a58fcf9467749de7d269c5b17430773069e29791
|
/designate/exceptions.py
|
bd807f15966c85208c564dccc08126f802c00c8e
|
[
"Apache-2.0"
] |
permissive
|
Woody89/designate-private
|
586df6c28a2da573663487e4728c3fddfef095af
|
0a6ed5a1d7cdac5cb1e9dec8fd3ddfb9a77c58f5
|
refs/heads/master
| 2021-01-22T19:22:49.391876
| 2017-08-19T06:16:53
| 2017-08-19T06:16:53
| 100,774,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,905
|
py
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
class Base(Exception):
error_code = 500
error_type = None
error_message = None
errors = None
def __init__(self, *args, **kwargs):
self.errors = kwargs.pop('errors', None)
self.object = kwargs.pop('object', None)
super(Base, self).__init__(*args, **kwargs)
if len(args) > 0 and isinstance(args[0], six.string_types):
self.error_message = args[0]
class Backend(Exception):
pass
class RelationNotLoaded(Base):
error_code = 500
error_type = 'relation_not_loaded'
def __init__(self, *args, **kwargs):
self.relation = kwargs.pop('relation', None)
super(RelationNotLoaded, self).__init__(*args, **kwargs)
self.error_message = "%(relation)s is not loaded on %(object)s" % \
{"relation": self.relation, "object": self.object.obj_name()}
def __str__(self):
return self.error_message
class AdapterNotFound(Base):
error_code = 500
error_type = 'adapter_not_found'
class NSD4SlaveBackendError(Backend):
pass
class NotImplemented(Base, NotImplementedError):
pass
class XFRFailure(Base):
pass
class ConfigurationError(Base):
error_type = 'configuration_error'
class UnknownFailure(Base):
error_code = 500
error_type = 'unknown_failure'
class CommunicationFailure(Base):
error_code = 504
error_type = 'communication_failure'
class NeutronCommunicationFailure(CommunicationFailure):
"""
Raised in case one of the alleged Neutron endpoints fails.
"""
error_type = 'neutron_communication_failure'
class NoFiltersConfigured(ConfigurationError):
error_code = 500
error_type = 'no_filters_configured'
class NoServersConfigured(ConfigurationError):
error_code = 500
error_type = 'no_servers_configured'
class MultiplePoolsFound(ConfigurationError):
error_code = 500
error_type = 'multiple_pools_found'
class NoPoolTargetsConfigured(ConfigurationError):
error_code = 500
error_type = 'no_pool_targets_configured'
class OverQuota(Base):
error_code = 413
error_type = 'over_quota'
expected = True
class QuotaResourceUnknown(Base):
error_type = 'quota_resource_unknown'
class InvalidObject(Base):
error_code = 400
error_type = 'invalid_object'
expected = True
class BadRequest(Base):
error_code = 400
error_type = 'bad_request'
expected = True
class EmptyRequestBody(BadRequest):
error_type = 'empty_request_body'
expected = True
class InvalidUUID(BadRequest):
error_type = 'invalid_uuid'
class InvalidRecord(BadRequest):
error_type = 'invalid_record'
class NetworkEndpointNotFound(BadRequest):
error_type = 'no_endpoint'
error_code = 403
class MarkerNotFound(BadRequest):
error_type = 'marker_not_found'
class NotEqual(Base):
error_type = 'udn_record_count not equals record in db'
class NoChange(Base):
error_type = 'No changes'
class ValueError(BadRequest):
error_type = 'value_error'
class InvalidMarker(BadRequest):
error_type = 'invalid_marker'
class InvalidSortDir(BadRequest):
error_type = 'invalid_sort_dir'
class InvalidLimit(BadRequest):
error_type = 'invalid_limit'
class InvalidSortKey(BadRequest):
error_type = 'invalid_sort_key'
class InvalidJson(BadRequest):
error_type = 'invalid_json'
class NoneIpAddress(BadRequest):
error_type = 'none_ip_address'
class InvalidOperation(BadRequest):
error_code = 400
error_type = 'invalid_operation'
class UnsupportedAccept(BadRequest):
error_code = 406
error_type = 'unsupported_accept'
class UnsupportedContentType(BadRequest):
error_code = 415
error_type = 'unsupported_content_type'
class InvalidZoneName(Base):
error_code = 400
error_type = 'invalid_zone_name'
expected = True
class InvalidAclName(Base):
error_code = 400
error_type = 'invalid_acl_name'
expected = True
class InvalidRecordSetName(Base):
error_code = 400
error_type = 'invalid_recordset_name'
expected = True
class InvalidRecordSetLocation(Base):
error_code = 400
error_type = 'invalid_recordset_location'
expected = True
class InvaildZoneTransfer(Base):
error_code = 400
error_type = 'invalid_zone_transfer_request'
class InvalidTTL(Base):
error_code = 400
error_type = 'invalid_ttl'
class ZoneHasSubZone(Base):
error_code = 400
error_type = 'zone_has_sub_zone'
class Forbidden(Base):
error_code = 403
error_type = 'forbidden'
expected = True
class IllegalChildZone(Forbidden):
error_type = 'illegal_child'
class IllegalParentZone(Forbidden):
error_type = 'illegal_parent'
class IncorrectZoneTransferKey(Forbidden):
error_type = 'invalid_key'
class Duplicate(Base):
expected = True
error_code = 409
error_type = 'duplicate'
class DuplicateServiceStatus(Duplicate):
error_type = 'duplicate_service_status'
class DuplicateQuota(Duplicate):
error_type = 'duplicate_quota'
class DuplicateServer(Duplicate):
error_type = 'duplicate_server'
class DuplicateTsigKey(Duplicate):
error_type = 'duplicate_tsigkey'
class DuplicateZone(Duplicate):
error_type = 'duplicate_zone'
class DuplicateAcl(Duplicate):
error_type = 'duplicate_acl'
class DuplicateTld(Duplicate):
error_type = 'duplicate_tld'
class DuplicateRecordSet(Duplicate):
error_type = 'duplicate_recordset'
class DuplicateRecord(Duplicate):
error_type = 'duplicate_record'
class DuplicateBlacklist(Duplicate):
error_type = 'duplicate_blacklist'
class DuplicatePoolManagerStatus(Duplicate):
error_type = 'duplication_pool_manager_status'
class DuplicatePool(Duplicate):
error_type = 'duplicate_pool'
class DuplicatePoolAttribute(Duplicate):
error_type = 'duplicate_pool_attribute'
class DuplicatePoolNsRecord(Duplicate):
error_type = 'duplicate_pool_ns_record'
class DuplicatePoolNameserver(Duplicate):
error_type = 'duplicate_pool_nameserver'
class DuplicatePoolTarget(Duplicate):
error_type = 'duplicate_pool_target'
class DuplicatePoolTargetOption(Duplicate):
error_type = 'duplicate_pool_target_option'
class DuplicatePoolTargetMaster(Duplicate):
error_type = 'duplicate_pool_target_master'
class DuplicatePoolAlsoNotify(Duplicate):
error_type = 'duplicate_pool_also_notify'
class DuplicateZoneImport(Duplicate):
error_type = 'duplicate_zone_import'
class DuplicateZoneExport(Duplicate):
error_type = 'duplicate_zone_export'
class DuplicateViewDuplicate(Duplicate):
error_type = 'duplicate_view_export'
class DuplicateZdnsViewInfo(Duplicate):
error_type = 'duplicate_zdns_view_info'
class DuplicateViewZdnsView(Duplicate):
error_type = 'duplicate_view_zdns_view_association'
class DuplicateView(Duplicate):
error_type = 'duplicate_view'
class NeedView(BadRequest):
error_type = 'attributes_need_view'
class MethodNotAllowed(Base):
expected = True
error_code = 405
error_type = 'method_not_allowed'
class DuplicateZoneTransferRequest(Duplicate):
error_type = 'duplicate_zone_transfer_request'
class DuplicateZoneTransferAccept(Duplicate):
error_type = 'duplicate_zone_transfer_accept'
class DuplicateZoneAttribute(Duplicate):
error_type = 'duplicate_zone_attribute'
class DuplicateZoneMaster(Duplicate):
    error_type = 'duplicate_zone_master'
class NotFound(Base):
expected = True
error_code = 404
error_type = 'not_found'
class Failed(Base):
expected = True
error_code = 500
error_type = 'create_failed'
class ServiceStatusNotFound(NotFound):
error_type = 'service_status_not_found'
class QuotaNotFound(NotFound):
error_type = 'quota_not_found'
class ServerNotFound(NotFound):
error_type = 'server_not_found'
class TsigKeyNotFound(NotFound):
error_type = 'tsigkey_not_found'
class BlacklistNotFound(NotFound):
error_type = 'blacklist_not_found'
class ZoneNotFound(NotFound):
error_type = 'zone_not_found'
class AclNotFound(NotFound):
error_type = 'acl_not_found'
class ZoneMasterNotFound(NotFound):
error_type = 'zone_master_not_found'
class ZoneAttributeNotFound(NotFound):
error_type = 'zone_attribute_not_found'
class TldNotFound(NotFound):
error_type = 'tld_not_found'
class RecordSetNotFound(NotFound):
error_type = 'recordset_not_found'
class RecordNotFound(NotFound):
error_type = 'record_not_found'
class AllFailed(Failed):
error_type = 'all record-create failed'
class PartlyFailed(Failed):
error_type = 'some record-create failed'
class ReportNotFound(NotFound):
error_type = 'report_not_found'
class PoolManagerStatusNotFound(NotFound):
error_type = 'pool_manager_status_not_found'
class PoolNotFound(NotFound):
error_type = 'pool_not_found'
class NoValidPoolFound(NotFound):
error_type = 'no_valid_pool_found'
class PoolAttributeNotFound(NotFound):
error_type = 'pool_attribute_not_found'
class PoolNsRecordNotFound(NotFound):
error_type = 'pool_ns_record_not_found'
class PoolNameserverNotFound(NotFound):
error_type = 'pool_nameserver_not_found'
class PoolTargetNotFound(NotFound):
error_type = 'pool_target_not_found'
class PoolTargetOptionNotFound(NotFound):
error_type = 'pool_target_option_not_found'
class PoolTargetMasterNotFound(NotFound):
error_type = 'pool_target_master_not_found'
class PoolAlsoNotifyNotFound(NotFound):
error_type = 'pool_also_notify_not_found'
class ZoneTransferRequestNotFound(NotFound):
error_type = 'zone_transfer_request_not_found'
class ZoneTransferAcceptNotFound(NotFound):
error_type = 'zone_transfer_accept_not_found'
class ZoneImportNotFound(NotFound):
error_type = 'zone_import_not_found'
class ZoneExportNotFound(NotFound):
error_type = 'zone_export_not_found'
class ViewNotFound(NotFound):
error_type = 'view_not_found'
class ViewAclNotFound(NotFound):
error_type = 'view_acl_not_found'
class AclsIsNone(NotFound):
error_type = 'acl_ids_is_none'
class ParamsIsNotLegal(NotFound):
error_type = 'params_is_not_legal'
class AclidsMustBeList(NotFound):
error_type = 'acl_ids_must_be_list'
class CreateViewFailed(NotFound):
error_type = 'create_view_failed'
class LastServerDeleteNotAllowed(BadRequest):
error_type = 'last_server_delete_not_allowed'
EZDNS = {
"1": "any or none acl is read only",
"2": "acl already exists",
"3": "operate non-exist acl",
"4": "dns64 prefix should be a ipv6 addr",
"5": "invalid dns64 prefix netmask",
"6": "suffix is needed if netmask of prefix smaller than 96",
"7": "DNS64 setting already exists",
"8": "operate non-exist DNS64 setting",
"9": "tsig key already exists",
"10": "delete acl is using by view",
"11": "operate non-exist zone",
"12": "cache file not exist",
"13": "cache size too large",
"14": "operate non-exist view",
"15": "get zone from backend server failed",
"16": "zone already exists",
"17": "unsupported meta data type",
"18": "view already exists",
"19": "delete default view",
"20": "cann't modify acl of default view",
"21": "operate non-exist rr",
"22": "conflict key secret",
"23": "not supported zone type",
"24": "operate non-exist shared rr",
"25": "cann't delete the last shared rr",
"26": "operate non-exist tsig key",
"27": "reconfig dns server failed",
"28": "no rndc-confgen installed",
"29": "lack/white list already exists",
"30": "operate non-exist back/white list",
"31": "zone owner doesn't has view owner",
"32": "unsupport acl action",
"33": "no pine-control installed",
"34": "server already started",
"35": "RR format error",
"36": "zone transfer failed",
"37": "more than one ad zone owner",
"38": "update zone failed",
"39": "shared rr already exists",
"40": "add duplicate rr",
"41": "add exclusive rr",
"42": "short of glue rr",
"43": "conflict with exists cname",
"44": "delete unknown rr",
"45": "can't delete soa rr",
"46": "no ns left after delete",
"47": "delete glue needed by other rr",
"48": "reverse zone doesn't exist",
"49": "rdata is valid",
"50": "rr is out of zone",
"51": "onfigure value isn't valid",
"52": "unknown forward style",
"53": "duplicate zone master",
"54": "forwarder exists",
"55": "operate non-exist forwarder",
"56": "operate non-exist view on node",
"57": "already exists root zone",
"58": "only A/AAAA NS is allowed in hint zone",
"59": "already has root configuration",
"60": "rr type isn't supported",
"61": "can't update slave zone",
"62": "duplicate local domain policy",
"63": "zone name isn't valid",
"64": "add duplicate host",
"65": "soa serial number degraded",
"66": "root isn't support in local policy",
"67": "auth zone with same name already exists",
"68": "stub zone with same name already exists",
"69": "forward zone with same name already exists",
"70": "acl is used by view",
"71": "acl is used by AD zone",
"72": "rrl policy already exist",
"73": "non-exist rrl policy",
"74": "delete monitor strategy in use",
"75": "monitor strategy already exist",
"76": "non exist monitor strategy",
"77": "node's view querysource already exists",
"78": "node's view querysource not exist",
"79": "too much rrls(over 999)",
"100": "version is unknown",
"101": "patch file broken",
"102": "source code isn't a release version",
"103": "binding different iface with same ip address",
"104": "ntp interval out of range",
"105": "send a test mail failed, check the configuration",
"300": "invalid ip address",
"301": "no dns server installed",
"302": "not enough params",
"303": "not supported backup method",
"304": "not supported command method",
"305": "service hasn't been init",
"306": "not supported ha type",
"307": "member is not accessible",
"308": "wrong username and password",
"309": "nic config failed",
"310": "service hasn't been started",
"311": "init params is required",
"312": "invalid port",
"313": "verify node failed",
"314": "request body json format error",
"315": "connect backup server timeout",
"316": "data recovery failed",
"317": "data backup failed",
"318": "lower limit bigger than upper limit",
"319": "execute command timeout",
"320": "password/role failed",
"404": "Wrong url, please check it",
"421": "Equipment internal error !",
"600": "operate non-exist group",
"601": "member with same ip alreasy exists",
"602": "member with same name alreasy exists",
"603": "operate non-exist member",
"604": "not supported service type",
"605": "member command queue is full",
"606": "member is performing data recovery",
"607": "group already exists",
"608": "cann't operate local group",
"609": "user already exists",
"610": "operate non-exist user",
"611": "init member service failed",
"612": "owners is required",
"613": "cann't delete the last owner for resource",
"614": "add duplicate owners",
"615": "old password is wrong",
"616": "cann't delete local group",
"617": "cann't delete local member",
"618": "permission denied",
"619": "unkown authority rule",
"620": "authority rule already exist",
"621": "invalid backup data",
"622": "device already under management",
"623": "some devices don't exist any more",
"624": "cann't operation inactive cloud",
"625": "cann't add multi backup devices",
"626": "no backup device",
"627": "not master device",
"628": "not backup device",
"629": "not slave device",
"630": "hasn't managed by cloud yet",
"631": "node can't communicate with master",
"632": "invalid exception handle method",
"800": "time out while sending alarm msg"
}
class ZdnsErrMessage(Base):
error_type = "Equipment Internal Error"
expected = True
    def __init__(self, *args, **kwargs):
        self.errors = kwargs.pop('errors', None)
        self.object = kwargs.pop('object', None)
        # skip Base.__init__ (its work is replicated above) and call Exception directly
        super(Base, self).__init__(*args, **kwargs)
        if len(args) > 0 and isinstance(args[0], six.string_types):
            # prefix the human-readable EZDNS message with the numeric error code
            self.error_message = str(args[0]) + ": " + EZDNS[args[0]]
# @staticmethod
# def getmsg(cord):
# msg = str(cord) + ": " + EZDNS[cord]
# return msg
class AclUsedByView(Base):
error_type = 'acl used by view'
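# Usage sketch (illustrative only -- `handle_http_error` below is a hypothetical caller,
# not part of this module):
#
#   try:
#       raise ZoneNotFound("zone example.com. does not exist")
#   except NotFound as e:
#       handle_http_error(e.error_code, e.error_type)   # 404, 'zone_not_found'
#
#   err = ZdnsErrMessage("16")
#   err.error_message                                   # "16: zone already exists"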
|
[
"dongpzh@adtec.com.cn"
] |
dongpzh@adtec.com.cn
|
e730f6b2b0eca72e7e22623e746eb8a5f9fc2013
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/I_to_M_Gk3_no_pad/pyramid_2side/bce_s001_tv_s0p1_L8/step09_2side_L8.py
|
150ed37ed776a944b01f8ce846b25ba5de404b7b
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,319
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                           ### path of the currently running step10_b.py
code_exe_path_element = code_exe_path.split("\\")                    ### split the path so we can find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2")              ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_Generate_M_see
from step09_c_train_step import train_step_Single_output_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
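### Note: judging by the names and by depth_level=8 below, each 17-element list encodes the
### number of conv blocks at each U-Net position (8 encoder levels + bottleneck + 8 decoder
### levels, symmetric around the middle). "1side_N" fills the outermost N levels per side
### with at least one block, and "2side_M" upgrades the outermost M of those to two blocks.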
pyramid_1side_1__2side_0 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
pyramid_1side_1__2side_1 = [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
pyramid_1side_2__2side_0 = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
pyramid_1side_2__2side_1 = [2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2]
pyramid_1side_2__2side_2 = [2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2]
pyramid_1side_3__2side_0 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]
pyramid_1side_3__2side_1 = [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2]
pyramid_1side_3__2side_2 = [2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2]
pyramid_1side_3__2side_3 = [2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2]
pyramid_1side_4__2side_0 = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]
pyramid_1side_4__2side_1 = [2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2]
pyramid_1side_4__2side_2 = [2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2]
pyramid_1side_4__2side_3 = [2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2]
pyramid_1side_4__2side_4 = [2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
pyramid_1side_5__2side_0 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
pyramid_1side_5__2side_1 = [2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2]
pyramid_1side_5__2side_2 = [2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2]
pyramid_1side_5__2side_3 = [2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2]
pyramid_1side_5__2side_4 = [2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2]
pyramid_1side_5__2side_5 = [2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_0 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
pyramid_1side_6__2side_1 = [2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2]
pyramid_1side_6__2side_2 = [2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2]
pyramid_1side_6__2side_3 = [2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2]
pyramid_1side_6__2side_4 = [2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2]
pyramid_1side_6__2side_5 = [2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_6 = [2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2]
pyramid_1side_7__2side_0 = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
pyramid_1side_7__2side_1 = [2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_7__2side_2 = [2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_7__2side_3 = [2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_7__2side_4 = [2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_7__2side_5 = [2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2]
pyramid_1side_7__2side_6 = [2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2]
pyramid_1side_7__2side_7 = [2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_0 = [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1]
pyramid_1side_8__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_8__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_8__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_8__2side_4 = [2, 2, 2, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_8__2side_5 = [2, 2, 2, 2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_6 = [2, 2, 2, 2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_7 = [2, 2, 2, 2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_8 = [2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_0 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
pyramid_1side_9__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_9__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_9__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_9__2side_4 = [2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_9__2side_5 = [2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_6 = [2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_7 = [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_8 = [2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_9 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
#########################################################################################
ch032_pyramid_1side_1__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_2__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_2__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_3__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_3__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_3__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_4__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_4__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_4__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_4__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_5__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_5__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_5__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_5__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_5__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_6__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_6__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_6__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_6__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_6__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_6__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_7__2side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_8__2side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_1, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_2, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
ch032_pyramid_1side_9__2side_9 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9, ch_upper_bound= 2 ** 14).set_gen_op(I_Generate_M_see).set_train_step(train_step_Single_output_I_to_M)
#########################################################################################
###############################################################################################################################################################################################
if __name__ == "__main__":
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_4__2side_2
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
4ebf8a054879eac94e2f5d8095ece9d5956ab041
|
71dad77ffa026e8ce2f80ac4214220b02eaa77fd
|
/src/concordion/impl/java.py
|
26eccce54f06e2fe190ccd312eb7875ad62ff44a
|
[] |
no_license
|
jcplessis/pyconcordion
|
bf288e3f8fb4a5896396e3a6090cd0672262f1be
|
09fdbc0094c17f046f46202d216f8d91f8f5bcd3
|
refs/heads/master
| 2021-01-22T11:38:33.060065
| 2015-03-16T08:19:14
| 2015-03-16T08:19:14
| 32,134,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,984
|
py
|
import os
import glob
import sys
package = """
package %s;
"""
imports = """
import java.net.*;
import org.apache.ws.commons.util.NamespaceContextImpl;
import org.apache.xmlrpc.common.TypeFactoryImpl;
import org.apache.xmlrpc.common.XmlRpcController;
import org.apache.xmlrpc.common.XmlRpcStreamConfig;
import org.apache.xmlrpc.parser.NullParser;
import org.apache.xmlrpc.parser.TypeParser;
import org.apache.xmlrpc.serializer.NullSerializer;
import org.apache.xmlrpc.client.XmlRpcClient;
import org.apache.xmlrpc.client.XmlRpcClientConfigImpl;
import org.apache.xmlrpc.XmlRpcException;
import org.concordion.integration.junit3.ConcordionTestCase;
import org.concordion.api.%(expected)s;
import java.lang.reflect.Array;
import java.lang.System;
import java.util.*;
"""
class_declaration = """
@%(expected)s
public class %(name)s extends ConcordionTestCase{
"""
attributes = """
XmlRpcClient client = null;
"""
constructor = """
public %(class_name)s() throws MalformedURLException{
XmlRpcClientConfigImpl config = new XmlRpcClientConfigImpl();
config.setServerURL(new URL("http://localhost:%(port)s/"));
this.client = new XmlRpcClient();
this.client.setTypeFactory(new MyTypeFactory(this.client));
this.client.setConfig(config);
%(extensions)s
}"""
footer = """
class MyTypeFactory extends TypeFactoryImpl {
public MyTypeFactory(XmlRpcController pController) {
super(pController);
}
@Override
public TypeParser getParser(XmlRpcStreamConfig pConfig,
NamespaceContextImpl pContext, String pURI, String pLocalName) {
if ("".equals(pURI) && NullSerializer.NIL_TAG.equals(pLocalName)) {
return new NullParser();
} else {
return super.getParser(pConfig, pContext, pURI, pLocalName);
}
}
}
}"""
method_template = """
public Object %(name)s(%(args_declaration)s) throws XmlRpcException{
Object result = this.client.execute("%(class_name)s_%(name)s", new Object[]{%(args_list)s});
if(result != null && result.getClass().isArray()){
List<Object> list = new ArrayList<Object>();
for(int i = 0; i < Array.getLength(result); i++){
list.add(Array.get(result, i));
}
return list;
}
return result;
}"""
void_method_template = """
public void %(name)s() throws XmlRpcException{
this.client.execute("%(class_name)s_%(name)s", new Object[]{});
}"""
suite_template = """import junit.framework.Test;
import junit.framework.TestSuite;
public class Suite {
public static Test suite(){
TestSuite suite = new TestSuite();
suite.setName("pyConcordion test suite");
%(tests)s
return suite;
}
}
"""
add_test_template = ' suite.addTest(new TestSuite(%(class_full_path)s.class));'
void_methods = ["setUp", "tearDown"]
class JavaClassGenerator:
def __init__(self, root_dir, configuration=None):
if not configuration:
configuration = {'server_port':1337}
self.configuration = configuration
self.root_dir = root_dir
def run(self, python_files):
result = []
for python_file in python_files:
java_file = python_file.replace(".py", ".java")
python_module = {}
execfile(python_file, python_module)
            python_class_name = os.path.split(python_file)[1].replace(".py", "")
if python_class_name not in python_module:
print "Class %s not found in %s file !!! Please make sure that your test class complies with the naming convention." % (python_class_name, python_file)
sys.exit(-1)
python_class = python_module[python_class_name]
java_content = self.generate(python_class, python_file)
            open(java_file, "w").write(java_content)
result.append(python_file.replace(".py", ".java"))
return result
def suite(self, java_files):
add_tests = []
for file in java_files:
file_from_root = os.path.abspath(file)[len(os.path.abspath(self.root_dir)) + 1:]
full_path = file_from_root.replace('.java', '').replace(os.sep, '.')
add_tests.append(add_test_template % {"class_full_path":full_path})
suite_file = os.path.join(self.root_dir, "Suite.java")
open(suite_file, "w").write(suite_template % {"tests" : "\n".join(add_tests)})
return suite_file
def generate(self, python_class, python_file):
expected = "ExpectedToPass"
try:
expected = python_class._pyconcordion_expected
except AttributeError:
pass
extensions = ""
if self.configuration.get("extensions"):
extensions = 'System.setProperty("concordion.extensions", "%s");' % self.configuration.get("extensions")
return "".join([self.generate_package(python_file),
imports % {"expected" : expected},
class_declaration % {"name":python_class.__name__, "expected" : expected},
attributes,
constructor % {
"class_name":python_class.__name__,
"port":self.configuration.get('server_port'),
"extensions":extensions
},
"\n".join(self.generateMethods(python_class)),
footer])
def generate_package(self, python_file):
file_from_root = os.path.abspath(python_file)[len(os.path.abspath(self.root_dir)) + 1:]
file_package = os.path.split(file_from_root)[0]
if file_package == "":
return ""
else:
return package % file_package.replace(os.sep, ".")
def generateMethods(self, python_class):
methods = []
for method_name in dir(python_class):
if not method_name.startswith("_"):
method = getattr(python_class, method_name)
if isinstance(method, type(self.generateMethods)):
func_code = getattr(method, "real_func_code", method.func_code)
arguments = func_code.co_varnames[:func_code.co_argcount]
arguments_list = ", ".join(arguments[1:])
arguments_declaration = ", ".join(["String " + x for x in arguments[1:]])
if method_name in void_methods:
methods.append(void_method_template % {"name":method_name, "class_name":python_class.__name__})
else:
methods.append(method_template % {"name":method_name, "class_name":python_class.__name__, "args_declaration":arguments_declaration, "args_list":arguments_list})
return methods
class Classpath:
def __init__(self, path):
self.path = path
self.directories = []
def getClasspath(self):
files = glob.glob(os.path.join(self.path, "*.jar"))
absolute_files = map(os.path.abspath, files)
absolute_files.extend(self.directories)
return '"' + os.pathsep.join(absolute_files) + '"'
def addDirectory(self, path):
self.directories.append(path)
def removeDirectory(self, path):
self.directories.remove(path)
def addDirectories(self, paths):
for path in paths:
self.addDirectory(path)
class JavaFileCompiler:
def __init__(self, config, classpath, executor):
self.configuration = config
self.classpath = classpath
self.executor = executor
def compile(self, javaFiles):
command = " ".join([self.configuration.get("javac_command"), "-cp", self.classpath.getClasspath(), " ".join(javaFiles)])
if self.executor.run(command) != 0:
raise Exception("Sorry, an exception occured in the compilation process")
def modifyExtension(file):
name, extension = os.path.splitext(file)
return name + ".class"
return map(modifyExtension, javaFiles)
class JavaTestLauncher:
def __init__(self, config, classpath, executor, root_dir):
self.configuration = config
self.classpath = classpath
self.executor = executor
self.root_dir = root_dir
def launch(self, classFile):
(directory, name) = os.path.split(classFile)
className = name.replace(".class", "")
package = os.path.abspath(directory)[len(os.path.abspath(self.root_dir)) + 1:].replace(os.sep, ".")
class_full_path = None
if package == "":
class_full_path = className
else:
class_full_path = package + "." + className
self.classpath.addDirectory(self.root_dir)
command = " ".join([self.configuration.get('java_command'),
"-Dconcordion.output.dir=" + self.configuration.get('output_folder'),
"-cp",
self.classpath.getClasspath(),
"junit.textui.TestRunner",
class_full_path])
execution_result = self.executor.run(command, True)
self.classpath.removeDirectory(self.root_dir)
return execution_result
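# Illustrative wiring of the classes above (a sketch only; `Executor` is assumed to be the
# package's shell-command runner with a run(command[, capture]) method -- it is not defined
# in this file):
#
#   executor = Executor()
#   classpath = Classpath("lib")                     # directory containing the .jar files
#   generator = JavaClassGenerator("specs", {"server_port": 1337})
#   java_files = generator.run(glob.glob("specs/*.py"))
#   config = {"javac_command": "javac", "java_command": "java", "output_folder": "reports"}
#   class_files = JavaFileCompiler(config, classpath, executor).compile(java_files)
#   launcher = JavaTestLauncher(config, classpath, executor, "specs")
#   for class_file in class_files:
#       launcher.launch(class_file)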
|
[
"jcplessis@3b747d5a-b732-11dd-8af8-77a15857c330"
] |
jcplessis@3b747d5a-b732-11dd-8af8-77a15857c330
|
c737af5d1ff073a22f5a3aaaf91937cb8797fb95
|
8164fd930d78efbd3885198efbfd9692c585319b
|
/week7/CrawWeb/craw_web.py
|
b837f0305e260bb319dc6622a4866529aa9c6f96
|
[] |
no_license
|
kobso1245/Hack_BG
|
7a7b7524b20fada3d9856a583e02c6959d442e66
|
7ffdb8ccefd67aeca5a49c9a9354e65c77149ad4
|
refs/heads/master
| 2020-05-31T17:43:10.316633
| 2015-10-05T21:43:20
| 2015-10-05T21:43:20
| 30,460,888
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
from Crawler import *
from Histogram import *
from Plotter import *
if __name__ == '__main__':
craw("http://register.start.bg/", "histogram2")
plot("path_to_database_file/websites.db")
|
[
"kalo.evt@mail.bg"
] |
kalo.evt@mail.bg
|
91ba9347fb713f1ec0e076a66bf3ce839adf7db1
|
087c9782bfad42a3fe33ba8d5a28577661a2e33a
|
/modules/EstadoSesion.py
|
f8109a2204cf2ea8f88c31abbee0a84f4575eaeb
|
[] |
no_license
|
jav-rojas/capacitaciones
|
5d9ec7406eb4ad2018f4cbd000bd62f096328b14
|
fcff5aa9d87b0d6341240a8b2e84fa70487b9c9c
|
refs/heads/main
| 2023-01-12T05:39:15.737496
| 2020-11-17T20:55:47
| 2020-11-17T20:55:47
| 301,771,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
try:
import streamlit.ReportThread as ReportThread
from streamlit.server.Server import Server
except Exception:
# Streamlit >= 0.65.0
import streamlit.report_thread as ReportThread
from streamlit.server.server import Server
class SessionState(object):
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def get(**kwargs):
ctx = ReportThread.get_report_ctx()
this_session = None
current_server = Server.get_current()
if hasattr(current_server, '_session_infos'):
# Streamlit < 0.56
session_infos = Server.get_current()._session_infos.values()
else:
session_infos = Server.get_current()._session_info_by_id.values()
for session_info in session_infos:
s = session_info.session
if (
# Streamlit < 0.54.0
(hasattr(s, '_main_dg') and s._main_dg == ctx.main_dg)
or
# Streamlit >= 0.54.0
(not hasattr(s, '_main_dg') and s.enqueue == ctx.enqueue)
or
# Streamlit >= 0.65.2
(not hasattr(s, '_main_dg') and s._uploaded_file_mgr == ctx.uploaded_file_mgr)
):
this_session = s
    if this_session is None:
        raise RuntimeError(
            "Could not retrieve the Streamlit session. Are you doing something with threads?")
if not hasattr(this_session, '_custom_session_state'):
this_session._custom_session_state = SessionState(**kwargs)
return this_session._custom_session_state
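# Usage sketch inside a Streamlit script (hypothetical app code, assuming the module is
# importable as modules.EstadoSesion; not part of this module):
#
#   import streamlit as st
#   from modules.EstadoSesion import get
#
#   state = get(counter=0)            # keyword defaults apply only on the first run
#   if st.button("Increment"):
#       state.counter += 1
#   st.write("Counter:", state.counter)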
|
[
"54907635+jav-rojas@users.noreply.github.com"
] |
54907635+jav-rojas@users.noreply.github.com
|
d022916a345025cb425f8776ec7613b96ad69f8d
|
abd471c7ef4a5ebae08a0b478e724ce4f5779c24
|
/setup.py
|
687c70dcd18e7a0dc3c3a41a281db8df46a2b83b
|
[
"MIT"
] |
permissive
|
lazy-scrivener-games/homebrewery-to-libris
|
f0334df94a33ba14411fe00080d72581f75a8706
|
4420657b72c7ccc3339614c1e7937637b486a8af
|
refs/heads/main
| 2023-08-23T21:02:35.177631
| 2021-10-21T22:23:36
| 2021-10-21T22:23:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
from setuptools import setup
setup(
name='homebrewery-to-libris',
version='1.1.0',
description='Converter between homebrewery and libris markdown formats.',
url='https://github.com/lazy-scrivener-games/homebrewery-to-libris',
download_url='https://github.com/lazy-scrivener-games/homebrewery-to-libris/archive/refs/tags/v1.1.tar.gz',
author='Chris Muller',
author_email='chris@lazyscrivenergames.com',
keywords=[
'utility',
'pdf',
'html',
'markdown',
'conversion',
'book',
'roleplaying',
'game',
'homebrewery',
'libris'
],
license='MIT',
packages=[
'homebrewery_to_libris'
],
scripts=[
'scripts/homebrewery-to-libris'
],
install_requires=[
'markdown2 == 2.4.1'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'Topic :: Text Processing :: Markup :: Markdown',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
]
)
|
[
"illusorycaesura@gmail.com"
] |
illusorycaesura@gmail.com
|
c008cb5d4d87cdea6e0a73a3afebeb74385aead1
|
bf9df944e54c4cca22f62f79c3150cf6b7f33fea
|
/NTupleMaker/bin/preAnalyzerTauTau_Batch_Summer15.py
|
2a24532b0a43b28671f3510bce77f74775f063ad
|
[] |
no_license
|
bobovnii/DesyTauAnalysesRun2_25ns
|
39dce38e34486f98e630fbed515c7410a4a905f3
|
c45d0071f3024136a97c0af83c00219d22d2242a
|
refs/heads/master
| 2020-12-11T07:59:58.389966
| 2016-04-29T08:07:23
| 2016-04-29T08:07:23
| 58,554,259
| 1
| 0
| null | 2016-05-11T14:54:33
| 2016-05-11T14:54:33
| null |
UTF-8
|
Python
| false
| false
| 3,549
|
py
|
#!/usr/bin/env python
import commands
import re
import os
import sys
sys.path.append('./')
###########################################
###########################################
def preAnalyze( ana, sample, runInSeries=False):
#os.system( 'mkdir batch/' )
stream = "TauTau"
print "Stream ", stream
#os.system('python makeTreeSkimmerTauTau_Summer14.py')
if runInSeries:
print "Running in series via the command ..."
configFileName = "./Configs/preAnalyzerTauTau_Summer15_%s_%s_cfg.py" % (sample,ana)
runInSeries = 'preAnalyzer'+stream+' '+configFileName+'\n'
print runInSeries
os.system(runInSeries)
else:
nameJob = 'job_'+sample+'_'+ana+'_'+stream
if 'Data' in sample:
nameJob = 'job_'+sample+'_'+stream
else:
nameJob = 'job_'+sample+'_'+stream+'_'+ana
fileJob = 'batch/'+nameJob+'.sh'
fileLog = 'batch/log/'+nameJob+'.txt'
print "Creating the shell file : "+nameJob
##
f = open(fileJob,'w')
f.write('#!/bin/sh\n\n')
configFileName = "./Configs/preAnalyzerTauTau_Summer15_%s_%s_cfg.py" % (sample,ana)
f.write('cfg='+configFileName+'\n')
f.write('cat > $cfg.zsh <<EOF\n')
f.write('#!/bin/zsh\n')
f.write('#\n')
f.write('#(make sure the right shell will be used)\n')
f.write('#$ -S /bin/zsh\n')
f.write('#\n')
f.write('#(the cpu time for this job)\n')
f.write('#$ -l h_cpu=02:29:00\n')
f.write('#\n')
f.write('#(the maximum memory usage of this job)\n')
f.write('#$ -l h_vmem=2000M\n')
f.write('#\n')
f.write('#(use hh site)\n')
f.write('#$ -l site=hh\n')
f.write('#(stderr and stdout are merged together to stdout)\n')
f.write('#$ -j y\n')
f.write('#\n')
f.write('# use SL6\n')
f.write('#$ -l os=sld6\n')
f.write('#\n')
f.write('# use current dir and current environment\n')
f.write('#$ -cwd\n')
f.write('#$ -V\n')
f.write('#\n')
f.write('#$ -o $cfg.out\n')
f.write('#\n')
f.write('#$ -e $cfg.err\n')
f.write('preAnalyzer'+stream+' $cfg\n')
f.write('EOF\n')
f.write('rm $cfg.out\n')
f.write('rm $cfg.err\n')
f.write('chmod u+x $cfg.zsh\n')
f.write('qsub $cfg.zsh\n')
f.close()
os.system('chmod u+x batch/*.sh')
os.system('python preAnalyzerTauTau_Summer15.py')
###########################################
###########################################
##Data
preAnalyze("nominal","Run2015B-Data_TauTau",True)
##Signal
#preAnalyze("nominal","GGFH125",False)
#preAnalyze("nominal","VBFH125",False)
#preAnalyze("nominal","SUSYGGH160",True)
#preAnalyze("nominal","SUSYBBH160",True)
## Background
#preAnalyze("nominal","DYJets_TauTau",True)
#preAnalyze("nominal","WJetsToLNu",True)
#preAnalyze("nominal","TTJets",True)
#preAnalyze("nominal","SingleTop_t",True)
##preAnalyze("nominal","SingleAntiTop_t",True)
#preAnalyze("nominal","SingleTop_tW",True)
#preAnalyze("nominal","SingleAntiTop_tW",True)
##preAnalyze("nominal","WWTo2L2Nu",True)
##preAnalyze("nominal","WWTo4Q",True)
##preAnalyze("nominal","WWToLNuQQ",True)
##preAnalyze("nominal","WZTo1L1Nu2Q",True)
##preAnalyze("nominal","WZTo3LNu",True)
##preAnalyze("nominal","ZZ",True)
|
[
"francesco.costanza@desy.de"
] |
francesco.costanza@desy.de
|
7fa564e54a994f80546bfabe9d520dff0eacb74a
|
fa46fd872bd6aae13721569ef24c09d2dd09bc83
|
/ch_16_coroutines/simple_coroutines.py
|
0dbe70a9082015c8c4887d93e0fee9a4d61495ce
|
[] |
no_license
|
justincrapse/fluent_python
|
0da1c7ce713c03fd3ed2ca5519b9d8a1a0b2dfcd
|
fa58da79adfa2bf6031667bc6290e272d9715d5c
|
refs/heads/master
| 2022-06-13T05:15:02.103596
| 2020-05-10T19:22:15
| 2020-05-10T19:22:15
| 257,097,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
from inspect import getgeneratorstate
def simple_coroutine(a):
print('-> Started: a = ', a)
b = yield a # b != a. b receives what is sent in my_coro.send(), which is 28 in the code below
print('-> Received: b =', b)
c = yield a + b
print('-> Received: c =', c)
my_coro = simple_coroutine(14)
print(getgeneratorstate(my_coro))
next(my_coro)  # prints the first message and runs to `yield a`, yielding the number 14
print(getgeneratorstate(my_coro))
print(my_coro.send(28)) # assigns 28 to b, prints second message, and runs through to next yield (a + b '42')
try:
print(my_coro.send(99)) # runs the rest of the code, then terminates with StopIteration exception
except StopIteration:
print(getgeneratorstate(my_coro))
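# Expected console output, approximately (the state labels come from inspect.getgeneratorstate):
#
#   GEN_CREATED
#   -> Started: a =  14
#   GEN_SUSPENDED
#   -> Received: b = 28
#   42
#   -> Received: c = 99
#   GEN_CLOSED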
|
[
"justincrapse@gmail.com"
] |
justincrapse@gmail.com
|
d913221b2594a670e1a1296137e8184576959356
|
d5862abce5457bbe3865c6c8fc0fcec42e8c27f7
|
/mgn/tools/run_train_e2e.py
|
c287a63a310f9980b196ca64f2f1e4e47df4b8ee
|
[
"MIT"
] |
permissive
|
cdsfcesf/mgn
|
ecaf64bd51c3209d96646eb1beebf6b1ea28cb96
|
341d9feae7c400c0d56f3849633a2b357df8d8ed
|
refs/heads/master
| 2023-04-14T11:14:17.723482
| 2021-04-30T22:48:10
| 2021-04-30T22:48:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File: run_train_e2e.py
# Author: anon
# Email: anon@cs.anon.edu
# Created on: 2020-05-18
#
# This file is part of MGN
# Distributed under terms of the MIT License
import os
import sys
_dir = os.path.split(os.getcwd())[0]
if _dir not in sys.path:
sys.path.insert(0, _dir)
sys.path.insert(0, ".")
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger('imported_module').setLevel(logging.WARNING)
from options.train_options import TrainOptions
from datasets import get_dataloader
from executors import get_executor
from models.parser import Seq2seqParser
from trainer import Trainer
import numpy as np
import random
np.random.seed(42)
random.seed(42)
import clevr_parser
graph_parser = clevr_parser.Parser(backend='spacy', model='en_core_web_sm',
has_spatial=True,
has_matching=True).get_backend(identifier='spacy')
embedder = clevr_parser.Embedder(backend='torch', parser=graph_parser).get_backend(identifier='torch')
opt = TrainOptions().parse()
train_loader = get_dataloader(opt, 'train', graph_parser=graph_parser, embedder=embedder)
val_loader = get_dataloader(opt, 'val', graph_parser=graph_parser, embedder=embedder)
model = Seq2seqParser(opt)
executor = get_executor(opt)
trainer = Trainer(opt, train_loader, val_loader, model, executor)
trainer.train()
|
[
"raeidsaqur@cs.toronto.edu"
] |
raeidsaqur@cs.toronto.edu
|
2736296850dec612e5894e270ae5b568fc2c1cb0
|
f2c27c44ee4d54f1441df40cf5453b199e0f8142
|
/test3.py
|
0fc400769530d178a248c7fedc04935d734ec257
|
[] |
no_license
|
weinan123/tests
|
e05f308ef7f5c3a1409b4be6eb644a6a11d05e6d
|
0a7e91e1977942babb574ea8bdf45a571d74ba4a
|
refs/heads/master
| 2020-04-27T06:48:01.596598
| 2019-03-26T08:12:50
| 2019-03-26T08:12:50
| 174,118,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
# -*- coding: UTF-8 -*-
# higher-order functions
f = abs  # a function itself can be assigned to a variable
print f(-10)
''' a function that takes another function as a parameter is called a higher-order function '''
def add(x,y,f):
return f(x)+f(y)
result = add(-5,-6,abs)
print result
#map
def f(x):
return x*x
r = map(f,[1,2,3,4,5,6,7])
print list(r)
# capitalize(): upper-cases the first letter and lower-cases the rest
def normalize(name):
return name.capitalize()
L1 = ['adam','LISA','barT']
L2 = list(map(normalize,L1))
print L2
def prod(L):
return reduce(lambda x,y:x*y,L)
str = prod([3,5,7,9])
print str
#filter
def _odd_iter():
n = 1
while True:
n = n+2
yield n
def _not_divisible(n):
return lambda x:x%n>0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n),it)
for n in primes():
if n <1000:
print n
else:
break
def is_palindrome(n):
    # a number is a palindrome if its decimal string equals its reverse
    return str(n) == str(n)[::-1]
listsqwe = list(filter(is_palindrome,[123,121,345,23432]))
print listsqwe
from functools import reduce
def str2num(s):
    # parse either an int ('100') or a float ('7.6')
    s = s.strip()
    return float(s) if '.' in s else int(s)
def calc(exp):
ss = exp.split('+')
ns = map(str2num, ss)
return reduce(lambda acc, x: acc + x, ns)
def main():
    r = calc('100 + 200 + 345')
    print '100 + 200 + 345 =', r
    r = calc('99 + 88 + 7.6')
    print '99 + 88 + 7.6 =', r
main()
|
[
"tom.weng@yff.com"
] |
tom.weng@yff.com
|
ce19c5dcc0781b0f973bef61f84f4c439f3f4947
|
924c65166eee1da93c0a0c85f067c028b1d7c6be
|
/deepforest/deepforest.py
|
19e80e8a67f29e7e8a2266c1c25eca30a205a678
|
[
"MIT"
] |
permissive
|
geo-py/DeepForest
|
4cf8e1fd742c6a52b67cb57d2f6825e149a0903b
|
39cb1db4b57ca7fbb64f0f87fee0f74487e4d7e3
|
refs/heads/master
| 2021-04-08T09:48:58.526848
| 2020-03-19T03:04:01
| 2020-03-19T03:04:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,798
|
py
|
"""
Deepforest main module. This module holds the deepforest class for model building and training
"""
import os
import csv
import warnings
from PIL import Image
with warnings.catch_warnings():
#Suppress some of the verbose tensorboard warnings, compromise to avoid numpy version errors
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
import pandas as pd
import cv2
import numpy as np
from matplotlib import pyplot as plt
from deepforest import get_data
from deepforest import utilities
from deepforest import predict
from deepforest import preprocess
from deepforest.retinanet_train import main as retinanet_train
from deepforest.retinanet_train import parse_args
from keras_retinanet import models
from keras_retinanet.models import convert_model
from keras_retinanet.bin.train import create_models
from keras_retinanet.preprocessing.csv_generator import CSVGenerator, _read_classes
from keras_retinanet.utils.eval import evaluate
from keras_retinanet.utils.eval import _get_detections
from keras_retinanet.utils.visualization import draw_box
class deepforest:
'''
Class for training and predicting tree crowns in RGB images
Args:
weights (str): Path to model saved on disk from keras.model.save_weights(). A new model is created and weights are copied. Default is None.
saved_model: Path to a saved model from disk using keras.model.save(). No new model is created.
Attributes:
model: A keras training model from keras-retinanet
'''
def __init__(self, weights=None, saved_model=None):
self.weights = weights
self.saved_model = saved_model
#Read config file - if a config file exists in local dir use it, if not use installed.
if os.path.exists("deepforest_config.yml"):
config_path = "deepforest_config.yml"
else:
try:
config_path = get_data("deepforest_config.yml")
except Exception as e:
raise ValueError(
"No deepforest_config.yml found either in local directory or in installed package location. {}"
.format(e))
print("Reading config file: {}".format(config_path))
self.config = utilities.read_config(config_path)
#Create a label dict, defaults to "Tree"
self.read_classes()
#release version id to flag if release is being used
self.__release_version__ = None
#Load saved model if needed
if self.saved_model:
print("Loading saved model")
#Capture user warning, not relevant here
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.model = utilities.load_model(saved_model)
if self.weights is not None:
print("Creating model from weights")
backbone = models.backbone(self.config["backbone"])
self.model, self.training_model, self.prediction_model = create_models(
backbone.retinanet, num_classes=1, weights=self.weights)
else:
print(
"A blank deepforest object created. To perform prediction, either train or load an existing model."
)
self.model = None
def read_classes(self):
"""Read class file in case of multi-class training. If no file has been created, DeepForest assume there is 1 class, Tree"""
# parse the provided class file
self.labels = {}
try:
with open(self.classes_file, 'r') as file:
self.classes = _read_classes(csv.reader(file, delimiter=','))
for key, value in self.classes.items():
self.labels[value] = key
        except Exception:
            #No classes file yet (e.g. a fresh object before training) - default to a single "Tree" class
            self.labels[0] = "Tree"
def train(self,
annotations,
input_type="fit_generator",
list_of_tfrecords=None,
comet_experiment=None,
images_per_epoch=None):
'''Train a deep learning tree detection model using keras-retinanet.
This is the main entry point for training a new model based on either existing weights or scratch
Args:
annotations (str): Path to csv label file, labels are in the format -> path/to/image.png,x1,y1,x2,y2,class_name
comet_experiment: A comet ml object to log images. Optional.
list_of_tfrecords: Ignored if input_type != "tfrecord", list of tf records to process
input_type: "fit_generator" or "tfrecord"
images_per_epoch: number of images to override default config of # images in annotations file / batch size. Useful for debug
Returns:
model (object): A trained keras model
prediction model: with bbox nms
trained model: without nms
'''
#Test if there is a new classes file in case # of classes has changed.
self.classes_file = utilities.create_classes(annotations)
self.read_classes()
arg_list = utilities.format_args(annotations, self.classes_file, self.config,
images_per_epoch)
print("Training retinanet with the following args {}".format(arg_list))
#Train model
self.model, self.prediction_model, self.training_model = retinanet_train(
forest_object=self,
args=arg_list,
input_type=input_type,
list_of_tfrecords=list_of_tfrecords,
comet_experiment=comet_experiment)
def use_release(self, gpus=1):
        '''Use the latest DeepForest model release from github and load the model, downloading it first if it is not already on disk
        Args:
            gpus: number of gpus to parallelize over, defaults to 1
        Returns:
            model (object): A trained keras model
        '''
#Download latest model from github release
release_tag, self.weights = utilities.use_release()
#load saved model and tag release
self.__release_version__ = release_tag
print("Loading pre-built model: {}".format(release_tag))
if gpus == 1:
with warnings.catch_warnings():
                #Suppress compile warning, not relevant here
warnings.filterwarnings("ignore", category=UserWarning)
self.model = utilities.read_model(self.weights, self.config)
#Convert model
self.prediction_model = convert_model(self.model)
elif gpus > 1:
backbone = models.backbone(self.config["backbone"])
n_classes = len(self.labels.keys())
self.model, self.training_model, self.prediction_model = create_models(
backbone.retinanet,
num_classes=n_classes,
weights=self.weights,
multi_gpu=gpus)
#add to config
self.config["weights"] = self.weights
def predict_generator(self,
annotations,
comet_experiment=None,
iou_threshold=0.5,
max_detections=200,
return_plot=False):
"""Predict bounding boxes for a model using a csv fit_generator
Args:
annotations (str): Path to csv label file, labels are in the format -> path/to/image.png,x1,y1,x2,y2,class_name
iou_threshold(float): IoU Threshold to count for a positive detection (defaults to 0.5)
max_detections (int): Maximum number of bounding box predictions
comet_experiment(object): A comet experiment class objects to track
return_plot: Whether to return prediction boxes (False) or Images (True). If True, files will be written to current working directory if model.config["save_path"] is not defined.
Return:
boxes_output: If return_plot=False, a pandas dataframe of bounding boxes for each image in the annotations file
None: If return_plot is True, images are written to save_dir as a side effect.
"""
#Format args for CSV generator
classes_file = utilities.create_classes(annotations)
arg_list = utilities.format_args(annotations, classes_file, self.config)
args = parse_args(arg_list)
#create generator
generator = CSVGenerator(
args.annotations,
args.classes,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side,
config=args.config,
shuffle_groups=False,
)
        if getattr(self, "prediction_model", None):
boxes_output = []
#For each image, gather predictions
for i in range(generator.size()):
#pass image as path
plot_name = generator.image_names[i]
image_path = os.path.join(generator.base_dir, plot_name)
result = self.predict_image(image_path,
return_plot=return_plot,
score_threshold=args.score_threshold)
if return_plot:
if not self.config["save_path"]:
print(
"model.config['save_path'] is None, saving images to current working directory"
)
save_path = "."
else:
save_path = self.config["save_path"]
#Save image
fname = os.path.join(save_path, plot_name)
cv2.imwrite(fname, result)
continue
else:
#Turn boxes to pandas frame and save output
box_df = pd.DataFrame(result)
#use only plot name, not extension
box_df["plot_name"] = os.path.splitext(plot_name)[0]
boxes_output.append(box_df)
else:
raise ValueError(
"No prediction model loaded. Either load a retinanet from file, download the latest release or train a new model"
)
if return_plot:
return None
else:
#if boxes, name columns and return box data
boxes_output = pd.concat(boxes_output)
boxes_output.columns = [
"xmin", "ymin", "xmax", "ymax", "score", "label", "plot_name"
]
boxes_output = boxes_output.reindex(
columns=["plot_name", "xmin", "ymin", "xmax", "ymax", "score", "label"])
return boxes_output
def evaluate_generator(self,
annotations,
comet_experiment=None,
iou_threshold=0.5,
max_detections=200):
""" Evaluate prediction model using a csv fit_generator
Args:
annotations (str): Path to csv label file, labels are in the format -> path/to/image.png,x1,y1,x2,y2,class_name
iou_threshold(float): IoU Threshold to count for a positive detection (defaults to 0.5)
max_detections (int): Maximum number of bounding box predictions
comet_experiment(object): A comet experiment class objects to track
Return:
mAP: Mean average precision of the evaluated data
"""
#Format args for CSV generator
classes_file = utilities.create_classes(annotations)
arg_list = utilities.format_args(annotations, classes_file, self.config)
args = parse_args(arg_list)
#create generator
validation_generator = CSVGenerator(
args.annotations,
args.classes,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side,
config=args.config,
shuffle_groups=False,
)
average_precisions = evaluate(validation_generator,
self.prediction_model,
iou_threshold=iou_threshold,
score_threshold=args.score_threshold,
max_detections=max_detections,
save_path=args.save_path,
comet_experiment=comet_experiment)
# print evaluation
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
print('{:.0f} instances of class'.format(num_annotations),
validation_generator.label_to_name(label),
'with average precision: {:.4f}'.format(average_precision))
total_instances.append(num_annotations)
precisions.append(average_precision)
if sum(total_instances) == 0:
print('No test instances found.')
return
print('mAP using the weighted average of precisions among classes: {:.4f}'.format(
sum([a * b for a, b in zip(total_instances, precisions)]) /
sum(total_instances)))
mAP = sum(precisions) / sum(x > 0 for x in total_instances)
print('mAP: {:.4f}'.format(mAP))
return mAP
def predict_image(self,
image_path=None,
numpy_image=None,
return_plot=True,
score_threshold=0.05,
show=False,
color=None):
"""Predict tree crowns based on loaded (or trained) model
Args:
image_path (str): Path to image on disk
numpy_image (array): Numpy image array in BGR channel order following openCV convention
color (tuple): Color of bounding boxes in BGR order (0,0,0) black default
show (bool): Plot the predicted image with bounding boxes. Ignored if return_plot=False
return_plot: Whether to return image with annotations overlaid, or just a numpy array of boxes
Returns:
predictions (array): if return_plot, an image. Otherwise a numpy array of predicted bounding boxes, with scores and labels
"""
#Check for model save
        if getattr(self, "prediction_model", None) is None:
raise ValueError(
"Model currently has no prediction weights, either train a new model using deepforest.train, loading existing model, or use prebuilt model (see deepforest.use_release()"
)
#Check the formatting
if isinstance(image_path, np.ndarray):
raise ValueError(
"image_path should be a string, but is a numpy array. If predicting a loaded image (channel order BGR), use numpy_image argument."
)
#Check for correct formatting
#Warning if image is very large and using the release model
if numpy_image is None:
numpy_image = cv2.imread(image_path)
#Predict
prediction = predict.predict_image(self.prediction_model,
image_path=image_path,
raw_image=numpy_image,
return_plot=return_plot,
score_threshold=score_threshold,
color=color,
classes=self.labels)
#cv2 channel order to matplotlib order
if return_plot & show:
plt.imshow(prediction[:, :, ::-1])
plt.show()
return prediction
def predict_tile(self,
raster_path=None,
numpy_image=None,
patch_size=400,
patch_overlap=0.15,
iou_threshold=0.15,
return_plot=False):
"""
        For images too large to input into the model, predict_tile cuts the image into overlapping windows, predicts trees on each window and reassembles the results into a single array.
Args:
raster_path: Path to image on disk
numpy_image (array): Numpy image array in BGR channel order following openCV convention
            iou_threshold: Minimum iou overlap among predictions between windows to be suppressed. Defaults to 0.15. Lower values suppress more boxes at edges.
return_plot: Should the image be returned with the predictions drawn?
Returns:
boxes (array): if return_plot, an image. Otherwise a numpy array of predicted bounding boxes, scores and labels
"""
        if numpy_image is None:
            #Load raster as image; calling bool() on a numpy array is ambiguous, so test for None explicitly
            raster = Image.open(raster_path)
            numpy_image = np.array(raster)
#Compute sliding window index
windows = preprocess.compute_windows(numpy_image, patch_size, patch_overlap)
#Save images to tmpdir
predicted_boxes = []
for index, window in enumerate(windows):
#Crop window and predict
crop = numpy_image[windows[index].indices()]
#Crop is RGB channel order, change to BGR
crop = crop[..., ::-1]
boxes = self.predict_image(numpy_image=crop,
return_plot=False,
score_threshold=self.config["score_threshold"])
#transform coordinates to original system
xmin, ymin, xmax, ymax = windows[index].getRect()
boxes.xmin = boxes.xmin + xmin
boxes.xmax = boxes.xmax + xmin
boxes.ymin = boxes.ymin + ymin
boxes.ymax = boxes.ymax + ymin
predicted_boxes.append(boxes)
predicted_boxes = pd.concat(predicted_boxes)
        #Non-max suppression for overlapping boxes among windows
if patch_overlap == 0:
mosaic_df = predicted_boxes
else:
with tf.Session() as sess:
print("{} predictions in overlapping windows, applying non-max supression".
format(predicted_boxes.shape[0]))
new_boxes, new_scores, new_labels = predict.non_max_suppression(
sess,
predicted_boxes[["xmin", "ymin", "xmax", "ymax"]].values,
predicted_boxes.score.values,
predicted_boxes.label.values,
max_output_size=predicted_boxes.shape[0],
iou_threshold=iou_threshold)
#Recreate box dataframe
image_detections = np.concatenate([
new_boxes,
np.expand_dims(new_scores, axis=1),
np.expand_dims(new_labels, axis=1)
],axis=1)
mosaic_df = pd.DataFrame(
image_detections,
columns=["xmin", "ymin", "xmax", "ymax", "score", "label"])
mosaic_df.label = mosaic_df.label.str.decode("utf-8")
print("{} predictions kept after non-max suppression".format(
mosaic_df.shape[0]))
if return_plot:
#Draw predictions
for box in mosaic_df[["xmin", "ymin", "xmax", "ymax"]].values:
draw_box(numpy_image, box, [0, 0, 255])
            #Maintain consistency with predict_image
return numpy_image
else:
return mosaic_df
def plot_curves(self):
"""Plot training curves"""
if self.history:
# Plot training & validation regression loss values
fig, axes, = plt.subplots(nrows=1, ncols=3)
axes = axes.flatten()
#Regression Loss
axes[0].plot(self.history.history['regression_loss'])
axes[0].set_title('Bounding Box Loss')
axes[0].set_ylabel('Loss')
axes[0].set_xlabel('Epoch')
#Classification Loss
axes[1].plot(self.history.history['classification_loss'])
axes[1].set_title('Classification Loss')
axes[1].set_ylabel('Loss')
axes[1].set_xlabel('Epoch')
# Plot validation mAP
if "mAP" in self.history.history.keys():
axes[2].plot(self.history.history['mAP'])
axes[2].set_title('Validation: Mean Average Precision')
axes[2].set_ylabel('mAP')
axes[2].set_xlabel('Epoch')
plt.show()
else:
print("No training history found.")
return None
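if __name__ == "__main__":
    # Usage sketch, not part of the library API: download the prebuilt
    # release model and predict on a single RGB image. "tree_image.png" is a
    # hypothetical path; substitute any local image of a forest canopy.
    release_model = deepforest()
    release_model.use_release()
    boxes = release_model.predict_image(image_path="tree_image.png", return_plot=False)
    print(boxes)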
|
[
"benweinstein2010@gmail.com"
] |
benweinstein2010@gmail.com
|
b808890d42c126b65c5b6213a5c1efc90ba24f4f
|
8e8487a365988830bce965cf6a80c179e7125292
|
/GitHub_test.py
|
ff877ce4130281f84b86dfab7e69f72ce4396bd3
|
[] |
no_license
|
github-ruben/NeuralNetwork
|
f93d98b6c021dd91088fe96f33c6983606a550ad
|
d78f2a4c8cd32854f9c0d29b98b72eae4ba5b25f
|
refs/heads/master
| 2020-05-06T12:19:58.993602
| 2019-04-08T14:51:59
| 2019-04-08T14:51:59
| 180,117,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,941
|
py
|
import sys
import os
import pandas as pd
feature_sel_NN = ['SpdLimit', 'LengthMeter', 'IsPaved', 'HasSpdBumps', 'NrSharpTurns'
,'IsPriority', 'IsUrban', 'IsRamp']
training_data = (r'data_vrachtwagen_training.csv')
test_data = (r'data_vrachtwagen_test.csv')
def load_and_prepare(data_file_name):
#Load data file with speeds and factors for road segments
data_set_learning = pd.read_csv(data_file_name , sep = ';')
data_set_learning = data_cleaning(data_set_learning)
return data_set_learning
def data_cleaning(data):
#Ignore speed if GPS point is more than 20m away
data = data[data.MeterAwayFromGps <= 20]
#Ignore is speed is on ferry edge
data = data[data.IsFerry == 0]
#Ignore speed limits of >130
data = data[data.SpdLimit <= 130]
return data
training_data = load_and_prepare(training_data)
test_data = load_and_prepare(test_data)
#----- Setup independent and dependent (target) variables ----------------------------------------------------------------------------------------
features = feature_sel_NN
nr_features = len(features)
X_train = training_data[features]
X_test = test_data[features]
y_train = training_data.CrwlKmph
y_test = test_data.CrwlKmph
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#----- Building NN -------------------------------------------------------------------------------------------
# Importing the Keras libraries and packages
import keras
from keras.optimizers import Adam
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
#from keras.layers import Dropout
# Building the Neural Network
def build_regressor(nr_neurons, nr_hiddenlayers, nr_features, optimizer = 'adam'):
#reproduce()
regressor = Sequential()
# Adding the input layer and the first hidden layer
regressor.add(Dense(units = nr_neurons, kernel_initializer = 'uniform', activation = 'relu', input_dim = nr_features))
for layers in range(nr_hiddenlayers-1):
# Adding (nr_hiddenlayers - 1) to NN
regressor.add(Dense(units = nr_neurons, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
regressor.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'linear'))
# Compiling the ANN
regressor.compile(optimizer = optimizer, loss = 'mean_squared_error')
#print(regressor.get_weights())
return regressor
#----- Learning --------------------------------------------------------------------------------------------------------------------------
'''
def run(x):
nr_neuron = int(x[0,0])
nr_hiddenlayer = int(x[0,1])
epoch = int(x[0,2])
batch_size = int(x[0,3])
print(nr_neurons, nr_hiddenlayers,epoch,batch_size)
regressor = build_regressor(nr_neuron, nr_hiddenlayer)
callbacks = [EarlyStopping(monitor='val_loss', patience=2)]
regressor.fit(X_train, y_train, batch_size = batch_size, nb_epoch = epoch, callbacks = callbacks, validation_data = (X_test, y_test))
return regressor.evaluate(X_test, y_test)
'''
#---- Hyperparameter tuning ----------------------------------------- https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
#Common hyperparameters for Neural Networks, from most important to least important, are:
#Learning rate – α (is also adam parameter)
#Number of hidden units for different layers
#Momentum – β
#Mini-batch size
#Number of hidden layers
#Adam’s hyperparameter – β1, β2, ε (lr=0.001,beta_1=0.9,beta_2 = 0.999)
#Learning rate decay
import GPyOpt
import GPy
from GPy import kern
from GPy import models
#from GPy import priors
'''
def hyp_optimizing(x):
nr_neurons = int(x[0,0])
nr_hiddenlayers = int(x[0,1])
epoch = int(x[0,2])
batch_size = int(x[0,3])
print(nr_neurons, nr_hiddenlayers, epoch, batch_size)
#regressor = build_regressor(nr_neurons, nr_hiddenlayers)
#callbacks = [EarlyStopping(monitor='val_loss', patience=2), ModelCheckpoint(filepath = 'best_model_NN',monitor = 'val-loss', save_best_only = 'True')]
neural_network = KerasRegressor(build_fn = build_regressor,
batch_size= batch_size,
epochs = epoch,
nr_neurons = nr_neurons,
nr_hiddenlayers = nr_hiddenlayers,
nr_features = nr_features,
verbose = 2)
cv_result = cross_val_score(estimator = neural_network,
X = X_train, y = y_train,
cv=5, verbose =3,
scoring = 'mean_squared_error',
fit_params = {'callbacks':[EarlyStopping(monitor='val_loss', patience=5),
ModelCheckpoint(filepath = 'best_model_NN',monitor = 'val-loss', save_best_only = 'True')],
'validation_data' : (X_test, y_test)})
loss = cv_result.mean() #regressor.evaluate(X_test, y_test)
print ('The loss of the CV = ', loss)
print ('')
return loss#regressor.evaluate(X_test, y_test)
mixed_domain =[{'name': 'nr_neurons' , 'type': 'discrete', 'domain': [x for x in range(6,20)] },
{'name': 'nr_hiddenlayers', 'type': 'discrete', 'domain': [x for x in range(1,5)] },
{'name': 'epochs' , 'type': 'discrete', 'domain': [x for x in range(2,3)] },
{'name': 'batch_size' , 'type': 'discrete', 'domain': [x for x in range(150,200)] }]
myProblem = GPyOpt.methods.BayesianOptimization(f = hyp_optimizing,
domain = mixed_domain,
initial_design_numdata = 2,
acquisition_type = 'EI_MCMC',
model_type = 'GP_MCMC',
maximize = True)
myProblem.run_optimization(1)
myProblem.plot_convergence()
myProblem.plot_acquisition()
#------ Train best model ------------------------------------------------------------------------------------------
print(myProblem.x_opt, myProblem.fx_opt)
hyper_train = myProblem.x_opt
nr_neurons, nr_hiddenlayers, nr_epochs, batch_size = int(hyper_train[0]), int(hyper_train[1]), int(hyper_train[2]), int(hyper_train[3])
'''
nr_neurons, nr_hiddenlayers, nr_epochs, batch_size = 20,10,100, 150
'''
filepath = r"PointBasedMethods\ModelCheckPoint\Best_NN_model-{val_loss:.4f}.hdf5"
#Remove files from modelcheckpoint --> otherwise error occurs
for c in os.listdir(ModelCheckPoint_folder):
full_path = os.path.join(ModelCheckPoint_folder, c)
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
'''
regressor = build_regressor(nr_neurons, nr_hiddenlayers, nr_features)
#regressor = build_regressor(15, 8, nr_features)
#callbacks = [EarlyStopping(monitor='val_loss', patience=5), ModelCheckpoint(filepath ,mode= 'min',monitor = 'val_loss', save_best_only = 'True', verbose=1)]
regressor.fit(X_train, y_train, batch_size = batch_size, epochs = nr_epochs, verbose = 2)  # callbacks = callbacks, validation_data = (X_test, y_test); `epochs` replaces the deprecated `nb_epoch`
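# Evaluation sketch (an illustrative addition; assumes the fit above has
# completed): report the test-set MSE and a few example predictions.
test_mse = regressor.evaluate(X_test, y_test, verbose=0)
print('Test MSE:', test_mse)
print('Sample predictions:', regressor.predict(X_test[:5]).flatten())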
|
[
"noreply@github.com"
] |
github-ruben.noreply@github.com
|
8ebabb8929c847e3c9edcd7a71bcd0940adfa0c2
|
d44bfb67b8b19f3773558870a71a42e0cd3ec002
|
/telemetry-library/telemetry/telemetry_mqtt.py
|
cc84776095c2500d7a5842bdd8449b8635c5956f
|
[
"Apache-2.0"
] |
permissive
|
Abstract-Horizon/pyros-telemetry
|
764cdbb8cc98b7d72b1b2a04490c4989c003cbd2
|
7ecb5deaf266689555cbf0721f9c156e4dfe28d7
|
refs/heads/master
| 2023-08-17T19:52:24.684594
| 2021-10-11T17:46:40
| 2021-10-11T17:46:40
| 272,370,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,580
|
py
|
################################################################################
# Copyright (C) 2016-2020 Abstract Horizon
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License v2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# Daniel Sendula - initial API and implementation
#
#################################################################################
import paho.mqtt.client as mqtt
import random
import re
import sys
import threading
import time
import traceback
from telemetry.telemetry_logger import TelemetryLogger, LocalPipeTelemetryLoggerDestination, PubSubTelemetryLoggerClient
from telemetry.telemetry_client import PubSubTelemetryClient
class MQTTLocalPipeTelemetryLogger(TelemetryLogger):
def __init__(self, stream_name, host="localhost", port=1883, topic='telemetry'):
self.mqtt = MQTTWrapper(host, port)
super(MQTTLocalPipeTelemetryLogger, self).__init__(stream_name,
destination=LocalPipeTelemetryLoggerDestination(),
telemetry_client=PubSubTelemetryLoggerClient(topic, self.mqtt.publish, self.mqtt.subscribe))
def init(self):
while not self.mqtt.is_connected():
self.mqtt.loop(0.02)
super(MQTTLocalPipeTelemetryLogger, self).init()
while not self.stream_ready and self.registration_error == 0:
self.mqtt.loop(0.02)
class MQTTTelemetryClient(PubSubTelemetryClient):
def __init__(self, host="localhost", port=1883, topic='telemetry'):
self.mqtt = MQTTWrapper(host, port)
super(MQTTTelemetryClient, self).__init__(topic, self.mqtt.publish, self.mqtt.subscribe)
class MQTTWrapper:
def __init__(self, host="localhost", port=1883, auto_init=True):
self.client = None
self.host = host
self.port = port
self.name = "telemetry-server-" + str(random.randint(10000, 99999))
self._subscribers = []
self._regexToLambda = {}
self._received = False
self.connected = False
if auto_init:
self.init()
def init(self, wait_to_connect=True):
self.client = mqtt.Client(self.name)
self.client.on_disconnect = self._on_disconnect
self.client.on_connect = self._on_connect
self.client.on_message = self._on_message
if self.host is not None:
self._connect()
if wait_to_connect:
print(" " + self.name + " waiting to connect to broker...")
while not self.connected:
self.loop(0.02)
print(" " + self.name + " connected to broker.")
def _connect(self):
self.connected = False
if self.client is not None:
try:
self.client.disconnect()
except Exception:
pass
self.client.connect_async(self.host, self.port, 60)
thread = threading.Thread(target=self._reconnect)
thread.daemon = True
thread.start()
def _on_disconnect(self, _mqtt_client, _data, _rc):
self._connect()
def _on_connect(self, mqtt_client, _data, _flags, rc):
if rc == 0:
self.connected = True
for subscriber in self._subscribers:
mqtt_client.subscribe(subscriber, 0)
else:
print("ERROR: Connection returned error result: " + str(rc))
sys.exit(rc)
    def _on_message(self, _mqtt_client, _data, msg):
        # mark on the instance that a message arrived so the polling loops keep draining
        self._received = True
topic = msg.topic
try:
for regex in self._regexToLambda:
matching = regex.match(topic)
if matching:
method = self._regexToLambda[regex]
method(topic, msg.payload)
return
except Exception as ex:
print("ERROR: Got exception in on message processing; " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
def _reconnect(self):
try:
self.client.reconnect()
except Exception:
pass
def publish(self, topic, message):
if self.connected:
self.client.publish(topic, message)
def subscribe(self, topic, method):
self._subscribers.append(topic)
regex_string = "^" + topic.replace("+", "([^/]+)").replace("#", "(.*)") + "$"
regex = re.compile(regex_string)
self._regexToLambda[regex] = method
if self.connected:
self.client.subscribe(topic, 0)
def is_connected(self):
return self.connected
    def sleep(self, delta_time):
        self.loop(delta_time)
def loop(self, delta_time, _inner=None):
current_time = time.time()
self._received = False
self.client.loop(0.0005) # wait for 0.5 ms
until = current_time + delta_time
while current_time < until:
if self._received:
self._received = False
                self.client.loop(0.0005)  # wait for 0.5 ms
current_time = time.time()
else:
time.sleep(0.002) # wait for 2 ms
current_time = time.time()
if current_time + 0.0005 < until:
                    self.client.loop(0.0005)  # wait for 0.5 ms
current_time = time.time()
def forever(self, delta_time, outer=None, inner=None):
current_time = time.time()
next_time = current_time
while True:
next_time = next_time + delta_time
try:
if outer is not None:
outer()
except BaseException as ex:
print("ERROR: Got exception in main loop; " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
current_time = time.time()
sleep_time = next_time - current_time
if sleep_time < 0.002:
next_time = current_time
                self._received = False
                self.client.loop(0.0005)  # wait for 0.5 ms
                count = 10  # drain up to 10 queued messages
                while count > 0 and self._received:
                    self._received = False
                    count -= 1
                    self.client.loop(0.0005)  # wait for 0.5 ms
else:
self.loop(sleep_time, inner=inner)
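if __name__ == "__main__":
    # Minimal usage sketch (assumes an MQTT broker on localhost:1883; the
    # topic names are illustrative): subscribe, publish one message, then
    # pump the network loop for a second.
    wrapper = MQTTWrapper()
    wrapper.subscribe("telemetry/demo/#", lambda topic, payload: print(topic, payload))
    wrapper.publish("telemetry/demo/greeting", "hello")
    wrapper.loop(1.0)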
|
[
"natdan@users.noreply.github.com"
] |
natdan@users.noreply.github.com
|
35aed6992cd479774142420d442e2ef3961ae208
|
77fd9e792b1e72ee4522ed6173b51b7ae923bf8f
|
/p5/gamefile/sketch.py
|
f7eddf763efd4b55c8d2ea4d46bc53a70cf21371
|
[] |
no_license
|
christophejacques/examples
|
28e2f46ca8b60ff3adb48eb8f03e2693c01f81a0
|
80ad880298bf9f6a2f32f87ec90e43068380c7a5
|
refs/heads/master
| 2023-08-17T01:30:19.180254
| 2023-08-05T18:13:16
| 2023-08-05T18:13:16
| 188,475,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
import random
from __init__ import background, createCanvas, stroke, fill, circle, rect, square, rectMode
from __init__ import P5, strokeWeight, noStroke, map, Vector, line
from __init__ import *
SIZE = 10
ROWS, COLS = 0, 0
grid = []
updating = True
olds = 0
compteur = 0
def nbVoisins(x, y):
somme = 0
for i in range(max(0, x-1), min(COLS, x+2)):
for j in range(max(0, y-1), min(ROWS, y+2)):
somme += grid[i][j]
return somme
def init(grid, rand=True):
grid.clear()
for _ in range(COLS):
grid.append([random.randint(0, 1) if rand else 0 for _ in range(ROWS)])
def update():
global grid
newgrid = []
init(newgrid, False)
for i, col in enumerate(grid):
for j, c in enumerate(col):
nb = nbVoisins(i, j)
            # nbVoisins includes the cell itself, so a live cell survives when
            # the 3x3 sum is 3 or 4 (i.e. 2 or 3 live neighbours) and a dead
            # cell is born when the sum is exactly 3 -- Conway's rules
            if grid[i][j] == 1 and nb in (3, 4) or grid[i][j] == 0 and nb == 3:
newgrid[i][j] = 1
else:
newgrid[i][j] = 0
grid = newgrid.copy()
def keyReleased():
global updating
if P5.keyCode == pygame.K_SPACE:
updating = not updating
elif P5.keyCode in (pygame.K_KP_ENTER, pygame.K_RETURN):
init(grid)
def mousePressed():
x = P5.mouseX // SIZE
y = P5.mouseY // SIZE
grid[y][x] = 1
def setup():
global COLS, ROWS
createCanvas(600, 400)
ROWS = P5.WIDTH // SIZE
COLS = P5.HEIGHT // SIZE
init(grid)
fill(0, 50, 50)
stroke(0)
def draw():
global olds, compteur
if P5.mouseIsPressed:
mousePressed()
s = sum([sum(x) for x in grid])
if olds == s:
compteur += 1
else:
compteur = 0
olds = s
if compteur > 100:
init(grid)
background(0)
for i, col in enumerate(grid):
for j, c in enumerate(col):
if c == 1:
fill(200, 200, 200)
else:
fill(0, 50, 50)
square(j*SIZE, i*SIZE, SIZE)
if not P5.mouseIsPressed and updating:
update()
__import__("run")
|
[
"cjacques.programmes@numericable.fr"
] |
cjacques.programmes@numericable.fr
|
414d29786eb51284f28473d7090b7778c546c6c3
|
dd860973103347b382d8a04ef68a9376561725ea
|
/wazimap_ng/profile/serializers/highlights_serializer.py
|
003ee147dc77b5daa5f28a8c58f772775589eb56
|
[
"Apache-2.0"
] |
permissive
|
mauricemojito/wazimap-ng
|
7a7da6c9fc653054c376d77c22df120ed0abb653
|
e03748cb1258cbafb43faba441bbc37dd0556a2a
|
refs/heads/master
| 2023-03-12T01:45:53.173039
| 2020-09-30T13:55:29
| 2020-09-30T13:55:29
| 342,342,503
| 0
| 0
|
Apache-2.0
| 2021-02-25T18:55:09
| 2021-02-25T18:34:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
from wazimap_ng.datasets.models import IndicatorData
from wazimap_ng.utils import mergedict
def get_subindicator(highlight):
subindicators = highlight.indicator.subindicators
idx = highlight.subindicator if highlight.subindicator is not None else 0
return subindicators[idx]
def sibling(highlight, geography):
siblings = geography.get_siblings()
indicator_data = IndicatorData.objects.filter(indicator__profilehighlight=highlight, geography__in=siblings)
subindicator = get_subindicator(highlight)
numerator = None
denominator = 0
for datum in indicator_data:
if datum.geography == geography:
numerator = datum.data["subindicators"].get(subindicator, 0)
        denominator += datum.data["subindicators"].get(subindicator, 0)
if denominator > 0 and numerator is not None:
return numerator / denominator
return None
def absolute_value(highlight, geography):
indicator_data = IndicatorData.objects.filter(indicator__profilehighlight=highlight, geography=geography)
if indicator_data.count() > 0:
subindicator = get_subindicator(highlight)
data = indicator_data.first().data # TODO what to do with multiple results
return data["subindicators"].get(subindicator, 0)
return None
def subindicator(highlight, geography):
indicator_data = IndicatorData.objects.filter(indicator__profilehighlight=highlight, geography=geography)
if indicator_data.count() > 0:
        indicator_data = indicator_data.first()  # TODO: handle multiple results
subindicator = get_subindicator(highlight)
numerator = indicator_data.data["subindicators"].get(subindicator, 0)
denominator = 0
for datum, count in indicator_data.data["subindicators"].items():
denominator += count
if denominator > 0 and numerator is not None:
return numerator / denominator
return None
algorithms = {
"absolute_value": absolute_value,
"sibling": sibling,
"subindicators": subindicator
}
def HighlightsSerializer(profile, geography):
highlights = []
profile_highlights = profile.profilehighlight_set.all().order_by("order")
for highlight in profile_highlights:
denominator = highlight.denominator
method = algorithms.get(denominator, absolute_value)
val = method(highlight, geography)
if val is not None:
highlights.append({"label": highlight.label, "value": val, "method": denominator})
return highlights
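# Shape of the serialized output, with illustrative values only:
# HighlightsSerializer(profile, geography) ->
#   [{"label": "Youth unemployment", "value": 0.42, "method": "sibling"},
#    {"label": "Population", "value": 1039737.0, "method": "absolute_value"}]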
|
[
"adi@openup.org.za"
] |
adi@openup.org.za
|
1cf5824c168f9b478a4172a158f56e06d2c4acc3
|
3110d538f3801370d5853be7f7388dfcd1c47064
|
/app/migrations/0008_answer_empresa.py
|
a2e2b46945259e9daa7c4be7d2bfc58008f54e85
|
[] |
no_license
|
thiagosouza448/ask-tech
|
1090e8ed0986e86cf7f23dae727c1c9b4652b2ad
|
765ea936b17ac50919fa96af8b504ee7bdedfb9c
|
refs/heads/master
| 2023-08-10T12:40:33.374088
| 2023-06-13T13:11:51
| 2023-06-13T13:11:51
| 223,251,326
| 0
| 0
| null | 2023-07-25T21:13:00
| 2019-11-21T19:40:59
|
Python
|
UTF-8
|
Python
| false
| false
| 510
|
py
|
# Generated by Django 2.1 on 2018-08-19 16:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0007_answer_data_resposta'),
]
operations = [
migrations.AddField(
model_name='answer',
name='empresa',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='app.Empresa'),
preserve_default=False,
),
]
|
[
"tgraca@grupogbi.com"
] |
tgraca@grupogbi.com
|
3291856511a2416a25502d0b24cfff8ea292b989
|
a5db32e65fac3b30a50c3ee0e049a180db491a87
|
/checkf2.py
|
f07fab697157f670f308b5fd5f3853d45dac34d7
|
[] |
no_license
|
ulibanesu/project
|
d3ea9db5be57b22201c48c7a8ae991ac438ecd86
|
13b703a8fc4f21c315c79b17f20cf4cc7513d20b
|
refs/heads/master
| 2020-04-06T07:10:07.319812
| 2016-09-01T20:57:06
| 2016-09-01T20:57:06
| 62,409,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:792eae919c97ecd16cd24965e89be8385de6483f1ba1247940486a64db21a9bf
size 1613
|
[
"olivier.khatib@gmail.com"
] |
olivier.khatib@gmail.com
|
910e66c7519bc439e5c9b699b496a6a967a0b70a
|
bef3ef5c44d97c468b5359b81bb8c4c4b95f151d
|
/Api1/tests/resources/test_login.py
|
6df447b1a79e092b571bd91388fcc1d5f23efa04
|
[] |
no_license
|
stsiwo/sts_blogs
|
3a90f883f65b4a680f0b271e3792c63bcdc25aef
|
57231d4f040e5d4be557dc14cc583d269b812602
|
refs/heads/master
| 2023-01-14T01:37:22.538717
| 2020-04-02T18:03:52
| 2020-04-02T18:03:52
| 212,732,622
| 0
| 0
| null | 2023-01-04T23:30:34
| 2019-10-04T04:00:43
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,668
|
py
|
from flask import Response
import json
from utils.util import prettyPrint
from flask_jwt_extended import decode_token
def test_login_endpoint_no_json_data_should_response_with_400(client):
rv = client.post('/login')
assert 400 == rv.status_code
def test_login_endpoint_no_json_data_should_response_with_400_with_error_msg(client):
rv: Response = client.post('/login')
data = json.loads(rv.get_data())
assert 'message' in data
def test_login_endpoint_no_json_data_should_response_with_400_with_bundle_error_msg(client):
rv: Response = client.post('/login')
data = json.loads(rv.get_data())
assert len(data['message']) > 1
def test_user_logined_successfully(client, usersSeededFixture, httpHeaders):
rv = client.post('/login', 'http://localhost', json={
'email': 'test@test.com',
'password': 'test'
}, headers=httpHeaders)
assert 200 == rv.status_code
def test_user_logined_successfully_and_get_jwt_tokens(
client,
usersSeededFixture,
httpHeaders):
rv = client.post('/login', 'http://localhost', json={
'email': 'test@test.com',
'password': 'test'
}, headers=httpHeaders)
cookies = [cookie[1] for cookie in rv.headers if (cookie[0] == 'Set-Cookie')]
assert 200 == rv.status_code
assert any('access_token' in s for s in cookies) is True
assert any('refresh_token' in s for s in cookies) is True
assert any('csrf_access_token' in s for s in cookies) is True
assert any('csrf_refresh_token' in s for s in cookies) is True
def test_user_logined_successfully_and_token_include_role_claim(
client,
application,
usersSeededFixture,
httpHeaders
):
rv = client.post('/login', 'http://localhost', json={
'email': 'test@test.com',
'password': 'test'
}, headers=httpHeaders)
access_token = [cookie[1].replace(";", "=").split("=")[1] for cookie in rv.headers if (cookie[0] == 'Set-Cookie' and 'access_token' in cookie[1])]
user_claims = None
with application.app_context():
prettyPrint(decode_token(access_token[0]))
user_claims = decode_token(access_token[0])['user_claims']
assert 200 == rv.status_code
assert user_claims.get('name') is not None
assert user_claims.get('roles') is not None
def test_user_logined_failed_since_not_found_and_receive_404(
client,
usersSeededFixture,
httpHeaders):
rv = client.post('/login', 'http://localhost', json={
'email': 'not-test@test.com',
'password': 'test'
}, headers=httpHeaders)
assert 404 == rv.status_code
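# The fixtures used above (client, application, usersSeededFixture,
# httpHeaders) come from the project's conftest.py. A minimal httpHeaders
# sketch, hypothetical and for illustration only:
#
# @pytest.fixture
# def httpHeaders():
#     return {'Content-Type': 'application/json'}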
|
[
"stsiwo@gmail.com"
] |
stsiwo@gmail.com
|
825adaf12bd86b90c9eab39068698a70bb4a198e
|
433fe4a402a29737d580661ff23bbfe3b6b19729
|
/pyloom/worker.py
|
2eeba4ac84b2478359b1bc586833b8e0fe056404
|
[] |
no_license
|
forgeries/PyLoom
|
082de65674039d5d315280215f7cf66335aa04dd
|
cf447ec3a22d7a9562bc55509f0246affe7b15aa
|
refs/heads/master
| 2021-10-27T03:04:02.710632
| 2019-04-15T15:57:38
| 2019-04-15T15:57:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,765
|
py
|
import redis
import signal
import traceback
import threading
import multiprocessing
from . import buckets
from .utils import *
from .tasks import Task, execute
from .scheduler import Spider, Queue
logger = logging.getLogger("worker")
def worker_process(redis_conf, spiders, threads, token_curr, token_new):
"""
    Worker child process; responsible for starting the worker threads
    Args:
        redis_conf: redis database URL
        spiders: table of all spider configs, {name: (path, version)}
        threads: number of threads
        token_curr: the token at the time this process was created
        token_new: the latest token held by the parent process
            When token_curr differs from token_new, the parent has updated the routing table;
            each thread must exit on its own after finishing its current life cycle
"""
logger.debug("Worker进程已启动")
# Manager的共享变量在并发启动过多进程时会出现ConnectionRefusedError
for _ in range(60):
try:
spiders.items()
break
except Exception:
pass
else:
logger.fatal("Worker进程退出,spiders超时未就绪")
return
thread_ids = []
    # Build the router, {name: [[regex, task]...]}
routers = {}
for name, (path, version) in spiders.items():
tasks = import_tasks(path)
if tasks:
routers[name] = tasks
logger.info("载入爬虫成功", name, version)
else:
logger.info("载入爬虫失败,未发现合规Task类", name, version)
    # Start the threads
    try:
        logger.info("Starting worker threads")
        signal.signal(signal.SIGINT, signal.SIG_IGN)  # ignore Ctrl+C
for thread_index in range(threads):
thread = threading.Thread(
target=worker_thread,
args=(redis_conf, routers, token_curr, token_new, thread_index)
)
thread.start()
thread_ids.append(thread)
logger.info("Worker线程启动成功")
except Exception as e:
logger.fatal("Worker进程结束,启动Worker线程时出现异常", e, '\n', traceback.format_exc())
return
for i in itertools.count():
try:
            # Purge this process's expired keys
            if i % 500 == 0:
                count = buckets.LocalBucket.purge()
                if count:
                    logger.debug("LocalBucket purge finished", count)
            # End the process once all threads have exited
            if not any([t.is_alive() for t in thread_ids]):
                logger.info("Worker process exiting: all threads have exited")
                return
            time.sleep(2)
        except Exception as e:
            logger.fatal("Worker process exception", e, '\n', traceback.format_exc())
time.sleep(5)
def worker_thread(redis_conf, routers, token_curr, token_new, thread_index):
"""
    Loop: claim a task -> execute it -> report the result
    All exceptions are caught inside the thread; it never exits (except on Ctrl+C)
    """
    logger.debug("Worker thread started")
db = redis.StrictRedis.from_url(redis_conf)
pop_failure_count = 0
while True:
try:
            # Exit the thread when signalled
            try:
                if token_curr != token_new.value:
                    logger.info("Worker thread exiting: received stop signal")
                    return
            except ConnectionRefusedError:
                logger.debug("Token not ready")
                time.sleep(1)
                continue
            except (BrokenPipeError, FileNotFoundError, EOFError):
                logger.info("Worker thread exiting: token closed")
return
            # Pop a URL from the queue
            if not routers:
                logger.info("Local spider list is empty, waiting for spiders to load")
                while not routers:
                    time.sleep(1)
            keys = list(routers.keys())
            url, name, address = Queue.pop(db, keys)
            if not url:
                if pop_failure_count % 20 == 0:  # avoid flooding the log
                    logger.debug("No task ready yet, retrying shortly")
                time.sleep(thread_index / 10 + 0.1)
                pop_failure_count += 1
                continue
            logger.info("Task acquired", name, url, address)
pop_failure_count = 0
            # Match a Task class and execute it
            tasks = routers.get(name, None)
            queue = Queue(db, name)
            if tasks is None:
                logger.warning("Spider match failed", name, url)
                queue.report_error("none_spider", url)
                continue
            for regex, task_cls in tasks:
                if not regex.match(url):
                    continue
                # Instantiate the Task and execute it
                task = task_cls(name, url, db, address)
                links = execute(task)
                for priority, urls in links.items():
                    count = queue.add(urls, priority)
                    logger.debug("Tasks added", priority, f"{count}/{len(urls)}")
                logger.debug("Task completion reported", queue.report_finish(url), url)
                break
            else:
                logger.warning("Task match failed", name, url)
                queue.report_error("none_task", url)
        except Exception as e:
            logger.error("Worker thread exception", e, '\n', traceback.format_exc())
time.sleep(5)
def import_tasks(path):
"""
    Scan a spider module and import the Tasks it defines
Return:
[[regex, task]...]
"""
tasks = []
    # Import the module
parent = os.path.dirname(path)
if parent not in sys.path:
sys.path.append(parent)
basename = os.path.basename(path)
try:
logger.debug("加载爬虫模块", basename)
_module = importlib.import_module(basename)
except Exception as e:
logger.error("加载爬虫模块异常", e, '\n', traceback.format_exc())
return []
    # Scan the module for conforming Task subclasses
    # What counts as conforming?
    # 1. a subclass of Task; 2. has a filters member; 3. imports without error; 4. name does not start with '__'
for name in dir(_module):
if name.startswith("__"):
continue
var = getattr(_module, name)
try:
is_subclass = issubclass(var, Task)
except TypeError:
continue
try:
if is_subclass:
if hasattr(var, 'filters') and isinstance(var.filters, (list, tuple, str)):
if isinstance(var.filters, str):
filters = [var.filters]
else:
filters = var.filters
for regex in filters:
tasks.append([re.compile(regex), var])
logger.info("导入Task类", var.__name__)
else:
logger.warning("忽略Task类", var.__name__, "filters不合规")
continue
else:
continue
except Exception as e:
logger.error("加载Task类异常", e, '\n', traceback.format_exc())
continue
return tasks
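# Illustrative sketch of a spider Task that import_tasks would accept
# (hypothetical names; only the `filters` contract checked above is assumed):
#
# class ListPageTask(Task):
#     filters = r"^https://example\.com/list/\d+$"
#
#     ... task logic as defined by pyloom.tasks.Task ...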
def start(spider_path, redis_conf, spider_configs, proxies, processes, threads):
"""
    Reset the spider's state, then run the given spider
    Args:
        spider_path: spider directory
        redis_conf: Redis config
        spider_configs: spider configuration
        proxies: run through proxies
        processes: number of processes
        threads: number of threads per process
"""
logger.info("正在启动爬虫")
db = redis.StrictRedis.from_url(redis_conf)
name = os.path.basename(spider_path) # 取目录名为爬虫名
RedisScripts.load(db)
spider = Spider(db, name)
    # Register the spider / update the config of an existing spider with the same name
    logger.info("Registering spider", name)
    logger.info("Spider config", spider_configs)
spider.upsert(spider_configs['seeders'], spider_configs['interval'],
spider_configs['timeout'], spider_configs['precision'],
spider_configs['args'], proxies, time.time())
    # Reset the spider status
    status = spider.get_field("status")
    if status != 10:
        spider.set_field("status", 10)
        logger.info("Reset spider status", f"{status} -> 10")
    # Roll back the 'timeout' error queue
    queue = Queue(db, name)
    logger.debug("Purging Redis")
    Queue.purge(db)
    logger.info("Rolling back timed-out tasks")
queue.rollback_tag("timeout", 0)
    # Start the workers
    logger.info("Starting workers")
spiders = multiprocessing.Manager().dict({name: [spider_path, 0]})
pool = []
token = multiprocessing.Manager().Value('d', 0)
for _ in range(processes):
p = multiprocessing.Process(
target=worker_process,
args=(redis_conf, spiders, threads, token.value, token)
)
p.start()
pool.append(p)
logger.info("Worker启动成功")
try:
        # Poll the spider status in a loop; stop running once the spider stops
while True:
time.sleep(0.2)
spider = Spider(db, name)
status = spider.get_field("status")
if status < 10:
logger.info("爬虫停止,当前状态为:", Spider.status.get(status, "未知"))
break
except KeyboardInterrupt:
logger.info("收到Ctrl+C", 'main')
for p in pool:
p.terminate()
logger.info("爬虫停止", "Ctrl+C")
except Exception as e:
logger.error("爬虫停止", "未知异常", e, '\n', traceback.format_exc())
def start_all(redis_conf, spiders_path, processes, threads):
"""
    Start all spiders
    Args:
        redis_conf: Redis config
        spiders_path: directory containing all the spiders
        processes: number of processes
        threads: number of threads per process
"""
|
[
"ss@uutoto.com"
] |
ss@uutoto.com
|
e190d58d3252c1adf559ac5037d33c99ebfaa92f
|
d007e87ae0e76035ddcb25959e74303c44fb2e5e
|
/test_openCV/cards/test_opencv_exposure.py
|
dbc41e72e090c64e8ed61a9b6ac463f38fc53b7d
|
[] |
no_license
|
nju-luke/Test
|
a52ac1390c241d42a950a5eea1175771259e87ba
|
a70754e7cc619ab6363a35c5940efd5f82f78e6e
|
refs/heads/master
| 2020-07-12T22:56:02.517965
| 2017-09-28T06:31:15
| 2017-09-28T06:31:15
| 73,898,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
# -*- coding: utf-8 -*-
# @Time : 10/14/16 18:25
# @Author : Luke
# @Software: PyCharm
import cv2
import numpy as np
img1 = cv2.imread('11.jpg')
img2 = cv2.imread('22.jpg')
img3 = cv2.imread('33.jpg')
cv2.imshow("fig1.1",img1)
cv2.imshow("fig2.1",img2)
cv2.imshow("fig3.1",img3)
# Exposure correction: corrected = img + (1 - img) * img * mul, with mul
# solved so that the corrected mean is ~0.5:
#   mean(img + delta * mul) = 0.5  =>  mul = (0.5 - mean(img)) / mean(delta)
# where delta = (1 - img) * img. cv2.imshow expects float images in [0, 1].
for idx, img in enumerate((img1, img2, img3), start=1):
    img_f = np.asarray(img, dtype='float') / 255
    delta = (1 - img_f) * img_f
    mul = (0.5 - np.mean(img_f)) / np.mean(delta)
    corrected = img_f + delta * mul
    cv2.imshow("fig%d.2" % idx, corrected)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"nju.hyhb@gmail.com"
] |
nju.hyhb@gmail.com
|
f8c398deeee8b69e7cadf5ddbcf2202196b38563
|
50d7b77d1185320cf4989d5a096a073531991cc0
|
/app.py
|
2b440b1fc7bb0615f31398fc8cd8239f80214e7e
|
[] |
no_license
|
kashrocks/Market-Place-Backeng-API-Development
|
7380835351810a1970a9b8f9df4b2d6e971562a8
|
fdf1642210371c2254d1c0fd093fd6b3672bf5ff
|
refs/heads/master
| 2020-04-16T05:25:38.803466
| 2019-01-17T02:56:44
| 2019-01-17T02:56:44
| 165,304,464
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,161
|
py
|
# Shopify 2019 Developer Challenge
# Author: Kashyap Achar
# Date: 2019-01-12
# Python Version: 3.7.2
# README available at https://github.com/kashrocks/Shopify-Developer-Challenge
from flask import Flask, jsonify, request
app = Flask(__name__)
# global dictionary of shoes that customer will see
all_shoes = [
{
"title": "nike",
"price": 100,
"inventory_count": 3
},
{
"title": "adidas",
"price": 120,
"inventory_count": 15
},
{
"title": "allbirds",
"price": 90,
"inventory_count": 0
},
{
"title": "reebok",
"price": 60,
"inventory_count": 30
}
]
# customers cart initialization
cart = [{}, {'total_value': 0}]
@app.route('/', methods=['GET'])
def welcome():
# displays the welcome text and complete inventory
return jsonify({'customer_shopping_cart': cart},
{'welc_message': 'Welcome to the greatest backend market '
'place for all things shoes! Please see '
'below for our complete inventory'},
{'all_shoes': all_shoes})
@app.route('/inventory', methods=['GET'])
def inventory():
# displays inventory of shoes only of which have inventory greater than 0
to_show = []
for shoe in all_shoes:
if shoe['inventory_count'] > 0:
to_show.append(dict.copy(shoe))
return jsonify({'inventory': to_show})
@app.route('/inventory/<string:brand>', methods=['GET'])
def oneBrand(brand):
# displays a selected brand of shoe and shows inventory count, price
# and title
check = urlcheck(brand)
if not check:
return jsonify({'error': 'Invalid brand, please try again.'})
for shoe in all_shoes:
if shoe['title'] == brand:
return jsonify({'brand': shoe})
return
@app.route('/inventory/<string:brand>/add', methods=['GET'])
def addCart(brand):
# adds the selected brand to the shopping cart
# makes sure there is inventory of the shoe
check = urlcheck(brand)
if not check:
return jsonify({'error': 'Invalid brand, please try again.'})
for shoe in all_shoes:
if shoe['title'] == brand:
select = shoe
break
if select['inventory_count'] <= 0:
return jsonify({'error': 'The shoes you have selected are no longer '
'available'})
elif brand not in cart[0]:
# adds the brand to the cart if it was not there previously
cart[0][brand] = 0
elif select['inventory_count'] == cart[0][brand]:
# checks to see if there is actually enough inventory for the desired
# order
return jsonify(
{'msg': 'For this brand of shoe, you already have our stores '
'inventory worth in your cart. Thus we are unable to '
'add more of this brand to your cart.'})
cart[1]['total_value'] += select['price'] # updates the total value of cart
cart[0][brand] += 1 # updates quantity of the brand in cart
return jsonify(
{'msg': 'The shoes have been added to your cart. Please look at our '
'other shoes!'})
@app.route('/cart', methods=['GET'])
def showCart():
# shows customers shopping cart with its total value
# if the cart is empty will tell customer to shop more
if cart[1]['total_value'] == 0:
return jsonify({'msg': 'Your cart is empty. Shop some more!'})
return jsonify({'customer_cart': cart})
@app.route('/cart/del/<string:brand>', methods=['DELETE'])
def delfrCart(brand):
    # deletes one instance of a brand of shoe from the customer's cart
check = urlcheck(brand)
if not check:
return jsonify({'error': 'Invalid brand, please try again.'})
for shoe in all_shoes:
if shoe['title'] == brand:
s_val = shoe['price']
    for shoe in cart[0]:
        if shoe == brand:
cart[0][brand] -= 1 # lowers the count of the brand by one
if cart[0][brand] == 0:
# if the count is now 0, removes brand from cart
del cart[0][brand]
cart[1]['total_value'] -= s_val
# lowers the value of cart by value of the one brand
break
return showCart()
@app.route('/cart/buy', methods=['POST'])
def buyAll():
# purchases all shoes in the shopping cart and decreases inventory of each
if cart[0] == {}: # if cart is empty will display message
return jsonify({'msg': 'Your cart is empty. Shop some more!'})
for shoe in all_shoes: # decreases inventory of respective brand
if shoe['title'] in cart[0]:
shoe['inventory_count'] -= cart[0][shoe['title']]
cart[0], cart[1]['total_value'] = {}, 0 # resets the shopping cart
return jsonify(
{'msg': 'The shoes have been purchased and your shopping cart has '
'been reset. Thanks for shopping!'})
@app.route('/new', methods=['POST'])
def addBrand():
# adds a new brand of shoe to the stores inventory
params = request.get_json('Params')
all_shoes.append(params) # adds the brand to the story inventory
return inventory()
@app.route('/del/<string:brand>', methods=['DELETE'])
def delBrand(brand):
# deletes a brand of shoe from the stores inventory
check = urlcheck(brand)
if not check:
return jsonify({'error': 'Invalid brand, please try again.'})
for shoe in all_shoes:
if shoe['title'] == brand:
# finds index of brand and deletes from inventory
del all_shoes[all_shoes.index(shoe)]
break
return inventory()
def urlcheck(brand):
# checks to see if the brand entered in url is valid
for b in all_shoes:
if b['title'] == brand:
return True
return False
if __name__ == '__main__':
app.run(debug=True, port=6020)
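# Example requests against a running instance (hypothetical shell session,
# matching the routes defined above):
#   curl http://localhost:6020/inventory
#   curl http://localhost:6020/inventory/nike/add
#   curl http://localhost:6020/cart
#   curl -X POST http://localhost:6020/cart/buy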
|
[
"noreply@github.com"
] |
kashrocks.noreply@github.com
|
69f4c17eca81e4e9cf3c95d9894745677f59a7b4
|
42a5277bb987a623489330187847356682a41783
|
/spec/part_spec.py
|
c88fbcdf150154104728318883abc52e09049142
|
[] |
no_license
|
GestaoDeReuniao/gest-o-de-reuni-o
|
05d7bd1bb8014c77218589fbf344ba1ba0515102
|
9d1b8acfd1719866e7cbe766f94ab6b638c7ba1c
|
refs/heads/master
| 2021-01-19T07:24:30.293758
| 2014-03-11T22:55:52
| 2014-03-11T22:55:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
import unittest
from should_dsl import should
from part import Part
class PartSpec(unittest.TestCase):
    def test_criar_part(self):  # 'test_' prefix is required for unittest discovery
part = Part('123')
part.tempo |should| equal_to('123')
|
[
"thiagolessam@hotmail.com"
] |
thiagolessam@hotmail.com
|
229bb954ad637b3c780475f96f2f202e84d0a4db
|
c8cf1bdacdbf6de75e61cc6a2ce8617479c19ec6
|
/test/mobile/test_bytecode.py
|
95baa86d5763e5e308c727fa87d6d77972080125
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
Afonso-2403/pytorch
|
7f5ddf8370de938045b4ec412b98bef9dfc193ed
|
e35e6237d24b6c96b122deb21f015c0fe3eccb13
|
refs/heads/master
| 2023-08-21T18:43:43.019194
| 2021-09-13T17:58:00
| 2021-09-13T17:58:00
| 363,847,561
| 1
| 0
|
NOASSERTION
| 2021-07-08T19:06:16
| 2021-05-03T07:16:49
|
C++
|
UTF-8
|
Python
| false
| false
| 13,355
|
py
|
import fnmatch
import io
import shutil
import tempfile
import torch
import torch.utils.show_pickle
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
_load_for_lite_interpreter,
_get_model_bytecode_version,
_get_model_ops_and_info,
_backport_for_mobile_to_buffer,
_backport_for_mobile)
from torch.testing._internal.common_utils import TestCase, run_tests
from pathlib import Path
pytorch_test_dir = Path(__file__).resolve().parents[1]
# script_module_v4.ptl and script_module_v5.ptl source code
# class TestModule(torch.nn.Module):
# def __init__(self, v):
# super().__init__()
# self.x = v
# def forward(self, y: int):
# increment = torch.ones([2, 4], dtype=torch.float64)
# return self.x + y + increment
# output_model_path = Path(tmpdirname, "script_module_v5.ptl")
# script_module = torch.jit.script(TestModule(1))
# optimized_scripted_module = optimize_for_mobile(script_module)
# exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(
# str(output_model_path))
SCRIPT_MODULE_V4_BYTECODE_PKL = '''
(4,
('__torch__.*.TestModule.forward',
(('instructions',
(('STOREN', 1, 2),
('DROPR', 1, 0),
('LOADC', 0, 0),
('LOADC', 1, 0),
('MOVE', 2, 0),
('OP', 0, 0),
('LOADC', 1, 0),
('OP', 1, 0),
('RET', 0, 0))),
('operators', (('aten::add', 'int'), ('aten::add', 'Scalar'))),
('constants',
(torch._utils._rebuild_tensor_v2(pers.obj(('storage', torch.DoubleStorage, '0', 'cpu', 8),),
0,
(2, 4),
(4, 1),
False,
collections.OrderedDict()),
1)),
('types', ()),
('register_size', 2)),
(('arguments',
((('name', 'self'),
('type', '__torch__.*.TestModule'),
('default_value', None)),
(('name', 'y'), ('type', 'int'), ('default_value', None)))),
('returns',
((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
'''
SCRIPT_MODULE_V5_BYTECODE_PKL = '''
(5,
('__torch__.*.TestModule.forward',
(('instructions',
(('STOREN', 1, 2),
('DROPR', 1, 0),
('LOADC', 0, 0),
('LOADC', 1, 0),
('MOVE', 2, 0),
('OP', 0, 0),
('LOADC', 1, 0),
('OP', 1, 0),
('RET', 0, 0))),
('operators', (('aten::add', 'int'), ('aten::add', 'Scalar'))),
('constants',
(torch._utils._rebuild_tensor_v2(pers.obj(('storage', torch.DoubleStorage, 'constants/0', 'cpu', 8),),
0,
(2, 4),
(4, 1),
False,
collections.OrderedDict()),
1)),
('types', ()),
('register_size', 2)),
(('arguments',
((('name', 'self'),
('type', '__torch__.*.TestModule'),
('default_value', None)),
(('name', 'y'), ('type', 'int'), ('default_value', None)))),
('returns',
((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
'''
SCRIPT_MODULE_V6_BYTECODE_PKL = '''
(6,
('__torch__.*.TestModule.forward',
(('instructions',
(('STOREN', 1, 2),
('DROPR', 1, 0),
('LOADC', 0, 0),
('LOADC', 1, 0),
('MOVE', 2, 0),
('OP', 0, 0),
('OP', 1, 0),
('RET', 0, 0))),
('operators', (('aten::add', 'int', 2), ('aten::add', 'Scalar', 2))),
('constants',
(torch._utils._rebuild_tensor_v2(pers.obj(('storage', torch.DoubleStorage, '0', 'cpu', 8),),
0,
(2, 4),
(4, 1),
False,
collections.OrderedDict()),
1)),
('types', ()),
('register_size', 2)),
(('arguments',
((('name', 'self'),
('type', '__torch__.*.TestModule'),
('default_value', None)),
(('name', 'y'), ('type', 'int'), ('default_value', None)))),
('returns',
((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
'''
SCRIPT_MODULE_BYTECODE_PKL = {
4: {
"bytecode_pkl": SCRIPT_MODULE_V4_BYTECODE_PKL,
"model_name": "script_module_v4.ptl"},
}
# The minimum version a model can be backported to
# Need to be updated when a bytecode version is completely retired
MINIMUM_TO_VERSION = 4
class testVariousModelVersions(TestCase):
def test_get_model_bytecode_version(self):
def check_model_version(model_path, expect_version):
actual_version = _get_model_bytecode_version(model_path)
assert(actual_version == expect_version)
for version, model_info in SCRIPT_MODULE_BYTECODE_PKL.items():
model_path = pytorch_test_dir / "cpp" / "jit" / model_info["model_name"]
check_model_version(model_path, version)
def test_bytecode_values_for_all_backport_functions(self):
# Find the maximum version of the checked in models, start backporting to the minimum support version,
# and comparing the bytecode pkl content.
# It can't be merged to the test `test_all_backport_functions`, because optimization is dynamic and
# the content might change when optimize function changes. This test focuses
# on bytecode.pkl content validation. For the content validation, it is not byte to byte check, but
# regular expression matching. The wildcard can be used to skip some specific content comparison.
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
current_from_version = maximum_checked_in_model_version
with tempfile.TemporaryDirectory() as tmpdirname:
while current_from_version > MINIMUM_TO_VERSION:
# Load model v5 and run forward method
model_name = SCRIPT_MODULE_BYTECODE_PKL[current_from_version]["model_name"]
input_model_path = pytorch_test_dir / "cpp" / "jit" / model_name
# A temporary model file will be export to this path, and run through bytecode.pkl
# content check.
tmp_output_model_path_backport = Path(tmpdirname, "tmp_script_module_backport.ptl")
current_to_version = current_from_version - 1
backport_success = _backport_for_mobile(input_model_path, tmp_output_model_path_backport, current_to_version)
assert(backport_success)
expect_bytecode_pkl = SCRIPT_MODULE_BYTECODE_PKL[current_to_version]["bytecode_pkl"]
buf = io.StringIO()
torch.utils.show_pickle.main(
["", tmpdirname + "/" + tmp_output_model_path_backport.name + "@*/bytecode.pkl"],
output_stream=buf)
output = buf.getvalue()
                actual_result_clean = "".join(output.split())
                expect_result_clean = "".join(expect_bytecode_pkl.split())
                isMatch = fnmatch.fnmatch(actual_result_clean, expect_result_clean)
assert(isMatch)
current_from_version -= 1
shutil.rmtree(tmpdirname)
# Please run this test manually when working on backport.
# This test passes in OSS, but fails internally, likely due to missing step in build
# def test_all_backport_functions(self):
# # Backport from the latest bytecode version to the minimum support version
# # Load, run the backport model, and check version
# class TestModule(torch.nn.Module):
# def __init__(self, v):
# super().__init__()
# self.x = v
# def forward(self, y: int):
# increment = torch.ones([2, 4], dtype=torch.float64)
# return self.x + y + increment
# module_input = 1
# expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
# # temporary input model file and output model file will be exported in the temporary folder
# with tempfile.TemporaryDirectory() as tmpdirname:
# tmp_input_model_path = Path(tmpdirname, "tmp_script_module.ptl")
# script_module = torch.jit.script(TestModule(1))
# optimized_scripted_module = optimize_for_mobile(script_module)
# exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(str(tmp_input_model_path))
# current_from_version = _get_model_bytecode_version(tmp_input_model_path)
# current_to_version = current_from_version - 1
# tmp_output_model_path = Path(tmpdirname, "tmp_script_module_backport.ptl")
# while current_to_version >= MINIMUM_TO_VERSION:
# # Backport the latest model to `to_version` to a tmp file "tmp_script_module_backport"
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, current_to_version)
# assert(backport_success)
# backport_version = _get_model_bytecode_version(tmp_output_model_path)
# assert(backport_version == current_to_version)
# # Load model and run forward method
# mobile_module = _load_for_lite_interpreter(str(tmp_input_model_path))
# mobile_module_result = mobile_module(module_input)
# torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
# current_to_version -= 1
# # Check backport failure case
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, MINIMUM_TO_VERSION - 1)
# assert(not backport_success)
# # need to clean the folder before it closes, otherwise will run into git not clean error
# shutil.rmtree(tmpdirname)
# Check just the test_backport_bytecode_from_file_to_file mechanism but not the function implementations
def test_backport_bytecode_from_file_to_file(self):
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
script_module_v5_path = pytorch_test_dir / "cpp" / "jit" / SCRIPT_MODULE_BYTECODE_PKL[
maximum_checked_in_model_version]["model_name"]
if (maximum_checked_in_model_version > MINIMUM_TO_VERSION):
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_backport_model_path = Path(tmpdirname, "tmp_script_module_v5_backported_to_v4.ptl")
# backport from file
success = _backport_for_mobile(
script_module_v5_path,
tmp_backport_model_path,
maximum_checked_in_model_version - 1)
assert(success)
buf = io.StringIO()
torch.utils.show_pickle.main(
["", tmpdirname + "/" + tmp_backport_model_path.name + "@*/bytecode.pkl"],
output_stream=buf)
output = buf.getvalue()
expected_result = SCRIPT_MODULE_V4_BYTECODE_PKL
                actual_result_clean = "".join(output.split())
                expect_result_clean = "".join(expected_result.split())
                isMatch = fnmatch.fnmatch(actual_result_clean, expect_result_clean)
assert(isMatch)
# Load model v4 and run forward method
mobile_module = _load_for_lite_interpreter(str(tmp_backport_model_path))
module_input = 1
mobile_module_result = mobile_module(module_input)
expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
shutil.rmtree(tmpdirname)
# Check just the _backport_for_mobile_to_buffer mechanism but not the function implementations
def test_backport_bytecode_from_file_to_buffer(self):
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
script_module_v5_path = pytorch_test_dir / "cpp" / "jit" / SCRIPT_MODULE_BYTECODE_PKL[
maximum_checked_in_model_version]["model_name"]
if (maximum_checked_in_model_version > MINIMUM_TO_VERSION):
# Backport model to v4
script_module_v4_buffer = _backport_for_mobile_to_buffer(
script_module_v5_path, maximum_checked_in_model_version - 1)
buf = io.StringIO()
# Check version of the model v4 from backport
bytesio = io.BytesIO(script_module_v4_buffer)
backport_version = _get_model_bytecode_version(bytesio)
assert(backport_version == maximum_checked_in_model_version - 1)
# Load model v4 from backport and run forward method
bytesio = io.BytesIO(script_module_v4_buffer)
mobile_module = _load_for_lite_interpreter(bytesio)
module_input = 1
mobile_module_result = mobile_module(module_input)
expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
def test_get_model_ops_and_info(self):
# TODO update this to be more in the style of the above tests after a backport from 6 -> 5 exists
script_module_v6 = pytorch_test_dir / "cpp" / "jit" / "script_module_v6.ptl"
ops_v6 = _get_model_ops_and_info(script_module_v6)
assert(ops_v6["aten::add.int"].num_schema_args == 2)
assert(ops_v6["aten::add.Scalar"].num_schema_args == 2)
if __name__ == '__main__':
run_tests()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
3a1d2b7b1ca964b83fb27615d0d4a1acf3343435
|
dd512d0f5f217c40340d2c4a1a3b77ac047c286e
|
/venv/bin/pip3
|
5ff136764df2057a356e506cdd40fbeb570b5a40
|
[] |
no_license
|
alexdemeo/RokuApp
|
b7256f67036ca6b6964a9d04c00731cd7bb62584
|
fb20ab3c571c4396d16f521a38b54ea6549512f0
|
refs/heads/master
| 2020-06-12T23:37:48.338812
| 2020-04-27T21:57:52
| 2020-04-27T21:57:52
| 194,462,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
#!/Users/alexdemeo/Documents/Other/RokuApp/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"alex.demeo129@gmail.com"
] |
alex.demeo129@gmail.com
|
|
373835e7e99ec0d675171328272741ec1442da52
|
55f012626575008a5497b99e98c583996c75da55
|
/Permutations of a number.py
|
6c3384480fe51b110e345366944f3a3a66ddf9af
|
[] |
no_license
|
nitish283/Python_Coding
|
15fa861779f2fe325cf3b2f532df9ee3c12e95a7
|
5b165ddebec7234e0f42ba3db2a70c3f6016b0f5
|
refs/heads/master
| 2022-12-13T06:27:54.385836
| 2020-09-07T09:28:36
| 2020-09-07T09:28:36
| 293,481,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
import copy
# Build all permutations of 1..n by inserting n into every position of each
# permutation of 1..n-1 (base case: n == 2). Requires n >= 2.
def perm(n):
    if n == 2:
        return [[1, 2], [2, 1]]
    t = []
    x = perm(n - 1)
    b = copy.deepcopy(x)
    for i in range(len(x)):
        for j in range(len(x[i]) + 1):
            b[i].insert(j, n)  # insert n at position j of the current copy
            t.append(b[i])
            b = copy.deepcopy(x)  # fresh copy before the next insertion
    return t
N = int(input())
print(perm(N))
|
[
"noreply@github.com"
] |
nitish283.noreply@github.com
|
631f910181d17efb14583a50b22675b9e5c41974
|
2300b448e9c13fd39068a46a1062baa19acd3853
|
/day18/part2/main.py
|
1a8925840cf33420ff56b60937946ebe6ef0771f
|
[] |
no_license
|
cryptable/adventcode2020
|
42764bc1a26ee5e242f2b267fd82ac2f2fb05b09
|
6a65ba00c93d92c627e7b71243faccee69f304a9
|
refs/heads/master
| 2023-02-05T22:42:53.641937
| 2020-12-26T15:19:53
| 2020-12-26T15:19:53
| 318,576,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,900
|
py
|
# Grammar
# statement : '(' <mul-expression> ')' | <number>
# mul-expression : plus-expression ('*' plus-expression)*
# plus-expression : statement ('+' statement)*
# <operand> : '+' | '*'
# <number> : [1-9][0-9]*
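# Example (these rules give '+' higher precedence than '*', per AoC 2020 day 18 part 2):
#   "2 * 3 + (4 * 5)"        evaluates as 2 * (3 + 20)  -> 46
#   "1 + 2 * 3 + 4 * 5 + 6"  evaluates as 3 * 7 * 11    -> 231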
class Token:
def __init__(self, token, value):
self.token = token
self.value = value
def __str__(self):
return str('[kind:{},value:{}]'.format(self.token, self.value))
class ASTNode:
def __init__(self, operand_token, left, right):
self.token = operand_token
self.left = left
self.right = right
class AST:
def __init__(self, ast_node):
self.root = ast_node
def traverse(self, node):
if node.token.token == 'number':
return node.token.value
if node.token.token == 'operand':
if node.token.value == '+':
return self.traverse(node.left) + self.traverse(node.right)
if node.token.value == '*':
return self.traverse(node.left) * self.traverse(node.right)
def walk(self):
return self.traverse(self.root)
def _print_tree(self, node, level):
slevel = "-" * level
if node.token.token == 'number':
print("{}: '{}' ".format(slevel,node.token.value))
if node.token.token == 'operand':
if node.token.value == '+':
self._print_tree(node.left, level+1)
print("{}: {}".format(slevel, node.token.value))
self._print_tree(node.right, level+1)
if node.token.value == '*':
self._print_tree(node.left, level+1)
print("{}: {}".format(slevel, node.token.value))
self._print_tree(node.right, level+1)
def print(self):
self._print_tree(self.root, 0)
def processToken(token, line, i):
if token == '(':
return (i, Token('(', ''))
if token == ')':
return (i, Token(')', ''))
if token == '+':
return (i, Token('operand', '+'))
if token == '*':
return (i, Token('operand', '*'))
if token.isnumeric():
return (i, Token('number', int(token)))
raise Exception("unknown token {}:{} \"{}\"".format(line.rstrip('\n'), i, token))
def pass1_lex(line):
tokens = []
token = ''
for i in range(0, len(line)):
if line[i] == ' ':
i += 1
if len(token) > 0:
(i, tkn) = processToken(token, line, i)
tokens.append(tkn)
token = ''
elif line[i] == '(':
(i, tkn) = processToken(line[i], line, i)
tokens.append(tkn)
token = ''
elif line[i] == ')':
if len(token) > 0:
(i, tkn) = processToken(token, line, i)
tokens.append(tkn)
(i, tkn) = processToken(line[i], line, i)
tokens.append(tkn)
token = ''
elif line[i] == '\n':
if len(token) > 0:
(i, tkn) = processToken(token, line, i)
tokens.append(tkn)
token = ''
break
else:
token += line[i]
i += 1
    # emit the last token if the line did not end with '\n'
if len(token) > 0:
(i, tkn) = processToken(token, line, i)
tokens.append(tkn)
return tokens
def processPlusExpression(tokens, idx):
(idx, value1) = processStatement(tokens, idx)
ast = value1
# print("p1: {}".format(idx))
while idx < len(tokens) and tokens[idx].token != ')' and tokens[idx].value != '*':
if tokens[idx].token != 'operand' or tokens[idx].value != '+':
raise Exception('Illegal expression')
operand = tokens[idx]
idx += 1
(idx, value2) = processStatement(tokens, idx)
ast = ASTNode(operand, ast, value2)
# print("p2: {}".format(idx))
return (idx, ast)
def processMulExpression(tokens, idx):
(idx, value1) = processPlusExpression(tokens, idx)
ast = value1
# print("m1: {}".format(idx))
while idx < len(tokens) and tokens[idx].token != ')' and tokens[idx].value != '+':
if tokens[idx].token != 'operand' or tokens[idx].value != '*':
raise Exception('Illegal mul expression {}: {}'.format(tokens[idx].token, tokens[idx].value))
operand = tokens[idx]
idx += 1
(idx, value2) = processPlusExpression(tokens, idx)
ast = ASTNode(operand, ast, value2)
# print("m2: {}".format(idx))
return (idx, ast)
def processStatement(tokens, idx):
# <number>
# print("s1: {}".format(idx))
if tokens[idx].token == 'number':
number_node = ASTNode(tokens[idx], None, None)
idx += 1
return (idx, number_node)
elif tokens[idx].token == '(':
idx += 1
if idx >= len(tokens):
raise Exception("Unbalanced braces")
(idx, parenthese_node) = processMulExpression(tokens, idx)
if tokens[idx].token != ')':
raise Exception("Unbalanced braces")
idx += 1
return (idx, parenthese_node)
else:
raise Exception("Unexpected token {}".format(tokens[idx].token))
return (0, None)
def printTokens(tokens):
for token in tokens:
print(token)
def pass2_parser(tokens):
idx = 0
(idx, val) = processMulExpression(tokens, idx)
if idx != len(tokens):
raise Exception("Unused Tokens {} vs {}".format(idx, len(tokens)))
return val
if __name__ == '__main__':
sum = 0
with open('input.txt', 'r') as infile:
for line in infile:
tokens = pass1_lex(line)
# for token in tokens:
# print(token, end=',')
# print()
value = pass2_parser(tokens)
astTree = AST(value)
# astTree.print()
print(astTree.walk())
sum += astTree.walk()
infile.close()
print(sum)
|
[
"david.tillemans@cryptable.org"
] |
david.tillemans@cryptable.org
|
e74a21dbb2b0a860b6fb7b8cad7c50db9fe62cdb
|
4e3de79f0c1f49b64e53c368622ea540208c3c6b
|
/.ycm_extra_conf.py
|
b2e8bd91540ff5681fec730b42a1d3291e57f3db
|
[] |
no_license
|
rappet/md5spoofer
|
7c8b41bb6e0af2728e5f1be5d40369db3fe850ed
|
b1903efe9b376eea02dda566278af69f5016fb34
|
refs/heads/master
| 2021-01-10T05:40:47.135184
| 2016-04-03T20:57:51
| 2016-04-03T20:57:51
| 55,368,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,661
|
py
|
# Generated by YCM Generator at 2016-04-03 18:01:58.710140
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
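# For reference, a compile_commands.json is just a list of entries like this (illustrative paths):
#   [ { "directory": "/home/user/project/build",
#       "command": "clang -Iinclude -c ../src/main.c",
#       "file": "../src/main.c" } ]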
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
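  # e.g. flags for "widget.h" are looked up via "widget.cpp", "widget.cxx", etc.,
  # whichever of those exists in the database.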
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
[
"raphael.r.peters@gmail.com"
] |
raphael.r.peters@gmail.com
|
466373d76ca24cb2a34c4824c5097fc46feafc28
|
88cc55bc6e6feee2fa5f186d57ccb5e1d420217c
|
/django_websocket/__init__.py
|
6e4e96ace8d71052cc4fc96b0c8e0a06953e792e
|
[
"BSD-3-Clause"
] |
permissive
|
gregmuellegger/django-websocket
|
7c16d3ba27970284b8fbeab5cecd1358d47373a2
|
cb4804e98f397f242e74c6f9e6f4fabab41a7ab7
|
refs/heads/master
| 2021-07-05T00:13:21.719973
| 2016-05-10T06:27:40
| 2016-05-10T06:27:40
| 773,121
| 68
| 19
|
BSD-3-Clause
| 2021-06-10T17:32:54
| 2010-07-13T19:40:14
|
Python
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
from django_websocket.decorators import *
|
[
"gregor@muellegger.de"
] |
gregor@muellegger.de
|
3f6d7ec8d6856734417c11a6e64dec45fb9e6067
|
ebf8541575709eaaf59515ee972f482b81499fe5
|
/app/repository/teams.py
|
124a6316a0c11641da1d33b3aacaeaeba3e3d745
|
[
"Apache-2.0"
] |
permissive
|
maestro-server/data-app
|
d689411d2bad9991c8995439792929d894e053da
|
cde6479cc84fe410220b34742772d5017571e3d3
|
refs/heads/master
| 2023-07-15T05:50:50.199168
| 2021-10-15T17:12:32
| 2021-10-15T17:12:32
| 129,275,899
| 0
| 0
|
Apache-2.0
| 2023-07-11T01:13:44
| 2018-04-12T15:51:15
|
Python
|
UTF-8
|
Python
| false
| false
| 56
|
py
|
from .model import Model
class Teams(Model):
pass
|
[
"felipeklerk@yahoo.com.br"
] |
felipeklerk@yahoo.com.br
|
78ba15feb70dfa3b47f3b5c30834f09806324425
|
ab0c5c2da1a98276df745e922ea9483360fa476b
|
/ex103.py
|
b9b02e595934d590697061a2da67d9f1b4631574
|
[] |
no_license
|
edlorencetti/Python-3
|
7b18cf4ea434b864fb576bda83ab5fb13d5d703a
|
a66a197f4de2bfd8e197fbb5a4e6d2ed0a2dc05c
|
refs/heads/master
| 2022-09-11T12:22:54.711256
| 2020-05-29T13:23:28
| 2020-05-29T13:23:28
| 264,819,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
def ficha(jog='<unknown>', gol=0):
    print(f'Player {jog} scored {gol} goal(s) in the championship.')
n = input("Player name: ")
g = input("Number of goals: ")
if g.isnumeric():
    g = int(g)
else:
    g = 0
if n.strip() == '':
    ficha(gol=g)
else:
    ficha(n, g)
|
[
"edlorencetti@yahoo.com.br"
] |
edlorencetti@yahoo.com.br
|
7bea582bb5ad2dd0f88b003d966aaf9fd502fe7c
|
17c6294e656703acb394b63e978fdf605746c3c3
|
/resume_scanner/wsgi.py
|
88964705e5997d10a6fb6fe6af6ff0509af8f9ed
|
[] |
no_license
|
oraby8/resume_scanner
|
c0e2bd6cb1beb2870fc5816b3770d65d90825f6e
|
e3000e8f1bafdfd654b70e6cbb4d3ecaace98110
|
refs/heads/master
| 2023-07-11T05:59:03.372907
| 2019-11-04T11:10:07
| 2019-11-04T11:10:07
| 219,481,787
| 5
| 0
| null | 2023-07-06T21:49:43
| 2019-11-04T11:06:57
|
CSS
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
"""
WSGI config for resume_scanner project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
#from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'resume_scanner.settings')
application = get_wsgi_application()
#application=DjangoWhiteNoise(application)
|
[
"ahmedsamiroraby8@gmail.com"
] |
ahmedsamiroraby8@gmail.com
|
09da46de08db8efd21ef86e80c0bd1b0bfa4641f
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/fbs_0140+360/sdB_FBS_0140+360_lc.py
|
757218cb78912bdc16ba760e680f6cef9c974b74
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from gPhoton.gAperture import gAperture
def main():
    gAperture(band="NUV", skypos=[25.926708,36.25925], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_FBS_0140+360/sdB_FBS_0140+360_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
51149a9b0f94c7ee80090fabec79d37acd1ca528
|
b1b495eb9cd761ed4a968828b62ef8b12888990b
|
/Network/data_loader.py
|
df1c344c71328e347e02e9ba94b85427d2801319
|
[] |
no_license
|
liuwenhaha/LungNoduleRetrieval
|
7aaaa79eac22dbd93efd2599d69777e34d7e2c9d
|
49ae736ae0410f5869559d7eaa4399872ff9c00b
|
refs/heads/master
| 2020-06-21T04:22:11.886122
| 2019-05-08T19:22:52
| 2019-05-08T19:22:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,306
|
py
|
import pickle
import numpy as np
from functools import partial, reduce
from skimage import transform
from config import dataset_dir as input_dir, pred_as_dataset_dir, output_dir, local
from config import input_dir as in_dir
from Network.FileManager import Dataset, Dataset3d, DatasetFromPredication
from experiments import CrossValidationManager
from Network.dataUtils import rating_normalize, crop_center, l2_distance
from Network.dataUtils import rating_clusters_distance, rating_clusters_distance_matrix, reorder
# =========================
# Load
# =========================
def build_loader(size=128, res=1.0, apply_mask_to_patch=False, sample='Normal', dataset_type='Clean',
configuration=None, n_groups=5, config_name='LEGACY', run=None, epoch=None, load_data_from_predictions=False, return_predicted_ratings=True):
if config_name == 'LEGACY':
loader = partial(load_nodule_dataset, size, res, apply_mask_to_patch, sample, configuration, n_groups, dataset_type)
else:
if load_data_from_predictions:
loader = partial(load_nodule_dataset_from_predications, config_name, run, epoch, apply_mask_to_patch, configuration, return_predicted_ratings)
else:
loader = partial(load_nodule_dataset_from_dataset, config_name, size, res, apply_mask_to_patch, sample, configuration)
return loader
def build_loader_3d(configuration, net_type, run, epoch, n_groups=5):
loader = partial(load_nodule_dataset_3d, configuration, net_type, run, epoch, n_groups)
return loader
def load_nodule_dataset_3d(configuration, net_type, run, epoch, n_groups=5):
DataFile = Dataset3d(configuration, dir=input_dir)
trainData = DataFile.load(dset='Train', net=net_type, run=run, epoch=epoch)
validData = DataFile.load(dset='Valid', net=net_type, run=run, epoch=epoch)
testData = None
print("Loaded {} entries to {} set".format(len(trainData), 'Train'))
print("Loaded {} entries to {} set".format(len(validData), 'Valid'))
print("Test data not available")
def gather_data(data):
return [( entry['embed'],
transform.resize(entry['mask'], output_shape=entry['embed'].shape, order=0),
entry['label'],
entry['info'],
entry['size'],
entry['rating'],
entry['weights'],
entry['z'])
for entry in data]
validData = gather_data(validData)
trainData = gather_data(trainData)
image_ = np.concatenate([e[0].flatten() for e in trainData])
print("\tImage Range = [{:.1f}, {:.1f}]".format(image_.max(), image_.min()))
return testData, validData, trainData
def load_nodule_dataset(size=128, res=1.0, apply_mask_to_patch=False, sample='Normal', configuration=None, n_groups=5,
dataset_type='Clean'):
if configuration is None:
return load_nodule_dataset_old_style(size=size, res=res, apply_mask_to_patch=apply_mask_to_patch, sample=sample)
if apply_mask_to_patch:
print('WRN: apply_mask_to_patch is for debug only')
test_id = configuration
valid_id = (configuration + 1) % n_groups
testData, validData, trainData = [], [], []
for c in range(n_groups):
data_file = Dataset(data_type=dataset_type, conf=c, dir=input_dir)
data_group = data_file.load(size, res, sample)
        if c == test_id:
            set_name = "Test"
            testData += data_group
        elif c == valid_id:
            set_name = "Valid"
            validData += data_group
        else:
            set_name = "Train"
            trainData += data_group
        print("Loaded {} entries from {} to {} set - LEGACY configuration".format(len(data_group), data_file.name(size, res, sample), set_name))
#trainData, validData, testData = [reduce(lambda x, y: x + y, data) for data in [trainData, validData, testData]]
def gather_data(data, apply_mask):
return [( entry['patch'] * (0.3 + 0.7 * entry['mask']) if apply_mask else entry['patch'],
entry['mask'],
entry['label'],
entry['info'],
entry['size'],
entry['rating'],
entry['weights'],
entry['z'])
for entry in data]
testData = gather_data(testData, apply_mask_to_patch)
validData = gather_data(validData, apply_mask_to_patch)
trainData = gather_data(trainData, apply_mask_to_patch)
image_ = np.concatenate([e[0].flatten() for e in trainData])
print("\tImage Range = [{:.1f}, {:.1f}]".format(image_.max(), image_.min()))
return testData, validData, trainData
def load_nodule_dataset_from_dataset(config_name, size=128, res=1.0, apply_mask_to_patch=False, sample='Normal', configuration=None):
if configuration is None:
assert False
if apply_mask_to_patch:
print('WRN: apply_mask_to_patch is for debug only')
dataset_type = 'Primary'
manager = CrossValidationManager(config_name)
print("Using {} CrossValidationManager:".format(config_name))
def load(data_type, conf, dir, size, res, sample, data_set):
data_file = Dataset(data_type=data_type, conf=conf, dir=dir)
data = data_file.load(size, res, sample)
print("\tLoaded {} entries from {} to {} set".format(len(data), data_file.name(size, res, sample), data_set))
return data
trainData = [load(dataset_type, c, input_dir, size, res, sample, 'Train')
for c in manager.get_train(configuration)]
validData = [load(dataset_type, c, input_dir, size, res, sample, 'Valid')
for c in manager.get_valid(configuration)]
testData = [load(dataset_type, c, input_dir, size, res, sample, 'Test')
for c in manager.get_test(configuration)]
trainData, validData, testData = [reduce(lambda x, y: x + y, data) for data in [trainData, validData, testData]]
def gather_data(data, apply_mask):
return [( entry['patch'] * (0.3 + 0.7 * entry['mask']) if apply_mask else entry['patch'],
entry['mask'],
entry['label'],
entry['info'],
entry['size'],
entry['rating'],
entry['weights'],
entry['z'])
for entry in data]
testData = gather_data(testData, apply_mask_to_patch)
validData = gather_data(validData, apply_mask_to_patch)
trainData = gather_data(trainData, apply_mask_to_patch)
image_ = np.concatenate([e[0].flatten() for e in trainData])
print("\tImage Range = [{:.1f}, {:.1f}]".format(image_.max(), image_.min()))
return testData, validData, trainData
def load_nodule_dataset_from_predications(config_name, run, epoch, apply_mask_to_patch=False, configuration=None, return_predicted=True):
if configuration is None:
assert False
if apply_mask_to_patch:
print('WRN: apply_mask_to_patch is for debug only')
manager = CrossValidationManager(config_name)
print("Using {} CrossValidationManager:".format(config_name))
def load(r, epoch, indir, data_set):
data_file = DatasetFromPredication(type='rating', input_dir=indir)
data = data_file.load(goal=data_set, run=r, epoch=epoch, return_predicted=return_predicted)
print("\tLoaded {} entries from {} to {} set".format(len(data), data_file.name(goal=data_set, run=r), data_set))
return data
cnf_id = manager.get_run_id(configuration)
run_name = '{}c{}'.format(run, cnf_id)
trainData = load(run_name, epoch, pred_as_dataset_dir, 'Train')
validData = load(run_name, epoch, pred_as_dataset_dir, 'Valid')
testData = load(run_name, epoch, pred_as_dataset_dir, 'Test')
#trainData, validData, testData = [reduce(lambda x, y: x + y, data) for data in [trainData, validData, testData]]
def gather_data(data, apply_mask):
return [( entry['patch'] * (0.3 + 0.7 * entry['mask']) if apply_mask else entry['patch'],
entry['mask'],
entry['label'],
entry['info'],
entry['size'],
entry['rating'],
entry['weights'],
entry['z'])
for entry in data]
testData = gather_data(testData, apply_mask_to_patch)
validData = gather_data(validData, apply_mask_to_patch)
trainData = gather_data(trainData, apply_mask_to_patch)
image_ = np.concatenate([e[0].flatten() for e in trainData])
print("\tImage Range = [{:.1f}, {:.1f}]".format(image_.max(), image_.min()))
return testData, validData, trainData
def load_nodule_dataset_old_style(size=128, res=1.0, apply_mask_to_patch=False, sample='Normal'):
if type(res) == str:
filename = '/Dataset/Dataset{:.0f}-{}-{}.p'.format(size, res, sample)
else:
filename = '/Dataset/Dataset{:.0f}-{:.1f}-{}.p'.format(size, res, sample)
    try:
        testData, validData, trainData = pickle.load(open(filename, 'br'))
    except OSError:
        # fall back to a path relative to the current directory
        testData, validData, trainData = pickle.load(open('.' + filename, 'br'))
print('Loaded: {}'.format(filename))
image_ = np.concatenate([e['patch'] for e in trainData])
print("\tImage Range = [{:.1f}, {:.1f}]".format(np.max(image_), np.min(image_)))
print("\tMasks Range = [{}, {}]".format(np.max(trainData[0]['mask']), np.min(trainData[0]['mask'])))
#print("\tLabels Range = [{}, {}]".format(np.max(trainData[0]['label']), np.min(trainData[0]['label'])))
if apply_mask_to_patch:
print('WRN: apply_mask_to_patch is for debug only')
testData = [(entry['patch']*(0.3+0.7*entry['mask']), entry['mask'], entry['label'], entry['info'], entry['size'], entry['rating']) for entry in testData]
validData = [(entry['patch']*(0.3+0.7*entry['mask']), entry['mask'], entry['label'], entry['info'], entry['size'], entry['rating']) for entry in validData]
trainData = [(entry['patch']*(0.3+0.7*entry['mask']), entry['mask'], entry['label'], entry['info'], entry['size'], entry['rating']) for entry in trainData]
else:
testData = [ (entry['patch'], entry['mask'], entry['label'], entry['info'], entry['size'], entry['rating']) for entry in testData ]
validData = [ (entry['patch'], entry['mask'], entry['label'], entry['info'], entry['size'], entry['rating']) for entry in validData]
trainData = [ (entry['patch'], entry['mask'], entry['label'], entry['info'], entry['size'], entry['rating']) for entry in trainData]
return testData, validData, trainData
def load_nodule_raw_dataset(size=128, res='Legacy', sample='Normal'):
if type(res) == str:
filename = input_dir + '/Dataset{:.0f}-{}-{}.p'.format(size, res, sample)
else:
filename = input_dir + '/Dataset{:.0f}-{:.1f}-{}.p'.format(size, res, sample)
print('Loading {}'.format(filename))
    try:
        testData, validData, trainData = pickle.load(open(filename, 'br'))
    except OSError:
        print('...Failed')
        testData, validData, trainData = pickle.load(open('.' + filename, 'br'))
return testData, validData, trainData
|
[
"Mark.Loyman@gmail.com"
] |
Mark.Loyman@gmail.com
|
c3be66df4769ed5c17fa2bf6be33d73e1d59c8c9
|
cacea9e8c9261717e8ce0b404f16a2ff2d7782e1
|
/train_conditional.py
|
2a4e6c9ef745b276dada7de8a523b615cf43526b
|
[
"MIT"
] |
permissive
|
vislearn/HINT
|
6fc583bdff80d93a669535f42d7a54f4f05f483b
|
b26c5026bd486bb392a9416430cb1dfebe5aa06f
|
refs/heads/master
| 2023-03-07T03:01:11.791414
| 2021-02-11T11:49:40
| 2021-02-11T11:49:40
| 248,197,267
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,707
|
py
|
from time import time
import torch
from FrEIA.framework import *
from FrEIA.modules import *
import monitoring
# Load training configuration, model and data set
# from configs.fourier_curve.conditional_cinn_1 import c, model, model_inverse
# from configs.fourier_curve.conditional_cinn_2 import c, model, model_inverse
# from configs.fourier_curve.conditional_cinn_4 import c, model, model_inverse
# from configs.fourier_curve.conditional_cinn_8 import c, model, model_inverse
# from configs.fourier_curve.conditional_hint_1_full import c, model, model_inverse
# from configs.fourier_curve.conditional_hint_2_full import c, model, model_inverse
# from configs.fourier_curve.conditional_hint_4_full import c, model, model_inverse
# from configs.fourier_curve.conditional_hint_8_full import c, model, model_inverse
# from configs.plus_shape.conditional_hint_8_full import c, model, model_inverse
def save(c, name):
torch.save({#'opt': optim.state_dict(),
'net': c.model.state_dict()}, name)
def load(c, name):
    state_dicts = torch.load(name)
    c.model.load_state_dict(state_dicts['net'])
    try:
        optim.load_state_dict(state_dicts['opt'])
    except (KeyError, NameError, ValueError):
        # save() above does not store the optimizer state, so this is expected to fail
        print('Cannot load optimizer for some reason or other')
def save_sample(c, N=500):
y = c.vis_y_target
# create sample from saved model
try:
load(c, f'output/{c.suffix}.pt')
except:
pass
c.model.eval()
with torch.no_grad():
z_sample = torch.cuda.FloatTensor(N, c.ndim_z).normal_()
y_sample = torch.tensor(y).expand(N, c.ndim_y).cuda()
x_sample = c.model_inverse(y_sample, z_sample)
np.save(f'output/samples/{c.suffix}_sample-{N}', x_sample.cpu())
def x_jac(c):
jacobian = 0
for node in c.model.node_list:
if 'hac_x' in node.name or 'ac_y_to_x' in node.name:
jacobian += node.module.jacobian(None)
return jacobian
def evaluate(c, only_x=True):
with torch.no_grad():
c.model.eval()
loader = c.test_loader
batch_idx = 0
loss_history = []
for x, y in loader:
batch_losses = []
batch_idx += 1
if batch_idx > c.max_batches_per_epoch > 0: break
x, y = x.to(c.device), y.to(c.device)
x += 0.01 * torch.randn_like(x)
# Forward pass
if 'hint' in c.suffix:
z_y, z_x = c.model([y, x])
z = torch.cat([z_x, z_y], dim=-1)
# Maximum likelihood loss terms
if not only_x:
log_jacobian = c.model.log_jacobian([y, x], run_forward=False)
batch_losses.append(0.5 * torch.sum(z**2, dim=1).mean())
batch_losses.append(-log_jacobian.mean())
else:
batch_losses.append(0.5 * torch.sum(z_x**2, dim=1).mean())
batch_losses.append(-x_jac(c).mean())
elif 'cinn' in c.suffix:
z = c.model([x], c=[y])
log_jacobian = c.model.log_jacobian([x], c=[y], run_forward=False)
# Maximum likelihood loss terms
batch_losses.append(0.5 * torch.sum(z**2, dim=1).mean())
batch_losses.append(-log_jacobian.mean())
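            # Change-of-variables NLL: -log p(input) = 0.5 * ||z||^2 - log|det J| (up to an
            # additive constant); the two terms appended above are exactly this decomposition.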
# Add up all losses
loss_total = sum(batch_losses)
loss_history.append([l.item() for l in batch_losses])
return np.mean(loss_history, axis=0).sum()
def train_epoch(c, optim, i_epoch, vis_y, vis_latent, test=False):
if not test:
c.model.train()
loader = c.train_loader
if test:
c.model.eval()
loader = c.test_loader
nograd = torch.no_grad()
nograd.__enter__()
nll_x = []
batch_idx = 0
loss_history = []
for x, y in loader:
optim.zero_grad()
batch_losses = []
batch_idx += 1
if batch_idx > c.max_batches_per_epoch > 0: break
x, y = x.to(c.device), y.to(c.device)
x += 0.01 * torch.randn_like(x)
# Forward pass
if 'hint' in c.suffix:
z_y, z_x = c.model([y, x])
z = torch.cat([z_x, z_y], dim=-1)
log_jacobian = c.model.log_jacobian([y, x], run_forward=False)
if test:
nll_x.append((0.5 * torch.sum(z_x**2, dim=1).mean() - x_jac(c).mean()).item())
elif 'cinn' in c.suffix:
z = c.model([x], c=[y])
log_jacobian = c.model.log_jacobian([x], c=[y], run_forward=False)
# Maximum likelihood loss terms
batch_losses.append(0.5 * torch.sum(z**2, dim=1).mean())
batch_losses.append(-log_jacobian.mean())
# Add up all losses
loss_total = sum(batch_losses)
loss_history.append([l.item() for l in batch_losses])
# Compute gradients
if not test:
loss_total.backward()
# Clamp gradients
for p in c.model.params_trainable:
p.grad.data.clamp_(-5.00, 5.00)
# Parameter update
optim.step()
# Update progress bar
monitoring.visualizer.update_progress(batch_idx, i_epoch+1)
if test:
# Update plots
latent_sample = z[:500,:].data.cpu().numpy()
with torch.no_grad():
vis_x = c.model_inverse(vis_y, vis_latent).data.cpu().numpy()
monitoring.visualizer.update_plots(latent_sample, vis_x)
# if len(nll_x) > 0:
# print(' NLL_x:', np.mean(nll_x), ' | bits/dim:', -np.mean(nll_x) / (c.ndim_x * np.log(2.)))
    if test:
        nograd.__exit__(None, None, None)
return np.mean(loss_history, axis=0)
def main(c):
loss_labels = ['-log p(z)', '-log |det(J)|']
# Init trainable model parameters
if c.init_scale > 0:
for p in c.model.params_trainable:
p.data = c.init_scale * torch.randn_like(p.data)
# Count total number of trainable parameters
n_model_params = sum([p.numel() for p in c.model.params_trainable])
print(f'\nModel {c.suffix} has {n_model_params:,} trainable parameters.\n')
# assert False
# Prepare optimizer and learning rate schedule
optim = torch.optim.Adam(c.model.params_trainable, lr=c.lr_init,
betas=c.adam_betas, eps=1e-4,
weight_decay=c.l2_weight_reg)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1,
gamma=(c.final_decay)**(1./c.n_epochs))
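    # gamma is chosen so that gamma ** n_epochs == final_decay, i.e. the learning rate
    # decays by a total factor of final_decay over the whole training run.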
# For visualization
vis_batchsize = 300
vis_y = torch.Tensor([c.vis_y_target]*vis_batchsize).to(c.device)
vis_latent = torch.randn(vis_batchsize, c.ndim_x).to(c.device)
monitoring.restart(c, loss_labels)
# monitoring.visualizer.print_config()
t_start = time()
try:
for i_epoch in range(c.n_epochs):
if i_epoch < c.pre_low_lr:
for param_group in optim.param_groups:
param_group['lr'] = c.lr_init * 3e-2
train_losses = train_epoch(c, optim, i_epoch, vis_y, vis_latent)
test_losses = train_epoch(c, optim, i_epoch, vis_y, vis_latent, test=True)
monitoring.visualizer.update_losses(np.concatenate([train_losses, test_losses]),
lr_scheduler.get_lr(), logscale=False)
lr_scheduler.step()
# save(f'output/{c.suffix}.pt')
except:
# save(f'output/{c.suffix}.pt' + '_ABORT')
raise
finally:
print("\n\nTraining took %f minutes\n\n" % ((time()-t_start)/60.))
return test_losses.sum()
if __name__ == "__main__":
    # NOTE: main(c) expects a config object `c`; uncomment one of the config imports
    # at the top of this file before running the script directly.
    main(c)
    # save_sample(c)
|
[
"jakob.kruse@iwr.uni-heidelberg.de"
] |
jakob.kruse@iwr.uni-heidelberg.de
|
32bdc9074cfc1034329d665841a417491497b35a
|
d45d9111e4e5736c7bb7eebef27bfef7bc27b3b8
|
/projects/Python/proto/enums.py
|
c516eda6a82699c787794c1584339b5707d5c4a1
|
[
"MIT"
] |
permissive
|
museghost/FastBinaryEncoding
|
0c0b4b4d6ae1004f13810fd1db0bb761116106cf
|
7e8425552d4c42cd78162dbe0dedab796808ae78
|
refs/heads/master
| 2023-01-02T08:47:52.223902
| 2020-10-16T10:53:09
| 2020-10-16T10:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176,545
|
py
|
# Automatically generated by the Fast Binary Encoding compiler, do not modify!
# https://github.com/chronoxor/FastBinaryEncoding
# Source: enums.fbe
# Version: 1.4.0.0
import base64
import decimal
import enum
import functools
import json
import sys
import uuid
import fbe
class EnumByte(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(0) + 0
ENUM_VALUE_2 = int(0) + 1
ENUM_VALUE_3 = int(254) + 0
ENUM_VALUE_4 = int(254) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumByte.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumByte.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumByte.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumByte.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumByte.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumByte.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumByte.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumByte(value)
# Fast Binary Encoding EnumByte field model
class FieldModelEnumByte(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 1
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumByte()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumByte(self.read_byte(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_byte(self.fbe_offset, value)
# Fast Binary Encoding EnumByte final model
class FinalModelEnumByte(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 1
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumByte(), 0
return EnumByte(self.read_byte(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_byte(self.fbe_offset, value)
return self.fbe_size
class EnumChar(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = ord('1') + 0
ENUM_VALUE_2 = ord('1') + 1
ENUM_VALUE_3 = ord('3') + 0
ENUM_VALUE_4 = ord('3') + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumChar.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumChar.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumChar.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumChar.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumChar.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumChar.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumChar.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumChar(value)
# Fast Binary Encoding EnumChar field model
class FieldModelEnumChar(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 1
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumChar()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumChar(self.read_uint8(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_uint8(self.fbe_offset, value)
# Fast Binary Encoding EnumChar final model
class FinalModelEnumChar(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 1
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumChar(), 0
return EnumChar(self.read_uint8(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_uint8(self.fbe_offset, value)
return self.fbe_size
class EnumWChar(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(0x0444) + 0
ENUM_VALUE_2 = int(0x0444) + 1
ENUM_VALUE_3 = int(0x0555) + 0
ENUM_VALUE_4 = int(0x0555) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumWChar.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumWChar.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumWChar.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumWChar.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumWChar.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumWChar.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumWChar.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumWChar(value)
# Fast Binary Encoding EnumWChar field model
class FieldModelEnumWChar(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 4
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumWChar()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumWChar(self.read_uint32(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_uint32(self.fbe_offset, value)
# Fast Binary Encoding EnumWChar final model
class FinalModelEnumWChar(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 4
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumWChar(), 0
return EnumWChar(self.read_uint32(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_uint32(self.fbe_offset, value)
return self.fbe_size
class EnumInt8(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(-128) + 0
ENUM_VALUE_2 = int(-128) + 1
ENUM_VALUE_3 = int(126) + 0
ENUM_VALUE_4 = int(126) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumInt8.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumInt8.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumInt8.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumInt8.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumInt8.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumInt8.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumInt8.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumInt8(value)
# Fast Binary Encoding EnumInt8 field model
class FieldModelEnumInt8(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 1
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumInt8()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumInt8(self.read_int8(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_int8(self.fbe_offset, value)
# Fast Binary Encoding EnumInt8 final model
class FinalModelEnumInt8(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 1
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumInt8(), 0
return EnumInt8(self.read_int8(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_int8(self.fbe_offset, value)
return self.fbe_size
class EnumUInt8(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(0) + 0
ENUM_VALUE_2 = int(0) + 1
ENUM_VALUE_3 = int(254) + 0
ENUM_VALUE_4 = int(254) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumUInt8.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumUInt8.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumUInt8.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumUInt8.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumUInt8.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumUInt8.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumUInt8.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumUInt8(value)
# Fast Binary Encoding EnumUInt8 field model
class FieldModelEnumUInt8(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 1
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumUInt8()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumUInt8(self.read_uint8(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_uint8(self.fbe_offset, value)
# Fast Binary Encoding EnumUInt8 final model
class FinalModelEnumUInt8(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 1
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumUInt8(), 0
return EnumUInt8(self.read_uint8(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_uint8(self.fbe_offset, value)
return self.fbe_size
class EnumInt16(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(-32768) + 0
ENUM_VALUE_2 = int(-32768) + 1
ENUM_VALUE_3 = int(32766) + 0
ENUM_VALUE_4 = int(32766) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumInt16.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumInt16.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumInt16.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumInt16.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumInt16.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumInt16.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumInt16.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumInt16(value)
# Fast Binary Encoding EnumInt16 field model
class FieldModelEnumInt16(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 2
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumInt16()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumInt16(self.read_int16(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_int16(self.fbe_offset, value)
# Fast Binary Encoding EnumInt16 final model
class FinalModelEnumInt16(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 2
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumInt16(), 0
return EnumInt16(self.read_int16(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_int16(self.fbe_offset, value)
return self.fbe_size
class EnumUInt16(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(0) + 0
ENUM_VALUE_2 = int(0) + 1
ENUM_VALUE_3 = int(65534) + 0
ENUM_VALUE_4 = int(65534) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumUInt16.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumUInt16.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumUInt16.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumUInt16.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumUInt16.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumUInt16.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumUInt16.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumUInt16(value)
# Fast Binary Encoding EnumUInt16 field model
class FieldModelEnumUInt16(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 2
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumUInt16()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumUInt16(self.read_uint16(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_uint16(self.fbe_offset, value)
# Fast Binary Encoding EnumUInt16 final model
class FinalModelEnumUInt16(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 2
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumUInt16(), 0
return EnumUInt16(self.read_uint16(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_uint16(self.fbe_offset, value)
return self.fbe_size
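# Fast Binary Encoding EnumInt32 enumeration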
class EnumInt32(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(-2147483648) + 0
ENUM_VALUE_2 = int(-2147483648) + 1
ENUM_VALUE_3 = int(2147483646) + 0
ENUM_VALUE_4 = int(2147483646) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumInt32.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumInt32.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumInt32.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumInt32.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumInt32.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumInt32.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumInt32.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumInt32(value)
# Fast Binary Encoding EnumInt32 field model
class FieldModelEnumInt32(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 4
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumInt32()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumInt32(self.read_int32(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_int32(self.fbe_offset, value)
# Fast Binary Encoding EnumInt32 final model
class FinalModelEnumInt32(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 4
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumInt32(), 0
return EnumInt32(self.read_int32(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_int32(self.fbe_offset, value)
return self.fbe_size
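# Fast Binary Encoding EnumUInt32 enumeration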
class EnumUInt32(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(0) + 0
ENUM_VALUE_2 = int(0) + 1
ENUM_VALUE_3 = int(0xFFFFFFFE) + 0
ENUM_VALUE_4 = int(0xFFFFFFFE) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumUInt32.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumUInt32.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumUInt32.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumUInt32.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumUInt32.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumUInt32.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumUInt32.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumUInt32(value)
# Fast Binary Encoding EnumUInt32 field model
class FieldModelEnumUInt32(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 4
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumUInt32()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumUInt32(self.read_uint32(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_uint32(self.fbe_offset, value)
# Fast Binary Encoding EnumUInt32 final model
class FinalModelEnumUInt32(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 4
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumUInt32(), 0
return EnumUInt32(self.read_uint32(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_uint32(self.fbe_offset, value)
return self.fbe_size
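# Fast Binary Encoding EnumInt64 enumeration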
class EnumInt64(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(-9223372036854775807) + 0
ENUM_VALUE_2 = int(-9223372036854775807) + 1
ENUM_VALUE_3 = int(9223372036854775806) + 0
ENUM_VALUE_4 = int(9223372036854775806) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumInt64.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumInt64.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumInt64.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumInt64.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumInt64.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumInt64.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumInt64.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumInt64(value)
# Fast Binary Encoding EnumInt64 field model
class FieldModelEnumInt64(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 8
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumInt64()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumInt64(self.read_int64(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_int64(self.fbe_offset, value)
# Fast Binary Encoding EnumInt64 final model
class FinalModelEnumInt64(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 8
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumInt64(), 0
return EnumInt64(self.read_int64(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_int64(self.fbe_offset, value)
return self.fbe_size
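# Fast Binary Encoding EnumUInt64 enumeration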
class EnumUInt64(enum.IntEnum, metaclass=fbe.DefaultEnumMeta):
ENUM_VALUE_0 = int(0) + 0
ENUM_VALUE_1 = int(0) + 0
ENUM_VALUE_2 = int(0) + 1
ENUM_VALUE_3 = int(0xFFFFFFFFFFFFFFFE) + 0
ENUM_VALUE_4 = int(0xFFFFFFFFFFFFFFFE) + 1
ENUM_VALUE_5 = ENUM_VALUE_3
unknown = ~0
__slots__ = ()
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
if self.value == EnumUInt64.ENUM_VALUE_0:
return "ENUM_VALUE_0"
if self.value == EnumUInt64.ENUM_VALUE_1:
return "ENUM_VALUE_1"
if self.value == EnumUInt64.ENUM_VALUE_2:
return "ENUM_VALUE_2"
if self.value == EnumUInt64.ENUM_VALUE_3:
return "ENUM_VALUE_3"
if self.value == EnumUInt64.ENUM_VALUE_4:
return "ENUM_VALUE_4"
if self.value == EnumUInt64.ENUM_VALUE_5:
return "ENUM_VALUE_5"
return "<unknown>"
@classmethod
def _missing_(cls, value):
return EnumUInt64.unknown
@staticmethod
def __from_json__(value):
if value is None:
return None
return EnumUInt64(value)
# Fast Binary Encoding EnumUInt64 field model
class FieldModelEnumUInt64(fbe.FieldModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the field size
@property
def fbe_size(self):
return 8
# Get the value
def get(self, defaults=None):
if defaults is None:
defaults = EnumUInt64()
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return defaults
return EnumUInt64(self.read_uint64(self.fbe_offset))
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return
self.write_uint64(self.fbe_offset, value)
# Fast Binary Encoding EnumUInt64 final model
class FinalModelEnumUInt64(fbe.FinalModel):
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
# Get the allocation size
# noinspection PyUnusedLocal
def fbe_allocation_size(self, value):
return self.fbe_size
# Get the final size
@property
def fbe_size(self):
return 8
# Check if the value is valid
def verify(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return sys.maxsize
return self.fbe_size
# Get the value
def get(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return EnumUInt64(), 0
return EnumUInt64(self.read_uint64(self.fbe_offset)), self.fbe_size
# Set the value
def set(self, value):
assert ((self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size), "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
self.write_uint64(self.fbe_offset, value)
return self.fbe_size
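# Fast Binary Encoding Enums struct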
@functools.total_ordering
class Enums(object):
__slots__ = "byte0", "byte1", "byte2", "byte3", "byte4", "byte5", "char0", "char1", "char2", "char3", "char4", "char5", "wchar0", "wchar1", "wchar2", "wchar3", "wchar4", "wchar5", "int8b0", "int8b1", "int8b2", "int8b3", "int8b4", "int8b5", "uint8b0", "uint8b1", "uint8b2", "uint8b3", "uint8b4", "uint8b5", "int16b0", "int16b1", "int16b2", "int16b3", "int16b4", "int16b5", "uint16b0", "uint16b1", "uint16b2", "uint16b3", "uint16b4", "uint16b5", "int32b0", "int32b1", "int32b2", "int32b3", "int32b4", "int32b5", "uint32b0", "uint32b1", "uint32b2", "uint32b3", "uint32b4", "uint32b5", "int64b0", "int64b1", "int64b2", "int64b3", "int64b4", "int64b5", "uint64b0", "uint64b1", "uint64b2", "uint64b3", "uint64b4", "uint64b5",
def __init__(self, byte0=EnumByte.ENUM_VALUE_0, byte1=EnumByte.ENUM_VALUE_1, byte2=EnumByte.ENUM_VALUE_2, byte3=EnumByte.ENUM_VALUE_3, byte4=EnumByte.ENUM_VALUE_4, byte5=EnumByte.ENUM_VALUE_5, char0=EnumChar.ENUM_VALUE_0, char1=EnumChar.ENUM_VALUE_1, char2=EnumChar.ENUM_VALUE_2, char3=EnumChar.ENUM_VALUE_3, char4=EnumChar.ENUM_VALUE_4, char5=EnumChar.ENUM_VALUE_5, wchar0=EnumWChar.ENUM_VALUE_0, wchar1=EnumWChar.ENUM_VALUE_1, wchar2=EnumWChar.ENUM_VALUE_2, wchar3=EnumWChar.ENUM_VALUE_3, wchar4=EnumWChar.ENUM_VALUE_4, wchar5=EnumWChar.ENUM_VALUE_5, int8b0=EnumInt8.ENUM_VALUE_0, int8b1=EnumInt8.ENUM_VALUE_1, int8b2=EnumInt8.ENUM_VALUE_2, int8b3=EnumInt8.ENUM_VALUE_3, int8b4=EnumInt8.ENUM_VALUE_4, int8b5=EnumInt8.ENUM_VALUE_5, uint8b0=EnumUInt8.ENUM_VALUE_0, uint8b1=EnumUInt8.ENUM_VALUE_1, uint8b2=EnumUInt8.ENUM_VALUE_2, uint8b3=EnumUInt8.ENUM_VALUE_3, uint8b4=EnumUInt8.ENUM_VALUE_4, uint8b5=EnumUInt8.ENUM_VALUE_5, int16b0=EnumInt16.ENUM_VALUE_0, int16b1=EnumInt16.ENUM_VALUE_1, int16b2=EnumInt16.ENUM_VALUE_2, int16b3=EnumInt16.ENUM_VALUE_3, int16b4=EnumInt16.ENUM_VALUE_4, int16b5=EnumInt16.ENUM_VALUE_5, uint16b0=EnumUInt16.ENUM_VALUE_0, uint16b1=EnumUInt16.ENUM_VALUE_1, uint16b2=EnumUInt16.ENUM_VALUE_2, uint16b3=EnumUInt16.ENUM_VALUE_3, uint16b4=EnumUInt16.ENUM_VALUE_4, uint16b5=EnumUInt16.ENUM_VALUE_5, int32b0=EnumInt32.ENUM_VALUE_0, int32b1=EnumInt32.ENUM_VALUE_1, int32b2=EnumInt32.ENUM_VALUE_2, int32b3=EnumInt32.ENUM_VALUE_3, int32b4=EnumInt32.ENUM_VALUE_4, int32b5=EnumInt32.ENUM_VALUE_5, uint32b0=EnumUInt32.ENUM_VALUE_0, uint32b1=EnumUInt32.ENUM_VALUE_1, uint32b2=EnumUInt32.ENUM_VALUE_2, uint32b3=EnumUInt32.ENUM_VALUE_3, uint32b4=EnumUInt32.ENUM_VALUE_4, uint32b5=EnumUInt32.ENUM_VALUE_5, int64b0=EnumInt64.ENUM_VALUE_0, int64b1=EnumInt64.ENUM_VALUE_1, int64b2=EnumInt64.ENUM_VALUE_2, int64b3=EnumInt64.ENUM_VALUE_3, int64b4=EnumInt64.ENUM_VALUE_4, int64b5=EnumInt64.ENUM_VALUE_5, uint64b0=EnumUInt64.ENUM_VALUE_0, uint64b1=EnumUInt64.ENUM_VALUE_1, uint64b2=EnumUInt64.ENUM_VALUE_2, uint64b3=EnumUInt64.ENUM_VALUE_3, uint64b4=EnumUInt64.ENUM_VALUE_4, uint64b5=EnumUInt64.ENUM_VALUE_5):
self.byte0 = byte0
self.byte1 = byte1
self.byte2 = byte2
self.byte3 = byte3
self.byte4 = byte4
self.byte5 = byte5
self.char0 = char0
self.char1 = char1
self.char2 = char2
self.char3 = char3
self.char4 = char4
self.char5 = char5
self.wchar0 = wchar0
self.wchar1 = wchar1
self.wchar2 = wchar2
self.wchar3 = wchar3
self.wchar4 = wchar4
self.wchar5 = wchar5
self.int8b0 = int8b0
self.int8b1 = int8b1
self.int8b2 = int8b2
self.int8b3 = int8b3
self.int8b4 = int8b4
self.int8b5 = int8b5
self.uint8b0 = uint8b0
self.uint8b1 = uint8b1
self.uint8b2 = uint8b2
self.uint8b3 = uint8b3
self.uint8b4 = uint8b4
self.uint8b5 = uint8b5
self.int16b0 = int16b0
self.int16b1 = int16b1
self.int16b2 = int16b2
self.int16b3 = int16b3
self.int16b4 = int16b4
self.int16b5 = int16b5
self.uint16b0 = uint16b0
self.uint16b1 = uint16b1
self.uint16b2 = uint16b2
self.uint16b3 = uint16b3
self.uint16b4 = uint16b4
self.uint16b5 = uint16b5
self.int32b0 = int32b0
self.int32b1 = int32b1
self.int32b2 = int32b2
self.int32b3 = int32b3
self.int32b4 = int32b4
self.int32b5 = int32b5
self.uint32b0 = uint32b0
self.uint32b1 = uint32b1
self.uint32b2 = uint32b2
self.uint32b3 = uint32b3
self.uint32b4 = uint32b4
self.uint32b5 = uint32b5
self.int64b0 = int64b0
self.int64b1 = int64b1
self.int64b2 = int64b2
self.int64b3 = int64b3
self.int64b4 = int64b4
self.int64b5 = int64b5
self.uint64b0 = uint64b0
self.uint64b1 = uint64b1
self.uint64b2 = uint64b2
self.uint64b3 = uint64b3
self.uint64b4 = uint64b4
self.uint64b5 = uint64b5
# Struct shallow copy
def copy(self, other):
self.byte0 = other.byte0
self.byte1 = other.byte1
self.byte2 = other.byte2
self.byte3 = other.byte3
self.byte4 = other.byte4
self.byte5 = other.byte5
self.char0 = other.char0
self.char1 = other.char1
self.char2 = other.char2
self.char3 = other.char3
self.char4 = other.char4
self.char5 = other.char5
self.wchar0 = other.wchar0
self.wchar1 = other.wchar1
self.wchar2 = other.wchar2
self.wchar3 = other.wchar3
self.wchar4 = other.wchar4
self.wchar5 = other.wchar5
self.int8b0 = other.int8b0
self.int8b1 = other.int8b1
self.int8b2 = other.int8b2
self.int8b3 = other.int8b3
self.int8b4 = other.int8b4
self.int8b5 = other.int8b5
self.uint8b0 = other.uint8b0
self.uint8b1 = other.uint8b1
self.uint8b2 = other.uint8b2
self.uint8b3 = other.uint8b3
self.uint8b4 = other.uint8b4
self.uint8b5 = other.uint8b5
self.int16b0 = other.int16b0
self.int16b1 = other.int16b1
self.int16b2 = other.int16b2
self.int16b3 = other.int16b3
self.int16b4 = other.int16b4
self.int16b5 = other.int16b5
self.uint16b0 = other.uint16b0
self.uint16b1 = other.uint16b1
self.uint16b2 = other.uint16b2
self.uint16b3 = other.uint16b3
self.uint16b4 = other.uint16b4
self.uint16b5 = other.uint16b5
self.int32b0 = other.int32b0
self.int32b1 = other.int32b1
self.int32b2 = other.int32b2
self.int32b3 = other.int32b3
self.int32b4 = other.int32b4
self.int32b5 = other.int32b5
self.uint32b0 = other.uint32b0
self.uint32b1 = other.uint32b1
self.uint32b2 = other.uint32b2
self.uint32b3 = other.uint32b3
self.uint32b4 = other.uint32b4
self.uint32b5 = other.uint32b5
self.int64b0 = other.int64b0
self.int64b1 = other.int64b1
self.int64b2 = other.int64b2
self.int64b3 = other.int64b3
self.int64b4 = other.int64b4
self.int64b5 = other.int64b5
self.uint64b0 = other.uint64b0
self.uint64b1 = other.uint64b1
self.uint64b2 = other.uint64b2
self.uint64b3 = other.uint64b3
self.uint64b4 = other.uint64b4
self.uint64b5 = other.uint64b5
return self
# Struct deep clone
def clone(self):
# Serialize the struct to the FBE stream
writer = EnumsModel(fbe.WriteBuffer())
writer.serialize(self)
# Deserialize the struct from the FBE stream
reader = EnumsModel(fbe.ReadBuffer())
reader.attach_buffer(writer.buffer)
return reader.deserialize()[0]
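# Note: Enums declares no key fields, so __key__ below is empty — all
# instances compare equal and share the same hash.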
def __eq__(self, other):
if not isinstance(self, other.__class__):
return NotImplemented
return True
def __lt__(self, other):
if not isinstance(self, other.__class__):
return NotImplemented
return False
@property
def __key__(self):
return ()
def __hash__(self):
return hash(self.__key__)
def __format__(self, format_spec):
return self.__str__()
def __str__(self):
sb = list()
sb.append("Enums(")
sb.append("byte0=")
sb.append(str(self.byte0))
sb.append(",byte1=")
sb.append(str(self.byte1))
sb.append(",byte2=")
sb.append(str(self.byte2))
sb.append(",byte3=")
sb.append(str(self.byte3))
sb.append(",byte4=")
sb.append(str(self.byte4))
sb.append(",byte5=")
sb.append(str(self.byte5))
sb.append(",char0=")
sb.append(str(self.char0))
sb.append(",char1=")
sb.append(str(self.char1))
sb.append(",char2=")
sb.append(str(self.char2))
sb.append(",char3=")
sb.append(str(self.char3))
sb.append(",char4=")
sb.append(str(self.char4))
sb.append(",char5=")
sb.append(str(self.char5))
sb.append(",wchar0=")
sb.append(str(self.wchar0))
sb.append(",wchar1=")
sb.append(str(self.wchar1))
sb.append(",wchar2=")
sb.append(str(self.wchar2))
sb.append(",wchar3=")
sb.append(str(self.wchar3))
sb.append(",wchar4=")
sb.append(str(self.wchar4))
sb.append(",wchar5=")
sb.append(str(self.wchar5))
sb.append(",int8b0=")
sb.append(str(self.int8b0))
sb.append(",int8b1=")
sb.append(str(self.int8b1))
sb.append(",int8b2=")
sb.append(str(self.int8b2))
sb.append(",int8b3=")
sb.append(str(self.int8b3))
sb.append(",int8b4=")
sb.append(str(self.int8b4))
sb.append(",int8b5=")
sb.append(str(self.int8b5))
sb.append(",uint8b0=")
sb.append(str(self.uint8b0))
sb.append(",uint8b1=")
sb.append(str(self.uint8b1))
sb.append(",uint8b2=")
sb.append(str(self.uint8b2))
sb.append(",uint8b3=")
sb.append(str(self.uint8b3))
sb.append(",uint8b4=")
sb.append(str(self.uint8b4))
sb.append(",uint8b5=")
sb.append(str(self.uint8b5))
sb.append(",int16b0=")
sb.append(str(self.int16b0))
sb.append(",int16b1=")
sb.append(str(self.int16b1))
sb.append(",int16b2=")
sb.append(str(self.int16b2))
sb.append(",int16b3=")
sb.append(str(self.int16b3))
sb.append(",int16b4=")
sb.append(str(self.int16b4))
sb.append(",int16b5=")
sb.append(str(self.int16b5))
sb.append(",uint16b0=")
sb.append(str(self.uint16b0))
sb.append(",uint16b1=")
sb.append(str(self.uint16b1))
sb.append(",uint16b2=")
sb.append(str(self.uint16b2))
sb.append(",uint16b3=")
sb.append(str(self.uint16b3))
sb.append(",uint16b4=")
sb.append(str(self.uint16b4))
sb.append(",uint16b5=")
sb.append(str(self.uint16b5))
sb.append(",int32b0=")
sb.append(str(self.int32b0))
sb.append(",int32b1=")
sb.append(str(self.int32b1))
sb.append(",int32b2=")
sb.append(str(self.int32b2))
sb.append(",int32b3=")
sb.append(str(self.int32b3))
sb.append(",int32b4=")
sb.append(str(self.int32b4))
sb.append(",int32b5=")
sb.append(str(self.int32b5))
sb.append(",uint32b0=")
sb.append(str(self.uint32b0))
sb.append(",uint32b1=")
sb.append(str(self.uint32b1))
sb.append(",uint32b2=")
sb.append(str(self.uint32b2))
sb.append(",uint32b3=")
sb.append(str(self.uint32b3))
sb.append(",uint32b4=")
sb.append(str(self.uint32b4))
sb.append(",uint32b5=")
sb.append(str(self.uint32b5))
sb.append(",int64b0=")
sb.append(str(self.int64b0))
sb.append(",int64b1=")
sb.append(str(self.int64b1))
sb.append(",int64b2=")
sb.append(str(self.int64b2))
sb.append(",int64b3=")
sb.append(str(self.int64b3))
sb.append(",int64b4=")
sb.append(str(self.int64b4))
sb.append(",int64b5=")
sb.append(str(self.int64b5))
sb.append(",uint64b0=")
sb.append(str(self.uint64b0))
sb.append(",uint64b1=")
sb.append(str(self.uint64b1))
sb.append(",uint64b2=")
sb.append(str(self.uint64b2))
sb.append(",uint64b3=")
sb.append(str(self.uint64b3))
sb.append(",uint64b4=")
sb.append(str(self.uint64b4))
sb.append(",uint64b5=")
sb.append(str(self.uint64b5))
sb.append(")")
return "".join(sb)
# Get struct JSON value
def to_json(self):
return json.dumps(self.__to_json__(), cls=fbe.JSONEncoder, separators=(',', ':'))
def __to_json__(self):
result = dict()
result.update(dict(
byte0=self.byte0,
byte1=self.byte1,
byte2=self.byte2,
byte3=self.byte3,
byte4=self.byte4,
byte5=self.byte5,
char0=self.char0,
char1=self.char1,
char2=self.char2,
char3=self.char3,
char4=self.char4,
char5=self.char5,
wchar0=self.wchar0,
wchar1=self.wchar1,
wchar2=self.wchar2,
wchar3=self.wchar3,
wchar4=self.wchar4,
wchar5=self.wchar5,
int8b0=self.int8b0,
int8b1=self.int8b1,
int8b2=self.int8b2,
int8b3=self.int8b3,
int8b4=self.int8b4,
int8b5=self.int8b5,
uint8b0=self.uint8b0,
uint8b1=self.uint8b1,
uint8b2=self.uint8b2,
uint8b3=self.uint8b3,
uint8b4=self.uint8b4,
uint8b5=self.uint8b5,
int16b0=self.int16b0,
int16b1=self.int16b1,
int16b2=self.int16b2,
int16b3=self.int16b3,
int16b4=self.int16b4,
int16b5=self.int16b5,
uint16b0=self.uint16b0,
uint16b1=self.uint16b1,
uint16b2=self.uint16b2,
uint16b3=self.uint16b3,
uint16b4=self.uint16b4,
uint16b5=self.uint16b5,
int32b0=self.int32b0,
int32b1=self.int32b1,
int32b2=self.int32b2,
int32b3=self.int32b3,
int32b4=self.int32b4,
int32b5=self.int32b5,
uint32b0=self.uint32b0,
uint32b1=self.uint32b1,
uint32b2=self.uint32b2,
uint32b3=self.uint32b3,
uint32b4=self.uint32b4,
uint32b5=self.uint32b5,
int64b0=self.int64b0,
int64b1=self.int64b1,
int64b2=self.int64b2,
int64b3=self.int64b3,
int64b4=self.int64b4,
int64b5=self.int64b5,
uint64b0=self.uint64b0,
uint64b1=self.uint64b1,
uint64b2=self.uint64b2,
uint64b3=self.uint64b3,
uint64b4=self.uint64b4,
uint64b5=self.uint64b5,
))
return result
# Create struct from JSON value
@staticmethod
def from_json(document):
return Enums.__from_json__(json.loads(document))
@staticmethod
def __from_json__(fields):
if fields is None:
return None
return Enums(
None if "byte0" not in fields else EnumByte.__from_json__(fields["byte0"]),
None if "byte1" not in fields else EnumByte.__from_json__(fields["byte1"]),
None if "byte2" not in fields else EnumByte.__from_json__(fields["byte2"]),
None if "byte3" not in fields else EnumByte.__from_json__(fields["byte3"]),
None if "byte4" not in fields else EnumByte.__from_json__(fields["byte4"]),
None if "byte5" not in fields else EnumByte.__from_json__(fields["byte5"]),
None if "char0" not in fields else EnumChar.__from_json__(fields["char0"]),
None if "char1" not in fields else EnumChar.__from_json__(fields["char1"]),
None if "char2" not in fields else EnumChar.__from_json__(fields["char2"]),
None if "char3" not in fields else EnumChar.__from_json__(fields["char3"]),
None if "char4" not in fields else EnumChar.__from_json__(fields["char4"]),
None if "char5" not in fields else EnumChar.__from_json__(fields["char5"]),
None if "wchar0" not in fields else EnumWChar.__from_json__(fields["wchar0"]),
None if "wchar1" not in fields else EnumWChar.__from_json__(fields["wchar1"]),
None if "wchar2" not in fields else EnumWChar.__from_json__(fields["wchar2"]),
None if "wchar3" not in fields else EnumWChar.__from_json__(fields["wchar3"]),
None if "wchar4" not in fields else EnumWChar.__from_json__(fields["wchar4"]),
None if "wchar5" not in fields else EnumWChar.__from_json__(fields["wchar5"]),
None if "int8b0" not in fields else EnumInt8.__from_json__(fields["int8b0"]),
None if "int8b1" not in fields else EnumInt8.__from_json__(fields["int8b1"]),
None if "int8b2" not in fields else EnumInt8.__from_json__(fields["int8b2"]),
None if "int8b3" not in fields else EnumInt8.__from_json__(fields["int8b3"]),
None if "int8b4" not in fields else EnumInt8.__from_json__(fields["int8b4"]),
None if "int8b5" not in fields else EnumInt8.__from_json__(fields["int8b5"]),
None if "uint8b0" not in fields else EnumUInt8.__from_json__(fields["uint8b0"]),
None if "uint8b1" not in fields else EnumUInt8.__from_json__(fields["uint8b1"]),
None if "uint8b2" not in fields else EnumUInt8.__from_json__(fields["uint8b2"]),
None if "uint8b3" not in fields else EnumUInt8.__from_json__(fields["uint8b3"]),
None if "uint8b4" not in fields else EnumUInt8.__from_json__(fields["uint8b4"]),
None if "uint8b5" not in fields else EnumUInt8.__from_json__(fields["uint8b5"]),
None if "int16b0" not in fields else EnumInt16.__from_json__(fields["int16b0"]),
None if "int16b1" not in fields else EnumInt16.__from_json__(fields["int16b1"]),
None if "int16b2" not in fields else EnumInt16.__from_json__(fields["int16b2"]),
None if "int16b3" not in fields else EnumInt16.__from_json__(fields["int16b3"]),
None if "int16b4" not in fields else EnumInt16.__from_json__(fields["int16b4"]),
None if "int16b5" not in fields else EnumInt16.__from_json__(fields["int16b5"]),
None if "uint16b0" not in fields else EnumUInt16.__from_json__(fields["uint16b0"]),
None if "uint16b1" not in fields else EnumUInt16.__from_json__(fields["uint16b1"]),
None if "uint16b2" not in fields else EnumUInt16.__from_json__(fields["uint16b2"]),
None if "uint16b3" not in fields else EnumUInt16.__from_json__(fields["uint16b3"]),
None if "uint16b4" not in fields else EnumUInt16.__from_json__(fields["uint16b4"]),
None if "uint16b5" not in fields else EnumUInt16.__from_json__(fields["uint16b5"]),
None if "int32b0" not in fields else EnumInt32.__from_json__(fields["int32b0"]),
None if "int32b1" not in fields else EnumInt32.__from_json__(fields["int32b1"]),
None if "int32b2" not in fields else EnumInt32.__from_json__(fields["int32b2"]),
None if "int32b3" not in fields else EnumInt32.__from_json__(fields["int32b3"]),
None if "int32b4" not in fields else EnumInt32.__from_json__(fields["int32b4"]),
None if "int32b5" not in fields else EnumInt32.__from_json__(fields["int32b5"]),
None if "uint32b0" not in fields else EnumUInt32.__from_json__(fields["uint32b0"]),
None if "uint32b1" not in fields else EnumUInt32.__from_json__(fields["uint32b1"]),
None if "uint32b2" not in fields else EnumUInt32.__from_json__(fields["uint32b2"]),
None if "uint32b3" not in fields else EnumUInt32.__from_json__(fields["uint32b3"]),
None if "uint32b4" not in fields else EnumUInt32.__from_json__(fields["uint32b4"]),
None if "uint32b5" not in fields else EnumUInt32.__from_json__(fields["uint32b5"]),
None if "int64b0" not in fields else EnumInt64.__from_json__(fields["int64b0"]),
None if "int64b1" not in fields else EnumInt64.__from_json__(fields["int64b1"]),
None if "int64b2" not in fields else EnumInt64.__from_json__(fields["int64b2"]),
None if "int64b3" not in fields else EnumInt64.__from_json__(fields["int64b3"]),
None if "int64b4" not in fields else EnumInt64.__from_json__(fields["int64b4"]),
None if "int64b5" not in fields else EnumInt64.__from_json__(fields["int64b5"]),
None if "uint64b0" not in fields else EnumUInt64.__from_json__(fields["uint64b0"]),
None if "uint64b1" not in fields else EnumUInt64.__from_json__(fields["uint64b1"]),
None if "uint64b2" not in fields else EnumUInt64.__from_json__(fields["uint64b2"]),
None if "uint64b3" not in fields else EnumUInt64.__from_json__(fields["uint64b3"]),
None if "uint64b4" not in fields else EnumUInt64.__from_json__(fields["uint64b4"]),
None if "uint64b5" not in fields else EnumUInt64.__from_json__(fields["uint64b5"]),
)
# Get the FBE type
@property
def fbe_type(self):
return self.TYPE
TYPE = 1
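# Usage sketch (comment only, grounded in Enums.clone() above): serialize a
# default Enums value and read it back through the model.
#   writer = EnumsModel(fbe.WriteBuffer())
#   writer.serialize(Enums())
#   reader = EnumsModel(fbe.ReadBuffer())
#   reader.attach_buffer(writer.buffer)
#   value, size = reader.deserialize()
# Fast Binary Encoding Enums field model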
class FieldModelEnums(fbe.FieldModel):
__slots__ = "_byte0", "_byte1", "_byte2", "_byte3", "_byte4", "_byte5", "_char0", "_char1", "_char2", "_char3", "_char4", "_char5", "_wchar0", "_wchar1", "_wchar2", "_wchar3", "_wchar4", "_wchar5", "_int8b0", "_int8b1", "_int8b2", "_int8b3", "_int8b4", "_int8b5", "_uint8b0", "_uint8b1", "_uint8b2", "_uint8b3", "_uint8b4", "_uint8b5", "_int16b0", "_int16b1", "_int16b2", "_int16b3", "_int16b4", "_int16b5", "_uint16b0", "_uint16b1", "_uint16b2", "_uint16b3", "_uint16b4", "_uint16b5", "_int32b0", "_int32b1", "_int32b2", "_int32b3", "_int32b4", "_int32b5", "_uint32b0", "_uint32b1", "_uint32b2", "_uint32b3", "_uint32b4", "_uint32b5", "_int64b0", "_int64b1", "_int64b2", "_int64b3", "_int64b4", "_int64b5", "_uint64b0", "_uint64b1", "_uint64b2", "_uint64b3", "_uint64b4", "_uint64b5",
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
self._byte0 = FieldModelEnumByte(buffer, 4 + 4)
self._byte1 = FieldModelEnumByte(buffer, self._byte0.fbe_offset + self._byte0.fbe_size)
self._byte2 = FieldModelEnumByte(buffer, self._byte1.fbe_offset + self._byte1.fbe_size)
self._byte3 = FieldModelEnumByte(buffer, self._byte2.fbe_offset + self._byte2.fbe_size)
self._byte4 = FieldModelEnumByte(buffer, self._byte3.fbe_offset + self._byte3.fbe_size)
self._byte5 = FieldModelEnumByte(buffer, self._byte4.fbe_offset + self._byte4.fbe_size)
self._char0 = FieldModelEnumChar(buffer, self._byte5.fbe_offset + self._byte5.fbe_size)
self._char1 = FieldModelEnumChar(buffer, self._char0.fbe_offset + self._char0.fbe_size)
self._char2 = FieldModelEnumChar(buffer, self._char1.fbe_offset + self._char1.fbe_size)
self._char3 = FieldModelEnumChar(buffer, self._char2.fbe_offset + self._char2.fbe_size)
self._char4 = FieldModelEnumChar(buffer, self._char3.fbe_offset + self._char3.fbe_size)
self._char5 = FieldModelEnumChar(buffer, self._char4.fbe_offset + self._char4.fbe_size)
self._wchar0 = FieldModelEnumWChar(buffer, self._char5.fbe_offset + self._char5.fbe_size)
self._wchar1 = FieldModelEnumWChar(buffer, self._wchar0.fbe_offset + self._wchar0.fbe_size)
self._wchar2 = FieldModelEnumWChar(buffer, self._wchar1.fbe_offset + self._wchar1.fbe_size)
self._wchar3 = FieldModelEnumWChar(buffer, self._wchar2.fbe_offset + self._wchar2.fbe_size)
self._wchar4 = FieldModelEnumWChar(buffer, self._wchar3.fbe_offset + self._wchar3.fbe_size)
self._wchar5 = FieldModelEnumWChar(buffer, self._wchar4.fbe_offset + self._wchar4.fbe_size)
self._int8b0 = FieldModelEnumInt8(buffer, self._wchar5.fbe_offset + self._wchar5.fbe_size)
self._int8b1 = FieldModelEnumInt8(buffer, self._int8b0.fbe_offset + self._int8b0.fbe_size)
self._int8b2 = FieldModelEnumInt8(buffer, self._int8b1.fbe_offset + self._int8b1.fbe_size)
self._int8b3 = FieldModelEnumInt8(buffer, self._int8b2.fbe_offset + self._int8b2.fbe_size)
self._int8b4 = FieldModelEnumInt8(buffer, self._int8b3.fbe_offset + self._int8b3.fbe_size)
self._int8b5 = FieldModelEnumInt8(buffer, self._int8b4.fbe_offset + self._int8b4.fbe_size)
self._uint8b0 = FieldModelEnumUInt8(buffer, self._int8b5.fbe_offset + self._int8b5.fbe_size)
self._uint8b1 = FieldModelEnumUInt8(buffer, self._uint8b0.fbe_offset + self._uint8b0.fbe_size)
self._uint8b2 = FieldModelEnumUInt8(buffer, self._uint8b1.fbe_offset + self._uint8b1.fbe_size)
self._uint8b3 = FieldModelEnumUInt8(buffer, self._uint8b2.fbe_offset + self._uint8b2.fbe_size)
self._uint8b4 = FieldModelEnumUInt8(buffer, self._uint8b3.fbe_offset + self._uint8b3.fbe_size)
self._uint8b5 = FieldModelEnumUInt8(buffer, self._uint8b4.fbe_offset + self._uint8b4.fbe_size)
self._int16b0 = FieldModelEnumInt16(buffer, self._uint8b5.fbe_offset + self._uint8b5.fbe_size)
self._int16b1 = FieldModelEnumInt16(buffer, self._int16b0.fbe_offset + self._int16b0.fbe_size)
self._int16b2 = FieldModelEnumInt16(buffer, self._int16b1.fbe_offset + self._int16b1.fbe_size)
self._int16b3 = FieldModelEnumInt16(buffer, self._int16b2.fbe_offset + self._int16b2.fbe_size)
self._int16b4 = FieldModelEnumInt16(buffer, self._int16b3.fbe_offset + self._int16b3.fbe_size)
self._int16b5 = FieldModelEnumInt16(buffer, self._int16b4.fbe_offset + self._int16b4.fbe_size)
self._uint16b0 = FieldModelEnumUInt16(buffer, self._int16b5.fbe_offset + self._int16b5.fbe_size)
self._uint16b1 = FieldModelEnumUInt16(buffer, self._uint16b0.fbe_offset + self._uint16b0.fbe_size)
self._uint16b2 = FieldModelEnumUInt16(buffer, self._uint16b1.fbe_offset + self._uint16b1.fbe_size)
self._uint16b3 = FieldModelEnumUInt16(buffer, self._uint16b2.fbe_offset + self._uint16b2.fbe_size)
self._uint16b4 = FieldModelEnumUInt16(buffer, self._uint16b3.fbe_offset + self._uint16b3.fbe_size)
self._uint16b5 = FieldModelEnumUInt16(buffer, self._uint16b4.fbe_offset + self._uint16b4.fbe_size)
self._int32b0 = FieldModelEnumInt32(buffer, self._uint16b5.fbe_offset + self._uint16b5.fbe_size)
self._int32b1 = FieldModelEnumInt32(buffer, self._int32b0.fbe_offset + self._int32b0.fbe_size)
self._int32b2 = FieldModelEnumInt32(buffer, self._int32b1.fbe_offset + self._int32b1.fbe_size)
self._int32b3 = FieldModelEnumInt32(buffer, self._int32b2.fbe_offset + self._int32b2.fbe_size)
self._int32b4 = FieldModelEnumInt32(buffer, self._int32b3.fbe_offset + self._int32b3.fbe_size)
self._int32b5 = FieldModelEnumInt32(buffer, self._int32b4.fbe_offset + self._int32b4.fbe_size)
self._uint32b0 = FieldModelEnumUInt32(buffer, self._int32b5.fbe_offset + self._int32b5.fbe_size)
self._uint32b1 = FieldModelEnumUInt32(buffer, self._uint32b0.fbe_offset + self._uint32b0.fbe_size)
self._uint32b2 = FieldModelEnumUInt32(buffer, self._uint32b1.fbe_offset + self._uint32b1.fbe_size)
self._uint32b3 = FieldModelEnumUInt32(buffer, self._uint32b2.fbe_offset + self._uint32b2.fbe_size)
self._uint32b4 = FieldModelEnumUInt32(buffer, self._uint32b3.fbe_offset + self._uint32b3.fbe_size)
self._uint32b5 = FieldModelEnumUInt32(buffer, self._uint32b4.fbe_offset + self._uint32b4.fbe_size)
self._int64b0 = FieldModelEnumInt64(buffer, self._uint32b5.fbe_offset + self._uint32b5.fbe_size)
self._int64b1 = FieldModelEnumInt64(buffer, self._int64b0.fbe_offset + self._int64b0.fbe_size)
self._int64b2 = FieldModelEnumInt64(buffer, self._int64b1.fbe_offset + self._int64b1.fbe_size)
self._int64b3 = FieldModelEnumInt64(buffer, self._int64b2.fbe_offset + self._int64b2.fbe_size)
self._int64b4 = FieldModelEnumInt64(buffer, self._int64b3.fbe_offset + self._int64b3.fbe_size)
self._int64b5 = FieldModelEnumInt64(buffer, self._int64b4.fbe_offset + self._int64b4.fbe_size)
self._uint64b0 = FieldModelEnumUInt64(buffer, self._int64b5.fbe_offset + self._int64b5.fbe_size)
self._uint64b1 = FieldModelEnumUInt64(buffer, self._uint64b0.fbe_offset + self._uint64b0.fbe_size)
self._uint64b2 = FieldModelEnumUInt64(buffer, self._uint64b1.fbe_offset + self._uint64b1.fbe_size)
self._uint64b3 = FieldModelEnumUInt64(buffer, self._uint64b2.fbe_offset + self._uint64b2.fbe_size)
self._uint64b4 = FieldModelEnumUInt64(buffer, self._uint64b3.fbe_offset + self._uint64b3.fbe_size)
self._uint64b5 = FieldModelEnumUInt64(buffer, self._uint64b4.fbe_offset + self._uint64b4.fbe_size)
@property
def byte0(self):
return self._byte0
@property
def byte1(self):
return self._byte1
@property
def byte2(self):
return self._byte2
@property
def byte3(self):
return self._byte3
@property
def byte4(self):
return self._byte4
@property
def byte5(self):
return self._byte5
@property
def char0(self):
return self._char0
@property
def char1(self):
return self._char1
@property
def char2(self):
return self._char2
@property
def char3(self):
return self._char3
@property
def char4(self):
return self._char4
@property
def char5(self):
return self._char5
@property
def wchar0(self):
return self._wchar0
@property
def wchar1(self):
return self._wchar1
@property
def wchar2(self):
return self._wchar2
@property
def wchar3(self):
return self._wchar3
@property
def wchar4(self):
return self._wchar4
@property
def wchar5(self):
return self._wchar5
@property
def int8b0(self):
return self._int8b0
@property
def int8b1(self):
return self._int8b1
@property
def int8b2(self):
return self._int8b2
@property
def int8b3(self):
return self._int8b3
@property
def int8b4(self):
return self._int8b4
@property
def int8b5(self):
return self._int8b5
@property
def uint8b0(self):
return self._uint8b0
@property
def uint8b1(self):
return self._uint8b1
@property
def uint8b2(self):
return self._uint8b2
@property
def uint8b3(self):
return self._uint8b3
@property
def uint8b4(self):
return self._uint8b4
@property
def uint8b5(self):
return self._uint8b5
@property
def int16b0(self):
return self._int16b0
@property
def int16b1(self):
return self._int16b1
@property
def int16b2(self):
return self._int16b2
@property
def int16b3(self):
return self._int16b3
@property
def int16b4(self):
return self._int16b4
@property
def int16b5(self):
return self._int16b5
@property
def uint16b0(self):
return self._uint16b0
@property
def uint16b1(self):
return self._uint16b1
@property
def uint16b2(self):
return self._uint16b2
@property
def uint16b3(self):
return self._uint16b3
@property
def uint16b4(self):
return self._uint16b4
@property
def uint16b5(self):
return self._uint16b5
@property
def int32b0(self):
return self._int32b0
@property
def int32b1(self):
return self._int32b1
@property
def int32b2(self):
return self._int32b2
@property
def int32b3(self):
return self._int32b3
@property
def int32b4(self):
return self._int32b4
@property
def int32b5(self):
return self._int32b5
@property
def uint32b0(self):
return self._uint32b0
@property
def uint32b1(self):
return self._uint32b1
@property
def uint32b2(self):
return self._uint32b2
@property
def uint32b3(self):
return self._uint32b3
@property
def uint32b4(self):
return self._uint32b4
@property
def uint32b5(self):
return self._uint32b5
@property
def int64b0(self):
return self._int64b0
@property
def int64b1(self):
return self._int64b1
@property
def int64b2(self):
return self._int64b2
@property
def int64b3(self):
return self._int64b3
@property
def int64b4(self):
return self._int64b4
@property
def int64b5(self):
return self._int64b5
@property
def uint64b0(self):
return self._uint64b0
@property
def uint64b1(self):
return self._uint64b1
@property
def uint64b2(self):
return self._uint64b2
@property
def uint64b3(self):
return self._uint64b3
@property
def uint64b4(self):
return self._uint64b4
@property
def uint64b5(self):
return self._uint64b5
# Get the field size
@property
def fbe_size(self):
return 4
# Get the field body size
@property
def fbe_body(self):
fbe_result = 4 + 4 \
+ self.byte0.fbe_size \
+ self.byte1.fbe_size \
+ self.byte2.fbe_size \
+ self.byte3.fbe_size \
+ self.byte4.fbe_size \
+ self.byte5.fbe_size \
+ self.char0.fbe_size \
+ self.char1.fbe_size \
+ self.char2.fbe_size \
+ self.char3.fbe_size \
+ self.char4.fbe_size \
+ self.char5.fbe_size \
+ self.wchar0.fbe_size \
+ self.wchar1.fbe_size \
+ self.wchar2.fbe_size \
+ self.wchar3.fbe_size \
+ self.wchar4.fbe_size \
+ self.wchar5.fbe_size \
+ self.int8b0.fbe_size \
+ self.int8b1.fbe_size \
+ self.int8b2.fbe_size \
+ self.int8b3.fbe_size \
+ self.int8b4.fbe_size \
+ self.int8b5.fbe_size \
+ self.uint8b0.fbe_size \
+ self.uint8b1.fbe_size \
+ self.uint8b2.fbe_size \
+ self.uint8b3.fbe_size \
+ self.uint8b4.fbe_size \
+ self.uint8b5.fbe_size \
+ self.int16b0.fbe_size \
+ self.int16b1.fbe_size \
+ self.int16b2.fbe_size \
+ self.int16b3.fbe_size \
+ self.int16b4.fbe_size \
+ self.int16b5.fbe_size \
+ self.uint16b0.fbe_size \
+ self.uint16b1.fbe_size \
+ self.uint16b2.fbe_size \
+ self.uint16b3.fbe_size \
+ self.uint16b4.fbe_size \
+ self.uint16b5.fbe_size \
+ self.int32b0.fbe_size \
+ self.int32b1.fbe_size \
+ self.int32b2.fbe_size \
+ self.int32b3.fbe_size \
+ self.int32b4.fbe_size \
+ self.int32b5.fbe_size \
+ self.uint32b0.fbe_size \
+ self.uint32b1.fbe_size \
+ self.uint32b2.fbe_size \
+ self.uint32b3.fbe_size \
+ self.uint32b4.fbe_size \
+ self.uint32b5.fbe_size \
+ self.int64b0.fbe_size \
+ self.int64b1.fbe_size \
+ self.int64b2.fbe_size \
+ self.int64b3.fbe_size \
+ self.int64b4.fbe_size \
+ self.int64b5.fbe_size \
+ self.uint64b0.fbe_size \
+ self.uint64b1.fbe_size \
+ self.uint64b2.fbe_size \
+ self.uint64b3.fbe_size \
+ self.uint64b4.fbe_size \
+ self.uint64b5.fbe_size
return fbe_result
# Get the field extra size
@property
def fbe_extra(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
fbe_struct_offset = self.read_uint32(self.fbe_offset)
if (fbe_struct_offset == 0) or ((self._buffer.offset + fbe_struct_offset + 4) > self._buffer.size):
return 0
self._buffer.shift(fbe_struct_offset)
fbe_result = self.fbe_body \
+ self.byte0.fbe_extra \
+ self.byte1.fbe_extra \
+ self.byte2.fbe_extra \
+ self.byte3.fbe_extra \
+ self.byte4.fbe_extra \
+ self.byte5.fbe_extra \
+ self.char0.fbe_extra \
+ self.char1.fbe_extra \
+ self.char2.fbe_extra \
+ self.char3.fbe_extra \
+ self.char4.fbe_extra \
+ self.char5.fbe_extra \
+ self.wchar0.fbe_extra \
+ self.wchar1.fbe_extra \
+ self.wchar2.fbe_extra \
+ self.wchar3.fbe_extra \
+ self.wchar4.fbe_extra \
+ self.wchar5.fbe_extra \
+ self.int8b0.fbe_extra \
+ self.int8b1.fbe_extra \
+ self.int8b2.fbe_extra \
+ self.int8b3.fbe_extra \
+ self.int8b4.fbe_extra \
+ self.int8b5.fbe_extra \
+ self.uint8b0.fbe_extra \
+ self.uint8b1.fbe_extra \
+ self.uint8b2.fbe_extra \
+ self.uint8b3.fbe_extra \
+ self.uint8b4.fbe_extra \
+ self.uint8b5.fbe_extra \
+ self.int16b0.fbe_extra \
+ self.int16b1.fbe_extra \
+ self.int16b2.fbe_extra \
+ self.int16b3.fbe_extra \
+ self.int16b4.fbe_extra \
+ self.int16b5.fbe_extra \
+ self.uint16b0.fbe_extra \
+ self.uint16b1.fbe_extra \
+ self.uint16b2.fbe_extra \
+ self.uint16b3.fbe_extra \
+ self.uint16b4.fbe_extra \
+ self.uint16b5.fbe_extra \
+ self.int32b0.fbe_extra \
+ self.int32b1.fbe_extra \
+ self.int32b2.fbe_extra \
+ self.int32b3.fbe_extra \
+ self.int32b4.fbe_extra \
+ self.int32b5.fbe_extra \
+ self.uint32b0.fbe_extra \
+ self.uint32b1.fbe_extra \
+ self.uint32b2.fbe_extra \
+ self.uint32b3.fbe_extra \
+ self.uint32b4.fbe_extra \
+ self.uint32b5.fbe_extra \
+ self.int64b0.fbe_extra \
+ self.int64b1.fbe_extra \
+ self.int64b2.fbe_extra \
+ self.int64b3.fbe_extra \
+ self.int64b4.fbe_extra \
+ self.int64b5.fbe_extra \
+ self.uint64b0.fbe_extra \
+ self.uint64b1.fbe_extra \
+ self.uint64b2.fbe_extra \
+ self.uint64b3.fbe_extra \
+ self.uint64b4.fbe_extra \
+ self.uint64b5.fbe_extra
self._buffer.unshift(fbe_struct_offset)
return fbe_result
# Get the field type
@property
def fbe_type(self):
return self.TYPE
TYPE = 1
# Check if the struct value is valid
def verify(self, fbe_verify_type=True):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return True
fbe_struct_offset = self.read_uint32(self.fbe_offset)
if (fbe_struct_offset == 0) or ((self._buffer.offset + fbe_struct_offset + 4 + 4) > self._buffer.size):
return False
fbe_struct_size = self.read_uint32(fbe_struct_offset)
if fbe_struct_size < (4 + 4):
return False
fbe_struct_type = self.read_uint32(fbe_struct_offset + 4)
if fbe_verify_type and (fbe_struct_type != self.fbe_type):
return False
self._buffer.shift(fbe_struct_offset)
fbe_result = self.verify_fields(fbe_struct_size)
self._buffer.unshift(fbe_struct_offset)
return fbe_result
# Check if the struct fields are valid
def verify_fields(self, fbe_struct_size):
fbe_current_size = 4 + 4
if (fbe_current_size + self.byte0.fbe_size) > fbe_struct_size:
return True
if not self.byte0.verify():
return False
fbe_current_size += self.byte0.fbe_size
if (fbe_current_size + self.byte1.fbe_size) > fbe_struct_size:
return True
if not self.byte1.verify():
return False
fbe_current_size += self.byte1.fbe_size
if (fbe_current_size + self.byte2.fbe_size) > fbe_struct_size:
return True
if not self.byte2.verify():
return False
fbe_current_size += self.byte2.fbe_size
if (fbe_current_size + self.byte3.fbe_size) > fbe_struct_size:
return True
if not self.byte3.verify():
return False
fbe_current_size += self.byte3.fbe_size
if (fbe_current_size + self.byte4.fbe_size) > fbe_struct_size:
return True
if not self.byte4.verify():
return False
fbe_current_size += self.byte4.fbe_size
if (fbe_current_size + self.byte5.fbe_size) > fbe_struct_size:
return True
if not self.byte5.verify():
return False
fbe_current_size += self.byte5.fbe_size
if (fbe_current_size + self.char0.fbe_size) > fbe_struct_size:
return True
if not self.char0.verify():
return False
fbe_current_size += self.char0.fbe_size
if (fbe_current_size + self.char1.fbe_size) > fbe_struct_size:
return True
if not self.char1.verify():
return False
fbe_current_size += self.char1.fbe_size
if (fbe_current_size + self.char2.fbe_size) > fbe_struct_size:
return True
if not self.char2.verify():
return False
fbe_current_size += self.char2.fbe_size
if (fbe_current_size + self.char3.fbe_size) > fbe_struct_size:
return True
if not self.char3.verify():
return False
fbe_current_size += self.char3.fbe_size
if (fbe_current_size + self.char4.fbe_size) > fbe_struct_size:
return True
if not self.char4.verify():
return False
fbe_current_size += self.char4.fbe_size
if (fbe_current_size + self.char5.fbe_size) > fbe_struct_size:
return True
if not self.char5.verify():
return False
fbe_current_size += self.char5.fbe_size
if (fbe_current_size + self.wchar0.fbe_size) > fbe_struct_size:
return True
if not self.wchar0.verify():
return False
fbe_current_size += self.wchar0.fbe_size
if (fbe_current_size + self.wchar1.fbe_size) > fbe_struct_size:
return True
if not self.wchar1.verify():
return False
fbe_current_size += self.wchar1.fbe_size
if (fbe_current_size + self.wchar2.fbe_size) > fbe_struct_size:
return True
if not self.wchar2.verify():
return False
fbe_current_size += self.wchar2.fbe_size
if (fbe_current_size + self.wchar3.fbe_size) > fbe_struct_size:
return True
if not self.wchar3.verify():
return False
fbe_current_size += self.wchar3.fbe_size
if (fbe_current_size + self.wchar4.fbe_size) > fbe_struct_size:
return True
if not self.wchar4.verify():
return False
fbe_current_size += self.wchar4.fbe_size
if (fbe_current_size + self.wchar5.fbe_size) > fbe_struct_size:
return True
if not self.wchar5.verify():
return False
fbe_current_size += self.wchar5.fbe_size
if (fbe_current_size + self.int8b0.fbe_size) > fbe_struct_size:
return True
if not self.int8b0.verify():
return False
fbe_current_size += self.int8b0.fbe_size
if (fbe_current_size + self.int8b1.fbe_size) > fbe_struct_size:
return True
if not self.int8b1.verify():
return False
fbe_current_size += self.int8b1.fbe_size
if (fbe_current_size + self.int8b2.fbe_size) > fbe_struct_size:
return True
if not self.int8b2.verify():
return False
fbe_current_size += self.int8b2.fbe_size
if (fbe_current_size + self.int8b3.fbe_size) > fbe_struct_size:
return True
if not self.int8b3.verify():
return False
fbe_current_size += self.int8b3.fbe_size
if (fbe_current_size + self.int8b4.fbe_size) > fbe_struct_size:
return True
if not self.int8b4.verify():
return False
fbe_current_size += self.int8b4.fbe_size
if (fbe_current_size + self.int8b5.fbe_size) > fbe_struct_size:
return True
if not self.int8b5.verify():
return False
fbe_current_size += self.int8b5.fbe_size
if (fbe_current_size + self.uint8b0.fbe_size) > fbe_struct_size:
return True
if not self.uint8b0.verify():
return False
fbe_current_size += self.uint8b0.fbe_size
if (fbe_current_size + self.uint8b1.fbe_size) > fbe_struct_size:
return True
if not self.uint8b1.verify():
return False
fbe_current_size += self.uint8b1.fbe_size
if (fbe_current_size + self.uint8b2.fbe_size) > fbe_struct_size:
return True
if not self.uint8b2.verify():
return False
fbe_current_size += self.uint8b2.fbe_size
if (fbe_current_size + self.uint8b3.fbe_size) > fbe_struct_size:
return True
if not self.uint8b3.verify():
return False
fbe_current_size += self.uint8b3.fbe_size
if (fbe_current_size + self.uint8b4.fbe_size) > fbe_struct_size:
return True
if not self.uint8b4.verify():
return False
fbe_current_size += self.uint8b4.fbe_size
if (fbe_current_size + self.uint8b5.fbe_size) > fbe_struct_size:
return True
if not self.uint8b5.verify():
return False
fbe_current_size += self.uint8b5.fbe_size
if (fbe_current_size + self.int16b0.fbe_size) > fbe_struct_size:
return True
if not self.int16b0.verify():
return False
fbe_current_size += self.int16b0.fbe_size
if (fbe_current_size + self.int16b1.fbe_size) > fbe_struct_size:
return True
if not self.int16b1.verify():
return False
fbe_current_size += self.int16b1.fbe_size
if (fbe_current_size + self.int16b2.fbe_size) > fbe_struct_size:
return True
if not self.int16b2.verify():
return False
fbe_current_size += self.int16b2.fbe_size
if (fbe_current_size + self.int16b3.fbe_size) > fbe_struct_size:
return True
if not self.int16b3.verify():
return False
fbe_current_size += self.int16b3.fbe_size
if (fbe_current_size + self.int16b4.fbe_size) > fbe_struct_size:
return True
if not self.int16b4.verify():
return False
fbe_current_size += self.int16b4.fbe_size
if (fbe_current_size + self.int16b5.fbe_size) > fbe_struct_size:
return True
if not self.int16b5.verify():
return False
fbe_current_size += self.int16b5.fbe_size
if (fbe_current_size + self.uint16b0.fbe_size) > fbe_struct_size:
return True
if not self.uint16b0.verify():
return False
fbe_current_size += self.uint16b0.fbe_size
if (fbe_current_size + self.uint16b1.fbe_size) > fbe_struct_size:
return True
if not self.uint16b1.verify():
return False
fbe_current_size += self.uint16b1.fbe_size
if (fbe_current_size + self.uint16b2.fbe_size) > fbe_struct_size:
return True
if not self.uint16b2.verify():
return False
fbe_current_size += self.uint16b2.fbe_size
if (fbe_current_size + self.uint16b3.fbe_size) > fbe_struct_size:
return True
if not self.uint16b3.verify():
return False
fbe_current_size += self.uint16b3.fbe_size
if (fbe_current_size + self.uint16b4.fbe_size) > fbe_struct_size:
return True
if not self.uint16b4.verify():
return False
fbe_current_size += self.uint16b4.fbe_size
if (fbe_current_size + self.uint16b5.fbe_size) > fbe_struct_size:
return True
if not self.uint16b5.verify():
return False
fbe_current_size += self.uint16b5.fbe_size
if (fbe_current_size + self.int32b0.fbe_size) > fbe_struct_size:
return True
if not self.int32b0.verify():
return False
fbe_current_size += self.int32b0.fbe_size
if (fbe_current_size + self.int32b1.fbe_size) > fbe_struct_size:
return True
if not self.int32b1.verify():
return False
fbe_current_size += self.int32b1.fbe_size
if (fbe_current_size + self.int32b2.fbe_size) > fbe_struct_size:
return True
if not self.int32b2.verify():
return False
fbe_current_size += self.int32b2.fbe_size
if (fbe_current_size + self.int32b3.fbe_size) > fbe_struct_size:
return True
if not self.int32b3.verify():
return False
fbe_current_size += self.int32b3.fbe_size
if (fbe_current_size + self.int32b4.fbe_size) > fbe_struct_size:
return True
if not self.int32b4.verify():
return False
fbe_current_size += self.int32b4.fbe_size
if (fbe_current_size + self.int32b5.fbe_size) > fbe_struct_size:
return True
if not self.int32b5.verify():
return False
fbe_current_size += self.int32b5.fbe_size
if (fbe_current_size + self.uint32b0.fbe_size) > fbe_struct_size:
return True
if not self.uint32b0.verify():
return False
fbe_current_size += self.uint32b0.fbe_size
if (fbe_current_size + self.uint32b1.fbe_size) > fbe_struct_size:
return True
if not self.uint32b1.verify():
return False
fbe_current_size += self.uint32b1.fbe_size
if (fbe_current_size + self.uint32b2.fbe_size) > fbe_struct_size:
return True
if not self.uint32b2.verify():
return False
fbe_current_size += self.uint32b2.fbe_size
if (fbe_current_size + self.uint32b3.fbe_size) > fbe_struct_size:
return True
if not self.uint32b3.verify():
return False
fbe_current_size += self.uint32b3.fbe_size
if (fbe_current_size + self.uint32b4.fbe_size) > fbe_struct_size:
return True
if not self.uint32b4.verify():
return False
fbe_current_size += self.uint32b4.fbe_size
if (fbe_current_size + self.uint32b5.fbe_size) > fbe_struct_size:
return True
if not self.uint32b5.verify():
return False
fbe_current_size += self.uint32b5.fbe_size
if (fbe_current_size + self.int64b0.fbe_size) > fbe_struct_size:
return True
if not self.int64b0.verify():
return False
fbe_current_size += self.int64b0.fbe_size
if (fbe_current_size + self.int64b1.fbe_size) > fbe_struct_size:
return True
if not self.int64b1.verify():
return False
fbe_current_size += self.int64b1.fbe_size
if (fbe_current_size + self.int64b2.fbe_size) > fbe_struct_size:
return True
if not self.int64b2.verify():
return False
fbe_current_size += self.int64b2.fbe_size
if (fbe_current_size + self.int64b3.fbe_size) > fbe_struct_size:
return True
if not self.int64b3.verify():
return False
fbe_current_size += self.int64b3.fbe_size
if (fbe_current_size + self.int64b4.fbe_size) > fbe_struct_size:
return True
if not self.int64b4.verify():
return False
fbe_current_size += self.int64b4.fbe_size
if (fbe_current_size + self.int64b5.fbe_size) > fbe_struct_size:
return True
if not self.int64b5.verify():
return False
fbe_current_size += self.int64b5.fbe_size
if (fbe_current_size + self.uint64b0.fbe_size) > fbe_struct_size:
return True
if not self.uint64b0.verify():
return False
fbe_current_size += self.uint64b0.fbe_size
if (fbe_current_size + self.uint64b1.fbe_size) > fbe_struct_size:
return True
if not self.uint64b1.verify():
return False
fbe_current_size += self.uint64b1.fbe_size
if (fbe_current_size + self.uint64b2.fbe_size) > fbe_struct_size:
return True
if not self.uint64b2.verify():
return False
fbe_current_size += self.uint64b2.fbe_size
if (fbe_current_size + self.uint64b3.fbe_size) > fbe_struct_size:
return True
if not self.uint64b3.verify():
return False
fbe_current_size += self.uint64b3.fbe_size
if (fbe_current_size + self.uint64b4.fbe_size) > fbe_struct_size:
return True
if not self.uint64b4.verify():
return False
fbe_current_size += self.uint64b4.fbe_size
if (fbe_current_size + self.uint64b5.fbe_size) > fbe_struct_size:
return True
if not self.uint64b5.verify():
return False
fbe_current_size += self.uint64b5.fbe_size
return True
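# Note: reaching fbe_struct_size before all fields are checked returns True —
# trailing fields absent from an older struct layout are considered valid.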
# Get the struct value (begin phase)
def get_begin(self):
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
fbe_struct_offset = self.read_uint32(self.fbe_offset)
assert (fbe_struct_offset > 0) and ((self._buffer.offset + fbe_struct_offset + 4 + 4) <= self._buffer.size), "Model is broken!"
if (fbe_struct_offset == 0) or ((self._buffer.offset + fbe_struct_offset + 4 + 4) > self._buffer.size):
return 0
fbe_struct_size = self.read_uint32(fbe_struct_offset)
assert (fbe_struct_size >= (4 + 4)), "Model is broken!"
if fbe_struct_size < (4 + 4):
return 0
self._buffer.shift(fbe_struct_offset)
return fbe_struct_offset
# Get the struct value (end phase)
def get_end(self, fbe_begin):
self._buffer.unshift(fbe_begin)
# Get the struct value
def get(self, fbe_value=None):
if fbe_value is None:
fbe_value = Enums()
fbe_begin = self.get_begin()
if fbe_begin == 0:
return fbe_value
fbe_struct_size = self.read_uint32(0)
self.get_fields(fbe_value, fbe_struct_size)
self.get_end(fbe_begin)
return fbe_value
# Get the struct fields values
def get_fields(self, fbe_value, fbe_struct_size):
fbe_current_size = 4 + 4
if (fbe_current_size + self.byte0.fbe_size) <= fbe_struct_size:
fbe_value.byte0 = self.byte0.get(EnumByte.ENUM_VALUE_0)
else:
fbe_value.byte0 = EnumByte.ENUM_VALUE_0
fbe_current_size += self.byte0.fbe_size
if (fbe_current_size + self.byte1.fbe_size) <= fbe_struct_size:
fbe_value.byte1 = self.byte1.get(EnumByte.ENUM_VALUE_1)
else:
fbe_value.byte1 = EnumByte.ENUM_VALUE_1
fbe_current_size += self.byte1.fbe_size
if (fbe_current_size + self.byte2.fbe_size) <= fbe_struct_size:
fbe_value.byte2 = self.byte2.get(EnumByte.ENUM_VALUE_2)
else:
fbe_value.byte2 = EnumByte.ENUM_VALUE_2
fbe_current_size += self.byte2.fbe_size
if (fbe_current_size + self.byte3.fbe_size) <= fbe_struct_size:
fbe_value.byte3 = self.byte3.get(EnumByte.ENUM_VALUE_3)
else:
fbe_value.byte3 = EnumByte.ENUM_VALUE_3
fbe_current_size += self.byte3.fbe_size
if (fbe_current_size + self.byte4.fbe_size) <= fbe_struct_size:
fbe_value.byte4 = self.byte4.get(EnumByte.ENUM_VALUE_4)
else:
fbe_value.byte4 = EnumByte.ENUM_VALUE_4
fbe_current_size += self.byte4.fbe_size
if (fbe_current_size + self.byte5.fbe_size) <= fbe_struct_size:
fbe_value.byte5 = self.byte5.get(EnumByte.ENUM_VALUE_5)
else:
fbe_value.byte5 = EnumByte.ENUM_VALUE_5
fbe_current_size += self.byte5.fbe_size
if (fbe_current_size + self.char0.fbe_size) <= fbe_struct_size:
fbe_value.char0 = self.char0.get(EnumChar.ENUM_VALUE_0)
else:
fbe_value.char0 = EnumChar.ENUM_VALUE_0
fbe_current_size += self.char0.fbe_size
if (fbe_current_size + self.char1.fbe_size) <= fbe_struct_size:
fbe_value.char1 = self.char1.get(EnumChar.ENUM_VALUE_1)
else:
fbe_value.char1 = EnumChar.ENUM_VALUE_1
fbe_current_size += self.char1.fbe_size
if (fbe_current_size + self.char2.fbe_size) <= fbe_struct_size:
fbe_value.char2 = self.char2.get(EnumChar.ENUM_VALUE_2)
else:
fbe_value.char2 = EnumChar.ENUM_VALUE_2
fbe_current_size += self.char2.fbe_size
if (fbe_current_size + self.char3.fbe_size) <= fbe_struct_size:
fbe_value.char3 = self.char3.get(EnumChar.ENUM_VALUE_3)
else:
fbe_value.char3 = EnumChar.ENUM_VALUE_3
fbe_current_size += self.char3.fbe_size
if (fbe_current_size + self.char4.fbe_size) <= fbe_struct_size:
fbe_value.char4 = self.char4.get(EnumChar.ENUM_VALUE_4)
else:
fbe_value.char4 = EnumChar.ENUM_VALUE_4
fbe_current_size += self.char4.fbe_size
if (fbe_current_size + self.char5.fbe_size) <= fbe_struct_size:
fbe_value.char5 = self.char5.get(EnumChar.ENUM_VALUE_5)
else:
fbe_value.char5 = EnumChar.ENUM_VALUE_5
fbe_current_size += self.char5.fbe_size
if (fbe_current_size + self.wchar0.fbe_size) <= fbe_struct_size:
fbe_value.wchar0 = self.wchar0.get(EnumWChar.ENUM_VALUE_0)
else:
fbe_value.wchar0 = EnumWChar.ENUM_VALUE_0
fbe_current_size += self.wchar0.fbe_size
if (fbe_current_size + self.wchar1.fbe_size) <= fbe_struct_size:
fbe_value.wchar1 = self.wchar1.get(EnumWChar.ENUM_VALUE_1)
else:
fbe_value.wchar1 = EnumWChar.ENUM_VALUE_1
fbe_current_size += self.wchar1.fbe_size
if (fbe_current_size + self.wchar2.fbe_size) <= fbe_struct_size:
fbe_value.wchar2 = self.wchar2.get(EnumWChar.ENUM_VALUE_2)
else:
fbe_value.wchar2 = EnumWChar.ENUM_VALUE_2
fbe_current_size += self.wchar2.fbe_size
if (fbe_current_size + self.wchar3.fbe_size) <= fbe_struct_size:
fbe_value.wchar3 = self.wchar3.get(EnumWChar.ENUM_VALUE_3)
else:
fbe_value.wchar3 = EnumWChar.ENUM_VALUE_3
fbe_current_size += self.wchar3.fbe_size
if (fbe_current_size + self.wchar4.fbe_size) <= fbe_struct_size:
fbe_value.wchar4 = self.wchar4.get(EnumWChar.ENUM_VALUE_4)
else:
fbe_value.wchar4 = EnumWChar.ENUM_VALUE_4
fbe_current_size += self.wchar4.fbe_size
if (fbe_current_size + self.wchar5.fbe_size) <= fbe_struct_size:
fbe_value.wchar5 = self.wchar5.get(EnumWChar.ENUM_VALUE_5)
else:
fbe_value.wchar5 = EnumWChar.ENUM_VALUE_5
fbe_current_size += self.wchar5.fbe_size
if (fbe_current_size + self.int8b0.fbe_size) <= fbe_struct_size:
fbe_value.int8b0 = self.int8b0.get(EnumInt8.ENUM_VALUE_0)
else:
fbe_value.int8b0 = EnumInt8.ENUM_VALUE_0
fbe_current_size += self.int8b0.fbe_size
if (fbe_current_size + self.int8b1.fbe_size) <= fbe_struct_size:
fbe_value.int8b1 = self.int8b1.get(EnumInt8.ENUM_VALUE_1)
else:
fbe_value.int8b1 = EnumInt8.ENUM_VALUE_1
fbe_current_size += self.int8b1.fbe_size
if (fbe_current_size + self.int8b2.fbe_size) <= fbe_struct_size:
fbe_value.int8b2 = self.int8b2.get(EnumInt8.ENUM_VALUE_2)
else:
fbe_value.int8b2 = EnumInt8.ENUM_VALUE_2
fbe_current_size += self.int8b2.fbe_size
if (fbe_current_size + self.int8b3.fbe_size) <= fbe_struct_size:
fbe_value.int8b3 = self.int8b3.get(EnumInt8.ENUM_VALUE_3)
else:
fbe_value.int8b3 = EnumInt8.ENUM_VALUE_3
fbe_current_size += self.int8b3.fbe_size
if (fbe_current_size + self.int8b4.fbe_size) <= fbe_struct_size:
fbe_value.int8b4 = self.int8b4.get(EnumInt8.ENUM_VALUE_4)
else:
fbe_value.int8b4 = EnumInt8.ENUM_VALUE_4
fbe_current_size += self.int8b4.fbe_size
if (fbe_current_size + self.int8b5.fbe_size) <= fbe_struct_size:
fbe_value.int8b5 = self.int8b5.get(EnumInt8.ENUM_VALUE_5)
else:
fbe_value.int8b5 = EnumInt8.ENUM_VALUE_5
fbe_current_size += self.int8b5.fbe_size
if (fbe_current_size + self.uint8b0.fbe_size) <= fbe_struct_size:
fbe_value.uint8b0 = self.uint8b0.get(EnumUInt8.ENUM_VALUE_0)
else:
fbe_value.uint8b0 = EnumUInt8.ENUM_VALUE_0
fbe_current_size += self.uint8b0.fbe_size
if (fbe_current_size + self.uint8b1.fbe_size) <= fbe_struct_size:
fbe_value.uint8b1 = self.uint8b1.get(EnumUInt8.ENUM_VALUE_1)
else:
fbe_value.uint8b1 = EnumUInt8.ENUM_VALUE_1
fbe_current_size += self.uint8b1.fbe_size
if (fbe_current_size + self.uint8b2.fbe_size) <= fbe_struct_size:
fbe_value.uint8b2 = self.uint8b2.get(EnumUInt8.ENUM_VALUE_2)
else:
fbe_value.uint8b2 = EnumUInt8.ENUM_VALUE_2
fbe_current_size += self.uint8b2.fbe_size
if (fbe_current_size + self.uint8b3.fbe_size) <= fbe_struct_size:
fbe_value.uint8b3 = self.uint8b3.get(EnumUInt8.ENUM_VALUE_3)
else:
fbe_value.uint8b3 = EnumUInt8.ENUM_VALUE_3
fbe_current_size += self.uint8b3.fbe_size
if (fbe_current_size + self.uint8b4.fbe_size) <= fbe_struct_size:
fbe_value.uint8b4 = self.uint8b4.get(EnumUInt8.ENUM_VALUE_4)
else:
fbe_value.uint8b4 = EnumUInt8.ENUM_VALUE_4
fbe_current_size += self.uint8b4.fbe_size
if (fbe_current_size + self.uint8b5.fbe_size) <= fbe_struct_size:
fbe_value.uint8b5 = self.uint8b5.get(EnumUInt8.ENUM_VALUE_5)
else:
fbe_value.uint8b5 = EnumUInt8.ENUM_VALUE_5
fbe_current_size += self.uint8b5.fbe_size
if (fbe_current_size + self.int16b0.fbe_size) <= fbe_struct_size:
fbe_value.int16b0 = self.int16b0.get(EnumInt16.ENUM_VALUE_0)
else:
fbe_value.int16b0 = EnumInt16.ENUM_VALUE_0
fbe_current_size += self.int16b0.fbe_size
if (fbe_current_size + self.int16b1.fbe_size) <= fbe_struct_size:
fbe_value.int16b1 = self.int16b1.get(EnumInt16.ENUM_VALUE_1)
else:
fbe_value.int16b1 = EnumInt16.ENUM_VALUE_1
fbe_current_size += self.int16b1.fbe_size
if (fbe_current_size + self.int16b2.fbe_size) <= fbe_struct_size:
fbe_value.int16b2 = self.int16b2.get(EnumInt16.ENUM_VALUE_2)
else:
fbe_value.int16b2 = EnumInt16.ENUM_VALUE_2
fbe_current_size += self.int16b2.fbe_size
if (fbe_current_size + self.int16b3.fbe_size) <= fbe_struct_size:
fbe_value.int16b3 = self.int16b3.get(EnumInt16.ENUM_VALUE_3)
else:
fbe_value.int16b3 = EnumInt16.ENUM_VALUE_3
fbe_current_size += self.int16b3.fbe_size
if (fbe_current_size + self.int16b4.fbe_size) <= fbe_struct_size:
fbe_value.int16b4 = self.int16b4.get(EnumInt16.ENUM_VALUE_4)
else:
fbe_value.int16b4 = EnumInt16.ENUM_VALUE_4
fbe_current_size += self.int16b4.fbe_size
if (fbe_current_size + self.int16b5.fbe_size) <= fbe_struct_size:
fbe_value.int16b5 = self.int16b5.get(EnumInt16.ENUM_VALUE_5)
else:
fbe_value.int16b5 = EnumInt16.ENUM_VALUE_5
fbe_current_size += self.int16b5.fbe_size
if (fbe_current_size + self.uint16b0.fbe_size) <= fbe_struct_size:
fbe_value.uint16b0 = self.uint16b0.get(EnumUInt16.ENUM_VALUE_0)
else:
fbe_value.uint16b0 = EnumUInt16.ENUM_VALUE_0
fbe_current_size += self.uint16b0.fbe_size
if (fbe_current_size + self.uint16b1.fbe_size) <= fbe_struct_size:
fbe_value.uint16b1 = self.uint16b1.get(EnumUInt16.ENUM_VALUE_1)
else:
fbe_value.uint16b1 = EnumUInt16.ENUM_VALUE_1
fbe_current_size += self.uint16b1.fbe_size
if (fbe_current_size + self.uint16b2.fbe_size) <= fbe_struct_size:
fbe_value.uint16b2 = self.uint16b2.get(EnumUInt16.ENUM_VALUE_2)
else:
fbe_value.uint16b2 = EnumUInt16.ENUM_VALUE_2
fbe_current_size += self.uint16b2.fbe_size
if (fbe_current_size + self.uint16b3.fbe_size) <= fbe_struct_size:
fbe_value.uint16b3 = self.uint16b3.get(EnumUInt16.ENUM_VALUE_3)
else:
fbe_value.uint16b3 = EnumUInt16.ENUM_VALUE_3
fbe_current_size += self.uint16b3.fbe_size
if (fbe_current_size + self.uint16b4.fbe_size) <= fbe_struct_size:
fbe_value.uint16b4 = self.uint16b4.get(EnumUInt16.ENUM_VALUE_4)
else:
fbe_value.uint16b4 = EnumUInt16.ENUM_VALUE_4
fbe_current_size += self.uint16b4.fbe_size
if (fbe_current_size + self.uint16b5.fbe_size) <= fbe_struct_size:
fbe_value.uint16b5 = self.uint16b5.get(EnumUInt16.ENUM_VALUE_5)
else:
fbe_value.uint16b5 = EnumUInt16.ENUM_VALUE_5
fbe_current_size += self.uint16b5.fbe_size
if (fbe_current_size + self.int32b0.fbe_size) <= fbe_struct_size:
fbe_value.int32b0 = self.int32b0.get(EnumInt32.ENUM_VALUE_0)
else:
fbe_value.int32b0 = EnumInt32.ENUM_VALUE_0
fbe_current_size += self.int32b0.fbe_size
if (fbe_current_size + self.int32b1.fbe_size) <= fbe_struct_size:
fbe_value.int32b1 = self.int32b1.get(EnumInt32.ENUM_VALUE_1)
else:
fbe_value.int32b1 = EnumInt32.ENUM_VALUE_1
fbe_current_size += self.int32b1.fbe_size
if (fbe_current_size + self.int32b2.fbe_size) <= fbe_struct_size:
fbe_value.int32b2 = self.int32b2.get(EnumInt32.ENUM_VALUE_2)
else:
fbe_value.int32b2 = EnumInt32.ENUM_VALUE_2
fbe_current_size += self.int32b2.fbe_size
if (fbe_current_size + self.int32b3.fbe_size) <= fbe_struct_size:
fbe_value.int32b3 = self.int32b3.get(EnumInt32.ENUM_VALUE_3)
else:
fbe_value.int32b3 = EnumInt32.ENUM_VALUE_3
fbe_current_size += self.int32b3.fbe_size
if (fbe_current_size + self.int32b4.fbe_size) <= fbe_struct_size:
fbe_value.int32b4 = self.int32b4.get(EnumInt32.ENUM_VALUE_4)
else:
fbe_value.int32b4 = EnumInt32.ENUM_VALUE_4
fbe_current_size += self.int32b4.fbe_size
if (fbe_current_size + self.int32b5.fbe_size) <= fbe_struct_size:
fbe_value.int32b5 = self.int32b5.get(EnumInt32.ENUM_VALUE_5)
else:
fbe_value.int32b5 = EnumInt32.ENUM_VALUE_5
fbe_current_size += self.int32b5.fbe_size
if (fbe_current_size + self.uint32b0.fbe_size) <= fbe_struct_size:
fbe_value.uint32b0 = self.uint32b0.get(EnumUInt32.ENUM_VALUE_0)
else:
fbe_value.uint32b0 = EnumUInt32.ENUM_VALUE_0
fbe_current_size += self.uint32b0.fbe_size
if (fbe_current_size + self.uint32b1.fbe_size) <= fbe_struct_size:
fbe_value.uint32b1 = self.uint32b1.get(EnumUInt32.ENUM_VALUE_1)
else:
fbe_value.uint32b1 = EnumUInt32.ENUM_VALUE_1
fbe_current_size += self.uint32b1.fbe_size
if (fbe_current_size + self.uint32b2.fbe_size) <= fbe_struct_size:
fbe_value.uint32b2 = self.uint32b2.get(EnumUInt32.ENUM_VALUE_2)
else:
fbe_value.uint32b2 = EnumUInt32.ENUM_VALUE_2
fbe_current_size += self.uint32b2.fbe_size
if (fbe_current_size + self.uint32b3.fbe_size) <= fbe_struct_size:
fbe_value.uint32b3 = self.uint32b3.get(EnumUInt32.ENUM_VALUE_3)
else:
fbe_value.uint32b3 = EnumUInt32.ENUM_VALUE_3
fbe_current_size += self.uint32b3.fbe_size
if (fbe_current_size + self.uint32b4.fbe_size) <= fbe_struct_size:
fbe_value.uint32b4 = self.uint32b4.get(EnumUInt32.ENUM_VALUE_4)
else:
fbe_value.uint32b4 = EnumUInt32.ENUM_VALUE_4
fbe_current_size += self.uint32b4.fbe_size
if (fbe_current_size + self.uint32b5.fbe_size) <= fbe_struct_size:
fbe_value.uint32b5 = self.uint32b5.get(EnumUInt32.ENUM_VALUE_5)
else:
fbe_value.uint32b5 = EnumUInt32.ENUM_VALUE_5
fbe_current_size += self.uint32b5.fbe_size
if (fbe_current_size + self.int64b0.fbe_size) <= fbe_struct_size:
fbe_value.int64b0 = self.int64b0.get(EnumInt64.ENUM_VALUE_0)
else:
fbe_value.int64b0 = EnumInt64.ENUM_VALUE_0
fbe_current_size += self.int64b0.fbe_size
if (fbe_current_size + self.int64b1.fbe_size) <= fbe_struct_size:
fbe_value.int64b1 = self.int64b1.get(EnumInt64.ENUM_VALUE_1)
else:
fbe_value.int64b1 = EnumInt64.ENUM_VALUE_1
fbe_current_size += self.int64b1.fbe_size
if (fbe_current_size + self.int64b2.fbe_size) <= fbe_struct_size:
fbe_value.int64b2 = self.int64b2.get(EnumInt64.ENUM_VALUE_2)
else:
fbe_value.int64b2 = EnumInt64.ENUM_VALUE_2
fbe_current_size += self.int64b2.fbe_size
if (fbe_current_size + self.int64b3.fbe_size) <= fbe_struct_size:
fbe_value.int64b3 = self.int64b3.get(EnumInt64.ENUM_VALUE_3)
else:
fbe_value.int64b3 = EnumInt64.ENUM_VALUE_3
fbe_current_size += self.int64b3.fbe_size
if (fbe_current_size + self.int64b4.fbe_size) <= fbe_struct_size:
fbe_value.int64b4 = self.int64b4.get(EnumInt64.ENUM_VALUE_4)
else:
fbe_value.int64b4 = EnumInt64.ENUM_VALUE_4
fbe_current_size += self.int64b4.fbe_size
if (fbe_current_size + self.int64b5.fbe_size) <= fbe_struct_size:
fbe_value.int64b5 = self.int64b5.get(EnumInt64.ENUM_VALUE_5)
else:
fbe_value.int64b5 = EnumInt64.ENUM_VALUE_5
fbe_current_size += self.int64b5.fbe_size
if (fbe_current_size + self.uint64b0.fbe_size) <= fbe_struct_size:
fbe_value.uint64b0 = self.uint64b0.get(EnumUInt64.ENUM_VALUE_0)
else:
fbe_value.uint64b0 = EnumUInt64.ENUM_VALUE_0
fbe_current_size += self.uint64b0.fbe_size
if (fbe_current_size + self.uint64b1.fbe_size) <= fbe_struct_size:
fbe_value.uint64b1 = self.uint64b1.get(EnumUInt64.ENUM_VALUE_1)
else:
fbe_value.uint64b1 = EnumUInt64.ENUM_VALUE_1
fbe_current_size += self.uint64b1.fbe_size
if (fbe_current_size + self.uint64b2.fbe_size) <= fbe_struct_size:
fbe_value.uint64b2 = self.uint64b2.get(EnumUInt64.ENUM_VALUE_2)
else:
fbe_value.uint64b2 = EnumUInt64.ENUM_VALUE_2
fbe_current_size += self.uint64b2.fbe_size
if (fbe_current_size + self.uint64b3.fbe_size) <= fbe_struct_size:
fbe_value.uint64b3 = self.uint64b3.get(EnumUInt64.ENUM_VALUE_3)
else:
fbe_value.uint64b3 = EnumUInt64.ENUM_VALUE_3
fbe_current_size += self.uint64b3.fbe_size
if (fbe_current_size + self.uint64b4.fbe_size) <= fbe_struct_size:
fbe_value.uint64b4 = self.uint64b4.get(EnumUInt64.ENUM_VALUE_4)
else:
fbe_value.uint64b4 = EnumUInt64.ENUM_VALUE_4
fbe_current_size += self.uint64b4.fbe_size
if (fbe_current_size + self.uint64b5.fbe_size) <= fbe_struct_size:
fbe_value.uint64b5 = self.uint64b5.get(EnumUInt64.ENUM_VALUE_5)
else:
fbe_value.uint64b5 = EnumUInt64.ENUM_VALUE_5
fbe_current_size += self.uint64b5.fbe_size
# Set the struct value (begin phase)
def set_begin(self):
assert (self._buffer.offset + self.fbe_offset + self.fbe_size) <= self._buffer.size, "Model is broken!"
if (self._buffer.offset + self.fbe_offset + self.fbe_size) > self._buffer.size:
return 0
fbe_struct_size = self.fbe_body
fbe_struct_offset = self._buffer.allocate(fbe_struct_size) - self._buffer.offset
assert (fbe_struct_offset > 0) and ((self._buffer.offset + fbe_struct_offset + fbe_struct_size) <= self._buffer.size), "Model is broken!"
if (fbe_struct_offset <= 0) or ((self._buffer.offset + fbe_struct_offset + fbe_struct_size) > self._buffer.size):
return 0
self.write_uint32(self.fbe_offset, fbe_struct_offset)
self.write_uint32(fbe_struct_offset, fbe_struct_size)
self.write_uint32(fbe_struct_offset + 4, self.fbe_type)
self._buffer.shift(fbe_struct_offset)
return fbe_struct_offset
# Set the struct value (end phase)
def set_end(self, fbe_begin):
self._buffer.unshift(fbe_begin)
# Set the struct value
def set(self, fbe_value):
fbe_begin = self.set_begin()
if fbe_begin == 0:
return
self.set_fields(fbe_value)
self.set_end(fbe_begin)
# Set the struct fields values
def set_fields(self, fbe_value):
self.byte0.set(fbe_value.byte0)
self.byte1.set(fbe_value.byte1)
self.byte2.set(fbe_value.byte2)
self.byte3.set(fbe_value.byte3)
self.byte4.set(fbe_value.byte4)
self.byte5.set(fbe_value.byte5)
self.char0.set(fbe_value.char0)
self.char1.set(fbe_value.char1)
self.char2.set(fbe_value.char2)
self.char3.set(fbe_value.char3)
self.char4.set(fbe_value.char4)
self.char5.set(fbe_value.char5)
self.wchar0.set(fbe_value.wchar0)
self.wchar1.set(fbe_value.wchar1)
self.wchar2.set(fbe_value.wchar2)
self.wchar3.set(fbe_value.wchar3)
self.wchar4.set(fbe_value.wchar4)
self.wchar5.set(fbe_value.wchar5)
self.int8b0.set(fbe_value.int8b0)
self.int8b1.set(fbe_value.int8b1)
self.int8b2.set(fbe_value.int8b2)
self.int8b3.set(fbe_value.int8b3)
self.int8b4.set(fbe_value.int8b4)
self.int8b5.set(fbe_value.int8b5)
self.uint8b0.set(fbe_value.uint8b0)
self.uint8b1.set(fbe_value.uint8b1)
self.uint8b2.set(fbe_value.uint8b2)
self.uint8b3.set(fbe_value.uint8b3)
self.uint8b4.set(fbe_value.uint8b4)
self.uint8b5.set(fbe_value.uint8b5)
self.int16b0.set(fbe_value.int16b0)
self.int16b1.set(fbe_value.int16b1)
self.int16b2.set(fbe_value.int16b2)
self.int16b3.set(fbe_value.int16b3)
self.int16b4.set(fbe_value.int16b4)
self.int16b5.set(fbe_value.int16b5)
self.uint16b0.set(fbe_value.uint16b0)
self.uint16b1.set(fbe_value.uint16b1)
self.uint16b2.set(fbe_value.uint16b2)
self.uint16b3.set(fbe_value.uint16b3)
self.uint16b4.set(fbe_value.uint16b4)
self.uint16b5.set(fbe_value.uint16b5)
self.int32b0.set(fbe_value.int32b0)
self.int32b1.set(fbe_value.int32b1)
self.int32b2.set(fbe_value.int32b2)
self.int32b3.set(fbe_value.int32b3)
self.int32b4.set(fbe_value.int32b4)
self.int32b5.set(fbe_value.int32b5)
self.uint32b0.set(fbe_value.uint32b0)
self.uint32b1.set(fbe_value.uint32b1)
self.uint32b2.set(fbe_value.uint32b2)
self.uint32b3.set(fbe_value.uint32b3)
self.uint32b4.set(fbe_value.uint32b4)
self.uint32b5.set(fbe_value.uint32b5)
self.int64b0.set(fbe_value.int64b0)
self.int64b1.set(fbe_value.int64b1)
self.int64b2.set(fbe_value.int64b2)
self.int64b3.set(fbe_value.int64b3)
self.int64b4.set(fbe_value.int64b4)
self.int64b5.set(fbe_value.int64b5)
self.uint64b0.set(fbe_value.uint64b0)
self.uint64b1.set(fbe_value.uint64b1)
self.uint64b2.set(fbe_value.uint64b2)
self.uint64b3.set(fbe_value.uint64b3)
self.uint64b4.set(fbe_value.uint64b4)
self.uint64b5.set(fbe_value.uint64b5)
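# Layout note (derived from get_begin/set_begin above): a struct value is
# stored out-of-line. The 4-byte value at fbe_offset is an offset to the
# struct body, which starts with a 4-byte size followed by a 4-byte type id
# (the recurring "4 + 4" header); all field models then address their data
# relative to that body.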
# Fast Binary Encoding Enums model
class EnumsModel(fbe.Model):
__slots__ = "_model",
def __init__(self, buffer=None):
super().__init__(buffer)
self._model = FieldModelEnums(self.buffer, 4)
@property
def model(self):
return self._model
# Get the model size
@property
def fbe_size(self):
return self._model.fbe_size + self._model.fbe_extra
# Get the model type
@property
def fbe_type(self):
return self.TYPE
TYPE = FieldModelEnums.TYPE
# Check if the struct value is valid
def verify(self):
if (self.buffer.offset + self._model.fbe_offset - 4) > self.buffer.size:
return False
fbe_full_size = self.read_uint32(self._model.fbe_offset - 4)
if fbe_full_size < self._model.fbe_size:
return False
return self._model.verify()
# Create a new model (begin phase)
def create_begin(self):
fbe_begin = self.buffer.allocate(4 + self._model.fbe_size)
return fbe_begin
# Create a new model (end phase)
def create_end(self, fbe_begin):
fbe_end = self.buffer.size
fbe_full_size = fbe_end - fbe_begin
self.write_uint32(self._model.fbe_offset - 4, fbe_full_size)
return fbe_full_size
# Serialize the struct value
def serialize(self, value):
fbe_begin = self.create_begin()
self._model.set(value)
fbe_full_size = self.create_end(fbe_begin)
return fbe_full_size
# Deserialize the struct value
def deserialize(self, value=None):
if value is None:
value = Enums()
if (self.buffer.offset + self._model.fbe_offset - 4) > self.buffer.size:
value = Enums()
return value, 0
fbe_full_size = self.read_uint32(self._model.fbe_offset - 4)
assert (fbe_full_size >= self._model.fbe_size), "Model is broken!"
if fbe_full_size < self._model.fbe_size:
value = Enums()
return value, 0
self._model.get(value)
return value, fbe_full_size
# Move to the next struct value
def next(self, prev):
self._model.fbe_shift(prev)
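# A minimal usage sketch (not part of the generated model): round-trip an
# Enums value through EnumsModel using only the API defined above. That
# fbe.Model allocates a default write buffer when none is passed, and that a
# second model can attach to the same buffer, are assumptions about the fbe
# runtime, not guarantees of this file.
def _example_enums_roundtrip():
    writer = EnumsModel()  # assumed: a default write buffer is created
    size = writer.serialize(Enums())  # write the default-initialized struct
    assert size > 0 and writer.verify()
    reader = EnumsModel(writer.buffer)  # assumed: models may share one buffer
    value, read_size = reader.deserialize()  # returns (Enums, bytes consumed)
    return value, read_size

# Fast Binary Encoding Enums final model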
class FinalModelEnums(fbe.FinalModel):
__slots__ = "_byte0", "_byte1", "_byte2", "_byte3", "_byte4", "_byte5", "_char0", "_char1", "_char2", "_char3", "_char4", "_char5", "_wchar0", "_wchar1", "_wchar2", "_wchar3", "_wchar4", "_wchar5", "_int8b0", "_int8b1", "_int8b2", "_int8b3", "_int8b4", "_int8b5", "_uint8b0", "_uint8b1", "_uint8b2", "_uint8b3", "_uint8b4", "_uint8b5", "_int16b0", "_int16b1", "_int16b2", "_int16b3", "_int16b4", "_int16b5", "_uint16b0", "_uint16b1", "_uint16b2", "_uint16b3", "_uint16b4", "_uint16b5", "_int32b0", "_int32b1", "_int32b2", "_int32b3", "_int32b4", "_int32b5", "_uint32b0", "_uint32b1", "_uint32b2", "_uint32b3", "_uint32b4", "_uint32b5", "_int64b0", "_int64b1", "_int64b2", "_int64b3", "_int64b4", "_int64b5", "_uint64b0", "_uint64b1", "_uint64b2", "_uint64b3", "_uint64b4", "_uint64b5",
def __init__(self, buffer, offset):
super().__init__(buffer, offset)
self._byte0 = FinalModelEnumByte(buffer, 0)
self._byte1 = FinalModelEnumByte(buffer, 0)
self._byte2 = FinalModelEnumByte(buffer, 0)
self._byte3 = FinalModelEnumByte(buffer, 0)
self._byte4 = FinalModelEnumByte(buffer, 0)
self._byte5 = FinalModelEnumByte(buffer, 0)
self._char0 = FinalModelEnumChar(buffer, 0)
self._char1 = FinalModelEnumChar(buffer, 0)
self._char2 = FinalModelEnumChar(buffer, 0)
self._char3 = FinalModelEnumChar(buffer, 0)
self._char4 = FinalModelEnumChar(buffer, 0)
self._char5 = FinalModelEnumChar(buffer, 0)
self._wchar0 = FinalModelEnumWChar(buffer, 0)
self._wchar1 = FinalModelEnumWChar(buffer, 0)
self._wchar2 = FinalModelEnumWChar(buffer, 0)
self._wchar3 = FinalModelEnumWChar(buffer, 0)
self._wchar4 = FinalModelEnumWChar(buffer, 0)
self._wchar5 = FinalModelEnumWChar(buffer, 0)
self._int8b0 = FinalModelEnumInt8(buffer, 0)
self._int8b1 = FinalModelEnumInt8(buffer, 0)
self._int8b2 = FinalModelEnumInt8(buffer, 0)
self._int8b3 = FinalModelEnumInt8(buffer, 0)
self._int8b4 = FinalModelEnumInt8(buffer, 0)
self._int8b5 = FinalModelEnumInt8(buffer, 0)
self._uint8b0 = FinalModelEnumUInt8(buffer, 0)
self._uint8b1 = FinalModelEnumUInt8(buffer, 0)
self._uint8b2 = FinalModelEnumUInt8(buffer, 0)
self._uint8b3 = FinalModelEnumUInt8(buffer, 0)
self._uint8b4 = FinalModelEnumUInt8(buffer, 0)
self._uint8b5 = FinalModelEnumUInt8(buffer, 0)
self._int16b0 = FinalModelEnumInt16(buffer, 0)
self._int16b1 = FinalModelEnumInt16(buffer, 0)
self._int16b2 = FinalModelEnumInt16(buffer, 0)
self._int16b3 = FinalModelEnumInt16(buffer, 0)
self._int16b4 = FinalModelEnumInt16(buffer, 0)
self._int16b5 = FinalModelEnumInt16(buffer, 0)
self._uint16b0 = FinalModelEnumUInt16(buffer, 0)
self._uint16b1 = FinalModelEnumUInt16(buffer, 0)
self._uint16b2 = FinalModelEnumUInt16(buffer, 0)
self._uint16b3 = FinalModelEnumUInt16(buffer, 0)
self._uint16b4 = FinalModelEnumUInt16(buffer, 0)
self._uint16b5 = FinalModelEnumUInt16(buffer, 0)
self._int32b0 = FinalModelEnumInt32(buffer, 0)
self._int32b1 = FinalModelEnumInt32(buffer, 0)
self._int32b2 = FinalModelEnumInt32(buffer, 0)
self._int32b3 = FinalModelEnumInt32(buffer, 0)
self._int32b4 = FinalModelEnumInt32(buffer, 0)
self._int32b5 = FinalModelEnumInt32(buffer, 0)
self._uint32b0 = FinalModelEnumUInt32(buffer, 0)
self._uint32b1 = FinalModelEnumUInt32(buffer, 0)
self._uint32b2 = FinalModelEnumUInt32(buffer, 0)
self._uint32b3 = FinalModelEnumUInt32(buffer, 0)
self._uint32b4 = FinalModelEnumUInt32(buffer, 0)
self._uint32b5 = FinalModelEnumUInt32(buffer, 0)
self._int64b0 = FinalModelEnumInt64(buffer, 0)
self._int64b1 = FinalModelEnumInt64(buffer, 0)
self._int64b2 = FinalModelEnumInt64(buffer, 0)
self._int64b3 = FinalModelEnumInt64(buffer, 0)
self._int64b4 = FinalModelEnumInt64(buffer, 0)
self._int64b5 = FinalModelEnumInt64(buffer, 0)
self._uint64b0 = FinalModelEnumUInt64(buffer, 0)
self._uint64b1 = FinalModelEnumUInt64(buffer, 0)
self._uint64b2 = FinalModelEnumUInt64(buffer, 0)
self._uint64b3 = FinalModelEnumUInt64(buffer, 0)
self._uint64b4 = FinalModelEnumUInt64(buffer, 0)
self._uint64b5 = FinalModelEnumUInt64(buffer, 0)
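    # Note: final-model fields carry no per-field offsets; verify_fields,
    # get_fields and set_fields below walk the buffer sequentially and
    # recompute each field's fbe_offset cumulatively as they go.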
@property
def byte0(self):
return self._byte0
@property
def byte1(self):
return self._byte1
@property
def byte2(self):
return self._byte2
@property
def byte3(self):
return self._byte3
@property
def byte4(self):
return self._byte4
@property
def byte5(self):
return self._byte5
@property
def char0(self):
return self._char0
@property
def char1(self):
return self._char1
@property
def char2(self):
return self._char2
@property
def char3(self):
return self._char3
@property
def char4(self):
return self._char4
@property
def char5(self):
return self._char5
@property
def wchar0(self):
return self._wchar0
@property
def wchar1(self):
return self._wchar1
@property
def wchar2(self):
return self._wchar2
@property
def wchar3(self):
return self._wchar3
@property
def wchar4(self):
return self._wchar4
@property
def wchar5(self):
return self._wchar5
@property
def int8b0(self):
return self._int8b0
@property
def int8b1(self):
return self._int8b1
@property
def int8b2(self):
return self._int8b2
@property
def int8b3(self):
return self._int8b3
@property
def int8b4(self):
return self._int8b4
@property
def int8b5(self):
return self._int8b5
@property
def uint8b0(self):
return self._uint8b0
@property
def uint8b1(self):
return self._uint8b1
@property
def uint8b2(self):
return self._uint8b2
@property
def uint8b3(self):
return self._uint8b3
@property
def uint8b4(self):
return self._uint8b4
@property
def uint8b5(self):
return self._uint8b5
@property
def int16b0(self):
return self._int16b0
@property
def int16b1(self):
return self._int16b1
@property
def int16b2(self):
return self._int16b2
@property
def int16b3(self):
return self._int16b3
@property
def int16b4(self):
return self._int16b4
@property
def int16b5(self):
return self._int16b5
@property
def uint16b0(self):
return self._uint16b0
@property
def uint16b1(self):
return self._uint16b1
@property
def uint16b2(self):
return self._uint16b2
@property
def uint16b3(self):
return self._uint16b3
@property
def uint16b4(self):
return self._uint16b4
@property
def uint16b5(self):
return self._uint16b5
@property
def int32b0(self):
return self._int32b0
@property
def int32b1(self):
return self._int32b1
@property
def int32b2(self):
return self._int32b2
@property
def int32b3(self):
return self._int32b3
@property
def int32b4(self):
return self._int32b4
@property
def int32b5(self):
return self._int32b5
@property
def uint32b0(self):
return self._uint32b0
@property
def uint32b1(self):
return self._uint32b1
@property
def uint32b2(self):
return self._uint32b2
@property
def uint32b3(self):
return self._uint32b3
@property
def uint32b4(self):
return self._uint32b4
@property
def uint32b5(self):
return self._uint32b5
@property
def int64b0(self):
return self._int64b0
@property
def int64b1(self):
return self._int64b1
@property
def int64b2(self):
return self._int64b2
@property
def int64b3(self):
return self._int64b3
@property
def int64b4(self):
return self._int64b4
@property
def int64b5(self):
return self._int64b5
@property
def uint64b0(self):
return self._uint64b0
@property
def uint64b1(self):
return self._uint64b1
@property
def uint64b2(self):
return self._uint64b2
@property
def uint64b3(self):
return self._uint64b3
@property
def uint64b4(self):
return self._uint64b4
@property
def uint64b5(self):
return self._uint64b5
# Get the allocation size
def fbe_allocation_size(self, fbe_value):
fbe_result = 0 \
+ self.byte0.fbe_allocation_size(fbe_value.byte0) \
+ self.byte1.fbe_allocation_size(fbe_value.byte1) \
+ self.byte2.fbe_allocation_size(fbe_value.byte2) \
+ self.byte3.fbe_allocation_size(fbe_value.byte3) \
+ self.byte4.fbe_allocation_size(fbe_value.byte4) \
+ self.byte5.fbe_allocation_size(fbe_value.byte5) \
+ self.char0.fbe_allocation_size(fbe_value.char0) \
+ self.char1.fbe_allocation_size(fbe_value.char1) \
+ self.char2.fbe_allocation_size(fbe_value.char2) \
+ self.char3.fbe_allocation_size(fbe_value.char3) \
+ self.char4.fbe_allocation_size(fbe_value.char4) \
+ self.char5.fbe_allocation_size(fbe_value.char5) \
+ self.wchar0.fbe_allocation_size(fbe_value.wchar0) \
+ self.wchar1.fbe_allocation_size(fbe_value.wchar1) \
+ self.wchar2.fbe_allocation_size(fbe_value.wchar2) \
+ self.wchar3.fbe_allocation_size(fbe_value.wchar3) \
+ self.wchar4.fbe_allocation_size(fbe_value.wchar4) \
+ self.wchar5.fbe_allocation_size(fbe_value.wchar5) \
+ self.int8b0.fbe_allocation_size(fbe_value.int8b0) \
+ self.int8b1.fbe_allocation_size(fbe_value.int8b1) \
+ self.int8b2.fbe_allocation_size(fbe_value.int8b2) \
+ self.int8b3.fbe_allocation_size(fbe_value.int8b3) \
+ self.int8b4.fbe_allocation_size(fbe_value.int8b4) \
+ self.int8b5.fbe_allocation_size(fbe_value.int8b5) \
+ self.uint8b0.fbe_allocation_size(fbe_value.uint8b0) \
+ self.uint8b1.fbe_allocation_size(fbe_value.uint8b1) \
+ self.uint8b2.fbe_allocation_size(fbe_value.uint8b2) \
+ self.uint8b3.fbe_allocation_size(fbe_value.uint8b3) \
+ self.uint8b4.fbe_allocation_size(fbe_value.uint8b4) \
+ self.uint8b5.fbe_allocation_size(fbe_value.uint8b5) \
+ self.int16b0.fbe_allocation_size(fbe_value.int16b0) \
+ self.int16b1.fbe_allocation_size(fbe_value.int16b1) \
+ self.int16b2.fbe_allocation_size(fbe_value.int16b2) \
+ self.int16b3.fbe_allocation_size(fbe_value.int16b3) \
+ self.int16b4.fbe_allocation_size(fbe_value.int16b4) \
+ self.int16b5.fbe_allocation_size(fbe_value.int16b5) \
+ self.uint16b0.fbe_allocation_size(fbe_value.uint16b0) \
+ self.uint16b1.fbe_allocation_size(fbe_value.uint16b1) \
+ self.uint16b2.fbe_allocation_size(fbe_value.uint16b2) \
+ self.uint16b3.fbe_allocation_size(fbe_value.uint16b3) \
+ self.uint16b4.fbe_allocation_size(fbe_value.uint16b4) \
+ self.uint16b5.fbe_allocation_size(fbe_value.uint16b5) \
+ self.int32b0.fbe_allocation_size(fbe_value.int32b0) \
+ self.int32b1.fbe_allocation_size(fbe_value.int32b1) \
+ self.int32b2.fbe_allocation_size(fbe_value.int32b2) \
+ self.int32b3.fbe_allocation_size(fbe_value.int32b3) \
+ self.int32b4.fbe_allocation_size(fbe_value.int32b4) \
+ self.int32b5.fbe_allocation_size(fbe_value.int32b5) \
+ self.uint32b0.fbe_allocation_size(fbe_value.uint32b0) \
+ self.uint32b1.fbe_allocation_size(fbe_value.uint32b1) \
+ self.uint32b2.fbe_allocation_size(fbe_value.uint32b2) \
+ self.uint32b3.fbe_allocation_size(fbe_value.uint32b3) \
+ self.uint32b4.fbe_allocation_size(fbe_value.uint32b4) \
+ self.uint32b5.fbe_allocation_size(fbe_value.uint32b5) \
+ self.int64b0.fbe_allocation_size(fbe_value.int64b0) \
+ self.int64b1.fbe_allocation_size(fbe_value.int64b1) \
+ self.int64b2.fbe_allocation_size(fbe_value.int64b2) \
+ self.int64b3.fbe_allocation_size(fbe_value.int64b3) \
+ self.int64b4.fbe_allocation_size(fbe_value.int64b4) \
+ self.int64b5.fbe_allocation_size(fbe_value.int64b5) \
+ self.uint64b0.fbe_allocation_size(fbe_value.uint64b0) \
+ self.uint64b1.fbe_allocation_size(fbe_value.uint64b1) \
+ self.uint64b2.fbe_allocation_size(fbe_value.uint64b2) \
+ self.uint64b3.fbe_allocation_size(fbe_value.uint64b3) \
+ self.uint64b4.fbe_allocation_size(fbe_value.uint64b4) \
            + self.uint64b5.fbe_allocation_size(fbe_value.uint64b5)
return fbe_result
# Get the final type
@property
def fbe_type(self):
return self.TYPE
TYPE = 1
# Check if the struct value is valid
def verify(self):
self._buffer.shift(self.fbe_offset)
fbe_result = self.verify_fields()
self._buffer.unshift(self.fbe_offset)
return fbe_result
# Check if the struct fields are valid
def verify_fields(self):
fbe_current_offset = 0
self.byte0.fbe_offset = fbe_current_offset
fbe_field_size = self.byte0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.byte1.fbe_offset = fbe_current_offset
fbe_field_size = self.byte1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.byte2.fbe_offset = fbe_current_offset
fbe_field_size = self.byte2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.byte3.fbe_offset = fbe_current_offset
fbe_field_size = self.byte3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.byte4.fbe_offset = fbe_current_offset
fbe_field_size = self.byte4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.byte5.fbe_offset = fbe_current_offset
fbe_field_size = self.byte5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.char0.fbe_offset = fbe_current_offset
fbe_field_size = self.char0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.char1.fbe_offset = fbe_current_offset
fbe_field_size = self.char1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.char2.fbe_offset = fbe_current_offset
fbe_field_size = self.char2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.char3.fbe_offset = fbe_current_offset
fbe_field_size = self.char3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.char4.fbe_offset = fbe_current_offset
fbe_field_size = self.char4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.char5.fbe_offset = fbe_current_offset
fbe_field_size = self.char5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.wchar0.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.wchar1.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.wchar2.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.wchar3.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.wchar4.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.wchar5.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int8b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int8b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int8b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int8b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int8b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int8b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint8b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint8b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint8b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint8b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint8b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint8b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int16b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int16b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int16b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int16b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int16b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int16b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint16b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint16b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint16b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint16b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint16b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint16b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int32b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int32b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int32b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int32b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int32b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int32b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint32b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint32b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint32b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint32b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint32b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint32b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int64b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int64b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int64b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int64b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int64b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.int64b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint64b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b0.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint64b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b1.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint64b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b2.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint64b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b3.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint64b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b4.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
self.uint64b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b5.verify()
if fbe_field_size == sys.maxsize:
return sys.maxsize
fbe_current_offset += fbe_field_size
return fbe_current_offset
# Get the struct value
def get(self, fbe_value=None):
if fbe_value is None:
fbe_value = Enums()
self._buffer.shift(self.fbe_offset)
fbe_size = self.get_fields(fbe_value)
self._buffer.unshift(self.fbe_offset)
return fbe_value, fbe_size
# Get the struct fields values
def get_fields(self, fbe_value):
fbe_current_offset = 0
fbe_current_size = 0
self.byte0.fbe_offset = fbe_current_offset
fbe_result = self.byte0.get()
fbe_value.byte0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.byte1.fbe_offset = fbe_current_offset
fbe_result = self.byte1.get()
fbe_value.byte1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.byte2.fbe_offset = fbe_current_offset
fbe_result = self.byte2.get()
fbe_value.byte2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.byte3.fbe_offset = fbe_current_offset
fbe_result = self.byte3.get()
fbe_value.byte3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.byte4.fbe_offset = fbe_current_offset
fbe_result = self.byte4.get()
fbe_value.byte4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.byte5.fbe_offset = fbe_current_offset
fbe_result = self.byte5.get()
fbe_value.byte5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.char0.fbe_offset = fbe_current_offset
fbe_result = self.char0.get()
fbe_value.char0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.char1.fbe_offset = fbe_current_offset
fbe_result = self.char1.get()
fbe_value.char1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.char2.fbe_offset = fbe_current_offset
fbe_result = self.char2.get()
fbe_value.char2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.char3.fbe_offset = fbe_current_offset
fbe_result = self.char3.get()
fbe_value.char3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.char4.fbe_offset = fbe_current_offset
fbe_result = self.char4.get()
fbe_value.char4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.char5.fbe_offset = fbe_current_offset
fbe_result = self.char5.get()
fbe_value.char5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.wchar0.fbe_offset = fbe_current_offset
fbe_result = self.wchar0.get()
fbe_value.wchar0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.wchar1.fbe_offset = fbe_current_offset
fbe_result = self.wchar1.get()
fbe_value.wchar1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.wchar2.fbe_offset = fbe_current_offset
fbe_result = self.wchar2.get()
fbe_value.wchar2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.wchar3.fbe_offset = fbe_current_offset
fbe_result = self.wchar3.get()
fbe_value.wchar3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.wchar4.fbe_offset = fbe_current_offset
fbe_result = self.wchar4.get()
fbe_value.wchar4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.wchar5.fbe_offset = fbe_current_offset
fbe_result = self.wchar5.get()
fbe_value.wchar5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int8b0.fbe_offset = fbe_current_offset
fbe_result = self.int8b0.get()
fbe_value.int8b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int8b1.fbe_offset = fbe_current_offset
fbe_result = self.int8b1.get()
fbe_value.int8b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int8b2.fbe_offset = fbe_current_offset
fbe_result = self.int8b2.get()
fbe_value.int8b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int8b3.fbe_offset = fbe_current_offset
fbe_result = self.int8b3.get()
fbe_value.int8b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int8b4.fbe_offset = fbe_current_offset
fbe_result = self.int8b4.get()
fbe_value.int8b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int8b5.fbe_offset = fbe_current_offset
fbe_result = self.int8b5.get()
fbe_value.int8b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint8b0.fbe_offset = fbe_current_offset
fbe_result = self.uint8b0.get()
fbe_value.uint8b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint8b1.fbe_offset = fbe_current_offset
fbe_result = self.uint8b1.get()
fbe_value.uint8b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint8b2.fbe_offset = fbe_current_offset
fbe_result = self.uint8b2.get()
fbe_value.uint8b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint8b3.fbe_offset = fbe_current_offset
fbe_result = self.uint8b3.get()
fbe_value.uint8b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint8b4.fbe_offset = fbe_current_offset
fbe_result = self.uint8b4.get()
fbe_value.uint8b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint8b5.fbe_offset = fbe_current_offset
fbe_result = self.uint8b5.get()
fbe_value.uint8b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int16b0.fbe_offset = fbe_current_offset
fbe_result = self.int16b0.get()
fbe_value.int16b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int16b1.fbe_offset = fbe_current_offset
fbe_result = self.int16b1.get()
fbe_value.int16b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int16b2.fbe_offset = fbe_current_offset
fbe_result = self.int16b2.get()
fbe_value.int16b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int16b3.fbe_offset = fbe_current_offset
fbe_result = self.int16b3.get()
fbe_value.int16b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int16b4.fbe_offset = fbe_current_offset
fbe_result = self.int16b4.get()
fbe_value.int16b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int16b5.fbe_offset = fbe_current_offset
fbe_result = self.int16b5.get()
fbe_value.int16b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint16b0.fbe_offset = fbe_current_offset
fbe_result = self.uint16b0.get()
fbe_value.uint16b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint16b1.fbe_offset = fbe_current_offset
fbe_result = self.uint16b1.get()
fbe_value.uint16b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint16b2.fbe_offset = fbe_current_offset
fbe_result = self.uint16b2.get()
fbe_value.uint16b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint16b3.fbe_offset = fbe_current_offset
fbe_result = self.uint16b3.get()
fbe_value.uint16b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint16b4.fbe_offset = fbe_current_offset
fbe_result = self.uint16b4.get()
fbe_value.uint16b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint16b5.fbe_offset = fbe_current_offset
fbe_result = self.uint16b5.get()
fbe_value.uint16b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int32b0.fbe_offset = fbe_current_offset
fbe_result = self.int32b0.get()
fbe_value.int32b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int32b1.fbe_offset = fbe_current_offset
fbe_result = self.int32b1.get()
fbe_value.int32b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int32b2.fbe_offset = fbe_current_offset
fbe_result = self.int32b2.get()
fbe_value.int32b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int32b3.fbe_offset = fbe_current_offset
fbe_result = self.int32b3.get()
fbe_value.int32b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int32b4.fbe_offset = fbe_current_offset
fbe_result = self.int32b4.get()
fbe_value.int32b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int32b5.fbe_offset = fbe_current_offset
fbe_result = self.int32b5.get()
fbe_value.int32b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint32b0.fbe_offset = fbe_current_offset
fbe_result = self.uint32b0.get()
fbe_value.uint32b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint32b1.fbe_offset = fbe_current_offset
fbe_result = self.uint32b1.get()
fbe_value.uint32b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint32b2.fbe_offset = fbe_current_offset
fbe_result = self.uint32b2.get()
fbe_value.uint32b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint32b3.fbe_offset = fbe_current_offset
fbe_result = self.uint32b3.get()
fbe_value.uint32b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint32b4.fbe_offset = fbe_current_offset
fbe_result = self.uint32b4.get()
fbe_value.uint32b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint32b5.fbe_offset = fbe_current_offset
fbe_result = self.uint32b5.get()
fbe_value.uint32b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int64b0.fbe_offset = fbe_current_offset
fbe_result = self.int64b0.get()
fbe_value.int64b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int64b1.fbe_offset = fbe_current_offset
fbe_result = self.int64b1.get()
fbe_value.int64b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int64b2.fbe_offset = fbe_current_offset
fbe_result = self.int64b2.get()
fbe_value.int64b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int64b3.fbe_offset = fbe_current_offset
fbe_result = self.int64b3.get()
fbe_value.int64b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int64b4.fbe_offset = fbe_current_offset
fbe_result = self.int64b4.get()
fbe_value.int64b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.int64b5.fbe_offset = fbe_current_offset
fbe_result = self.int64b5.get()
fbe_value.int64b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint64b0.fbe_offset = fbe_current_offset
fbe_result = self.uint64b0.get()
fbe_value.uint64b0 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint64b1.fbe_offset = fbe_current_offset
fbe_result = self.uint64b1.get()
fbe_value.uint64b1 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint64b2.fbe_offset = fbe_current_offset
fbe_result = self.uint64b2.get()
fbe_value.uint64b2 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint64b3.fbe_offset = fbe_current_offset
fbe_result = self.uint64b3.get()
fbe_value.uint64b3 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint64b4.fbe_offset = fbe_current_offset
fbe_result = self.uint64b4.get()
fbe_value.uint64b4 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
self.uint64b5.fbe_offset = fbe_current_offset
fbe_result = self.uint64b5.get()
fbe_value.uint64b5 = fbe_result[0]
fbe_current_offset += fbe_result[1]
fbe_current_size += fbe_result[1]
return fbe_current_size
# Set the struct value
def set(self, fbe_value):
self._buffer.shift(self.fbe_offset)
fbe_size = self.set_fields(fbe_value)
self._buffer.unshift(self.fbe_offset)
return fbe_size
# Set the struct fields values
def set_fields(self, fbe_value):
fbe_current_offset = 0
fbe_current_size = 0
self.byte0.fbe_offset = fbe_current_offset
fbe_field_size = self.byte0.set(fbe_value.byte0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.byte1.fbe_offset = fbe_current_offset
fbe_field_size = self.byte1.set(fbe_value.byte1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.byte2.fbe_offset = fbe_current_offset
fbe_field_size = self.byte2.set(fbe_value.byte2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.byte3.fbe_offset = fbe_current_offset
fbe_field_size = self.byte3.set(fbe_value.byte3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.byte4.fbe_offset = fbe_current_offset
fbe_field_size = self.byte4.set(fbe_value.byte4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.byte5.fbe_offset = fbe_current_offset
fbe_field_size = self.byte5.set(fbe_value.byte5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.char0.fbe_offset = fbe_current_offset
fbe_field_size = self.char0.set(fbe_value.char0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.char1.fbe_offset = fbe_current_offset
fbe_field_size = self.char1.set(fbe_value.char1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.char2.fbe_offset = fbe_current_offset
fbe_field_size = self.char2.set(fbe_value.char2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.char3.fbe_offset = fbe_current_offset
fbe_field_size = self.char3.set(fbe_value.char3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.char4.fbe_offset = fbe_current_offset
fbe_field_size = self.char4.set(fbe_value.char4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.char5.fbe_offset = fbe_current_offset
fbe_field_size = self.char5.set(fbe_value.char5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.wchar0.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar0.set(fbe_value.wchar0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.wchar1.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar1.set(fbe_value.wchar1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.wchar2.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar2.set(fbe_value.wchar2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.wchar3.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar3.set(fbe_value.wchar3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.wchar4.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar4.set(fbe_value.wchar4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.wchar5.fbe_offset = fbe_current_offset
fbe_field_size = self.wchar5.set(fbe_value.wchar5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int8b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b0.set(fbe_value.int8b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int8b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b1.set(fbe_value.int8b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int8b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b2.set(fbe_value.int8b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int8b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b3.set(fbe_value.int8b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int8b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b4.set(fbe_value.int8b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int8b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int8b5.set(fbe_value.int8b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint8b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b0.set(fbe_value.uint8b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint8b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b1.set(fbe_value.uint8b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint8b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b2.set(fbe_value.uint8b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint8b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b3.set(fbe_value.uint8b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint8b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b4.set(fbe_value.uint8b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint8b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint8b5.set(fbe_value.uint8b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int16b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b0.set(fbe_value.int16b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int16b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b1.set(fbe_value.int16b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int16b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b2.set(fbe_value.int16b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int16b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b3.set(fbe_value.int16b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int16b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b4.set(fbe_value.int16b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int16b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int16b5.set(fbe_value.int16b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint16b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b0.set(fbe_value.uint16b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint16b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b1.set(fbe_value.uint16b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint16b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b2.set(fbe_value.uint16b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint16b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b3.set(fbe_value.uint16b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint16b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b4.set(fbe_value.uint16b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint16b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint16b5.set(fbe_value.uint16b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int32b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b0.set(fbe_value.int32b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int32b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b1.set(fbe_value.int32b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int32b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b2.set(fbe_value.int32b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int32b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b3.set(fbe_value.int32b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int32b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b4.set(fbe_value.int32b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int32b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int32b5.set(fbe_value.int32b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint32b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b0.set(fbe_value.uint32b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint32b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b1.set(fbe_value.uint32b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint32b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b2.set(fbe_value.uint32b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint32b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b3.set(fbe_value.uint32b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint32b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b4.set(fbe_value.uint32b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint32b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint32b5.set(fbe_value.uint32b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int64b0.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b0.set(fbe_value.int64b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int64b1.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b1.set(fbe_value.int64b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int64b2.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b2.set(fbe_value.int64b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int64b3.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b3.set(fbe_value.int64b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int64b4.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b4.set(fbe_value.int64b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.int64b5.fbe_offset = fbe_current_offset
fbe_field_size = self.int64b5.set(fbe_value.int64b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint64b0.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b0.set(fbe_value.uint64b0)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint64b1.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b1.set(fbe_value.uint64b1)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint64b2.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b2.set(fbe_value.uint64b2)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint64b3.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b3.set(fbe_value.uint64b3)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint64b4.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b4.set(fbe_value.uint64b4)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
self.uint64b5.fbe_offset = fbe_current_offset
fbe_field_size = self.uint64b5.set(fbe_value.uint64b5)
fbe_current_offset += fbe_field_size
fbe_current_size += fbe_field_size
return fbe_current_size
# Fast Binary Encoding Enums final model
class EnumsFinalModel(fbe.Model):
__slots__ = "_model",
def __init__(self, buffer=None):
super().__init__(buffer)
self._model = FinalModelEnums(self.buffer, 8)
# Get the model type
@property
def fbe_type(self):
return self.TYPE
TYPE = FinalModelEnums.TYPE
# Check if the struct value is valid
def verify(self):
if (self.buffer.offset + self._model.fbe_offset) > self.buffer.size:
return False
fbe_struct_size = self.read_uint32(self._model.fbe_offset - 8)
fbe_struct_type = self.read_uint32(self._model.fbe_offset - 4)
if (fbe_struct_size <= 0) or (fbe_struct_type != self.fbe_type):
return False
return (8 + self._model.verify()) == fbe_struct_size
# Serialize the struct value
def serialize(self, value):
fbe_initial_size = self.buffer.size
fbe_struct_type = self.fbe_type
fbe_struct_size = 8 + self._model.fbe_allocation_size(value)
fbe_struct_offset = self.buffer.allocate(fbe_struct_size) - self.buffer.offset
assert ((self.buffer.offset + fbe_struct_offset + fbe_struct_size) <= self.buffer.size), "Model is broken!"
if (self.buffer.offset + fbe_struct_offset + fbe_struct_size) > self.buffer.size:
return 0
fbe_struct_size = 8 + self._model.set(value)
self.buffer.resize(fbe_initial_size + fbe_struct_size)
self.write_uint32(self._model.fbe_offset - 8, fbe_struct_size)
self.write_uint32(self._model.fbe_offset - 4, fbe_struct_type)
return fbe_struct_size
# Deserialize the struct value
def deserialize(self, value=None):
if value is None:
value = Enums()
assert ((self.buffer.offset + self._model.fbe_offset) <= self.buffer.size), "Model is broken!"
if (self.buffer.offset + self._model.fbe_offset) > self.buffer.size:
return Enums(), 0
fbe_struct_size = self.read_uint32(self._model.fbe_offset - 8)
fbe_struct_type = self.read_uint32(self._model.fbe_offset - 4)
assert ((fbe_struct_size > 0) and (fbe_struct_type == self.fbe_type)), "Model is broken!"
if (fbe_struct_size <= 0) or (fbe_struct_type != self.fbe_type):
return Enums(), 8
fbe_result = self._model.get(value)
return fbe_result[0], (8 + fbe_result[1])
# Move to the next struct value
def next(self, prev):
self._model.fbe_shift(prev)
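# A minimal round-trip sketch for the final model (illustrative only; it
# assumes the generated Enums struct and fbe.WriteBuffer from this package):
#   model = EnumsFinalModel(fbe.WriteBuffer())
#   written = model.serialize(Enums())  # prepends the 4-byte size + 4-byte type header
#   assert model.verify()
#   value, read = model.deserialize()   # -> (Enums, bytes consumed incl. header)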
# Fast Binary Encoding enums protocol version
class ProtocolVersion(object):
# Protocol major version
Major = 1
# Protocol minor version
Minor = 0
# Fast Binary Encoding enums sender
class Sender(fbe.Sender):
def __init__(self, buffer=None):
super().__init__(buffer, False)
# Sender models accessors
# Send methods
def send(self, value):
return 0
# Send message handler
def on_send(self, buffer, offset, size):
raise NotImplementedError("enums.Sender.on_send() not implemented!")
# Fast Binary Encoding enums receiver
class Receiver(fbe.Receiver):
def __init__(self, buffer=None):
super().__init__(buffer, False)
# Receive handlers
def on_receive(self, type, buffer, offset, size):
return False
# Fast Binary Encoding enums proxy
class Proxy(fbe.Receiver):
def __init__(self, buffer=None):
super().__init__(buffer, False)
# Receive handlers
def on_receive(self, type, buffer, offset, size):
return False
# Fast Binary Encoding enums final sender
class FinalSender(fbe.Sender):
def __init__(self, buffer=None):
super().__init__(buffer, True)
# Sender models accessors
# Send methods
def send(self, value):
return 0
# Send message handler
def on_send(self, buffer, offset, size):
raise NotImplementedError("enums.Sender.on_send() not implemented!")
# Fast Binary Encoding enums final receiver
class FinalReceiver(fbe.Receiver):
def __init__(self, buffer=None):
super().__init__(buffer, True)
# Receive handlers
def on_receive(self, type, buffer, offset, size):
return False
|
[
"chronoxor@gmail.com"
] |
chronoxor@gmail.com
|
269e62c46fbf46b0860106a026daa1e57e7d2698
|
d0c8170d1189cb7b6914b7d37d672531f0992f66
|
/week 1/Hackerrank/mutations.py
|
c4801b73718913dceb3d0bd5369cb1e362db4450
|
[] |
no_license
|
shynaraaya/BFDjango
|
3c459e45a8c253144783416e06f7fdd43e44ceef
|
dd26cc6f21619c260fda05d46f22ff69bce09f82
|
refs/heads/master
| 2021-09-25T15:33:29.148280
| 2020-04-20T02:26:02
| 2020-04-20T02:26:02
| 234,970,186
| 0
| 0
| null | 2021-09-22T18:31:24
| 2020-01-19T21:23:50
|
Python
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
def mutate_string(string, position, character):
    # Strings are immutable, so build a new one by slicing around `position`.
    res = string[:position] + character + string[position+1:]
    return res
if __name__ == '__main__':
s = raw_input()
i, c = raw_input().split()
s_new = mutate_string(s, int(i), c)
print s_new
|
[
"ayanbekshynar@gmail.com"
] |
ayanbekshynar@gmail.com
|
266c418c43ab368672a7149e70538c0e807b5f91
|
8c6fa70bae915c70268c1180281b2b6d78399ce4
|
/venv/Scripts/easy_install-3.6-script.py
|
c653fcfbb7ae0fe3c591da67ededb234868bfb59
|
[] |
no_license
|
cha-n/PG
|
6aad26fe32521e4713c0b0828b1365da7dcd1613
|
681051fff24f37302c2bba2f4614dc07386b3f89
|
refs/heads/master
| 2022-11-18T14:34:58.439384
| 2020-07-03T07:30:41
| 2020-07-03T07:30:41
| 276,623,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!C:\Users\JCY\PycharmProjects\PG\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"chan01115@naver.com"
] |
chan01115@naver.com
|
a6269498158572202304da939470fc4fdd2e3b1f
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_github/tests/test_releases.py
|
afd754d1f6b234e0108ff323b9fb987cfd825a66
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297
| 2023-06-23T16:33:58
| 2023-06-23T16:33:58
| 101,410,006
| 81
| 107
|
MIT
| 2023-03-29T20:40:31
| 2017-08-25T14:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,464
|
py
|
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from .common_config import github_config, TS
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_github"
FUNCTION_NAME = "github_create_release"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_function(circuits, function_name, function_params, timeout=5):
# Create the submitTestFunction event
evt = SubmitTestFunction(function_name, function_params)
# Fire a message to the function
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
if exception_event is not False:
exception = exception_event.args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait(f"{function_name}_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
def call_github_create_release_function(circuits, function_params, timeout=5):
# Create the submitTestFunction event
evt = SubmitTestFunction("github_create_release", function_params)
# Fire a message to the function
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
if exception_event is not False:
exception = exception_event.args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait("github_create_release_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestGithubCreateRelease:
""" Tests for the github_create_release function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@pytest.mark.livetest
def test_create_release(self, circuits_app):
""" Test calling with sample values for the parameters """
create_release_setup = github_config('create_release')
create_release_setup['github_release_name'] = f"{create_release_setup['github_release_name']}_{TS.strftime('%Y%m%d_%H%M%S')}"
create_release_setup['github_release_tag'] = f"{create_release_setup['github_release_tag']}_{TS.strftime('%Y%m%d_%H%M%S')}"
results = call_function(circuits_app, "github_create_release", create_release_setup)
assert(results['success'])
@pytest.mark.livetest
def test_get_release(self, circuits_app):
""" Test calling with sample values for the parameters """
get_release_setup = github_config('get_release')
get_release_setup['github_release_tag'] = f"{get_release_setup['github_release_tag']}_{TS.strftime('%Y%m%d_%H%M%S')}"
results = call_function(circuits_app, "github_get_release", get_release_setup)
assert(results['success'])
assert(results['content'])
@pytest.mark.livetest
def test_get_releases(self, circuits_app):
get_releases_setup = github_config('get_releases')
results = call_function(circuits_app, "github_get_releases", get_releases_setup)
assert(results['success'])
assert(results['content'])
@pytest.mark.livetest
def test_get_latest_release(self, circuits_app):
get_releases_setup = github_config('get_latest_release')
results = call_function(circuits_app, "github_get_latest_release", get_releases_setup)
assert(results['success'])
assert(results['content'])
|
[
"travis@example.org"
] |
travis@example.org
|
dae2ef9510eb7658c02a8bec10863d09063de120
|
75e5600bcd545b36b0c70700e947c9714569d305
|
/manyActualparameters.py
|
ed6fa7e38812b20666ce9b9a4ac61c6530df91bf
|
[] |
no_license
|
CandyZ93/Python_Work
|
5f720c5b644aef1198c0ad785697f88997ff98b7
|
33c673e4235e103ed6cbad5a40d884524e3d3bd3
|
refs/heads/master
| 2021-05-06T13:35:54.272085
| 2017-12-06T04:56:53
| 2017-12-06T04:56:53
| 113,270,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Many actual parameters
'''
def make_pizza(size, *toppings):
"""print all the topping"""
print("\nMaking a " + str(size) +
"-inch pizza with the following toppings:")
for topping in toppings:
print("- " + topping)
make_pizza(16,'pepperoni')
make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')
def build_profile(first, last, **user_info):
"""creat a dict, which contain all the info mation about users."""
profile = {}
profile['first_name'] = first
profile['last_name'] = last
for key, value in user_info.items():
profile[key] = value
return profile
user_profile = build_profile('albert', 'einstein',
                             location = 'princeton',
                             field = 'physics')
print(user_profile)
'''
##homework
'''
def ordered_sandwich(*ingredients):
flavor = ''
for ingredient in ingredients:
flavor += (ingredient + ', ')
print("You have orderd a " + flavor + "\b\b sandwich.\n")
ordered_sandwich('tuna')
ordered_sandwich('tuna', 'beef')
ordered_sandwich('tuna', 'beef', 'eggroll')
def build_profile(first, last, **user_info):
"""creat a dict, which contain all the info mation about users."""
profile = {}
profile['first_name'] = first
profile['last_name'] = last
for key, value in user_info.items():
profile[key] = value
return profile
person = build_profile('lance', 'zhou', location = 'wuhan', education = 'master',
major = 'software engineering')
full_name = person['first_name'] + ' ' + person['last_name']
print("The profile of " + full_name.title() + " is:")
for k,v in person.items():
print("\t"+k + " : " +v.title())
'''
def make_car(manufacturer, model, **details):
car = {}
car['manufacturer_name'] = manufacturer
car['model'] = model
for key,value in details.items():
car[key] = value
return car
car = make_car('subaru', 'outback', color = 'red', drive = '4WD')
print(car)
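# A further illustration (not part of the original exercise): an existing dict
# can be expanded into the **details parameter with the ** operator:
#   options = {'color': 'blue', 'tow_package': True}
#   truck = make_car('ford', 'f150', **options)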
|
[
"lance.z1993@outlook.com"
] |
lance.z1993@outlook.com
|
0f923332d74ab03086681ff9097adf5ed4fd7603
|
70ec704fdd3e30c5df97323cd4c9532ebfd544ea
|
/ml_wiki/ch2/selenium-login.py
|
d53a91bf593dc64f47bd9b445885c8954f53b454
|
[] |
no_license
|
smart1004/learn_src
|
e02c13c82bae65b7de2a572e4a1ae58e2ea11588
|
353f92f7657a6f676a271d8d7f00d7c20e39d234
|
refs/heads/master
| 2021-01-25T11:49:49.906805
| 2018-04-08T13:26:18
| 2018-04-08T13:26:18
| 123,435,997
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
from selenium import webdriver
USER = "<아이디>"
PASS = "<비밀번호>"
# PhantomJS 드라이버 추출하기 --- (※1)
browser = webdriver.PhantomJS()
browser.implicitly_wait(3)
# Open the login page --- (※2)
url_login = "https://nid.naver.com/nidlogin.login"
browser.get(url_login)
print("Accessing the login page.")
# Enter the ID and password into the text boxes --- (※3)
e = browser.find_element_by_id("id")
e.clear()
e.send_keys(USER)
e = browser.find_element_by_id("pw")
e.clear()
e.send_keys(PASS)
# Submit the form to log in --- (※4)
form = browser.find_element_by_css_selector("input.btn_global[type=submit]")
form.submit()
print("Clicking the login button.")
# Fetch data from the shopping page --- (※5)
browser.get("https://order.pay.naver.com/home?tabMenu=SHOPPING")
# Print the shopping list --- (※6)
products = browser.find_elements_by_css_selector(".p_info span")
print(products)
for product in products:
print("-", product.text)
|
[
"kusung25@naver.com"
] |
kusung25@naver.com
|
8ee7f264c8f5e86b5c66a93f520750a34ee73f7b
|
cab0673e71c6294103d8c2b57b25081dd3e0fb24
|
/model/ActiveLearning.py
|
65fb987633c3c138151765856e2ffa99419d6e06
|
[] |
no_license
|
akassara/Bayesian-Active-Learning
|
236873c9c93b04494a01facc1791a36d26bdd07b
|
b5b8b912dbe83271eeee2226ab36ae986c361d11
|
refs/heads/main
| 2023-01-18T23:20:43.009164
| 2020-11-23T16:09:01
| 2020-11-23T16:09:01
| 314,571,081
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,043
|
py
|
import numpy as np
from utils.train_utils import to_var, to_numpy, train_model
import torch
from utils.Preprocessing import FashionDataset, complex_preprocess
from model.BayesianCNN import BayesianCNN
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
def select_from_pool(model, D_train, N, nb_samples, aquis='BALD'):
"""Return indices of the N most uncertain predictions
"""
if aquis == 'BALD':
uncertainties = []
for i, sample in enumerate(D_train, 1):
# Take variable and put them to GPU
(images, _) = sample
images = to_var(images.float(), device)
# compute uncertainty
uncertainty = model.uncertainty(images)
uncertainties.append(to_numpy(uncertainty))
        uncertainties = np.array(uncertainties).flatten()
        m = uncertainties.mean()
        indices = []
        # Greedily pick the N highest-uncertainty pool items by repeatedly
        # masking out the current argmax.
        for k in range(N):
            j = np.argmax(uncertainties)
            uncertainties[j] = -np.inf
            indices.append(j)
        return indices, m
elif aquis == 'RAND':
indices = np.random.choice(nb_samples, N)
return indices, None
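# For reference, BALD scores the mutual information between the prediction and
# the model parameters; with MC dropout it is commonly approximated from T
# stochastic forward passes (sketch, assuming `probs` is a [T, batch, classes]
# tensor of softmax outputs):
#   mean = probs.mean(dim=0)
#   predictive_entropy = -(mean * mean.log()).sum(dim=-1)
#   expected_entropy = -(probs * probs.log()).sum(dim=-1).mean(dim=0)
#   bald = predictive_entropy - expected_entropy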
def active_training(init_model,X_train,y_train,X_pool,y_pool,valid_loader,N,aquis,iterations,epochs,learning_rate,batch_size,weight_decay,criterion):
"""Train model on new dataset after the aquisition of N data points from the new data
"""
train_active_losses = []
train_active_metrics = []
val_active_losses = []
val_active_metrics = []
model = init_model
for k in range(iterations):
new_train_dataset = FashionDataset(X_pool,y_pool,transform = complex_preprocess())
# DataLoaders
new_train_loader = torch.utils.data.DataLoader(new_train_dataset,
batch_size=batch_size, shuffle=False,sampler=None,
drop_last=True)
# We set model to train to activate the dropout layers
model.train()
print('*************')
print('Extract training images from the pool with high uncertainty')
indices,mean = select_from_pool(model,new_train_loader,N,X_pool.shape[0],aquis=aquis)
print('mean uncertainty:',mean)
print('Number of extracted training images:',len(indices))
X_train = np.concatenate((X_train,X_pool[indices]),axis=0)
y_train = np.concatenate((y_train,y_pool[indices]),axis=0)
print('Size of new training dataset',np.shape(X_train))
X_pool = np.delete(X_pool,indices,axis=0)
y_pool = np.delete(y_pool,indices,axis=0)
new_train_dataset = FashionDataset(X_train,y_train,transform = complex_preprocess())
# DataLoaders
new_train_loader = torch.utils.data.DataLoader(new_train_dataset,
batch_size=batch_size, shuffle=True,sampler=None,
drop_last=True)
model = BayesianCNN(dropout_prob=0.25,num_samples=5)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,weight_decay=weight_decay)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.9,patience=5000,verbose=True)
# TRAINING
        print('New training has begun')
new_train_losses, new_train_metrics,new_val_losses,new_val_metrics = train_model(model,new_train_loader,valid_loader,
criterion,optimizer,scheduler,batch_size,epochs,device)
filename = '/content/drive/MyDrive/BayesianActiveLearning/model_'+aquis + '_' +str(k)+'.pth.tar'
torch.save(model.state_dict(),filename)
print('Saving the model to ',filename)
train_active_losses = train_active_losses + new_train_losses
train_active_metrics = train_active_metrics + new_train_metrics
val_active_losses = val_active_losses + new_val_losses
val_active_metrics = val_active_metrics + new_val_metrics
return model , train_active_losses , train_active_metrics ,val_active_losses , val_active_metrics
|
[
"kassara.amyn@gmail.com"
] |
kassara.amyn@gmail.com
|
5bd1ccd8e4f231ed43f68842d8cd667f6c5ea565
|
9b2d05c7d759e9c8f7e2bf817088b8943402ef15
|
/itdiscovery.py
|
c7868ebf2001c8e7bd56450250a404f598d02754
|
[] |
no_license
|
zipi85/AWL
|
9c3f54483d5899177770668a8e970ec16eadb91a
|
ea2592e68bc0d2cd25102616d4719e4d1283b936
|
refs/heads/master
| 2021-01-25T08:13:19.623508
| 2017-06-19T20:02:50
| 2017-06-19T20:02:50
| 93,733,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
from fabric.api import task, hide, sudo
import fabric.contrib.files
from fabric.contrib.files import exists, contains, append, first, sed
@task()
def update():
"""update itdiscovery"""
with hide('stdout', 'stderr'):
sudo('yum -y update itdiscovery --enablerepo=awl-main,epel')
@task()
def remove_pci_option():
"""remove `-P` option from cron file"""
with hide('stdout', 'stderr'):
fabric.contrib.files.sed('/etc/cron.d/itdiscovery', '-P', '', use_sudo=True, backup='')
@task()
def upgrade_v3():
"""upgrade ITDiscovery v3"""
with hide('stdout', 'stderr'):
if exists('/etc/cron.d/itdiscovery', use_sudo=True):
smtp=sudo('''grep -o -E '(ypp|relay-smtp|smtp).*fr|relay-smtp' /etc/cron.d/itdiscovery''',warn_only=True,quiet=True)
if smtp.succeeded:
print "\033[95m"+smtp+"\033[0m"
installitdisc=sudo("yum -y update itdiscovery --enablerepo=awl-main,epel",quiet=True)
if installitdisc.succeeded:
print "\033[92mITDiscovery has been installed successfully\033[0m"
else:
print "\033[91mFailed to install ITDiscovery\033[0m"
return 1
sudo('''sed -ri 's/#?smtp_server:.*/smtp_server: '''+smtp+'''/g' /etc/itdiscovery/itdiscovery.yml''',quiet=True)
ittest=sudo("itdiscovery -W",quiet=True)
if ittest.succeeded:
print "\033[92mITDiscovery is correctly set\033[0m"
else:
print "\033[91mProblem with ITDiscovery\033[0m"
return 1
else:
itdiscv3=sudo('''grep nice /etc/cron.d/itdiscovery''',warn_only=True,quiet=True)
if itdiscv3.succeeded:
print "\033[96mITDiscovery V3 already installed.\033[0m"
else:
print "\033[93mNo SMTP found in ITDiscovery cron.\033[0m"
else:
print "\033[91mITDiscovery cron not found\033[0m"
|
[
"dariusz.noga@atos.net"
] |
dariusz.noga@atos.net
|
f7242ea788b8b591ac5ace6f519661bc61723bd8
|
8012340a24e3b367ca0c536f6d0a2646587f6319
|
/project/app/models.py
|
b5417cb2281985735e0f04bd95d7b8ccc82a2bec
|
[] |
no_license
|
urvisuthar85/drc_project
|
f175ab707b5f32ab0bd22dd6127f4b1df9c29c23
|
2469692b140fe0eb547af8e4744e96dd216ce7ff
|
refs/heads/master
| 2023-01-02T00:18:57.554134
| 2020-10-25T18:07:15
| 2020-10-25T18:07:15
| 307,160,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from django.db import models
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=255)
phone = models.IntegerField()
email = models.EmailField(max_length=255,unique=True)
password = models.CharField(max_length=20)
def __str__(self):
return self.username
# from django.contrib.auth.models import User
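# Note (not part of the original model): a plain CharField stores passwords in
# clear text; Django's bundled auth User (commented import above) hashes them
# via user.set_password(raw) and verifies with user.check_password(raw).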
|
[
"urvashicitrusbug@gmail.com"
] |
urvashicitrusbug@gmail.com
|
38459d585c7f1861e8774c7571859a85236be08b
|
6515a47190986c4f3b6beececfabab42e3d34e34
|
/Models/GPT2_Model/Model/GPT2LMHead.py
|
7f0304b3b13ce6e44e8c037aec7823bb34427b7e
|
[] |
no_license
|
jk96491/Advanced_Models
|
f4140936f5004ed9a9464ad745b33e52d63157fa
|
cde49356fec3c53296446a54f4be497a89dd08cd
|
refs/heads/master
| 2023-06-14T02:26:43.869417
| 2021-06-30T13:07:31
| 2021-06-30T13:07:31
| 143,489,382
| 60
| 16
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
import torch.nn as nn
class GPT2LMHead(nn.Module):
def __init__(self, model_embeddings_weights, config):
super(GPT2LMHead, self).__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
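# A minimal usage sketch (illustrative; it assumes the transformer exposes its
# token embedding as `wte`, as in common GPT-2 implementations):
#   lm_head = GPT2LMHead(transformer.wte.weight, config)
#   logits = lm_head(hidden_states)  # (batch, seq, vocab) via the tied matrix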
|
[
"jk96491@naver.com"
] |
jk96491@naver.com
|
40e508f888e6e5ccfdc610d006a27e1b0da0f9c1
|
713ffaf6b0633cb217cff7297799f2c29a75d8ba
|
/main/settings.py
|
51d07ae0663ee131c1dbd75e3096fb7df021033a
|
[] |
no_license
|
Dora72/surveyForm
|
15398fd84856c23704b18e60e7dae8bee878a3c1
|
1b4cfaf0a1cc57e64c32728cd95f7a6badb10c2a
|
refs/heads/master
| 2021-01-23T02:19:20.013812
| 2017-03-23T18:48:51
| 2017-03-23T18:48:51
| 85,985,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,113
|
py
|
"""
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sbs-vpk@8rirn*m(uq7=+-y2m_=(+18ox+^p!6(p416$e2^ykb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.first_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"dorineazmy@gmail.com"
] |
dorineazmy@gmail.com
|
1e236e30d75c559339f1261b732a9a70d9df7122
|
35053a371d85c2d45a4f52239d8a70b38194ef48
|
/Can Place Flowers.py
|
86bf8aeeb427181d1fe805cbf5b1d1bcb364a643
|
[] |
no_license
|
Kuehar/LeetCode
|
51d169c81a2e572ea854399fc78e1130220388f9
|
4555c20455f181f9dd7b3aba2a8779dea795edfb
|
refs/heads/master
| 2023-04-16T10:13:03.584541
| 2023-04-06T11:47:21
| 2023-04-06T11:47:21
| 243,361,421
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
if n == 0: return True
if len(flowerbed) == 0: return False
        if len(flowerbed) == 1: return n <= 1 and flowerbed[0] == 0
pre,cur = flowerbed[0],flowerbed[1]
if pre + cur == 0:
flowerbed[0] = 1
n -= 1
cur,nex = flowerbed[-1],flowerbed[-2]
if cur + nex == 0:
flowerbed[-1] = 1
n -= 1
for i in range(2,len(flowerbed)-2):
pre = flowerbed[i-1]
cur = flowerbed[i]
nex = flowerbed[i+1]
if (pre + cur + nex) == 0:
flowerbed[i] = 1
n -= 1
return n <= 0
# Runtime: 164 ms, faster than 58.48% of Python3 online submissions for Can Place Flowers.
# Memory Usage: 14.5 MB, less than 89.00% of Python3 online submissions for Can Place Flowers.
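# Why the greedy scan is safe (explanatory note): planting in the leftmost
# legal plot never blocks a strictly better arrangement; e.g. in [1,0,0,0,1]
# only index 2 is plantable, so n=1 returns True and n=2 returns False.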
|
[
"noreply@github.com"
] |
Kuehar.noreply@github.com
|
9ae92d30735a63f876e54362e26dba76ae2b811a
|
b73dbd5810be0ee54143a9f1a58f1f662fb6063b
|
/polls/tests.py
|
15c916d5d65908e20dd6f0c1fccf497f9f87c77e
|
[] |
no_license
|
wataruxun/LearnDjango
|
030c380ec3f5e810dac24ca43cd096668164495a
|
ba604c0b04fa4d2968f035421609dc65ab4e3344
|
refs/heads/master
| 2022-01-29T18:50:35.255105
| 2019-05-06T01:54:52
| 2019-05-06T01:54:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,889
|
py
|
import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
# Create your tests here.
class QuestionModelTests(TestCase):
    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is in the future.
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date=time)
        self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date
is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days = days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
|
[
"mikoto.toaru@gmail.com"
] |
mikoto.toaru@gmail.com
|
2e79679ebdc6ebb91b85f95ac5bccc7866b865ab
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/openstackclient/tests/functional/image/v2/test_image.py
|
6faff94a3295cd738875bfb8fda2baf1ac162efb
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330
| 2018-10-15T11:39:20
| 2018-10-15T11:39:20
| 152,568,204
| 0
| 3
| null | 2022-11-19T17:38:49
| 2018-10-11T09:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,865
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
from openstackclient.tests.functional import base
class ImageTests(base.TestCase):
"""Functional tests for image. """
NAME = uuid.uuid4().hex
OTHER_NAME = uuid.uuid4().hex
HEADERS = ['Name']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
os.environ['OS_IMAGE_API_VERSION'] = '2'
opts = cls.get_opts(cls.FIELDS)
raw_output = cls.openstack('image create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
# Rename test
raw_output = cls.openstack('image set --name ' + cls.OTHER_NAME + ' '
+ cls.NAME)
cls.assertOutput('', raw_output)
# Delete test
raw_output = cls.openstack('image delete ' + cls.OTHER_NAME)
cls.assertOutput('', raw_output)
def test_image_list(self):
opts = self.get_opts(self.HEADERS)
raw_output = self.openstack('image list' + opts)
self.assertIn(self.NAME, raw_output)
def test_image_show(self):
opts = self.get_opts(self.FIELDS)
raw_output = self.openstack('image show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
def test_image_set(self):
opts = self.get_opts([
"disk_format", "visibility", "min_disk", "min_ram", "name"])
self.openstack('image set --min-disk 4 --min-ram 5 ' +
'--public ' + self.NAME)
raw_output = self.openstack('image show ' + self.NAME + opts)
self.assertEqual("raw\n4\n5\n" + self.NAME + '\npublic\n', raw_output)
def test_image_metadata(self):
opts = self.get_opts(["name", "properties"])
self.openstack('image set --property a=b --property c=d ' + self.NAME)
raw_output = self.openstack('image show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\na='b', c='d'\n", raw_output)
def test_image_unset(self):
opts = self.get_opts(["name", "tags", "properties"])
self.openstack('image set --tag 01 ' + self.NAME)
self.openstack('image unset --tag 01 ' + self.NAME)
# test_image_metadata has set image properties "a" and "c"
self.openstack('image unset --property a --property c ' + self.NAME)
raw_output = self.openstack('image show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n\n", raw_output)
def test_image_members(self):
opts = self.get_opts(['project_id'])
my_project_id = self.openstack('token issue' + opts).strip()
self.openstack(
'image add project {} {}'.format(self.NAME, my_project_id))
self.openstack(
'image set --accept ' + self.NAME)
shared_img_list = self.parse_listing(
self.openstack('image list --shared', self.get_opts(['name']))
)
self.assertIn(self.NAME, [img['Name'] for img in shared_img_list])
self.openstack(
'image set --reject ' + self.NAME)
        shared_img_list = self.parse_listing(
            self.openstack('image list --shared', self.get_opts(['name']))
        )
        self.assertNotIn(self.NAME, [img['Name'] for img in shared_img_list])
        self.openstack(
            'image remove project {} {}'.format(self.NAME, my_project_id))
|
[
"jcdiaztorres96@gmail.com"
] |
jcdiaztorres96@gmail.com
|
b40a347f034798cd207d5819205c875aa7fb8bdb
|
0c8b1eedb2f3c5b7aa5fddfe902174d68a5ba40d
|
/array/using python/kadane.py
|
a5de2ad2e54f972bdefd2b4464f1a55c32e05286
|
[] |
no_license
|
vibhav21/must_do_coding_ques
|
4838619fb42935fdc5816c159a440e8cee1a5e29
|
3b6d59868fb7da3f7cb09c4a9449729892ddf16b
|
refs/heads/master
| 2020-04-28T06:04:02.888082
| 2019-07-24T15:06:55
| 2019-07-24T15:06:55
| 175,043,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 20:09:09 2019
@author: Vaibhav
"""
arr=[]
a=int(input())
while(a):
    max_s=-32000   # best subarray sum seen so far (sentinel lower bound)
    max_t=0        # running sum of the current subarray
    size=int(input())
    arr=list(map(int,input().split()))
    # Kadane's algorithm: extend the running sum, reset it once it drops below zero.
    for i in range(size):
max_t=max_t+arr[i]
if(max_s<max_t):
max_s=max_t
if(max_t<0):
max_t=0
print(max_s)
a=a-1
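# For reference, the same recurrence as a standalone helper (illustrative):
#   def max_subarray(nums):
#       best = cur = nums[0]
#       for x in nums[1:]:
#           cur = max(x, cur + x)   # extend the current run or restart at x
#           best = max(best, cur)
#       return best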
|
[
"vaibhavchat1@gmail.com"
] |
vaibhavchat1@gmail.com
|
e806e3218cd8bc55131bce1e7d166c76ad1ec718
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03288/s779086883.py
|
309d8d3ea1e3ed5219effec5418e8a99e6125de3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
r=int(input())
print("ABC"if r<1200else"ARC"if r<2800else"AGC")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2c9513597922cdf544f8ece8671ae067d202e958
|
1e888f58fce5356b556a8a68562f6fcc19baad8a
|
/publish_handler.py
|
29ca024c425f530ff0a40e7d7e91545bbc0edc8e
|
[] |
no_license
|
carcigenicate/song-searcher
|
1cded140c7609b7d5e58f64f15d02db02f3ff4c4
|
acd0c322e17e1635d2974735e7b29ac62adb3e75
|
refs/heads/master
| 2023-04-14T02:11:31.907739
| 2021-04-25T14:44:00
| 2021-04-25T14:44:00
| 358,049,145
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
import json
import logging
import ssl
from paho.mqtt import MQTTException
from paho.mqtt.publish import single as publish_single
import common as comm
from simple_handlers import bad_request_handler
from broker_config import Config
REQUEST_PARAMETER = "yid"
CONFIG_PATH = "./broker.cfg"
CONFIG = Config.from_file(CONFIG_PATH)
def song_request_handler(request: comm.Request) -> None:
song_request = request.query.get(REQUEST_PARAMETER)
if not song_request:
bad_request_handler(request, "Missing youtube video ID.")
else:
try:
auth = {"username": CONFIG.username, "password": CONFIG.password}
publish_single(CONFIG.topic,
song_request[0],
auth=auth,
hostname=CONFIG.host,
port=CONFIG.port,
tls={"tls_version": ssl.PROTOCOL_TLS},
keepalive=10, # Publish timeout
qos=2)
comm.send_simple_response(request,
200,
{"Content-Type": "application/json"},
json.dumps({"success": "Request Received"}))
# These errors will only be thrown if the hostname is something obviously wrong like "localhost".
except ConnectionRefusedError:
logging.warning("MQTT server rejected connection/is not started.")
bad_request_handler(request, "Unable to contact broker. Message was not passed on.")
except MQTTException as e:
logging.warning(f"Generic MQTT error: {e}")
bad_request_handler(request, "Unable to contact broker. Message was not passed on.")
|
[
"brendon@localhost.localdomain"
] |
brendon@localhost.localdomain
|
7078a2ddc9cf33c2fc31c6ca289eae0fe39c85e5
|
829b1d2661e9fead0f4ac1397129a62e50184683
|
/src/runGdEval.py
|
38d4cee47278954b0f451ae6322eeb9257ac3ce1
|
[] |
no_license
|
vmanisha/tasks_track_evaluation
|
6366bd325b8d274c1f2404c6c425dd67ff1d1fa3
|
c79c4925a608c33433d11019d10f708c5950b8df
|
refs/heads/master
| 2021-07-11T03:06:13.287375
| 2017-10-12T20:18:03
| 2017-10-12T20:18:03
| 106,740,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
import os
import argparse as ap
import sys
import pandas as pd
def RunNdEval(qrelFile,runFolder, outFolder):
docTopics= range(1,51)
    # used for 2015: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29, 31, 32, 34, 35, 36, 37, 42, 43, 50]
commandString = 'perl ./eval/diversity_eval/gdeval.pl '
if not os.path.exists(outFolder):
os.mkdir(outFolder)
for ifile in os.listdir(runFolder):
run = open(runFolder+'/'+ifile,'r').read()
step = 5
k = 5
runMetrics = {}
runid = None
# Format :
# runid,topic,ndcg@K,err@K
for k in [10,20,1000]:
print k, ifile
# Run the command
print commandString+' -k '+str(k)+' '+qrelFile+' '+runFolder+'/'+ifile
content = os.popen(commandString+' -k '+str(k)+' '+qrelFile+' '+runFolder+'/'+ifile).read()
# Process and store the input
i= 0
header = {}
#print content
for line in content.split('\n'):
split = line.strip().split(',')
if len(split) == 4:
# Set the header
if i == 0:
for l in range(len(split)):
header[l] = split[l]
i+=1
else:
runid = split[0]
topic = split[1]
if 'amean' in topic or int(topic) in docTopics:
if topic not in runMetrics:
runMetrics[topic] = {}
runMetrics[topic][header[2]] = split[2]
runMetrics[topic][header[3]] = split[3]
ofile = open(outFolder+'/'+ifile,'w')
# write the header
#dataFrame = pd.DataFrame(index= range(1,50), columns= runMetrics['amean'].keys())
ofile.write('runid,topic,'+','.join(sorted(runMetrics['amean'].keys()))+'\n')
for entry in sorted(runMetrics.items()):
topic, metricValues = entry[0], entry[1]
sortedMet = sorted(metricValues.items())
#print topic, sortedMet
values = []
for entry in sortedMet:
values.append(entry[1])
#print values
ofile.write(runid+','+topic+','+','.join(values) +'\n')
ofile.close()
def main(argv):
parser = ap.ArgumentParser(description='Generate diversity evaluation for several runs in a folder');
parser.add_argument('-q','--qrelFile', help='Qrel file to use for evaluation. Should have format \
query-id subtopic-id doc-id relevance', required=True);
parser.add_argument('-r','--runFolder', help='Folder containing participant runs.', required=True);
parser.add_argument('-o','--outFolder', help='Folder to output per query evaluation for each submitted run.', required=True);
args = parser.parse_args()
RunNdEval(args.qrelFile, args.runFolder, args.outFolder)
if __name__ == '__main__':
main(sys.argv)
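# Example invocation (illustrative file names):
#   python runGdEval.py -q qrels.diversity.txt -r runs/ -o eval_out/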
|
[
"manishav@yahoo-inc.com"
] |
manishav@yahoo-inc.com
|
cfe76fa70009f31c4fa505c0f592be700b7e4c89
|
9a056a03487a7594a4c2379282638ca22a585670
|
/resources/libs/notify.py
|
ce6a6e0c01f69675483fecf057557ec2fa21eb53
|
[] |
no_license
|
leosupreme/plugin.program.ghost.wizard-1.0.0
|
d67d2d77c74f29cb50b27a1f010ee77cd1c6c892
|
6505cc6f7718598ece7e1005ff1252a1a26723b2
|
refs/heads/master
| 2022-10-06T11:40:17.414864
| 2020-06-11T12:15:26
| 2020-06-11T12:15:26
| 271,537,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43,867
|
py
|
############################################################################
# /T /I #
# / |/ | .-~/ #
# T\ Y I |/ / _ #
# /T | \I | I Y.-~/ #
# I l /I T\ | | l | T / #
# T\ | \ Y l /T | \I l \ ` l Y If your going to copy #
# __ | \l \l \I l __l l \ ` _. | this addon just #
# \ ~-l `\ `\ \ \ ~\ \ `. .-~ | give credit! #
# \ ~-. "-. ` \ ^._ ^. "-. / \ | #
#.--~-._ ~- ` _ ~-_.-"-." ._ /._ ." ./ Stop Deleting the #
# >--. ~-. ._ ~>-" "\ 7 7 ] credits file! #
#^.___~"--._ ~-{ .-~ . `\ Y . / | #
# <__ ~"-. ~ /_/ \ \I Y : | #
# ^-.__ ~(_/ \ >._: | l______ #
# ^--.,___.-~" /_/ ! `-.~"--l_ / ~"-. #
# (_/ . ~( /' "~"--,Y -=b-. _) #
# (_/ . \ : / l c"~o \ #
# \ / `. . .^ \_.-~"~--. ) #
# (_/ . ` / / ! )/ #
# / / _. '. .': / ' #
# ~(_/ . / _ ` .-<_ #
# /_/ . ' .-~" `. / \ \ ,z=. Surfacingx #
# ~( / ' : | K "-.~-.______// Original Author #
# "-,. l I/ \_ __{--->._(==. #
# //( \ < ~"~" // #
# /' /\ \ \ ,v=. (( Fire TV Guru #
# .^. / /\ " }__ //===- ` PyXBMCt LaYOUt #
# / / ' ' "-.,__ {---(==- #
# .^ ' : T ~" ll #
# / . . . : | :! \ #
# (_/ / | | j-" ~^ #
# ~-<_(_.^-~" #
# #
# Copyright (C) One of those Years.... #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
############################################################################
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, os, sys, xbmcvfs, glob
import shutil
import urllib2,urllib
import re
import uservar
import time
from resources.libs import yt, wizard as wiz
from datetime import date, datetime, timedelta
ADDON_ID = uservar.ADDON_ID
ADDON = wiz.addonId(ADDON_ID)
VERSION = wiz.addonInfo(ADDON_ID,'version')
ADDONPATH = wiz.addonInfo(ADDON_ID,'path')
ADDONTITLE = uservar.ADDONTITLE
BUILDERNAME = uservar.BUILDERNAME
DIALOG = xbmcgui.Dialog()
DP = xbmcgui.DialogProgress()
HOME = xbmc.translatePath('special://home/')
ADDONS = os.path.join(HOME, 'addons')
USERDATA = os.path.join(HOME, 'userdata')
PLUGIN = os.path.join(ADDONS, ADDON_ID)
PACKAGES = os.path.join(ADDONS, 'packages')
ADDONDATA = os.path.join(USERDATA, 'addon_data', ADDON_ID)
FANART = os.path.join(ADDONPATH, 'fanart.jpg')
ICON = os.path.join(ADDONPATH, 'icon.png')
ART = os.path.join(ADDONPATH, 'resources', 'art')
SKINFOLD = os.path.join(ADDONPATH, 'resources', 'skins', 'DefaultSkin', 'media')
ADVANCED = os.path.join(USERDATA, 'advancedsettings.xml')
NOTIFY = wiz.getS('notify')
NOTEID = wiz.getS('noteid')
NOTEDISMISS = wiz.getS('notedismiss')
BUILDNAME = wiz.getS('buildname')
BUILDVERSION = wiz.getS('buildversion')
LATESTVERSION = wiz.checkBuild(BUILDNAME, 'version')
TODAY = date.today()
KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
TOMORROW = TODAY + timedelta(days=1)
THREEDAYS = TODAY + timedelta(days=3)
UPDATECHECK = uservar.UPDATECHECK if str(uservar.UPDATECHECK).isdigit() else 1
NEXTCHECK = TODAY + timedelta(days=UPDATECHECK)
NOTIFICATION = uservar.NOTIFICATION
ENABLE = uservar.ENABLE
HEADERTYPE = uservar.HEADERTYPE if uservar.HEADERTYPE == 'Image' else 'Text'
HEADERMESSAGE = uservar.HEADERMESSAGE
BACKGROUND = uservar.BACKGROUND
HEADERIMAGE = uservar.HEADERIMAGE
THEME1 = uservar.THEME1
THEME2 = uservar.THEME2
THEME3 = uservar.THEME3
THEME4 = uservar.THEME4
THEME5 = uservar.THEME5
COLOR1 = uservar.COLOR1
COLOR2 = uservar.COLOR2
COLOR3 = uservar.COLOR3
COLOR4 = uservar.COLOR4
COLOR5 = uservar.COLOR5
CONTACTICON = uservar.CONTACTICON if not uservar.CONTACTICON == 'http://' else ICON
CONTACTFANART = uservar.CONTACTFANART if not uservar.CONTACTFANART == 'http://' else FANART
if BACKGROUND == '': BACKGROUND = FANART
elif not wiz.workingURL(BACKGROUND): BACKGROUND = FANART
ACTION_PREVIOUS_MENU = 10 ## ESC action
ACTION_NAV_BACK = 92 ## Backspace action
ACTION_MOVE_LEFT = 1 ## Left arrow key
ACTION_MOVE_RIGHT = 2 ## Right arrow key
ACTION_MOVE_UP = 3 ## Up arrow key
ACTION_MOVE_DOWN = 4 ## Down arrow key
ACTION_MOUSE_WHEEL_UP = 104 ## Mouse wheel up
ACTION_MOUSE_WHEEL_DOWN = 105 ## Mouse wheel down
ACTION_MOVE_MOUSE = 107 ## Down arrow key
ACTION_SELECT_ITEM = 7 ## Number Pad Enter
ACTION_BACKSPACE = 110 ## ?
ACTION_MOUSE_LEFT_CLICK = 100
ACTION_MOUSE_LONG_CLICK = 108
def artwork(file):
if file == 'button': return os.path.join(SKINFOLD, 'Button', 'button-focus_lightblue.png'), os.path.join(SKINFOLD, 'Button', 'button-focus_grey.png')
elif file == 'radio' : return os.path.join(SKINFOLD, 'RadioButton', 'MenuItemFO.png'), os.path.join(SKINFOLD, 'RadioButton', 'MenuItemNF.png'), os.path.join(SKINFOLD, 'RadioButton', 'radiobutton-focus.png'), os.path.join(SKINFOLD, 'RadioButton', 'radiobutton-nofocus.png')
elif file == 'slider': return os.path.join(SKINFOLD, 'Slider', 'osd_slider_nib.png'), os.path.join(SKINFOLD, 'Slider', 'osd_slider_nibNF.png'), os.path.join(SKINFOLD, 'Slider', 'slider1.png'), os.path.join(SKINFOLD, 'Slider', 'slider1.png')
def autoConfig(msg='', TxtColor='0xFFFFFFFF', Font='font10', BorderWidth=10):
class MyWindow(xbmcgui.WindowDialog):
scr={};
def __init__(self,msg='',L=0,T=0,W=1280,H=720,TxtColor='0xFFFFFFFF',Font='font10',BorderWidth=10):
buttonfocus, buttonnofocus = artwork('button')
radiobgfocus, radiobgnofocus, radiofocus, radionofocus = artwork('radio')
slidernibfocus, slidernibnofocus, sliderfocus, slidernofocus = artwork('slider')
image_path = os.path.join(ART, 'ContentPanel.png')
boxbg = os.path.join(ART, 'bgg2.png')
self.border = xbmcgui.ControlImage(L,T,W,H, image_path)
self.addControl(self.border);
self.BG=xbmcgui.ControlImage(L+BorderWidth,T+BorderWidth,W-(BorderWidth*2),H-(BorderWidth*2), FANART, aspectRatio=0, colorDiffuse='0x5FFFFFFF')
self.addControl(self.BG)
top = T+BorderWidth
leftside = L+BorderWidth
rightside = L+(W/2)-(BorderWidth*2)
firstrow = top+30
secondrow = firstrow+275+(BorderWidth/2)
currentwidth = ((W/2)-(BorderWidth*4))/2
header = '[COLOR %s]Advanced Settings Configurator[/COLOR]' % (COLOR2)
self.Header=xbmcgui.ControlLabel(L, top, W, 30, header, font='font13', textColor=TxtColor, alignment=0x00000002)
self.addControl(self.Header)
top += 30+BorderWidth
self.bgarea = xbmcgui.ControlImage(leftside, firstrow, rightside-L, 275, boxbg, aspectRatio=0, colorDiffuse='0x5FFFFFFF')
self.addControl(self.bgarea)
self.bgarea2 = xbmcgui.ControlImage(rightside+BorderWidth+BorderWidth, firstrow, rightside-L, 275, boxbg, aspectRatio=0, colorDiffuse='0x5FFFFFFF')
self.addControl(self.bgarea2)
self.bgarea3 = xbmcgui.ControlImage(leftside, secondrow, rightside-L, 275, boxbg, aspectRatio=0, colorDiffuse='0x5FFFFFFF')
self.addControl(self.bgarea3)
self.bgarea4 = xbmcgui.ControlImage(rightside+BorderWidth+BorderWidth, secondrow, rightside-L, 275, boxbg, aspectRatio=0, colorDiffuse='0x5FFFFFFF')
self.addControl(self.bgarea4)
header = '[COLOR %s]Video Cache Size[/COLOR]' % (COLOR2)
self.Header2=xbmcgui.ControlLabel(leftside+BorderWidth, firstrow+5, (W/2)-(BorderWidth*2), 20, header, font='font13', textColor=TxtColor, alignment=0x00000002)
self.addControl(self.Header2)
freeMemory = int(float(wiz.getInfo('System.Memory(free)')[:-2])*.33)
recMemory = int(float(wiz.getInfo('System.Memory(free)')[:-2])*.23)
msg3 = "[COLOR %s]Number of bytes used for buffering streams in memory. When set to [COLOR %s]0[/COLOR] the cache will be written to disk instead of RAM. Note: For the memory size set here, Kodi will require 3x the amount of RAM to be free. Setting this too high might cause Kodi to crash if it can't get enough RAM(1/3 of Free Memory: [COLOR %s]%s[/COLOR])[/COLOR]" % (COLOR2, COLOR1, COLOR1, freeMemory)
self.Support3=xbmcgui.ControlTextBox(leftside+int(BorderWidth*1.5), firstrow+30+BorderWidth, (W/2)-(BorderWidth*4), 150, font='font12', textColor=TxtColor)
self.addControl(self.Support3)
self.Support3.setText(msg3)
try: self.videoCacheSize=xbmcgui.ControlSlider(leftside+int(BorderWidth*1.5), firstrow+210,(W/2)-(BorderWidth*5),20, textureback=sliderfocus, texture=slidernibnofocus, texturefocus=slidernibfocus, orientation=xbmcgui.HORIZONTAL)
except: self.videoCacheSize=xbmcgui.ControlSlider(leftside+int(BorderWidth*1.5), firstrow+210,(W/2)-(BorderWidth*5),20, textureback=sliderfocus, texture=slidernibnofocus, texturefocus=slidernibfocus)
self.addControl(self.videoCacheSize)
self.videomin = 0; self.videomax = freeMemory if freeMemory < 2000 else 2000
self.recommendedVideo = recMemory if recMemory < 500 else 500; self.currentVideo = self.recommendedVideo
videopos = wiz.percentage(self.currentVideo, self.videomax)
self.videoCacheSize.setPercent(videopos)
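            # Added note: wiz.percentage is assumed to map value/max onto 0-100,
            # e.g. wiz.percentage(500, 2000) -> 25; that is how the slider
            # position is derived from the recommended cache size here.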
current1 = '[COLOR %s]Current:[/COLOR] [COLOR %s]%s MB[/COLOR]' % (COLOR1, COLOR2, self.currentVideo)
recommended1 = '[COLOR %s]Recommended:[/COLOR] [COLOR %s]%s MB[/COLOR]' % (COLOR1, COLOR2, self.recommendedVideo)
self.currentVideo1=xbmcgui.ControlTextBox(leftside+BorderWidth,firstrow+235,currentwidth,20,font=Font,textColor=TxtColor)
self.addControl(self.currentVideo1)
self.currentVideo1.setText(current1)
self.recommendedVideo1=xbmcgui.ControlTextBox(leftside+BorderWidth+currentwidth,firstrow+235,currentwidth,20,font=Font,textColor=TxtColor)
self.addControl(self.recommendedVideo1)
self.recommendedVideo1.setText(recommended1)
header = '[COLOR %s]CURL Timeout/CURL Low Speed[/COLOR]' % (COLOR2)
self.Header3=xbmcgui.ControlLabel(rightside+BorderWidth, firstrow+5, (W/2)-(BorderWidth*2), 20, header, font='font13', textColor=TxtColor, alignment=0x00000002)
self.addControl(self.Header3)
msg3 = "[COLOR %s][B]curlclienttimeout[/B] is the time in seconds for how long it takes for libcurl connection will timeout and [B]curllowspeedtime[/B] is the time in seconds for libcurl to consider a connection lowspeed. For slower connections set it to 20.[/COLOR]" % COLOR2
self.Support3=xbmcgui.ControlTextBox(rightside+int(BorderWidth*3.5), firstrow+30+BorderWidth, (W/2)-(BorderWidth*4), 150, font='font12', textColor=TxtColor)
self.addControl(self.Support3)
self.Support3.setText(msg3)
try: self.CURLTimeout=xbmcgui.ControlSlider(rightside+int(BorderWidth*3.5),firstrow+210,(W/2)-(BorderWidth*5),20, textureback=sliderfocus, texture=slidernibnofocus, texturefocus=slidernibfocus, orientation=xbmcgui.HORIZONTAL)
except: self.CURLTimeout=xbmcgui.ControlSlider(rightside+int(BorderWidth*3.5),firstrow+210,(W/2)-(BorderWidth*5),20, textureback=sliderfocus, texture=slidernibnofocus, texturefocus=slidernibfocus)
self.addControl(self.CURLTimeout)
self.curlmin = 0; self.curlmax = 20
self.recommendedCurl = 10; self.currentCurl = self.recommendedCurl
curlpos = wiz.percentage(self.currentCurl, self.curlmax)
self.CURLTimeout.setPercent(curlpos)
current2 = '[COLOR %s]Current:[/COLOR] [COLOR %s]%ss[/COLOR]' % (COLOR1, COLOR2, self.currentCurl)
recommended2 = '[COLOR %s]Recommended:[/COLOR] [COLOR %s]%ss[/COLOR]' % (COLOR1, COLOR2, self.recommendedCurl)
self.currentCurl2=xbmcgui.ControlTextBox(rightside+(BorderWidth*3),firstrow+235,currentwidth,20,font=Font,textColor=TxtColor)
self.addControl(self.currentCurl2)
self.currentCurl2.setText(current2)
self.recommendedCurl2=xbmcgui.ControlTextBox(rightside+(BorderWidth*3)+currentwidth,firstrow+235,currentwidth,20,font=Font,textColor=TxtColor)
self.addControl(self.recommendedCurl2)
self.recommendedCurl2.setText(recommended2)
header = '[COLOR %s]Read Buffer Factor[/COLOR]' % (COLOR2)
self.Header4=xbmcgui.ControlLabel(leftside, secondrow+5, (W/2)-(BorderWidth*2), 20, header, font='font13', textColor=TxtColor, alignment=0x00000002)
self.addControl(self.Header4)
msg3 = "[COLOR %s]The value of this setting is a multiplier of the default limit. If Kodi is loading a typical bluray raw file at 36 Mbit/s, then a value of 2 will need at least 72 Mbit/s of network bandwidth. However, unlike with the RAM setting, you can safely increase this value however high you want, and Kodi won't crash.[/COLOR]" % COLOR2
self.Support3=xbmcgui.ControlTextBox(leftside+int(BorderWidth*1.5), secondrow+30+BorderWidth, (W/2)-(BorderWidth*4), 150, font='font12', textColor=TxtColor)
self.addControl(self.Support3)
self.Support3.setText(msg3)
try: self.readBufferFactor=xbmcgui.ControlSlider(leftside+int(BorderWidth*1.5), secondrow+210,(W/2)-(BorderWidth*5),20, textureback=sliderfocus, texture=slidernibnofocus, texturefocus=slidernibfocus, orientation=xbmcgui.HORIZONTAL)
except: self.readBufferFactor=xbmcgui.ControlSlider(leftside+int(BorderWidth*1.5), secondrow+210,(W/2)-(BorderWidth*5),20, textureback=sliderfocus, texture=slidernibnofocus, texturefocus=slidernibfocus)
self.addControl(self.readBufferFactor)
self.readmin = 0; self.readmax = 10
self.recommendedRead = 5; self.currentRead = self.recommendedRead
readpos = wiz.percentage(self.currentRead, self.readmax)
self.readBufferFactor.setPercent(readpos)
current3 = '[COLOR %s]Current:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR1, COLOR2, self.currentRead)
recommended3 = '[COLOR %s]Recommended:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR1, COLOR2, self.recommendedRead)
self.currentRead3=xbmcgui.ControlTextBox(leftside+BorderWidth,secondrow+235,currentwidth,20,font=Font,textColor=TxtColor)
self.addControl(self.currentRead3)
self.currentRead3.setText(current3)
self.recommendedRead3=xbmcgui.ControlTextBox(leftside+BorderWidth+currentwidth,secondrow+235,currentwidth,20,font=Font,textColor=TxtColor)
self.addControl(self.recommendedRead3)
self.recommendedRead3.setText(recommended3)
header = '[COLOR %s]Buffer Mode[/COLOR]' % (COLOR2)
self.Header4=xbmcgui.ControlLabel(rightside+BorderWidth, secondrow+5, (W/2)-(BorderWidth*2), 20, header, font='font13', textColor=TxtColor, alignment=0x00000002)
self.addControl(self.Header4)
msg4 = "[COLOR %s]This setting will force Kodi to use a cache for all video files, including local network, internet, and even the local hard drive. Default value is 0 and will only cache videos that use internet file paths/sources.[/COLOR]" % COLOR2
self.Support4=xbmcgui.ControlTextBox(rightside+int(BorderWidth*3.5), secondrow+30+BorderWidth, (W/2)-(BorderWidth*4), 110, font='font12', textColor=TxtColor)
self.addControl(self.Support4)
self.Support4.setText(msg4)
B1 = secondrow+130+BorderWidth; B2 = B1+30; B3 = B2+30; B4 = B3+30;
self.Button0 = xbmcgui.ControlRadioButton(rightside+(BorderWidth*3), B1, (W/2)-(BorderWidth*4), 30, '0: Buffer all internet filesystems', font='font10', focusTexture=radiobgfocus, noFocusTexture=radiobgnofocus, focusOnTexture=radiofocus, noFocusOnTexture=radiofocus, focusOffTexture=radionofocus, noFocusOffTexture=radionofocus)
self.Button1 = xbmcgui.ControlRadioButton(rightside+(BorderWidth*3), B2, (W/2)-(BorderWidth*4), 30, '1: Buffer all filesystems', font='font10', focusTexture=radiobgfocus, noFocusTexture=radiobgnofocus, focusOnTexture=radiofocus, noFocusOnTexture=radiofocus, focusOffTexture=radionofocus, noFocusOffTexture=radionofocus)
self.Button2 = xbmcgui.ControlRadioButton(rightside+(BorderWidth*3), B3, (W/2)-(BorderWidth*4), 30, '2: Only buffer true internet filesystems', font='font10', focusTexture=radiobgfocus, noFocusTexture=radiobgnofocus, focusOnTexture=radiofocus, noFocusOnTexture=radiofocus, focusOffTexture=radionofocus, noFocusOffTexture=radionofocus)
self.Button3 = xbmcgui.ControlRadioButton(rightside+(BorderWidth*3), B4, (W/2)-(BorderWidth*4), 30, '3: No Buffer', font='font10', focusTexture=radiobgfocus, noFocusTexture=radiobgnofocus, focusOnTexture=radiofocus, noFocusOnTexture=radiofocus, focusOffTexture=radionofocus, noFocusOffTexture=radionofocus)
self.addControl(self.Button0)
self.addControl(self.Button1)
self.addControl(self.Button2)
self.addControl(self.Button3)
self.Button0.setSelected(False)
self.Button1.setSelected(False)
self.Button2.setSelected(True)
self.Button3.setSelected(False)
self.buttonWrite=xbmcgui.ControlButton(leftside,T+H-40-BorderWidth,(W/2)-(BorderWidth*2),35,"Write File",textColor="0xFF000000",focusedColor="0xFF000000",alignment=2,focusTexture=buttonfocus,noFocusTexture=buttonnofocus)
self.buttonCancel=xbmcgui.ControlButton(rightside+BorderWidth*2,T+H-40-BorderWidth,(W/2)-(BorderWidth*2),35,"Cancel",textColor="0xFF000000",focusedColor="0xFF000000",alignment=2,focusTexture=buttonfocus,noFocusTexture=buttonnofocus)
self.addControl(self.buttonWrite); self.addControl(self.buttonCancel)
self.buttonWrite.controlLeft(self.buttonCancel); self.buttonWrite.controlRight(self.buttonCancel); self.buttonWrite.controlUp(self.Button3); self.buttonWrite.controlDown(self.videoCacheSize)
self.buttonCancel.controlLeft(self.buttonWrite); self.buttonCancel.controlRight(self.buttonWrite); self.buttonCancel.controlUp(self.Button3); self.buttonCancel.controlDown(self.videoCacheSize)
self.videoCacheSize.controlUp(self.buttonWrite); self.videoCacheSize.controlDown(self.CURLTimeout)
self.CURLTimeout.controlUp(self.videoCacheSize); self.CURLTimeout.controlDown(self.readBufferFactor)
self.readBufferFactor.controlUp(self.CURLTimeout); self.readBufferFactor.controlDown(self.Button0)
self.Button0.controlUp(self.CURLTimeout); self.Button0.controlDown(self.Button1); self.Button0.controlLeft(self.readBufferFactor); self.Button0.controlRight(self.readBufferFactor);
self.Button1.controlUp(self.Button0); self.Button1.controlDown(self.Button2); self.Button1.controlLeft(self.readBufferFactor); self.Button1.controlRight(self.readBufferFactor);
self.Button2.controlUp(self.Button1); self.Button2.controlDown(self.Button3); self.Button2.controlLeft(self.readBufferFactor); self.Button2.controlRight(self.readBufferFactor);
self.Button3.controlUp(self.Button2); self.Button3.controlDown(self.buttonWrite); self.Button3.controlLeft(self.readBufferFactor); self.Button3.controlRight(self.readBufferFactor);
self.setFocus(self.videoCacheSize)
def doExit(self):
self.CloseWindow()
def updateCurrent(self, control):
if control == self.videoCacheSize:
self.currentVideo = (self.videomax)*self.videoCacheSize.getPercent()/100
current = '[COLOR %s]Current:[/COLOR] [COLOR %s]%s MB[/COLOR]' % (COLOR1, COLOR2, int(self.currentVideo))
self.currentVideo1.setText(current)
elif control == self.CURLTimeout:
self.currentCurl = (self.curlmax)*self.CURLTimeout.getPercent()/100
current = '[COLOR %s]Current:[/COLOR] [COLOR %s]%ss[/COLOR]' % (COLOR1, COLOR2, int(self.currentCurl))
self.currentCurl2.setText(current)
elif control == self.readBufferFactor:
self.currentRead = (self.readmax)*self.readBufferFactor.getPercent()/100
current = '[COLOR %s]Current:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR1, COLOR2, int(self.currentRead))
self.currentRead3.setText(current)
elif control in [self.Button0, self.Button1, self.Button2, self.Button3]:
self.Button0.setSelected(False)
self.Button1.setSelected(False)
self.Button2.setSelected(False)
self.Button3.setSelected(False)
control.setSelected(True)
def doWrite(self):
#self.currentVideo = int((self.videomax-20)*self.videoCacheSize.getPercent()/100+20)*1024*1024
#self.currentCurl = int((self.curlmax)*self.CURLTimeout.getPercent()/100)
#self.currentRead = int((self.readmax)*self.readBufferFactor.getPercent()/100)
if self.Button0.isSelected(): buffermode = 0
elif self.Button1.isSelected(): buffermode = 1
elif self.Button2.isSelected(): buffermode = 2
elif self.Button3.isSelected(): buffermode = 3
if os.path.exists(ADVANCED):
choice = DIALOG.yesno(ADDONTITLE, "[COLOR %s]There is currently an active [COLOR %s]AdvancedSettings.xml[/COLOR], would you like to remove it and continue?[/COLOR]" % (COLOR2, COLOR1), yeslabel="[B][COLOR green]Remove Settings[/COLOR][/B]", nolabel="[B][COLOR red]Cancel Write[/COLOR][/B]")
if choice == 0: return
try: os.remove(ADVANCED)
except: f = open(ADVANCED, 'w'); f.close()
if KODIV < 17:
with open(ADVANCED, 'w+') as f:
f.write('<advancedsettings>\n')
f.write(' <network>\n')
f.write(' <buffermode>%s</buffermode>\n' % buffermode)
f.write(' <cachemembuffersize>%s</cachemembuffersize>\n' % int(self.currentVideo*1024*1024))
f.write(' <readbufferfactor>%s</readbufferfactor>\n' % self.currentRead)
f.write(' <curlclienttimeout>%s</curlclienttimeout>\n' % self.currentCurl)
f.write(' <curllowspeedtime>%s</curllowspeedtime>\n' % self.currentCurl)
f.write(' </network>\n')
f.write('</advancedsettings>\n')
f.close()
else:
with open(ADVANCED, 'w+') as f:
f.write('<advancedsettings>\n')
f.write(' <cache>\n')
f.write(' <buffermode>%s</buffermode>\n' % buffermode)
f.write(' <memorysize>%s</memorysize>\n' % int(self.currentVideo*1024*1024))
f.write(' <readfactor>%s</readfactor>\n' % self.currentRead)
f.write(' </cache>\n')
f.write(' <network>\n')
f.write(' <curlclienttimeout>%s</curlclienttimeout>\n' % self.currentCurl)
f.write(' <curllowspeedtime>%s</curllowspeedtime>\n' % self.currentCurl)
f.write(' </network>\n')
f.write('</advancedsettings>\n')
f.close()
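        # Added for reference: with the defaults above (500 MB cache, read
        # factor 5, curl 10s) on Kodi 17+, the written file looks roughly like:
        #   <advancedsettings>
        #     <cache>
        #       <buffermode>2</buffermode>
        #       <memorysize>524288000</memorysize>
        #       <readfactor>5</readfactor>
        #     </cache>
        #     <network>
        #       <curlclienttimeout>10</curlclienttimeout>
        #       <curllowspeedtime>10</curllowspeedtime>
        #     </network>
        #   </advancedsettings>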
wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, ADDONTITLE), '[COLOR %s]AdvancedSettings.xml have been written[/COLOR]' % COLOR2)
self.CloseWindow()
def onControl(self, control):
if control==self.buttonWrite: self.doWrite()
elif control==self.buttonCancel: self.doExit()
def onAction(self, action):
try: F=self.getFocus()
except: F=False
if F == self.videoCacheSize: self.updateCurrent(self.videoCacheSize)
elif F == self.CURLTimeout: self.updateCurrent(self.CURLTimeout)
elif F == self.readBufferFactor: self.updateCurrent(self.readBufferFactor)
elif F in [self.Button0, self.Button1, self.Button2, self.Button3] and action in [ACTION_MOUSE_LEFT_CLICK, ACTION_SELECT_ITEM]: self.updateCurrent(F)
elif action == ACTION_PREVIOUS_MENU: self.doExit()
elif action == ACTION_NAV_BACK: self.doExit()
def CloseWindow(self): self.close()
maxW=1280; maxH=720; W=int(900); H=int(650); L=int((maxW-W)/2); T=int((maxH-H)/2);
TempWindow=MyWindow(L=L,T=T,W=W,H=H,TxtColor=TxtColor,Font=Font,BorderWidth=BorderWidth);
TempWindow.doModal()
del TempWindow
##########################################
# `7MM"""YMM MMP""MM""YMM .g8"""bgd #
# MM `7 P' MM `7 .dP' `M #
# MM d MM dM' ` #
# MM""MM MM MM #
# MM Y MM MM. `7MMF' #
# MM MM `Mb. MM #
# .JMML. .JMML. `"bmmmdPY #
##########################################
def autoConfig2(msg='', TxtColor='0xFFFFFFFF', Font='font10', BorderWidth=10):
class MyWindow(xbmcgui.WindowDialog):
scr={};
def __init__(self,msg='',L=0,T=0,W=1280,H=720,TxtColor='0xFFFFFFFF',Font='font10',BorderWidth=10):
buttonfocus, buttonnofocus = artwork('button')
self.BG=xbmcgui.ControlImage(L+BorderWidth,T+BorderWidth,W-(BorderWidth*2),H-(BorderWidth*2), FANART, aspectRatio=0)
self.addControl(self.BG)
top = T+BorderWidth
leftside = L+BorderWidth
rightside = L+(W/2)-(BorderWidth*2)
header = '[COLOR %s]Quick Advanced Settings Configurator[/COLOR]' % (COLOR3)
self.Header=xbmcgui.ControlLabel(L, top, W, 30, header, font='font13', textColor=TxtColor, alignment=0x00000002)
self.addControl(self.Header)
top += 30+BorderWidth
#####Video Cache Size####
freeMemory = int(float(wiz.getInfo('System.Memory(free)')[:-2])*.33)
recMemory = int(float(wiz.getInfo('System.Memory(free)')[:-2])*.23)
self.videomin = 0; self.videomax = freeMemory if freeMemory < 2000 else 2000
self.recommendedVideo = recMemory if recMemory < 500 else 500; self.currentVideo = self.recommendedVideo
current1 = '[COLOR %s]Video Cache Size[/COLOR]=[COLOR %s]%s MB[/COLOR]' % (COLOR1, COLOR2, self.currentVideo)
recommended1 = '[COLOR %s]Video Cache Size:[/COLOR] [COLOR %s]%s MB[/COLOR]' % (COLOR1, COLOR5, self.recommendedVideo)
####CURL Timeout/CURL Low Speed####
self.curlmin = 0; self.curlmax = 20
self.recommendedCurl = 10; self.currentCurl = self.recommendedCurl
curlpos = wiz.percentage(self.currentCurl, self.curlmax)
recommended2 = '[COLOR %s]CURL Timeout/CURL Low Speed:[/COLOR] [COLOR %s]%ss[/COLOR]' % (COLOR1, COLOR5, self.recommendedCurl)
########Read Buffer Factor#####
self.readmin = 0; self.readmax = 10
self.recommendedRead = 5; self.currentRead = self.recommendedRead
readpos = wiz.percentage(self.currentRead, self.readmax)
recommended3 = '[COLOR %s]Read Buffer Factor:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR1, COLOR5, self.recommendedRead)
######Buffer Mode#####
recommended4 = '[COLOR %s]Buffer Mode:[/COLOR] [COLOR %s]2[/COLOR]' %(COLOR1, COLOR5)
####BOX##
            msgbox='[COLOR %s]These settings will be written to the advancedsettings.xml[/COLOR]\r\n\r\n%s\r\n%s\r\n%s\r\n%s' %(COLOR4, recommended4, recommended1, recommended3, recommended2)
self.box=xbmcgui.ControlTextBox(L+25,T+50,W,H, font='font14')
self.addControl(self.box)
self.box.setText(msgbox)
####Buttons###
self.buttonWrite=xbmcgui.ControlButton(leftside,T+H-40-BorderWidth,(W/2)-(BorderWidth*2),35,"Write File",textColor="0xFF000000",focusedColor="0xFF000000",alignment=2,focusTexture=buttonfocus,noFocusTexture=buttonnofocus)
self.buttonCancel=xbmcgui.ControlButton(rightside+BorderWidth*2,T+H-40-BorderWidth,(W/2)-(BorderWidth*2),35,"Cancel",textColor="0xFF000000",focusedColor="0xFF000000",alignment=2,focusTexture=buttonfocus,noFocusTexture=buttonnofocus)
self.addControl(self.buttonWrite); self.addControl(self.buttonCancel)
self.setFocus(self.buttonCancel)
self.buttonWrite.controlLeft(self.buttonCancel); self.buttonWrite.controlRight(self.buttonCancel); self.buttonCancel.controlLeft(self.buttonWrite); self.buttonCancel.controlRight(self.buttonWrite)
def doExit(self):
self.CloseWindow()
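        # Note: updateCurrent below is never invoked by this dialog and refers
        # to slider controls that autoConfig2 does not create; it only applies
        # to the full configurator above.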
def updateCurrent(self, control):
if control == self.videoCacheSize:
self.currentVideo = (self.videomax)*self.videoCacheSize.getPercent()/100
current = '[COLOR %s]Current:[/COLOR] [COLOR %s]%s MB[/COLOR]' % (COLOR1, COLOR2, int(self.currentVideo))
self.currentVideo1.setText(current)
elif control == self.CURLTimeout:
self.currentCurl = (self.curlmax)*self.CURLTimeout.getPercent()/100
current = '[COLOR %s]Current:[/COLOR] [COLOR %s]%ss[/COLOR]' % (COLOR1, COLOR2, int(self.currentCurl))
self.currentCurl2.setText(current)
elif control == self.readBufferFactor:
self.currentRead = (self.readmax)*self.readBufferFactor.getPercent()/100
current = '[COLOR %s]Current:[/COLOR] [COLOR %s]%s[/COLOR]' % (COLOR1, COLOR2, int(self.currentRead))
self.currentRead3.setText(current)
def doWrite(self):
buffermode = 2
if os.path.exists(ADVANCED):
choice = DIALOG.yesno(ADDONTITLE, "[COLOR %s]There is currently an active [COLOR %s]AdvancedSettings.xml[/COLOR], would you like to remove it and continue?[/COLOR]" % (COLOR2, COLOR1), yeslabel="[B][COLOR green]Remove Settings[/COLOR][/B]", nolabel="[B][COLOR red]Cancel Write[/COLOR][/B]")
if choice == 0: return
try: os.remove(ADVANCED)
except: f = open(ADVANCED, 'w'); f.close()
if KODIV < 17:
with open(ADVANCED, 'w+') as f:
f.write('<advancedsettings>\n')
f.write(' <network>\n')
f.write(' <buffermode>%s</buffermode>\n' % buffermode)
f.write(' <cachemembuffersize>%s</cachemembuffersize>\n' % int(self.currentVideo*1024*1024))
f.write(' <readbufferfactor>%s</readbufferfactor>\n' % self.currentRead)
f.write(' <curlclienttimeout>%s</curlclienttimeout>\n' % self.currentCurl)
f.write(' <curllowspeedtime>%s</curllowspeedtime>\n' % self.currentCurl)
f.write(' </network>\n')
f.write('</advancedsettings>\n')
f.close()
else:
with open(ADVANCED, 'w+') as f:
f.write('<advancedsettings>\n')
f.write(' <cache>\n')
f.write(' <buffermode>%s</buffermode>\n' % buffermode)
f.write(' <memorysize>%s</memorysize>\n' % int(self.currentVideo*1024*1024))
f.write(' <readfactor>%s</readfactor>\n' % self.currentRead)
f.write(' </cache>\n')
f.write(' <network>\n')
f.write(' <curlclienttimeout>%s</curlclienttimeout>\n' % self.currentCurl)
f.write(' <curllowspeedtime>%s</curllowspeedtime>\n' % self.currentCurl)
f.write(' </network>\n')
f.write('</advancedsettings>\n')
f.close()
wiz.LogNotify("[COLOR %s]%s[/COLOR]" % (COLOR1, ADDONTITLE), '[COLOR %s]AdvancedSettings.xml have been written[/COLOR]' % COLOR2)
self.CloseWindow()
def onControl(self, control):
if control==self.buttonWrite: self.doWrite()
elif control==self.buttonCancel: self.doExit()
def onAction(self, action):
try: F=self.getFocus()
except: F=False
if action == ACTION_PREVIOUS_MENU: self.doExit()
elif action == ACTION_NAV_BACK: self.doExit()
def CloseWindow(self): self.close()
maxW=1280; maxH=720; W=int(700); H=int(350); L=int((maxW-W)/2); T=int((maxH-H)/2);
TempWindow=MyWindow(L=L,T=T,W=W,H=H,TxtColor=TxtColor,Font=Font,BorderWidth=BorderWidth);
TempWindow.doModal()
del TempWindow
##########################
### Converted to XML
##########################
def contact(msg=""):
class MyWindow(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.title = THEME3 % kwargs["title"]
self.image = kwargs["image"]
self.fanart = kwargs["fanart"]
self.msg = THEME2 % kwargs["msg"]
def onInit(self):
self.fanartimage = 101
self.titlebox = 102
self.imagecontrol = 103
self.textbox = 104
self.scrollcontrol = 105
self.showdialog()
def showdialog(self):
self.getControl(self.imagecontrol).setImage(self.image)
self.getControl(self.fanartimage).setImage(self.fanart)
self.getControl(self.fanartimage).setColorDiffuse('9FFFFFFF')
self.getControl(self.textbox).setText(self.msg)
self.getControl(self.titlebox).setLabel(self.title)
self.setFocusId(self.scrollcontrol)
def onAction(self,action):
if action == ACTION_PREVIOUS_MENU: self.close()
elif action == ACTION_NAV_BACK: self.close()
cw = MyWindow( "Contact.xml" , ADDON.getAddonInfo('path'), 'DefaultSkin', title=ADDONTITLE, fanart=CONTACTFANART, image=CONTACTICON, msg=msg)
cw.doModal()
del cw
def apkInstaller(apk):
class APKInstaller(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs):
self.shut=kwargs['close_time']
xbmc.executebuiltin("Skin.Reset(AnimeWindowXMLDialogClose)")
xbmc.executebuiltin("Skin.SetBool(AnimeWindowXMLDialogClose)")
def onClick(self,controlID): self.CloseWindow()
def onAction(self,action):
if action in [ACTION_PREVIOUS_MENU, ACTION_BACKSPACE, ACTION_NAV_BACK, ACTION_SELECT_ITEM, ACTION_MOUSE_LEFT_CLICK, ACTION_MOUSE_LONG_CLICK]: self.CloseWindow()
def CloseWindow(self):
xbmc.executebuiltin("Skin.Reset(AnimeWindowXMLDialogClose)")
xbmc.sleep(400)
self.close()
xbmc.executebuiltin('Skin.SetString(apkinstaller, Now that %s has been downloaded[CR]Click install on the next window!)' % apk)
popup = APKInstaller('APK.xml', ADDON.getAddonInfo('path'), 'DefaultSkin', close_time=34)
popup.doModal()
del popup
def speedTest(img):
class speedTest(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs):
self.imgfile = kwargs['img']
def onInit(self):
self.imagespeed = 101
self.button = 201
self.showdialog()
def showdialog(self):
self.setFocus(self.getControl(self.button))
self.getControl(self.imagespeed).setImage(self.imgfile)
def onClick(self,controlID): self.CloseWindow()
def onAction(self,action):
if action in [ACTION_PREVIOUS_MENU, ACTION_BACKSPACE, ACTION_NAV_BACK, ACTION_SELECT_ITEM, ACTION_MOUSE_LEFT_CLICK, ACTION_MOUSE_LONG_CLICK]: self.CloseWindow()
def CloseWindow(self):
self.close()
popup = speedTest('SpeedTest.xml', ADDON.getAddonInfo('path'), 'DefaultSkin', img=img)
popup.doModal()
del popup
def Preview(url):
class YTvid(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs):
self.url = kwargs['url']
def onInit(self):
self.button = 101
self.Obutton = 102
self.showdialog()
def showdialog(self):
self.setFocus(self.getControl(self.Obutton))
if wiz.getCond('System.HasAddon(plugin.video.youtube)') == 1:
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % self.url
xbmc.Player().play(url, windowed=False)
xbmc.sleep(2000)
if xbmc.Player().isPlayingVideo() == 0:
yt.PlayVideoB(self.url)
def onClick(self,controlID):
            if controlID == self.Obutton:
self.close()
else: self.CloseWindow()
def onAction(self,action):
if action in [ACTION_PREVIOUS_MENU, ACTION_BACKSPACE, ACTION_NAV_BACK, ACTION_SELECT_ITEM, ACTION_MOUSE_LEFT_CLICK, ACTION_MOUSE_LONG_CLICK]: self.CloseWindow(); xbmc.Player().stop()
def CloseWindow(self):
self.close()
YTv = YTvid('Preview.xml', ADDON.getAddonInfo('path'), 'DefaultSkin', url=url)
YTv.doModal()
del YTv
def firstRunSettings():
class firstRun(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs):
self.whitelistcurrent = kwargs['current']
def onInit(self):
self.title = 101
self.okbutton = 201
self.trakt = 301
self.debrid = 302
self.login = 303
self.alluc = 314
self.profiles = 305
self.advanced = 306
self.favourites = 307
self.superfav = 308
self.repo = 309
self.whitelist = 310
self.cache = 311
self.showdialog()
self.controllist = [self.trakt, self.debrid, self.login,
self.profiles, self.advanced,
self.favourites, self.superfav, self.repo,
self.whitelist, self.cache, self.alluc]
            # order must mirror self.controllist above so each checkbox maps
            # to its matching setting (superfav -> keepsuper, repo -> keeprepos)
            self.controlsettings = ['keeptrakt', 'keepdebrid', 'keeplogin',
                                    'keepprofiles', 'keepadvanced',
                                    'keepfavourites', 'keepsuper', 'keeprepos',
                                    'keepwhitelist', 'clearcache', 'keepalluc']
for item in self.controllist:
if wiz.getS(self.controlsettings[self.controllist.index(item)]) == 'true':
self.getControl(item).setSelected(True)
def showdialog(self):
self.getControl(self.title).setLabel(ADDONTITLE)
self.setFocus(self.getControl(self.okbutton))
def onClick(self, controlId):
if controlId == self.okbutton:
self.close()
for item in self.controllist:
at = self.controllist.index(item)
if self.getControl(item).isSelected(): wiz.setS(self.controlsettings[at], 'true')
else: wiz.setS(self.controlsettings[at], 'false')
if self.getControl(self.whitelist).isSelected() and not self.whitelistcurrent == 'true':
wiz.whiteList('edit')
fr = firstRun( "FirstRunSaveData.xml" , ADDON.getAddonInfo('path'), 'DefaultSkin', current=wiz.getS('keepwhitelist'))
fr.doModal()
del fr
def firstRun():
class MyWindow(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.title = THEME3 % ADDONTITLE
self.msg = "Currently no build installed from %s.\n\nSelect 'Build Menu' to install a %s Build or 'Ignore' to never see this message again.\n\nThank you for choosing %s." % (ADDONTITLE,BUILDERNAME, ADDONTITLE)
self.msg = THEME2 % self.msg
def onInit(self):
self.image = 101
self.titlebox = 102
self.textbox = 103
self.buildmenu = 201
self.ignore = 202
self.showdialog()
def showdialog(self):
self.getControl(self.image).setImage(FANART)
self.getControl(self.image).setColorDiffuse('9FFFFFFF')
self.getControl(self.textbox).setText(self.msg)
self.getControl(self.titlebox).setLabel(self.title)
self.setFocusId(self.buildmenu)
def onAction(self,action):
if action == ACTION_PREVIOUS_MENU: self.doIgnore()
elif action == ACTION_NAV_BACK: self.doIgnore()
        def onClick(self, controlId):
            if (controlId == self.buildmenu): self.doBuildMenu()
            else: self.doIgnore()
        def doBuildMenu(self):
            # Assumed implementation (the original handler is missing from
            # this copy): close the dialog and open the wizard's build menu;
            # the 'viewbuild' mode name is a guess.
            self.close()
            xbmc.executebuiltin('RunPlugin(plugin://%s/?mode=viewbuild)' % ADDON_ID)
        def doIgnore(self):
            # Assumed implementation: remember the choice so the prompt is
            # not shown again ('firstrunignore' is a hypothetical setting key).
            wiz.setS('firstrunignore', 'true')
            self.close()
    fr = MyWindow("FirstRun.xml", ADDON.getAddonInfo('path'), 'DefaultSkin')  # XML filename assumed
    fr.doModal()
    del fr
def notification(msg='', test=False):
class MyWindow(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.test = kwargs['test']
self.message = THEME2 % kwargs['msg']
def onInit(self):
self.image = 101
self.titlebox = 102
self.titleimage = 103
self.textbox = 104
self.scroller = 105
self.dismiss = 201
self.remindme = 202
self.showdialog()
def showdialog(self):
self.testimage = os.path.join(ART, 'text.png')
self.getControl(self.image).setImage(BACKGROUND)
self.getControl(self.image).setColorDiffuse('9FFFFFFF')
self.getControl(self.textbox).setText(self.message)
self.setFocusId(self.remindme)
if HEADERTYPE == 'Text':
self.getControl(self.titlebox).setLabel(THEME3 % HEADERMESSAGE)
else:
self.getControl(self.titleimage).setImage(HEADERIMAGE)
def doRemindMeLater(self):
            if not self.test:
wiz.setS("notedismiss","false")
wiz.log("[Notification] NotifyID %s Remind Me Later" % wiz.getS('noteid'), xbmc.LOGNOTICE)
self.close()
def doDismiss(self):
            if not self.test:
wiz.setS("notedismiss","true")
wiz.log("[Notification] NotifyID %s Dismissed" % wiz.getS('noteid'), xbmc.LOGNOTICE)
self.close()
def onAction(self,action):
if action == ACTION_PREVIOUS_MENU: self.doRemindMeLater()
elif action == ACTION_NAV_BACK: self.doRemindMeLater()
def onClick(self, controlId):
if (controlId == self.dismiss): self.doDismiss()
else: self.doRemindMeLater()
    xbmc.executebuiltin('Skin.SetString(headertexttype, %s)' % ('true' if HEADERTYPE == 'Text' else 'false'))
    xbmc.executebuiltin('Skin.SetString(headerimagetype, %s)' % ('true' if HEADERTYPE == 'Image' else 'false'))
notify = MyWindow( "Notifications.xml" , ADDON.getAddonInfo('path'), 'DefaultSkin', msg=msg, test=test)
notify.doModal()
del notify
def updateWindow(name='Testing Window', current='1.0', new='1.1', icon=ICON, fanart=FANART):
class MyWindow(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.name = THEME3 % kwargs['name']
self.current = kwargs['current']
self.new = kwargs['new']
self.icon = kwargs['icon']
self.fanart = kwargs['fanart']
self.msgupdate = "Update avaliable for installed build:\n[COLOR %s]%s[/COLOR]\n\nCurrent Version: v[COLOR %s]%s[/COLOR]\nLatest Version: v[COLOR %s]%s[/COLOR]\n\n[COLOR %s]*Recommened: Fresh install[/COLOR]" % (COLOR1, self.name, COLOR1, self.current, COLOR1, self.new, COLOR1)
self.msgcurrent = "Running latest version of installed build:\n[COLOR %s]%s[/COLOR]\n\nCurrent Version: v[COLOR %s]%s[/COLOR]\nLatest Version: v[COLOR %s]%s[/COLOR]\n\n[COLOR %s]*Recommended: Fresh install[/COLOR]" % (COLOR1, self.name, COLOR1, self.current, COLOR1, self.new, COLOR1)
def onInit(self):
self.imagefanart = 101
self.header = 102
self.textbox = 103
self.imageicon = 104
self.fresh = 201
self.normal = 202
self.ignore = 203
self.showdialog()
def showdialog(self):
self.getControl(self.header).setLabel(self.name)
            self.getControl(self.textbox).setText(THEME2 % (self.msgupdate if self.current < self.new else self.msgcurrent))
self.getControl(self.imagefanart).setImage(self.fanart)
self.getControl(self.imagefanart).setColorDiffuse('2FFFFFFF')
self.getControl(self.imageicon).setImage(self.icon)
self.setFocusId(self.fresh)
def doFreshInstall(self):
wiz.log("[Check Updates] [Installed Version: %s] [Current Version: %s] [User Selected: Fresh Install build]" % (BUILDVERSION, LATESTVERSION), xbmc.LOGNOTICE)
wiz.log("[Check Updates] [Next Check: %s]" % str(NEXTCHECK), xbmc.LOGNOTICE)
wiz.setS('lastbuildcheck', str(NEXTCHECK))
self.close()
url = 'plugin://%s/?mode=install&name=%s&url=fresh' % (ADDON_ID, urllib.quote_plus(BUILDNAME))
xbmc.executebuiltin('RunPlugin(%s)' % url)
def doNormalInstall(self):
wiz.log("[Check Updates] [Installed Version: %s] [Current Version: %s] [User Selected: Normal Install build]" % (BUILDVERSION, LATESTVERSION), xbmc.LOGNOTICE)
wiz.log("[Check Updates] [Next Check: %s]" % str(NEXTCHECK), xbmc.LOGNOTICE)
wiz.setS('lastbuildcheck', str(NEXTCHECK))
self.close()
url = 'plugin://%s/?mode=install&name=%s&url=normal' % (ADDON_ID, urllib.quote_plus(BUILDNAME))
xbmc.executebuiltin('RunPlugin(%s)' % url)
def doIgnore(self):
wiz.log("[Check Updates] [Installed Version: %s] [Current Version: %s] [User Selected: Ignore 3 Days]" % (BUILDVERSION, LATESTVERSION), xbmc.LOGNOTICE)
wiz.log("[Check Updates] [Next Check: %s]" % str(THREEDAYS), xbmc.LOGNOTICE)
wiz.setS('lastbuildcheck', str(THREEDAYS))
self.close()
def onAction(self,action):
if action == ACTION_PREVIOUS_MENU: self.doIgnore()
elif action == ACTION_NAV_BACK: self.doIgnore()
def onClick(self, controlId):
if (controlId == self.fresh): self.doFreshInstall()
elif (controlId == self.normal): self.doNormalInstall()
else: self.doIgnore()
update = MyWindow( "BuildUpdate.xml" , ADDON.getAddonInfo('path'), 'DefaultSkin', name=name, current=current, new=new, icon=icon, fanart=fanart)
update.doModal()
del update
|
[
"LeoNguena@gmail.com"
] |
LeoNguena@gmail.com
|
3b4876337161a3823dec9d8ca32ffca1ec5482ca
|
013444f1d7030f9bbe56240cb695e12b8cf71e18
|
/tests/conftest.py
|
e4e01bfc4ea668329cb14fae5617a196d32070e5
|
[
"MIT"
] |
permissive
|
caperea/django-admin-resumable-js
|
63feb8fd84d02d6d4bf9f74104c6b1dbbdbab373
|
3dbd8e46dd3558180525d861811881ad9966e2a3
|
refs/heads/master
| 2020-12-25T09:48:03.162724
| 2015-08-29T08:27:14
| 2015-08-29T08:27:14
| 41,584,547
| 0
| 0
| null | 2015-08-29T07:18:42
| 2015-08-29T07:18:41
| null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
import pytest
import os
from selenium import webdriver
browsers = {
'firefox': webdriver.Firefox,
#'chrome': webdriver.Chrome,
}
@pytest.fixture(scope='session',
params=browsers.keys())
def driver(request):
if 'DISPLAY' not in os.environ:
pytest.skip('Test requires display server (export DISPLAY)')
b = browsers[request.param]()
request.addfinalizer(lambda *args: b.quit())
return b
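# Illustrative usage of the fixture above (not part of this file); assumes a
# live-server fixture such as pytest-django's is available in the test run:
#
#   def test_admin_login_page(driver, live_server):
#       driver.get(live_server.url + "/admin/")
#       assert "Django" in driver.title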
def pytest_configure():
import django
from django.conf import settings
settings.configure(
DEBUG=False,
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
SITE_ID=1,
SECRET_KEY='not very secret in tests',
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
ROOT_URLCONF='tests.urls',
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'admin_resumable',
'tests',
),
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
MEDIA_ROOT=os.path.join(os.path.dirname(__file__), 'media')
)
try:
import django
django.setup()
except AttributeError:
pass
|
[
"jonath4n@gmail.com"
] |
jonath4n@gmail.com
|
159cad8ebc54a5a356612b41ee281288113cfa71
|
6df44f3f9660cf63f64cf31af98fa96c23d55bdc
|
/venv/bin/flask
|
e4023bf9e1b6ed60f4b570ff42d0ffc45fa02c1f
|
[] |
no_license
|
ethanchewy/FriendOrNot
|
82a9c8bda2e70e8e692d5c1aae1869a9cfcb6ae3
|
3aeb8e7b2a5481cad391767daf5eb0ffd20f1b04
|
refs/heads/master
| 2021-01-11T18:45:16.496180
| 2017-02-23T20:22:35
| 2017-02-23T20:22:35
| 79,618,551
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
#!/home/ethanc/pennapps/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"17chiue@gmail.com"
] |
17chiue@gmail.com
|
|
4fe46a3e69863bca6e98c1cb6ab5c17fd36f8261
|
5c531de5e4759c904e608b4fc653b2b041f79a0e
|
/779. K-th Symbol in Grammar.py
|
06e46cf683b3f090a9b595db7b7a9fd6675029aa
|
[] |
no_license
|
jianhui-ben/leetcode_python
|
133c7e6e5c7316d00607ba2e327239e002de28b2
|
fcc16124cc24a5993e27f5d97e78d8f290e68230
|
refs/heads/master
| 2022-06-05T22:32:18.034581
| 2022-05-17T02:27:11
| 2022-05-17T02:27:11
| 250,683,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
#779. K-th Symbol in Grammar
#On the first row, we write a 0. Now in every subsequent row, we look at the previous row and replace each occurrence of 0 with 01, and each occurrence of 1 with 10.
#Given row N and index K, return the K-th indexed symbol in row N. (The values of K are 1-indexed.)
#Examples:
#Input: N = 1, K = 1
#Output: 0
#Input: N = 2, K = 1
#Output: 0
#Input: N = 2, K = 2
#Output: 1
#Input: N = 4, K = 5
#Output: 1
class Solution:
def kthGrammar(self, N: int, K: int) -> int:
## recursion
if N==1: return 0
if K%2==1:
return self.kthGrammar(N-1,(K+1)//2)
else:
return 1-self.kthGrammar(N-1,(K+1)//2)
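# Added sanity check (not part of the submitted solution): the K-th symbol
# also equals the parity of set bits in K-1, since each 1-bit marks a flip
# on the path down from the root row.
if __name__ == "__main__":
    s = Solution()
    for N, K, want in [(1, 1, 0), (2, 1, 0), (2, 2, 1), (4, 5, 1)]:
        assert s.kthGrammar(N, K) == want == bin(K - 1).count("1") % 2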
|
[
"jianhui.ben@gmail.com"
] |
jianhui.ben@gmail.com
|
2c5c74923e47610c9a47e37607a43702de94c859
|
37260ad04c3a965014cd95d6b1bc81e3c3822068
|
/SoundSpeedPrediction/load_weights.py
|
ee4657c0b8252490221406ec3727e94dcb737e58
|
[] |
no_license
|
MarineBioAcousticsRC/ShipNoise
|
db17c330a7892c0eca57cd3377a3751844f7c3a9
|
07d3ccfdeef09e4a8609b9bbbc4f4d2097eb70f5
|
refs/heads/master
| 2022-11-27T13:36:17.284376
| 2021-12-10T19:54:25
| 2021-12-10T19:54:25
| 196,640,839
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,716
|
py
|
from keras.models import load_model, Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import concatenate
from keras.utils import np_utils
from keras import optimizers
from keras import backend as K
from keras.callbacks import TensorBoard, EarlyStopping
from keras import regularizers
import tensorflow as tf
import numpy as np
import data_gen_mixed_stand as pd
import ML_data_collection as dc
import model_gen as mg
from DataGenerator import Generator
import unpickle as up
folder = "J:\Pickled_Data_2\\"
filepath = 'J:\\scripts\\ML_Attempts\\Weights\\Mixed_Data\\attempt_2015.h5'
#definition of R^2 loss function
def coeff_determination(y_true, y_pred):
    # convert back to physical units before computing R^2 (matches RMSE/conv_mae below)
    y_true = pd.conv(y_true)
    y_pred = pd.conv(y_pred)
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return (1 - SS_res / (SS_tot + K.epsilon()))
#Root Mean Squared Error
def RMSE(y_true, y_pred):
return K.sqrt(K.mean(K.square(pd.conv(y_pred) - pd.conv(y_true))))
#mean absolute error but in m/s so its more readable
def conv_mae(y_true, y_pred):
return K.mean(abs(pd.conv(y_pred) - pd.conv(y_true)))
mlp = mg.create_mlp(5, regress=False)
cnn = mg.create_cnn(508, 508, 1, filters=(16,32), regress=False)
# create the input to our final set of layers as the *output* of both
# the MLP and CNN
combinedInput = concatenate([mlp.output, cnn.output])
# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(4, activation="relu")(combinedInput)
comb_output = (Dense(1,activation='linear', kernel_regularizer=regularizers.l1_l2(l1 = 0.01,l2 = 0.01)))(x)
# our final model will accept categorical/numerical data on the MLP
# input and images on the CNN input, outputting a single value
model = Model(inputs=[mlp.input, cnn.input], outputs=comb_output)
adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='mae', optimizer=adam,metrics=[conv_mae,RMSE,coeff_determination])
model.load_weights(filepath)
x_data,true_speeds,dates = pd.eval_batch(folder,1000)
pred_speeds = model.predict(x_data)
pred_speeds_conv = pd.conv(pred_speeds)
print(pd.conv(true_speeds))
print(dates)
# Drop entries with a missing date; iterate in reverse so pop() does not
# shift the indices of elements still to be visited.
for i in reversed(range(len(dates))):
    if dates[i] == 0:
        dates.pop(i)
        true_speeds.pop(i)
        pred_speeds_conv.pop(i)
dc.residual_plot(2015, pd.conv(true_speeds), pred_speeds_conv)
# dc.fitted_line(2015,pd.conv(true_speeds),pred_speeds_conv,dates)
dc.true_v_pred(2015,pd.conv(true_speeds),pred_speeds_conv)
|
[
"kfrasier@users.noreply.github.com"
] |
kfrasier@users.noreply.github.com
|
d5956bba85c35f02ea953943898e93b84a8d93c3
|
dee95af958bde1937d47646c706f815450023d3c
|
/maya-tools/shelf/scripts/reload_scripts.py
|
ac50bd9996f619e38a18925a2c91b83390122722
|
[] |
no_license
|
tws0002/byu-pipeline-tools
|
4e06556033ea3b4a0d4f2d792ae7962300b689a8
|
076b3f461a7dc239ca7919ed99dbcd212fdfe061
|
refs/heads/master
| 2020-03-16T00:24:22.763033
| 2018-05-03T01:44:24
| 2018-05-03T01:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
import maya.cmds as mc
import alembic_static_exporter
import alembic_exporter
import reference_selection
import alembic_tagger
import alembic_untagger
import checkout
import new_body
import playblast
import publish
import rollback
import fk_ik_snapping
import cluster_interpolate
import reference
import reload_scripts
import playground
import education
import crowdCycle
def go():
reload(alembic_static_exporter)
reload(alembic_exporter)
reload(reference_selection)
reload(alembic_tagger)
reload(alembic_untagger)
reload(checkout)
reload(new_body)
reload(playblast)
reload(publish)
reload(rollback)
reload(fk_ik_snapping)
reload(reload_scripts)
reload(reference)
reload(cluster_interpolate)
reload(playground)
reload(education)
reload(crowdCycle)
# reload(byuam)
# reload(byugui)
|
[
"benjamin.demann@gmail.com"
] |
benjamin.demann@gmail.com
|
f92340c5080f725a48b938b510a93a68ecd76fca
|
d1b0f7ba2f23a12dbc3aac6f8e54c646d8c18b95
|
/myprofileApp/urls.py
|
8493915c8364881266798bc5ac8aa51eb7db35a7
|
[] |
no_license
|
Oliver-CodeRepo/Oliver-Mulaku-Profile
|
f81a0e790913e4499616681309d5389f5c4ba60d
|
47e500bf1ad766e7e1fd7dff58e15f0a48192702
|
refs/heads/master
| 2023-03-01T04:28:55.536207
| 2021-02-05T16:32:17
| 2021-02-05T16:32:17
| 331,657,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.homePage, name='home'),
path('my-resume/', views.downloadFile, name='resume')
]
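# Illustrative only: with these route names, templates can link via
# {% url 'home' %} and {% url 'resume' %}.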
|
[
"sinyorr.mi@gmail.com"
] |
sinyorr.mi@gmail.com
|
786ebda7175d2b93a3367c33edc52cf774294e06
|
8ac3ef229436abdb2a9ae4c428ab1f62148897a3
|
/Vijay_Sir/28-04-2021/Constructor_Override.py
|
f0c5d291738df6cb877b0b0dbf8d4bd883e2963c
|
[] |
no_license
|
udayreddy026/pdemo_Python
|
358e01cf6599e1bea3f787e6dfae2d039ee66668
|
6b7272fe205053141ed273ae70beb4358a4071f2
|
refs/heads/main
| 2023-05-01T04:33:40.558187
| 2021-05-14T07:06:20
| 2021-05-14T07:06:20
| 349,928,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
class Example:
def __init__(self):
        self.a = 'String Instance Variable'
class Ex:
def __init__(self):
super(Ex, self).__init__()
self.b = 'Second Class Instance Variable'
class Ex1(Ex, Example):
def __init__(self):
super(Ex1, self).__init__()
self.c = 'Ex1 Class Instance variable'
self.a = 'Override Variable'
e = Ex1()
print(e.a)
print(e.b)
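# Added illustration: cooperative super() calls walk Ex1's MRO, which is why
# Ex.__init__ reaches Example.__init__ even though Ex does not inherit from
# Example directly.
print([cls.__name__ for cls in Ex1.__mro__])  # ['Ex1', 'Ex', 'Example', 'object']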
|
[
"udayreddy026gmail.com"
] |
udayreddy026gmail.com
|
04358d64c0ac30747ca35d7106ea8a5cbd6927be
|
be1fe1f60420c76bce5ffe71795bb5e7930d3771
|
/Algorithm_Specialization_Course3/Assignment2/clusteringBig.py
|
6615eb376b62141768d953f06d113ec12dd47f75
|
[] |
no_license
|
MagnIeeT/AlgorithmSpecializationStanford
|
b37c2a103835ef8abae1675b3c82080d7b6c31d3
|
d18167fc3a08614ec4a5b3ddb73328e39d9c40fb
|
refs/heads/master
| 2021-09-04T10:02:37.361788
| 2018-01-17T18:56:56
| 2018-01-17T18:56:56
| 114,814,492
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 05 22:48:14 2018
@author: Neeraj Agrawal
"""
from unionFind import UnionFind
def graph():
with open("clustering_big.txt", 'r') as f:
data = f.readlines()
numberNodes = int(data[0].split()[0])
vertexlist =[]
for line in data[1:]:
node = "".join([ el for el in line if el is not " "])[:-1]
vertexlist.append(node)
return numberNodes ,list(set(vertexlist))
numberNodes, vertexlist = graph()
hashtable = {}
for i, ele in enumerate(vertexlist):
    hashtable[ele] = i
uf = UnionFind(len(vertexlist))
def generatecloseVertex(vertex):
newVertices1 = []
newVertices2 = []
for i in range(len(vertex)):
newVertex = (vertex[:i]+str(int(not(int(vertex[i])))) + vertex[i+1:])
newVertices1.append(newVertex)
for i in range(len(vertex)-1):
newVertex = (vertex[:i]+str(int(not(int(vertex[i])))) + vertex[i+1:])
for j in range(i+1, len(vertex)):
newVertex2 = (newVertex[:j]+str(int(not(int(newVertex[j])))) + newVertex[j+1:])
newVertices2.append(newVertex2)
return newVertices1 + newVertices2
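# Worked example (added for clarity): for the 3-bit node '010' the function
# returns the distance-1 neighbours ['110', '000', '011'] followed by the
# distance-2 neighbours ['100', '111', '001'], so any two input vertices
# within Hamming distance 2 get unioned into one cluster below.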
def clustering(hashtable, uf, vertexlist):
for node in vertexlist:
newVertices = generatecloseVertex(node)
for closenode in newVertices:
            try:
                uf.union(hashtable[closenode], hashtable[node])
            except KeyError:
                pass  # generated neighbour is not an actual input vertex
k = len(set(uf.leaders))
return k
print(clustering(hashtable, uf, vertexlist))
|
[
"Neeraj.Agrawal@Honeywell.com"
] |
Neeraj.Agrawal@Honeywell.com
|
95173e69d72af810e8c4a7049b2ec3ff78a41093
|
8956d5b5c519b2c30e9ba83f8456ec7448cd00a3
|
/Ch02 kNN/kNN_sklearn.py
|
5cda4eec993e7d869b298a4626987f14b6d5b043
|
[] |
no_license
|
zhsama/ml_in_action
|
1f03da846fffa9011d92881fa78fcf498e36e150
|
e0b6af6b9ad1f322c3b1161930299a3338e921f9
|
refs/heads/master
| 2020-04-09T23:16:57.240355
| 2018-12-24T13:39:31
| 2018-12-24T13:39:31
| 160,652,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/7 15:43
# @Author : zhcf1ess
# @Site :
# @File : kNN_sklearn.py
# @Software: PyCharm
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 3
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features; a 2-D dataset avoids this ugly slicing
y = iris.target
# print 'X=', type(X), X
# print 'y=', type(y), y
# X = array([[-1.0, -1.1], [-1.0, -1.0], [0, 0], [1.0, 1.1], [2.0, 2.0], [2.0, 2.1]])
# y = array([0, 0, 0, 1, 1, 1])
# print 'X=', type(X), X
# print 'y=', type(y), y
h = .02  # step size in the mesh
# create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA'])
# cmap_bold = ListedColormap(['#FF0000', '#00FF00'])
for weights in ['uniform', 'distance']:
    # create an instance of the kNN classifier and fit the data
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
    # Plot the decision boundary: assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
|
[
"939020488@qq.com"
] |
939020488@qq.com
|
aa2a15e47cf92dff5ccf95452ce98b39fcdab1e3
|
edd0b7ecc93778832965f428cc591d8ee517be27
|
/pymoviedb/excs/__init__.py
|
34f031c1b0adb10f5328b554b135287ef181f7dd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SensiPeeps/PyMovieDB
|
035a90f9550ab80b6adccc335c350de5da707580
|
1686b267399bcfc8dfe8119fb67b90142f6a878c
|
refs/heads/master
| 2023-02-25T06:56:08.033303
| 2021-01-30T05:16:44
| 2021-01-30T05:16:44
| 293,163,066
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
class PyMovieDBError(Exception):
"""
Base class for all PyMovieDB errors
"""
class TmdbApiError(PyMovieDBError):
"""
    Raised when the API response is not `200 OK`.
"""
class ZeroResultsFound(PyMovieDBError):
"""
    Raised when zero results are found
    for a search query.
"""
|
[
"starry369126@outlook.com"
] |
starry369126@outlook.com
|
781153323d66fcb4c682d3c1a289b8abfdd599a0
|
b2db5765f567177d17b27163f6f17f1c32a1f275
|
/working on text data/cleaning_regency.py
|
ecb547dc86fb1d8dbf9203acbd2386ad74341820
|
[] |
no_license
|
udaykumar156/ML_codes
|
35ca95c3ebe29477e67ad3e4ece15b09c18b6779
|
e8f7edd0a8362ec6625ae79a1c43247845b3a78b
|
refs/heads/master
| 2021-01-19T20:22:56.819949
| 2017-11-04T06:03:16
| 2017-11-04T06:03:16
| 88,501,414
| 0
| 1
| null | 2018-02-08T09:50:19
| 2017-04-17T11:21:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
engine = create_engine('#######################################')
reg = pd.read_sql_table(table_name='regency_final', con=engine)
reg.fillna(np.nan, inplace=True)
reg.replace('None', np.nan, inplace=True)
reg.replace(' ', np.nan, inplace=True)
reg.isnull().sum()
reg.drop(['other degree','special interests','website',], axis=1, inplace=True)
reg.rename(columns={'consultation hour':'work_days'}, inplace=True)
reg.rename(columns={'mmc full reg. no':'mmcreg_no'}, inplace=True)
reg.rename(columns={'medical education':'education'}, inplace=True)
reg.rename(columns={'professional membership':'memberships'}, inplace=True)
reg.rename(columns={'procedure specialties':'speciality_1'}, inplace=True)
reg.rename(columns={'designation':'speciality_2'}, inplace=True)
reg.rename(columns={'academic affiliation':'academic_affiliation'}, inplace=True)
reg.loc[reg.academic_affiliation.str.contains('|'.join(['lecturer','Professor','Faculty']),case=False,na=False), 'parttime_prof'] = 1
reg.loc[reg.speciality_2.str.contains('Resident Consultant', case=False, na=False), 'resident_cons'] = 1
reg.loc[reg.speciality_2.str.contains('Part time Consultant', case=False, na=False), 'partime_cons'] = 1
reg.loc[reg.speciality_2.str.contains('Consultant Emergency', case=False, na=False), 'emergency_cons'] = 1
reg.loc[reg.speciality_2.str.contains('Visiting Consultant', case=False, na=False), 'visiting_cons'] = 1
reg.loc[reg.work_days.str.contains('Sunday', case=False,na=False), 'weekend'] = 1
reg.loc[reg.work_days.str.contains('By appointment', case=False, na=False), 'appointment_cons'] = 1
reg.loc[reg.work_days.str.contains('24/7', na=False), 'fullday_service'] = 1
reg.credentials.replace(np.nan, 'None', inplace=True)
import re
for x in range(len(reg)):
if reg.credentials[x] is not None:
reg.credentials[x] = re.sub(r'\([^\(]*?\)', r'', reg.credentials[x])
reg.education.replace(np.nan, 'None', inplace=True)
import re
for x in range(len(reg)):
if reg.education[x] is not None:
reg.education[x] = re.sub(r'\([^\(]*?\)', r'', reg.education[x])
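# Example of the cleanup above (added comment): the re.sub drops
# parenthesised qualifiers, e.g. "MBBS (UK), FRCS (Edin)" -> "MBBS , FRCS "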
reg.name = reg.name.str.split('|').str[0]
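# NB: str.strip('Dr.') below removes any of the characters 'D', 'r', '.'
# from both ends of the string, not just a literal 'Dr.' prefix.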
reg['duplicated_name'] = reg.name.str.strip('Dr.')
reg['duplicated_name'] = reg.duplicated_name.str.replace('S/O','')
reg['duplicated_name'] = reg.duplicated_name.str.replace('@','')
reg['duplicated_name'] = reg.duplicated_name.str.replace('A/L','')
reg['duplicated_name'] = reg.duplicated_name.str.replace('A/P','')
reg['duplicated_name'] = reg.duplicated_name.str.replace('.','')
reg['duplicated_name'] = reg.duplicated_name.str.replace(' ','')
reg.duplicated_name = reg.duplicated_name.str.upper()
reg['regency_data'] = 1
reg.reset_index(drop=True, inplace=True)
reg.shape
reg.to_csv('regency_final.csv', index=False, mode='w', encoding='utf-8')
|
[
"noreply@github.com"
] |
udaykumar156.noreply@github.com
|
5adf7e2ecf8e0908041fd9b472f7833397d98c44
|
e990606b3db8429e87f9a0a5a10e3c10b17ae13b
|
/data_proc/wordpiece/mlperf_tags.py
|
88313a6ba9df4bb9205a4755d963ed60a5c9aa46
|
[] |
no_license
|
michael-wzhu/redesign_vocab_for_zh_bert
|
4c9ae36295815ea58cfb34d4f46ac2ace097861f
|
4e7e72e8f4554feebba6ae2f553a5240e5bedcd4
|
refs/heads/master
| 2022-12-16T17:41:09.704451
| 2020-09-26T12:06:35
| 2020-09-26T12:06:35
| 274,569,222
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,882
|
py
|
# coding=utf-8
# Copyright 2020 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Master list of MLPerf tags to be logged for benchmark submissions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# ==============================================================================
# == Benchmarks ================================================================
# ==============================================================================
# translation/
TRANSFORMER = "transformer"
INPUT_MAX_LENGTH = "input_max_length"
OPT_LR_WARMUP_STEPS = "opt_learning_rate_warmup_steps"
MODEL_HP_INITIALIZER_GAIN = "model_hp_initializer_gain"
MODEL_HP_VOCAB_SIZE = "model_hp_vocab_size"
MODEL_HP_NUM_HIDDEN_LAYERS = "model_hp_hidden_layers"
MODEL_HP_EMBEDDING_SHARED_WEIGHTS = "model_hp_embedding_shared_weights"
MODEL_HP_ATTENTION_DENSE = "model_hp_attention_dense"
MODEL_HP_ATTENTION_DROPOUT = "model_hp_attention_dropout"
MODEL_HP_FFN_OUTPUT_DENSE = "model_hp_ffn_output_dense"
MODEL_HP_FFN_FILTER_DENSE = "model_hp_ffn_filter_dense"
MODEL_HP_RELU_DROPOUT = "model_hp_relu_dropout"
MODEL_HP_LAYER_POSTPROCESS_DROPOUT = "model_hp_layer_postprocess_dropout"
MODEL_HP_NORM = "model_hp_norm"
MODEL_HP_SEQ_BEAM_SEARCH = "model_hp_sequence_beam_search"
# ==============================================================================
# == Tags ======================================================================
# ==============================================================================
"""
Tags may be used by all models, a subset of models, or only one model. A
specification for which models require which tags can be found below the tag
definitions.
"""
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# All models: Tags which should appear in absolutely every MLPerf model.
# //////////////////////////////////////////////////////////////////////////////
# This tag signals to start the timer. Emission of this tag need not be (and
# generally will not be) the first part of a submission script. Rather, this
# tag must be emitted prior to performing any work which the MLPerf rules
# state must be timed. This tag is generally emitted directly before the first
# step which invokes random number generation or the first step which must be
# performed on the system under test. (Whichever comes first.) If clarification
# is needed, please file an issue under:
# https://github.com/mlperf/policies
RUN_START = "run_start"
# This tag signals that a submission has reached the relevant stopping criteria,
# and has completed all tasks which are performed in the reference. The wall
# time for a submission will be computed as the difference between the time
# when this tag is emitted and the time when the RUN_START is emitted.
RUN_STOP = "run_stop"
# This tag should be emitted immediately before ending a run, and should be the
# last tag emitted. This tag should indicate the completion of untimed post
# processing work such as system specific cleanup.
RUN_FINAL = "run_final"
# Emit this tag in the place(s) where random seeds are set.
RUN_SET_RANDOM_SEED = "run_set_random_seed"
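# Illustrative emission order for the lifecycle tags above (added sketch; the
# reference benchmarks use their own mlperf_log helpers to print these):
#
#   emit(RUN_SET_RANDOM_SEED)
#   emit(RUN_START)    # timing begins
#   ... timed benchmark work ...
#   emit(RUN_STOP)     # wall time = t(RUN_STOP) - t(RUN_START)
#   emit(RUN_FINAL)    # last tag, after untimed cleanup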
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Common Values: Constants which are expected to be reported across many models.
# These values are included for convenience.
# //////////////////////////////////////////////////////////////////////////////
BCE = "binary_cross_entropy"
CCE = "categorical_cross_entropy"
SGD = "stochastic_gradient_descent"
# Some conventions distinguish between "vanilla" SGD and SGD with momentum
# (where vanilla SGD would be the specific case of momentum=0)
SGD_WITH_MOMENTUM = "stochastic_gradient_descent_with_momentum"
ADAM = "adam"
LAZY_ADAM = "lazy_adam"
TRUNCATED_NORMAL = "truncated_normal"
RELU = "relu"
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Preprocessing: Tags for generic preprocessing steps
# //////////////////////////////////////////////////////////////////////////////
# The number of training examples in a single epoch
PREPROC_NUM_TRAIN_EXAMPLES = "preproc_num_train_examples"
# The number of evaluation examples in a single epoch
PREPROC_NUM_EVAL_EXAMPLES = "preproc_num_eval_examples"
# This tag is used to declare what part of code tokenizes the training data.
PREPROC_TOKENIZE_TRAINING = "preproc_tokenize_training"
# This tag is used to declare what part of code tokenizes the evaluation data.
PREPROC_TOKENIZE_EVAL = "preproc_tokenize_eval"
# The vocabulary size used for tokenization.
PREPROC_VOCAB_SIZE = "preproc_vocab_size"
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Input: Tags for the timed portion of the data input pipeline
# //////////////////////////////////////////////////////////////////////////////
# The number of examples in the training portion of the data pipeline. Generally
# this should match PREPROC_NUM_TRAIN_EXAMPLES. If it does not (for instance
# if certain examples are dropped in compliance with MLPerf rules), the
# call which declares this tag is a good place for a comment stating why the
# disparity is expected.
INPUT_SIZE = "input_size"
# The size of a training minibatch size. If this value is variable, please emit
# "-1" and then log an implementation specific characterization of the batch
# size which is a reasonable analog to the reference. (For instance log that
# all but the last batch has size 64, and the last batch is a partial batch)
INPUT_BATCH_SIZE = "input_batch_size"
# This tag indicates the location of the code which defines the order in
# which training examples are traversed. It is not necessary to describe the
# method in the tag emission (though comments are always welcome). Rather, this
# should simply provide a good starting point to an interested party.
INPUT_ORDER = "input_order"
# --------------------------------------
# -- Data Augmentation and Alteration --
# --------------------------------------
# ResNet random cropping
INPUT_CENTRAL_CROP = "input_central_crop"
INPUT_DISTORTED_CROP_MIN_OBJ_COV = "input_distorted_crop_min_object_covered"
INPUT_DISTORTED_CROP_RATIO_RANGE = "input_distorted_crop_aspect_ratio_range"
INPUT_DISTORTED_CROP_AREA_RANGE = "input_distorted_crop_area_range"
INPUT_DISTORTED_CROP_MAX_ATTEMPTS = "input_distorted_crop_max_attempts"
INPUT_MEAN_SUBTRACTION = "input_mean_subtraction"
# Random flip of an image for data augmentation
INPUT_RANDOM_FLIP = "input_random_flip"
INPUT_RESIZE = "input_resize"
INPUT_RESIZE_ASPECT_PRESERVING = "input_resize_aspect_preserving"
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Opt: Tags for declaring optimizer specific information. Submissions should
# declare and log explicit values rather than relying on defaults.
# //////////////////////////////////////////////////////////////////////////////
# The name of the optimizer used. (SGD, Adam, etc.)
OPT_NAME = "opt_name"
OPT_LR = "opt_learning_rate"
OPT_MOMENTUM = "opt_momentum"
OPT_WEIGHT_DECAY = "opt_weight_decay"
# beta1, beta2, and epsilon are optimizer hyperparameters associated with the
# Adam optimizer and its variants (e.g. LazyAdam).
OPT_HP_ADAM_BETA1 = "opt_hp_Adam_beta1"
OPT_HP_ADAM_BETA2 = "opt_hp_Adam_beta2"
OPT_HP_ADAM_EPSILON = "opt_hp_Adam_epsilon"
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Train: Tags for control flow during model training.
# //////////////////////////////////////////////////////////////////////////////
# This tag is emitted when a model first enters its training loop. This is not
# necessarily when it begins to apply gradients; rather, it should be placed at
# a location which logically partitions the submission code.
TRAIN_LOOP = "train_loop"
# The current epoch as said epoch begins training.
TRAIN_EPOCH = "train_epoch"
# This tag is used to indicate approximately where checkpoints are written. Some
# frameworks abstract away checkpoint saving; in such cases simply choose a
# logical place in the code which signals that the framework has been instructed
# to save checkpoints, along with an explanatory comment.
TRAIN_CHECKPOINT = "train_checkpoint"
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Eval: Tags for control flow during model evaluation.
# //////////////////////////////////////////////////////////////////////////////
# This tag should be emitted whenever the submission begins an evaluation pass
# for a given set of weights.
EVAL_START = "eval_start"
# The number of examples on which evaluation is performed.
EVAL_SIZE = "eval_size"
# The target quality at which the model may stop training.
EVAL_TARGET = "eval_target"
# The observed accuracy of the model at a given epoch.
EVAL_ACCURACY = "eval_accuracy"
# This tag should be emitted when the model has determined that it has met the
# target quality set by the reference.
EVAL_STOP = "eval_stop"
# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# Model: Tags for logging topology specific information.
# //////////////////////////////////////////////////////////////////////////////
# The loss function (cross entropy, squared error, etc.) used by the model. For
# more exotic loss functions such as those encountered in object detection
# models, additional benchmark specific subcomponents should also be logged.
MODEL_HP_LOSS_FN = "model_hp_loss_fn"
MODEL_HP_INITIAL_SHAPE = "model_hp_initial_shape"
MODEL_HP_FINAL_SHAPE = "model_hp_final_shape"
MODEL_L2_REGULARIZATION = "model_l2_regularization"
MODEL_EXCLUDE_BN_FROM_L2 = "model_exclude_bn_from_l2"
MODEL_HP_RELU = "model_hp_relu"
MODEL_HP_CONV2D_FIXED_PADDING = "model_hp_conv2d_fixed_padding"
MODEL_HP_BATCH_NORM = "model_hp_batch_norm"
MODEL_HP_DENSE = "model_hp_dense"
# ==============================================================================
# == Stdout tags ===============================================================
# ==============================================================================
# These tags are always logged to stdout. The rest will be logged to a file if
# one is available.
STDOUT_TAG_SET = {
RUN_START,
RUN_STOP,
RUN_FINAL,
TRAIN_LOOP,
TRAIN_EPOCH,
EVAL_START,
EVAL_SIZE,
EVAL_TARGET,
EVAL_ACCURACY,
EVAL_STOP,
}
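# ------------------------------------------------------------------------------
# Editor's sketch (not part of the original tag list): a minimal emitter showing
# one way STDOUT_TAG_SET could be used to route tags. The function name and the
# file handling below are illustrative assumptions, not the MLPerf helper API.
# ------------------------------------------------------------------------------
def _example_emit(tag, value=None, log_file=None):
    """Print tags in STDOUT_TAG_SET; append all others to log_file if given."""
    line = tag if value is None else "{}: {}".format(tag, value)
    if tag in STDOUT_TAG_SET or log_file is None:
        print(line)
    else:
        log_file.write(line + "\n")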
# ==============================================================================
# == Benchmark tag sets ========================================================
# ==============================================================================
ALL_USED_TAGS = set()
TRANSFORMER_TAGS = (
RUN_START,
RUN_STOP,
RUN_FINAL,
RUN_SET_RANDOM_SEED,
PREPROC_NUM_TRAIN_EXAMPLES,
PREPROC_NUM_EVAL_EXAMPLES,
PREPROC_TOKENIZE_TRAINING,
PREPROC_TOKENIZE_EVAL,
PREPROC_VOCAB_SIZE,
INPUT_BATCH_SIZE,
INPUT_MAX_LENGTH,
INPUT_ORDER,
OPT_NAME,
OPT_LR,
OPT_LR_WARMUP_STEPS,
OPT_HP_ADAM_BETA1,
OPT_HP_ADAM_BETA2,
OPT_HP_ADAM_EPSILON,
TRAIN_LOOP,
TRAIN_EPOCH,
EVAL_START,
EVAL_SIZE,
EVAL_TARGET,
EVAL_ACCURACY,
EVAL_STOP,
MODEL_HP_INITIALIZER_GAIN,
MODEL_HP_VOCAB_SIZE,
MODEL_HP_NUM_HIDDEN_LAYERS,
MODEL_HP_EMBEDDING_SHARED_WEIGHTS,
MODEL_HP_ATTENTION_DENSE,
MODEL_HP_ATTENTION_DROPOUT,
MODEL_HP_FFN_OUTPUT_DENSE,
MODEL_HP_FFN_FILTER_DENSE,
MODEL_HP_RELU_DROPOUT,
MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
MODEL_HP_NORM,
MODEL_HP_SEQ_BEAM_SEARCH,
)
ALL_USED_TAGS.update(TRANSFORMER_TAGS)
|
[
"michael_wzhu91@163.com"
] |
michael_wzhu91@163.com
|
618b5d6a7e123ec702bb14cb6cac13e2bdf14346
|
1c859d75ef777168d111f6dc7e5aa51d89d3028f
|
/chat/mainmenu/views.py
|
8334f1269cd140ef11354c3ea2c71e95d91dda41
|
[] |
no_license
|
Alex-proktor/Chat
|
ae8e34c2cccc2a687742e784f79413b96bbe3c4b
|
f561cfa308fdded6cdadf77316be905bd7b94495
|
refs/heads/master
| 2021-04-27T19:41:43.692910
| 2018-03-03T15:42:42
| 2018-03-03T15:42:42
| 122,362,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from django.shortcuts import render
from django.contrib import auth
def index(request):
return render(request, 'chat.html', {'username': auth.get_user(request).username})
|
[
"a.ivanov@dunice.net"
] |
a.ivanov@dunice.net
|
1082ace705179dde53219daae2a8d6cf3f9c2bba
|
bf25e2478d11132ea4db03c4b8e12180dd72b39a
|
/reviewweb/urls.py
|
6b528d4606635fb1e55ffc279c25006000fdc78c
|
[] |
no_license
|
hassanito/reviewweb
|
0892d6d444e93e88daabaa2289b7a1c8a8e69deb
|
3233299f0570f60ef0a1d321e56d19104900ceac
|
refs/heads/master
| 2020-06-25T13:54:27.716798
| 2019-09-09T21:09:13
| 2019-09-09T21:09:13
| 199,328,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
"""reviewweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.HomePage.as_view(), name="home"),
path("test/", views.TestPage.as_view(), name="test"),
path('thanks', views.ThanksPage.as_view(), name="thanks"),
path('accounts/',include('accounts.urls',namespace='accounts')),
path('accounts/',include('django.contrib.auth.urls')),
path('shops/',include('shops.urls',namespace='shops')),
path('ajax_calls/search/', views.autocompleteModel),
path('login/',views.ajax_login),
path('oauth/', include('social_django.urls', namespace='social')), # <--
path('comment/',views.comment,name='comment'),
path('review/',views.review_ajax,name='review_ajax'),
]
from django.conf import settings
from django.conf.urls.static import static
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"1Aa_12345"
] |
1Aa_12345
|
94171e19440d59601861aee4f580b056a82ba31e
|
104085f6878411a137521b17c06612e5f648ef33
|
/service_pro/service_pro/doctype/agent_payment_request/agent_payment_request_dashboard.py
|
bf5bad55f86a6116faab28263273e7b0828fce28
|
[
"MIT"
] |
permissive
|
ksbbalean/service-pro
|
d39f0d12977dd66627b9f7c0336c605d7be4c388
|
c89b39a8e9967dada50dc0db4b08460ed45843bf
|
refs/heads/master
| 2023-04-13T05:35:19.842021
| 2021-04-22T11:05:18
| 2021-04-22T11:05:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
from frappe import _
def get_data():
return {
'fieldname': 'agent_payment_request',
'transactions': [
{
'label': _('Linked Forms'),
'items': ["Journal Entry"]
}
]
}
|
[
"jangeles@bai.ph"
] |
jangeles@bai.ph
|
600d648aef968fa6d9aaf3ddd8d410059382df4b
|
65f856bb3c782fe2fec794192260d5b7aa997ef3
|
/wsc_django/wsc_django/apps/shop/services.py
|
0a53f3c8e183bdcaeeefad41252f7a5440069671
|
[
"MIT"
] |
permissive
|
hzh595395786/wsc_django
|
0c8faf0cac1d8db8d9e3fa22f6914b6b64bf788b
|
c0a4de1a4479fe83f36108c1fdd4d68d18348b8d
|
refs/heads/main
| 2023-06-06T07:26:17.979944
| 2021-06-24T13:14:53
| 2021-06-24T13:14:53
| 336,303,377
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,909
|
py
|
from uuid import uuid4
from django.db.models import Count
from product.constant import ProductStatus
from shop.models import Shop, HistoryRealName, ShopRejectReason, PayChannel
from shop.utils import get_shop_mini_program_qcode, put_qcode_file_to_tencent_cos
from user.models import User
from shop.constant import (
ShopStatus,
)
def create_shop(shop_info: dict, user: User):
"""
    Create a shop.
:param shop_info:{
"shop_name": "name",
"shop_img": "http://xxx",
"shop_province": 420000,
"shop_city": 420100,
"shop_county": 420101,
"shop_address": "光谷智慧谷一栋505",
"description": "xxxx",
"suggest_phone": "153xxxxxxxx",
"shop_phone": "152xxxxxxxx",
"super_admin_id": 1
}
    :param user: the user object creating the shop
:return:
"""
    # Create the shop.
    # Pick a random shop code; check it, and retry if it already exists.
while True:
shop_code = str(uuid4())[-9:]
shop = Shop.objects.filter(shop_code=shop_code)
if not shop:
break
shop_info["shop_code"] = shop_code
shop_info["shop_phone"] = user.phone
shop_info["super_admin_id"] = user.id
shop = Shop(**shop_info)
shop.save()
return shop
def create_pay_channel(pay_channel_info: dict, shop_id: int):
"""
    Create a pay_channel for a shop.
:param pay_channel_info:
:param shop_id:
:return:
"""
shop_pay_channel = PayChannel(shop_id=shop_id, **pay_channel_info)
shop_pay_channel.save()
return shop_pay_channel
def create_shop_reject_reason_by_shop_id(shop_id: int, reject_reason: str):
"""
    Create a rejection reason for a rejected shop.
:param shop_id:
:return:
"""
reject_reason = ShopRejectReason(id=shop_id, reject_reason=reject_reason)
reject_reason.save()
return reject_reason
def create_shop_creator_history_realname(shop_id: int, history_realname: str):
"""
    Store the shop creator's historical real name, bound to the shop.
:param shop_id:
:param history_realname:
:return:
"""
history_realname = HistoryRealName(id=shop_id, realname=history_realname)
history_realname.save()
return history_realname
def create_shop_mini_program_qcode(shop_code: str):
"""
    Create a mini program QR code for the shop.
:param shop_code:
:return:
"""
qcode_file = get_shop_mini_program_qcode(shop_code)
success, url = put_qcode_file_to_tencent_cos(qcode_file, shop_code)
return success, url
def update_shop_data(shop: Shop, args: dict):
"""
    Update shop information.
:param shop:
:param args:
:return:
"""
for k, v in args.items():
setattr(shop, k, v)
shop.save()
return shop
def get_shop_by_shop_code(shop_code: str, only_normal: bool = True):
"""
    Get a shop object by shop_code.
    :param shop_code: shop code
    :param only_normal: only return shops in normal status
:return:
"""
shop = Shop.objects.filter(shop_code=shop_code)
if shop and only_normal:
shop = shop.filter(status=ShopStatus.NORMAL)
shop = shop.first()
return shop
def get_shop_by_shop_id(shop_id: int, filter_close: bool = True):
"""
    Get a shop by shop id.
    :param shop_id: shop id
    :param filter_close: exclude closed shops
:return:
"""
shop = Shop.objects.filter(id=shop_id)
if shop and filter_close:
shop = shop.exclude(status=ShopStatus.CLOSED)
shop = shop.first()
return shop
def list_shop_by_shop_ids(shop_ids: list, filter_close: bool = True, role: int = 1):
"""
    Query a list of shops by a list of shop ids.
    :param shop_ids:
    :param filter_close: filter out closed shops
    :param role: caller role, 1 for regular users, 2 for admin users; regular users only see approved shops
:return:
"""
shop_list_query = Shop.objects.filter(id__in=shop_ids)
if shop_list_query and filter_close:
shop_list_query = shop_list_query.exclude(status=ShopStatus.CLOSED)
if role == 1:
shop_list_query = shop_list_query.filter(status=ShopStatus.NORMAL)
shop_list = shop_list_query.all()
return shop_list
def list_shop_by_shop_status(shop_status: int):
"""
    Query all shops with a given status.
:param shop_status:
:return:
"""
shop_list = Shop.objects.filter(status=shop_status).order_by('update_at').all()
return shop_list
def list_shop_creator_history_realname(shop_ids: list):
"""
    List the historical real names of shop creators.
:param shop_ids:
:return:
"""
history_realname_list = (
HistoryRealName.objects.filter(id__in=shop_ids).all()
)
return history_realname_list
def list_shop_reject_reason(shop_ids: list):
"""查询出所有的商铺拒绝信息"""
reject_reason_list = ShopRejectReason.objects.filter(id__in=shop_ids).all()
return reject_reason_list
|
[
"595395786@qq.com"
] |
595395786@qq.com
|
6e0eaa8412f98de3cb193a1d8b771c2ac490c3db
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/SimplePose_ID1038_for_PyTorch/parallel_encoding/paralle.py
|
22a4742535a0677b59076b72a684e6b7562466f4
|
[
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 13,118
|
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
# #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# # Created by: Hang Zhang, Rutgers University, Email: zhang.hang@rutgers.edu
# # Modified by Thomas Wolf, HuggingFace Inc., Email: thomas@huggingface.co
# # Copyright (c) 2017-2018
##
# # This source code is licensed under the MIT-style license found in the
# # LICENSE file in the root directory of this source tree
# #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Data Parallel"""
import threading
import functools
import torch
from torch.autograd import Variable, Function
import torch.npu.comm as comm
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
torch_ver = torch.__version__[:3]
__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion',
'patch_replication_callback']
def allreduce(*inputs):
"""Cross GPU all reduce autograd operation for calculate mean and
variance in SyncBN.
"""
return AllReduce.apply(*inputs)
class AllReduce(Function):
@staticmethod
def forward(ctx, num_inputs, *inputs):
ctx.num_inputs = num_inputs
ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
inputs = [inputs[i:i + num_inputs]
for i in range(0, len(inputs), num_inputs)]
# sort before reduce sum
inputs = sorted(inputs, key=lambda i: i[0].get_device())
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return tuple([t for tensors in outputs for t in tensors])
@staticmethod
def backward(ctx, *inputs):
inputs = [i.data for i in inputs]
inputs = [inputs[i:i + ctx.num_inputs]
for i in range(0, len(inputs), ctx.num_inputs)]
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])
class Reduce(Function):
@staticmethod
def forward(ctx, *inputs):
ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]
inputs = sorted(inputs, key=lambda i: i.get_device())
return comm.reduce_add(inputs)
@staticmethod
def backward(ctx, gradOutput):
return Broadcast.apply(ctx.target_gpus, gradOutput)
class DistributedDataParallelModel(DistributedDataParallel):
"""Implements data parallelism at the module level for the DistributedDataParallel module.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass,
gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: npu devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
    Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DistributedDataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def gather(self, outputs, output_device):
return outputs
class DataParallelModel(DataParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass,
gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: npu devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
    Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def gather(self, outputs, output_device):
return outputs
def replicate(self, module, device_ids):
modules = super(DataParallelModel, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
class DataParallelCriterion(DataParallel):
"""
Calculate loss in multiple-GPUs, which balance the memory usage.
    The targets are split across the specified devices by chunking in
the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`.
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
    Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
>>> y = net(x)
>>> loss = criterion(y, target)
"""
def forward(self, inputs, *targets, **kwargs):
# input should be already scattered
# scattering the targets instead
if not self.device_ids:
return self.module(inputs, *targets, **kwargs)
targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(inputs, *targets[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
# return Reduce.apply(*outputs) / len(outputs)
# return self.gather(outputs, self.output_device).mean()
return self.gather(outputs, self.output_device)
def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
assert len(modules) == len(inputs)
assert len(targets) == len(inputs)
if kwargs_tup:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
lock = threading.Lock()
results = {}
if torch_ver != "0.3":
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, target, kwargs, device=None):
if torch_ver != "0.3":
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.npu.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
if not isinstance(target, (list, tuple)):
target = (target,)
output = module(*(input + target), **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, target,
kwargs, device), )
for i, (module, input, target, kwargs, device) in
enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
###########################################################################
# Adapted from Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
#
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
    Execute a replication callback `__data_parallel_replicate__` on each module created
by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
    Note that, as all modules are isomorphic, we assign each sub-module a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead
of calling the callback of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
a69fba4e5d07f0b75304b6ba75e87e6f68467fdc
|
1e449c2b408c59f7722aeeacf01ac6d904016785
|
/boardapp/models.py
|
488018ad7e9ed9e552116797d04d734d4ff54611
|
[] |
no_license
|
alittlekitten/hangoverlion
|
62930111298000ceb99aa282bbbdbc596150f5c5
|
3643adfac2fb5c420a00e9548ef5c43a629f0c78
|
refs/heads/master
| 2023-04-29T00:29:59.874724
| 2019-06-01T10:17:53
| 2019-06-01T10:17:53
| 189,718,352
| 0
| 0
| null | 2023-04-21T20:33:41
| 2019-06-01T10:13:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 600
|
py
|
from django.db import models
# Create your models here.
class Board(models.Model):
title = models.CharField(max_length=200)
name = models.CharField(max_length=50)
pub_date = models.DateTimeField('date published')
body = models.TextField()
def __str__(self):
return self.title
class Comment(models.Model):
board = models.ForeignKey('Board',on_delete=models.CASCADE, related_name='comments')
comment_author = models.CharField(max_length = 10)
comment_contents = models.TextField(max_length=200)
created_date = models.DateTimeField(auto_now_add=True)
|
[
"alittlekitten"
] |
alittlekitten
|
a4988105b8f44db42f20393940d9d3a3ae4e6178
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/191/bmi.py
|
0ee130d805eb92fd958498062113b022207001d6
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
# data """Luke Skywalker,172,77
# C-3PO,167,75
# R2-D2,96,32
# Darth Vader,202,136
# Leia Organa,150,49
# Owen Lars,178,120
# Beru Whitesun lars,165,75
# R5-D4,97,32
# Biggs Darklighter,183,84
# Obi-Wan Kenobi,182,77
# Anakin Skywalker,188,84
# Chewbacca,228,112
# Han Solo,180,80
# Greedo,173,74
# Jek Tono Porkins,180,110
# Yoda,66,17
# Palpatine,170,75
# Boba Fett,183,78.2
# IG-88,200,140
# Bossk,190,113
# """
#
#
# ___ person_max_bmi data_?
# """Return (name, BMI float) of the character in data that
# has the highest BMI (rounded on 2 decimals)"""
# bmi # dict
# data_list ?.s.. "\n"
#
# ___ row __ ?
# current ?.s...s.. ","
# __ l.. ? > 1
# ? ? 0 f__ c.. 2 / i.. ? 1 / 100) ** 2
#
# name_max_bmi m.. b.. key b__.g..
# r.. ? r.. b.. ? 2
#
# # if __name__ == "__main__":
# # print(person_max_bmi())
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
a0fee7a87dd4a39dd09e787d7f658e7974737e16
|
f49bf19b343a6230573afdb5bdce61298baca914
|
/HT12/generate_teachers/core/tasks.py
|
59532528ee8df19922fdb68e1016d677a43f8447
|
[] |
no_license
|
DaniilOmelianenko/Python_Advanced_course_Hillel
|
bfb71fcfa0228bef74f4c0594c176223cd970f85
|
5fa7101801e7668bd57a106f40066d88d6845861
|
refs/heads/master
| 2023-05-14T00:24:29.820741
| 2021-03-31T11:22:45
| 2021-03-31T11:22:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
from datetime import date, timedelta
from core.models import Logger
from core.rates import get_kurstoday_rate, get_minfin_mejbank_rate,\
get_mono_rate, get_national_bank_rate, get_vkurse_rate
from django.core.mail import send_mail
from generate_teachers import celery_app
@celery_app.task
def send_mail_task(title, message, email):
send_mail(
subject=title,
message=message,
recipient_list=['support@support.com'],
from_email=email
)
@celery_app.task
def delete_old_logs():
old_date = date.today() - timedelta(days=7)
Logger.objects.filter(creation_date__lte=old_date).delete()
@celery_app.task
def collect_currency_rates():
get_vkurse_rate()
get_mono_rate()
get_minfin_mejbank_rate()
get_national_bank_rate()
get_kurstoday_rate()
|
[
"dr.odiram@gmail.com"
] |
dr.odiram@gmail.com
|
ba3339eeda813a3c7d315fcb1cb1c530a8080125
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/sklearn/preprocessing/_discretization.py
|
7b26ce916e1893d6d13fc02304699fa07bd412b3
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237
| 2020-09-06T03:15:14
| 2020-09-06T03:15:14
| 182,013,100
| 9
| 2
|
Apache-2.0
| 2022-12-09T21:01:00
| 2019-04-18T03:57:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 12,083
|
py
|
# -*- coding: utf-8 -*-
# Author: Henry Lin <hlin117@gmail.com>
# Tom Dupré la Tour
# License: BSD
from __future__ import division, absolute_import
import numbers
import numpy as np
import warnings
from . import OneHotEncoder
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.fixes import np_version
class KBinsDiscretizer(BaseEstimator, TransformerMixin):
"""Bin continuous data into intervals.
Read more in the :ref:`User Guide <preprocessing_discretization>`.
Parameters
----------
n_bins : int or array-like, shape (n_features,) (default=5)
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, (default='onehot')
Method used to encode the transformed result.
onehot
Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
onehot-dense
Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
ordinal
Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, (default='quantile')
Strategy used to define the widths of the bins.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D k-means
cluster.
Attributes
----------
n_bins_ : int array, shape (n_features,)
        Number of bins per feature. Bins whose width is too small
        (i.e., <= 1e-8) are removed with a warning.
    bin_edges_ : array of arrays, shape (n_features, )
        The edges of each bin. Contains arrays of varying shapes ``(n_bins_, )``
        Ignored features will have empty arrays.
Examples
--------
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
>>> est.fit(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt # doctest: +SKIP
array([[ 0., 0., 0., 0.],
[ 1., 1., 1., 0.],
[ 2., 2., 2., 1.],
[ 2., 2., 2., 2.]])
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
Notes
-----
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`sklearn.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`sklearn.feature_selection.VarianceThreshold`).
See also
--------
sklearn.preprocessing.Binarizer : class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
"""
def __init__(self, n_bins=5, encode='onehot', strategy='quantile'):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
def fit(self, X, y=None):
"""Fits the estimator.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
y : ignored
Returns
-------
self
"""
X = check_array(X, dtype='numeric')
valid_encode = ('onehot', 'onehot-dense', 'ordinal')
if self.encode not in valid_encode:
raise ValueError("Valid options for 'encode' are {}. "
"Got encode={!r} instead."
.format(valid_encode, self.encode))
valid_strategy = ('uniform', 'quantile', 'kmeans')
if self.strategy not in valid_strategy:
raise ValueError("Valid options for 'strategy' are {}. "
"Got strategy={!r} instead."
.format(valid_strategy, self.strategy))
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn("Feature %d is constant and will be "
"replaced with 0." % jj)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == 'uniform':
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == 'quantile':
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
if np_version < (1, 9):
quantiles = list(quantiles)
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
elif self.strategy == 'kmeans':
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(column[:, None]).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
            # Remove bins whose width is too small (i.e., <= 1e-8)
if self.strategy in ('quantile', 'kmeans'):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
                    warnings.warn('Bins whose width is too small (i.e., <= '
'1e-8) in feature %d are removed. Consider '
'decreasing the number of bins.' % jj)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if 'onehot' in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse=self.encode == 'onehot')
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_)), dtype=int))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature.
"""
orig_bins = self.n_bins
if isinstance(orig_bins, numbers.Number):
if not isinstance(orig_bins, (numbers.Integral, np.integer)):
raise ValueError("{} received an invalid n_bins type. "
"Received {}, expected int."
.format(KBinsDiscretizer.__name__,
type(orig_bins).__name__))
if orig_bins < 2:
raise ValueError("{} received an invalid number "
"of bins. Received {}, expected at least 2."
.format(KBinsDiscretizer.__name__, orig_bins))
return np.full(n_features, orig_bins, dtype=np.int)
n_bins = check_array(orig_bins, dtype=np.int, copy=True,
ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array "
"of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError("{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int."
.format(KBinsDiscretizer.__name__, indices))
return n_bins
def transform(self, X):
"""Discretizes the data.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : numeric array-like or sparse matrix
Data in the binned space.
"""
check_is_fitted(self, ["bin_edges_"])
Xt = check_array(X, copy=True, dtype=FLOAT_DTYPES)
n_features = self.n_bins_.shape[0]
if Xt.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xt.shape[1]))
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation. See documentation of
# numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.e-5
atol = 1.e-8
eps = atol + rtol * np.abs(Xt[:, jj])
Xt[:, jj] = np.digitize(Xt[:, jj] + eps, bin_edges[jj][1:])
np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)
if self.encode == 'ordinal':
return Xt
return self._encoder.transform(Xt)
def inverse_transform(self, Xt):
"""Transforms discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
Xt : numeric array-like, shape (n_sample, n_features)
Transformed data in the binned space.
Returns
-------
Xinv : numeric array-like
Data in the original feature space.
"""
check_is_fitted(self, ["bin_edges_"])
if 'onehot' in self.encode:
Xt = self._encoder.inverse_transform(Xt)
Xinv = check_array(Xt, copy=True, dtype=FLOAT_DTYPES)
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xinv.shape[1]))
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[np.int_(Xinv[:, jj])]
return Xinv
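if __name__ == "__main__":
    # Editor's sketch (not part of the original module): discretize only the
    # first column of a toy matrix with ColumnTransformer, as suggested in the
    # Notes of KBinsDiscretizer. The toy data and parameters are assumptions.
    from sklearn.compose import ColumnTransformer
    X_demo = np.array([[-2., 101.], [-1., 102.], [0., 103.], [1., 104.]])
    ct = ColumnTransformer(
        [("bins", KBinsDiscretizer(n_bins=2, encode="ordinal"), [0])],
        remainder="passthrough")
    print(ct.fit_transform(X_demo))  # binned first column, second untouched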
|
[
"leibingye@outlook.com"
] |
leibingye@outlook.com
|
a776987187e4d6bc91acfa696ed24a492103e6e9
|
4231a5ad312a3a03434a49b08ac6ed0bbce7edb7
|
/models/nets/resnet_v2.py
|
7a2ad32a045416fd9fadf2c06e5c52fb4fb726fd
|
[] |
no_license
|
fxle/DFL-CNN-tensorflow
|
db3c97f14b89d60f19e7350735028ecb4ec2019a
|
9c5e3574940ccfc5ada1f09b5afc8bbb03857b43
|
refs/heads/master
| 2020-04-07T04:40:05.303507
| 2018-11-16T08:35:42
| 2018-11-16T08:35:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,511
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import resnet_utils
slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
outputs_collections=None, scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
normalizer_fn=None, activation_fn=None,
scope='shortcut')
residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, biases_initializer=None,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
normalizer_fn=None, activation_fn=None,
scope='conv3')
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with slim.arg_scope([slim.conv2d],
activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v2 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
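# Editor's sketch (not part of the original file): resnet_utils.Block in the
# companion slim module is assumed to be a namedtuple with fields
# ('scope', 'unit_fn', 'args'), so a block can be inspected without building
# a graph. Illustrative only:
if __name__ == '__main__':
    _demo_block = resnet_v2_block('demo', base_depth=64, num_units=3, stride=2)
    # Expect two stride-1 units followed by one stride-2 unit, depth 256 each.
    print(_demo_block.scope, [unit['stride'] for unit in _demo_block.args])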
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_50.default_image_size = resnet_v2.default_image_size
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_101.default_image_size = resnet_v2.default_image_size
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_152.default_image_size = resnet_v2.default_image_size
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(inputs, blocks, num_classes, is_training=is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v2_200.default_image_size = resnet_v2.default_image_size
|
[
"noreply@github.com"
] |
fxle.noreply@github.com
|
bcc75b28b810ed3342694c31eda9379e6b0b1569
|
0af56ece1f50f93cd4e4841ba101600f958fe94c
|
/camera.py
|
0887896256ffbf12bfd73d540af4f556ee8777e2
|
[] |
no_license
|
strikerdlm/HAB-pi-cam
|
df21f8f483b0c8b5e0ca9ffb53cdec7af9f5ca91
|
b1eb0f4c87501adcae7cb5ec28f8ad96ddaa0e4d
|
refs/heads/master
| 2021-09-07T07:21:05.872448
| 2018-02-19T14:17:41
| 2018-02-19T14:17:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,917
|
py
|
#!/usr/bin/python2
# Import required modules
import picamera
import time
import signal
import sys
import os
# The following code will write the Process ID of this script to a hidden file
pid = os.getpid()
PIDfilename = ".PID"
PIDfile = open(PIDfilename, "wt")
PIDfile.write(str(pid))
PIDfile.close()
# Variables
numpics = 5 # number of still pictures taken
numburst = 5 # number of burst pictures taken
rectime = 300 # length of time to record in each loop between pictures (in seconds)
# Functions
# This function will take a number of still pictures, as defined by the input parameter
def capture(numPics):
for i in range(0,numPics):
picname = str(time.strftime('%I%M%S%p_%d-%m-%y'))
camera.capture('Pictures/' + picname + '.jpg')
time.sleep(1)
# This function will take a burst of pictures
def burst(numBurst):
camera.capture_sequence([ 'Pictures/' + str(time.strftime('%I%M%S%p_%d-%m-%y')) + '_burst' + str(i+1) + '.jpg' for i in range(numBurst) ])
def record(recTime):
vidname = str(time.strftime('%I%M%S%p_%d-%m-%y'))
camera.start_recording('Videos/' + vidname + '.h264')
time.sleep(recTime)
camera.stop_recording()
time.sleep(1)
# The following function handles the case when a kill signal is sent to the process
def signal_term_handler(signal, frame):
camera.close()
os.remove(PIDfilename) #removes the hidden temp PID file
sys.exit()
signal.signal(signal.SIGTERM, signal_term_handler)
try:
with picamera.PiCamera() as camera:
while True:
camera.start_preview(alpha=0) #starting the preview "warms up" the camera, and is recommended in the PiCamera documentation
time.sleep(2)
capture(numpics)
burst(numburst)
record(rectime)
camera.stop_preview()
time.sleep(2)
# Handles the case when user exits the running script using Control+C
except KeyboardInterrupt:
camera.close()
os.remove(PIDfilename) #removes the hidden temp PID file
sys.exit()
|
[
"robo_man11@hotmail.com"
] |
robo_man11@hotmail.com
|
bad6ea385845a9eb0bc05e0022b7b16e5480692f
|
9353358a88ca52b4fa5c62ed2f2d2f8feeebc194
|
/core/utils.py
|
31dbf88f744027f0b5771cb6555900de6edaa016
|
[] |
no_license
|
Ydrasil/neomad.org
|
d2b47f7ed983c5b4eea656d4f2c8d22a281e4af4
|
70f7b4c704fcb58c625a758ab8d5b55878ae130f
|
refs/heads/master
| 2021-01-19T07:17:46.160125
| 2017-07-18T15:52:26
| 2017-07-31T13:33:20
| 87,534,270
| 0
| 0
| null | 2017-07-26T16:38:54
| 2017-04-07T10:23:31
|
Python
|
UTF-8
|
Python
| false
| false
| 767
|
py
|
import base64
import re
from io import BytesIO
from feincms_cleanse import Cleanse
from PIL import Image
def is_base64(data):
    return len(re.findall(r'^data:\w+/(\w+);base64,', data))
def save_base64_image(data, output, size=None):
meta, data = data.split(',')
try:
        format_ = re.findall(r'data:\w+/(\w+);base64', meta)[0]
    except IndexError:  # re.findall returns a list, so a missing match raises IndexError
format_ = 'jpeg'
image_data = BytesIO(base64.b64decode(data))
image = Image.open(image_data)
if size:
image.thumbnail(size)
return image.save(output, format=format_)
def clean_html(html, allowed_tags=None):
cleaner = Cleanse()
if allowed_tags:
cleaner.allowed_tags = allowed_tags
cleaner.empty_tags = ('br', 'img',)
return cleaner.cleanse(html)
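if __name__ == '__main__':
    # Editor's sketch (not part of the original module): round-trip a tiny
    # in-memory PNG through the helpers above so the example stays
    # self-contained. The output filename and sizes are arbitrary assumptions.
    buf = BytesIO()
    Image.new('RGB', (4, 4), 'red').save(buf, format='PNG')
    data_uri = 'data:image/png;base64,' + base64.b64encode(buf.getvalue()).decode()
    assert is_base64(data_uri)
    with open('thumbnail_demo.png', 'wb') as out:
        save_base64_image(data_uri, out, size=(2, 2))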
|
[
"vincent.agnano@scopyleft.fr"
] |
vincent.agnano@scopyleft.fr
|