hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f225bf12838e6351622f05bd70af7bf51152e126 | 148 | py | Python | djusagi/groups/forms.py | carthage-college/django-djusagi | 8fb2fe0b9100ac4bd6ebd0ea7cf44d732f7fcc5a | [
"BSD-3-Clause"
] | null | null | null | djusagi/groups/forms.py | carthage-college/django-djusagi | 8fb2fe0b9100ac4bd6ebd0ea7cf44d732f7fcc5a | [
"BSD-3-Clause"
] | 2 | 2020-03-06T14:09:23.000Z | 2021-11-30T21:33:26.000Z | djusagi/groups/forms.py | carthage-college/django-djusagi | 8fb2fe0b9100ac4bd6ebd0ea7cf44d732f7fcc5a | [
"BSD-3-Clause"
] | null | null | null | from django import forms
| 16.444444 | 30 | 0.587838 | from django import forms
class SearchForm(forms.Form):
email = forms.EmailField(
label="Email",
        required=True
)
| 0 | 95 | 25 |
fe9d384ff8c4fedbe2ffc2321bdfe27173895e5f | 783 | py | Python | tests/settings.py | anthonyalmarza/django-timeseries | e7ce5ef15daad18d11e4c0e49603df7d0fff2dc5 | [
"MIT"
] | 15 | 2017-11-04T19:13:11.000Z | 2021-06-08T04:42:31.000Z | tests/settings.py | anthonyalmarza/django-timeseries | e7ce5ef15daad18d11e4c0e49603df7d0fff2dc5 | [
"MIT"
] | 1 | 2019-12-10T23:25:12.000Z | 2019-12-10T23:25:12.000Z | tests/settings.py | anthonyalmarza/timeseries | e7ce5ef15daad18d11e4c0e49603df7d0fff2dc5 | [
"MIT"
] | 2 | 2017-04-24T11:59:04.000Z | 2017-06-07T13:37:09.000Z | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'tdzf@9g8lofi@lo$=126jrka1ydzjix^!8j)vg$6cd+kz^ei5h'
INSTALLED_APPS = [
'django.contrib.contenttypes',
'tests'
]
test_db = os.environ.get('TEST_DB_CONFIG', 'postgres')
db_user = os.environ.get('TEST_DB_USER', os.environ.get('USER', ''))
db_name = 'timeseries_tests' + os.environ.get('TEST_DB_NAME', '')
DB_CONFIGS = {
    # N.B. SQLite doesn't support DISTINCT ON (it's a PostgreSQL extension), so these tests need Postgres.
'postgres': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': db_name,
'USER': db_user,
'PASSWORD': ''
}
}
DATABASES = {
'default': DB_CONFIGS.get(test_db)
}
| 25.258065 | 71 | 0.65645 | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'tdzf@9g8lofi@lo$=126jrka1ydzjix^!8j)vg$6cd+kz^ei5h'
INSTALLED_APPS = [
'django.contrib.contenttypes',
'tests'
]
test_db = os.environ.get('TEST_DB_CONFIG', 'postgres')
db_user = os.environ.get('TEST_DB_USER', os.environ.get('USER', ''))
db_name = 'timeseries_tests' + os.environ.get('TEST_DB_NAME', '')
DB_CONFIGS = {
    # N.B. SQLite doesn't support DISTINCT ON (it's a PostgreSQL extension), so these tests need Postgres.
'postgres': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': db_name,
'USER': db_user,
'PASSWORD': ''
}
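    # Hypothetical sqlite entry (illustrative only, not part of the original
    # config); queries relying on DISTINCT ON would still fail on it:
    # 'sqlite': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': ':memory:',
    # },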
}
DATABASES = {
'default': DB_CONFIGS.get(test_db)
}
| 0 | 0 | 0 |
ba9c44aaf47f5818535f2580276719050a477e45 | 1,284 | py | Python | pygcn/models.py | han-2059/GCN | f7a701e78d761531d3dd26b9485ec19977161711 | [
"MIT"
] | 2 | 2021-07-26T08:06:45.000Z | 2022-01-20T01:18:56.000Z | pygcn/models.py | han-2059/GCN | f7a701e78d761531d3dd26b9485ec19977161711 | [
"MIT"
] | null | null | null | pygcn/models.py | han-2059/GCN | f7a701e78d761531d3dd26b9485ec19977161711 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
from .layers import GraphConvolution
| 34.702703 | 107 | 0.661994 | import torch.nn as nn
import torch.nn.functional as F
from .layers import GraphConvolution
class GCN(nn.Module):
    # Arguments: number of input features per node; hidden-layer size; number of output classes
    def __init__(self, nfeat, nhid, nclass, dropout):
        '''
        :param nfeat: number of input features per node
        :param nhid: number of hidden units
        :param nclass: number of output classes
        :param dropout: dropout probability
        '''
        # super().__init__() invokes the parent class (nn.Module) constructor
        super(GCN, self).__init__()
        # self.gc1 is a GraphConvolution layer (defined in layers.py) mapping nfeat inputs to nhid outputs
        self.gc1 = GraphConvolution(nfeat, nhid)
        # self.gc2 is a GraphConvolution layer mapping nhid inputs to nclass outputs
        self.gc2 = GraphConvolution(nhid, nclass)
        # dropout probability
        self.dropout = dropout
    # Forward pass; the x passed to F.log_softmax(x, dim=1) holds the per-node embeddings
    def forward(self, x, adj):
        # x is the node feature matrix and adj the adjacency matrix; self.gc1(x, adj) runs GraphConvolution.forward
        x = F.relu(self.gc1(x, adj))
        # Apply dropout at rate self.dropout; training=self.training keeps dropout active only during training
        x = F.dropout(x, self.dropout, training=self.training)
        # second graph convolution layer
        x = self.gc2(x, adj)
        # Return the output passed through log_softmax; dim=1 is the class dimension
return F.log_softmax(x, dim=1)
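# Usage sketch (not from the original repo; shapes are assumptions): with N
# nodes, x is an (N, nfeat) feature matrix and adj an (N, N) normalized
# adjacency, so the model returns (N, nclass) log-probabilities:
#     model = GCN(nfeat=1433, nhid=16, nclass=7, dropout=0.5)
#     log_probs = model(features, adj)
#     loss = F.nll_loss(log_probs[idx_train], labels[idx_train])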
| 583 | 982 | 23 |
85055fd4450669db56357200de53a076f91cd15a | 1,287 | py | Python | Talk/catalog/models.py | alex545228/def_hack_brain | 9d3e9f3918619e32ec187afdb48a17312aa6308c | [
"Apache-2.0"
] | null | null | null | Talk/catalog/models.py | alex545228/def_hack_brain | 9d3e9f3918619e32ec187afdb48a17312aa6308c | [
"Apache-2.0"
] | null | null | null | Talk/catalog/models.py | alex545228/def_hack_brain | 9d3e9f3918619e32ec187afdb48a17312aa6308c | [
"Apache-2.0"
] | 2 | 2021-11-03T21:16:30.000Z | 2021-11-05T08:54:13.000Z | from django.db import models
# class Person(models.Model):
# name = models.CharField(max_length=50)
# age = models.IntegerField()
# city = models.CharField(max_length=50)
# rating = models.IntegerField()
#
#
# class PersonalData(models.Model):
# name = models.CharField(max_length=50)
# mail = models.EmailField(max_length=254)
# telephone = models.CharField(max_length=20)
# social_id_1 = models.CharField(max_length=50)
# social_id_2 = models.CharField(max_length=50)
# social_id_3 = models.CharField(max_length=50)
# social_id_4 = models.CharField(max_length=50) | 33 | 51 | 0.724165 | from django.db import models
class Category(models.Model):
title = models.CharField(max_length=20)
class Group(models.Model):
category = models.CharField(max_length=20)
vk_id = models.IntegerField(null=True)
# class Person(models.Model):
# name = models.CharField(max_length=50)
# age = models.IntegerField()
# city = models.CharField(max_length=50)
# rating = models.IntegerField()
#
#
# class PersonalData(models.Model):
# name = models.CharField(max_length=50)
# mail = models.EmailField(max_length=254)
# telephone = models.CharField(max_length=20)
# social_id_1 = models.CharField(max_length=50)
# social_id_2 = models.CharField(max_length=50)
# social_id_3 = models.CharField(max_length=50)
# social_id_4 = models.CharField(max_length=50)
class PersonalData(models.Model):
name = models.CharField(max_length=50)
age = models.IntegerField()
city = models.CharField(max_length=50)
rating = models.IntegerField()
mail = models.EmailField(max_length=254)
telephone = models.CharField(max_length=20)
social_id_1 = models.CharField(max_length=50)
social_id_2 = models.CharField(max_length=50)
social_id_3 = models.CharField(max_length=50)
social_id_4 = models.CharField(max_length=50) | 0 | 605 | 69 |
0e8200286fd6502b0076e32d38a0d61232742e74 | 756 | py | Python | src/common/processor_worker.py | amarbasic/python-processor-setup | 25481ee79cbcf7af20544341fb0a3aa8470457ba | [
"MIT"
] | null | null | null | src/common/processor_worker.py | amarbasic/python-processor-setup | 25481ee79cbcf7af20544341fb0a3aa8470457ba | [
"MIT"
] | null | null | null | src/common/processor_worker.py | amarbasic/python-processor-setup | 25481ee79cbcf7af20544341fb0a3aa8470457ba | [
"MIT"
] | null | null | null | import logging
import traceback
import sys
from .processor_work_manager import ProcessorWorkManager
class ProcessorWorker:
"""Process worker class"""
def run(self):
"""Run worker"""
while self._running:
try:
self.process()
            except Exception:
logging.error(traceback.format_exception(*sys.exc_info()))
continue
def process(self):
"""Call process on queue"""
self.processor_work_manager.run()
def join(self):
"""Join processes"""
self._running = False | 26.068966 | 74 | 0.628307 | import logging
import traceback
import sys
from .processor_work_manager import ProcessorWorkManager
class ProcessorWorker:
"""Process worker class"""
def __init__(self, processor_work_manager: ProcessorWorkManager):
self._running = True
self.processor_work_manager = processor_work_manager
def run(self):
"""Run worker"""
while self._running:
try:
self.process()
            except Exception:
logging.error(traceback.format_exception(*sys.exc_info()))
continue
def process(self):
"""Call process on queue"""
self.processor_work_manager.run()
def join(self):
"""Join processes"""
self._running = False | 134 | 0 | 27 |
9caef61a006ff74f4975d7994384f2da8800709a | 520 | py | Python | src/Quiet.X.Tests/debug.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
] | null | null | null | src/Quiet.X.Tests/debug.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
] | null | null | null | src/Quiet.X.Tests/debug.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
] | null | null | null | from serial import Serial
from serial.tools.list_ports import comports
qPort = None
for p in list(comports()):
if p.product and 'FT232R' in p.product:
qPort = p.device
break
if not qPort:
raise Exception('No Quiet Board found')
quite = Serial(qPort, 57600, timeout=1)
loops = 0
while True:
loops += 1
quite.write('DIGI?\r\n'.encode())
read = quite.read_until()
if len(read) <= 0:
raise Exception(f'Failed after {loops} rounds')
 | 20 | 55 | 0.648077 | from serial import Serial
from serial.tools.list_ports import comports
qPort = None
for p in list(comports()):
if p.product and 'FT232R' in p.product:
qPort = p.device
break
if not qPort:
raise Exception('No Quiet Board found')
quite = Serial(qPort, 57600, timeout=1)
loops = 0
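# Poll the board's DIGI? query in a tight loop; a zero-length read means the
# board stopped answering, so report how many rounds it survived.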
while True:
loops += 1
quite.write('DIGI?\r\n'.encode())
read = quite.read_until()
if len(read) <= 0:
raise Exception(f'Failed after {loops} rounds')
| 0 | 0 | 0 |
29850a776755e89c74dc40bad748f55147252d21 | 1,228 | py | Python | setup.py | morepath/more.body_model | 6f581224adfefb42a9e140350937aa8b4b1de921 | [
"BSD-3-Clause"
] | null | null | null | setup.py | morepath/more.body_model | 6f581224adfefb42a9e140350937aa8b4b1de921 | [
"BSD-3-Clause"
] | 4 | 2017-02-22T15:26:06.000Z | 2021-01-20T14:35:30.000Z | setup.py | morepath/more.body_model | 6f581224adfefb42a9e140350937aa8b4b1de921 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
long_description = (
open("README.rst", encoding="utf-8").read()
+ "\n\n"
+ open("CHANGES.rst", encoding="utf-8").read()
)
setup(
name="more.body_model",
version="0.1dev0",
description="load_json infrastructure for Morepath",
long_description=long_description,
author="Henri Hulski",
author_email="henri.hulski@gazeta.pl",
keywords="morepath validation",
license="BSD",
url="https://github.com/morepath/more.body_model",
namespace_packages=["more"],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
install_requires=["morepath >= 0.17"],
extras_require=dict(
test=["pytest >= 2.9.1", "pytest-remove-stale-bytecode", "webtest"],
coverage=[
"pytest-cov",
],
pep8=[
"flake8",
"pep8-naming",
],
),
)
| 28.55814 | 76 | 0.595277 | from setuptools import setup, find_packages
long_description = (
open("README.rst", encoding="utf-8").read()
+ "\n\n"
+ open("CHANGES.rst", encoding="utf-8").read()
)
setup(
name="more.body_model",
version="0.1dev0",
description="load_json infrastructure for Morepath",
long_description=long_description,
author="Henri Hulski",
author_email="henri.hulski@gazeta.pl",
keywords="morepath validation",
license="BSD",
url="https://github.com/morepath/more.body_model",
namespace_packages=["more"],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
install_requires=["morepath >= 0.17"],
extras_require=dict(
test=["pytest >= 2.9.1", "pytest-remove-stale-bytecode", "webtest"],
coverage=[
"pytest-cov",
],
pep8=[
"flake8",
"pep8-naming",
],
),
)
| 0 | 0 | 0 |
2a5d9abc674e05d7760ec7aa0780fcfea6fd76c5 | 2,972 | py | Python | back/api/messages.py | LyonParapente/EventOrganizer | b263c2ce61b6ad1d6c414eb388ca5ee9492a9b73 | [
"MIT"
] | 4 | 2018-07-29T10:48:53.000Z | 2018-08-23T13:02:15.000Z | back/api/messages.py | LyonParapente/EventOrganizer | b263c2ce61b6ad1d6c414eb388ca5ee9492a9b73 | [
"MIT"
] | 7 | 2018-11-15T15:17:45.000Z | 2021-05-11T19:58:55.000Z | back/api/messages.py | LyonParapente/EventOrganizer | b263c2ce61b6ad1d6c414eb388ca5ee9492a9b73 | [
"MIT"
] | null | null | null | from flask import abort
from flask_restful_swagger_3 import Resource, swagger
from flask_jwt_extended import jwt_required
from models.message import Messages, MessagesComment, MessagesUser
from models.user import silence_user_fields
from database.manager import db
| 26.774775 | 78 | 0.596904 | from flask import abort
from flask_restful_swagger_3 import Resource, swagger
from flask_jwt_extended import jwt_required
from models.message import Messages, MessagesComment, MessagesUser
from models.user import silence_user_fields
from database.manager import db
def create_basic_user_infos(props):
user_infos = {
'firstname': props['firstname'],
'lastname': props['lastname'],
'phone': props.get('phone', '') or '',
'email': props.get('email', '') or '',
}
if bool(props.get('has_whatsapp', 0)) == True and props['phone']:
user_infos['has_whatsapp'] = True
return user_infos
class MessagesAPI(Resource):
@jwt_required()
@swagger.doc({
'tags': ['messages'],
'security': [
{'BearerAuth': []}
],
'parameters': [
{
'name': 'event_id',
'description': 'Event identifier',
'in': 'query',
'required': True,
'schema': {
'type': 'integer'
}
}
],
'responses': {
'200': {
'description': 'List of messages',
'content': {
'application/json': {
'schema': Messages
}
}
},
'401': {
'description': 'Not authenticated'
},
'404': {
'description': 'Event not found'
}
}
})
def get(self, _parser):
"""Download the list of messages for an event"""
query = _parser.parse_args(strict=True)
messages, registrations, creator = db.get_messages_list(query["event_id"])
if creator is None:
abort(404, 'Event not found')
comments = []
users = {}
participants = []
interested = []
for registration in registrations:
silence_user_fields(registration)
user = MessagesUser(**create_basic_user_infos(registration))
# Add user to list
user_id = registration['user_id']
users[str(user_id)] = user
if registration['interest'] == 1:
interested.append(user_id)
elif registration['interest'] == 2:
participants.append(user_id)
for message in messages:
silence_user_fields(message)
user = MessagesUser(**create_basic_user_infos(message))
# Add user to dict (or overwrite)
users[str(message['author_id'])] = user
comments.append(MessagesComment(**{
'date': message['creation_datetime'],
'user': message['author_id'],
'comment': message['comment']
}))
# Add creator to dict (or overwrite)
silence_user_fields(creator)
user = MessagesUser(**create_basic_user_infos(creator))
users[str(creator['id'])] = user
# Remove empty phone or email
for user_id in users:
user = users[user_id]
if user['phone'] == '':
del user['phone']
if user['email'] == '':
del user['email']
result = {
'users': users,
'comments': comments,
'participants': participants,
'interested': interested
}
return Messages(**result)
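# Illustrative request shape (the route and host are assumptions; URL routing
# is configured elsewhere in the app, not in this module):
#     GET /messages?event_id=42
#     Authorization: Bearer <JWT>
# The response maps user ids to contact details plus parallel comment,
# participant, and interested lists, as assembled above.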
| 319 | 2,342 | 46 |
0863b84ee32b6351f62db01edd9b09b8388608e0 | 739 | py | Python | scripts/quest/q6508e.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | scripts/quest/q6508e.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | scripts/quest/q6508e.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | # Teary Expression (6508)
from net.swordie.ms.enums import Stat
camilla = 1012108
rubble = 4000022
ouch = 5160024
# Grab current fame for quest-induced defame later
fame = chr.getStat(Stat.pop)
sm.setSpeakerID(camilla)
sm.sendNext(''.join(["Hello. Is there something I can... Eeek! "
"Is that #t", repr(rubble), "#? Is there a Golem nearby?! I'm scared! WAAAAAAH! \r\n\r\n"
"#fUI/UIWindow2.img/QuestIcon/4/0# \r\n"
"#i", repr(ouch), "# #t", repr(ouch), "# x 1 \r\n"
"#fUI/UIWindow2.img/QuestIcon/6/0# -1"]))
sm.giveItem(ouch)
chr.setStatAndSendPacket(Stat.pop, fame-1)
sm.completeQuest(parentID)
sm.setPlayerAsSpeaker()
sm.sendNext("#b(You learned the Teary Expression from Camilla. "
"However, your Fame went down as a result...)") | 28.423077 | 89 | 0.705007 | # Teary Expression (6508)
from net.swordie.ms.enums import Stat
camilla = 1012108
rubble = 4000022
ouch = 5160024
# Grab current fame for quest-induced defame later
fame = chr.getStat(Stat.pop)
sm.setSpeakerID(camilla)
sm.sendNext(''.join(["Hello. Is there something I can... Eeek! "
"Is that #t", repr(rubble), "#? Is there a Golem nearby?! I'm scared! WAAAAAAH! \r\n\r\n"
"#fUI/UIWindow2.img/QuestIcon/4/0# \r\n"
"#i", repr(ouch), "# #t", repr(ouch), "# x 1 \r\n"
"#fUI/UIWindow2.img/QuestIcon/6/0# -1"]))
sm.giveItem(ouch)
chr.setStatAndSendPacket(Stat.pop, fame-1)
sm.completeQuest(parentID)
sm.setPlayerAsSpeaker()
sm.sendNext("#b(You learned the Teary Expression from Camilla. "
"However, your Fame went down as a result...)") | 0 | 0 | 0 |
0310c49ff233035805061ff9975d3bbd318b49cb | 862 | py | Python | time_app/admin.py | MthwBrwn/Varius_project | 478d423f49665becae41eda60597899d4940050c | [
"MIT"
] | null | null | null | time_app/admin.py | MthwBrwn/Varius_project | 478d423f49665becae41eda60597899d4940050c | [
"MIT"
] | 7 | 2020-02-11T01:59:04.000Z | 2021-06-10T18:04:40.000Z | time_app/admin.py | MthwBrwn/Varius_project | 478d423f49665becae41eda60597899d4940050c | [
"MIT"
] | null | null | null | from import_export import resources
from import_export.admin import ImportExportModelAdmin
# from import_export.admin import ImportExportActionModelAdmin
from django.contrib import admin
from .models import TimePost, Client, Project
# admin.site.register(TimePost)
@admin.register(TimePost)
# class TimePostAdmin(ImportExportActionModelAdmin):
admin.site.register(Client)
admin.site.register(Project)
| 26.9375 | 62 | 0.704176 | from import_export import resources
from import_export.admin import ImportExportModelAdmin
# from import_export.admin import ImportExportActionModelAdmin
from django.contrib import admin
from .models import TimePost, Client, Project
class TimePostResource(resources.ModelResource):
class Meta:
model = TimePost
fields = (
'user__username',
'date', 'time_spent', 'notes',
'expenses', 'expense_notes', 'miles',
            'miles_notes', 'client__name', 'project__name',
)
# export_order = ('id', 'price', 'author', 'name')
# admin.site.register(TimePost)
@admin.register(TimePost)
# class TimePostAdmin(ImportExportActionModelAdmin):
class TimePostAdmin(ImportExportModelAdmin):
resource_class = TimePostResource
admin.site.register(Client)
admin.site.register(Project)
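# Usage note (illustrative; follows the standard django-import-export API):
# besides the admin Import/Export buttons, the resource can export directly:
#     dataset = TimePostResource().export()
#     csv_data = dataset.csv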
| 0 | 406 | 45 |
8c154bf2c335e8afd259563bae152d057158498d | 4,174 | py | Python | cosmosis-standard-library/intrinsic_alignments/flexible_grid/bias_grid.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-09-15T10:10:26.000Z | 2021-09-15T10:10:26.000Z | cosmosis-standard-library/intrinsic_alignments/flexible_grid/bias_grid.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | null | null | null | cosmosis-standard-library/intrinsic_alignments/flexible_grid/bias_grid.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-06-11T15:29:43.000Z | 2021-06-11T15:29:43.000Z | from __future__ import print_function
from builtins import range
from builtins import object
import scipy.interpolate as interp
import numpy as np
import pdb
"""
This module calculates the galaxy and intrinsic alignment bias
using the flexible grid parameterisation of Joachimi and Bridle
(2010) p 6-9.
Outputs both stochastic and systematic terms rI, bI, rg and bg.
"""
| 42.161616 | 103 | 0.572353 | from __future__ import print_function
from builtins import range
from builtins import object
import scipy.interpolate as interp
import numpy as np
import pdb
"""
This module calculates the galaxy and intrinsic alignment bias
using the flexible grid parameterisation of Joachimi and Bridle
(2010) p 6-9.
Outputs both stochastic and systematic terms rI, bI, rg and bg.
"""
class flexible_grid(object):
def __init__(self, config):
self.nz = config['nznodes']
self.nk = config['nknodes']
self.galaxy_bias = config['galaxy_bias']
self.intrinsic_alignments = config['intrinsic_alignments']
interface = {True: 'yes', False: 'no'}
print("intrinsic alignments: %s" % interface[self.intrinsic_alignments])
print("galaxy bias: %s" % interface[self.galaxy_bias])
print("initialised %d x %d (nz x nk) bias grid." % (self.nz, self.nk))
def setup_grid_nodes(self, block):
BI = np.zeros((self.nz, self.nk))
Bg = np.zeros((self.nz, self.nk))
for i in range(self.nz):
for j in range(self.nk):
if self.intrinsic_alignments:
BI[i, j] = block['intrinsic_alignment_parameters',
'node_%d_%d' % (i + 1, j + 1)]
if self.galaxy_bias:
Bg[i, j] = block['bias_parameters',
'node_%d_%d' % (i + 1, j + 1)]
#import pdb ; pdb.set_trace()
# Fix the edge nodes to zero
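        # NOTE: np.lib.pad returns a new padded array rather than modifying
        # its argument in place, so as written the two calls below discard
        # their result and the edge nodes are not actually zeroed.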
if self.intrinsic_alignments:
np.lib.pad(BI, 1, fixed_edge)
self.BI = BI
if self.galaxy_bias:
np.lib.pad(Bg, 1, fixed_edge)
self.Bg = Bg
# Load the power spectra required and one free amplitude parameter
if self.intrinsic_alignments:
self.AI = block.get_double('intrinsic_alignment_parameters', 'A')
self.z, self.k, self.b_I_fid = block.get_grid(
'intrinsic_alignment_parameters', 'z', 'k_h', 'b_I')
self.z, self.k, self.r_I_fid = block.get_grid(
'intrinsic_alignment_parameters', 'z', 'k_h', 'r_I')
if self.galaxy_bias:
self.Ag = block.get_double_grid('bias_parameters', 'A')
self.z, self.k, self.b_g_fid = block.get_double_grid(
'bias_parameters', 'z', 'k_h', 'b_g')
self.z, self.k, self.r_g_fid = block.get_double_grid(
'bias_parameters', 'z', 'k_h', 'r_g')
self.K = np.logspace(np.log10(self.k.min()),
np.log10(self.k.max()), self.nk)
self.Z = np.linspace(self.z.min(), self.z.max(), self.nz)
def interpolate_grid(self):
# Use the grid points to get nzxnk free bias parameters for an arbitrary set of k,z coordinates
if self.intrinsic_alignments:
ia_interp = interp.interp2d(np.log(self.K), self.Z, self.BI)
self.QI = ia_interp(np.log(self.k), self.z)
if self.galaxy_bias:
gb_interp = interp.interp2d(np.log(self.K), self.Z, self.Bg)
self.Qg = gb_interp(np.log(self.k), self.z)
def evaluate_and_save_bias(self, block):
# Use the interpolated grid to evaluate a bias at each k,z
if self.intrinsic_alignments:
b_I = self.AI * self.QI * self.b_I_fid
r_I = self.AI * self.QI * self.r_I_fid
block.replace_grid('intrinsic_alignments_parameters',
'z', self.z, 'k_h', self.k, 'b_I', b_I)
block.replace_grid('intrinsic_alignments_parameters',
'z', self.z, 'k_h', self.k, 'r_I', r_I)
if self.galaxy_bias:
b_g = self.Ag * self.Qg * self.b_g_fid
r_g = self.Ag * self.Qg * self.r_g_fid
block.replace_grid('intrinsic_alignments_parameters',
'z', self.z, 'k_h', self.k, 'b_g', b_g)
block.replace_grid('intrinsic_alignments_parameters',
'z', self.z, 'k_h', self.k, 'r_g', r_g)
def fixed_edge(v, width, i, kw):
v[:width[0]] = 0.
v[-width[1]:] = 0.
return v
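# Usage sketch (illustrative; the config keys mirror the lookups in __init__
# and 'block' stands for the CosmoSIS datablock these methods expect):
#     grid = flexible_grid({'nznodes': 5, 'nknodes': 5,
#                           'galaxy_bias': True, 'intrinsic_alignments': True})
#     grid.setup_grid_nodes(block)
#     grid.interpolate_grid()
#     grid.evaluate_and_save_bias(block)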
| 3,632 | 7 | 153 |
5bd1e639df502451d94703c409a07112f3d074f5 | 3,501 | py | Python | src/core/db/connection.py | KevinXuxuxu/datahub_lsems | 87ecaf877117b6747f2432fa4379243fdd3d87bd | [
"MIT"
] | null | null | null | src/core/db/connection.py | KevinXuxuxu/datahub_lsems | 87ecaf877117b6747f2432fa4379243fdd3d87bd | [
"MIT"
] | null | null | null | src/core/db/connection.py | KevinXuxuxu/datahub_lsems | 87ecaf877117b6747f2432fa4379243fdd3d87bd | [
"MIT"
] | null | null | null | import psycopg2
import re
from backend.mg import MGBackend
'''
@author: anant bhardwaj
@date: Oct 3, 2013
DataHub DB wrapper for backends (only postgres implemented)
Any new backend must implement the DataHubConnection interface
'''
class DataHubConnection:
'''
    The following methods work only in superuser mode
'''
| 30.982301 | 78 | 0.734076 | import psycopg2
import re
from backend.mg import MGBackend
'''
@author: anant bhardwaj
@date: Oct 3, 2013
DataHub DB wrapper for backends (only postgres implemented)
Any new backend must implement the DataHubConnection interface
'''
class DataHubConnection:
def __init__(self, user, password, repo_base=None):
self.backend = MGBackend(user, password, repo_base=repo_base)
def reset_connection(self, repo_base):
self.backend.reset_connection(repo_base=repo_base)
def close_connection(self):
self.backend.close_connection()
def create_repo(self, repo):
return self.backend.create_repo(repo=repo)
def list_repos(self):
return self.backend.list_repos()
def delete_repo(self, repo, force=False):
return self.backend.delete_repo(repo=repo, force=force)
def add_collaborator(self, repo, username, privileges, auto_in_future=True):
return self.backend.add_collaborator(
repo=repo,
username=username,
privileges=privileges,
auto_in_future=auto_in_future)
def delete_collaborator(self, repo, username):
return self.backend.delete_collaborator(repo=repo, username=username)
def list_tables(self, repo):
return self.backend.list_tables(repo=repo)
def list_views(self, repo):
return self.backend.list_views(repo=repo)
def get_schema(self, table):
return self.backend.get_schema(table=table)
def execute_sql(self, query, params=None):
return self.backend.execute_sql(query, params)
def has_base_privilege(self, login, privilege):
return self.backend.has_base_privilege(
login=login, privilege=privilege)
def has_repo_privilege(self, login, repo, privilege):
return self.backend.has_repo_privilege(
login=login, repo=repo, privilege=privilege)
def has_table_privilege(self, login, table, privilege):
return self.backend.has_table_privilege(
login=login, table=table, privilege=privilege)
def has_column_privilege(self, login, table, column, privilege):
return self.backend.has_column_privilege(
login=login, table=table, column=column, privilege=privilege)
'''
    The following methods work only in superuser mode
'''
def create_user(self, username, password, create_db):
return self.backend.create_user(username, password, create_db)
def remove_user(self, username):
return self.backend.remove_user(username)
def change_password(self, username, password):
return self.backend.change_password(username, password)
def import_file(self, table_name, file_path, file_format='CSV',
delimiter=',', header=True, encoding='ISO-8859-1', quote_character='"'):
return self.backend.import_file(
table_name=table_name,
file_path=file_path,
file_format=file_format,
delimiter=delimiter,
encoding=encoding,
quote_character=quote_character)
def export_table(self, table_name, file_path, file_format='CSV',
delimiter=',', header=True):
return self.backend.export_table(
table_name=table_name,
file_path=file_path,
file_format=file_format,
delimiter=delimiter)
def export_query(self, query, file_path, file_format='CSV',
delimiter=',', header=True):
return self.backend.export_query(
query=query,
file_path=file_path,
file_format=file_format,
delimiter=delimiter)
def list_collaborators(self, repo_base, repo):
return self.backend.list_collaborators(repo_base=repo_base, repo=repo)
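# Usage sketch (illustrative; credentials and repo names are made up):
#     conn = DataHubConnection(user='alice', password='secret',
#                              repo_base='alice')
#     conn.create_repo('demo')
#     print(conn.list_repos())
#     conn.close_connection()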
| 2,598 | 0 | 574 |
97d3f582ae3778129b742bcae3eec88c274dde87 | 2,046 | py | Python | djdt_api_requests/panels/api_requests.py | ingresso-group/django-debug-toolbar-api-requests | c95a515bba61e1be1ee6870d734b71491a019ace | [
"MIT"
] | 1 | 2020-05-19T20:07:28.000Z | 2020-05-19T20:07:28.000Z | djdt_api_requests/panels/api_requests.py | ingresso-group/django-debug-toolbar-api-requests | c95a515bba61e1be1ee6870d734b71491a019ace | [
"MIT"
] | 1 | 2018-08-08T14:59:44.000Z | 2018-08-08T14:59:44.000Z | djdt_api_requests/panels/api_requests.py | ingresso-group/django-debug-toolbar-api-requests | c95a515bba61e1be1ee6870d734b71491a019ace | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from debug_toolbar.panels import Panel
from django.utils.translation import ugettext_lazy as _, ungettext
import requests.sessions
| 31 | 79 | 0.637341 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from debug_toolbar.panels import Panel
from django.utils.translation import ugettext_lazy as _, ungettext
import requests.sessions
def patch_send(send_method, panel):
def patched_send(self, request, **kwargs):
response = send_method(self, request, **kwargs)
panel.record(
method=request.method,
url=request.url,
status_code=response.status_code,
elapsed=(1000 * response.elapsed.total_seconds())
)
return response
return patched_send
class ApiRequestsPanel(Panel):
nav_title = 'API Requests'
title = 'API Requests'
template = 'djdt_api_requests/panels/api_requests.html'
def __init__(self, *args, **kwargs):
super(ApiRequestsPanel, self).__init__(*args, **kwargs)
self._saved_send = None
self._requests = []
def record(self, **kwargs):
self._requests.append(kwargs)
@property
def nav_subtitle(self):
total_time = sum(r['elapsed'] for r in self._requests)
num_requests = len(self._requests)
return ungettext("%d API request in %.2fms",
"%d API requests in %.2fms", num_requests,
) % (len(self._requests), total_time)
def enable_instrumentation(self):
# monkeypatch the requests library
if self._saved_send is None:
self._saved_send = requests.sessions.Session.send
requests.sessions.Session.send = patch_send(self._saved_send, self)
def disable_instrumentation(self):
# unmonkeypatch the requests library
if self._saved_send is not None:
requests.sessions.Session.send = self._saved_send
self._saved_send = None
def generate_stats(self, request, response):
self.record_stats({
'num_requests': len(self._requests),
'total_time': sum(r['elapsed'] for r in self._requests),
'requests': self._requests
})
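# Enabling the panel is assumed to follow the usual debug-toolbar convention,
# e.g. in settings.py (the dotted path is inferred from this module's
# location and may differ in a real install):
#     DEBUG_TOOLBAR_PANELS = [
#         'djdt_api_requests.panels.api_requests.ApiRequestsPanel',
#         # ... plus the default panels ...
#     ]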
| 1,495 | 305 | 46 |
71f31158310c6b260a6cf8639123a46af5e52575 | 106 | py | Python | functions/villain/get/getVillain.py | jeffpignataro/hero-villan-serverless-api | 5c28e03f1a8b5c4de6e72c9cf718256b34e4a2df | [
"MIT"
] | null | null | null | functions/villain/get/getVillain.py | jeffpignataro/hero-villan-serverless-api | 5c28e03f1a8b5c4de6e72c9cf718256b34e4a2df | [
"MIT"
] | null | null | null | functions/villain/get/getVillain.py | jeffpignataro/hero-villan-serverless-api | 5c28e03f1a8b5c4de6e72c9cf718256b34e4a2df | [
"MIT"
] | null | null | null | from models.villain.villain import Villain
| 17.666667 | 43 | 0.688679 | from models.villain.villain import Villain
def getVillain(id):
return "Villain #{}".format(id)
| 35 | 0 | 25 |
9fb8d1876f501522b165c627bb68a7858bed948d | 201 | py | Python | aula15/tupla.py | Eduardo-Quirino/python | acece64c8ed9f35bc94d111861acf65b50ae263c | [
"Apache-2.0"
] | null | null | null | aula15/tupla.py | Eduardo-Quirino/python | acece64c8ed9f35bc94d111861acf65b50ae263c | [
"Apache-2.0"
] | null | null | null | aula15/tupla.py | Eduardo-Quirino/python | acece64c8ed9f35bc94d111861acf65b50ae263c | [
"Apache-2.0"
] | null | null | null | t_carros = ("HRV", "Golf", "Argo")  # tuple: its values cannot be changed
l_carros = list(t_carros)
l_carros[2] = "Focus"
t_carros = tuple(l_carros)
for x in t_carros:
print(x)
input()  # keeps the console open | 25.125 | 66 | 0.691542 | t_carros = ("HRV", "Golf", "Argo")  # tuple: its values cannot be changed
l_carros = list(t_carros)
l_carros[2] = "Focus"
t_carros = tuple(l_carros)
for x in t_carros:
print(x)
input()  # keeps the console open | 0 | 0 | 0 |
16a06257e948c6830419cd90d23115c71afc82b4 | 1,835 | py | Python | build_tools/benchmarks/common/linux_device_utils.py | anthonycanino/iree | be167a62f8872597eac1b72e26b4c62e291bfd5c | [
"Apache-2.0"
] | 1 | 2022-02-12T17:56:47.000Z | 2022-02-12T17:56:47.000Z | build_tools/benchmarks/common/linux_device_utils.py | okkwon/iree | e32cc76952d37a14f73c8a7da889edf47fcc2fce | [
"Apache-2.0"
] | null | null | null | build_tools/benchmarks/common/linux_device_utils.py | okkwon/iree | e32cc76952d37a14f73c8a7da889edf47fcc2fce | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Utils for accessing Linux device information."""
import re
from typing import Optional, Sequence
from .benchmark_definition import (execute_cmd_and_get_output, DeviceInfo,
PlatformType)
def get_linux_cpu_arch(verbose: bool = False) -> str:
"""Returns CPU Architecture, e.g., 'x86_64'."""
return _get_lscpu_field("Architecture", verbose)
def get_linux_cpu_features(verbose: bool = False) -> Sequence[str]:
"""Returns CPU feature lists, e.g., ['mmx', 'fxsr', 'sse', 'sse2']."""
return _get_lscpu_field("Flags", verbose).split(" ")
def get_linux_device_info(device_model: str = "Unknown",
cpu_uarch: Optional[str] = None,
verbose: bool = False) -> DeviceInfo:
"""Returns device info for the Linux device.
Args:
- device_model: the device model name, e.g., 'ThinkStation P520'
- cpu_uarch: the CPU microarchitecture, e.g., 'CascadeLake'
"""
return DeviceInfo(
PlatformType.LINUX,
# Includes CPU model as it is the key factor of the device performance.
model=device_model,
# Currently we only have x86, so CPU ABI = CPU arch.
cpu_abi=get_linux_cpu_arch(verbose),
cpu_uarch=cpu_uarch,
cpu_features=get_linux_cpu_features(verbose),
# We don't yet support GPU benchmark on Linux devices.
gpu_name="Unknown")
| 35.980392 | 77 | 0.680654 | #!/usr/bin/env python3
# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Utils for accessing Linux device information."""
import re
from typing import Optional, Sequence
from .benchmark_definition import (execute_cmd_and_get_output, DeviceInfo,
PlatformType)
def _get_lscpu_field(field_name: str, verbose: bool = False) -> str:
output = execute_cmd_and_get_output(["lscpu"], verbose)
  (value,) = re.findall(rf"^{field_name}:\s*(.+)", output, re.MULTILINE)
return value
def get_linux_cpu_arch(verbose: bool = False) -> str:
"""Returns CPU Architecture, e.g., 'x86_64'."""
return _get_lscpu_field("Architecture", verbose)
def get_linux_cpu_features(verbose: bool = False) -> Sequence[str]:
"""Returns CPU feature lists, e.g., ['mmx', 'fxsr', 'sse', 'sse2']."""
return _get_lscpu_field("Flags", verbose).split(" ")
def get_linux_device_info(device_model: str = "Unknown",
cpu_uarch: Optional[str] = None,
verbose: bool = False) -> DeviceInfo:
"""Returns device info for the Linux device.
Args:
- device_model: the device model name, e.g., 'ThinkStation P520'
- cpu_uarch: the CPU microarchitecture, e.g., 'CascadeLake'
"""
return DeviceInfo(
PlatformType.LINUX,
# Includes CPU model as it is the key factor of the device performance.
model=device_model,
# Currently we only have x86, so CPU ABI = CPU arch.
cpu_abi=get_linux_cpu_arch(verbose),
cpu_uarch=cpu_uarch,
cpu_features=get_linux_cpu_features(verbose),
# We don't yet support GPU benchmark on Linux devices.
gpu_name="Unknown")
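# Usage sketch (illustrative values, matching the docstring examples):
#     info = get_linux_device_info(device_model='ThinkStation P520',
#                                  cpu_uarch='CascadeLake')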
| 192 | 0 | 23 |
64e7ede2dcf9bdda884108d88c7d2da7d27bbc3b | 8,791 | py | Python | 8.py | ZerocksX/AOC2020 | 930a4d1da9e5a9efe0f1f94cb93d6b39039f774f | [
"MIT"
] | null | null | null | 8.py | ZerocksX/AOC2020 | 930a4d1da9e5a9efe0f1f94cb93d6b39039f774f | [
"MIT"
] | null | null | null | 8.py | ZerocksX/AOC2020 | 930a4d1da9e5a9efe0f1f94cb93d6b39039f774f | [
"MIT"
] | null | null | null | input = """nop +116
acc +12
acc -8
acc +34
jmp +485
acc +42
jmp +388
acc +36
nop +605
acc +17
jmp +411
acc +49
jmp +1
acc -9
jmp +289
jmp +288
jmp +74
acc +4
acc +42
jmp +258
acc +14
acc -13
nop +106
jmp +280
jmp +534
acc +41
acc +40
jmp +224
acc +43
acc +10
nop +240
jmp +211
acc +7
acc -3
acc +7
jmp +1
jmp +559
jmp +415
jmp +528
acc -16
jmp +568
jmp +442
nop +113
jmp +464
acc +42
jmp +336
acc -2
acc +39
jmp +251
acc -4
acc +42
jmp +528
acc +5
acc +30
nop +429
acc +49
jmp +86
acc +15
nop +145
acc -8
jmp +1
jmp +404
acc +26
acc +50
jmp +251
acc +47
jmp +1
acc +45
acc -5
jmp +357
acc +31
jmp +62
acc +25
nop +540
acc -13
acc +0
jmp +72
acc +28
acc +36
nop +475
acc -17
jmp +166
acc +4
acc +20
acc +30
acc +43
jmp +464
acc +4
jmp +94
jmp +44
nop +446
acc -16
nop +267
acc +30
jmp +519
acc +45
acc +47
jmp +62
acc +28
acc -13
acc +45
jmp +239
acc +12
jmp +1
nop +153
jmp +245
jmp +244
acc -12
jmp +308
jmp +190
jmp -86
acc +45
acc +1
acc +15
acc +30
jmp +350
acc +30
jmp +42
jmp +214
jmp +447
acc +24
jmp +453
acc +29
acc +42
jmp +302
acc -4
acc +33
jmp +447
acc -18
acc +15
acc -2
jmp -24
jmp -4
jmp +35
acc +0
jmp -83
acc -13
nop +437
acc -15
jmp +95
nop +289
jmp +348
acc +17
acc +23
acc +45
jmp +359
acc +18
jmp +352
acc +0
acc +13
acc +25
acc +11
jmp +331
acc -2
jmp +19
jmp -103
acc +34
acc +48
jmp +141
acc +44
jmp +1
acc +42
jmp +374
acc +45
acc +35
nop -37
acc -2
jmp +244
jmp +151
acc +36
acc +4
nop -64
jmp +231
nop +321
nop +291
acc +16
jmp -161
acc +17
nop +412
nop -89
nop +179
jmp -8
nop -167
acc +44
acc +4
jmp +42
acc +22
acc +28
acc +22
jmp +192
acc -18
acc -7
jmp -70
acc +27
acc +25
jmp +312
acc +50
acc -16
jmp -121
acc +14
acc +43
nop -111
jmp -54
nop +39
acc -4
acc +41
jmp +236
acc -11
jmp -118
jmp +150
acc -15
jmp -141
acc +14
jmp +1
acc -8
jmp -96
acc +11
nop -95
jmp +1
acc +47
jmp -113
nop +257
jmp +35
acc +45
acc +25
acc -6
jmp +31
jmp +1
nop +153
nop -39
jmp +25
acc +0
acc +50
jmp +362
acc -15
acc +0
acc +31
acc +22
jmp +69
acc -18
acc +24
jmp -38
acc +39
acc -10
acc +40
jmp +6
jmp +143
jmp -44
acc +32
acc -8
jmp +358
jmp +248
nop +343
nop -11
jmp +116
jmp +74
jmp +120
acc +37
acc -19
acc +36
jmp +341
acc +49
jmp -164
acc +14
acc +13
acc +0
acc +50
jmp +291
jmp +1
jmp -79
acc +19
jmp +243
acc +25
acc -13
acc -12
acc -7
jmp +228
jmp -81
acc +18
nop -163
acc +0
acc +8
jmp +212
acc +38
acc -12
jmp +6
acc +24
acc +42
acc +21
acc +12
jmp +136
acc -12
acc -2
acc +46
acc +35
jmp +290
acc +6
acc +36
jmp -182
acc +14
acc +7
jmp +228
jmp -19
acc +48
acc +25
jmp +106
jmp +70
acc +24
jmp +1
acc +24
acc +29
jmp -156
nop +296
acc +34
jmp +115
acc -12
acc +41
jmp +28
jmp +165
acc +0
acc +24
acc +42
acc +27
jmp +106
acc +24
acc -11
acc +4
acc -6
jmp -180
acc -2
jmp +2
jmp -314
acc -9
acc +1
jmp -327
acc -8
acc +7
acc -6
acc +32
jmp -157
acc +10
acc +10
acc -16
jmp +278
jmp +6
acc +0
nop +178
acc +26
jmp +231
jmp +175
acc +29
acc +36
acc +7
jmp -255
acc +46
acc +45
acc +7
nop -7
jmp -101
jmp +3
acc -13
jmp -140
nop -115
jmp +1
jmp -336
acc +9
acc +9
nop -68
acc -3
jmp -37
acc -13
nop +128
jmp +1
jmp -90
acc +49
jmp -124
acc +16
acc +9
jmp +212
acc -18
jmp -303
acc +33
acc +23
acc +26
jmp +140
acc +25
nop -123
acc +22
jmp +148
acc +1
acc +44
jmp -352
acc -11
jmp +33
acc +16
nop -199
acc +15
jmp -351
jmp +5
jmp -357
nop -284
acc +32
jmp -43
acc +5
acc +23
acc +3
jmp +59
acc -10
nop -266
nop +43
jmp +79
acc +21
jmp -42
acc +35
acc +5
jmp +68
acc +24
acc -4
jmp -155
acc +45
jmp +154
jmp -311
acc +10
acc +17
acc +39
jmp -297
jmp -175
acc +49
jmp -151
acc -4
acc -9
jmp -219
acc +48
acc -17
acc +30
jmp -9
acc +10
jmp -61
nop -396
acc +11
acc +37
jmp -331
acc +14
acc +22
acc +30
acc +2
jmp -43
nop -265
acc +5
acc +40
acc -15
jmp -35
acc -3
acc +24
jmp -415
acc +0
jmp +98
acc +17
acc +25
nop -48
acc -17
jmp -302
acc +11
acc +11
jmp -181
acc +46
acc +19
jmp -331
nop +90
acc +45
acc +8
jmp -237
acc -11
nop -421
jmp -145
acc -16
acc +47
jmp -387
acc +50
jmp -375
acc +38
jmp +1
jmp -225
acc +47
acc +39
jmp +69
acc +46
acc +41
jmp -89
acc +19
jmp -453
nop +63
acc +18
jmp -386
nop -243
acc +48
jmp +70
acc +25
jmp -191
acc +48
acc +31
jmp +40
acc -10
jmp -46
acc +45
jmp -48
jmp -12
acc +16
acc -16
jmp -120
acc -10
jmp +1
acc -10
jmp -124
acc +48
acc +15
acc +8
acc -15
jmp -66
nop -130
acc +16
acc +10
acc +31
jmp -375
acc +9
acc +20
jmp -37
acc +14
jmp -134
acc -9
acc -6
jmp -120
acc +24
acc +17
acc +49
jmp -332
acc +7
acc +35
nop -149
jmp -103
jmp -277
acc -1
acc +28
nop -211
jmp -371
nop -129
acc -15
acc +6
acc +19
jmp -120
acc -6
jmp -79
acc +0
jmp -64
acc +33
acc +33
jmp -440
jmp -85
acc +37
nop -183
acc +24
acc +42
jmp -545
acc +50
acc +6
jmp -7
nop +8
acc +1
jmp -359
acc -1
nop -388
acc -7
acc +28
jmp -211
jmp -384
acc +32
acc +16
acc +40
jmp +17
acc +0
acc +43
acc -14
jmp -512
nop -264
jmp -474
nop -543
acc +17
nop -288
jmp -38
jmp +24
acc -4
jmp -321
acc +49
acc -16
jmp -532
acc +0
acc -11
acc -16
jmp -104
acc -12
jmp -301
acc +6
nop -498
acc +0
jmp -126
nop -127
acc +1
jmp -6
acc +40
jmp -547
acc +16
acc +18
jmp -123
acc -5
acc +27
acc +44
acc +15
jmp -22
acc +48
acc -18
jmp -350
acc -7
acc +30
acc +26
jmp +1
jmp +1"""
lines = input.split("\n")
commands = []
for line in lines:
c, i = line.split(' ')
if c == "nop":
com = nop()
com.parse_args(i)
commands.append(com)
elif c == "jmp":
com = jmp()
com.parse_args(i)
commands.append(com)
elif c == "acc":
com = acc()
com.parse_args(i)
commands.append(com)
#print(commands)
#s = stack(commands)
#while(s.next()):
# pass
#print(s.acc)
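# Part 2: flip one nop<->jmp at a time and rerun; the fixed program is the
# one that halts by stepping just past the final instruction.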
for it in range(len(commands)):
commands_2 = commands[:]
if isinstance(commands[it], nop):
com = jmp()
com.parse_args(commands[it].args)
commands_2[it] = com
if isinstance(commands[it], jmp):
com = nop()
commands_2[it] = com
if isinstance(commands[it], acc):
continue
s2 = stack_2(commands_2)
while(s2.next()):
pass
if s2.sp == len(commands_2):
print(s2.acc)
break
| 11.69016 | 60 | 0.550336 | input = """nop +116
acc +12
acc -8
acc +34
jmp +485
acc +42
jmp +388
acc +36
nop +605
acc +17
jmp +411
acc +49
jmp +1
acc -9
jmp +289
jmp +288
jmp +74
acc +4
acc +42
jmp +258
acc +14
acc -13
nop +106
jmp +280
jmp +534
acc +41
acc +40
jmp +224
acc +43
acc +10
nop +240
jmp +211
acc +7
acc -3
acc +7
jmp +1
jmp +559
jmp +415
jmp +528
acc -16
jmp +568
jmp +442
nop +113
jmp +464
acc +42
jmp +336
acc -2
acc +39
jmp +251
acc -4
acc +42
jmp +528
acc +5
acc +30
nop +429
acc +49
jmp +86
acc +15
nop +145
acc -8
jmp +1
jmp +404
acc +26
acc +50
jmp +251
acc +47
jmp +1
acc +45
acc -5
jmp +357
acc +31
jmp +62
acc +25
nop +540
acc -13
acc +0
jmp +72
acc +28
acc +36
nop +475
acc -17
jmp +166
acc +4
acc +20
acc +30
acc +43
jmp +464
acc +4
jmp +94
jmp +44
nop +446
acc -16
nop +267
acc +30
jmp +519
acc +45
acc +47
jmp +62
acc +28
acc -13
acc +45
jmp +239
acc +12
jmp +1
nop +153
jmp +245
jmp +244
acc -12
jmp +308
jmp +190
jmp -86
acc +45
acc +1
acc +15
acc +30
jmp +350
acc +30
jmp +42
jmp +214
jmp +447
acc +24
jmp +453
acc +29
acc +42
jmp +302
acc -4
acc +33
jmp +447
acc -18
acc +15
acc -2
jmp -24
jmp -4
jmp +35
acc +0
jmp -83
acc -13
nop +437
acc -15
jmp +95
nop +289
jmp +348
acc +17
acc +23
acc +45
jmp +359
acc +18
jmp +352
acc +0
acc +13
acc +25
acc +11
jmp +331
acc -2
jmp +19
jmp -103
acc +34
acc +48
jmp +141
acc +44
jmp +1
acc +42
jmp +374
acc +45
acc +35
nop -37
acc -2
jmp +244
jmp +151
acc +36
acc +4
nop -64
jmp +231
nop +321
nop +291
acc +16
jmp -161
acc +17
nop +412
nop -89
nop +179
jmp -8
nop -167
acc +44
acc +4
jmp +42
acc +22
acc +28
acc +22
jmp +192
acc -18
acc -7
jmp -70
acc +27
acc +25
jmp +312
acc +50
acc -16
jmp -121
acc +14
acc +43
nop -111
jmp -54
nop +39
acc -4
acc +41
jmp +236
acc -11
jmp -118
jmp +150
acc -15
jmp -141
acc +14
jmp +1
acc -8
jmp -96
acc +11
nop -95
jmp +1
acc +47
jmp -113
nop +257
jmp +35
acc +45
acc +25
acc -6
jmp +31
jmp +1
nop +153
nop -39
jmp +25
acc +0
acc +50
jmp +362
acc -15
acc +0
acc +31
acc +22
jmp +69
acc -18
acc +24
jmp -38
acc +39
acc -10
acc +40
jmp +6
jmp +143
jmp -44
acc +32
acc -8
jmp +358
jmp +248
nop +343
nop -11
jmp +116
jmp +74
jmp +120
acc +37
acc -19
acc +36
jmp +341
acc +49
jmp -164
acc +14
acc +13
acc +0
acc +50
jmp +291
jmp +1
jmp -79
acc +19
jmp +243
acc +25
acc -13
acc -12
acc -7
jmp +228
jmp -81
acc +18
nop -163
acc +0
acc +8
jmp +212
acc +38
acc -12
jmp +6
acc +24
acc +42
acc +21
acc +12
jmp +136
acc -12
acc -2
acc +46
acc +35
jmp +290
acc +6
acc +36
jmp -182
acc +14
acc +7
jmp +228
jmp -19
acc +48
acc +25
jmp +106
jmp +70
acc +24
jmp +1
acc +24
acc +29
jmp -156
nop +296
acc +34
jmp +115
acc -12
acc +41
jmp +28
jmp +165
acc +0
acc +24
acc +42
acc +27
jmp +106
acc +24
acc -11
acc +4
acc -6
jmp -180
acc -2
jmp +2
jmp -314
acc -9
acc +1
jmp -327
acc -8
acc +7
acc -6
acc +32
jmp -157
acc +10
acc +10
acc -16
jmp +278
jmp +6
acc +0
nop +178
acc +26
jmp +231
jmp +175
acc +29
acc +36
acc +7
jmp -255
acc +46
acc +45
acc +7
nop -7
jmp -101
jmp +3
acc -13
jmp -140
nop -115
jmp +1
jmp -336
acc +9
acc +9
nop -68
acc -3
jmp -37
acc -13
nop +128
jmp +1
jmp -90
acc +49
jmp -124
acc +16
acc +9
jmp +212
acc -18
jmp -303
acc +33
acc +23
acc +26
jmp +140
acc +25
nop -123
acc +22
jmp +148
acc +1
acc +44
jmp -352
acc -11
jmp +33
acc +16
nop -199
acc +15
jmp -351
jmp +5
jmp -357
nop -284
acc +32
jmp -43
acc +5
acc +23
acc +3
jmp +59
acc -10
nop -266
nop +43
jmp +79
acc +21
jmp -42
acc +35
acc +5
jmp +68
acc +24
acc -4
jmp -155
acc +45
jmp +154
jmp -311
acc +10
acc +17
acc +39
jmp -297
jmp -175
acc +49
jmp -151
acc -4
acc -9
jmp -219
acc +48
acc -17
acc +30
jmp -9
acc +10
jmp -61
nop -396
acc +11
acc +37
jmp -331
acc +14
acc +22
acc +30
acc +2
jmp -43
nop -265
acc +5
acc +40
acc -15
jmp -35
acc -3
acc +24
jmp -415
acc +0
jmp +98
acc +17
acc +25
nop -48
acc -17
jmp -302
acc +11
acc +11
jmp -181
acc +46
acc +19
jmp -331
nop +90
acc +45
acc +8
jmp -237
acc -11
nop -421
jmp -145
acc -16
acc +47
jmp -387
acc +50
jmp -375
acc +38
jmp +1
jmp -225
acc +47
acc +39
jmp +69
acc +46
acc +41
jmp -89
acc +19
jmp -453
nop +63
acc +18
jmp -386
nop -243
acc +48
jmp +70
acc +25
jmp -191
acc +48
acc +31
jmp +40
acc -10
jmp -46
acc +45
jmp -48
jmp -12
acc +16
acc -16
jmp -120
acc -10
jmp +1
acc -10
jmp -124
acc +48
acc +15
acc +8
acc -15
jmp -66
nop -130
acc +16
acc +10
acc +31
jmp -375
acc +9
acc +20
jmp -37
acc +14
jmp -134
acc -9
acc -6
jmp -120
acc +24
acc +17
acc +49
jmp -332
acc +7
acc +35
nop -149
jmp -103
jmp -277
acc -1
acc +28
nop -211
jmp -371
nop -129
acc -15
acc +6
acc +19
jmp -120
acc -6
jmp -79
acc +0
jmp -64
acc +33
acc +33
jmp -440
jmp -85
acc +37
nop -183
acc +24
acc +42
jmp -545
acc +50
acc +6
jmp -7
nop +8
acc +1
jmp -359
acc -1
nop -388
acc -7
acc +28
jmp -211
jmp -384
acc +32
acc +16
acc +40
jmp +17
acc +0
acc +43
acc -14
jmp -512
nop -264
jmp -474
nop -543
acc +17
nop -288
jmp -38
jmp +24
acc -4
jmp -321
acc +49
acc -16
jmp -532
acc +0
acc -11
acc -16
jmp -104
acc -12
jmp -301
acc +6
nop -498
acc +0
jmp -126
nop -127
acc +1
jmp -6
acc +40
jmp -547
acc +16
acc +18
jmp -123
acc -5
acc +27
acc +44
acc +15
jmp -22
acc +48
acc -18
jmp -350
acc -7
acc +30
acc +26
jmp +1
jmp +1"""
class stack:
def __init__(self, commands):
self.acc = 0
self.commands = commands
self.sp = 0
self.visited = set()
def next(self):
print(self.sp)
print(self.commands[self.sp])
if self.sp in self.visited:
return False
self.visited.add(self.sp)
self.commands[self.sp].execute(self)
return True
class stack_2:
def __init__(self, commands):
self.acc = 0
self.commands = commands
self.sp = 0
self.visited = set()
def next(self):
if self.sp >= len(self.commands) or self.sp < 0:
return False
if self.sp in self.visited:
return False
self.visited.add(self.sp)
self.commands[self.sp].execute(self)
return True
class command(object):
def __init__(self):
pass
    def name(self):
        pass
    def parse_args(self, args):
        pass
def execute(self, stack):
pass
def __repr__(self):
return self.name()
class nop(command):
def __init__(self):
command.__init__(self)
def name(self):
return "nop"
def parse_args(self, args):
self.args = args
def execute(self, stack):
stack.sp += 1
def __repr__(self):
return command.__repr__(self)
class acc(command):
def __init__(self):
command.__init__(self)
def name(self):
return "acc"
def parse_args(self, args):
self.inc = int(args.strip().rstrip())
def execute(self, stack):
stack.acc += self.inc
stack.sp += 1
def __repr__(self):
return command.__repr__(self) + " " + str(self.inc)
class jmp(command):
def __init__(self):
command.__init__(self)
def name(self):
return "jmp"
def parse_args(self, args):
self.jp = int(args.strip().rstrip())
def execute(self, stack):
stack.sp += self.jp
def __repr__(self):
return command.__repr__(self) + " " + str(self.jp)
lines = input.split("\n")
commands = []
for line in lines:
c, i = line.split(' ')
if c == "nop":
com = nop()
com.parse_args(i)
commands.append(com)
elif c == "jmp":
com = jmp()
com.parse_args(i)
commands.append(com)
elif c == "acc":
com = acc()
com.parse_args(i)
commands.append(com)
#print(commands)
#s = stack(commands)
#while(s.next()):
# pass
#print(s.acc)
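# Part 2: flip one nop<->jmp at a time and rerun; the fixed program is the
# one that halts by stepping just past the final instruction.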
for it in range(len(commands)):
commands_2 = commands[:]
if isinstance(commands[it], nop):
com = jmp()
com.parse_args(commands[it].args)
commands_2[it] = com
if isinstance(commands[it], jmp):
com = nop()
commands_2[it] = com
if isinstance(commands[it], acc):
continue
s2 = stack_2(commands_2)
while(s2.next()):
pass
if s2.sp == len(commands_2):
print(s2.acc)
break
| 1,316 | -21 | 810 |
9cac231cf2fe33aff15be82cbd15f524945be850 | 7,744 | py | Python | radloggerpy/tests/cli/device/test_device_show.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | radloggerpy/tests/cli/device/test_device_show.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | radloggerpy/tests/cli/device/test_device_show.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
# Copyright (c) 2020 Dantali0n
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import copy
from unittest import mock
from cliff.show import ShowOne
from sqlalchemy.orm.exc import MultipleResultsFound
from radloggerpy.cli.v1.device import device_show
from radloggerpy.device.device_manager import DeviceManager as dm
from radloggerpy.tests import base
from radloggerpy.types.device_interfaces import DeviceInterfaces
from radloggerpy.types.device_interfaces import INTERFACE_CHOICES
from radloggerpy.types.device_types import DeviceTypes
from radloggerpy.types.serial_bytesize import SerialBytesizeTypes
from radloggerpy.types.serial_parity import SerialParityTypes
from radloggerpy.types.serial_stopbit import SerialStopbitTypes
| 38.336634 | 76 | 0.685434 | # -*- encoding: utf-8 -*-
# Copyright (c) 2020 Dantali0n
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import copy
from unittest import mock
from cliff.show import ShowOne
from sqlalchemy.orm.exc import MultipleResultsFound
from radloggerpy.cli.v1.device import device_show
from radloggerpy.device.device_manager import DeviceManager as dm
from radloggerpy.tests import base
from radloggerpy.types.device_interfaces import DeviceInterfaces
from radloggerpy.types.device_interfaces import INTERFACE_CHOICES
from radloggerpy.types.device_types import DeviceTypes
from radloggerpy.types.serial_bytesize import SerialBytesizeTypes
from radloggerpy.types.serial_parity import SerialParityTypes
from radloggerpy.types.serial_stopbit import SerialStopbitTypes
class TestDeviceShow(base.TestCase):
def setUp(self):
super(TestDeviceShow, self).setUp()
@mock.patch.object(device_show, 'super')
def test_parser(self, m_super):
m_parser = mock.Mock()
m_super.return_value.get_parser.return_value = m_parser
bases = copy(device_show.DeviceShow.__bases__)
f_bases = tuple(base for base in bases if base != ShowOne)
m_base = mock.patch.object(
device_show.DeviceShow, '__bases__', f_bases)
with m_base:
m_base.is_local = True
t_device = device_show.DeviceShow()
t_device._add_interfaces = mock.Mock()
t_device._add_implementations = mock.Mock()
t_device.register_arguments = mock.Mock()
t_device.get_parser("test")
t_device._add_interfaces.assert_called_once()
t_device._add_implementations.assert_called_once()
t_device.register_arguments.assert_called_once_with(m_parser)
# ensure that is_local on the patch does not modify the actual bases
self.assertEqual(bases, device_show.DeviceShow.__bases__)
@mock.patch.object(device_show, 'DeviceObject')
def test_take_action(self, m_dev_obj):
bases = copy(device_show.DeviceShow.__bases__)
f_bases = tuple(base for base in bases if base != ShowOne)
m_args = mock.Mock()
m_args._get_kwargs.return_value = {'detailed': None}
m_mod_dev = mock.Mock()
m_mod_dev.id = 1
m_mod_dev.name = 'test'
m_mod_dev.type = DeviceTypes.AVERAGE
m_mod_dev.interface = DeviceInterfaces.SERIAL
m_mod_dev.implementation = dm.get_device_implementations()[0].NAME
m_dev_obj.find.return_value = m_mod_dev
m_base = mock.patch.object(
device_show.DeviceShow, '__bases__', f_bases)
with m_base:
m_base.is_local = True
t_device = device_show.DeviceShow()
t_device.app = mock.Mock()
t_result = t_device.take_action(m_args)
self.assertEqual(t_result[1][0], m_mod_dev.id)
self.assertEqual(t_result[1][1], m_mod_dev.name)
self.assertEqual(t_result[1][2], m_mod_dev.type)
self.assertEqual(t_result[1][3], m_mod_dev.interface)
self.assertEqual(t_result[1][4], m_mod_dev.implementation)
# ensure that is_local on the patch does not modify the actual bases
self.assertEqual(bases, device_show.DeviceShow.__bases__)
@mock.patch.object(device_show, 'SerialDeviceObject')
@mock.patch.object(device_show, 'DeviceObject')
def test_take_action_details_serial(self, m_dev_obj, m_dev_ser_obj):
bases = copy(device_show.DeviceShow.__bases__)
f_bases = tuple(base for base in bases if base != ShowOne)
m_args = mock.Mock()
m_args._get_kwargs.return_value = {'detailed': True}
m_mod_dev = mock.Mock()
m_mod_dev.id = 1
m_mod_dev.name = 'test'
m_mod_dev.type = DeviceTypes.AVERAGE
m_mod_dev.interface = INTERFACE_CHOICES[DeviceInterfaces.SERIAL]
m_mod_dev.implementation = dm.get_device_implementations()[0].NAME
m_dev_obj.find.return_value = m_mod_dev
m_mod_ser_dev = mock.Mock()
m_mod_ser_dev.port = '/dev/ttyUSB0'
m_mod_ser_dev.baudrate = 9600
m_mod_ser_dev.bytesize = SerialBytesizeTypes.FIVEBITS
m_mod_ser_dev.parity = SerialParityTypes.PARITY_NONE
m_mod_ser_dev.stopbits = SerialStopbitTypes.STOPBITS_ONE
m_mod_ser_dev.timeout = None
m_dev_ser_obj.find.return_value = m_mod_ser_dev
m_base = mock.patch.object(
device_show.DeviceShow, '__bases__', f_bases)
with m_base:
m_base.is_local = True
t_device = device_show.DeviceShow()
t_device.app = mock.Mock()
t_result = t_device.take_action(m_args)
self.assertEqual(t_result[1][0], m_mod_dev.id)
self.assertEqual(t_result[1][1], m_mod_dev.name)
self.assertEqual(t_result[1][2], m_mod_dev.type)
self.assertEqual(t_result[1][3], m_mod_dev.interface)
self.assertEqual(t_result[1][4], m_mod_dev.implementation)
self.assertEqual(t_result[1][5], m_mod_ser_dev.port)
self.assertEqual(t_result[1][6], m_mod_ser_dev.baudrate)
self.assertEqual(t_result[1][7], m_mod_ser_dev.bytesize)
self.assertEqual(t_result[1][8], m_mod_ser_dev.parity)
self.assertEqual(t_result[1][9], m_mod_ser_dev.stopbits)
self.assertEqual(t_result[1][10], m_mod_ser_dev.timeout)
# ensure that is_local on the patch does not modify the actual bases
self.assertEqual(bases, device_show.DeviceShow.__bases__)
@mock.patch.object(device_show, 'DeviceObject')
def test_take_action_none(self, m_dev_obj):
bases = copy(device_show.DeviceShow.__bases__)
f_bases = tuple(base for base in bases if base != ShowOne)
m_args = mock.Mock()
m_args._get_kwargs.return_value = {'detailed': None}
m_dev_obj.find.return_value = None
m_base = mock.patch.object(
device_show.DeviceShow, '__bases__', f_bases)
with m_base:
m_base.is_local = True
t_device = device_show.DeviceShow()
t_device.app = mock.Mock()
self.assertRaises(RuntimeWarning, t_device.take_action, m_args)
# ensure that is_local on the patch does not modify the actual bases
self.assertEqual(bases, device_show.DeviceShow.__bases__)
@mock.patch.object(device_show, 'DeviceObject')
def test_take_action_multiple(self, m_dev_obj):
bases = copy(device_show.DeviceShow.__bases__)
f_bases = tuple(base for base in bases if base != ShowOne)
m_args = mock.Mock()
m_args._get_kwargs.return_value = {'detailed': None}
m_dev_obj.find.side_effect = MultipleResultsFound()
m_base = mock.patch.object(
device_show.DeviceShow, '__bases__', f_bases)
with m_base:
m_base.is_local = True
t_device = device_show.DeviceShow()
t_device.app = mock.Mock()
self.assertRaises(RuntimeWarning, t_device.take_action, m_args)
# ensure that is_local on the patch does not modify the actual bases
self.assertEqual(bases, device_show.DeviceShow.__bases__)
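# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the tests above
# temporarily rewrite DeviceShow.__bases__ so the cliff ShowOne machinery is
# bypassed during instantiation. Patching __bases__ only restores cleanly when
# the patcher is marked is_local, because __bases__ cannot be deleted on exit,
# only reassigned. The demo classes below are invented purely for illustration.
class _DemoBase(object):
    pass
class _DemoMixin(object):
    pass
class _DemoCommand(_DemoBase, _DemoMixin):
    pass
def _demo_patch_bases():
    original = copy(_DemoCommand.__bases__)
    patcher = mock.patch.object(_DemoCommand, '__bases__', (_DemoBase,))
    with patcher:
        patcher.is_local = True  # restore by assignment instead of deletion
        assert _DemoCommand.__bases__ == (_DemoBase,)
    # once the context exits, the original bases are back untouched
    assert _DemoCommand.__bases__ == original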
| 5,964 | 488 | 23 |
c7bc657d45cc79f3ce95dbf8ffc06c54afd1ceaf | 1,884 | py | Python | app/core/tests/test_commands.py | SuEpoch254/recipe-app-api | bdd37b2b1b8cc0ed7d67ed19375b51d2ad315106 | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | SuEpoch254/recipe-app-api | bdd37b2b1b8cc0ed7d67ed19375b51d2ad315106 | [
"MIT"
] | null | null | null | app/core/tests/test_commands.py | SuEpoch254/recipe-app-api | bdd37b2b1b8cc0ed7d67ed19375b51d2ad315106 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
| 43.813953 | 79 | 0.673567 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            # Note: Use patch to mock the connection handler to just return True
# every time it's called.
gi.return_value = True
# Note: So during test, we're rewriting connection handler
# Now we can test our call command.
call_command('wait_for_db')
# Check the mock object gi (the connection handler) was called once
self.assertEqual(gi.call_count, 1)
    # Now we want to check that the wait_for_db command will try the connection
    # five times and succeed (and continue) on the sixth attempt.
    # We use patch as a decorator to mock time.sleep, which would otherwise
    # make the command wait a second before retrying the connection; patching
    # it speeds up the test.
    # Even though we don't use ts, we still need to accept it as an argument,
    # otherwise the test will raise an error.
@patch('time.sleep', return_value=None)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
        # We again use the context manager patch to pass the handler in as gi
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
# Instead of changing the value we change the side_effect
# We want to make it raise the operational error 5 times. On the
# 6th it will proceed. This is what the construction below means.
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6) | 0 | 1,702 | 23 |
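# ---------------------------------------------------------------------------
# A minimal sketch of the `wait_for_db` management command exercised by the
# tests above. It is inferred from the tests (command name, the
# ConnectionHandler.__getitem__ lookup and the one second retry); the module
# path app/core/management/commands/wait_for_db.py is an assumption.
import time
from django.core.management.base import BaseCommand
from django.db import connections
from django.db.utils import OperationalError
class Command(BaseCommand):
    """Pause execution until the database connection is available."""
    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                # indexing `connections` calls ConnectionHandler.__getitem__,
                # which is exactly what the tests patch
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))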
e0b2a8431f343b1996b3b1f088fb8091f32ff84a | 2,500 | py | Python | faucet.py | topnoom259/railblock | b164ae05d70a2c6af637bcf0f07c74f60f81e175 | [
"BSD-3-Clause"
] | 1 | 2018-11-09T08:53:23.000Z | 2018-11-09T08:53:23.000Z | faucet.py | topnoom259/railblock | b164ae05d70a2c6af637bcf0f07c74f60f81e175 | [
"BSD-3-Clause"
] | null | null | null | faucet.py | topnoom259/railblock | b164ae05d70a2c6af637bcf0f07c74f60f81e175 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# RaiBlocks Telegram bot
# @RaiWalletBot https://t.me/RaiWalletBot
#
# Source code:
# https://github.com/SergiySW/RaiWalletBot
#
# Released under the BSD 3-Clause License
#
#
# Run by cron every hour, 1-2 minutes after distribution starts
# With new rules it can be inaccurate
#
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import Bot, ParseMode
import logging
import urllib3, certifi, socket, json
import time, math
# Parse config
import ConfigParser
config = ConfigParser.ConfigParser()
config.read('bot.cfg')
api_key = config.get('main', 'api_key')
log_file_faucet = config.get('main', 'log_file_faucet')
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO, filename=log_file_faucet)
logger = logging.getLogger(__name__)
# MySQL requests
from common_mysql import mysql_select_accounts_list, mysql_select_blacklist, mysql_select_language, mysql_select_accounts_list_extra
# Common functions
from common import push_simple
# Translation
with open('language.json') as lang_file:
language = json.load(lang_file)
# Faucet
faucet() | 27.777778 | 132 | 0.7368 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# RaiBlocks Telegram bot
# @RaiWalletBot https://t.me/RaiWalletBot
#
# Source code:
# https://github.com/SergiySW/RaiWalletBot
#
# Released under the BSD 3-Clause License
#
#
# Run by cron every hour, 1-2 minutes after distribution starts
# With new rules it can be inaccurate
#
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import Bot, ParseMode
import logging
import urllib3, certifi, socket, json
import time, math
# Parse config
import ConfigParser
config = ConfigParser.ConfigParser()
config.read('bot.cfg')
api_key = config.get('main', 'api_key')
log_file_faucet = config.get('main', 'log_file_faucet')
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO, filename=log_file_faucet)
logger = logging.getLogger(__name__)
# MySQL requests
from common_mysql import mysql_select_accounts_list, mysql_select_blacklist, mysql_select_language, mysql_select_accounts_list_extra
# Common functions
from common import push_simple
# Translation
with open('language.json') as lang_file:
language = json.load(lang_file)
def lang(user_id, text_id):
lang_id = mysql_select_language(user_id)
try:
return language[lang_id][text_id]
except KeyError:
return language['en'][text_id]
# Faucet
def faucet():
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
url = 'https://faucet.raiblockscommunity.net/paylist.php?json=1'
response = http.request('GET', url)
json_paylist = json.loads(response.data)
#save it
with open('paylist.json', 'w') as outfile:
json.dump(json_paylist, outfile)
json_array = json_paylist['pending']
bot = Bot(api_key)
# list from MySQL
accounts_list_orig = mysql_select_accounts_list()
accounts_list_extra = mysql_select_accounts_list_extra()
accounts_list = accounts_list_orig + accounts_list_extra
# blacklist
BLACK_LIST = mysql_select_blacklist()
for account in accounts_list:
for paylist in json_array:
if ((paylist['account'] == account[1]) and (account[0] not in BLACK_LIST)):
claims = int(paylist['pending'])
text = lang(account[0], 'faucet_claims').format("{:,}".format(claims))
try:
push_simple(bot, account[0], text)
except Exception as e:
logging.warn('Push failed for {0}\n{1}'.format(account[0], text))
#print(text)
logging.info('{0}\n{1}'.format(account[0], text))
time.sleep(1.25)
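# The community faucet paylist JSON consumed above is assumed to look roughly
# like the following (shape inferred from the fields accessed in faucet()):
#   {"pending": [{"account": "xrb_...", "pending": "1234"}, ...]}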
faucet() | 1,237 | 0 | 44 |
ff12e324df12b9288d3b1c80e2fba0ab24fcb1d8 | 1,762 | py | Python | 2020/day19/solve2.py | krother/advent_of_code | fd7d5199666b2f3a60c41c6cf24b747322ad88e5 | [
"MIT"
] | 3 | 2021-12-01T09:27:34.000Z | 2022-02-24T23:35:56.000Z | 2020/day19/solve2.py | krother/advent_of_code | fd7d5199666b2f3a60c41c6cf24b747322ad88e5 | [
"MIT"
] | null | null | null | 2020/day19/solve2.py | krother/advent_of_code | fd7d5199666b2f3a60c41c6cf24b747322ad88e5 | [
"MIT"
] | null | null | null |
# TODO: check https://en.wikipedia.org/wiki/Earley_parser
data = '''
0: 1 2 3
1: "a"
2: "b"
3: 1 1 4 2
4: 1 | 2
abaabb
baaabb
ababab
abaaab
abaabb
abaabc'''.strip()
def solve(rules, message):
"""check whether message matches r"""
queue = [(0, ['0'])]
while queue:
position, (rule, *following) = queue.pop()
rule = rules[rule]
# skip too short messages
if position >= len(message):
continue
if is_char(rule):
if message[position] == rule[0][1]:
if not following and position == len(message) - 1:
# match last char with terminal -> success
return 1
if following:
queue.append((position + 1, following))
else:
# check OR blocks
for subrule in rule:
queue.append((position, subrule + following))
return 0
rules, msg = parse(data)
assert count(rules, msg) == 3
rules, msg = parse(open('input_noloop.txt').read())
assert count(rules, msg) == 3
rules, msg = parse(open('input.txt').read())
assert count(rules, msg) == 12
rules, msg = parse(open('input_big_mod.txt').read()) # --> 306
print(count(rules, msg))
| 22.025 | 66 | 0.529512 |
# TODO: check https://en.wikipedia.org/wiki/Earley_parser
data = '''
0: 1 2 3
1: "a"
2: "b"
3: 1 1 4 2
4: 1 | 2
abaabb
baaabb
ababab
abaaab
abaabb
abaabc'''.strip()
def parse(data):
r, msg = data.strip().split('\n\n')
msg = msg.split('\n')
rules = {}
for line in r.split('\n'):
k, v = line.split(': ')
v = v.split('|')
if not v[0][0] == '"':
v = [x.split() for x in v]
rules[k] = v
return rules, msg
def is_char(rule):
if len(rule) == 1 and rule[0][0] == '"':
return True
def solve(rules, message):
"""check whether message matches r"""
queue = [(0, ['0'])]
while queue:
position, (rule, *following) = queue.pop()
rule = rules[rule]
# skip too short messages
if position >= len(message):
continue
if is_char(rule):
if message[position] == rule[0][1]:
if not following and position == len(message) - 1:
# match last char with terminal -> success
return 1
if following:
queue.append((position + 1, following))
else:
# check OR blocks
for subrule in rule:
queue.append((position, subrule + following))
return 0
def count(rules, messages):
    return sum(solve(rules, message) for message in messages)
rules, msg = parse(data)
assert count(rules, msg) == 3
rules, msg = parse(open('input_noloop.txt').read())
assert count(rules, msg) == 3
rules, msg = parse(open('input.txt').read())
assert count(rules, msg) == 12
rules, msg = parse(open('input_big_mod.txt').read()) # --> 306
print(count(rules, msg))
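# Worked example for the sample grammar above: rule 0 expands to
#   1 2 3  ->  a b (1 1 4 2)  ->  a b a a (a|b) b
# so a six character message matches exactly when it has the form 'abaa?b'
# with '?' either letter, which accepts 'abaabb' and 'abaaab' but rejects
# 'ababab' and explains the count of 3 for the sample data.
assert solve(parse(data)[0], 'abaabb') == 1
assert solve(parse(data)[0], 'ababab') == 0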
| 413 | 0 | 69 |
c9da19a1134f1020da00255ec415e6701fe851e1 | 14,390 | py | Python | MainWindow.py | Khairiazim/Uniten-sprout | 1f1ed2368e57c8977aedd59284945661a5befe02 | [
"BSD-3-Clause"
] | null | null | null | MainWindow.py | Khairiazim/Uniten-sprout | 1f1ed2368e57c8977aedd59284945661a5befe02 | [
"BSD-3-Clause"
] | null | null | null | MainWindow.py | Khairiazim/Uniten-sprout | 1f1ed2368e57c8977aedd59284945661a5befe02 | [
"BSD-3-Clause"
] | null | null | null | #todo / schedule/ internet connection
#todo instabot.log at api.py / instabot started at bot.py
#todo set follow followers count limit
#generate random number at server send to apps
#both check
import os
import random
import shutil
import sys
import time
import threading
import errno
import requests
import API_Util
from PyQt5 import QtCore,QtGui, QtWidgets
from PyQt5 import uic
from tqdm import tqdm
#IMPORT INSTABOT
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
from UI import mainwindow
#IMPORT DIC
from dic import dicAcc
############ IMPORT QWIDGET / QDIALOG #################
from Limit_Setting import Limit_Setting_class
from Login import Login_class
from Like import Like_class
############ WRITE FILE .TXT ################
hashtag_file = "Private/hashtagsdb.txt"
users_file = "Private/usersdb.txt"
whitelist = "Private/whitelist.txt"
blacklist = "Private/blacklist.txt"
userlist = "Private/userlist.txt"
comment = "Private/comments.txt"
setting = "Private/setting.txt"
SECRET_FILE = "Private/secret.txt"
unfollow_job_file = "Private/unfollow_job.txt"
follow_job_file = "Private/follow_job.txt"
follow_random_file = "Private/follow_random.txt"
like_job_file = "Private/like_job.txt"
like_random_file = "Private/like_random.txt"
comment_job_file = "Private/comment_job.txt"
comment_file = "Private/comment.txt"
comment_usertag_file = "Private/comment_usertag.txt"
class OutputWrapper(QtCore.QObject):
""" to show all output in ui text edit"""
outputWritten = QtCore.pyqtSignal(object, object)
############ OPEN QWIDGET / QDIALOG #################
if __name__== "__main__":
try:
os.makedirs("Private")
except OSError as e:
if e.errno != errno.EEXIST:
raise
app = QtWidgets.QApplication(sys.argv)
MainWindow = MainWindow_class()
MainWindow.show()
sys.exit(app.exec_())
| 35.183374 | 125 | 0.570327 | #todo / schedule/ internet connection
#todo instabot.log at api.py / instabot started at bot.py
#todo set follow followers count limit
#generate random number at server send to apps
#both check
import os
import random
import shutil
import sys
import time
import threading
import errno
import requests
import API_Util
from PyQt5 import QtCore,QtGui, QtWidgets
from PyQt5 import uic
from tqdm import tqdm
#IMPORT INSTABOT
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
from UI import mainwindow
#IMPORT DIC
from dic import dicAcc
############ IMPORT QWIDGET / QDIALOG #################
from Limit_Setting import Limit_Setting_class
from Login import Login_class
from Like import Like_class
############ WRITE FILE .TXT ################
hashtag_file = "Private/hashtagsdb.txt"
users_file = "Private/usersdb.txt"
whitelist = "Private/whitelist.txt"
blacklist = "Private/blacklist.txt"
userlist = "Private/userlist.txt"
comment = "Private/comments.txt"
setting = "Private/setting.txt"
SECRET_FILE = "Private/secret.txt"
unfollow_job_file = "Private/unfollow_job.txt"
follow_job_file = "Private/follow_job.txt"
follow_random_file = "Private/follow_random.txt"
like_job_file = "Private/like_job.txt"
like_random_file = "Private/like_random.txt"
comment_job_file = "Private/comment_job.txt"
comment_file = "Private/comment.txt"
comment_usertag_file = "Private/comment_usertag.txt"
class workThread(QtCore.QThread):
def __init__(self,parent=None):
super(workThread,self).__init__(parent)
def like_job(self):
with open(like_job_file, "r") as rf:
lines = rf.readlines()
like_lines = str(lines[0].strip())
with open(like_random_file, "r") as rff:
usertag = [line.rstrip() for line in rff]
if like_lines == "Like from hashtag":
random_tag = random.choice(usertag)
bot.like_hashtag(random_tag)
elif like_lines == "Like followers":
user_id = random.choice(usertag)
bot.like_followers(user_id)
elif like_lines == "Like following":
user_id = random.choice(usertag)
bot.like_following(user_id)
elif like_lines == "Like last media likers":
user_id = random.choice(usertag)
medias = bot.get_user_medias(user_id, filtration=False)
if len(medias):
likers = bot.get_media_likers(medias[0])
for liker in tqdm(likers):
bot.like_user(liker, amount=2, filtration=False)
else:
bot.like_timeline()
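    # Assumed contents of the two job files read by like_job() above:
    #   Private/like_job.txt    - its first line selects the action, e.g. "Like followers"
    #   Private/like_random.txt - one hashtag or user id per line, picked from at random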
def result_validation(self):
try:
result = requests.post(API_Util.API_URL + "api/user/check_validity/",
data={'api_key': API_Util.API_KEY,
'mac_guid': API_Util.get_machine_guid()})
result = result.json()
return result["status"]
except:
QtWidgets.QMessageBox.information(self, "Hi user","Cannot connect to the server. Please check your connection.")
def run(self):
print("workthread begin")
# while self.result_validation(): #while result 1
# try:
# print("like")
# # Thread_like = threading.Thread(target=self.like_job())
# # Thread_like.start()
# # Thread_like.join()
# #
# self.result_validation()
# except:
# print("\n \n \n LIKE ERROR : NO LIKE ACTIVITIES \n \n \n")
# break
QtWidgets.QMessageBox.warning(self, "Ooopps", "workthread stop")
class OutputWrapper(QtCore.QObject):
""" to show all output in ui text edit"""
outputWritten = QtCore.pyqtSignal(object, object)
def __init__(self, parent, stdout=True):
QtCore.QObject.__init__(self, parent)
if stdout:
self._stream = sys.stdout
sys.stdout = self
else:
self._stream = sys.stderr
sys.stderr = self
self._stdout = stdout
def write(self, text):
self._stream.write(text)
self.outputWritten.emit(text, self._stdout)
def __getattr__(self, name):
return getattr(self._stream, name)
def __del__(self):
try:
if self._stdout:
sys.stdout = self._stream
else:
sys.stderr = self._stream
except AttributeError:
pass
class MainWindow_class(mainwindow.Ui_MainWindow,QtWidgets.QMainWindow):
def __init__(self):
# QtWidgets.QMainWindow.__init__(self)
# uic.loadUi("UI/mainwindow2.ui",self)
super(mainwindow.Ui_MainWindow, self).__init__()#import ui.py
self.setupUi(self)
################ BUTTON IN MAINWINDOW ########################
#self.pb_Start.clicked.connect(self.start_bot)
self.pb_Start.clicked.connect(self.start_bot)
self.pb_Stop.clicked.connect(self.enable_button)
self.pb_Limit.clicked.connect(self.open_Limit_Setting)
self.pb_Account.clicked.connect(self.open_Login)
self.pb_Follow.clicked.connect(self.open_Follow)
self.pb_Like.clicked.connect(self.open_Like)
self.pb_Reset.clicked.connect(self.delete_all_txt) #todo
self.pb_Dm.clicked.connect(self.open_coming_soon)
self.actionLast_Activities.triggered.connect(self.server_Logout)#todo
########## workThread #########
self.workThread = workThread()
########## show output in QTextEdit #############
stdout = OutputWrapper(self, True)
stdout.outputWritten.connect(self.handleOutput)
stderr = OutputWrapper(self, False)
stderr.outputWritten.connect(self.handleOutput)
QtCore.QCoreApplication.processEvents()
# check package
global package
# package = API_Util.SESSION_DATA["acctype"].lower()
package = "pro"
print(package)
if package == "personal":
self.pb_Comment.setEnabled(False)
self.pb_Dm.setEnabled(False)
self.pb_Like.setEnabled(False)
if package == "business":
self.pb_Comment.setEnabled(False)
self.pb_Dm.setEnabled(False)
else:
pass
def disable_button(self):
self.pb_Account.setEnabled(False)
self.pb_Follow.setEnabled(False)
self.pb_Comment.setEnabled(False)
self.pb_Unfollow.setEnabled(False)
self.pb_Dm.setEnabled(False)
self.pb_Like.setEnabled(False)
self.pb_Limit.setEnabled(False)
self.pb_Reset.setEnabled(False)
self.pb_Start.setEnabled(False)
self.pb_blacklist.setEnabled(False)
self.pb_whitelist.setEnabled(False)
def enable_button(self):#todo
try:
self.workThread.terminate()
except:
print("workThread termination error")
QtCore.QCoreApplication.processEvents()
bot.logout()
QtCore.QCoreApplication.processEvents()
if package == "personal":
self.pb_Account.setEnabled(True)
self.pb_Follow.setEnabled(True)
self.pb_Unfollow.setEnabled(True)
self.pb_Limit.setEnabled(True)
self.pb_Reset.setEnabled(True)
self.pb_Start.setEnabled(True)
self.pb_blacklist.setEnabled(True)
self.pb_whitelist.setEnabled(True)
if package == "business":
self.pb_Account.setEnabled(True)
self.pb_Follow.setEnabled(True)
self.pb_Unfollow.setEnabled(True)
self.pb_Like.setEnabled(True)
self.pb_Limit.setEnabled(True)
self.pb_Reset.setEnabled(True)
self.pb_Start.setEnabled(True)
self.pb_blacklist.setEnabled(True)
self.pb_whitelist.setEnabled(True)
else:
self.pb_Account.setEnabled(True)
self.pb_Follow.setEnabled(True)
self.pb_Comment.setEnabled(True)
self.pb_Unfollow.setEnabled(True)
self.pb_Dm.setEnabled(True)
self.pb_Like.setEnabled(True)
self.pb_Limit.setEnabled(True)
self.pb_Reset.setEnabled(True)
self.pb_Start.setEnabled(True)
self.pb_blacklist.setEnabled(True)
self.pb_whitelist.setEnabled(True)
QtCore.QCoreApplication.processEvents()
def initial_checker(self):
try:
fh = open(hashtag_file, 'r')
fh = open(users_file, 'r')
fh = open(whitelist, 'r')
fh = open(blacklist, 'r')
#fh = open(comments, 'r')
#fh = open(setting, 'r')
except BaseException:
fh = open(hashtag_file, 'w')
fh = open(users_file, 'w')
fh = open(whitelist, 'w')
fh = open(blacklist, 'w')
#fh = open(comments, 'w')
#fh = open(setting, 'w')
def start_bot(self):
try:
# self.initial_checker() #todo
f = open(setting)
lines = f.readlines()
setting_0 = int(lines[0].strip())
setting_1 = int(lines[1].strip())
setting_2 = int(lines[2].strip())
setting_3 = int(lines[3].strip())
setting_4 = int(lines[4].strip())
setting_5 = int(lines[5].strip())
setting_6 = int(lines[6].strip())
setting_7 = int(lines[7].strip())
setting_8 = int(lines[8].strip())
setting_9 = int(lines[9].strip())
setting_10 = int(lines[10].strip())
setting_11 = int(lines[11].strip())
setting_12 = int(lines[12].strip())
setting_13 = int(lines[13].strip())
setting_14 = int(lines[14].strip())
setting_15 = int(lines[15].strip())
setting_16 = int(lines[16].strip())
setting_17 = int(lines[17].strip())
setting_18 = lines[18].strip()
global bot
bot = Bot(
max_likes_per_day=setting_0,
max_unlikes_per_day=setting_1,
max_follows_per_day=setting_2,
max_unfollows_per_day=setting_3,
max_comments_per_day=setting_4,
max_likes_to_like=setting_5,
max_followers_to_follow=setting_6,
min_followers_to_follow=setting_7,
max_following_to_follow=setting_8,
min_following_to_follow=setting_9,
max_followers_to_following_ratio=setting_10,
max_following_to_followers_ratio=setting_11,
min_media_count_to_follow=setting_12,
like_delay=setting_13,
unlike_delay=setting_14,
follow_delay=setting_15,
unfollow_delay=setting_16,
comment_delay=setting_17,
#proxy=setting_18, todo
whitelist=whitelist,
blacklist=blacklist,
comments_file=comment,
stop_words=[
'order',
'shop',
'store',
'free',
'doodleartindonesia',
'doodle art indonesia',
'fullofdoodleart',
'commission',
'vector',
'karikatur',
'jasa',
'open'])
print("Collecting data please wait ...........")
QtCore.QCoreApplication.processEvents()
self.disable_button()
QtCore.QCoreApplication.processEvents()
# username = dicAcc.ACCOUNT_INSTA["username"]
# password = dicAcc.ACCOUNT_INSTA["password"]
# bot.login(username=username, password=password)
######### WORK START HERE ################
QtCore.QCoreApplication.processEvents()
self.workThread.start()
QtCore.QCoreApplication.processEvents()
except:
QtWidgets.QMessageBox.warning(self, "Ooopps", "Please set your Account and Setting first")
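    # Note on Private/setting.txt read by start_bot() above: it is expected to
    # contain nineteen lines, the first eighteen being integers that map, in
    # order, to the Bot() keyword arguments (max_likes_per_day through
    # comment_delay) and the last line holding an optional proxy string.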
def server_Logout(self):
QtCore.QCoreApplication.processEvents()
# logout from server-> close
try:
requests.post(API_Util.API_URL + "api/user/app_logout/",
data={'api_key': API_Util.API_KEY,
'mac_guid': API_Util.get_machine_guid()})
self.close()
except:
QtWidgets.QMessageBox.information(self, "Hi user",
"Cannot connect to the server. Please check your connection.")
def delete_all_txt(self):
shutil.rmtree("Private")
def handleOutput(self, text, stdout):
self.textEdit.moveCursor(QtGui.QTextCursor.End)
self.textEdit.insertPlainText(text)
############ OPEN QWIDGET / QDIALOG #################
def open_Limit_Setting(self):
self.Limit_Setting = Limit_Setting_class()
self.Limit_Setting.show()
def open_Login(self):
self.Login = Login_class()
self.Login.show()
def open_Follow(self):
QtWidgets.QMessageBox.warning(self, "Ooopps", "Stay tuned")
def open_Like(self):
self.Like = Like_class()
self.Like.show()
def open_coming_soon(self):
QtWidgets.QMessageBox.information(self,"Power Up","we will update this powerful tool soon ")
def open_License_window(self):
QtWidgets.QMessageBox.warning(self, "Ooopps", "Dont forget to leave your email")
if __name__== "__main__":
try:
os.makedirs("Private")
except OSError as e:
if e.errno != errno.EEXIST:
raise
app = QtWidgets.QApplication(sys.argv)
MainWindow = MainWindow_class()
MainWindow.show()
sys.exit(app.exec_())
| 11,676 | 62 | 681 |
5022073b94aa06c9a780426f2c33cd14e5791612 | 1,978 | py | Python | egressive.py | P4tches/Egress-ive | 403aa2e9aa56ffdff4c465c78ba0be86638ca398 | [
"MIT"
] | null | null | null | egressive.py | P4tches/Egress-ive | 403aa2e9aa56ffdff4c465c78ba0be86638ca398 | [
"MIT"
] | null | null | null | egressive.py | P4tches/Egress-ive | 403aa2e9aa56ffdff4c465c78ba0be86638ca398 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import subprocess,sys
## Rules
#I1 = ['''-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT''','''-A OUTPUT -j REJECT''']
I1 = ['''ufw-not-local -j DROP'''] #For testing purposes
B1 = ['''-A INPUT -p tcp --dport 53 -j DROP''']
B2 = ['''-A INPUT -p icmp -j REJECT --reject-with icmp-port-unreachable''']
## Check if Rules are Applied
# I'm lazy
main()
| 23.547619 | 108 | 0.664307 | #!/usr/bin/python3
import subprocess,sys
def main():
menu()
def menu():
ruleDump()
print("")
print("Options:")
# Ingress
print(appliedBox(ruleMatch(I1)) + "I1) Only Allow Incoming Traffic (Very Restrictive)")
# Egress
print("E1) Only Allow Top 10,000 Sites (Somewhat Restrictive)")
print("E2) Only Allow 53,80,443 OUT")
# Block Actions
print(appliedBox(ruleMatch(B1)) + "B1) Block Domain Transfers")
print(appliedBox(ruleMatch(B2)) + "B2) Block ICMP")
option = input("Selection: ")
warning()
setRule(I1) #Temp
def warning():
print("This might effect your access to the internet")
warning = input("Continue?(y/N) ")
if warning.lower() == "y":
#rules()
print("Applying Rules...")
elif warning.lower() == "n":
sys.exit()
elif warning == "":
sys.exit()
else:
print("unknown input")
## Rules
#I1 = ['''-A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT''','''-A OUTPUT -j REJECT''']
I1 = ['''ufw-not-local -j DROP'''] #For testing purposes
B1 = ['''-A INPUT -p tcp --dport 53 -j DROP''']
B2 = ['''-A INPUT -p icmp -j REJECT --reject-with icmp-port-unreachable''']
def setRule(ruleID):
for i in ruleID:
print("/sbin/iptables "+i)
#cmd("iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT") # Allow initiated connections out
#cmd("iptables -A OUTPUT -j REJECT") # Drop all other traffic
#print("applied")
## Check if Rules are Applied
def ruleMatch(ruleArray):
for lineRule in ruleArray:
if lineMatch(lineRule) == False:
return False
return True
def lineMatch(lineRule):
matchedLine = False
with open("/tmp/current.rules") as f:
for line in f:
if lineRule == "-A "+line.rstrip():
matchedLine = True
return matchedLine
def ruleDump():
cmd("iptables-save > /tmp/current.rules")
def appliedBox(ruleBool):
if ruleBool == True:
return "[x] "
elif ruleBool == False:
return "[ ] "
else:
return "err "
# I'm lazy
def cmd(command):
subprocess.run(command, shell=True, check=True)
main()
| 1,389 | 0 | 205 |
0507760acbce99cc1430bb26727d75edc9909bd0 | 1,090 | py | Python | medium/problem113/solution.py | cutoutsy/leetcode | 0734f1060a0340370b8234e8072d70c10d4306d9 | [
"Apache-2.0"
] | 1 | 2018-02-25T03:45:04.000Z | 2018-02-25T03:45:04.000Z | medium/problem113/solution.py | cutoutsy/leetcode | 0734f1060a0340370b8234e8072d70c10d4306d9 | [
"Apache-2.0"
] | null | null | null | medium/problem113/solution.py | cutoutsy/leetcode | 0734f1060a0340370b8234e8072d70c10d4306d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
solution = Solution()
ans = solution.pathSum(root, 4)
for i in range(len(ans)):
print(ans[i])
| 23.191489 | 56 | 0.53211 | #!/usr/bin/python
# -*- coding: utf-8 -*-
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
self.ans = []
if root is None:
return self.ans
def dfs(root, path):
if root.left is None and root.right is None:
self.ans.append(path)
if root.left:
dfs(root.left, path + [root.left.val])
if root.right:
dfs(root.right, path + [root.right.val])
dfs(root, [root.val])
result = []
def total(data):
total = 0
for x in (data):
total += x
return total
for path in self.ans:
if sum == total(path):
result.append(path)
return result
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
solution = Solution()
ans = solution.pathSum(root, 4)
for i in range(len(ans)):
print(ans[i])
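# For the tree built above (root 1 with leaves 2 and 3) and target sum 4, the
# only root-to-leaf path summing to 4 is [1, 3], so this loop prints: [1, 3]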
| 395 | 414 | 72 |
b2ea83662d0b2689f48c9ba46c2246a7903f0702 | 310 | py | Python | src/main.py | abeakkas/ipho-unofficial.org | 803ab3cc20670304f7be899ecc48b753765c2ddc | [
"MIT"
] | 5 | 2020-04-10T19:32:43.000Z | 2021-05-08T23:39:21.000Z | src/main.py | abeakkas/ipho-unofficial.org | 803ab3cc20670304f7be899ecc48b753765c2ddc | [
"MIT"
] | 5 | 2016-09-01T04:38:53.000Z | 2019-01-31T15:51:39.000Z | src/main.py | abeakkas/ipho-unofficial.org | 803ab3cc20670304f7be899ecc48b753765c2ddc | [
"MIT"
] | 7 | 2016-08-10T10:30:59.000Z | 2020-02-21T14:12:35.000Z | import countries
import e404
import index
import search
import static_files
import timeline
if __name__ == "__main__":
run()
| 16.315789 | 40 | 0.635484 | import countries
import e404
import index
import search
import static_files
import timeline
def run():
print("Creating the whole project")
index.run()
e404.run()
timeline.run()
countries.run()
search.run()
static_files.run()
if __name__ == "__main__":
run()
| 146 | 0 | 25 |
297edefe6ee585ccb2d96661b63396fac71557fd | 8,907 | py | Python | src/GeneralProcess/InitialParser.py | haganenoneko/EphysAnalysisTools | c404e213095ff4f14831d78f810101c19bce152e | [
"MIT"
] | null | null | null | src/GeneralProcess/InitialParser.py | haganenoneko/EphysAnalysisTools | c404e213095ff4f14831d78f810101c19bce152e | [
"MIT"
] | 1 | 2021-12-26T17:09:21.000Z | 2021-12-26T17:09:21.000Z | src/GeneralProcess/InitialParser.py | haganenoneko/EphysAnalysisTools | c404e213095ff4f14831d78f810101c19bce152e | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Delbert Yip
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import os
import logging
import pandas as pd
from pathlib import Path
from typing import Dict, Any, List
import GeneralProcess.base as base
from GeneralProcess.ephys_info_filter import EphysInfoFiltering
import regex as re
from pydantic import BaseModel, validator
import pyabf
"""Find CSV and ABF files that meet selection criteria
This module contains classes and methods that specialize in
1. Applying, but not validating/reading, selection criteria
2. Finding files with given extensions and at given location(s)
The key class is `DataLoader`, which holds information such as
1. Raw data files (CSV, ABF)
2. Experimental parameters (e.g. series resistance, recording protocol name)
3. Paths to all files
An alternative use case would be to work with non-ABF file formats. To do so,
implement a subclass of `DataLoader` and override the `getDataFiles` method.
"""
# -------------------------------- Find files -------------------------------- #
class FileFinder(BaseModel):
"""Find files at `path`. Optionally ignore files in `to_ignore`
:param paths: path(s) to check for files
:type paths: List[str]
:param to_ignore: list of filenames to ignore, defaults to None
:type to_ignore: List[str], optional
"""
paths: List[str]
to_ignore: List[str] = None
fmt: str = '.csv'
def _readFiles(self, files: List[Path]) -> None:
"""Load csv files"""
ignore_msg = "File ignored: {0}"
for file in files:
fname = file.stem
if not file.is_file():
continue
elif fname in self.to_ignore:
logging.info(ignore_msg.format(fname))
else:
df = self._readFile(file, fname)
self.data_files[fname] = df
return
# -------------------------------- Main parser ------------------------------- #
class DataLoader:
"""Select and find CSV and ABF files"""
def __init__(self,
main_dir: str, csv_path: str, abf_path: str, ephys_info_path: str,
filter_criteria: Dict[str, Any],
log_path: str = None, out_path: str = None
) -> None:
"""Load CSV and ABF files
:param main_dir: [description]
:type main_dir: str
:param csv_path: path to CSV files
:type csv_path: str
:param abf_path: path to ABF files
:type abf_path: str
:param filter_criteria: criteria used to select files. See `EphysInfoFiltering` for more information
:type filter_criteria: Dict[str, Any]
:param log_path: path to save logs, defaults to None
:type log_path: str, optional
:param out_path: path for output files, defaults to None
:type out_path: str, optional
:raises ValueError: if filter criteria are not provided
"""
if not filter_criteria:
raise ValueError(f"No filter criteria provided.")
self.paths = self.validatePaths(
dict(main=main_dir, csv=csv_path, abf=abf_path,
ephys_info_path=ephys_info_path, log=log_path, out=out_path))
self.criteria = filter_criteria
self.filenames: List[str] = None
self.ephys_info: pd.DataFrame = None
self.exp_params: pd.DataFrame = None
self.paired_files: Dict[str, Any] = None
def validatePaths(self, paths: Dict[str, str]) -> Dict[str, Path]:
"""Check that paths are valid and convert to `Path` objects"""
for key, path in paths.items():
if os.path.isdir(path):
paths[key] = Path(path)
elif os.path.isdir(paths['main'] + path):
paths[key] = Path(paths['main'] + path)
else:
raise ValueError(f'{path} is not a valid path.')
return paths
def getDataFiles(self, filenames: List[str], to_ignore: List[str]) -> List[str]:
"""Get Dataframes and pyABF objects for CSV and ABF files, respectively
:param filenames: file names
:type filenames: List[str]
:param to_ignore: list of file names to ignore
:type to_ignore: List[str]
:return: list of missing files
:rtype: List[str]
"""
CSVs = FileFinder(self.paths['csv'], to_ignore=to_ignore,
fmt='.csv').find(filenames, rglob=False)
ABFs = ABF_Finder(self.paths['abf'], to_ignore=to_ignore,
fmt='.abf').find(filenames, rglob=True)
missing: List[str] = []
missing_msg = "{0} in CSVs: {1}\n{0} in ABFs: {2}"
for f in filenames:
if f in CSVs and f in ABFs:
continue
logging.info(
missing_msg.format(f, (f in CSVs), (f in ABFs))
)
missing.append(f)
self.CSVs = CSVs
self.ABFs = ABFs
return missing
| 30.608247 | 108 | 0.581453 | # Copyright (c) 2021 Delbert Yip
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import os
import logging
import pandas as pd
from pathlib import Path
from typing import Dict, Any, List
import GeneralProcess.base as base
from GeneralProcess.ephys_info_filter import EphysInfoFiltering
import regex as re
from pydantic import BaseModel, validator
import pyabf
"""Find CSV and ABF files that meet selection criteria
This module contains classes and methods that specialize in
1. Applying, but not validating/reading, selection criteria
2. Finding files with given extensions and at given location(s)
The key class is `DataLoader`, which holds information such as
1. Raw data files (CSV, ABF)
2. Experimental parameters (e.g. series resistance, recording protocol name)
3. Paths to all files
An alternative use case would be to work with non-ABF file formats. To do so,
implement a subclass of `DataLoader` and override the `getDataFiles` method.
"""
# -------------------------------- Find files -------------------------------- #
class FileFinder(BaseModel):
"""Find files at `path`. Optionally ignore files in `to_ignore`
:param paths: path(s) to check for files
:type paths: List[str]
:param to_ignore: list of filenames to ignore, defaults to None
:type to_ignore: List[str], optional
"""
paths: List[str]
to_ignore: List[str] = None
fmt: str = '.csv'
def __post_init__(self):
self.paths: List[Path] = base.get_valid_paths(self.paths)
self.data_files: Dict[str, pd.DataFrame] = {}
@validator('fmt')
    def format_matches_regex(cls: object, fmt: str) -> str:
        if not isinstance(fmt, str):
            raise TypeError(f"{fmt} must be type str, not {type(fmt)}")
        # allow multi-character extensions such as '.csv'
        pattern = re.compile(r"^[a-zA-Z0-9\_\-]*\.[a-zA-Z0-9]+$")
        if not pattern.match(fmt):
            raise ValueError(
                f"{fmt} does not match pattern: [prefix].[extension]"
            )
        # pydantic validators must return the validated value
        return fmt
@validator('to_ignore')
    def ignore_are_string(cls: object, to_ignore: List[str]) -> List[str]:
        if not to_ignore:
            return to_ignore
        if not all([isinstance(f, str) for f in to_ignore]):
            raise TypeError(
                f"All files to ignore must be strings.\n{to_ignore}")
        # pydantic validators must return the validated value
        return to_ignore
@staticmethod
def _readFile(file: Path, filename: str) -> pd.DataFrame:
df: pd.DataFrame = pd.read_csv(file, header=None, index_col=0)
# check if first index is not 0
if df.index[0] != 0:
try:
df.index = df.index.astype(float)
df.index -= df.index[1]
except ValueError:
df = df.iloc[1:, :]
df.index = df.index.astype(float)
df.index.name = filename
return df
def _readFiles(self, files: List[Path]) -> None:
"""Load csv files"""
ignore_msg = "File ignored: {0}"
for file in files:
fname = file.stem
if not file.is_file():
continue
elif fname in self.to_ignore:
logging.info(ignore_msg.format(fname))
else:
df = self._readFile(file, fname)
self.data_files[fname] = df
return
def find(
self, filenames: List[str], rglob: bool = False
) -> Dict[str, pd.DataFrame]:
fmt = self.fmt
for path in self.paths:
if rglob:
file_paths = [path.rglob(f + fmt) for f in filenames]
else:
file_paths = [path.with_stem(f + fmt) for f in filenames]
self._readFiles(file_paths)
return self.data_files
def __repr__(self) -> str:
return f"""
Paths: {self.paths}\n
Ignored files: {self.to_ignore}\n
Loaded files: {self.data_files}
"""
class ABF_Finder(FileFinder):
@staticmethod
def _readFile(file: Path, filename: str) -> pyabf.ABF:
if not file.is_file():
logging.info(f"File was not found: {file}")
return pyabf.ABF(file)
# -------------------------------- Main parser ------------------------------- #
class DataLoader:
"""Select and find CSV and ABF files"""
def __init__(self,
main_dir: str, csv_path: str, abf_path: str, ephys_info_path: str,
filter_criteria: Dict[str, Any],
log_path: str = None, out_path: str = None
) -> None:
"""Load CSV and ABF files
:param main_dir: [description]
:type main_dir: str
:param csv_path: path to CSV files
:type csv_path: str
:param abf_path: path to ABF files
:type abf_path: str
:param filter_criteria: criteria used to select files. See `EphysInfoFiltering` for more information
:type filter_criteria: Dict[str, Any]
:param log_path: path to save logs, defaults to None
:type log_path: str, optional
:param out_path: path for output files, defaults to None
:type out_path: str, optional
:raises ValueError: if filter criteria are not provided
"""
if not filter_criteria:
raise ValueError(f"No filter criteria provided.")
self.paths = self.validatePaths(
dict(main=main_dir, csv=csv_path, abf=abf_path,
ephys_info_path=ephys_info_path, log=log_path, out=out_path))
self.criteria = filter_criteria
self.filenames: List[str] = None
self.ephys_info: pd.DataFrame = None
self.exp_params: pd.DataFrame = None
self.paired_files: Dict[str, Any] = None
def validatePaths(self, paths: Dict[str, str]) -> Dict[str, Path]:
"""Check that paths are valid and convert to `Path` objects"""
for key, path in paths.items():
if os.path.isdir(path):
paths[key] = Path(path)
elif os.path.isdir(paths['main'] + path):
paths[key] = Path(paths['main'] + path)
else:
raise ValueError(f'{path} is not a valid path.')
return paths
def getDataFiles(self, filenames: List[str], to_ignore: List[str]) -> List[str]:
"""Get Dataframes and pyABF objects for CSV and ABF files, respectively
:param filenames: file names
:type filenames: List[str]
:param to_ignore: list of file names to ignore
:type to_ignore: List[str]
:return: list of missing files
:rtype: List[str]
"""
CSVs = FileFinder(self.paths['csv'], to_ignore=to_ignore,
fmt='.csv').find(filenames, rglob=False)
ABFs = ABF_Finder(self.paths['abf'], to_ignore=to_ignore,
fmt='.abf').find(filenames, rglob=True)
missing: List[str] = []
missing_msg = "{0} in CSVs: {1}\n{0} in ABFs: {2}"
for f in filenames:
if f in CSVs and f in ABFs:
continue
logging.info(
missing_msg.format(f, (f in CSVs), (f in ABFs))
)
missing.append(f)
self.CSVs = CSVs
self.ABFs = ABFs
return missing
def run(self, to_ignore: List[str] = None):
ephysInfo = EphysInfoFiltering(
self.criteria, ephys_info_path=self.paths['ephys_info_path'])
# apply filter criteria
filenames, ephys_info = ephysInfo.filter()
# extract paired files and experimental parameters
paired_files, exp_params = ephysInfo.ExpParams()
missing = self.getDataFiles(to_ignore=to_ignore)
# remove missing files
if missing:
filenames = [f for f in filenames if f not in missing]
ephysInfo = ephysInfo[ephysInfo['Files'].str.isin(filenames)]
exp_params = exp_params.loc[:, exp_params.str.isin(filenames)]
self.filenames = filenames
self.ephys_info = ephys_info
self.exp_params = exp_params
self.paired_files = paired_files
def __repr__(self) -> str:
pretty_paths = base.pprint_dict(self.paths)
pretty_CSVs = base.pprint_dict(self.CSVs, delim=" ",
func=base._get_df_shape)
pretty_ABFs = base.pprint_dict(self.ABFs, delim=" ",
func=base._get_df_shape)
return f"""
Filenames:\n{self.filenames}\n
Experiment info:\n{self.ephys_info}\n
Seal parameters:\n{self.exp_params}\n
Paired files:\n{self.paired_files}\n
ABF files:\n{pretty_ABFs}\n
CSV files:\n{pretty_CSVs}\n
\nSelection criteria: {self.criteria}\n
{pretty_paths}
"""
| 3,490 | 53 | 236 |
b17570f975254eb582451c8008a5f8277305ca87 | 306 | py | Python | fstmt/table/__init__.py | mission-liao/fin-stmt-additional | da9ef5299e6ff10406996d0cb0975b46498d3c39 | [
"MIT"
] | null | null | null | fstmt/table/__init__.py | mission-liao/fin-stmt-additional | da9ef5299e6ff10406996d0cb0975b46498d3c39 | [
"MIT"
] | null | null | null | fstmt/table/__init__.py | mission-liao/fin-stmt-additional | da9ef5299e6ff10406996d0cb0975b46498d3c39 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .cost_breakdown import CostBreakdown
from .manufacturing_expense import ManufacturingExpense
from .stock import Stock
from .factory import TableAdaptorFactory
from .me2 import ManufacturingExpense2
from .product_price import ProductPrice
from .product_cost import ProductCost
| 30.6 | 55 | 0.836601 | # -*- coding: utf-8 -*-
from .cost_breakdown import CostBreakdown
from .manufacturing_expense import ManufacturingExpense
from .stock import Stock
from .factory import TableAdaptorFactory
from .me2 import ManufacturingExpense2
from .product_price import ProductPrice
from .product_cost import ProductCost
| 0 | 0 | 0 |
7524574830bfd648c2c773c48d37d248d559da32 | 608 | py | Python | oops_fhir/r4/value_set/v3_processing_mode.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/v3_processing_mode.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/v3_processing_mode.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_processing_mode import (
v3ProcessingMode as v3ProcessingMode_,
)
__all__ = ["v3ProcessingMode"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3ProcessingMode(v3ProcessingMode_):
"""
v3 Code System ProcessingMode
**** MISSING DEFINITIONS ****
Status: active - Version: 2018-08-12
http://terminology.hl7.org/ValueSet/v3-ProcessingMode
"""
| 19.612903 | 69 | 0.733553 | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_processing_mode import (
v3ProcessingMode as v3ProcessingMode_,
)
__all__ = ["v3ProcessingMode"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3ProcessingMode(v3ProcessingMode_):
"""
v3 Code System ProcessingMode
**** MISSING DEFINITIONS ****
Status: active - Version: 2018-08-12
http://terminology.hl7.org/ValueSet/v3-ProcessingMode
"""
class Meta:
resource = _resource
| 0 | 19 | 27 |
457aaaf1dfd9f71afd38241785863148c7859cb4 | 161 | py | Python | uploading/admin.py | trevin-livele/django_api_awwwards | 604a480cfe3d0efc01019c4ba15ffba5c140be0b | [
"MIT"
] | null | null | null | uploading/admin.py | trevin-livele/django_api_awwwards | 604a480cfe3d0efc01019c4ba15ffba5c140be0b | [
"MIT"
] | null | null | null | uploading/admin.py | trevin-livele/django_api_awwwards | 604a480cfe3d0efc01019c4ba15ffba5c140be0b | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post,ReviewRating
# Register your models here.
admin.site.register(Post)
admin.site.register(ReviewRating) | 23 | 37 | 0.819876 | from django.contrib import admin
from .models import Post,ReviewRating
# Register your models here.
admin.site.register(Post)
admin.site.register(ReviewRating) | 0 | 0 | 0 |
a188f1c773c39a2017c2542f0382177d7951c891 | 3,966 | py | Python | cd4ml/ml_model.py | muralipoola/CD4ML-Scenarios | 2efb4adc3221d6daaa28fd62c3df1c1ff2c78424 | [
"MIT"
] | null | null | null | cd4ml/ml_model.py | muralipoola/CD4ML-Scenarios | 2efb4adc3221d6daaa28fd62c3df1c1ff2c78424 | [
"MIT"
] | null | null | null | cd4ml/ml_model.py | muralipoola/CD4ML-Scenarios | 2efb4adc3221d6daaa28fd62c3df1c1ff2c78424 | [
"MIT"
] | null | null | null | import joblib
from wickedhot import OneHotEncoder
from cd4ml.train import get_trained_model
from cd4ml.model_utils import get_target_id_features_lists
import logging
import mlflow.sklearn
import mlflow
import os
from cd4ml.utils.utils import mini_batch_eval
| 40.469388 | 115 | 0.652547 | import joblib
from wickedhot import OneHotEncoder
from cd4ml.train import get_trained_model
from cd4ml.model_utils import get_target_id_features_lists
import logging
import mlflow.sklearn
import mlflow
import os
from cd4ml.utils.utils import mini_batch_eval
class MLModel:
def __init__(self, algorithm_name,
algorithm_params,
feature_set,
encoder,
random_seed):
self.logger = logging.getLogger(__name__)
self.algorithm_name = algorithm_name
self.algorithm_params = algorithm_params
self.random_seed = random_seed
self.trained_model = None
self.encoder = encoder
self.feature_set = feature_set
self.packaged_encoder = None
mlflow.set_tracking_uri(os.environ["MLFLOW_TRACKING_URL"])
def load_encoder_from_package(self):
self.logger.info('loading encoder from packaging')
self.encoder = OneHotEncoder([], [])
self.encoder.load_from_packaged_data(self.packaged_encoder)
def predict_encoded_rows(self, encoded_row_list):
# needs a list not a stream
# for batch or mini-batch calls
# eliminates model scoring overhead
preds = self.trained_model.predict(encoded_row_list)
return [float(pred) for pred in preds]
def predict_single_processed_row(self, processed_row):
# don't call this on each element of a stream or list
# call it when you really only have one to predict
# not performant when called many times
# use predict_processed_rows for that instead
self.logger.debug('processed_row', processed_row)
return list(self.predict_processed_rows([processed_row]))[0]
def predict_processed_rows(self, processed_row_stream):
# minibatch prediction is much faster because of overhead
# of model scoring call
if self.encoder is None:
# in case it has been packaged
self.load_encoder_from_package()
self.packaged_encoder = None
batch_size = 1000
feature_row_stream = (self.feature_set.features(row) for row in processed_row_stream)
encoded_row_stream = (self.encoder.encode_row(feature_row) for feature_row in feature_row_stream)
return mini_batch_eval(encoded_row_stream, batch_size, self.predict_encoded_rows)
def _get_target_id_features_lists_training(self, training_processed_stream):
return get_target_id_features_lists(self.feature_set.identifier_field,
self.feature_set.target_field,
self.feature_set,
training_processed_stream)
def train(self, training_processed_stream):
# reads in the streams to lists of dicts, trains model and then
# deletes the data to free up memory
target_data, identifiers, features = self._get_target_id_features_lists_training(training_processed_stream)
encoded_training_data = [self.encoder.encode_row(feature_row) for feature_row in features]
del features, identifiers
self.trained_model = get_trained_model(self.algorithm_name,
self.algorithm_params,
encoded_training_data,
target_data,
self.random_seed)
mlflow.sklearn.log_model(sk_model=self.trained_model,
artifact_path='wine-pyfile-model2') # noqa
del encoded_training_data
def save(self, filename):
# The encoder apparently is not pickleable.
# No problem. The encoder has built in serialization
# so make use if it.
self.packaged_encoder = self.encoder.package_data()
self.encoder = None
joblib.dump(self, filename)
| 3,475 | -7 | 238 |
4a3816d4c395304ae8175a3ae48d262adc748f0d | 8,409 | py | Python | games/management/commands/fillrankingdb.py | MarkusShepherd/ludoj-server | 6cfb218029e6e1cf520eed1ccab2576c990cd287 | [
"MIT"
] | 1 | 2018-11-10T21:03:48.000Z | 2018-11-10T21:03:48.000Z | games/management/commands/fillrankingdb.py | MarkusShepherd/ludoj-server | 6cfb218029e6e1cf520eed1ccab2576c990cd287 | [
"MIT"
] | null | null | null | games/management/commands/fillrankingdb.py | MarkusShepherd/ludoj-server | 6cfb218029e6e1cf520eed1ccab2576c990cd287 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Parses the ranking CSVs and writes them to the database."""
import csv
import logging
import os
import sys
from datetime import datetime, timezone
from functools import lru_cache
from itertools import groupby
from pathlib import Path
import pandas as pd
from django.core.management.base import BaseCommand
from pytility import arg_to_iter, batchify, parse_date
from snaptime import snap
from ...models import Game, Ranking
from ...utils import format_from_path
csv.field_size_limit(sys.maxsize)
LOGGER = logging.getLogger(__name__)
WEEK_DAYS = ("SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT")
def parse_ranking_csv(path_file, date=None, tzinfo=timezone.utc):
"""Parses a ranking CSV file."""
LOGGER.info("Reading ranking from <%s>...", path_file)
date = _extract_date(path_file=path_file, tzinfo=tzinfo) if date is None else date
ranking = pd.read_csv(path_file)
ranking["date"] = date
return ranking
def parse_ranking_csvs(
path_dir,
week_day="SUN",
tzinfo=timezone.utc,
min_date=None,
max_date=None,
):
"""Parses all ranking CSV files in a directory."""
path_dir = Path(path_dir).resolve()
LOGGER.info("Iterating through all CSV files in <%s>...", path_dir)
files = (file for file in path_dir.iterdir() if format_from_path(file) == "csv")
files = (
(_extract_date(path_file=file, tzinfo=tzinfo), file) for file in sorted(files)
)
if min_date:
LOGGER.info("Filter out files before %s", min_date)
files = ((date, file) for date, file in files if date >= min_date)
if max_date:
LOGGER.info("Filter out files after %s", max_date)
files = ((date, file) for date, file in files if date <= max_date)
if not week_day:
for date, file in files:
LOGGER.info("Processing rankings from %s...", date)
yield date, parse_ranking_csv(path_file=file, date=date)
return
for group_date, group in groupby(
files,
key=lambda pair: _following(date=pair[0], week_day=week_day, tzinfo=tzinfo),
):
LOGGER.info("Processing rankings from the week ending in %s...", group_date)
dfs = (
parse_ranking_csv(path_file=path_file, date=date)
for date, path_file in group
)
yield group_date, pd.concat(dfs, ignore_index=True)
class Command(BaseCommand):
"""Parses the ranking CSVs and writes them to the database."""
help = "Parses the ranking CSVs and writes them to the database."
ranking_types = {
Ranking.BGG: ("bgg", "last", None, None),
Ranking.RECOMMEND_GAMES: ("r_g", "mean", None, 0),
Ranking.FACTOR: ("factor", "mean", None, None),
Ranking.SIMILARITY: ("similarity", "mean", None, None),
Ranking.CHARTS: (
"charts",
"all",
datetime(2016, 1, 1, tzinfo=timezone.utc),
None,
),
}
| 30.139785 | 87 | 0.593888 | # -*- coding: utf-8 -*-
"""Parses the ranking CSVs and writes them to the database."""
import csv
import logging
import os
import sys
from datetime import datetime, timezone
from functools import lru_cache
from itertools import groupby
from pathlib import Path
import pandas as pd
from django.core.management.base import BaseCommand
from pytility import arg_to_iter, batchify, parse_date
from snaptime import snap
from ...models import Game, Ranking
from ...utils import format_from_path
csv.field_size_limit(sys.maxsize)
LOGGER = logging.getLogger(__name__)
WEEK_DAYS = ("SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT")
@lru_cache(maxsize=128)
def _week_day_number(day):
if not isinstance(day, str):
return None
try:
return WEEK_DAYS.index(day.upper())
except Exception:
pass
return None
@lru_cache(maxsize=128)
def _make_instruction(day):
day_number = day if isinstance(day, int) else _week_day_number(day)
return f"@w{day_number + 1}+1w-1d" if isinstance(day_number, int) else day
def _following(date, week_day="SUN", tzinfo=timezone.utc):
date = parse_date(date, tzinfo=tzinfo).astimezone(tzinfo)
instruction = _make_instruction(week_day)
return snap(date, instruction).date()
def _extract_date(path_file, tzinfo=timezone.utc):
file_name = os.path.basename(path_file)
date_str, _ = os.path.splitext(file_name)
return parse_date(date_str, tzinfo=tzinfo)
def parse_ranking_csv(path_file, date=None, tzinfo=timezone.utc):
"""Parses a ranking CSV file."""
LOGGER.info("Reading ranking from <%s>...", path_file)
date = _extract_date(path_file=path_file, tzinfo=tzinfo) if date is None else date
ranking = pd.read_csv(path_file)
ranking["date"] = date
return ranking
def parse_ranking_csvs(
path_dir,
week_day="SUN",
tzinfo=timezone.utc,
min_date=None,
max_date=None,
):
"""Parses all ranking CSV files in a directory."""
path_dir = Path(path_dir).resolve()
LOGGER.info("Iterating through all CSV files in <%s>...", path_dir)
files = (file for file in path_dir.iterdir() if format_from_path(file) == "csv")
files = (
(_extract_date(path_file=file, tzinfo=tzinfo), file) for file in sorted(files)
)
if min_date:
LOGGER.info("Filter out files before %s", min_date)
files = ((date, file) for date, file in files if date >= min_date)
if max_date:
LOGGER.info("Filter out files after %s", max_date)
files = ((date, file) for date, file in files if date <= max_date)
if not week_day:
for date, file in files:
LOGGER.info("Processing rankings from %s...", date)
yield date, parse_ranking_csv(path_file=file, date=date)
return
for group_date, group in groupby(
files,
key=lambda pair: _following(date=pair[0], week_day=week_day, tzinfo=tzinfo),
):
LOGGER.info("Processing rankings from the week ending in %s...", group_date)
dfs = (
parse_ranking_csv(path_file=path_file, date=date)
for date, path_file in group
)
yield group_date, pd.concat(dfs, ignore_index=True)
def _last_ranking(data, date=None):
data.sort_values(["bgg_id", "date"], inplace=True)
groups = data.groupby("bgg_id")
rankings = groups.last()
rankings.reset_index(inplace=True)
rankings.sort_values("rank", inplace=True)
if date is not None:
rankings["date"] = date
return rankings
def _avg_ranking(data, date=None):
groups = data.groupby("bgg_id")
rankings = groups["score"].mean().reset_index()
rankings.sort_values("score", ascending=False, inplace=True)
rankings["rank"] = range(1, len(rankings) + 1)
if date is not None:
rankings["date"] = date
return rankings
def _create_instances(
path_dir,
ranking_type=Ranking.BGG,
filter_ids=None,
method="last", # TODO this should really be an enum
week_day="SUN",
min_date=None,
max_date=None,
min_score=None,
):
LOGGER.info(
"Finding all rankings of type <%s> in <%s>, aggregating <%s>...",
ranking_type,
path_dir,
method,
)
for date, data in parse_ranking_csvs(
path_dir=path_dir,
week_day=None if method == "all" else week_day,
min_date=min_date,
max_date=max_date,
):
rankings = (
_avg_ranking(data=data, date=date)
if method == "mean"
else _last_ranking(data=data, date=date)
if method == "last"
else data
if method == "all"
else None
)
assert rankings is not None, f"illegal method <{method}>"
if min_score is not None and "score" in rankings:
rankings = rankings[rankings["score"] > min_score]
for item in rankings.itertuples(index=False):
if filter_ids is None or item.bgg_id in filter_ids:
yield Ranking(
game_id=item.bgg_id,
ranking_type=ranking_type,
rank=item.rank,
date=item.date,
)
class Command(BaseCommand):
"""Parses the ranking CSVs and writes them to the database."""
help = "Parses the ranking CSVs and writes them to the database."
ranking_types = {
Ranking.BGG: ("bgg", "last", None, None),
Ranking.RECOMMEND_GAMES: ("r_g", "mean", None, 0),
Ranking.FACTOR: ("factor", "mean", None, None),
Ranking.SIMILARITY: ("similarity", "mean", None, None),
Ranking.CHARTS: (
"charts",
"all",
datetime(2016, 1, 1, tzinfo=timezone.utc),
None,
),
}
def add_arguments(self, parser):
parser.add_argument("path", help="input directory")
parser.add_argument(
"--batch",
"-b",
type=int,
default=100_000,
help="batch size for DB transactions",
)
parser.add_argument(
"--types",
"-t",
choices=self.ranking_types.keys(),
nargs="+",
help="only create rankings of these particular types",
)
parser.add_argument(
"--week-day",
"-w",
default="SUN",
choices=WEEK_DAYS,
help="anchor week day when aggregating weeks",
)
parser.add_argument(
"--dry-run",
"-n",
action="store_true",
help="don't write to the database",
)
def _create_all_instances(self, path, filter_ids=None, week_day="SUN", types=None):
types = frozenset(arg_to_iter(types))
for ranking_type, (
sub_dir,
method,
min_date,
min_score,
) in self.ranking_types.items():
if not types or ranking_type in types:
yield from _create_instances(
path_dir=os.path.join(path, sub_dir),
ranking_type=ranking_type,
filter_ids=filter_ids,
method=method,
week_day=week_day,
min_date=min_date,
min_score=min_score,
)
def handle(self, *args, **kwargs):
logging.basicConfig(
stream=sys.stderr,
level=logging.DEBUG if kwargs["verbosity"] > 1 else logging.INFO,
format="%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)s] %(message)s",
)
LOGGER.info(kwargs)
# pylint: disable=no-member
game_ids = frozenset(Game.objects.order_by().values_list("bgg_id", flat=True))
instances = self._create_all_instances(
path=kwargs["path"],
filter_ids=game_ids,
week_day=kwargs["week_day"],
types=kwargs["types"],
)
batches = (
batchify(instances, kwargs["batch"]) if kwargs["batch"] else (instances,)
)
for count, batch in enumerate(batches):
LOGGER.info("Processing batch #%d...", count + 1)
if kwargs["dry_run"]:
for item in batch:
print(item)
else:
Ranking.objects.bulk_create(batch)
LOGGER.info("Done filling the database.")
| 5,155 | 0 | 240 |
941ffa52dfc4e009046f91e469fc8909811556a1 | 4,567 | py | Python | event_store_client.py | martinez099/event-store | bc007adcf7ce6a3cb2a9d99cf9ab32708dcb17a9 | [
"MIT"
] | 5 | 2020-06-14T10:08:29.000Z | 2021-07-06T05:46:14.000Z | event_store_client.py | martinez099/event-store | bc007adcf7ce6a3cb2a9d99cf9ab32708dcb17a9 | [
"MIT"
] | null | null | null | event_store_client.py | martinez099/event-store | bc007adcf7ce6a3cb2a9d99cf9ab32708dcb17a9 | [
"MIT"
] | 3 | 2020-09-30T13:49:33.000Z | 2021-09-04T23:48:51.000Z | import json
import logging
import os
import threading
import uuid
import grpc
from event_store_pb2 import PublishRequest, SubscribeRequest, UnsubscribeRequest, GetRequest
from event_store_pb2_grpc import EventStoreStub
EVENT_STORE_HOSTNAME = os.getenv('EVENT_STORE_HOSTNAME', 'localhost')
EVENT_STORE_PORTNR = os.getenv('EVENT_STORE_PORTNR', '50051')
def create_event(_action, _data):
"""
Create an event.
:param _action: The event action.
:param _data: A dict with the event data.
:return: A dict with the event information.
"""
return {
'event_id': str(uuid.uuid4()),
'event_action': _action,
'event_data': json.dumps(_data)
}
class EventStoreClient(object):
"""
Event Store Client class.
"""
def publish(self, _topic, _info):
"""
Publish an event.
:param _topic: The event topic.
:param _info: A dict with the event information.
:return: The entry ID.
"""
response = self.stub.publish(PublishRequest(
event_topic=_topic,
event_info=json.dumps(_info)
))
return response.entry_id
def subscribe(self, _topic, _handler, _group=None):
"""
Subscribe to an event topic.
:param _topic: The event topic.
:param _handler: The event handler.
:param _group: Optional group name.
:return: Success.
"""
if _topic in self.subscribers:
self.subscribers[_topic].add_handler(_handler)
else:
subscriber = Subscriber(_topic, _handler, self.stub, _group)
subscriber.start()
self.subscribers[_topic] = subscriber
return True
def unsubscribe(self, _topic, _handler):
"""
Unsubscribe from an event topic.
:param _topic: The event topic.
:param _handler: The event handler.
:return: Success.
"""
subscriber = self.subscribers.get(_topic)
if not subscriber:
return False
response = self.stub.unsubscribe(UnsubscribeRequest(event_topic=_topic))
subscriber.rem_handler(_handler)
if not subscriber:
del self.subscribers[_topic]
return response.success
def get(self, _topic):
"""
Get events for a topic.
        :param _topic: The event topic, i.e. the name of the event stream.
:return: A list with entities.
"""
response = self.stub.get(GetRequest(event_topic=_topic))
return json.loads(response.events) if response.events else None
class Subscriber(threading.Thread):
"""
Subscriber Thread class.
"""
def __init__(self, _topic, _handler, _stub, _group=None):
"""
        :param _topic: The topic to subscribe to.
:param _handler: A handler function.
:param _group: The name of the subscriber.
"""
super(Subscriber, self).__init__()
self._running = False
self.handlers = [_handler]
self.topic = _topic
self.stub = _stub
self.group = _group
def run(self):
"""
Poll the event stream and call each handler with each entry returned.
"""
if self._running:
return
self._running = True
for item in self.stub.subscribe(
SubscribeRequest(event_topic=self.topic, group_name=self.group)):
for handler in self.handlers:
try:
handler(item)
except Exception as e:
logging.error(
'error calling handler function ({}) for {}.{}: {}'.format(
e.__class__.__name__, self.topic, handler.__name__, str(e)
)
)
self._running = False
def add_handler(self, _handler):
"""
Add an event handler.
:param _handler: The event handler function.
"""
self.handlers.append(_handler)
def rem_handler(self, _handler):
"""
Remove an event handler.
:param _handler: The event handler function.
"""
self.handlers.remove(_handler)
| 27.184524 | 92 | 0.592511 | import json
import logging
import os
import threading
import uuid
import grpc
from event_store_pb2 import PublishRequest, SubscribeRequest, UnsubscribeRequest, GetRequest
from event_store_pb2_grpc import EventStoreStub
EVENT_STORE_HOSTNAME = os.getenv('EVENT_STORE_HOSTNAME', 'localhost')
EVENT_STORE_PORTNR = os.getenv('EVENT_STORE_PORTNR', '50051')
def create_event(_action, _data):
"""
Create an event.
:param _action: The event action.
:param _data: A dict with the event data.
:return: A dict with the event information.
"""
return {
'event_id': str(uuid.uuid4()),
'event_action': _action,
'event_data': json.dumps(_data)
}
class EventStoreClient(object):
"""
Event Store Client class.
"""
def __init__(self):
host, port = EVENT_STORE_HOSTNAME, EVENT_STORE_PORTNR
self.channel = grpc.insecure_channel('{}:{}'.format(host, port))
self.stub = EventStoreStub(self.channel)
self.subscribers = {}
def __del__(self):
self.channel.close()
def publish(self, _topic, _info):
"""
Publish an event.
:param _topic: The event topic.
:param _info: A dict with the event information.
:return: The entry ID.
"""
response = self.stub.publish(PublishRequest(
event_topic=_topic,
event_info=json.dumps(_info)
))
return response.entry_id
def subscribe(self, _topic, _handler, _group=None):
"""
Subscribe to an event topic.
:param _topic: The event topic.
:param _handler: The event handler.
:param _group: Optional group name.
:return: Success.
"""
if _topic in self.subscribers:
self.subscribers[_topic].add_handler(_handler)
else:
subscriber = Subscriber(_topic, _handler, self.stub, _group)
subscriber.start()
self.subscribers[_topic] = subscriber
return True
def unsubscribe(self, _topic, _handler):
"""
Unsubscribe from an event topic.
:param _topic: The event topic.
:param _handler: The event handler.
:return: Success.
"""
subscriber = self.subscribers.get(_topic)
if not subscriber:
return False
response = self.stub.unsubscribe(UnsubscribeRequest(event_topic=_topic))
subscriber.rem_handler(_handler)
if not subscriber:
del self.subscribers[_topic]
return response.success
def get(self, _topic):
"""
Get events for a topic.
        :param _topic: The event topic, i.e. the name of the event stream.
:return: A list with entities.
"""
response = self.stub.get(GetRequest(event_topic=_topic))
return json.loads(response.events) if response.events else None
class Subscriber(threading.Thread):
"""
Subscriber Thread class.
"""
def __init__(self, _topic, _handler, _stub, _group=None):
"""
        :param _topic: The topic to subscribe to.
:param _handler: A handler function.
:param _group: The name of the subscriber.
"""
super(Subscriber, self).__init__()
self._running = False
self.handlers = [_handler]
self.topic = _topic
self.stub = _stub
self.group = _group
def __len__(self):
return len(self.handlers)
def run(self):
"""
Poll the event stream and call each handler with each entry returned.
"""
if self._running:
return
self._running = True
for item in self.stub.subscribe(
SubscribeRequest(event_topic=self.topic, group_name=self.group)):
for handler in self.handlers:
try:
handler(item)
except Exception as e:
logging.error(
'error calling handler function ({}) for {}.{}: {}'.format(
e.__class__.__name__, self.topic, handler.__name__, str(e)
)
)
self._running = False
def add_handler(self, _handler):
"""
Add an event handler.
:param _handler: The event handler function.
"""
self.handlers.append(_handler)
def rem_handler(self, _handler):
"""
Remove an event handler.
:param _handler: The event handler function.
"""
self.handlers.remove(_handler)
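# --- Editor's usage sketch (not part of the original module) ---
# A minimal illustration of how the client above might be driven. The topic name,
# handler, and payload are made up for demonstration, and a reachable event-store
# gRPC service at EVENT_STORE_HOSTNAME:EVENT_STORE_PORTNR is assumed.
if __name__ == '__main__':
    def _log_entry(entry):
        # Naive handler: just log whatever the stream delivers.
        logging.info('received entry: %s', entry)
    client = EventStoreClient()
    client.subscribe('order', _log_entry)
    client.publish('order', create_event('created', {'order_id': 1}))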
| 269 | 0 | 81 |
c0752693a81375be4af89460a0fc8ed1656c43af | 2,133 | py | Python | ntgen/utils.py | mrapacz/ntgen | 62f1e04624a0b406060418eb4f89af132a635a3f | [
"MIT"
] | 4 | 2020-03-29T09:31:52.000Z | 2020-04-27T19:02:54.000Z | ntgen/utils.py | mrapacz/ntgen | 62f1e04624a0b406060418eb4f89af132a635a3f | [
"MIT"
] | 2 | 2020-03-29T10:47:09.000Z | 2020-04-05T08:12:38.000Z | ntgen/utils.py | mrapacz/ntgen | 62f1e04624a0b406060418eb4f89af132a635a3f | [
"MIT"
] | null | null | null | import re
from typing import Optional
def normalize_field_name(name: str, leading_undescores_prefix: Optional[str] = None) -> str:
"""
Normalize a string to take a Pythonic form.
Normalize a string to take a Pythonic form by:
- replacing leading underscores with a given (optional) prefix
- converting the name so snake_case
"""
return convert_to_snake_case(replace_leading_underscores(name, prefix=leading_undescores_prefix))
def convert_to_snake_case(name: str) -> str:
"""
Convert a given string to snake_case.
Converts a given string to snake_case from camel case or kebab case
>>> normalize_field_name('SomeCamelCase')
'some_camel_case'
>>> normalize_field_name('sample-kebab-case')
'sample_kebab_case'
"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", name).replace("-", "_").lower()
def normalize_class_name(name: str) -> str:
"""
Normalize class name by converting it to PascalCase.
>>> normalize_class_name('some_hyphen_case')
'SomeHyphenCase'
"""
if re.match(r"(?:[A-Z][a-z]+)+", name):
return name
return "".join([fragment.capitalize() for fragment in normalize_field_name(name).split("_")])
def replace_leading_underscores(name: str, prefix: Optional[str] = None) -> str:
"""
Replace leading underscores with a given prefix.
Replaces leading underscores with a given prefix. If no prefix is specified, the leading underscores are removed.
>>> replace_leading_underscores('_private_field')
'private_field'
>>> replace_leading_underscores('__private_field', prefix='dunder')
'dunder_private_field'
"""
return re.sub(r"^_+", f"{prefix}_" if prefix else "", name)
def indent_statement(indent: int, statement: str) -> str:
"""
Indents the given string by a specified number of indents.
    Indents the given string by a specified number of indents, e.g. indenting by 1 will prepend the string
with 4 space characters:
>>> indent_statement(0, 'x = 3')
'x = 3'
>>> indent_statement(1, 'x = 3')
' x = 3'
"""
return " " * 4 * indent + statement
| 31.835821 | 117 | 0.676981 | import re
from typing import Optional
def normalize_field_name(name: str, leading_undescores_prefix: Optional[str] = None) -> str:
"""
Normalize a string to take a Pythonic form.
Normalize a string to take a Pythonic form by:
- replacing leading underscores with a given (optional) prefix
- converting the name so snake_case
"""
return convert_to_snake_case(replace_leading_underscores(name, prefix=leading_undescores_prefix))
def convert_to_snake_case(name: str) -> str:
"""
Convert a given string to snake_case.
Converts a given string to snake_case from camel case or kebab case
>>> normalize_field_name('SomeCamelCase')
'some_camel_case'
>>> normalize_field_name('sample-kebab-case')
'sample_kebab_case'
"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", name).replace("-", "_").lower()
def normalize_class_name(name: str) -> str:
"""
Normalize class name by converting it to PascalCase.
>>> normalize_class_name('some_hyphen_case')
'SomeHyphenCase'
"""
if re.match(r"(?:[A-Z][a-z]+)+", name):
return name
return "".join([fragment.capitalize() for fragment in normalize_field_name(name).split("_")])
def replace_leading_underscores(name: str, prefix: Optional[str] = None) -> str:
"""
Replace leading underscores with a given prefix.
Replaces leading underscores with a given prefix. If no prefix is specified, the leading underscores are removed.
>>> replace_leading_underscores('_private_field')
'private_field'
>>> replace_leading_underscores('__private_field', prefix='dunder')
'dunder_private_field'
"""
return re.sub(r"^_+", f"{prefix}_" if prefix else "", name)
def indent_statement(indent: int, statement: str) -> str:
"""
Indents the given string by a specified number of indents.
    Indents the given string by a specified number of indents, e.g. indenting by 1 will prepend the string
with 4 space characters:
>>> indent_statement(0, 'x = 3')
'x = 3'
>>> indent_statement(1, 'x = 3')
' x = 3'
"""
return " " * 4 * indent + statement
| 0 | 0 | 0 |
0e45a7779e2663718ced38640283e0176e71c2af | 2,158 | py | Python | dostuff.py | rumkex/IonTools | e4c80a6385fa41fd7dd3721636a1a4b177839625 | [
"BSD-3-Clause"
] | 3 | 2018-08-09T08:49:40.000Z | 2019-07-24T02:48:55.000Z | dostuff.py | rumkex/IonTools | e4c80a6385fa41fd7dd3721636a1a4b177839625 | [
"BSD-3-Clause"
] | null | null | null | dostuff.py | rumkex/IonTools | e4c80a6385fa41fd7dd3721636a1a4b177839625 | [
"BSD-3-Clause"
] | 1 | 2019-05-22T12:22:45.000Z | 2019-05-22T12:22:45.000Z | #!/usr/bin/env python3
from math import pi
from multiprocessing import Pool
from tempfile import NamedTemporaryFile
from subprocess import call, DEVNULL
import time
import os
import os.path as path
from shared import update_progress, plot_data
baseflags = ['--interpolate=linear', '--enable-weights']
config = {
"base": baseflags + ['-n=128'],
"smoothing": baseflags + ['-n=128', '--enable-aa'],
"128x128-multigrid-3-layers": baseflags + ['-n=128', '-l=3'],
"64x128-multigrid-3-layers": baseflags + ['-n=64x128', '-l=3'],
}
if __name__ == "__main__":
main()
| 30.394366 | 117 | 0.603336 | #!/usr/bin/env python3
from math import pi
from multiprocessing import Pool
from tempfile import NamedTemporaryFile
from subprocess import call, DEVNULL
import time
import os
import os.path as path
from shared import update_progress, plot_data
baseflags = ['--interpolate=linear', '--enable-weights']
config = {
"base": baseflags + ['-n=128'],
"smoothing": baseflags + ['-n=128', '--enable-aa'],
"128x128-multigrid-3-layers": baseflags + ['-n=128', '-l=3'],
"64x128-multigrid-3-layers": baseflags + ['-n=64x128', '-l=3'],
}
def reconstruct(filename, flags, outname):
with NamedTemporaryFile() as temp:
call(['bin/Solver', '-i=100', '-t=polar'] + flags + [filename] + [temp.name], stdout=DEVNULL, stderr=DEVNULL)
plot_data(temp, outname, '')
def main():
datafiles = [path.join('./data', f) for f in os.listdir('./data') if path.isfile(path.join('./data', f))]
pool = Pool()
completedTasks = 0
totalTasks = 0
def mp_callback(task):
nonlocal totalTasks, completedTasks
completedTasks += 1
update_progress(completedTasks / totalTasks * 100)
job_start = time.time()
print('Generating jobs...')
for datafile in datafiles:
prefix = 'out/' + path.splitext(path.basename(datafile))[0] + '/'
if not path.exists(prefix):
os.makedirs(prefix)
for solverType in ['ART', 'SIRT']:
solver_flag = ['--solver=' + solverType]
for name, cfg in config.items():
outname = prefix + solverType + '-' + name + '.png'
if path.exists(outname):
continue
totalTasks += 1
#reconstruct(datafile, solver_flag + cfg, outname)
pool.apply_async(reconstruct, args = (datafile, solver_flag + cfg, outname), callback = mp_callback)
print('Generated ' + str(totalTasks) + ' jobs.')
update_progress(0)
pool.close()
pool.join()
if totalTasks == 0: update_progress(100)
job_end = time.time()
    print('Took {0:.1f}s'.format(job_end - job_start))
if __name__ == "__main__":
main()
| 1,528 | 0 | 46 |
4008b775c8f08dd29efc36b744c6a3bd6e47d7fe | 106 | py | Python | Misc/10print.py | sparkstark03/Awesome-Scripts | 7326f2480bdeb4d843c50488bc790a5139820c8a | [
"MIT"
] | 141 | 2018-10-04T10:02:15.000Z | 2022-03-18T08:47:01.000Z | Misc/10print.py | sparkstark03/Awesome-Scripts | 7326f2480bdeb4d843c50488bc790a5139820c8a | [
"MIT"
] | 34 | 2018-10-04T08:28:01.000Z | 2020-11-02T09:36:02.000Z | Misc/10print.py | sparkstark03/Awesome-Scripts | 7326f2480bdeb4d843c50488bc790a5139820c8a | [
"MIT"
] | 110 | 2018-10-04T04:28:11.000Z | 2022-03-22T05:49:02.000Z | #!/usr/bin/env python3
import random
for i in range(100000):print(chr(9585+random.randint(0,1)), end="")
| 21.2 | 67 | 0.707547 | #!/usr/bin/env python3
import random
for i in range(100000):print(chr(9585+random.randint(0,1)), end="")
| 0 | 0 | 0 |
41739de9d63f3025d9be0b4a264539bc8a971b23 | 4,395 | py | Python | sa/profiles/Zyxel/ZyNOS/get_switchport.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/Zyxel/ZyNOS/get_switchport.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/Zyxel/ZyNOS/get_switchport.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.ZyNOS.get_switchport
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetswitchport import IGetSwitchport
| 36.932773 | 87 | 0.471445 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.ZyNOS.get_switchport
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetswitchport import IGetSwitchport
class Script(BaseScript):
name = "Zyxel.ZyNOS.get_switchport"
interface = IGetSwitchport
rx_portinfo = re.compile(
r"Port No\s+:(?P<interface>\d+)\n"
r"\s*Active\s+:(?P<admin>\S+)\n"
r"\s*Name\s+:(?P<description>.+)?\n"
r"\s*PVID\s+:(?P<untag>\d+)\s+Flow Control\s+:\S+$",
re.MULTILINE,
)
rx_vlan_stack = re.compile(r"^(?P<interface>\d+)\s+(?P<role>\S+).+$", re.MULTILINE)
rx_vlan_stack_global = re.compile(r"^Operation:\s+(?P<status>active)$")
rx_vlan_ports = re.compile(
r"\s+\d+\s+(?P<vid>\d+)\s+\S+\s+\S+\s+"
r"Untagged\s+:(?P<untagged>[0-9,\-]*)."
r"\s+Tagged\s+:(?P<tagged>[0-9,\-]*)",
re.MULTILINE | re.DOTALL,
)
def execute(self):
# Get portchannels
portchannel_members = []
portchannels = self.scripts.get_portchannel()
for p in portchannels:
portchannel_members += p["members"]
        # Get interfaces' status
interface_status = {}
for s in self.scripts.get_interface_status():
interface_status[s["interface"]] = s["status"]
# Get 802.1ad status if supported
vlan_stack_status = {}
try:
cmd = self.cli("show vlan-stacking")
match = self.rx_vlan_stack_global.match(cmd)
if match:
for match in self.rx_vlan_stack.finditer(cmd):
if match.group("role").lower() == "tunnel":
vlan_stack_status[int(match.group("interface"))] = True
except self.CLISyntaxError:
pass
# Get ports in vlans
vlan_ports = []
for match in self.rx_vlan_ports.finditer(self.cli("show vlan")):
vlan_ports += [
{
"vid": match.group("vid"),
"tagged": self.expand_rangelist(match.group("tagged")),
"untagged": self.expand_rangelist(match.group("untagged")),
}
]
# Make a list of tags for each port
port_tags = {}
for port in interface_status:
tags = []
untag = []
for vlan in vlan_ports:
if int(port) in vlan["tagged"]:
tags += [vlan["vid"]]
elif int(port) in vlan["untagged"]:
untag = vlan["vid"]
port_tags[port] = {"tags": tags, "untag": untag}
# Get switchport data and overall result
r = []
swp = {}
for match in self.rx_portinfo.finditer(self.cli("show interface config *")):
name = match.group("interface")
swp = {
"status": interface_status.get(name, False),
"802.1Q Enabled": len(port_tags[name].get("tags", None)) > 0,
"802.1ad Tunnel": vlan_stack_status.get(int(name), False),
"tagged": port_tags[name]["tags"],
}
if match.group("description"):
swp["description"] = match.group("description")
if port_tags[name]["untag"]:
swp["untagged"] = port_tags[name]["untag"]
if name not in portchannel_members:
swp["interface"] = name
swp["members"] = []
r += [swp]
else:
for p in portchannels:
if name in p["members"]:
swp["interface"] = p["interface"]
swp["members"] = p["members"]
r += [swp]
st = False
for m in p["members"]:
st = interface_status.get(name, False)
if st:
break
swp["status"] = st
portchannels.remove(p)
break
return r
| 3,157 | 730 | 23 |
52bf3108b8781fe487dcaa7f471df575358ab42e | 157 | py | Python | practice/practice52.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | [
"MIT"
] | null | null | null | practice/practice52.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | [
"MIT"
] | null | null | null | practice/practice52.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
a = 077
b = a | 3
print 'a | b = %d' % b
b |= 7
print 'a|b=%d' % b
| 15.7 | 26 | 0.426752 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
a = 077
b = a | 3
print 'a | b = %d' % b
b |= 7
print 'a|b=%d' % b
| 0 | 0 | 0 |
628473232007d9a6c34401980c9d1a671512e47e | 8,572 | py | Python | postprocess/ensemble_masks.py | shawnau/DataScienceBowl2018 | 3c6f0f26dd86b71aad55fca52314e6432d0b3a82 | [
"MIT"
] | null | null | null | postprocess/ensemble_masks.py | shawnau/DataScienceBowl2018 | 3c6f0f26dd86b71aad55fca52314e6432d0b3a82 | [
"MIT"
] | null | null | null | postprocess/ensemble_masks.py | shawnau/DataScienceBowl2018 | 3c6f0f26dd86b71aad55fca52314e6432d0b3a82 | [
"MIT"
] | null | null | null | import sys, operator
sys.path.append('..')
from scipy.ndimage.morphology import binary_fill_holes
from configuration import Configuration
from dataset.reader import *
from dataset.folder import TrainFolder
from utility.draw import *
from net.lib.nms.cython_nms.cython_nms import cython_nms
from net.layer.mask import instance_to_binary
from multiprocessing import Pool
from numba import jit
def clustering_masks(instances, iou_threshold=0.5, overlap_threshold=0.8):
"""
:param instances: numpy array of instances
:return:
"""
clusters = []
num = instances.shape[0]
instance_sizes = []
for i in range(num):
instance = instances[i]
instance_sizes.append((i, instance.sum()))
sorted_sizes = sorted(instance_sizes, key=lambda tup: tup[1], reverse=True)
for i, instance_size in sorted_sizes:
instance = instances[i]
added_to_group = False
for c in clusters:
cluster_size = c.core_size
inter = np.logical_and(c.core, instance).sum()
union = np.logical_or(c.core, instance).sum()
iou = inter / (union + 1e-12)
if ((inter / cluster_size) > overlap_threshold) or \
((inter / instance_size) > overlap_threshold) or \
(iou > iou_threshold):
c.add(instance)
added_to_group = True
if added_to_group == False:
c = MaskCluster()
c.add(instance)
clusters.append(c)
return clusters
@jit
def filter_small(proposals, instances, area_threshold=36):
"""
:param instances: numpy array of 0/1 instance in one image
:param area_threshold: do filter if max mask / min mask > this
:param min_threshold: min area ratio
:return: filtered instances
"""
H, W = instances[0].shape[:2]
keep_instances = []
keep_proposals = []
max_size = 0
min_size = H*W
for i in range(instances.shape[0]):
size = instances[i].sum()
if size > max_size:
max_size = size
        if size < min_size:
min_size = size
size_threshold = max_size / area_threshold
if (max_size / min_size) > area_threshold:
for i in range(instances.shape[0]):
size = instances[i].sum()
if size > size_threshold:
#print('%d: %d'%(i, size), ' > ', size_threshold, 'append')
keep_instances.append(instances[i])
keep_proposals.append(proposals[i])
else:
pass
#print('%d: %d'%(i, size), ' < ', size_threshold, 'exclude')
else:
keep_instances = instances
keep_proposals = proposals
keep_proposals = np.array(keep_proposals)
keep_instances = np.array(keep_instances)
return keep_proposals, keep_instances
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
ensemble_masks(multiprocess=False)
print('\nsucess!') | 34.425703 | 112 | 0.625758 | import sys, operator
sys.path.append('..')
from scipy.ndimage.morphology import binary_fill_holes
from configuration import Configuration
from dataset.reader import *
from dataset.folder import TrainFolder
from utility.draw import *
from net.lib.nms.cython_nms.cython_nms import cython_nms
from net.layer.mask import instance_to_binary
from multiprocessing import Pool
from numba import jit
class MaskCluster:
def __init__(self):
super(MaskCluster, self).__init__()
self.members = []
self.core = None
self.core_size = None
def add(self, instance, type='union'):
"""
:param instance: numpy array of one instance
:param type:
union: core is union
intersect: core is intersection
:return:
"""
if self.members == []:
self.members.append(instance)
self.core = instance
self.core_size = instance.sum()
else:
self.members.append(instance)
if type == 'union':
self.core = np.logical_or(self.core, instance)
elif type == 'intersect':
self.core = np.logical_and(self.core, instance)
else:
raise NotImplementedError
self.core_size = self.core.sum()
def clustering_masks(instances, iou_threshold=0.5, overlap_threshold=0.8):
"""
:param instances: numpy array of instances
:return:
"""
clusters = []
num = instances.shape[0]
instance_sizes = []
for i in range(num):
instance = instances[i]
instance_sizes.append((i, instance.sum()))
sorted_sizes = sorted(instance_sizes, key=lambda tup: tup[1], reverse=True)
for i, instance_size in sorted_sizes:
instance = instances[i]
added_to_group = False
for c in clusters:
cluster_size = c.core_size
inter = np.logical_and(c.core, instance).sum()
union = np.logical_or(c.core, instance).sum()
iou = inter / (union + 1e-12)
if ((inter / cluster_size) > overlap_threshold) or \
((inter / instance_size) > overlap_threshold) or \
(iou > iou_threshold):
c.add(instance)
added_to_group = True
if added_to_group == False:
c = MaskCluster()
c.add(instance)
clusters.append(c)
return clusters
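# --- Editor's illustrative sketch (not part of the original pipeline) ---
# Tiny synthetic check of clustering_masks above: two identical 4x4 masks should
# collapse into one cluster while a disjoint mask forms its own, giving 2 clusters.
# The arrays are made up purely for demonstration.
def _demo_clustering_masks():
    a = np.zeros((4, 4), np.float32)
    a[:2, :2] = 1
    b = a.copy()  # fully overlaps a, so it joins a's cluster
    c = np.zeros((4, 4), np.float32)
    c[2:, 2:] = 1  # disjoint from a and b, so it starts a new cluster
    return len(clustering_masks(np.stack([a, b, c])))  # expected: 2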
@jit
def fill_holes(instances):
for i in range(instances.shape[0]):
instances[i] = binary_fill_holes(instances[i]).astype(np.float32)
return instances
@jit
def filter_small(proposals, instances, area_threshold=36):
"""
:param instances: numpy array of 0/1 instance in one image
:param area_threshold: do filter if max mask / min mask > this
:param min_threshold: min area ratio
:return: filtered instances
"""
H, W = instances[0].shape[:2]
keep_instances = []
keep_proposals = []
max_size = 0
min_size = H*W
for i in range(instances.shape[0]):
size = instances[i].sum()
if size > max_size:
max_size = size
        if size < min_size:
min_size = size
size_threshold = max_size / area_threshold
if (max_size / min_size) > area_threshold:
for i in range(instances.shape[0]):
size = instances[i].sum()
if size > size_threshold:
#print('%d: %d'%(i, size), ' > ', size_threshold, 'append')
keep_instances.append(instances[i])
keep_proposals.append(proposals[i])
else:
pass
#print('%d: %d'%(i, size), ' < ', size_threshold, 'exclude')
else:
keep_instances = instances
keep_proposals = proposals
keep_proposals = np.array(keep_proposals)
keep_instances = np.array(keep_instances)
return keep_proposals, keep_instances
def ensemble_one_mask(packed):
cfg, folder_name, img_id = packed
ensemble_dirs = [os.path.join(folder_name, 'predict', 'mask_ensemble_' + e) for e in cfg.test_augment_names]
out_dir = os.path.join(folder_name, 'predict', 'ensemble_all')
folder, name = img_id.split('/')[-2:]
#if os.path.isfile(os.path.join(out_dir, 'ensemble_masks', '%s.npy' % name)):
# print('skip: ', name[:6])
# return
image = cv2.imread(os.path.join(cfg.data_dir, folder, 'images', '%s.png' % name), cv2.IMREAD_COLOR)
height, width = image.shape[:2]
instances = []
proposals = []
for t, dir in enumerate(ensemble_dirs):
instance_prob = np.load(os.path.join(dir, 'instances', '%s.npy' % name))
instance = (instance_prob > cfg.mask_test_mask_threshold).astype(np.float32)
proposal = np.load(os.path.join(dir, 'detections', '%s.npy' % name))
assert (len(proposal) == len(instance))
instances.append(instance)
proposals.append(proposal)
all_proposals = np.concatenate(proposals)
all_instances = np.concatenate(instances)
# filter small noises
all_proposals, all_instances = filter_small(all_proposals, all_instances)
# nms
rois = all_proposals[:, 1:6]
keep = cython_nms(rois, 0.5)
all_instances = all_instances[keep]
# fill holes
# all_instances = fill_holes(all_instances)
# mask cluster
clusters = clustering_masks(all_instances, iou_threshold=0.5)
# ensemble instance
ensemble_instances = [] # list of summed up instance clusters
ensemble_instance_edges = []
for c in clusters:
num_members = len(c.members)
ensemble_instance = np.zeros((height, width), np.float32) # summed up one cluster
ensemble_instance_edge = np.zeros((height, width), np.float32)
for j in range(num_members):
m = c.members[j] # members in the same cluster
kernel = np.ones((3, 3), np.float32)
me = m - cv2.erode(m, kernel)
md = m - cv2.dilate(m, kernel)
diff = (me - md) * m
ensemble_instance += m
ensemble_instance_edge += diff
# convert sum_up of one cluster to binary
binary_instance = ((ensemble_instance / num_members) > 0.2).astype(np.float32)
ensemble_instances.append(binary_instance)
ensemble_instance_edges.append(ensemble_instance_edge)
ensemble_instances = np.array(ensemble_instances)
ensemble_instance_edges = np.array(ensemble_instance_edges)
sum_instance = ensemble_instances.sum(axis=0)
sum_instance_edge = ensemble_instance_edges.sum(axis=0)
gray1 = (sum_instance / sum_instance.max() * 255).astype(np.uint8)
rgb1 = cv2.cvtColor(gray1, cv2.COLOR_GRAY2RGB)
gray2 = (sum_instance_edge / sum_instance_edge.max() * 255).astype(np.uint8)
rgb2 = cv2.cvtColor(gray2, cv2.COLOR_GRAY2RGB)
w, h, _ = rgb2.shape
m = rgb2 > 0
c = np.tile([0, 255, 0], [w, h, 1])
i = image * (1 - m) + c * m
all = np.hstack([image, i, rgb1])
# save as train data
# data = cv2.merge((image, gray1, gray2))
multi_mask = instance_to_multi_mask(ensemble_instances)
cv2.imwrite(os.path.join(out_dir, 'ensemble_data_overlays', '%s.png' % name), all)
# cv2.imwrite(os.path.join(out_dir, 'ensemble_data', '%s.png' % name), data)
# np.save(os.path.join(out_dir, 'ensemble_instances', '%s.npy' % name), ensemble_instances)
np.save(os.path.join(out_dir, 'ensemble_masks', '%s.npy' % name), multi_mask)
print('Done')
def ensemble_masks(multiprocess=True):
cfg = Configuration()
f_eval = TrainFolder(os.path.join(cfg.result_dir, cfg.model_name))
out_dir = os.path.join(f_eval.folder_name, 'predict', 'ensemble_all')
# setup ---------------------------------------
os.makedirs(out_dir + '/ensemble_data_overlays', exist_ok=True)
os.makedirs(out_dir + '/ensemble_data', exist_ok=True)
os.makedirs(out_dir + '/ensemble_masks', exist_ok=True)
split = cfg.valid_split # 'test_black_white_53'
ids = read_list_from_file(os.path.join(cfg.split_dir, split), comment='#')
if multiprocess:
pool = Pool()
pool.map(ensemble_one_mask, [(cfg, f_eval.folder_name, img_id) for img_id in ids])
else:
for i, packed in enumerate([(cfg, f_eval.folder_name, img_id) for img_id in ids]):
print('%04d'%i, end='')
ensemble_one_mask(packed)
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
ensemble_masks(multiprocess=False)
print('\nsucess!') | 4,671 | 767 | 91 |
f28bdca64ae4805bfca0002fe2ade27265ccf4c1 | 6,675 | py | Python | ssdpipelineClass.py | scrambleegg7/ssd_prescription | 37932d16d5b7a7741fcdf6afff5be0804ef958a8 | [
"MIT"
] | null | null | null | ssdpipelineClass.py | scrambleegg7/ssd_prescription | 37932d16d5b7a7741fcdf6afff5be0804ef958a8 | [
"MIT"
] | null | null | null | ssdpipelineClass.py | scrambleegg7/ssd_prescription | 37932d16d5b7a7741fcdf6afff5be0804ef958a8 | [
"MIT"
] | null | null | null | import cv2
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import matplotlib.image as mpimg
from ssd_k2 import SSD300
#
# for keras version 1.2
# from ssd import SSD300
#
from ssd_utils import BBoxUtility
plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'
np.set_printoptions(suppress=True)
from PIL import ImageEnhance
from PIL import Image as pil_image
from timeit import default_timer as timer
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
| 35.31746 | 238 | 0.578876 | import cv2
import keras
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.models import Model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
import tensorflow as tf
import matplotlib.image as mpimg
from ssd_k2 import SSD300
#
# for keras version 1.2
# from ssd import SSD300
#
from ssd_utils import BBoxUtility
plt.rcParams['figure.figsize'] = (8, 8)
plt.rcParams['image.interpolation'] = 'nearest'
np.set_printoptions(suppress=True)
from PIL import ImageEnhance
from PIL import Image as pil_image
from timeit import default_timer as timer
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
class SSDPipeline(object):
def __init__(self):
voc_classes = ['Aeroplane', 'Bicycle', 'Bird', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
'Dog', 'Horse','Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1
input_shape=(300, 300, 3)
self.model = SSD300(input_shape, num_classes=NUM_CLASSES)
weights_file = "./checkpoints/weights.10-2.85.hdf5"
#weights_file = "./checkpoints/weights.39-1.61_ubuntu.hdf5"
self.model.load_weights(weights_file, by_name=True)
self.bbox_util = BBoxUtility(NUM_CLASSES)
def loadImage(self,video_path):
vid = cv2.VideoCapture(video_path)
vidw = vid.get(3) # CV_CAP_PROP_FRAME_WIDTH
vidh = vid.get(4) # CV_CAP_PROP_FRAME_HEIGHT
print(vidw,vidh)
input_shape = (300,300,3)
vidar = vidw/vidh
#print(vidar)
return vidar
def setClassColors(self):
self.class_colors = []
self.class_names = ["background", "Prescription", "None", "title", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"];
NUM_CLASSES = len(self.class_names)
for i in range(0, NUM_CLASSES):
# This can probably be written in a more elegant manner
hue = 255*i/NUM_CLASSES
col = np.zeros((1,1,3)).astype("uint8")
col[0][0][0] = hue
col[0][0][1] = 128 # Saturation
col[0][0][2] = 255 # Value
cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))
self.class_colors.append(col)
def pipeline(self,orig_image):
start_frame = 0
        # This is a manual adjustment parameter.
        # For binary classification, set a threshold higher than 0.5.
conf_thresh = 0.50
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
vidh, vidw, _ = orig_image.shape
vidar = vidw/vidh
input_shape = (300,300,3)
display_shape = (600,600,3)
im_size = (input_shape[0], input_shape[1])
resized = cv2.resize(orig_image, im_size)
to_draw = cv2.resize(resized, (int(input_shape[0]*vidar), input_shape[1]))
#to_draw = cv2.resize(resized, (int(display_shape[0]*vidar), display_shape[1]))
#to_draw = orig_image.copy()
# Use model to predict
inputs = [image.img_to_array(resized)]
tmp_inp = np.array(inputs)
x = preprocess_input(tmp_inp)
y = self.model.predict(x)
#preds = model.predict(inputs, batch_size=1, verbose=1)
results = self.bbox_util.detection_out(y)
if len(results) > 0 and len(results[0]) > 0:
# Interpret output, only one frame is used
det_label = results[0][:, 0]
det_conf = results[0][:, 1]
det_xmin = results[0][:, 2]
det_ymin = results[0][:, 3]
det_xmax = results[0][:, 4]
det_ymax = results[0][:, 5]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
classes = []
probs = []
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * to_draw.shape[1]))
ymin = int(round(top_ymin[i] * to_draw.shape[0]))
xmax = int(round(top_xmax[i] * to_draw.shape[1]))
ymax = int(round(top_ymax[i] * to_draw.shape[0]))
# Draw the box on top of the to_draw image
class_num = int(top_label_indices[i])
                # Skip boxes wider than half of the frame width rather than drawing them.
if ( abs(xmax-xmin) > to_draw.shape[1] / 2. ):
continue
classes.append(self.class_names[class_num])
probs.append(top_conf[i])
cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax),
self.class_colors[class_num], 2)
text = self.class_names[class_num] + " " + ('%.2f' % top_conf[i])
text_top = (xmin, ymin-10)
text_bot = (xmin + 80, ymin + 5)
text_pos = (xmin + 5, ymin)
cv2.rectangle(to_draw, text_top, text_bot, self.class_colors[class_num], -1)
cv2.putText(to_draw, text, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)
# Calculate FPS
# This computes FPS for everything, not just the model's execution
# which may or may not be what you want
#curr_time = timer()
#exec_time = curr_time - prev_time
#prev_time = curr_time
#accum_time = accum_time + exec_time
#curr_fps = curr_fps + 1
#if accum_time > 1:
# accum_time = accum_time - 1
# fps = "FPS: " + str(curr_fps)
# curr_fps = 0
# Draw FPS in top left corner
#cv2.rectangle(to_draw, (0,0), (50, 17), (255,255,255), -1)
#cv2.putText(to_draw, fps, (3,10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0,0,0), 1)
#print("object NO:", i+1)
#print("rectangle info: ", coords)
return to_draw, classes, probs
| 5,705 | 5 | 140 |
5d5aaa78abda3a5dac6f04c7e1a655e11a29afae | 3,017 | py | Python | var/spack/repos/builtin/packages/libtree/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-11-16T02:42:57.000Z | 2019-06-06T19:18:50.000Z | var/spack/repos/builtin/packages/libtree/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 25 | 2021-02-08T14:39:48.000Z | 2022-03-21T18:37:29.000Z | var/spack/repos/builtin/packages/libtree/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 7 | 2018-09-13T18:04:56.000Z | 2020-03-18T20:52:06.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libtree(CMakePackage):
"""ldd as a tree with an option to bundle dependencies into a
single folder"""
homepage = "https://github.com/haampie/libtree"
git = "https://github.com/haampie/libtree.git"
url = "https://github.com/haampie/libtree/archive/refs/tags/v2.0.0.tar.gz"
maintainers = ['haampie']
version('master', branch='master')
version('2.0.0', sha256='099e85d8ba3c3d849ce05b8ba2791dd25cd042a813be947fb321b0676ef71883')
version('1.2.3', sha256='4a912cf97109219fe931942a30579336b6ab9865395447bd157bbfa74bf4e8cf')
version('1.2.2', sha256='4ccf09227609869b85a170550b636defcf0b0674ecb0785063b81785b1c29bdd')
version('1.2.1', sha256='26791c0f418b93d502879db0e1fd2fd3081b885ad87326611d992a5f8977a9b0')
version('1.2.0', sha256='3e74655f22b1dcc19e8a1b9e7796b8ad44bc37f29e9a99134119e8521e28be97')
version('1.1.4', sha256='38648f67c8fa72c3a4a3af2bb254b5fd6989c0f1362387ab298176db5cbbcc4e')
version('1.1.3', sha256='4c681d7b67ef3d62f95450fb7eb84e33ff10a3b9db1f7e195b965b2c3c58226b')
version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374')
version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4')
version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536')
version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45')
version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b')
variant('chrpath', default=False, description='Use chrpath for deployment')
variant('strip', default=False, description='Use binutils strip for deployment')
# header only dependencies
depends_on('cpp-termcolor', when='@2.0:', type='build')
depends_on('cxxopts', when='@2.0:', type='build')
depends_on('elfio', when='@2.0:', type='build')
# runtime deps
depends_on('chrpath', when='+chrpath', type='run')
depends_on('binutils', when='+strip', type='run')
# testing
depends_on('googletest', type='test')
| 45.029851 | 109 | 0.721246 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libtree(CMakePackage):
"""ldd as a tree with an option to bundle dependencies into a
single folder"""
homepage = "https://github.com/haampie/libtree"
git = "https://github.com/haampie/libtree.git"
url = "https://github.com/haampie/libtree/archive/refs/tags/v2.0.0.tar.gz"
maintainers = ['haampie']
version('master', branch='master')
version('2.0.0', sha256='099e85d8ba3c3d849ce05b8ba2791dd25cd042a813be947fb321b0676ef71883')
version('1.2.3', sha256='4a912cf97109219fe931942a30579336b6ab9865395447bd157bbfa74bf4e8cf')
version('1.2.2', sha256='4ccf09227609869b85a170550b636defcf0b0674ecb0785063b81785b1c29bdd')
version('1.2.1', sha256='26791c0f418b93d502879db0e1fd2fd3081b885ad87326611d992a5f8977a9b0')
version('1.2.0', sha256='3e74655f22b1dcc19e8a1b9e7796b8ad44bc37f29e9a99134119e8521e28be97')
version('1.1.4', sha256='38648f67c8fa72c3a4a3af2bb254b5fd6989c0f1362387ab298176db5cbbcc4e')
version('1.1.3', sha256='4c681d7b67ef3d62f95450fb7eb84e33ff10a3b9db1f7e195b965b2c3c58226b')
version('1.1.2', sha256='31641c6bf6c2980ffa7b4c57392460434f97ba66fe51fe6346867430b33a0374')
version('1.1.1', sha256='3e8543145a40a94e9e2ce9fed003d2bf68294e1fce9607028a286bc132e17dc4')
version('1.1.0', sha256='6cf36fb9a4c8c3af01855527d4931110732bb2d1c19be9334c689f1fd1c78536')
version('1.0.4', sha256='b15a54b6f388b8bd8636e288fcb581029f1e65353660387b0096a554ad8e9e45')
version('1.0.3', sha256='67ce886c191d50959a5727246cdb04af38872cd811c9ed4e3822f77a8f40b20b')
def url_for_version(self, version):
if version < Version("2.0.0"):
return "https://github.com/haampie/libtree/releases/download/v{0}/sources.tar.gz".format(version)
return "https://github.com/haampie/libtree/archive/refs/tags/v{0}.tar.gz".format(version)
variant('chrpath', default=False, description='Use chrpath for deployment')
variant('strip', default=False, description='Use binutils strip for deployment')
# header only dependencies
depends_on('cpp-termcolor', when='@2.0:', type='build')
depends_on('cxxopts', when='@2.0:', type='build')
depends_on('elfio', when='@2.0:', type='build')
# runtime deps
depends_on('chrpath', when='+chrpath', type='run')
depends_on('binutils', when='+strip', type='run')
# testing
depends_on('googletest', type='test')
def cmake_args(self):
tests_enabled = 'ON' if self.run_tests else 'OFF'
if self.spec.satisfies('@2.0:'):
tests_define = 'LIBTREE_BUILD_TESTS'
else:
tests_define = 'BUILD_TESTING'
return [
self.define(tests_define, tests_enabled)
]
def check(self):
with working_dir(self.build_directory):
ctest('--output-on-failure')
| 632 | 0 | 81 |
903f4d9031aff626c4315c70f8e2f674feff8b97 | 3,316 | py | Python | postgres/postgres.py | p-wu-214/NewsSummary | 67b3a42f69c79dadbc319473999bdc77b8e3647e | [
"MIT"
] | null | null | null | postgres/postgres.py | p-wu-214/NewsSummary | 67b3a42f69c79dadbc319473999bdc77b8e3647e | [
"MIT"
] | null | null | null | postgres/postgres.py | p-wu-214/NewsSummary | 67b3a42f69c79dadbc319473999bdc77b8e3647e | [
"MIT"
] | null | null | null | import psycopg2
# Re-downloading the models is a pain; a better approach may be to keep the models in RAM and transfer them back and forth from the video card. | 46.704225 | 155 | 0.579312 | import psycopg2
def model_name_to_db_column(model_name):
return model_name.replace('google/', '').replace('-', '_')
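# Editor's note (illustrative): model_name_to_db_column('google/pegasus-xsum') returns
# 'pegasus_xsum', matching the column naming convention used in summaries_to_db below.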
class PostGres:
def __init__(self):
self.connection = psycopg2.connect(user="patterson",
password="jeramywu0214",
host="localhost",
port="5432",
database="patterson")
self.cursor = self.connection.cursor()
    # Re-downloading the models is a pain; a better approach may be to keep the models in RAM and transfer them back and forth from the video card.
def summaries_to_db(self, summaries):
try:
records_to_insert = []
for article_id, summary in summaries.items():
records_to_insert.append((article_id, summary['google/pegasus-xsum'], summary['google/pegasus-newsroom'],
summary['google/pegasus-multi_news'], summary['google/pegasus-cnn_dailymail'],
summary['google/pegasus-large'], summary['google/pegasus-gigaword']))
sql = """
INSERT INTO summaries (article_id, pegasus_xsum, pegasus_newsroom, pegasus_multi_news,
pegasus_cnn_dailymail, pegasus_large, pegasus_gigaword) VALUES (%s, %s, %s, %s, %s, %s, %s);
"""
self.cursor.executemany(sql, records_to_insert)
self.connection.commit()
except (Exception, psycopg2.Error) as error:
if self.connection:
print("Failed to insert record into articles table", error)
def articles_to_db(self, articles):
try:
if articles is None:
return
sql = """INSERT INTO articles (date_published, title, url, language, content)
VALUES (%s,%s,%s,%s,%s);"""
records_to_insert = []
for article in articles:
if article['title'] is None or article['date_publish'] is None or article['maintext'] is None:
continue
title_len = len(article['title']) if len(article['title']) < 80 else 79
records_to_insert.append((article['date_publish'], article['title'][:title_len], article['url'], article['language'], article['maintext']))
self.cursor.executemany(sql, records_to_insert)
self.connection.commit()
except (Exception, psycopg2.Error) as error:
if self.connection:
print("Failed to insert record into articles table", error)
def get_articles_to_summarize(self):
query_filename = '../summarizer/get_articles_to_summarize.sql'
try:
query_file = open('{}'.format(query_filename))
query_as_string = query_file.read()
self.cursor.execute(query_as_string)
articles = self.cursor.fetchall()
return articles
except (Exception, psycopg2.Error) as error:
if self.connection:
print('Failed to execute query: {}'.format(query_filename), error)
def close_connection(self):
if self.connection:
self.cursor.close()
self.connection.close()
print("PostgreSQL connection is closed") | 2,984 | -6 | 179 |
8f64320db148dc392c0150c92254121a2a9d2158 | 44 | py | Python | users/urls.py | casol/Digital-Library | 2ba8586ac861ea1bbd291128ea6b9e0341cbdc15 | [
"MIT"
] | null | null | null | users/urls.py | casol/Digital-Library | 2ba8586ac861ea1bbd291128ea6b9e0341cbdc15 | [
"MIT"
] | 11 | 2020-06-05T22:23:17.000Z | 2022-03-12T00:03:36.000Z | users/urls.py | casol/Digital-Library | 2ba8586ac861ea1bbd291128ea6b9e0341cbdc15 | [
"MIT"
] | null | null | null | from django.urls import path, include # new
| 22 | 43 | 0.772727 | from django.urls import path, include # new
| 0 | 0 | 0 |
8b54be24e2e3815e5d119808a2523cd5ff0a4e7f | 1,819 | py | Python | tests/test_env_yaml_from_manifest.py | rigzba21/conda-vendor | be06c520af0a08f6dc58a6bdc46e760b04e55869 | [
"MIT"
] | 10 | 2021-08-25T14:31:01.000Z | 2021-12-27T19:46:38.000Z | tests/test_env_yaml_from_manifest.py | rigzba21/conda-vendor | be06c520af0a08f6dc58a6bdc46e760b04e55869 | [
"MIT"
] | 6 | 2021-09-22T21:08:51.000Z | 2022-03-31T17:28:41.000Z | tests/test_env_yaml_from_manifest.py | rigzba21/conda-vendor | be06c520af0a08f6dc58a6bdc46e760b04e55869 | [
"MIT"
] | 2 | 2021-10-06T13:28:48.000Z | 2022-01-27T16:47:30.000Z | # TODO: Utils ? like load so we don't duplicate code ?
from unittest import TestCase
from unittest.mock import patch
import pytest
from ruamel.yaml import YAML
from yaml import SafeLoader
from conda_vendor.env_yaml_from_manifest import YamlFromManifest
@pytest.fixture
| 36.38 | 80 | 0.772402 | # TODO: Utils ? like load so we don't duplicate code ?
from unittest import TestCase
from unittest.mock import patch
import pytest
from ruamel.yaml import YAML
from yaml import SafeLoader
from conda_vendor.env_yaml_from_manifest import YamlFromManifest
@pytest.fixture
def yml_man_fixture(tmp_path, get_path_location_for_manifest_fixture):
test_meta_manifest_path = get_path_location_for_manifest_fixture
return YamlFromManifest(
channel_root=tmp_path, meta_manifest_path=test_meta_manifest_path
)
def test_YamlFromManifest_init_(yml_man_fixture):
test_manifest_path = yml_man_fixture.meta_manifest_path
with open(test_manifest_path, "r") as f:
expected = YAML(typ="safe").load(f)
result = yml_man_fixture.meta_manifest
TestCase().assertDictEqual(expected, result)
def test_get_packages_from_manifest(yml_man_fixture):
expected_packages = ["brotlipy=0.7.0", "ensureconda=1.4.1"]
result_packages = yml_man_fixture.get_packages_from_manifest()
TestCase().assertListEqual(expected_packages, result_packages)
def test_get_local_channels_paths(tmp_path, yml_man_fixture):
test_channel_names = ["local_conda-forge", "local_main"]
expected_channels = [str(tmp_path / c) for c in test_channel_names]
result_channels = yml_man_fixture.get_local_channels_paths(tmp_path)
TestCase().assertCountEqual(expected_channels, result_channels)
def test_create_yaml(tmp_path, yml_man_fixture):
test_env_name = "test_env"
expected_env_yaml = {
"name": "test_env",
"channels": [f"{tmp_path}/local_main", f"{tmp_path}/local_conda-forge"],
"dependencies": ["brotlipy=0.7.0", "ensureconda=1.4.1"],
}
result = yml_man_fixture.create_yaml(tmp_path, test_env_name)
TestCase().assertDictEqual(expected_env_yaml, result)
| 1,428 | 0 | 114 |
e64f42e56034356f778ffe1a7cb05be62e8ad980 | 112 | py | Python | arquivos-py/2109/seis2109.py | byancasantos/exercicios-python | b7796b9e2e7c714ebc1dbe8ca31d37c4f40cb19c | [
"MIT"
] | null | null | null | arquivos-py/2109/seis2109.py | byancasantos/exercicios-python | b7796b9e2e7c714ebc1dbe8ca31d37c4f40cb19c | [
"MIT"
] | null | null | null | arquivos-py/2109/seis2109.py | byancasantos/exercicios-python | b7796b9e2e7c714ebc1dbe8ca31d37c4f40cb19c | [
"MIT"
] | null | null | null | cont=0
while cont<5:
print(cont)
cont=cont+1
else:
print('o loop while foi encerrado com sucesso.')
| 16 | 52 | 0.660714 | cont=0
while cont<5:
print(cont)
cont=cont+1
else:
print('o loop while foi encerrado com sucesso.')
| 0 | 0 | 0 |
9fed3a96c9c373a5bc11bfec86583807e2dc75ca | 64 | py | Python | spire/version.py | bugout-dev/spire | def55cb64fbd306ddde47067d8573cf4b234115c | [
"Apache-2.0"
] | 1 | 2021-12-01T14:18:12.000Z | 2021-12-01T14:18:12.000Z | spire/version.py | bugout-dev/spire | def55cb64fbd306ddde47067d8573cf4b234115c | [
"Apache-2.0"
] | 13 | 2021-07-13T19:19:22.000Z | 2022-01-14T16:33:42.000Z | spire/version.py | bugout-dev/spire | def55cb64fbd306ddde47067d8573cf4b234115c | [
"Apache-2.0"
] | null | null | null | """
Spire library and API version.
"""
SPIRE_VERSION = "0.4.0"
| 10.666667 | 30 | 0.640625 | """
Spire library and API version.
"""
SPIRE_VERSION = "0.4.0"
| 0 | 0 | 0 |
07ef4ed51a1c3b6623a2b51b57ad186416813ed1 | 9,706 | py | Python | archive/v0.0.1/0.5x/mdm_model.py | pw1316/ShuffleV2-Face-Alignment | 1b76e706cad7fce18f78f871a3a7e1ca6afef375 | [
"BSD-3-Clause"
] | 1 | 2019-02-11T06:00:23.000Z | 2019-02-11T06:00:23.000Z | archive/v0.0.1/0.5x/mdm_model.py | pw1316/mdm | 1b76e706cad7fce18f78f871a3a7e1ca6afef375 | [
"BSD-3-Clause"
] | null | null | null | archive/v0.0.1/0.5x/mdm_model.py | pw1316/mdm | 1b76e706cad7fce18f78f871a3a7e1ca6afef375 | [
"BSD-3-Clause"
] | null | null | null | import tensorflow as tf
import utils
| 44.935185 | 121 | 0.589429 | import tensorflow as tf
import utils
def align_reference_shape(reference_shape, reference_shape_bb, im, bb):
def norm(x):
return tf.sqrt(tf.reduce_sum(tf.square(x - tf.reduce_mean(x, 0))))
ratio = norm(bb) / norm(reference_shape_bb)
align_mean_shape = (reference_shape - tf.reduce_mean(reference_shape_bb, 0)) * ratio + tf.reduce_mean(bb, 0)
new_size = tf.to_int32(tf.to_float(tf.shape(im)[:2]) / ratio)
return tf.image.resize_bilinear(tf.expand_dims(im, 0), new_size)[0, :, :, :], align_mean_shape / ratio, ratio
def _conv2d(
inputs,
filters,
kernel_size,
strides=(1, 1),
activation=None,
use_bias=True,
use_bn=False,
training=False,
name='Convolution'
):
with tf.variable_scope(name, values=[inputs]):
inputs = tf.layers.conv2d(
inputs, filters, kernel_size, strides, padding='same', use_bias=use_bias, name='Conv2D'
)
if use_bn:
inputs = tf.layers.batch_normalization(inputs, training=training, name='BatchNorm')
if activation is not None:
inputs = activation(inputs)
return inputs
def _conv2d_dw(
inputs,
filters,
kernel_size,
strides=(1, 1),
activation=None,
use_bias=True,
use_bn=False,
training=False,
name='DepthWiseConvolution'
):
with tf.variable_scope(name, values=[inputs]):
layer = tf.keras.layers.DepthwiseConv2D(
kernel_size, strides,
padding='same', use_bias=use_bias, name='DWConv2D')
inputs = layer.apply(inputs)
if use_bn:
inputs = tf.layers.batch_normalization(inputs, training=training, name='BatchNorm1')
inputs = tf.layers.conv2d(inputs, filters, [1, 1], padding='same', use_bias=use_bias, name='Conv2D')
if use_bn:
inputs = tf.layers.batch_normalization(inputs, training=training, name='BatchNorm2')
if activation is not None:
inputs = activation(inputs)
return inputs
def _shuffle_block(
inputs,
in_filters,
out_filters,
kernel_size,
strides,
depth,
training=False,
name='ShuffleBlock'
):
with tf.variable_scope(name, values=[inputs]):
with tf.variable_scope('Unit0'):
left = _conv2d_dw(
inputs, out_filters // 2, kernel_size, strides,
activation=tf.nn.relu, use_bias=False, use_bn=True, training=training, name='Bypass'
)
right = _conv2d(
inputs, in_filters, [1, 1],
activation=tf.nn.relu, use_bias=False, use_bn=True, training=training, name='Convolution1x1'
)
right = _conv2d_dw(
right, out_filters // 2, kernel_size, strides,
activation=tf.nn.relu, use_bias=False, use_bn=True, training=training, name='DepthWiseConvolution3x3'
)
for i in range(1, depth):
with tf.variable_scope('Unit{}'.format(i)):
with tf.name_scope('ChannelShuffle'):
ll, lr = tf.split(left, [out_filters // 4, out_filters // 4], -1)
rl, rr = tf.split(right, [out_filters // 4, out_filters // 4], -1)
left = tf.concat([ll, rl], -1)
right = tf.concat([lr, rr], -1)
right = _conv2d(
right, out_filters // 2, [1, 1],
activation=tf.nn.relu, use_bias=False, use_bn=True, training=training, name='Convolution1x1'
)
right = _conv2d_dw(
right, out_filters // 2, kernel_size, [1, 1],
activation=tf.nn.relu, use_bias=False, use_bn=True, training=training, name='DepthWiseConvolution3x3'
)
return tf.concat([left, right], -1)
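# --- Editor's shape sketch (not part of the original model code) ---
# Assuming the TF 1.x graph mode used above, a 1x56x56x64 input through this block
# with out_filters=96 and stride 2 should come out as 1x28x28x96 (the left and right
# branches each contribute out_filters // 2 channels).
def _demo_shuffle_block_shape():
    x = tf.zeros([1, 56, 56, 64])
    y = _shuffle_block(x, 64, 96, [3, 3], [2, 2], 4, training=False, name='DemoShuffleBlock')
    return y.shape  # expected: (1, 28, 28, 96)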
class MDMModel:
def __init__(
self, images, shapes, mean_shape,
batch_size, num_patches, num_channels,
is_training=True
):
self.in_images = images
self.in_shapes = shapes
self.in_mean_shape = mean_shape
self.batch_size = batch_size
self.num_patches = num_patches
self.num_channels = num_channels
self.is_training = is_training
with tf.variable_scope('Network', values=[self.in_mean_shape]):
with tf.variable_scope('Initial'):
inputs = _conv2d(
self.in_images, 64, [3, 3],
activation=tf.nn.relu, use_bias=False, use_bn=True, training=self.is_training, name='Convolution'
)
inputs = tf.layers.max_pooling2d(inputs, [2, 2], [2, 2], name='MaxPooling')
inputs = _shuffle_block(
inputs, 64, 96, [3, 3], [2, 2], 4,
training=self.is_training, name='ShuffleBlock1'
)
inputs = _shuffle_block(
inputs, 96, 192, [3, 3], [2, 2], 8,
training=self.is_training, name='ShuffleBlock2'
)
inputs = _shuffle_block(
inputs, 192, 384, [3, 3], [2, 2], 4,
training=self.is_training, name='ShuffleBlock3'
)
with tf.variable_scope('Finalize'):
inputs = _conv2d(inputs, 1024, [1, 1], activation=tf.nn.relu, name='Convolution')
inputs = tf.layers.dropout(inputs, 0.2, training=self.is_training, name='Dropout')
inputs = tf.layers.average_pooling2d(inputs, [7, 7], [1, 1], name='AvgPooling')
with tf.variable_scope('Predict'):
inputs = _conv2d(inputs, 146, [1, 1], name='Convolution')
inputs = tf.reshape(inputs, [-1, 73, 2])
self.prediction = inputs + self.in_mean_shape
self.out_images, = tf.py_func(
utils.batch_draw_landmarks_discrete,
[self.in_images, self.in_shapes, self.prediction],
[tf.float32]
)
tf.summary.image('images', self.out_images, max_outputs=10)
# For tuning
with tf.gfile.GFile('graph.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='Original')
self.var_map = {}
for i in range(4):
self.map_shuffle_block(1, i)
for i in range(8):
self.map_shuffle_block(2, i)
for i in range(4):
self.map_shuffle_block(3, i)
self.var_map['Network/Finalize/Convolution/Conv2D/kernel:0'] = 'Original/Stage1/Conv5/weights:0'
self.var_map['Network/Finalize/Convolution/Conv2D/bias:0'] = 'Original/Stage1/Conv5/biases:0'
def map_conv2d_bn(self, src, dst):
self.var_map[dst + '/Conv2D/kernel:0'] = src + '/weights:0'
self.var_map[dst + '/BatchNorm/beta:0'] = src + '/BatchNorm/beta:0'
self.var_map[dst + '/BatchNorm/gamma:0'] = src + '/BatchNorm/Const:0'
self.var_map[dst + '/BatchNorm/moving_mean:0'] = src + '/BatchNorm/moving_mean:0'
self.var_map[dst + '/BatchNorm/moving_variance:0'] = src + '/BatchNorm/moving_variance:0'
def map_conv2d_dw_bn(self, src, dst):
self.var_map[dst + '/DWConv2D/depthwise_kernel:0'] = src + '/SeparableConv2d/depthwise_weights:0'
self.var_map[dst + '/BatchNorm1/beta:0'] = src + '/SeparableConv2d/BatchNorm/beta:0'
self.var_map[dst + '/BatchNorm1/gamma:0'] = src + '/SeparableConv2d/BatchNorm/Const:0'
self.var_map[dst + '/BatchNorm1/moving_mean:0'] = src + '/SeparableConv2d/BatchNorm/moving_mean:0'
self.var_map[dst + '/BatchNorm1/moving_variance:0'] = src + '/SeparableConv2d/BatchNorm/moving_variance:0'
self.var_map[dst + '/Conv2D/kernel:0'] = src + '/conv1x1_after/weights:0'
self.var_map[dst + '/BatchNorm2/beta:0'] = src + '/conv1x1_after/BatchNorm/beta:0'
self.var_map[dst + '/BatchNorm2/gamma:0'] = src + '/conv1x1_after/BatchNorm/Const:0'
self.var_map[dst + '/BatchNorm2/moving_mean:0'] = src + '/conv1x1_after/BatchNorm/moving_mean:0'
self.var_map[dst + '/BatchNorm2/moving_variance:0'] = src + '/conv1x1_after/BatchNorm/moving_variance:0'
def map_shuffle_block(self, sid, uid):
self.map_conv2d_bn(
'Original/Stage1/Stage{}/unit_{}/conv1x1_before'.format(sid + 1, uid + 1),
'Network/ShuffleBlock{}/Unit{}/Convolution1x1'.format(sid, uid)
)
self.map_conv2d_dw_bn(
'Original/Stage1/Stage{}/unit_{}'.format(sid + 1, uid + 1),
'Network/ShuffleBlock{}/Unit{}/DepthWiseConvolution3x3'.format(sid, uid)
)
if uid == 0:
self.map_conv2d_dw_bn(
'Original/Stage1/Stage{}/unit_{}/second_branch'.format(sid + 1, uid + 1),
'Network/ShuffleBlock{}/Unit{}/Bypass'.format(sid, uid)
)
def normalized_rmse(self, pred, gt_truth):
l, r = utils.norm_idx(self.num_patches)
assert (l is not None and r is not None)
norm = tf.sqrt(tf.reduce_sum(((gt_truth[:, l, :] - gt_truth[:, r, :]) ** 2), 1))
return tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(pred - gt_truth), 2)), 1) / (norm * self.num_patches)
def normalized_error(self, pred, gt_truth):
l, r = utils.norm_idx(self.num_patches)
assert (l is not None and r is not None)
norm = tf.sqrt(tf.reduce_sum(((gt_truth[:, l, :] - gt_truth[:, r, :]) ** 2), 1))
return tf.sqrt(tf.reduce_sum(tf.square(pred - gt_truth), 2)) / norm
def normalized_mean_error(self, n_error):
return tf.reduce_sum(n_error, 1) / self.num_patches
| 9,367 | -6 | 303 |
e81f8c1235c94ae563d3aeb7457fa6c02b3fd29a | 4,769 | py | Python | file_operations/file_methods.py | asiftandel96/WaferFaultDetectionProject | 4578e00c1b354c6dfcb9e218ce9bd1fe17e95cd1 | [
"MIT"
] | 1 | 2021-12-03T08:14:15.000Z | 2021-12-03T08:14:15.000Z | file_operations/file_methods.py | asiftandel96/WaferFaultDetectionProject | 4578e00c1b354c6dfcb9e218ce9bd1fe17e95cd1 | [
"MIT"
] | null | null | null | file_operations/file_methods.py | asiftandel96/WaferFaultDetectionProject | 4578e00c1b354c6dfcb9e218ce9bd1fe17e95cd1 | [
"MIT"
] | null | null | null | import pickle
import os
import shutil
class File_Operation:
"""This class shall be used to save the model after training and load the saved model for prediction."""
def save_model(self,model,filename):
"""
Method Name: save_model
Description: Save the model file to directory
Outcome: File gets saved
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the save_model method of the File_Operation class')
try:
            path = os.path.join(self.model_directory,filename) #create separate directory for each cluster
if os.path.isdir(path): #remove previously existing models for each clusters
shutil.rmtree(self.model_directory)
os.makedirs(path)
else:
os.makedirs(path) #
with open(path +'/' + filename+'.sav',
'wb') as f:
pickle.dump(model, f) # save the model to file
self.logger_object.log(self.file_object,
'Model File '+filename+' saved. Exited the save_model method of the Model_Finder class')
return 'success'
except Exception as e:
            self.logger_object.log(self.file_object,'Exception occurred in save_model method of the Model_Finder class. Exception message: ' + str(e))
self.logger_object.log(self.file_object,
'Model File '+filename+' could not be saved. Exited the save_model method of the Model_Finder class')
raise Exception()
def load_model(self,filename):
"""
Method Name: load_model
Description: load the model file to memory
Output: The Model file loaded in memory
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the load_model method of the File_Operation class')
try:
with open(self.model_directory + filename + '/' + filename + '.sav',
'rb') as f:
self.logger_object.log(self.file_object,
'Model File ' + filename + ' loaded. Exited the load_model method of the Model_Finder class')
return pickle.load(f)
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred in load_model method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
                                   'Model File ' + filename + ' could not be loaded. Exited the load_model method of the Model_Finder class')
raise Exception()
def find_correct_model_file(self,cluster_number):
"""
Method Name: find_correct_model_file
Description: Select the correct model based on cluster number
Output: The Model file
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the find_correct_model_file method of the File_Operation class')
try:
self.cluster_number= cluster_number
self.folder_name=self.model_directory
self.list_of_model_files = []
self.list_of_files = os.listdir(self.folder_name)
for self.file in self.list_of_files:
try:
if (self.file.index(str( self.cluster_number))!=-1):
self.model_name=self.file
except:
continue
self.model_name=self.model_name.split('.')[0]
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class.')
return self.model_name
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred in find_correct_model_file method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class with Failure')
raise Exception() | 51.27957 | 151 | 0.561963 | import pickle
import os
import shutil
class File_Operation:
"""This class shall be used to save the model after training and load the saved model for prediction."""
def __init__(self,file_object,logger_object):
self.file_object = file_object
self.logger_object = logger_object
self.model_directory='models/'
def save_model(self,model,filename):
"""
Method Name: save_model
Description: Save the model file to directory
Outcome: File gets saved
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the save_model method of the File_Operation class')
try:
            path = os.path.join(self.model_directory,filename) #create separate directory for each cluster
if os.path.isdir(path): #remove previously existing models for each clusters
shutil.rmtree(self.model_directory)
os.makedirs(path)
else:
os.makedirs(path) #
with open(path +'/' + filename+'.sav',
'wb') as f:
pickle.dump(model, f) # save the model to file
self.logger_object.log(self.file_object,
'Model File '+filename+' saved. Exited the save_model method of the Model_Finder class')
return 'success'
except Exception as e:
            self.logger_object.log(self.file_object,'Exception occurred in save_model method of the Model_Finder class. Exception message: ' + str(e))
self.logger_object.log(self.file_object,
'Model File '+filename+' could not be saved. Exited the save_model method of the Model_Finder class')
raise Exception()
def load_model(self,filename):
"""
Method Name: load_model
Description: load the model file to memory
Output: The Model file loaded in memory
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the load_model method of the File_Operation class')
try:
with open(self.model_directory + filename + '/' + filename + '.sav',
'rb') as f:
self.logger_object.log(self.file_object,
'Model File ' + filename + ' loaded. Exited the load_model method of the Model_Finder class')
return pickle.load(f)
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred in load_model method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
                                   'Model File ' + filename + ' could not be loaded. Exited the load_model method of the Model_Finder class')
raise Exception()
def find_correct_model_file(self,cluster_number):
"""
Method Name: find_correct_model_file
Description: Select the correct model based on cluster number
Output: The Model file
On Failure: Raise Exception
"""
self.logger_object.log(self.file_object, 'Entered the find_correct_model_file method of the File_Operation class')
try:
self.cluster_number= cluster_number
self.folder_name=self.model_directory
self.list_of_model_files = []
self.list_of_files = os.listdir(self.folder_name)
for self.file in self.list_of_files:
try:
if (self.file.index(str( self.cluster_number))!=-1):
self.model_name=self.file
except:
continue
self.model_name=self.model_name.split('.')[0]
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class.')
return self.model_name
except Exception as e:
self.logger_object.log(self.file_object,
                                   'Exception occurred in find_correct_model_file method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class with Failure')
raise Exception() | 148 | 0 | 27 |
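# --- Added usage sketch, not part of the repository file above ---
# File_Operation only needs a writable log handle plus a logger exposing
# log(file_object, message); the stub below is a hypothetical stand-in for the
# project's own logger, and the dict stands in for any picklable model.
class _StubLogger:
    """Minimal stand-in logger used only by this sketch."""
    def log(self, file_object, message):
        file_object.write(message + "\n")

def _file_operation_demo():
    """Save, locate and reload a picklable object for cluster 2."""
    with open("demo.log", "a") as log_file:
        ops = File_Operation(log_file, _StubLogger())
        ops.save_model({"weights": [1, 2, 3]}, "KMeans_2")  # -> models/KMeans_2/KMeans_2.sav
        name = ops.find_correct_model_file(2)               # -> "KMeans_2"
        assert ops.load_model(name) == {"weights": [1, 2, 3]}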
5c8e9b7b9fa7a13fbc7df412245423487884e294 | 424 | py | Python | roda_purchase/api/serializers.py | sanss021/roda_case | 1e8cf876ae6856615aea0a2f8946b8f542a42bc0 | [
"MIT"
] | null | null | null | roda_purchase/api/serializers.py | sanss021/roda_case | 1e8cf876ae6856615aea0a2f8946b8f542a42bc0 | [
"MIT"
] | null | null | null | roda_purchase/api/serializers.py | sanss021/roda_case | 1e8cf876ae6856615aea0a2f8946b8f542a42bc0 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from roda_purchase import models
'''This is where we transform data into a format that can be stored or transmitted''' | 28.266667 | 90 | 0.738208 | from rest_framework import serializers
from roda_purchase import models
class PurchaseSerializer(serializers.ModelSerializer):
class Meta:
model = models.Purchase
fields = '__all__'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = models.Product
fields = '__all__'
'''This is where we transform data into a format that can be stored or transmitted''' | 0 | 214 | 46
9afc46003dbb0c41cf0b8a68be575471e0c01980 | 925 | py | Python | setup.py | pptx704/torpedo | f35e66b571f07d5d3553434da25cb29e05a1a6fb | [
"MIT"
] | 16 | 2021-07-09T16:39:47.000Z | 2021-12-26T20:29:18.000Z | setup.py | pptx704/torpedo | f35e66b571f07d5d3553434da25cb29e05a1a6fb | [
"MIT"
] | null | null | null | setup.py | pptx704/torpedo | f35e66b571f07d5d3553434da25cb29e05a1a6fb | [
"MIT"
] | 2 | 2021-07-09T17:58:25.000Z | 2021-10-01T20:21:45.000Z | import setuptools
long_description = "Check full documentation [here](https://github.com/pptx704/torpedo)"
setuptools.setup(
name="mailtorpedo",
version="1.1.0",
author="Rafeed M. Bhuiyan",
author_email="rafeedm.bhuiyan@gmail.com",
description="A Python package for sending personalized emails using own SMTP server.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pptx704/torpedo",
project_urls={
"Bug Tracker": "https://github.com/pptx704/torpedo/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'beautifulsoup4',
'openpyxl'
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
) | 31.896552 | 90 | 0.661622 | import setuptools
long_description = "Check full documentation [here](https://github.com/pptx704/torpedo)"
setuptools.setup(
name="mailtorpedo",
version="1.1.0",
author="Rafeed M. Bhuiyan",
author_email="rafeedm.bhuiyan@gmail.com",
description="A Python package for sending personalized emails using own SMTP server.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/pptx704/torpedo",
project_urls={
"Bug Tracker": "https://github.com/pptx704/torpedo/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'beautifulsoup4',
'openpyxl'
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
) | 0 | 0 | 0 |
5f63848fe6683f638aa250ed3d73f59ef6dba171 | 1,171 | py | Python | oct/tools/rebuild_results.py | gregadc/oct | 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | [
"MIT"
] | 10 | 2015-01-06T11:59:05.000Z | 2016-03-25T15:19:55.000Z | oct/tools/rebuild_results.py | gregadc/oct | 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | [
"MIT"
] | 33 | 2016-04-17T09:57:38.000Z | 2020-09-25T12:40:35.000Z | oct/tools/rebuild_results.py | gregadc/oct | 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | [
"MIT"
] | 3 | 2016-12-14T14:38:04.000Z | 2019-11-15T21:57:21.000Z | import six
from oct.core.exceptions import OctConfigurationError
from oct.results.output import output
from oct.results.models import db, set_database
from oct.utilities.configuration import configure, get_db_uri
| 35.484848 | 118 | 0.730999 | import six
from oct.core.exceptions import OctConfigurationError
from oct.results.output import output
from oct.results.models import db, set_database
from oct.utilities.configuration import configure, get_db_uri
def rebuild(args):
config = configure(None, args.config_file)
if args.results_file is None:
db_uri = get_db_uri(config, args.results_dir)
else:
db_uri = args.results_file
if not db_uri:
raise OctConfigurationError("Bad database configured, if you use sqlite database use -f option")
set_database(db_uri, db, config)
output(args.results_dir, config)
def rebuild_results(sp):
if six.PY2:
parser = sp.add_parser('rebuild-results', help="Rebuild the html report from result dir")
else:
parser = sp.add_parser('rebuild-results', help="Rebuild the html report from result dir", aliases=['rebuild'])
parser.add_argument('results_dir', help="The directory containing the results")
parser.add_argument('config_file', help="The configuration file")
parser.add_argument('-f', '--results-file', help="The sqlite result file", default=None)
parser.set_defaults(func=rebuild)
| 909 | 0 | 46 |
e5bb9621225aaadac5ceca87a02f4a40632c3701 | 2,575 | py | Python | python/l0904.py | daidaifan/leetcode-problem-solver | 1793eada501a2a18d05f118a98ac52e2edd12ef8 | [
"MIT"
] | null | null | null | python/l0904.py | daidaifan/leetcode-problem-solver | 1793eada501a2a18d05f118a98ac52e2edd12ef8 | [
"MIT"
] | null | null | null | python/l0904.py | daidaifan/leetcode-problem-solver | 1793eada501a2a18d05f118a98ac52e2edd12ef8 | [
"MIT"
] | null | null | null | """
In a row of trees, the i-th tree produces fruit with type tree[i].
You start at any tree of your choice, then repeatedly perform the following steps:
Add one piece of fruit from this tree to your baskets. If you cannot, stop.
Move to the next tree to the right of the current tree. If there is no tree to the right, stop.
Note that you do not have any choice after the initial choice of starting tree: you must perform step 1, then step 2, then back to step 1, then step 2, and so on until you stop.
You have two baskets, and each basket can carry any quantity of fruit, but you want each basket to only carry one type of fruit each.
What is the total amount of fruit you can collect with this procedure?
Example 1:
Input: [1,2,1]
Output: 3
Explanation: We can collect [1,2,1].
Example 2:
Input: [0,1,2,2]
Output: 3
Explanation: We can collect [1,2,2].
If we started at the first tree, we would only collect [0, 1].
Example 3:
Input: [1,2,3,2,2]
Output: 4
Explanation: We can collect [2,3,2,2].
If we started at the first tree, we would only collect [1, 2].
Example 4:
Input: [3,3,3,1,2,1,1,2,3,3,4]
Output: 5
Explanation: We can collect [1,2,1,1,2].
If we started at the first tree or the eighth tree, we would only collect 4 fruits.
Note:
1 <= tree.length <= 40000
0 <= tree[i] < tree.length
If I start from a tree I can't stop and have to put the fruit in a basket, but I want basket to have only one type of fruit. It is not clear if one needs to stop after a 3rd type of fruit is encountered.
Since about 4 contests there is at least one question that is harder to understand than to solve. Is it too hard to have someone proofread it before posting?
"""
s = Solution()
print(s.totalFruit([1,2,1]))
print(s.totalFruit([0,1,2,2]))
print(s.totalFruit([1,2,3,2,2]))
print(s.totalFruit([3,3,3,1,2,1,1,2,3,3,4]))
| 31.024096 | 203 | 0.641165 | """
In a row of trees, the i-th tree produces fruit with type tree[i].
You start at any tree of your choice, then repeatedly perform the following steps:
Add one piece of fruit from this tree to your baskets. If you cannot, stop.
Move to the next tree to the right of the current tree. If there is no tree to the right, stop.
Note that you do not have any choice after the initial choice of starting tree: you must perform step 1, then step 2, then back to step 1, then step 2, and so on until you stop.
You have two baskets, and each basket can carry any quantity of fruit, but you want each basket to only carry one type of fruit each.
What is the total amount of fruit you can collect with this procedure?
Example 1:
Input: [1,2,1]
Output: 3
Explanation: We can collect [1,2,1].
Example 2:
Input: [0,1,2,2]
Output: 3
Explanation: We can collect [1,2,2].
If we started at the first tree, we would only collect [0, 1].
Example 3:
Input: [1,2,3,2,2]
Output: 4
Explanation: We can collect [2,3,2,2].
If we started at the first tree, we would only collect [1, 2].
Example 4:
Input: [3,3,3,1,2,1,1,2,3,3,4]
Output: 5
Explanation: We can collect [1,2,1,1,2].
If we started at the first tree or the eighth tree, we would only collect 4 fruits.
Note:
1 <= tree.length <= 40000
0 <= tree[i] < tree.length
If I start from a tree I can't stop and have to put the fruit in a basket, but I want basket to have only one type of fruit. It is not clear if one needs to stop after a 3rd type of fruit is encountered.
Since about 4 contests there is at least one question that is harder to understand than to solve. Is it too hard to have someone proofread it before posting?
"""
class Solution:
def totalFruit(self, tree):
"""
:type tree: List[int]
:rtype: int
"""
type2freq = {}
set_type = set()
head = tail = 0
max_collect = 0
while tail < len(tree):
t = tree[tail]
if t not in set_type:
set_type.add(t)
type2freq[t] = type2freq.get(t, 0) + 1
while len(set_type) > 2:
remove_t = tree[head]
type2freq[remove_t] -= 1
if type2freq[remove_t] == 0:
set_type.remove(remove_t)
head += 1
max_collect = max(max_collect, tail - head + 1)
tail += 1
return max_collect
s = Solution()
print(s.totalFruit([1,2,1]))
print(s.totalFruit([0,1,2,2]))
print(s.totalFruit([1,2,3,2,2]))
print(s.totalFruit([3,3,3,1,2,1,1,2,3,3,4]))
| 0 | 714 | 23 |
bd66867f86dfd9aa4e1aaffdb68dbb54cb15d46b | 12,454 | py | Python | testing/test_streaming.py | vincent-antaki/Neuraxle | cef1284a261010c655f8ef02b4fca5b8bb45850c | [
"Apache-2.0"
] | 1 | 2021-04-26T23:46:40.000Z | 2021-04-26T23:46:40.000Z | testing/test_streaming.py | Tubbz-alt/Neuraxle | 308f24248cdb242b7e2f6ec7c51daf2ee3e38834 | [
"Apache-2.0"
] | null | null | null | testing/test_streaming.py | Tubbz-alt/Neuraxle | 308f24248cdb242b7e2f6ec7c51daf2ee3e38834 | [
"Apache-2.0"
] | null | null | null | import time
import numpy as np
import pytest
from neuraxle.base import BaseStep, ExecutionContext
from neuraxle.data_container import DataContainer, AbsentValuesNullObject
from neuraxle.distributed.streaming import SequentialQueuedPipeline, ParallelQueuedFeatureUnion, QueueJoiner
from neuraxle.hyperparams.space import HyperparameterSamples
from neuraxle.pipeline import Pipeline
from neuraxle.steps.loop import ForEachDataInput
from neuraxle.steps.misc import FitTransformCallbackStep, Sleep
from neuraxle.steps.numpy import MultiplyByN
EXPECTED_OUTPUTS = np.array(range(100)) * 2 * 2 * 2 * 2
EXPECTED_OUTPUTS_PARALLEL = np.array((np.array(range(100)) * 2).tolist() * 4)
| 34.787709 | 112 | 0.689979 | import time
import numpy as np
import pytest
from neuraxle.base import BaseStep, ExecutionContext
from neuraxle.data_container import DataContainer, AbsentValuesNullObject
from neuraxle.distributed.streaming import SequentialQueuedPipeline, ParallelQueuedFeatureUnion, QueueJoiner
from neuraxle.hyperparams.space import HyperparameterSamples
from neuraxle.pipeline import Pipeline
from neuraxle.steps.loop import ForEachDataInput
from neuraxle.steps.misc import FitTransformCallbackStep, Sleep
from neuraxle.steps.numpy import MultiplyByN
EXPECTED_OUTPUTS = np.array(range(100)) * 2 * 2 * 2 * 2
EXPECTED_OUTPUTS_PARALLEL = np.array((np.array(range(100)) * 2).tolist() * 4)
def test_queued_pipeline_with_excluded_incomplete_batch():
p = SequentialQueuedPipeline([
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2)
], batch_size=10, include_incomplete_batch=False, n_workers_per_step=1, max_queue_size=5)
outputs = p.transform(list(range(15)))
assert np.array_equal(outputs, np.array(list(range(10))) * 2 * 2 * 2 * 2)
def test_queued_pipeline_with_included_incomplete_batch():
p = SequentialQueuedPipeline(
[
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2)
],
batch_size=10,
include_incomplete_batch=True,
default_value_data_inputs=AbsentValuesNullObject(),
default_value_expected_outputs=AbsentValuesNullObject(),
n_workers_per_step=1,
max_queue_size=5
)
outputs = p.transform(list(range(15)))
assert np.array_equal(outputs, np.array(list(range(15))) * 2 * 2 * 2 * 2)
def test_queued_pipeline_with_included_incomplete_batch_that_raises_an_exception():
with pytest.raises(AttributeError):
p = SequentialQueuedPipeline(
[
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2)
],
batch_size=10,
include_incomplete_batch=True,
default_value_data_inputs=None, # this will raise an exception in the worker
default_value_expected_outputs=None, # this will raise an exception in the worker
n_workers_per_step=1,
max_queue_size=5
)
p.transform(list(range(15)))
def test_queued_pipeline_with_step():
p = SequentialQueuedPipeline([
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2),
MultiplyByN(2)
], batch_size=10, n_workers_per_step=1, max_queue_size=5)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
def test_queued_pipeline_with_step_name_step():
p = SequentialQueuedPipeline([
('1', MultiplyByN(2)),
('2', MultiplyByN(2)),
('3', MultiplyByN(2)),
('4', MultiplyByN(2))
], batch_size=10, n_workers_per_step=1, max_queue_size=5)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
def test_queued_pipeline_with_n_workers_step():
p = SequentialQueuedPipeline([
(1, MultiplyByN(2)),
(1, MultiplyByN(2)),
(1, MultiplyByN(2)),
(1, MultiplyByN(2))
], batch_size=10, max_queue_size=5)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
def test_queued_pipeline_with_step_name_n_worker_max_queue_size():
p = SequentialQueuedPipeline([
('1', 1, 5, MultiplyByN(2)),
('2', 1, 5, MultiplyByN(2)),
('3', 1, 5, MultiplyByN(2)),
('4', 1, 5, MultiplyByN(2))
], batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
def test_queued_pipeline_with_step_name_n_worker_with_step_name_n_workers_and_default_max_queue_size():
p = SequentialQueuedPipeline([
('1', 1, MultiplyByN(2)),
('2', 1, MultiplyByN(2)),
('3', 1, MultiplyByN(2)),
('4', 1, MultiplyByN(2))
], max_queue_size=10, batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
def test_queued_pipeline_with_step_name_n_worker_with_default_n_workers_and_default_max_queue_size():
p = SequentialQueuedPipeline([
('1', MultiplyByN(2)),
('2', MultiplyByN(2)),
('3', MultiplyByN(2)),
('4', MultiplyByN(2))
], n_workers_per_step=1, max_queue_size=10, batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
def test_parallel_queued_pipeline_with_step_name_n_worker_max_queue_size():
p = ParallelQueuedFeatureUnion([
('1', 1, 5, MultiplyByN(2)),
('2', 1, 5, MultiplyByN(2)),
('3', 1, 5, MultiplyByN(2)),
('4', 1, 5, MultiplyByN(2))
], batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS_PARALLEL)
def test_parallel_queued_parallelize_correctly():
sleep_time = 0.001
p = SequentialQueuedPipeline([
('1', 4, 10, Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)])),
('2', 4, 10, Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)])),
('3', 4, 10, Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)])),
('4', 4, 10, Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)]))
], batch_size=10)
a = time.time()
outputs_streaming = p.transform(list(range(100)))
b = time.time()
time_queued_pipeline = b - a
p = Pipeline([
Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)]),
Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)]),
Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)]),
Pipeline([ForEachDataInput(Sleep(sleep_time=sleep_time)), MultiplyByN(2)])
])
a = time.time()
outputs_vanilla = p.transform(list(range(100)))
b = time.time()
time_vanilla_pipeline = b - a
assert time_queued_pipeline < time_vanilla_pipeline
assert np.array_equal(outputs_streaming, outputs_vanilla)
def test_parallel_queued_pipeline_with_step_name_n_worker_additional_arguments_max_queue_size():
n_workers = 4
worker_arguments = [('hyperparams', HyperparameterSamples({'multiply_by': 2})) for _ in range(n_workers)]
p = ParallelQueuedFeatureUnion([
('1', n_workers, worker_arguments, 5, MultiplyByN()),
], batch_size=10)
outputs = p.transform(list(range(100)))
expected = np.array(list(range(0, 200, 2)))
assert np.array_equal(outputs, expected)
def test_parallel_queued_pipeline_with_step_name_n_worker_additional_arguments():
n_workers = 4
worker_arguments = [('hyperparams', HyperparameterSamples({'multiply_by': 2})) for _ in range(n_workers)]
p = ParallelQueuedFeatureUnion([
('1', n_workers, worker_arguments, MultiplyByN()),
], batch_size=10, max_queue_size=5)
outputs = p.transform(list(range(100)))
expected = np.array(list(range(0, 200, 2)))
assert np.array_equal(outputs, expected)
def test_parallel_queued_pipeline_with_step_name_n_worker_with_step_name_n_workers_and_default_max_queue_size():
p = ParallelQueuedFeatureUnion([
('1', 1, MultiplyByN(2)),
('2', 1, MultiplyByN(2)),
('3', 1, MultiplyByN(2)),
('4', 1, MultiplyByN(2)),
], max_queue_size=10, batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS_PARALLEL)
def test_parallel_queued_pipeline_with_step_name_n_worker_with_default_n_workers_and_default_max_queue_size():
p = ParallelQueuedFeatureUnion([
('1', MultiplyByN(2)),
('2', MultiplyByN(2)),
('3', MultiplyByN(2)),
('4', MultiplyByN(2)),
], n_workers_per_step=1, max_queue_size=10, batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS_PARALLEL)
def test_parallel_queued_pipeline_step_name_n_worker_with_default_n_workers_and_default_max_queue_size():
p = ParallelQueuedFeatureUnion([
('1', MultiplyByN(2)),
('2', MultiplyByN(2)),
('3', MultiplyByN(2)),
('4', MultiplyByN(2)),
], n_workers_per_step=1, max_queue_size=10, batch_size=10)
outputs = p.transform(list(range(100)))
assert np.array_equal(outputs, EXPECTED_OUTPUTS_PARALLEL)
def test_queued_pipeline_saving(tmpdir):
# Given
p = ParallelQueuedFeatureUnion([
('1', FitTransformCallbackStep()),
('2', FitTransformCallbackStep()),
('3', FitTransformCallbackStep()),
('4', FitTransformCallbackStep()),
], n_workers_per_step=1, max_queue_size=10, batch_size=10)
# When
p, outputs = p.fit_transform(list(range(100)), list(range(100)))
p.save(ExecutionContext(tmpdir))
p.apply('clear_callbacks')
# Then
assert len(p[0].wrapped.transform_callback_function.data) == 0
assert len(p[0].wrapped.fit_callback_function.data) == 0
assert len(p[1].wrapped.transform_callback_function.data) == 0
assert len(p[1].wrapped.fit_callback_function.data) == 0
assert len(p[2].wrapped.transform_callback_function.data) == 0
assert len(p[2].wrapped.fit_callback_function.data) == 0
assert len(p[3].wrapped.transform_callback_function.data) == 0
assert len(p[3].wrapped.fit_callback_function.data) == 0
p = p.load(ExecutionContext(tmpdir))
assert len(p[0].wrapped.transform_callback_function.data) == 10
assert len(p[0].wrapped.fit_callback_function.data) == 10
assert len(p[1].wrapped.transform_callback_function.data) == 10
assert len(p[1].wrapped.fit_callback_function.data) == 10
assert len(p[2].wrapped.transform_callback_function.data) == 10
assert len(p[2].wrapped.fit_callback_function.data) == 10
assert len(p[3].wrapped.transform_callback_function.data) == 10
assert len(p[3].wrapped.fit_callback_function.data) == 10
def test_queued_pipeline_with_savers(tmpdir):
# Given
p = ParallelQueuedFeatureUnion([
('1', MultiplyByN(2)),
('2', MultiplyByN(2)),
('3', MultiplyByN(2)),
('4', MultiplyByN(2)),
], n_workers_per_step=1, max_queue_size=10, batch_size=10, use_savers=True, cache_folder=tmpdir)
# When
outputs = p.transform(list(range(100)))
# Then
assert np.array_equal(outputs, EXPECTED_OUTPUTS_PARALLEL)
class QueueJoinerForTest(QueueJoiner):
def __init__(self, batch_size):
super().__init__(batch_size)
self.called_queue_joiner = False
def join(self, original_data_container: DataContainer) -> DataContainer:
self.called_queue_joiner = True
super().join(original_data_container)
def test_sequential_queued_pipeline_should_fit_without_multiprocessing():
batch_size = 10
p = SequentialQueuedPipeline([
(1, FitTransformCallbackStep()),
(1, FitTransformCallbackStep()),
(1, FitTransformCallbackStep()),
(1, FitTransformCallbackStep())
], batch_size=batch_size, max_queue_size=5)
queue_joiner_for_test = QueueJoinerForTest(batch_size=batch_size)
p.steps[-1] = queue_joiner_for_test
p.steps_as_tuple[-1] = (p.steps_as_tuple[-1][0], queue_joiner_for_test)
p._refresh_steps()
p = p.fit(list(range(100)), list(range(100)))
assert not p[-1].called_queue_joiner
def test_sequential_queued_pipeline_should_fit_transform_without_multiprocessing():
batch_size = 10
p = SequentialQueuedPipeline([
(1, FitTransformCallbackStep(transform_function=lambda di: np.array(di) * 2)),
(1, FitTransformCallbackStep(transform_function=lambda di: np.array(di) * 2)),
(1, FitTransformCallbackStep(transform_function=lambda di: np.array(di) * 2)),
(1, FitTransformCallbackStep(transform_function=lambda di: np.array(di) * 2))
], batch_size=batch_size, max_queue_size=5)
queue_joiner_for_test = QueueJoinerForTest(batch_size=batch_size)
p.steps[-1] = queue_joiner_for_test
p.steps_as_tuple[-1] = (p.steps_as_tuple[-1][0], queue_joiner_for_test)
p._refresh_steps()
p, outputs = p.fit_transform(list(range(100)), list(range(100)))
assert not p[-1].called_queue_joiner
assert np.array_equal(outputs, EXPECTED_OUTPUTS)
| 11,205 | 17 | 536 |
5882a31d0860d31e880c999a1557a68bd379e23b | 630 | py | Python | atbash-cipher/atbash_cipher.py | gordonli08/Exercism-python | ff3d79ccc61bac7475641ee6c2f19a0367e3e254 | [
"MIT"
] | null | null | null | atbash-cipher/atbash_cipher.py | gordonli08/Exercism-python | ff3d79ccc61bac7475641ee6c2f19a0367e3e254 | [
"MIT"
] | null | null | null | atbash-cipher/atbash_cipher.py | gordonli08/Exercism-python | ff3d79ccc61bac7475641ee6c2f19a0367e3e254 | [
"MIT"
] | null | null | null | import string
cipheren = dict(zip(list(string.ascii_lowercase), string.ascii_lowercase[::-1]))
cipherde = dict(zip(string.ascii_lowercase[::-1], list(string.ascii_lowercase)))
| 37.058824 | 87 | 0.680952 | import string
cipheren = dict(zip(list(string.ascii_lowercase), string.ascii_lowercase[::-1]))
cipherde = dict(zip(string.ascii_lowercase[::-1], list(string.ascii_lowercase)))
def encode(plain_text):
stripped = [ch for ch in plain_text.lower() if ch.isalnum()]
stripped = [cipheren[ch] if ch.isalpha() else ch for ch in stripped]
length = len(stripped)
return " ".join(["".join(stripped[index:index+5]) for index in range(0,length, 5)])
def decode(ciphered_text):
ret = ""
for chunk in ciphered_text.split():
ret += "".join([cipherde[ch] if ch.isalpha() else ch for ch in chunk])
return ret
| 408 | 0 | 46 |
f119b86d9b0f095daf2dc2565973dd23411342c0 | 3,205 | py | Python | img.py | Naruki-Ichihara/portraitprinter | c3720d71cd9ac275dde90a7f5c5856b78326e397 | [
"MIT"
] | null | null | null | img.py | Naruki-Ichihara/portraitprinter | c3720d71cd9ac275dde90a7f5c5856b78326e397 | [
"MIT"
] | null | null | null | img.py | Naruki-Ichihara/portraitprinter | c3720d71cd9ac275dde90a7f5c5856b78326e397 | [
"MIT"
] | null | null | null | from dolfin import *
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as s
eps = 0.3
g = 0.0
w0 = 0.8
dt = 1e-5
gamma = 1.0
file = File('./test.pvd')
eps = 0.3
g = 0.0
w0 = 0.8
dt = 0.01
gamma = 1.0
img = plt.imread("./img/newton.jpeg")/256
(Nx, Ny) = img.shape
print(img)
L = 100
x = np.linspace(0, L, Nx)
y = np.linspace(0, L, Ny)
f = s.interp2d(x, y, img)
mesh = RectangleMesh(Point((0, 0)), Point(L, L), Nx-1, Ny-1)
X = FunctionSpace(mesh, 'CG', 1)
rho = Function(X)
field = Field()
rho.interpolate(field)
Vh = FiniteElement('CG', mesh.ufl_cell(), 2)
ME = FunctionSpace(mesh, Vh*Vh)
X = VectorFunctionSpace(mesh, 'CG', 1)
Y = FunctionSpace(mesh, 'CG', 1)
R = FunctionSpace(refine(mesh), 'CG', 1)
theta = Function(X)
vectorField = VectorField()
theta.interpolate(vectorField)
k = sqrt((np.pi*rho/w0)**2 - rho**2*gamma**2)
Uh = Function(ME)
Uh_0 = Function(ME)
U = TrialFunction(ME)
phi, psi = TestFunctions(ME)
initial = GaussianRandomField()
Uh.interpolate(initial)
Uh_0.interpolate(initial)
uh, qh = split(Uh)
uh_0, qh_0 = split(Uh_0)
qh_mid = 0.5*qh + 0.5*qh_0
dPhi = G1(uh, uh_0)*uh + G2(uh_0)
L0 = (uh-uh_0)*phi*dx + dt*A(qh_mid, phi, k) - dt*B(uh, phi, theta, gamma) + dt*dPhi*phi*dx
L1 = qh*psi*dx - A(uh, psi, k)
L = L0 + L1
a = derivative(L, Uh, U)
SH_problem = Problem(a, L)
solver = CustomSolver()
t = 0
T = 5
file = File('./result/newton.pvd')
while (t < T):
print('time: {}'.format(t))
t += dt
Uh_0.vector()[:] = Uh.vector()
solver.solve(SH_problem, Uh.vector())
sol_c = project(Uh.split()[0], Y)
sol_r = Function(R)
LagrangeInterpolator.interpolate(sol_r, sol_c)
sol_r.rename('field', 'label')
file << (sol_r, t) | 23.394161 | 97 | 0.614041 | from dolfin import *
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as s
eps = 0.3
g = 0.0
w0 = 0.8
dt = 1e-5
gamma = 1.0
file = File('./test.pvd')
eps = 0.3
g = 0.0
w0 = 0.8
dt = 0.01
gamma = 1.0
img = plt.imread("./img/newton.jpeg")/256
(Nx, Ny) = img.shape
print(img)
L = 100
x = np.linspace(0, L, Nx)
y = np.linspace(0, L, Ny)
f = s.interp2d(x, y, img)
mesh = RectangleMesh(Point((0, 0)), Point(L, L), Nx-1, Ny-1)
class Field(UserExpression):
def eval(self, value, x):
value[0] = f(x[0], x[1])
def value_shape(self):
return ()
X = FunctionSpace(mesh, 'CG', 1)
rho = Function(X)
field = Field()
rho.interpolate(field)
Vh = FiniteElement('CG', mesh.ufl_cell(), 2)
ME = FunctionSpace(mesh, Vh*Vh)
X = VectorFunctionSpace(mesh, 'CG', 1)
Y = FunctionSpace(mesh, 'CG', 1)
R = FunctionSpace(refine(mesh), 'CG', 1)
class GaussianRandomField(UserExpression):
def eval(self, val, x):
val[0] = np.sqrt(0.001)*np.random.randn()
val[1] = np.sqrt(0.001)*np.random.randn()
def value_shape(self):
return (2,)
class VectorField(UserExpression):
def eval(self, val, x):
val[0] = 0
val[1] = 1
def value_shape(self):
return (2,)
class Problem(NonlinearProblem):
def __init__(self, a, L):
NonlinearProblem.__init__(self)
self.L = L
self.a = a
def F(self, b, x):
assemble(self.L, tensor=b)
def J(self, A, x):
assemble(self.a, tensor=A)
class CustomSolver(NewtonSolver):
def __init__(self):
NewtonSolver.__init__(self, MPI.comm_world, PETScKrylovSolver(), PETScFactory.instance())
def solver_setup(self, A, P, problem, iteration):
self.linear_solver().set_operator(A)
PETScOptions.set("ksp_type", "gmres")
PETScOptions.set("ksp_monitor")
PETScOptions.set("pc_type", "ilu")
self.linear_solver().set_from_options()
def G1(w, v):
return -eps/2 - g/3*(w+v) + 1/4*(w**2+w*v+v**2)
def G2(w):
return -eps/2*w - g/3*w**2 + 1/4*w**3
def A(w, v, k):
return (dot(grad(w), grad(v)) - k**2*w*v)*dx
def B(w, v, theta, gamma):
D = outer(theta, theta)
return 2*gamma**2*dot(grad(w), dot(D, grad(v)))*dx
theta = Function(X)
vectorField = VectorField()
theta.interpolate(vectorField)
k = sqrt((np.pi*rho/w0)**2 - rho**2*gamma**2)
Uh = Function(ME)
Uh_0 = Function(ME)
U = TrialFunction(ME)
phi, psi = TestFunctions(ME)
initial = GaussianRandomField()
Uh.interpolate(initial)
Uh_0.interpolate(initial)
uh, qh = split(Uh)
uh_0, qh_0 = split(Uh_0)
qh_mid = 0.5*qh + 0.5*qh_0
dPhi = G1(uh, uh_0)*uh + G2(uh_0)
L0 = (uh-uh_0)*phi*dx + dt*A(qh_mid, phi, k) - dt*B(uh, phi, theta, gamma) + dt*dPhi*phi*dx
L1 = qh*psi*dx - A(uh, psi, k)
L = L0 + L1
a = derivative(L, Uh, U)
SH_problem = Problem(a, L)
solver = CustomSolver()
t = 0
T = 5
file = File('./result/newton.pvd')
while (t < T):
print('time: {}'.format(t))
t += dt
Uh_0.vector()[:] = Uh.vector()
solver.solve(SH_problem, Uh.vector())
sol_c = project(Uh.split()[0], Y)
sol_r = Function(R)
LagrangeInterpolator.interpolate(sol_r, sol_c)
sol_r.rename('field', 'label')
file << (sol_r, t) | 938 | 64 | 494 |
cd63c3b8b53f2f236bd67362d698bb3d491c86cb | 189 | py | Python | kw6/__init__.py | Aiwizo/kw6 | c079b2a57a07d9c56fefd90de65cf3f41d81e70f | [
"Apache-2.0"
] | 3 | 2020-09-18T22:31:28.000Z | 2020-09-21T09:38:42.000Z | kw6/__init__.py | Aiwizo/kw6 | c079b2a57a07d9c56fefd90de65cf3f41d81e70f | [
"Apache-2.0"
] | 2 | 2021-06-15T09:29:17.000Z | 2021-07-15T10:41:00.000Z | kw6/__init__.py | Aiwizo/kw6 | c079b2a57a07d9c56fefd90de65cf3f41d81e70f | [
"Apache-2.0"
] | null | null | null | from kw6.reader import Reader
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution("kw6").version
except DistributionNotFound:
pass
| 23.625 | 64 | 0.814815 | from kw6.reader import Reader
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution("kw6").version
except DistributionNotFound:
pass
| 0 | 0 | 0 |
8c88cd80bf347df648b894c24aa16ce5b07cee02 | 11,125 | py | Python | figures.py | mattjj/dirichlet-truncated-multinomial | 1b239e8960042bbf0f063d4f9e0400dfa6cb648b | [
"MIT"
] | 2 | 2020-07-24T20:50:29.000Z | 2020-07-26T08:30:42.000Z | figures.py | mattjj/dirichlet-truncated-multinomial | 1b239e8960042bbf0f063d4f9e0400dfa6cb648b | [
"MIT"
] | null | null | null | figures.py | mattjj/dirichlet-truncated-multinomial | 1b239e8960042bbf0f063d4f9e0400dfa6cb648b | [
"MIT"
] | null | null | null | from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
na = np.newaxis
from scipy.interpolate import griddata
import simplex, dirichlet, sampling, tests, density, timing
allfigfuncs = []
SAVING = True
# plt.interactive(False)
#################################
# Figure-Generating Functions #
#################################
allfigfuncs.append(prior_posterior_2D)
allfigfuncs.append(aux_posterior_2D)
allfigfuncs.append(Rhatp)
allfigfuncs.append(autocorrelation)
allfigfuncs.append(statistic_convergence)
###############
# Utilities #
###############
import os
def scoreatpercentile(data,per,axis):
'''
like the function in scipy.stats but with an axis argument, and works on
arrays.
'''
a = np.sort(data,axis=axis)
idx = per/100. * (data.shape[axis]-1)
if (idx % 1 == 0):
return a[[slice(None) if ii != axis else idx for ii in range(a.ndim)]]
else:
lowerweight = 1-(idx % 1)
upperweight = (idx % 1)
idx = int(np.floor(idx))
return lowerweight * a[[slice(None) if ii != axis else idx for ii in range(a.ndim)]] \
+ upperweight * a[[slice(None) if ii != axis else idx+1 for ii in range(a.ndim)]]
##########################
# Generate All Figures #
##########################
if __name__ == '__main__':
main()
| 39.874552 | 111 | 0.65609 | from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
na = np.newaxis
from scipy.interpolate import griddata
import simplex, dirichlet, sampling, tests, density, timing
allfigfuncs = []
SAVING = True
# plt.interactive(False)
#################################
# Figure-Generating Functions #
#################################
def prior_posterior_2D(meshsize=250,alpha=2.,data=np.array([[0,2,0],[0,0,0],[0,0,0]])):
assert data.shape == (3,3)
mesh3D = simplex.mesh(meshsize)
mesh2D = simplex.proj_to_2D(mesh3D) # use specialized b/c it plays nicer with triangulation algorithm
priorvals = np.exp(dirichlet.log_dirichlet_density(mesh3D,alpha))
posteriorvals_uncensored = np.exp(dirichlet.log_dirichlet_density(mesh3D,alpha,data=data.sum(0)))
temp = dirichlet.log_censored_dirichlet_density(mesh3D,alpha,data=data)
temp = np.exp(temp - temp.max())
posteriorvals_censored = temp/temp.sum() # direct discretized integration!
# used for grid interpolation
xi = np.linspace(mesh2D[:,0].min(), mesh2D[:,0].max(), 2000, endpoint=True)
yi = np.linspace(mesh2D[:,1].min(), mesh2D[:,1].max(), 2000, endpoint=True)
plt.figure(figsize=(8,8))
# use exactly one of the next two code lines!
# this one performs interpolation to get a rectangular-pixel grid, but
# produces a blurred image
plt.imshow(griddata((mesh2D[:,0],mesh2D[:,1]),priorvals,(xi[na,:],yi[:,na]),method='cubic'))
    # this one exactly represents the data by performing a Delaunay
# triangulation, but it must draw each triangular pixel individually,
# resulting in large files and slow draw times
# plt.tripcolor(mesh2D[:,0],mesh2D[:,1],priorvals) # exact triangles, no blurring
plt.axis('off')
save('../writeup/figures/dirichlet_prior_2D.pdf')
plt.figure(figsize=(8,8))
plt.imshow(griddata((mesh2D[:,0],mesh2D[:,1]),posteriorvals_uncensored,(xi[na,:],yi[:,na]),method='cubic'))
# plt.tripcolor(mesh2D[:,0],mesh2D[:,1],posteriorvals_uncensored)
plt.axis('off')
save('../writeup/figures/dirichlet_uncensored_posterior_2D.pdf')
plt.figure(figsize=(8,8))
plt.imshow(griddata((mesh2D[:,0],mesh2D[:,1]),posteriorvals_censored,(xi[na,:],yi[:,na]),method='cubic'))
# plt.tripcolor(mesh2D[:,0],mesh2D[:,1],posteriorvals_censored)
plt.axis('off')
save('../writeup/figures/dirichlet_censored_posterior_2D.pdf')
allfigfuncs.append(prior_posterior_2D)
def aux_posterior_2D(meshsize=250,alpha=2.,data=np.array([[0,2,0],[0,0,0],[0,0,0]])):
assert data.shape == (3,3)
mesh3D = simplex.mesh(meshsize)
mesh2D = simplex.proj_to_2D(mesh3D) # use specialized b/c it plays nicer with triangulation algorithm
# get samples
auxsamples = sampling.generate_pi_samples_withauxvars(alpha,10000,data)
# evaluate a kde based on the samples
aux_kde = density.kde(0.005,auxsamples[len(auxsamples)//20:])
aux_kde_vals = aux_kde(mesh3D)
### plot
# used for grid interpolation
xi = np.linspace(mesh2D[:,0].min(), mesh2D[:,0].max(), 2000, endpoint=True)
yi = np.linspace(mesh2D[:,1].min(), mesh2D[:,1].max(), 2000, endpoint=True)
plt.figure(figsize=(8,8))
plt.imshow(griddata((mesh2D[:,0],mesh2D[:,1]),aux_kde_vals,(xi[na,:],yi[:,na]),method='cubic'))
plt.axis('off')
save('../writeup/figures/dirichlet_censored_auxvar_posterior_2D.pdf')
allfigfuncs.append(aux_posterior_2D)
def Rhatp(nsamples=1000,ncomputepoints=25,nruns=50,ndims=10):
# get samples
data = np.zeros((ndims,ndims))
data[np.roll(np.arange(ndims//2),1),np.arange(ndims//2)] = 10 # fill half the dims with data
alpha = 2. # Dirichlet prior hyperparameter
beta = 160. # MH proposal distribution parameter, set so acceptance rate is about 0.24 with ndims=10
mhsamples, auxsamples = map(np.array,
sampling.load_or_run_samples(nruns,nsamples,alpha,beta,data))
# get Rhatps
aux_R = tests.get_Rhat(auxsamples,ncomputepoints=ncomputepoints)
mh_R = tests.get_Rhat(mhsamples,ncomputepoints=ncomputepoints)
### plot without time scaling
plt.figure()
# plt.subplot(2,1,1)
plt.plot(tests.chunk_indices(nsamples,ncomputepoints),aux_R,'bx-',label='Aux. Var. Sampler')
plt.plot(tests.chunk_indices(nsamples,ncomputepoints),mh_R,'gx-',label='MH Sampler')
plt.ylim(0,1.1*mh_R.max())
plt.xlim(0,1000)
plt.xlabel('sample index')
plt.legend()
plt.title('MH and Aux. Var. Samplers MSPRF vs Sample Indices')
# plt.subplot(2,1,2)
# plt.plot(tests.chunk_indices(nsamples,ncomputepoints),aux_R,'bx-')
# plt.ylim(0,1.1*aux_R.max())
# plt.xlim(0,closeindex)
# plt.xlabel('sample index')
# plt.title('Aux. Var. Sampler MSPRF vs Sample Indices')
save('../writeup/figures/MSPRF_sampleindexscaling_%dD.pdf' % ndims)
### plot with time scaling
plt.figure()
# compute time per sample
aux_timing = timing.get_auxvar_timing(data=data,alpha=alpha)
mh_timing = timing.get_mh_timing(data=data,beta=beta,alpha=alpha)
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints))*aux_timing,
aux_R,'bx-',label='Aux. Var. Sampler')
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints))*mh_timing,
mh_R,'gx-',label='MH Sampler')
plt.ylim(0,1.1*mh_R.max())
plt.xlim(0,mh_timing*nsamples)
plt.xlabel('seconds')
plt.legend()
plt.title('MH and Aux. Var. Sampler MSPRF vs Computation Time')
save('../writeup/figures/MSPRF_timescaling_%dD.pdf' % ndims)
allfigfuncs.append(Rhatp)
def autocorrelation(nsamples=1000,nruns=50,ndims=10):
# get samples
data = np.zeros((ndims,ndims))
data[np.roll(np.arange(ndims//2),1),np.arange(ndims//2)] = 10 # fill half the dims with data
alpha = 2. # Dirichlet prior hyperparameter
beta = 160. # MH proposal distribution parameter, set so acceptance rate is about 0.24 with ndims=10
mhsamples, auxsamples = map(np.array,
sampling.load_or_run_samples(nruns,nsamples,alpha,beta,data))
# compute autocorrelations
aux_corrs = tests.get_autocorr(auxsamples)
mh_corrs = tests.get_autocorr(mhsamples)
# plot
for component, ordinalname in zip([0,1],['first','second']):
plt.figure()
for corrs, samplername, color in zip([aux_corrs, mh_corrs],['Aux. Var.','MH'],['b','g']):
plt.plot(corrs.mean(0)[:,component],color+'-',label='%s Sampler' % samplername)
plt.plot(scoreatpercentile(corrs[...,component],per=10,axis=0),color+'--')
plt.plot(scoreatpercentile(corrs[...,component],per=90,axis=0),color+'--')
plt.legend()
plt.xlabel('lag')
plt.xlim(0,np.where(mh_corrs.mean(0)[:,component] < 0.01)[0][0])
plt.title('%s Component Autocorrelations' % ordinalname.capitalize())
save('../writeup/figures/autocorrelations_%dD_%s.pdf' % (ndims,ordinalname))
allfigfuncs.append(autocorrelation)
def statistic_convergence(nsamples=5000,ncomputepoints=50,nruns=50,ndims=10):
# get samples
data = np.zeros((ndims,ndims))
data[np.roll(np.arange(ndims//2),1),np.arange(ndims//2)] = 10 # fill half the dims with data
alpha = 2. # Dirichlet prior hyperparameter
beta = 160. # MH proposal distribution parameter, set so acceptance rate is about 0.24 with ndims=10
mhsamples, auxsamples = map(np.array,
sampling.load_or_run_samples(nruns,nsamples,alpha,beta,data))
# compute statistics
(mhmeans, mhvars), (mh_truemean, mh_truevar), (mh_mean_ds, mh_var_ds) = \
tests.get_statistic_convergence(mhsamples,ncomputepoints)
(auxmeans, auxvars), (aux_truemean, aux_truevar), (aux_mean_ds, aux_var_ds) = \
tests.get_statistic_convergence(auxsamples,ncomputepoints)
# check that the estimated "true" statistics agree
assert ((mh_truemean - aux_truemean)**2).sum() < 1e-5 \
and ((mh_truevar - aux_truevar)**2).sum() < 1e-5
# get time scaling
aux_timing = timing.get_auxvar_timing(data=data,alpha=alpha)
mh_timing = timing.get_mh_timing(data=data,beta=beta,alpha=alpha)
# plot
for samplerds, statisticname in zip(((aux_mean_ds,mh_mean_ds),(aux_var_ds,mh_var_ds)),('mean','variance')):
# sample index scaling
plt.figure()
for ds, samplername, color in zip(samplerds, ['Aux. Var.','MH'],['b','g']):
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints)),
ds.mean(0),color+'-',label='%s Sampler' % samplername)
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints)),
scoreatpercentile(ds,per=10,axis=0),color+'--')
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints)),
scoreatpercentile(ds,per=90,axis=0),color+'--')
plt.legend()
plt.xlabel('sample index')
plt.title('%s Convergence' % statisticname.capitalize())
save('../writeup/figures/statisticconvergence_%dD_%s.pdf' % (ndims,statisticname))
# time scaling
plt.figure()
for ds, samplername, color, timescaling in zip(samplerds, ['Aux. Var.','MH'],['b','g'],
(aux_timing,mh_timing)):
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints))*timescaling,
ds.mean(0),color+'-',label='%s Sampler' % samplername)
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints))*timescaling,
scoreatpercentile(ds,per=10,axis=0),color+'--')
plt.plot(np.array(tests.chunk_indices(nsamples,ncomputepoints))*timescaling,
scoreatpercentile(ds,per=90,axis=0),color+'--')
plt.legend()
plt.xlabel('seconds')
plt.title('%s Convergence' % statisticname.capitalize())
save('../writeup/figures/statisticconvergence_timescaling_%dD_%s.pdf' % (ndims,statisticname))
allfigfuncs.append(statistic_convergence)
###############
# Utilities #
###############
import os
def save(pathstr):
filepath = os.path.abspath(pathstr)
if SAVING:
if (not os.path.isfile(pathstr)) or raw_input('save over %s? [y/N] ' % filepath).lower() == 'y':
plt.savefig(filepath)
print 'saved %s' % filepath
return
print 'not saved'
def scoreatpercentile(data,per,axis):
'''
like the function in scipy.stats but with an axis argument, and works on
arrays.
'''
a = np.sort(data,axis=axis)
idx = per/100. * (data.shape[axis]-1)
if (idx % 1 == 0):
return a[[slice(None) if ii != axis else idx for ii in range(a.ndim)]]
else:
lowerweight = 1-(idx % 1)
upperweight = (idx % 1)
idx = int(np.floor(idx))
return lowerweight * a[[slice(None) if ii != axis else idx for ii in range(a.ndim)]] \
+ upperweight * a[[slice(None) if ii != axis else idx+1 for ii in range(a.ndim)]]
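# Added illustration, not in the original file: with per=50 on a 2x2 array the
# fractional index falls halfway between the two sorted rows, so the result
# interpolates to their average along the requested axis.
def _scoreatpercentile_demo():
    demo = np.array([[1., 2.], [3., 4.]])
    assert np.allclose(scoreatpercentile(demo, per=50, axis=0), [2., 3.])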
##########################
# Generate All Figures #
##########################
def main():
for f in allfigfuncs:
f()
plt.show()
if __name__ == '__main__':
main()
| 9,612 | 0 | 160 |
056005ab7f53638037cc2312b0332cac8f1847ec | 655 | py | Python | nad_logging_service/tests/logger/exception_test.py | KaiPrince/NAD-Logging-Service | f6310f459a1770a2a4664843cfdca5f05506cf66 | [
"MIT"
] | null | null | null | nad_logging_service/tests/logger/exception_test.py | KaiPrince/NAD-Logging-Service | f6310f459a1770a2a4664843cfdca5f05506cf66 | [
"MIT"
] | null | null | null | nad_logging_service/tests/logger/exception_test.py | KaiPrince/NAD-Logging-Service | f6310f459a1770a2a4664843cfdca5f05506cf66 | [
"MIT"
] | null | null | null | """
* Project Name: NAD-Logging-Service
* File Name: exception_test.py
* Programmer: Kai Prince
* Date: Sun, Nov 15, 2020
* Description: This file contains exception tests for the Logger app.
"""
import pytest
from .sample_data import exception_logs as sample_logs
@pytest.mark.parametrize("data", sample_logs)
def test_all_bad_tests_fail(client, data):
""" All these tests should fail """
# Arrange
# Act
response = client.post(
"/logger/log",
content_type="application/json",
json=data,
headers={"x-access-token": data["authToken"]},
)
# Assert
assert response.status_code != 200
| 21.129032 | 70 | 0.665649 | """
* Project Name: NAD-Logging-Service
* File Name: exception_test.py
* Programmer: Kai Prince
* Date: Sun, Nov 15, 2020
* Description: This file contains exception tests for the Logger app.
"""
import pytest
from .sample_data import exception_logs as sample_logs
@pytest.mark.parametrize("data", sample_logs)
def test_all_bad_tests_fail(client, data):
""" All these tests should fail """
# Arrange
# Act
response = client.post(
"/logger/log",
content_type="application/json",
json=data,
headers={"x-access-token": data["authToken"]},
)
# Assert
assert response.status_code != 200
| 0 | 0 | 0 |
16831f50a27decf1bee87652dafc18da6a10e031 | 1,604 | py | Python | zerver/lib/url_preview/oembed.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-03-19T00:52:48.000Z | 2020-03-19T00:52:48.000Z | zerver/lib/url_preview/oembed.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | null | null | null | zerver/lib/url_preview/oembed.py | pranayshahxyz/zulip | 3da483487af79fde9dce2d21124dfa39b94936a5 | [
"Apache-2.0"
] | 1 | 2020-07-06T11:43:28.000Z | 2020-07-06T11:43:28.000Z | from typing import Optional, Dict, Any
from pyoembed import oEmbed, PyOembedException
import json
| 32.734694 | 78 | 0.595387 | from typing import Optional, Dict, Any
from pyoembed import oEmbed, PyOembedException
import json
def get_oembed_data(url: str,
maxwidth: Optional[int]=640,
maxheight: Optional[int]=480) -> Optional[Dict[str, Any]]:
try:
data = oEmbed(url, maxwidth=maxwidth, maxheight=maxheight)
except (PyOembedException, json.decoder.JSONDecodeError):
return None
oembed_resource_type = data.get('type', '')
image = data.get('url', data.get('image'))
thumbnail = data.get('thumbnail_url')
html = data.pop('html', '')
if oembed_resource_type == 'photo' and image:
return dict(
oembed=True,
image=image,
type=oembed_resource_type,
title=data.get('title'),
description=data.get('description'),
)
if oembed_resource_type == 'video' and html and thumbnail:
return dict(
oembed=True,
image=thumbnail,
type=oembed_resource_type,
html=strip_cdata(html),
title=data.get('title'),
description=data.get('description'),
)
# Otherwise, start with just the embed type.
return dict(
type=oembed_resource_type,
title=data.get('title'),
description=data.get('description'),
)
def strip_cdata(html: str) -> str:
# Work around a bug in SoundCloud's XML generation:
# <html><![CDATA[<iframe ...></iframe>]]></html>
if html.startswith('<![CDATA[') and html.endswith(']]>'):
html = html[9:-3]
return html
| 1,460 | 0 | 46 |
e557939ae3675417623f858389f1ee28f2bb3c74 | 9,585 | py | Python | common/crypto_utils/avalon_crypto_utils/crypto_utility.py | karthikamurthy/avalon | 5aca5d6236f18ecf8c44ffe7ff9ada3640c1d6e2 | [
"Apache-2.0"
] | null | null | null | common/crypto_utils/avalon_crypto_utils/crypto_utility.py | karthikamurthy/avalon | 5aca5d6236f18ecf8c44ffe7ff9ada3640c1d6e2 | [
"Apache-2.0"
] | 1 | 2020-06-12T11:55:38.000Z | 2020-06-12T12:35:25.000Z | common/crypto_utils/avalon_crypto_utils/crypto_utility.py | pankajgoyal2/trusted-compute-framework | c060755995864f05516206e98c46e00e3826e425 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import utility.hex_utils as hex_utils
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_OAEP
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Hash import SHA256
from ecdsa import SigningKey, SECP256k1
import logging
# 96 bits of randomness is recommended to prevent birthday attacks
IV_SIZE = 12
# Key size for authenticated encryption is 256 bits and tag size is 128 bits
KEY_SIZE = 32
TAG_SIZE = 16
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def generate_signing_keys():
"""
Function to generate private key object
"""
return SigningKey.generate(curve=SECP256k1)
# -----------------------------------------------------------------------------
def get_verifying_key(private_key):
"""
Function to return serialized verifying key from the private key
"""
return private_key.get_verifying_key().to_pem().decode('ascii')
# -----------------------------------------------------------------
def generate_iv():
"""
Function to generate random initialization vector
"""
return get_random_bytes(IV_SIZE)
# -----------------------------------------------------------------
def generate_encrypted_key(key, encryption_key):
"""
Function to generate session key for the client
Parameters:
- encryption_key is a one-time encryption used to encrypt the passed key
- key that needs to be encrypted
"""
pub_enc_key = RSA.importKey(encryption_key)
# RSA encryption protocol according to PKCS#1 OAEP
cipher = PKCS1_OAEP.new(pub_enc_key)
return cipher.encrypt(key)
# -----------------------------------------------------------------
def generate_key():
"""
Function to generate symmetric key
"""
return get_random_bytes(KEY_SIZE)
# -----------------------------------------------------------------
def compute_data_hash(data):
'''
Computes SHA-256 hash of data
'''
data_hash = compute_message_hash(data.encode("UTF-8"))
return data_hash
# -----------------------------------------------------------------
def encrypt_data(data, encryption_key, iv=None):
"""
Function to encrypt data based on encryption key and iv
Parameters:
- data is each item in inData or outData part of workorder request
as per Trusted Compute EEA API 6.1.7 Work Order Data Formats
- encryption_key is the key used to encrypt the data
- iv is an initialization vector if required by the data encryption
algorithm.
      The default is all zeros. The iv must be a unique random number for every
encryption operation.
"""
# Generate a random iv
if iv is None:
iv = get_random_bytes(IV_SIZE)
generate_iv = True
iv_length = IV_SIZE
else:
generate_iv = False
iv_length = len(iv)
cipher = AES.new(encryption_key, AES.MODE_GCM, iv)
ciphered_data, tag = cipher.encrypt_and_digest(bytes(data))
if generate_iv:
# if iv passed by user is None, random iv generated
# above is prepended in encrypted data
# iv + Cipher + Tag
result = iv + ciphered_data + tag
else:
# Cipher + Tag
result = ciphered_data + tag
return result
# -----------------------------------------------------------------
def decrypt_data(encryption_key, data, iv=None):
"""
Function to decrypt the outData in the result
Parameters:
- encryption_key is the key used to decrypt the encrypted data of the
response.
- iv is an initialization vector if required by the data encryption
algorithm.
The default is all zeros.
- data is the parameter data in outData part of workorder request as
per Trusted Compute EEA API 6.1.7 Work Order Data Formats.
Returns decrypted data as a string
"""
if not data:
logger.debug("Outdata is empty, nothing to decrypt")
return data
    # if iv is None then it's assumed that a 12-byte iv is
# prepended in encrypted data
data_byte = base64_to_byte_array(data)
if iv is None:
iv_length = IV_SIZE
iv = data_byte[:iv_length]
data_contains_iv = True
else:
iv_length = len(iv)
data_contains_iv = False
cipher = AES.new(encryption_key, AES.MODE_GCM, iv)
# Split data into iv, tag and ciphered data
if data_contains_iv:
ciphertext_len = len(data_byte) - iv_length - TAG_SIZE
ciphered_data = data_byte[iv_length: iv_length + ciphertext_len]
tag = data_byte[-TAG_SIZE:]
else:
ciphertext_len = len(data_byte) - TAG_SIZE
ciphered_data = data_byte[: ciphertext_len]
tag = data_byte[-TAG_SIZE:]
result = cipher.decrypt_and_verify(ciphered_data, tag).decode("utf-8")
logger.info("Decryption result at client - %s", result)
return result
# -----------------------------------------------------------------------------
def decrypted_response(input_json, session_key, session_iv, data_key=None,
data_iv=None):
"""
    Function that iterates through the out data items and decrypts the data using
encryptedDataEncryptionKey and returns json object.
Parameters:
- input_json is a dictionary object containing the work order response
payload
as per Trusted Compute EEA API 6.1.2
- session_key is the key used to decrypt the encrypted data of the
response.
- session_iv is an initialization vector corresponding to session_key.
- data_key is a one time key generated by participant used to encrypt
work order indata
- data_iv is an initialization vector used along with data_key.
Default is all zeros.
returns out data json object in response after decrypting output data
"""
i = 0
do_decrypt = True
data_objects = input_json['outData']
for item in data_objects:
data = item['data'].encode('UTF-8')
iv = item['iv'].encode('UTF-8')
e_key = item['encryptedDataEncryptionKey'].encode('UTF-8')
if not e_key or (e_key == "null".encode('UTF-8')):
data_encryption_key_byte = session_key
iv = session_iv
elif e_key == "-".encode('UTF-8'):
do_decrypt = False
else:
data_encryption_key_byte = data_key
iv = data_iv
if not do_decrypt:
input_json['outData'][i]['data'] = data
logger.info(
"Work order response data not encrypted, data in plain - %s",
base64.b64decode(data).decode('UTF-8'))
else:
logger.debug("encrypted_key: %s", data_encryption_key_byte)
# Decrypt output data
data_in_plain = decrypt_data(
data_encryption_key_byte, item['data'], iv)
input_json['outData'][i]['data'] = data_in_plain
i = i + 1
return input_json['outData']
# -----------------------------------------------------------------------------
def verify_data_hash(msg, data_hash):
'''
Function to verify data hash
msg - Input text
data_hash - hash of the data in hex format
'''
verify_success = True
msg_hash = compute_data_hash(msg)
# Convert both hash hex string values to upper case
msg_hash_hex = hex_utils.byte_array_to_hex_str(msg_hash).upper()
data_hash = data_hash.upper()
if msg_hash_hex == data_hash:
logger.info("Computed hash of message matched with data hash")
else:
logger.error("Computed hash of message does not match with data hash")
verify_success = False
return verify_success
# -----------------------------------------------------------------------------
def strip_begin_end_public_key(key):
"""
Strips off newline chars, BEGIN PUBLIC KEY and END PUBLIC KEY.
"""
return key.replace("\n", "")\
.replace("-----BEGIN PUBLIC KEY-----", "").replace(
"-----END PUBLIC KEY-----", "")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
| 34.981752 | 79 | 0.592175 | # Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import utility.hex_utils as hex_utils
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_OAEP
from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Hash import SHA256
from ecdsa import SigningKey, SECP256k1
import logging
# 96 bits of randomness is recommended to prevent birthday attacks
IV_SIZE = 12
# Key size for authenticated encryption is 256 bits and tag size is 128 bits
KEY_SIZE = 32
TAG_SIZE = 16
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def generate_signing_keys():
"""
Function to generate private key object
"""
return SigningKey.generate(curve=SECP256k1)
# -----------------------------------------------------------------------------
def get_verifying_key(private_key):
"""
Function to return serialized verifying key from the private key
"""
return private_key.get_verifying_key().to_pem().decode('ascii')
# -----------------------------------------------------------------
def generate_iv():
"""
Function to generate random initialization vector
"""
return get_random_bytes(IV_SIZE)
# -----------------------------------------------------------------
def generate_encrypted_key(key, encryption_key):
"""
Function to generate session key for the client
Parameters:
- encryption_key is a one-time encryption used to encrypt the passed key
- key that needs to be encrypted
"""
pub_enc_key = RSA.importKey(encryption_key)
# RSA encryption protocol according to PKCS#1 OAEP
cipher = PKCS1_OAEP.new(pub_enc_key)
return cipher.encrypt(key)
# -----------------------------------------------------------------
def generate_key():
"""
Function to generate symmetric key
"""
return get_random_bytes(KEY_SIZE)
# -----------------------------------------------------------------
def compute_data_hash(data):
'''
Computes SHA-256 hash of data
'''
data_hash = compute_message_hash(data.encode("UTF-8"))
return data_hash
# -----------------------------------------------------------------
def encrypt_data(data, encryption_key, iv=None):
"""
Function to encrypt data based on encryption key and iv
Parameters:
- data is each item in inData or outData part of workorder request
as per Trusted Compute EEA API 6.1.7 Work Order Data Formats
- encryption_key is the key used to encrypt the data
- iv is an initialization vector if required by the data encryption
algorithm.
      The default is all zeros. The iv must be a unique random number for every
encryption operation.
"""
# Generate a random iv
if iv is None:
iv = get_random_bytes(IV_SIZE)
generate_iv = True
iv_length = IV_SIZE
else:
generate_iv = False
iv_length = len(iv)
cipher = AES.new(encryption_key, AES.MODE_GCM, iv)
ciphered_data, tag = cipher.encrypt_and_digest(bytes(data))
if generate_iv:
# if iv passed by user is None, random iv generated
# above is prepended in encrypted data
# iv + Cipher + Tag
result = iv + ciphered_data + tag
else:
# Cipher + Tag
result = ciphered_data + tag
return result
# -----------------------------------------------------------------
def decrypt_data(encryption_key, data, iv=None):
"""
Function to decrypt the outData in the result
Parameters:
- encryption_key is the key used to decrypt the encrypted data of the
response.
- iv is an initialization vector if required by the data encryption
algorithm.
The default is all zeros.
- data is the parameter data in outData part of workorder request as
per Trusted Compute EEA API 6.1.7 Work Order Data Formats.
Returns decrypted data as a string
"""
if not data:
logger.debug("Outdata is empty, nothing to decrypt")
return data
    # if iv is None then it's assumed that a 12-byte iv is
# prepended in encrypted data
data_byte = base64_to_byte_array(data)
if iv is None:
iv_length = IV_SIZE
iv = data_byte[:iv_length]
data_contains_iv = True
else:
iv_length = len(iv)
data_contains_iv = False
cipher = AES.new(encryption_key, AES.MODE_GCM, iv)
# Split data into iv, tag and ciphered data
if data_contains_iv:
ciphertext_len = len(data_byte) - iv_length - TAG_SIZE
ciphered_data = data_byte[iv_length: iv_length + ciphertext_len]
tag = data_byte[-TAG_SIZE:]
else:
ciphertext_len = len(data_byte) - TAG_SIZE
ciphered_data = data_byte[: ciphertext_len]
tag = data_byte[-TAG_SIZE:]
result = cipher.decrypt_and_verify(ciphered_data, tag).decode("utf-8")
logger.info("Decryption result at client - %s", result)
return result
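# Editor's sketch of a round trip with the helpers above (illustrative only, not part
# of the original module). encrypt_data() prepends the random 12-byte iv when no iv is
# passed, and decrypt_data() recovers it from the first IV_SIZE bytes:
#   key = generate_key()
#   encrypted = encrypt_data(b"hello world", key)          # iv + ciphertext + 16-byte tag
#   plain = decrypt_data(key, byte_array_to_base64(encrypted))
#   assert plain == "hello world"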
# -----------------------------------------------------------------------------
def decrypted_response(input_json, session_key, session_iv, data_key=None,
data_iv=None):
"""
    Function that iterates through the out data items and decrypts the data using
encryptedDataEncryptionKey and returns json object.
Parameters:
- input_json is a dictionary object containing the work order response
payload
as per Trusted Compute EEA API 6.1.2
- session_key is the key used to decrypt the encrypted data of the
response.
- session_iv is an initialization vector corresponding to session_key.
- data_key is a one time key generated by participant used to encrypt
work order indata
- data_iv is an initialization vector used along with data_key.
Default is all zeros.
returns out data json object in response after decrypting output data
"""
i = 0
do_decrypt = True
data_objects = input_json['outData']
for item in data_objects:
data = item['data'].encode('UTF-8')
iv = item['iv'].encode('UTF-8')
e_key = item['encryptedDataEncryptionKey'].encode('UTF-8')
if not e_key or (e_key == "null".encode('UTF-8')):
data_encryption_key_byte = session_key
iv = session_iv
elif e_key == "-".encode('UTF-8'):
do_decrypt = False
else:
data_encryption_key_byte = data_key
iv = data_iv
if not do_decrypt:
input_json['outData'][i]['data'] = data
logger.info(
"Work order response data not encrypted, data in plain - %s",
base64.b64decode(data).decode('UTF-8'))
else:
logger.debug("encrypted_key: %s", data_encryption_key_byte)
# Decrypt output data
data_in_plain = decrypt_data(
data_encryption_key_byte, item['data'], iv)
input_json['outData'][i]['data'] = data_in_plain
i = i + 1
return input_json['outData']
# -----------------------------------------------------------------------------
def verify_data_hash(msg, data_hash):
'''
Function to verify data hash
msg - Input text
data_hash - hash of the data in hex format
'''
verify_success = True
msg_hash = compute_data_hash(msg)
# Convert both hash hex string values to upper case
msg_hash_hex = hex_utils.byte_array_to_hex_str(msg_hash).upper()
data_hash = data_hash.upper()
if msg_hash_hex == data_hash:
logger.info("Computed hash of message matched with data hash")
else:
logger.error("Computed hash of message does not match with data hash")
verify_success = False
return verify_success
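# Editor's illustration (not part of the original module): a digest produced with
# compute_data_hash()/byte_array_to_hex() verifies against the same message:
#   digest_hex = byte_array_to_hex(compute_data_hash("hello"))
#   assert verify_data_hash("hello", digest_hex) is True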
# -----------------------------------------------------------------------------
def strip_begin_end_public_key(key):
"""
Strips off newline chars, BEGIN PUBLIC KEY and END PUBLIC KEY.
"""
return key.replace("\n", "")\
.replace("-----BEGIN PUBLIC KEY-----", "").replace(
"-----END PUBLIC KEY-----", "")
# -----------------------------------------------------------------------------
def byte_array_to_hex(byte_array):
hex_value = hex_utils.byte_array_to_hex_str(byte_array)
return hex_value.upper()
# -----------------------------------------------------------------------------
def compute_message_hash(message):
hash_obj = SHA256.new()
hash_obj.update(message)
return list(hash_obj.digest())
# -----------------------------------------------------------------------------
def base64_to_byte_array(b64_str):
b64_arr = bytearray(b64_str, 'utf-8')
b_arr = base64.b64decode(b64_arr)
return b_arr
# -----------------------------------------------------------------------------
def byte_array_to_base64(byte_array):
b_arr = bytearray(byte_array)
b64_arr = base64.b64encode(b_arr)
b64_str = str(b64_arr, 'utf-8')
return b64_str
| 460 | 0 | 88 |
594c3766a3d4935123686d1e634bf726cb601e9a | 1,020 | py | Python | mkt/zadmin/models.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/zadmin/models.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | mkt/zadmin/models.py | oremj/zamboni | a751dc6d22f7af947da327b0a091cbab0a999f49 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from addons.models import Category
import amo
import mkt
from mkt.webapps.models import Webapp
| 30.909091 | 74 | 0.681373 | from django.db import models
from addons.models import Category
import amo
import mkt
from mkt.webapps.models import Webapp
class FeaturedApp(amo.models.ModelBase):
app = models.ForeignKey(Webapp, null=False)
category = models.ForeignKey(Category, null=True)
is_sponsor = models.BooleanField(default=False)
start_date = models.DateField(null=True)
end_date = models.DateField(null=True)
class Meta:
db_table = 'zadmin_featuredapp'
class FeaturedAppRegion(amo.models.ModelBase):
featured_app = models.ForeignKey(FeaturedApp, null=False,
related_name='regions')
region = models.PositiveIntegerField(default=mkt.regions.WORLDWIDE.id,
db_index=True)
class FeaturedAppCarrier(amo.models.ModelBase):
featured_app = models.ForeignKey(FeaturedApp, null=False,
related_name='carriers')
carrier = models.CharField(max_length=255, db_index=True, null=False)
| 0 | 821 | 69 |
753ec3ebf2072f47ab0e4ff3a93374bb9a709c8e | 372 | py | Python | QR_code_generator/qrcode.py | Wish1991/Python | 11b407ea1c47f63cb07dbf8cb90df93d2190821f | [
"MIT"
] | 1 | 2022-03-23T23:04:02.000Z | 2022-03-23T23:04:02.000Z | QR_code_generator/qrcode.py | Wish1991/Python | 11b407ea1c47f63cb07dbf8cb90df93d2190821f | [
"MIT"
] | null | null | null | QR_code_generator/qrcode.py | Wish1991/Python | 11b407ea1c47f63cb07dbf8cb90df93d2190821f | [
"MIT"
] | 1 | 2022-03-23T23:04:40.000Z | 2022-03-23T23:04:40.000Z | import pyqrcode
import png
from pyqrcode import QRCode
# Text which is to be converted to QR code
print("Enter text to convert")
s = input(": ")
# Name of QR code png file
print("Enter image name to save")
n = input(": ")
# Adding extension as .png
d = n + ".png"
# Creating QR code
url = pyqrcode.create(s)
# Saving QR code as a png file
url.show()
url.png(d, scale=6)
| 20.666667 | 42 | 0.69086 | import pyqrcode
import png
from pyqrcode import QRCode
# Text which is to be converted to QR code
print("Enter text to convert")
s = input(": ")
# Name of QR code png file
print("Enter image name to save")
n = input(": ")
# Adding extension as .png
d = n + ".png"
# Creating QR code
url = pyqrcode.create(s)
# Saving QR code as a png file
url.show()
url.png(d, scale=6)
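# Editor's note (illustrative, not part of the original script): the same steps also
# work non-interactively, e.g.
#   pyqrcode.create("https://example.com").png("example.png", scale=6)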
| 0 | 0 | 0 |
fa39a00958f800d57a2c9d07e16cfacc1534663c | 413 | py | Python | advanced/decorator.py | djordjijeK/python-essentials | 51dac04ac44090ae01a65c5adb6c57f6f4d15189 | [
"Unlicense"
] | null | null | null | advanced/decorator.py | djordjijeK/python-essentials | 51dac04ac44090ae01a65c5adb6c57f6f4d15189 | [
"Unlicense"
] | null | null | null | advanced/decorator.py | djordjijeK/python-essentials | 51dac04ac44090ae01a65c5adb6c57f6f4d15189 | [
"Unlicense"
] | null | null | null | def log(funct):
"""
Logs the function.
"""
    def wrapper(*args, **kwargs):
        print('Calling', funct.__name__)
        return funct(*args, **kwargs)
    return wrapper
@log # decorator
def add(x, y):
    return x + y
def multiply(x, y):
    return x*y
if __name__ == '__main__':
    f = log(multiply)   # wrap and return the `multiply` function.
print(f(2,5))
print(add(10, 7))
| 17.208333 | 65 | 0.566586 | def log(funct):
"""
Logs the function.
"""
def wrapper(*args, **kwargs):
print('Calling', funct.__name__)
return funct(*args, **kwargs)
return wrapper
@log # decorator
def add(x, y):
return x + y
def multiply(x, y):
return x*y
if __name__ == '__main__':
    f = log(multiply)   # wrap and return the `multiply` function.
print(f(2,5))
print(add(10, 7))
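# Editor's note (illustrative, not part of the original file): running this module prints
#   Calling multiply
#   10
#   Calling add
#   17
# because f is the logged wrapper around multiply, and add was wrapped by the decorator.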
| 110 | 0 | 71 |
1506ccd3ee1896c80a197be88f1e591912327e05 | 14,929 | py | Python | lib_pypy/_ctypes/primitive.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | lib_pypy/_ctypes/primitive.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2022-02-22T00:59:49.000Z | 2022-02-22T00:59:49.000Z | lib_pypy/_ctypes/primitive.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2022-03-30T11:42:37.000Z | 2022-03-30T11:42:37.000Z | from _rawffi import alt as _ffi
import _rawffi
import weakref
import sys
SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?"
from _ctypes.basics import (
_CData, _CDataMeta, cdata_from_address, CArgObject, sizeof)
from _ctypes.builtin import ConvMode
from _ctypes.array import Array, byteorder
from _ctypes.pointer import _Pointer, as_ffi_pointer
class NULL(object):
    pass
NULL = NULL()
TP_TO_DEFAULT = {
'c': 0,
'u': 0,
'b': 0,
'B': 0,
'h': 0,
'H': 0,
'i': 0,
'I': 0,
'l': 0,
'L': 0,
'q': 0,
'Q': 0,
'f': 0.0,
'd': 0.0,
'g': 0.0,
'P': None,
# not part of struct
'O': NULL,
'z': None,
'Z': None,
'?': False,
'v': 0,
}
if sys.platform == 'win32':
TP_TO_DEFAULT['X'] = NULL
DEFAULT_VALUE = object()
class GlobalPyobjContainer(object):
    def __init__(self):
        self.objs = []
    def add(self, obj):
        num = len(self.objs)
        self.objs.append(weakref.ref(obj))
        return num
    def get(self, num):
        return self.objs[num]()
pyobj_container = GlobalPyobjContainer()
def from_param_char_p(cls, value):
"used by c_char_p and c_wchar_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, (Array, _Pointer)):
from ctypes import c_char, c_byte, c_wchar
if type(value)._type_ in [c_char, c_byte, c_wchar]:
return value
def from_param_void_p(cls, value):
"used by c_void_p subclasses"
from _ctypes.function import CFuncPtr
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, Array):
return value
if isinstance(value, (_Pointer, CFuncPtr)):
return cls.from_address(value._buffer.buffer)
if isinstance(value, int):
return cls(value)
FROM_PARAM_BY_TYPE = {
'z': from_param_char_p,
'Z': from_param_char_p,
'P': from_param_void_p,
}
CTYPES_TO_PEP3118_TABLE = {
'i': {2: 'h', 4: 'i', 8: 'q'},
'I': {2: 'H', 4: 'I', 8: 'Q'},
'l': {4: 'l', 8: 'q'},
'L': {4: 'L', 8: 'Q'},
'?': {1: '?', 2: 'h', 4: 'l', 8: 'q'},
}
| 33.699774 | 99 | 0.523947 | from _rawffi import alt as _ffi
import _rawffi
import weakref
import sys
SIMPLE_TYPE_CHARS = "cbBhHiIlLdfguzZqQPXOv?"
from _ctypes.basics import (
_CData, _CDataMeta, cdata_from_address, CArgObject, sizeof)
from _ctypes.builtin import ConvMode
from _ctypes.array import Array, byteorder
from _ctypes.pointer import _Pointer, as_ffi_pointer
class NULL(object):
pass
NULL = NULL()
TP_TO_DEFAULT = {
'c': 0,
'u': 0,
'b': 0,
'B': 0,
'h': 0,
'H': 0,
'i': 0,
'I': 0,
'l': 0,
'L': 0,
'q': 0,
'Q': 0,
'f': 0.0,
'd': 0.0,
'g': 0.0,
'P': None,
# not part of struct
'O': NULL,
'z': None,
'Z': None,
'?': False,
'v': 0,
}
if sys.platform == 'win32':
TP_TO_DEFAULT['X'] = NULL
DEFAULT_VALUE = object()
class GlobalPyobjContainer(object):
def __init__(self):
self.objs = []
def add(self, obj):
num = len(self.objs)
self.objs.append(weakref.ref(obj))
return num
def get(self, num):
return self.objs[num]()
pyobj_container = GlobalPyobjContainer()
def swap_bytes(value, sizeof, typeof, get_or_set):
def swap_2():
return ((value >> 8) & 0x00FF) | ((value << 8) & 0xFF00)
def swap_4():
return ((value & 0x000000FF) << 24) | \
((value & 0x0000FF00) << 8) | \
((value & 0x00FF0000) >> 8) | \
((value >> 24) & 0xFF)
def swap_8():
return ((value & 0x00000000000000FF) << 56) | \
((value & 0x000000000000FF00) << 40) | \
((value & 0x0000000000FF0000) << 24) | \
((value & 0x00000000FF000000) << 8) | \
((value & 0x000000FF00000000) >> 8) | \
((value & 0x0000FF0000000000) >> 24) | \
((value & 0x00FF000000000000) >> 40) | \
((value >> 56) & 0xFF)
def swap_double_float(typ):
from struct import pack, unpack
if get_or_set == 'set':
if sys.byteorder == 'little':
st = pack(''.join(['>', typ]), value)
else:
st = pack(''.join(['<', typ]), value)
return unpack(typ, st)[0]
else:
packed = pack(typ, value)
if sys.byteorder == 'little':
st = unpack(''.join(['>', typ]), packed)
else:
st = unpack(''.join(['<', typ]), packed)
return st[0]
if typeof in ('c_float', 'c_float_le', 'c_float_be'):
return swap_double_float('f')
elif typeof in ('c_double', 'c_double_le', 'c_double_be'):
return swap_double_float('d')
else:
if sizeof == 2:
return swap_2()
elif sizeof == 4:
return swap_4()
elif sizeof == 8:
return swap_8()
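# Editor's illustration (not part of the original source): for a 2-byte integer the
# helper above mirrors the byte order, e.g.
#   swap_bytes(0x1234, 2, 'c_short', 'get')  ->  0x3412
# Floats and doubles instead go through struct pack/unpack with an explicit endianness.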
def generic_xxx_p_from_param(cls, value):
if value is None:
return cls(None)
if isinstance(value, (str, bytes)):
return cls(value)
if isinstance(value, _SimpleCData) and \
type(value)._type_ in 'zZP':
return value
return None # eventually raise
def from_param_char_p(cls, value):
"used by c_char_p and c_wchar_p subclasses"
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, (Array, _Pointer)):
from ctypes import c_char, c_byte, c_wchar
if type(value)._type_ in [c_char, c_byte, c_wchar]:
return value
def from_param_void_p(cls, value):
"used by c_void_p subclasses"
from _ctypes.function import CFuncPtr
res = generic_xxx_p_from_param(cls, value)
if res is not None:
return res
if isinstance(value, Array):
return value
if isinstance(value, (_Pointer, CFuncPtr)):
return cls.from_address(value._buffer.buffer)
if isinstance(value, int):
return cls(value)
FROM_PARAM_BY_TYPE = {
'z': from_param_char_p,
'Z': from_param_char_p,
'P': from_param_void_p,
}
CTYPES_TO_PEP3118_TABLE = {
'i': {2: 'h', 4: 'i', 8: 'q'},
'I': {2: 'H', 4: 'I', 8: 'Q'},
'l': {4: 'l', 8: 'q'},
'L': {4: 'L', 8: 'Q'},
'?': {1: '?', 2: 'h', 4: 'l', 8: 'q'},
}
class SimpleType(_CDataMeta):
def __new__(self, name, bases, dct):
try:
tp = dct['_type_']
except KeyError:
for base in bases:
if hasattr(base, '_type_'):
tp = base._type_
break
else:
raise AttributeError("cannot find _type_ attribute")
if tp == 'abstract':
tp = 'i'
if (not isinstance(tp, str) or
not len(tp) == 1 or
tp not in SIMPLE_TYPE_CHARS):
raise ValueError('%s is not a type character' % (tp))
default = TP_TO_DEFAULT[tp]
ffiarray = _rawffi.Array(tp)
result = type.__new__(self, name, bases, dct)
result._ffiargshape_ = tp
result._ffishape_ = tp
result._fficompositesize_ = None
result._ffiarray = ffiarray
if tp in CTYPES_TO_PEP3118_TABLE:
pep_code = CTYPES_TO_PEP3118_TABLE[tp][_rawffi.sizeof(tp)]
else:
pep_code = tp
result._format = byteorder[sys.byteorder] + pep_code
if tp == 'z':
# c_char_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.charp2string(addr)
def _setvalue(self, value):
if isinstance(value, bytes):
#self._objects = value
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.char)
elif tp == 'Z':
# c_wchar_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
return _rawffi.wcharp2unicode(addr)
def _setvalue(self, value):
if isinstance(value, str):
#self._objects = value
array = _rawffi.Array('u')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
result._ffiargtype = _ffi.types.Pointer(_ffi.types.unichar)
elif tp == 'P':
# c_void_p
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
return addr
def _setvalue(self, value):
if isinstance(value, bytes):
array = _rawffi.Array('c')(len(value)+1, value)
self._objects = CArgObject(value, array)
value = array.buffer
elif value is None:
value = 0
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == 'u':
def _setvalue(self, val):
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'c':
def _setvalue(self, val):
if val:
self._buffer[0] = val
def _getvalue(self):
return self._buffer[0]
result.value = property(_getvalue, _setvalue)
elif tp == 'O':
def _setvalue(self, val):
num = pyobj_container.add(val)
self._buffer[0] = num
def _getvalue(self):
return pyobj_container.get(self._buffer[0])
result.value = property(_getvalue, _setvalue)
elif tp == 'X':
from ctypes import WinDLL
# Use WinDLL("oleaut32") instead of windll.oleaut32
# because the latter is a shared (cached) object; and
# other code may set their own restypes. We need out own
# restype here.
oleaut32 = WinDLL("oleaut32")
import ctypes
SysAllocStringLen = oleaut32.SysAllocStringLen
SysStringLen = oleaut32.SysStringLen
SysFreeString = oleaut32.SysFreeString
if ctypes.sizeof(ctypes.c_void_p) == 4:
ptype = ctypes.c_int
else:
ptype = ctypes.c_longlong
SysAllocStringLen.argtypes=[ptype, ctypes.c_uint]
SysAllocStringLen.restype = ptype
SysStringLen.argtypes=[ptype]
SysStringLen.restype = ctypes.c_uint
SysFreeString.argtypes=[ptype]
def _getvalue(self):
addr = self._buffer[0]
if addr == 0:
return None
else:
size = SysStringLen(addr)
return _rawffi.wcharp2rawunicode(addr, size)
def _setvalue(self, value):
if isinstance(value, (str, bytes)):
if isinstance(value, bytes):
value = value.decode(ConvMode.encoding,
ConvMode.errors)
array = _rawffi.Array('u')(len(value)+1, value)
value = SysAllocStringLen(array.buffer, len(value))
elif value is None:
value = 0
if self._buffer[0]:
SysFreeString(self._buffer[0])
self._buffer[0] = value
result.value = property(_getvalue, _setvalue)
elif tp == '?': # regular bool
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
self._buffer[0] = bool(value)
result.value = property(_getvalue, _setvalue)
elif tp == 'v': # VARIANT_BOOL type
def _getvalue(self):
return bool(self._buffer[0])
def _setvalue(self, value):
if value:
self._buffer[0] = -1 # VARIANT_TRUE
else:
self._buffer[0] = 0 # VARIANT_FALSE
result.value = property(_getvalue, _setvalue)
# make pointer-types compatible with the _ffi fast path
if result._is_pointer_like():
def _as_ffi_pointer_(self, ffitype):
return as_ffi_pointer(self, ffitype)
result._as_ffi_pointer_ = _as_ffi_pointer_
if name[-2:] != '_p' and name[-3:] not in ('_le', '_be') \
and name not in ('c_wchar', '_SimpleCData', 'c_longdouble', 'c_bool', 'py_object'):
if sys.byteorder == 'big':
name += '_le'
swapped = self.__new__(self, name, bases, dct)
result.__ctype_le__ = swapped
result.__ctype_be__ = result
swapped.__ctype_be__ = result
swapped.__ctype_le__ = swapped
swapped._format = '<' + pep_code
else:
name += '_be'
swapped = self.__new__(self, name, bases, dct)
result.__ctype_be__ = swapped
result.__ctype_le__ = result
swapped.__ctype_le__ = result
swapped.__ctype_be__ = swapped
swapped._format = '>' + pep_code
from _ctypes import sizeof
def _getval(self):
return swap_bytes(self._buffer[0], sizeof(self), name, 'get')
def _setval(self, value):
d = result()
d.value = value
self._buffer[0] = swap_bytes(d.value, sizeof(self), name, 'set')
swapped.value = property(_getval, _setval)
return result
from_address = cdata_from_address
def from_param(self, value):
if isinstance(value, self):
return value
if self._type_ == 'abstract':
raise TypeError('abstract class')
from_param_f = FROM_PARAM_BY_TYPE.get(self._type_)
if from_param_f:
res = from_param_f(self, value)
if res is not None:
return res
else:
try:
return self(value)
except (TypeError, ValueError):
pass
return super(SimpleType, self).from_param(value)
def _CData_output(self, resbuffer, base=None, index=-1):
output = super(SimpleType, self)._CData_output(resbuffer, base, index)
if self.__bases__[0] is _SimpleCData:
return output.value
return output
def _sizeofinstances(self):
return _rawffi.sizeof(self._type_)
def _alignmentofinstances(self):
return _rawffi.alignment(self._type_)
def _is_pointer_like(self):
return self._type_ in "sPzUZXO"
def _getformat(self):
return self._format
class _SimpleCData(_CData, metaclass=SimpleType):
_type_ = 'abstract'
def __init__(self, value=DEFAULT_VALUE):
if not hasattr(self, '_buffer'):
self._buffer = self._ffiarray(1, autofree=True)
if value is not DEFAULT_VALUE:
self.value = value
_init_no_arg_ = __init__
def _ensure_objects(self):
# No '_objects' is the common case for primitives. Examples
# where there is an _objects is if _type in 'zZP', or if
# self comes from 'from_buffer(buf)'. See module/test_lib_pypy/
# ctypes_test/test_buffers.py: test_from_buffer_keepalive.
return getattr(self, '_objects', None)
def _getvalue(self):
return self._buffer[0]
def _setvalue(self, value):
self._buffer[0] = value
value = property(_getvalue, _setvalue)
del _getvalue, _setvalue
def __ctypes_from_outparam__(self):
meta = type(type(self))
if issubclass(meta, SimpleType) and meta != SimpleType:
return self
return self.value
def __repr__(self):
if type(self).__bases__[0] is _SimpleCData:
return "%s(%r)" % (type(self).__name__, self.value)
else:
return "<%s object at 0x%x>" % (type(self).__name__,
id(self))
def __bool__(self):
return self._buffer[0] not in (0, b'\x00')
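# Editor's sketch (standard ctypes behaviour, not text from this file): the metaclass
# above generates byte-swapped siblings such as c_uint32.__ctype_be__; on a little-endian
# host, bytes(c_uint32.__ctype_be__(1)) holds b'\x00\x00\x00\x01' while the native type
# holds b'\x01\x00\x00\x00'.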
| 12,136 | 598 | 218 |
7291ac4c6404121ed688cb756ac95e7bc65b9fde | 7,645 | py | Python | tests/test_cassandra3.py | criteo-forks/testing.cassandra | f25bc20e0679849daae073c41281607e756e4d56 | [
"Apache-2.0"
] | 1 | 2021-02-18T04:58:04.000Z | 2021-02-18T04:58:04.000Z | tests/test_cassandra3.py | criteo-forks/testing.cassandra | f25bc20e0679849daae073c41281607e756e4d56 | [
"Apache-2.0"
] | null | null | null | tests/test_cassandra3.py | criteo-forks/testing.cassandra | f25bc20e0679849daae073c41281607e756e4d56 | [
"Apache-2.0"
] | 1 | 2018-07-10T13:06:46.000Z | 2018-07-10T13:06:46.000Z | # -*- coding: utf-8 -*-
import os
import sys
import signal
import cassandra.cluster as cassandra_cluster
import tempfile
import testing.cassandra3
from mock import patch
from time import sleep
from shutil import rmtree
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
| 34.75 | 114 | 0.650098 | # -*- coding: utf-8 -*-
import os
import sys
import signal
import cassandra.cluster as cassandra_cluster
import tempfile
import testing.cassandra3
from mock import patch
from time import sleep
from shutil import rmtree
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestCassandra(unittest.TestCase):
def setUp(self):
self.cluster = self.session = None
def tearDown(self):
self._shutdown()
def _connect(self, cassandra):
if not self.cluster:
self.cluster = cassandra_cluster.Cluster(
**cassandra.connection_params()
)
if not self.session:
self.session = self.cluster.connect()
return self.session
def _shutdown(self):
if self.session is not None:
self.session.shutdown()
self.session = None
if self.cluster is not None:
self.cluster.shutdown()
self.cluster = None
def test_basic(self):
# start cassandra server
cassandra = testing.cassandra3.Cassandra()
self.assertIsNotNone(cassandra)
self.assertEqual(cassandra.server_list(),
['127.0.0.1:%d' % cassandra.cassandra_yaml['rpc_port']])
# connect to cassandra
session = self._connect(cassandra)
self.assertIsNotNone(session)
# shutting down
pid = cassandra.server_pid
self.assertTrue(cassandra.is_alive())
cassandra.stop()
sleep(1)
self.assertFalse(cassandra.is_alive())
with self.assertRaises(OSError):
os.kill(pid, 0) # process is down
def test_stop(self):
# start cassandra server
cassandra = testing.cassandra3.Cassandra()
self.assertTrue(os.path.exists(cassandra.base_dir))
self.assertTrue(cassandra.is_alive())
# call stop()
cassandra.stop()
self.assertFalse(os.path.exists(cassandra.base_dir))
self.assertFalse(cassandra.is_alive())
# call stop() again
cassandra.stop()
self.assertFalse(os.path.exists(cassandra.base_dir))
self.assertFalse(cassandra.is_alive())
# delete cassandra object after stop()
del cassandra
def test_with_cassandra(self):
with testing.cassandra3.Cassandra() as cassandra:
self.assertIsNotNone(cassandra)
# connect to cassandra
conn = self._connect(cassandra)
self.assertIsNotNone(conn)
self.assertTrue(cassandra.is_alive())
self.assertFalse(cassandra.is_alive())
def test_multiple_cassandra(self):
cassandra1 = testing.cassandra3.Cassandra()
cassandra2 = testing.cassandra3.Cassandra()
self.assertNotEqual(cassandra1.server_pid, cassandra2.server_pid)
self.assertTrue(cassandra1.is_alive())
self.assertTrue(cassandra2.is_alive())
@patch("testing.cassandra3.find_cassandra_home")
def test_cassandra_is_not_found(self, find_cassandra_home):
find_cassandra_home.side_effect = RuntimeError
with self.assertRaises(RuntimeError):
testing.cassandra3.Cassandra()
def test_fork(self):
cassandra = testing.cassandra3.Cassandra()
if os.fork() == 0:
del cassandra
cassandra = None
            os.kill(os.getpid(), signal.SIGTERM)  # exit tests FORCIBLY
else:
os.wait()
sleep(1)
            self.assertTrue(cassandra.is_alive())  # process is alive (deleting the cassandra obj in the child has no effect)
def test_stop_on_child_process(self):
cassandra = testing.cassandra3.Cassandra()
if os.fork() == 0:
cassandra.stop()
os.kill(cassandra.server_pid, 0) # process is alive (calling stop() is ignored)
            os.kill(os.getpid(), signal.SIGTERM)  # exit tests FORCIBLY
else:
os.wait()
sleep(1)
self.assertTrue(cassandra.is_alive()) # process is alive (calling stop() in child is ignored)
def test_copy_data_from(self):
try:
tmpdir = tempfile.mkdtemp()
# create new database
with testing.cassandra3.Cassandra(base_dir=tmpdir) as cassandra:
session = self._connect(cassandra)
session.execute('CREATE TABLE IF NOT EXISTS test.hello (k INT, PRIMARY KEY(k))')
session.execute('INSERT INTO test.hello (k) VALUES(1)')
self._shutdown()
# flushing MemTable (commit log) to SSTable
with testing.cassandra3.Cassandra(base_dir=tmpdir) as cassandra:
pass
# create another database from first one
data_dir = os.path.join(tmpdir, 'data')
with testing.cassandra3.Cassandra(copy_data_from=data_dir) as cassandra:
session = self._connect(cassandra)
results = list(session.execute('SELECT k FROM test.hello'))
self.assertEqual(1, results[0].k)
finally:
rmtree(tmpdir)
def test_skipIfNotInstalled_found(self):
@testing.cassandra3.skipIfNotInstalled
def testcase():
pass
self.assertEqual(False, hasattr(testcase, '__unittest_skip__'))
self.assertEqual(False, hasattr(testcase, '__unittest_skip_why__'))
@patch("testing.cassandra3.find_cassandra_home")
def test_skipIfNotInstalled_notfound(self, find_cassandra_home):
find_cassandra_home.side_effect = RuntimeError
@testing.cassandra3.skipIfNotInstalled
def testcase():
pass
self.assertEqual(True, hasattr(testcase, '__unittest_skip__'))
self.assertEqual(True, hasattr(testcase, '__unittest_skip_why__'))
self.assertEqual(True, testcase.__unittest_skip__)
self.assertEqual("Cassandra not found", testcase.__unittest_skip_why__)
def test_skipIfNotInstalled_with_args_found(self):
cassandra_home = testing.cassandra3.find_cassandra_home()
path = os.path.join(cassandra_home, 'bin', 'cassandra')
@testing.cassandra3.skipIfNotInstalled(path)
def testcase():
pass
self.assertEqual(False, hasattr(testcase, '__unittest_skip__'))
self.assertEqual(False, hasattr(testcase, '__unittest_skip_why__'))
def test_skipIfNotInstalled_with_args_notfound(self):
@testing.cassandra3.skipIfNotInstalled("/path/to/anywhere")
def testcase():
pass
self.assertEqual(True, hasattr(testcase, '__unittest_skip__'))
self.assertEqual(True, hasattr(testcase, '__unittest_skip_why__'))
self.assertEqual(True, testcase.__unittest_skip__)
self.assertEqual("Cassandra not found", testcase.__unittest_skip_why__)
def test_skipIfNotFound_found(self):
@testing.cassandra3.skipIfNotFound
def testcase():
pass
self.assertEqual(False, hasattr(testcase, '__unittest_skip__'))
self.assertEqual(False, hasattr(testcase, '__unittest_skip_why__'))
@patch("testing.cassandra3.find_cassandra_home")
def test_skipIfNotFound_notfound(self, find_cassandra_home):
find_cassandra_home.side_effect = RuntimeError
@testing.cassandra3.skipIfNotFound
def testcase():
pass
self.assertEqual(True, hasattr(testcase, '__unittest_skip__'))
self.assertEqual(True, hasattr(testcase, '__unittest_skip_why__'))
self.assertEqual(True, testcase.__unittest_skip__)
self.assertEqual("Cassandra not found", testcase.__unittest_skip_why__)
| 6,648 | 663 | 23 |
f32d576af2373b821cd21cbecd7889f472c37d1f | 8,254 | py | Python | ding/reward_model/rnd_reward_model.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | ding/reward_model/rnd_reward_model.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | ding/reward_model/rnd_reward_model.py | jayyoung0802/DI-engine | efbb35ddaf184d1009291e6842fbbae09f193492 | [
"Apache-2.0"
] | null | null | null | from typing import Union, Tuple, List, Dict, Any
from easydict import EasyDict
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from ding.utils import SequenceType, REWARD_MODEL_REGISTRY
from ding.model import FCEncoder, ConvEncoder
from .base_reward_model import BaseRewardModel
from ding.utils import RunningMeanStd
from ding.torch_utils.data_helper import to_tensor
import copy
@REWARD_MODEL_REGISTRY.register('rnd')
| 46.370787 | 118 | 0.663194 | from typing import Union, Tuple, List, Dict, Any
from easydict import EasyDict
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from ding.utils import SequenceType, REWARD_MODEL_REGISTRY
from ding.model import FCEncoder, ConvEncoder
from .base_reward_model import BaseRewardModel
from ding.utils import RunningMeanStd
from ding.torch_utils.data_helper import to_tensor
import copy
def collect_states(iterator):
res = []
for item in iterator:
state = item['obs']
res.append(state)
return res
class RndNetwork(nn.Module):
def __init__(self, obs_shape: Union[int, SequenceType], hidden_size_list: SequenceType) -> None:
super(RndNetwork, self).__init__()
if isinstance(obs_shape, int) or len(obs_shape) == 1:
self.target = FCEncoder(obs_shape, hidden_size_list)
self.predictor = FCEncoder(obs_shape, hidden_size_list)
elif len(obs_shape) == 3:
self.target = ConvEncoder(obs_shape, hidden_size_list)
self.predictor = ConvEncoder(obs_shape, hidden_size_list)
else:
raise KeyError(
"not support obs_shape for pre-defined encoder: {}, please customize your own RND model".
format(obs_shape)
)
for param in self.target.parameters():
param.requires_grad = False
def forward(self, obs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
predict_feature = self.predictor(obs)
with torch.no_grad():
target_feature = self.target(obs)
return predict_feature, target_feature
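# Editor's sketch (assumed 1D observation shape, illustrative only, not part of the
# original module): novelty is the per-sample MSE between the trainable predictor and
# the frozen target network, e.g.
#   net = RndNetwork(obs_shape=4, hidden_size_list=[64, 64, 128])
#   pred, target = net(torch.randn(8, 4))
#   novelty = F.mse_loss(pred, target, reduction='none').mean(dim=1)   # shape (8,)
# which mirrors how estimate() below turns observations into intrinsic rewards.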
@REWARD_MODEL_REGISTRY.register('rnd')
class RndRewardModel(BaseRewardModel):
config = dict(
type='rnd',
intrinsic_reward_type='add',
learning_rate=1e-3,
batch_size=64,
hidden_size_list=[64, 64, 128],
update_per_collect=100,
obs_norm=True,
obs_norm_clamp_min=-1,
obs_norm_clamp_max=1,
intrinsic_reward_weight=None,
# means the relative weight of RND intrinsic_reward.
# If intrinsic_reward_weight=None, we will automatically set it based on
# the absolute value of the difference between max and min extrinsic reward in the sampled mini-batch
# please refer to estimate() method for details.
intrinsic_reward_rescale=0.01,
# means the rescale value of RND intrinsic_reward only used when intrinsic_reward_weight is None
)
def __init__(self, config: EasyDict, device: str, tb_logger: 'SummaryWriter') -> None: # noqa
super(RndRewardModel, self).__init__()
self.cfg = config
self.intrinsic_reward_rescale = self.cfg.intrinsic_reward_rescale
assert device == "cpu" or device.startswith("cuda")
self.device = device
self.tb_logger = tb_logger
self.reward_model = RndNetwork(config.obs_shape, config.hidden_size_list)
self.reward_model.to(self.device)
self.intrinsic_reward_type = config.intrinsic_reward_type
assert self.intrinsic_reward_type in ['add', 'new', 'assign']
self.train_obs = []
self.opt = optim.Adam(self.reward_model.predictor.parameters(), config.learning_rate)
self._running_mean_std_rnd_reward = RunningMeanStd(epsilon=1e-4)
self.estimate_cnt_rnd = 0
self._running_mean_std_rnd_obs = RunningMeanStd(epsilon=1e-4)
def _train(self) -> None:
train_data: list = random.sample(self.train_obs, self.cfg.batch_size)
train_data: torch.Tensor = torch.stack(train_data).to(self.device)
if self.cfg.obs_norm:
# Note: observation normalization: transform obs to mean 0, std 1
self._running_mean_std_rnd_obs.update(train_data.cpu().numpy())
train_data = (train_data - to_tensor(self._running_mean_std_rnd_obs.mean).to(self.device)) / to_tensor(
self._running_mean_std_rnd_obs.std
).to(self.device)
train_data = torch.clamp(train_data, min=self.cfg.obs_norm_clamp_min, max=self.cfg.obs_norm_clamp_max)
predict_feature, target_feature = self.reward_model(train_data)
loss = F.mse_loss(predict_feature, target_feature.detach())
self.opt.zero_grad()
loss.backward()
self.opt.step()
def train(self) -> None:
for _ in range(self.cfg.update_per_collect):
self._train()
def estimate(self, data: list) -> List[Dict]:
"""
Rewrite the reward key in each row of the data.
"""
# NOTE: deepcopy reward part of data is very important,
# otherwise the reward of data in the replay buffer will be incorrectly modified.
train_data_augmented = self.reward_deepcopy(data)
obs = collect_states(train_data_augmented)
obs = torch.stack(obs).to(self.device)
if self.cfg.obs_norm:
# Note: observation normalization: transform obs to mean 0, std 1
obs = (obs - to_tensor(self._running_mean_std_rnd_obs.mean
).to(self.device)) / to_tensor(self._running_mean_std_rnd_obs.std).to(self.device)
obs = torch.clamp(obs, min=self.cfg.obs_norm_clamp_min, max=self.cfg.obs_norm_clamp_max)
with torch.no_grad():
predict_feature, target_feature = self.reward_model(obs)
mse = F.mse_loss(predict_feature, target_feature, reduction='none').mean(dim=1)
self._running_mean_std_rnd_reward.update(mse.cpu().numpy())
# Note: according to the min-max normalization, transform rnd reward to [0,1]
rnd_reward = (mse - mse.min()) / (mse.max() - mse.min() + 1e-11)
self.estimate_cnt_rnd += 1
self.tb_logger.add_scalar('rnd_reward/rnd_reward_max', rnd_reward.max(), self.estimate_cnt_rnd)
self.tb_logger.add_scalar('rnd_reward/rnd_reward_mean', rnd_reward.mean(), self.estimate_cnt_rnd)
self.tb_logger.add_scalar('rnd_reward/rnd_reward_min', rnd_reward.min(), self.estimate_cnt_rnd)
rnd_reward = rnd_reward.to(train_data_augmented[0]['reward'].device)
rnd_reward = torch.chunk(rnd_reward, rnd_reward.shape[0], dim=0)
"""NOTE:
Following normalization approach to extrinsic reward seems be not reasonable,
because this approach compresses the extrinsic reward magnitude, resulting in less informative reward signals.
"""
# rewards = torch.stack([data[i]['reward'] for i in range(len(data))])
# rewards = (rewards - torch.min(rewards)) / (torch.max(rewards) - torch.min(rewards))
# TODO(pu): how to set intrinsic_reward_rescale automatically?
if self.cfg.intrinsic_reward_weight is None:
"""Note: the following way of setting self.cfg.intrinsic_reward_weight is only suitable for the dense
            reward env like lunarlander, not suitable for the sparse reward env.
            In sparse reward env, e.g. minigrid, if the agent reaches the goal, it obtains reward ~1, otherwise 0.
Thus, in sparse reward env, it's reasonable to set the intrinsic_reward_weight approximately equal to
the inverse of max_episode_steps.
"""
self.cfg.intrinsic_reward_weight = self.intrinsic_reward_rescale * max(
1,
abs(
max([train_data_augmented[i]['reward'] for i in range(len(train_data_augmented))]) -
min([train_data_augmented[i]['reward'] for i in range(len(train_data_augmented))])
)
)
for item, rnd_rew in zip(train_data_augmented, rnd_reward):
if self.intrinsic_reward_type == 'add':
item['reward'] = item['reward'] + rnd_rew * self.cfg.intrinsic_reward_weight
elif self.intrinsic_reward_type == 'new':
item['intrinsic_reward'] = rnd_rew
elif self.intrinsic_reward_type == 'assign':
item['reward'] = rnd_rew
return train_data_augmented
def collect_data(self, data: list) -> None:
self.train_obs.extend(collect_states(data))
def clear_data(self) -> None:
self.train_obs.clear()
| 3,074 | 4,572 | 122 |
9705b305739f77edfc4d4a38d56f9a475f434e5b | 4,830 | py | Python | pychron/classifier/isotope_trainer.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/classifier/isotope_trainer.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/classifier/isotope_trainer.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance, Button, Str
from traitsui.api import View, UItem, HGroup, VGroup
from traitsui.handler import Handler
from pychron.classifier.isotope_classifier import IsotopeClassifier
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.graph.stacked_regression_graph import StackedRegressionGraph
from pychron.loggable import Loggable
UUIDS = ()
if __name__ == "__main__":
t = IsotopeTrainer()
t.configure_traits(view=View("test"))
# ============= EOF =============================================
| 32.416107 | 86 | 0.52029 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance, Button, Str
from traitsui.api import View, UItem, HGroup, VGroup
from traitsui.handler import Handler
from pychron.classifier.isotope_classifier import IsotopeClassifier
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.graph.stacked_regression_graph import StackedRegressionGraph
from pychron.loggable import Loggable
UUIDS = ()
class KlassHandler(Handler):
def object_good_button_changed(self, info):
info.object.klass = 1
info.ui.dispose()
def object_bad_button_changed(self, info):
info.object.klass = 0
info.ui.dispose()
def object_skip_button_changed(self, info):
info.object.klass = -1
info.ui.dispose()
class IsotopeTrainer(Loggable):
graph = Instance(StackedRegressionGraph)
good_button = Button
bad_button = Button
skip_button = Button
test = Button
klass = None
dvc = Instance("pychron.dvc.dvc.DVC")
pklass = Str
def setup_graph(self, iso):
self.klass = None
self.pklass = str(self.clf.predict_isotope(iso))
g = StackedRegressionGraph()
g.new_plot(padding=[60, 10, 10, 40])
if iso:
g.new_series(
iso.xs,
iso.ys,
fit=iso.fit,
filter_outliers_dict=iso.filter_outliers_dict,
)
g.set_x_limits(min_=0, max_=iso.xs[-1] * 1.1)
g.set_y_title(iso.name)
g.set_x_title("Time (s)")
g.refresh()
self.graph = g
def train(self):
# dvc = DVC(bind=False,
# organization='NMGRLData')
# dvc.db.trait_set(name='pychrondvc',
# username=os.environ.get('ARGONSERVER_DB_USER'),
# password=os.environ.get('ARGONSERVER_DB_PWD'),
# kind='mysql',
# host=os.environ.get('ARGONSERVER_HOST'))
# dvc.connect()
self.clf = clf = IsotopeClassifier()
isos = []
klasses = []
uuids = UUIDS
dvc = self.dvc
with dvc.session_ctx():
ans = dvc.get_last_nhours_analyses(200, mass_spectrometers="jan")
if ans:
# records = [ri for ai in ans for ri in ai.record_views]
for ai in self.dvc.make_analyses(ans):
broke = False
# dbai = dvc.get_analysis_uuid(uuid)
# ai = dvc.make_analyses((dbai,))[0]
# ai = dvc.make_analysis(dbai.record_view)
ai.load_raw_data()
for iso in ai.isotopes.values():
klass = self._get_klass(iso)
                        if klass == -1:
continue
if klass is not None:
isos.append(iso)
klasses.append(klass)
else:
broke = True
break
if broke:
break
if isos:
clf.add_isotopes(isos, klasses)
clf.dump()
def _get_klass(self, iso):
self.setup_graph(iso)
bgrp = HGroup(
UItem("good_button"),
UItem("bad_button"),
UItem("skip_button"),
UItem("pklass"),
)
self.edit_traits(
view=View(
VGroup(bgrp, UItem("graph", style="custom")), buttons=["Cancel", "OK"]
),
kind="livemodal",
handler=KlassHandler(),
)
return self.klass
# def _test_fired(self):
# # self._get_klass(None)
# self.train()
if __name__ == "__main__":
t = IsotopeTrainer()
t.configure_traits(view=View("test"))
# ============= EOF =============================================
| 2,841 | 398 | 126 |
1c697e22fa7daef87d2323415a45758719dc0eb3 | 11,875 | py | Python | song_text_widget.py | maccesch/songscreen | de10f1d4cb0489d56861eaf633cf61d75dfd51c7 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-03-30T15:42:35.000Z | 2017-03-05T13:26:55.000Z | song_text_widget.py | maccesch/songscreen | de10f1d4cb0489d56861eaf633cf61d75dfd51c7 | [
"BSD-3-Clause-Clear"
] | null | null | null | song_text_widget.py | maccesch/songscreen | de10f1d4cb0489d56861eaf633cf61d75dfd51c7 | [
"BSD-3-Clause-Clear"
] | null | null | null | from operator import attrgetter
from PyQt5.QtCore import Qt, QRectF, QAbstractAnimation, QPropertyAnimation, QEasingCurve
from PyQt5.QtGui import QPen, QBrush, QColor, QPainter, QFont, QFontMetricsF, QTransform
from PyQt5.QtWidgets import QGraphicsObject, QGraphicsView, QSizePolicy, QGraphicsScene
from marker_mixin import MarkerMixin
| 36.314985 | 130 | 0.635789 | from operator import attrgetter
from PyQt5.QtCore import Qt, QRectF, QAbstractAnimation, QPropertyAnimation, QEasingCurve
from PyQt5.QtGui import QPen, QBrush, QColor, QPainter, QFont, QFontMetricsF, QTransform
from PyQt5.QtWidgets import QGraphicsObject, QGraphicsView, QSizePolicy, QGraphicsScene
from marker_mixin import MarkerMixin
class SongTextCover(QGraphicsObject):
def __init__(self, w, h, *args, **kwargs):
super(SongTextCover, self).__init__(*args, **kwargs)
self.w = w
self.h = h
def paint(self, qp, *args, **kwargs):
qp.setPen(QPen(Qt.NoPen))
qp.setBrush(QBrush(QColor(0, 0, 0)))
qp.drawRect(self.boundingRect())
def boundingRect(self):
return QRectF(-100, -100, self.w + 200, self.h * 2)
class SongTextWidget(MarkerMixin, QGraphicsView):
line_height_factor = 1.5
def __init__(self, *args, **kwargs):
super(SongTextWidget, self).__init__(*args, **kwargs)
self.setWindowTitle(self.tr("Lyrics"))
self.w = 1920
self.h = 1080
self._progress = 0.0
# self._animated_progress = 0.0
self.title = ""
self._linecount = 0
self._extra_lines_after = []
self._first_lyrics_line_y = 0
self._covered = True
self.setMinimumHeight(9 * 50)
self.setMinimumWidth(16 * 50)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Minimum)
self.setScene(QGraphicsScene(self))
self.setRenderHints(QPainter.HighQualityAntialiasing | QPainter.SmoothPixmapTransform)
self.setInteractive(False)
self.scene().setBackgroundBrush(Qt.black)
self.setStyleSheet( "QGraphicsView { border-style: none; }" )
self._line_height = 10
self._document_height = 10
self._animation = None
self._font_size = 40
self._line_increment = 2
def heightForWidth(self, width):
return int(round(width / 16 * 9))
def hasHeightForWidth(self):
return True
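    # Editor's note (illustrative): the widget advertises a fixed 16:9 aspect ratio,
    # e.g. heightForWidth(1920) == 1080 and heightForWidth(800) == 450.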
@property
def _scroll_progress(self):
if not self.markers:
return self.progress
if self.progress <= self.markers[0].progress:
return 0.0
past_line_count = 0
current_marker = None
next_marker_progress = 1
for i, marker in enumerate(self.markers[:-1]):
next_marker = self.markers[i + 1]
if next_marker.progress > self.progress:
current_marker = marker
next_marker_progress = next_marker.progress
break
else:
past_line_count += marker.linecount
if next_marker_progress == 1:
current_marker = self.markers[-1]
return past_line_count / self._linecount + \
(current_marker.linecount / self._linecount) * \
(self.progress - current_marker.progress) / (next_marker_progress - current_marker.progress)
@MarkerMixin.markers.setter
def markers(self, value):
MarkerMixin.markers.fset(self, value)
self._linecount = 0
for marker in self._markers:
self._linecount += marker.linecount
self._rebuild_scene()
def _markers_changed(self):
self.repaint()
@property
def progress(self):
return self._progress
@progress.setter
def progress(self, value):
self._progress = value
self._update_screen_rect()
def _update_screen_rect(self):
if self._animation is not None and self._animation.state() == QAbstractAnimation.Running:
return
vertical_offset_bias = self._line_height - self._first_lyrics_line_y
vertical_offset = self._scroll_progress * \
(self._document_height - len(self._extra_lines_after) * self._line_height)
if vertical_offset <= vertical_offset_bias:
vertical_offset = 0
else:
vertical_offset = min(vertical_offset - vertical_offset_bias, self._document_height)
current_line_index = vertical_offset / self._line_height
extra_line_count = len(tuple(filter(lambda i: i < current_line_index, self._extra_lines_after)))
vertical_offset += self._line_height * extra_line_count
diff = self.sceneRect().y() - vertical_offset
if abs(diff) > self._line_height * self._line_increment:
factor = -int(diff) // int(self._line_height * self._line_increment)
y = self.sceneRect().y() + (self._line_height * self._line_increment) * factor
y = max(0, y)
target_rect = QRectF(0, y, self.w, self.h)
if not self._covered:
self._animation = QPropertyAnimation(self, b"sceneRect")
self._animation.setDuration(3000)
self._animation.setStartValue(self.sceneRect())
self._animation.setEndValue(target_rect)
self._animation.setEasingCurve(QEasingCurve.InOutQuad)
self._animation.start()
else:
self._animation = None
self.setSceneRect(target_rect)
def _add_line(self, scene, line_index, left, text_str, font, color, offset=0, align_right=False):
y = self._line_height * line_index + offset + self.h * 0.1
if line_index == 1:
self._first_lyrics_line_y = y
metrics = QFontMetricsF(font)
text_width = metrics.width(text_str)
max_text_width = (self.w - left - left)
overflow = text_width - max_text_width
if overflow <= 0:
text = scene.addText(text_str, font)
if align_right:
text.setPos(self.w - left - text_width, y)
else:
text.setPos(left, y)
text.setDefaultTextColor(color)
else:
scale_factor = max_text_width / text_width
if scale_factor >= 0.9:
text = scene.addText(text_str, font)
text.setPos(left, y)
text.setDefaultTextColor(color)
text.setTransform(QTransform().scale(scale_factor, 1.0))
else:
self._extra_lines_after.append(line_index)
idx = len(text_str) // 2
while idx < len(text_str) and not text_str[idx].isspace():
idx += 1
line_index = self._add_line(scene, line_index, left, text_str[:idx], font, color, offset)
line_index += 1
line_index = self._add_line(scene, line_index, left, "\t" + text_str[idx:], font, color,
offset - self._line_height * 0.1)
return line_index
def _rebuild_scene(self, keep_progress=False):
if not self.markers:
self.scene().setBackgroundBrush(Qt.black)
return
if keep_progress:
prev_scene_rect_y = self.sceneRect().y()
prev_document_height = self._document_height
scene = self.scene()
scene.setBackgroundBrush(Qt.white)
scene.clear()
self._extra_lines_after = []
font_size = self._calc_font_size(self.h)
title_font = QFont('Fira Sans', font_size, QFont.Medium)
default_font = QFont('Fira Sans', font_size, QFont.Normal)
default_font.setHintingPreference(QFont.PreferFullHinting)
default_font.setLetterSpacing(QFont.PercentageSpacing, 99)
heading_font = QFont('Fira Sans', font_size * 0.7, QFont.DemiBold)
default_color = QColor(0, 0, 0)
heading_color = QColor(180, 180, 180)
self._line_height = self._calc_line_height(default_font)
# scene.addRect(0.0, 0.0, self.w, self._document_height * 1.2, QPen(Qt.NoPen), QBrush(Qt.NoBrush))
margin = self.w / 10
line_index = 0
line_index = self._add_line(scene, line_index, margin, self.title, offset=- self.h * 0.05, font=title_font,
color=default_color)
line_index += 1
for marker in sorted(self.markers, key=attrgetter("progress")):
line_index = self._add_line(scene, line_index, margin, marker.name, offset=self._line_height * 0.2,
font=heading_font, color=heading_color)
line_index += 1
for line in marker.text.splitlines():
line_index = self._add_line(scene, line_index, margin, line, font=default_font, color=default_color)
line_index += 1
self._document_height = self._calc_document_height()
# self._document_cover = scene.addRect(-self.w * 0.5, -self._document_height * 0.5, self.w * 2, self._document_height * 2,
# QPen(Qt.NoPen), QBrush(QColor(0, 0, 0)))
# self._document_cover.setOpacity(1)
# self._document_cover_animation = OpacityAnimation(self._document_cover)
# self._document_cover_animation = QPropertyAnimation(self._document_cover, b"opacity", self)
# self._document_cover_animation.setDuration(1000)
# self._document_cover_animation.setStartValue(1)
# self._document_cover_animation.setStartValue(0)
# self._document_cover_animation.start()
self._document_cover = SongTextCover(self.w, self._document_height)
scene.addItem(self._document_cover)
self._document_cover.opacityChanged.connect(self._redraw_scene)
scene_rect_y = 0
if keep_progress:
self._document_cover.setOpacity(0.0)
scene_rect_y = prev_scene_rect_y / prev_document_height * self._document_height
else:
self._animation = None
self._covered = not keep_progress
self.setSceneRect(QRectF(0, scene_rect_y, self.w, self.h))
self.fitInView(QRectF(0, 0, self.w, self.h), Qt.KeepAspectRatio)
def fade_in(self):
self.show()
self.raise_()
self._covered = False
self._document_cover_animation = QPropertyAnimation(self._document_cover, b"opacity")
self._document_cover_animation.setDuration(1000)
self._document_cover_animation.setStartValue(1)
self._document_cover_animation.setEndValue(0)
self._document_cover_animation.setEasingCurve(QEasingCurve.InOutQuad)
self._document_cover_animation.start()
def fade_out(self):
self._document_cover_animation = QPropertyAnimation(self._document_cover, b"opacity")
self._document_cover_animation.finished.connect(self._fade_out_finished)
self._document_cover_animation.setDuration(1000)
self._document_cover_animation.setStartValue(0)
self._document_cover_animation.setEndValue(1)
self._document_cover_animation.setEasingCurve(QEasingCurve.InOutQuad)
self._document_cover_animation.start()
def _fade_out_finished(self):
self._covered = True
self._animation = None
self.progress = 0
self.hide()
def _redraw_scene(self):
self.viewport().update()
def resizeEvent(self, resize_event):
self.fitInView(QRectF(0, 0, self.w, self.h), Qt.KeepAspectRatio)
def _calc_document_height(self):
return (self._linecount + 1 + len(self._extra_lines_after)) * self._line_height
def _calc_line_height(self, font):
metrics = QFontMetricsF(font)
line_height = metrics.height() * self.line_height_factor
return line_height
def _calc_font_size(self, h):
return self._font_size
# font_size = h // (self.lines_per_screen * self.line_height_factor * 1.4)
# return font_size
def set_font_size(self, font_size):
self._font_size = font_size
self._rebuild_scene(keep_progress=True)
def set_line_increment(self, increment):
self._line_increment = increment
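# --- Illustrative usage sketch (not part of the original file) ---
# How the widget is presumably driven by the host application; the marker objects
# are assumed to expose `progress`, `linecount`, `name` and `text` (see MarkerMixin),
# and `song_markers` below is a hypothetical list of them.
#   widget = SongTextWidget()
#   widget.title = "Song title"
#   widget.markers = song_markers   # rebuilds the scrolling scene
#   widget.fade_in()                # animates the black cover away
#   widget.progress = 0.5           # 0..1 playback position; scrolls the lyrics
#   widget.fade_out()               # covers the scene again and resets progress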
| 10,688 | 721 | 126 |
72d434e21fa91e7cedeb6a72ca4b69bf41ba142f | 7,173 | py | Python | dataedit.py | dnsbob/password-dongle | a2e04ec4bd370401a14d55fedf2ea9da065fc4cf | [
"Apache-2.0"
] | null | null | null | dataedit.py | dnsbob/password-dongle | a2e04ec4bd370401a14d55fedf2ea9da065fc4cf | [
"Apache-2.0"
] | null | null | null | dataedit.py | dnsbob/password-dongle | a2e04ec4bd370401a14d55fedf2ea9da065fc4cf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
dataedit.py
menu password edit for passworddongle.py
'''
import time
import random
import getpass
import json
# all printable ascii chars, plus space, except double quote, tab, backslash
mychars="`aZ0+nM<bY1!oL>cX2@pK;dW3#qJ:eV4$rI'fU5%sH[gT6^tG]hS7&uF{iR8*vE}jQ9(wD-kP,)xC=lO.~yB_mN/ zA"
mylen=len(mychars)
# direction values
ENCRYPT=+1
DECRYPT=-1
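# Illustrative sketch (not in the original file): encryption shifts a character's
# index in `mychars` forward by a key offset, decryption shifts it back, modulo
# the alphabet length.  With an offset of 3, for example:
#   i = mychars.index('a')                                      # position of the plain character
#   enc = mychars[(i + ENCRYPT * 3) % mylen]                    # shifted forward 3 places
#   dec = mychars[(mychars.index(enc) + DECRYPT * 3) % mylen]   # back to 'a'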
def dataedit(data, key, level=0):
'''update data based on key and input'''
if type(data) == type([]):
if len(data) == 0:
print("ERROR - empty list",data)
elif (len(data) % 2) == 1:
print("ERROR - not even number of list members",data)
else:
for i,v in enumerate(data):
if (i % 2) == 0:
'''even items are labels'''
if type(v) == type([]):
print("ERROR? list as odd element in list?",json.dumps(data))
elif type(v) == type('string'):
print(v)
label=v
else:
if type(v) == type([]):
print('level:',level,'menu: ',label)
dataedit(v,key, level + 1)
elif type(v) == type('string'):
if label == 'back' and v == '':
print('back')
else:
while True:
'''password'''
x=input('password - encoded/decoded/input/next:')
if x == 'e':
print(v)
elif x == 'd':
print(tinydecrypt(v,key))
elif x == 'i':
plain=getpass.getpass()
v=tinyencrypt(plain,key)
data[i]=v
elif x == 'n':
break # out of while True
else:
print("ERROR - not a list",data)
print(data)
if __name__ == '__main__':
datafile=input('data file name:')
print('file:',datafile)
with open(datafile) as f:
dataraw=f.read()
if dataraw[0:5] == 'data=':
mylist=dataraw[5:]
data = json.loads(mylist)
orig=list(data)
print(data)
key=getpass.getpass(prompt='Password key: ')
dataedit(data,key)
print(data)
if data != orig:
print("changed")
if True:
outfile=input('data file to write:')
if outfile:
with open(outfile,'w') as fw:
dataraw="data=" + json.dumps(data,indent=4) + '\n'
fw.write(dataraw)
| 32.753425 | 101 | 0.500209 | #!/usr/bin/env python3
'''
dataedit.py
menu password edit for passworddongle.py
'''
import time
import random
import getpass
import json
# all printable ascii chars, plus space, except double quote, tab, backslash
mychars="`aZ0+nM<bY1!oL>cX2@pK;dW3#qJ:eV4$rI'fU5%sH[gT6^tG]hS7&uF{iR8*vE}jQ9(wD-kP,)xC=lO.~yB_mN/ zA"
mylen=len(mychars)
# direction values
ENCRYPT=+1
DECRYPT=-1
def code2indexlist(key):
# turn key into list of indexes
keyi=[]
for k in range(len(key)):
keyi.append(mychars.index(key[k]))
return keyi
def cryptchar(inchar,keyoffset,direction):
try:
charindex=mychars.index(inchar)
encind=(charindex+direction*keyoffset)%mylen
outchar=mychars[encind]
except ValueError:
print("invalid character used?")
outchar="invalid"
return outchar
def cryptstring(instring,keyi,direction):
outstring=""
keylen=len(keyi)
keyindex=1%keylen # allow 1 char key (even if not recommended)
for eachchar in instring:
keyoffset=keyi[keyindex]
outletter=cryptchar(eachchar,keyoffset,direction)
outstring+=outletter
keyindex = (keyindex+1) % keylen
return outstring
def tinyencrypt(text,key):
try:
keyi = code2indexlist(key)
textlen=len(text)
keylen=len(keyi)
totlen=textlen + keylen
if totlen > mylen:
print("error - string too long, limited to",mylen,"character")

return "invalid"
eachchar=mychars[totlen]
keyoffset=keyi[0]
# use lencode as first char of text to encrypt
code=cryptchar(eachchar,keyoffset,ENCRYPT)
keyindex=1%keylen # allow 1 char key
code2=cryptstring(text,keyi,ENCRYPT)
code += code2
# pad to nearest block size
blocksize=20
blocks=int(textlen/blocksize)+1
padding=blocks*blocksize-textlen
for x in range(padding):
code+=mychars[random.randrange(mylen)]
except ValueError:
print("invalid character used?")
code="invalid"
return code
def tinydecrypt(code,key):
try:
keyi = code2indexlist(key)
codelen=len(code)
keylen=len(keyi)
eachchar=code[0]
keyoffset=keyi[0]
lencode=cryptchar(eachchar,keyoffset,DECRYPT)
totlen=mychars.index(lencode)
encind=mychars.index(code[0])
textlen=(totlen-keylen+mylen)%mylen
plain=cryptstring(code[1:textlen+1],keyi,DECRYPT)
except ValueError:
print("invalid character used?")
plain="invalid"
except IndexError:
print("index error")
plain='invalid'
return plain
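# Illustrative round trip (not in the original file): for text made only of `mychars`
# characters, with len(text) + len(key) <= mylen, decrypting with the same key should
# return the original text (tinydecrypt strips the encoded length prefix and the
# random padding added by tinyencrypt).
#   code = tinyencrypt("my secret", "key")
#   assert tinydecrypt(code, "key") == "my secret"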
def menu(key,buttons):
#print("starting pwmenu")
# constants
tick = 0.1
bsp = chr(8) + " " + chr(8) # backspace and overwrite with backspace
action = ["up", "down", "enter"]
import data2
data=data2.data
#print(data[4])
# variables
current = data
stack = []
i = 0
# print(current[i],end='')
layout.write(current[i])
old = current[i]
button_num = 0
button = buttons[0]
while True:
button_press = 0
while not button_press:
for button_num, button in enumerate(buttons):
if button.value:
button_press = 1
break
time.sleep(tick) # only if no button
c = current[i]
if action[button_num] == "down":
i = (i + 2) % len(current) # wrap at ends
elif action[button_num] == "up":
i = (i - 2) % len(current) # wrap
elif action[button_num] == "enter":
v = current[i + 1]
if type(v) == type("string"):
if c == "back":
if stack:
i = stack.pop()
current = stack.pop()
c = current[i]
else:
z="password:" + v
#z=plain=tinydecrypt(v,key)
# print(v,end='')
layout.write(z)
old = c
#return # testing memory use - debug
elif type(v) == type(["list"]):
old = current[i]
stack.append(current)
stack.append(i)
current = v
i = 0
c = current[i]
if c != old: # only print if changed
# print("".join([bsp for x in range(len(old))]),end='') # erase
layout.write("".join([bsp for x in range(len(old))])) # erase
# print(current[i],end='')
layout.write(current[i])
old = c
while button.value:
time.sleep(tick) # wait for button release
def dataedit(data, key, level=0):
'''update data based on key and input'''
if type(data) == type([]):
if len(data) == 0:
print("ERROR - empty list",data)
elif (len(data) % 2) == 1:
print("ERROR - not even number of list members",data)
else:
for i,v in enumerate(data):
if (i % 2) == 0:
'''even items are labels'''
if type(v) == type([]):
print("ERROR? list as odd element in list?",json.dumps(data))
elif type(v) == type('string'):
print(v)
label=v
else:
if type(v) == type([]):
print('level:',level,'menu: ',label)
dataedit(v,key, level + 1)
elif type(v) == type('string'):
if label == 'back' and v == '':
print('back')
else:
while True:
'''password'''
x=input('password - encoded/decoded/input/next:')
if x == 'e':
print(v)
elif x == 'd':
print(tinydecrypt(v,key))
elif x == 'i':
plain=getpass.getpass()
v=tinyencrypt(plain,key)
data[i]=v
elif x == 'n':
break # out of while True
else:
print("ERROR - not a list",data)
print(data)
if __name__ == '__main__':
datafile=input('data file name:')
print('file:',datafile)
with open(datafile) as f:
dataraw=f.read()
if dataraw[0:5] == 'data=':
mylist=dataraw[5:]
data = json.loads(mylist)
orig=list(data)
print(data)
key=getpass.getpass(prompt='Password key: ')
dataedit(data,key)
print(data)
if data != orig:
print("changed")
if True:
outfile=input('data file to write:')
if outfile:
with open(outfile,'w') as fw:
dataraw="data=" + json.dumps(data,indent=4) + '\n'
fw.write(dataraw)
| 4,244 | 0 | 138 |
1ffcc5a067d5b5c360ec8a38fa4992ad81a61a36 | 929 | py | Python | 34/__init__.py | sc4599/LeetCode | a2ce242e20e485cc8f6cc6083f4993332d414bbf | [
"MIT"
] | null | null | null | 34/__init__.py | sc4599/LeetCode | a2ce242e20e485cc8f6cc6083f4993332d414bbf | [
"MIT"
] | null | null | null | 34/__init__.py | sc4599/LeetCode | a2ce242e20e485cc8f6cc6083f4993332d414bbf | [
"MIT"
] | null | null | null |
import unittest
if __name__ == "__main__":
unittest.main() | 24.447368 | 51 | 0.53606 | class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        first_position = -1
        last_position = -1
        if target in nums:
            # index() finds the first occurrence; scanning the reversed list
            # yields the last occurrence without mutating nums.
            first_position = nums.index(target)
            last_position = len(nums) - 1 - nums[::-1].index(target)
        return [first_position, last_position]
import unittest
class TestSolution(unittest.TestCase):
def test_searchRange(self):
cls = Solution()
r = cls.searchRange([1], 0)
        self.assertEqual(r, [-1, -1])
r = cls.searchRange([5, 7, 7, 8, 8, 10], 8)
        self.assertEqual(r, [3, 4])
if __name__ == "__main__":
unittest.main() | 195 | 596 | 71 |
c458f982932f71bc17123edf426223766b7d5c2f | 147 | py | Python | test/factorial.py | radiilab/Rad-lang | 39eb4762c5bfea8537f4f269a2e8457dd5409e3a | [
"BSD-3-Clause"
] | 1 | 2019-07-18T23:30:38.000Z | 2019-07-18T23:30:38.000Z | test/factorial.py | radiilab/Rad-lang | 39eb4762c5bfea8537f4f269a2e8457dd5409e3a | [
"BSD-3-Clause"
] | null | null | null | test/factorial.py | radiilab/Rad-lang | 39eb4762c5bfea8537f4f269a2e8457dd5409e3a | [
"BSD-3-Clause"
] | null | null | null | end
print factorial(5) # should output 120
| 14.7 | 40 | 0.557823 | def factorial(n):
if n < 1:
return 1
else:
return n * factorial(n-1)
end
end
print factorial(5) # should output 120
| 79 | 0 | 22 |
bf27071cdd5a17a6a5d80fc5d40d8393c9bb5247 | 4,537 | py | Python | src/sleuthdeck/actions.py | sleuth-io/sleuth-deck | 289b9967e7d395de8aa05268eb5e686b67285c1e | [
"Apache-2.0"
] | null | null | null | src/sleuthdeck/actions.py | sleuth-io/sleuth-deck | 289b9967e7d395de8aa05268eb5e686b67285c1e | [
"Apache-2.0"
] | null | null | null | src/sleuthdeck/actions.py | sleuth-io/sleuth-deck | 289b9967e7d395de8aa05268eb5e686b67285c1e | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import signal
import subprocess
from time import sleep
from typing import Optional, Union, Tuple
from PIL.Image import Image
from pyautogui import hotkey
from sleuthdeck.deck import Action
from sleuthdeck.deck import ClickType
from sleuthdeck.deck import Key
from sleuthdeck.deck import KeyScene
from sleuthdeck.deck import Scene
from sleuthdeck.keys import IconKey
from sleuthdeck.windows import get_window, By
| 29.083333 | 93 | 0.638748 | from __future__ import annotations
import signal
import subprocess
from time import sleep
from typing import Optional, Union, Tuple
from PIL.Image import Image
from pyautogui import hotkey
from sleuthdeck.deck import Action
from sleuthdeck.deck import ClickType
from sleuthdeck.deck import Key
from sleuthdeck.deck import KeyScene
from sleuthdeck.deck import Scene
from sleuthdeck.keys import IconKey
from sleuthdeck.windows import get_window, By
class Sequential(Action):
def __init__(self, *actions: Action) -> None:
super().__init__()
self._actions = actions
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
for action in self._actions:
action(scene, key, click)
class Command(Action):
def __init__(self, command: str, *args: str):
self.command = command
self.args = args
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
print(f"Running {self.command} {' '.join(self.args)}")
subprocess.run([self.command] + list(self.args))
class Toggle(Action):
def __init__(self, on_enable: Action, on_disable: Action, initial: bool = False) -> None:
self._on_enable = on_enable
self._on_disable = on_disable
self._state = initial
def __call__(self, scene: KeyScene, key: IconKey, click: ClickType):
if self._state:
self._on_disable(scene, key, click)
key.update_icon(enabled=False)
self._state = False
else:
self._on_enable(scene, key, click)
key.update_icon(enabled=True)
self._state = True
class ChangeScene(Action):
def __init__(self, scene: Scene):
self.scene = scene
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
scene.deck.change_scene(self.scene)
class PreviousScene(Action):
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
scene.deck.previous_scene()
class Close(Action):
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
scene.deck.close()
signal.raise_signal(signal.SIGINT)
class MaximizeWindow(Action):
def __init__(self, title: Union[str, By]):
self.title = title
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
window = get_window(self.title, attempts=5 * 10)
if window:
window.maximize()
else:
print("No window found")
class UnMaximizeWindow(Action):
def __init__(self, title: Union[str, By]):
self.title = title
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
window = get_window(self.title, attempts=5 * 10)
if window:
window.unmaximize()
else:
print("No window found")
class Pause(Action):
def __init__(self, seconds: Union[float, int]):
self.seconds = seconds
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
sleep(self.seconds)
class CloseWindow(Action):
def __init__(self, title: Union[str, By], wait=5):
self.title = title
self._wait = wait
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
window = get_window(self.title, attempts=self._wait * 10)
if window:
window.close()
else:
print(f"No window found with {self.title}")
class MoveWindow(Action):
def __init__(self, title: Union[str, By], x: int, y: int, width: int, height: int):
self.x = x
self.y = y
self.width = width
self.height = height
self.title = title
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
window = get_window(self.title, attempts=5 * 10)
if window:
window.move(self.x, self.y, self.width, self.height)
else:
print("No window found")
class SendHotkey(Action):
def __init__(self, title: Union[str, By], *hotkey: str):
self.title = title
self.hotkey = hotkey
def __call__(self, scene: KeyScene, key: Key, click: ClickType):
print("sending key")
window = get_window(self.title, attempts=5 * 10)
print("got window")
window.focus()
hotkey(*self.hotkey)
print("sent")
class DeckBrightness(Action):
def __init__(self, value: int) -> None:
self._value = value
def __call__(self, scene: KeyScene, key: IconKey, click: ClickType):
scene.deck.stream_deck.set_brightness(self._value)
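# --- Illustrative composition sketch (not part of the original file) ---
# Actions are callables taking (scene, key, click), so they can be chained with
# Sequential and flipped with Toggle; the window title and shell commands below are
# hypothetical, and the key/scene wiring is assumed to be done elsewhere in the
# sleuthdeck configuration.
#   focus_editor = Sequential(
#       MaximizeWindow("Visual Studio Code"),
#       SendHotkey("Visual Studio Code", "ctrl", "`"),
#   )
#   mute_toggle = Toggle(on_enable=Command("pactl", "set-source-mute", "0", "1"),
#                        on_disable=Command("pactl", "set-source-mute", "0", "0"))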
| 3,087 | 54 | 934 |
59f02b823d905cf3630bb0e8dff47036b37d5821 | 1,582 | py | Python | pages/insights.py | illicitDev/kepler-observations | c520632f8abc091511259cea83dacdb4aa0d6a4a | [
"MIT"
] | null | null | null | pages/insights.py | illicitDev/kepler-observations | c520632f8abc091511259cea83dacdb4aa0d6a4a | [
"MIT"
] | null | null | null | pages/insights.py | illicitDev/kepler-observations | c520632f8abc091511259cea83dacdb4aa0d6a4a | [
"MIT"
] | null | null | null | # Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Insights
            Overall I'm happy with my tree-based model's performance. Its precision and recall are both 0.78,
            and its validation accuracy score is 0.76.
            The linear model did not perform as well, with precision at 0.67 and recall at 0.42 and a validation accuracy
            of 0.53. That is not an acceptable model: simply guessing 'FALSE POSITIVE', the majority class, already gives an accuracy
            score of 0.51.
As we continue to make new observations about the planets in our galaxy and continue to collect
data from deep space, it's important to have models like this one to make sense of the data and label that
observation correctly.
"""
),
],
)
column2 = dbc.Col(
[
html.Img(src='../assets/kpe_class_report.png', style={'height': '45%', 'width': '65%'}),
html.Br(),
html.Img(src='../assets/confusion_matrix.png', style={'height': '45%', 'width': '65%'})
],
)
layout = dbc.Row([column1, column2]) | 36.790698 | 134 | 0.639697 | # Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Insights
            Overall I'm happy with my tree-based model's performance. Its precision and recall are both 0.78,
            and its validation accuracy score is 0.76.
            The linear model did not perform as well, with precision at 0.67 and recall at 0.42 and a validation accuracy
            of 0.53. That is not an acceptable model: simply guessing 'FALSE POSITIVE', the majority class, already gives an accuracy
            score of 0.51.
As we continue to make new observations about the planets in our galaxy and continue to collect
data from deep space, it's important to have models like this one to make sense of the data and label that
observation correctly.
"""
),
],
)
column2 = dbc.Col(
[
html.Img(src='../assets/kpe_class_report.png', style={'height': '45%', 'width': '65%'}),
html.Br(),
html.Img(src='../assets/confusion_matrix.png', style={'height': '45%', 'width': '65%'})
],
)
layout = dbc.Row([column1, column2]) | 0 | 0 | 0 |
9bae94530088cda6422abe15142ca4a38cf8641a | 2,551 | py | Python | code.py | Moreyash/olympic-hero | 091ee3a95fd128060c1781922d027ee02a06e7ce | [
"MIT"
] | null | null | null | code.py | Moreyash/olympic-hero | 091ee3a95fd128060c1781922d027ee02a06e7ce | [
"MIT"
] | null | null | null | code.py | Moreyash/olympic-hero | 091ee3a95fd128060c1781922d027ee02a06e7ce | [
"MIT"
] | null | null | null | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file ('path' is assumed to be predefined by the execution environment)
path
data=pd.read_csv(path).rename(columns={'Total':'Total_Medals'})
data.head(10)
#Code starts here
# --------------
#Code starts here
data['Better_Event']=np.where(data['Total_Summer'] == data['Total_Winter'] , 'Both' , (np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer','Winter')))
better_event= data['Better_Event'].value_counts().idxmax()
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
top_10_summer= top_ten(top_countries,'Total_Summer')
print(top_10_summer)
top_10_winter= top_ten(top_countries,'Total_Winter')
print(top_10_winter)
top_10= top_ten(top_countries,'Total_Medals')
print(top_10)
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print(common)
# --------------
#Code starts here
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df=data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']= summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio = (summer_df['Golden_Ratio']).max()
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']= winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio = (winter_df['Golden_Ratio']).max()
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']= top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio = (top_df['Golden_Ratio']).max()
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1= data[:-1]
data_1['Total_Points']= data_1['Gold_Total']*3 +data_1['Silver_Total']*2 +data_1['Bronze_Total']
most_points=max(data_1['Total_Points'])
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
# --------------
#Code starts here
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| 22.182609 | 163 | 0.693062 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file ('path' is assumed to be predefined by the execution environment)
path
data=pd.read_csv(path).rename(columns={'Total':'Total_Medals'})
data.head(10)
#Code starts here
# --------------
#Code starts here
data['Better_Event']=np.where(data['Total_Summer'] == data['Total_Winter'] , 'Both' , (np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer','Winter')))
better_event= data['Better_Event'].value_counts().idxmax()
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
def top_ten(data,col):
country_list=[]
country_list=list((data.nlargest(10,col)['Country_Name']))
return country_list
top_10_summer= top_ten(top_countries,'Total_Summer')
print(top_10_summer)
top_10_winter= top_ten(top_countries,'Total_Winter')
print(top_10_winter)
top_10= top_ten(top_countries,'Total_Medals')
print(top_10)
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print(common)
# --------------
#Code starts here
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df=data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']= summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio = (summer_df['Golden_Ratio']).max()
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']= winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio = (winter_df['Golden_Ratio']).max()
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']= top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio = (top_df['Golden_Ratio']).max()
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1= data[:-1]
data_1['Total_Points']= data_1['Gold_Total']*3 +data_1['Silver_Total']*2 +data_1['Bronze_Total']
most_points=max(data_1['Total_Points'])
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
# --------------
#Code starts here
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| 111 | 0 | 25 |
321be22ce44c5c5bcb005091a744cd129736f3a8 | 48 | py | Python | code/main.py | famaxth/Random.py | f8806ecf5b73be2cafe7e956d52bd8da54d715ac | [
"Unlicense"
] | null | null | null | code/main.py | famaxth/Random.py | f8806ecf5b73be2cafe7e956d52bd8da54d715ac | [
"Unlicense"
] | null | null | null | code/main.py | famaxth/Random.py | f8806ecf5b73be2cafe7e956d52bd8da54d715ac | [
"Unlicense"
] | null | null | null | import random
print(random.randint(1, 100))
| 12 | 30 | 0.708333 | import random
print(random.randint(1, 100))
| 0 | 0 | 0 |
ac84f7fdf5aaab9edad6d5a0f1b53873e9010961 | 283 | py | Python | web/autoapp.py | ChristoferHuynh/web | a7b92e5c8c3a85c6ab8182571cc4186a50b67df6 | [
"BSD-3-Clause"
] | null | null | null | web/autoapp.py | ChristoferHuynh/web | a7b92e5c8c3a85c6ab8182571cc4186a50b67df6 | [
"BSD-3-Clause"
] | null | null | null | web/autoapp.py | ChristoferHuynh/web | a7b92e5c8c3a85c6ab8182571cc4186a50b67df6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Create an application instance."""
from flask.helpers import get_debug_flag
from web.app import create_app
from web.settings import DevConfig, ProdConfig
#CONFIG = DevConfig if get_debug_flag() else ProdConfig
CONFIG = DevConfig
app = create_app(CONFIG)
| 25.727273 | 55 | 0.770318 | # -*- coding: utf-8 -*-
"""Create an application instance."""
from flask.helpers import get_debug_flag
from web.app import create_app
from web.settings import DevConfig, ProdConfig
#CONFIG = DevConfig if get_debug_flag() else ProdConfig
CONFIG = DevConfig
app = create_app(CONFIG)
| 0 | 0 | 0 |
c565bed72f6369a687a95b3bac63f41b5356c413 | 94 | py | Python | tracking/dummy.py | athrn/kognitivo | 15822338778213c09ea654ec4e06a300129f9478 | [
"Apache-2.0"
] | 80 | 2017-11-13T21:58:55.000Z | 2022-01-03T20:10:42.000Z | tracking/dummy.py | athrn/kognitivo | 15822338778213c09ea654ec4e06a300129f9478 | [
"Apache-2.0"
] | null | null | null | tracking/dummy.py | athrn/kognitivo | 15822338778213c09ea654ec4e06a300129f9478 | [
"Apache-2.0"
] | 21 | 2017-11-14T09:47:41.000Z | 2021-11-23T06:44:31.000Z | from tracking.abstract import AbstractTracker
| 15.666667 | 45 | 0.819149 | from tracking.abstract import AbstractTracker
class DummyTracker(AbstractTracker):
pass
| 0 | 24 | 23 |
74e0b709e83e18377aa4305b9ecdb1dfc071a640 | 510 | py | Python | 30-days-of-code/day8-dictionaries.py | kasimte/hackerrank | d43e6bb91428488b35fc344893612da631e0e770 | [
"MIT"
] | null | null | null | 30-days-of-code/day8-dictionaries.py | kasimte/hackerrank | d43e6bb91428488b35fc344893612da631e0e770 | [
"MIT"
] | null | null | null | 30-days-of-code/day8-dictionaries.py | kasimte/hackerrank | d43e6bb91428488b35fc344893612da631e0e770 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(input())
book = {}
for x in range(0,n):
line = input().split()
book[line[0]] = line[1]
while True:
try:
line = input().split()
name = line[0]
if name in book:
entry = book[name]
print("{0}={1}".format(name, entry))
else:
print("Not found")
except EOFError:
break
| 22.173913 | 72 | 0.558824 | # https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(input())
book = {}
for x in range(0,n):
line = input().split()
book[line[0]] = line[1]
while True:
try:
line = input().split()
name = line[0]
if name in book:
entry = book[name]
print("{0}={1}".format(name, entry))
else:
print("Not found")
except EOFError:
break
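# Illustrative session (not part of the original file), inferred from the code above:
# the first line gives n, the next n lines are "name number" entries, and every
# following line is a query that prints "name=number" or "Not found", e.g.
#   input : 2 / sam 99912222 / tom 11122222 / sam / edward
#   output: sam=99912222 / Not found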
| 0 | 0 | 0 |
70ddec5a00a2079779e22798a547a9b46b167c08 | 14,937 | py | Python | lorentz.py | theSage21/lorentz-embeddings | deb57587af284087045dcb9f4c6d4b140965d172 | [
"MIT"
] | 66 | 2019-02-20T06:29:29.000Z | 2022-03-07T21:58:59.000Z | lorentz.py | theSage21/lorentz-embeddings | deb57587af284087045dcb9f4c6d4b140965d172 | [
"MIT"
] | 6 | 2019-02-17T16:10:44.000Z | 2021-07-29T12:46:46.000Z | lorentz.py | theSage21/lorentz-embeddings | deb57587af284087045dcb9f4c6d4b140965d172 | [
"MIT"
] | 14 | 2019-02-13T09:03:35.000Z | 2021-11-08T15:25:03.000Z | import os
import sys
import torch
import random
import numpy as np
from torch import nn
from torch import optim
from tqdm import trange, tqdm
from collections import Counter
from datetime import datetime
from tensorboardX import SummaryWriter
from torch.utils.data import Dataset, DataLoader
import matplotlib
matplotlib.use("Agg") # this needs to come before other matplotlib imports
import matplotlib.pyplot as plt
plt.style.use("ggplot")
# ========================= models
class Lorentz(nn.Module):
"""
This will embed `n_items` in a `dim` dimensional lorentz space.
"""
def forward(self, I, Ks):
"""
Using the pairwise similarity matrix, generate the following inputs and
provide to this function.
Inputs:
- I : - long tensor
- size (B,)
- This denotes the `i` used in all equations.
- Ks : - long tensor
- size (B, N)
- This denotes at max `N` documents which come from the
nearest neighbor sample.
- The `j` document must be the first of the N indices.
This is used to calculate the losses
Return:
- size (B,)
- Ranking loss calculated using
document to the given `i` document.
"""
n_ks = Ks.size()[1]
ui = torch.stack([self.table(I)] * n_ks, dim=1)
uks = self.table(Ks)
# ---------- reshape for calculation
B, N, D = ui.size()
ui = ui.reshape(B * N, D)
uks = uks.reshape(B * N, D)
dists = -lorentz_scalar_product(ui, uks)
dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists)
# sometimes 2 embedding can come very close in R^D.
        # when calculating the lorentz inner product,
# -1 can become -0.99(no idea!), then arcosh will become nan
dists = -arcosh(dists)
# print(dists)
# ---------- turn back to per-sample shape
dists = dists.reshape(B, N)
loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6))
return loss
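# Sketch (added comment, not in the original file): since `dists` holds the negative
# Lorentz distances -d(u_i, u_k) with the positive sample j in column 0, the line above
# is the negative log-softmax of the positive pair,
#   loss_i = -log( exp(-d(u_i, u_j)) / sum_k exp(-d(u_i, u_k)) ),
# i.e. the softmax ranking loss commonly used for Poincare/Lorentz embeddings.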
def recon(table, pair_mat):
"Reconstruction accuracy"
count = 0
table = torch.tensor(table[1:])
for i in range(1, len(pair_mat)): # 0 padding, 1 root, we leave those two
x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D
mask = torch.tensor([0.0] * len(table))
mask[i] = 1
mask = mask * -10000.0
dists = lorentz_scalar_product(x, table) + mask
dists = (
dists.numpy()
) # arccosh is monotonically increasing, so no need of that here
        # and no -dist either: acosh is monotonically increasing, so ranking by l(x,y)
        # matches ranking by -acosh(-l(x,y))
# print(dists)
predicted_parent = np.argmax(dists)
actual_parent = np.argmax(pair_mat[:, i])
# print(predicted_parent, actual_parent, i, end="\n\n")
count += actual_parent == predicted_parent
count = count / (len(pair_mat) - 1) * 100
return count
_moon_count = 0
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dataset", help="File:pairwise_matrix")
parser.add_argument(
"-sample_size", help="How many samples in the N matrix", default=5, type=int
)
parser.add_argument(
"-batch_size", help="How many samples in the batch", default=32, type=int
)
parser.add_argument(
"-burn_c",
help="Divide learning rate by this for the burn epochs",
default=10,
type=int,
)
parser.add_argument(
"-burn_epochs",
help="How many epochs to run the burn phase for?",
default=100,
type=int,
)
parser.add_argument(
"-plot", help="Plot the embeddings", default=False, action="store_true"
)
parser.add_argument("-plot_size", help="Size of the plot", default=3, type=int)
parser.add_argument(
"-plot_graph",
help="Plot the Graph associated with the embeddings",
default=False,
action="store_true",
)
parser.add_argument(
"-overwrite_plots",
help="Overwrite the plots?",
default=False,
action="store_true",
)
parser.add_argument(
"-ckpt", help="Which checkpoint to use?", default=None, type=str
)
parser.add_argument(
"-shuffle", help="Shuffle within batch while learning?", default=True, type=bool
)
parser.add_argument(
"-epochs", help="How many epochs to optimize for?", default=1_000_000, type=int
)
parser.add_argument(
"-poincare_dim",
help="Poincare projection time. Lorentz will be + 1",
default=2,
type=int,
)
parser.add_argument(
"-n_items", help="How many items to embed?", default=None, type=int
)
parser.add_argument(
"-learning_rate", help="RSGD learning rate", default=0.1, type=float
)
parser.add_argument(
"-log_step", help="Log at what multiple of epochs?", default=1, type=int
)
parser.add_argument(
"-logdir", help="What folder to put logs in", default="runs", type=str
)
parser.add_argument(
"-save_step", help="Save at what multiple of epochs?", default=100, type=int
)
parser.add_argument(
"-savedir", help="What folder to put checkpoints in", default="ckpt", type=str
)
parser.add_argument(
"-loader_workers",
help="How many workers to generate tensors",
default=4,
type=int,
)
args = parser.parse_args()
# ----------------------------------- get the correct matrix
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
if not os.path.exists(args.savedir):
os.mkdir(args.savedir)
exec(f"from datasets import {args.dataset} as pairwise")
pairwise = pairwise[: args.n_items, : args.n_items]
args.n_items = len(pairwise) if args.n_items is None else args.n_items
print(f"{args.n_items} being embedded")
# ---------------------------------- Generate the proper objects
net = Lorentz(
args.n_items, args.poincare_dim + 1
) # as the paper follows R^(n+1) for this space
if args.plot:
if args.poincare_dim != 2:
print("Only embeddings with `-poincare_dim` = 2 are supported for now.")
sys.exit(1)
if args.ckpt is None:
print("Please provide `-ckpt` when using `-plot`")
sys.exit(1)
if os.path.isdir(args.ckpt):
paths = [
os.path.join(args.ckpt, c)
for c in os.listdir(args.ckpt)
if c.endswith("ckpt")
]
else:
paths = [args.ckpt]
paths = list(sorted(paths))
edges = [
tuple(edge)
for edge in set(
[
frozenset((a + 1, b + 1))
for a, row in enumerate(pairwise > 0)
for b, is_non_zero in enumerate(row)
if is_non_zero
]
)
]
print(len(edges), "nodes")
internal_nodes = set(
node
for node, count in Counter(
[node for edge in edges for node in edge]
).items()
if count > 1
)
edges = np.array([edge for edge in edges if edge[1] in internal_nodes])
print(len(edges), "internal nodes")
for path in tqdm(paths, desc="Plotting"):
save_path = f"{path}.svg"
if os.path.exists(save_path) and not args.overwrite_plots:
continue
net.load_state_dict(torch.load(path))
table = net.lorentz_to_poincare()
# skip padding. plot x y
plt.figure(figsize=(7, 7))
if args.plot_graph:
for edge in edges:
plt.plot(
table[edge, 0],
table[edge, 1],
color="black",
marker="o",
alpha=0.5,
)
else:
plt.scatter(table[1:, 0], table[1:, 1])
plt.title(path)
plt.gca().set_xlim(-1, 1)
plt.gca().set_ylim(-1, 1)
plt.gca().add_artist(plt.Circle((0, 0), 1, fill=False, edgecolor="black"))
plt.savefig(save_path)
plt.close()
sys.exit(0)
dataloader = DataLoader(
Graph(pairwise, args.sample_size),
shuffle=args.shuffle,
batch_size=args.batch_size,
num_workers=args.loader_workers,
)
rsgd = RSGD(net.parameters(), learning_rate=args.learning_rate)
name = f"{args.dataset} {datetime.utcnow()}"
writer = SummaryWriter(f"{args.logdir}/{name}")
with tqdm(ncols=80, mininterval=0.2) as epoch_bar:
for epoch in range(args.epochs):
rsgd.learning_rate = (
args.learning_rate / args.burn_c
if epoch < args.burn_epochs
else args.learning_rate
)
for I, Ks in dataloader:
rsgd.zero_grad()
loss = net(I, Ks).mean()
loss.backward()
rsgd.step()
writer.add_scalar("loss", loss, epoch)
writer.add_scalar(
"recon_preform", recon(net.get_lorentz_table(), pairwise), epoch
)
writer.add_scalar("table_test", net._test_table(), epoch)
if epoch % args.save_step == 0:
torch.save(net.state_dict(), f"{args.savedir}/{epoch} {name}.ckpt")
epoch_bar.set_description(
f"🔥 Burn phase loss: {float(loss)}"
if epoch < args.burn_epochs
else _moon(loss)
)
epoch_bar.update(1)
| 34.656613 | 88 | 0.54991 | import os
import sys
import torch
import random
import numpy as np
from torch import nn
from torch import optim
from tqdm import trange, tqdm
from collections import Counter
from datetime import datetime
from tensorboardX import SummaryWriter
from torch.utils.data import Dataset, DataLoader
import matplotlib
matplotlib.use("Agg") # this needs to come before other matplotlib imports
import matplotlib.pyplot as plt
plt.style.use("ggplot")
def arcosh(x):
return torch.log(x + torch.sqrt(x ** 2 - 1))
def lorentz_scalar_product(x, y):
# BD, BD -> B
m = x * y
result = m[:, 1:].sum(dim=1) - m[:, 0]
return result
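# Sketch (added comment, not in the original file): this is the Minkowski/Lorentz
# inner product <x, y>_L = -x0*y0 + x1*y1 + ... + xd*yd, e.g.
#   x = torch.tensor([[1.0, 0.0, 0.0]])
#   y = torch.tensor([[2.0, 1.0, 0.0]])
#   lorentz_scalar_product(x, y)   # tensor([-2.])  ( -(1*2) + 0*1 + 0*0 )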
def tangent_norm(x):
# BD -> B
return torch.sqrt(lorentz_scalar_product(x, x))
def exp_map(x, v):
# BD, BD -> BD
tn = tangent_norm(v).unsqueeze(dim=1)
tn_expand = tn.repeat(1, x.size()[-1])
result = torch.cosh(tn) * x + torch.sinh(tn) * (v / tn)
result = torch.where(tn_expand > 0, result, x) # only update if tangent norm is > 0
return result
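# Sketch (added comment, not in the original file): this is the exponential map of the
# hyperboloid model,
#   exp_x(v) = cosh(||v||_L) * x + sinh(||v||_L) * v / ||v||_L,
# where ||v||_L = sqrt(<v, v>_L) is the tangent norm computed above; points whose
# tangent norm is zero are left unchanged by the torch.where guard.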
def set_dim0(x):
x = torch.renorm(x, p=2, dim=0, maxnorm=1e2) # otherwise leaves will explode
# NOTE: the paper does not mention the square part of the equation but if
# you try to derive it you get a square term in the equation
dim0 = torch.sqrt(1 + (x[:, 1:] ** 2).sum(dim=1))
x[:, 0] = dim0
return x
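# Sketch (added comment, not in the original file): the hyperboloid constraint is
# <x, x>_L = -1, i.e. -x0^2 + sum_i xi^2 = -1, so the time-like coordinate must be
#   x0 = sqrt(1 + sum_i xi^2),
# which is exactly what set_dim0 recomputes after every update.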
# ========================= models
class RSGD(optim.Optimizer):
def __init__(self, params, learning_rate=None):
learning_rate = learning_rate if learning_rate is not None else 0.01
defaults = {"learning_rate": learning_rate}
super().__init__(params, defaults=defaults)
def step(self):
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
B, D = p.size()
gl = torch.eye(D, device=p.device, dtype=p.dtype)
gl[0, 0] = -1
grad_norm = torch.norm(p.grad.data)
grad_norm = torch.where(grad_norm > 1, grad_norm, torch.tensor(1.0))
# only normalize if global grad_norm is more than 1
h = (p.grad.data / grad_norm) @ gl
proj = (
h
- (
lorentz_scalar_product(p, h) / lorentz_scalar_product(p, p)
).unsqueeze(1)
* p
)
# print(p, lorentz_scalar_product(p, p))
update = exp_map(p, -group["learning_rate"] * proj)
is_nan_inf = torch.isnan(update) | torch.isinf(update)
update = torch.where(is_nan_inf, p, update)
update[0, :] = p[0, :] # no ❤️ for embedding
update = set_dim0(update)
p.data.copy_(update)
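# Sketch (added comment, not in the original file): this step implements Riemannian SGD
# on the hyperboloid: the Euclidean gradient is rescaled by the metric g_l = diag(-1, 1, ..., 1),
# projected onto the tangent space at p,
#   proj(h) = h - (<p, h>_L / <p, p>_L) * p,
# and the point is moved along a geodesic with exp_map(p, -lr * proj).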
class Lorentz(nn.Module):
"""
This will embed `n_items` in a `dim` dimensional lorentz space.
"""
def __init__(self, n_items, dim, init_range=0.001):
super().__init__()
self.n_items = n_items
self.dim = dim
self.table = nn.Embedding(n_items + 1, dim, padding_idx=0)
nn.init.uniform_(self.table.weight, -init_range, init_range)
# equation 6
with torch.no_grad():
self.table.weight[0] = 5 # padding idx push it to corner
set_dim0(self.table.weight)
def forward(self, I, Ks):
"""
Using the pairwise similarity matrix, generate the following inputs and
provide to this function.
Inputs:
- I : - long tensor
- size (B,)
- This denotes the `i` used in all equations.
- Ks : - long tensor
- size (B, N)
- This denotes at max `N` documents which come from the
nearest neighbor sample.
- The `j` document must be the first of the N indices.
This is used to calculate the losses
Return:
- size (B,)
- Ranking loss calculated using
document to the given `i` document.
"""
n_ks = Ks.size()[1]
ui = torch.stack([self.table(I)] * n_ks, dim=1)
uks = self.table(Ks)
# ---------- reshape for calculation
B, N, D = ui.size()
ui = ui.reshape(B * N, D)
uks = uks.reshape(B * N, D)
dists = -lorentz_scalar_product(ui, uks)
dists = torch.where(dists <= 1, torch.ones_like(dists) + 1e-6, dists)
# sometimes 2 embedding can come very close in R^D.
        # when calculating the lorentz inner product,
# -1 can become -0.99(no idea!), then arcosh will become nan
dists = -arcosh(dists)
# print(dists)
# ---------- turn back to per-sample shape
dists = dists.reshape(B, N)
loss = -(dists[:, 0] - torch.log(torch.exp(dists).sum(dim=1) + 1e-6))
return loss
def lorentz_to_poincare(self):
table = self.table.weight.data.numpy()
return table[:, 1:] / (
table[:, :1] + 1
) # diffeomorphism transform to poincare ball
def get_lorentz_table(self):
return self.table.weight.data.numpy()
def _test_table(self):
x = self.table.weight.data
check = lorentz_scalar_product(x, x) + 1.0
return check.numpy().sum()
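# --- Illustrative usage sketch (not part of the original file) ---
# A batch of B anchor items I and, for each, N candidate items Ks whose first column is
# the positive sample, as the forward() docstring describes; the sizes below are arbitrary.
#   net = Lorentz(n_items=10, dim=3)            # 2D Poincare ball -> 3D Lorentz space
#   I = torch.tensor([1, 2])                    # shape (B,)
#   Ks = torch.tensor([[2, 3, 4], [1, 5, 6]])   # shape (B, N)
#   loss = net(I, Ks).mean()                    # scalar ranking loss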
class Graph(Dataset):
def __init__(self, pairwise_matrix, sample_size=10):
self.pairwise_matrix = pairwise_matrix
self.n_items = len(pairwise_matrix)
self.sample_size = sample_size
self.arange = np.arange(0, self.n_items)
def __len__(self):
return self.n_items
def __getitem__(self, i):
I = torch.Tensor([i + 1]).squeeze().long()
has_child = (self.pairwise_matrix[i] > 0).sum()
has_parent = (self.pairwise_matrix[:, i] > 0).sum()
arange = np.random.permutation(self.arange)
if has_parent: # if no child go for parent
for j in arange:
if self.pairwise_matrix[j, i] > 0: # assuming no disconneted nodes
min = self.pairwise_matrix[j, i]
break
elif has_child:
for j in arange:
if self.pairwise_matrix[i, j] > 0: # assuming no self loop
min = self.pairwise_matrix[i, j]
break
else:
raise Exception(f"Node {i} has no parent and no child")
arange = np.random.permutation(self.arange)
if has_child:
indices = [x for x in arange if i != x and self.pairwise_matrix[i, x] < min]
else:
indices = [x for x in arange if i != x and self.pairwise_matrix[x, i] < min]
indices = indices[: self.sample_size]
Ks = ([i + 1 for i in [j] + indices] + [0] * self.sample_size)[
: self.sample_size
]
# print(I, Ks)
return I, torch.Tensor(Ks).long()
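# Sketch (added comment, not in the original file): each __getitem__ call returns the
# 1-based anchor index I and a fixed-length vector Ks whose first entry is a sampled
# parent/child j and whose remaining entries are negatives (0 is the padding index).
# Assuming `pairwise` is the matrix loaded in __main__ below:
#   graph = Graph(pairwise, sample_size=5)
#   I, Ks = graph[3]        # I -> tensor(4), Ks -> long tensor of shape (5,) starting with j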
def recon(table, pair_mat):
"Reconstruction accuracy"
count = 0
table = torch.tensor(table[1:])
for i in range(1, len(pair_mat)): # 0 padding, 1 root, we leave those two
x = table[i].repeat(len(table)).reshape([len(table), len(table[i])]) # N, D
mask = torch.tensor([0.0] * len(table))
mask[i] = 1
mask = mask * -10000.0
dists = lorentz_scalar_product(x, table) + mask
dists = (
dists.numpy()
) # arccosh is monotonically increasing, so no need of that here
        # and no -dist either: acosh is monotonically increasing, so ranking by l(x,y)
        # matches ranking by -acosh(-l(x,y))
# print(dists)
predicted_parent = np.argmax(dists)
actual_parent = np.argmax(pair_mat[:, i])
# print(predicted_parent, actual_parent, i, end="\n\n")
count += actual_parent == predicted_parent
count = count / (len(pair_mat) - 1) * 100
return count
_moon_count = 0
def _moon(loss, phases="🌕🌖🌗🌘🌑🌒🌓🌔"):
global _moon_count
_moon_count += 1
p = phases[_moon_count % 8]
return f"{p} Loss: {float(loss)}"
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dataset", help="File:pairwise_matrix")
parser.add_argument(
"-sample_size", help="How many samples in the N matrix", default=5, type=int
)
parser.add_argument(
"-batch_size", help="How many samples in the batch", default=32, type=int
)
parser.add_argument(
"-burn_c",
help="Divide learning rate by this for the burn epochs",
default=10,
type=int,
)
parser.add_argument(
"-burn_epochs",
help="How many epochs to run the burn phase for?",
default=100,
type=int,
)
parser.add_argument(
"-plot", help="Plot the embeddings", default=False, action="store_true"
)
parser.add_argument("-plot_size", help="Size of the plot", default=3, type=int)
parser.add_argument(
"-plot_graph",
help="Plot the Graph associated with the embeddings",
default=False,
action="store_true",
)
parser.add_argument(
"-overwrite_plots",
help="Overwrite the plots?",
default=False,
action="store_true",
)
parser.add_argument(
"-ckpt", help="Which checkpoint to use?", default=None, type=str
)
parser.add_argument(
"-shuffle", help="Shuffle within batch while learning?", default=True, type=bool
)
parser.add_argument(
"-epochs", help="How many epochs to optimize for?", default=1_000_000, type=int
)
parser.add_argument(
"-poincare_dim",
help="Poincare projection time. Lorentz will be + 1",
default=2,
type=int,
)
parser.add_argument(
"-n_items", help="How many items to embed?", default=None, type=int
)
parser.add_argument(
"-learning_rate", help="RSGD learning rate", default=0.1, type=float
)
parser.add_argument(
"-log_step", help="Log at what multiple of epochs?", default=1, type=int
)
parser.add_argument(
"-logdir", help="What folder to put logs in", default="runs", type=str
)
parser.add_argument(
"-save_step", help="Save at what multiple of epochs?", default=100, type=int
)
parser.add_argument(
"-savedir", help="What folder to put checkpoints in", default="ckpt", type=str
)
parser.add_argument(
"-loader_workers",
help="How many workers to generate tensors",
default=4,
type=int,
)
args = parser.parse_args()
# ----------------------------------- get the correct matrix
if not os.path.exists(args.logdir):
os.mkdir(args.logdir)
if not os.path.exists(args.savedir):
os.mkdir(args.savedir)
exec(f"from datasets import {args.dataset} as pairwise")
pairwise = pairwise[: args.n_items, : args.n_items]
args.n_items = len(pairwise) if args.n_items is None else args.n_items
print(f"{args.n_items} being embedded")
# ---------------------------------- Generate the proper objects
net = Lorentz(
args.n_items, args.poincare_dim + 1
) # as the paper follows R^(n+1) for this space
if args.plot:
if args.poincare_dim != 2:
print("Only embeddings with `-poincare_dim` = 2 are supported for now.")
sys.exit(1)
if args.ckpt is None:
print("Please provide `-ckpt` when using `-plot`")
sys.exit(1)
if os.path.isdir(args.ckpt):
paths = [
os.path.join(args.ckpt, c)
for c in os.listdir(args.ckpt)
if c.endswith("ckpt")
]
else:
paths = [args.ckpt]
paths = list(sorted(paths))
edges = [
tuple(edge)
for edge in set(
[
frozenset((a + 1, b + 1))
for a, row in enumerate(pairwise > 0)
for b, is_non_zero in enumerate(row)
if is_non_zero
]
)
]
print(len(edges), "nodes")
internal_nodes = set(
node
for node, count in Counter(
[node for edge in edges for node in edge]
).items()
if count > 1
)
edges = np.array([edge for edge in edges if edge[1] in internal_nodes])
print(len(edges), "internal nodes")
for path in tqdm(paths, desc="Plotting"):
save_path = f"{path}.svg"
if os.path.exists(save_path) and not args.overwrite_plots:
continue
net.load_state_dict(torch.load(path))
table = net.lorentz_to_poincare()
# skip padding. plot x y
plt.figure(figsize=(7, 7))
if args.plot_graph:
for edge in edges:
plt.plot(
table[edge, 0],
table[edge, 1],
color="black",
marker="o",
alpha=0.5,
)
else:
plt.scatter(table[1:, 0], table[1:, 1])
plt.title(path)
plt.gca().set_xlim(-1, 1)
plt.gca().set_ylim(-1, 1)
plt.gca().add_artist(plt.Circle((0, 0), 1, fill=False, edgecolor="black"))
plt.savefig(save_path)
plt.close()
sys.exit(0)
dataloader = DataLoader(
Graph(pairwise, args.sample_size),
shuffle=args.shuffle,
batch_size=args.batch_size,
num_workers=args.loader_workers,
)
rsgd = RSGD(net.parameters(), learning_rate=args.learning_rate)
name = f"{args.dataset} {datetime.utcnow()}"
writer = SummaryWriter(f"{args.logdir}/{name}")
with tqdm(ncols=80, mininterval=0.2) as epoch_bar:
for epoch in range(args.epochs):
rsgd.learning_rate = (
args.learning_rate / args.burn_c
if epoch < args.burn_epochs
else args.learning_rate
)
for I, Ks in dataloader:
rsgd.zero_grad()
loss = net(I, Ks).mean()
loss.backward()
rsgd.step()
writer.add_scalar("loss", loss, epoch)
writer.add_scalar(
"recon_preform", recon(net.get_lorentz_table(), pairwise), epoch
)
writer.add_scalar("table_test", net._test_table(), epoch)
if epoch % args.save_step == 0:
torch.save(net.state_dict(), f"{args.savedir}/{epoch} {name}.ckpt")
epoch_bar.set_description(
f"🔥 Burn phase loss: {float(loss)}"
if epoch < args.burn_epochs
else _moon(loss)
)
epoch_bar.update(1)
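# --- Illustrative invocation (not part of the original file) ---
# The positional dataset argument names a pairwise matrix inside a local `datasets`
# module ("binary_tree" below is a hypothetical name); the flags mirror the argparse
# definitions above.
#   python lorentz.py binary_tree -poincare_dim 2 -batch_size 32 -epochs 1000
#   python lorentz.py binary_tree -plot -ckpt ckpt/   # render saved checkpoints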
| 4,536 | 7 | 425 |
27881eb6fa948140af012299c81579cdcda82f22 | 519 | py | Python | Python/retest.py | yutakakn/MyScript | cb40e0077fdce1d64f658227952ae1654e7510ae | [
"BSD-3-Clause"
] | null | null | null | Python/retest.py | yutakakn/MyScript | cb40e0077fdce1d64f658227952ae1654e7510ae | [
"BSD-3-Clause"
] | null | null | null | Python/retest.py | yutakakn/MyScript | cb40e0077fdce1d64f658227952ae1654e7510ae | [
"BSD-3-Clause"
] | 2 | 2017-07-19T15:26:19.000Z | 2017-07-19T15:50:59.000Z | # coding: utf-8
#
# Pythonでの正規表現サンプル
#
# Update: 2018/3/21
#
import re
# 検索対象のテキスト
textdata = '''
Welcome to Infra workshop!
Kusotsui
Hikariare
Misogi
Misogi999
Misogi9999
'''.strip()
# print m.groups(0)
# メインルーチン
if __name__ == "__main__":
main()
| 12.658537 | 34 | 0.635838 | # coding: utf-8
#
# Pythonでの正規表現サンプル
#
# Update: 2018/3/21
#
import re
# 検索対象のテキスト
textdata = '''
Welcome to Infra workshop!
Kusotsui
Hikariare
Misogi
Misogi999
Misogi9999
'''.strip()
def displaymatch(m):
if (m is None):
print('No match')
else:
print('Match: %r, groups=%r'
% (m.group(), m.groups())
)
# print m.groups(0)
# メインルーチン
def main():
# print textdata
# Misogi999の999を探す
rep = re.compile(r"[Mm].*?(\d+)")
m = rep.search(textdata)
displaymatch(m)
if __name__ == "__main__":
main()
| 223 | 0 | 46 |
f8d3247de75c172564bcdc54e2ae4384a81a46f1 | 91 | py | Python | benefits/apps.py | linikerunk/tcc-people-analytics | fdda975682d5299c8384e31ebb974dc085330875 | [
"MIT"
] | null | null | null | benefits/apps.py | linikerunk/tcc-people-analytics | fdda975682d5299c8384e31ebb974dc085330875 | [
"MIT"
] | 1 | 2020-10-11T10:09:39.000Z | 2020-10-11T10:09:39.000Z | benefits/apps.py | linikerunk/TCC_PeopleAnalytics | fdda975682d5299c8384e31ebb974dc085330875 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.166667 | 33 | 0.758242 | from django.apps import AppConfig
class BenefitsConfig(AppConfig):
name = 'benefits'
| 0 | 33 | 23 |
5ea1c26b7f60345f58e78ab7f9520a367c9920ad | 1,313 | py | Python | caching.py | kudep/df_addon_turn_caching | f70bba0a8c60a92fab836605431b2bd8a4f6bc83 | [
"Apache-2.0"
] | null | null | null | caching.py | kudep/df_addon_turn_caching | f70bba0a8c60a92fab836605431b2bd8a4f6bc83 | [
"Apache-2.0"
] | null | null | null | caching.py | kudep/df_addon_turn_caching | f70bba0a8c60a92fab836605431b2bd8a4f6bc83 | [
"Apache-2.0"
] | null | null | null | """
Caching
---------------------------
"""
import functools
from df_engine.core import Actor, Context
from df_engine.core.types import ActorStage
class OneTurnCache:
"""
Class that caches the information from the last turn.
"""
| 26.795918 | 79 | 0.620716 | """
Caching
---------------------------
"""
import functools
from df_engine.core import Actor, Context
from df_engine.core.types import ActorStage
class OneTurnCache:
"""
Class that caches the information from the last turn.
"""
def __init__(self):
self.wrappers = []
def update_actor_handlers(self, actor: Actor) -> Actor:
handlers = actor.handlers.get(ActorStage.CONTEXT_INIT, [])
handlers += [self.clear_cache_handler]
actor.handlers[ActorStage.CONTEXT_INIT] = handlers
return actor
def clear_cache_handler(self, ctx: Context, actor: Actor, *args, **kwargs):
[wrapper.cache_clear() for wrapper in self.wrappers]
def cache(self, func):
@functools.cache
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
self.wrappers += [wrapper]
return wrapper
def lru_cache(self, maxsize=128, typed=False):
_maxsize = 128 if callable(maxsize) else maxsize
def decorator(func):
@functools.lru_cache(maxsize=_maxsize, typed=typed)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
self.wrappers += [wrapper]
return wrapper
return decorator(maxsize) if callable(maxsize) else decorator
| 933 | 0 | 135 |
85d5dd82fb31e2230af5808895e2ef4990361f83 | 4,854 | py | Python | multiple_pattern_durations/code/filter_results.py | INM-6/SPADE_applications | 1633907a5c8c295f758a07fe4d594efbfa94155e | [
"BSD-3-Clause"
] | 2 | 2020-04-10T08:47:32.000Z | 2020-10-10T01:30:18.000Z | multiple_pattern_durations/code/filter_results.py | INM-6/SPADE_applications | 1633907a5c8c295f758a07fe4d594efbfa94155e | [
"BSD-3-Clause"
] | 1 | 2020-10-10T10:08:43.000Z | 2021-02-05T06:57:11.000Z | multiple_pattern_durations/code/filter_results.py | INM-6/SPADE_applications | 1633907a5c8c295f758a07fe4d594efbfa94155e | [
"BSD-3-Clause"
] | 2 | 2020-10-10T05:21:28.000Z | 2021-08-25T09:56:50.000Z | import numpy as np
import elephant.spade as spade
import argparse
import yaml
from yaml import Loader
# Function to filter patterns when the output format of spade function
# is 'patterns'
def _pattern_spectrum_filter(patterns, ns_signature, spectrum, winlen):
"""
    Filter to select concepts whose signature is significant
"""
if spectrum == '3d#':
keep_concept = patterns['signature'] + tuple([max(
np.abs(np.diff(np.array(patterns['lags']) % winlen)))]) \
not in ns_signature
else:
keep_concept = patterns['signature'] not in ns_signature
return keep_concept
if __name__ == '__main__':
# Load parameters dictionary
param_dict = np.load('../data/art_data.npy',
encoding='latin1').item()['params']
lengths = param_dict['lengths']
binsize = param_dict['binsize']
winlens = [int(l/binsize)+1 for l in lengths]
print(winlens)
# Filtering parameters
# Load general parameters
with open("configfile.yaml", 'r') as stream:
config = yaml.load(stream, Loader=Loader)
alpha = config['alpha']
psr_param = config['psr_param']
correction = config['correction']
min_occ = config['min_occ']
# Passing spectrum parameter
parser = argparse.ArgumentParser(description='Compute spade on artificial data'
' for the given winlen and '
'spectrum parameters')
parser.add_argument('spectrum', metavar='spectrum', type=str,
help='spectrum parameter of the spade function')
parser.add_argument('winlen', metavar='winlen', type=int,
help='winlen parameter of the spade function')
args = parser.parse_args()
spectrum = args.spectrum
winlen = args.winlen
# Filtering parameters for the different window length
# Loading result
res_spade, params = \
np.load('../results/{}/winlen{}/art_data_results.npy'.format(spectrum,
winlen),
encoding='latin1')
concepts = res_spade['patterns']
pval_spectrum = res_spade['pvalue_spectrum']
# SPADE parameters
spectrum = params['spectrum']
min_spikes = params['min_spikes']
n_surr = params['n_surr']
# PSF filtering
if len(pval_spectrum) == 0:
ns_sgnt = []
else:
# Computing non-significant entries of the spectrum applying
# the statistical correction
ns_sgnt = spade.test_signature_significance(
pval_spectrum, alpha, corr=correction, report='e',
spectrum=spectrum)
concepts_psf = list(filter(
lambda c: spade._pattern_spectrum_filter(
c, ns_sgnt, spectrum, winlen), concepts))
print('Winlen:', winlen)
print('Non significant signatures:', sorted(ns_sgnt))
print('Number of significant patterns before psr:', len(concepts_psf))
# PSR filtering
# Decide whether filter the concepts using psr
if psr_param is not None:
# Filter using conditional tests (psr)
if 0 < alpha < 1 and n_surr > 0:
concepts_psr = spade.pattern_set_reduction(concepts_psf, ns_sgnt,
winlen=winlen,
h=psr_param[0],
k=psr_param[1],
l=psr_param[2],
min_spikes=min_spikes,
min_occ=min_occ)
else:
concepts_psr = spade.pattern_set_reduction(concepts_psf, [],
winlen=winlen,
h=psr_param[0],
k=psr_param[1],
l=psr_param[2],
min_spikes=min_spikes,
min_occ=min_occ)
patterns = spade.concept_output_to_patterns(
concepts_psr, winlen, binsize, pval_spectrum)
else:
patterns = spade.concept_output_to_patterns(
concepts_psf, winlen, binsize, pval_spectrum)
print('Number of significant patterns after psr:', len(concepts_psf))
# Storing filtered results
params['alpha'] = alpha
params['psr_param'] = psr_param
params['correction'] = correction
params['min_occ'] = min_occ
np.save(
'../results/{}/winlen{}/filtered_patterns.npy'.format(
spectrum, winlen), [patterns, pval_spectrum, ns_sgnt, params])
| 42.578947 | 83 | 0.54965 | import numpy as np
import elephant.spade as spade
import argparse
import yaml
from yaml import Loader
# Function to filter patterns when the output format of spade function
# is 'patterns'
def _pattern_spectrum_filter(patterns, ns_signature, spectrum, winlen):
"""
    Filter to select concepts whose signature is significant
"""
if spectrum == '3d#':
keep_concept = patterns['signature'] + tuple([max(
np.abs(np.diff(np.array(patterns['lags']) % winlen)))]) \
not in ns_signature
else:
keep_concept = patterns['signature'] not in ns_signature
return keep_concept
if __name__ == '__main__':
# Load parameters dictionary
param_dict = np.load('../data/art_data.npy',
encoding='latin1').item()['params']
lengths = param_dict['lengths']
binsize = param_dict['binsize']
winlens = [int(l/binsize)+1 for l in lengths]
print(winlens)
# Filtering parameters
# Load general parameters
with open("configfile.yaml", 'r') as stream:
config = yaml.load(stream, Loader=Loader)
alpha = config['alpha']
psr_param = config['psr_param']
correction = config['correction']
min_occ = config['min_occ']
# Passing spectrum parameter
parser = argparse.ArgumentParser(description='Compute spade on artificial data'
' for the given winlen and '
'spectrum parameters')
parser.add_argument('spectrum', metavar='spectrum', type=str,
help='spectrum parameter of the spade function')
parser.add_argument('winlen', metavar='winlen', type=int,
help='winlen parameter of the spade function')
args = parser.parse_args()
spectrum = args.spectrum
winlen = args.winlen
# Filtering parameters for the different window length
# Loading result
res_spade, params = \
np.load('../results/{}/winlen{}/art_data_results.npy'.format(spectrum,
winlen),
encoding='latin1')
concepts = res_spade['patterns']
pval_spectrum = res_spade['pvalue_spectrum']
# SPADE parameters
spectrum = params['spectrum']
min_spikes = params['min_spikes']
n_surr = params['n_surr']
# PSF filtering
if len(pval_spectrum) == 0:
ns_sgnt = []
else:
# Computing non-significant entries of the spectrum applying
# the statistical correction
ns_sgnt = spade.test_signature_significance(
pval_spectrum, alpha, corr=correction, report='e',
spectrum=spectrum)
concepts_psf = list(filter(
lambda c: spade._pattern_spectrum_filter(
c, ns_sgnt, spectrum, winlen), concepts))
print('Winlen:', winlen)
print('Non significant signatures:', sorted(ns_sgnt))
print('Number of significant patterns before psr:', len(concepts_psf))
# PSR filtering
# Decide whether filter the concepts using psr
if psr_param is not None:
# Filter using conditional tests (psr)
if 0 < alpha < 1 and n_surr > 0:
concepts_psr = spade.pattern_set_reduction(concepts_psf, ns_sgnt,
winlen=winlen,
h=psr_param[0],
k=psr_param[1],
l=psr_param[2],
min_spikes=min_spikes,
min_occ=min_occ)
else:
concepts_psr = spade.pattern_set_reduction(concepts_psf, [],
winlen=winlen,
h=psr_param[0],
k=psr_param[1],
l=psr_param[2],
min_spikes=min_spikes,
min_occ=min_occ)
patterns = spade.concept_output_to_patterns(
concepts_psr, winlen, binsize, pval_spectrum)
else:
patterns = spade.concept_output_to_patterns(
concepts_psf, winlen, binsize, pval_spectrum)
print('Number of significant patterns after psr:', len(concepts_psf))
# Storing filtered results
params['alpha'] = alpha
params['psr_param'] = psr_param
params['correction'] = correction
params['min_occ'] = min_occ
np.save(
'../results/{}/winlen{}/filtered_patterns.npy'.format(
spectrum, winlen), [patterns, pval_spectrum, ns_sgnt, params])
| 0 | 0 | 0 |
ed71b3a65a656d7d6424d6da1dcc78046962b2fd | 4,635 | py | Python | OAparser.py | mickolaine/OAstats | 6d8f8677fb9f807cfcd4f18f894d983c872983bf | [
"Unlicense"
] | null | null | null | OAparser.py | mickolaine/OAstats | 6d8f8677fb9f807cfcd4f18f894d983c872983bf | [
"Unlicense"
] | null | null | null | OAparser.py | mickolaine/OAstats | 6d8f8677fb9f807cfcd4f18f894d983c872983bf | [
"Unlicense"
] | null | null | null | """
Parser handles reading and interpreting the OA logfile
"""
from player import Player | 30.493421 | 142 | 0.480475 | """
Parser handles reading and interpreting the OA logfile
"""
from player import Player
class Parser:
def __init__(self, logfile):
self.logfile = logfile
self.loglist = []
self.log = []
self.players = {}
self.open()
self.get_timecodes()
self.get_commands()
self.get_players()
self.get_kills()
self.results()
self.guns = {
1: "Shotgun",
3: "Machinegun",
7: "",
8: "",
10: "Railgun",
20: "Suicide",
}
def open(self):
try:
f = open(self.logfile)
for line in f:
self.loglist.append(line)
except IOError:
print("File", self.logfile, "not found!")
def get_timecodes(self):
for i in self.loglist:
self.log.append(i[7:].strip())
def get_commands(self):
#remove = []
temp = []
for i in self.log:
#print(i)
try:
temp.append(i.split(":", 1))
except IndexError:
print(i)
pass
print("Commands extracted")
#for i in remove:
# self.log.pop(i)
self.log = temp
def get_players(self):
print("Getting players from log size of", len(self.log))
for i in self.log:
if i[0] == "ClientUserinfoChanged":
temp = i[1].split("n\\", 1)
#print("Found", temp[1].split("\\"))
playernumber = int(temp[0].strip())
if playernumber > 30: continue
self.players[playernumber] = Player(number=playernumber)
self.extract_player_data(self.players[playernumber], temp[1].split("\\"))
#for i in self.players:
# print(self.players[i].number, self.players[i].name)
#print(self.players)
def get_kills(self):
for i in self.log:
if i[0] == "Kill":
data = i[1].split(":", 1)[0].split()
if int(data[0]) > 30:
if int(data[0]) == 1022:
pass
else:
print(data[0], "omitted")
continue
if self.players[int(data[0])].team == self.players[int(data[1])].team:
team = True
else: team = False
self.players[int(data[0])].addkill(int(data[1]), team)
self.players[int(data[1])].adddeath(int(data[0]))
self.players[int(data[0])].weapons(int(data[2]))
def results(self):
toplist = {}
for i in self.players:
toplist[self.players[i].number] = self.players[i].killnumber
a = sorted(toplist, key=toplist.get, reverse=True)
maxlength = 0
for i in a:
if maxlength < len(self.players[i].name): maxlength = len(self.players[i].name)
print("The results are ready:\n")
print(f"Player {(maxlength - 5)*' '} Kills Most Killed Fav Gun Teamkills")
for i in a:
mostkilled_number = self.players[i].mostkilled()
if mostkilled_number != -1:
mostkilled = self.players[mostkilled_number].name
else:
mostkilled = "Ei tappoja"
favgun = self.favgun_name(self.players[i].favgun())
teamkills = self.players[i].teamkills
space1 = (maxlength - len(self.players[i].name) + 2)*" "
space2 = (7 - len(str(self.players[i].killnumber)))*" "
space3 = (maxlength - len(mostkilled))*" "
space4 = (25 - len(str(favgun)))*" "
print(f"{self.players[i].name}{space1} {self.players[i].killnumber}{space2} {mostkilled} {space3} {favgun}{space4} {teamkills}")
@staticmethod
def extract_player_data(player, data):
player.data = data
player.name = data[0]
player.team = data[2]
@staticmethod
def favgun_name(number):
gunlist = {
-1: "None",
1: "Shotgun",
2: "Gauntlet",
3: "Machinegun",
4: "Grenade launcher",
5: "Grenade splash",
6: "Rocket launcher",
7: "Rocket launcher splash",
8: "Plasma gun",
9: "9",
10: "Railgun",
18: "Telefrag",
19: "Falling",
20: "Suicide",
22: "Mod_Trigger_Hurt"
}
return gunlist[number] | 4,218 | 305 | 23 |
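# --- Illustrative usage sketch (not part of the original file) ---
# The constructor drives the whole pipeline (open -> parse -> print results);
# the log file name and the external `player` module are assumptions.
#
# stats = Parser("openarena_games.log")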
c1741bf2d02de78900fad92c0710ae53ffbbf44b | 528 | py | Python | cla_backend/apps/legalaid/migrations/0009_remove_case_old_eod_details.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 3 | 2019-10-02T15:31:03.000Z | 2022-01-13T10:15:53.000Z | cla_backend/apps/legalaid/migrations/0009_remove_case_old_eod_details.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 206 | 2015-01-02T16:50:11.000Z | 2022-02-16T20:16:05.000Z | cla_backend/apps/legalaid/migrations/0009_remove_case_old_eod_details.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 6 | 2015-03-23T23:08:42.000Z | 2022-02-15T17:04:44.000Z | # coding=utf-8
from __future__ import unicode_literals
from django.db import models, migrations
| 26.4 | 87 | 0.660985 | # coding=utf-8
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("legalaid", "0008_eod_data_migration")]
operations = [
migrations.RemoveField(model_name="case", name="old_eod_details"),
migrations.AlterField(
model_name="eoddetails",
name="case",
field=models.OneToOneField(related_name="eod_details", to="legalaid.Case"),
preserve_default=True,
),
]
| 0 | 407 | 23 |
edc0c4f5c8e1beb9abd5438d714fc99b83402b63 | 702 | py | Python | tests/internet_search_demo.py | xiaobuguilaile/service-oriented-chatbot | 3966b4fa1c2ee9bf5544b11a0241df9f3fa6ec2a | [
"MIT"
] | null | null | null | tests/internet_search_demo.py | xiaobuguilaile/service-oriented-chatbot | 3966b4fa1c2ee9bf5544b11a0241df9f3fa6ec2a | [
"MIT"
] | null | null | null | tests/internet_search_demo.py | xiaobuguilaile/service-oriented-chatbot | 3966b4fa1c2ee9bf5544b11a0241df9f3fa6ec2a | [
"MIT"
] | null | null | null | # -*-coding:utf-8 -*-
'''
@File : internet_search_demo.py
@Author : HW Shen
@Date : 2020/5/26
@Desc :
'''
from ServiceOrientedChatbot.search_dialog import SearchEngine
from ServiceOrientedChatbot.utils import logger
if __name__ == '__main__':
engine = SearchEngine()
logger.debug(engine.search("北京今天天气如何?"))
logger.debug(engine.search("上海呢?"))
# logger.debug(engine.search("武汉呢?"))
# logger.debug(engine.search("武汉明天呢?"))
#
# ans = engine.search("貂蝉是谁")
# logger.debug(ans)
# ans = engine.search("西施是谁")
# logger.debug(ans)
# ans = engine.search("你知道我是谁")
# logger.debug(ans)
context = engine.contents
print(context)
| 21.9375 | 61 | 0.632479 | # -*-coding:utf-8 -*-
'''
@File : internet_search_demo.py
@Author : HW Shen
@Date : 2020/5/26
@Desc :
'''
from ServiceOrientedChatbot.search_dialog import SearchEngine
from ServiceOrientedChatbot.utils import logger
if __name__ == '__main__':
engine = SearchEngine()
logger.debug(engine.search("北京今天天气如何?"))
logger.debug(engine.search("上海呢?"))
# logger.debug(engine.search("武汉呢?"))
# logger.debug(engine.search("武汉明天呢?"))
#
# ans = engine.search("貂蝉是谁")
# logger.debug(ans)
# ans = engine.search("西施是谁")
# logger.debug(ans)
# ans = engine.search("你知道我是谁")
# logger.debug(ans)
context = engine.contents
print(context)
| 0 | 0 | 0 |
ba66223be12038ddd3602f3150abab92c7d6c0c4 | 5,502 | py | Python | openmdao/devtools/iprof_utils.py | hwangjt/blue | 609defbe476c86a4a2eddd12977b47e649ea7f50 | [
"Apache-2.0"
] | null | null | null | openmdao/devtools/iprof_utils.py | hwangjt/blue | 609defbe476c86a4a2eddd12977b47e649ea7f50 | [
"Apache-2.0"
] | null | null | null | openmdao/devtools/iprof_utils.py | hwangjt/blue | 609defbe476c86a4a2eddd12977b47e649ea7f50 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import sys
import ast
from inspect import getmembers
from fnmatch import fnmatchcase
from collections import defaultdict
from openmdao.core.system import System
from openmdao.core.problem import Problem
from openmdao.core.driver import Driver
from openmdao.solvers.solver import Solver
from openmdao.jacobians.jacobian import Jacobian
from openmdao.matrices.matrix import Matrix
from openmdao.vectors.vector import Vector, Transfer
class FunctionFinder(ast.NodeVisitor):
"""
This class locates all of the functions and methods in a file and associates any
method with its corresponding class.
"""
def find_qualified_name(filename, line, cache, full=True):
"""
Determine full function name (class.method) or function for unbound functions.
Parameters
----------
filename : str
Name of file containing source code.
line : int
        Line number within the given file.
    cache : dict
        A dictionary containing information by filename.
full : bool
If True, assemble the full name else return the parts
Returns
-------
str or None
Fully qualified function/method name or None.
"""
if filename not in cache:
fcache = {}
with open(filename, 'Ur') as f:
contents = f.read()
if len(contents) > 0 and contents[-1] != '\n':
contents += '\n'
FunctionFinder(filename, fcache).visit(ast.parse(contents, filename))
cache[filename] = fcache
if full:
parts = cache[filename][line]
if parts[0]:
return '.'.join((parts[0], parts[2]))
else:
return '.'.join((parts[1], parts[2]))
return cache[filename][line]
# This maps a simple identifier to a group of classes and corresponding
# glob patterns for each class.
func_group = {
'openmdao': [
("*", (System, Jacobian, Matrix, Solver, Driver, Problem)),
],
'openmdao_all': [
("*", (System, Vector, Transfer, Jacobian, Matrix, Solver, Driver, Problem)),
],
'setup': [
("*setup*", (System, Solver, Driver, Problem)),
],
'dataflow': [
('*compute*', (System,)),
('*linear*', (System,)),
('*', (Transfer,)),
],
'linear': [
('*linear*', (System,)),
('*solve*', (Solver,)),
]
}
try:
from mpi4py import MPI
from petsc4py import PETSc
from openmdao.vectors.petsc_vector import PETScVector, PETScTransfer
#TODO: this needs work. Still lots of MPI calls not covered here...
func_group['mpi'] = [
('*', (PETScTransfer,)),
('get_norm', (PETScVector,)),
('_initialize_data', (PETScVector,))
]
except ImportError:
pass
def _collect_methods(method_patterns):
"""
Iterate over a dict of method name patterns mapped to classes. Search
through the classes for anything that matches and return a dict of
exact name matches and their corresponding classes.
Parameters
----------
method_patterns : [(pattern1, (class1, class2, ... class_n)), ... (pattern_n, (class_n1, class_n2, ...)]
List of tuples of glob patterns and lists of classes used for isinstance checks
Returns
-------
defaultdict
Dict of method names and tuples of all classes that matched for that method. Default value
of the dict is a class that matches nothing
"""
matches = defaultdict(list)
# TODO: update this to also work with stand-alone functions
for pattern, classes in method_patterns:
for class_ in classes:
for name, obj in getmembers(class_):
if callable(obj) and (pattern == '*' or fnmatchcase(name, pattern)):
matches[name].append(class_)
# convert values to tuples so we can use in isinstance call
for name in matches:
lst = matches[name]
if len(lst) == 1:
matches[name] = lst[0]
else:
matches[name] = tuple(matches[name])
return matches
def _create_profile_callback(stack, matches, do_call=None, do_ret=None, context=None):
"""
The wrapped function returned from here handles identification of matching calls when called
as a setprofile callback.
"""
return _wrapped
| 30.230769 | 108 | 0.614686 | from __future__ import print_function
import os
import sys
import ast
from inspect import getmembers
from fnmatch import fnmatchcase
from collections import defaultdict
from openmdao.core.system import System
from openmdao.core.problem import Problem
from openmdao.core.driver import Driver
from openmdao.solvers.solver import Solver
from openmdao.jacobians.jacobian import Jacobian
from openmdao.matrices.matrix import Matrix
from openmdao.vectors.vector import Vector, Transfer
class FunctionFinder(ast.NodeVisitor):
"""
This class locates all of the functions and methods in a file and associates any
method with its corresponding class.
"""
def __init__(self, fname, cache):
ast.NodeVisitor.__init__(self)
self.fname = fname
self.cache = cache
self.class_stack = []
def visit_ClassDef(self, node):
self.class_stack.append(node.name)
for bnode in node.body:
self.visit(bnode)
self.class_stack.pop()
def visit_FunctionDef(self, node):
if self.class_stack:
qual = (None, '.'.join(self.class_stack), node.name)
else:
qual = ("<%s:%d>" % (self.fname, node.lineno), None, node.name)
self.cache[node.lineno] = qual
def find_qualified_name(filename, line, cache, full=True):
"""
Determine full function name (class.method) or function for unbound functions.
Parameters
----------
filename : str
Name of file containing source code.
line : int
        Line number within the given file.
    cache : dict
        A dictionary containing information by filename.
full : bool
If True, assemble the full name else return the parts
Returns
-------
str or None
Fully qualified function/method name or None.
"""
if filename not in cache:
fcache = {}
with open(filename, 'Ur') as f:
contents = f.read()
if len(contents) > 0 and contents[-1] != '\n':
contents += '\n'
FunctionFinder(filename, fcache).visit(ast.parse(contents, filename))
cache[filename] = fcache
if full:
parts = cache[filename][line]
if parts[0]:
return '.'.join((parts[0], parts[2]))
else:
return '.'.join((parts[1], parts[2]))
return cache[filename][line]
# This maps a simple identifier to a group of classes and corresponding
# glob patterns for each class.
func_group = {
'openmdao': [
("*", (System, Jacobian, Matrix, Solver, Driver, Problem)),
],
'openmdao_all': [
("*", (System, Vector, Transfer, Jacobian, Matrix, Solver, Driver, Problem)),
],
'setup': [
("*setup*", (System, Solver, Driver, Problem)),
],
'dataflow': [
('*compute*', (System,)),
('*linear*', (System,)),
('*', (Transfer,)),
],
'linear': [
('*linear*', (System,)),
('*solve*', (Solver,)),
]
}
try:
from mpi4py import MPI
from petsc4py import PETSc
from openmdao.vectors.petsc_vector import PETScVector, PETScTransfer
#TODO: this needs work. Still lots of MPI calls not covered here...
func_group['mpi'] = [
('*', (PETScTransfer,)),
('get_norm', (PETScVector,)),
('_initialize_data', (PETScVector,))
]
except ImportError:
pass
def _collect_methods(method_patterns):
"""
Iterate over a dict of method name patterns mapped to classes. Search
through the classes for anything that matches and return a dict of
exact name matches and their corresponding classes.
Parameters
----------
method_patterns : [(pattern1, (class1, class2, ... class_n)), ... (pattern_n, (class_n1, class_n2, ...)]
List of tuples of glob patterns and lists of classes used for isinstance checks
Returns
-------
defaultdict
Dict of method names and tuples of all classes that matched for that method. Default value
of the dict is a class that matches nothing
"""
matches = defaultdict(list)
# TODO: update this to also work with stand-alone functions
for pattern, classes in method_patterns:
for class_ in classes:
for name, obj in getmembers(class_):
if callable(obj) and (pattern == '*' or fnmatchcase(name, pattern)):
matches[name].append(class_)
# convert values to tuples so we can use in isinstance call
for name in matches:
lst = matches[name]
if len(lst) == 1:
matches[name] = lst[0]
else:
matches[name] = tuple(matches[name])
return matches
def _create_profile_callback(stack, matches, do_call=None, do_ret=None, context=None):
"""
The wrapped function returned from here handles identification of matching calls when called
as a setprofile callback.
"""
def _wrapped(frame, event, arg):
if event == 'call':
if 'self' in frame.f_locals and frame.f_code.co_name in matches and \
isinstance(frame.f_locals['self'], matches[frame.f_code.co_name]):
stack.append(frame)
if do_call is not None:
return do_call(frame, arg, stack, context)
elif event == 'return' and stack:
if frame is stack[-1]:
if do_ret is not None:
do_ret(frame, arg, stack, context)
stack.pop()
return _wrapped
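# --- Illustrative sketch (not part of the original module) ---
# A rough idea of how these helpers could be combined with sys.setprofile();
# the printing callback is hypothetical, not OpenMDAO's own tooling.
#
# import sys
#
# matches = _collect_methods(func_group['setup'])
# call_stack = []
#
# def _on_call(frame, arg, stack, context):
#     print('entering', frame.f_code.co_name)
#
# sys.setprofile(_create_profile_callback(call_stack, matches, do_call=_on_call))
# ...  # run the code to be traced
# sys.setprofile(None)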
| 1,065 | 0 | 106 |
7539d6048376b2a8eb5311e0e53339fd000755eb | 1,261 | py | Python | tests/heap_profiler_test.py | ROCmSoftwarePlatform/jax | be34a14dc40384ac8876fad2b23b5e205ccfe22e | [
"Apache-2.0"
] | 7 | 2020-12-04T16:54:54.000Z | 2022-02-15T07:26:56.000Z | tests/heap_profiler_test.py | ROCmSoftwarePlatform/jax | be34a14dc40384ac8876fad2b23b5e205ccfe22e | [
"Apache-2.0"
] | 20 | 2021-08-17T20:31:56.000Z | 2022-03-31T11:56:24.000Z | tests/heap_profiler_test.py | ROCmSoftwarePlatform/jax | be34a14dc40384ac8876fad2b23b5e205ccfe22e | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
import jax
import jax._src.lib.xla_bridge
from jax.config import config
import jax.test_util as jtu
config.parse_flags_with_absl()
# These tests simply test that the heap profiler API does not crash; they do
# not check functional correctness.
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| 28.659091 | 78 | 0.750991 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest
import jax
import jax._src.lib.xla_bridge
from jax.config import config
import jax.test_util as jtu
config.parse_flags_with_absl()
class HeapProfilerTest(unittest.TestCase):
# These tests simply test that the heap profiler API does not crash; they do
# not check functional correctness.
def testBasics(self):
client = jax._src.lib.xla_bridge.get_backend()
_ = client.heap_profile()
a = jax.device_put(1)
_ = client.heap_profile()
# Heap profiler doesn't crash with deleted buffer
a.delete()
_ = client.heap_profile()
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| 238 | 21 | 48 |
538398ac392be8d56ff9a45cc270efad1b75ff36 | 1,378 | py | Python | HANDWRITING/model.py | MacuXavier/S1_ML_Practices | 4cd29c5537c95cfd940e13e741db8d363c57ab1d | [
"Apache-2.0"
] | null | null | null | HANDWRITING/model.py | MacuXavier/S1_ML_Practices | 4cd29c5537c95cfd940e13e741db8d363c57ab1d | [
"Apache-2.0"
] | null | null | null | HANDWRITING/model.py | MacuXavier/S1_ML_Practices | 4cd29c5537c95cfd940e13e741db8d363c57ab1d | [
"Apache-2.0"
] | null | null | null | '''
Author: MJ.XU
Date: 2021-11-29 17:16:33
LastEditTime: 2021-12-18 23:28:25
LastEditors: MJ.XU
Description: Tech4better
FilePath: \Tutorial-HandWriting-Cls-master\model.py
Personal URL: https://www.squirrelled.cn/
'''
# pytorch related packages
import torch
import torch.nn as nn
import torch.nn.functional as F
# Model Definition
| 29.956522 | 79 | 0.576923 | '''
Author: MJ.XU
Date: 2021-11-29 17:16:33
LastEditTime: 2021-12-18 23:28:25
LastEditors: MJ.XU
Description: Tech4better
FilePath: \Tutorial-HandWriting-Cls-master\model.py
Personal URL: https://www.squirrelled.cn/
'''
# pytorch related packages
import torch
import torch.nn as nn
import torch.nn.functional as F
# Model Definition
class Net(nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=20, kernel_size=3, stride=1),
nn.BatchNorm2d(20),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(in_channels=20, out_channels=40, kernel_size=3,
stride=1),
nn.BatchNorm2d(40),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(in_channels=40, out_channels=80, kernel_size=3,
stride=1),
nn.BatchNorm2d(80),
nn.ReLU(),
nn.AdaptiveMaxPool2d((3, 3)),
)
self.fc1 = nn.Linear(80 * 3 * 3, 500)
self.dropout = nn.Dropout(p=0.5, inplace=True)
self.fc2 = nn.Linear(500, num_classes)
def forward(self, x):
x = self.conv(x)
x = x.view(-1, 80 * 3 * 3)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
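# --- Illustrative shape check (not part of the original file) ---
# A minimal sanity check of the network above; the 64x64 input size is an
# assumption, any input large enough for the two max-pool stages works.
if __name__ == "__main__":
    net = Net(num_classes=10)
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 64, 64))
    # AdaptiveMaxPool2d((3, 3)) fixes the classifier input at 80 * 3 * 3
    print(out.shape)  # expected: torch.Size([1, 10])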
| 968 | 0 | 75 |
867d8fefb40283c3ae65dc325f1f0f4d512cb133 | 3,481 | py | Python | src/data/Bert.py | VictorCallejas/FB_MMHM | 9a2d4a990a51811406bec9dc76dde612e5ec1c5c | [
"MIT"
] | 4 | 2020-11-19T18:52:23.000Z | 2022-01-05T02:36:14.000Z | src/data/Bert.py | VictorCallejas/FB_MMHM | 9a2d4a990a51811406bec9dc76dde612e5ec1c5c | [
"MIT"
] | null | null | null | src/data/Bert.py | VictorCallejas/FB_MMHM | 9a2d4a990a51811406bec9dc76dde612e5ec1c5c | [
"MIT"
] | 1 | 2020-11-20T08:35:31.000Z | 2020-11-20T08:35:31.000Z | import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from data.utils import get_Data, calculate_max_len, get_tokenized
from transformers import AutoTokenizer
from config import cfg, BertConfig
| 36.260417 | 116 | 0.673082 | import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from data.utils import get_Data, calculate_max_len, get_tokenized
from transformers import AutoTokenizer
from config import cfg, BertConfig
def getData(fold_idx):
train, dev, test = get_Data(fold_idx)
print('Data obtained')
train, dev, test = getDataset(train,dev,test)
sentences = []
sentences.extend(train.full.values)
if not cfg.final_train:
sentences.extend(dev.full.values)
sentences.extend(test.full.values)
tokenizer = AutoTokenizer.from_pretrained(BertConfig.model,do_lower=BertConfig.do_lower)
if BertConfig.max_len_tokenized_sentence > -1:
max_len = BertConfig.max_len_tokenized_sentence
print('Using config max len: ', max_len)
else:
max_len = calculate_max_len(sentences,tokenizer)
print('Using calculated max len: ', max_len)
train = getTensorDataset(train.id.values,train.full.values,train.label.values,tokenizer,max_len)
dev = getTensorDataset(dev.id.values,dev.full.values,dev.label.values,tokenizer,max_len)
test = getTensorDataset(test.id.values,test.full.values,None,tokenizer,max_len)
print('Tensor Datasets created')
train_dataloader, valid_dataloader, test_dataloader = getDataloader(train,dev,test)
print('Dataloaders created')
return train_dataloader, valid_dataloader, test_dataloader
def getDataset(train,dev,test):
if (BertConfig.mode == 'image'):
train['full'] = train[BertConfig.img_col]
dev['full'] = dev[BertConfig.img_col]
test['full'] = test[BertConfig.img_col]
elif BertConfig.mode == 'image_caption':
train['full'] = train[BertConfig.img_col] + BertConfig.separator + train['text']
dev['full'] = dev[BertConfig.img_col] + BertConfig.separator + dev['text']
test['full'] = test[BertConfig.img_col] + BertConfig.separator + test['text']
else:
train['full'] = train['text']
dev['full'] = dev['text']
test['full'] = test['text']
train.full = train.full.astype(str)
dev.full = dev.full.astype(str)
test.full = test.full.astype(str)
return train, dev, test
def getTensorDataset(ids, sentences, labels, tokenizer, max_len):
input_ids , attention_masks, token_types_ids = get_tokenized(sentences, tokenizer,max_len)
if labels is None:
return TensorDataset(torch.from_numpy(ids),input_ids,attention_masks,token_types_ids,torch.from_numpy(ids))
return TensorDataset(torch.from_numpy(ids),input_ids,attention_masks,token_types_ids,torch.from_numpy(labels))
def getDataloader(train_dataset, valid_dataset,test_dataset):
train_dataloader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
batch_size = BertConfig.train_batch_size
)
valid_dataloader = DataLoader(
valid_dataset,
sampler = SequentialSampler(valid_dataset),
batch_size = BertConfig.dev_batch_size
)
test_dataloader = DataLoader(
test_dataset,
sampler = SequentialSampler(test_dataset),
batch_size = BertConfig.dev_batch_size
)
return train_dataloader, valid_dataloader, test_dataloader | 3,107 | 0 | 99 |
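# --- Illustrative usage sketch (not part of the original file) ---
# Presumably called from a training script; `fold_idx=0` is just an example value.
#
# train_dl, dev_dl, test_dl = getData(fold_idx=0)
# for ids, input_ids, attention_masks, token_type_ids, labels in train_dl:
#     ...  # forward/backward pass over one mini-batch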
34f33bc559f7e13b93d515ea080396c7ef24cad3 | 1,973 | py | Python | test/test_connection.py | abudarevsky/solrcloudpy | 8ba59f1079d9b4efab03ddc892337a467c9671b2 | [
"BSD-3-Clause"
] | null | null | null | test/test_connection.py | abudarevsky/solrcloudpy | 8ba59f1079d9b4efab03ddc892337a467c9671b2 | [
"BSD-3-Clause"
] | null | null | null | test/test_connection.py | abudarevsky/solrcloudpy | 8ba59f1079d9b4efab03ddc892337a467c9671b2 | [
"BSD-3-Clause"
] | null | null | null | import os
import time
import unittest
from solr_instance import SolrInstance
from solrcloudpy import SolrCollection, SolrConnection
solrprocess = None
if __name__ == "__main__":
unittest.main()
| 29.447761 | 78 | 0.671566 | import os
import time
import unittest
from solr_instance import SolrInstance
from solrcloudpy import SolrCollection, SolrConnection
solrprocess = None
class TestConnection(unittest.TestCase):
def setUp(self):
self.conn = SolrConnection(version=os.getenv("SOLR_VERSION", "6.1.0"))
self.collparams = {}
confname = os.getenv("SOLR_CONFNAME", "")
if confname != "":
self.collparams["collection_config_name"] = confname
def test_list(self):
self.conn["foo"].create(**self.collparams)
colls = self.conn.list()
self.assertTrue(len(colls) >= 1)
self.conn["foo"].drop()
def test_live_nodes(self):
nodes = self.conn.live_nodes
# to support easy use of solrcloud gettingstarted
self.assertTrue(len(nodes) >= 1)
def test_cluster_leader(self):
leader = self.conn.cluster_leader
self.assertTrue(leader is not None)
def test_create_collection(self):
coll = self.conn.create_collection("test2", **self.collparams)
self.assertTrue(isinstance(coll, SolrCollection))
self.conn.test2.drop()
def test_create_collection_https(self):
test_conn = SolrConnection(server="localhost", use_https=True)
self.assertTrue(test_conn.url_template.startswith("https:"))
test_conn = SolrConnection(server="localhost", use_https=False)
self.assertTrue(test_conn.url_template.startswith("http:"))
test_conn = SolrConnection(server="localhost")
self.assertTrue(test_conn.url_template.startswith("http:"))
def setUpModule():
if os.getenv("SKIP_STARTUP", False):
return
# start solr
solrprocess = SolrInstance("solr2")
solrprocess.start()
solrprocess.wait_ready()
time.sleep(1)
def tearDownModule():
if os.getenv("SKIP_STARTUP", False):
return
if solrprocess:
solrprocess.terminate()
if __name__ == "__main__":
unittest.main()
| 1,519 | 19 | 230 |
a28f6c929b4e051e721250e8f3d1e7cb4d98de6a | 1,252 | py | Python | docs/conf.py | LevinZ2016/pyvinecopulib | e8fc0e6805daa71ab3fa1e03fd257a9f4063605d | [
"MIT"
] | null | null | null | docs/conf.py | LevinZ2016/pyvinecopulib | e8fc0e6805daa71ab3fa1e03fd257a9f4063605d | [
"MIT"
] | null | null | null | docs/conf.py | LevinZ2016/pyvinecopulib | e8fc0e6805daa71ab3fa1e03fd257a9f4063605d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# pyvinecopulib documentation build configuration file
# Sphinx extension modules
from pkg_resources import get_distribution
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx_rtd_theme',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
]
napoleon_include_init_with_doc = True
autosummary_generate = True
# The suffix(es) of source filenames.
source_suffix = '.rst'
# For the templates.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyvinecopulib'
copyright = u'2019, Thomas Nagler and Thibault Vatter'
author = u'Thomas Nagler and Thibault Vatter'
# The version info.
release = get_distribution('pyvinecopulib').version
version = '.'.join(release.split('.')[:2])
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_copy_source = False
html_show_copyright = False
html_show_sphinx = False
add_module_names = False
pygments_style = 'sphinx'
html_logo = '_static/pyvinecopulib.png'
| 21.964912 | 78 | 0.683706 | # -*- coding: utf-8 -*-
#
# pyvinecopulib documentation build configuration file
# Sphinx extension modules
from pkg_resources import get_distribution
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx_rtd_theme',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
]
napoleon_include_init_with_doc = True
autosummary_generate = True
# The suffix(es) of source filenames.
source_suffix = '.rst'
# For the templates.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyvinecopulib'
copyright = u'2019, Thomas Nagler and Thibault Vatter'
author = u'Thomas Nagler and Thibault Vatter'
# The version info.
release = get_distribution('pyvinecopulib').version
version = '.'.join(release.split('.')[:2])
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_copy_source = False
html_show_copyright = False
html_show_sphinx = False
add_module_names = False
pygments_style = 'sphinx'
html_logo = '_static/pyvinecopulib.png'
| 0 | 0 | 0 |
602b7684bce5d19dfcadd0ec598781b30fb4448c | 14,439 | py | Python | src/parse_args.py | dertilo/MultiHopKG | ee103c10c633ab808f063b0d3d8a4b3388b21aa5 | [
"BSD-3-Clause"
] | null | null | null | src/parse_args.py | dertilo/MultiHopKG | ee103c10c633ab808f063b0d3d8a4b3388b21aa5 | [
"BSD-3-Clause"
] | null | null | null | src/parse_args.py | dertilo/MultiHopKG | ee103c10c633ab808f063b0d3d8a4b3388b21aa5 | [
"BSD-3-Clause"
] | 1 | 2020-02-03T09:39:44.000Z | 2020-02-03T09:39:44.000Z | """
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Experiment Hyperparameters.
"""
import argparse
import os
parser = argparse.ArgumentParser(
description="Multi-Hop Knowledge Graph Reasoning with Reward Shaping"
)
# Experiment control
parser.add_argument(
"--process_data",
action="store_true",
help="process knowledge graph (default: False)",
)
parser.add_argument(
"--train",
action="store_true",
help="run path selection set_policy training (default: False)",
)
parser.add_argument(
"--inference",
action="store_true",
help="run knowledge graph inference (default: False)",
)
parser.add_argument(
"--search_random_seed",
action="store_true",
help="run experiments with multiple random initializations and compute the result statistics "
"(default: False)",
)
parser.add_argument(
"--eval", action="store_true", help="compute evaluation metrics (default: False)"
)
parser.add_argument(
"--eval_by_relation_type",
action="store_true",
help="compute evaluation metrics for to-M and to-1 relations separately (default: False)",
)
parser.add_argument(
"--eval_by_seen_queries",
action="store_true",
help="compute evaluation metrics for seen queries and unseen queries separately (default: False)",
)
parser.add_argument(
"--run_ablation_studies", action="store_true", help="run ablation studies"
)
parser.add_argument(
"--run_analysis",
action="store_true",
help="run algorithm analysis and print intermediate results (default: False)",
)
parser.add_argument(
"--data_dir",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "data"),
help="directory where the knowledge graph data is stored (default: None)",
)
parser.add_argument(
"--model_root_dir",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "model"),
help="root directory where the model parameters are stored (default: None)",
)
parser.add_argument(
"--model_dir",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "model"),
help="directory where the model parameters are stored (default: None)",
)
parser.add_argument("--gpu", type=int, default=0, help="gpu device (default: 0)")
parser.add_argument(
"--checkpoint_path", type=str, default=None, help="path to a pretrained checkpoint"
)
# Data
parser.add_argument(
"--test",
action="store_true",
help="perform inference on the test set (default: False)",
)
parser.add_argument(
"--group_examples_by_query",
action="store_true",
help="group examples by topic entity + query relation (default: False)",
)
# Network Architecture
parser.add_argument(
"--model",
type=str,
default="point",
help="knowledge graph QA model (default: point)",
)
parser.add_argument(
"--entity_dim",
type=int,
default=200,
metavar="E",
help="entity embedding dimension (default: 200)",
)
parser.add_argument(
"--relation_dim",
type=int,
default=200,
metavar="R",
help="relation embedding dimension (default: 200)",
)
parser.add_argument(
"--history_dim",
type=int,
default=400,
metavar="H",
help="action history encoding LSTM hidden states dimension (default: 400)",
)
parser.add_argument(
"--history_num_layers",
type=int,
default=3,
metavar="L",
help="action history encoding LSTM number of layers (default: 1)",
)
parser.add_argument(
"--use_action_space_bucketing",
action="store_true",
help="bucket adjacency list by outgoing degree to avoid memory blow-up (default: False)",
)
parser.add_argument(
"--bucket_interval",
type=int,
default=10,
help="adjacency list bucket size (default: 32)",
)
parser.add_argument(
"--type_only",
action="store_true",
help="use denote knowledge graph node by entity types only (default: False)",
)
parser.add_argument(
"--relation_only",
action="store_true",
help="search with relation information only, ignoring entity representation (default: False)",
)
parser.add_argument(
"--relation_only_in_path",
action="store_true",
help="include intermediate entities in path (default: False)",
)
# Knowledge Graph
parser.add_argument(
"--num_graph_convolution_layers",
type=int,
default=0,
help="number of graph convolution layers to use (default: 0, no GC is used)",
)
parser.add_argument(
"--graph_convolution_rank", type=int, default=10, help="number of ranks "
)
parser.add_argument(
"--add_reverse_relations",
type=bool,
default=True,
help="add reverse relations to KB (default: True)",
)
parser.add_argument(
"--add_reversed_training_edges",
action="store_true",
help="add reversed edges to extend training set (default: False)",
)
parser.add_argument(
"--train_entire_graph",
type=bool,
default=False,
help="add all edges in the graph to extend training set (default: False)",
)
parser.add_argument(
"--emb_dropout_rate",
type=float,
default=0.3,
help="Knowledge graph embedding dropout rate (default: 0.3)",
)
parser.add_argument(
"--zero_entity_initialization",
type=bool,
default=False,
help="Initialize all entities to zero (default: False)",
)
parser.add_argument(
"--uniform_entity_initialization",
type=bool,
default=False,
help="Initialize all entities with the same random embedding (default: False)",
)
# Optimization
parser.add_argument(
"--num_epochs",
type=int,
default=200,
help="maximum number of pass over the entire training set (default: 20)",
)
parser.add_argument(
"--num_wait_epochs",
type=int,
default=5,
help="number of epochs to wait before stopping training if dev set performance drops",
)
parser.add_argument(
"--num_peek_epochs",
type=int,
default=2,
help="number of epochs to wait for next dev set result check (default: 2)",
)
parser.add_argument(
"--start_epoch",
type=int,
default=0,
help="epoch from which the training should start (default: 0)",
)
parser.add_argument(
"--batch_size", type=int, default=256, help="mini-batch size (default: 256)"
)
parser.add_argument(
"--train_batch_size",
type=int,
default=256,
help="mini-batch size during training (default: 256)",
)
parser.add_argument(
"--dev_batch_size",
type=int,
default=64,
help="mini-batch size during inferece (default: 64)",
)
parser.add_argument(
"--margin",
type=float,
default=0,
help="margin used for base MAMES training (default: 0)",
)
parser.add_argument(
"--learning_rate",
type=float,
default=0.0001,
help="learning rate (default: 0.0001)",
)
parser.add_argument(
"--learning_rate_decay",
type=float,
default=1.0,
help="learning rate decay factor for the Adam optimizer (default: 1)",
)
parser.add_argument(
"--adam_beta1",
type=float,
default=0.9,
help="Adam: decay rates for the first movement estimate (default: 0.9)",
)
parser.add_argument(
"--adam_beta2",
type=float,
default=0.999,
help="Adam: decay rates for the second raw movement estimate (default: 0.999)",
)
parser.add_argument(
"--grad_norm",
type=float,
default=10000,
help="norm threshold for gradient clipping (default 10000)",
)
parser.add_argument(
"--xavier_initialization",
type=bool,
default=True,
help="Initialize all model parameters using xavier initialization (default: True)",
)
parser.add_argument(
"--random_parameters",
type=bool,
default=False,
help="Inference with random parameters (default: False)",
)
# Fact Network
parser.add_argument(
"--label_smoothing_epsilon",
type=float,
default=0.1,
help="epsilon used for label smoothing",
)
parser.add_argument(
"--hidden_dropout_rate",
type=float,
default=0.3,
help="ConvE hidden layer dropout rate (default: 0.3)",
)
parser.add_argument(
"--feat_dropout_rate",
type=float,
default=0.2,
help="ConvE feature dropout rate (default: 0.2)",
)
parser.add_argument(
"--emb_2D_d1",
type=int,
default=10,
help="ConvE embedding 2D shape dimension 1 (default: 10)",
)
parser.add_argument(
"--emb_2D_d2",
type=int,
default=20,
help="ConvE embedding 2D shape dimension 2 (default: 20)",
)
parser.add_argument(
"--num_out_channels",
type=int,
default=32,
help="ConvE number of output channels of the convolution layer (default: 32)",
)
parser.add_argument(
"--kernel_size", type=int, default=3, help="ConvE kernel size (default: 3)"
)
parser.add_argument(
"--distmult_state_dict_path",
type=str,
default="",
help="Path to the DistMult network state_dict (default: " ")",
)
parser.add_argument(
"--complex_state_dict_path",
type=str,
default="",
help="Path to the ComplEx network state dict (default: " ")",
)
parser.add_argument(
"--conve_state_dict_path",
type=str,
default="",
help="Path to the ConvE network state dict (default: " ")",
)
# Policy Network
parser.add_argument(
"--ff_dropout_rate",
type=float,
default=0.1,
help="Feed-forward layer dropout rate (default: 0.1)",
)
parser.add_argument(
"--rnn_dropout_rate",
type=float,
default=0.0,
help="RNN Variational Dropout Rate (default: 0.0)",
)
parser.add_argument(
"--action_dropout_rate",
type=float,
default=0.1,
help="Dropout rate for randomly masking out knowledge graph edges (default: 0.1)",
)
parser.add_argument(
"--action_dropout_anneal_factor",
type=float,
default=0.95,
help="Decrease the action dropout rate once the dev set results stopped increase (default: 0.95)",
)
parser.add_argument(
"--action_dropout_anneal_interval",
type=int,
default=1000,
help="Number of epochs to wait before decreasing the action dropout rate (default: 1000. Action "
"dropout annealing is not used when the value is >= 1000.)",
)
parser.add_argument(
"--num_negative_samples",
type=int,
default=10,
help="Number of negative samples to use for embedding-based methods",
)
# Reward Shaping
parser.add_argument(
"--fn_state_dict_path",
type=str,
default="",
help="(Aborted) Path to the saved fact network model",
)
parser.add_argument(
"--fn_kg_state_dict_path",
type=str,
default="",
help="(Aborted) Path to the saved knowledge graph embeddings used by a fact network",
)
parser.add_argument(
"--reward_shaping_threshold",
type=float,
default=0,
help="Threshold cut off of reward shaping scores (default: 0)",
)
parser.add_argument(
"--mu",
type=float,
default=1.0,
help="Weight over the estimated reward (default: 1.0)",
)
# Graph Completion
parser.add_argument(
"--theta",
type=float,
default=0.2,
help="Threshold for sifting high-confidence facts (default: 0.2)",
)
# Reinforcement Learning
parser.add_argument(
"--num_rollouts", type=int, default=20, help="number of rollouts (default: 20)"
)
parser.add_argument(
"--num_rollout_steps", type=int, default=3, help="maximum path length (default: 3)"
)
parser.add_argument(
"--bandwidth",
type=int,
default=300,
help="maximum number of outgoing edges to explore at each step (default: 300)",
)
parser.add_argument(
"--r_bandwidth",
type=int,
default=10,
help="maximum number of unique relation types connecting a pair of entities (default: 10)",
)
parser.add_argument(
"--num_paths_per_entity",
type=int,
default=3,
help="number of paths used to calculate entity potential (default: 3)",
)
parser.add_argument(
"--beta",
type=float,
default=0.0,
help="entropy regularization weight (default: 0.0)",
)
parser.add_argument(
"--gamma", type=float, default=1, help="moving average weight (default: 1)"
)
# Policy Gradient
parser.add_argument(
"--baseline",
type=str,
default="n/a",
help="baseline used by the policy gradient algorithm (default: n/a)",
)
parser.add_argument(
"--seed", type=int, default=543, metavar="S", help="random seed (default: 543)"
)
# Search Decoding
parser.add_argument(
"--beam_size",
type=int,
default=100,
help="size of beam used in beam search inference (default: 100)",
)
parser.add_argument(
"--mask_test_false_negatives",
type=bool,
default=False,
help="mask false negative examples in the dev/test set during decoding (default: False. This flag "
"was implemented for sanity checking and was not used in any experiment.)",
)
parser.add_argument(
"--visualize_paths",
action="store_true",
help="generate path visualizations during inference (default: False)",
)
parser.add_argument(
"--save_beam_search_paths",
action="store_true",
help="save the decoded path into a CSV file (default: False)",
)
# Separate Experiments
parser.add_argument(
"--export_to_embedding_projector",
action="store_true",
help="export model embeddings to the Tensorflow Embedding Projector format (default: False)",
)
parser.add_argument(
"--export_reward_shaping_parameters",
action="store_true",
help="export KG embeddings and fact network parameters for reward shaping models (default: False)",
)
parser.add_argument(
"--compute_fact_scores",
action="store_true",
help="[Debugging Option] compute embedding based model scores (default: False)",
)
parser.add_argument(
"--export_fuzzy_facts",
action="store_true",
help="export the facts recovered by embedding based method (default: False)",
)
parser.add_argument(
"--export_error_cases",
action="store_true",
help="export the error cases of a model",
)
parser.add_argument(
"--compute_map",
action="store_true",
help="compute the Mean Average Precision evaluation metrics (default: False)",
)
# Hyperparameter Search
parser.add_argument(
"--tune",
type=str,
default="",
help="Specify the hyperparameters to tune during the search, separated by commas (default: None)",
)
parser.add_argument(
"--grid_search", action="store_true", help="Conduct grid search of hyperparameters"
)
default_args,_ = parser.parse_known_args()
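# --- Illustrative note (not part of the original file) ---
# `default_args` is presumably imported by the experiment scripts; any flag above
# can be overridden on the command line, e.g. (hypothetical invocation):
#   python <experiment script> --train --num_rollout_steps 3 --beam_size 128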
| 26.689464 | 109 | 0.689037 | """
Copyright (c) 2018, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
Experiment Hyperparameters.
"""
import argparse
import os
parser = argparse.ArgumentParser(
description="Multi-Hop Knowledge Graph Reasoning with Reward Shaping"
)
# Experiment control
parser.add_argument(
"--process_data",
action="store_true",
help="process knowledge graph (default: False)",
)
parser.add_argument(
"--train",
action="store_true",
help="run path selection set_policy training (default: False)",
)
parser.add_argument(
"--inference",
action="store_true",
help="run knowledge graph inference (default: False)",
)
parser.add_argument(
"--search_random_seed",
action="store_true",
help="run experiments with multiple random initializations and compute the result statistics "
"(default: False)",
)
parser.add_argument(
"--eval", action="store_true", help="compute evaluation metrics (default: False)"
)
parser.add_argument(
"--eval_by_relation_type",
action="store_true",
help="compute evaluation metrics for to-M and to-1 relations separately (default: False)",
)
parser.add_argument(
"--eval_by_seen_queries",
action="store_true",
help="compute evaluation metrics for seen queries and unseen queries separately (default: False)",
)
parser.add_argument(
"--run_ablation_studies", action="store_true", help="run ablation studies"
)
parser.add_argument(
"--run_analysis",
action="store_true",
help="run algorithm analysis and print intermediate results (default: False)",
)
parser.add_argument(
"--data_dir",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "data"),
help="directory where the knowledge graph data is stored (default: None)",
)
parser.add_argument(
"--model_root_dir",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "model"),
help="root directory where the model parameters are stored (default: None)",
)
parser.add_argument(
"--model_dir",
type=str,
default=os.path.join(os.path.dirname(os.path.dirname(__file__)), "model"),
help="directory where the model parameters are stored (default: None)",
)
parser.add_argument("--gpu", type=int, default=0, help="gpu device (default: 0)")
parser.add_argument(
"--checkpoint_path", type=str, default=None, help="path to a pretrained checkpoint"
)
# Data
parser.add_argument(
"--test",
action="store_true",
help="perform inference on the test set (default: False)",
)
parser.add_argument(
"--group_examples_by_query",
action="store_true",
help="group examples by topic entity + query relation (default: False)",
)
# Network Architecture
parser.add_argument(
"--model",
type=str,
default="point",
help="knowledge graph QA model (default: point)",
)
parser.add_argument(
"--entity_dim",
type=int,
default=200,
metavar="E",
help="entity embedding dimension (default: 200)",
)
parser.add_argument(
"--relation_dim",
type=int,
default=200,
metavar="R",
help="relation embedding dimension (default: 200)",
)
parser.add_argument(
"--history_dim",
type=int,
default=400,
metavar="H",
help="action history encoding LSTM hidden states dimension (default: 400)",
)
parser.add_argument(
"--history_num_layers",
type=int,
default=3,
metavar="L",
help="action history encoding LSTM number of layers (default: 1)",
)
parser.add_argument(
"--use_action_space_bucketing",
action="store_true",
help="bucket adjacency list by outgoing degree to avoid memory blow-up (default: False)",
)
parser.add_argument(
"--bucket_interval",
type=int,
default=10,
help="adjacency list bucket size (default: 32)",
)
parser.add_argument(
"--type_only",
action="store_true",
help="use denote knowledge graph node by entity types only (default: False)",
)
parser.add_argument(
"--relation_only",
action="store_true",
help="search with relation information only, ignoring entity representation (default: False)",
)
parser.add_argument(
"--relation_only_in_path",
action="store_true",
help="include intermediate entities in path (default: False)",
)
# Knowledge Graph
parser.add_argument(
"--num_graph_convolution_layers",
type=int,
default=0,
help="number of graph convolution layers to use (default: 0, no GC is used)",
)
parser.add_argument(
"--graph_convolution_rank", type=int, default=10, help="number of ranks "
)
parser.add_argument(
"--add_reverse_relations",
type=bool,
default=True,
help="add reverse relations to KB (default: True)",
)
parser.add_argument(
"--add_reversed_training_edges",
action="store_true",
help="add reversed edges to extend training set (default: False)",
)
parser.add_argument(
"--train_entire_graph",
type=bool,
default=False,
help="add all edges in the graph to extend training set (default: False)",
)
parser.add_argument(
"--emb_dropout_rate",
type=float,
default=0.3,
help="Knowledge graph embedding dropout rate (default: 0.3)",
)
parser.add_argument(
"--zero_entity_initialization",
type=bool,
default=False,
help="Initialize all entities to zero (default: False)",
)
parser.add_argument(
"--uniform_entity_initialization",
type=bool,
default=False,
help="Initialize all entities with the same random embedding (default: False)",
)
# Optimization
parser.add_argument(
"--num_epochs",
type=int,
default=200,
help="maximum number of pass over the entire training set (default: 20)",
)
parser.add_argument(
"--num_wait_epochs",
type=int,
default=5,
help="number of epochs to wait before stopping training if dev set performance drops",
)
parser.add_argument(
"--num_peek_epochs",
type=int,
default=2,
help="number of epochs to wait for next dev set result check (default: 2)",
)
parser.add_argument(
"--start_epoch",
type=int,
default=0,
help="epoch from which the training should start (default: 0)",
)
parser.add_argument(
"--batch_size", type=int, default=256, help="mini-batch size (default: 256)"
)
parser.add_argument(
"--train_batch_size",
type=int,
default=256,
help="mini-batch size during training (default: 256)",
)
parser.add_argument(
"--dev_batch_size",
type=int,
default=64,
help="mini-batch size during inferece (default: 64)",
)
parser.add_argument(
"--margin",
type=float,
default=0,
help="margin used for base MAMES training (default: 0)",
)
parser.add_argument(
"--learning_rate",
type=float,
default=0.0001,
help="learning rate (default: 0.0001)",
)
parser.add_argument(
"--learning_rate_decay",
type=float,
default=1.0,
help="learning rate decay factor for the Adam optimizer (default: 1)",
)
parser.add_argument(
"--adam_beta1",
type=float,
default=0.9,
help="Adam: decay rates for the first movement estimate (default: 0.9)",
)
parser.add_argument(
"--adam_beta2",
type=float,
default=0.999,
help="Adam: decay rates for the second raw movement estimate (default: 0.999)",
)
parser.add_argument(
"--grad_norm",
type=float,
default=10000,
help="norm threshold for gradient clipping (default 10000)",
)
parser.add_argument(
"--xavier_initialization",
type=bool,
default=True,
help="Initialize all model parameters using xavier initialization (default: True)",
)
parser.add_argument(
"--random_parameters",
type=bool,
default=False,
help="Inference with random parameters (default: False)",
)
# Fact Network
parser.add_argument(
"--label_smoothing_epsilon",
type=float,
default=0.1,
help="epsilon used for label smoothing",
)
parser.add_argument(
"--hidden_dropout_rate",
type=float,
default=0.3,
help="ConvE hidden layer dropout rate (default: 0.3)",
)
parser.add_argument(
"--feat_dropout_rate",
type=float,
default=0.2,
help="ConvE feature dropout rate (default: 0.2)",
)
parser.add_argument(
"--emb_2D_d1",
type=int,
default=10,
help="ConvE embedding 2D shape dimension 1 (default: 10)",
)
parser.add_argument(
"--emb_2D_d2",
type=int,
default=20,
help="ConvE embedding 2D shape dimension 2 (default: 20)",
)
parser.add_argument(
"--num_out_channels",
type=int,
default=32,
help="ConvE number of output channels of the convolution layer (default: 32)",
)
parser.add_argument(
"--kernel_size", type=int, default=3, help="ConvE kernel size (default: 3)"
)
parser.add_argument(
"--distmult_state_dict_path",
type=str,
default="",
help="Path to the DistMult network state_dict (default: " ")",
)
parser.add_argument(
"--complex_state_dict_path",
type=str,
default="",
help="Path to the ComplEx network state dict (default: " ")",
)
parser.add_argument(
"--conve_state_dict_path",
type=str,
default="",
help="Path to the ConvE network state dict (default: " ")",
)
# Policy Network
parser.add_argument(
"--ff_dropout_rate",
type=float,
default=0.1,
help="Feed-forward layer dropout rate (default: 0.1)",
)
parser.add_argument(
"--rnn_dropout_rate",
type=float,
default=0.0,
help="RNN Variational Dropout Rate (default: 0.0)",
)
parser.add_argument(
"--action_dropout_rate",
type=float,
default=0.1,
help="Dropout rate for randomly masking out knowledge graph edges (default: 0.1)",
)
parser.add_argument(
"--action_dropout_anneal_factor",
type=float,
default=0.95,
help="Decrease the action dropout rate once the dev set results stopped increase (default: 0.95)",
)
parser.add_argument(
"--action_dropout_anneal_interval",
type=int,
default=1000,
help="Number of epochs to wait before decreasing the action dropout rate (default: 1000. Action "
"dropout annealing is not used when the value is >= 1000.)",
)
parser.add_argument(
"--num_negative_samples",
type=int,
default=10,
help="Number of negative samples to use for embedding-based methods",
)
# Reward Shaping
parser.add_argument(
"--fn_state_dict_path",
type=str,
default="",
help="(Aborted) Path to the saved fact network model",
)
parser.add_argument(
"--fn_kg_state_dict_path",
type=str,
default="",
help="(Aborted) Path to the saved knowledge graph embeddings used by a fact network",
)
parser.add_argument(
"--reward_shaping_threshold",
type=float,
default=0,
help="Threshold cut off of reward shaping scores (default: 0)",
)
parser.add_argument(
"--mu",
type=float,
default=1.0,
help="Weight over the estimated reward (default: 1.0)",
)
# Graph Completion
parser.add_argument(
"--theta",
type=float,
default=0.2,
help="Threshold for sifting high-confidence facts (default: 0.2)",
)
# Reinforcement Learning
parser.add_argument(
"--num_rollouts", type=int, default=20, help="number of rollouts (default: 20)"
)
parser.add_argument(
"--num_rollout_steps", type=int, default=3, help="maximum path length (default: 3)"
)
parser.add_argument(
"--bandwidth",
type=int,
default=300,
help="maximum number of outgoing edges to explore at each step (default: 300)",
)
parser.add_argument(
"--r_bandwidth",
type=int,
default=10,
help="maximum number of unique relation types connecting a pair of entities (default: 10)",
)
parser.add_argument(
"--num_paths_per_entity",
type=int,
default=3,
help="number of paths used to calculate entity potential (default: 3)",
)
parser.add_argument(
"--beta",
type=float,
default=0.0,
help="entropy regularization weight (default: 0.0)",
)
parser.add_argument(
"--gamma", type=float, default=1, help="moving average weight (default: 1)"
)
# Policy Gradient
parser.add_argument(
"--baseline",
type=str,
default="n/a",
help="baseline used by the policy gradient algorithm (default: n/a)",
)
parser.add_argument(
"--seed", type=int, default=543, metavar="S", help="random seed (default: 543)"
)
# Search Decoding
parser.add_argument(
"--beam_size",
type=int,
default=100,
help="size of beam used in beam search inference (default: 100)",
)
parser.add_argument(
"--mask_test_false_negatives",
type=bool,
default=False,
help="mask false negative examples in the dev/test set during decoding (default: False. This flag "
"was implemented for sanity checking and was not used in any experiment.)",
)
parser.add_argument(
"--visualize_paths",
action="store_true",
help="generate path visualizations during inference (default: False)",
)
parser.add_argument(
"--save_beam_search_paths",
action="store_true",
help="save the decoded path into a CSV file (default: False)",
)
# Separate Experiments
parser.add_argument(
"--export_to_embedding_projector",
action="store_true",
help="export model embeddings to the Tensorflow Embedding Projector format (default: False)",
)
parser.add_argument(
"--export_reward_shaping_parameters",
action="store_true",
help="export KG embeddings and fact network parameters for reward shaping models (default: False)",
)
parser.add_argument(
"--compute_fact_scores",
action="store_true",
help="[Debugging Option] compute embedding based model scores (default: False)",
)
parser.add_argument(
"--export_fuzzy_facts",
action="store_true",
help="export the facts recovered by embedding based method (default: False)",
)
parser.add_argument(
"--export_error_cases",
action="store_true",
help="export the error cases of a model",
)
parser.add_argument(
"--compute_map",
action="store_true",
help="compute the Mean Average Precision evaluation metrics (default: False)",
)
# Hyperparameter Search
parser.add_argument(
"--tune",
type=str,
default="",
help="Specify the hyperparameters to tune during the search, separated by commas (default: None)",
)
parser.add_argument(
"--grid_search", action="store_true", help="Conduct grid search of hyperparameters"
)
default_args, _ = parser.parse_known_args()
| 0 | 0 | 0 |
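# A minimal sketch, assuming PyTorch, of how the optimization flags parsed above
# (learning_rate, adam_beta1/adam_beta2, learning_rate_decay, grad_norm) could be
# wired into a training step; the model object itself is a hypothetical stand-in.
import torch


def build_optimizer(model: torch.nn.Module, args):
    # Adam configured from the command-line hyperparameters.
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
    )
    # Multiplicative learning-rate decay (--learning_rate_decay, default 1.0 = no decay).
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.learning_rate_decay)
    return optimizer, scheduler


def training_step(model, loss, optimizer, args):
    optimizer.zero_grad()
    loss.backward()
    # --grad_norm is the gradient clipping threshold (default 10000, effectively no clipping).
    torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)
    optimizer.step()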
48575c772d18785191b360395e995f192472cd72 | 642 | py | Python | db/mongo_connection.py | stivenramireza/airflow-service | 8945fc7062cfedec855ba421d5a417ca0cbf9b67 | [
"MIT"
] | null | null | null | db/mongo_connection.py | stivenramireza/airflow-service | 8945fc7062cfedec855ba421d5a417ca0cbf9b67 | [
"MIT"
] | null | null | null | db/mongo_connection.py | stivenramireza/airflow-service | 8945fc7062cfedec855ba421d5a417ca0cbf9b67 | [
"MIT"
] | null | null | null | from pymongo import MongoClient | 29.181818 | 73 | 0.67757 | from pymongo import MongoClient
class MongoConnection:
__instance = None
def __init__(self, url_conn: str) -> None:
self.url_conn = url_conn
if MongoConnection.__instance is None:
MongoConnection.__instance = MongoClient(url_conn)
else:
raise Exception('You cannot create another Mongo connection')
@staticmethod
def get_instance(url_conn: str) -> object:
if not MongoConnection.__instance:
MongoConnection(url_conn)
return MongoConnection.__instance
@staticmethod
def close_instance() -> None:
MongoConnection.__instance.close() | 447 | 141 | 23 |
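# A short usage sketch for the singleton above; the connection string and the
# database/collection names are illustrative assumptions, not part of the project.
client = MongoConnection.get_instance("mongodb://localhost:27017")
db = client["airflow"]                      # pick a database
for doc in db["dag_runs"].find().limit(5):  # query a collection
    print(doc)
MongoConnection.close_instance()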
73489b4fbb0f389aa5ee1c07ce8d7a8eda40e35e | 846 | py | Python | metadeploy/api/management/commands/tests/truncate_data.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 33 | 2019-03-20T15:34:39.000Z | 2022-03-30T15:59:40.000Z | metadeploy/api/management/commands/tests/truncate_data.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 2,718 | 2019-02-27T19:46:07.000Z | 2022-03-11T23:18:09.000Z | metadeploy/api/management/commands/tests/truncate_data.py | sfdc-qbranch/MetaDeploy | d22547b3814dbec6aefa4d86b9f81c6f175c1b67 | [
"BSD-3-Clause"
] | 28 | 2019-03-28T04:57:16.000Z | 2022-02-04T16:49:25.000Z | import pytest
from django.core.management import call_command
from ....models import (
Plan,
PlanSlug,
Product,
ProductCategory,
ProductSlug,
Step,
Version,
)
@pytest.mark.django_db
| 23.5 | 54 | 0.699764 | import pytest
from django.core.management import call_command
from ....models import (
Plan,
PlanSlug,
Product,
ProductCategory,
ProductSlug,
Step,
Version,
)
@pytest.mark.django_db
def test_truncate_data(product_factory, step_factory):
p1 = product_factory()
p2 = product_factory()
p3 = product_factory()
step_factory(plan__version__product=p1)
step_factory(plan__version__product=p2)
step_factory(plan__version__product=p3)
assert ProductCategory.objects.count() == 3
assert ProductSlug.objects.count() == 3
assert Product.objects.count() == 3
assert Version.objects.count() == 3
assert PlanSlug.objects.count() == 3
assert Plan.objects.count() == 3
assert Step.objects.count() == 3
call_command("truncate_data")
assert Product.objects.count() == 0
| 611 | 0 | 22 |
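# The truncate_data management command itself is not shown in this snippet; the
# sketch below is only a plausible shape for it (cascade behaviour assumed), to make
# clear what the test above exercises: deleting every Product and, through cascading
# foreign keys, the versions, plans, and steps attached to them.
from django.core.management.base import BaseCommand

from metadeploy.api.models import Product


class Command(BaseCommand):
    help = "Delete all Product rows (cascades to related versions, plans, and steps)."

    def handle(self, *args, **options):
        Product.objects.all().delete()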
8aab240ab7b20f968d87a6141bb0f294e611b3d5 | 329 | py | Python | Algorithms/DynamicProgramming/maximum-difference-zeros-ones-binary-string-set-2-time.py | Sangeerththan/pythonDSA | d126b3a7a8acc1e202107e20a21ed96fb4ab144e | [
"MIT"
] | 1 | 2021-09-12T20:40:37.000Z | 2021-09-12T20:40:37.000Z | Algorithms/DynamicProgramming/maximum-difference-zeros-ones-binary-string-set-2-time.py | Sangeerththan/pythonDataStructure | d126b3a7a8acc1e202107e20a21ed96fb4ab144e | [
"MIT"
] | null | null | null | Algorithms/DynamicProgramming/maximum-difference-zeros-ones-binary-string-set-2-time.py | Sangeerththan/pythonDataStructure | d126b3a7a8acc1e202107e20a21ed96fb4ab144e | [
"MIT"
] | null | null | null |
s = "11000010001"
n = 11
print(findLength(s, n))
| 21.933333 | 54 | 0.583587 | def findLength(string, n):
current_sum = 0
max_sum = 0
for i in range(n):
current_sum += (1 if string[i] == '0' else -1)
if current_sum < 0:
current_sum = 0
max_sum = max(current_sum, max_sum)
return max_sum if max_sum else 0
s = "11000010001"
n = 11
print(findLength(s, n))
| 256 | 0 | 22 |
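# findLength above is Kadane's algorithm on a +1/-1 transform ('0' -> +1, '1' -> -1),
# so it returns the largest count of zeros minus ones over any substring. A tiny
# brute-force cross-check (illustrative only) agrees with it, giving 6 for this input.
def find_length_bruteforce(string, n):
    best = 0
    for i in range(n):
        zeros = ones = 0
        for j in range(i, n):
            if string[j] == '0':
                zeros += 1
            else:
                ones += 1
            best = max(best, zeros - ones)
    return best


assert findLength(s, n) == find_length_bruteforce(s, n) == 6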
4e619c206ae0df10192fe968b3a3efbb68e98129 | 94 | py | Python | src/mlb/models/app_mode.py | benbrandt22/MagTagMLB | 1ec347743bc7df9339fb8e3de0f86ea037b7694f | [
"MIT"
] | null | null | null | src/mlb/models/app_mode.py | benbrandt22/MagTagMLB | 1ec347743bc7df9339fb8e3de0f86ea037b7694f | [
"MIT"
] | null | null | null | src/mlb/models/app_mode.py | benbrandt22/MagTagMLB | 1ec347743bc7df9339fb8e3de0f86ea037b7694f | [
"MIT"
] | null | null | null | # (doesn't seem to be any official Enum support in Circuitpython)
Schedule = 1
ScoreBoard = 2 | 23.5 | 65 | 0.755319 | # (doesn't seem to be any official Enum support in Circuitpython)
Schedule = 1
ScoreBoard = 2 | 0 | 0 | 0 |
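# A small usage sketch, assuming the module above is importable as `app_mode`
# (the real import path in the project may differ): the integer constants act as
# a stand-in for an Enum when switching display modes.
import app_mode

mode = app_mode.Schedule

def toggle(current):
    # Flip between the schedule view and the scoreboard view.
    return app_mode.ScoreBoard if current == app_mode.Schedule else app_mode.Schedule

mode = toggle(mode)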
3c4e0e95661b4e2c5aca203fb3b2a20b1b33339b | 731 | py | Python | src/caterpillar/variants.py | overhacked/caterpillar | dc51e551a938705f5e200087720caa15b4a31371 | [
"MIT"
] | 37 | 2017-12-27T03:41:59.000Z | 2022-02-05T04:11:07.000Z | src/caterpillar/variants.py | overhacked/caterpillar | dc51e551a938705f5e200087720caa15b4a31371 | [
"MIT"
] | 7 | 2019-01-25T18:39:52.000Z | 2020-11-11T03:57:54.000Z | src/caterpillar/variants.py | overhacked/caterpillar | dc51e551a938705f5e200087720caa15b4a31371 | [
"MIT"
] | 7 | 2020-04-25T03:06:18.000Z | 2021-10-30T21:24:10.000Z | import m3u8
from typing import Tuple
# Rate variant stream by resolution, average bandwidth, and bandwidth.
# Select the best variant stream (best effort).
#
# Assumption: m3u8 object has one or more variants.
| 31.782609 | 73 | 0.740082 | import m3u8
from typing import Tuple
# Rate variant stream by resolution, average bandwidth, and bandwidth.
def variant_score(variant: m3u8.Playlist) -> Tuple[int, int, int, int]:
stream_info = variant.stream_info
if stream_info.resolution:
width, height = stream_info.resolution
else:
width = height = 0
average_bandwidth = stream_info.average_bandwidth or 0
bandwidth = stream_info.bandwidth or 0
return (width, height, average_bandwidth, bandwidth)
# Select the best variant stream (best effort).
#
# Assumption: m3u8 object has one or more variants.
def select_variant(m3u8_obj: m3u8.M3U8) -> m3u8.Playlist:
return sorted(m3u8_obj.playlists, key=variant_score, reverse=True)[0]
| 472 | 0 | 44 |
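# A brief usage sketch; the playlist URL is a placeholder. Any HLS master playlist
# that lists one or more variant streams should work with select_variant.
master = m3u8.load("https://example.com/master.m3u8")  # placeholder URL
best = select_variant(master)
print(best.uri, variant_score(best))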
ca4cbeb6e31114d37f1c1518d066fa96e13438cb | 2,177 | py | Python | Transformation Scenes/2_Linear_Transformation_with_matrix.py | samsub18/Visualizing-Linear-Transformations | cf3e3aeb463d4a73de8a13a951191e8e191dbe62 | [
"MIT"
] | 5 | 2020-08-21T04:42:44.000Z | 2021-04-24T14:11:32.000Z | Transformation Scenes/2_Linear_Transformation_with_matrix.py | samsub18/Visualizing-Linear-Transformations | cf3e3aeb463d4a73de8a13a951191e8e191dbe62 | [
"MIT"
] | null | null | null | Transformation Scenes/2_Linear_Transformation_with_matrix.py | samsub18/Visualizing-Linear-Transformations | cf3e3aeb463d4a73de8a13a951191e8e191dbe62 | [
"MIT"
] | 3 | 2020-09-19T05:15:01.000Z | 2021-06-03T05:47:22.000Z | from big_ol_pile_of_manim_imports import *
| 27.556962 | 126 | 0.661001 | from big_ol_pile_of_manim_imports import *
class Introduction(Scene):
def construct(self):
t1 = TexMobject(r"Visualzing\quad Linear\quad Transformations")
t2 = TexMobject(r"Using\quad python\quad \& \quad manim")
t3 = TexMobject(r"Sameer\quad Prasad\quad Subhedar")
t4 = TexMobject("PES2201800323")
self.play(Write(t1))
self.play(Transform(t1,t2))
self.wait()
self.play(Transform(t1,t3))
self.wait()
self.play(Transform(t1,t4))
self.wait()
class Matrix2D(Scene):
def construct(self):
t1 = TextMobject("This is a","2D","matrix")
t1[1].set_color(RED)
# grid = ScreenGrid()
mat = TexMobject(r"\begin{bmatrix} -1 & \quad 1 \\ 1 & -1 \end{bmatrix}")
mat.set_color(RED)
t2 = TextMobject("This is an", "input", "vector")
t2[1].set_color(YELLOW)
t3 = TextMobject("This results in")
t3.set_color(BLUE)
mat2=TexMobject(r"\begin{bmatrix} 1 \\ 2 \end{bmatrix}")
mat2.set_color(YELLOW)
mat3 = TexMobject(r"=\quad 1\begin{bmatrix} -1 \\ 1 \end{bmatrix}\quad +\quad 2\begin{bmatrix} \quad 1 \\ -1 \end{bmatrix}")
mat3.set_color(BLUE)
mat4 = TexMobject(r"=\quad \begin{bmatrix} \quad 1 \\ -1 \end{bmatrix}")
mat4.set_color(YELLOW)
t4 = TextMobject("Thus the output", "vector","is")
t4[1].set_color(YELLOW)
t1.move_to(3*UP)
t3.move_to(3*UP)
mat.move_to(5*LEFT)
t2.move_to(3*UP)
mat2.move_to(3.5*LEFT)
t4.move_to(3*UP)
mat4.move_to(5*RIGHT)
# self.add(grid)
self.play(Write(t1))
self.play(Transform(t1,mat))
self.wait()
self.play(Write(t2))
self.play(Transform(t2,mat2))
self.wait()
self.play(Write(t3))
self.play(Transform(t3,mat3))
self.wait()
self.play(Write(t4))
self.play(Transform(t4,mat4))
self.wait()
class Introduction_2(Scene):
def construct(self):
t1 = TextMobject("Now let's see how the","Transformation","looks visually")
t1[1].set_color(YELLOW)
self.play(Write(t1))
self.wait()
class FinalTransformation(LinearTransformationScene):
CONFIG = {
"leave_ghost_vectors": True,
}
def construct(self):
v = np.array([[1], [2]])
matrix = [[-1, 1], [1, -1]]
self.add_vector(v)
self.apply_matrix(matrix)
self.wait() | 1,842 | 129 | 161 |
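# A quick numerical check of the transformation shown above, assuming NumPy is
# available: the matrix [[-1, 1], [1, -1]] applied to the vector (1, 2) gives
# (1, -1), matching the output vector derived in the Matrix2D scene.
import numpy as np

A = np.array([[-1, 1], [1, -1]])
v = np.array([1, 2])
print(A @ v)  # [ 1 -1]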
d33307b24d598e56c3104322bb0aa7d9d0d9c14a | 748 | py | Python | unsdgbot/config.py | btaba/UN-SDG-bot | 46fde9a866cefe37b8e600e87b37ed0f73bc94a2 | [
"MIT"
] | null | null | null | unsdgbot/config.py | btaba/UN-SDG-bot | 46fde9a866cefe37b8e600e87b37ed0f73bc94a2 | [
"MIT"
] | null | null | null | unsdgbot/config.py | btaba/UN-SDG-bot | 46fde9a866cefe37b8e600e87b37ed0f73bc94a2 | [
"MIT"
] | null | null | null | import os
import logging
basedir = os.path.abspath(os.path.dirname(__file__))
| 22 | 62 | 0.657754 | import os
import logging
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
DEBUG = False
TESTING = False
CSRF_ENABLED = True
FB_API_VERSION = 2.7
FB_ACCESS_TOKEN = 'this-is-in-local-config'
FB_VERIFY_TOKEN = 'this-is-in-local-config'
FB_WEBHOOK_URL = 'this-is-in-local-config'
LOGGING_FORMAT = (
'%(asctime)s - %(name)s - %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
)
LOGGING_DIRECTORY = '../logs/app.log'
LOGGING_LEVEL = logging.ERROR
class ProductionConfig(Config):
DEVELOPMENT = False
DEBUG = False
class DevelopmentConfig(Config):
# ngrok http -host-header=rewrite 192.168.33.11:80
DEVELOPMENT = True
DEBUG = True
| 0 | 597 | 69 |
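# A minimal sketch of how these config classes are typically loaded into a Flask
# app; the dotted import path assumes the module is importable as `unsdgbot.config`.
from flask import Flask

app = Flask(__name__)
app.config.from_object("unsdgbot.config.DevelopmentConfig")
app.logger.setLevel(app.config["LOGGING_LEVEL"])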
fae3809523280baaf1bed07b85c14642416d98e2 | 2,458 | py | Python | yahoo_quote_download/validater.py | manniealfaro/yahoo_quote_download | 31fd8a3a239712764cc1da48251fb94fee9a1313 | [
"BSD-2-Clause"
] | 153 | 2017-05-21T13:24:00.000Z | 2022-03-18T00:56:13.000Z | yahoo_quote_download/validater.py | manniealfaro/yahoo_quote_download | 31fd8a3a239712764cc1da48251fb94fee9a1313 | [
"BSD-2-Clause"
] | 15 | 2017-05-25T19:23:50.000Z | 2021-07-26T17:06:28.000Z | yahoo_quote_download/validater.py | manniealfaro/yahoo_quote_download | 31fd8a3a239712764cc1da48251fb94fee9a1313 | [
"BSD-2-Clause"
] | 77 | 2017-05-21T13:24:11.000Z | 2021-06-22T10:58:18.000Z | # -*- coding: utf-8 -*-
"""
validater.py - Trivial data validator
Created on December 24, 2019
@author: c0redumb
"""
# To make print working for Python2/3
from __future__ import print_function
def validate(ticker, data, begindate='1920-01-01', verbose=0):
'''
Validate downloaded quote lines: drop rows whose dates are out of order and clamp open/close prices into the [low, high] range.
'''
new_data = []
last_date = None
for line in data:
# Filename lines, usually the first line
# Zero length lines, usually the last line
if len(line) == 0 or line.startswith('Date'):
new_data.append(line)
continue
# Extract all the fields
try:
field = line.split(',')
d = field[0]
o = float(field[1])
h = float(field[2])
l = float(field[3])
c = float(field[4])
adj_c = float(field[5])
except:
#print("Failed to parse:", line)
continue
# This is a weird quirk we need to check
invalid_date = False
if last_date is None:
if d < begindate:
invalid_date = True
last_date = d
else:
if d <= last_date:
invalid_date = True
else:
last_date = d
if invalid_date:
if verbose > 0:
print("!!! {}: Invalid date {} in data".format(
ticker, field[0]))
continue
# Verify that the open/close is within the high/low range
mid = (h + l) / 2
corrected = False
if o > h * 1.0001 or o < l * 0.9999:
o = mid
corrected = True
if verbose > 0:
print("!!! {}: Open is out of range on {}".format(
ticker, field[0]))
if c > h * 1.0001 or c < l * 0.9999:
if c != 0.0:
adj_c *= mid / c
else:
adj_c = mid
c = mid
corrected = True
if verbose > 0:
print("!!! {}: Close is out of range on {}".format(
ticker, field[0]))
if corrected:
if verbose > 5:
print(line)
line = "{},{},{},{},{},{},{}".format(
field[0], o, h, l, c, adj_c, field[6])
if verbose > 5:
print(line)
new_data.append(line)
return new_data
| 28.581395 | 76 | 0.463792 | # -*- coding: utf-8 -*-
"""
validater.py - Trivial data validator
Created on December 24, 2019
@author: c0redumb
"""
# To make print working for Python2/3
from __future__ import print_function
def validate(ticker, data, begindate='1920-01-01', verbose=0):
'''
Validate downloaded quote lines: drop rows whose dates are out of order and clamp open/close prices into the [low, high] range.
'''
new_data = []
last_date = None
for line in data:
# Filename lines, usually the first line
# Zero length lines, usually the last line
if len(line) == 0 or line.startswith('Date'):
new_data.append(line)
continue
# Extract all the fields
try:
field = line.split(',')
d = field[0]
o = float(field[1])
h = float(field[2])
l = float(field[3])
c = float(field[4])
adj_c = float(field[5])
except:
#print("Failed to parse:", line)
continue
# This is a weird quirk we need to check
invalid_date = False
if last_date is None:
if d < begindate:
invalid_date = True
last_date = d
else:
if d <= last_date:
invalid_date = True
else:
last_date = d
if invalid_date:
if verbose > 0:
print("!!! {}: Invalid date {} in data".format(
ticker, field[0]))
continue
# Verify that the open/close is within the high/low range
mid = (h + l) / 2
corrected = False
if o > h * 1.0001 or o < l * 0.9999:
o = mid
corrected = True
if verbose > 0:
print("!!! {}: Open is out of range on {}".format(
ticker, field[0]))
if c > h * 1.0001 or c < l * 0.9999:
if c != 0.0:
adj_c *= mid / c
else:
adj_c = mid
c = mid
corrected = True
if verbose > 0:
print("!!! {}: Close is out of range on {}".format(
ticker, field[0]))
if corrected:
if verbose > 5:
print(line)
line = "{},{},{},{},{},{},{}".format(
field[0], o, h, l, c, adj_c, field[6])
if verbose > 5:
print(line)
new_data.append(line)
return new_data
| 0 | 0 | 0 |
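# A short usage sketch for validate(), assuming a downloaded quote file in Yahoo's
# Date,Open,High,Low,Close,Adj Close,Volume CSV format; the ticker and file name
# are placeholders.
with open("AAPL.csv") as f:
    lines = f.read().splitlines()

cleaned = validate("AAPL", lines, begindate="2000-01-01", verbose=1)
print("\n".join(cleaned))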