hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d73f68f5fdde19e76c3d09d7604ff232b8d082d | 1,261 | py | Python | icarus_simulator/sat_core/coordinate_util.py | RubenFr/ICARUS-framework | e57a1f50c3bb9522b2a279fee6b625628afd056f | [
"MIT"
] | 5 | 2021-08-31T08:07:41.000Z | 2022-01-04T02:09:25.000Z | icarus_simulator/sat_core/coordinate_util.py | RubenFr/ICARUS-framework | e57a1f50c3bb9522b2a279fee6b625628afd056f | [
"MIT"
] | 3 | 2021-09-23T09:06:35.000Z | 2021-12-08T04:53:01.000Z | icarus_simulator/sat_core/coordinate_util.py | RubenFr/ICARUS-framework | e57a1f50c3bb9522b2a279fee6b625628afd056f | [
"MIT"
] | 2 | 2022-01-19T17:50:56.000Z | 2022-03-06T18:59:41.000Z | # 2020 Tommaso Ciussani and Giacomo Giuliari
"""
This file contains the type definitions and conversions between coordinate schemes.
All length values in m
"""
import numpy as np
from typing import Tuple
from typing_extensions import TypedDict
from icarus_simulator.sat_core.planetary_const import EARTH_RADIUS
CartCoords = Tuple[float, float, float]
def geo2cart(geo_coord: GeodeticPosition) -> CartCoords:
    """
    Convert a geodetic {lat, lon, elev} point to cartesian (x, y, z).

    Args:
        geo_coord: GeodeticPosition. Coordinates of the point in geodesic format.

    Returns:
        Tuple[float, float, float]: Tuple of cartesian coordinates.
    """
    # Spherical angles: azimuth is the longitude, polar is the colatitude.
    azimuth = np.deg2rad(geo_coord["lon"])
    polar = np.deg2rad(90 - geo_coord["lat"])
    radius = geo_coord["elev"] + EARTH_RADIUS
    sin_polar = np.sin(polar)
    cart = (
        radius * sin_polar * np.cos(azimuth),
        radius * sin_polar * np.sin(azimuth),
        radius * np.cos(polar),
    )
    # Sanity check: the point must not lie below the Earth surface
    # (1000 m of slack for approximation error).
    assert np.sqrt(np.sum(np.square(cart))) >= EARTH_RADIUS - 1000
    return cart
| 25.22 | 83 | 0.681205 | # 2020 Tommaso Ciussani and Giacomo Giuliari
"""
This file contains the type definitions and conversions between coordinate schemes.
All length values in m
"""
import numpy as np
from typing import Tuple
from typing_extensions import TypedDict
from icarus_simulator.sat_core.planetary_const import EARTH_RADIUS
class GeodeticPosition(TypedDict):
    """A point in geodetic coordinates: degrees for lat/lon, metres for elev."""
    lat: float
    lon: float
    elev: float  # Elevation wrt Earth surface, NOT Earth center!
class CartesianPosition(TypedDict):
    """A point in Earth-centred cartesian coordinates (metres, per module note)."""
    x: float
    y: float
    z: float
CartCoords = Tuple[float, float, float]
def geo2cart(geo_coord: GeodeticPosition) -> CartCoords:
    """
    Convert a geodetic {lat, lon, elev} point to cartesian (x, y, z).

    Args:
        geo_coord: GeodeticPosition. Coordinates of the point in geodesic format.

    Returns:
        Tuple[float, float, float]: Tuple of cartesian coordinates.
    """
    # Spherical angles: azimuth is the longitude, polar is the colatitude.
    azimuth = np.deg2rad(geo_coord["lon"])
    polar = np.deg2rad(90 - geo_coord["lat"])
    radius = geo_coord["elev"] + EARTH_RADIUS
    sin_polar = np.sin(polar)
    cart = (
        radius * sin_polar * np.cos(azimuth),
        radius * sin_polar * np.sin(azimuth),
        radius * np.cos(polar),
    )
    # Sanity check: the point must not lie below the Earth surface
    # (1000 m of slack for approximation error).
    assert np.sqrt(np.sum(np.square(cart))) >= EARTH_RADIUS - 1000
    return cart
| 0 | 162 | 46 |
0a0267d5dc5af5883c61f4d96b1b3eaeb3197231 | 220 | py | Python | src/cmd_help.py | parsecmonkey/RepoMainForPy | e07ef5358723c384ca8d233e42dc8580e761867e | [
"MIT"
] | 6 | 2021-11-26T21:57:35.000Z | 2022-01-23T12:40:35.000Z | src/cmd_help.py | parsecmonkey/RepoMainForPy | e07ef5358723c384ca8d233e42dc8580e761867e | [
"MIT"
] | 46 | 2021-11-18T17:32:47.000Z | 2022-01-10T12:26:56.000Z | src/cmd_help.py | parsecmonkey/RepoMainForPy | e07ef5358723c384ca8d233e42dc8580e761867e | [
"MIT"
] | 1 | 2021-11-19T03:16:16.000Z | 2021-11-19T03:16:16.000Z | # help | 22 | 33 | 0.590909 | # help
def run():
    """Print the list of available interactive commands, one per line."""
    lines = (
        "help : コマンド一覧を表示",
        "clone: リポジトリをクローン",
        "log : ログを確認",
        "diff : ファイルの差分を確認",
        "plot : グラフを描画",
        "message:コミットメッセージを解析",
        "exit : 終了",
    )
    for line in lines:
        print(line)
09d701a9f15c22b38883e7c39e11f536f6754a18 | 104 | py | Python | entity/cards/LETLT_060/__init__.py | x014/lushi_script | edab2b88e3f0de8139de2541ab2daa331f777c0e | [
"MIT"
] | 102 | 2021-10-20T09:06:39.000Z | 2022-03-28T13:35:11.000Z | entity/cards/LETLT_060/__init__.py | x014/lushi_script | edab2b88e3f0de8139de2541ab2daa331f777c0e | [
"MIT"
] | 98 | 2021-10-19T16:13:27.000Z | 2022-03-27T13:27:49.000Z | entity/cards/LETLT_060/__init__.py | x014/lushi_script | edab2b88e3f0de8139de2541ab2daa331f777c0e | [
"MIT"
] | 55 | 2021-10-19T03:56:50.000Z | 2022-03-25T08:25:26.000Z | # -*- coding: utf-8 -*-
# Importing the card module registers the LETLT_060 implementation;
# the import is needed only for its side effects.
import entity.cards.LETLT_060.LETLT_060
# NOTE(review): duplicate of the line above — a no-op; likely safe to remove.
import entity.cards.LETLT_060.LETLT_060
| 26 | 39 | 0.769231 | # -*- coding: utf-8 -*-
# Importing the card module registers the LETLT_060 implementation;
# the import is needed only for its side effects.
import entity.cards.LETLT_060.LETLT_060
# NOTE(review): duplicate of the line above — a no-op; likely safe to remove.
import entity.cards.LETLT_060.LETLT_060
| 0 | 0 | 0 |
7752f4522648507d97542e729853ad5868334402 | 1,503 | py | Python | blog_app/blog/migrations/0011_auto_20190216_1124.py | flxj/Django_blog | 01eb12553335115fee5faecafe8cacf2f0615135 | [
"MIT"
] | 1 | 2019-03-27T02:24:22.000Z | 2019-03-27T02:24:22.000Z | blog_app/blog/migrations/0011_auto_20190216_1124.py | flxj/Django_blog | 01eb12553335115fee5faecafe8cacf2f0615135 | [
"MIT"
] | null | null | null | blog_app/blog/migrations/0011_auto_20190216_1124.py | flxj/Django_blog | 01eb12553335115fee5faecafe8cacf2f0615135 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-02-16 03:24
from django.db import migrations, models
| 27.833333 | 65 | 0.540253 | # Generated by Django 2.1.5 on 2019-02-16 03:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: retypes/loosens several fields of ``Book``."""
    dependencies = [
        ('blog', '0010_auto_20190213_1925'),
    ]
    # Every operation below is an AlterField on the ``book`` model; nothing is
    # created or deleted.
    operations = [
        migrations.AlterField(
            model_name='book',
            name='created_time',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='book',
            name='excerpt',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='book',
            name='modified_time',
            field=models.DateTimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='book',
            name='name',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='book',
            name='picture',
            field=models.ImageField(blank=True, upload_to='img'),
        ),
        migrations.AlterField(
            model_name='book',
            name='publication_date',
            field=models.CharField(blank=True, max_length=20),
        ),
        migrations.AlterField(
            model_name='book',
            name='review',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='book',
            name='writer',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
| 0 | 1,389 | 23 |
429ee136ee6e644765bfba03ee4ed907a9f58777 | 416 | py | Python | rules/LineTooLongRule.py | rvben/galaxy-lint-rules | aa014ea79ce1eaf3a5e6be187fd6d3cc1617fea4 | [
"Apache-2.0"
] | null | null | null | rules/LineTooLongRule.py | rvben/galaxy-lint-rules | aa014ea79ce1eaf3a5e6be187fd6d3cc1617fea4 | [
"Apache-2.0"
] | null | null | null | rules/LineTooLongRule.py | rvben/galaxy-lint-rules | aa014ea79ce1eaf3a5e6be187fd6d3cc1617fea4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 Ansible, Inc.
# All Rights Reserved.
from ansiblelint import AnsibleLintRule
| 26 | 62 | 0.661058 | # Copyright (c) 2018 Ansible, Inc.
# All Rights Reserved.
from ansiblelint import AnsibleLintRule
class LineTooLongRule(AnsibleLintRule):
    """Ansible-lint rule flagging lines longer than 120 characters."""

    id = '204'
    shortdesc = 'Lines should be no longer than 120 chars'
    description = ('Long lines make code harder to read and '
                   'code review more difficult')
    tags = ['formatting']

    def match(self, file, line):
        """Return True when ``line`` exceeds the 120-character limit."""
        limit = 120
        return len(line) > limit
| 38 | 255 | 23 |
bd43ad78f3ba7e5d3ac09df61846ff56d8bd6cec | 827 | py | Python | recruiter/convertPDFToText.py | bhagvank/BankingChatbot | 0a320176cdef41605ed8cbe3471a61815a769f29 | [
"Apache-2.0"
] | null | null | null | recruiter/convertPDFToText.py | bhagvank/BankingChatbot | 0a320176cdef41605ed8cbe3471a61815a769f29 | [
"Apache-2.0"
] | null | null | null | recruiter/convertPDFToText.py | bhagvank/BankingChatbot | 0a320176cdef41605ed8cbe3471a61815a769f29 | [
"Apache-2.0"
] | null | null | null | import PyPDF2
import textract
#from nltk.tokenize import word_tokenize
#from nltk.corpus import stopwords | 41.35 | 163 | 0.708585 | import PyPDF2
import textract
#from nltk.tokenize import word_tokenize
#from nltk.corpus import stopwords
def convertPDFToText(path):
    """Extract and concatenate the text of every page of the PDF at ``path``.

    Args:
        path: Filesystem path to a PDF file.

    Returns:
        str: Text of all pages joined together. PyPDF2 cannot read scanned
        (image-only) PDFs, so the result may be empty for such files.
    """
    # Context manager ensures the file handle is closed even on error
    # (the original opened the file and never closed it).
    with open(path, 'rb') as pdf_file:
        pdfReader = PyPDF2.PdfFileReader(pdf_file)
        # Join in a single pass instead of quadratic `text += ...` in a loop.
        return "".join(
            pdfReader.getPage(page).extractText()
            for page in range(pdfReader.numPages)
        )
43eb5bd1e21c18789dba4a888e8bc24b516d544f | 5,066 | py | Python | BLSeg/blseg/backbone/mobilenet.py | ForrestPi/semanticSegmentation | 1e5519279e2a9574f09eaf91439138b74b0f860c | [
"MIT"
] | 7 | 2020-04-06T10:25:30.000Z | 2021-02-24T14:51:22.000Z | BLSeg/blseg/backbone/mobilenet.py | ForrestPi/semanticSegmentation | 1e5519279e2a9574f09eaf91439138b74b0f860c | [
"MIT"
] | null | null | null | BLSeg/blseg/backbone/mobilenet.py | ForrestPi/semanticSegmentation | 1e5519279e2a9574f09eaf91439138b74b0f860c | [
"MIT"
] | 2 | 2020-04-08T14:43:21.000Z | 2020-12-11T03:03:37.000Z | import torch
from torch import nn
from .utils import conv3x3, DepthwiseSeparableConv
from .base import BackboneBaseModule
| 38.969231 | 78 | 0.536715 | import torch
from torch import nn
from .utils import conv3x3, DepthwiseSeparableConv
from .base import BackboneBaseModule
class LinearBottleneck(nn.Module):
    """MobileNetV2-style inverted-residual block.

    1x1 conv expands channels by factor ``t``, a depthwise-separable conv
    projects to ``out_ch`` (without a final ReLU — the "linear" bottleneck),
    and a residual skip is added when input/output shapes match.
    """
    def __init__(self, in_ch, out_ch, t, stride):
        super(LinearBottleneck, self).__init__()
        # Residual connection only when the block preserves shape exactly.
        self.do_residual = in_ch == out_ch and stride == 1
        self.conv1 = nn.Conv2d(in_ch, in_ch * t, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_ch * t)
        self.relu = nn.ReLU6(inplace=True)
        # last_relu=False keeps the projection linear.
        self.dsconv = DepthwiseSeparableConv(in_ch * t,
                                             out_ch,
                                             stride=stride,
                                             relu6=True,
                                             last_relu=False)
    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.dsconv(x)
        if self.do_residual:
            x += residual
        return x
class MobileNetV1(BackboneBaseModule):
    """MobileNetV1 backbone built from depthwise-separable convolutions.

    ``channels``/``strides`` record, per stage, the output channel count and
    the cumulative downsampling factor (overall output stride 32).
    """
    def __init__(self):
        super(MobileNetV1, self).__init__()
        self.channels = [64, 128, 256, 512, 1024]
        self.strides = [2, 4, 8, 16, 32]
        # Stem: strided 3x3 conv, then one depthwise-separable conv.
        self.stage0 = nn.Sequential(
            conv3x3(3, 32, 2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            DepthwiseSeparableConv(32, self.channels[0], 1, relu6=False),
        )
        self.stage1 = self._add_stage(self.channels[0], self.channels[1], 2)
        self.stage2 = self._add_stage(self.channels[1], self.channels[2], 2)
        self.stage3 = self._add_stage(self.channels[2], self.channels[3], 6)
        self.stage4 = self._add_stage(self.channels[3], self.channels[4], 2)
        self._init_params()
    def forward(self, x):
        # Comments give output channels and resolution relative to the input.
        x = self.stage0(x) # 64, 1/2
        x = self.stage1(x) # 128, 1/4
        x = self.stage2(x) # 256, 1/8
        x = self.stage3(x) # 512, 1/16
        x = self.stage4(x) # 1024, 1/32
        return x
    def _add_stage(self, in_ch, out_ch, repeat_time):
        """Build one stage: a strided conv followed by repeat_time-1 stride-1 convs."""
        assert repeat_time > 0 and isinstance(repeat_time, int)
        layers = [DepthwiseSeparableConv(in_ch, out_ch, 2, relu6=False)]
        for _ in range(repeat_time - 1):
            layers.append(DepthwiseSeparableConv(out_ch, out_ch, relu6=False))
        return nn.Sequential(*layers)
    def _change_downsample(self, params):
        # Override the stride of the first depthwise conv of stages 3 and 4 —
        # presumably used by the base class to reduce the output stride; confirm
        # against BackboneBaseModule.
        self.stage3[0].dwconv.stride = (params[0], params[0])
        self.stage4[0].dwconv.stride = (params[1], params[1])
class MobileNetV2(BackboneBaseModule):
    """MobileNetV2 backbone built from LinearBottleneck (inverted-residual) blocks.

    ``channels``/``strides`` record, per stage, the output channel count and the
    cumulative downsampling factor (overall output stride 32). Stages 3 and 4
    are assembled from several bottleneck groups appended one after another.
    """
    def __init__(self):
        super(MobileNetV2, self).__init__()
        self.channels = [16, 24, 32, 96, 1280]
        self.strides = [2, 4, 8, 16, 32]
        # Stem: strided 3x3 conv, then one expansion-factor-1 bottleneck.
        self.stage0 = nn.Sequential(
            conv3x3(3, 32, 2),
            nn.BatchNorm2d(32),
            nn.ReLU6(inplace=True),
            LinearBottleneck(32, self.channels[0], 1, 1),
        )
        self.stage1 = self._add_stage(LinearBottleneck, self.channels[0],
                                      self.channels[1], 6, 2, 2)
        self.stage2 = self._add_stage(LinearBottleneck, self.channels[1],
                                      self.channels[2], 6, 2, 3)
        # Stage 3 = 4 blocks at 64 channels (32*2) + 3 blocks at 96 channels.
        self.stage3 = self._add_stage(LinearBottleneck, self.channels[2],
                                      self.channels[2] * 2, 6, 2, 4)
        for layer in self._add_stage(LinearBottleneck, self.channels[2] * 2,
                                     self.channels[3], 6, 1, 3):
            self.stage3.add_module(str(len(self.stage3)), layer)
        # Stage 4 = 3 blocks at 160 channels (1280/8) + 1 block at 320 (1280/4),
        # then a 1x1 conv expanding to the final 1280 channels.
        self.stage4 = self._add_stage(LinearBottleneck, self.channels[3],
                                      int(self.channels[4] / 8), 6, 2, 3)
        for layer in self._add_stage(LinearBottleneck,
                                     int(self.channels[4] / 8),
                                     int(self.channels[4] / 4), 6, 1, 1):
            self.stage4.add_module(str(len(self.stage4)), layer)
        self.stage4.add_module(
            str(len(self.stage4)),
            nn.Sequential(
                nn.Conv2d(int(self.channels[4] / 4),
                          self.channels[4],
                          1,
                          bias=False),
                nn.BatchNorm2d(self.channels[4]),
                nn.ReLU6(inplace=True),
            ))
        self._init_params()
    def forward(self, x):
        # Comments give output channels and resolution relative to the input.
        x = self.stage0(x) # 16, 1/2
        x = self.stage1(x) # 24, 1/4
        x = self.stage2(x) # 32, 1/8
        x = self.stage3(x) # 96, 1/16
        x = self.stage4(x) # 1280, 1/32
        return x
    def _add_stage(self, block, in_ch, out_ch, t, stride, repeat_time):
        """Build a group: one strided ``block`` followed by repeat_time-1 stride-1 blocks."""
        assert repeat_time > 0 and isinstance(repeat_time, int)
        layers = [block(in_ch, out_ch, t, stride)]
        for _ in range(repeat_time - 1):
            layers.append(block(out_ch, out_ch, t, 1))
        return nn.Sequential(*layers)
    def _change_downsample(self, params):
        # Override the stride of the first depthwise conv of stages 3 and 4 —
        # presumably used by the base class to reduce the output stride; confirm
        # against BackboneBaseModule.
        self.stage3[0].dsconv.dwconv.stride = (params[0], params[0])
        self.stage4[0].dsconv.dwconv.stride = (params[1], params[1])
| 4,555 | 47 | 339 |
860ae04a58c63c4e628e9f219b834f014c47ea76 | 2,174 | py | Python | coreRelback/urls.py | supernoi/relback | b4f04d7ed5cb315e9c862fbd82291ada8fb5af08 | [
"MIT"
] | null | null | null | coreRelback/urls.py | supernoi/relback | b4f04d7ed5cb315e9c862fbd82291ada8fb5af08 | [
"MIT"
] | null | null | null | coreRelback/urls.py | supernoi/relback | b4f04d7ed5cb315e9c862fbd82291ada8fb5af08 | [
"MIT"
] | 1 | 2019-06-25T13:27:40.000Z | 2019-06-25T13:27:40.000Z | from django.contrib import admin
from django.urls import path
from coreRelback import views
app_name = 'coreRelback'
urlpatterns = [
path('', views.index, name='index'),
path('admin/', admin.site.urls),
path('creators/', views.creators, name='creators'),
# Routes - Clients
path('client/', views.clientRead.as_view(), name='client'),
path('client/create/', views.clientCreate.as_view(), name='clientCreate'),
path('client/update/', views.clientUpdate.as_view(), name='clientUpdate'),
path('client/delete/', views.clientDelete.as_view(), name='clientDelete'),
# Routes - Hosts
path('host/', views.hostRead.as_view(), name='host'),
path('host/create/', views.hostCreate.as_view(), name='hostCreate'),
path('host/update/', views.hostUpdate.as_view(), name='hostUpdate'),
path('host/delete/', views.hostDelete.as_view(), name='hostDelete'),
# Routes - Databases
path('database/', views.databaseRead.as_view(), name='database'),
path('database/create/', views.databaseCreate.as_view(), name='databaseCreate'),
path('database/update/', views.databaseUpdate.as_view(), name='databaseUpdate'),
path('database/delete/', views.databaseDelete.as_view(), name='databaseDelete'),
path('database/hostsList/', views.hostsList, name='hostsList'),
# Routes - Policies
path('policy/', views.policyRead.as_view(), name='policy'),
path('policy/detail/', views.policyRead.policyDetail, name='policyDetail'),
path('policy/create/', views.policyCreate.as_view(), name='policyCreate'),
path('policy/update/', views.policyUpdate.as_view(), name='policyUpdate'),
path('policy/delete/', views.policyDelete.as_view(), name='policyDelete'),
path('policy/hostsList/', views.hostsList, name='hostsList'),
path('policy/databasesList/', views.databasesList, name='databasesList'),
# Routes - Reports
path('reports/', views.reportRead, name='reportRead'),
path('reports/readLogDetail/<int:idPolicy>/<int:dbKey>/<int:sessionKey>/', views.reportReadLogDetail, name='reportReadLogDetail'),
path('reports/refreshSchedule', views.reportRefreshSchedule, name='refreshSchedule'),
]
| 46.255319 | 134 | 0.699632 | from django.contrib import admin
from django.urls import path
from coreRelback import views
app_name = 'coreRelback'
urlpatterns = [
path('', views.index, name='index'),
path('admin/', admin.site.urls),
path('creators/', views.creators, name='creators'),
# Routes - Clients
path('client/', views.clientRead.as_view(), name='client'),
path('client/create/', views.clientCreate.as_view(), name='clientCreate'),
path('client/update/', views.clientUpdate.as_view(), name='clientUpdate'),
path('client/delete/', views.clientDelete.as_view(), name='clientDelete'),
# Routes - Hosts
path('host/', views.hostRead.as_view(), name='host'),
path('host/create/', views.hostCreate.as_view(), name='hostCreate'),
path('host/update/', views.hostUpdate.as_view(), name='hostUpdate'),
path('host/delete/', views.hostDelete.as_view(), name='hostDelete'),
# Routes - Databases
path('database/', views.databaseRead.as_view(), name='database'),
path('database/create/', views.databaseCreate.as_view(), name='databaseCreate'),
path('database/update/', views.databaseUpdate.as_view(), name='databaseUpdate'),
path('database/delete/', views.databaseDelete.as_view(), name='databaseDelete'),
path('database/hostsList/', views.hostsList, name='hostsList'),
# Routes - Policies
path('policy/', views.policyRead.as_view(), name='policy'),
path('policy/detail/', views.policyRead.policyDetail, name='policyDetail'),
path('policy/create/', views.policyCreate.as_view(), name='policyCreate'),
path('policy/update/', views.policyUpdate.as_view(), name='policyUpdate'),
path('policy/delete/', views.policyDelete.as_view(), name='policyDelete'),
path('policy/hostsList/', views.hostsList, name='hostsList'),
path('policy/databasesList/', views.databasesList, name='databasesList'),
# Routes - Reports
path('reports/', views.reportRead, name='reportRead'),
path('reports/readLogDetail/<int:idPolicy>/<int:dbKey>/<int:sessionKey>/', views.reportReadLogDetail, name='reportReadLogDetail'),
path('reports/refreshSchedule', views.reportRefreshSchedule, name='refreshSchedule'),
]
| 0 | 0 | 0 |
b900d8747d0d6c2c0034ef9bc48889dd61f44eaa | 1,181 | py | Python | angr/analyses/reaching_definitions/__init__.py | Will03/angr | 2a3a67f38aaeb4dbd5cc4ed0d5b9057c8739ca80 | [
"BSD-2-Clause"
] | 1 | 2021-05-21T02:41:28.000Z | 2021-05-21T02:41:28.000Z | angr/analyses/reaching_definitions/__init__.py | FDlucifer/angr | 598f18a39c98d853a91f6880d25df2c528d3b312 | [
"BSD-2-Clause"
] | null | null | null | angr/analyses/reaching_definitions/__init__.py | FDlucifer/angr | 598f18a39c98d853a91f6880d25df2c528d3b312 | [
"BSD-2-Clause"
] | null | null | null | from typing import Set, Optional, TYPE_CHECKING
from ...knowledge_plugins.key_definitions import LiveDefinitions
from .. import register_analysis
from .reaching_definitions import ReachingDefinitionsAnalysis
if TYPE_CHECKING:
from angr.knowledge_plugins.key_definitions.definition import Definition
from angr.storage.memory_object import SimMemoryObject
from angr.storage.memory_mixins import MultiValuedMemory
from angr.storage.memory_mixins.paged_memory.pages import MVListPage
register_analysis(ReachingDefinitionsAnalysis, 'ReachingDefinitions')
| 33.742857 | 76 | 0.73497 | from typing import Set, Optional, TYPE_CHECKING
from ...knowledge_plugins.key_definitions import LiveDefinitions
from .. import register_analysis
from .reaching_definitions import ReachingDefinitionsAnalysis
if TYPE_CHECKING:
from angr.knowledge_plugins.key_definitions.definition import Definition
from angr.storage.memory_object import SimMemoryObject
from angr.storage.memory_mixins import MultiValuedMemory
from angr.storage.memory_mixins.paged_memory.pages import MVListPage
def get_all_definitions(region: 'MultiValuedMemory') -> Set['Definition']:
    """Collect every Definition stored anywhere in ``region``.

    Walks every page of the memory region, pulls each stored
    SimMemoryObject, and extracts the definitions recorded in its
    ``object`` via ``LiveDefinitions.extract_defs``.
    """
    all_defs: Set['Definition'] = set()
    # MultiValuedMemory only uses ListPage internally
    for page in region._pages.values():
        page: 'MVListPage'
        # Offsets outside [_min_stored_offset, _max_stored_offset) hold no
        # content — presumably never written; confirm against MVListPage.
        for idx in range(page._min_stored_offset, page._max_stored_offset):
            cnt_set: Optional[Set['SimMemoryObject']] = page.content[idx]
            if cnt_set is None:
                continue  # nothing stored at this offset
            for cnt in cnt_set:
                for def_ in LiveDefinitions.extract_defs(cnt.object):
                    all_defs.add(def_)
    return all_defs
register_analysis(ReachingDefinitionsAnalysis, 'ReachingDefinitions')
| 586 | 0 | 23 |
8dac0f518ca1a8ddf8cf6e572b444e406a6bfe5b | 1,843 | py | Python | viewer.py | ramankh/Smartcab | 0044bbc61b2ade7170115271e7f6ec590ed0e298 | [
"MIT"
] | null | null | null | viewer.py | ramankh/Smartcab | 0044bbc61b2ade7170115271e7f6ec590ed0e298 | [
"MIT"
] | null | null | null | viewer.py | ramankh/Smartcab | 0044bbc61b2ade7170115271e7f6ec590ed0e298 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt | 36.86 | 141 | 0.605534 | import numpy as np
import matplotlib.pyplot as plt
class Plotter():
    """Scatter-plot agent training results stored as pickled dicts in .npy files.

    The hyper-parameters epsilon ``e``, gamma ``g`` and alpha ``a`` are used
    only to label the figures and to build the output file names.
    """

    def __init__(self, e, g, a):
        self.e = e
        self.g = g
        self.a = a

    def _scatter(self, data, title, xlabel, ylabel, out_file):
        """Draw one scatter plot of ``data`` (a dict of x -> y) and save it.

        Shared backend for the three public ``plot_*`` methods, which
        previously triplicated this code.
        """
        colors = list("rgbcmyk")
        fig = plt.figure()
        fig.suptitle(title, fontsize=14)
        plt.scatter(data.keys(), data.values(), color=colors.pop())
        plt.xlabel(xlabel, fontsize=14)
        plt.ylabel(ylabel, fontsize=14)
        plt.legend(data.keys())
        fig.savefig(out_file, bbox_inches='tight')

    def plot_success(self):
        """Plot per-trial rewards; the title also reports the success rate."""
        result = np.load("result.npy").item()
        stats = np.load("stats.npy").item()
        # Was a Python-2 `print stats` statement; works on both 2 and 3 now.
        print(stats)
        self._scatter(
            result,
            '(Epsilon: {} - Gamma: {} - Alpha: {} \n Success Rate: %{})'.format(
                self.e, self.g, self.a, stats["winning"]),
            "Trials",
            "Rewards",
            '(Epsilon{}Gamma{}Alpha{}).png'.format(self.e, self.g, self.a),
        )

    def plot_times(self):
        """Plot rewards against time."""
        times = np.load("times.npy").item()
        self._scatter(
            times,
            '(Epsilon: {} - Gamma: {} - Alpha: {})'.format(self.e, self.g, self.a),
            "Time",
            "Rewards",
            'TIMES(Epsilon{}Gamma{}Alpha{}).png'.format(self.e, self.g, self.a),
        )

    def plot_rewards(self):
        """Plot the percentage of positive rewards per trial."""
        result = np.load("rewards.npy").item()
        self._scatter(
            result,
            '(Epsilon: {} - Gamma: {} - Alpha: {})'.format(self.e, self.g, self.a),
            "Trials",
            "Positive Rewards %",
            'REWARDS(Epsilon{}Gamma{}Alpha{}).png'.format(self.e, self.g, self.a),
        )
145812906c5f285067f65f7a95625e19b7a51831 | 6,009 | py | Python | okcupyd/profile_copy.py | sphericalcow/okcupyd | ae0a99d248c515eea9a6d21a9c89f51e299b33f5 | [
"MIT"
] | 89 | 2015-01-09T19:58:07.000Z | 2022-03-03T21:56:50.000Z | okcupyd/profile_copy.py | sphericalcow/okcupyd | ae0a99d248c515eea9a6d21a9c89f51e299b33f5 | [
"MIT"
] | 51 | 2015-01-18T23:09:35.000Z | 2017-04-24T03:16:03.000Z | okcupyd/profile_copy.py | sphericalcow/okcupyd | ae0a99d248c515eea9a6d21a9c89f51e299b33f5 | [
"MIT"
] | 24 | 2015-01-16T17:43:21.000Z | 2020-09-18T12:19:15.000Z | import logging
import time
from .profile import Profile
from .question import Questions
log = logging.getLogger(__name__)
class Copy(object):
"""Copy photos, essays and other attributes from one profile to another."""
copy_methods = ['photos', 'essays', 'looking_for', 'details', 'questions']
def __init__(self, source_profile_or_user, dest_user):
"""
:param source_profile_or_user: A :class:`~okcupyd.user.User` or
:class:`~okcupyd.profile.Profile` object
from which to copy attributes.
:meth:`~.Copy.questions` will not
will not preserve the importance of
copied questions if a
:class:`~okcupyd.profile.Profile`
instance is provided.
:param dest_user: A :class:`~okcupyd.user.User` to which data will be
copied
"""
if isinstance(source_profile_or_user, Profile):
self.source_profile = source_profile_or_user
self.source_user = None
else:
self.source_user = source_profile_or_user
self.source_profile = self.source_user.profile
self.dest_user = dest_user
def questions(self):
"""Copy questions to the destination user. When this class was
initialized with a :class:`~okcupyd.profile.Profile`, this will
delete any existing questions answers on the destination account.
"""
if self.source_user:
return self._copy_questions_from_user()
else:
return self._copy_questions_from_profile()
def photos(self):
"""Copy photos to the destination user."""
# Reverse because pictures appear in inverse chronological order.
for photo_info in self.dest_user.profile.photo_infos:
self.dest_user.photo.delete(photo_info)
return [self.dest_user.photo.upload_and_confirm(info)
for info in reversed(self.source_profile.photo_infos)]
def essays(self):
"""Copy essays from the source profile to the destination profile."""
for essay_name in self.dest_user.profile.essays.essay_names:
setattr(self.dest_user.profile.essays, essay_name,
getattr(self.source_profile.essays, essay_name))
def looking_for(self):
"""Copy looking for attributes from the source profile to the
destination profile.
"""
looking_for = self.source_profile.looking_for
return self.dest_user.profile.looking_for.update(
gentation=looking_for.gentation,
single=looking_for.single,
near_me=looking_for.near_me,
kinds=looking_for.kinds,
ages=looking_for.ages
)
def details(self):
"""Copy details from the source profile to the destination profile."""
return self.dest_user.profile.details.convert_and_update(
self.source_profile.details.as_dict
)
def all(self):
"""Invoke all of :meth:`~.Copy.questions`, :meth:`~.Copy.details`,
:meth:`~.Copy.essays`, :meth:`~.Copy.photos`, :meth:`~.Copy.looking_for`
"""
for method_name in self.copy_methods:
getattr(self, method_name)()
| 41.157534 | 80 | 0.579963 | import logging
import time
from .profile import Profile
from .question import Questions
log = logging.getLogger(__name__)
class Copy(object):
    """Copy photos, essays and other attributes from one profile to another."""
    # Names of the methods invoked, in order, by :meth:`all`.
    copy_methods = ['photos', 'essays', 'looking_for', 'details', 'questions']
    def __init__(self, source_profile_or_user, dest_user):
        """
        :param source_profile_or_user: A :class:`~okcupyd.user.User` or
                                       :class:`~okcupyd.profile.Profile` object
                                       from which to copy attributes.
                                       :meth:`~.Copy.questions` will not
                                       preserve the importance of
                                       copied questions if a
                                       :class:`~okcupyd.profile.Profile`
                                       instance is provided.
        :param dest_user: A :class:`~okcupyd.user.User` to which data will be
                          copied
        """
        if isinstance(source_profile_or_user, Profile):
            self.source_profile = source_profile_or_user
            self.source_user = None
        else:
            self.source_user = source_profile_or_user
            self.source_profile = self.source_user.profile
        self.dest_user = dest_user
    def questions(self):
        """Copy questions to the destination user. When this class was
        initialized with a :class:`~okcupyd.profile.Profile`, this will
        delete any existing questions answers on the destination account.
        """
        if self.source_user:
            return self._copy_questions_from_user()
        else:
            return self._copy_questions_from_profile()
    def _copy_questions_from_user(self):
        """Replay the source user's answers on the destination account,
        preserving each question's recorded importance level.
        """
        dest_questions = self.dest_user.questions
        # Iterate one importance bucket at a time (e.g. mandatory, very
        # important, ...) so the same importance can be re-applied.
        for key, importance in Questions.importance_name_to_number.items():
            questions = getattr(self.source_user.questions, key)
            for question in questions:
                log.debug(
                    dest_questions.respond_from_user_question(
                        question,
                        importance
                    ).content
                )
    def _copy_questions_from_profile(self):
        """Copy question answers when only a Profile (not a logged-in source
        user) is available. Importance is not preserved: every response is
        submitted with the constant ``3`` (presumably a mid-level importance —
        confirm against the questions API).
        """
        # Answer all of the questions that the source user has answered.
        # So that we can see their answers
        for question in self.source_profile.questions:
            log.debug(u'Answering {0}: {1} to see {2}\'s answer'.format(
                question.id, question.text, self.source_profile.username
            ))
            if not question.answered:
                self.dest_user.questions.respond(question.id, [1], [1], 3)
        # Load all of their questions. We use a new question fetchable because
        # the one that lives on the profile has been accessed and has cached
        # data.
        source_questions = self.source_profile.question_fetchable()[:]
        log.debug(u'Copying {0} questions from {1} to {2}'.format(
            len(source_questions),
            self.source_profile.username,
            self.dest_user.username
        ))
        id_to_user_question = {question.id: question
                               for question in self.dest_user.profile.questions}
        self.dest_user.questions.clear()
        # Poll until the destination profile reports no remaining questions;
        # clear() appears to take effect asynchronously on the server side.
        while True:
            log.debug(u'Sleeping to wait for questions to clear')
            time.sleep(5)
            try:
                question = self.dest_user.profile.question_fetchable()[0]
            except IndexError:
                break
            else:
                log.debug(u'Destination user still has question {0}'.format(
                    question.text
                ))
        for question in source_questions:
            try:
                user_question = id_to_user_question[question.id]
            except KeyError:
                log.debug(u'No user question found for {0}: {1}'.format(
                    question.id, question.text
                ))
            else:
                log.debug(u'Answering {0}: {1}'.format(
                    question.id, question.text
                ))
                self.dest_user.questions.respond_from_question(question,
                                                               user_question, 3)
    def photos(self):
        """Copy photos to the destination user."""
        # Reverse because pictures appear in inverse chronological order.
        for photo_info in self.dest_user.profile.photo_infos:
            self.dest_user.photo.delete(photo_info)
        return [self.dest_user.photo.upload_and_confirm(info)
                for info in reversed(self.source_profile.photo_infos)]
    def essays(self):
        """Copy essays from the source profile to the destination profile."""
        for essay_name in self.dest_user.profile.essays.essay_names:
            setattr(self.dest_user.profile.essays, essay_name,
                    getattr(self.source_profile.essays, essay_name))
    def looking_for(self):
        """Copy looking for attributes from the source profile to the
        destination profile.
        """
        looking_for = self.source_profile.looking_for
        return self.dest_user.profile.looking_for.update(
            gentation=looking_for.gentation,
            single=looking_for.single,
            near_me=looking_for.near_me,
            kinds=looking_for.kinds,
            ages=looking_for.ages
        )
    def details(self):
        """Copy details from the source profile to the destination profile."""
        return self.dest_user.profile.details.convert_and_update(
            self.source_profile.details.as_dict
        )
    def all(self):
        """Invoke all of :meth:`~.Copy.questions`, :meth:`~.Copy.details`,
        :meth:`~.Copy.essays`, :meth:`~.Copy.photos`, :meth:`~.Copy.looking_for`
        """
        for method_name in self.copy_methods:
            getattr(self, method_name)()
| 2,516 | 0 | 54 |
fb07d1f256a2f6d7a6cc9dbdf801ef7f4558d52a | 323 | py | Python | TP_ALGO_3/convert.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | TP_ALGO_3/convert.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | TP_ALGO_3/convert.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null |
print(convert(10,2))
print(convert_inv(10,2))
def convert(n,base):
    """Return the base-``base`` representation of ``n``, most significant
    digit first.

    Digits are rendered with ``str``, so bases above 10 produce
    multi-character "digits" (e.g. 15 -> "15").
    """
    if n < base:
        return str(n)
    # Recurse on the quotient, then append the least-significant digit.
    return convert(n//base,base) + str(n%base)
print(convert(10,2))
def convert_inv(n,base):
    """Return the base-``base`` digits of ``n`` least-significant first
    (the reverse of ``convert``).

    Bug fix: the recursive step previously called ``convert`` instead of
    ``convert_inv``, so only the leading digit was moved to the front while
    the remaining digits stayed in most-significant-first order. That gave
    wrong results whenever the quotient's representation is not a
    palindrome (e.g. n=22, base=2 yielded "01011" instead of "01101").
    """
    if n < base:
        res = str(n)
    else:
        # Emit the least-significant digit, then recurse on the quotient.
        res = str(n%base) + convert_inv(n//base,base)
    return res
print(convert_inv(10,2))
| 228 | 0 | 45 |
87c1757d37d96b9139b1f04cc9caf72ffd1cad94 | 208 | py | Python | {{cookiecutter.repo_name}}/tests/conftest.py | digitalr00ts/cookiecutter-sphinx-docs | 0311e0de0978662fa4f129a4598ffe6dfc2340ea | [
"Apache-2.0"
] | 1 | 2018-11-10T23:45:10.000Z | 2018-11-10T23:45:10.000Z | {{cookiecutter.repo_name}}/tests/conftest.py | digitalr00ts/cookiecutter-sphinx-docs | 0311e0de0978662fa4f129a4598ffe6dfc2340ea | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.repo_name}}/tests/conftest.py | digitalr00ts/cookiecutter-sphinx-docs | 0311e0de0978662fa4f129a4598ffe6dfc2340ea | [
"Apache-2.0"
] | 3 | 2019-03-15T20:28:29.000Z | 2020-10-10T04:32:53.000Z | """Configuration for tests."""
import {{ cookiecutter.project_slug }}
def pytest_report_header():
"""Additional report header."""
return f"version: { {{- cookiecutter.project_slug -}}.__version__}"
| 26 | 71 | 0.697115 | """Configuration for tests."""
import {{ cookiecutter.project_slug }}
def pytest_report_header():
"""Additional report header."""
return f"version: { {{- cookiecutter.project_slug -}}.__version__}"
| 0 | 0 | 0 |
fcbd9e1e6136d72f930126d932e00b32886b9ef2 | 30 | py | Python | bluespot/guest/settings.py | unifispot/unifispot-free | 186c906aa79e8671e03d0808469a3f27d67e34e1 | [
"MIT"
] | 21 | 2015-11-10T11:40:39.000Z | 2021-12-02T22:58:45.000Z | bluespot/guest/settings.py | unifispot/unifispot-free | 186c906aa79e8671e03d0808469a3f27d67e34e1 | [
"MIT"
] | null | null | null | bluespot/guest/settings.py | unifispot/unifispot-free | 186c906aa79e8671e03d0808469a3f27d67e34e1 | [
"MIT"
] | 10 | 2016-02-03T14:45:41.000Z | 2019-11-08T08:21:25.000Z | GUEST_URL_PREFIX = '/guest'
| 7.5 | 27 | 0.7 | GUEST_URL_PREFIX = '/guest'
| 0 | 0 | 0 |
e2f86cad37bb3bfbf46641c1ad6cb78d3514bb2b | 957 | py | Python | python/tour/asyncio/lib01.py | supeterlau/bedev | c134875eae37d265936199fda278416e2a3c1224 | [
"MIT"
] | null | null | null | python/tour/asyncio/lib01.py | supeterlau/bedev | c134875eae37d265936199fda278416e2a3c1224 | [
"MIT"
] | null | null | null | python/tour/asyncio/lib01.py | supeterlau/bedev | c134875eae37d265936199fda278416e2a3c1224 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 Peter Lau <superpeterlau@outlook.com>
#
# Distributed under terms of the MIT license.
import time
import asyncio
# method 1
# asyncio.run(main())
# print(main())
# asyncio.run(main2())
asyncio.run(main3())
| 19.530612 | 56 | 0.634274 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 Peter Lau <superpeterlau@outlook.com>
#
# Distributed under terms of the MIT license.
import time
import asyncio
async def main1():
    """Print a greeting, sleep one second, then print completion."""
    print("Hello")
    await asyncio.sleep(1)
    print("DONE")
# method 1
# asyncio.run(main())
# print(main())
async def say_after(delay, message):
    """Sleep for ``delay`` seconds, then print ``message``."""
    await asyncio.sleep(delay)
    print(message)
async def main2():
    """Run two say_after calls sequentially, logging start and finish times."""
    print(f"started at {time.strftime('%X')}")
    for delay, message in ((2, 'Hello Main2'), (1, 'Done Main2')):
        await say_after(delay, message)
    print(f"Finished at {time.strftime('%X')}")
# asyncio.run(main2())
async def main3():
    """Run two say_after calls concurrently as tasks; both are scheduled
    before the start timestamp is printed, exactly as in the original."""
    tasks = [
        asyncio.create_task(say_after(delay, message))
        for delay, message in ((1, 'Hello Main3'), (2, 'Done Main3'))
    ]
    print(f"Started at {time.strftime('%X')}")
    for task in tasks:
        await task
    print(f"Finished at {time.strftime('%X')}")
asyncio.run(main3())
| 569 | 0 | 92 |
2e123e19d0b2552573d4e4c4cd2e18ff38b0b711 | 1,212 | py | Python | prpe9/collect_prpe_ne.py | zuters/prpene | d76d6203e366c91efc1d1ae7ecee4c73e80e38d2 | [
"MIT"
] | 1 | 2020-12-02T09:00:49.000Z | 2020-12-02T09:00:49.000Z | prpe9/collect_prpe_ne.py | zuters/prpene | d76d6203e366c91efc1d1ae7ecee4c73e80e38d2 | [
"MIT"
] | null | null | null | prpe9/collect_prpe_ne.py | zuters/prpene | d76d6203e366c91efc1d1ae7ecee4c73e80e38d2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Jānis Zuters
from __future__ import unicode_literals, division
import sys
import argparse
from io import open
argparse.open = open
from prpe_ne import collect_ne_pairs
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
collect_ne_pairs(args.input1.name,args.input2.name,args.output1.name,args.output2.name)
| 28.186047 | 91 | 0.65429 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Jānis Zuters
from __future__ import unicode_literals, division
import sys
import argparse
from io import open
argparse.open = open
from prpe_ne import collect_ne_pairs
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="extract potential pairs of named entities from parallel corpora")
parser.add_argument(
'--input1', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH',
help="Input file #1")
parser.add_argument(
'--input2', '-k', type=argparse.FileType('r'),
metavar='PATH',
help="Input file #2")
parser.add_argument(
'--output1', '-o', type=argparse.FileType('w'), default=sys.stdout,
metavar='PATH',
help="Output file #1")
parser.add_argument(
'--output2', '-p', type=argparse.FileType('w'),
metavar='PATH',
help="Output file #2")
return parser
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
collect_ne_pairs(args.input1.name,args.input2.name,args.output1.name,args.output2.name)
| 783 | 0 | 23 |
0d87be926ab4f43c42524a34778fe6bbfbd0404e | 4,188 | py | Python | versions/2.0/karma/python/geo.py | Lituta/dig-alignment | 812ab0e4a181953d23024ef9df92eec1d40559cb | [
"Apache-2.0"
] | 5 | 2015-04-26T19:39:44.000Z | 2018-12-24T13:17:42.000Z | versions/2.0/karma/python/geo.py | Lituta/dig-alignment | 812ab0e4a181953d23024ef9df92eec1d40559cb | [
"Apache-2.0"
] | 7 | 2016-03-23T16:52:32.000Z | 2018-07-24T03:08:05.000Z | versions/2.0/karma/python/geo.py | Lituta/dig-alignment | 812ab0e4a181953d23024ef9df92eec1d40559cb | [
"Apache-2.0"
] | 28 | 2015-04-08T22:39:07.000Z | 2021-12-25T22:16:36.000Z |
def gn_place_uri(geonamesid,fcode,country,admin1,admin2,admin3,admin4):
"Return URI of place from a geonames id"
if fcode=="PCLI":
return "geonames/place/Country/"+country
elif fcode=="ADM1":
return "geonames/place/State1stDiv/"+country+"_"+admin1
elif fcode=="ADM2":
return "geonames/place/CountyProvince2ndDiv/"+country+"_"+admin1+"_"+admin2
elif fcode=="ADM3":
return "geonames/place/Community3rdDiv/"+country+"_"+admin1+"_"+admin2+"_"+admin3
elif fcode=="ADM4":
return "geonames/place/SubCommunity4thDiv/"+country+"_"+admin1+"_"+admin2+"_"+admin3+"_"+admin4
return "geonames/place/"+geonamesid
def gn_place_spacetimevolume_uri(class_uri):
"Return URI of SpaceTimeVolume for a class with Spacetimevolume"
return class_uri+"/SpaceTimeVolume"
def gn_place_identifier_uri(geonamesid):
"Return URI of place from a geonames id"
return "geonames/place/"+geonamesid+"/identifier"
def gn_name_uri(geonamesid,name):
"Return URI of name for a place with geonames id"
return "geonames/place/"+geonamesid+"/Name/"+name
def gn_nametype(type):
"Return Nametype of name"
return "http://dig.isi.edu/gazetteer/data/SKOS/NameTypes/"+type
def gn_select_not_populated_or_administrative(fclass):
"Return Nametype of name"
return fclass!="P" and fclass!="A"
def gn_nametype_conditional(type,condition):
"Return Nametype of name if condition is 1, used for alternamtenames which have flags for historic, colloquial,..."
if condition == 1:
return "http://dig.isi.edu/gazetteer/data/SKOS/NameTypes/"+type
return ''
def gn_countrycodeconcept_uri(country):
"Return country code concept_uri of country taken from SKOS vocabulary http://eulersharp.sourceforge.net/2003/03swap/countries"
return "http://eulersharp.sourceforge.net/2003/03swap/countries#"+country
def gn_languagecodeconcept_uri(language):
"Return language code concept_uri of language taken from SKOS vocabulary http://eulersharp.sourceforge.net/2003/03swap/languages"
if len(language) == 2:
return "http://eulersharp.sourceforge.net/2003/03swap/languages#"+language
return ''
def gn_pointgeometry_uri(place_uri):
"Return URI of PointGeometry for a place"
return place_uri+"/PointGeometry"
def gn_country_uri(country):
"Return URI for Place of class country"
return "geonames/place/Country/"+country
def gn_geojson(lat,long):
"Return geojson point representation"
return """{"type": "Point","coordinates": ["""+lat+","+long+"]}"
def gn_State1stDiv_uri(country,admin1):
"Return URI for Place of class State1stDiv"
if admin1 == None or admin1 =='00':
return ''
return "geonames/place/State1stDiv/"+country+"_"+admin1
def gn_CountyProvince2ndDiv_uri(country,admin1,admin2):
"Return URI for Place of class CountyProvince2ndDiv"
if admin2 == '' or admin2 =='00':
return ''
return "geonames/place/CountyProvince2ndDiv/"+country+"_"+admin1+"_"+admin2
def gn_Community3rdDiv_uri(country,admin1,admin2,admin3):
    """Return URI for Place of class Community3rdDiv.

    Bug fix: the original emitted a ``CountyProvince2ndDiv`` path segment
    (copy-paste from the 2nd-division helper), which disagreed with the
    ``ADM3`` branch of ``gn_place_uri`` and so produced non-matching URIs
    for the same place.  Returns '' for missing/placeholder admin3 codes.
    """
    if admin3 == '' or admin3 =='00':
        return ''
    return "geonames/place/Community3rdDiv/"+country+"_"+admin1+"_"+admin2+"_"+admin3
def gn_SubCommunity4thDiv_uri(country,admin1,admin2,admin3,admin4):
"Return URI for Place of class CountyProvince2ndDiv"
if admin4 == '' or admin4 =='00':
return ''
return "geonames/place/SubCommunity4thDiv/"+country+"_"+admin1+"_"+admin2+"_"+admin3+"_"+admin4
def fcode_to_class(fclass,fcode):
    """Compute the name of the class in the ontology from a geonames fcode.

    Returns '' when the fcode has no ontology mapping.  Bug fix: the original
    indexed ``fcode_dictionary[fcode]`` directly, which raised KeyError for
    the many A-class fcodes not in the dictionary (e.g. ADMD) — the
    ``None`` check below clearly intended '' for those, so use ``.get``.
    """
    c = fclass_dictionary[fclass]
    if fclass=="P":
        return dgeo+c
    c = fcode_dictionary.get(fcode)
    if c is None:
        return ''
    return dgeo+c
#
dgeo = "http://dig.isi.edu/ontology/dgeo/"
fcode_dictionary = {}
fcode_dictionary['PCLI'] = "Country"
fcode_dictionary['ADM1'] = "State1stDiv"
fcode_dictionary['ADM2'] = "CountyProvince2ndDiv"
fcode_dictionary['ADM3'] = "Community3rdDiv"
fcode_dictionary['ADM4'] = "SubCommunity4thDiv"
fclass_dictionary = {}
fclass_dictionary['P'] = "PopulatedPlace"
fclass_dictionary['A'] = "AdministrativeArea" | 37.392857 | 133 | 0.724928 |
def gn_place_uri(geonamesid,fcode,country,admin1,admin2,admin3,admin4):
    """Return URI of place from a geonames id.

    Known administrative feature codes map to a class-named path with the
    country and admin codes joined by '_'; anything else falls back to the
    raw geonames id path.
    """
    admin_paths = {
        "PCLI": ("Country", ()),
        "ADM1": ("State1stDiv", (admin1,)),
        "ADM2": ("CountyProvince2ndDiv", (admin1, admin2)),
        "ADM3": ("Community3rdDiv", (admin1, admin2, admin3)),
        "ADM4": ("SubCommunity4thDiv", (admin1, admin2, admin3, admin4)),
    }
    if fcode in admin_paths:
        class_name, admins = admin_paths[fcode]
        return "geonames/place/" + class_name + "/" + "_".join((country,) + admins)
    return "geonames/place/"+geonamesid
def gn_place_spacetimevolume_uri(class_uri):
"Return URI of SpaceTimeVolume for a class with Spacetimevolume"
return class_uri+"/SpaceTimeVolume"
def gn_place_identifier_uri(geonamesid):
"Return URI of place from a geonames id"
return "geonames/place/"+geonamesid+"/identifier"
def gn_name_uri(geonamesid,name):
"Return URI of name for a place with geonames id"
return "geonames/place/"+geonamesid+"/Name/"+name
def gn_nametype(type):
"Return Nametype of name"
return "http://dig.isi.edu/gazetteer/data/SKOS/NameTypes/"+type
def gn_select_not_populated_or_administrative(fclass):
    "Return True when the feature class is neither populated place (P) nor administrative area (A)"
    return fclass!="P" and fclass!="A"
def gn_nametype_conditional(type,condition):
"Return Nametype of name if condition is 1, used for alternamtenames which have flags for historic, colloquial,..."
if condition == 1:
return "http://dig.isi.edu/gazetteer/data/SKOS/NameTypes/"+type
return ''
def gn_countrycodeconcept_uri(country):
"Return country code concept_uri of country taken from SKOS vocabulary http://eulersharp.sourceforge.net/2003/03swap/countries"
return "http://eulersharp.sourceforge.net/2003/03swap/countries#"+country
def gn_languagecodeconcept_uri(language):
"Return language code concept_uri of language taken from SKOS vocabulary http://eulersharp.sourceforge.net/2003/03swap/languages"
if len(language) == 2:
return "http://eulersharp.sourceforge.net/2003/03swap/languages#"+language
return ''
def gn_pointgeometry_uri(place_uri):
"Return URI of PointGeometry for a place"
return place_uri+"/PointGeometry"
def gn_country_uri(country):
"Return URI for Place of class country"
return "geonames/place/Country/"+country
def gn_geojson(lat,long):
    "Return geojson point representation"
    # NOTE(review): GeoJSON (RFC 7946) orders Point coordinates as
    # [longitude, latitude]; this emits [lat, long] — confirm that downstream
    # consumers expect this (possibly swapped) order before changing it.
    return """{"type": "Point","coordinates": ["""+lat+","+long+"]}"
def gn_State1stDiv_uri(country,admin1):
"Return URI for Place of class State1stDiv"
if admin1 == None or admin1 =='00':
return ''
return "geonames/place/State1stDiv/"+country+"_"+admin1
def gn_CountyProvince2ndDiv_uri(country,admin1,admin2):
"Return URI for Place of class CountyProvince2ndDiv"
if admin2 == '' or admin2 =='00':
return ''
return "geonames/place/CountyProvince2ndDiv/"+country+"_"+admin1+"_"+admin2
def gn_Community3rdDiv_uri(country,admin1,admin2,admin3):
    """Return URI for Place of class Community3rdDiv.

    Bug fix: the original emitted a ``CountyProvince2ndDiv`` path segment
    (copy-paste from the 2nd-division helper), which disagreed with the
    ``ADM3`` branch of ``gn_place_uri`` and so produced non-matching URIs
    for the same place.  Returns '' for missing/placeholder admin3 codes.
    """
    if admin3 == '' or admin3 =='00':
        return ''
    return "geonames/place/Community3rdDiv/"+country+"_"+admin1+"_"+admin2+"_"+admin3
def gn_SubCommunity4thDiv_uri(country,admin1,admin2,admin3,admin4):
    "Return URI for Place of class SubCommunity4thDiv"
    if admin4 == '' or admin4 =='00':
        return ''
    return "geonames/place/SubCommunity4thDiv/"+country+"_"+admin1+"_"+admin2+"_"+admin3+"_"+admin4
def fcode_to_class(fclass,fcode):
    """Compute the name of the class in the ontology from a geonames fcode.

    Returns '' when the fcode has no ontology mapping.  Bug fix: the original
    indexed ``fcode_dictionary[fcode]`` directly, which raised KeyError for
    the many A-class fcodes not in the dictionary (e.g. ADMD) — the
    ``None`` check below clearly intended '' for those, so use ``.get``.
    """
    c = fclass_dictionary[fclass]
    if fclass=="P":
        return dgeo+c
    c = fcode_dictionary.get(fcode)
    if c is None:
        return ''
    return dgeo+c
#
dgeo = "http://dig.isi.edu/ontology/dgeo/"
fcode_dictionary = {}
fcode_dictionary['PCLI'] = "Country"
fcode_dictionary['ADM1'] = "State1stDiv"
fcode_dictionary['ADM2'] = "CountyProvince2ndDiv"
fcode_dictionary['ADM3'] = "Community3rdDiv"
fcode_dictionary['ADM4'] = "SubCommunity4thDiv"
fclass_dictionary = {}
fclass_dictionary['P'] = "PopulatedPlace"
fclass_dictionary['A'] = "AdministrativeArea" | 0 | 0 | 0 |
b9aee1236e3f5bb69bd587db78f5680abfa332d5 | 11,346 | py | Python | volttrontesting/fixtures/volttron_platform_fixtures.py | rmay-intwine/volttron | a449f70e32f73ff0136a838d0feddb928ede6298 | [
"Apache-2.0"
] | null | null | null | volttrontesting/fixtures/volttron_platform_fixtures.py | rmay-intwine/volttron | a449f70e32f73ff0136a838d0feddb928ede6298 | [
"Apache-2.0"
] | null | null | null | volttrontesting/fixtures/volttron_platform_fixtures.py | rmay-intwine/volttron | a449f70e32f73ff0136a838d0feddb928ede6298 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
from random import randint
import socket
import uuid
from volttrontesting.utils.platformwrapper import PlatformWrapper
from volttrontesting.utils.utils import get_hostname_and_random_port, get_rand_vip, get_rand_ip_and_port
from volttron.platform import is_rabbitmq_available
PRINT_LOG_ON_SHUTDOWN = False
HAS_RMQ = is_rabbitmq_available()
rmq_skipif = pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
@pytest.fixture(scope="module",
params=[dict(messagebus='zmq', ssl_auth=False),
rmq_skipif(dict(messagebus='rmq', ssl_auth=True))
])
# IPC testing is removed since it is not used from VOLTTRON 6.0
@pytest.fixture(scope="function")
@pytest.fixture(scope="module")
# Generic fixtures. Ideally we want to use the below instead of
# Use this fixture when you want a single instance of volttron platform for
# test
@pytest.fixture(scope="module",
params=(
dict(messagebus='zmq', ssl_auth=False),
rmq_skipif(dict(messagebus='rmq', ssl_auth=True)),
))
def volttron_instance(request, **kwargs):
"""Fixture that returns a single instance of volttron platform for testing
@param request: pytest request object
@return: volttron platform instance
"""
address = kwargs.pop("vip_address", get_rand_vip())
wrapper = build_wrapper(address,
messagebus=request.param['messagebus'],
ssl_auth=request.param['ssl_auth'],
**kwargs)
yield wrapper
cleanup_wrapper(wrapper)
# Use this fixture to get more than 1 volttron instance for test.
# Usage example:
# def test_function_that_uses_n_instances(request, get_volttron_instances):
# instances = get_volttron_instances(3)
#
# TODO allow rmq to be added to the multi platform request.
@pytest.fixture(scope="module",
params=[
dict(messagebus='zmq', ssl_auth=False)
])
def get_volttron_instances(request):
""" Fixture to get more than 1 volttron instance for test
Use this fixture to get more than 1 volttron instance for test. This
returns a function object that should be called with number of instances
as parameter to get a list of volttron instnaces. The fixture also
takes care of shutting down all the instances at the end
Example Usage:
def test_function_that_uses_n_instances(get_volttron_instances):
instance1, instance2, instance3 = get_volttron_instances(3)
@param request: pytest request object
@return: function that can used to get any number of
volttron instances for testing.
"""
all_instances = []
request.addfinalizer(cleanup)
return get_n_volttron_instances
# Use this fixture when you want a single instance of volttron platform for zmq message bus
# test
@pytest.fixture(scope="module")
def volttron_instance_zmq(request):
"""Fixture that returns a single instance of volttron platform for testing
@param request: pytest request object
@return: volttron platform instance
"""
address = get_rand_vip()
wrapper = build_wrapper(address)
yield wrapper
cleanup_wrapper(wrapper)
# Use this fixture when you want a single instance of volttron platform for rmq message bus
# test
@pytest.fixture(scope="module")
def volttron_instance_rmq(request):
"""Fixture that returns a single instance of volttron platform for testing
@param request: pytest request object
@return: volttron platform instance
"""
wrapper = None
address = get_rand_vip()
wrapper = build_wrapper(address,
messagebus='rmq',
ssl_auth=True)
yield wrapper
cleanup_wrapper(wrapper)
@pytest.fixture(scope="module",
params=[
dict(messagebus='zmq', ssl_auth=False),
rmq_skipif(dict(messagebus='rmq', ssl_auth=True))
])
@pytest.fixture(scope="module",
params=[
dict(sink='zmq_web', source='zmq'),
rmq_skipif(dict(sink='rmq_web', source='zmq')),
rmq_skipif(dict(sink='rmq_web', source='rmq')),
rmq_skipif(dict(sink='zmq_web', source='rmq'))
])
def volttron_multi_messagebus(request):
""" This fixture allows multiple two message bus types to be configured to work together
This case will create a source (where data comes from) and a sink (where data goes to) to
allow connections from source to sink to be tested for the different cases. In particular,
the case of VolttronCentralPlatform, Forwarder and DataMover agents should use this
case.
:param request:
:return:
"""
print("volttron_multi_messagebus source: {} sink: {}".format(request.param['source'],
request.param['sink']))
sink_address = get_rand_vip()
if request.param['sink'] == 'rmq_web':
hostname, port = get_hostname_and_random_port()
web_address = 'https://{hostname}:{port}'.format(hostname=hostname, port=port)
messagebus = 'rmq'
ssl_auth = True
else:
web_address = "http://{}".format(get_rand_ip_and_port())
messagebus = 'zmq'
ssl_auth = False
sink = build_wrapper(sink_address,
ssl_auth=ssl_auth,
messagebus=messagebus,
bind_web_address=web_address,
volttron_central_address=web_address)
source_address = get_rand_vip()
messagebus = 'zmq'
ssl_auth = False
if request.param['source'] == 'rmq':
messagebus = 'rmq'
ssl_auth = True
if sink.messagebus == 'rmq':
# sink_ca_file = sink.certsobj.cert_file(sink.certsobj.root_ca_name)
source = build_wrapper(source_address,
ssl_auth=ssl_auth,
messagebus=messagebus,
volttron_central_address=sink.bind_web_address,
remote_platform_ca=sink.certsobj.cert_file(sink.certsobj.root_ca_name))
if source.messagebus == 'rmq':
# The _ca is how the auth subsystem saves the remote cert from discovery. We
# are effectively doing that here instead of making the discovery call.
source.certsobj.save_remote_cert(sink.certsobj.root_ca_name + "_ca", sink.certsobj.ca_cert(
public_bytes=True))
else:
source = build_wrapper(source_address,
ssl_auth=ssl_auth,
messagebus=messagebus,
volttron_central_address=sink.bind_web_address)
yield source, sink
cleanup_wrapper(source)
cleanup_wrapper(sink)
| 35.679245 | 108 | 0.632822 | import os
import pytest
from random import randint
import socket
import uuid
from volttrontesting.utils.platformwrapper import PlatformWrapper
from volttrontesting.utils.utils import get_hostname_and_random_port, get_rand_vip, get_rand_ip_and_port
from volttron.platform import is_rabbitmq_available
PRINT_LOG_ON_SHUTDOWN = False
HAS_RMQ = is_rabbitmq_available()
rmq_skipif = pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup')
def print_log(volttron_home):
    """Dump the instance's volttron.log to stdout at shutdown, when enabled
    by PRINT_LOG_ON_SHUTDOWN and the PRINT_LOGS environment variable."""
    if not PRINT_LOG_ON_SHUTDOWN:
        return
    if not os.environ.get('PRINT_LOGS', PRINT_LOG_ON_SHUTDOWN):
        return
    log_path = volttron_home + "/volttron.log"
    if not os.path.exists(log_path):
        print('NO LOG FILE AVAILABLE.')
        return
    with open(volttron_home + "/volttron.log") as fin:
        print(fin.read())
def build_wrapper(vip_address, should_start=True, messagebus='zmq', remote_platform_ca=None,
                  instance_name=None, **kwargs):
    """Create a PlatformWrapper and optionally start the platform on *vip_address*.

    ``ssl_auth`` is popped out of **kwargs for the wrapper constructor; all
    remaining keyword arguments are forwarded to ``startup_platform`` when
    ``should_start`` is True.
    """
    wrapper = PlatformWrapper(ssl_auth=kwargs.pop('ssl_auth', False),
                              messagebus=messagebus,
                              instance_name=instance_name,
                              remote_platform_ca=remote_platform_ca)
    if should_start:
        wrapper.startup_platform(vip_address=vip_address, **kwargs)
    return wrapper
def cleanup_wrapper(wrapper):
    """Shut down a single platform wrapper (safe even if it never started)."""
    print('Shutting down instance: {0}, MESSAGE BUS: {1}'.format(wrapper.volttron_home, wrapper.messagebus))
    # if wrapper.is_running():
    #     wrapper.remove_all_agents()
    # Shutdown handles case where the platform hasn't started.
    wrapper.shutdown_platform()
def cleanup_wrappers(platforms):
    """Shut down every platform wrapper in *platforms*."""
    for wrapper in platforms:
        cleanup_wrapper(wrapper)
@pytest.fixture(scope="module",
params=[dict(messagebus='zmq', ssl_auth=False),
rmq_skipif(dict(messagebus='rmq', ssl_auth=True))
])
def volttron_instance_msgdebug(request):
print("building msgdebug instance")
wrapper = build_wrapper(get_rand_vip(),
msgdebug=True,
messagebus=request.param['messagebus'],
ssl_auth=request.param['ssl_auth'])
yield wrapper
cleanup_wrapper(wrapper)
# IPC testing is removed since it is not used from VOLTTRON 6.0
@pytest.fixture(scope="function")
def volttron_instance_encrypt(request):
print("building instance (using encryption)")
address = get_rand_vip()
wrapper = build_wrapper(address)
def cleanup():
cleanup_wrapper(wrapper)
request.addfinalizer(cleanup)
return wrapper
@pytest.fixture(scope="module")
def volttron_instance_module_web(request):
print("building module instance (using web)")
address = get_rand_vip()
web_address = "http://{}".format(get_rand_ip_and_port())
wrapper = build_wrapper(address,
bind_web_address=web_address,
messagebus='zmq',
ssl_auth=False)
yield wrapper
cleanup_wrapper(wrapper)
# Generic fixtures. Ideally we want to use the below instead of
# Use this fixture when you want a single instance of volttron platform for
# test
@pytest.fixture(scope="module",
params=(
dict(messagebus='zmq', ssl_auth=False),
rmq_skipif(dict(messagebus='rmq', ssl_auth=True)),
))
def volttron_instance(request, **kwargs):
"""Fixture that returns a single instance of volttron platform for testing
@param request: pytest request object
@return: volttron platform instance
"""
address = kwargs.pop("vip_address", get_rand_vip())
wrapper = build_wrapper(address,
messagebus=request.param['messagebus'],
ssl_auth=request.param['ssl_auth'],
**kwargs)
yield wrapper
cleanup_wrapper(wrapper)
# Use this fixture to get more than 1 volttron instance for test.
# Usage example:
# def test_function_that_uses_n_instances(request, get_volttron_instances):
# instances = get_volttron_instances(3)
#
# TODO allow rmq to be added to the multi platform request.
@pytest.fixture(scope="module",
params=[
dict(messagebus='zmq', ssl_auth=False)
])
def get_volttron_instances(request):
""" Fixture to get more than 1 volttron instance for test
Use this fixture to get more than 1 volttron instance for test. This
returns a function object that should be called with number of instances
as parameter to get a list of volttron instnaces. The fixture also
takes care of shutting down all the instances at the end
Example Usage:
def test_function_that_uses_n_instances(get_volttron_instances):
instance1, instance2, instance3 = get_volttron_instances(3)
@param request: pytest request object
@return: function that can used to get any number of
volttron instances for testing.
"""
all_instances = []
def get_n_volttron_instances(n, should_start=True, **kwargs):
get_n_volttron_instances.count = n
instances = []
for i in range(0, n):
address = kwargs.pop("vip_address", get_rand_vip())
wrapper = build_wrapper(address, should_start=should_start,
messagebus=request.param['messagebus'],
ssl_auth=request.param['ssl_auth'],
**kwargs)
instances.append(wrapper)
instances = instances if n > 1 else instances[0]
# setattr(get_n_volttron_instances, 'instances', instances)
get_n_volttron_instances.instances = instances
return instances
def cleanup():
if isinstance(get_n_volttron_instances.instances, PlatformWrapper):
print('Shutting down instance: {}'.format(
get_n_volttron_instances.instances))
cleanup_wrapper(get_n_volttron_instances.instances)
return
for i in range(0, get_n_volttron_instances.count):
print('Shutting down instance: {}'.format(
get_n_volttron_instances.instances[i].volttron_home))
cleanup_wrapper(get_n_volttron_instances.instances[i])
request.addfinalizer(cleanup)
return get_n_volttron_instances
# Use this fixture when you want a single instance of volttron platform for zmq message bus
# test
@pytest.fixture(scope="module")
def volttron_instance_zmq(request):
"""Fixture that returns a single instance of volttron platform for testing
@param request: pytest request object
@return: volttron platform instance
"""
address = get_rand_vip()
wrapper = build_wrapper(address)
yield wrapper
cleanup_wrapper(wrapper)
# Use this fixture when you want a single instance of volttron platform for rmq message bus
# test
@pytest.fixture(scope="module")
def volttron_instance_rmq(request):
"""Fixture that returns a single instance of volttron platform for testing
@param request: pytest request object
@return: volttron platform instance
"""
wrapper = None
address = get_rand_vip()
wrapper = build_wrapper(address,
messagebus='rmq',
ssl_auth=True)
yield wrapper
cleanup_wrapper(wrapper)
@pytest.fixture(scope="module",
params=[
dict(messagebus='zmq', ssl_auth=False),
rmq_skipif(dict(messagebus='rmq', ssl_auth=True))
])
def volttron_instance_web(request):
print("volttron_instance_web (messagebus {messagebus} ssl_auth {ssl_auth})".format(**request.param))
address = get_rand_vip()
if request.param['ssl_auth']:
hostname, port = get_hostname_and_random_port()
web_address = 'https://{hostname}:{port}'.format(hostname=hostname, port=port)
else:
web_address = "http://{}".format(get_rand_ip_and_port())
wrapper = build_wrapper(address,
ssl_auth=request.param['ssl_auth'],
messagebus=request.param['messagebus'],
bind_web_address=web_address,
volttron_central_address=web_address)
yield wrapper
cleanup_wrapper(wrapper)
@pytest.fixture(scope="module",
params=[
dict(sink='zmq_web', source='zmq'),
rmq_skipif(dict(sink='rmq_web', source='zmq')),
rmq_skipif(dict(sink='rmq_web', source='rmq')),
rmq_skipif(dict(sink='zmq_web', source='rmq'))
])
def volttron_multi_messagebus(request):
""" This fixture allows multiple two message bus types to be configured to work together
This case will create a source (where data comes from) and a sink (where data goes to) to
allow connections from source to sink to be tested for the different cases. In particular,
the case of VolttronCentralPlatform, Forwarder and DataMover agents should use this
case.
:param request:
:return:
"""
print("volttron_multi_messagebus source: {} sink: {}".format(request.param['source'],
request.param['sink']))
sink_address = get_rand_vip()
if request.param['sink'] == 'rmq_web':
hostname, port = get_hostname_and_random_port()
web_address = 'https://{hostname}:{port}'.format(hostname=hostname, port=port)
messagebus = 'rmq'
ssl_auth = True
else:
web_address = "http://{}".format(get_rand_ip_and_port())
messagebus = 'zmq'
ssl_auth = False
sink = build_wrapper(sink_address,
ssl_auth=ssl_auth,
messagebus=messagebus,
bind_web_address=web_address,
volttron_central_address=web_address)
source_address = get_rand_vip()
messagebus = 'zmq'
ssl_auth = False
if request.param['source'] == 'rmq':
messagebus = 'rmq'
ssl_auth = True
if sink.messagebus == 'rmq':
# sink_ca_file = sink.certsobj.cert_file(sink.certsobj.root_ca_name)
source = build_wrapper(source_address,
ssl_auth=ssl_auth,
messagebus=messagebus,
volttron_central_address=sink.bind_web_address,
remote_platform_ca=sink.certsobj.cert_file(sink.certsobj.root_ca_name))
if source.messagebus == 'rmq':
# The _ca is how the auth subsystem saves the remote cert from discovery. We
# are effectively doing that here instead of making the discovery call.
source.certsobj.save_remote_cert(sink.certsobj.root_ca_name + "_ca", sink.certsobj.ca_cert(
public_bytes=True))
else:
source = build_wrapper(source_address,
ssl_auth=ssl_auth,
messagebus=messagebus,
volttron_central_address=sink.bind_web_address)
yield source, sink
cleanup_wrapper(source)
cleanup_wrapper(sink)
| 4,122 | 0 | 234 |
a705e3342019f5dbc9e7c2702cdad80b86ca87ab | 592 | py | Python | touches/migrations/0002_auto_20170713_0001.py | brains-on-art/touch-helsinki-web | 87d12d1f0a4d0b0d2754bd7ccb7a19b4b0f68061 | [
"MIT"
] | null | null | null | touches/migrations/0002_auto_20170713_0001.py | brains-on-art/touch-helsinki-web | 87d12d1f0a4d0b0d2754bd7ccb7a19b4b0f68061 | [
"MIT"
] | null | null | null | touches/migrations/0002_auto_20170713_0001.py | brains-on-art/touch-helsinki-web | 87d12d1f0a4d0b0d2754bd7ccb7a19b4b0f68061 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 21:01
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
| 26.909091 | 164 | 0.66723 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 21:01
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Delta.lattice`` to a 60x40 nested float ArrayField
    (60 outer rows of 40 floats each)."""

    dependencies = [
        ('touches', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='delta',
            name='lattice',
            field=django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.FloatField(), size=40), size=60),
        ),
    ]
| 0 | 375 | 23 |
670fba351ef929c641d2ec1a4368fad39a96acf1 | 6,869 | py | Python | tests/test_genome_tools.py | Wang-Cankun/lisa2 | 2407cc3c12f43bf41f0e14b2a8a5fcdfe07ff310 | [
"MIT"
] | 17 | 2020-09-21T20:04:43.000Z | 2022-01-15T11:25:41.000Z | tests/test_genome_tools.py | Wang-Cankun/lisa2 | 2407cc3c12f43bf41f0e14b2a8a5fcdfe07ff310 | [
"MIT"
] | 5 | 2020-09-24T22:08:54.000Z | 2021-07-24T02:45:53.000Z | tests/test_genome_tools.py | Wang-Cankun/lisa2 | 2407cc3c12f43bf41f0e14b2a8a5fcdfe07ff310 | [
"MIT"
] | 5 | 2021-02-16T13:16:34.000Z | 2022-03-08T16:15:25.000Z | import unittest
from lisa.core import genome_tools
import numpy as np
if __name__ == '__main__':
unittest.main()
| 32.709524 | 146 | 0.596885 | import unittest
from lisa.core import genome_tools
import numpy as np
class TestRegionObject(unittest.TestCase):
    """Unit tests for genome_tools.Region: construction, overlap, distance, equality."""
    def test_invalid_region(self):
        """A region whose start exceeds its end is rejected with AssertionError."""
        with self.assertRaises(AssertionError):
            genome_tools.Region('chr1',10, 5)
    def test_overlap_different_chr(self):
        """Regions on different chromosomes never overlap."""
        self.assertFalse(
            genome_tools.Region('chr1',5,10).overlaps(genome_tools.Region('chr2',5,10))
        )
    def test_overlap_negative(self):
        """Disjoint regions on the same chromosome do not overlap."""
        self.assertFalse(
            genome_tools.Region('chr1',5,10).overlaps(genome_tools.Region('chr1',20,25))
        )
    def test_overlap_negative_abutted(self):
        """Regions that merely touch end-to-start (half-open) do not overlap."""
        self.assertFalse(
            genome_tools.Region('chr1',5,10).overlaps(genome_tools.Region('chr1',10,20))
        )
    def test_overlap_any(self):
        """Partial overlap counts; min_overlap_proportion raises the threshold."""
        self.assertTrue(
            genome_tools.Region('chr1',5,10).overlaps(genome_tools.Region('chr1',8,15))
        )
        # 2/10 overlap is below the 0.4 threshold...
        self.assertFalse(
            genome_tools.Region('chr1',1,11).overlaps(genome_tools.Region('chr1',9,15), min_overlap_proportion=0.4)
        )
        # ...while 5/10 overlap passes it.
        self.assertTrue(
            genome_tools.Region('chr1',1,11).overlaps(genome_tools.Region('chr1',6,15), min_overlap_proportion=0.4)
        )
    def test_distance_function(self):
        """Genomic distance between same-chromosome regions (here 100)."""
        self.assertEqual(
            genome_tools.Region('chr1',20, 30).get_genomic_distance(genome_tools.Region('chr1', 120, 130)), 100)
    def test_regions_equal(self):
        """Regions with identical chrom/start/end compare equal."""
        self.assertTrue(
            genome_tools.Region('chr1',10,15) == genome_tools.Region('chr1', 10, 15)
        )
class TestGenomeObject(unittest.TestCase):
    """Unit tests for genome_tools.Genome: windowing, bounds checks, genome-to-genome mapping."""
    def setUp(self):
        """Build a 3-chromosome genome (window 100) plus sorted/unsorted variants
        and the expected window index mapping between them."""
        self.genome = genome_tools.Genome(['chr1','chr2','chr3'], [450, 375, 600], window_size=100)
        self.unsorted_genome = genome_tools.Genome(['chr1','chr3','chr2'],[200, 300, 250], window_size= 100, _sort=False)
        self.sorted_genome = genome_tools.Genome(['chr1','chr3','chr2'],[200, 400, 250], window_size= 100, _sort=True)
        # (unsorted window idx, sorted window idx) pairs expected from map_genomes.
        self.correct_mapping = np.array([
            (0,0),
            (1,1),
            (2,5),
            (3,6),
            (4,7),
            (5,2),
            (6,3),
            (7,4),
        ])
    def test_genome_genome_mapping(self):
        """map_genomes reproduces the expected window-index pairing."""
        self.assertTrue(
            np.all(self.unsorted_genome.map_genomes(self.sorted_genome) == self.correct_mapping)
        )
    def test_indptr(self):
        """indptr marks cumulative window counts per chromosome: 5, 4, 6 windows."""
        self.assertEqual(
            tuple(self.genome.indptr), (0,5,9,15)
        )
    def test_region_check(self):
        """check_region rejects unknown chromosomes, negative starts, and out-of-range ends."""
        with self.assertRaises(genome_tools.BadRegionError):
            self.genome.check_region(genome_tools.Region('chr4',10,20))
        with self.assertRaises(genome_tools.BadRegionError):
            self.genome.check_region(genome_tools.Region('chr1',-1, 20))
        with self.assertRaises(genome_tools.BadRegionError):
            self.genome.check_region(genome_tools.Region('chr2', 300, 400))
    def test_chromlen(self):
        """get_chromlen returns the declared chromosome length."""
        self.assertEqual(
            self.genome.get_chromlen('chr2'), 375
        )
    def test_get_num_windows(self):
        """Window counts: chr1 (450bp / 100) -> 5 windows; whole genome -> 15."""
        self.assertEqual(
            self.genome.get_num_windows(self.genome.get_chromlen('chr1'), self.genome.window_size), 5
        )
        self.assertEqual(
            self.genome.num_windows_in_genome(), 15
        )
    def test_get_window_from_position(self):
        """Position lookup returns the enclosing window; next window follows and
        is clipped to the chromosome end (chr2 ends at 375)."""
        region, window_idx = self.genome.get_window_from_position('chr2', 250)
        self.assertTrue(
            region == genome_tools.Region('chr2', 200, 300)
        )
        next_region, next_window_idx = self.genome.get_next_window(region)
        self.assertTrue(
            next_region == genome_tools.Region('chr2',300,375)
        )
        self.assertTrue(
            window_idx == (next_window_idx - 1)
        )
    def test_get_window_idx(self):
        """get_region maps flat window indices across chromosome boundaries;
        index 15 is past the last window and must fail."""
        self.assertTrue(
            self.genome.get_region(0)[0] == genome_tools.Region('chr1', 0, 100)
        )
        self.assertTrue(
            self.genome.get_region(4)[0] == genome_tools.Region('chr1',400,450)
        )
        self.assertTrue(
            self.genome.get_region(5)[0] == genome_tools.Region('chr2',0,100)
        )
        self.assertTrue(
            self.genome.get_region(14)[0] == genome_tools.Region('chr3',500,600)
        )
        with self.assertRaises(AssertionError):
            self.genome.get_region(15)
class TestRegionSet(unittest.TestCase):
    """Unit tests for genome_tools.RegionSet intersection and window mapping."""
    def setUp(self):
        self.genome = genome_tools.Genome(['chr1','chr2','chr3'], [450, 375, 600], window_size=50)
        # Same chromosomes, different declared order; results must match.
        self.scrambled_genome = genome_tools.Genome(['chr1','chr3','chr2'], [450, 600, 375], window_size=50)
        self.regions1A = [
            genome_tools.Region('chr1',20,40),
            genome_tools.Region('chr1',30,60),
            genome_tools.Region('chr1',210,230),
            genome_tools.Region('chr2',100,150),
            genome_tools.Region('chr2',220,233),
            genome_tools.Region('chr3',430,450),
        ]
        # regions1B is an identical copy of regions1A, so the expected
        # distance matrix below is symmetric with a zero diagonal.
        self.regions1B = [
            genome_tools.Region('chr1',20,40),
            genome_tools.Region('chr1',30,60),
            genome_tools.Region('chr1',210,230),
            genome_tools.Region('chr2',100,150),
            genome_tools.Region('chr2',220,233),
            genome_tools.Region('chr3',430,450),
        ]
        # Expected dense distance matrix for regions1A x regions1B.
        self.auto_distancing_truth = np.array(
            [[0,15,0,0,0,0],
            [15,0,175,0,0,0],
            [0,175,0,0,0,0],
            [0,0,0,0,101,0],
            [0,0,0,101,0,0],
            [0,0,0,0,0,0]
            ]
        )
        # Expected (window index, region index) pairs from map_genomic_windows.
        self.m2m_map_truth = np.array([
            (0,0),
            (0,1),
            (1,1),
            (4,2),
            (11,3),
            (13,4),
            (25,5)
        ])
    def test_auto_distancing(self):
        distance_matrix = genome_tools.RegionSet(self.regions1A, self.genome).map_intersects(genome_tools.RegionSet(self.regions1B, self.genome),
            lambda x,y : x.get_genomic_distance(y), slop_distance=75)
        self.assertTrue(
            np.all(np.array(distance_matrix.todense()).astype(int) == self.auto_distancing_truth)
        )
    def test_auto_distancing_scrambled(self):
        # Chromosome declaration order must not change the result.
        distance_matrix = genome_tools.RegionSet(self.regions1A, self.scrambled_genome)\
            .map_intersects(genome_tools.RegionSet(self.regions1B, self.scrambled_genome),
            lambda x,y : x.get_genomic_distance(y), slop_distance=75)
        self.assertTrue(
            np.all(np.array(distance_matrix.todense()).astype(int) == self.auto_distancing_truth)
        )
    def test_genome_bin_mapping(self):
        m2m_map = genome_tools.RegionSet(self.regions1A, self.genome)\
            .map_genomic_windows(min_window_overlap_proportion=0.0, regions_to_bins=False)
        self.assertTrue(
            np.all(m2m_map == self.m2m_map_truth)
        )
if __name__ == '__main__':
unittest.main()
| 6,104 | 60 | 586 |
fc2712a021a30d0196ad358b3f8763ba6dee7663 | 1,982 | py | Python | examples/filter_examples/run_rotate_MNIST_example.py | ohtu-projekti-dataproblemsemulator/dataproblemsemulator | 58170716b44c4e4dee639451977f352b3c68053c | [
"MIT"
] | 2 | 2019-12-13T09:58:49.000Z | 2020-02-10T10:37:17.000Z | examples/filter_examples/run_rotate_MNIST_example.py | ohtu-projekti-dataproblemsemulator/dataproblemsemulator | 58170716b44c4e4dee639451977f352b3c68053c | [
"MIT"
] | 199 | 2019-05-19T17:48:39.000Z | 2022-03-11T23:56:15.000Z | examples/filter_examples/run_rotate_MNIST_example.py | thalvari/dpEmu-AutoML | b24eac686fae4147264c1ccc8169fd96b1875577 | [
"MIT"
] | 5 | 2019-10-02T23:14:05.000Z | 2020-05-28T16:23:22.000Z | # MIT License
#
# Copyright (c) 2019 Tuomas Halvari, Juha Harviainen, Juha Mylläri, Antti Röyskö, Juuso Silvennoinen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import matplotlib.pyplot as plt
from dpemu.nodes import Array, Series
from dpemu.filters.image import Rotation
from dpemu.dataset_utils import load_mnist
def main():
    """An example that rotates MNIST digits and displays one.
    Usage: python run_rotate_MNIST_example <angle>
    where <angle> is the angle of rotation
    (e.g. 90 to rotate by pi / 2)
    """
    x, _, _, _ = load_mnist()
    xs = x[:20]  # small subset of x
    # NOTE(review): raises IndexError if no CLI argument is given and
    # ValueError if it is not numeric; acceptable for an example script.
    angle = float(sys.argv[1])
    print(f"x subset shape: {xs.shape}")
    # Array(reshape=(28, 28)) — presumably restores the 2-D digit image
    # for the filter; Series iterates the node over the whole subset.
    img_node = Array(reshape=(28, 28))
    root_node = Series(img_node)
    img_node.addfilter(Rotation("angle"))
    result = root_node.generate_error(xs, {'angle': angle})
    # Display the first errorified digit.
    plt.matshow(result[0].reshape((28, 28)))
    plt.show()
if __name__ == "__main__":
    main()
| 38.115385 | 100 | 0.729566 | # MIT License
#
# Copyright (c) 2019 Tuomas Halvari, Juha Harviainen, Juha Mylläri, Antti Röyskö, Juuso Silvennoinen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import matplotlib.pyplot as plt
from dpemu.nodes import Array, Series
from dpemu.filters.image import Rotation
from dpemu.dataset_utils import load_mnist
def main():
    """An example that rotates MNIST digits and displays one.
    Usage: python run_rotate_MNIST_example <angle>
    where <angle> is the angle of rotation
    (e.g. 90 to rotate by pi / 2)
    """
    x, _, _, _ = load_mnist()
    xs = x[:20]  # small subset of x
    # NOTE(review): raises IndexError if no CLI argument is given and
    # ValueError if it is not numeric; acceptable for an example script.
    angle = float(sys.argv[1])
    print(f"x subset shape: {xs.shape}")
    # Array(reshape=(28, 28)) — presumably restores the 2-D digit image
    # for the filter; Series iterates the node over the whole subset.
    img_node = Array(reshape=(28, 28))
    root_node = Series(img_node)
    img_node.addfilter(Rotation("angle"))
    result = root_node.generate_error(xs, {'angle': angle})
    # Display the first errorified digit.
    plt.matshow(result[0].reshape((28, 28)))
    plt.show()
if __name__ == "__main__":
    main()
| 0 | 0 | 0 |
842f6805d9c28ae285c0a1c754ce08a421ea33e9 | 869 | py | Python | meta.py | Muroger/SVHNClassifier_pytorch | 608e2fc4d7eee0966949533195ad91abca6cf323 | [
"MIT"
] | 177 | 2017-04-02T18:13:13.000Z | 2022-02-22T05:51:06.000Z | meta.py | Muroger/SVHNClassifier_pytorch | 608e2fc4d7eee0966949533195ad91abca6cf323 | [
"MIT"
] | 9 | 2018-01-03T13:52:52.000Z | 2022-02-28T09:03:01.000Z | meta.py | Muroger/SVHNClassifier_pytorch | 608e2fc4d7eee0966949533195ad91abca6cf323 | [
"MIT"
] | 47 | 2017-04-05T09:02:14.000Z | 2021-12-27T05:41:23.000Z | import json
| 32.185185 | 70 | 0.560414 | import json
class Meta(object):
    """Bookkeeping for dataset split sizes, persisted as a small JSON file.

    The on-disk layout is
    ``{"num_examples": {"train": ..., "val": ..., "test": ...}}``.
    """
    def __init__(self):
        # Counts are unknown until the caller fills them in.
        self.num_train_examples = None
        self.num_val_examples = None
        self.num_test_examples = None
    def save(self, path_to_json_file):
        """Write the three split sizes to ``path_to_json_file`` as JSON."""
        payload = {
            'num_examples': {
                'train': self.num_train_examples,
                'val': self.num_val_examples,
                'test': self.num_test_examples,
            }
        }
        with open(path_to_json_file, 'w') as fp:
            json.dump(payload, fp)
    def load(self, path_to_json_file):
        """Populate the split sizes from a JSON file written by :meth:`save`."""
        with open(path_to_json_file, 'r') as fp:
            counts = json.load(fp)['num_examples']
        self.num_train_examples = counts['train']
        self.num_val_examples = counts['val']
        self.num_test_examples = counts['test']
| 755 | -2 | 103 |
fc5338c0a141643d797fc75f3db3886647efd1e0 | 2,091 | py | Python | observation/tests/StoreObsTest.py | CUrW-SL/cfcwm-cms | 3888e724800395c478f1b63dab9f77d0afa3b2c4 | [
"Apache-2.0"
] | null | null | null | observation/tests/StoreObsTest.py | CUrW-SL/cfcwm-cms | 3888e724800395c478f1b63dab9f77d0afa3b2c4 | [
"Apache-2.0"
] | null | null | null | observation/tests/StoreObsTest.py | CUrW-SL/cfcwm-cms | 3888e724800395c478f1b63dab9f77d0afa3b2c4 | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import logging
import logging.config
import os
import sys
import traceback
from os.path import join as pjoin
from subprocess import Popen, PIPE
import unittest2 as unittest
| 35.440678 | 106 | 0.599235 | import datetime
import json
import logging
import logging.config
import os
import sys
import traceback
from os.path import join as pjoin
from subprocess import Popen, PIPE
import unittest2 as unittest
class StoreObsTest(unittest.TestCase):
    """Smoke test that runs StoreObs.py as a subprocess over a fixed
    2017-11-20 00:00-12:00 window and echoes its stdout."""
    @classmethod
    def setUpClass(cls):
        try:
            cls.root_dir = os.path.dirname(os.path.realpath(__file__))
            # config = json.loads(open(pjoin(cls.root_dir, '../config/CONFIG.json')).read())
            # Initialize Logger
            logging_config = json.loads(open(pjoin(cls.root_dir, '../config/LOGGING_CONFIG.json')).read())
            logging.config.dictConfig(logging_config)
            cls.logger = logging.getLogger('StoreObsTest')
            cls.logger.addHandler(logging.StreamHandler())
            cls.logger.info('setUpClass')
            cls.run_start_date = datetime.datetime(2017, 11, 20, 0, 0, 0)
            cls.run_end_date = datetime.datetime(2017, 11, 20, 12, 0, 0)
        except Exception as e:
            # NOTE(review): setup failures are only logged; cls.logger is
            # then never assigned, so later fixture calls raise AttributeError.
            logging.error(e)
            traceback.print_exc()
    @classmethod
    def tearDownClass(cls):
        cls.logger.info('tearDownClass')
    def setUp(self):
        self.logger.info('setUp')
    def tearDown(self):
        self.logger.info('tearDown')
    def test_runScriptForAll(self):
        self.logger.info('runScriptForAll')
        # Build the script command line: start/end date and time, the -f
        # flag, and mode 'all'; then run it and echo its stdout.
        execList = ['python', pjoin(self.root_dir, '../StoreObs.py')]
        execList = execList + ['-s', self.run_start_date.strftime("%Y-%m-%d")]
        execList = execList + ['--start-time', self.run_start_date.strftime("%H:%M:%S")]
        execList = execList + ['-f']
        execList = execList + ['-e', self.run_end_date.strftime("%Y-%m-%d")]
        execList = execList + ['--end-time', self.run_end_date.strftime("%H:%M:%S")]
        execList = execList + ['-m', 'all']
        print('*********************************************************')
        print('>>>', execList, '\n')
        process = Popen(execList, stdout=PIPE, stdin=PIPE)
        for line in process.stdout.readlines():
            print(line)
        process.wait()
| 1,680 | 185 | 23 |
92e9b61ef393b8e0b1994c6564135ea55fd6308a | 3,896 | py | Python | emmet/workflows/tests/test_property_workflows.py | kmu/emmet | 97d48616da5890ad3d44c9bfd6e65696449105b4 | [
"BSD-3-Clause-LBNL"
] | 2 | 2020-04-08T19:01:16.000Z | 2020-04-26T04:09:53.000Z | emmet/workflows/tests/test_property_workflows.py | kmu/emmet | 97d48616da5890ad3d44c9bfd6e65696449105b4 | [
"BSD-3-Clause-LBNL"
] | null | null | null | emmet/workflows/tests/test_property_workflows.py | kmu/emmet | 97d48616da5890ad3d44c9bfd6e65696449105b4 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-09-30T16:57:19.000Z | 2021-09-30T16:57:19.000Z | import os
import unittest
from maggma.stores import MemoryStore
from maggma.runner import Runner
from maggma.builders import Builder
from emmet.workflows.property_workflows import PropertyWorkflowBuilder,\
get_elastic_wf_builder
from pymatgen.util.testing import PymatgenTest
from atomate.vasp.workflows.presets.core import wf_elastic_constant
from fireworks import LaunchPad, Workflow
from monty.tempfile import ScratchDir
from monty.serialization import dumpfn, loadfn
__author__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
if __name__ == "__main__":
unittest.main()
| 36.411215 | 113 | 0.665041 | import os
import unittest
from maggma.stores import MemoryStore
from maggma.runner import Runner
from maggma.builders import Builder
from emmet.workflows.property_workflows import PropertyWorkflowBuilder,\
get_elastic_wf_builder
from pymatgen.util.testing import PymatgenTest
from atomate.vasp.workflows.presets.core import wf_elastic_constant
from fireworks import LaunchPad, Workflow
from monty.tempfile import ScratchDir
from monty.serialization import dumpfn, loadfn
__author__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
class TestPropertyWorkflowBuilder(unittest.TestCase):
    """Tests for PropertyWorkflowBuilder using in-memory maggma stores and
    a throwaway 'test_emmet' LaunchPad that is reset before every test."""
    @classmethod
    def setUpClass(cls):
        materials = MemoryStore("materials")
        materials.connect()
        docs = []
        for n, mat_string in enumerate(["Si", "Sn", "TiO2", "VO2"]):
            docs.append({"task_id": n, "structure": PymatgenTest.get_structure(mat_string).as_dict()})
        materials.update(docs, key='task_id')
        elasticity = MemoryStore("elasticity")
        elasticity.connect()
        # Only the first material (task_id 0) already has elasticity data.
        elasticity.update(docs[0:1], key="task_id")
        cls.materials = materials
        cls.elasticity = elasticity
    def setUp(self):
        # Fresh LaunchPad per test; require_password=False permits reset.
        lpad = LaunchPad(name="test_emmet")
        lpad.reset('', require_password=False)
        self.lpad = lpad
        self.nofilter = PropertyWorkflowBuilder(
            self.elasticity, self.materials, wf_elastic_constant, material_filter=None, lpad=self.lpad)
        self.nofilter.connect()
        self.filter = PropertyWorkflowBuilder(
            self.elasticity,
            self.materials,
            wf_elastic_constant,
            material_filter={"task_id": {
                "$lt": 3
            }},
            lpad=self.lpad)
        self.filter.connect()
    def test_serialization(self):
        # Test invocation from string method
        builder = PropertyWorkflowBuilder(
            self.elasticity,
            self.materials,
            "emmet.workflows.property_workflows.generate_elastic_workflow",
            lpad=self.lpad)
        serialized = builder.as_dict()
        new = PropertyWorkflowBuilder.from_dict(serialized)
        self.assertEqual(new._wf_function_string, "emmet.workflows.property_workflows.generate_elastic_workflow")
        with ScratchDir('.'):
            dumpfn(builder, "builder.json")
            new_builder = loadfn("builder.json")
            self.assertTrue(isinstance(new_builder, Builder))
    def test_get_items(self):
        # No filter
        self.assertEqual(len(list(self.nofilter.get_items())), 4)
        # elasticity filter
        self.assertEqual(len(list(self.filter.get_items())), 3)
    def test_process_items(self):
        for item in self.nofilter.get_items():
            processed = self.nofilter.process_item(item)
            if processed:
                self.assertTrue(isinstance(processed, Workflow))
                self.assertTrue(item[0]['task_id'] in processed.metadata['tags'])
            else:
                # task_id 0 already has elasticity data, so no workflow.
                self.assertEqual(item[0]['task_id'], 0)
    def test_update_targets(self):
        processed = [self.nofilter.process_item(item) for item in self.nofilter.get_items()]
        self.nofilter.update_targets(processed)
        self.assertEqual(self.lpad.workflows.count(), 3)
    def test_runner_pipeline(self):
        runner = Runner([self.nofilter])
        runner.run()
        self.assertEqual(self.lpad.workflows.count(), 3)
        # Ensure no further updates
        runner.run()
        self.assertEqual(self.lpad.workflows.count(), 3)
    def test_elastic_wf_builder(self):
        el_wf_builder = get_elastic_wf_builder(self.elasticity, self.materials, self.lpad)
        self.assertEqual(len(list(el_wf_builder.get_items())), 4)
        # TODO: Test the functionality of this builder
if __name__ == "__main__":
unittest.main()
| 2,950 | 264 | 23 |
0a95d4a1567f2240b60061ccfe32a3b346555780 | 57 | py | Python | p97.py | brandonpelfrey/project-euler | 2004720e1545e554bdefc0de3898f6dbddf731f8 | [
"MIT"
] | null | null | null | p97.py | brandonpelfrey/project-euler | 2004720e1545e554bdefc0de3898f6dbddf731f8 | [
"MIT"
] | null | null | null | p97.py | brandonpelfrey/project-euler | 2004720e1545e554bdefc0de3898f6dbddf731f8 | [
"MIT"
] | null | null | null | n = 28433 * pow(2,7830457,10**10) + 1
# Print the last ten digits of n (Project Euler 97). Fixed the Python-2-only
# print statement; print(x) with a single argument behaves identically.
print(str(n)[-10:])
| 19 | 37 | 0.578947 | n = 28433 * pow(2,7830457,10**10) + 1
# Print the last ten digits of n (Project Euler 97). Fixed the Python-2-only
# print statement; print(x) with a single argument behaves identically.
print(str(n)[-10:])
| 0 | 0 | 0 |
2e620c4667c5e606e3b7a171d1cd8ca4819552f8 | 4,848 | py | Python | backend/app/core/routers/logic/tag_logic.py | Hawangledt/template-projeto-selecao-back | 2c90236ebac48c7018b8a36562aec1eb3b200825 | [
"MIT"
] | null | null | null | backend/app/core/routers/logic/tag_logic.py | Hawangledt/template-projeto-selecao-back | 2c90236ebac48c7018b8a36562aec1eb3b200825 | [
"MIT"
] | null | null | null | backend/app/core/routers/logic/tag_logic.py | Hawangledt/template-projeto-selecao-back | 2c90236ebac48c7018b8a36562aec1eb3b200825 | [
"MIT"
] | null | null | null | from fastapi import HTTPException
from sqlalchemy.orm import Session
from core.models.table import TagDB, TagInRepoDB, RepoDB
from core.models.schema import TagCreate
from core.models.schema import TagInRepoCreate
def _create_tag(db: Session, tag_name: str, user_id: int):
""" Creates a new tag in the database
Args:
db (Session): sqlAlchemy connection object
tag (TagCreate): Schema for creating a tag in database
Raises:
HTTPException: 422, Tag name cannot be empty
Returns:
sql_object : Tag data
"""
if tag_name.strip() == "":
raise HTTPException(
status_code=422, detail="Tag name cannot be empty")
db_tag = TagDB(name=tag_name, auth_id=user_id)
db.add(db_tag)
db.commit()
db.refresh(db_tag)
return db_tag
def _get_tag_by_name(tag_name: str, auth_id: int, db: Session):
""" Returns tag data by passing the tag name
Args:
tag_name (str): Tag Name
auth_id (int): User Id
db (Session): sqlAlchemy connection object
Returns:
sql_object : Tag data
"""
return db.query(TagDB).filter(TagDB.name == tag_name,
TagDB.auth_id == auth_id).first()
def _get_tag_by_id(tag_id: int, db: Session):
""" Returns tag data by passing the tag id
Args:
tag_id (int): Tag id
db (Session): sqlAlchemy connection object
Returns:
sql_object : Tag data
"""
return db.query(TagDB).filter(TagDB.id == tag_id).first()
def _get_all_tags(db: Session, auth_id: int):
    """ Returns data for all tags owned by the given user

    Args:
        db (Session): sqlAlchemy connection object
        auth_id (int): User id

    Returns:
        sql_object : All tags data for the given user
    """
    # Bug fix: the original filter compared the local variable with itself
    # (``auth_id == auth_id``), which is always true, so every user's tags
    # were returned. Filter on the TagDB.auth_id column instead.
    return db.query(TagDB).filter(TagDB.auth_id == auth_id).all()
def _add_tag_in_repo(tag_in_repo: TagInRepoCreate, db: Session):
""" Adds a tag's link to a repository
Args:
tag_in_repo (TagInRepoCreate): Schema for creating a relationship
between a tag and a repository
db (Session): sqlAlchemy connection object
Returns:
sql_object: Tag associated with the repository
"""
db_tag_in_repo = _get_tag_in_repo(repo_id=tag_in_repo.repo_id,
tag_id=tag_in_repo.tag_id,
db=db)
if db_tag_in_repo:
raise HTTPException(
status_code=400, detail="Tag is already associated")
db_tag_in_repo = TagInRepoDB(repo_id=tag_in_repo.repo_id,
tag_id=tag_in_repo.tag_id)
db.add(db_tag_in_repo)
db.commit()
db.refresh(db_tag_in_repo)
return db_tag_in_repo
def _get_tag_in_repo(repo_id: int, tag_id: int, db: Session):
    """ Returns a tag's link data to a repository, validating both ends

    Args:
        repo_id (int): Repository ID
        tag_id (int): Tag ID
        db (Session): sqlAlchemy connection object

    Raises:
        HTTPException: 404, Tag not found
        HTTPException: 404, Repository not found

    Returns:
        sql_object: Tag/repository association row, or ``None`` when the
        tag and repository both exist but are not linked
    """
    tag_in_repo_db = db.query(TagInRepoDB).filter(
        TagInRepoDB.repo_id == repo_id,
        TagInRepoDB.tag_id == tag_id).first()
    # Look up the tag and repository themselves so a missing entity is
    # reported as 404 instead of silently returning None.
    user_has_the_tag = db.query(TagDB).filter(
        TagDB.id == tag_id).first()
    user_has_the_repo = db.query(RepoDB).filter(
        RepoDB.id == repo_id).first()
    if not user_has_the_tag:
        raise HTTPException(
            status_code=404, detail="Tag not found")
    elif not user_has_the_repo:
        raise HTTPException(
            status_code=404, detail="Repository not found"
        )
    else:
        return tag_in_repo_db
def _get_all_tags_in_repo(repo_id: int, db: Session):
""" Returns data for all tag's link data to a repository
Args:
repo_id (int): Repository ID
db (Session): sqlAlchemy connection object
Returns:
sql_object: All tag relationships associated with the repository
"""
tags_in_repo = db.query(TagInRepoDB).filter(
TagInRepoDB.repo_id == repo_id).all()
tags = []
for tag in tags_in_repo:
tag_data = _get_tag_by_id(
tag_id=tag.tag_id, db=db)
tag_info = {
"id": tag_data.id,
"name": tag_data.name
}
tags.append(tag_info)
return tags
def _remove_tag_in_repo(tag_in_repo_id: int, db: Session):
""" Removes a tag's link to a repository
Args:
tag_in_repo_id (int): Relationship id between a tag and a repository
db (Session): sqlAlchemy connection object
"""
db_tag_in_repo = db.query(TagInRepoDB).filter(
TagInRepoDB.id == tag_in_repo_id).first()
db.delete(db_tag_in_repo)
db.commit()
| 28.186047 | 76 | 0.634901 | from fastapi import HTTPException
from sqlalchemy.orm import Session
from core.models.table import TagDB, TagInRepoDB, RepoDB
from core.models.schema import TagCreate
from core.models.schema import TagInRepoCreate
def _create_tag(db: Session, tag_name: str, user_id: int):
""" Creates a new tag in the database
Args:
db (Session): sqlAlchemy connection object
tag (TagCreate): Schema for creating a tag in database
Raises:
HTTPException: 422, Tag name cannot be empty
Returns:
sql_object : Tag data
"""
if tag_name.strip() == "":
raise HTTPException(
status_code=422, detail="Tag name cannot be empty")
db_tag = TagDB(name=tag_name, auth_id=user_id)
db.add(db_tag)
db.commit()
db.refresh(db_tag)
return db_tag
def _get_tag_by_name(tag_name: str, auth_id: int, db: Session):
""" Returns tag data by passing the tag name
Args:
tag_name (str): Tag Name
auth_id (int): User Id
db (Session): sqlAlchemy connection object
Returns:
sql_object : Tag data
"""
return db.query(TagDB).filter(TagDB.name == tag_name,
TagDB.auth_id == auth_id).first()
def _get_tag_by_id(tag_id: int, db: Session):
""" Returns tag data by passing the tag id
Args:
tag_id (int): Tag id
db (Session): sqlAlchemy connection object
Returns:
sql_object : Tag data
"""
return db.query(TagDB).filter(TagDB.id == tag_id).first()
def _get_all_tags(db: Session, auth_id: int):
    """ Returns data for all tags owned by the given user

    Args:
        db (Session): sqlAlchemy connection object
        auth_id (int): User id

    Returns:
        sql_object : All tags data for the given user
    """
    # Bug fix: the original filter compared the local variable with itself
    # (``auth_id == auth_id``), which is always true, so every user's tags
    # were returned. Filter on the TagDB.auth_id column instead.
    return db.query(TagDB).filter(TagDB.auth_id == auth_id).all()
def _add_tag_in_repo(tag_in_repo: TagInRepoCreate, db: Session):
""" Adds a tag's link to a repository
Args:
tag_in_repo (TagInRepoCreate): Schema for creating a relationship
between a tag and a repository
db (Session): sqlAlchemy connection object
Returns:
sql_object: Tag associated with the repository
"""
db_tag_in_repo = _get_tag_in_repo(repo_id=tag_in_repo.repo_id,
tag_id=tag_in_repo.tag_id,
db=db)
if db_tag_in_repo:
raise HTTPException(
status_code=400, detail="Tag is already associated")
db_tag_in_repo = TagInRepoDB(repo_id=tag_in_repo.repo_id,
tag_id=tag_in_repo.tag_id)
db.add(db_tag_in_repo)
db.commit()
db.refresh(db_tag_in_repo)
return db_tag_in_repo
def _get_tag_in_repo(repo_id: int, tag_id: int, db: Session):
""" Returns a tag's link data to a repository
Args:
repo_id (int): Repository ID
tag_id (int): Tag ID
db (Session): sqlAlchemy connection object
Returns:
sql_object: Tag associated with the repository
Raises:
HTTPException: 404, Tag not found
HTTPException: 404, Repository not found
Returns:
sql_object: Tag associated with the repository
"""
tag_in_repo_db = db.query(TagInRepoDB).filter(
TagInRepoDB.repo_id == repo_id,
TagInRepoDB.tag_id == tag_id).first()
user_has_the_tag = db.query(TagDB).filter(
TagDB.id == tag_id).first()
user_has_the_repo = db.query(RepoDB).filter(
RepoDB.id == repo_id).first()
if not user_has_the_tag:
raise HTTPException(
status_code=404, detail="Tag not found")
elif not user_has_the_repo:
raise HTTPException(
status_code=404, detail="Repository not found"
)
else:
return tag_in_repo_db
def _get_all_tags_in_repo(repo_id: int, db: Session):
""" Returns data for all tag's link data to a repository
Args:
repo_id (int): Repository ID
db (Session): sqlAlchemy connection object
Returns:
sql_object: All tag relationships associated with the repository
"""
tags_in_repo = db.query(TagInRepoDB).filter(
TagInRepoDB.repo_id == repo_id).all()
tags = []
for tag in tags_in_repo:
tag_data = _get_tag_by_id(
tag_id=tag.tag_id, db=db)
tag_info = {
"id": tag_data.id,
"name": tag_data.name
}
tags.append(tag_info)
return tags
def _remove_tag_in_repo(tag_in_repo_id: int, db: Session):
""" Removes a tag's link to a repository
Args:
tag_in_repo_id (int): Relationship id between a tag and a repository
db (Session): sqlAlchemy connection object
"""
db_tag_in_repo = db.query(TagInRepoDB).filter(
TagInRepoDB.id == tag_in_repo_id).first()
db.delete(db_tag_in_repo)
db.commit()
| 0 | 0 | 0 |
900b7b58989ee5ac8c73cc3d74714e26e9cfe22b | 218 | py | Python | overtime/algorithms/centrality/__init__.py | overtime3/overtime | ed3ae6877894f4d2c9f8473a885698e1622be3bd | [
"MIT"
] | 9 | 2020-10-15T13:53:36.000Z | 2022-03-08T12:08:09.000Z | overtime/algorithms/centrality/__init__.py | overtime3/overtime | ed3ae6877894f4d2c9f8473a885698e1622be3bd | [
"MIT"
] | 6 | 2021-02-07T15:43:12.000Z | 2021-04-24T04:03:39.000Z | overtime/algorithms/centrality/__init__.py | overtime3/overtime | ed3ae6877894f4d2c9f8473a885698e1622be3bd | [
"MIT"
] | 7 | 2020-10-15T13:55:12.000Z | 2022-03-12T03:54:02.000Z | from overtime.algorithms.centrality.betweenness import *
from overtime.algorithms.centrality.closeness import *
from overtime.algorithms.centrality.pagerank import *
from overtime.algorithms.centrality.degree import *
| 43.6 | 56 | 0.853211 | from overtime.algorithms.centrality.betweenness import *
from overtime.algorithms.centrality.closeness import *
from overtime.algorithms.centrality.pagerank import *
from overtime.algorithms.centrality.degree import *
| 0 | 0 | 0 |
b1b4d73668dbdada7333dd77a3679b727c4d1bf8 | 840 | py | Python | prefs.py | filippobuletto/DropLink | 6a2dcaaf6fef071df23b32ee1ced09df526448a6 | [
"MIT"
] | 1 | 2017-08-08T10:49:28.000Z | 2017-08-08T10:49:28.000Z | prefs.py | filippobuletto/DropLink | 6a2dcaaf6fef071df23b32ee1ced09df526448a6 | [
"MIT"
] | null | null | null | prefs.py | filippobuletto/DropLink | 6a2dcaaf6fef071df23b32ee1ced09df526448a6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# This sample code is for use with Dropbox desktop client
# versions 1.2 and below. It is likely to be deprecated in all
# other future releases. Use it at your own risk.
# Read more at http://www.dropbox.com/developers/desktop_apps
import base64
import os.path
import platform
if platform.system() == 'Windows':
HOST_DB_PATH = os.path.expandvars(r'%APPDATA%\Dropbox\host.db')
else:
HOST_DB_PATH = os.path.expanduser(r'~/.dropbox/host.db')
if __name__ == '__main__':
print read_dropbox_location() | 28 | 67 | 0.684524 | #!/usr/bin/python
# This sample code is for use with Dropbox desktop client
# versions 1.2 and below. It is likely to be deprecated in all
# other future releases. Use it at your own risk.
# Read more at http://www.dropbox.com/developers/desktop_apps
import base64
import os.path
import platform
if platform.system() == 'Windows':
HOST_DB_PATH = os.path.expandvars(r'%APPDATA%\Dropbox\host.db')
else:
HOST_DB_PATH = os.path.expanduser(r'~/.dropbox/host.db')
def read_dropbox_location():
    """Return the local Dropbox folder path recorded in host.db.

    host.db holds two lines; the second is the base64-encoded, UTF-8
    Dropbox location. Raises IOError if host.db cannot be read.
    """
    # ``with`` replaces the manual try/finally close. The trailing
    # ``raise Exception("Dropbox location not found")`` in the original
    # was unreachable (the try block always returned or propagated its
    # own exception), so it has been removed.
    with open(HOST_DB_PATH, "r") as f:
        f.readline()  # first line is not the location; skip it
        location_line = f.readline().strip()
    return base64.decodestring(location_line).decode('utf8')
if __name__ == '__main__':
print read_dropbox_location() | 279 | 0 | 25 |
be9445878d8d1a7d3745931c73412eb85d987bda | 9,851 | py | Python | seafileapi/files.py | vincentrou/python-seafile | e75afb1618203e120b43ba3ab1b40e3736aac320 | [
"Apache-2.0"
] | null | null | null | seafileapi/files.py | vincentrou/python-seafile | e75afb1618203e120b43ba3ab1b40e3736aac320 | [
"Apache-2.0"
] | null | null | null | seafileapi/files.py | vincentrou/python-seafile | e75afb1618203e120b43ba3ab1b40e3736aac320 | [
"Apache-2.0"
] | null | null | null | import io
import os
import posixpath
import re
from urllib import urlencode
from seafileapi.utils import querystr, utf8lize,raise_does_not_exist
ZERO_OBJ_ID = '0000000000000000000000000000000000000000'
class _SeafDirentBase(object):
"""Base class for :class:`SeafFile` and :class:`SeafDir`.
It provides implementation of their common operations.
"""
isdir = None
def __init__(self, repo_id, path, object_id, size=0, client=None):
"""
:param:`path` the full path of this entry within its repo, like
"/documents/example.md"
:param:`size` The size of a file. It should be zero for a dir.
"""
self.client = client
self.repo_id = repo_id
self.path = path
self.id = object_id
self.size = size
@property
# @property
# def path(self):
# return self.path
#
# @property
# def repo_id(self):
# return self.repo_id
def rename(self, newname):
"""Change file/folder name to newname
"""
suffix = 'dir' if self.isdir else 'file'
url = '/api2/repos/%s/%s/' % (self.repo.id, suffix) + querystr(p=self.path, reloaddir='true')
postdata = {'operation': 'rename', 'newname': newname}
resp = self.client.post(url, data=postdata)
succeeded = resp.status_code == 200
if succeeded:
if self.isdir:
new_dirent = self.repo.get_dir(os.path.join(os.path.dirname(self.path), newname))
else:
new_dirent = self.repo.get_file(os.path.join(os.path.dirname(self.path), newname))
for key in self.__dict__.keys():
self.__dict__[key] = new_dirent.__dict__[key]
return succeeded
def copyTo(self, dst_dir, dst_repo_id=None):
"""Copy file/folder to other directory (also to a different repo)
"""
if dst_repo_id is None:
dst_repo_id = self.repo.id
dirent_type = 'dir' if self.isdir else 'file'
resp = self._copy_move_task('copy', dirent_type, dst_dir, dst_repo_id)
return resp.status_code == 200
def moveTo(self, dst_dir, dst_repo_id=None):
"""Move file/folder to other directory (also to a different repo)
"""
if dst_repo_id is None:
dst_repo_id = self.repo.id
dirent_type = 'dir' if self.isdir else 'file'
resp = self._copy_move_task('move', dirent_type, dst_dir, dst_repo_id)
succeeded = resp.status_code == 200
if succeeded:
new_repo = self.client.repos.get_repo(dst_repo_id)
dst_path = os.path.join(dst_dir, os.path.basename(self.path))
if self.isdir:
new_dirent = new_repo.get_dir(dst_path)
else:
new_dirent = new_repo.get_file(dst_path)
for key in self.__dict__.keys():
self.__dict__[key] = new_dirent.__dict__[key]
return succeeded
| 34.086505 | 101 | 0.607857 | import io
import os
import posixpath
import re
from urllib import urlencode
from seafileapi.utils import querystr, utf8lize,raise_does_not_exist
ZERO_OBJ_ID = '0000000000000000000000000000000000000000'
class _SeafDirentBase(object):
    """Base class for :class:`SeafFile` and :class:`SeafDir`.
    It provides implementation of their common operations (delete, rename,
    copy and move) on top of the Seafile web API (v2 / v2.1).

    NOTE(review): __init__ only stores ``repo_id``, yet rename(),
    _copy_move_task(), copyTo() and moveTo() read ``self.repo`` (a repo
    object exposing ``.id`` / ``.get_dir`` / ``.get_file``). ``self.repo``
    must therefore be attached elsewhere before those methods are used --
    verify against the call sites.
    """
    # Overridden to True/False by the SeafDir/SeafFile subclasses.
    isdir = None
    def __init__(self, repo_id, path, object_id, size=0, client=None):
        """
        :param:`path` the full path of this entry within its repo, like
        "/documents/example.md"
        :param:`size` The size of a file. It should be zero for a dir.
        """
        self.client = client
        self.repo_id = repo_id
        self.path = path
        self.id = object_id
        self.size = size
    @property
    def name(self):
        # Last path component, e.g. "example.md" for "/docs/example.md".
        return posixpath.basename(self.path)
    def get_path(self):
        """Return the full path of this entry inside its repo."""
        return self.path
    def get_repo_id(self):
        """Return the id of the repo this entry belongs to."""
        return self.repo_id
    # @property
    # def path(self):
    #     return self.path
    #
    # @property
    # def repo_id(self):
    #     return self.repo_id
    def list_revisions(self):
        # Not implemented yet.
        pass
    def delete(self):
        """Delete this entry on the server; returns the raw HTTP response."""
        suffix = 'dir' if self.isdir else 'file'
        url = '/api2/repos/%s/%s/' % (self.repo_id, suffix) + querystr(p=self.path)
        resp = self.client.delete(url)
        return resp
    def rename(self, newname):
        """Change file/folder name to newname

        Returns True on success; on success the local attributes are
        refreshed from the renamed entry fetched back from the server.
        """
        suffix = 'dir' if self.isdir else 'file'
        url = '/api2/repos/%s/%s/' % (self.repo.id, suffix) + querystr(p=self.path, reloaddir='true')
        postdata = {'operation': 'rename', 'newname': newname}
        resp = self.client.post(url, data=postdata)
        succeeded = resp.status_code == 200
        if succeeded:
            if self.isdir:
                new_dirent = self.repo.get_dir(os.path.join(os.path.dirname(self.path), newname))
            else:
                new_dirent = self.repo.get_file(os.path.join(os.path.dirname(self.path), newname))
            # Copy every attribute of the freshly fetched dirent onto self so
            # this object keeps working after the rename.
            for key in self.__dict__.keys():
                self.__dict__[key] = new_dirent.__dict__[key]
        return succeeded
    def _copy_move_task(self, operation, dirent_type, dst_dir, dst_repo_id=None):
        # Shared helper for copyTo/moveTo: posts a copy-move task to the
        # v2.1 API. `operation` is 'copy' or 'move'; `dirent_type` is
        # 'dir' or 'file'.
        url = '/api/v2.1/copy-move-task/'
        src_repo_id = self.repo.id
        src_parent_dir = os.path.dirname(self.path)
        src_dirent_name = os.path.basename(self.path)
        dst_repo_id = dst_repo_id
        dst_parent_dir = dst_dir
        operation = operation
        dirent_type = dirent_type
        postdata = {'src_repo_id': src_repo_id, 'src_parent_dir': src_parent_dir,
                    'src_dirent_name': src_dirent_name, 'dst_repo_id': dst_repo_id,
                    'dst_parent_dir': dst_parent_dir, 'operation': operation,
                    'dirent_type': dirent_type}
        return self.client.post(url, data=postdata)
    def copyTo(self, dst_dir, dst_repo_id=None):
        """Copy file/folder to other directory (also to a different repo)

        Returns True when the server accepted the copy task (HTTP 200).
        """
        if dst_repo_id is None:
            # Default to copying within this entry's own repo.
            dst_repo_id = self.repo.id
        dirent_type = 'dir' if self.isdir else 'file'
        resp = self._copy_move_task('copy', dirent_type, dst_dir, dst_repo_id)
        return resp.status_code == 200
    def moveTo(self, dst_dir, dst_repo_id=None):
        """Move file/folder to other directory (also to a different repo)

        Returns True on success; on success this object's attributes are
        refreshed from the entry at its new location.
        """
        if dst_repo_id is None:
            dst_repo_id = self.repo.id
        dirent_type = 'dir' if self.isdir else 'file'
        resp = self._copy_move_task('move', dirent_type, dst_dir, dst_repo_id)
        succeeded = resp.status_code == 200
        if succeeded:
            new_repo = self.client.repos.get_repo(dst_repo_id)
            dst_path = os.path.join(dst_dir, os.path.basename(self.path))
            if self.isdir:
                new_dirent = new_repo.get_dir(dst_path)
            else:
                new_dirent = new_repo.get_file(dst_path)
            # Re-point this object at the moved entry.
            for key in self.__dict__.keys():
                self.__dict__[key] = new_dirent.__dict__[key]
        return succeeded
    def get_share_link(self):
        # Not implemented yet.
        pass
class SeafDir(_SeafDirentBase):
    """A directory entry inside a Seafile library.

    Child entries are cached in ``self.entries`` and loaded lazily.
    """
    isdir = True
    def __init__(self, *args, **kwargs):
        super(SeafDir, self).__init__(*args, **kwargs)
        self.entries = None
        # Entries may be supplied up front (e.g. when the listing came with
        # another API response); otherwise ls()/load_entries() fetches them.
        self.entries = kwargs.pop('entries', None)
    def ls(self, force_refresh=False):
        """List the entries in this dir.
        Return a list of objects of class :class:`SeafFile` or :class:`SeafDir`.
        """
        if self.entries is None or force_refresh:
            self.load_entries()
        return self.entries
    def create_empty_file(self, name):
        """Create a new empty file in this dir.
        Return a :class:`SeafFile` object of the newly created file.
        """
        # TODO: file name validation
        path = posixpath.join(self.path, name)
        url = '/api2/repos/%s/file/' % self.repo_id + querystr(p=path, reloaddir='true')
        postdata = {'operation': 'create'}
        resp = self.client.post(url, data=postdata)
        # 'oid' response header carries the new object id; the response body
        # is the reloaded dir listing, used to refresh our entry cache.
        self.id = resp.headers['oid']
        self.load_entries(resp.json())
        return SeafFile(self.repo_id, path, ZERO_OBJ_ID, 0,self.client)
    def mkdir(self, name):
        """Create a new sub folder right under this dir.
        Return a :class:`SeafDir` object of the newly created sub folder.
        """
        path = posixpath.join(self.path, name)
        url = '/api2/repos/%s/dir/' % self.repo_id + querystr(p=path, reloaddir='true')
        postdata = {'operation': 'mkdir'}
        resp = self.client.post(url, data=postdata)
        self.id = resp.headers['oid']
        self.load_entries(resp.json())
        return SeafDir(self.repo_id, path, ZERO_OBJ_ID,0,self.client)
    def upload(self, fileobj, filename):
        """Upload a file to this folder.
        :param:fileobj :class:`File` like object
        :param:filename The name of the file
        Return a :class:`SeafFile` object of the newly uploaded file.
        """
        # NOTE(review): io.BytesIO(<str>) only works on Python 2 where str is
        # bytes; on Python 3 this raises TypeError. The module's
        # `from urllib import urlencode` also suggests Python 2 -- confirm.
        if isinstance(fileobj, str):
            fileobj = io.BytesIO(fileobj)
        upload_url = self._get_upload_link()
        files = {
            'file': (filename, fileobj),
            'parent_dir': self.path,
        }
        self.client.post(upload_url, files=files)
        # repo_obj = Repo.create_from_repo_id(self.client, self.repo_id)
        return self.get_file(posixpath.join(self.path, filename))
    @raise_does_not_exist('The requested file does not exist')
    def get_file(self, path):
        """Get the file object located in `path` in this repo.
        Return a :class:`SeafFile` object
        """
        assert path.startswith('/')
        url = '/api2/repos/%s/file/detail/' % self.repo_id
        query = '?' + urlencode(dict(p=path))
        file_json = self.client.get(url + query).json()
        return SeafFile(self.repo_id, path, file_json['id'], file_json['size'],self.client)
    def upload_local_file(self, filepath, name=None):
        """Upload a file to this folder.
        :param:filepath The path to the local file
        :param:name The name of this new file. If None, the name of the local file would be used.
        Return a :class:`SeafFile` object of the newly uploaded file.
        """
        name = name or os.path.basename(filepath)
        # NOTE(review): text mode 'r' will mangle binary files (and can fail
        # decoding on Python 3); 'rb' is likely intended -- verify.
        with open(filepath, 'r') as fp:
            return self.upload(fp, name)
    def _get_upload_link(self):
        # The server replies with the upload URL wrapped in double quotes;
        # strip them before returning.
        url = '/api2/repos/%s/upload-link/' % self.repo_id
        resp = self.client.get(url)
        return re.match(r'"(.*)"', resp.text).group(1)
    def get_uploadable_sharelink(self):
        """Generate a uploadable shared link to this dir.
        Return the url of this link.
        """
        # Not implemented yet.
        pass
    def load_entries(self, dirents_json=None):
        # Fetch the dir listing (unless one was passed in) and materialize
        # each entry as a SeafFile/SeafDir wrapper.
        if dirents_json is None:
            url = '/api2/repos/%s/dir/' % self.repo_id + querystr(p=self.path)
            dirents_json = self.client.get(url).json()
        self.entries = [self._load_dirent(entry_json) for entry_json in dirents_json]
    def _load_dirent(self, dirent_json):
        # Convert one JSON dirent into the matching wrapper object.
        dirent_json = utf8lize(dirent_json)
        path = posixpath.join(self.path, dirent_json['name'])
        if dirent_json['type'] == 'file':
            return SeafFile(self.repo_id, path, dirent_json['id'], dirent_json['size'],self.client)
        else:
            return SeafDir(self.repo_id, path, dirent_json['id'], 0,self.client)
    @property
    def num_entries(self):
        # Number of children; triggers a lazy load on first access.
        if self.entries is None:
            self.load_entries()
        return len(self.entries) if self.entries is not None else 0
    def __str__(self):
        return 'SeafDir[repo=%s,path=%s]' % \
            (self.repo_id[:6], self.path)
    __repr__ = __str__
    @staticmethod
    def create_from_shared_folder(item,client):
        '''
        Use the shared folder api return value to create SeafDir object.
        :param item: [dict]
        :return: [SeafDir]
        '''
        repo_id = item.get("repo_id",None)
        path = item.get("path",None)
        return SeafDir(repo_id, path, ZERO_OBJ_ID, 0,client)
class SeafFile(_SeafDirentBase):
    """A file entry inside a Seafile library; counterpart of :class:`SeafDir`."""
    isdir = False

    def update(self, fileobj):
        """Update the content of this file"""
        # Not implemented yet.
        pass

    def _get_download_link(self):
        # The server answers with the download URL wrapped in double quotes;
        # strip them before returning.
        endpoint = '/api2/repos/%s/file/' % self.repo_id + querystr(p=self.path)
        response = self.client.get(endpoint)
        return re.match(r'"(.*)"', response.text).group(1)

    def get_content(self):
        """Get the content of the file"""
        return self.client.get(self._get_download_link()).content

    def __str__(self):
        summary = (self.repo_id[:6], self.path, self.size)
        return 'SeafFile[repo=%s,path=%s,size=%s]' % summary

    __repr__ = __str__
| 2,465 | 4,170 | 234 |
7173ecd00afa5901a031f66d359c45f5c9dc2ffd | 7,584 | py | Python | Predictive models/Product/predictive_Models.py | Shelbyyyy/Python | 4bb6d9025f1fa7d066cecd0849cc561a5432d7cd | [
"Apache-2.0"
] | null | null | null | Predictive models/Product/predictive_Models.py | Shelbyyyy/Python | 4bb6d9025f1fa7d066cecd0849cc561a5432d7cd | [
"Apache-2.0"
] | null | null | null | Predictive models/Product/predictive_Models.py | Shelbyyyy/Python | 4bb6d9025f1fa7d066cecd0849cc561a5432d7cd | [
"Apache-2.0"
] | null | null | null |
import pandas as p#导入目前所需要的库并给与简称
data_train = '../homework/train.csv' #查看基本数据
data_train = p.read_csv(data_train)#导入训练模型
print(data_train.info())#查看数据类型
print(data_train.describe())#粗略查看基本数据
###导入并且查看原始数据
import matplotlib.pyplot as pt
import numpy as n
pt.rcParams['font.sans-serif']=['Simhei'] #解决中文为方块的问题
pt.rcParams['axes.unicode_minus'] = False #解决图像是负号显示为方块的问题
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
pt.subplot2grid((2,3),(0,0)) # 在一张大图里分一些小图并设定位置
data_train.Survived.value_counts().plot(kind='bar') #以生存总数为标准 设置图标种类为柱状图
pt.title("生存 (1 Survived)")
pt.ylabel("生存人数")
pt.subplot2grid((2,3),(0,1))
data_train.Pclass.value_counts().plot(kind="bar")
pt.ylabel("总人数")
pt.title("仓位")
pt.subplot2grid((2,3),(0,2))
pt.scatter(data_train.Survived, data_train.Age)
pt.ylabel("年龄")
pt.grid(b=True, which='major', axis='y')
pt.title("年龄 (1 Survived)")
pt.subplot2grid((2,3),(1,0), colspan=2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
pt.xlabel("年龄")
pt.ylabel("密度")
pt.title("各等级的乘客年龄分布")
pt.legend(('头等舱', '2等舱','3等舱'),loc='best') # 设置图例
pt.subplot2grid((2,3),(1,2))
data_train.Embarked.value_counts().plot(kind='bar')
pt.title("各登船口岸上船人数")
pt.ylabel("人数")
pt.show()
#粗略的以数据可视化的形式更直观的查看原始数据
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()#将未生存总数0存入value并与仓位对应
print(Survived_0)
Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
df=p.DataFrame({'生存':Survived_1, '未生存':Survived_0})
df.plot(kind='bar', stacked=False)
pt.title("仓位与生存率是否相关")
pt.xlabel("仓位")
pt.ylabel("总人数")
pt.show()
#设立假设 仓位 也就是阶级 与生存率有关与否
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
df=p.DataFrame({'男性':Survived_m, '女性':Survived_f})
df.plot(kind='bar', stacked=False)
pt.title("性别与生存率是否相关")
pt.xlabel("性别")
pt.ylabel("总人数")
pt.show()
#设立假设 性别与生存率是否相关
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
df=p.DataFrame({'生存':Survived_1, '未幸存':Survived_0})
df.plot(kind='bar', stacked=False)
pt.title("假设登船港口与生存率是否有关")
pt.xlabel("港口")
pt.ylabel("总人数")
pt.show()
#假设登船港口与生存率是否有关
g = data_train.groupby(['SibSp','Survived'])
df = p.DataFrame(g.count()['PassengerId'])
print(df)
g = data_train.groupby(['Parch','Survived'])
df = p.DataFrame(g.count()['PassengerId'])
print(df)
#判断是否有兄弟姐妹在船上以及是否有父母子女在船上与生存率是否有关
###设立假设 进行数据分析
### 处理空值年龄
from sklearn.ensemble import RandomForestRegressor #从sklearn库中导入随机森林
### 使用 RandomForest 填补缺失的年龄属性
data_train, rfr = set_missing_ages(data_train)#将预测值存入训练样本中以供使用
data_train = set_Cabin_type(data_train)#将Yes及No存入训练样本中以供使用
data_train.info()#再次查看整理过的数据
### 处理空值港口
data_train = set_Embarked_type(data_train)
data_train.Embarked = data_train.Embarked.fillna(0)
data_train.Embarked = list(map(int,data_train.Embarked))
print(data_train.Embarked.mean())
data_train = set_Embarked_type(data_train)
### 使用随机森林处理票价为0的值
data_train, rfr = set_missing_fare(data_train)
print(data_train.Fare.describe())
###数据处理
### 使用算法开始建模 这里使用逻辑回归
data_train.Pclass = data_train.Pclass.astype('object')
cate =p.get_dummies(data_train[['Cabin','Sex','Embarked','Pclass']])
data_new = data_train[['Survived','Age','SibSp','Parch','Fare']].join(cate) #数据的转储以及整理
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(data_new.iloc[:,1:], data_new.Survived, test_size = 0.2, random_state=34)
lr = LogisticRegression()
lr.fit(x_train,y_train)#用数据X,y来训练模型
pred = lr.predict(x_test)
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,pred))#预测准确率
print(accuracy_score(y_test,pred))#分类准确率分数
#尝试使用不同算法 这里使用决策树
from sklearn.tree import *
dt = DecisionTreeClassifier(random_state=99,splitter='best', presort=True)
dt.fit(x_train,y_train)
pred = dt.predict(x_test)
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,pred))
print(accuracy_score(y_test,pred))
####模型构建
data_test = p.read_csv('../homework/test.csv')#导入测试样本
data_test = set_missing_ages(data_test, rfr)
data_test = set_Cabin_type(data_test)
data_test.Pclass = data_test.Pclass.astype('object')
cate_test =p.get_dummies(data_test[['Cabin','Sex','Embarked','Pclass']])
data_test_new = data_test[['PassengerId','Age','SibSp','Parch','Fare']].join(cate_test)
final = dt.predict(data_test_new.fillna(0))
final_1=data_test[['PassengerId','Age']]
final_1['Survived'] = final
final = final_1[['PassengerId','Survived']]
final.to_csv('C:/Users/yang/Desktop/code/python/homework/6.csv')
print(final.describe())
print(data_test_new)
### 使用训练好的模型进行预测
| 29.509728 | 126 | 0.692906 |
import pandas as p#导入目前所需要的库并给与简称
data_train = '../homework/train.csv' #查看基本数据
data_train = p.read_csv(data_train)#导入训练模型
print(data_train.info())#查看数据类型
print(data_train.describe())#粗略查看基本数据
###导入并且查看原始数据
import matplotlib.pyplot as pt
import numpy as n
pt.rcParams['font.sans-serif']=['Simhei'] #解决中文为方块的问题
pt.rcParams['axes.unicode_minus'] = False #解决图像是负号显示为方块的问题
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
pt.subplot2grid((2,3),(0,0)) # 在一张大图里分一些小图并设定位置
data_train.Survived.value_counts().plot(kind='bar') #以生存总数为标准 设置图标种类为柱状图
pt.title("生存 (1 Survived)")
pt.ylabel("生存人数")
pt.subplot2grid((2,3),(0,1))
data_train.Pclass.value_counts().plot(kind="bar")
pt.ylabel("总人数")
pt.title("仓位")
pt.subplot2grid((2,3),(0,2))
pt.scatter(data_train.Survived, data_train.Age)
pt.ylabel("年龄")
pt.grid(b=True, which='major', axis='y')
pt.title("年龄 (1 Survived)")
pt.subplot2grid((2,3),(1,0), colspan=2)
data_train.Age[data_train.Pclass == 1].plot(kind='kde')
data_train.Age[data_train.Pclass == 2].plot(kind='kde')
data_train.Age[data_train.Pclass == 3].plot(kind='kde')
pt.xlabel("年龄")
pt.ylabel("密度")
pt.title("各等级的乘客年龄分布")
pt.legend(('头等舱', '2等舱','3等舱'),loc='best') # 设置图例
pt.subplot2grid((2,3),(1,2))
data_train.Embarked.value_counts().plot(kind='bar')
pt.title("各登船口岸上船人数")
pt.ylabel("人数")
pt.show()
#粗略的以数据可视化的形式更直观的查看原始数据
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()#将未生存总数0存入value并与仓位对应
print(Survived_0)
Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
df=p.DataFrame({'生存':Survived_1, '未生存':Survived_0})
df.plot(kind='bar', stacked=False)
pt.title("仓位与生存率是否相关")
pt.xlabel("仓位")
pt.ylabel("总人数")
pt.show()
#设立假设 仓位 也就是阶级 与生存率有关与否
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
df=p.DataFrame({'男性':Survived_m, '女性':Survived_f})
df.plot(kind='bar', stacked=False)
pt.title("性别与生存率是否相关")
pt.xlabel("性别")
pt.ylabel("总人数")
pt.show()
#设立假设 性别与生存率是否相关
fig = pt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
df=p.DataFrame({'生存':Survived_1, '未幸存':Survived_0})
df.plot(kind='bar', stacked=False)
pt.title("假设登船港口与生存率是否有关")
pt.xlabel("港口")
pt.ylabel("总人数")
pt.show()
#假设登船港口与生存率是否有关
g = data_train.groupby(['SibSp','Survived'])
df = p.DataFrame(g.count()['PassengerId'])
print(df)
g = data_train.groupby(['Parch','Survived'])
df = p.DataFrame(g.count()['PassengerId'])
print(df)
#判断是否有兄弟姐妹在船上以及是否有父母子女在船上与生存率是否有关
###设立假设 进行数据分析
### 处理空值年龄
from sklearn.ensemble import RandomForestRegressor #从sklearn库中导入随机森林
### 使用 RandomForest 填补缺失的年龄属性
def set_missing_ages(df):
    """Fill missing Age values by regressing Age on the other numeric features.

    Trains a RandomForestRegressor on the rows whose Age is known and uses it
    to predict Age for the rows where it is missing (filled in place).

    :param df: DataFrame with 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass' columns.
    :return: (df, rfr) -- the mutated DataFrame and the fitted regressor, so
        the same model can be reused on the test set.
    """
    age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]  # usable numeric features
    known_age = age_df[age_df.Age.notnull()].values  # rows with Age present (training data)
    unknown_age = age_df[age_df.Age.isnull()].values  # rows with Age missing (to predict)
    y = known_age[:, 0]  # target: the known ages (column 0)
    x = known_age[:, 1:]  # features: the remaining columns
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(x, y)  # fit the forest on the known-age rows
    predictedAges = rfr.predict(unknown_age[:, 1::])  # predict the missing ages
    df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges  # write predictions into the NaN slots
    return df, rfr
def set_Cabin_type(df):
    """Collapse the Cabin column into a Yes/No flag.

    Rows with any cabin value become "Yes", rows with a missing cabin become
    "No". The DataFrame is modified in place and also returned.
    """
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Cabin'] = "Yes"
    df.loc[~has_cabin, 'Cabin'] = "No"
    return df
data_train, rfr = set_missing_ages(data_train)#将预测值存入训练样本中以供使用
data_train = set_Cabin_type(data_train)#将Yes及No存入训练样本中以供使用
data_train.info()#再次查看整理过的数据
### 处理空值港口
def set_Embarked_type(df):
    """Recode the Embarked port letters as numeric strings so NaNs can later
    be filled with a numeric placeholder (see the fillna step that follows).

    'S' -> "1", 'C' -> "2", 'Q' -> "3"; missing values are left untouched.
    The DataFrame is modified in place and also returned.
    """
    port_codes = {'S': "1", 'C': "2", 'Q': "3"}
    for port, code in port_codes.items():
        df.loc[df.Embarked == port, 'Embarked'] = code
    return df
data_train = set_Embarked_type(data_train)
data_train.Embarked = data_train.Embarked.fillna(0)
data_train.Embarked = list(map(int,data_train.Embarked))
print(data_train.Embarked.mean())
def set_Embarked_type(df):
    """Map the placeholder value 0 (previously used to fill missing ports)
    back to port label 'S'. Modifies *df* in place and returns it.
    """
    was_placeholder = df.Embarked == 0
    df.loc[was_placeholder, 'Embarked'] = "S"
    return df
data_train = set_Embarked_type(data_train)
### 使用随机森林处理票价为0的值
def set_missing_fare(df):
    """Replace zero fares with values predicted from the other numeric features.

    A fare of 0 is treated as missing: a RandomForestRegressor is trained on
    the rows with a non-zero fare and used to predict fares for the zero-fare
    rows (filled in place).

    NOTE(review): the 'Age' feature must not contain NaN here (sklearn rejects
    NaN inputs); in this script ages are filled beforehand -- verify call order.

    :param df: DataFrame with 'Fare', 'Age', 'Parch', 'SibSp', 'Pclass' columns.
    :return: (df, rfr) -- the mutated DataFrame and the fitted regressor.
    """
    fare_df = df[['Fare','Age','Parch', 'SibSp', 'Pclass']]  # usable numeric features
    known_fare = fare_df.loc[fare_df.Fare != 0].values  # rows with a real (non-zero) fare
    unknown_fare = fare_df.loc[fare_df.Fare == 0].values  # rows whose fare is the 0 placeholder
    y1 = known_fare[:, 0]  # target: the known fares (column 0)
    x1 = known_fare[:, 1:]  # features: the remaining columns
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(x1, y1)  # fit the forest on the known-fare rows
    predictedAges = rfr.predict(unknown_fare[:, 1::])  # predicted fares (name kept from the age helper)
    df.loc[ df.Fare == 0, 'Fare' ] = predictedAges  # write predictions over the zero fares
    return df, rfr
data_train, rfr = set_missing_fare(data_train)
print(data_train.Fare.describe())
###数据处理
### 使用算法开始建模 这里使用逻辑回归
data_train.Pclass = data_train.Pclass.astype('object')
cate =p.get_dummies(data_train[['Cabin','Sex','Embarked','Pclass']])
data_new = data_train[['Survived','Age','SibSp','Parch','Fare']].join(cate) #数据的转储以及整理
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(data_new.iloc[:,1:], data_new.Survived, test_size = 0.2, random_state=34)
lr = LogisticRegression()
lr.fit(x_train,y_train)#用数据X,y来训练模型
pred = lr.predict(x_test)
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,pred))#预测准确率
print(accuracy_score(y_test,pred))#分类准确率分数
#尝试使用不同算法 这里使用决策树
from sklearn.tree import *
dt = DecisionTreeClassifier(random_state=99,splitter='best', presort=True)
dt.fit(x_train,y_train)
pred = dt.predict(x_test)
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,pred))
print(accuracy_score(y_test,pred))
####模型构建
data_test = p.read_csv('../homework/test.csv')#导入测试样本
def set_missing_ages(df, rfr):
    """Fill missing 'Age' values of *df* in place using a pre-trained regressor.

    Test-set counterpart of the training-time set_missing_ages: the model is
    not re-fitted here, only applied.

    :param df: DataFrame with 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass' columns.
    :param rfr: fitted regressor exposing .predict() (e.g. RandomForestRegressor).
    :return: the same DataFrame, with NaN ages replaced by predictions.
    """
    age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    missing = age_df.Age.isnull()
    # Original built known_age / y3 / X3 too, but never used them -- removed.
    # Guard: avoid calling .predict() on an empty feature matrix.
    if missing.any():
        unknown_age = age_df[missing].values
        # Column 0 is Age itself; the remaining columns are the features.
        df.loc[missing, 'Age'] = rfr.predict(unknown_age[:, 1:])
    return df
data_test = set_missing_ages(data_test, rfr)
data_test = set_Cabin_type(data_test)
data_test.Pclass = data_test.Pclass.astype('object')
cate_test =p.get_dummies(data_test[['Cabin','Sex','Embarked','Pclass']])
data_test_new = data_test[['PassengerId','Age','SibSp','Parch','Fare']].join(cate_test)
final = dt.predict(data_test_new.fillna(0))
final_1=data_test[['PassengerId','Age']]
final_1['Survived'] = final
final = final_1[['PassengerId','Survived']]
final.to_csv('C:/Users/yang/Desktop/code/python/homework/6.csv')
print(final.describe())
print(data_test_new)
### 使用训练好的模型进行预测
| 2,842 | 0 | 144 |
2230f1c0b0526112ff87b859b1aa753e56ca180b | 685 | py | Python | chemreg/substance/urls.py | Chemical-Curation/chemcurator | bcd7fab84e407f06502e6873c38820724d4e54e7 | [
"MIT"
] | 1 | 2020-10-05T18:02:24.000Z | 2020-10-05T18:02:24.000Z | chemreg/substance/urls.py | Chemical-Curation/chemcurator_django | bcd7fab84e407f06502e6873c38820724d4e54e7 | [
"MIT"
] | 207 | 2020-01-30T19:17:44.000Z | 2021-02-24T19:45:29.000Z | chemreg/substance/urls.py | Chemical-Curation/chemcurator_django | bcd7fab84e407f06502e6873c38820724d4e54e7 | [
"MIT"
] | null | null | null | from django.urls import include, path
from chemreg.jsonapi.routers import SimpleRouter
from chemreg.substance import views
# Create a router and register our viewsets with it.
router = SimpleRouter()
router.register(views.QCLevelsTypeViewSet, "qcLevels")
router.register(views.RelationshipTypeViewSet)
router.register(views.SynonymViewSet)
router.register(views.SynonymTypeViewSet)
router.register(views.SourceViewSet)
router.register(views.SubstanceViewSet)
router.register(views.SubstanceTypeViewSet)
router.register(views.SynonymQualityViewSet, prefix="synonymQualities")
router.register(views.SubstanceRelationshipViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 31.136364 | 71 | 0.830657 | from django.urls import include, path
from chemreg.jsonapi.routers import SimpleRouter
from chemreg.substance import views
# Create a router and register our viewsets with it.
# (`include` / `path` come from the django.urls import at the top of the file.)
router = SimpleRouter()
# "qcLevels" is passed explicitly -- presumably the camelCase route prefix,
# overriding whatever name the router would derive from the viewset.
router.register(views.QCLevelsTypeViewSet, "qcLevels")
router.register(views.RelationshipTypeViewSet)
router.register(views.SynonymViewSet)
router.register(views.SynonymTypeViewSet)
router.register(views.SourceViewSet)
router.register(views.SubstanceViewSet)
router.register(views.SubstanceTypeViewSet)
# Same camelCase override, passed as the `prefix` keyword here.
router.register(views.SynonymQualityViewSet, prefix="synonymQualities")
router.register(views.SubstanceRelationshipViewSet)
# Mount every registered route at the URL root.
urlpatterns = [
    path("", include(router.urls)),
]
874a1f7796f20abe02230f9fc1c8771629b0380a | 829 | py | Python | tests/cases/issues/test_issue_179.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 213 | 2018-07-05T21:21:21.000Z | 2022-03-22T04:54:53.000Z | tests/cases/issues/test_issue_179.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 259 | 2018-06-22T16:46:33.000Z | 2022-03-23T19:39:15.000Z | tests/cases/issues/test_issue_179.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 27 | 2019-03-26T12:46:49.000Z | 2022-02-21T16:56:23.000Z | import pytest_cases as pytest
@pytest.fixture
@pytest.parametrize_with_cases("x,y", cases=CaseY, debug=True)
| 21.25641 | 64 | 0.677925 | import pytest_cases as pytest
@pytest.fixture
def db_dep():
    """Dummy database dependency; only its presence matters for this repro."""
    return None
class CaseX:
    """Inner case collection; every case consumes the db_dep fixture."""
    def case_one(self, db_dep):
        # Case id "one" -> value 1.
        return 1
    def case_two(self, db_dep):
        # Case id "two" -> value 2.
        return 2
class CaseY:
    """Outer case collection whose cases are themselves parametrized with CaseX."""
    @pytest.parametrize_with_cases("x", cases=CaseX, debug=True)
    def case_x_one(self,db_dep,x):
        return x, 1
    @pytest.parametrize_with_cases("x", cases=CaseX, debug=True)
    def case_x_two(self,db_dep,x):
        # NOTE(review): returns (x, 1) just like case_x_one -- possibly meant
        # to be (x, 2); test_synthesis only checks the ids, not the values.
        return x, 1
@pytest.parametrize_with_cases("x,y", cases=CaseY, debug=True)
def test_nested_parametrize(x, y):
    # Body is intentionally empty: the interesting behaviour is the id
    # generation of the nested parametrization (checked in test_synthesis).
    pass
def test_synthesis(module_results_dct):
assert list(module_results_dct) == [
'test_nested_parametrize[x_one-one]',
'test_nested_parametrize[x_one-two]',
'test_nested_parametrize[x_two-one]',
'test_nested_parametrize[x_two-two]'
]
| 382 | 165 | 166 |
d33d27398db1a78287309315d309d2df91c6dc08 | 4,889 | py | Python | plugins/sign-in/utils.py | fz6m/tomon-naixue | dfbdd69836f26d160cece34e204f9fb2ed731607 | [
"MIT"
] | 3 | 2020-08-23T17:43:09.000Z | 2020-08-31T04:43:42.000Z | plugins/sign-in/utils.py | fz6m/tomon-naixue | dfbdd69836f26d160cece34e204f9fb2ed731607 | [
"MIT"
] | null | null | null | plugins/sign-in/utils.py | fz6m/tomon-naixue | dfbdd69836f26d160cece34e204f9fb2ed731607 | [
"MIT"
] | null | null | null |
import os
import random
import datetime
import aiofiles
from enum import Enum
from dateutil.parser import parse
import aiohttp
try:
import ujson as json
except:
import json
| 26.284946 | 133 | 0.566578 |
import os
import random
import datetime
import aiofiles
from enum import Enum
from dateutil.parser import parse
import aiohttp
try:
import ujson as json
except:
import json
class Model(Enum):
    """Matching / send modes used by the helpers in this module."""
    ALL = '_all'  # exact, full-string command match (see Tools.commandMatch)
    BLURRY = '_blurry'  # substring (fuzzy) command match
    SEND_AT = '_send_at'  # send mode (presumably @-mention reply; not used here)
    SEND_DEFAULT = '_send_default'  # send mode (presumably plain reply; not used here)
class Status(Enum):
    """Generic success/failure markers returned by the IO/network helpers."""
    SUCCESS = '_success'
    FAILURE = '_failure'
class TimeUtils():
    """Date/time helpers built on ``datetime`` and ``dateutil``'s ``parse``.

    The string constants name the units accepted by :meth:`getTimeDifference`.
    """
    DAY = 'day'
    HOUR = 'hour'
    MINUTE = 'minute'
    SECOND = 'second'
    ALL = 'all'

    @staticmethod
    def getTheCurrentTime():
        """Current local date as 'YYYY-MM-DD'."""
        return datetime.datetime.now().strftime('%Y-%m-%d')

    @staticmethod
    def getAccurateTimeNow():
        """Current local timestamp as 'YYYY-MM-DD/HH:MM:SS'."""
        return datetime.datetime.now().strftime('%Y-%m-%d/%H:%M:%S')

    @classmethod
    def judgeTimeDifference(cls, lastTime):
        """Whole hours elapsed between *lastTime* (a parseable string) and now."""
        earlier = parse(lastTime)
        later = parse(cls.getAccurateTimeNow())
        return int((later - earlier).total_seconds() / 3600)

    @staticmethod
    def getTheCurrentHour():
        """Current local hour as an int (0-23)."""
        return int(datetime.datetime.now().strftime('%H'))

    @classmethod
    def calculateTheElapsedTimeCombination(cls, lastTime):
        """Elapsed time since *lastTime* as ``[hours, minutes, seconds]``."""
        earlier = parse(lastTime)
        later = parse(cls.getAccurateTimeNow())
        total = int((later - earlier).total_seconds())
        return [int(total / 3600), int((total % 3600) / 60), int(total % 60)]

    @staticmethod
    def replaceHourMinuteAndSecond(parameterList, msg):
        """Substitute the ``{hour}``/``{minute}``/``{second}`` placeholders in
        *msg* with the first three entries of *parameterList*."""
        out = msg.replace(r'{hour}', str(parameterList[0]))
        out = out.replace(r'{minute}', str(parameterList[1]))
        return out.replace(r'{second}', str(parameterList[2]))

    @classmethod
    def getTimeDifference(cls, original, model):
        """Difference between *original* and now, expressed in *model* units.

        *model* is one of the class constants. There is deliberately no HOUR
        branch (as in the original code): unknown models fall through to an
        implicit None.
        """
        start = parse(original)
        end = parse(cls.getAccurateTimeNow())
        seconds = int((end - start).total_seconds())
        if model == cls.ALL:
            return {
                cls.DAY: int((end - start).days),
                cls.HOUR: int(seconds / 3600),
                cls.MINUTE: int((seconds % 3600) / 60),  # remainder of the hours
                cls.SECOND: int(seconds % 60),  # remainder of the minutes
            }
        if model == cls.DAY:
            # Day difference is taken against today's *date*, not the exact time.
            return int((parse(cls.getTheCurrentTime()) - start).days)
        if model == cls.MINUTE:
            return int(seconds / 60)
        if model == cls.SECOND:
            return seconds
class Tools():
    """Assorted helpers: command matching, folder checks and async JSON IO."""

    @staticmethod
    def commandMatch(msg, commandList, model = Model.ALL):
        """Return True when *msg* matches any command in *commandList*.

        Model.ALL requires exact equality; Model.BLURRY requires the command
        to appear as a substring of *msg*. Any other model never matches.
        """
        if model == Model.ALL:
            return any(command == msg for command in commandList)
        if model == Model.BLURRY:
            return any(msg.find(command) != -1 for command in commandList)
        return False

    @staticmethod
    def checkFolder(dir):
        """Create directory *dir* (including parents) when it is missing."""
        if not os.path.exists(dir):
            os.makedirs(dir)

    @staticmethod
    async def readJsonFile(p):
        """Read and decode JSON from *p*; Status.FAILURE when the file is absent."""
        if not os.path.exists(p):
            return Status.FAILURE
        async with aiofiles.open(p, 'r', encoding='utf-8') as f:
            raw = await f.read()
        return json.loads(raw)

    @staticmethod
    async def writeJsonFile(p, content):
        """Serialize *content* as JSON into file *p*; always Status.SUCCESS."""
        async with aiofiles.open(p, 'w', encoding='utf-8') as f:
            await f.write(json.dumps(content))
        return Status.SUCCESS

    @staticmethod
    def random(items):
        """Uniformly pick one element of *items*."""
        return random.choice(items)
class Dict(dict):
    """dict subclass with attribute-style access (``d.key`` == ``d['key']``).

    Note: a missing attribute raises KeyError (not AttributeError), because
    lookup is delegated straight to ``dict.__getitem__``.
    """
    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__

    @classmethod
    def dictToObj(cls, dictObj):
        """Recursively convert a plain dict (and any nested dicts) into Dict.

        Non-dict values are returned unchanged.
        """
        if not isinstance(dictObj, dict):
            return dictObj
        # Bug fix: the original called cls.Dict(), but the class has no
        # attribute named 'Dict' (instance __getattr__ does not apply to
        # class attribute lookup), so every call raised AttributeError.
        d = cls()
        for k, v in dictObj.items():
            d[k] = cls.dictToObj(v)
        return d
class Network():
    """Thin aiohttp wrappers that GET a URL and swallow every error."""

    DEFAULT_HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
    }

    @classmethod
    async def getBytes(cls, url, headers = '', timeout = 10):
        """GET *url* and return the raw body bytes, or Status.FAILURE on any error."""
        if headers == '':
            headers = cls.DEFAULT_HEADERS
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url = url, headers = headers, timeout = timeout) as response:
                    return await response.read()
        except:
            # Deliberately broad: callers only distinguish success vs FAILURE.
            return Status.FAILURE

    @classmethod
    async def getJson(cls, url, headers = '', timeout = 10):
        """GET *url* and return the decoded JSON body, or Status.FAILURE on any error."""
        if headers == '':
            headers = cls.DEFAULT_HEADERS
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url = url, headers = headers, timeout = timeout) as response:
                    return await response.json()
        except:
            return Status.FAILURE
f68376b8ba9a2f5f86ba491fadf18c096dac11db | 2,033 | py | Python | tests/utils/kmp_tests.py | SteelPh0enix/pySerialMonitor | d8504da1ba08669ac466e1c78d75d15d803540fd | [
"MIT"
] | 1 | 2022-02-17T07:18:37.000Z | 2022-02-17T07:18:37.000Z | tests/utils/kmp_tests.py | SteelPh0enix/pySerialMonitor | d8504da1ba08669ac466e1c78d75d15d803540fd | [
"MIT"
] | null | null | null | tests/utils/kmp_tests.py | SteelPh0enix/pySerialMonitor | d8504da1ba08669ac466e1c78d75d15d803540fd | [
"MIT"
] | null | null | null | import unittest
from app.utils import KMP
| 29.463768 | 88 | 0.579439 | import unittest
from app.utils import KMP
class KMPTests(unittest.TestCase):
def test_string_pattern_matching(self):
kmp = KMP()
pattern_1 = "abc"
dataset_1 = "abbabbabbcbabcaabbababcababa"
expected_1 = [11, 20]
self.assertListEqual(kmp.search(dataset_1, pattern_1), expected_1)
pattern_2 = "aa"
dataset_2 = "aaaaaaaaa"
expected_2 = [0, 1, 2, 3, 4, 5, 6, 7]
self.assertListEqual(kmp.search(dataset_2, pattern_2), expected_2)
def test_byte_pattern_matching(self):
kmp = KMP()
pattern_1 = [0x21, 0x37]
dataset_1 = [0x22, 0x21, 0x00, 0x12, 0x83, 0x92, 0x21, 0x37, 0x69, 0x37, 0x35]
expected_1 = [6]
self.assertListEqual(kmp.search(dataset_1, pattern_1), expected_1)
pattern_2 = [0x69, 0x69]
dataset_2 = [
0x82,
0x91,
0x00,
0x00,
0x60,
0x69,
0x12,
0x45,
0x69,
0x69,
0x69,
0x99,
]
expected_2 = [8, 9]
self.assertListEqual(kmp.search(dataset_2, pattern_2), expected_2)
# 012345678 90123 4 567890123 456789012 3 4 5 6 7 89012
newline_pattern = b"testtest\ntest\r\nalsotest\rmoredata\x22\x33\x44\n\r\ndadad"
newline_n = b"\n"
newline_r = b"\r"
newline_nr = b"\n\r"
newline_rn = b"\r\n"
newline_n_expected = [8, 14, 35, 37]
newline_r_expected = [13, 23, 36]
newline_nr_expected = [35]
newline_rn_expected = [13, 36]
self.assertListEqual(kmp.search(newline_pattern, newline_n), newline_n_expected)
self.assertListEqual(kmp.search(newline_pattern, newline_r), newline_r_expected)
self.assertListEqual(
kmp.search(newline_pattern, newline_nr), newline_nr_expected
)
self.assertListEqual(
kmp.search(newline_pattern, newline_rn), newline_rn_expected
)
| 1,901 | 13 | 76 |
f01ecbdf7e1871505f0a10215d7b342df4553e67 | 3,669 | py | Python | carplay/generate_index.py | waha0506/keras_yolo_v2 | 3575201fd09bec7dee6abf956d737d08c17c6086 | [
"MIT"
] | null | null | null | carplay/generate_index.py | waha0506/keras_yolo_v2 | 3575201fd09bec7dee6abf956d737d08c17c6086 | [
"MIT"
] | 4 | 2020-09-25T22:32:42.000Z | 2022-02-09T23:34:48.000Z | carplay/generate_index.py | waha0506/keras_yolo_v2 | 3575201fd09bec7dee6abf956d737d08c17c6086 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from matplotlib import pyplot as plt
import argparse
from itertools import permutations
from lxml import etree as ET
ap=argparse.ArgumentParser()
ap.add_argument('-i', '--image', type=str)
args=vars(ap.parse_args())
image = cv2.imread(args['image'])
source = image.copy()
target = image.copy()
phone=source[101:286,209:394]
music=source[101:286,479:664]
maps=source[101:286,749:934]
messages=source[101:286,1019:1204]
playing=source[419:604,209:394]
podcasts=source[419:604,479:664]
audiobook=source[419:604,749:934]
audiotest=source[419:604,1019:1204]
icons_coordinate = {
0:[101,286,209,394],
1:[101,286,479,664],
2:[101,286,749,934],
3:[101,286,1019,1204],
4:[419,604,209,394],
5:[419,604,479,664],
6:[419,604,749,934],
7:[419,604,1019,1204]
}
icons = [phone, music, maps, messages, playing, podcasts, audiobook, audiotest]
icon_name = ["phone", "music", "maps", "messages", "playing", "podcasts", "audiobook", "audiotest"]
index = [0, 1, 2, 3, 4, 5, 6, 7]
index_set = []
# Build every permutation of the 8 icon slots: 8! = 40320 layouts.
for p in permutations(index):
    index_set.append(p)
#print(index_set)
#print(len(index_set))
#for i in range(len(index_set)):
#    print(index_set[i][0])
# For each permutation, paste the icon crops into the corresponding grid
# slots of the target image, save it as ./images/<i>.jpg and emit the
# matching annotation XML via write_xml_file.
for i in range(len(index_set)):
    target[101:286,209:394]=icons[index_set[i][0]]
    target[101:286,479:664]=icons[index_set[i][1]]
    target[101:286,749:934]=icons[index_set[i][2]]
    target[101:286,1019:1204]=icons[index_set[i][3]]
    target[419:604,209:394]=icons[index_set[i][4]]
    target[419:604,479:664]=icons[index_set[i][5]]
    target[419:604,749:934]=icons[index_set[i][6]]
    target[419:604,1019:1204]=icons[index_set[i][7]]
    filename='./images/'+str(i)+'.jpg'
    cv2.imwrite(filename, target)
    write_xml_file(i)
| 30.322314 | 99 | 0.676751 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import argparse
from itertools import permutations
from lxml import etree as ET
# Parse the input screenshot path and crop the 8 icon tiles used to build
# permuted training images below.
ap=argparse.ArgumentParser()
ap.add_argument('-i', '--image', type=str)
args=vars(ap.parse_args())
image = cv2.imread(args['image'])
source = image.copy()
# Working canvas that is overwritten for each permutation.
target = image.copy()
# Fixed tile positions: two rows (101:286, 419:604), four columns each.
phone=source[101:286,209:394]
music=source[101:286,479:664]
maps=source[101:286,749:934]
messages=source[101:286,1019:1204]
playing=source[419:604,209:394]
podcasts=source[419:604,479:664]
audiobook=source[419:604,749:934]
audiotest=source[419:604,1019:1204]
# Slot index -> [ymin, ymax, xmin, xmax] of that slot on screen; consumed
# by the XML annotation writer.
icons_coordinate = {
    0:[101,286,209,394],
    1:[101,286,479,664],
    2:[101,286,749,934],
    3:[101,286,1019,1204],
    4:[419,604,209,394],
    5:[419,604,479,664],
    6:[419,604,749,934],
    7:[419,604,1019,1204]
    }
icons = [phone, music, maps, messages, playing, podcasts, audiobook, audiotest]
icon_name = ["phone", "music", "maps", "messages", "playing", "podcasts", "audiobook", "audiotest"]
index = [0, 1, 2, 3, 4, 5, 6, 7]
# Filled with all icon-order permutations below.
index_set = []
def write_xml_file(index):
    """Write a Pascal-VOC style annotation XML for generated image ``index``.

    Emits ./annotations/<index>.xml describing the 8 icon bounding boxes of
    ./images/<index>.jpg (icon order taken from index_set[index]).

    Bug fix: the original body read the module-level loop variable ``i``
    instead of the ``index`` parameter for the file name, path and output
    file, which only worked when called as write_xml_file(i) from the
    module-level loop. It now uses the parameter consistently.
    """
    root = ET.Element('annotation')
    folder = ET.SubElement(root, 'folder')
    folder.text = 'images'
    filename = ET.SubElement(root, 'filename')
    filename.text = str(index) + '.jpg'
    path = ET.SubElement(root, 'path')
    path.text = '/home/xueguang/images/' + str(index) + '.jpg'
    source = ET.SubElement(root, 'source')
    database = ET.SubElement(source, 'database')
    database.text = 'Unknown'
    # Fixed geometry of the CarPlay screenshot.
    size = ET.SubElement(root, 'size')
    width = ET.SubElement(size, 'width')
    width.text = '1280'
    height = ET.SubElement(size, 'height')
    height.text = '768'
    depth = ET.SubElement(size, 'depth')
    depth.text = '3'
    segmented = ET.SubElement(root, 'segmented')
    segmented.text = '0'
    # One <object> element per icon slot, in slot order.
    for j in range(8):
        write_object(index_set[index][j], root, j)
    tree = ET.ElementTree(root)
    xml_path = './annotations/' + str(index) + '.xml'
    tree.write(xml_path, pretty_print=True, xml_declaration=True,
               encoding="utf-8")
def write_object(index_set_name, root_name, index):
    """Append one Pascal-VOC <object> entry for a single icon slot.

    index_set_name: which icon occupies the slot (index into icon_name).
    root_name: the <annotation> root element to append to.
    index: the slot number, used to look up the slot's screen box.
    """
    obj = ET.SubElement(root_name, 'object')
    ET.SubElement(obj, 'name').text = icon_name[index_set_name]
    ET.SubElement(obj, 'pose').text = "Unspecified"
    ET.SubElement(obj, 'truncated').text = "0"
    ET.SubElement(obj, 'difficult').text = "0"
    box = ET.SubElement(obj, 'bndbox')
    # icons_coordinate stores [ymin, ymax, xmin, xmax] per slot.
    y_min, y_max, x_min, x_max = icons_coordinate[index]
    for tag, value in (('xmin', x_min), ('ymin', y_min),
                       ('xmax', x_max), ('ymax', y_max)):
        ET.SubElement(box, tag).text = str(value)
# Enumerate all 8! = 40320 icon orderings.
for p in permutations(index):
    index_set.append(p)
#print(index_set)
#print(len(index_set))
#for i in range(len(index_set)):
#    print(index_set[i][0])
# For each permutation, paste the icons into their slots and save the
# image plus its matching XML annotation.
for i in range(len(index_set)):
    target[101:286,209:394]=icons[index_set[i][0]]
    target[101:286,479:664]=icons[index_set[i][1]]
    target[101:286,749:934]=icons[index_set[i][2]]
    target[101:286,1019:1204]=icons[index_set[i][3]]
    target[419:604,209:394]=icons[index_set[i][4]]
    target[419:604,479:664]=icons[index_set[i][5]]
    target[419:604,749:934]=icons[index_set[i][6]]
    target[419:604,1019:1204]=icons[index_set[i][7]]
    filename='./images/'+str(i)+'.jpg'
    cv2.imwrite(filename, target)
    write_xml_file(i)
| 1,908 | 0 | 46 |
fad4b0e4d3b1b5fdd64fe0d2b39bf829079b205e | 7,749 | py | Python | data/external/repositories_2to3/163451/kaggle_diabetic_retinopathy-master/utils.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/163451/kaggle_diabetic_retinopathy-master/utils.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/163451/kaggle_diabetic_retinopathy-master/utils.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import re
import glob
import os
import sys
import skimage
import numpy as np
import theano.tensor as T
from sklearn.cross_validation import StratifiedShuffleSplit
import string
import lasagne as nn
# TODO clean this mess up
# TODO: very ugly stuff here, can probably be done a lot better
| 34.44 | 77 | 0.550136 | import re
import glob
import os
import sys
import skimage
import numpy as np
import theano.tensor as T
from sklearn.cross_validation import StratifiedShuffleSplit
import string
import lasagne as nn
def padtosquare(im):
    """Pad a 2-D image to a square shape with constant value 1.

    The shorter dimension gets (almost) symmetric padding; an odd
    difference puts the extra row/column on the trailing side.
    """
    rows, cols = im.shape
    half_diff = abs(cols - rows) / 2.0
    before = int(np.floor(half_diff))
    after = int(np.ceil(half_diff))
    if rows < cols:
        pad_width = ((before, after), (0, 0))
    else:
        pad_width = ((0, 0), (before, after))
    return skimage.util.pad(im, pad_width=pad_width,
                            mode='constant',
                            constant_values=(1, 1))
def one_hot(vec, m=None):
    """One-hot encode integer labels as int32 rows.

    m: number of classes; defaults to max(vec) + 1.
    """
    n_classes = int(np.max(vec)) + 1 if m is None else m
    return np.eye(n_classes)[vec].astype('int32')
def hms(seconds):
    """Format a duration in seconds as an HH:MM:SS string."""
    total = int(np.floor(seconds))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def rms(x, axis=None, epsilon=1e-12):
    """Root-mean-square of a Theano tensor; epsilon avoids sqrt(0) gradients."""
    mean_square = T.mean(T.sqr(x), axis=axis)
    return T.sqrt(mean_square + epsilon)
# TODO clean this mess up
def split_data(train_labels, labels_split, valid_size=20,
               SEED=42, stratified=True, pairs=False):
    """Split labels into train/validation id and label arrays.

    train_labels: pandas-style table with 'image' and 'level' columns.
    labels_split: per-eye table with 'id', 'eye' and 'level' columns
        (only used when pairs=True).
    valid_size: validation share in percent; >= 100 returns None.
    pairs: when True, split whole patients (left/right eye pairs) and
        return Nx2 label arrays.

    Returns (id_train, y_train, id_valid, y_valid).
    """
    if valid_size >= 100:
        return None
    num_all = len(train_labels)
    np.random.seed(SEED)
    if stratified:
        if pairs:
            # TODO: Taking max level to stratify for now.
            label_pairs = labels_split.groupby('id')['level'].max()
            label_pairs.index = list(map(int, label_pairs.index))
            label_pairs = label_pairs.sort_index(ascending=True)
            sss = StratifiedShuffleSplit(label_pairs.values, n_iter=1,
                                         test_size=0.01 * valid_size,
                                         indices=None, random_state=SEED)
        else:
            sss = StratifiedShuffleSplit(train_labels.level, n_iter=1,
                                         test_size=0.01 * valid_size,
                                         indices=None, random_state=SEED)
        # NOTE(review): legacy sklearn API — the loop exhausts the single
        # split so ix_train/ix_test hold the last (only) pair afterwards.
        for ix_train, ix_test in sss:
            pass
        # TODO: has no next(), need to figure this out
    else:
        # Plain random (non-stratified) split.
        shuffled_index = np.random.permutation(np.arange(num_all))
        num_valid = num_all // (100 / valid_size)
        num_train = num_all - num_valid
        ix_train = shuffled_index[:num_train]
        ix_test = shuffled_index[num_train:]
    if pairs:
        # NOTE(review): pairs=True combined with stratified=False raises
        # NameError — label_pairs is only bound in the stratified branch.
        id_train = np.sort(np.asarray(label_pairs.index[ix_train]))
        y_train_left = labels_split[
            labels_split.id.isin(id_train)].level.values[::2]
        y_train_right = labels_split[
            labels_split.id.isin(id_train)].level.values[1::2]
        y_train = np.vstack([y_train_left, y_train_right]).T
        # TODO are they sorted
        assert labels_split[
            labels_split.id.isin(id_train)].eye[::2].unique().shape[0] == 1
        assert labels_split[
            labels_split.id.isin(id_train)].eye[1::2].unique().shape[0] == 1
        id_valid = np.sort(np.asarray(label_pairs.index[ix_test]))
        y_valid_left = labels_split[
            labels_split.id.isin(id_valid)].level.values[::2]
        y_valid_right = labels_split[
            labels_split.id.isin(id_valid)].level.values[1::2]
        y_valid = np.vstack([y_valid_left, y_valid_right]).T
        # TODO are they sorted
        assert labels_split[
            labels_split.id.isin(id_valid)].eye[::2].unique().shape[0] == 1
        assert labels_split[
            labels_split.id.isin(id_valid)].eye[1::2].unique().shape[0] == 1
    else:
        # NOTE(review): .ix is deprecated pandas indexing (positional here).
        id_train = train_labels.ix[ix_train].image.values
        y_train = train_labels.ix[ix_train].level.values
        id_valid = train_labels.ix[ix_test].image.values
        y_valid = train_labels.ix[ix_test].level.values
    return id_train, y_train, id_valid, y_valid
# TODO: very ugly stuff here, can probably be done a lot better
def oversample_set(id_train, y_train, coefs):
    """Oversample training samples whose label rows contain classes 1-4.

    For each class c in 1..4, the rows containing c are appended
    coefs[c] extra times (ids and labels in lockstep).

    Returns (ids list, labels ndarray).
    """
    ids = list(id_train)
    labels = np.array(y_train)
    for level in (1, 2, 3, 4):
        # Row indices whose label vector contains this class level.
        rows = list(np.where(np.apply_along_axis(
            lambda row: level in row,
            1,
            y_train))[0])
        repeated = coefs[level] * rows
        ids += list(id_train[repeated])
        labels = np.vstack([labels, y_train[repeated]])
    return ids, labels
def get_img_ids_from_iter(ar):
    """Extract integer image ids from filenames shaped like '123_left.jpeg'.

    Prints the offending filename and exits when a name does not match.
    """
    pattern = re.compile(r'\b(\d+)_(\w+)')
    ids = []
    for img_fn in ar:
        match = pattern.search(img_fn)
        if match is None:
            print(img_fn)
            sys.exit(0)
        ids.append(int(match.group(1)))
    return ids
def get_img_ids_from_dir(img_dir):
    """Return the image ids of every *.jpeg file directly in img_dir."""
    jpeg_pattern = os.path.join(img_dir, "*.jpeg")
    return get_img_ids_from_iter(glob.glob(jpeg_pattern))
def softmax(ar, temp=1):
    """Row-wise softmax with temperature.

    ar: 2-D array of logits, one row per sample.
    temp: temperature divisor applied to the logits.

    Improvement: subtracts each row's maximum before exponentiating so
    large logits no longer overflow np.exp to inf/nan; the result is
    mathematically unchanged.
    """
    scaled = ar / temp
    e = np.exp(scaled - np.max(scaled, axis=1, keepdims=True))
    return e / e.sum(axis=1)[:, None]
def architecture_string(layer):
    """Render a human-readable summary of a Lasagne network.

    layer: the network's output layer; every layer below it is listed
    with its index, class name, output shape, and (where present)
    filter/pool size, dropout p, stride, learning-rate scale and a
    '[NT]' marker for non-trainable params.

    Returns the summary as a single multi-line string.
    """
    model_arch = ''
    for i, lyr in enumerate(nn.layers.get_all_layers(layer)):
        # Bug fix: string.ljust was removed in Python 3; use str.ljust.
        name = lyr.__class__.__name__.ljust(28)
        model_arch += " %2i %s %s " % (i, name,
                                       nn.layers.get_output_shape(lyr))
        if hasattr(lyr, 'filter_size'):
            model_arch += str(lyr.filter_size[0])
            model_arch += ' //'
        elif hasattr(lyr, 'pool_size'):
            if isinstance(lyr.pool_size, int):
                model_arch += str(lyr.pool_size)
            else:
                model_arch += str(lyr.pool_size[0])
            model_arch += ' //'
        if hasattr(lyr, 'p'):
            # Dropout probability.
            model_arch += ' [%.2f]' % lyr.p
        if hasattr(lyr, 'stride'):
            model_arch += str(lyr.stride[0])
        if hasattr(lyr, 'learning_rate_scale'):
            if lyr.learning_rate_scale != 1.0:
                model_arch += ' [lr_scale=%.2f]' % lyr.learning_rate_scale
        if hasattr(lyr, 'params'):
            for param in lyr.params:
                if 'trainable' not in lyr.params[param]:
                    model_arch += ' [NT] '
        model_arch += '\n'
    return model_arch
| 7,175 | 0 | 246 |
1c7f325c5890cb1a60510f4fab30b2de39a8c405 | 1,109 | py | Python | server.py | Guozhongyuan/insurtechapp | e7591944d9d2f0dfc447be5d2d71240f67b4661f | [
"MIT"
] | 201 | 2018-10-25T06:17:48.000Z | 2022-03-30T08:41:27.000Z | server.py | Guozhongyuan/insurtechapp | e7591944d9d2f0dfc447be5d2d71240f67b4661f | [
"MIT"
] | 10 | 2019-04-07T22:44:49.000Z | 2021-07-03T03:52:53.000Z | server.py | Guozhongyuan/insurtechapp | e7591944d9d2f0dfc447be5d2d71240f67b4661f | [
"MIT"
] | 69 | 2018-10-25T20:28:25.000Z | 2021-12-22T01:27:50.000Z | # Dash app initialization
import dash
# User management initialization
import os
from flask_login import LoginManager, UserMixin
from users_mgt import db, User as base
from config import config
app = dash.Dash(
__name__,
meta_tags=[
{
'charset': 'utf-8',
},
{
'name': 'viewport',
'content': 'width=device-width, initial-scale=1, shrink-to-fit=no'
}
]
)
server = app.server
app.config.suppress_callback_exceptions = True
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
# config
server.config.update(
SECRET_KEY=os.urandom(12),
SQLALCHEMY_DATABASE_URI=config.get('database', 'con'),
SQLALCHEMY_TRACK_MODIFICATIONS=False
)
db.init_app(server)
# Setup the LoginManager for the server
login_manager = LoginManager()
login_manager.init_app(server)
login_manager.login_view = '/login'
# Create User class with UserMixin
# callback to reload the user object
@login_manager.user_loader
| 21.326923 | 78 | 0.707845 | # Dash app initialization
import dash
# User management initialization
import os
from flask_login import LoginManager, UserMixin
from users_mgt import db, User as base
from config import config
# Create the Dash application with mobile-friendly meta tags.
app = dash.Dash(
    __name__,
    meta_tags=[
        {
            'charset': 'utf-8',
        },
        {
            'name': 'viewport',
            'content': 'width=device-width, initial-scale=1, shrink-to-fit=no'
        }
    ]
)
# Underlying Flask server, used by Flask-Login and SQLAlchemy below.
server = app.server
app.config.suppress_callback_exceptions = True
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
# config
# NOTE(review): os.urandom(12) creates a fresh SECRET_KEY on every process
# start, so login sessions do not survive a restart.
server.config.update(
    SECRET_KEY=os.urandom(12),
    SQLALCHEMY_DATABASE_URI=config.get('database', 'con'),
    SQLALCHEMY_TRACK_MODIFICATIONS=False
)
db.init_app(server)
# Setup the LoginManager for the server
login_manager = LoginManager()
login_manager.init_app(server)
# Unauthenticated users are redirected here.
login_manager.login_view = '/login'
# Create User class with UserMixin
class User(UserMixin, base):
    """The users_mgt User model extended with Flask-Login's UserMixin
    session helpers (is_authenticated, get_id, ...)."""
    pass
# callback to reload the user object
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User by primary key for each request."""
    return User.query.get(int(user_id))
| 42 | 16 | 44 |
d241f7c6226d200d83a89227e0cf209a29c2c07b | 634 | py | Python | heuristic/functions/remove_empty_routes.py | N-Wouda/OR-Analysis | 2070e2dee49f1556fcaa00044d3512e4ba08550f | [
"MIT"
] | 7 | 2020-04-12T14:02:08.000Z | 2021-10-09T07:52:08.000Z | heuristic/functions/remove_empty_routes.py | N-Wouda/OR-Analysis | 2070e2dee49f1556fcaa00044d3512e4ba08550f | [
"MIT"
] | 11 | 2020-02-16T22:15:28.000Z | 2020-03-28T09:35:38.000Z | heuristic/functions/remove_empty_routes.py | N-Wouda/OR-Analysis | 2070e2dee49f1556fcaa00044d3512e4ba08550f | [
"MIT"
] | 2 | 2020-05-21T23:19:44.000Z | 2020-09-04T05:22:45.000Z | from functools import wraps
from typing import Callable
from heuristic.classes import Solution
def remove_empty_routes(operator: Callable[..., Solution]):
"""
Wrapper function that removes empty routes from the returned solution
instance. These routes may come into existence because all customers have
been removed by e.g. a destroy operator.
"""
@wraps(operator)
return decorator
| 28.818182 | 77 | 0.676656 | from functools import wraps
from typing import Callable
from heuristic.classes import Solution
def remove_empty_routes(operator: Callable[..., Solution]):
    """
    Decorator that strips zero-length routes from the Solution returned
    by *operator*. Such routes can appear when e.g. a destroy operator
    removes every customer from a route.
    """
    @wraps(operator)
    def decorator(*args, **kwargs):
        solution = operator(*args, **kwargs)
        solution.routes = [route for route in solution.routes
                           if len(route) != 0]
        return solution
    return decorator
| 194 | 0 | 26 |
dcb952173008ca18cc782038e7b5a875d693a727 | 1,893 | py | Python | utilities/foto-files/manage-duplicates.py | davemungo/various | ed7c17f8b75a27fc59b0a5cad6125d64d00cd3ce | [
"MIT"
] | 1 | 2020-01-19T01:21:56.000Z | 2020-01-19T01:21:56.000Z | utilities/foto-files/manage-duplicates.py | davemungo/various | ed7c17f8b75a27fc59b0a5cad6125d64d00cd3ce | [
"MIT"
] | null | null | null | utilities/foto-files/manage-duplicates.py | davemungo/various | ed7c17f8b75a27fc59b0a5cad6125d64d00cd3ce | [
"MIT"
] | 1 | 2021-07-02T14:40:01.000Z | 2021-07-02T14:40:01.000Z | '''
Compare file contents and report duplicate files
    get sha256 hash for each file found
    use dictionary to check for duplicates
    print duplicates (no files are deleted)
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
# Script entry point: scan the current working directory for duplicates.
# NOTE(review): find_duplicates is not defined in this excerpt of the file.
if "__main__" == __name__:
    find_duplicates()
#EOF
| 25.581081 | 78 | 0.56841 | '''
Compare file contents and report duplicate files
    get sha256 hash for each file found
    use dictionary to check for duplicates
    print duplicates (no files are deleted)
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
def get_hash(file_name):
    """Return the SHA-256 hex digest of the file's contents.

    Reads in 1 MiB chunks so arbitrarily large files are hashed without
    loading them fully into memory.
    """
    BLOCK_SIZE = 1048576  # 1MB - Protect against reading large files
    hasher = hashlib.sha256()
    # Bug fix: use a context manager so the handle is always closed
    # (the original opened the file and never closed it).
    with open(file_name, 'rb') as f:
        read_buffer = f.read(BLOCK_SIZE)
        while len(read_buffer) > 0:
            hasher.update(read_buffer)
            read_buffer = f.read(BLOCK_SIZE)
    return hasher.hexdigest()
def check_files(start_dir):
    """Walk start_dir recursively and group file paths by content hash.

    Returns a mapping {sha256 hexdigest: [paths with that content]};
    entries holding more than one path are duplicates.

    Cleanup: removed the unused num_duplicates counter and the
    unreachable try/except around list.append; defaultdict(list)
    replaces the manual membership check (single dict lookup per file).
    """
    compare_list = defaultdict(list)
    for path, dirs, files in os.walk(start_dir):
        for f in files:
            file_name = os.path.join(path, f)
            compare_list[get_hash(file_name)].append(file_name)
    return compare_list
def print_list(dups_list):
    """Print every hash mapped to more than one file, with its paths."""
    for hash_key, file_names in dups_list.items():
        if len(file_names) < 2:
            continue
        print(f"HASH: {hash_key} DUPS: {len(file_names)}")
        for f in file_names:
            print(f"  {f}")
def find_duplicates():
    """Hash every file under the current working directory and report
    duplicate groups on stdout."""
    duplicates = check_files(os.getcwd())
    print_list(duplicates)
    print("\nDONE")
# Run duplicate detection on the current working directory when executed
# as a script.
if "__main__" == __name__:
    find_duplicates()
#EOF
| 1,404 | 0 | 124 |
7a1e0e617105d2a68e3ed32c5ac41dc253446f33 | 1,855 | py | Python | utils/prepare_models.py | kerryeon/rust-sbert | 067c35008368a9e4c0cd130cffd65d26f4d5d47b | [
"Apache-2.0"
] | null | null | null | utils/prepare_models.py | kerryeon/rust-sbert | 067c35008368a9e4c0cd130cffd65d26f4d5d47b | [
"Apache-2.0"
] | null | null | null | utils/prepare_models.py | kerryeon/rust-sbert | 067c35008368a9e4c0cd130cffd65d26f4d5d47b | [
"Apache-2.0"
] | null | null | null | import sys
import os
import numpy as np
import torch
import subprocess
from pathlib import Path
import requests
import zipfile
import shutil
# Download the multilingual sentence-transformers model and convert its
# DistilBERT and Dense parts to .ot tensor files for rust-bert.
# NOTE(review): download_model / convert_to_c_array are not defined in
# this excerpt of the file.
if __name__ == "__main__":
    url = "https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/v0.2/distiluse-base-multilingual-cased.zip"
    path = download_model(url, "distiluse-base-multilingual-cased")
    convert_to_c_array(path + '/0_DistilBERT', prefix='distilbert.')
    convert_to_c_array(path + '/2_Dense', suffix=True)
| 28.984375 | 130 | 0.661995 | import sys
import os
import numpy as np
import torch
import subprocess
from pathlib import Path
import requests
import zipfile
import shutil
def download_model(url, filename):
    """Download a zipped model archive and unpack it under models/.

    url: direct link to the zip archive.
    filename: base name used for the temporary zip and the target folder.

    Returns the extraction directory path. The temporary zip is deleted.

    Bug fix: the zip is now written through a context manager; the
    original open(...).write(...) left the handle open until GC.
    """
    print("Downloading model...")
    r = requests.get(url, allow_redirects=True)
    zip_filename = filename + ".zip"
    with open(zip_filename, 'wb') as zip_file:
        zip_file.write(r.content)
    model_dir = "models/" + filename
    os.makedirs(model_dir, exist_ok=True)
    print("Extracting model...")
    with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
        zip_ref.extractall(model_dir)
    os.remove(zip_filename)
    print("Done.")
    return model_dir
def convert_to_c_array(target_path, prefix="", suffix=False):
    """Convert a PyTorch checkpoint to .npz and then to a .ot tensor file.

    Loads <target_path>/pytorch_model.bin, renames each weight (prefix is
    prepended; with suffix=True only the last dot-separated component is
    kept), saves <target_path>/model.npz and invokes the repository's
    convert-tensor cargo binary to emit <target_path>/model.ot.

    Cleanup: removed the unused config_path/vocab_path locals and the
    redundant str() wrappers around string concatenations.
    """
    model_path = target_path + '/pytorch_model.bin'
    weights = torch.load(model_path, map_location='cpu')
    nps = {}
    for k, v in weights.items():
        k_distil = prefix + k
        if suffix:
            k_distil = k_distil.split('.')[-1]
        print(k_distil)
        # Contiguous float arrays are required by the converter.
        nps[k_distil] = np.ascontiguousarray(v.cpu().numpy())
    np.savez(target_path + '/model.npz', **nps)
    source = target_path + '/model.npz'
    target = target_path + '/model.ot'
    # Cargo.toml sits two levels above this script.
    toml_location = (Path(__file__).resolve() / '..' / '..' / 'Cargo.toml').resolve()
    subprocess.call(
        ['cargo', 'run', '--bin=convert-tensor', '--manifest-path=%s' % toml_location, '--', source, target])
# Download the multilingual sentence-transformers model and convert its
# DistilBERT and Dense parts to .ot tensor files.
if __name__ == "__main__":
    url = "https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/v0.2/distiluse-base-multilingual-cased.zip"
    path = download_model(url, "distiluse-base-multilingual-cased")
    convert_to_c_array(path + '/0_DistilBERT', prefix='distilbert.')
    convert_to_c_array(path + '/2_Dense', suffix=True)
| 1,306 | 0 | 46 |
38e031fb87b91426c92e51eed33e29d1f187058b | 6,347 | py | Python | gallery/test_models.py | guzalv/myks-gallery | 7944edfcaf12233a5a82e42298e42428170c1e54 | [
"BSD-3-Clause"
] | null | null | null | gallery/test_models.py | guzalv/myks-gallery | 7944edfcaf12233a5a82e42298e42428170c1e54 | [
"BSD-3-Clause"
] | null | null | null | gallery/test_models.py | guzalv/myks-gallery | 7944edfcaf12233a5a82e42298e42428170c1e54 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import Group, User
from django.test import TestCase
from .models import Album, AlbumAccessPolicy, Photo, PhotoAccessPolicy
| 46.669118 | 96 | 0.728533 | # coding: utf-8
from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import Group, User
from django.test import TestCase
from .models import Album, AlbumAccessPolicy, Photo, PhotoAccessPolicy
class AccessPolicyTests(TestCase):
    """Exercise album/photo access rules: public flags, per-user and
    per-group policies, and photos inheriting their album's policy
    (these tests expect album policies to be inherited by default and
    suppressed with inherit=False)."""
    def setUp(self):
        # One album with one photo; 'user' belongs to 'group', 'other'
        # belongs to no group.
        today = datetime.date.today()
        self.album = Album.objects.create(category='default', dirpath='foo', date=today)
        self.photo = Photo.objects.create(album=self.album, filename='bar')
        self.group = Group.objects.create(name='group')
        self.user = User.objects.create_user('user', 'user@gallery', 'pass')
        self.user.groups.add(self.group)
        self.other = User.objects.create_user('other', 'other@gallery', 'word')
    # --- album-level policies ---
    def test_private_album(self):
        self.assertFalse(self.album.is_allowed_for_user(self.user))
    def test_public_album(self):
        AlbumAccessPolicy.objects.create(album=self.album, public=True)
        self.assertTrue(self.album.is_allowed_for_user(self.user))
    def test_user_album(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, public=False)
        policy.users.add(self.user)
        self.assertTrue(self.album.is_allowed_for_user(self.user))
        self.assertFalse(self.album.is_allowed_for_user(self.other))
    def test_group_album(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, public=False)
        policy.groups.add(self.group)
        self.assertTrue(self.album.is_allowed_for_user(self.user))
        self.assertFalse(self.album.is_allowed_for_user(self.other))
    def test_user_group_album(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, public=False)
        policy.groups.add(self.group)
        policy.users.add(self.other)
        self.assertTrue(self.album.is_allowed_for_user(self.user))
        self.assertTrue(self.album.is_allowed_for_user(self.other))
    # --- photo-level policies ---
    def test_private_photo(self):
        self.assertFalse(self.photo.is_allowed_for_user(self.user))
    def test_public_photo(self):
        PhotoAccessPolicy.objects.create(photo=self.photo, public=True)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
    def test_user_photo(self):
        policy = PhotoAccessPolicy.objects.create(photo=self.photo, public=False)
        policy.users.add(self.user)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
    def test_group_photo(self):
        policy = PhotoAccessPolicy.objects.create(photo=self.photo, public=False)
        policy.groups.add(self.group)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
    def test_user_group_photo(self):
        policy = PhotoAccessPolicy.objects.create(photo=self.photo, public=False)
        policy.groups.add(self.group)
        policy.users.add(self.other)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertTrue(self.photo.is_allowed_for_user(self.other))
    # --- photos inheriting the album policy ---
    def test_public_photo_inherit(self):
        AlbumAccessPolicy.objects.create(album=self.album, public=True)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
    def test_user_photo_inherit(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, public=False)
        policy.users.add(self.user)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
    def test_group_photo_inherit(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, public=False)
        policy.groups.add(self.group)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
    def test_user_group_photo_inherit(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, public=False)
        policy.groups.add(self.group)
        policy.users.add(self.other)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertTrue(self.photo.is_allowed_for_user(self.other))
    # --- inherit=False: photos need their own PhotoAccessPolicy ---
    def test_public_photo_no_inherit(self):
        AlbumAccessPolicy.objects.create(album=self.album, inherit=False, public=True)
        self.assertFalse(self.photo.is_allowed_for_user(self.user))
        PhotoAccessPolicy.objects.create(photo=self.photo, public=True)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
    def test_user_photo_no_inherit(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, inherit=False, public=False)
        policy.users.add(self.user)
        self.assertFalse(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
        policy = PhotoAccessPolicy.objects.create(photo=self.photo, public=False)
        policy.users.add(self.user)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
    def test_group_photo_no_inherit(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, inherit=False, public=False)
        policy.groups.add(self.group)
        self.assertFalse(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
        policy = PhotoAccessPolicy.objects.create(photo=self.photo, public=False)
        policy.groups.add(self.group)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
    def test_user_group_photo_no_inherit(self):
        policy = AlbumAccessPolicy.objects.create(album=self.album, inherit=False, public=False)
        policy.groups.add(self.group)
        policy.users.add(self.other)
        self.assertFalse(self.photo.is_allowed_for_user(self.user))
        self.assertFalse(self.photo.is_allowed_for_user(self.other))
        policy = PhotoAccessPolicy.objects.create(photo=self.photo, public=False)
        policy.groups.add(self.group)
        policy.users.add(self.other)
        self.assertTrue(self.photo.is_allowed_for_user(self.user))
        self.assertTrue(self.photo.is_allowed_for_user(self.other))
| 5,566 | 13 | 536 |
3fb2560ef5f528ddb57bd0876a99c324cf70c2a9 | 1,262 | py | Python | PortScanner/bannerofport.py | saadhaxxan/Python-For-Ethical-Hacking | 87ef18b2c2876bf1711442a5f00ddb7d2dacfd43 | [
"MIT"
] | 26 | 2020-09-16T18:26:00.000Z | 2022-02-09T15:18:34.000Z | PortScanner/bannerofport.py | saadhaxxan/Python-For-Ethical-Hacking | 87ef18b2c2876bf1711442a5f00ddb7d2dacfd43 | [
"MIT"
] | null | null | null | PortScanner/bannerofport.py | saadhaxxan/Python-For-Ethical-Hacking | 87ef18b2c2876bf1711442a5f00ddb7d2dacfd43 | [
"MIT"
] | 3 | 2020-11-27T20:30:22.000Z | 2022-02-16T05:57:16.000Z | #!/usr/bin/python
import socket
from termcolor import colored
# Interactive prompt: single-port or multi-port banner grab.
# NOTE(review): "blue"/"green" is passed to print() as a second argument
# here, not to colored(), so the first two prompts are not actually colored.
print(colored("Select Option"),"blue")
print(colored("1. Single Port\n2. Multi Port"),"blue")
option = int(input())
if option == 1:
    print(colored("Enter Host IP Address:","green"))
    host = input()
    print(colored("Enter port number to scan:","green"))
    port = int(input())
if option == 2:
    print(colored("[*] Enter Host IP Address:","green"))
    host = input()
    print(colored("[*] Enter number of ports to scan:","green"))
    num = int(input())
# NOTE(review): PScanner is not defined in this excerpt of the file.
if option == 1:
    PScanner(host,port,option)
if option == 2:
PScanner(host,num,option) | 26.851064 | 94 | 0.574485 | #!/usr/bin/python
import socket
from termcolor import colored
# Interactive prompt: single-port (option 1) or multi-port (option 2) scan.
# NOTE(review): "blue" is passed to print() as a second argument on the next
# two lines, not to colored(), so those prompts print an extra 'blue'.
print(colored("Select Option"),"blue")
print(colored("1. Single Port\n2. Multi Port"),"blue")
option = int(input())
if option == 1:
    print(colored("Enter Host IP Address:","green"))
    host = input()
    print(colored("Enter port number to scan:","green"))
    port = int(input())
if option == 2:
    print(colored("[*] Enter Host IP Address:","green"))
    host = input()
    print(colored("[*] Enter number of ports to scan:","green"))
    num = int(input())
def getBanner(ip,port):
    """Connect to ip:port with a 2s timeout and return the service banner,
    or the string "Got Nothing" on any failure."""
    try:
        socket.setdefaulttimeout(2)
        soc = socket.socket()
        soc.connect((ip, port))
        # NOTE(review): strip('/n') strips '/' and 'n' characters — looks
        # like a typo for '\n'. On Python 3 recv() returns bytes, so
        # .strip('/n') raises TypeError, which the bare except swallows.
        banner = soc.recv(1024).strip('/n')
        return banner
    except:
        # NOTE(review): bare except also hides KeyboardInterrupt etc.
        return "Got Nothing"
def PScanner(host, port, option):
    """Banner-scan a host.

    option 1: grab the banner of the single port ``port``.
    option 2: ``port`` is a count — grab banners of ports 1..port-1.

    Bug fix: the option-2 loop previously iterated range(1, num) using the
    module-level global ``num`` (and shadowed the parameter), so the
    function only worked when called from this script's prompt flow. It
    now uses its own ``port`` parameter.
    """
    if option == 1:
        banner = getBanner(host, port)
        if banner:
            print(colored("[+]" + host + '/' + str(port) + ":" + banner.strip('/n'),"blue"))
    if option == 2:
        for p in range(1, port):
            banner = getBanner(host, p)
            print(colored("[+] " + host + '/' + str(p) + ": " + banner.strip('/n'),"blue"))
# Dispatch the single-port scan chosen above.
if option == 1:
    PScanner(host,port,option)
if option == 2:
PScanner(host,num,option) | 590 | 0 | 46 |
82d8e90703d98b7157175897677747df7c223cfb | 87 | py | Python | newsripper/ripper/apps.py | GriceTurrble/newsripper | c169d503433b88dda3e018c06b9e32f1407477d5 | [
"MIT"
] | null | null | null | newsripper/ripper/apps.py | GriceTurrble/newsripper | c169d503433b88dda3e018c06b9e32f1407477d5 | [
"MIT"
] | 1 | 2020-05-12T17:20:09.000Z | 2020-05-12T17:20:09.000Z | newsripper/ripper/apps.py | GriceTurrble/newsripper | c169d503433b88dda3e018c06b9e32f1407477d5 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.5 | 33 | 0.747126 | from django.apps import AppConfig
class RipperConfig(AppConfig):
    """Django application configuration for the 'ripper' app."""
    name = "ripper"
| 0 | 29 | 23 |
4a9bfd6938ec15c2ddff4a233a31274462f859da | 1,401 | py | Python | wonderbits/WBPressure.py | BigCircleLaw/wonderguy | d78a100784c414b9c3945c301d07c4e1b04aab8f | [
"MIT"
] | 1 | 2019-07-15T10:38:48.000Z | 2019-07-15T10:38:48.000Z | wonderbits/WBPressure.py | BigCircleLaw/wonderguy | d78a100784c414b9c3945c301d07c4e1b04aab8f | [
"MIT"
] | 1 | 2019-09-11T02:33:14.000Z | 2019-09-11T02:33:14.000Z | wonderbits/WBPressure.py | BigCircleLaw/wonderguy | d78a100784c414b9c3945c301d07c4e1b04aab8f | [
"MIT"
] | 1 | 2019-07-26T03:13:18.000Z | 2019-07-26T03:13:18.000Z | from .WBits import WBits
from .event import Event
| 25.017857 | 80 | 0.552463 | from .WBits import WBits
from .event import Event
def _format_str_type(x):
if isinstance(x, str):
x = str(x).replace('"', '\\"')
x = "\"" + x + "\""
return x
class Pressure(WBits):
def __init__(self, index = 1):
WBits.__init__(self)
self.index = index
def set_onboard_rgb(self, rgb):
command = 'pressure{}.set_onboard_rgb({})'.format(self.index, rgb)
self._set_command(command)
def get_pressure(self):
"""
获取压力(kg),量程是0~10KG,超过量程范围可能会导致传感器不修复的损坏
:rtype: float
"""
command = 'pressure{}.get_pressure()'.format(self.index)
value = self._get_command(command)
return self.val_process(value)
def calibrate(self, block = None):
"""
校准压力感器注意:校准过程中请确保没有外力作用于传感器,否则会导致校准后不准确。校准时,模块指示灯会变为黄色,等待指示灯变蓝说明校准完成了。
:param block: 阻塞参数 False: 不阻塞 True: 阻塞
"""
args = []
if block != None:
args.append(str(block))
command = 'pressure{}.calibrate({})'.format(self.index, ",".join(args))
self._set_command(command)
@property
def source_pressure(self):
return self, 'pressure', []
def when_pressure_changed(self, val = 0.2):
return Event(self.source_pressure, Event.TRIGGER_CHANGED, p)
| 435 | 1,073 | 49 |
e2cc10f2d72e46b6a67147d41bced89c93856a14 | 188 | py | Python | Ofpp/__init__.py | Timothy-Edward-Kendon/ofpp | 2c105ad74d5f0276bab38993162ec6f0b4b6b86f | [
"MIT"
] | 26 | 2017-11-23T09:16:00.000Z | 2020-03-09T13:18:35.000Z | openfoamparser/__init__.py | emanuel-raad/openfoamparser | f67031c90109932c38a299bad8f42fe34fdb8c4a | [
"MIT"
] | 4 | 2017-11-24T10:20:11.000Z | 2019-07-23T13:42:58.000Z | openfoamparser/__init__.py | emanuel-raad/openfoamparser | f67031c90109932c38a299bad8f42fe34fdb8c4a | [
"MIT"
] | 12 | 2018-02-27T06:58:13.000Z | 2019-10-23T18:47:19.000Z | from __future__ import print_function
from .field_parser import parse_internal_field, parse_boundary_field, parse_field_all
from .mesh_parser import FoamMesh
from .utils import *
| 26.857143 | 86 | 0.824468 | from __future__ import print_function
from .field_parser import parse_internal_field, parse_boundary_field, parse_field_all
from .mesh_parser import FoamMesh
from .utils import *
| 0 | 0 | 0 |
0f8fa66873e3421bf1c4a1eb3dc896b2aa23c5a8 | 8,565 | py | Python | src/evaluator.py | v1otusc/node_evaluator | 37f634d77967082d6d84fe0176c1d02167b412af | [
"MIT"
] | null | null | null | src/evaluator.py | v1otusc/node_evaluator | 37f634d77967082d6d84fe0176c1d02167b412af | [
"MIT"
] | null | null | null | src/evaluator.py | v1otusc/node_evaluator | 37f634d77967082d6d84fe0176c1d02167b412af | [
"MIT"
] | 2 | 2022-02-18T06:39:49.000Z | 2022-02-18T07:42:23.000Z | import threading
import time
import psutil
import rostopic
import rospy
from glog import logging
from node_evaluator.msg import Bandwidth as BandwidthMsg
@EvaluatorFactory.register('cpu')
@EvaluatorFactory.register('mem')
@EvaluatorFactory.register('net')
@EvaluatorFactory.register('topic_bw')
@EvaluatorFactory.register('bw_from_msg')
@EvaluatorFactory.register('sys_bw')
| 31.722222 | 79 | 0.585989 | import threading
import time
import psutil
import rostopic
import rospy
from glog import logging
from node_evaluator.msg import Bandwidth as BandwidthMsg
class EvaluatorFactory:
    """Registry-based factory for evaluator classes.

    Evaluators register themselves under a string name with the
    ``@EvaluatorFactory.register(name)`` class decorator and are later
    instantiated via :meth:`create_evaluator`.
    """
    registry = {}
    @classmethod
    def register(cls, name):
        """Class decorator registering the wrapped class under `name`.

        A duplicate registration overwrites the previous one (a warning
        is printed).
        """
        def inner_wrapper(wrapped_class):
            if name in cls.registry:
                print('Evaluator %s already exists.' % name)
            cls.registry[name] = wrapped_class
            return wrapped_class
        return inner_wrapper
    @classmethod
    def create_evaluator(cls, name, **kwargs):
        """Instantiate the evaluator registered under `name`.

        Raises NotImplementedError when no evaluator was registered
        under that name.
        """
        if name not in cls.registry:
            # Bug fix: the original printed the literal '%s' because the
            # format argument was missing; also fixed the grammar.
            print('Evaluator %s does not exist.' % name)
            raise NotImplementedError(name)
        eval_class = cls.registry[name]
        evaluator = eval_class(**kwargs)
        return evaluator
class EvaluatorBase(threading.Thread):
def __init__(self, **kwargs):
threading.Thread.__init__(self)
self.eval_rate_s = kwargs['eval_rate_s']
self.eval_stat = {}
self.eval_stat['time'] = []
self.stat_update_lock = threading.Lock()
self.term_event = threading.Event()
def print_start(self):
pass
def run(self):
self.print_start()
start_time = time.time()
while not self.term_event.is_set():
with self.stat_update_lock:
if self.eval():
self.eval_stat['time'].append(time.time())
time_to_sleep = start_time + self.eval_rate_s - time.time()
if time_to_sleep > 0:
time.sleep(time_to_sleep)
start_time = time.time()
print('%s evaluation stopped' % self.eval_mode)
def eval(self):
return False
def get_eval_stat(self):
with self.stat_update_lock:
return self.eval_stat
def stop(self):
self.term_event.set()
class ProcEvaluatorBase(EvaluatorBase):
    """Base class for evaluators sampling a single OS process via psutil."""
    def __init__(self, **kwargs):
        super(ProcEvaluatorBase, self).__init__(**kwargs)
        self.node_name = kwargs['node_name']
        self.node_pid = kwargs['node_pid']
        # NOTE(review): when node_pid is None, self.process is never set and
        # print_start's %d would also fail — callers presumably always pass a
        # valid pid; confirm.
        if self.node_pid is not None:
            self.process = psutil.Process(self.node_pid)
        self.eval_stat[self.node_name] = []
    def print_start(self):
        print('Start %s evaluation on node %s pid %d' %
              (self.eval_mode, self.node_name, self.node_pid))
@EvaluatorFactory.register('cpu')
class CPUEvaluator(ProcEvaluatorBase):
    """Samples the monitored process's CPU usage (percent) every tick."""
    def __init__(self, **kwargs):
        super(CPUEvaluator, self).__init__(**kwargs)
        self.eval_mode = 'cpu'
    def eval(self):
        """Append one CPU-percent sample for the node; always succeeds."""
        sample = self.process.cpu_percent()
        self.eval_stat[self.node_name].append(sample)
        return True
@EvaluatorFactory.register('mem')
class MemEvaluator(ProcEvaluatorBase):
    """Samples the monitored process's memory usage (percent) every tick."""
    def __init__(self, **kwargs):
        super(MemEvaluator, self).__init__(**kwargs)
        self.eval_mode = 'mem'
    def eval(self):
        """Append one memory-percent sample for the node; always succeeds."""
        sample = self.process.memory_percent()
        self.eval_stat[self.node_name].append(sample)
        return True
@EvaluatorFactory.register('net')
class NetEvaluator(ProcEvaluatorBase):
    """Placeholder for per-process network evaluation (not implemented)."""
    def __init__(self, **kwargs):
        super(NetEvaluator, self).__init__(**kwargs)
        self.eval_mode = 'net'
    def eval(self):
        # Bug fix: the original had an unreachable
        # print(self.process.connections()) statement after the raise;
        # the dead code has been removed.
        raise NotImplementedError
class TopicEvaluatorBase(EvaluatorBase):
    """Base class for evaluators that monitor a single ROS topic."""
    def __init__(self, **kwargs):
        super(TopicEvaluatorBase, self).__init__(**kwargs)
        topic = kwargs['topic']
        self.topic = topic
        self.eval_stat[topic] = []
    def print_start(self):
        """Announce which topic this evaluator is watching."""
        print('Start %s evaluation on topic %s' % (self.eval_mode, self.topic))
@EvaluatorFactory.register('topic_bw')
class TopicBwEvaluator(TopicEvaluatorBase):
    """Measures the bandwidth of a ROS topic by subscribing to it."""
    class ROSTopicBandwidth(rostopic.ROSTopicBandwidth):
        """rostopic bandwidth tracker seeded with one zero-size sample so
        get_bw() can report immediately after construction."""
        def __init__(self, window_size=100):
            super(TopicBwEvaluator.ROSTopicBandwidth,
                  self).__init__(window_size=window_size)
            self.times.append(time.time())
            self.sizes.append(0)
        def get_bw(self):
            """Return a dict with bytes_per_s/mean/min_s/max_s over the
            current window, or None if fewer than two samples exist."""
            if len(self.times) < 2:
                return None
            with self.lock:
                n = len(self.times)
                tn = time.time()
                t0 = self.times[0]
                total = sum(self.sizes)
                # Average throughput from window start until now.
                bytes_per_s = total / (tn - t0)
                mean = total / n
                # min and max
                max_s = max(self.sizes)
                min_s = min(self.sizes)
                bd_stat = {}
                bd_stat['bytes_per_s'] = bytes_per_s
                bd_stat['mean'] = mean
                bd_stat['min_s'] = min_s
                bd_stat['max_s'] = max_s
                return bd_stat
    def __init__(self, **kwargs):
        super(TopicBwEvaluator, self).__init__(**kwargs)
        self.eval_mode = 'topic_bw'
        self.rt = {}
        self.sub = {}
        # Small window (10 messages) so the estimate tracks recent traffic.
        self.rt = TopicBwEvaluator.ROSTopicBandwidth(10)
        self.sub = rospy.Subscriber(self.topic, rospy.AnyMsg,
                                    self.rt.callback)
    def eval(self):
        """Record the current bandwidth in MB/s; False if not yet measurable."""
        new_bw = self.rt.get_bw()
        if new_bw is not None:
            self.eval_stat[self.topic].append(new_bw['bytes_per_s']/1000000)
            return True
        else:
            return False
@EvaluatorFactory.register('bw_from_msg')
class BwFromMsgEvaluator(EvaluatorBase):
    """Aggregates per-name bandwidth from Bandwidth messages on a topic.

    Each incoming message carries a name, size and timestamp; throughput
    is accumulated per name and sampled into eval_stat every tick.
    """
    def __init__(self, **kwargs):
        super(BwFromMsgEvaluator, self).__init__(**kwargs)
        self.eval_mode = 'bw_from_msg'
        self.topic = kwargs['topic']
        self.sub = rospy.Subscriber(
            self.topic, BandwidthMsg, self._bw_callback)
        # eval_stat: per-name sampled MB/s; tmp_eval_stat: raw msg sizes.
        self.eval_stat = {}
        self.tmp_eval_stat = {}
        # Protects both dicts between the ROS callback and eval().
        self.data_lock = threading.Lock()
    def print_start(self):
        print('Start to receive bandwidth msg in topic %s' % self.topic)
    def _bw_callback(self, data):
        """ROS callback: record one (timestamp, size-in-MB) raw sample."""
        with self.data_lock:
            if data.name not in self.eval_stat:
                # Lazily create the per-name series on first sighting.
                self.eval_stat[data.name] = {}
                self.eval_stat[data.name]['time'] = []
                self.eval_stat[data.name][data.name] = []
                self.tmp_eval_stat[data.name] = {}
                self.tmp_eval_stat[data.name]['time'] = []
                self.tmp_eval_stat[data.name][data.name] = []
            self.tmp_eval_stat[data.name]['time'].append(data.time[1].to_sec())
            self.tmp_eval_stat[data.name][data.name].append(
                data.size/1000000.0)
    def get_bw(self, name):
        """Return bandwidth stats for `name`, or None with <2 raw samples.

        NOTE(review): the length check happens before taking data_lock, so
        it races with _bw_callback — presumably tolerated; confirm.
        """
        if len(self.tmp_eval_stat[name]['time']) < 2:
            return None
        with self.data_lock:
            n = len(self.tmp_eval_stat[name]['time'])
            tn = time.time()
            t0 = self.tmp_eval_stat[name]['time'][0]
            total = sum(self.tmp_eval_stat[name][name])
            bytes_per_s = total / (tn - t0)
            mean = total / n
            # min and max
            max_s = max(self.tmp_eval_stat[name][name])
            min_s = min(self.tmp_eval_stat[name][name])
            bd_stat = {}
            bd_stat['bytes_per_s'] = bytes_per_s
            bd_stat['mean'] = mean
            bd_stat['min_s'] = min_s
            bd_stat['max_s'] = max_s
            return bd_stat
    def eval(self):
        """Sample the current throughput for every known name."""
        for name in self.eval_stat:
            new_bw = self.get_bw(name)
            if new_bw is not None:
                self.eval_stat[name][name].append(
                    new_bw['bytes_per_s'])
                self.eval_stat[name]['time'].append(time.time())
        # return false to stop base class to add now() to time list
        return False
@EvaluatorFactory.register('sys_bw')
class SysBwEvaluator(EvaluatorBase):
    """System-wide bandwidth evaluator based on psutil NIC counters.

    Samples bytes received/sent on 'eth0' and records MB/s per interval.
    """
    def __init__(self, **kwargs):
        super(SysBwEvaluator, self).__init__(**kwargs)
        self.eval_mode = 'sys_bw'
        self.eval_stat['total_recv'] = []
        self.eval_stat['total_sent'] = []
        # Previous counter snapshots; 0 means "no baseline yet".
        self.old_recv = 0
        self.old_sent = 0
    def print_start(self):
        print('Start %s evaluation' % self.eval_mode)
    def eval(self):
        """Record throughput since the previous sample.

        Returns False on the first call (no baseline), True afterwards.
        """
        result = True
        # Perf fix: query the NIC counters once per tick instead of twice
        # (the original called psutil.net_io_counters for recv and again
        # for sent, which is slower and can read inconsistent snapshots).
        eth0 = psutil.net_io_counters(pernic=True)['eth0']
        new_recv = eth0[0]
        new_sent = eth0[1]
        if self.old_recv == 0 or self.old_sent == 0:
            result = False
        else:
            # Convert byte deltas to MB/s over the sampling interval.
            self.eval_stat['total_recv'].append(
                (new_recv-self.old_recv)/(self.eval_rate_s*1000000))
            self.eval_stat['total_sent'].append(
                (new_sent-self.old_sent)/(self.eval_rate_s*1000000))
            result = True
        self.old_recv = new_recv
        self.old_sent = new_sent
        return result
| 6,869 | 442 | 864 |
8eb76f8a97466c7e9df2a79b721a1fbab165b740 | 815 | py | Python | setup.py | beeware/rubicon | a2f2bd5178964b565b40033204a5b4a7ce99695e | [
"BSD-3-Clause"
] | 4 | 2019-08-11T15:42:57.000Z | 2021-08-06T19:31:19.000Z | setup.py | beeware/rubicon | a2f2bd5178964b565b40033204a5b4a7ce99695e | [
"BSD-3-Clause"
] | null | null | null | setup.py | beeware/rubicon | a2f2bd5178964b565b40033204a5b4a7ce99695e | [
"BSD-3-Clause"
] | 4 | 2019-10-11T13:10:00.000Z | 2021-06-24T02:24:53.000Z | #/usr/bin/env python
import io
from setuptools import setup
with io.open('README.rst', encoding='utf8') as readme:
long_description = readme.read()
setup(
name='rubicon',
version='0.0.0',
description='A collection of tools to bridge between Python and other language environments.',
long_description=long_description,
author='Russell Keith-Magee',
author_email='russell@keith-magee.com',
url='http://pybee.org/rubicon',
packages=[],
license='New BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Widget Sets',
],
)
| 29.107143 | 98 | 0.651534 | #/usr/bin/env python
import io
from setuptools import setup
with io.open('README.rst', encoding='utf8') as readme:
long_description = readme.read()
setup(
name='rubicon',
version='0.0.0',
description='A collection of tools to bridge between Python and other language environments.',
long_description=long_description,
author='Russell Keith-Magee',
author_email='russell@keith-magee.com',
url='http://pybee.org/rubicon',
packages=[],
license='New BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Widget Sets',
],
)
| 0 | 0 | 0 |
446aa28009d7caad9abca1127944ceab51f5a88f | 5,479 | py | Python | pp-cdmx/public_account/migrations/0001_initial.py | rickrebel/race-history | be93b88cf4658fd2c5ec409d8f422b2960d1ae60 | [
"MIT"
] | 1 | 2020-08-31T21:08:54.000Z | 2020-08-31T21:08:54.000Z | pp-cdmx/public_account/migrations/0001_initial.py | rickrebel/race-history | be93b88cf4658fd2c5ec409d8f422b2960d1ae60 | [
"MIT"
] | 1 | 2021-06-10T23:09:45.000Z | 2021-06-10T23:09:45.000Z | pp-cdmx/public_account/migrations/0001_initial.py | rickrebel/race-history | be93b88cf4658fd2c5ec409d8f422b2960d1ae60 | [
"MIT"
] | 1 | 2021-07-16T19:32:04.000Z | 2021-07-16T19:32:04.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-05-24 18:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 64.458824 | 191 | 0.616901 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-05-24 18:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the public_account app.

    Creates the PPImage and PublicAccount models (Django 1.11).
    Do not hand-edit field definitions; regenerate with makemigrations.
    """
    initial = True
    dependencies = [
        ('geographic', '0001_initial'),
        ('period', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='PPImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('path', models.CharField(max_length=255)),
                ('json_variables', models.TextField(blank=True, null=True)),
                ('headers', models.TextField(blank=True, null=True)),
                ('table_data', models.TextField(blank=True, null=True)),
                ('first_headers_used', models.BooleanField(default=True)),
                ('vision_data', models.TextField(blank=True, null=True)),
                ('clean_data', models.TextField(blank=True, null=True)),
                ('error_cell', models.TextField(blank=True, null=True, verbose_name='pila de errores')),
                ('len_array_numbers', models.CharField(blank=True, max_length=80, null=True)),
                ('data_row_numbers', models.TextField(blank=True, null=True, verbose_name='Datos de columnas num\xe9ricas')),
                ('data_row_suburbs', models.TextField(blank=True, null=True, verbose_name='Datos de columna de suburbs')),
                ('status', models.CharField(blank=True, default='uncleaned', max_length=80, null=True)),
                ('need_manual_ref', models.NullBooleanField()),
                ('manual_ref', models.TextField(blank=True, null=True)),
                ('need_second_manual_ref', models.NullBooleanField()),
                ('validated', models.NullBooleanField()),
                ('table_ref', models.TextField(blank=True, null=True, verbose_name='Referencia de filas')),
                ('table_ref_columns', models.TextField(blank=True, null=True, verbose_name='Referencias de Columnas')),
                ('comments', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='PublicAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('variables', models.TextField(blank=True, null=True)),
                ('status', models.CharField(blank=True, default='uncleaned', max_length=80, null=True)),
                ('error_cell', models.TextField(blank=True, null=True, verbose_name='pila de errores')),
                ('orphan_rows', models.TextField(blank=True, null=True, verbose_name='Filas no insertadas')),
                ('approved', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Aprobado')),
                ('assigned', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Asignado')),
                ('modified', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Modificado')),
                ('executed', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Ejecutado')),
                ('vertical_align_ammounts', models.CharField(blank=True, choices=[('top', 'top'), ('center', 'center'), ('bottom', 'bottom')], max_length=50, null=True)),
                ('unreadable', models.CharField(blank=True, choices=[('bajo', 'bajo'), ('media', 'media'), ('alto', 'alto')], max_length=50, null=True, verbose_name='Nivel de ilegibilidad')),
                ('ignore_columns', models.CharField(blank=True, help_text='columnas a ignorar para la alineacion horizontal (4-8)', max_length=50, null=True)),
                ('approved_mean', models.FloatField(blank=True, null=True)),
                ('executed_mean', models.FloatField(blank=True, null=True)),
                ('not_executed', models.IntegerField(blank=True, null=True)),
                ('minus_10', models.IntegerField(blank=True, null=True)),
                ('minus_5', models.IntegerField(blank=True, null=True)),
                ('similar', models.IntegerField(blank=True, null=True)),
                ('plus_5', models.IntegerField(blank=True, null=True)),
                ('no_info', models.NullBooleanField()),
                ('match_review', models.NullBooleanField()),
                ('suburb_count', models.IntegerField(blank=True, null=True)),
                ('manual_mach', models.TextField(blank=True, null=True)),
                ('comment_match', models.TextField(blank=True, null=True)),
                ('period_pp', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='period.PeriodPP', verbose_name='Periodo PP')),
                ('townhall', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='geographic.TownHall', verbose_name='Alcald\xeda')),
            ],
            options={
                'verbose_name': 'Cuenta Publica',
                'verbose_name_plural': 'Cuentas Publicas',
            },
        ),
        migrations.AddField(
            model_name='ppimage',
            name='public_account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pp_images', to='public_account.PublicAccount'),
        ),
    ]
| 0 | 5,266 | 23 |
f758de11235ecd17f99ee62e8dff7e421c650588 | 306 | py | Python | contests/pythonist_3/capitalize.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | 1 | 2017-07-02T01:35:39.000Z | 2017-07-02T01:35:39.000Z | contests/pythonist_3/capitalize.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | null | null | null | contests/pythonist_3/capitalize.py | delaanthonio/hackerrank | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | [
"MIT"
] | 1 | 2018-04-03T15:11:56.000Z | 2018-04-03T15:11:56.000Z | """
Capitalize!
:author: Dela Anthonio
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/contests/pythonist3/challenges/capitalize
"""
print(solve('hi jake hj ')) | 23.538462 | 78 | 0.696078 | """
Capitalize!
:author: Dela Anthonio
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/contests/pythonist3/challenges/capitalize
"""
def solve(s: str):
    """Capitalize the first letter of each space-separated word.

    Bug fix: str.capitalize() also lowercases the remainder of the word
    ('wQ' -> 'Wq'), which is wrong for the HackerRank 'Capitalize!'
    challenge; only the first character must be uppercased. Splitting and
    joining on a single space preserves consecutive/trailing spaces.
    """
    words = [word[:1].upper() + word[1:] for word in s.split(' ')]
    return " ".join(words)
print(solve('hi jake hj ')) | 81 | 0 | 23 |
c8cf2fde919819d05af708ce20ebdb58e566f6f9 | 621 | py | Python | solstice/bootstrap/houdini/userSetup.py | Solstice-Short-Film/solstice-bootstrap | ace828e00d3f10ff3e8a70d23170f138e4633ff8 | [
"MIT"
] | null | null | null | solstice/bootstrap/houdini/userSetup.py | Solstice-Short-Film/solstice-bootstrap | ace828e00d3f10ff3e8a70d23170f138e4633ff8 | [
"MIT"
] | null | null | null | solstice/bootstrap/houdini/userSetup.py | Solstice-Short-Film/solstice-bootstrap | ace828e00d3f10ff3e8a70d23170f138e4633ff8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Initialization for Solstice Tools
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
print('=' * 100)
print('| Solstice Pipeline | > Loading Solstice Tools')
try:
import solstice.loader
solstice.loader.init(import_libs=True)
print('| Solstice Pipeline | Solstice Tools loaded successfully!')
print('=' * 100)
except Exception as e:
print('ERROR: Impossible to load Solstice Tools, contact TD!')
print(str(e))
| 23 | 70 | 0.705314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Initialization for Solstice Tools
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
print('=' * 100)
print('| Solstice Pipeline | > Loading Solstice Tools')
try:
import solstice.loader
solstice.loader.init(import_libs=True)
print('| Solstice Pipeline | Solstice Tools loaded successfully!')
print('=' * 100)
except Exception as e:
print('ERROR: Impossible to load Solstice Tools, contact TD!')
print(str(e))
| 0 | 0 | 0 |
bc641904aea7c3d83a27568f59778d62c1832186 | 99 | py | Python | inheritance/exercise/project_restaurant/beverage/cold_beverage.py | ivan-yosifov88/python_oop | 82b210e427cb80dbab3b9a5c3fceab431ee60164 | [
"MIT"
] | 1 | 2021-05-21T20:28:55.000Z | 2021-05-21T20:28:55.000Z | inheritance/exercise/project_restaurant/beverage/cold_beverage.py | ivan-yosifov88/python_oop | 82b210e427cb80dbab3b9a5c3fceab431ee60164 | [
"MIT"
] | null | null | null | inheritance/exercise/project_restaurant/beverage/cold_beverage.py | ivan-yosifov88/python_oop | 82b210e427cb80dbab3b9a5c3fceab431ee60164 | [
"MIT"
] | null | null | null | from project_restaurant.beverage.beverage import Beverage
| 16.5 | 57 | 0.818182 | from project_restaurant.beverage.beverage import Beverage
class ColdBeverage(Beverage):
    """Marker subclass for cold beverages; inherits all Beverage behavior."""
    pass
| 0 | 17 | 23 |
f72f8089cc89101fe7a243dfe2b57fdf92cb7ad2 | 22,912 | py | Python | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | 1 | 2019-07-13T12:04:04.000Z | 2019-07-13T12:04:04.000Z | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | null | null | null | LLDBagility/stubvm.py | killvxk/LLDBagility | 6cc0df91bd49e43997b77048611d1b73f43f7b29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import threading
import lldbagilityutils
from PyFDP.FDP import FDP
from VMSN import VMSN
logger = lldbagilityutils.create_indented_logger(__name__, "/tmp/stubvm.log")
NULL = 0x0
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/eflags.h
EFL_TF = 0x00000100
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/vm_param.h
I386_PGBYTES = 4096
VM_MIN_KERNEL_ADDRESS = 0xFFFFFF8000000000
VM_MAX_KERNEL_ADDRESS = 0xFFFFFFFFFFFFEFFF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/EXTERNAL_HEADERS/mach-o/loader.h
MH_MAGIC_64 = 0xFEEDFACF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/exception_types.h
EXC_SOFTWARE = 0x5
EXC_BREAKPOINT = 0x6
EXC_SOFT_SIGNAL = 0x10003
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/exception.h
EXC_I386_BPTFLT = 0x3
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/bsd/sys/signal.h
SIGINT = 0x2
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/proc_reg.h
MSR_IA32_GS_BASE = 0xC0000101
MSR_IA32_KERNEL_GS_BASE = 0xC0000102
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/machine.h
CPU_TYPE_X86 = 0x7
CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
CPU_SUBTYPE_X86_ARCH1 = 0x4
@lldbagilityutils.indented(logger)
@lldbagilityutils.indented(logger)
| 37.255285 | 148 | 0.659043 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import threading
import lldbagilityutils
from PyFDP.FDP import FDP
from VMSN import VMSN
logger = lldbagilityutils.create_indented_logger(__name__, "/tmp/stubvm.log")
NULL = 0x0
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/eflags.h
EFL_TF = 0x00000100
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/vm_param.h
I386_PGBYTES = 4096
VM_MIN_KERNEL_ADDRESS = 0xFFFFFF8000000000
VM_MAX_KERNEL_ADDRESS = 0xFFFFFFFFFFFFEFFF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/EXTERNAL_HEADERS/mach-o/loader.h
MH_MAGIC_64 = 0xFEEDFACF
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/exception_types.h
EXC_SOFTWARE = 0x5
EXC_BREAKPOINT = 0x6
EXC_SOFT_SIGNAL = 0x10003
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/i386/exception.h
EXC_I386_BPTFLT = 0x3
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/bsd/sys/signal.h
SIGINT = 0x2
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/proc_reg.h
MSR_IA32_GS_BASE = 0xC0000101
MSR_IA32_KERNEL_GS_BASE = 0xC0000102
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/mach/machine.h
CPU_TYPE_X86 = 0x7
CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
CPU_SUBTYPE_X86_ARCH1 = 0x4
class STUBVM(object):
    def __init__(self, stub, name):
        """Wrap an FDP-style debugging stub for the VM called `name`."""
        self.stub = stub(name)
        self.name = name
        # Serializes all stub accesses (used by @synchronized).
        self.lock = threading.RLock()
        # Pending (exc, code, subcode) triple to report at the next state().
        self._exception = None
        # vaddr -> stub breakpoint id for software breakpoints we set.
        self._soft_breakpoints = {}
        # Deferred-action flags consumed by resume()/read_register().
        self._interrupt_at_next_resume = False
        self._singlestep_at_next_resume = False
        # Address of the guest's kdp struct once LLDBmacros reveal it.
        self._kdp_vaddr = None
        self._store_kdp_at_next_write_virtual_memory = False
        self._return_incremented_at_next_read_register_rip = False
@lldbagilityutils.indented(logger)
    def _continue_until_kernel_code(self):
        """Run the VM until RIP lands in kernel address space.

        Uses a hardware breakpoint on CR3 writes as a heuristic: with high
        probability only the kernel writes CR3 (on context switches).
        """
        logger.debug("_continue_until_kernel_code()")
        if _in_kernel_space(self.read_register("rip")):
            return
        # set a breakpoint on writes to the CR3 register (with high probability
        # only the kernel is doing it)
        cr3bp_id = self.stub.SetBreakpoint(
            self.stub.CR_HBP,
            0x0,
            self.stub.WRITE_BP,
            self.stub.VIRTUAL_ADDRESS,
            0x3,
            0x1,
            self.stub.NO_CR3,
        )
        assert 0 <= cr3bp_id <= 254
        # resume the VM execution until reaching kernel code
        while True:
            self.stub.Resume()
            self.stub.WaitForStateChanged()
            if _in_kernel_space(self.read_register("rip")):
                logger.debug("> stopping: 0x{:016x}".format(self.read_register("rip")))
                break
            # Step over the CR3 write so the same breakpoint can re-trigger.
            self.stub.SingleStep()
        self.stub.UnsetBreakpoint(cr3bp_id)
@lldbagilityutils.indented(logger)
    def _get_active_thread_vaddr(self):
        """Return the kernel vaddr of the currently active XNU thread.

        Walks cpu_data via the GS base (per-CPU data pointer in XNU).
        """
        logger.debug("_get_active_thread_vaddr()")
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L392
        def _get_gs_base(self):
            # In user mode GS holds the user base; the kernel one is then
            # parked in MSR_IA32_KERNEL_GS_BASE (swapgs semantics).
            logger.debug("_get_gs_base()")
            gs_base = self.read_msr64(MSR_IA32_GS_BASE)
            logger.debug("> MSR_IA32_GS_BASE: 0x{:016x}".format(gs_base))
            if not _in_kernel_space(gs_base):
                gs_base = self.read_msr64(MSR_IA32_KERNEL_GS_BASE)
                logger.debug("> MSR_IA32_KERNEL_GS_BASE: 0x{:016x}".format(gs_base))
            return gs_base
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/mp_desc.c#L476
        cpu_data_vaddr = _get_gs_base(self)
        logger.debug("> cpu_data_vaddr: 0x{:016x}".format(cpu_data_vaddr))
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L149
        cpu_this = lldbagilityutils.u64(self.read_virtual_memory(cpu_data_vaddr, 0x8))
        logger.debug("> cpu_this: 0x{:016x}".format(cpu_this))
        # cpu_data.cpu_this is a self-pointer; sanity-check the walk.
        assert cpu_data_vaddr == cpu_this
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/i386/cpu_data.h#L150
        cpu_active_thread = lldbagilityutils.u64(
            self.read_virtual_memory(cpu_data_vaddr + 0x8, 0x8)
        )
        logger.debug("> cpu_active_thread: 0x{:016x}".format(cpu_active_thread))
        return cpu_active_thread
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def complete_attach(self):
        """Finish attaching: halt in kernel code and learn kernel facts.

        Populates kernel_cr3, kernel_load_vaddr, kernel_slide and
        kernel_version for later requests.
        """
        logger.debug("complete_attach()")
        self.halt()
        self.unset_all_breakpoints()
        self._continue_until_kernel_code()
        assert _in_kernel_space(self.read_register("rip"))
        self.kernel_cr3 = self.read_register("cr3")
        logger.debug("> kernel_cr3: 0x{:x}".format(self.kernel_cr3))
        self.kernel_load_vaddr = _find_kernel_load_vaddr(self)
        logger.debug("> kernel_load_vaddr: 0x{:016x}".format(self.kernel_load_vaddr))
        self.kernel_slide = _compute_kernel_slide(self.kernel_load_vaddr)
        logger.debug("> kernel_slide: 0x{:x}".format(self.kernel_slide))
        self.kernel_version = _find_kernel_version(self)
        logger.debug("> kernel_version: {}".format(self.kernel_version))
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_num_cpus(self):
logger.debug("get_num_cpus()")
return self.stub.GetCpuCount()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def get_host_info(self):
logger.debug("get_host_info()")
# https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/kdp/ml/x86_64/kdp_machdep.c#L256
cpus_mask = 0x0
for i in range(self.get_num_cpus()):
cpus_mask |= 1 << i
cpu_type = CPU_TYPE_X86_64
cpu_subtype = CPU_SUBTYPE_X86_ARCH1
return cpus_mask, cpu_type, cpu_subtype
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def get_kernel_version(self):
        """Return the kernel version string, appending stext if absent.

        NOTE(review): the membership test uses b"stext" while the appended
        text is a str — presumably kernel_version is bytes on Python 2
        where the two interoperate; confirm before porting to Python 3.
        """
        logger.debug("get_kernel_version()")
        kernel_version = self.kernel_version
        if b"stext" not in kernel_version:
            logger.debug("> stext")
            # return the known kernel load address to make LLDB do less requests
            kernel_version += "; stext=0x{:016x}".format(self.kernel_load_vaddr)
        return kernel_version
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_msr64(self, msr):
logger.debug("read_msr64(msr=0x{:x})".format(msr))
return self.stub.ReadMsr(msr, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_msr64(self, msr, val):
logger.debug("write_msr64(msr=0x{:x}, val=0x{:x})".format(msr, val))
self.stub.WriteMsr(self, msr, val, CpuId=self.stub.CPU0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def read_register(self, reg):
        """Return the value of register `reg` (e.g. "rip", "cr3").

        After a breakpoint hit reported via state(), the first RIP read is
        returned incremented by one so LLDB's int3 rewind logic computes
        the correct stop address.
        """
        logger.debug("read_register(reg='{}')".format(reg))
        val = getattr(self.stub, reg)
        if reg == "rip" and self._return_incremented_at_next_read_register_rip:
            logger.debug("> _return_incremented_at_next_read_register_rip")
            self._return_incremented_at_next_read_register_rip = False
            # https://github.com/llvm/llvm-project/tree/llvmorg-8.0.0/lldb/source/Plugins/Process/MacOSX-Kernel/ThreadKDP.cpp#L157
            # https://github.com/llvm/llvm-project/tree/llvmorg-8.0.0/lldb/source/Plugins/Process/Utility/StopInfoMachException.cpp#L571
            return val + 1
        return val
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def read_registers(self, regs):
logger.debug("read_registers()")
return {reg: self.read_register(reg) for reg in regs}
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def write_register(self, reg, val):
        """Write `val` into register `reg`.

        RFLAGS writes are intercepted: setting TF is translated into a
        deferred single-step at the next resume, and the actual RFLAGS
        value is left untouched.
        """
        logger.debug("write_register(reg='{}', val=0x{:x})".format(reg, val))
        if reg == "rflags":
            if val & EFL_TF:
                logger.debug("> _singlestep_at_next_resume")
                self._singlestep_at_next_resume = True
            # disallow changes to RFLAGS
            return
        setattr(self.stub, reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def write_registers(self, regs):
logger.debug("write_registers()")
for reg, val in regs.items():
self.write_register(reg, val)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def read_virtual_memory(self, vaddr, nbytes):
        """Read `nbytes` of guest virtual memory at `vaddr`.

        Falls back to the kernel pmap when a read fails from user space,
        and fakes part of the kdp struct for LLDBmacros. Returns b"" on
        failure, never None.
        """
        logger.debug(
            "read_virtual_memory(vaddr=0x{:016x}, nbytes=0x{:x})".format(vaddr, nbytes)
        )
        data = self.stub.ReadVirtualMemory(vaddr, nbytes)
        if not data and not _in_kernel_space(self.read_register("rip")):
            # if reading fails, it could be the case that we are trying to read kernel
            # virtual addresses from user space (e.g. when LLDB stops in user land and
            # the user loads or uses lldbmacros)
            # in this case, we try the read again but using the kernel pmap
            logger.debug("> using kernel pmap")
            process_cr3 = self.read_register("cr3")
            # switch to kernel pmap
            self.write_register("cr3", self.kernel_cr3)
            # try the read again
            data = self.stub.ReadVirtualMemory(vaddr, nbytes)
            # switch back to the process pmap
            self.write_register("cr3", process_cr3)
        if self._kdp_vaddr and vaddr <= self._kdp_vaddr <= vaddr + nbytes:
            # this request has very likely been generated by LLDBmacros
            logger.debug("> fake kdp struct")
            assert data is not None
            # fill some fields of the empty (since the boot-arg "debug" is probably not set) kdp struct
            saved_state = lldbagilityutils.p64(NULL)
            kdp_thread = lldbagilityutils.p64(self._get_active_thread_vaddr())
            fake_partial_kdp_struct = b"".join((saved_state, kdp_thread))
            # Splice the fake struct into the read data at its offset.
            kdp_struct_offset = self._kdp_vaddr - vaddr
            data = (
                data[:kdp_struct_offset]
                + fake_partial_kdp_struct
                + data[kdp_struct_offset + len(fake_partial_kdp_struct) :]
            )
        data = data if data else b""
        logger.debug("> len(data): 0x{:x}".format(len(data)))
        return data
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def write_virtual_memory(self, vaddr, data):
        """Write `data` into guest virtual memory at `vaddr`.

        When the store-kdp flag is armed, the write is swallowed and
        `vaddr` is recorded as the kdp struct address instead.
        """
        logger.debug("write_virtual_memory(vaddr=0x{:016x}, data=...)".format(vaddr))
        assert self.is_state_halted()
        if self._store_kdp_at_next_write_virtual_memory:
            logger.debug("> _store_kdp_at_next_write_virtual_memory")
            self._store_kdp_at_next_write_virtual_memory = False
            self._kdp_vaddr = vaddr
            return
        return self.stub.WriteVirtualMemory(vaddr, data)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def set_soft_exec_breakpoint(self, vaddr):
        """Set a software execute breakpoint at `vaddr`; return its stub id."""
        logger.debug("set_soft_exec_breakpoint(vaddr=0x{:016x})".format(vaddr))
        assert self.is_state_halted()
        id = 0x0
        length = 0x1
        self._soft_breakpoints[vaddr] = self.stub.SetBreakpoint(
            self.stub.SOFT_HBP,
            id,
            self.stub.EXECUTE_BP,
            self.stub.VIRTUAL_ADDRESS,
            vaddr,
            length,
            self.stub.NO_CR3,
        )
        logger.debug("> bp id: {}".format(self._soft_breakpoints[vaddr]))
        return self._soft_breakpoints[vaddr]
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def unset_soft_breakpoint(self, vaddr):
logger.debug("unset_soft_breakpoint(vaddr=0x{:016x})")
assert self.is_state_halted()
try:
id = self._soft_breakpoints[vaddr]
except KeyError:
logger.debug("> no such breakpoint")
else:
del self._soft_breakpoints[vaddr]
return self.stub.UnsetBreakpoint(id)
return False
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def set_hard_breakpoint(self, trigger, nreg, vaddr):
        """Arm debug register DR`nreg` at `vaddr`.

        trigger: "e" (execute), "w" (write) or "rw" (read/write), encoded
        into the corresponding DR7 condition bits; the breakpoint is
        enabled globally (G-bit).
        """
        logger.debug(
            "set_hard_exec_breakpoint(trigger='{}', nreg=0x{:016x}, vaddr=0x{:016x})".format(
                trigger, nreg, vaddr
            )
        )
        assert self.is_state_halted()
        assert trigger in ("e", "w", "rw")
        assert 0 <= nreg <= 3
        # DR7 layout: condition bits at 16 + 4*n, enable bits at 2*n.
        trigger_bitshifts = {nreg: 16 + nreg * 4 for nreg in range(4)}
        status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
        ctrl_mask = self.read_register("dr7")
        # reset trigger entry for the chosen register to 0b00
        ctrl_mask &= ~(0b11 << trigger_bitshifts[nreg])
        # set new entry
        if trigger == "e":
            trigger_entry = 0b00
        elif trigger == "w":
            trigger_entry = 0b01
        elif trigger == "rw":
            trigger_entry = 0b11
        else:
            raise NotImplementedError
        ctrl_mask |= trigger_entry << trigger_bitshifts[nreg]
        # enable breakpoint globally
        ctrl_mask |= 0b10 << status_bitshifts[nreg]
        logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
        self.write_register("dr{}".format(nreg), vaddr)
        self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def unset_hard_breakpoint(self, nreg):
        """Disarm debug register DR`nreg` and clear its DR7 enable bits."""
        logger.debug("unset_hard_breakpoint(nreg=0x{:016x})".format(nreg))
        assert self.is_state_halted()
        assert 0 <= nreg <= 3
        # DR7 enable bits (local+global) live at 2*n.
        status_bitshifts = {nreg: nreg * 2 for nreg in range(4)}
        ctrl_mask = self.read_register("dr7")
        # disable breakpoint globally and locally
        ctrl_mask &= ~(0b11 << status_bitshifts[nreg])
        logger.debug("> ctrl_mask: 0b{:032b}".format(ctrl_mask))
        self.write_register("dr{}".format(nreg), 0x0)
        self.write_register("dr7", ctrl_mask)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def unset_all_breakpoints(self):
        """Remove every software breakpoint and clear all debug registers."""
        logger.debug("unset_all_breakpoints()")
        assert self.is_state_halted()
        # remove soft breakpoints
        self._soft_breakpoints.clear()
        self.stub.UnsetAllBreakpoint()
        # remove hard breakpoints
        self.write_register("dr0", 0x0)
        self.write_register("dr1", 0x0)
        self.write_register("dr2", 0x0)
        self.write_register("dr3", 0x0)
        self.write_register("dr6", 0x0)
        self.write_register("dr7", 0x0)
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def halt(self):
        """Pause VM execution via the stub."""
        logger.debug("halt()")
        self.stub.Pause()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
    def interrupt(self):
        """Halt the VM and queue a SIGINT-style stop for the next state()."""
        logger.debug("interrupt()")
        self._exception = (EXC_SOFTWARE, EXC_SOFT_SIGNAL, SIGINT)
        self.halt()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def single_step(self):
    """Execute exactly one instruction, reporting the stop as a breakpoint trap.

    The pending exception is pre-set so state() reports EXC_BREAKPOINT
    after the step completes.
    """
    logger.debug("single_step()")
    self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
    self.stub.SingleStep()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def resume(self):
    """Resume VM execution, honoring deferred one-shot requests first.

    Priority matters: a pending interrupt request wins over a pending
    single-step request; each flag is consumed (reset) when taken.
    """
    logger.debug("resume()")
    if self._interrupt_at_next_resume:
        logger.debug("> _interrupt_at_next_resume")
        self._interrupt_at_next_resume = False
        self.interrupt()
        return
    if self._singlestep_at_next_resume:
        logger.debug("> _singlestep_at_next_resume")
        self._singlestep_at_next_resume = False
        self.single_step()
        return
    if self.is_breakpoint_hit():
        logger.debug(
            "> state breakpoint hit: 0x{:016x}".format(self.read_register("rip"))
        )
        # Currently stopped on a breakpoint: step over it first so the
        # Resume() below does not immediately re-trigger the same stop.
        self.stub.SingleStep()
    self.stub.Resume()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_take_snapshot(self):
    """Interrupt the VM, then save a snapshot of its current state."""
    logger.debug("interrupt_and_take_snapshot()")
    self.interrupt()
    self.stub.Save()
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_and_restore_last_snapshot(self):
    """Interrupt the VM and roll it back to the most recent snapshot.

    Returns True on success — in which case the locally tracked soft
    breakpoints are dropped, because the stub does not restore them —
    or False when no snapshot could be restored.
    """
    logger.debug("interrupt_and_restore_last_snapshot()")
    self.interrupt()
    if not self.stub.Restore():
        logger.debug("> could not restore")
        return False
    # breakpoints are not restored
    self._soft_breakpoints.clear()
    return True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def state(self):
    """Return ``(stub_state, pending_exception)`` and clear the exception.

    When the VM is stopped on a breakpoint a synthetic EXC_BREAKPOINT
    exception is reported, so the attached debugger sees a trap stop.
    """
    logger.debug("state()")
    if self.is_breakpoint_hit():
        logger.debug("> state breakpoint hit")
        self._exception = (EXC_BREAKPOINT, EXC_I386_BPTFLT, 0x0)
        # the following assumes that the next call to STUBVM.read_register("rip")
        # will be made by LLDB in response to this EXC_BREAKPOINT exception
        self._return_incremented_at_next_read_register_rip = True
    # Snapshot state together with the pending exception, then consume it.
    state = (self.stub.GetState(), self._exception)
    self._exception = None
    return state
@lldbagilityutils.synchronized
def is_state_changed(self):
    """Truthy if the stub reports a state change or an exception is pending.

    Note: may return the pending exception tuple itself rather than a bool.
    """
    return self.stub.GetStateChanged() or self._exception
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_state_halted(self):
    """Truthy (non-zero) if the stub reports the VM as paused."""
    logger.debug("is_state_halted()")
    return self.stub.GetState() & self.stub.STATE_PAUSED
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def is_breakpoint_hit(self):
    """Truthy (non-zero) if the VM stopped on a soft or hard breakpoint."""
    logger.debug("is_breakpoint_hit()")
    return self.stub.GetState() & (
        self.stub.STATE_BREAKPOINT_HIT | self.stub.STATE_HARD_BREAKPOINT_HIT
    )
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def interrupt_at_next_resume(self):
    """Arm a one-shot flag: the next resume() call interrupts instead of running."""
    logger.debug("interrupt_at_next_resume()")
    self._interrupt_at_next_resume = True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def store_kdp_at_next_write_virtual_memory(self):
    """Arm a one-shot flag consumed by the next virtual-memory write.

    # NOTE(review): the consuming write_virtual_memory() is outside this
    # view; presumably it records KDP-related data (cf. _kdp_vaddr) —
    # confirm against the full class.
    """
    logger.debug("store_kdp_at_next_write_virtual_memory()")
    self._store_kdp_at_next_write_virtual_memory = True
@lldbagilityutils.indented(logger)
@lldbagilityutils.synchronized
def abort_store_kdp_at_next_write_virtual_memory(self):
    """Disarm the pending store-KDP request.

    Only valid while no KDP address has been recorded yet (asserted).
    """
    logger.debug("abort_store_kdp_at_next_write_virtual_memory()")
    assert not self._kdp_vaddr
    self._store_kdp_at_next_write_virtual_memory = False
def _in_kernel_space(addr):
    """Return True if ``addr`` lies inside the kernel virtual address range
    [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS] (module-level constants)."""
    return VM_MIN_KERNEL_ADDRESS <= addr <= VM_MAX_KERNEL_ADDRESS
@lldbagilityutils.indented(logger)
def _find_kernel_load_vaddr(vm):
    """Locate the virtual address the kernel Mach-O image is loaded at.

    First tries the fixed lowglo pointer (populated when the kernel was
    booted with the "debug" boot-arg); if that fails, scans backwards page
    by page from the current RIP looking for the MH_MAGIC_64 header.

    :param vm: VM accessor exposing read_register()/read_virtual_memory().
    """
    logger.debug("_find_kernel_load_vaddr()")
    # Only meaningful while the vCPU is executing kernel code.
    assert _in_kernel_space(vm.read_register("rip"))
    @lldbagilityutils.indented(logger)
    def _is_kernel_load_vaddr(vaddr):
        # A candidate is valid if it is a kernel address whose first four
        # bytes read back as the 64-bit Mach-O magic.
        logger.debug("_is_kernel_load_vaddr()")
        if not _in_kernel_space(vaddr):
            return False
        data = vm.read_virtual_memory(vaddr, 0x4)
        return data and lldbagilityutils.u32(data) == MH_MAGIC_64
    @lldbagilityutils.indented(logger)
    def _get_debug_kernel_load_vaddr():
        logger.debug("_get_debug_kernel_load_vaddr()")
        # from the LLDB documentation: "If the debug flag is included in the
        # boot-args nvram setting, the kernel's load address will be noted
        # in the lowglo page at a fixed address"
        # https://github.com/llvm/llvm-project/blob/llvmorg-8.0.0/lldb/source/Plugins/DynamicLoader/Darwin-Kernel/DynamicLoaderDarwinKernel.cpp#L226
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/lowglobals.h#L54
        # https://github.com/apple/darwin-xnu/blob/xnu-4903.221.2/osfmk/x86_64/pmap.c#L1175
        lgStext_vaddr = 0xFFFFFF8000002010
        data = vm.read_virtual_memory(lgStext_vaddr, 0x8)
        if data:
            vaddr = lldbagilityutils.u64(data)
            if _is_kernel_load_vaddr(vaddr):
                return vaddr
            else:
                # probably trying to attach to the target before lgStext is initialised
                return None
        else:
            return None
    @lldbagilityutils.indented(logger)
    def _search_kernel_load_vaddr(start_vaddr):
        logger.debug(
            "_search_kernel_load_vaddr(start_vaddr=0x{:016x})".format(start_vaddr)
        )
        # try to find the load address manually
        assert _in_kernel_space(start_vaddr)
        # Align down to a page boundary, then walk pages downwards.
        vaddr = start_vaddr & ~(I386_PGBYTES - 1)
        while vaddr >= VM_MIN_KERNEL_ADDRESS:
            if _is_kernel_load_vaddr(vaddr):
                return vaddr
            vaddr -= I386_PGBYTES
        else:
            # Bottom of kernel space reached without finding the header.
            raise AssertionError
    kernel_load_vaddr = _get_debug_kernel_load_vaddr() or _search_kernel_load_vaddr(
        vm.read_register("rip")
    )
    return kernel_load_vaddr
def _compute_kernel_slide(kernel_load_vaddr):
return kernel_load_vaddr - 0xFFFFFF8000200000
@lldbagilityutils.indented(logger)
def _find_kernel_version(vm):
    """Return the Darwin kernel version banner embedded in the kernel image.

    Reads the kernel Mach-O starting at ``vm.kernel_load_vaddr`` in 2 MiB
    chunks (page-sized reads) and regex-searches the accumulated bytes for
    the NUL-terminated "Darwin Kernel Version ... X86_64" string.
    """
    logger.debug("_find_kernel_version()")
    kernel_macho = b""
    while len(kernel_macho) < 42 * 1024 * 1024:  # a reasonable upper bound?
        buf = b""
        while len(buf) < 2 * 1024 * 1024:
            vaddr = vm.kernel_load_vaddr + len(kernel_macho) + len(buf)
            # NOTE(review): assumes read_virtual_memory() returns bytes here;
            # a None result would raise TypeError — confirm against the VM API.
            buf += vm.read_virtual_memory(vaddr, I386_PGBYTES)
        kernel_macho += buf
        try:
            kernel_version = re.search(
                b"(?P<version>Darwin Kernel Version .+?X86_64)\0", kernel_macho
            ).group("version")
        except AttributeError:
            # No match yet (re.search returned None): read more data.
            continue
        else:
            return kernel_version
    else:
        # Banner not found within the size cap.
        raise AssertionError
class FDPSTUB(FDP):
    """Debugging stub backed by FDP.

    The class attributes re-export FDP's flag/state constants under the
    generic names the rest of the module expects from any stub backend.
    """

    # Breakpoint / address-space flags.
    NO_CR3 = FDP.FDP_NO_CR3
    SOFT_HBP = FDP.FDP_SOFTHBP
    CR_HBP = FDP.FDP_CRHBP
    VIRTUAL_ADDRESS = FDP.FDP_VIRTUAL_ADDRESS
    EXECUTE_BP = FDP.FDP_EXECUTE_BP
    WRITE_BP = FDP.FDP_WRITE_BP
    # State bits returned by GetState().
    STATE_PAUSED = FDP.FDP_STATE_PAUSED
    STATE_BREAKPOINT_HIT = FDP.FDP_STATE_BREAKPOINT_HIT
    STATE_HARD_BREAKPOINT_HIT = FDP.FDP_STATE_HARD_BREAKPOINT_HIT
    CPU0 = FDP.FDP_CPU0

    def __init__(self, name):
        """Attach to the named VM; only single-vCPU VMs are supported."""
        super(FDPSTUB, self).__init__(name)
        assert self.GetCpuCount() == 1, (
            "VMs with more than one CPU are not fully supported by FDP! "
            "Decrease the number of processors in the VM settings"
        )
class VMSNSTUB(VMSN):
    """Debugging stub backed by VMSN (name suggests a VM-snapshot source —
    confirm). Mirrors FDPSTUB's constant names with plain integer values.
    """

    NO_CR3 = 0
    SOFT_HBP = 2
    CR_HBP = 0
    VIRTUAL_ADDRESS = 0
    EXECUTE_BP = 0
    WRITE_BP = 0
    STATE_PAUSED = 1
    STATE_BREAKPOINT_HIT = 1
    STATE_HARD_BREAKPOINT_HIT = 0
    # NOTE(review): CPU0 is taken from FDP, not VMSN — presumably the two
    # backends share CPU numbering; confirm this is intentional.
    CPU0 = FDP.FDP_CPU0

    def __init__(self, name):
        """Open the named VMSN target."""
        super(VMSNSTUB, self).__init__(name)
| 17,514 | 3,812 | 159 |
ce4178999bf32384813f16179d3b6b0f9cff55ee | 267 | py | Python | src/main/spotify/get_connection.py | bhankit1410/playlist-to-youtube | 74918eb74698e0f543fe86e2f760478ee4e407cf | [
"MIT"
] | null | null | null | src/main/spotify/get_connection.py | bhankit1410/playlist-to-youtube | 74918eb74698e0f543fe86e2f760478ee4e407cf | [
"MIT"
] | 1 | 2021-09-18T18:24:41.000Z | 2021-09-18T18:24:41.000Z | src/main/spotify/get_connection.py | bhankit1410/playlist-to-youtube | 74918eb74698e0f543fe86e2f760478ee4e407cf | [
"MIT"
] | null | null | null | import spotipy
import spotipy
import requests  # FIX: `requests` is used below but was never imported (NameError at runtime)
from spotipy.oauth2 import SpotifyClientCredentials

# Spotify client using the client-credentials flow; SpotifyClientCredentials
# reads SPOTIPY_CLIENT_ID / SPOTIPY_CLIENT_SECRET from the environment.
spotify = spotipy.Spotify(auth_manager=SpotifyClientCredentials())

# Smoke-test POST against httpbin's echo endpoint.
url = "http://httpbin.org/post"
payload = dict(key1='value1', key2='value2')
res = requests.post(url, data=payload)
print(res.text) | 29.666667 | 66 | 0.786517 | import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
spotify = spotipy.Spotify(auth_manager=SpotifyClientCredentials())
url = "http://httpbin.org/post"
payload = dict(key1='value1', key2='value2')
res = requests.post(url, data=payload)
print(res.text) | 0 | 0 | 0 |
e512aa0599158cc149792421975a951cc3e855ba | 4,485 | py | Python | Detector/elk.py | KJ-black/Detecting-Alternate-Authentication-Based-APT-Attack-via-MITRE-Techniques-Correlation | e8ee9dcb28261a69d0534580c08a4620595978df | [
"MIT"
] | null | null | null | Detector/elk.py | KJ-black/Detecting-Alternate-Authentication-Based-APT-Attack-via-MITRE-Techniques-Correlation | e8ee9dcb28261a69d0534580c08a4620595978df | [
"MIT"
] | null | null | null | Detector/elk.py | KJ-black/Detecting-Alternate-Authentication-Based-APT-Attack-via-MITRE-Techniques-Correlation | e8ee9dcb28261a69d0534580c08a4620595978df | [
"MIT"
] | null | null | null | import json
import requests
from elasticsearch.connection import create_ssl_context
from elasticsearch import Elasticsearch
from getpass import getpass
class ElasticSearch:
"""Wrapper of Elasticsearch module.
This module is designed for using elastic search more friendly.
"""
def _set_condition_func(self):
"""Generate functions dynamically for setting conditions.
This function will generate 6 funcions, including:
must(conditions):
All of the conditions must be satisfied.
must_reg(conditions)
All of the conditions must be satisfied in regular
expression matching.
must_not(conditions)
All of the conditions must not be satisfied.
must_not_reg(conditions)
All of the conditions must not be satisfied in regular
expression matching.
should(conditions)
One of the conditions must be satisfied.
should_reg(conditions)
One of the conditions must be satisfied in regular
expression matching.
All these funcions have one parameter in `list of dict` type.
Each element in `list` is a `dict` which have only one
key-value pair.
"""
for operation in ['must', 'must_not', 'should']:
for method in ['match_phrase', 'regexp']:
func(operation, method)
if __name__ == '__main__':
es_host = 'https://140.113.194.82:9200'
es_username = input('username:')
es_password = getpass('password:')
es = ElasticSearch(es_host, (es_username, es_password))
es.index('logstash-router.zeek*')
# es.column()
# es.time('now-30s', 'now')
# es.time('2020-05-14T03:10:00+0800')
es.time('2021-04-17T15:19:19+0800', '2021-04-17T15:22:40+0800')
# es.should_reg([{'id_orig_h': '192.168.1.*'}])
data = es.search(size=10, clean=True)
print(len(data))
[print(datum) for datum in data]
data = es.search()
print(len(data))
# print(data) | 32.737226 | 74 | 0.547826 | import json
import requests
from elasticsearch.connection import create_ssl_context
from elasticsearch import Elasticsearch
from getpass import getpass
class ElasticSearch:
    """Wrapper of Elasticsearch module.
    This module is designed for using elastic search more friendly.

    A query is built incrementally (index/time/column/sort plus the
    dynamically generated must*/must_not*/should* condition methods) and
    then executed with search().
    """
    def __init__(self, host, credential):
        """:param host: Elasticsearch endpoint URL.
        :param credential: (username, password) tuple for HTTP auth."""
        # Generate the must/must_not/should condition-builder methods.
        self._set_condition_func()
        # NOTE(review): TLS certificate verification is disabled below
        # (verify_certs=False) and the related urllib3 warnings silenced —
        # acceptable only for a trusted lab setup.
        requests.packages.urllib3.disable_warnings()
        self._es = Elasticsearch(
            hosts=[host],
            http_auth=credential,
            verify_certs=False)
        self._index = '*'
        # Base query: empty bool/must clause, newest documents first.
        self._query = {
            'query': {'bool': {'must': []}},
            'sort': {'@timestamp': {'order': 'desc'}}
        }
    def index(self, index):
        # Select the index (pattern) subsequent searches run against.
        self._index = index
    def query(self):
        # Current query body as a JSON string (handy for debugging).
        return json.dumps(self._query)
    def column(self, column):
        # Restrict returned fields via a _source includes filter.
        self._query['_source'] = {'includes': column}
    def sort(self, column, order):
        # Replace the sort clause ('asc'/'desc' on the given column).
        self._query['sort'] = {column: {'order': order}}
    def time(self, start=None, end=None):
        # Filter on @timestamp within [start, end) — gte/lt semantics.
        self._query['query']['bool']['filter'] = {
            'range': {'@timestamp': {'gte': start, 'lt': end }}
        }
    def range(self, column, start=None, end=None):
        # Append a mandatory [start, end) range condition on any column.
        self._query['query']['bool']['must'].append({
            'bool': {
                'filter': [{'range': {column: {'gte': start, 'lt': end}}}]
            }
        })
    def clean(self):
        # Drop accumulated must-conditions (index, sort and the
        # @timestamp filter set by time() are kept).
        self._query['query']['bool']['must'] = []
    def _set_condition_func(self):
        """Generate functions dynamically for setting conditions.
        This function will generate 6 funcions, including:
        must(conditions):
            All of the conditions must be satisfied.
        must_reg(conditions)
            All of the conditions must be satisfied in regular
            expression matching.
        must_not(conditions)
            All of the conditions must not be satisfied.
        must_not_reg(conditions)
            All of the conditions must not be satisfied in regular
            expression matching.
        should(conditions)
            One of the conditions must be satisfied.
        should_reg(conditions)
            One of the conditions must be satisfied in regular
            expression matching.
        All these funcions have one parameter in `list of dict` type.
        Each element in `list` is a `dict` which have only one
        key-value pair.
        """
        def func(operation, method):
            # Build one bound function — e.g. ('must', 'regexp') becomes
            # self.must_reg — and attach it to this instance.
            func_name = operation + ('_reg' if method == 'regexp' else '')
            def func_code(conditions):
                clause = {
                    'bool': {operation: [{method: c} for c in conditions]}
                }
                if operation == 'should':
                    # A bare 'should' is optional — require >= 1 match.
                    clause['bool']['minimum_should_match'] = 1
                self._query['query']['bool']['must'].append(clause)
            setattr(self, func_name, func_code)
        for operation in ['must', 'must_not', 'should']:
            for method in ['match_phrase', 'regexp']:
                func(operation, method)
    def search(self, size=None, clean=False):
        """Execute the accumulated query and return the matching documents
        as their `_source` dicts.

        :param size: maximum number of documents (None = all matches).
        :param clean: when true, reset the must-conditions afterwards.

        Uses the scroll API to page through large result sets.
        """
        data = []
        res = self._es.search(
            index=self._index,
            size=10000 if not size else size,
            scroll='2m',
            body=self._query)
        sid = res['_scroll_id']
        count = len(res['hits']['hits'])
        while count > 0:
            data += res['hits']['hits']
            if size and len(data) >= size:
                break
            # Fetch the next scroll page (2 minute keep-alive per page).
            # NOTE(review): the scroll context is never cleared server-side
            # (no clear_scroll) — it simply expires after 2m.
            res = self._es.scroll(scroll_id=sid, scroll='2m')
            sid = res['_scroll_id']
            count = len(res['hits']['hits'])
        if clean:
            self.clean()
        return [datum['_source'] for datum in data][:size]
if __name__ == '__main__':
    # Ad-hoc demo: query a Zeek log index on a hard-coded Elasticsearch
    # host for a fixed time window; credentials are prompted interactively.
    es_host = 'https://140.113.194.82:9200'
    es_username = input('username:')
    es_password = getpass('password:')
    es = ElasticSearch(es_host, (es_username, es_password))
    es.index('logstash-router.zeek*')
    # es.column()
    # es.time('now-30s', 'now')
    # es.time('2020-05-14T03:10:00+0800')
    es.time('2021-04-17T15:19:19+0800', '2021-04-17T15:22:40+0800')
    # es.should_reg([{'id_orig_h': '192.168.1.*'}])
    # First search: capped at 10 hits; clean=True clears the accumulated
    # must-conditions afterwards (the time filter remains).
    data = es.search(size=10, clean=True)
    print(len(data))
    [print(datum) for datum in data]
    # Second search: same index/time window, no size cap.
    data = es.search()
    print(len(data))
    # print(data)
878095fc229590aae383c04cea6968024bcbfd7d | 422 | py | Python | manage.py | ukch/betternamecoming | 05a7dde27099f7bea7d5fe6711279bb1fbecfc47 | [
"MIT"
] | 1 | 2021-06-02T15:12:21.000Z | 2021-06-02T15:12:21.000Z | manage.py | ukch/betternamecoming | 05a7dde27099f7bea7d5fe6711279bb1fbecfc47 | [
"MIT"
] | 48 | 2016-01-17T00:00:17.000Z | 2022-01-13T00:45:01.000Z | manage.py | ryanmrubin/refugeedata | d71bedb0895e8011f3b67245c17df3422553820c | [
"MIT"
] | 4 | 2015-09-14T09:25:43.000Z | 2015-12-29T19:13:23.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default Django settings module; an explicit environment value wins
    # because setdefault() does not overwrite existing entries.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "refugeedata.app.settings")
    # Optional deployment overrides: if the local_settings module exists,
    # merge its SETTINGS_DICT into the environment before Django starts.
    try:
        from refugeedata.app import local_settings
    except ImportError:
        pass
    else:
        os.environ.update(local_settings.SETTINGS_DICT)
    # Imported late so DJANGO_SETTINGS_MODULE is already set.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 23.444444 | 79 | 0.729858 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "refugeedata.app.settings")
try:
from refugeedata.app import local_settings
except ImportError:
pass
else:
os.environ.update(local_settings.SETTINGS_DICT)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 0 | 0 | 0 |
eb50898ebb54ac7bbd12256db154af27efab9bda | 2,490 | py | Python | aiodec.py | cjrh/aiodec | ca5b0759e2c1d48a36b29a2c6eabb3af3b5b2c68 | [
"Apache-2.0"
] | 1 | 2018-05-23T21:53:26.000Z | 2018-05-23T21:53:26.000Z | aiodec.py | cjrh/aiodec | ca5b0759e2c1d48a36b29a2c6eabb3af3b5b2c68 | [
"Apache-2.0"
] | 3 | 2018-03-15T22:50:08.000Z | 2018-10-19T10:55:56.000Z | aiodec.py | cjrh/aiodec | ca5b0759e2c1d48a36b29a2c6eabb3af3b5b2c68 | [
"Apache-2.0"
] | 1 | 2018-08-08T03:33:37.000Z | 2018-08-08T03:33:37.000Z | """
aiodec
======
Decorators for coroutines
"""
import time
import logging
from functools import wraps
from string import Template
import inspect
from inspect import Signature
from typing import Callable, Optional, Mapping, Any
__version__ = '2018.10.1'
logger = logging.getLogger(__name__)
Callback = Callable[[Signature, Mapping[str, Any]], None]
| 27.362637 | 82 | 0.6249 | """
aiodec
======
Decorators for coroutines
"""
import time
import logging
from functools import wraps
from string import Template
import inspect
from inspect import Signature
from typing import Callable, Optional, Mapping, Any
__version__ = '2018.10.1'
logger = logging.getLogger(__name__)
Callback = Callable[[Signature, Mapping[str, Any]], None]
def adecorator(
        f=None,
        pre_callback: Optional[Callback] = None,
        post_callback: Optional[Callback] = None):
    """Generic decorator for coroutine functions.

    Before each awaited call, ``pre_callback(signature, params)`` is
    invoked; after the call finishes (normally or by exception),
    ``post_callback(signature, params)`` runs. ``params`` maps every bound
    argument name to its value (defaults applied) plus ``name_`` and
    ``qualname_`` of the wrapped coroutine.

    Usable both bare (``@adecorator``) and with keyword arguments
    (``@adecorator(pre_callback=...)``).
    """
    def decorate(coro):
        @wraps(coro)
        async def wrapped(*args, **kwargs):
            # Bind the actual call arguments against the coroutine's
            # signature so callbacks see a complete parameter mapping.
            signature = inspect.signature(coro)
            bound = signature.bind(*args, **kwargs)
            bound.apply_defaults()
            params = dict(name_=coro.__name__, qualname_=coro.__qualname__)
            params.update(bound.arguments)
            if pre_callback:
                pre_callback(signature, params)
            try:
                return await coro(*args, **kwargs)
            finally:
                # Runs on both normal return and exception.
                if post_callback:
                    post_callback(signature, params)
        return wrapped
    # Bare usage passes the coroutine directly; parenthesized usage
    # returns the decorator for later application.
    return decorate(f) if f else decorate
def astopwatch(
    f=None,
    message_template='Time taken: $time_ seconds',
    fmt='%.4g',
    logger=logger,
):
    """Decorator: log how long each awaited call of a coroutine took.

    The message is built with ``Template.safe_substitute``; available
    placeholders are ``$time_``, ``$name_``, ``$qualname_`` and the call's
    bound argument names (unknown placeholders are left untouched).

    :param f: the coroutine function, when used without parentheses.
    :param message_template: ``string.Template``-style message.
    :param fmt: %%-style format applied to the elapsed seconds.
    :param logger: destination logger (defaults to the module logger).
    """
    # Using templates because safe_substitute is awesome.
    tmpl = Template(message_template)
    # NOTE(review): t0 is shared by all in-flight calls of the decorated
    # coroutine; overlapping awaits clobber each other's start time and
    # would log wrong durations — consider per-call storage. TODO confirm.
    t0 = 0
    def pre_callback(sig, template_parameters):
        nonlocal t0
        t0 = time.perf_counter()
    def post_callback(sig, template_parameters):
        nonlocal t0
        dt = time.perf_counter() - t0
        msg = tmpl.safe_substitute(
            **template_parameters,
            time_=fmt % dt
        )
        logger.info(msg)
    # Delegate the wrapping mechanics to adecorator.
    return adecorator(
        f,
        pre_callback=pre_callback,
        post_callback=post_callback
    )
| 2,088 | 0 | 46 |
bde72676fc6a653ae2aa94f9c37e0a4e416f13e4 | 12,438 | py | Python | config/CConfig.py | ngoan1608/robotframework-testsuitesmanagement | b5288de09d6df08ec718a4d3b9ae1efb0c67f103 | [
"Apache-2.0"
] | null | null | null | config/CConfig.py | ngoan1608/robotframework-testsuitesmanagement | b5288de09d6df08ec718a4d3b9ae1efb0c67f103 | [
"Apache-2.0"
] | null | null | null | config/CConfig.py | ngoan1608/robotframework-testsuitesmanagement | b5288de09d6df08ec718a4d3b9ae1efb0c67f103 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2022 Robert Bosch Car Multimedia GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# **************************************************************************************************************
#
# CConfig.py
#
# CM-CI1/ECA3-Queckenstedt
#
# Purpose:
# - Compute and store all repository specific information, like the repository name,
# paths to repository subfolder, paths to interpreter and so on ...
#
# - All paths to subfolder depends on the repository root path that has to be provided to constructor of CConfig
#
# Additional hints:
# - Variable names like SPHINXBUILD, SOURCEDIR and BUILDDIR are taken over from original output of Sphinx
# (when documentation project files like make.bat are generated by Sphinx; for better understanding
# no new names here).
#
# - Output in PDF format requires LaTeX compiler and self.__bGenPDFSupported set to True (True is default)
#
# - Don't be confused: We have 'doc/_build' containing the documentation builder output
# and we have 'build' containing the build of the setup tools. These are different things.
#
# --------------------------------------------------------------------------------------------------------------
#
# 11.10.2021 / XC-CI1/ECA3-Queckenstedt
# Fixed path within site-packages (Linux)
#
# 06.10.2021 / XC-CI1/ECA3-Queckenstedt
# Added Linux support
#
# 01.10.2021 / XC-CI1/ECA3-Queckenstedt
# Added environment check
#
# 01.10.2021 / XC-CI1/ECA3-Queckenstedt
# Added wrapper for error messages
#
# Initial version 08/2021
#
# --------------------------------------------------------------------------------------------------------------
import os, sys, platform, shlex, subprocess
import colorama as col
import pypandoc
col.init(autoreset=True)
COLBR = col.Style.BRIGHT + col.Fore.RED
COLBG = col.Style.BRIGHT + col.Fore.GREEN
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# eof def __InitConfig(self):
# eof def PrintConfig(self):
# eof def Get(self, sName=None):
# eof class CConfig():
# --------------------------------------------------------------------------------------------------------------
| 46.58427 | 191 | 0.61698 | # Copyright 2020-2022 Robert Bosch Car Multimedia GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# **************************************************************************************************************
#
# CConfig.py
#
# CM-CI1/ECA3-Queckenstedt
#
# Purpose:
# - Compute and store all repository specific information, like the repository name,
# paths to repository subfolder, paths to interpreter and so on ...
#
# - All paths to subfolder depends on the repository root path that has to be provided to constructor of CConfig
#
# Additional hints:
# - Variable names like SPHINXBUILD, SOURCEDIR and BUILDDIR are taken over from original output of Sphinx
# (when documentation project files like make.bat are generated by Sphinx; for better understanding
# no new names here).
#
# - Output in PDF format requires LaTeX compiler and self.__bGenPDFSupported set to True (True is default)
#
# - Don't be confused: We have 'doc/_build' containing the documentation builder output
# and we have 'build' containing the build of the setup tools. These are different things.
#
# --------------------------------------------------------------------------------------------------------------
#
# 11.10.2021 / XC-CI1/ECA3-Queckenstedt
# Fixed path within site-packages (Linux)
#
# 06.10.2021 / XC-CI1/ECA3-Queckenstedt
# Added Linux support
#
# 01.10.2021 / XC-CI1/ECA3-Queckenstedt
# Added environment check
#
# 01.10.2021 / XC-CI1/ECA3-Queckenstedt
# Added wrapper for error messages
#
# Initial version 08/2021
#
# --------------------------------------------------------------------------------------------------------------
import os, sys, platform, shlex, subprocess
import colorama as col
import pypandoc
col.init(autoreset=True)
COLBR = col.Style.BRIGHT + col.Fore.RED
COLBG = col.Style.BRIGHT + col.Fore.GREEN
# --------------------------------------------------------------------------------------------------------------
def printerror(sMsg):
    """Write ``sMsg`` to stderr as a bright-red 'Error:' line."""
    sys.stderr.write(COLBR + f"Error: {sMsg}!\n")
def printexception(sMsg):
    """Write ``sMsg`` to stderr as a bright-red 'Exception:' line."""
    sys.stderr.write(COLBR + f"Exception: {sMsg}!\n")
# --------------------------------------------------------------------------------------------------------------
class CConfig():
    """Computes and stores all repository specific configuration: package
    metadata, interpreter/Sphinx/LaTeX executables and documentation and
    setuptools folder paths. Values are retrieved with Get()."""

    def __init__(self, sReferencePath="."):
        """Collect static package metadata and derive all environment
        dependent values; raises Exception when the setup is incomplete.

        :param sReferencePath: repository root all relative paths are based on.
        """
        self.__dictConfig = {}
        self.__sReferencePath = os.path.normpath(os.path.abspath(sReferencePath))
        self.__dictConfig['sReferencePath'] = self.__sReferencePath # only to have the possibility to print out all values only with help of 'self.__dictConfig'
        self.__bGenPDFSupported = False
        # 1. basic setup stuff
        self.__dictConfig['sPackageName'] = "RobotFramework_Testsuites"
        self.__dictConfig['sVersion'] = "0.0.1"
        self.__dictConfig['sAuthor'] = "ROBFW-AIO Team"
        self.__dictConfig['sAuthorEMail'] = "Thomas.Pollerspoeck@de.bosch.com"
        self.__dictConfig['sDescription'] = "This package together with JsonPreprocessor provide ROBFW-AIO testsuites management method"
        self.__dictConfig['sLongDescriptionContentType'] = "text/markdown"
        self.__dictConfig['sURL'] = "https://sourcecode.socialcoding.bosch.com/projects/ROBFW/repos/robotframework-testsuitesmanagement/browse"
        self.__dictConfig['sProgrammingLanguage'] = "Programming Language :: Python :: 3"
        self.__dictConfig['sLicence'] = "License :: OSI Approved :: MIT License"
        self.__dictConfig['sOperatingSystem'] = "Operating System :: OS Independent"
        self.__dictConfig['sPythonRequires'] = ">=3.9"
        # self.__dictConfig[''] = ""
        # 2. certain folder and executables (things that requires computation)
        bSuccess, sResult = self.__InitConfig()
        if bSuccess != True:
            raise Exception(sResult)
        print(COLBG + sResult)
        print()

    def __del__(self):
        """Release the configuration dictionary."""
        del self.__dictConfig

    def __InitConfig(self):
        """Compute all platform dependent values (interpreter, Sphinx,
        LaTeX, installed-package and documentation paths).

        :return: ``(bSuccess, sResult)`` — False plus an error message when
                 any required tool, folder or environment variable is missing.
        """
        sOSName = os.name
        sPlatformSystem = platform.system()
        SPHINXBUILD = None
        sPython = None
        sLaTeXInterpreter = None
        sInstalledPackageFolder = None
        sInstalledPackageDocFolder = None
        try:
            self.__dictConfig['sPythonVersion'] = sys.version
            self.__dictConfig['sPandoc'] = pypandoc.get_pandoc_path()
        except Exception as ex:
            bSuccess = False
            sResult = str(ex)
            return bSuccess, sResult
        if sPlatformSystem == "Windows":
            # -- environment check
            sRobotPythonPath_EnvVar = "%ROBOTPYTHONPATH%"
            sRobotPythonPath = os.path.expandvars(sRobotPythonPath_EnvVar)
            if sRobotPythonPath_EnvVar == sRobotPythonPath:
                # environment variable not resolved => not existing
                bSuccess = False
                sResult = f"""Missing Windows environment variable %ROBOTPYTHONPATH%!
This application requires a Windows environment variable %ROBOTPYTHONPATH%, pointing to a Python installation (version required: {self.__dictConfig['sPythonRequires']}) that shall be updated.
Please create and try again"""
                return bSuccess, sResult
            SPHINXBUILD = os.path.normpath(os.path.expandvars("%ROBOTPYTHONPATH%/Scripts/sphinx-build.exe"))
            sPython = os.path.normpath(os.path.expandvars("%ROBOTPYTHONPATH%/python.exe"))
            if self.__bGenPDFSupported is True:
                sLaTeXInterpreter = os.path.normpath(os.path.expandvars("%ROBOTLATEXPATH%/miktex/bin/x64/pdflatex.exe"))
            sInstalledPackageFolder = os.path.normpath(os.path.expandvars("%ROBOTPYTHONPATH%/Lib/site-packages/" + self.__dictConfig['sPackageName']))
            sInstalledPackageDocFolder = os.path.normpath(os.path.expandvars("%ROBOTPYTHONPATH%/Lib/site-packages/" + self.__dictConfig['sPackageName'] + "_doc"))
        elif sPlatformSystem == "Linux":
            # -- environment check
            sRobotPythonPath_EnvVar = "${RobotPythonPath}"
            sRobotPythonPath = os.path.expandvars(sRobotPythonPath_EnvVar)
            if sRobotPythonPath_EnvVar == sRobotPythonPath:
                # environment variable not resolved => not existing
                # FIX: "${RobotPythonPath}" must be brace-escaped inside the
                # f-string ("${{RobotPythonPath}}"); otherwise Python treats
                # {RobotPythonPath} as a replacement field and this line
                # raises NameError instead of producing the error message.
                bSuccess = False
                sResult = f"""Missing Linux environment variable ${{RobotPythonPath}}!
This application requires a Linux environment variable ${{RobotPythonPath}}, pointing to a Python installation (version required: {self.__dictConfig['sPythonRequires']}) that shall be updated.
Please create and try again"""
                return bSuccess, sResult
            SPHINXBUILD = os.path.normpath(os.path.expandvars("${RobotPythonPath}/sphinx-build"))
            sPython = os.path.normpath(os.path.expandvars("${RobotPythonPath}/python3.9"))
            # if self.__bGenPDFSupported is True: # not yet!!
            #    sLaTeXInterpreter = os.path.normpath(os.path.expandvars("${ROBOTLATEXPATH}/miktex/bin/x64/pdflatex"))
            sInstalledPackageFolder = os.path.normpath(os.path.expandvars("${RobotPythonPath}/../lib/python3.9/site-packages/" + self.__dictConfig['sPackageName']))
            sInstalledPackageDocFolder = os.path.normpath(os.path.expandvars("${RobotPythonPath}/../lib/python3.9/site-packages/" + self.__dictConfig['sPackageName'] + "_doc"))
        else:
            bSuccess = False
            sResult = "Operating system " + str(sPlatformSystem) + " (" + str(sOSName) + ") not supported"
            return bSuccess, sResult
        self.__dictConfig['SPHINXBUILD'] = SPHINXBUILD
        self.__dictConfig['sPython'] = sPython
        self.__dictConfig['sLaTeXInterpreter'] = sLaTeXInterpreter
        self.__dictConfig['sInstalledPackageFolder'] = sInstalledPackageFolder
        self.__dictConfig['sInstalledPackageDocFolder'] = sInstalledPackageDocFolder
        if os.path.isfile(SPHINXBUILD) is False:
            bSuccess = False
            sResult = "Missing Sphinx '" + str(SPHINXBUILD) + "'"
            return bSuccess, sResult
        if os.path.isfile(sPython) is False:
            bSuccess = False
            sResult = "Missing Python '" + str(sPython) + "'"
            return bSuccess, sResult
        if self.__bGenPDFSupported is True:
            if os.path.isfile(sLaTeXInterpreter) is False:
                bSuccess = False
                sResult = "Missing LaTeX '" + str(sLaTeXInterpreter) + "'"
                return bSuccess, sResult
        # ---- paths relative to repository root folder (where the srcipts are located that use this module)
        # ====== 1. documentation
        # This doesn't matter in case of the documentation builder itself is using this CConfig.
        # But if the documentation builder is called by other apps like setup_ext.py, they need to know where to find.
        sDocumentationBuilder = os.path.normpath(self.__sReferencePath + "/sphinx-makeall.py")
        self.__dictConfig['sDocumentationBuilder'] = sDocumentationBuilder
        # - documentation project source dir (relative to reference path (= position of executing script)
        SOURCEDIR = os.path.normpath(self.__sReferencePath + "/doc")
        self.__dictConfig['SOURCEDIR'] = SOURCEDIR
        # - documentation project build dir
        BUILDDIR = os.path.normpath(SOURCEDIR + "/_build")
        self.__dictConfig['BUILDDIR'] = BUILDDIR
        # - documentation project html output folder
        sHTMLOutputFolder = os.path.normpath(BUILDDIR + "/html")
        self.__dictConfig['sHTMLOutputFolder'] = sHTMLOutputFolder
        # - README
        sReadMe_rst = os.path.normpath(self.__sReferencePath + "/README.rst")
        self.__dictConfig['sReadMe_rst'] = sReadMe_rst
        sReadMe_md = os.path.normpath(self.__sReferencePath + "/README.md")
        self.__dictConfig['sReadMe_md'] = sReadMe_md
        # ====== 2. setuptools
        self.__dictConfig['sSetupBuildFolder'] = os.path.normpath(self.__sReferencePath + "/build")
        self.__dictConfig['sSetupBuildLibFolder'] = os.path.normpath(self.__sReferencePath + "/build/lib")
        self.__dictConfig['sSetupBuildLibDocFolder'] = os.path.normpath(self.__sReferencePath + "/build/lib/" + self.__dictConfig['sPackageName'] + "_doc")
        self.__dictConfig['sSetupDistFolder'] = os.path.normpath(self.__sReferencePath + "/dist")
        self.__dictConfig['sEggInfoFolder'] = os.path.normpath(self.__sReferencePath + "/" + self.__dictConfig['sPackageName'] + ".egg-info")
        print()
        print("Running under " + str(sPlatformSystem) + " (" + str(sOSName) + ")")
        self.PrintConfig()
        bSuccess = True
        sResult = "Repository setup done"
        return bSuccess, sResult
    # eof def __InitConfig(self):

    def PrintConfig(self):
        """Print every configuration key/value pair to stdout."""
        # -- printing configuration to console
        nJust = 30
        print()
        for sKey in self.__dictConfig:
            print(sKey.rjust(nJust, ' ') + " : " + str(self.__dictConfig[sKey]))
        print()
    # eof def PrintConfig(self):

    def Get(self, sName=None):
        """Return the configuration value for ``sName``, or None (after
        printing an error and the list of valid keys) when unknown."""
        if ( (sName is None) or (sName not in self.__dictConfig) ):
            print()
            printerror("Error: Configuration parameter '" + str(sName) + "' not existing!")
            # from here it's standard output:
            print("Use instead one of:")
            self.PrintConfig()
            return None # returning 'None' in case of key is not existing !!!
        else:
            return self.__dictConfig[sName]
    # eof def Get(self, sName=None):
# eof class CConfig():
# --------------------------------------------------------------------------------------------------------------
| 9,393 | -5 | 204 |
087adbc91f50918dfe69281c5dc4021695d07c99 | 2,491 | py | Python | specio/core/tests/test_request.py | Maxwell0001/specio | a660f690ea98058bce818415fe85736a9cdaa0f5 | [
"BSD-3-Clause"
] | 13 | 2019-03-11T12:32:00.000Z | 2021-11-16T14:22:29.000Z | specio/core/tests/test_request.py | Maxwell0001/specio | a660f690ea98058bce818415fe85736a9cdaa0f5 | [
"BSD-3-Clause"
] | 37 | 2017-12-08T13:01:58.000Z | 2021-11-09T08:49:06.000Z | specio/core/tests/test_request.py | Maxwell0001/specio | a660f690ea98058bce818415fe85736a9cdaa0f5 | [
"BSD-3-Clause"
] | 10 | 2017-12-08T13:03:37.000Z | 2022-03-03T13:55:36.000Z | """Test the Request class"""
# Copyright (c) 2017
# Authors: Guillaume Lemaitre <guillaume.lemaitre@inria.fr>
# License: BSD 3 clause
import shutil
import os
from os.path import dirname, join, sep, expanduser
import pytest
from pytest import raises
from specio import core
from specio.core import Request
DATA_PATH = module_path = dirname(__file__)
@pytest.mark.parametrize(
'type_error,msg,params',
[(IOError, "Cannot understand given URI", ['invalid', 'uri'] * 10),
(IOError, "Cannot understand given URI", 4),
(IOError, "No such file", '/does/not/exist'),
(IOError, "No such file", '/does/not/exist.zip/spam.png')])
| 27.988764 | 73 | 0.668005 | """Test the Request class"""
# Copyright (c) 2017
# Authors: Guillaume Lemaitre <guillaume.lemaitre@inria.fr>
# License: BSD 3 clause
import shutil
import os
from os.path import dirname, join, sep, expanduser
import pytest
from pytest import raises
from specio import core
from specio.core import Request
DATA_PATH = module_path = dirname(__file__)
class File():
    """Minimal file-like stub: readable (endless NUL bytes) but neither
    seekable nor tellable; used to exercise Request's no-seek fallback."""

    def read(self, n):
        # Behave like an infinite stream of zero bytes.
        return n * b'\x00'

    def seek(self, i):
        raise IOError('Not supported')

    def tell(self):
        raise Exception('Not supported')

    def close(self):
        # Closing the stub is a harmless no-op.
        pass
def test_request():
    """Exercise Request URI handling: file:// URIs, plain paths, open file
    objects, '~' expansion, and kwargs pass-through."""
    # file:// URI: local filename is the URI with the 7-char scheme stripped.
    filename = 'file://' + join(DATA_PATH, 'data', 'spectra.foobar')
    R = Request(filename)
    assert R._uri_type == core.request.URI_FILENAME
    assert R.get_local_filename() == filename[7:]
    file_obj = R.get_file()
    assert file_obj.name == filename[7:]
    # Plain filesystem path is used verbatim.
    filename = join(DATA_PATH, 'data', 'spectra.foobar')
    R = Request(filename)
    assert R._uri_type == core.request.URI_FILENAME
    assert R.get_local_filename() == filename
    file_obj = R.get_file()
    assert file_obj.name == filename
    # Already-open file object: Request recovers the underlying path.
    filename = join(DATA_PATH, 'data', 'spectra.foobar')
    file_obj = open(filename, 'rb')
    R = Request(file_obj)
    assert R.get_local_filename() == filename
    assert R.get_file().name == filename
    # '~' paths are expanded and normalized to the platform separator.
    shutil.copy(filename, expanduser('~/'))
    filename = '~/spectra.foobar'
    R = Request(filename)
    assert R.filename == expanduser('~/spectra.foobar').replace('/', sep)
    os.remove(expanduser(filename))
    # Extra keyword arguments are preserved on the request object.
    filename = join(DATA_PATH, 'data', 'spectra.foobar')
    R = Request(filename, some_kwarg='something')
    assert R.kwargs == {'some_kwarg': 'something'}
@pytest.mark.parametrize(
    'type_error,msg,params',
    [(IOError, "Cannot understand given URI", ['invalid', 'uri'] * 10),
     (IOError, "Cannot understand given URI", 4),
     (IOError, "No such file", '/does/not/exist'),
     (IOError, "No such file", '/does/not/exist.zip/spam.png')])
def test_request_error(type_error, msg, params):
    """Invalid URIs must raise IOError with the expected message."""
    # Fix: 'message=' was only a custom failure text and was removed in
    # pytest 5; 'match=' actually asserts on the exception message, which
    # is what the (type_error, msg) parametrization intends.
    with pytest.raises(type_error, match=msg):
        Request(params)
def test_request_read_sources():
    """Request.firstbytes yields the first 256 bytes of the source file."""
    filename = join(DATA_PATH, 'data', 'spectra.foobar')
    R = Request(filename)
    first_bytes = R.firstbytes
    # Fix: the original used open(filename, 'rb').read(), leaking the file
    # handle; a context manager guarantees it is closed.
    with open(filename, 'rb') as f:
        all_bytes = f.read()
    assert len(first_bytes) == 256
    assert all_bytes.startswith(first_bytes)
def test_request_file_no_seek():
    """Peeking firstbytes on an unseekable file-like object raises IOError."""
    unseekable = File()
    request = Request(unseekable)
    with raises(IOError):
        request.firstbytes
| 1,625 | -8 | 221 |
53a23c8f4b7b8aea7a12aefbf04f21c2efcbc97b | 1,391 | py | Python | plugins/ctags_generator/test_ctags_generator.py | likev/gauravssnl.github.io | 2bb17e0e5621c1f03d782b762694f999cc62d4d0 | [
"MIT"
] | 2 | 2021-01-12T14:55:55.000Z | 2021-03-24T11:52:44.000Z | plugins/ctags_generator/test_ctags_generator.py | likev/gauravssnl.github.io | 2bb17e0e5621c1f03d782b762694f999cc62d4d0 | [
"MIT"
] | 1 | 2021-12-13T20:50:25.000Z | 2021-12-13T20:50:25.000Z | plugins/ctags_generator/test_ctags_generator.py | likev/gauravssnl.github.io | 2bb17e0e5621c1f03d782b762694f999cc62d4d0 | [
"MIT"
] | 3 | 2021-03-24T11:58:31.000Z | 2022-01-12T16:03:06.000Z | # -*- coding: utf-8 -*-
import os, shutil
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_settings, unittest
from pelican.writers import Writer
from ctags_generator import generate_ctags
CUR_DIR = os.path.dirname(__file__)
TEST_CONTENT_DIR = os.path.join(CUR_DIR, 'test_content')
| 33.119048 | 104 | 0.660676 | # -*- coding: utf-8 -*-
import os, shutil
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_settings, unittest
from pelican.writers import Writer
from ctags_generator import generate_ctags
CUR_DIR = os.path.dirname(__file__)
TEST_CONTENT_DIR = os.path.join(CUR_DIR, 'test_content')
class CtagsGeneratorTest(unittest.TestCase):
    """End-to-end test for the ctags_generator Pelican plugin."""

    def test_generate_ctags(self):
        """generate_ctags() should write a 'tags' file listing the tag names
        found in the test content (including non-ASCII tags)."""
        settings = get_settings(filenames={})
        settings['GENERATE_CTAGS'] = True
        context = settings.copy()
        context['generated_content'] = dict()
        context['static_links'] = set()
        generator = ArticlesGenerator(
            context=context, settings=settings,
            path=TEST_CONTENT_DIR, theme=settings['THEME'], output_path=TEST_CONTENT_DIR)
        generator.generate_context()
        writer = Writer(TEST_CONTENT_DIR, settings=settings)
        generate_ctags(generator, writer)
        # The plugin writes its output next to the content as 'tags'.
        output_path = os.path.join(TEST_CONTENT_DIR, 'tags')
        self.assertTrue(os.path.exists(output_path))
        try:
            # output content is correct: first tab-separated field per line
            # is the tag name.
            with open(output_path, 'r') as output_file:
                ctags = [l.split('\t')[0] for l in output_file.readlines()]
            self.assertEqual(['bar', 'bar', 'foo', 'foo', 'foobar', 'foobar', 'マック', 'パイソン'], ctags)
        finally:
            # Always clean up the generated file so reruns start fresh.
            os.remove(output_path)
92c84247c3383168159a1c235a6ad78fa43a77c5 | 5,169 | py | Python | fsic/plot.py | olliethomas/fsic-test | 8372d46d56d1340432fb7bffe3662e9cb6f4f56e | [
"MIT"
] | 13 | 2017-01-09T16:23:05.000Z | 2022-03-18T14:29:10.000Z | fsic/plot.py | afcarl/fsic-test | 8372d46d56d1340432fb7bffe3662e9cb6f4f56e | [
"MIT"
] | null | null | null | fsic/plot.py | afcarl/fsic-test | 8372d46d56d1340432fb7bffe3662e9cb6f4f56e | [
"MIT"
] | 6 | 2016-11-06T09:59:45.000Z | 2022-03-13T19:21:08.000Z | """Module containing convenient functions for plotting"""
__author__ = 'wittawat'
import fsic.ex.exglobal as exglo
import fsic.glo as glo
import matplotlib.pyplot as plt
import numpy as np
def plot_2d_data(pdata):
    """
    Scatter-plot a two-dimensional paired dataset.

    pdata: an instance of PairedData
    Return a figure handle

    Raises ValueError when the X variable is not 2-dimensional.
    """
    X, Y = pdata.xy()
    n, d = X.shape
    if d != 2:
        raise ValueError('d must be 2 to plot.')
    # plot
    fig = plt.figure()
    plt.plot(X, Y, 'ob')
    plt.title(pdata.label)
    return fig
def plot_prob_reject(ex, fname, h1_true, func_xvalues, xlabel,
        func_title=None):
    """
    plot the empirical probability that the statistic is above the threshold.
    This can be interpreted as type-1 error (when H0 is true) or test power
    (when H1 is true). The plot is against the specified x-axis.
    - ex: experiment number
    - fname: file name of the aggregated result
    - h1_true: True if H1 is true
    - func_xvalues: function taking aggregated results dictionary and return the values
        to be used for the x-axis values.
    - xlabel: label of the x-axis.
    - func_title: a function: results dictionary -> title of the plot
    Return loaded results
    """
    #from IPython.core.debugger import Tracer
    #Tracer()()
    results = glo.ex_load_result(ex, fname)
    # Fix: rej_accessor was referenced below but missing from this copy of
    # the function (NameError at call time); the nested accessor is restored.
    def rej_accessor(jr):
        """Extract h0_rejected from one job result as a float."""
        rej = jr['test_result']['h0_rejected']
        # When used with vectorize(), making the value float will make the resulting
        # numpy array to be of float. nan values can be stored.
        return float(rej)
    #value_accessor = lambda job_results: job_results['test_result']['h0_rejected']
    vf_pval = np.vectorize(rej_accessor)
    # results['job_results'] is a dictionary:
    # {'test_result': (dict from running perform_test(te) '...':..., }
    rejs = vf_pval(results['job_results'])
    repeats, _, n_methods = results['job_results'].shape
    mean_rejs = np.mean(rejs, axis=0)
    #print mean_rejs
    #std_pvals = np.std(rejs, axis=0)
    #std_pvals = np.sqrt(mean_rejs*(1.0-mean_rejs))
    xvalues = func_xvalues(results)
    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    line_styles = exglo.func_plot_fmt_map()
    method_labels = exglo.get_func2label_map()
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.plot(xvalues, mean_rejs[:, i], fmt, label=method_label)
    '''
    else:
        # h0 is true
        z = stats.norm.isf( (1-confidence)/2.0)
        for i in range(n_methods):
            phat = mean_rejs[:, i]
            conf_iv = z*(phat*(1-phat)/repeats)**0.5
            #plt.errorbar(test_sizes, phat, conf_iv, fmt=line_styles[i], label=method_labels[i])
            plt.plot(test_sizes, mean_rejs[:, i], line_styles[i], label=method_labels[i])
    '''
    ylabel = 'Test power' if h1_true else 'Type-I error'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.xticks( np.hstack((xvalues) ))
    alpha = results['alpha']
    plt.legend(loc='best')
    title = '%s. %d trials. $\\alpha$ = %.2g.'%( results['prob_label'],
            repeats, alpha) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results
| 35.163265 | 96 | 0.639969 | """Module containing convenient functions for plotting"""
__author__ = 'wittawat'
import fsic.ex.exglobal as exglo
import fsic.glo as glo
import matplotlib.pyplot as plt
import numpy as np
def plot_2d_data(pdata):
    """
    Scatter-plot a two-dimensional paired dataset.

    pdata: an instance of PairedData
    Return a figure handle
    """
    X, Y = pdata.xy()
    n_points, dim = X.shape
    # Only 2D X variables can be drawn on a plane.
    if dim != 2:
        raise ValueError('d must be 2 to plot.')
    fig = plt.figure()
    plt.plot(X, Y, 'ob')
    plt.title(pdata.label)
    return fig
def plot_prob_reject(ex, fname, h1_true, func_xvalues, xlabel,
        func_title=None):
    """
    plot the empirical probability that the statistic is above the threshold.
    This can be interpreted as type-1 error (when H0 is true) or test power
    (when H1 is true). The plot is against the specified x-axis.
    - ex: experiment number
    - fname: file name of the aggregated result
    - h1_true: True if H1 is true
    - func_xvalues: function taking aggregated results dictionary and return the values
        to be used for the x-axis values.
    - xlabel: label of the x-axis.
    - func_title: a function: results dictionary -> title of the plot
    Return loaded results
    """
    #from IPython.core.debugger import Tracer
    #Tracer()()
    results = glo.ex_load_result(ex, fname)
    def rej_accessor(jr):
        """Extract h0_rejected from one job result as a float."""
        rej = jr['test_result']['h0_rejected']
        # When used with vectorize(), making the value float will make the resulting
        # numpy array to be of float. nan values can be stored.
        return float(rej)
    #value_accessor = lambda job_results: job_results['test_result']['h0_rejected']
    vf_pval = np.vectorize(rej_accessor)
    # results['job_results'] is a dictionary:
    # {'test_result': (dict from running perform_test(te) '...':..., }
    rejs = vf_pval(results['job_results'])
    # Shape is (repeats, x-values, methods); average over repetitions.
    repeats, _, n_methods = results['job_results'].shape
    mean_rejs = np.mean(rejs, axis=0)
    #print mean_rejs
    #std_pvals = np.std(rejs, axis=0)
    #std_pvals = np.sqrt(mean_rejs*(1.0-mean_rejs))
    xvalues = func_xvalues(results)
    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    # One plot style / label per method job function.
    line_styles = exglo.func_plot_fmt_map()
    method_labels = exglo.get_func2label_map()
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.plot(xvalues, mean_rejs[:, i], fmt, label=method_label)
    '''
    else:
        # h0 is true
        z = stats.norm.isf( (1-confidence)/2.0)
        for i in range(n_methods):
            phat = mean_rejs[:, i]
            conf_iv = z*(phat*(1-phat)/repeats)**0.5
            #plt.errorbar(test_sizes, phat, conf_iv, fmt=line_styles[i], label=method_labels[i])
            plt.plot(test_sizes, mean_rejs[:, i], line_styles[i], label=method_labels[i])
    '''
    ylabel = 'Test power' if h1_true else 'Type-I error'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.xticks( np.hstack((xvalues) ))
    alpha = results['alpha']
    plt.legend(loc='best')
    title = '%s. %d trials. $\\alpha$ = %.2g.'%( results['prob_label'],
            repeats, alpha) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results
def plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):
    """Plot average runtime (log scale, with std-dev error bars) of each
    method against the x-axis values derived from the aggregated results.

    - ex: experiment number
    - fname: file name of the aggregated result
    - func_xvalues: function mapping the results dictionary to x-axis values
    - xlabel: label of the x-axis
    - func_title: optional function: results dictionary -> plot title
    Return loaded results
    """
    results = glo.ex_load_result(ex, fname)
    value_accessor = lambda job_results: job_results['time_secs']
    vf_pval = np.vectorize(value_accessor)
    # results['job_results'] is a dictionary:
    # {'test_result': (dict from running perform_test(te) '...':..., }
    times = vf_pval(results['job_results'])
    # Shape is (repeats, x-values, methods); aggregate over repetitions.
    repeats, _, n_methods = results['job_results'].shape
    time_avg = np.mean(times, axis=0)
    time_std = np.std(times, axis=0)
    xvalues = func_xvalues(results)
    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    line_styles = exglo.func_plot_fmt_map()
    method_labels = exglo.get_func2label_map()
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,
                label=method_label)
    ylabel = 'Time (s)'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.xlim([np.min(xvalues), np.max(xvalues)])
    plt.xticks( xvalues, xvalues )
    plt.legend(loc='best')
    # Runtimes typically span orders of magnitude; use a log y-axis.
    plt.gca().set_yscale('log')
    title = '%s. %d trials. '%( results['prob_label'],
            repeats ) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results
| 1,813 | 0 | 50 |
c4fc0d93b3f5a09645da6bbf503d6d4d11acefb1 | 918 | py | Python | tests/GroupStatisticsTest.py | amygreen/Pyhack | 2f950b73943309c321811282491cb3adde2b81c3 | [
"MIT"
] | 1 | 2018-07-13T01:39:18.000Z | 2018-07-13T01:39:18.000Z | tests/GroupStatisticsTest.py | amygreen/Pyhack | 2f950b73943309c321811282491cb3adde2b81c3 | [
"MIT"
] | null | null | null | tests/GroupStatisticsTest.py | amygreen/Pyhack | 2f950b73943309c321811282491cb3adde2b81c3 | [
"MIT"
] | 2 | 2018-06-28T09:09:49.000Z | 2018-06-28T09:14:42.000Z | from PythonHackathon.PythonHackathon_Mos.GroupStatistics import *
| 26.228571 | 65 | 0.734205 | from PythonHackathon.PythonHackathon_Mos.GroupStatistics import *
class TestGroupStatistics():
def test_invalid_run_inp1(self):
self.GroupStatistics = GroupStatistics(data_folder='.')
try:
result = GroupStatistics.run(mean='@', std=True)
except TypeError:
print('Input mean value must be True or False')
return True
else:
return False
def test_invalid_run_inp2(self):
self.GroupStatistics = GroupStatistics(data_folder='.')
try:
result = GroupStatistics.run(mean=True, std='@')
except TypeError:
print('Input std value must be True or False')
return True
else:
return False
def test_invalid_GroupStatistics_inp(self):
try:
result = GroupStatistics(data_folder='')
result = GroupStatistics(data_folder=123)
result = GroupStatistics(data_folder='#$%')
except TypeError:
print('Input data_folder must be a valid path')
return True
else:
return False
| 750 | 7 | 95 |
1854c4cfedb02ac45bd303d9df4ffa04df7201e9 | 12,595 | py | Python | src/test/rgw/rgw_multi/tests_es.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 4 | 2020-04-08T03:42:02.000Z | 2020-10-01T20:34:48.000Z | src/test/rgw/rgw_multi/tests_es.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 93 | 2020-03-26T14:29:14.000Z | 2020-11-12T05:54:55.000Z | src/test/rgw/rgw_multi/tests_es.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 23 | 2020-03-24T10:28:44.000Z | 2020-09-24T09:42:19.000Z | import json
import logging
import boto
import boto.s3.connection
import datetime
import dateutil
from nose.tools import eq_ as eq
from six.moves import range
from .multisite import *
from .tests import *
from .zone_es import *
log = logging.getLogger(__name__)
| 45.634058 | 164 | 0.597142 | import json
import logging
import boto
import boto.s3.connection
import datetime
import dateutil
from nose.tools import eq_ as eq
from six.moves import range
from .multisite import *
from .tests import *
from .zone_es import *
log = logging.getLogger(__name__)
def check_es_configured():
    """Skip the calling test unless the master zonegroup has at least one
    elasticsearch-tier zone configured."""
    zonegroup = get_realm().master_zonegroup()
    if not zonegroup.zones_by_type.get("elasticsearch"):
        raise SkipTest("Requires at least one ES zone")
def is_es_zone(zone_conn):
    """True when zone_conn is a connection to an elasticsearch-tier zone;
    falsy connections are never ES zones."""
    return bool(zone_conn) and zone_conn.zone.tier_type() == "elasticsearch"
def verify_search(bucket_name, src_keys, result_keys, f):
    """Assert result_keys equals the expected subset of src_keys.

    Expected keys are those in bucket_name (when given) that satisfy the
    predicate f, sorted by (bucket, name, version); result_keys must match
    pairwise in both content and length.
    """
    expected = [k for k in src_keys
                if (not bucket_name or k.bucket.name == bucket_name) and f(k)]
    expected.sort(key=lambda key: (key.bucket.name, key.name, key.version_id))
    log.debug('check keys:' + dump_json(expected))
    log.debug('result keys:' + dump_json(result_keys))
    # zip_longest pads the shorter side with None, so the asserts below also
    # catch length mismatches between expected and actual results.
    for expected_key, result_key in zip_longest(expected, result_keys):
        assert expected_key
        assert result_key
        check_object_eq(expected_key, result_key)
def do_check_mdsearch(conn, bucket, src_keys, req_str, src_filter):
    """Run metadata search req_str (optionally scoped to bucket) and verify
    the hits against the src_keys passing src_filter."""
    bucket_name = bucket.name if bucket else ''
    search = MDSearch(conn, bucket_name, req_str)
    hits = search.search(sort_key=lambda k: (k.bucket.name, k.name, k.version_id))
    verify_search(bucket_name, src_keys, hits, src_filter)
def init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = None):
    """Create per-zone buckets, upload num_keys objects to each, and wait
    for every elasticsearch zone to sync.

    - create_obj: callback (key, index) that uploads one object; when falsy,
        no objects are written.
    - bucket_init_cb: optional callback (zone_conn, bucket) run before upload.
    Returns (es_target_conns, non_es_source_conns, buckets, uploaded_keys).
    """
    check_es_configured()
    realm = get_realm()
    zonegroup = realm.master_zonegroup()
    zonegroup_conns = ZonegroupConns(zonegroup)
    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns, buckets_per_zone = buckets_per_zone)
    if bucket_init_cb:
        for zone_conn, bucket in zone_bucket:
            bucket_init_cb(zone_conn, bucket)
    src_keys = []
    owner = None
    # Random prefix isolates this run's objects from earlier test runs.
    obj_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6))
    # don't wait for meta sync just yet
    for zone, bucket in zone_bucket:
        for count in range(num_keys):
            objname = obj_prefix + str(count)
            k = new_key(zone, bucket.name, objname)
            # k.set_contents_from_string(content + 'x' * count)
            if not create_obj:
                continue
            create_obj(k, count)
            # Cache the owner from the first listing; it applies to all keys.
            if not owner:
                for list_key in bucket.list_versions():
                    owner = list_key.owner
                    break
            k = bucket.get_key(k.name, version_id = k.version_id)
            k.owner = owner # owner is not set when doing get_key()
            src_keys.append(k)
    zonegroup_meta_checkpoint(zonegroup)
    # Split the zonegroup connections: ES zones are search targets, the
    # rest are data sources.
    sources = []
    targets = []
    for target_conn in zonegroup_conns.zones:
        if not is_es_zone(target_conn):
            sources.append(target_conn)
            continue
        targets.append(target_conn)
    buckets = []
    # make sure all targets are synced
    for source_conn, bucket in zone_bucket:
        buckets.append(bucket)
        for target_conn in targets:
            zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
    return targets, sources, buckets, src_keys
def test_es_object_search():
    """Metadata search over built-in object attributes (bucket, name, etag,
    size) with <, <=, ==, >=, > operators and an 'or' compound query."""
    min_size = 10
    content = 'a' * min_size
    def create_obj(k, i):
        # Object i is min_size + i bytes, so sizes/etags differ per key.
        k.set_contents_from_string(content + 'x' * i)
    targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 2)
    for target_conn in targets:
        # bucket checks
        for bucket in buckets:
            # check name
            do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
            do_check_mdsearch(target_conn.conn, bucket, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
        # check on all buckets
        for key in src_keys:
            # limiting to checking specific key name, otherwise could get results from
            # other runs / tests
            do_check_mdsearch(target_conn.conn, None, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
        # check on specific bucket
        for bucket in buckets:
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name < ' + key.name, lambda k: k.name < key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name <= ' + key.name, lambda k: k.name <= key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name >= ' + key.name, lambda k: k.name >= key.name)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name > ' + key.name, lambda k: k.name > key.name)
            do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + src_keys[0].name + ' or name >= ' + src_keys[2].name,
                    lambda k: k.name == src_keys[0].name or k.name >= src_keys[2].name)
            # check etag
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag < ' + key.etag[1:-1], lambda k: k.etag < key.etag)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag == ' + key.etag[1:-1], lambda k: k.etag == key.etag)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag > ' + key.etag[1:-1], lambda k: k.etag > key.etag)
            # check size
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size < ' + str(key.size), lambda k: k.size < key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size <= ' + str(key.size), lambda k: k.size <= key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size == ' + str(key.size), lambda k: k.size == key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size >= ' + str(key.size), lambda k: k.size >= key.size)
            for key in src_keys:
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size > ' + str(key.size), lambda k: k.size > key.size)
def date_from_str(s):
    """Parse a timestamp string into a datetime via dateutil's parser."""
    return dateutil.parser.parse(s)
def test_es_object_search_custom():
    """Metadata search over user-defined attributes of string, int and date
    type, including 'or', 'and' and parenthesized compound queries."""
    min_size = 10
    content = 'a' * min_size
    def bucket_init(zone_conn, bucket):
        # Declare the custom metadata fields (and their types) to index.
        req = MDSearchConfig(zone_conn.conn, bucket.name)
        req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date')
    def create_obj(k, i):
        # Each object carries distinct string/int/date metadata values.
        date = datetime.datetime.now() + datetime.timedelta(seconds=1) * i
        date_str = date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
        k.set_contents_from_string(content + 'x' * i, headers = { 'X-Amz-Meta-Foo-Str': str(i * 5),
                                                                  'X-Amz-Meta-Foo-Int': str(i * 5),
                                                                  'X-Amz-Meta-Foo-Date': date_str})
    targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init)
    for target_conn in targets:
        # bucket checks
        for bucket in buckets:
            str_vals = []
            for key in src_keys:
                # check string values
                val = key.get_metadata('foo-str')
                str_vals.append(val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str < ' + val, lambda k: k.get_metadata('foo-str') < val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + val, lambda k: k.get_metadata('foo-str') <= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str == ' + val, lambda k: k.get_metadata('foo-str') == val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + val, lambda k: k.get_metadata('foo-str') >= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str > ' + val, lambda k: k.get_metadata('foo-str') > val)
                # check int values
                sval = key.get_metadata('foo-int')
                val = int(sval)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int < ' + sval, lambda k: int(k.get_metadata('foo-int')) < val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int <= ' + sval, lambda k: int(k.get_metadata('foo-int')) <= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int == ' + sval, lambda k: int(k.get_metadata('foo-int')) == val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int >= ' + sval, lambda k: int(k.get_metadata('foo-int')) >= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int > ' + sval, lambda k: int(k.get_metadata('foo-int')) > val)
                # check date values
                sval = key.get_metadata('foo-date')
                val = date_from_str(sval)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date < ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) < val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date <= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) <= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date == ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) == val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date >= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) >= val)
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date > ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) > val)
            # 'or' query
            for i in range(len(src_keys) // 2):
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + str_vals[i] + ' or x-amz-meta-foo-str >= ' + str_vals[-i],
                        lambda k: k.get_metadata('foo-str') <= str_vals[i] or k.get_metadata('foo-str') >= str_vals[-i] )
            # 'and' query
            for i in range(len(src_keys) // 2):
                do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + str_vals[i] + ' and x-amz-meta-foo-str <= ' + str_vals[i + 1],
                        lambda k: k.get_metadata('foo-str') >= str_vals[i] and k.get_metadata('foo-str') <= str_vals[i + 1] )
            # more complicated query
            for i in range(len(src_keys) // 2):
                do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name + ' and x-amz-meta-foo-str >= ' + str_vals[i] +
                        ' and (x-amz-meta-foo-str <= ' + str_vals[i + 1] + ')',
                        lambda k: k.bucket.name == bucket.name and (k.get_metadata('foo-str') >= str_vals[i] and
                            k.get_metadata('foo-str') <= str_vals[i + 1]) )
def test_es_bucket_conf():
    """The per-bucket metadata-search configuration can be read back from a
    source zone and deleted again."""
    # NOTE(review): min_size is unused in this test.
    min_size = 0
    def bucket_init(zone_conn, bucket):
        req = MDSearchConfig(zone_conn.conn, bucket.name)
        req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date')
    targets, sources, buckets, _ = init_env(None, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init)
    for source_conn in sources:
        for bucket in buckets:
            req = MDSearchConfig(source_conn.conn, bucket.name)
            conf = req.get_config()
            # Collapse the config entries into {key: type} for comparison.
            d = {}
            for entry in conf:
                d[entry['Key']] = entry['Type']
            eq(len(d), 3)
            eq(d['x-amz-meta-foo-str'], 'str')
            eq(d['x-amz-meta-foo-int'], 'int')
            eq(d['x-amz-meta-foo-date'], 'date')
            # Deleting the config must leave it empty.
            req.del_config()
            conf = req.get_config()
            eq(len(conf), 0)
        break # no need to iterate over all zones
| 12,121 | 0 | 207 |
b970a0e6615c83cf1e5b12238ee629561c077642 | 3,921 | py | Python | blender-add-on/xrs/render.py | MikeFesta/3xr | e4b8a7a0db61c9247d590f293f67c9baacf77a7f | [
"Apache-2.0"
] | 7 | 2021-11-16T14:26:47.000Z | 2021-12-22T13:27:24.000Z | blender-add-on/xrs/render.py | MikeFesta/3xr | e4b8a7a0db61c9247d590f293f67c9baacf77a7f | [
"Apache-2.0"
] | null | null | null | blender-add-on/xrs/render.py | MikeFesta/3xr | e4b8a7a0db61c9247d590f293f67c9baacf77a7f | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
""" Render related functions
"""
import bpy
import threading
import xrs.collection
import xrs.material
def add_render_camera():
""" Adds one render camera that can have base settings in one place """
bpy.ops.object.camera_add()
renderCam = bpy.data.cameras['Camera']
renderCam.clip_start = 0.0254
def disable_direct_indirect_for_bake():
""" Turn off direct and indirect lighting in bake settings """
bpy.context.scene.render.bake.use_pass_direct = False
bpy.context.scene.render.bake.use_pass_indirect = False
def render_and_save():
""" Render the image from the active camera and save it """
bpy.ops.render.render(write_still=True)
result = None
def render_and_save():
""" Render the image from the active camera and save it """
bpy.ops.render.render(write_still=True)
def set_cpu():
""" Use the CPU for rendering """
bpy.context.scene.cycles.device = 'CPU'
def set_cycles(samples = 128):
""" Set the render engine to use cycles """
bpy.context.scene.render.engine = "CYCLES"
bpy.context.scene.cycles.samples = samples
def set_eevee():
""" Set the render engine to use cycles """
bpy.context.scene.render.engine = "BLENDER_EEVEE"
def set_gpu():
""" Use the GPU for rendering """
bpy.context.scene.cycles.device = 'GPU'
def set_resolution(x=4096,y=4096):
""" Set the resolution of the image to render """
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.render.resolution_x = x
bpy.context.scene.render.resolution_y = y
if x <= 512:
bpy.context.scene.render.tile_x = x
elif x <= 1024:
bpy.context.scene.render.tile_x = x / 2
elif x <= 2048:
bpy.context.scene.render.tile_x = x / 4
else:
bpy.context.scene.render.tile_x = x / 8
if y <= 512:
bpy.context.scene.render.tile_y = y
elif y <= 1024:
bpy.context.scene.render.tile_y = y / 2
elif y <= 2048:
bpy.context.scene.render.tile_y = y / 4
else:
bpy.context.scene.render.tile_y = y / 8
def set_filepath_with_format(path, format):
""" Set the path of the file to be saved on render in the given format """
bpy.context.scene.render.filepath = path
bpy.context.scene.render.image_settings.file_format = format
def set_bake_render(resolution = 4096):
""" Set for the optimal baking settings by default """
set_cycles()
bpy.context.scene.display_settings.display_device = 'sRGB'
set_resolution(resolution, resolution)
def shadow_render(planeName):
""" Sets up AO shadow for renders """
longest_dim = xrs.collection.get_largest_dimension("master")
bpy.ops.mesh.primitive_plane_add(size=longest_dim*2, location=(0,0,-0.0001))
bpy.data.objects['Plane'].name = planeName
bpy.data.meshes['Plane'].name = planeName
planeObj = bpy.data.objects[planeName]
xrs.material.make_material()
planeMat = bpy.data.materials[planeName]
planeMat.blend_method = "BLEND"
xrs.material.new_image_texture(planeMat.name, "ao_plane", size=1024)
bpy.data.worlds['World'].light_settings.use_ambient_occlusion = True
distAO = bpy.data.worlds['World'].light_settings.distance
shortDim = xrs.collection.get_shortest_dimension("master")
distAO = 0.23*shortDim
if distAO > 6:
distAO = 6
bpy.data.scenes['Scene'].cycles.samples = 1024
bpy.context.scene.cycles.bake_type = 'AO'
planeObj.select_set(True)
planeMat.node_tree.nodes['ao_plane'].select = True
bpy.ops.object.bake(type="AO", save_mode='INTERNAL')
aoPlane = planeMat.node_tree.nodes['ao_plane']
transparentBSDF = planeMat.node_tree.nodes.new("ShaderNodeBsdfTransparent")
matOutput = planeMat.node_tree.nodes['Material Output']
xrs.material.link_output_to_slot_named(planeMat, aoPlane.outputs[0], transparentBSDF, "Color")
xrs.material.link_output_to_slot_named(planeMat, transparentBSDF.outputs[0], matOutput, "Surface")
| 34.394737 | 100 | 0.729406 | # SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
""" Render related functions
"""
import bpy
import threading
import xrs.collection
import xrs.material
def add_render_camera():
    """ Adds one render camera that can have base settings in one place """
    bpy.ops.object.camera_add()
    renderCam = bpy.data.cameras['Camera']
    # 0.0254 m = 1 inch near-clip, so close-up product shots don't clip.
    renderCam.clip_start = 0.0254
def disable_direct_indirect_for_bake():
    """ Turn off direct and indirect lighting in bake settings """
    # Leaves only the selected bake pass (e.g. color/AO) in the baked image.
    bpy.context.scene.render.bake.use_pass_direct = False
    bpy.context.scene.render.bake.use_pass_indirect = False
def render_and_save():
    """ Render the image from the active camera and save it """
    # write_still=True also writes the result to the configured render filepath.
    bpy.ops.render.render(write_still=True)
# Module-level completion flag written by back_calc().
result = None


def back_calc(function):
    """Invoke *function* and record completion in the module-global result.

    Fix: the original body read ``result==42`` -- a comparison whose value
    was discarded -- so ``result`` was never updated despite the ``global``
    declaration; it is now an assignment.
    """
    global result
    function()
    result = 42
# NOTE(review): duplicate of render_and_save defined earlier in this file;
# this redefinition is the one that wins at import time.
def render_and_save():
    """ Render the image from the active camera and save it """
    bpy.ops.render.render(write_still=True)
def set_cpu():
    """ Use the CPU for rendering """
    bpy.context.scene.cycles.device = 'CPU'
def set_cycles(samples = 128):
    """ Set the render engine to use cycles with the given sample count """
    bpy.context.scene.render.engine = "CYCLES"
    bpy.context.scene.cycles.samples = samples
def set_eevee():
    """ Set the render engine to use eevee """
    bpy.context.scene.render.engine = "BLENDER_EEVEE"
def set_gpu():
    """ Use the GPU for rendering """
    bpy.context.scene.cycles.device = 'GPU'
def set_resolution(x=4096, y=4096):
    """ Set the resolution of the image to render """
    def _tile(dim):
        # Same threshold ladder as before: full-size tile up to 512,
        # then half / quarter / eighth as the dimension grows.
        if dim <= 512:
            return dim
        if dim <= 1024:
            return dim / 2
        if dim <= 2048:
            return dim / 4
        return dim / 8
    scene = bpy.context.scene
    scene.render.resolution_percentage = 100
    scene.render.resolution_x = x
    scene.render.resolution_y = y
    scene.render.tile_x = _tile(x)
    scene.render.tile_y = _tile(y)
bpy.context.scene.render.filepath = path
bpy.context.scene.render.image_settings.file_format = format
def set_bake_render(resolution = 4096):
    """ Set for the optimal baking settings by default """
    # Baking requires Cycles; also force sRGB display and a square render
    # target of `resolution` x `resolution`.
    set_cycles()
    bpy.context.scene.display_settings.display_device = 'sRGB'
    set_resolution(resolution, resolution)
def shadow_render(planeName):
    """ Sets up AO shadow for renders.

    Adds a ground plane named `planeName` just below the origin, bakes the
    scene's ambient occlusion into an image texture on that plane, then wires
    the baked image through a transparent BSDF so the plane renders as a
    soft shadow only.
    """
    # Plane is sized from the largest dimension of the "master" collection
    # and sunk slightly below z=0 to avoid z-fighting with the model.
    longest_dim = xrs.collection.get_largest_dimension("master")
    bpy.ops.mesh.primitive_plane_add(size=longest_dim*2, location=(0,0,-0.0001))
    bpy.data.objects['Plane'].name = planeName
    bpy.data.meshes['Plane'].name = planeName
    planeObj = bpy.data.objects[planeName]
    xrs.material.make_material()
    planeMat = bpy.data.materials[planeName]
    planeMat.blend_method = "BLEND"
    # 1024px target image that will receive the baked AO.
    xrs.material.new_image_texture(planeMat.name, "ao_plane", size=1024)
    bpy.data.worlds['World'].light_settings.use_ambient_occlusion = True
    distAO = bpy.data.worlds['World'].light_settings.distance
    shortDim = xrs.collection.get_shortest_dimension("master")
    # NOTE(review): distAO is immediately overwritten here and never written
    # back to the world light settings, so the clamped value below has no
    # effect on the bake -- confirm whether an assignment was intended.
    distAO = 0.23*shortDim
    if distAO > 6:
        distAO = 6
    bpy.data.scenes['Scene'].cycles.samples = 1024
    bpy.context.scene.cycles.bake_type = 'AO'
    # The plane object and its target image node must be selected/active
    # for bpy.ops.object.bake to bake into them.
    planeObj.select_set(True)
    planeMat.node_tree.nodes['ao_plane'].select = True
    bpy.ops.object.bake(type="AO", save_mode='INTERNAL')
    # Route: baked AO image -> transparent BSDF color -> material output,
    # so white (unoccluded) areas become fully transparent.
    aoPlane = planeMat.node_tree.nodes['ao_plane']
    transparentBSDF = planeMat.node_tree.nodes.new("ShaderNodeBsdfTransparent")
    matOutput = planeMat.node_tree.nodes['Material Output']
    xrs.material.link_output_to_slot_named(planeMat, aoPlane.outputs[0], transparentBSDF, "Color")
    xrs.material.link_output_to_slot_named(planeMat, transparentBSDF.outputs[0], matOutput, "Surface")
| 45 | 0 | 23 |
8bcdbd197b18cd7d102389feb825c01ea13c01e6 | 3,038 | py | Python | queue/scheduler.py | alexanderrichard/queueing-tool | c8858565497b4367b2f3a8ea3f9ea5140e5d00fc | [
"MIT"
] | 21 | 2016-04-28T11:00:21.000Z | 2021-05-21T21:10:27.000Z | queue/scheduler.py | alexanderrichard/queueing-tool | c8858565497b4367b2f3a8ea3f9ea5140e5d00fc | [
"MIT"
] | 3 | 2017-01-13T21:29:00.000Z | 2019-07-25T22:29:04.000Z | queue/scheduler.py | alexanderrichard/queueing-tool | c8858565497b4367b2f3a8ea3f9ea5140e5d00fc | [
"MIT"
] | 10 | 2016-11-05T08:18:38.000Z | 2021-05-07T15:01:32.000Z | #!/usr/bin/python2.7
import datetime
| 44.676471 | 152 | 0.648453 | #!/usr/bin/python2.7
import datetime
class Resources(object):
    """A bundle of schedulable resources: GPU count, CPU threads and memory."""

    _FIELDS = ("gpus", "threads", "memory")

    def __init__(self, gpus, threads, memory):
        self.gpus = gpus
        self.threads = threads
        self.memory = memory

    def __add__(self, other):
        """Component-wise sum of two resource bundles."""
        combined = [getattr(self, field) + getattr(other, field)
                    for field in self._FIELDS]
        return Resources(*combined)
class SchedulerJob(object):
    """Scheduler-side view of a server job: resource demand, estimated
    duration, elapsed waiting/running time and priority."""

    def __init__(self, job, job_id):  # job is a Job object from the server
        self.resources = Resources(job.n_gpus, job.threads, job.memory)
        self.hours = job.hours  # estimated duration of the job
        # Seconds since the job's timestamp, clamped to at least one second.
        elapsed_seconds = (datetime.datetime.now() - job.time).total_seconds()
        self.time = max(1, elapsed_seconds)
        self.priority = job.priority
        self.job_id = job_id

    def fits(self, resources):
        """True if this job's demand fits entirely inside `resources`."""
        demand = self.resources
        return all([
            demand.gpus <= resources.gpus,
            demand.threads <= resources.threads,
            demand.memory <= resources.memory,
        ])
class Scheduler(object):
    """Chooses the next waiting job to start, given the node's currently free
    resources and the set of running jobs (backfilling smaller jobs while a
    large high-priority job waits)."""

    def __init__(self, free_gpus, free_threads, free_memory):
        self.free_resources = Resources(free_gpus, free_threads, free_memory)

    def update_resources(self, free_gpus, free_threads, free_memory):
        """Refresh the snapshot of currently free resources."""
        self.free_resources = Resources(free_gpus, free_threads, free_memory)

    def waiting_time(self, job, running_jobs):
        """Seconds until `job` could fit once enough running jobs finish.

        Returns 0 when there is nothing to wait for (no running jobs) or when
        `job` would not fit even after all running jobs have finished.
        """
        if not running_jobs:
            return 0
        # sort jobs by remaining runtime (in seconds); key on the runtime only
        # so ties never compare SchedulerJob objects (py3-safe)
        runtimes = [max(1, j.hours * 3600 - j.time) for j in running_jobs]
        runtimes, sorted_jobs = zip(*sorted(zip(runtimes, running_jobs),
                                            key=lambda pair: pair[0]))
        # accumulate freed resources job by job until `job` fits
        tmp_resources = self.free_resources
        for idx in range(len(sorted_jobs)):
            tmp_resources = tmp_resources + sorted_jobs[idx].resources
            if job.fits(tmp_resources):
                return runtimes[idx]
        return 0

    def schedule(self, jobs, waiting_ids, running_ids):
        """Return the id of the next job to start, or None if nothing fits.

        `jobs` maps ids to server Job objects; `waiting_ids`/`running_ids`
        are the ids of waiting and running jobs respectively.
        """
        if not waiting_ids:
            return None
        try:
            waiting_jobs = [SchedulerJob(jobs[job_id], job_id) for job_id in waiting_ids]
            # sort waiting jobs by priority (first key, descending) and job_id
            # (second key, ascending, for equal priority)
            priorities = [job.priority for job in waiting_jobs]
            job_ids = [job.job_id for job in waiting_jobs]
            priorities, job_ids, waiting_jobs = zip(*sorted(
                zip(priorities, job_ids, waiting_jobs),
                key=lambda sl: (-sl[0], sl[1])))
            # highest-priority job goes first whenever it fits
            if waiting_jobs[0].fits(self.free_resources):
                return waiting_jobs[0].job_id
            # Otherwise compute how long it must wait and backfill a smaller
            # job that both fits now and finishes within that window.
            running_jobs = [SchedulerJob(jobs[job_id], job_id) for job_id in running_ids]
            time_slot = self.waiting_time(waiting_jobs[0], running_jobs)
            for job in waiting_jobs[1:]:
                if job.fits(self.free_resources) and job.hours * 3600 <= time_slot:
                    return job.job_id
            return None
        except Exception:
            # e.g. an id vanished from `jobs` mid-scheduling; was a bare
            # `except:` that also swallowed KeyboardInterrupt/SystemExit.
            return None
| 2,693 | 12 | 293 |
49e8e76dd53fa2af96bbbae5e869db3118668d98 | 8,689 | py | Python | cogs/general.py | jgayfer/Spirit | 024bc60727f92fb742daf0d28a3465a56820a7c0 | [
"MIT"
] | 34 | 2017-08-28T18:57:55.000Z | 2021-06-12T18:04:20.000Z | cogs/general.py | jgayfer/Spirit | 024bc60727f92fb742daf0d28a3465a56820a7c0 | [
"MIT"
] | 75 | 2017-07-27T22:26:58.000Z | 2019-06-30T06:20:43.000Z | cogs/general.py | jgayfer/Spirit | 024bc60727f92fb742daf0d28a3465a56820a7c0 | [
"MIT"
] | 24 | 2017-08-28T00:27:02.000Z | 2022-03-02T22:31:00.000Z | from datetime import datetime
import discord
from discord.ext import commands
import psutil
import pytz
from cogs.utils.message_manager import MessageManager
from cogs.utils import constants
| 43.445 | 143 | 0.615491 | from datetime import datetime
import discord
from discord.ext import commands
import psutil
import pytz
from cogs.utils.message_manager import MessageManager
from cogs.utils import constants
class General:
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process()
@commands.command()
@commands.cooldown(rate=2, per=5, type=commands.BucketType.user)
async def countdown(self, ctx):
"""Show time until upcoming Destiny 2 releases"""
manager = MessageManager(ctx)
pst_now = datetime.now(tz=pytz.timezone('US/Pacific'))
text = ""
for name, date in constants.RELEASE_DATES:
diff = date - pst_now
days = diff.days + 1
if days == 0:
text += "{}: Today!\n".format(name)
elif days == 1:
text += "{}: Tomorrow!\n".format(name)
elif days > 1:
text += "{}: {} days\n".format(name, days)
if not text:
text = "There are no concrete dates for our next adventure..."
countdown = discord.Embed(title="Destiny 2 Countdown", color=constants.BLUE)
countdown.description = text
await manager.send_embed(countdown)
await manager.clean_messages()
@commands.command()
@commands.cooldown(rate=1, per=60.0, type=commands.BucketType.user)
async def feedback(self, ctx, *, message):
"""
Send a message to the bot's developer
Ex. '!feedback Your bot is awesome!'
This command was adapted from RoboDanny by Rapptz - https://www.github.com/Rapptz/RoboDanny
"""
manager = MessageManager(ctx)
e = discord.Embed(title='Feedback', colour=constants.BLUE)
e.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
e.description = message
e.timestamp = ctx.message.created_at
if ctx.guild is not None:
e.add_field(name='Server', value='{} (ID: {})'.format(ctx.guild.name, ctx.guild.id), inline=False)
e.add_field(name='Channel', value='{} (ID: {})'.format(ctx.channel, ctx.channel.id), inline=False)
e.set_footer(text='Author ID: {}'.format(ctx.author.id))
feedback_channel = self.bot.get_channel(359848505654771715)
if feedback_channel:
await feedback_channel.send(embed=e)
else:
asal = await self.bot.get_user_info("118926942404608003")
await asal.send(embed=e)
await manager.send_message("Your feedback has been sent to the developer!")
await manager.clean_messages()
@feedback.error
async def feedback_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
manager = MessageManager(ctx)
await manager.send_message("You forgot to include your feedback!")
await manager.clean_messages()
@commands.command()
@commands.cooldown(rate=2, per=5, type=commands.BucketType.user)
async def about(self, ctx):
"""Display information about the bot itself
This command was adapted from RoboDanny by Rapptz - https://www.github.com/Rapptz/RoboDanny
"""
manager = MessageManager(ctx)
e = discord.Embed(title='Spirit v{}'.format(constants.VERSION), colour=constants.BLUE)
e.description = ("[Invite Spirit](https://discordapp.com/oauth2/authorize?client_id=335084645743984641&scope=bot&permissions=523344)\n"
+ "[Spirit Support Server](https://discord.gg/GXCFpkr)")
owner = self.bot.get_user(118926942404608003)
e.set_author(name=str(owner), icon_url=owner.avatar_url)
# statistics
total_members = sum(1 for _ in self.bot.get_all_members())
total_online = len({m.id for m in self.bot.get_all_members() if m.status is discord.Status.online})
total_unique = len(self.bot.users)
voice_channels = []
text_channels = []
for guild in self.bot.guilds:
voice_channels.extend(guild.voice_channels)
text_channels.extend(guild.text_channels)
text = len(text_channels)
voice = len(voice_channels)
e.add_field(name='Members', value='{} total\n{} unique\n{} unique online'.format(total_members, total_unique, total_online))
e.add_field(name='Channels', value='{} total\n{} text\n{} voice'.format(text + voice, text, voice))
memory_usage = "%0.2f" % (self.process.memory_full_info().uss / 1024**2)
cpu_usage = "%0.2f" % (self.process.cpu_percent() / psutil.cpu_count())
e.add_field(name='Process', value='{} MiB\n{}% CPU'.format(memory_usage, cpu_usage))
e.add_field(name='Guilds', value=len(self.bot.guilds))
e.add_field(name='Commands Run', value=self.bot.command_count)
e.add_field(name='Uptime', value=self.get_bot_uptime(brief=True))
e.set_footer(text='Made with discord.py', icon_url='http://i.imgur.com/5BFecvA.png')
await manager.send_embed(e)
await manager.clean_messages()
@commands.command()
@commands.cooldown(rate=3, per=5, type=commands.BucketType.user)
async def donate(self, ctx):
"""Support the continued development of Spirit!"""
manager = MessageManager(ctx)
e = discord.Embed(colour=constants.BLUE)
text = ("Spirit is a work of love that has taken countless hours to develop. Your donation "
+ "will go towards server hosting costs, development tools, and if you donate "
+ "monthly, will also earn you some special privelges on the Spirit Discord server!\n\n"
+ "Donate once: https://www.paypal.me/spiritbot\n"
+ "Donate monthly: https://www.patreon.com/spiritbot")
reward_1 = "- Colored name on the Spirit Discord server"
reward_2 = ("- Patron role and colored name on the Spirit Discord server\n"
+ "- Access to the developer blog on Patreon and the Spirit Discord server\n"
+ "- Access to a patron only channel on the Spirit Discord server which includes sneak peeks of new features!")
reward_3 = ("- All rewards from the previous tier\n"
+ "- Your own personalized message built right into Spirit!")
e.description = text
e.add_field(name="$1/Month", value=reward_1)
e.add_field(name="$5/Month", value=reward_2)
e.add_field(name="$10/Month", value=reward_3)
await manager.send_embed(e)
await manager.clean_messages()
def get_bot_uptime(self, *, brief=False):
now = datetime.utcnow()
delta = now - self.bot.uptime
hours, remainder = divmod(int(delta.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
if not brief:
if days:
fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
else:
fmt = '{h} hours, {m} minutes, and {s} seconds'
else:
fmt = '{h}h {m}m {s}s'
if days:
fmt = '{d}d ' + fmt
return fmt.format(d=days, h=hours, m=minutes, s=seconds)
async def on_guild_join(self, guild):
"""Send welcome message to the server owner"""
message = ("Greetings! My name is **{}**, and my sole responsibility is to help you and "
"your group kick ass in Destiny 2! You're receiving this message because you "
"or one of your trusted associates has added me to **{}**.\n\n"
"**Command Prefix**\n\n"
"My default prefix is **!**, but you can also just mention me with **@{}**. "
"If another bot is already using the **!** prefix, you can choose a different prefix "
"for your server with **!settings setprefix <new_prefix>** (don't include the brackets).\n\n"
"For a list of all available commands, use the **!help** command. If you want more "
"information on a command, use **!help <command_name>**.\n\n"
"If you have any feedback, you can use my **!feedback** command to send "
"a message to my developer! If you want to request a feature, report a bug, "
"stay up to date with new features, or just want some extra help, check out the official "
"{} Support server! (https://discord.gg/GXCFpkr)"
).format(self.bot.user.name, guild.name, self.bot.user.name,
self.bot.user.name, self.bot.user.name)
await guild.owner.send(message)
| 935 | 7,537 | 23 |
2b624a3a8bc3c9a72ad660ec8393a4ba93e2669f | 2,020 | py | Python | code/networks/cifar/lenet.py | shashankkotyan/RepresentationMetrics | 782c0ba727c523b7a159c497d2b91ef75aa9f201 | [
"MIT"
] | 3 | 2019-06-18T04:37:52.000Z | 2020-12-17T08:05:34.000Z | code/networks/cifar/lenet.py | shashankkotyan/DualQualityAssessment | 1b4fba13f14b0b6770b060081c3d0d1c3ea4a1c6 | [
"MIT"
] | 5 | 2020-05-13T16:47:36.000Z | 2022-02-10T01:38:57.000Z | code/networks/cifar/lenet.py | shashankkotyan/RepresentationMetrics | 782c0ba727c523b7a159c497d2b91ef75aa9f201 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from networks.cifar.cifar_model import CifarModel
class LeNet(CifarModel):
    """
    LeNet-style convolutional network for CIFAR, built with the Keras
    functional API (two conv/pool blocks followed by two dense layers).
    """
    def __init__(self, args):
        """
        Set the model name and delegate the rest of the setup to CifarModel.
        """
        self.name = 'LeNet'
        CifarModel.__init__(self, args)
    def network(self, img_input):
        """
        Build the network graph on top of `img_input` and return the
        softmax output tensor.
        """
        from tensorflow.keras import initializers, layers, regularizers
        weight_decay = 0.0001
        # Conv block 1: 6 filters of 5x5, BatchNorm + ReLU, 2x2 max-pool.
        x = layers.Conv2D(6, (5, 5), padding='valid', kernel_initializer=initializers.he_normal(), kernel_regularizer=regularizers.l2(weight_decay))(img_input)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D((2, 2), strides=(2, 2))(x)
        # Conv block 2: 16 filters of 5x5, same normalization and pooling.
        x = layers.Conv2D(16, (5, 5), padding='valid', kernel_initializer=initializers.he_normal(), kernel_regularizer=regularizers.l2(weight_decay))(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D((2, 2), strides=(2, 2))(x)
        # Classifier head: 120 -> 84 -> num_classes (softmax).
        x = layers.Flatten()(x)
        x = layers.Dense(120, kernel_initializer=initializers.he_normal(), kernel_regularizer=regularizers.l2(weight_decay) )(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Dense(84, kernel_initializer=initializers.he_normal(), kernel_regularizer=regularizers.l2(weight_decay) )(x)
        x = layers.BatchNormalization()(x)
        # 'Penultimate' / 'Output' names are referenced by callers that
        # inspect intermediate activations -- do not rename.
        x = layers.Activation('relu', name='Penultimate')(x)
        x = layers.Dense(self.num_classes, name='Output', activation = 'softmax', kernel_initializer=initializers.he_normal(), kernel_regularizer=regularizers.l2(weight_decay) )(x)
        return x
    def scheduler(self, epoch):
        """
        Piecewise-constant learning-rate schedule:
        0.01 before epoch 100, 0.005 before 150, then 0.001.
        """
        if epoch < 100:
            return 0.01
        if epoch < 150:
            return 0.005
        return 0.001
| 31.076923 | 180 | 0.606436 | #!/usr/bin/env python
from networks.cifar.cifar_model import CifarModel
class LeNet(CifarModel):
    """
    LeNet-style convolutional network for CIFAR (Keras functional API).
    """
    def __init__(self, args):
        """
        Name the model and hand the remaining setup to CifarModel.
        """
        self.name = 'LeNet'
        CifarModel.__init__(self, args)
    def network(self, img_input):
        """
        Build the graph on top of `img_input`; return the softmax tensor.
        """
        from tensorflow.keras import initializers, layers, regularizers
        decay = 0.0001

        def conv_block(tensor, filters):
            # 5x5 valid conv -> BatchNorm -> ReLU -> 2x2 max-pool
            tensor = layers.Conv2D(filters, (5, 5), padding='valid',
                                   kernel_initializer=initializers.he_normal(),
                                   kernel_regularizer=regularizers.l2(decay))(tensor)
            tensor = layers.BatchNormalization()(tensor)
            tensor = layers.Activation('relu')(tensor)
            return layers.MaxPooling2D((2, 2), strides=(2, 2))(tensor)

        x = conv_block(img_input, 6)
        x = conv_block(x, 16)
        x = layers.Flatten()(x)
        x = layers.Dense(120, kernel_initializer=initializers.he_normal(),
                         kernel_regularizer=regularizers.l2(decay))(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Dense(84, kernel_initializer=initializers.he_normal(),
                         kernel_regularizer=regularizers.l2(decay))(x)
        x = layers.BatchNormalization()(x)
        # Layer names 'Penultimate'/'Output' are part of the model contract.
        x = layers.Activation('relu', name='Penultimate')(x)
        x = layers.Dense(self.num_classes, name='Output', activation='softmax',
                         kernel_initializer=initializers.he_normal(),
                         kernel_regularizer=regularizers.l2(decay))(x)
        return x
    def scheduler(self, epoch):
        """
        Piecewise-constant learning rate: 0.01 / 0.005 / 0.001 at epochs
        100 and 150.
        """
        if epoch < 100:
            return 0.01
        elif epoch < 150:
            return 0.005
        return 0.001
| 0 | 0 | 0 |
ad5fc7cc875336bb8f74bd9b88a9dbe5cc61dabc | 938 | py | Python | scripts/send_msg.py | VladislavYU/OPIoT | ebe242eb214b05d9e34a2afd063cbe6e3a13f1bd | [
"Apache-2.0"
] | null | null | null | scripts/send_msg.py | VladislavYU/OPIoT | ebe242eb214b05d9e34a2afd063cbe6e3a13f1bd | [
"Apache-2.0"
] | null | null | null | scripts/send_msg.py | VladislavYU/OPIoT | ebe242eb214b05d9e34a2afd063cbe6e3a13f1bd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from optparse import OptionParser
import locale
from gsmmodem.modem import GsmModem
import logging

parser = OptionParser()
parser.add_option("-a", "--address", action="store", dest="ip", type="string", help="Cureent ip address")

PORT = '/dev/modem0'
BAUDRATE = 115200
PIN = "0000"  # SIM card PIN (if any)
# Recipient in international format. Must be a *string*: the old code passed
# the bare expression +79227814419 (unary plus on an int), but
# GsmModem.sendSms expects the destination number as text.
DESTINATION = '+79227814419'

if __name__ == '__main__':
    encode = locale.getdefaultlocale()
    (options, args) = parser.parse_args()
    if options.ip is None:
        parser.print_help()
        exit()
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
    modem = GsmModem(PORT, BAUDRATE)
    modem.smsTextMode = True
    modem.connect(PIN)
    # Text the current IP address to the configured recipient.
    modem.sendSms(DESTINATION, options.ip)
    try:
        # Specify a (huge) timeout so that it essentially blocks indefinitely,
        # but still receives CTRL+C interrupt signal
        modem.rxThread.join(10)
    finally:
        modem.close()
# -*- coding: utf-8 -*-
from optparse import OptionParser
import locale
from gsmmodem.modem import GsmModem
import logging

parser = OptionParser()
parser.add_option("-a", "--address", action="store", dest="ip", type="string", help="Cureent ip address")

PORT = '/dev/modem0'
BAUDRATE = 115200
PIN = "0000"  # SIM card PIN (if any)
# Recipient in international format. Must be a *string*: the old code passed
# the bare expression +79227814419 (unary plus on an int), but
# GsmModem.sendSms expects the destination number as text.
DESTINATION = '+79227814419'

if __name__ == '__main__':
    encode = locale.getdefaultlocale()
    (options, args) = parser.parse_args()
    if options.ip is None:
        parser.print_help()
        exit()
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
    modem = GsmModem(PORT, BAUDRATE)
    modem.smsTextMode = True
    modem.connect(PIN)
    # Text the current IP address to the configured recipient.
    modem.sendSms(DESTINATION, options.ip)
    try:
        # Specify a (huge) timeout so that it essentially blocks indefinitely,
        # but still receives CTRL+C interrupt signal
        modem.rxThread.join(10)
    finally:
        modem.close()
dc7612dddef8c21ce0f90436648cb22d667ceb8b | 869 | py | Python | molecule/default/tests/test_default.py | while-true-do/ansible-role-srv_tftp | 14f242b3e600cfe9e3f5e68eed217da91440d05a | [
"BSD-3-Clause"
] | 2 | 2019-08-07T17:11:11.000Z | 2021-04-16T12:12:13.000Z | molecule/default/tests/test_default.py | while-true-do/ansible-role-srv_tftp | 14f242b3e600cfe9e3f5e68eed217da91440d05a | [
"BSD-3-Clause"
] | 8 | 2019-09-02T07:55:00.000Z | 2019-09-18T14:47:58.000Z | molecule/default/tests/test_default.py | while-true-do/ansible-role-srv_tftp | 14f242b3e600cfe9e3f5e68eed217da91440d05a | [
"BSD-3-Clause"
] | null | null | null | # Some examples are given below.
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
| 18.891304 | 63 | 0.731876 | # Some examples are given below.
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_tftp_package(host):
pkg = host.package('tftp-server')
assert pkg.is_installed
def test_tftp_boot_package(host):
pkg = host.package('syslinux-tftpboot')
assert pkg.is_installed
def test_tftp_socket(host):
srv = host.service('tftp.socket')
assert srv.is_running
assert srv.is_enabled
def test_tftboot_dir(host):
dir = host.file('/var/lib/tftpboot')
assert dir.is_directory
def test_syslinux_files(host):
file = host.file('/var/lib/tftpboot/pxelinux.0')
assert file.exists
def test_syslinux_pxe_file(host):
file = host.file('/var/lib/tftpboot/pxelinux.cfg/default')
assert file.exists
| 517 | 0 | 138 |
990b056b1cfe2f15252ddc2e1505d55d79fdb056 | 3,808 | py | Python | lib/pgdb.py | 5H4D0W-C0D3R/epic_store_bot | 6af28cee968d7911fb07b1c4bfe886cba7e83a82 | [
"MIT"
] | 13 | 2020-12-20T17:28:55.000Z | 2022-03-27T18:40:01.000Z | lib/pgdb.py | 5H4D0W-C0D3R/epic_store_bot | 6af28cee968d7911fb07b1c4bfe886cba7e83a82 | [
"MIT"
] | 2 | 2021-02-08T21:07:51.000Z | 2022-02-21T06:28:40.000Z | lib/pgdb.py | 5H4D0W-C0D3R/epic_store_bot | 6af28cee968d7911fb07b1c4bfe886cba7e83a82 | [
"MIT"
] | 8 | 2021-06-07T00:29:26.000Z | 2022-03-31T13:51:46.000Z | import psycopg2
from .config import Config
config = Config()
| 31.733333 | 127 | 0.448267 | import psycopg2
from .config import Config
config = Config()
class Dtabase:
    """PostgreSQL persistence layer for the bot: ordered games, the single
    bot-config row and the single browser-cookie row.

    (The class name keeps its historical misspelling because callers import
    it under this name.)
    """
    def __init__(self):
        # Connect using the module-level Config and make sure all three
        # tables exist. Failures are reported but not raised, matching the
        # original best-effort behaviour.
        try:
            self.con = psycopg2.connect(
                host=config['db']['host'],
                database=config['db']['database'],
                user=config['db']['user'],
                password=config['db']['password']
            )
            commands = (
                """
                CREATE TABLE IF NOT EXISTS games_ordered (
                    game_id SERIAL PRIMARY KEY,
                    game_title VARCHAR(255) NOT NULL,
                    game_data JSON NOT NULL
                )
                """,
                """
                CREATE TABLE IF NOT EXISTS bot_config (
                    config_id SERIAL PRIMARY KEY,
                    config_data JSON NOT NULL
                )
                """,
                """
                CREATE TABLE IF NOT EXISTS browser_cookies (
                    cookie_id SERIAL PRIMARY KEY,
                    cookie_data JSON NOT NULL
                )
                """
            )
            self.cur = self.con.cursor()
            for command in commands:
                self.cur.execute(command)
            self.con.commit()
            print('Created tables')
        except (Exception, psycopg2.DatabaseError) as e:
            print('error db: ' + str(e))
    def insert_dta(self, title, data):
        """Insert one game row (title + JSON payload).

        Uses bound parameters instead of str.format, which closes the SQL
        injection hole and makes the old manual quote-doubling unnecessary.
        """
        try:
            command = ("INSERT INTO games_ordered (game_title, game_data) "
                       "VALUES (%s, %s) RETURNING game_id;")
            self.cur.execute(command, (title, data))
            self.con.commit()
            print('Data_Inserted')
        except (Exception, psycopg2.DatabaseError) as error:
            print('error ins: ' + str(error))
    def ins_conf(self, config):
        """Upsert the single bot-config row (fixed id 1)."""
        try:
            command = ("INSERT INTO bot_config (config_id, config_data) "
                       "VALUES (1, %s) "
                       "ON CONFLICT (config_id) "
                       "DO UPDATE SET config_data = EXCLUDED.config_data;")
            self.cur.execute(command, (config,))
            self.con.commit()
            print('Config_Updated')
        except (Exception, psycopg2.DatabaseError) as error:
            print('error cins: ' + str(error))
    def ins_cookie(self, cookie):
        """Upsert the single browser-cookie row (fixed id 1)."""
        try:
            command = ("INSERT INTO browser_cookies (cookie_id, cookie_data) "
                       "VALUES (1, %s) "
                       "ON CONFLICT (cookie_id) "
                       "DO UPDATE SET cookie_data = EXCLUDED.cookie_data;")
            self.cur.execute(command, (cookie,))
            self.con.commit()
            print('Cookie_Updated')
        except Exception as error:
            print('error co: ' + str(error))
    def get_dta(self, tab):
        """Fetch data from `tab`.

        games_ordered -> dict mapping game_title to game_data;
        bot_config / browser_cookies -> the single stored JSON payload.
        The table name is checked against a whitelist because identifiers
        cannot be bound as query parameters.
        """
        try:
            if tab not in ('games_ordered', 'bot_config', 'browser_cookies'):
                raise ValueError('unknown table: ' + tab)
            self.cur.execute("SELECT * FROM {}".format(tab))
            rows = self.cur.fetchall()
            print('Got_Data')
            if tab == 'games_ordered':
                gdta = {row[1]: row[2] for row in rows}
                print(gdta)
                return gdta
            print(rows[0][1])
            return rows[0][1]
        except (Exception, psycopg2.DatabaseError) as error:
            print('error getd: ' + str(error))
    def con_close(self):
        """Close the cursor and the connection."""
        self.cur.close()
        self.con.close()
        print('Connection_Closed')
| 3,550 | -7 | 201 |
2e85ecead62cc62f882a9e24cb13656903cdbd2a | 659 | py | Python | scm/style.py | rookiebulls/scm | f9baa225e049ff178d281ccb1709648bcc739228 | [
"MIT"
] | 3 | 2017-06-02T15:42:19.000Z | 2017-08-17T15:02:56.000Z | scm/style.py | rookiebulls/scm | f9baa225e049ff178d281ccb1709648bcc739228 | [
"MIT"
] | null | null | null | scm/style.py | rookiebulls/scm | f9baa225e049ff178d281ccb1709648bcc739228 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pygments.style import Style
from pygments.token import Token
from pygments.styles.default import DefaultStyle
| 26.36 | 72 | 0.646434 | # -*- coding: utf-8 -*-
from pygments.style import Style
from pygments.token import Token
from pygments.styles.default import DefaultStyle
class DocumentStyle(Style):
    # Pygments style for the scm prompt: completion-menu colors plus green
    # host/pound prompt tokens.
    styles = {
        Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
        Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
        Token.Menu.Completions.ProgressButton: 'bg:#003333',
        Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
        Token.Host: '#00ff00',
        Token.Pound: '#00ff00',
    }
    # Merge in Pygments' default style table; dict.update means any token
    # DefaultStyle also defines overrides the entries above.
    styles.update(DefaultStyle.styles)
def get_prompt_tokens(cli):
    """Build the prompt fragments: the host tag followed by '>>> '."""
    prompt = []
    prompt.append((Token.Host, 'scm'))
    prompt.append((Token.Pound, '>>> '))
    return prompt
| 85 | 386 | 46 |
f86cda3548eba5900a9f51680381b27a4158cba6 | 498 | py | Python | qepy/tests/test_pw.py | QU-XIAO/yambopy | ff65a4f90c1bfefe642ebc61e490efe781709ff9 | [
"BSD-3-Clause"
] | 21 | 2016-04-07T20:53:29.000Z | 2021-05-14T08:06:02.000Z | qepy/tests/test_pw.py | alexmoratalla/yambopy | 8ec0e1e18868ccaadb3eab36c55e6a47021e257d | [
"BSD-3-Clause"
] | 22 | 2016-06-14T22:29:47.000Z | 2021-09-16T15:36:26.000Z | qepy/tests/test_pw.py | alexmoratalla/yambopy | 8ec0e1e18868ccaadb3eab36c55e6a47021e257d | [
"BSD-3-Clause"
] | 15 | 2016-06-14T18:40:57.000Z | 2021-08-07T13:17:43.000Z | # Copyright (C) 2018 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
import unittest
import os
from qepy.pw import PwIn
from yambopy.data.structures import Si
if __name__ == '__main__':
unittest.main()
| 20.75 | 55 | 0.654618 | # Copyright (C) 2018 Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
import unittest
import os
from qepy.pw import PwIn
from yambopy.data.structures import Si
class TestPwIn(unittest.TestCase):
def test_pwin(self):
pwi = PwIn.from_structure_dict(Si)
pwi.set_nscf(10)
print(pwi)
pwi.cell_parameters = [[1,0,0],[0,1,0],[0,0,1]]
pwi.ibrav = 0
print(pwi)
if __name__ == '__main__':
unittest.main()
| 185 | 13 | 50 |
6a5ac2b74841eacadd02a3f5c77be89965e5f5bc | 3,768 | py | Python | instrumentor/exposition.py | u9n/instrumenter | 2571288248b07eabe0711ae8f1b44565110d47bd | [
"BSD-3-Clause"
] | 1 | 2019-07-12T14:22:30.000Z | 2019-07-12T14:22:30.000Z | instrumentor/exposition.py | u9n/instrumenter | 2571288248b07eabe0711ae8f1b44565110d47bd | [
"BSD-3-Clause"
] | null | null | null | instrumentor/exposition.py | u9n/instrumenter | 2571288248b07eabe0711ae8f1b44565110d47bd | [
"BSD-3-Clause"
] | null | null | null | from itertools import groupby
import attr
import typing
@attr.s
@attr.s
@attr.s
class ExpositionClient:
    """
    Very simple implementation of a client that gets all available metrics from a
    namespace and exposes a string formatted according to the Prometheus exposition
    format.

    NOTE(review): no __init__ is visible in this copy; `self.redis` and
    `self.namespace` are presumably set by a constructor elsewhere (a later
    duplicate of this class in the dump has one) -- confirm before use.
    """
    def _get_data(self) -> typing.List[MetricSet]:
        """
        Get and parse data from Redis.
        :return: one MetricSet per metric name found in the namespace hash.
        """
        results = self.redis.hgetall(self.namespace)
        # Decode bytes->str and sort, so groupby (which only groups adjacent
        # items) sees each metric's keys contiguously.
        sorted_results = sorted(
            [
                RedisKeyValuePair(key.decode(), val.decode())
                for key, val in results.items()
            ]
        )
        metrics = list()
        for metric_name, metric_items in groupby(
            sorted_results, key=lambda x: x.key.split(":")[0]
        ):
            metric = MetricSet(name=metric_name)
            for item in list(metric_items):
                metric.add_item(item)
            metrics.append(metric)
        return metrics
    def expose(self):
        """
        Returns Prometheus formatted data.
        :return: exposition text covering every metric in the namespace.
        """
        metrics = self._get_data()
        out = ""
        for metric in metrics:
            out += f"# HELP {metric.name} {metric.description}\n"
            out += f"# TYPE {metric.name} {metric.type}\n"
            for value in metric.values:
                # Line shape depends on the sample suffix (_bucket/_count/
                # _sum) and on whether the sample carries labels.
                if value.type and value.labels:
                    out += (
                        f"{metric.name}_{value.type}{{{value.labels}}} {value.value}\n"
                    )
                elif value.type and not value.labels:
                    out += f"{metric.name}_{value.type} {value.value}\n"
                elif not value.type and value.labels:
                    out += f"{metric.name}{{{value.labels}}} {value.value}\n"
                else:
                    out += f"{metric.name} {value.value}\n"
            out += f"\n"  # blank line between metric families
        return out
| 27.304348 | 87 | 0.52707 | from itertools import groupby
import attr
import typing
@attr.s
class RedisKeyValuePair:
    # One raw hash entry from Redis, already decoded to str.
    # NOTE(review): _get_data sorts these, which relies on the comparison
    # methods @attr.s generates by default -- confirm with the pinned attrs
    # version.
    key = attr.ib()
    value = attr.ib()
@attr.s
class MetricValue:
    # One sample line of a metric, as rendered by ExpositionClient.expose.
    type = attr.ib()    # sample suffix ("bucket"/"count"/"sum") or None for a plain sample
    labels = attr.ib()  # raw label string; may be ""
    value = attr.ib()   # sample value as decoded from Redis
@attr.s
class MetricSet:
    """All Redis-stored pieces of one metric: description, type and samples.

    Redis hash keys have the shape ``<name>:<ext>:<labels>`` where ``ext``
    is ``d`` (description), ``t`` (type code), ``b``/``c``/``s`` (bucket /
    count / sum sample) or empty for a plain sample value.
    """
    name: str = attr.ib()
    description: str = attr.ib(default=None)
    type: str = attr.ib(default=None)
    values: typing.List[MetricValue] = attr.ib(default=attr.Factory(list))

    # type-code -> Prometheus metric type; sample-extension -> line suffix
    _TYPE_NAMES = {"c": "counter", "g": "gauge", "h": "histogram", "s": "summary"}
    _VALUE_TYPES = {"b": "bucket", "c": "count", "s": "sum"}

    def add_item(self, kv_pair: RedisKeyValuePair):
        """Fold one Redis key/value pair into this metric set."""
        extension, labels = self._split_key(kv_pair.key)
        if labels is None:
            labels = ""
        if extension == "d":
            self.description = kv_pair.value
        elif extension == "t":
            # Unknown type codes leave `type` unchanged (previously: the
            # elif chain simply fell through).
            self.type = self._TYPE_NAMES.get(kv_pair.value, self.type)
        else:
            # Sample value: b/c/s map to the _bucket/_count/_sum series;
            # any other extension is a plain sample (type None).
            self.values.append(
                MetricValue(
                    type=self._VALUE_TYPES.get(extension),
                    labels=labels,
                    value=kv_pair.value,
                )
            )

    @staticmethod
    def _split_key(key):
        """Split ``name:ext:labels`` and return ``(ext, labels)``.

        (The old implementation also bound the unused metric name.)
        """
        parts = key.split(":")
        return parts[1], parts[2]
class ExpositionClient:
    """
    Very simple implementation of a client that gets all available metrics from a
    namespace and exposes a string formatted according to the Prometheus exposition
    format.
    """
    def __init__(self, redis_client, namespace: str):
        # redis_client: a connected redis client exposing hgetall();
        # namespace: the Redis hash holding all metric entries.
        self.redis = redis_client
        self.namespace = namespace
    def _get_data(self) -> typing.List[MetricSet]:
        """
        Get and parse data from Redis.
        :return: one MetricSet per metric name found in the namespace hash.
        """
        results = self.redis.hgetall(self.namespace)
        # Decode bytes->str and sort, so groupby (which only groups adjacent
        # items) sees each metric's keys contiguously.
        sorted_results = sorted(
            [
                RedisKeyValuePair(key.decode(), val.decode())
                for key, val in results.items()
            ]
        )
        metrics = list()
        for metric_name, metric_items in groupby(
            sorted_results, key=lambda x: x.key.split(":")[0]
        ):
            metric = MetricSet(name=metric_name)
            for item in list(metric_items):
                metric.add_item(item)
            metrics.append(metric)
        return metrics
    def expose(self):
        """
        Returns Prometheus formatted data.
        :return: exposition text covering every metric in the namespace.
        """
        metrics = self._get_data()
        out = ""
        for metric in metrics:
            out += f"# HELP {metric.name} {metric.description}\n"
            out += f"# TYPE {metric.name} {metric.type}\n"
            for value in metric.values:
                # Line shape depends on the sample suffix (_bucket/_count/
                # _sum) and on whether the sample carries labels.
                if value.type and value.labels:
                    out += (
                        f"{metric.name}_{value.type}{{{value.labels}}} {value.value}\n"
                    )
                elif value.type and not value.labels:
                    out += f"{metric.name}_{value.type} {value.value}\n"
                elif not value.type and value.labels:
                    out += f"{metric.name}{{{value.labels}}} {value.value}\n"
                else:
                    out += f"{metric.name} {value.value}\n"
            out += f"\n"  # blank line between metric families
        return out
| 1,378 | 360 | 93 |
d7feab2e2f3e0891add361cee28583deeb6349eb | 3,229 | py | Python | v1/tnb_faucet/core/fb_post.py | picko1990/Bank | d35b0a76c1dd6ceb37155b6af32e3043c63d33f5 | [
"MIT"
] | 1 | 2021-07-24T06:54:11.000Z | 2021-07-24T06:54:11.000Z | v1/tnb_faucet/core/fb_post.py | picko1990/Bank | d35b0a76c1dd6ceb37155b6af32e3043c63d33f5 | [
"MIT"
] | 4 | 2021-10-01T20:23:14.000Z | 2021-10-01T20:57:42.000Z | v1/tnb_faucet/core/fb_post.py | picko1990/Bank | d35b0a76c1dd6ceb37155b6af32e3043c63d33f5 | [
"MIT"
] | 3 | 2021-02-23T01:09:05.000Z | 2021-09-12T15:52:56.000Z | #
# Author: Nikhil Taneja (taneja.nikhil03@gmail.com)
# fb_post.py (c) 2021
# Desc: description
# Created: Fri Jan 08 2021 04:19:09 GMT+0530 (India Standard Time)
# Modified: Fri Jan 08 2021 18:19:22 GMT+0530 (India Standard Time)
#
import logging
import re
from urllib.parse import parse_qs, unquote, urlparse
import requests
from bs4 import BeautifulSoup
from .model import PostModel
from .utils import find_account_number, validate_hashtag
logger = logging.getLogger('faucet')
| 28.324561 | 68 | 0.589347 | #
# Author: Nikhil Taneja (taneja.nikhil03@gmail.com)
# fb_post.py (c) 2021
# Desc: description
# Created: Fri Jan 08 2021 04:19:09 GMT+0530 (India Standard Time)
# Modified: Fri Jan 08 2021 18:19:22 GMT+0530 (India Standard Time)
#
import logging
import re
from urllib.parse import parse_qs, unquote, urlparse
import requests
from bs4 import BeautifulSoup
from .model import PostModel
from .utils import find_account_number, validate_hashtag
logger = logging.getLogger('faucet')
def _extract_post_id(fb_url, post_url):
    """Determine the numeric Facebook post id.

    Tries the last path segment of *post_url* first; falls back to the
    'story_fbid' query parameter of the original URL. Returns None
    (after logging) when no id can be determined.
    """
    # BUGFIX: post_url is rebuilt from the path alone, so its query string
    # is always empty; 'story_fbid' must be parsed from the original URL.
    params = parse_qs(urlparse(fb_url).query)
    path = urlparse(post_url).path
    if path and path[-1] == '/':
        path = path[:-1]
    endpoint = path.split('/')[-1]
    if endpoint.isnumeric():
        return int(endpoint)
    story_fbid = params.get('story_fbid')
    if story_fbid:
        return int(story_fbid[0])
    logger.error(f'Cannot determine post id for <{fb_url}>')
    return None


def _extract_user_id(soup, post_id, post_url):
    """Extract the posting user's id from the mobile login bar link.

    Returns None (after logging) when the login-bar element or its
    'rid' query parameter is missing.
    """
    element = soup.select_one('#mobile_login_bar a')
    if not element:
        logger.debug((
            'Cannot extract text for '
            f'<Facebook:{post_id}>'))
        logger.error((
            f'Cannot extract text for <{post_url}>'))
        return None
    url = urlparse(unquote(element['href']))
    params = parse_qs(url.query)
    user_id_str = params.get('rid')
    if not user_id_str:
        logger.debug((
            'Cannot determine user id '
            f'for <Facebook:{post_id}>'))
        logger.error((
            'Cannot determine user id '
            f'for <{post_url}>'))
        return None
    return int(user_id_str[0])


def _extract_text(soup):
    """Return the post's text body, or '' when neither known layout matches."""
    element = soup.select_one('.msg div')
    if element:
        return element.text
    element = soup.select_one((
        '#m_story_permalink_view div '
        'div div div:nth-child(2)'))
    if element:
        return element.text
    return ''


def process(fb_url, amount):
    """Validate a Facebook post URL and build a PostModel from it.

    Fetches the post via mbasic.facebook.com, extracts the post id, the
    posting user's id, the account number and the hashtags, and returns
    a populated PostModel when the required hashtags validate.

    :param fb_url: URL of the Facebook post.
    :param amount: object carrying `coins` and `delay` for the payout.
    :returns: PostModel on success, otherwise None.
    """
    post_url = f'https://mbasic.facebook.com{urlparse(fb_url).path}'
    post_id = _extract_post_id(fb_url, post_url)
    if post_id is None:
        return
    post = PostModel(post_id, amount.coins, amount.delay)
    post.set_platform('facebook')
    response = requests.get(post_url)
    if response.status_code != 200:
        logger.debug((
            'Cannot find post of id '
            f'<{post_id}> <Error:{response.text}>'))
        logger.error((
            f'Cannot find post of id for <{post_url}>'))
        return
    soup = BeautifulSoup(response.text, 'lxml')
    user_id = _extract_user_id(soup, post_id, post_url)
    if user_id is None:
        return
    post.set_user(user_id)
    text = _extract_text(soup)
    if not text:
        logger.debug((
            'Invalid account number for '
            f'<User:{user_id}> via <Facebook:{post_id}>'))
        logger.error((
            'Invalid account number for '
            f'<{post_url}>'))
        return
    account_number = find_account_number(text)
    if not account_number:
        logger.debug((
            'Invalid account number for '
            f'<User:{user_id}> via <Facebook:{post_id}>'))
        logger.error((
            'Invalid account number for '
            f'<{post_url}>'))
        return
    post.set_account_number(account_number)
    hashtags = re.findall(r'#\w+', text)
    if validate_hashtag(hashtags):
        logger.debug(str(post))
        logger.info(f'Seeking <{str(amount)}> via <{post_url}>')
        return post
| 2,719 | 0 | 23 |
2c9ffada9418e48bf12fc704d93927d8756f9602 | 25,034 | py | Python | analyzer/data/dataset.py | frommwonderland/EManalysis | 2787f063e2e83521fd6439d06a07f5521e43dc94 | [
"MIT"
] | null | null | null | analyzer/data/dataset.py | frommwonderland/EManalysis | 2787f063e2e83521fd6439d06a07f5521e43dc94 | [
"MIT"
] | null | null | null | analyzer/data/dataset.py | frommwonderland/EManalysis | 2787f063e2e83521fd6439d06a07f5521e43dc94 | [
"MIT"
] | null | null | null | import glob
import json
import math
import multiprocessing
import os
import re
import time
import h5py
import imageio
import numpy as np
import pandas as pd
from numpyencoder import NumpyEncoder
from skimage.measure import label, regionprops
from skimage.transform import resize
from sklearn.cluster import KMeans
from tqdm import tqdm
from analyzer.data.utils.data_raw import readvol, folder2Vol
from analyzer.utils.eval_model import Evaluationmodel
class Dataloader():
    '''
    Dataloader class for handling the em dataset and the related labels.
    :param cfg: configuration manager.
    :param volume: the EM volume.
    :param labels: labels that are defined by human or segmentation and will be clustered soon.
    :param gt: groundtruth data (cluster)
    :param feature: Defines the feature that the VAE should go for.
    :param chunk_size: (tuple) defines the chunks in which the data is loaded. Can help to overcome Memory errors.
    :param ff: (string) defines the file format that you want to work with. (default: png)
    '''
    # NOTE(review): no __init__ is visible in this copy of the class; the methods
    # below read attributes (self.mito_volume_file_name, self.volpath, self.cpus,
    # ...) that must be assigned elsewhere -- confirm an __init__ exists upstream.
    def __len__(self):
        '''
        Required by torch to return the length of the dataset.
        :returns: integer
        '''
        with h5py.File(self.mito_volume_file_name, 'r') as f:
            return len(f["id"])
    def __getitem__(self, idx):
        '''
        Required by torch to return one item of the dataset.
        :param idx: index of the object
        :returns: object from the volume
        '''
        with h5py.File(self.mito_volume_file_name, 'r') as f:
            return f["chunk"][idx], idx
    def get_fns(self):
        '''returns the em, label and gt filenames of every image, each list sorted.'''
        emfns = sorted(glob.glob(self.volpath + '*.' + self.ff))
        labelfns = sorted(glob.glob(self.labelpath + '*.' + self.ff))
        gtfns = sorted(glob.glob(self.gtpath + '*.' + self.ff))
        return (emfns, labelfns, gtfns)
    def load_chunk(self, vol='all', mode='3d'):
        '''
        Load chunk of em and groundtruth data for further processing.
        :param vol: (string) choose between -> 'all', 'em', 'label' in order to specify
                    which volume you want to load.
        :param mode: (string) '2d' reads only the first file, '3d' stacks a folder.
        :returns: tuple (emdata, labels, gt).
        '''
        # NOTE(review): the locals emdata/labels/gt are only assigned inside
        # their respective branches; the final return raises NameError when a
        # branch is skipped (e.g. vol='em', or a cached volume is not None).
        emfns = sorted(glob.glob(self.volpath + '*.' + self.ff))
        labelfns = sorted(glob.glob(self.labelpath + '*.' + self.ff))
        if mode == '2d':
            if (vol == 'em') or (vol == 'all'):
                emdata = readvol(emfns[0])
                emdata = np.squeeze(emdata)
                print('em data loaded: ', emdata.shape)
            if (vol == 'label') or (vol == 'all'):
                labels = readvol(labelfns[0])
                labels = np.squeeze(labels)
                print('label data loaded: ', labels.shape)
        if mode == '3d':
            if (vol == 'em') or (vol == 'all'):
                if self.volume is None:
                    emdata = folder2Vol(self.volpath, self.chunk_size, file_format=self.ff)
                print('em data loaded: ', emdata.shape)
            if (vol == 'label') or (vol == 'all'):
                if self.labels is None:
                    labels = folder2Vol(self.labelpath, self.chunk_size, file_format=self.ff)
                print('label data loaded: ', labels.shape)
            if (vol == 'gt') or (vol == 'all'):
                if self.gt is None:
                    gt = folder2Vol(self.gtpath, self.chunk_size, file_format=self.ff)
                print('gt data loaded: ', gt.shape)
        return (emdata, labels, gt)
    def list_segments(self, vol, labels, min_size=2000, os=0, mode='3d'):
        '''
        This function creates a dict of arrays that contain the unique segments.
        :param vol: (np.array) volume that contains the pure em data. (2d || 3d)
        :param labels: (np.array) volume that contains the groundtruth. (2d || 3d)
        :param min_size: (int) this sets the minimum size of mitochondria region in order to be safed to the list. Used only in 2d.
        :param os: (int) defines the offset that should be used for cutting the bounding box. Be careful with offset as it can lead to additional regions in the chunks.
                   NOTE(review): this parameter shadows the `os` module inside this method.
        :param mode: (string) 2d || 3d --> 2d gives you 2d arrays of each slice (same mitochondria are treated differently as they loose their touch after slicing)
                                       --> 3d gives you the whole mitochondria in a 3d volume.
        :returns: (dict) of (np.array) objects that contain the segments with labels as keys.
        '''
        bbox_dict = {}
        # Zero out every voxel that carries no label -- NOTE: mutates `vol` in place.
        mask = np.zeros(shape=vol.shape, dtype=np.uint16)
        mask[labels > 0] = 1
        vol[mask == 0] = 0
        if mode == '2d':
            bbox_list = []
            for idx in range(vol.shape[0]):
                image = vol[idx, :, :]
                gt_img = labels[idx, :, :]
                label2d, num_label = label(gt_img, return_num=True)
                regions = regionprops(label2d, cache=False)
                for props in regions:
                    boundbox = props.bbox
                    if props.bbox_area > min_size:
                        # Fall back to the tight bbox when the offset would leave the image.
                        if ((boundbox[0] - os) < 0) or ((boundbox[2] + os) > image.shape[0]) or (
                                (boundbox[1] - os) < 0) or ((boundbox[3] + os) > image.shape[1]):
                            tmparr = image[boundbox[0]:boundbox[2], boundbox[1]:boundbox[3]]
                        else:
                            tmparr = image[(boundbox[0] - os):(boundbox[2] + os), (boundbox[1] - os):(boundbox[3] + os)]
                        bbox_list.append(tmparr)
            bbox_dict = {i: bbox_list[i] for i in range(len(bbox_list))}
        elif mode == '3d':
            # NOTE(review): chunk_dict is never used below.
            chunk_dict = {}
            label3d, num_label = label(labels, return_num=True)
            regions = regionprops(label3d, cache=False)
            for props in regions:
                boundbox = props.bbox
                if ((boundbox[1] - os) < 0) or ((boundbox[4] + os) > vol.shape[1]) or ((boundbox[2] - os) < 0) or (
                        (boundbox[5] + os) > vol.shape[2]):
                    tmparr = vol[boundbox[0]:boundbox[3], boundbox[1]:boundbox[4], boundbox[2]:boundbox[5]]
                else:
                    tmparr = vol[boundbox[0]:boundbox[3], (boundbox[1] - os):(boundbox[4] + os),
                             (boundbox[2] - os):(boundbox[5] + os)]
                bbox_dict[props.label] = tmparr
        else:
            raise ValueError('No valid dimensionality mode in function list_segments.')
        return (bbox_dict)
    def prep_data_info(self, volopt='label', save=False):
        '''
        This function aims as an inbetween function iterating over the whole dataset in efficient
        and memory proof fashion in order to preserve information that is needed for further steps.
        :param volopt: (string) this sets the volume you want to use for the operation. default: label
        :param save: (bool) when True, dump the result to cfg.DATASET.DATAINFO as json.
        :returns result_array: (list) of dicts {'id', 'size', 'slices'} per object.
        '''
        if volopt == 'label':
            fns = sorted(glob.glob(self.labelpath + '*.' + self.ff))
        elif volopt == 'em':
            fns = sorted(glob.glob(self.volpath + '*.' + self.ff))
        else:
            raise ValueError('Please enter the volume on which \'prep_data_info\' should run on.')
        if self.exclude_borders:
            # Drop the first and last slice so z-border objects are never counted.
            fns = fns[1:-1]
        # One calc_props call per slice, in parallel.
        with multiprocessing.Pool(processes=self.cpus) as pool:
            result = pool.starmap(self.calc_props, enumerate(fns))
        # Merge the per-slice dicts: accumulate pixel counts and slice indices.
        added = {}
        for dicts in result:
            for key, value in dicts.items():
                if key in added:
                    added[key][0] += value[0]
                    added[key][1].append(value[1])
                    if self.exclude_borders:
                        # NOTE(review): this appends extra entries instead of
                        # updating the flag at index 2, so border contact seen
                        # on later slices never marks the object -- confirm.
                        if not added[key][2]:
                            added[key].append(value[2])
                else:
                    added.setdefault(key, [])
                    added[key].append(value[0])
                    added[key].append([value[1]])
                    if self.exclude_borders:
                        added[key].append(value[2])
        result_array = []
        for result in added.keys():
            if self.exclude_borders and added[result][2]:
                continue
            result_array.append({
                'id': result,
                'size': added[result][0],
                'slices': added[result][1]
            })
        if save:
            with open(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO), 'w') as f:
                json.dump(result_array, f, cls=NumpyEncoder)
                f.close()
        return (result_array)
    def calc_props(self, idx, fns):
        '''
        Helper function for 'prep_data_info'
        :param idx: (int) this is the slice index that correspondes to the image slice. E.g. idx 100 belongs to image 100.
        :param fns: (string) path of one slice image file.
        :returns result: (dict) label -> [pixel count, slice index, touches-border flag].
        '''
        result = {}
        if os.path.exists(fns):
            tmp = imageio.imread(fns)
            for region in regionprops(tmp):
                result.setdefault(region.label, [])
                result[region.label].append(region.area)
                result[region.label].append(idx)
                result[region.label].append(False)
                if self.exclude_borders:
                    minr, minc, maxr, maxc = region.bbox
                    if minr == 0 or minc == 0:
                        result[region.label][-1] = True
                    # NOTE(review): maxc is compared against shape[0] (rows);
                    # for non-square slices the column bound should be shape[1].
                    if maxr == tmp.shape[0] or maxc == tmp.shape[0]:
                        result[region.label][-1] = True
        return result
    def precluster(self, mchn='simple', n_groups=5):
        '''
        Function preclusters the mitochondria into buckets of similar size in order to avoid
        sparsity and loss of information while extracting latent representation of the mitochondria.
        :param mchn: (string) 'simple' (sort + equal split) or 'cluster' (1-d KMeans on sizes).
        :param n_groups: (int) number of buckets.
        :returns id_lists: (list) of n_groups lists of object ids.
        '''
        if os.path.exists(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)) \
                and os.stat(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)).st_size != 0:
            with open(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO), 'r') as f:
                data_info = json.loads(f.read())
        else:
            data_info = self.prep_data_info(save=True)
        tmp = np.stack(([mito['id'] for mito in data_info], [mito['size'] for mito in data_info]), axis=-1)
        if mchn == 'simple':
            # NOTE(review): `sorted` shadows the builtin here.
            sorted = tmp[tmp[:, 1].argsort()[::-1]]
            splitted = np.array_split(sorted, n_groups, axis=0)
            id_lists = [tmp[:, 0].tolist() for tmp in splitted]
        elif mchn == 'cluster':
            model = KMeans(n_clusters=n_groups)
            res_grps = model.fit_predict(np.array(tmp[:, 1]).reshape(-1, 1))
            # NOTE(review): '[[]] * n_groups' aliases one list n_groups times,
            # so every appended id ends up in every bucket -- confirm intent.
            id_lists = [[]] * n_groups
            for idx in range(len(res_grps)):
                id_lists[res_grps[idx]].append(tmp[:, 0][idx])
        else:
            raise ValueError(
                'Please enter the a valid mechanismn you want to group that mitochondria. \'simple\' or \'cluster\'.')
        return id_lists
    def extract_scale_mitos(self):
        '''
        Function to extract the objects as volumes and scale them. Then its saves the scaled volumes to an h5 file.
        '''
        if os.path.exists(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)) \
                and os.stat(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)).st_size != 0:
            with open(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO), 'r') as f:
                regions = json.loads(f.read())
        else:
            regions = self.prep_data_info(save=False)
        print("{} objects found in the ground truth".format(len(regions)))
        regions = pd.DataFrame(regions)
        # NOTE(review): len(regions['slices']) is the row count, a constant --
        # probably regions['slices'].str.len() > 1 was meant; confirm.
        regions = regions[(self.upper_limit > regions['size']) & (self.lower_limit < regions['size']) & (
                len(regions['slices']) > 1)].values.tolist()
        filtered_length = len(regions)
        print("{} within limits {} and {}".format(filtered_length, self.lower_limit, self.upper_limit))
        if self.region_limit is not None:
            regions = regions[:self.region_limit]
            print("{} will be extracted due to set region_limit".format(self.region_limit))
        with h5py.File(self.mito_volume_file_name, "w") as f:
            f.create_dataset("shape_volume", (len(regions), 1, *self.target_size))
            f.create_dataset("texture_volume", (len(regions), 1, *self.target_size))
            f.create_dataset("id", (len(regions),))
            if self.cpus < 2 and self.chunks_per_cpu < 2:
                print("single cpu mode")
                for i in tqdm(range(0, len(regions))):
                    # NOTE(review): debug leftover -- skips the first 11000 regions.
                    if i < 11000:
                        continue
                    print(i)
                    result = self.get_mito_volume(regions[i])
                    f["id"][i] = result[0]
                    f["shape_volume"][i] = result[1]
                    f["texture_volume"][i] = result[2]
            # NOTE(review): no return/else above, so the single-cpu path falls
            # through into this multiprocessing path as well -- confirm.
            with multiprocessing.Pool(processes=self.cpus) as pool:
                for i in tqdm(range(0, len(regions), int(self.cpus * self.chunks_per_cpu))):
                    try:
                        results = pool.map(self.get_mito_volume, regions[i:i + int(self.cpus * self.chunks_per_cpu)])
                        for j, result in enumerate(results):
                            f["id"][i + j] = result[0]
                            f["shape_volume"][i + j] = result[1]
                            f["texture_volume"][i + j] = result[2]
                    # NOTE(review): bare except also swallows KeyboardInterrupt.
                    except:
                        print("error in extraction, i: {}".format(i))
                        exit()
    def get_mito_volume(self, region):
        '''
        Preprocessing function to extract and scale the mitochondria as volume
        :param region: (dict) one region object provided by Dataloader.prep_data_info
                       NOTE(review): accessed as region[0] below, i.e. as a list row.
        :returns result: [id, shape volume, texture volume] with target dimensions.
        '''
        gt_volume, em_volume = self.get_volumes_from_slices(region)
        mito_regions = regionprops(gt_volume, cache=False)
        if len(mito_regions) != 1:
            print("something went wrong during volume building. region count: {}".format(len(mito_regions)))
        mito_region = mito_regions[0]
        # A bbox with fewer than 6 entries means the object is 2d -- rejected.
        if len(mito_region.bbox) < 6:
            return [-1, np.zeros(shape=(1, *self.target_size)), np.zeros(shape=(1, *self.target_size))]
        shape = gt_volume[mito_region.bbox[0]:mito_region.bbox[3] + 1,
                mito_region.bbox[1]:mito_region.bbox[4] + 1,
                mito_region.bbox[2]:mito_region.bbox[5] + 1].astype(np.float32)
        texture = em_volume[mito_region.bbox[0]:mito_region.bbox[3] + 1,
                  mito_region.bbox[1]:mito_region.bbox[4] + 1,
                  mito_region.bbox[2]:mito_region.bbox[5] + 1].astype(np.float32)
        # Rescale to the fixed VAE input size and normalize to [0, 1].
        # NOTE(review): .max() can be 0 for an empty crop -> division warning/NaN.
        scaled_shape = resize(shape, self.target_size, order=1, anti_aliasing=True)
        scaled_shape = scaled_shape / scaled_shape.max()
        scaled_shape = np.expand_dims(scaled_shape, 0)
        scaled_texture = resize(texture, self.target_size, order=1, anti_aliasing=True)
        scaled_texture = scaled_texture / scaled_texture.max()
        scaled_texture = np.expand_dims(scaled_texture, 0)
        if scaled_shape.sum() < self.lower_limit * 0.1:
            print("region {} was too small".format(region[0]))
            return [-1, np.zeros(shape=(1, *self.target_size)), np.zeros(shape=(1, *self.target_size))]
        return [region[0], scaled_shape, scaled_texture]
    def get_volumes_from_slices(self, region):
        '''
        Assemble the label and em sub-volumes for one object.
        :param region: mapping with 'id' and 'slices' keys.
        :returns gt_volume, em_volume: stacked numpy arrays with everything
                 outside the object zeroed.
        '''
        gt_all_fn = sorted(glob.glob(self.labelpath + '*.' + self.ff))
        em_all_fn = sorted(glob.glob(self.volpath + '*.' + self.ff))
        gt_fns = [gt_all_fn[id] for id in region["slices"]]
        em_fns = [em_all_fn[id] for id in region["slices"]]
        gt_volume = []
        em_volume = []
        for i in range(len(gt_fns)):
            gt_slice = imageio.imread(gt_fns[i])
            em_slice = imageio.imread(em_fns[i])
            # Order matters: after zeroing gt, the second comparison masks the
            # em slice at exactly the positions that were just zeroed.
            gt_slice[gt_slice != region["id"]] = 0
            em_slice[gt_slice != region["id"]] = 0
            gt_volume.append(gt_slice)
            em_volume.append(em_slice)
        return np.array(gt_volume), np.array(em_volume)
    def extract_scale_mitos_samples(self):
        '''
        Function to extract the objects as volumes and scale them. Then its saves the scaled volumes to an h5 file.
        Work is distributed over per-worker h5 files via a shared queue.
        '''
        regions = self.prep_data_info(save=True)
        print("{} objects found in the ground truth".format(len(regions)))
        # Remove stale per-worker sample files from earlier runs.
        regex = re.compile('([0-9]+)_mito_samples.h5')
        for root, dirs, files in os.walk(self.cfg.DATASET.ROOTD):
            for file in files:
                if regex.match(file):
                    os.remove(self.cfg.DATASET.ROOTD + file)
        in_q = multiprocessing.Queue()
        processes = []
        for region in regions:
            in_q.put(region)
        pbar = tqdm(total=len(regions))
        # One worker per CPU; each drains the queue into its own h5 file.
        for cpu in range(self.cpus):
            p = multiprocessing.Process(target=self.get_mito_chunk, args=(in_q, cpu))
            p.start()
            processes.append(p)
        # Poll the queue size every 30s purely for progress reporting.
        progress = 0
        while not in_q.empty():
            progress_step = len(regions)-in_q.qsize()
            if progress != progress_step:
                pbar.update(progress_step-progress)
            progress = progress_step
            time.sleep(30)
        for p in processes:
            p.join()
        # cleanup_h5 (defined elsewhere) merges the per-worker files.
        self.cleanup_h5()
        return
| 44.544484 | 168 | 0.548694 | import glob
import json
import math
import multiprocessing
import os
import re
import time
import h5py
import imageio
import numpy as np
import pandas as pd
from numpyencoder import NumpyEncoder
from skimage.measure import label, regionprops
from skimage.transform import resize
from sklearn.cluster import KMeans
from tqdm import tqdm
from analyzer.data.utils.data_raw import readvol, folder2Vol
from analyzer.utils.eval_model import Evaluationmodel
class Dataloader():
'''
Dataloader class for handling the em dataset and the related labels.
:param cfg: configuration manager.
:param volume: the EM volume.
:param labels: labels that are defined by human or segmentation and will be clustered soon.
:param gt: groundtruth data (cluster)
:param feature: Defines the feature that the VAE should go for.
:param chunk_size: (tuple) defines the chunks in which the data is loaded. Can help to overcome Memory errors.
:param ff: (string) defines the file format that you want to work with. (default: png)
'''
def __init__(self, cfg, volume=None, labels=None, gt=None, feature="shape"):
self.cfg = cfg
if volume is not None:
print('em data loaded: ', self.volume.shape)
else:
self.volpath = self.cfg.DATASET.EM_PATH
self.volume = volume
if labels is not None:
print('label data loaded: ', self.labels.shape)
else:
self.labelpath = self.cfg.DATASET.LABEL_PATH
self.labels = labels
if gt is not None:
print('gt data loaded: ', self.gt.shape)
else:
self.gtpath = self.cfg.DATASET.GT_PATH
self.gt = gt
self.chunk_size = self.cfg.DATASET.CHUNK_SIZE
self.ff = self.cfg.DATASET.FILE_FORMAT
if self.cfg.SYSTEM.NUM_CPUS is None:
self.cpus = multiprocessing.cpu_count()
else:
self.cpus = self.cfg.SYSTEM.NUM_CPUS
self.region_limit = cfg.AUTOENCODER.REGION_LIMIT
self.chunks_per_cpu = cfg.AUTOENCODER.CHUNKS_CPU
self.upper_limit = cfg.AUTOENCODER.UPPER_BOUND
self.lower_limit = cfg.AUTOENCODER.LOWER_BOUND
self.large_samples = cfg.AUTOENCODER.LARGE_OBJECT_SAMPLES
self.target_size = cfg.AUTOENCODER.TARGET
self.vae_feature = feature
self.mito_volume_file_name = "{}mito_samples.h5".format(cfg.DATASET.ROOTD)
self.exclude_borders = cfg.DATASET.EXCLUDE_BORDER_OBJECTS
def __len__(self):
'''
Required by torch to return the length of the dataset.
:returns: integer
'''
with h5py.File(self.mito_volume_file_name, 'r') as f:
return len(f["id"])
def __getitem__(self, idx):
'''
Required by torch to return one item of the dataset.
:param idx: index of the object
:returns: object from the volume
'''
with h5py.File(self.mito_volume_file_name, 'r') as f:
return f["chunk"][idx], idx
def get_fns(self):
'''returns the em, label and gt filenames of every image.'''
emfns = sorted(glob.glob(self.volpath + '*.' + self.ff))
labelfns = sorted(glob.glob(self.labelpath + '*.' + self.ff))
gtfns = sorted(glob.glob(self.gtpath + '*.' + self.ff))
return (emfns, labelfns, gtfns)
def load_chunk(self, vol='all', mode='3d'):
'''
Load chunk of em and groundtruth data for further processing.
:param vol: (string) choose between -> 'all', 'em', 'label' in order to specify
with volume you want to load.
'''
emfns = sorted(glob.glob(self.volpath + '*.' + self.ff))
labelfns = sorted(glob.glob(self.labelpath + '*.' + self.ff))
if mode == '2d':
if (vol == 'em') or (vol == 'all'):
emdata = readvol(emfns[0])
emdata = np.squeeze(emdata)
print('em data loaded: ', emdata.shape)
if (vol == 'label') or (vol == 'all'):
labels = readvol(labelfns[0])
labels = np.squeeze(labels)
print('label data loaded: ', labels.shape)
if mode == '3d':
if (vol == 'em') or (vol == 'all'):
if self.volume is None:
emdata = folder2Vol(self.volpath, self.chunk_size, file_format=self.ff)
print('em data loaded: ', emdata.shape)
if (vol == 'label') or (vol == 'all'):
if self.labels is None:
labels = folder2Vol(self.labelpath, self.chunk_size, file_format=self.ff)
print('label data loaded: ', labels.shape)
if (vol == 'gt') or (vol == 'all'):
if self.gt is None:
gt = folder2Vol(self.gtpath, self.chunk_size, file_format=self.ff)
print('gt data loaded: ', gt.shape)
return (emdata, labels, gt)
def list_segments(self, vol, labels, min_size=2000, os=0, mode='3d'):
'''
This function creats a list of arrays that contain the unique segments.
:param vol: (np.array) volume that contains the pure em data. (2d || 3d)
:param label: (np.array) volume that contains the groundtruth. (2d || 3d)
:param min_size: (int) this sets the minimum size of mitochondria region in order to be safed to the list. Used only in 2d.
:param os: (int) defines the offset that should be used for cutting the bounding box. Be careful with offset as it can lead to additional regions in the chunks.
:param mode: (string) 2d || 3d --> 2d gives you 2d arrays of each slice (same mitochondria are treated differently as they loose their touch after slicing)
--> 3d gives you the whole mitochondria in a 3d volume.
:returns: (dict) of (np.array) objects that contain the segments with labels as keys.
'''
bbox_dict = {}
mask = np.zeros(shape=vol.shape, dtype=np.uint16)
mask[labels > 0] = 1
vol[mask == 0] = 0
if mode == '2d':
bbox_list = []
for idx in range(vol.shape[0]):
image = vol[idx, :, :]
gt_img = labels[idx, :, :]
label2d, num_label = label(gt_img, return_num=True)
regions = regionprops(label2d, cache=False)
for props in regions:
boundbox = props.bbox
if props.bbox_area > min_size:
if ((boundbox[0] - os) < 0) or ((boundbox[2] + os) > image.shape[0]) or (
(boundbox[1] - os) < 0) or ((boundbox[3] + os) > image.shape[1]):
tmparr = image[boundbox[0]:boundbox[2], boundbox[1]:boundbox[3]]
else:
tmparr = image[(boundbox[0] - os):(boundbox[2] + os), (boundbox[1] - os):(boundbox[3] + os)]
bbox_list.append(tmparr)
bbox_dict = {i: bbox_list[i] for i in range(len(bbox_list))}
elif mode == '3d':
chunk_dict = {}
label3d, num_label = label(labels, return_num=True)
regions = regionprops(label3d, cache=False)
for props in regions:
boundbox = props.bbox
if ((boundbox[1] - os) < 0) or ((boundbox[4] + os) > vol.shape[1]) or ((boundbox[2] - os) < 0) or (
(boundbox[5] + os) > vol.shape[2]):
tmparr = vol[boundbox[0]:boundbox[3], boundbox[1]:boundbox[4], boundbox[2]:boundbox[5]]
else:
tmparr = vol[boundbox[0]:boundbox[3], (boundbox[1] - os):(boundbox[4] + os),
(boundbox[2] - os):(boundbox[5] + os)]
bbox_dict[props.label] = tmparr
else:
raise ValueError('No valid dimensionality mode in function list_segments.')
return (bbox_dict)
def prep_data_info(self, volopt='label', save=False):
'''
This function aims as an inbetween function iterating over the whole dataset in efficient
and memory proof fashion in order to preserve information that is needed for further steps.
:param volopt: (string) this sets the volume you want to use for the operation. default: gt
:param kernel_n: (int) number of CPU kernels you want to use for multiprocessing.
:returns added: (dict) that contains the labels with respective information as (list): [pixelsize, [slice_index(s)]]
'''
if volopt == 'label':
fns = sorted(glob.glob(self.labelpath + '*.' + self.ff))
elif volopt == 'em':
fns = sorted(glob.glob(self.volpath + '*.' + self.ff))
else:
raise ValueError('Please enter the volume on which \'prep_data_info\' should run on.')
if self.exclude_borders:
fns = fns[1:-1]
with multiprocessing.Pool(processes=self.cpus) as pool:
result = pool.starmap(self.calc_props, enumerate(fns))
added = {}
for dicts in result:
for key, value in dicts.items():
if key in added:
added[key][0] += value[0]
added[key][1].append(value[1])
if self.exclude_borders:
if not added[key][2]:
added[key].append(value[2])
else:
added.setdefault(key, [])
added[key].append(value[0])
added[key].append([value[1]])
if self.exclude_borders:
added[key].append(value[2])
result_array = []
for result in added.keys():
if self.exclude_borders and added[result][2]:
continue
result_array.append({
'id': result,
'size': added[result][0],
'slices': added[result][1]
})
if save:
with open(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO), 'w') as f:
json.dump(result_array, f, cls=NumpyEncoder)
f.close()
return (result_array)
def calc_props(self, idx, fns):
'''
Helper function for 'prep_data_info'
:param idx: (int) this is the slice index that correspondes to the image slice. E.g. idx 100 belongs to image 100.
:param fns: (string) list of filenames.
:returns result: (dict) with each segment. key: idx of segment -- value: [number of pixels in segment, idx of slice].
'''
result = {}
if os.path.exists(fns):
tmp = imageio.imread(fns)
for region in regionprops(tmp):
result.setdefault(region.label, [])
result[region.label].append(region.area)
result[region.label].append(idx)
result[region.label].append(False)
if self.exclude_borders:
minr, minc, maxr, maxc = region.bbox
if minr == 0 or minc == 0:
result[region.label][-1] = True
if maxr == tmp.shape[0] or maxc == tmp.shape[0]:
result[region.label][-1] = True
return result
def precluster(self, mchn='simple', n_groups=5):
'''
Function preclusters the mitochondria into buckets of similar size in order to avoid
sparsity and loss of information while extracting latent representation of the mitochondria.
'''
if os.path.exists(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)) \
and os.stat(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)).st_size != 0:
with open(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO), 'r') as f:
data_info = json.loads(f.read())
else:
data_info = self.prep_data_info(save=True)
tmp = np.stack(([mito['id'] for mito in data_info], [mito['size'] for mito in data_info]), axis=-1)
if mchn == 'simple':
sorted = tmp[tmp[:, 1].argsort()[::-1]]
splitted = np.array_split(sorted, n_groups, axis=0)
id_lists = [tmp[:, 0].tolist() for tmp in splitted]
elif mchn == 'cluster':
model = KMeans(n_clusters=n_groups)
res_grps = model.fit_predict(np.array(tmp[:, 1]).reshape(-1, 1))
id_lists = [[]] * n_groups
for idx in range(len(res_grps)):
id_lists[res_grps[idx]].append(tmp[:, 0][idx])
else:
raise ValueError(
'Please enter the a valid mechanismn you want to group that mitochondria. \'simple\' or \'cluster\'.')
return id_lists
def extract_scale_mitos(self):
'''
Function to extract the objects as volumes and scale them. Then its saves the scaled volumes to an h5 file.
'''
if os.path.exists(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)) \
and os.stat(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO)).st_size != 0:
with open(os.path.join(self.cfg.SYSTEM.ROOT_DIR, self.cfg.DATASET.DATAINFO), 'r') as f:
regions = json.loads(f.read())
else:
regions = self.prep_data_info(save=False)
print("{} objects found in the ground truth".format(len(regions)))
regions = pd.DataFrame(regions)
regions = regions[(self.upper_limit > regions['size']) & (self.lower_limit < regions['size']) & (
len(regions['slices']) > 1)].values.tolist()
filtered_length = len(regions)
print("{} within limits {} and {}".format(filtered_length, self.lower_limit, self.upper_limit))
if self.region_limit is not None:
regions = regions[:self.region_limit]
print("{} will be extracted due to set region_limit".format(self.region_limit))
with h5py.File(self.mito_volume_file_name, "w") as f:
f.create_dataset("shape_volume", (len(regions), 1, *self.target_size))
f.create_dataset("texture_volume", (len(regions), 1, *self.target_size))
f.create_dataset("id", (len(regions),))
if self.cpus < 2 and self.chunks_per_cpu < 2:
print("single cpu mode")
for i in tqdm(range(0, len(regions))):
if i < 11000:
continue
print(i)
result = self.get_mito_volume(regions[i])
f["id"][i] = result[0]
f["shape_volume"][i] = result[1]
f["texture_volume"][i] = result[2]
with multiprocessing.Pool(processes=self.cpus) as pool:
for i in tqdm(range(0, len(regions), int(self.cpus * self.chunks_per_cpu))):
try:
results = pool.map(self.get_mito_volume, regions[i:i + int(self.cpus * self.chunks_per_cpu)])
for j, result in enumerate(results):
f["id"][i + j] = result[0]
f["shape_volume"][i + j] = result[1]
f["texture_volume"][i + j] = result[2]
except:
print("error in extraction, i: {}".format(i))
exit()
    def get_mito_volume(self, region):
        '''
        Preprocessing function to extract and scale the mitochondria as volume
        :param region: (dict) one region object provided by Dataloader.prep_data_info
                       NOTE(review): accessed as region[0] below, i.e. as a list
                       row (DataFrame .values.tolist()), while
                       get_volumes_from_slices indexes it by key -- confirm.
        :returns result: [id, shape volume, texture volume], each volume with
                 the target dimensions; id is -1 for rejected regions.
        '''
        gt_volume, em_volume = self.get_volumes_from_slices(region)
        mito_regions = regionprops(gt_volume, cache=False)
        if len(mito_regions) != 1:
            print("something went wrong during volume building. region count: {}".format(len(mito_regions)))
        mito_region = mito_regions[0]
        # A bbox with fewer than 6 entries means the region is 2d -> reject.
        if len(mito_region.bbox) < 6:
            return [-1, np.zeros(shape=(1, *self.target_size)), np.zeros(shape=(1, *self.target_size))]
        # Crop both modalities to the 3d bounding box (inclusive upper bound).
        shape = gt_volume[mito_region.bbox[0]:mito_region.bbox[3] + 1,
                mito_region.bbox[1]:mito_region.bbox[4] + 1,
                mito_region.bbox[2]:mito_region.bbox[5] + 1].astype(np.float32)
        texture = em_volume[mito_region.bbox[0]:mito_region.bbox[3] + 1,
                  mito_region.bbox[1]:mito_region.bbox[4] + 1,
                  mito_region.bbox[2]:mito_region.bbox[5] + 1].astype(np.float32)
        # Rescale to the fixed VAE input size and normalize to [0, 1].
        # NOTE(review): .max() can be 0 for an empty crop -> NaN/warning; confirm.
        scaled_shape = resize(shape, self.target_size, order=1, anti_aliasing=True)
        scaled_shape = scaled_shape / scaled_shape.max()
        scaled_shape = np.expand_dims(scaled_shape, 0)
        scaled_texture = resize(texture, self.target_size, order=1, anti_aliasing=True)
        scaled_texture = scaled_texture / scaled_texture.max()
        scaled_texture = np.expand_dims(scaled_texture, 0)
        # Reject regions whose rescaled mass is implausibly small.
        if scaled_shape.sum() < self.lower_limit * 0.1:
            print("region {} was too small".format(region[0]))
            return [-1, np.zeros(shape=(1, *self.target_size)), np.zeros(shape=(1, *self.target_size))]
        return [region[0], scaled_shape, scaled_texture]
def get_volumes_from_slices(self, region):
'''
#TODO
:param region:
:returns gt_volume, em_volume:
'''
gt_all_fn = sorted(glob.glob(self.labelpath + '*.' + self.ff))
em_all_fn = sorted(glob.glob(self.volpath + '*.' + self.ff))
gt_fns = [gt_all_fn[id] for id in region["slices"]]
em_fns = [em_all_fn[id] for id in region["slices"]]
gt_volume = []
em_volume = []
for i in range(len(gt_fns)):
gt_slice = imageio.imread(gt_fns[i])
em_slice = imageio.imread(em_fns[i])
gt_slice[gt_slice != region["id"]] = 0
em_slice[gt_slice != region["id"]] = 0
gt_volume.append(gt_slice)
em_volume.append(em_slice)
return np.array(gt_volume), np.array(em_volume)
    def extract_scale_mitos_samples(self):
        '''
        Function to extract the objects as volumes and scale them. Then its saves the scaled volumes to an h5 file.
        Work is fanned out over worker processes that each write their own
        '<worker>_mito_samples.h5' file and drain a shared queue of regions.
        '''
        regions = self.prep_data_info(save=True)
        print("{} objects found in the ground truth".format(len(regions)))
        # Remove stale per-worker sample files left over from earlier runs.
        regex = re.compile('([0-9]+)_mito_samples.h5')
        for root, dirs, files in os.walk(self.cfg.DATASET.ROOTD):
            for file in files:
                if regex.match(file):
                    os.remove(self.cfg.DATASET.ROOTD + file)
        in_q = multiprocessing.Queue()
        processes = []
        for region in regions:
            in_q.put(region)
        pbar = tqdm(total=len(regions))
        # One worker per configured CPU; get_mito_chunk drains the queue.
        for cpu in range(self.cpus):
            p = multiprocessing.Process(target=self.get_mito_chunk, args=(in_q, cpu))
            p.start()
            processes.append(p)
        # Poll the queue size every 30 s purely for progress reporting.
        # NOTE(review): qsize()/empty() are approximate on multiprocessing
        # queues, so the progress bar is best-effort.
        progress = 0
        while not in_q.empty():
            progress_step = len(regions)-in_q.qsize()
            if progress != progress_step:
                pbar.update(progress_step-progress)
            progress = progress_step
            time.sleep(30)
        for p in processes:
            p.join()
        # cleanup_h5 (defined outside this view) merges the per-worker files.
        self.cleanup_h5()
        return
    def get_mito_chunk(self, in_q, id):
        """Worker process: pull regions off ``in_q`` and append fixed-size EM
        crops of each single-component object to a per-worker h5 file.

        :param in_q: multiprocessing queue of region dicts
        :param id: worker index, used to name the output file
        """
        h5file = self.cfg.DATASET.ROOTD + "{}_mito_samples.h5".format(id)
        with h5py.File(h5file, "w") as f:
            counter = 0
            # Growable datasets; resized one row at a time as samples arrive.
            chunks = f.create_dataset("chunk", (1, 1, *self.target_size),
                                      maxshape=(None, 1, *self.target_size))
            ids = f.create_dataset("id", (1,), maxshape=(None,))
            while True:
                if in_q.empty():
                    break
                region = in_q.get(timeout=10)
                gt_volume, em_volume = self.get_volumes_from_slices(region)
                mito_regions = regionprops(gt_volume, cache=False)
                # Only keep regions that form exactly one connected component.
                if len(mito_regions) != 1:
                    continue
                mito_region = mito_regions[0]
                texture = None
                if len(mito_region.bbox) < 6:
                    # 2D bounding box: embed the crop into a zero volume with
                    # an extra trailing plane.
                    texture = np.zeros((*em_volume.shape, 2))
                    texture[mito_region.bbox[0]:mito_region.bbox[2] + 1,
                            mito_region.bbox[1]:mito_region.bbox[3] + 1, 0] = em_volume[
                            mito_region.bbox[0]:mito_region.bbox[2] + 1,
                            mito_region.bbox[1]:mito_region.bbox[3] + 1]
                else:
                    # 3D bounding box: crop the EM volume directly.
                    texture = em_volume[mito_region.bbox[0]:mito_region.bbox[3] + 1,
                              mito_region.bbox[1]:mito_region.bbox[4] + 1,
                              mito_region.bbox[2]:mito_region.bbox[5] + 1].astype(np.float32)
                large = any([d > self.target_size[i] for i, d in enumerate(texture.shape)])
                if large:
                    # Oversized object: draw several random target-size crops.
                    # NOTE(review): np.random.random_integers is deprecated in
                    # NumPy in favour of np.random.randint -- confirm the
                    # pinned NumPy version still provides it.
                    for i in range(self.large_samples):
                        x, y, z = 0, 0, 0
                        if texture.shape[0] > self.target_size[0]:
                            z = np.random.random_integers(0, texture.shape[0] - self.target_size[0])
                        if texture.shape[1] > self.target_size[1]:
                            x = np.random.random_integers(0, texture.shape[1] - self.target_size[1])
                        if texture.shape[2] > self.target_size[2]:
                            y = np.random.random_integers(0, texture.shape[2] - self.target_size[2])
                        sample = texture[
                                 z:z + self.target_size[0],
                                 x:x + self.target_size[1],
                                 y:y + self.target_size[2]]
                        # Skip crops that are almost entirely background.
                        if np.count_nonzero(sample) / sample.size < 0.1:
                            continue
                        sample_padding = np.zeros(self.target_size)
                        sample_padding[0:texture.shape[0], 0:texture.shape[1], 0:texture.shape[2]] = sample
                        if len(chunks) <= counter:
                            chunks.resize((chunks.shape[0] + 1), axis=0)
                            ids.resize((ids.shape[0] + 1), axis=0)
                        # NOTE(review): this expand_dims result is discarded --
                        # looks like a no-op; h5py broadcasts the assignment.
                        np.expand_dims(sample_padding, 0)
                        chunks[counter] = sample_padding/sample_padding.max()
                        ids[counter] = region["id"]
                        counter += 1
                else:
                    # Object fits: zero-pad it up to the target size.
                    sample_padding = np.zeros(self.target_size)
                    sample_padding[0:texture.shape[0], 0:texture.shape[1], 0:texture.shape[2]] = texture
                    if len(chunks) <= counter:
                        chunks.resize((chunks.shape[0] + 1), axis=0)
                        ids.resize((ids.shape[0] + 1), axis=0)
                    # NOTE(review): discarded expand_dims, see above.
                    np.expand_dims(sample_padding, 0)
                    chunks[counter] = sample_padding/sample_padding.max()
                    ids[counter] = region["id"]
                    counter += 1
        return
    def cleanup_h5(self):
        """Merge every per-worker ``<n>_mito_samples.h5`` file into one
        ``mito_samples.h5`` with 'chunk', 'id' and 'gt' datasets, deleting the
        worker files as they are consumed.
        """
        eval_model = Evaluationmodel(cfg=self.cfg, dl=self)
        regex = re.compile('([0-9]+)_mito_samples.h5')
        # First pass: count samples so the merged datasets can be pre-sized.
        size_needed = 0
        for root, dirs, files in os.walk(self.cfg.DATASET.ROOTD):
            for file in files:
                if regex.match(file):
                    with h5py.File(self.cfg.DATASET.ROOTD + file, "r") as f:
                        size_needed += len(f["id"])
        counter = 0
        with h5py.File(self.cfg.DATASET.ROOTD + "mito_samples.h5", "w") as mainf:
            chunks = mainf.create_dataset("chunk", (size_needed, 1, *self.target_size))
            ids = mainf.create_dataset("id", (size_needed,))
            gts = mainf.create_dataset("gt", (size_needed,))
            # Second pass: copy every worker file's rows, then delete it.
            for root, dirs, files in os.walk(self.cfg.DATASET.ROOTD):
                for file in files:
                    if regex.match(file):
                        with h5py.File(self.cfg.DATASET.ROOTD + file, "r") as f:
                            for id, chunk in zip(f["id"], f["chunk"]):
                                ids[counter] = id
                                chunks[counter] = chunk.astype(np.float32)
                                counter += 1
                        os.remove(self.cfg.DATASET.ROOTD + file)
            # Map each distinct sample id to its ground-truth value.
            # NOTE(review): this assumes gt_vector is ordered by sorted unique
            # id -- confirm against fast_create_gt_vector.
            gt_vector = eval_model.fast_create_gt_vector(save=False)
            gt_lookup = {}
            sort_ids = sorted(list(set(ids)))
            for i, id in enumerate(sort_ids):
                gt_lookup[str(id)] = gt_vector[i]
            for i, id in enumerate(ids):
                gts[i] = gt_lookup[str(id)]
            print("samples collected: {}".format(len(mainf["id"])))
| 6,874 | 0 | 81 |
8d81df8b8a0d792596b854b43fce0630aa70773e | 17,812 | py | Python | Knowledge Graphs/code/train.py | shravyagupta/BoltBio | 28d10e0a22af69f389cc2320d1d5c558165ff185 | [
"Apache-2.0"
] | null | null | null | Knowledge Graphs/code/train.py | shravyagupta/BoltBio | 28d10e0a22af69f389cc2320d1d5c558165ff185 | [
"Apache-2.0"
] | 9 | 2021-11-16T05:22:45.000Z | 2022-02-01T10:34:07.000Z | Knowledge Graphs/code/train.py | shravyagupta/BoltBio | 28d10e0a22af69f389cc2320d1d5c558165ff185 | [
"Apache-2.0"
] | 3 | 2021-11-19T11:38:22.000Z | 2022-01-10T09:29:16.000Z | import argparse
import yaml
import datetime
import os
import torch
import torch.nn.functional as F
import wandb
import random
import numpy as np
import utils
import graph
import dgl
import models
from dgl import DGLError
from utils import pbar
import torchmetrics
import pickle as pkl
if __name__ == "__main__":
    # CLI entry point: parse arguments, merge them into the YAML config, then
    # run embedding pre-training followed by VAE link-prediction training.
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        type = str,
                        required = True,
                        help = 'path to config file')
    parser.add_argument("-o", "--output",
                        type = str,
                        default = f"output/{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')}",
                        help = "path to output file"
                        )
    parser.add_argument("-w","--wandb",
                        action = "store_true",
                        help = 'wandb logging')
    parser.add_argument("-n", "--name",
                        type = str,
                        default = f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')}",
                        help = 'name of wandb run')
    args = parser.parse_args()
    try:
        config = yaml.safe_load(open(args.config, "r"))
    except:
        raise ValueError(f"Incorrect path to config file : {args.config}")
    # CLI values only fill in config keys that the YAML file did not set.
    if not config.get('output', False):
        config['output'] = args.output
    if args.wandb:
        if not config.get('name', False):
            config['name'] = args.name
    config['wandb'] = args.wandb
    # Stage 1: pre-train the encoder with the embedding criterion.
    trainer = Trainer(config, pretrain = True)
    trainer.run()
    # Stage 2: reload the best encoder and fine-tune the VAE at a lower LR.
    config['encoder_save'] = f"{config['output']}/best_encoder.ckpt"
    config['lr'] = 1.0e-4
    config['weight_decay'] = 1.0e-8
    trainer = Trainer(config, pretrain = False, reinit = True)
    trainer.run()
| 33.671078 | 189 | 0.515439 | import argparse
import yaml
import datetime
import os
import torch
import torch.nn.functional as F
import wandb
import random
import numpy as np
import utils
import graph
import dgl
import models
from dgl import DGLError
from utils import pbar
import torchmetrics
import pickle as pkl
class EmbCriterion():
    """Pre-training loss over node and relation embeddings.

    Combines a cross-entropy term that classifies every node embedding into
    its node type (against the per-type mean embedding) with a BCE term that
    predicts which (src_type, relation, dst_type) triples exist in the graph.
    """
    def __init__(self, nodes, edges, canonical_etypes, **kwargs):
        # One integer class per node type, repeated once per node of that type.
        per_type = [torch.ones((nodes[ntype],)) * pos
                    for pos, ntype in enumerate(nodes)]
        self.label = torch.cat(per_type, 0).long()
        # Flattened existence table over all (src_type, relation, dst_type)
        # combinations, in the iteration order of `nodes` and `edges`.
        flags = [1 if (s, r, d) in canonical_etypes else 0
                 for s in nodes for r in edges for d in nodes]
        self.bce_labels = torch.tensor(flags).float()
        self.contrast_loss = torch.nn.CrossEntropyLoss()
        self.bce = torch.nn.BCEWithLogitsLoss()

    def __call__(self, n_feat, e_feat, g):
        """Return {'loss', 'node_loss', 'relation_loss'} for one forward pass."""
        with g.local_scope():
            # Node features concatenated in sorted node-type order, plus the
            # per-type mean embedding used as a class prototype.
            ordered = [n_feat[ntype] for ntype in sorted(g.ntypes)]
            feats = torch.cat(ordered, 0)
            means = torch.cat([f.mean(0, keepdims=True) for f in ordered], 0)
            # Node-type classification: similarity of each node to each mean.
            node_logits = feats @ means.permute(1, 0)
            contrast = self.contrast_loss(node_logits.float(),
                                          self.label.to(node_logits.device))
            # Relation existence: bilinear score between type means through
            # each relation embedding.
            dim = feats.shape[-1]
            rel_logits = ((means[:, None, :] * e_feat[None, :, :]).view(-1, dim)
                          @ means.permute(1, 0)).view(-1, )
            rel = self.bce(rel_logits.float(),
                           self.bce_labels.to(rel_logits.device))
            return {'loss' : 10 * rel + contrast, 'node_loss' : contrast, 'relation_loss' : rel}
class LinkCriterion():
    """Composite VAE training loss for link prediction.

    Sums a KL term on the encoder outputs, an MSE reconstruction term for node
    and relation features, the embedding criterion on the reconstructed
    features, and a BCE link-prediction term.
    """
    def __init__(self, link_labels, relevant, vae_nodes, vae_edges, vae_canonical_etypes, **kwargs):
        self.emb_crit = EmbCriterion(vae_nodes, vae_edges, vae_canonical_etypes)
        self.labels = link_labels.float()
        # Indices of the relation embeddings used for link scoring.
        self.relevant = relevant
        self.mse = torch.nn.MSELoss()
        self.bce_loss = torch.nn.BCEWithLogitsLoss()
        self.kl = torch.nn.KLDivLoss(log_target=True)
        self.f1 = torchmetrics.F1(threshold=0.505)
        #self.roc = torchmetrics.AUROC()
    def __call__(self, gt, encoder, n_feat, e_feat, graph):
        """Compute all loss terms.

        :param gt: [node-feature dict, relation-feature tensor] ground truth;
            note gt[0] is replaced in place by its concatenation below.
        :param encoder: (node, edge) encoder outputs used for the KL term.
        :param n_feat: reconstructed node features (dict, two node types).
        :param e_feat: reconstructed relation features.
        :returns: dict of loss terms with 'loss' as the weighted total.
        """
        e_feat = e_feat[self.relevant]
        loss_stats = self.emb_crit(gt[0], gt[1][self.relevant], graph)
        # Collapse the ground-truth node dict into one tensor for the MSE.
        gt[0] = torch.cat([gt[0][i] for i in gt[0].keys()], 0)
        n_types, feat = list(n_feat.keys()), list(n_feat.values())
        lens = [i.shape[0] for i in feat]
        #feat = torch.cat(feat, 0)
        reconstruction_loss = self.mse(torch.cat(feat, 0), gt[0]) + self.mse(gt[1][self.relevant], e_feat)
        del gt
        # Bilinear link logits over every (relation, src, dst) triple.
        src, dst = feat
        del feat
        dim = src.shape[-1]
        logits = (((src[:, None, :] * e_feat[None, :, :])).view(-1, dim) @ dst.permute(1, 0) ).view(lens[0], len(e_feat), lens[1],).permute(1, 0, 2).flatten()
        labels = self.labels.to(logits.device)
        link_loss = self.bce_loss(logits, labels)
        del labels
        # f1 = self.f1(logits.cpu(), self.labels.long())
        # auroc = self.roc(logits.cpu(), self.labels.long())
        del logits
        # KL of encoder outputs against a standard normal sample.
        nodes, edges = encoder
        edges = edges[self.relevant]
        kl_loss = self.kl(nodes, torch.normal(0, 1, nodes.shape).to(nodes.device)) + self.kl(edges, torch.normal(0, 1, edges.shape).to(edges.device))
        loss = 0.01 * kl_loss + 100 * reconstruction_loss + 0.01 * loss_stats['loss'] + 100 * link_loss
        loss_stats['kl_loss'] = kl_loss
        loss_stats['reconstruction_loss'] = reconstruction_loss
        loss_stats['link_loss'] = link_loss
        loss_stats['loss'] = loss
        #loss_stats['f1'] = f1
        #loss_stats['auroc'] = auroc
        return loss_stats
class decode_output():
    """Decode model outputs into predicted links plus evaluation metrics.

    Thresholds the sigmoid link scores, scores them against the held-out
    labels and the set of removed edges, and maps each surviving
    (relation, src, dst) index triple back to human-readable names.
    """
    def __init__(self, removed, vae_edge_dict, names, identifiers, actual_link_labels, thresh, relevant, **kwargs):
        # (relation, src, dst) triples that were removed from the graph and
        # should ideally be re-discovered by the model.
        self.removed = removed.numpy()
        self.edge_dict = vae_edge_dict
        self.names = names
        self.identifiers = identifiers
        self.thresh = thresh
        self.labels = actual_link_labels.long()
        self.accuracy = torchmetrics.Accuracy()
        self.f1 = torchmetrics.F1()
        self.precision = torchmetrics.Precision()
        self.recall = torchmetrics.Recall()
        self.relevant = relevant
    def __call__(self, n_feat, e_feat, graph):
        """Score all pairs and return metrics plus decoded predictions.

        :returns: dict with torchmetrics scores, the recovery rate over the
            removed edges ('new_accuracy') and decoded rows ('data').
        """
        n_types, feat = list(n_feat.keys()), list(n_feat.values())
        lens = [i.shape[0] for i in feat]
        e_feat = e_feat[self.relevant]
        src, dst = feat
        del feat
        dim = src.shape[-1]
        # Hard 0/1 predictions with shape (n_relations, n_src, n_dst).
        pred = (torch.sigmoid((((src[:, None, :] * e_feat[None, :, :])).view(-1, dim) @ dst.permute(1, 0) ).view(lens[0], len(e_feat), lens[1]).permute(1, 0, 2)) > self.thresh).long().cpu()
        ids = torch.stack(torch.where(pred == 1), 0).T
        pred = pred.flatten()
        accuracy = self.accuracy(pred, self.labels)
        f1 = self.f1(pred, self.labels)
        precision = self.precision(pred, self.labels)
        recall = self.recall(pred, self.labels)
        del pred
        c = 0
        ids = ids.numpy()
        # How many of the deliberately removed edges were predicted again.
        c = np.array([x for x in set(tuple(x) for x in ids) & set(tuple(x) for x in self.removed)]).shape[0]
        new_accuracy = c/len(self.removed)
        data = []
        for i in ids:
            r, n1, n2 = i
            r = int(r)
            n1 = int(n1)
            n2 = int(n2)
            r_name = list(self.edge_dict.keys())[r]
            # 'ind' presumably maps graph-local node positions back to the
            # global name/identifier index -- TODO confirm in graph builder.
            node_p = 0
            pos = n1
            ind = int(graph.nodes[n_types[node_p]].data['ind'][pos])
            name1 = self.names[ind]
            identifiers1 = self.identifiers[ind]
            node_p = 1
            pos = n2
            ind = int(graph.nodes[n_types[node_p]].data['ind'][pos])
            name2 = self.names[ind]
            identifiers2 = self.identifiers[ind]
            temp = [r_name, name1, name2, identifiers1, identifiers2]
            data += [temp]
        return {'f1' : f1, 'accuracy' : accuracy, 'new_accuracy' : torch.tensor([new_accuracy]), 'data' : data, 'precision' : precision, 'recall' : recall}
class Trainer():
    """Two-stage training driver.

    With ``pretrain=True`` the graph encoder is trained with the embedding
    criterion; with ``pretrain=False`` the pre-trained encoder is wrapped in a
    VAE and trained for link prediction. Checkpoints, the config dump and
    optional wandb logging all go to ``config['output']``.
    """
    def __init__(self, config, pretrain = True, reinit = False):
        self.config = config
        self.out_dir = config['output']
        # BUG FIX: torch.cuda.is_available is a function; without the call it
        # is always truthy, so 'cuda' was selected even on CPU-only machines.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        try:
            self.graph, self.actual_graph, self.sub_graph = dgl.load_graphs(f'{config["graph"]["save"]}/graph.bin')[0]
        except DGLError:
            # Graph not built yet: build it once, then load.
            graph.create_graph(**config['graph'])
            self.graph, self.actual_graph, self.sub_graph = dgl.load_graphs(f'{config["graph"]["save"]}/graph.bin')[0]
        with open(f'{config["graph"]["save"]}/metapaths.pickle', 'rb') as f:
            metapaths = pkl.load(f)
        with open(f'{config["graph"]["save"]}/names.pickle', 'rb') as f:
            names = pkl.load(f)
        with open(f'{config["graph"]["save"]}/identifiers.pickle', 'rb') as f:
            identifiers = pkl.load(f)
        with open(f'{config["graph"]["save"]}/edges.pickle', 'rb') as f:
            edges = pkl.load(f)
        graph_spec = utils.analyse(self.graph, self.actual_graph, self.sub_graph, edges, metapaths)
        model = models.Encoder(**self.config['encoder'], **graph_spec)
        if pretrain:
            self.model = model.to(self.device)
            self.embed_crit = EmbCriterion(**graph_spec)
        else:
            # Fine-tuning: wrap the reloaded encoder inside the VAE.
            model.load_state_dict(torch.load(config['encoder_save']))
            self.model = models.VAE(**config['vae'], **graph_spec, encoder = model).to(self.device)
            self.link_crit = LinkCriterion(**graph_spec)
            self.decode = decode_output(**graph_spec, names = names, identifiers = identifiers, **config['decode'])
        self.main_thread = True
        self.optim = torch.optim.Adam(self.model.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'], betas=(0.9, 0.999))
        self.sched = torch.optim.lr_scheduler.StepLR(self.optim, step_size=40, gamma=0.7)
        if self.main_thread:
            if pretrain:
                if self.config["wandb"]:
                    run = wandb.init(name = self.config["name"])
                    config['wandb_url'] = run.get_url()
                os.makedirs(config['output'], exist_ok=True)
                with open(os.path.join(config["output"], 'config.yaml'), 'w') as outfile:
                    yaml.dump(self.config, outfile)
                print(f"Configuration")
                print("--------------------------------------------")
                print(yaml.dump(self.config))
                print("--------------------------------------------")
            print(f"Model parameters: {sum(p.numel() for p in self.model.parameters())/1e6}M")
        # Resume from the last checkpoint unless a fresh start was requested.
        if os.path.exists(os.path.join(self.config["output"], "last.ckpt")) and not reinit:
            ckpt = torch.load(os.path.join(self.config["output"], "last.ckpt"))
            self.model.load_state_dict(ckpt["model"])
            self.optim.load_state_dict(ckpt["optim"])
            self.sched.load_state_dict(ckpt["sched"])
            self.start_epoch = ckpt["epoch"] + 1
            if self.main_thread:
                print(f"Loaded checkpoint, continuing from {self.start_epoch} epochs...")
        else:
            self.start_epoch = 0
            self.logs = {"train": [], "val": []}
            if self.main_thread:
                print(f"Checkpoint not found, starting fresh...")
        self.train_steps = self.start_epoch
        self.metric_meter = utils.AvgMeter()
        self.log_wandb = self.config['wandb']
        self.pretrain = pretrain
    def train_embed(self):
        """Run one epoch of embedding pre-training on the full graph."""
        self.model.train()
        self.metric_meter.reset()
        self.graph = self.graph.to(self.device)
        for i in range(self.config['pretrain_steps']):
            n_feat, e_feat = self.model(self.graph)
            loss_stats = self.embed_crit(n_feat, e_feat, self.graph)
            loss = loss_stats['loss']
            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            metrics = {f'train {s}' : v.item() for s,v in loss_stats.items()}
            self.metric_meter.add(metrics)
            if self.main_thread:
                if self.log_wandb:
                    metrics["train step"] = self.train_steps
                    wandb.log(metrics)
                pbar(i/self.config['pretrain_steps'], msg=self.metric_meter.msg())
            self.train_steps += 1
        pbar(1, msg=self.metric_meter.msg())
        self.sched.step()
    def train_link(self):
        """Run one epoch of VAE link-prediction training on the sub-graph."""
        self.model.train()
        self.metric_meter.reset()
        self.sub_graph = self.sub_graph.to(self.device)
        for i in range(self.config['steps']):
            encoder, latent, n_feat, e_feat = self.model(self.sub_graph)
            loss_stats = self.link_crit(encoder, latent, n_feat, e_feat, self.sub_graph)
            loss = loss_stats['loss']
            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            metrics = {f'train {s}' : v.item() for s,v in loss_stats.items()}
            self.metric_meter.add(metrics)
            if self.main_thread:
                if self.log_wandb:
                    metrics["train step"] = self.train_steps
                    wandb.log(metrics)
                pbar(i/self.config['steps'], msg=self.metric_meter.msg())
            self.train_steps += 1
        pbar(1, msg=self.metric_meter.msg())
        self.sched.step()
    @torch.no_grad()
    def val_link(self):
        """Evaluate link predictions; returns the decoded prediction rows."""
        self.model.eval()
        self.metric_meter.reset()
        self.sub_graph = self.sub_graph.to(self.device)
        _, _, n_feat, e_feat = self.model(self.sub_graph)
        loss_stats = self.decode(n_feat, e_feat, self.graph)
        metrics = {f'val {s}' : v.item() for s,v in loss_stats.items() if s != 'data'}
        self.metric_meter.add(metrics)
        if self.main_thread:
            if self.log_wandb:
                wandb.log(metrics)
            pbar(1, msg=self.metric_meter.msg())
        return loss_stats['data']
    def run(self):
        """Main loop: train every epoch, track bests, checkpoint each epoch."""
        best_train_loss, best_val_accuracy, best_best_val_accuracy = float("inf"), 0, 0
        self.metric_meter.reset()
        n_epochs = self.config['pretrain_epochs'] if self.pretrain else self.config['epochs']
        for epoch in range(self.start_epoch, n_epochs):
            if self.main_thread:
                print(f"Epoch: {epoch}")
                print("---------------")
            if self.pretrain:
                self.train_embed()
            else:
                self.train_link()
            if self.main_thread:
                train_loss = self.metric_meter.get()["train loss"]
                if train_loss < best_train_loss:
                    print(
                        "\x1b[34m"
                        + f"train loss improved from {round(best_train_loss, 5)} to {round(train_loss, 5)}"
                        + "\033[0m"
                    )
                    best_train_loss = train_loss
                    if self.pretrain:
                        torch.save(self.model.state_dict(),
                            os.path.join(self.out_dir, "best_encoder.ckpt"),
                        )
                        if self.log_wandb:
                            wandb.log(
                                {
                                    "best_loss": best_train_loss,
                                }
                            )
                else:
                    # NOTE(review): validation only runs on epochs where the
                    # train loss did NOT improve -- preserved as-is; confirm
                    # this is intentional.
                    data = self.val_link()
                    val_accuracy = self.metric_meter.get()["val accuracy"]
                    best_accuracy = self.metric_meter.get()["val new_accuracy"]
                    if self.log_wandb:
                        wandb.log({"predictions" : wandb.Table(data = data, columns = ['edge_type', 'node_1_name', 'node_2_name', 'node_1_identifier', 'node_2_identifier'])})
                        wandb.log(
                            {
                                "epoch": epoch,
                                "train": train_loss,
                                "lr": self.optim.param_groups[0]["lr"],
                            }
                        )
                    if val_accuracy > best_val_accuracy:
                        print(
                            "\x1b[33m"
                            + f"val accuracy improved from {round(best_val_accuracy, 5)} to {round(val_accuracy, 5)}"
                            + "\033[0m"
                        )
                        best_val_accuracy = val_accuracy
                    if best_accuracy > best_best_val_accuracy:
                        if self.log_wandb:
                            wandb.log(
                                {
                                    "best_accuracy": best_accuracy,
                                }
                            )
                        best_best_val_accuracy = best_accuracy
                        torch.save(
                            self.model.state_dict(),
                            os.path.join(self.out_dir, "best.ckpt"),
                        )
            # Always keep a resumable snapshot of the latest epoch.
            torch.save(
                {
                    "model": self.model.state_dict(),
                    "sched": self.sched.state_dict(),
                    "optim": self.optim.state_dict(),
                    "epoch": epoch,
                },
                os.path.join(self.out_dir, "last.ckpt"),
            )
if __name__ == "__main__":
    # CLI entry point: parse arguments, merge them into the YAML config, then
    # run embedding pre-training followed by VAE link-prediction training.
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        type = str,
                        required = True,
                        help = 'path to config file')
    parser.add_argument("-o", "--output",
                        type = str,
                        default = f"output/{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')}",
                        help = "path to output file"
                        )
    parser.add_argument("-w","--wandb",
                        action = "store_true",
                        help = 'wandb logging')
    parser.add_argument("-n", "--name",
                        type = str,
                        default = f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')}",
                        help = 'name of wandb run')
    args = parser.parse_args()
    try:
        config = yaml.safe_load(open(args.config, "r"))
    except:
        raise ValueError(f"Incorrect path to config file : {args.config}")
    # CLI values only fill in config keys that the YAML file did not set.
    if not config.get('output', False):
        config['output'] = args.output
    if args.wandb:
        if not config.get('name', False):
            config['name'] = args.name
    config['wandb'] = args.wandb
    # Stage 1: pre-train the encoder with the embedding criterion.
    trainer = Trainer(config, pretrain = True)
    trainer.run()
    # Stage 2: reload the best encoder and fine-tune the VAE at a lower LR.
    config['encoder_save'] = f"{config['output']}/best_encoder.ckpt"
    config['lr'] = 1.0e-4
    config['weight_decay'] = 1.0e-8
    trainer = Trainer(config, pretrain = False, reinit = True)
    trainer.run()
| 15,531 | 161 | 258 |
17e56847093136b88693a5589e3a97a1dccbe751 | 1,169 | py | Python | tests/test_text/test_base.py | mrtrkmn/yellowbrick | efd942063455f1c148c3c691d8100d726b09ac90 | [
"Apache-2.0"
] | 3,662 | 2016-05-19T19:16:28.000Z | 2022-03-30T00:25:19.000Z | tests/test_text/test_base.py | mrtrkmn/yellowbrick | efd942063455f1c148c3c691d8100d726b09ac90 | [
"Apache-2.0"
] | 1,047 | 2016-05-18T15:20:59.000Z | 2022-03-30T16:12:49.000Z | tests/test_text/test_base.py | mrtrkmn/yellowbrick | efd942063455f1c148c3c691d8100d726b09ac90 | [
"Apache-2.0"
] | 602 | 2016-05-18T15:02:35.000Z | 2022-03-27T16:57:11.000Z | # tests.test_text.test_base
# Tests for the text visualization base classes
#
# Author: Benjamin Bengfort
# Created: Mon Feb 20 06:34:50 2017 -0500
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_base.py [6aa9198] benjamin@bengfort.com $
"""
Tests for the text visualization base classes
"""
##########################################################################
## Imports
##########################################################################
from yellowbrick.base import *
from yellowbrick.text.base import *
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################################
## TextVisualizer Base Tests
##########################################################################
| 29.974359 | 74 | 0.540633 | # tests.test_text.test_base
# Tests for the text visualization base classes
#
# Author: Benjamin Bengfort
# Created: Mon Feb 20 06:34:50 2017 -0500
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_base.py [6aa9198] benjamin@bengfort.com $
"""
Tests for the text visualization base classes
"""
##########################################################################
## Imports
##########################################################################
from yellowbrick.base import *
from yellowbrick.text.base import *
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################################
## TextVisualizer Base Tests
##########################################################################
class TestTextVisualizerBase(object):
    """Checks on the TextVisualizer base class hierarchy."""

    def test_subclass(self):
        """
        Assert the text visualizer is subclassed correctly
        """
        viz = TextVisualizer()
        # TextVisualizer must participate in both the scikit-learn estimator
        # API and the yellowbrick visualizer API.
        for base in (TransformerMixin, BaseEstimator, Visualizer):
            assert isinstance(viz, base)
| 0 | 325 | 23 |
15613916ffc71646e56348b60ef0100a02307c60 | 538 | py | Python | Max_product_subarray in Python/max_subarray.py | Hacktoberfest-2021/problem_on-_programming | 1e041a8238d91e1fc27e20a8174fcaaf8ed40ded | [
"Apache-2.0"
] | null | null | null | Max_product_subarray in Python/max_subarray.py | Hacktoberfest-2021/problem_on-_programming | 1e041a8238d91e1fc27e20a8174fcaaf8ed40ded | [
"Apache-2.0"
] | 1 | 2021-10-04T08:31:37.000Z | 2021-10-07T00:16:30.000Z | Max_product_subarray in Python/max_subarray.py | Hacktoberfest-2021/problem_on-_programming | 1e041a8238d91e1fc27e20a8174fcaaf8ed40ded | [
"Apache-2.0"
] | 3 | 2021-10-04T08:54:03.000Z | 2021-10-07T04:32:23.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 09:42:38 2021
@author: VISHAKHA V
"""
# Read whitespace-separated integers from stdin, e.g. "6 -3 -10 0 2".
arr=list(map(int,input().split()))
result=max_subarray_product(arr)
print("The maximum sub_array product =",result)
#Take the input as [6,-3,-10,0,2] if you want, then the output will be 180
| 18.551724 | 75 | 0.552045 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 7 09:42:38 2021
@author: VISHAKHA V
"""
def max_subarray_product(arr):
    """Return the maximum product over all non-empty contiguous subarrays.

    Replaces the previous O(n^2) double loop with the standard O(n) scan that
    tracks both the largest and the smallest product of a subarray ending at
    the current index -- the smallest matters because multiplying it by a
    negative element can flip it into the new maximum.

    :param arr: non-empty list of integers
    :returns: the maximum subarray product
    :raises IndexError: if arr is empty (same as the original implementation)
    """
    res = arr[0]       # best product over any subarray seen so far
    cur_max = arr[0]   # largest product of a subarray ending at this index
    cur_min = arr[0]   # smallest (most negative) product ending here
    for x in arr[1:]:
        # Either start a fresh subarray at x or extend the previous
        # best/worst; a negative x swaps the roles of max and min.
        candidates = (x, cur_max * x, cur_min * x)
        cur_max = max(candidates)
        cur_min = min(candidates)
        res = max(res, cur_max)
    return res
# Read whitespace-separated integers from stdin, e.g. "6 -3 -10 0 2".
arr=list(map(int,input().split()))
result=max_subarray_product(arr)
print("The maximum sub_array product =",result)
#Take the input as [6,-3,-10,0,2] if you want, then the output will be 180
| 230 | 0 | 23 |
202b8971ca0d5dd05229ac896ef2fad5cff12756 | 702 | py | Python | QBG/DataLoader/tools/scripts/compress_intra_data.py | GYMS-PKU/Daily-Frequency-Quant | 808eda9930efecff04ecf98abf617404cadd0003 | [
"MIT"
] | 3 | 2021-11-21T04:35:04.000Z | 2022-03-04T09:19:53.000Z | QBG/DataLoader/tools/scripts/compress_intra_data.py | GYMS-PKU/Daily-Frequency-Quant | 808eda9930efecff04ecf98abf617404cadd0003 | [
"MIT"
] | null | null | null | QBG/DataLoader/tools/scripts/compress_intra_data.py | GYMS-PKU/Daily-Frequency-Quant | 808eda9930efecff04ecf98abf617404cadd0003 | [
"MIT"
] | 5 | 2021-10-03T00:00:22.000Z | 2022-03-07T09:02:00.000Z | # Copyright (c) 2022 Dai HBG
"""
该脚本用于将1分钟数据中不在市的部分删除
"""
import os
import pandas as pd
if __name__ == '__main__':
    # Script entry point.
    main()
| 24.206897 | 103 | 0.618234 | # Copyright (c) 2022 Dai HBG
"""
该脚本用于将1分钟数据中不在市的部分删除
"""
import os
import pandas as pd
def main():
    """Remove per-minute data files for stocks not listed on that day.

    For every date directory under the intraday root, load that day's list of
    listed securities and delete any minute-data file whose stock code (the
    file name without its 4-character extension) is not in the list.
    """
    data_path = 'D:/Documents/AutoFactoryData'
    intra_path = 'E:/Backups/AutoFactoryData/StockIntraDayData/1m'
    for date in os.listdir(intra_path):
        listed = pd.read_csv('{}/StockDailyData/{}/all_securities.csv'.format(data_path, date))
        codes = set(listed['code'])
        # f[:-4] strips the 4-character extension to recover the stock code.
        stale = [f for f in os.listdir('{}/{}'.format(intra_path, date))
                 if f[:-4] not in codes]
        for f in stale:
            os.remove('{}/{}/{}'.format(intra_path, date, f))
        print('{} done.'.format(date))
main()
| 546 | 0 | 23 |
a3c5eb7f64ffed2be27901164c889f549988b7a6 | 6,123 | py | Python | vision/nn/FairNAS_B.py | lippman1125/pytorch-ssd | d2c31b0d69e82ab2c46f2eecbbf9c12f3dd73309 | [
"MIT"
] | 15 | 2019-05-28T06:47:50.000Z | 2021-02-21T18:16:46.000Z | vision/nn/FairNAS_B.py | lippman1125/pytorch-ssd | d2c31b0d69e82ab2c46f2eecbbf9c12f3dd73309 | [
"MIT"
] | null | null | null | vision/nn/FairNAS_B.py | lippman1125/pytorch-ssd | d2c31b0d69e82ab2c46f2eecbbf9c12f3dd73309 | [
"MIT"
] | 6 | 2019-05-28T07:39:31.000Z | 2021-02-21T18:16:52.000Z | import math
import torch.nn as nn
import torch.nn.functional as F
# class InvertedResidual(nn.Module):
# def __init__(self, inp, oup, kernel_size, stride, expand_ratio):
# super(InvertedResidual, self).__init__()
# assert stride in [1, 2]
# self.stride = stride
# padding = kernel_size // 2
# hidden_dim = round(inp * expand_ratio)
# self.use_res_connect = self.stride == 1 and inp == oup
# self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
# self.bn1 = nn.BatchNorm2d(hidden_dim)
# self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim, bias=False)
# self.bn2 = nn.BatchNorm2d(hidden_dim)
# self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
# self.bn3 = nn.BatchNorm2d(oup)
#
# def forward(self, x):
# inputs = x
# x = self.conv1(x)
# x = self.bn1(x)
# x = F.relu6(x, inplace=True)
# x = self.conv2(x)
# x = self.bn2(x)
# x = F.relu6(x, inplace=True)
# x = self.conv3(x)
# x = self.bn3(x)
# if self.use_res_connect:
# return inputs + x
# else:
# return x
| 34.016667 | 117 | 0.526049 | import math
import torch.nn as nn
import torch.nn.functional as F
def stem(inp, oup, stride):
    """Entry block: 3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU6."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def separable_conv(inp, oup):
    """Depthwise 3x3 conv + BN + ReLU6, then a linear pointwise 1x1 + BN."""
    layers = [
        nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU6(inplace=True),
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
    ]
    return nn.Sequential(*layers)
def conv_before_pooling(inp, oup):
    """Final 1x1 channel-expansion conv + BN + ReLU6 before global pooling."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted bottleneck block.

    Optional 1x1 expansion (skipped when ``expand_ratio == 1``), depthwise
    conv, then a linear 1x1 projection. A residual connection is used when
    stride is 1 and the channel count is unchanged. The layer order inside
    ``self.conv`` matches the original exactly, so state_dict keys and
    outputs are identical.
    """
    def __init__(self, inp, oup, kernel_size, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        self.use_res_connect = stride == 1 and inp == oup
        hidden_dim = round(inp * expand_ratio)
        padding = kernel_size // 2
        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers += [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
            ]
        # depthwise conv followed by the linear pointwise projection
        layers += [
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU6(inplace=True),
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = out + x
        return out
# class InvertedResidual(nn.Module):
# def __init__(self, inp, oup, kernel_size, stride, expand_ratio):
# super(InvertedResidual, self).__init__()
# assert stride in [1, 2]
# self.stride = stride
# padding = kernel_size // 2
# hidden_dim = round(inp * expand_ratio)
# self.use_res_connect = self.stride == 1 and inp == oup
# self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
# self.bn1 = nn.BatchNorm2d(hidden_dim)
# self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, padding, groups=hidden_dim, bias=False)
# self.bn2 = nn.BatchNorm2d(hidden_dim)
# self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
# self.bn3 = nn.BatchNorm2d(oup)
#
# def forward(self, x):
# inputs = x
# x = self.conv1(x)
# x = self.bn1(x)
# x = F.relu6(x, inplace=True)
# x = self.conv2(x)
# x = self.bn2(x)
# x = F.relu6(x, inplace=True)
# x = self.conv3(x)
# x = self.bn3(x)
# if self.use_res_connect:
# return inputs + x
# else:
# return x
class FairNasB(nn.Module):
    """FairNAS-B image classifier: stem + separable conv + a fixed sequence
    of inverted-residual blocks, a 1x1 head conv, global average pooling and
    a dropout+linear classifier.
    """
    def __init__(self, n_class=1000, input_size=224):
        super(FairNasB, self).__init__()
        assert input_size % 32 == 0
        # Searched block configuration; one row per inverted-residual block.
        mb_config = [
            # expansion, out_channel, kernel_size, stride,
            [3, 32, 5, 2],
            [3, 32, 3, 1],
            [3, 40, 5, 2],
            [3, 40, 3, 1],
            [6, 40, 3, 1],
            [3, 40, 5, 1],
            [3, 80, 7, 2],
            [3, 80, 3, 1],
            [6, 80, 3, 1],
            [3, 80, 5, 1],
            [3, 96, 3, 1],
            [6, 96, 3, 1],
            [3, 96, 7, 1],
            [3, 96, 3, 1],
            [6, 192, 7, 2],
            [6, 192, 5, 1],
            [6, 192, 7, 1],
            [6, 192, 3, 1],
            [6, 320, 5, 1],
        ]
        input_channel = 16
        last_channel = 1280
        self.last_channel = last_channel
        # self.stem = stem(3, 32, 2)
        # self.separable_conv = separable_conv(32, 16)
        # self.mb_module = list()
        self.features = list()
        self.features.append(stem(3, 32, 2))
        self.features.append(separable_conv(32, 16))
        for t, c, k, s in mb_config:
            output_channel = c
            self.features.append(InvertedResidual(input_channel, output_channel, k, s, expand_ratio=t))
            input_channel = output_channel
        # self.mb_module = nn.Sequential(*self.mb_module)
        # self.conv_before_pooling = conv_before_pooling(input_channel, self.last_channel)
        self.features.append(conv_before_pooling(input_channel, self.last_channel))
        self.features = nn.Sequential(*self.features)
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, n_class),
        )
        self._initialize_weights()
    def forward(self, x):
        """Return class logits of shape (batch, n_class)."""
        # x = self.stem(x)
        # x = self.separable_conv(x)
        # x = self.mb_module(x)
        # x = self.conv_before_pooling(x)
        x = self.features(x)
        # Global average pooling over the two spatial dimensions.
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        """Kaiming-style init for convs, unit BN, uniform fan-out linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(0)  # fan-out
                init_range = 1.0 / math.sqrt(n)
                m.weight.data.uniform_(-init_range, init_range)
                m.bias.data.zero_()
| 4,614 | 18 | 248 |
0fb1c11e373b2420a9445fa755cd0d170d59f89e | 3,707 | py | Python | samples/core/get_started/premade_estimator.py | mumingwu/models | c439119e990784e5fd60b2adb869ee06663d7806 | [
"Apache-2.0"
] | 5 | 2018-05-30T03:14:00.000Z | 2022-03-09T09:42:41.000Z | samples/core/get_started/premade_estimator.py | devzhui/models | 4364390adbf16b57c093a05217897831f48da7d3 | [
"Apache-2.0"
] | null | null | null | samples/core/get_started/premade_estimator.py | devzhui/models | 4364390adbf16b57c093a05217897831f48da7d3 | [
"Apache-2.0"
] | 8 | 2020-04-14T13:46:54.000Z | 2022-02-21T05:18:11.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pandas as pd
import tensorflow as tf
# CLI flags for the premade-estimator demo.
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=200, type=int,
                    help='number of training steps')
# Download locations of the Iris CSVs plus the shared column/label vocabulary.
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
COLUMNS = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Sentosa', 'Versicolor', 'Virginica']
def load_data(train_fraction=0.8, seed=0, y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y).

    NOTE(review): train_fraction and seed are accepted but never used; the
    train/test split comes from the two pre-split CSV files instead.
    """
    # Download (and cache) the training CSV, then split off the label column.
    train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
    train = pd.read_csv(train_path, names=COLUMNS, header=0)
    train_x, train_y = train, train.pop(y_name)
    # Same for the held-out test CSV.
    test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
    test = pd.read_csv(test_path, names=COLUMNS, header=0)
    test_x, test_y = test, test.pop(y_name)
    return (train_x, train_y), (test_x, test_y)
if __name__ == '__main__':
    # NOTE(review): `main` is not defined in this excerpt (surrounding
    # definitions appear truncated) -- confirm it exists in the full script.
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
| 34.971698 | 79 | 0.691664 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pandas as pd
import tensorflow as tf
# Command-line flags consumed by main() via parser.parse_args().
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=200, type=int,
                    help='number of training steps')
# Locations of the pre-split Iris CSV files downloaded by load_data().
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
# CSV column order: four numeric features followed by the label column.
COLUMNS = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
# Class names indexed by the integer label predicted by the classifier.
# NOTE(review): 'Sentosa' is likely a typo for 'Setosa' -- runtime string, left as-is.
SPECIES = ['Sentosa', 'Versicolor', 'Virginica']
def load_data(train_fraction=0.8, seed=0, y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y).

    Note: train_fraction and seed are accepted for API compatibility but are
    not used; the split comes from the two pre-split CSV files.
    """
    def _fetch_split(url):
        # Download (and cache) the CSV, then pop the label column off.
        path = tf.keras.utils.get_file(url.split('/')[-1], url)
        frame = pd.read_csv(path, names=COLUMNS, header=0)
        return frame, frame.pop(y_name)

    return _fetch_split(TRAIN_URL), _fetch_split(TEST_URL)
def make_dataset(*inputs):
    """Wrap the given arrays/tensors in a tf.data.Dataset of per-row slices."""
    slices = inputs
    return tf.data.Dataset.from_tensor_slices(slices)
def from_dataset(ds):
    """Return a zero-argument input_fn yielding the next element of *ds*."""
    def input_fn():
        return ds.make_one_shot_iterator().get_next()
    return input_fn
def main(argv):
    """Train, evaluate and demo-predict a DNNClassifier on the Iris dataset."""
    args = parser.parse_args(argv[1:])  # argv[0] is the program name
    # Fetch the data
    (train_x, train_y), (test_x, test_y) = load_data()
    # Convert the feature DataFrames to dicts of column name -> Series, the
    # structure the numeric feature columns below expect.
    train_x = dict(train_x)
    test_x = dict(test_x)
    # Feature columns describe the input: all columns are numeric.
    feature_columns = [tf.feature_column.numeric_column(col_name)
                       for col_name in COLUMNS[:-1]]
    # Build 3 layer DNN with 10, 20, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 20, 10],
        n_classes=3)
    # Train the Model.
    train = (
        make_dataset(train_x, train_y)
        .repeat()           # loop over the data forever; steps= bounds training
        .shuffle(1000)
        .batch(args.batch_size))
    classifier.train(input_fn=from_dataset(train), steps=args.train_steps)
    # Evaluate the model.
    test = make_dataset(test_x, test_y).batch(args.batch_size)
    eval_result = classifier.evaluate(input_fn=from_dataset(test))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    # Generate predictions from the model
    predict_input = make_dataset({
        'SepalLength': [6.4, 5.8],
        'SepalWidth': [3.2, 3.1],
        'PetalLength': [4.5, 5.0],
        'PetalWidth': [1.5, 1.7],
    }).batch(args.batch_size)
    for p in classifier.predict(input_fn=from_dataset(predict_input)):
        template = ('Prediction is "{}" ({:.1f}%)')
        # class_ids holds the predicted label index; probabilities the softmax.
        class_id = p['class_ids'][0]
        probability = p['probabilities'][class_id]
        print(template.format(SPECIES[class_id], 100 * probability))
if __name__ == '__main__':
    # Show INFO-level logs (training progress) and hand control to main().
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
| 1,656 | 0 | 69 |
1cdd4d3893e2e7a540e5721f281a59389fbbcdab | 5,033 | py | Python | itest/testing/testbase.py | Koos85/siridb-server | e245cec21f29fd649fb5c363d197012cdd5992f6 | [
"MIT"
] | null | null | null | itest/testing/testbase.py | Koos85/siridb-server | e245cec21f29fd649fb5c363d197012cdd5992f6 | [
"MIT"
] | null | null | null | itest/testing/testbase.py | Koos85/siridb-server | e245cec21f29fd649fb5c363d197012cdd5992f6 | [
"MIT"
] | null | null | null | import unittest
import time
import asyncio
import random
import math
import collections
from .siridb import SiriDB
from .server import Server
from .client import Client
| 37.007353 | 79 | 0.468707 | import unittest
import time
import asyncio
import random
import math
import collections
from .siridb import SiriDB
from .server import Server
from .client import Client
def default_test_setup(nservers=1, **kwargs):
    """Decorator factory for TestBase coroutines.

    Wraps an async test method so that, before it runs, a SiriDB database and
    `nservers` servers are created and started (exposed as self.db,
    self.servers and the numbered self.serverN / self.clientN attributes).
    Afterwards all servers are stopped, unless the test returned a falsy
    non-None value while a terminal is attached (Server.TERMINAL is not None).
    """
    def wrapper(func):
        async def wrapped(self):
            self.db = SiriDB(**kwargs)
            self.servers = [
                Server(n, title=self.title, **kwargs) for n in range(nservers)]
            for n, server in enumerate(self.servers):
                setattr(self, 'server{}'.format(n), server)
                setattr(self, 'client{}'.format(n), Client(self.db, server))
                server.create()
                await server.start()
            # NOTE(review): blocking sleep inside a coroutine stalls the whole
            # event loop; presumably intentional here to let servers settle.
            time.sleep(2.0)
            await self.db.create_on(self.server0, sleep=5)
            close = await func(self)
            # Stop the servers unless the test asked to keep them alive
            # (falsy, non-None return) while a terminal is available.
            if close or close is None or Server.TERMINAL is None:
                for server in self.servers:
                    result = await server.stop()
                    self.assertTrue(
                        result,
                        msg='Server {} did not close correctly'.format(
                            server.name))
        return wrapped
    return wrapper
class TestBase(unittest.TestCase):
title = 'No title set'
async def run():
raise NotImplementedError()
async def assertIsRunning(self, db, client, timeout=None):
while True:
result = await client.query('list servers name, status')
result = result['servers']
try:
assert len(result) == len(self.db.servers), \
'Server(s) are missing: {} (expexting: {})'.format(
result, self.db.servers)
except AssertionError as e:
if not timeout:
raise e
else:
try:
assert all([
status == 'running' for name, status in result]), \
'Not all servers have status running: {}'.format(
result)
except AssertionError as e:
if not timeout:
raise e
else:
break
if timeout is not None:
timeout -= 1
await asyncio.sleep(1.0)
async def assertSeries(self, client, series):
d = {s.name: s.points for s in series if s.points}
result = await client.query('list series name, length limit {}'.format(
len(series)))
result = {name: length for name, length in result['series']}
for s in series:
if s.points:
length = result.get(s.name, None)
assert length is not None, \
'series {!r} is missing in the result'.format(s.name)
assert length == len(s.points) or \
s.commit_points() or \
length == len(s.points), \
'expected {} point(s) but found {} point(s) ' \
'for series {!r}' \
.format(len(s.points), length, s.name)
n = min(len(series), 10)
li = list(d.keys())
random.shuffle(li)
for i in range(n):
result = await client.query('select * from "{}"'.format(li[i]))
points = result[li[i]]
expected = sorted(d[li[i]])
assert len(points) == len(expected), \
'incorrect number of points: {}, expected: {}'.format(
len(points), len(expected))
c = collections.Counter([p[0] for p in points])
for i, p in enumerate(points):
ts, val = p
if c[ts] == 1:
self.assertEqual(p, expected[i])
def assertAlmostEqual(self, a, b, *args, **kwargs):
if isinstance(b, dict):
for series, points in b.items():
assert isinstance(points, list), 'Expecting a list of points'
for i, point in enumerate(points):
assert isinstance(point, list) and len(point) == 2, \
'Expecting a point to be a list of 2 items'
super().assertEqual(a[series][i][0], point[0])
if isinstance(a[series][i][1], str):
super().assertEqual(
a[series][i][1].replace(',', '.'),
point[1].replace(',', '.'))
elif math.isnan(a[series][i][1]):
assert math.isnan(point[1]), \
'Expecting point `{}` to be `nan`, got: `{}`' \
.format(i, point[1])
else:
super().assertAlmostEqual(
a[series][i][1],
point[1],
*args,
**kwargs)
else:
super().assertAlmostEqual(a, b, *args, **kwargs)
| 4,667 | 149 | 46 |
f84a2e516b70afc34fcba7bb626975d57d29a5d2 | 30,312 | py | Python | degraph/entity.py | aljabr0/degraph | ac44a1c979da7605ed424853e7fef8ffa8790b27 | [
"Apache-2.0"
] | 31 | 2020-02-01T23:25:23.000Z | 2021-11-16T07:24:03.000Z | degraph/entity.py | aljabr0/degraph | ac44a1c979da7605ed424853e7fef8ffa8790b27 | [
"Apache-2.0"
] | null | null | null | degraph/entity.py | aljabr0/degraph | ac44a1c979da7605ed424853e7fef8ffa8790b27 | [
"Apache-2.0"
] | 4 | 2020-02-01T23:25:28.000Z | 2020-02-27T04:41:18.000Z | from .types import StaticShape, RBF, Adjacency
from .math import tf_hgrid_coords, hgrid_normalize, bezier_multi, tri_ones_lower, tf_repeat_1d
from .math import piecewise_linear_curve_closest_pts
from .math import random_uniform_hypersphere, mutual_sq_distances, gauss_rbf, rbf_factory, crbf_wendland_d3c2
from .config import TF_FLOAT, EPSILON, BEZIER_DEFAULT_DEGREE
from .model import create_entity_name, get_active_model, Entity, SingleVarContrib, TensorRef, expand_tensor_ref
from .model import stop_gradient_tape
from .utils import join_ident, export, apply_colormap
from typing import Sequence, List, Optional, Union, Dict, Callable, Tuple
import abc
import tensorflow as tf
import pandas as pd
import numpy as np
import h5py
from copy import copy
from contextlib import nullcontext
RBF_DEFAULT = 'gauss'
class Lambda(Entity):
"""
Entity wrapping an arbitrary expression involving tensors.
"""
def __init__(self, fun: Callable, tensor_ref: Union[TensorRef, Dict[str, TensorRef], List[TensorRef]], *,
name=None, enable_grad: bool = True):
"""
Init lambda entity.
:param fun: The callable to be wrapped.
:param tensor_ref: Tensor ref or a list or dict of tensor refs.
:param name: The name of the entity.
:param enable_grad: Boolean (default true) specifying whether the gradient tape should be enabled when in
training mode.
"""
super().__init__(name=name)
self.fun = fun
self.tensor_ref = copy(tensor_ref)
self.enable_grad = enable_grad
self.add_to_active_model()
@export
def lambda_(fun: Callable, tensor_ref: Union[TensorRef, Dict[str, TensorRef], List[TensorRef]], *,
name=None, enable_grad: bool = True):
"""
Create an entity that wraps an arbitrary expression involving tensors.
:param fun: A lambda function invoked with the expanded tensor refs declared in the parameter tensor_ref.
:param tensor_ref: A tensor ref, a list of tensor refs or a dictionary. In the case of a dictionary the function
will be invoked with named parameters whereas in the other cases positional parameters will be used.
:param name: Name of the entity wrapping the expression.
:param enable_grad: if true (default) the tensor tape is enabled while in training mode.
:return: An entity wrapping the expression implemented in fun.
"""
return Lambda(fun=fun, tensor_ref=tensor_ref, name=name, enable_grad=enable_grad).get_value_ref()
TensorRefs = Union[Sequence[TensorRef], TensorRef]
def tensor_refs_clone(refs: TensorRefs) -> TensorRefs:
"""
Clone a tensor ref or a sequence of tensor refs.
:param refs:
:return:
"""
if isinstance(refs, (tf.Tensor, tf.Variable, Callable)):
return refs
return list(refs)
@export
@export
@export
def aggregate_rasters(rasters: Union[TensorRef, List[TensorRef]], *, bias: float = 0., name=None):
"""
Aggregate a list of images. The activation function ReLU is applied after the input tensors are stacked and added.
:param rasters: A list of tensor refs representing the input images.
:param bias: Bias value.
:param name: Name of the entity.
:return: An entity implementing the aggregation.
"""
return lambda_(adder, tensor_ref=rasters, name=name)
class RBFNet:
"""
Radial basis function network.
"""
def __init__(self, rbf: RBF):
"""
Init RBF network.
:param rbf: The radial basis function, eg the value returned by degraph.math.rbf_factory(...).
"""
self.rbf = rbf
@staticmethod
@tf.function
@tf.function
def grid_lines_on_grid(self, shape: StaticShape, centres: tf.Tensor):
"""
Apply radial basis function on a grid using distances from lines aligned with the axes.
"""
grid_acc = tf.zeros(shape=tf.reduce_prod(shape), dtype=TF_FLOAT)
grid = tf.cast(tf.expand_dims(tf_hgrid_coords(shape), axis=1), dtype=TF_FLOAT)
for k in range(centres.shape[1]):
level = tf.squeeze(tf.square(grid[:, :, k:k+1] - centres[:, k:k+1]), axis=-1)
level = self.rbf(level)
level = tf.reduce_sum(level, axis=-1) # form a mixture
grid_acc += level
grid_acc = tf.reshape(grid_acc, shape=shape) # reshape values to an image
return grid_acc
@export
class GraphRepr: # TODO extend tf.Module?
"""
Symmetric directed graph representation.
"""
# TODO Another ideas would be of getting rid of this class and have individual entities for var and other op,
# this is in line with the idea of using other type of coordinates (eg. polar).
HDF5_SER_TYPE = 'degraph.GraphRepr'
DEF_NAME_PREFIX = 'graph_def'
def __init__(self, adjacency: Adjacency, dim: int = 2, name=None, **kwargs):
"""
Init the graph representation object.
:param adjacency: Adjacency matrix, this can be either a Numpy array, a Pandas DataFrame or a tensor.
:param dim: Number of spatial dimensions of this representation, currently many internal components are limited
to the 2D case it is the only option.
:param name:
:param kwargs:
"""
# TODO optional param positions_generator (space positions)
self._name = create_entity_name(name, prefix=self.DEF_NAME_PREFIX)
self._static = False
if '_internal_skip_init' in kwargs:
return
if len(kwargs) != 0:
raise ValueError(f'Unsupported extra parameters: {kwargs.keys()}')
dim = int(dim)
assert dim >= 1
if dim not in (2, 3):
raise ValueError(f'Space dimensions not supported: {dim}')
if isinstance(adjacency, pd.DataFrame):
assert adjacency.index == adjacency.columns
adjacency = np.array(adjacency)
if isinstance(adjacency, np.ndarray):
adjacency = tf.convert_to_tensor(adjacency)
if not isinstance(adjacency, (tf.Tensor, tf.Variable)):
raise ValueError(f'Unsupported type for adjacency matrix: {type(adjacency)}')
assert len(adjacency.shape) == 2 and adjacency.shape[0] == adjacency.shape[1]
# Mask upper triangular values of the adjacency matrix
adjacency = tf.cast(adjacency, dtype=TF_FLOAT) * tf.cast(tri_ones_lower(adjacency.shape[0]), dtype=TF_FLOAT)
adjacency /= tf.reduce_max(tf.abs(adjacency)) + EPSILON # Normalize edge abs weights
edge_count = tf.math.count_nonzero(adjacency)
edge_extreme_indexes = tf.where(adjacency)
assert edge_count == edge_extreme_indexes.shape[0] and edge_extreme_indexes.shape[1] == 2
self.adjacency = adjacency
# The indexes of the extreme points of each edge. The indexes point at the first axis of the variable positions.
self.edge_extreme_indexes = edge_extreme_indexes
with tf.name_scope(self.name):
self.positions = tf.Variable(random_uniform_hypersphere(size=adjacency.shape[0], dim=dim), trainable=True,
name='positions')
total_ctrl_point_count = edge_extreme_indexes.shape[0] * ((BEZIER_DEFAULT_DEGREE+1)-2)
ctrl_points_init = tf.reshape(random_uniform_hypersphere(size=total_ctrl_point_count, dim=dim),
shape=(-1, (BEZIER_DEFAULT_DEGREE+1)-2, dim))
self.edge_internal_ctrl_points = tf.Variable(ctrl_points_init, trainable=True,
name='edge_internal_ctrl_points')
@property
@property
def static(self) -> bool:
"""
Boolean property, true if the current graph representation is static (i.e. the positions are not of the type
tf.Variable).
:return:
"""
return self._static
def copy(self, *, static=False, name: Optional[str] = None):
"""
Clone the current instance of GraphRepr with the excpection of the static flag which is passed through the
arguments.
:param static: When static is set the variables are transformed to static tensors in the destination object.
This is useful to take snapshots of the status.
:param name: Optional name for the graph, otherwise an automatic one is generated.
:return:
"""
obj = GraphRepr(np.asarray(0), _internal_skip_init=True)
obj.adjacency = self.adjacency
obj.edge_extreme_indexes = self.edge_extreme_indexes
obj._static = static
obj._name = create_entity_name(name, prefix=self.DEF_NAME_PREFIX)
# Convert variables to tensors, note that when GraphRepr.static is set these are already tensors.
positions = tf.convert_to_tensor(self.positions)
edge_internal_ctrl_points = tf.convert_to_tensor(self.edge_internal_ctrl_points)
if static:
obj.positions = positions
obj.edge_internal_ctrl_points = edge_internal_ctrl_points
else:
obj.positions = tf.Variable(positions, trainable=True)
obj.edge_internal_ctrl_points = tf.Variable(edge_internal_ctrl_points, trainable=True)
return obj
def get_ctrl_points_vars(self) -> List[tf.Variable]:
"""
Get the control point variables relative to the vertexes and the edges.
:return: A list of elements of type tf.Variable.
"""
if self.static:
return []
return [self.positions, self.edge_internal_ctrl_points]
def serialize(self, fobj):
"""
Serialize the current object in HDF5 format using the file-like object provided.
:param fobj: A file-like object
:return:
"""
with h5py.File(fobj, mode='w') as f:
f['type'] = self.HDF5_SER_TYPE
f['adjacency'] = self.adjacency.numpy()
f['edge_extreme_indexes'] = self.edge_extreme_indexes.numpy()
f['positions'] = self.positions.numpy()
f['edge_internal_ctrl_points'] = self.edge_internal_ctrl_points.numpy()
@property
def dim(self) -> int:
"""
The number of spatial dimentions of this representation.
:return:
"""
return self.positions.shape[1]
def get_positions(self) -> tf.Tensor:
"""
Get a tensor containing the positions of the vertexes. The expected shape is [pt_count, dim].
:return:
"""
# TODO optionally we may include a calculation here, eg polar coordinates to cartesian
return tf.convert_to_tensor(self.positions)
def get_positions_ref(self) -> TensorRef:
"""
Get a tensor ref relative to the positions of the vertexes. See get_positions().
:return:
"""
return fun
def get_edges_ctrl_points(self) -> tf.Tensor:
"""
Get the control points of the edges, the shape of the tensor is [edge_count, ctrl_point_count, dim]
:return:
"""
# edge_extreme_points, shape: [edge_count, 2, dim]
edge_extreme_points = tf.gather(self.positions, indices=self.edge_extreme_indexes)
# Compose a tensor with edge's end point positions and internal control points
return tf.concat([edge_extreme_points[:, 0:1, :], # start points
self.edge_internal_ctrl_points, # internal points
edge_extreme_points[:, 1:2, :]], axis=1) # end points
class Vertexes(SingleVarContrib):
"""
Entity representing the vertexes of a graph.
"""
@export
def vertexes(graph: GraphRepr, *, trainable: bool = True, name=None) -> TensorRef:
"""
Create an entity that represents the vertexes of a graph.
:param graph: The graph object.
:param trainable: If true the variable relative to the positions of the vertexes are marked as trainable.
:param name: The name of the entity.
:return: The entity object.
"""
return Vertexes(graph=graph, trainable=trainable, name=name).get_value_ref()
@export
@export
def unit_sphere_bounds_loss(points_tensor_ref: TensorRef, *, factor: float = 1.0):
"""
Get a loss that penalises points laying outside the unit hyper-sphere centred in the origin.
:param points_tensor_ref: A tensor ref containing the coordinates of the points.
:param factor: A multiplicative factor for the loss.
:return: An entity implementing the loss.
"""
@tf.function
return lambda_(fun, points_tensor_ref)
@export
def mse_loss(value_ref: TensorRef, *, factor: float = 1.0) -> TensorRef:
"""
Get a MSE Loss.
:param value_ref: The input tensor.
:param factor: A multiplicative factor for the loss.
:return: An entity implementing the loss.
"""
@tf.function
return lambda_(fun, value_ref)
@export
def sse_loss(value_ref: TensorRef, *, factor: float = 1.0) -> TensorRef:
"""
Get a sum of squares loss.
:param value_ref: The input tensor.
:param factor: A multiplicative factor for the loss.
:return: An entity implementing the loss.
"""
@tf.function
return lambda_(fun, value_ref)
class RBFNetRaster(SingleVarContrib):
"""
An entity that creates a raster using a radial basis function network.
"""
def __init__(self, points_tensor_ref: TensorRef, *, shape: StaticShape,
rbf: str = RBF_DEFAULT, peak: float = 1.0, spread: float = 1.0, name=None):
"""
Init the entity.
:param points_tensor_ref: A tensor ref referencing a tensor with expected shape: [point_count, dim].
The points are used as centres of the radial basis functions.
:param shape: The shape of the raster.
:param rbf: The rbf to be used, see degraph.math.rbf_factory.
:param peak: The peak of the RBF.
:param spread: The spread of the RBF.
:param name: The name of the entity.
"""
super().__init__(name=name)
self.points_tensor_ref = points_tensor_ref
self.shape = shape
self.rbf = rbf
self.peak = peak
self.spread = spread
self._rbf_net = None
self.add_to_active_model()
@export
@export
def _scope_prepare(scope: str) -> Tuple[object, str]:
"""
Parse a scope string a return a tuple consisting of context manager for the assignation of the tf's scope
and a string representing the summary name. The scope is of the form "<ident1>.<ident2>. ... .<ident3>", the
righmost identifier is used as summary name whereas the prefix is used as scope name.
:param scope: A string containing a qualified name.
:return:
"""
splits = scope.rsplit('.', 1)
if any(map(lambda v: len(v) == 0, splits)):
raise ValueError(f'Invalid scope name: {scope}')
if len(splits) == 1:
return nullcontext(), splits[0]
return tf.name_scope(splits[0]), splits[1]
SummaryFunction = Callable[[tf.Tensor, str], None]
class SummaryBase(Entity):
"""
A base template for summary entities.
"""
def __init__(self, var: TensorRef, fun: SummaryFunction, *, scope: str, name=None):
"""
Init summary entity.
:param var: The variable to the summarised.
:param fun: A callable of the form fun(tensor, name) that invokes the low level Tensorflow functions.
:param scope:
:param name: The name of this entity, note that the name of the summary is taken from parameter scope.
"""
super().__init__(name=name)
self.var = var
self.fun = fun
self.scope = scope
self.add_to_active_model()
@export
@export
@export
def summary_image(var: TensorRef, *, scope: str, name=None, **kwargs):
"""
Create an image summary entity. This function wraps tf.summary.image.
:param var: The tensor to be interpreted as image.
:param scope:
:param name: The name of the entity representing this operation, note that the identifier of the summary in
Tensorboard is determined by the parameter scope.
:param kwargs: Additional parameters to be passed to tf.summary.image.
:return:
"""
return SummaryBase(var, fun=fun, scope=scope, name=name)
| 39.366234 | 120 | 0.654097 | from .types import StaticShape, RBF, Adjacency
from .math import tf_hgrid_coords, hgrid_normalize, bezier_multi, tri_ones_lower, tf_repeat_1d
from .math import piecewise_linear_curve_closest_pts
from .math import random_uniform_hypersphere, mutual_sq_distances, gauss_rbf, rbf_factory, crbf_wendland_d3c2
from .config import TF_FLOAT, EPSILON, BEZIER_DEFAULT_DEGREE
from .model import create_entity_name, get_active_model, Entity, SingleVarContrib, TensorRef, expand_tensor_ref
from .model import stop_gradient_tape
from .utils import join_ident, export, apply_colormap
from typing import Sequence, List, Optional, Union, Dict, Callable, Tuple
import abc
import tensorflow as tf
import pandas as pd
import numpy as np
import h5py
from copy import copy
from contextlib import nullcontext
RBF_DEFAULT = 'gauss'
class Lambda(Entity):
    """
    Entity wrapping an arbitrary expression involving tensors.
    """

    def __init__(self, fun: Callable, tensor_ref: Union[TensorRef, Dict[str, TensorRef], List[TensorRef]], *,
                 name=None, enable_grad: bool = True):
        """
        Init lambda entity.
        :param fun: The callable to be wrapped.
        :param tensor_ref: Tensor ref or a list or dict of tensor refs.
        :param name: The name of the entity.
        :param enable_grad: Boolean (default true) specifying whether the gradient tape should be enabled when in
        training mode.
        """
        super().__init__(name=name)
        self.fun = fun
        self.tensor_ref = copy(tensor_ref)
        self.enable_grad = enable_grad
        self.add_to_active_model()

    def get_value(self):
        """Expand the stored tensor ref(s) and invoke the wrapped callable."""
        ref = self.tensor_ref
        if isinstance(ref, dict):
            # Expand eagerly (outside the tape context), then call with kwargs.
            expanded = {key: expand_tensor_ref(value) for key, value in ref.items()}
            with stop_gradient_tape(not self.enable_grad):
                return self.fun(**expanded)
        if isinstance(ref, list):
            with stop_gradient_tape(not self.enable_grad):
                # Generator is consumed inside the tape context, matching the
                # lazy map of the previous implementation.
                return self.fun(*(expand_tensor_ref(item) for item in ref))
        expanded = expand_tensor_ref(ref)
        with stop_gradient_tape(not self.enable_grad):
            return self.fun(expanded)

    def _var_name(self) -> str:
        # Runtime-context key under which run() stores the computed value.
        return join_ident(self.name, 'value')

    def get_value_ref(self) -> TensorRef:
        """Return a zero-arg callable fetching the cached value from the
        active model's runtime context."""
        def resolver():
            model = get_active_model()
            assert model
            return model.runtime_context[self._var_name()]
        return resolver

    def run(self):
        """Evaluate the expression and cache the result in the runtime context."""
        key = self._var_name()
        get_active_model().runtime_context[key] = self.get_value()
@export
def lambda_(fun: Callable, tensor_ref: Union[TensorRef, Dict[str, TensorRef], List[TensorRef]], *,
            name=None, enable_grad: bool = True):
    """
    Create an entity that wraps an arbitrary expression involving tensors.
    :param fun: A lambda function invoked with the expanded tensor refs declared in the parameter tensor_ref.
    :param tensor_ref: A tensor ref, a list of tensor refs or a dictionary. In the case of a dictionary the function
    will be invoked with named parameters whereas in the other cases positional parameters will be used.
    :param name: Name of the entity wrapping the expression.
    :param enable_grad: if true (default) the tensor tape is enabled while in training mode.
    :return: An entity wrapping the expression implemented in fun.
    """
    entity = Lambda(fun=fun, tensor_ref=tensor_ref, name=name, enable_grad=enable_grad)
    return entity.get_value_ref()
TensorRefs = Union[Sequence[TensorRef], TensorRef]


def tensor_refs_clone(refs: TensorRefs) -> TensorRefs:
    """
    Clone a tensor ref or a sequence of tensor refs.

    A single ref (tensor, variable or callable) is returned unchanged;
    anything else is treated as a sequence and shallow-copied into a list.
    """
    is_single_ref = isinstance(refs, (tf.Tensor, tf.Variable, Callable))
    return refs if is_single_ref else list(refs)
@export
def reduce_sum(tensor_refs: Union[Sequence[TensorRef], TensorRef], *, name=None):  # TODO find a more proper name
    """Create an entity summing all elements of the referenced tensors."""
    refs = tensor_refs_clone(tensor_refs)

    def _total(*tensors):
        return tf.reduce_sum(tf.convert_to_tensor(tensors))

    return lambda_(_total, tensor_ref=refs, name=name)
@export
def reduce_max(tensor_refs: Union[Sequence[TensorRef], TensorRef], *, name=None):  # TODO find a more proper name
    """Create an entity taking the maximum over all referenced tensors."""
    refs = tensor_refs_clone(tensor_refs)

    def _maximum(*tensors):
        return tf.reduce_max(tf.convert_to_tensor(tensors))

    return lambda_(_maximum, tensor_ref=refs, name=name)
@export
def aggregate_rasters(rasters: Union[TensorRef, List[TensorRef]], *, bias: float = 0., name=None):
    """
    Aggregate a list of images. The activation function ReLU is applied after the input tensors are stacked and added.
    :param rasters: A list of tensor refs representing the input images.
    :param bias: Bias value.
    :param name: Name of the entity.
    :return: An entity implementing the aggregation.
    """
    def adder(*values):
        present = [value for value in values if value is not None]
        total = tf.reduce_sum(tf.stack(present), axis=0)
        total += tf.convert_to_tensor(bias, dtype=TF_FLOAT)
        ret = tf.nn.relu(total)
        # ret = tf.math.exp(ret) - 1 # TODO param for lambda here
        # TODO create debug mode, in that case summarise some statistics for the raster
        return ret

    return lambda_(adder, tensor_ref=rasters, name=name)
class RBFNet:
    """
    Radial basis function network.
    """
    def __init__(self, rbf: RBF):
        """
        Init RBF network.
        :param rbf: The radial basis function, eg the value returned by degraph.math.rbf_factory(...).
        """
        self.rbf = rbf
    @staticmethod
    def create_grid(shape: StaticShape):
        """Return the float coordinates of every cell of a grid of the given shape."""
        return tf.cast(tf_hgrid_coords(shape), dtype=TF_FLOAT)
    @tf.function
    def __call__(self, shape: StaticShape, centres: tf.Tensor):
        """Evaluate the RBF mixture on a grid of the given shape.

        The rbf is applied to the squared distance of each grid cell from each
        centre and then summed over the centres.
        NOTE(review): centres is assumed to be [centre_count, dim] with
        dim == len(shape) -- confirm with callers.
        """
        grid = tf.cast(tf.expand_dims(tf_hgrid_coords(shape), axis=1), dtype=TF_FLOAT)
        grid = tf.reduce_sum(tf.square(grid - centres), axis=-1)  # squared distances per (cell, centre)
        grid = self.rbf(grid)
        grid = tf.reduce_sum(grid, axis=-1)  # form a mixture
        grid = tf.reshape(grid, shape=shape)  # reshape values to an image
        return grid
    @tf.function
    def grid_lines_on_grid(self, shape: StaticShape, centres: tf.Tensor):
        """
        Apply radial basis function on a grid using distances from lines aligned with the axes.
        """
        grid_acc = tf.zeros(shape=tf.reduce_prod(shape), dtype=TF_FLOAT)
        grid = tf.cast(tf.expand_dims(tf_hgrid_coords(shape), axis=1), dtype=TF_FLOAT)
        # One pass per axis: squared distance along that axis only, so each
        # centre contributes an axis-aligned "line" response.
        for k in range(centres.shape[1]):
            level = tf.squeeze(tf.square(grid[:, :, k:k+1] - centres[:, k:k+1]), axis=-1)
            level = self.rbf(level)
            level = tf.reduce_sum(level, axis=-1)  # form a mixture
            grid_acc += level
        grid_acc = tf.reshape(grid_acc, shape=shape)  # reshape values to an image
        return grid_acc
class RBFNetEntityBase(Entity):
    """Base entity producing a raster layer from a Gaussian RBF network whose
    centres are supplied by subclasses via get_centres()."""
    def __init__(self, peak: float = 1.0, spread: float = 1.0, name=None):
        """
        :param peak: Peak value of each Gaussian.
        :param spread: Spread (sigma) of each Gaussian.
        :param name: The name of the entity.
        """
        super().__init__(name=name)
        self.peak = peak
        self.spread = spread
        self._rbf_net = None  # lazily (re)built by update_rbf_net()
    def update_rbf_net(self, force: bool = True):
        """(Re)build the internal RBFNet; with force=False an existing net is kept."""
        if not force and self._rbf_net is not None:
            return
        peak = tf.convert_to_tensor(self.peak, dtype=TF_FLOAT)
        assert tf.rank(peak) == 0
        spread = tf.convert_to_tensor(self.spread, dtype=TF_FLOAT)
        assert tf.rank(spread) == 0
        # TODO use Normal core, use partial also try truncated normal
        self._rbf_net = RBFNet(rbf=lambda values: tf.exp(-values/(2.0 * tf.square(spread))) * peak)
        # self._rbf_net = RBFNet(rbf=lambda values: crbf_wendland_d3c2(values, sq_r=tf.square(spread)) * peak)
    # TODO two-phases: get_centres and get_layer, in the middle optionally normalize
    @abc.abstractmethod
    def get_centres(self) -> tf.Tensor:
        """
        Returns a tensor containing the normalized coordinates of the Gaussian centres. The coordinates are expected
        to belong to the interval [-1., 1.].
        :return:
        """
        pass
    def get_layer(self, shape: StaticShape) -> tf.Tensor:
        """Rasterize the RBF mixture on a grid of the given shape."""
        centres = self.get_centres()
        assert len(shape) == centres.shape[-1] # Assert centres and grid have the same dimension
        # TODO assert centres.shape compatibility with len(shape)
        # Note centres coordinates are assumed in the interval [-1., 1.]
        centres = (centres + 1.)/2. * tf.convert_to_tensor(shape, dtype=TF_FLOAT) # Scale to layer coordinates size
        self.update_rbf_net(force=False)
        grid = self._rbf_net(shape=shape, centres=centres)
        return grid
@export
class GraphRepr: # TODO extend tf.Module?
"""
Symmetric directed graph representation.
"""
# TODO Another ideas would be of getting rid of this class and have individual entities for var and other op,
# this is in line with the idea of using other type of coordinates (eg. polar).
HDF5_SER_TYPE = 'degraph.GraphRepr'
DEF_NAME_PREFIX = 'graph_def'
    def __init__(self, adjacency: Adjacency, dim: int = 2, name=None, **kwargs):
        """
        Init the graph representation object.
        :param adjacency: Adjacency matrix, this can be either a Numpy array, a Pandas DataFrame or a tensor.
        :param dim: Number of spatial dimensions of this representation, currently many internal components are limited
        to the 2D case it is the only option.
        :param name:
        :param kwargs:
        :raises ValueError: If dim is unsupported, extra kwargs are passed or
        the adjacency matrix has an unsupported type.
        """
        # TODO optional param positions_generator (space positions)
        self._name = create_entity_name(name, prefix=self.DEF_NAME_PREFIX)
        self._static = False
        # '_internal_skip_init' is used by copy() to build a shell instance
        # without running the validation/variable-creation below.
        if '_internal_skip_init' in kwargs:
            return
        if len(kwargs) != 0:
            raise ValueError(f'Unsupported extra parameters: {kwargs.keys()}')
        dim = int(dim)
        assert dim >= 1
        if dim not in (2, 3):
            raise ValueError(f'Space dimensions not supported: {dim}')
        # Normalize the adjacency input to a tensor.
        if isinstance(adjacency, pd.DataFrame):
            assert adjacency.index == adjacency.columns
            adjacency = np.array(adjacency)
        if isinstance(adjacency, np.ndarray):
            adjacency = tf.convert_to_tensor(adjacency)
        if not isinstance(adjacency, (tf.Tensor, tf.Variable)):
            raise ValueError(f'Unsupported type for adjacency matrix: {type(adjacency)}')
        assert len(adjacency.shape) == 2 and adjacency.shape[0] == adjacency.shape[1]
        # Mask upper triangular values of the adjacency matrix
        adjacency = tf.cast(adjacency, dtype=TF_FLOAT) * tf.cast(tri_ones_lower(adjacency.shape[0]), dtype=TF_FLOAT)
        adjacency /= tf.reduce_max(tf.abs(adjacency)) + EPSILON # Normalize edge abs weights
        edge_count = tf.math.count_nonzero(adjacency)
        edge_extreme_indexes = tf.where(adjacency)
        assert edge_count == edge_extreme_indexes.shape[0] and edge_extreme_indexes.shape[1] == 2
        self.adjacency = adjacency
        # The indexes of the extreme points of each edge. The indexes point at the first axis of the variable positions.
        self.edge_extreme_indexes = edge_extreme_indexes
        # Trainable variables: vertex positions plus the internal Bezier
        # control points of every edge, both initialized at random.
        with tf.name_scope(self.name):
            self.positions = tf.Variable(random_uniform_hypersphere(size=adjacency.shape[0], dim=dim), trainable=True,
                                         name='positions')
            total_ctrl_point_count = edge_extreme_indexes.shape[0] * ((BEZIER_DEFAULT_DEGREE+1)-2)
            ctrl_points_init = tf.reshape(random_uniform_hypersphere(size=total_ctrl_point_count, dim=dim),
                                          shape=(-1, (BEZIER_DEFAULT_DEGREE+1)-2, dim))
            self.edge_internal_ctrl_points = tf.Variable(ctrl_points_init, trainable=True,
                                                         name='edge_internal_ctrl_points')
    def empty_positions(self) -> tf.Tensor:
        """Return an empty positions tensor of shape [0, dim] (i.e. zero points)."""
        return tf.zeros(shape=(0, self.dim), dtype=TF_FLOAT)
    @property
    def name(self):
        """The entity name of this graph representation (read-only)."""
        return self._name
    @property
    def static(self) -> bool:
        """
        Boolean property, true if the current graph representation is static (i.e. the positions are plain
        tensors rather than tf.Variable instances, hence not trainable).

        :return: True when this representation is a static snapshot.
        """
        return self._static
def copy(self, *, static=False, name: Optional[str] = None):
"""
Clone the current instance of GraphRepr with the excpection of the static flag which is passed through the
arguments.
:param static: When static is set the variables are transformed to static tensors in the destination object.
This is useful to take snapshots of the status.
:param name: Optional name for the graph, otherwise an automatic one is generated.
:return:
"""
obj = GraphRepr(np.asarray(0), _internal_skip_init=True)
obj.adjacency = self.adjacency
obj.edge_extreme_indexes = self.edge_extreme_indexes
obj._static = static
obj._name = create_entity_name(name, prefix=self.DEF_NAME_PREFIX)
# Convert variables to tensors, note that when GraphRepr.static is set these are already tensors.
positions = tf.convert_to_tensor(self.positions)
edge_internal_ctrl_points = tf.convert_to_tensor(self.edge_internal_ctrl_points)
if static:
obj.positions = positions
obj.edge_internal_ctrl_points = edge_internal_ctrl_points
else:
obj.positions = tf.Variable(positions, trainable=True)
obj.edge_internal_ctrl_points = tf.Variable(edge_internal_ctrl_points, trainable=True)
return obj
def get_ctrl_points_vars(self) -> List[tf.Variable]:
"""
Get the control point variables relative to the vertexes and the edges.
:return: A list of elements of type tf.Variable.
"""
if self.static:
return []
return [self.positions, self.edge_internal_ctrl_points]
def autoscale_on_vertexes(self, center_scale: float = 1.):
points = self.get_positions()
limits = tf.reduce_min(points), tf.reduce_max(points)
def get_update(value):
return tf.math.divide_no_nan(value - limits[0],
limits[1] - limits[0]) * center_scale + (1. - center_scale)/2.
if self.static:
self.positions = get_update(self.positions)
self.edge_internal_ctrl_points = get_update(self.edge_internal_ctrl_points)
else:
for var in self.get_ctrl_points_vars():
var.assign(get_update(var.value()))
return self
def serialize(self, fobj):
"""
Serialize the current object in HDF5 format using the file-like object provided.
:param fobj: A file-like object
:return:
"""
with h5py.File(fobj, mode='w') as f:
f['type'] = self.HDF5_SER_TYPE
f['adjacency'] = self.adjacency.numpy()
f['edge_extreme_indexes'] = self.edge_extreme_indexes.numpy()
f['positions'] = self.positions.numpy()
f['edge_internal_ctrl_points'] = self.edge_internal_ctrl_points.numpy()
    @property
    def dim(self) -> int:
        """
        The number of spatial dimensions of this representation.

        :return: Size of the last axis of the vertex positions tensor.
        """
        return self.positions.shape[1]
    def get_positions(self) -> tf.Tensor:
        """
        Get a tensor containing the positions of the vertexes. The expected shape is [pt_count, dim].

        :return: The positions as a plain tensor (read-only snapshot of the variable).
        """
        # TODO optionally we may include a calculation here, eg polar coordinates to cartesian
        return tf.convert_to_tensor(self.positions)
def get_positions_ref(self) -> TensorRef:
"""
Get a tensor ref relative to the positions of the vertexes. See get_positions().
:return:
"""
def fun() -> tf.Tensor:
return self.get_positions()
return fun
def get_edges_ctrl_points(self) -> tf.Tensor:
"""
Get the control points of the edges, the shape of the tensor is [edge_count, ctrl_point_count, dim]
:return:
"""
# edge_extreme_points, shape: [edge_count, 2, dim]
edge_extreme_points = tf.gather(self.positions, indices=self.edge_extreme_indexes)
# Compose a tensor with edge's end point positions and internal control points
return tf.concat([edge_extreme_points[:, 0:1, :], # start points
self.edge_internal_ctrl_points, # internal points
edge_extreme_points[:, 1:2, :]], axis=1) # end points
def get_edges_ctrl_points_ref(self) -> TensorRef:
def fun() -> tf.Tensor:
return self.get_edges_ctrl_points()
return fun
class Vertexes(SingleVarContrib):
    """
    Entity representing the vertexes of a graph.
    """

    def __init__(self, graph: GraphRepr, *, trainable: bool = True, name=None):
        """
        :param graph: The graph whose vertexes this entity exposes.
        :param trainable: Whether the vertex positions are reported as trainable variables.
        :param name: Optional entity name.
        """
        super().__init__(name=name)
        self.graph = graph
        self.trainable = bool(trainable)
        self.add_to_active_model()

    def get_trainable_variables(self) -> Optional[Sequence[tf.Tensor]]:
        """Return the positions variable when trainable, otherwise an empty list."""
        if self.trainable:
            return [self.graph.positions]
        return []

    def get_value(self) -> tf.Tensor:
        """Return the vertex positions as a plain tensor."""
        return tf.convert_to_tensor(self.graph.positions)
@export
def vertexes(graph: GraphRepr, *, trainable: bool = True, name=None) -> TensorRef:
    """
    Create an entity that represents the vertexes of a graph.

    :param graph: The graph object.
    :param trainable: If true the variable relative to the positions of the vertexes is marked as trainable.
    :param name: The name of the entity.
    :return: A tensor ref resolving to the vertex positions.
    """
    entity = Vertexes(graph=graph, trainable=trainable, name=name)
    return entity.get_value_ref()
class PiecewiseLinearEdges(SingleVarContrib):
    """
    Entity that samples the Bezier curve of every graph edge at a fixed number of
    parametric steps, yielding a piecewise linear approximation of the edges.
    """

    def __init__(self, graph: GraphRepr, *, trainable: bool = True, steps: int = 25, space_radius: float = 10.,
                 name=None):
        """
        :param graph: The graph whose edges are sampled.
        :param trainable: Whether the internal edge control points are reported as trainable.
        :param steps: Number of parametric samples taken along each edge.
        :param space_radius: Radius of the embedding space (stored for use by related components).
        :param name: Optional entity name.
        """
        super().__init__(name=name)
        self.graph = graph
        self.trainable = bool(trainable)
        self.steps = steps
        self.space_radius = space_radius
        self.last_pts = graph.empty_positions()
        self.add_to_active_model()

    def get_trainable_variables(self) -> Optional[Sequence[tf.Tensor]]:
        """Return the edge control point variable when trainable, otherwise an empty list."""
        if self.trainable:
            return [self.graph.edge_internal_ctrl_points]
        return []

    @staticmethod
    @tf.function
    def _bezier(ctrl_points, t):
        """Graph-compiled evaluation of a batch of Bezier curves at parameters t."""
        return bezier_multi(ctrl_points=ctrl_points, t=t)

    def get_value(self) -> tf.Tensor:
        """Sample every edge's Bezier curve at `steps` evenly spaced parameters in [0, 1]."""
        # TODO selector between stochastic and regular sampling; note that parameters too
        # close together can create problems for the gradient.
        samples = tf.linspace(0., 1., self.steps)
        return self._bezier(ctrl_points=self.graph.get_edges_ctrl_points(), t=samples)
@export
def piecewise_linear_edges(graph: GraphRepr, *, trainable: bool = True, steps: int = 25, space_radius: float = 10.,
                           name=None) -> TensorRef:
    """
    Create an entity that samples the graph edges as piecewise linear curves.

    :param graph: The graph object.
    :param trainable: If true the internal edge control points are marked as trainable.
    :param steps: Number of samples per edge.
    :param space_radius: Radius of the embedding space.
    :param name: The name of the entity.
    :return: A tensor ref resolving to the sampled edge points.
    """
    entity = PiecewiseLinearEdges(graph, trainable=trainable, steps=steps, space_radius=space_radius, name=name)
    return entity.get_value_ref()
@export
def unit_sphere_bounds_loss(points_tensor_ref: TensorRef, *, factor: float = 1.0):
    """
    Get a loss that penalises points laying outside the unit hyper-sphere centred in the origin.

    :param points_tensor_ref: A tensor ref containing the coordinates of the points.
    :param factor: A multiplicative factor for the loss; non-positive disables the loss.
    :return: An entity implementing the loss.
    """
    @tf.function
    def fun(points: tf.Tensor):
        # A non-positive factor disables the loss: contribute a constant zero.
        if factor <= 0.:
            return tf.convert_to_tensor(0., dtype=TF_FLOAT)
        # TODO assert rank, support shape [..., dim]
        # relu(|p|^2 - 1) is zero inside the unit sphere and grows with the squared excess outside it.
        return factor * tf.reduce_sum(tf.nn.relu(tf.reduce_sum(tf.square(points), axis=-1) - 1.0))
    return lambda_(fun, points_tensor_ref)
class VertexDistancesLoss(SingleVarContrib): # TODO remove, create simple func with lambda
    """
    Loss entity that penalises points lying close together: it sums a Gaussian RBF of the
    mutual squared distances, measured in the active model's layer coordinates.
    """
    def __init__(self, points_tensor_ref: TensorRef, *, factor: float = 1.0, spread: float = 1.0):
        """
        :param points_tensor_ref: Tensor ref with the point coordinates; values are affinely mapped
            from [-1, 1] to the active model's shape before measuring distances.
        :param factor: Multiplicative factor for the loss; non-positive disables the loss.
        :param spread: Spread parameter of the Gaussian RBF.
        """
        super().__init__()
        self.points_tensor_ref = points_tensor_ref
        self.factor = factor
        self.spread = spread
        self.add_to_active_model()
    def get_value(self):
        """Compute the loss value; returns a plain 0. when the factor is non-positive."""
        if self.factor <= 0.:
            return 0.
        model = get_active_model()
        assert model is not None
        pts = expand_tensor_ref(self.points_tensor_ref) # TODO assert rank
        # TODO set tf.function here!
        # Scale to layer coordinates size
        positions = tf.add(pts, 1.) / 2. * tf.convert_to_tensor(model.shape, dtype=TF_FLOAT)
        distances = mutual_sq_distances(positions)
        # Sum of Gaussian responses: large when many points are close relative to `spread`.
        distances = tf.reduce_sum(gauss_rbf(sq_d=distances, spread=self.spread))
        return distances * self.factor
@export
def mse_loss(value_ref: TensorRef, *, factor: float = 1.0) -> TensorRef:
    """
    Get a MSE Loss.

    :param value_ref: The input tensor.
    :param factor: A multiplicative factor for the loss; non-positive disables the loss.
    :return: An entity implementing the loss.
    """
    @tf.function
    def fun(value: tf.Tensor):
        # A non-positive factor disables the loss: contribute a constant zero.
        if factor <= 0.:
            return tf.convert_to_tensor(0., dtype=TF_FLOAT)
        value = tf.reshape(value, shape=(-1, ))
        # TODO Optional param mask, basically it should mask the values that are part of the mean.
        # Scaling by sqrt(factor) before squaring is algebraically equivalent to multiplying
        # the mean of squares by factor:
        # return tf.reduce_mean(tf.square(value)) * factor
        return tf.reduce_mean(tf.square(value * tf.sqrt(factor)))
    return lambda_(fun, value_ref)
@export
def sse_loss(value_ref: TensorRef, *, factor: float = 1.0) -> TensorRef:
    """
    Get a sum of squares loss.

    :param value_ref: The input tensor.
    :param factor: A multiplicative factor for the loss; non-positive disables the loss.
    :return: An entity implementing the loss.
    """
    @tf.function
    def loss_fn(value: tf.Tensor):
        if factor <= 0.:
            return tf.convert_to_tensor(0., dtype=TF_FLOAT)
        flat = tf.reshape(value, shape=(-1,))
        return factor * tf.reduce_sum(tf.square(flat))
    return lambda_(loss_fn, value_ref)
class RBFNetRaster(SingleVarContrib):
    """
    An entity that creates a raster using a radial basis function network.
    """

    def __init__(self, points_tensor_ref: TensorRef, *, shape: StaticShape,
                 rbf: str = RBF_DEFAULT, peak: float = 1.0, spread: float = 1.0, name=None):
        """
        Init the entity.

        :param points_tensor_ref: A tensor ref referencing a tensor with expected shape: [point_count, dim].
            The points are used as centres of the radial basis functions.
        :param shape: The shape of the raster.
        :param rbf: The rbf to be used, see degraph.math.rbf_factory.
        :param peak: The peak of the RBF.
        :param spread: The spread of the RBF.
        :param name: The name of the entity.
        """
        super().__init__(name=name)
        self.points_tensor_ref = points_tensor_ref
        self.shape = shape
        self.rbf = rbf
        self.peak = peak
        self.spread = spread
        self._rbf_net = None  # Lazily built by update_rbf_net().
        self.add_to_active_model()

    def update_rbf_net(self, force: bool = True):
        """(Re)build the internal RBFNet; when force is False an existing network is kept."""
        if self._rbf_net is not None and not force:
            return
        peak = tf.convert_to_tensor(self.peak, dtype=TF_FLOAT)
        assert tf.rank(peak) == 0
        spread = tf.convert_to_tensor(self.spread, dtype=TF_FLOAT)
        assert tf.rank(spread) == 0
        self._rbf_net = RBFNet(rbf=rbf_factory(self.rbf, peak=peak, spread=spread))

    def get_raster(self) -> tf.Tensor:
        """Evaluate the RBF network over a grid of the configured shape and return the raster."""
        centres = expand_tensor_ref(self.points_tensor_ref)  # TODO assert rank
        raster_shape = self.shape
        # Centres and grid must live in the same spatial dimension.
        assert len(raster_shape) == centres.shape[-1]
        # TODO assert centres.shape compatibility
        # Centre coordinates are assumed in [-1, 1]; map them to layer (raster) coordinates.
        centres = (centres + 1.) / 2. * tf.convert_to_tensor(raster_shape, dtype=TF_FLOAT)
        self.update_rbf_net(force=False)
        return self._rbf_net(shape=raster_shape, centres=centres)

    def get_value(self) -> tf.Tensor:
        """Alias of get_raster()."""
        return self.get_raster()
@export
def rbf_net_raster(points_tensor_ref: TensorRef, *, shape: StaticShape, rbf: str = RBF_DEFAULT,
                   peak: float = 1.0, spread: float = 1.0):
    """
    Create a raster entity driven by a radial basis function network.

    :param points_tensor_ref: Tensor ref with the RBF centres, expected shape [point_count, dim].
    :param shape: The shape of the raster.
    :param rbf: The rbf to be used, see degraph.math.rbf_factory.
    :param peak: The peak of the RBF.
    :param spread: The spread of the RBF.
    :return: A tensor ref resolving to the raster.
    """
    entity = RBFNetRaster(points_tensor_ref, rbf=rbf, shape=shape, peak=peak, spread=spread)
    return entity.get_value_ref()
class RBFSegNetRaster(SingleVarContrib):
    """
    Entity that rasterises a set of piecewise linear segments through a radial basis function
    network: each grid cell accumulates the RBF response of its distance to every segment.
    """
    def __init__(self, segments_tensor_ref: TensorRef, *, shape: StaticShape,
                 rbf: str = RBF_DEFAULT, peak: float = 1.0, spread: float = 1.0, name=None):
        """
        :param segments_tensor_ref: Tensor ref with the segments' control points (coordinates
            assumed in [-1, 1], see get_raster()).
        :param shape: The shape of the raster.
        :param rbf: The rbf to be used, see rbf_factory.
        :param peak: The peak of the RBF.
        :param spread: The spread of the RBF.
        :param name: The name of the entity.
        """
        super().__init__(name=name)
        self.segments_tensor_ref = segments_tensor_ref
        self.shape = shape
        self.rbf = rbf
        self.peak = peak
        # Fraction of segments rendered on each evaluation; 1.0 renders all of them.
        self.subsample = 1.0
        self.spread = spread
        self._rbf_net = None  # Lazily built by update_rbf_net().
        self.add_to_active_model()
    def update_rbf_net(self, force: bool = True):
        """(Re)build the internal RBFNet; when force is False an existing network is kept."""
        if not force and self._rbf_net is not None:
            return
        peak = tf.convert_to_tensor(self.peak, dtype=TF_FLOAT)
        assert tf.rank(peak) == 0
        spread = tf.convert_to_tensor(self.spread, dtype=TF_FLOAT)
        assert tf.rank(spread) == 0
        self._rbf_net = RBFNet(rbf=rbf_factory(self.rbf, peak=peak, spread=spread))
    def get_raster(self) -> tf.Tensor:
        """Render all (or a random subsample of) the segments onto a raster of the configured shape."""
        segments = expand_tensor_ref(self.segments_tensor_ref) # TODO assert rank
        shape = self.shape
        assert len(shape) == segments.shape[-1] # Assert centres and grid have the same dimension
        # Note centres coordinates are assumed in the interval [-1., 1.]
        segments = (segments + 1.)/2. * tf.convert_to_tensor(shape, dtype=TF_FLOAT) # Scale to layer coordinates size
        self.update_rbf_net(force=False)
        grid_coords = self._rbf_net.create_grid(shape=shape)
        subsample = self.subsample
        if subsample != 1.0:
            # The random selection itself must not be differentiated through.
            with stop_gradient_tape():
                segments = tf.random.shuffle(segments)[:int(subsample*len(segments))]
        # TODO tf.function
        raster = tf.zeros(shape=shape, dtype=TF_FLOAT)
        for segment in segments:
            # Distance from every grid point to the current piecewise linear segment...
            layer = piecewise_linear_curve_closest_pts(curve_pts=segment, centers=grid_coords)
            # ...converted to an intensity by the RBF and accumulated into the raster.
            layer = self._rbf_net.rbf(layer)
            layer = tf.reduce_sum(layer, axis=-1)
            raster += tf.reshape(layer, shape=shape)
        return raster
    def get_value(self) -> tf.Tensor:
        """Alias of get_raster()."""
        return self.get_raster()
@export
def rbf_segnet_raster(points_tensor_ref: TensorRef, *, shape: StaticShape, rbf: str = RBF_DEFAULT,
                      peak: float = 1.0, spread: float = 1.0):
    """
    Create a raster entity that renders segments through a radial basis function network.

    :param points_tensor_ref: Tensor ref with the segments' control points.
    :param shape: The shape of the raster.
    :param rbf: The rbf to be used, see rbf_factory. Added for consistency with rbf_net_raster();
        the default matches the previous (implicit) behaviour.
    :param peak: The peak of the RBF.
    :param spread: The spread of the RBF.
    :return: A tensor ref resolving to the raster.
    """
    obj = RBFSegNetRaster(points_tensor_ref, shape=shape, rbf=rbf, peak=peak, spread=spread)
    return obj.get_value_ref()
def _scope_prepare(scope: str) -> Tuple[object, str]:
"""
Parse a scope string a return a tuple consisting of context manager for the assignation of the tf's scope
and a string representing the summary name. The scope is of the form "<ident1>.<ident2>. ... .<ident3>", the
righmost identifier is used as summary name whereas the prefix is used as scope name.
:param scope: A string containing a qualified name.
:return:
"""
splits = scope.rsplit('.', 1)
if any(map(lambda v: len(v) == 0, splits)):
raise ValueError(f'Invalid scope name: {scope}')
if len(splits) == 1:
return nullcontext(), splits[0]
return tf.name_scope(splits[0]), splits[1]
# Signature shared by the low-level summary writers: fun(tensor, summary_name) -> None.
SummaryFunction = Callable[[tf.Tensor, str], None]
class SummaryBase(Entity):
    """
    A base template for summary entities.
    """

    def __init__(self, var: TensorRef, fun: SummaryFunction, *, scope: str, name=None):
        """
        Init summary entity.

        :param var: The variable to be summarised.
        :param fun: A callable of the form fun(tensor, name) that invokes the low level Tensorflow functions.
        :param scope: Qualified summary name, parsed by _scope_prepare().
        :param name: The name of this entity, note that the name of the summary is taken from parameter scope.
        """
        super().__init__(name=name)
        self.var = var
        self.fun = fun
        self.scope = scope
        self.add_to_active_model()

    def run(self):
        """Emit the summary for the current value; scalar values are also logged to the model history."""
        model = get_active_model()
        if model is None:
            return
        current = expand_tensor_ref(self.var)
        scope_ctx, summary_name = _scope_prepare(self.scope)
        # Summaries must not contribute to the gradient tape.
        with stop_gradient_tape(), scope_ctx:
            self.fun(current, summary_name)
        if tf.rank(current) == 0:
            model.update_history_rec({self.scope: float(current)})
@export
def summary_histogram(var: TensorRef, *, scope: str, name=None):
    """Create a histogram summary entity for *var*; the summary identifier comes from *scope*."""
    def write(value, summary_name):
        tf.summary.histogram(name=summary_name, data=value)
    return SummaryBase(var, fun=write, scope=scope, name=name)
@export
def summary_scalar(var: TensorRef, *, scope: str, name=None):
    """Create a scalar summary entity for *var*; the summary identifier comes from *scope*."""
    def write(value, summary_name):
        tf.summary.scalar(name=summary_name, data=value)
    return SummaryBase(var, fun=write, scope=scope, name=name)
@export
def summary_image(var: TensorRef, *, scope: str, name=None, **kwargs):
    """
    Create an image summary entity. This function wraps tf.summary.image.

    :param var: The tensor to be interpreted as image.
    :param scope: Qualified identifier of the summary shown in Tensorboard.
    :param name: The name of the entity representing this operation, note that the identifier of the summary in
        Tensorboard is determined by the parameter scope.
    :param kwargs: Additional parameters to be passed to tf.summary.image.
    :return: The summary entity.
    """
    def fun(value, name_):
        # Note the tape is not recording here (see SummaryBase.run)
        value = apply_colormap(value)
        if len(value.shape) == 3:
            # Add the batch axis expected by tf.summary.image ([k, h, w, c]).
            value = tf.expand_dims(value, axis=0)
        assert len(value.shape) == 4
        # Fixed: a stray trailing comma previously turned this call into a discarded 1-tuple.
        tf.summary.image(name=name_, data=value, **kwargs)
    return SummaryBase(var, fun=fun, scope=scope, name=name)
| 12,304 | 714 | 1,022 |
8287ee5acde6360cef61dda364580736932901e4 | 3,397 | py | Python | tests/test_driver.py | highlight-slm/Draytek-Web-Auto-Configuration | 0b3e96b892fb332a1252fc231b30561b2374071f | [
"MIT"
] | null | null | null | tests/test_driver.py | highlight-slm/Draytek-Web-Auto-Configuration | 0b3e96b892fb332a1252fc231b30561b2374071f | [
"MIT"
] | 2 | 2020-02-13T07:41:55.000Z | 2020-03-02T21:56:12.000Z | tests/test_driver.py | highlight-slm/Draytek-Web-Auto-Configuration | 0b3e96b892fb332a1252fc231b30561b2374071f | [
"MIT"
] | 2 | 2019-07-05T15:09:07.000Z | 2021-09-06T23:50:33.000Z | # import unittest
# from unittest.mock import patch
# from selenium.common.exceptions import WebDriverException
# from draytekwebadmin.driver import load_driver, unload_driver
# class TestDriver(unittest.TestCase):
# def setUp(self):
# pass
# def tearDown(self):
# pass
# @patch("selenium.webdriver.FirefoxOptions", autospec=True)
# @patch("selenium.webdriver.Firefox", autospec=True)
# def test_Load_Driver_Named_Firefox(self, mock_firefox, mock_FirefoxOptions):
# load_driver(browser_name="firefox")
# self.assertTrue(mock_firefox.called)
# self.assertTrue(mock_FirefoxOptions.called)
# self.assertTrue(mock_firefox().maximize_window.called)
# self.assertFalse(mock_FirefoxOptions().headless)
# @patch("selenium.webdriver.FirefoxOptions", autospec=True)
# @patch("selenium.webdriver.Firefox", autospec=True)
# def test_Load_Driver_Named_Firefox_Headless(
# self, mock_firefox, mock_FirefoxOptions
# ):
# load_driver(browser_name="firefox", headless=True)
# self.assertTrue(mock_firefox.called)
# self.assertTrue(mock_FirefoxOptions.called)
# self.assertTrue(mock_FirefoxOptions().headless)
# @patch("selenium.webdriver.ChromeOptions", autospec=True)
# @patch("selenium.webdriver.Chrome", autospec=True)
# def test_Load_Driver_Named_Chrome(self, mock_chrome, mock_ChromeOptions):
# load_driver(browser_name="chrome")
# self.assertTrue(mock_chrome.called)
# self.assertTrue(mock_ChromeOptions.called)
# self.assertFalse(mock_ChromeOptions().headless)
# @patch("selenium.webdriver.ChromeOptions", autospec=True)
# @patch("selenium.webdriver.Chrome", autospec=True)
# def test_Load_Driver_Named_Chrome_Headless(self, mock_chrome, mock_ChromeOptions):
# load_driver(browser_name="chrome", headless=True)
# self.assertTrue(mock_chrome.called)
# self.assertTrue(mock_ChromeOptions.called)
# self.assertTrue(mock_ChromeOptions().headless)
# @patch("selenium.webdriver.Edge", autospec=True)
# def test_Load_Driver_Named_Edge(self, mock_edge):
# load_driver(browser_name="edge")
# self.assertTrue(mock_edge.called)
# @patch("draytekwebadmin.driver.load_firefox", autospec=True)
# def test_Load_Driver_Unspecified_Browser(self, mock_load_firefox):
# load_driver()
# self.assertTrue(mock_load_firefox.called)
# def test_Load_Driver_Unsupported_Browser(self):
# with self.assertRaises(Exception):
# load_driver(browser_name="NonExistent Browser")
# @patch("draytekwebadmin.driver.load_firefox", side_effect=WebDriverException())
# @patch("draytekwebadmin.driver.load_chrome", side_effect=WebDriverException())
# @patch("draytekwebadmin.driver.load_edge", side_effect=WebDriverException())
# def test_Load_Driver_FallThrough(self, mock_edge, mock_chrome, mock_firefox):
# with self.assertRaises(Exception) as cm:
# load_driver()
# self.assertEqual(
# "Unable to find suitable browser. Error Message: None",
# str(cm.exception).rstrip(),
# )
# @patch("selenium.webdriver.firefox")
# def test_Unload_Driver(self, mock_firefox):
# unload_driver(mock_firefox())
# self.assertTrue(mock_firefox().quit.called)
| 43 | 88 | 0.707683 | # import unittest
# from unittest.mock import patch
# from selenium.common.exceptions import WebDriverException
# from draytekwebadmin.driver import load_driver, unload_driver
# class TestDriver(unittest.TestCase):
# def setUp(self):
# pass
# def tearDown(self):
# pass
# @patch("selenium.webdriver.FirefoxOptions", autospec=True)
# @patch("selenium.webdriver.Firefox", autospec=True)
# def test_Load_Driver_Named_Firefox(self, mock_firefox, mock_FirefoxOptions):
# load_driver(browser_name="firefox")
# self.assertTrue(mock_firefox.called)
# self.assertTrue(mock_FirefoxOptions.called)
# self.assertTrue(mock_firefox().maximize_window.called)
# self.assertFalse(mock_FirefoxOptions().headless)
# @patch("selenium.webdriver.FirefoxOptions", autospec=True)
# @patch("selenium.webdriver.Firefox", autospec=True)
# def test_Load_Driver_Named_Firefox_Headless(
# self, mock_firefox, mock_FirefoxOptions
# ):
# load_driver(browser_name="firefox", headless=True)
# self.assertTrue(mock_firefox.called)
# self.assertTrue(mock_FirefoxOptions.called)
# self.assertTrue(mock_FirefoxOptions().headless)
# @patch("selenium.webdriver.ChromeOptions", autospec=True)
# @patch("selenium.webdriver.Chrome", autospec=True)
# def test_Load_Driver_Named_Chrome(self, mock_chrome, mock_ChromeOptions):
# load_driver(browser_name="chrome")
# self.assertTrue(mock_chrome.called)
# self.assertTrue(mock_ChromeOptions.called)
# self.assertFalse(mock_ChromeOptions().headless)
# @patch("selenium.webdriver.ChromeOptions", autospec=True)
# @patch("selenium.webdriver.Chrome", autospec=True)
# def test_Load_Driver_Named_Chrome_Headless(self, mock_chrome, mock_ChromeOptions):
# load_driver(browser_name="chrome", headless=True)
# self.assertTrue(mock_chrome.called)
# self.assertTrue(mock_ChromeOptions.called)
# self.assertTrue(mock_ChromeOptions().headless)
# @patch("selenium.webdriver.Edge", autospec=True)
# def test_Load_Driver_Named_Edge(self, mock_edge):
# load_driver(browser_name="edge")
# self.assertTrue(mock_edge.called)
# @patch("draytekwebadmin.driver.load_firefox", autospec=True)
# def test_Load_Driver_Unspecified_Browser(self, mock_load_firefox):
# load_driver()
# self.assertTrue(mock_load_firefox.called)
# def test_Load_Driver_Unsupported_Browser(self):
# with self.assertRaises(Exception):
# load_driver(browser_name="NonExistent Browser")
# @patch("draytekwebadmin.driver.load_firefox", side_effect=WebDriverException())
# @patch("draytekwebadmin.driver.load_chrome", side_effect=WebDriverException())
# @patch("draytekwebadmin.driver.load_edge", side_effect=WebDriverException())
# def test_Load_Driver_FallThrough(self, mock_edge, mock_chrome, mock_firefox):
# with self.assertRaises(Exception) as cm:
# load_driver()
# self.assertEqual(
# "Unable to find suitable browser. Error Message: None",
# str(cm.exception).rstrip(),
# )
# @patch("selenium.webdriver.firefox")
# def test_Unload_Driver(self, mock_firefox):
# unload_driver(mock_firefox())
# self.assertTrue(mock_firefox().quit.called)
| 0 | 0 | 0 |
fa662ee8b73ba35f56d3832035f14a2107cb7243 | 632 | py | Python | tests/unit/test_cli.py | gtmanfred/teststack | c7f671b45b81a036abcb21df6f1ef26c8a138e93 | [
"Apache-2.0"
] | 1 | 2021-11-09T18:44:40.000Z | 2021-11-09T18:44:40.000Z | tests/unit/test_cli.py | gtmanfred/teststack | c7f671b45b81a036abcb21df6f1ef26c8a138e93 | [
"Apache-2.0"
] | 2 | 2021-11-11T17:43:42.000Z | 2022-03-08T19:26:31.000Z | tests/unit/test_cli.py | gtmanfred/teststack | c7f671b45b81a036abcb21df6f1ef26c8a138e93 | [
"Apache-2.0"
] | null | null | null | import sys
import toml
import pytest
from teststack import cli
from teststack import import_commands
from teststack.errors import IncompatibleVersionError
| 26.333333 | 70 | 0.707278 | import sys
import toml
import pytest
from teststack import cli
from teststack import import_commands
from teststack.errors import IncompatibleVersionError
def test_import_commands():
    """Importing the command plugins must register their modules in sys.modules."""
    import_commands()
    assert "teststack.commands.containers" in sys.modules
    assert "teststack.commands.environment" in sys.modules
def test_min_version(runner):
    """`teststack env` must exit with code 10 when the configured min_version exceeds this version."""
    with runner.isolated_filesystem() as tmp_dir:
        config = {'tests': {'min_version': 'v999.999.999'}}
        with open(f'{tmp_dir}/teststack.toml', 'w') as config_file:
            toml.dump(config, config_file)
        result = runner.invoke(cli, [f'--path={tmp_dir}', 'env'])
        assert result.exit_code == 10
| 426 | 0 | 46 |
48eaecb36e577e897f08a2f2794bd1322961e4b1 | 3,689 | py | Python | Models/Segmentation_Models/Unet2d_Vertebrae/utils/save_fig.py | BIMCV-CSUSP/BIMCV-MIDAS-PROJECT | ef7311873fc29416c7caa8babdf341c058404ec4 | [
"CC-BY-4.0"
] | null | null | null | Models/Segmentation_Models/Unet2d_Vertebrae/utils/save_fig.py | BIMCV-CSUSP/BIMCV-MIDAS-PROJECT | ef7311873fc29416c7caa8babdf341c058404ec4 | [
"CC-BY-4.0"
] | null | null | null | Models/Segmentation_Models/Unet2d_Vertebrae/utils/save_fig.py | BIMCV-CSUSP/BIMCV-MIDAS-PROJECT | ef7311873fc29416c7caa8babdf341c058404ec4 | [
"CC-BY-4.0"
] | 2 | 2021-07-14T09:59:16.000Z | 2021-07-14T10:01:29.000Z | #! /usr/bin/python
# -*- coding: utf8 -*-
from __future__ import print_function
# Description:
"""
plot or save MRI images / masks in a screenshot
-------
Types of sequences in the data = ['Sag_T2' 'Sag_T1' 'Sag_Stir']
"""
# Imports
import os, time
import glob
import random
import pandas as pd
import numpy as np
import numpy.ma as ma
import matplotlib
import math
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot_screenshot(images, mask = True, cols=4, smooth=True, ch=1, path_name = './', label_name='screenshot', save = True):
    """
    plot or save images / masks in a screenshot

    Parameters
    ----------
    images = images or mask, np.ndarray image data [slice][width][height][channel]
    mask = if True, renders with a 'jet' colormap and per-slice normalisation (mask view),
        otherwise with 'bone' (MRI view)
    cols = number of columns in the grid of sub-plots
    smooth = if True, interpolation = 'spline16', if False = interpolation = 'nearest'
    ch = channel index to display
    path_name = save path, str
    label_name = filename, str
    save = if True, save images in path_name + label_name + .jpg, otherwise show interactively

    Returns
    -------
    plot or save images / masks in a screenshot
    """
    n = np.shape(images)[0]
    # np.float was removed in NumPy 1.24; the builtin float keeps the original behaviour.
    rows = np.rint(n / float(cols)).astype(int)
    if (rows * cols) > n:
        # Pad with blank frames so the rows x cols grid is completely filled.
        # NOTE(review): np.dstack stacks along axis 2, which looks inconsistent with the
        # [slice][width][height][channel] layout documented above — verify against real data.
        cover = np.zeros((np.shape(images)[1], np.shape(images)[2]))
        for i in range(rows * cols - n):
            images = np.dstack((np.array(images), np.array(cover)))
    # Create figure with sub-plots.
    fig, axes = plt.subplots(rows, cols, figsize=(12, 10))
    # Adjust vertical spacing if we need to print ensemble and best-net.
    hspace = 0.03
    fig.subplots_adjust(hspace=hspace, wspace=0.03)
    for i, ax in enumerate(axes.flat):
        # Interpolation type.
        interpolation = 'spline16' if smooth else 'nearest'
        # NOTE(review): `rotate` is not defined or imported in this module — presumably
        # scipy.ndimage.rotate; confirm against the project sources.
        if mask:
            # Plot mask, normalised to [0, 1] per slice.
            ax.imshow(rotate((images[i, ..., ch] / np.max(images[i, ..., ch])), 90).squeeze(), cmap='jet',
                      interpolation=interpolation)
            xlabel = "Mask: {0}".format(i + 1)
        else:
            # Plot image.
            ax.imshow(rotate(images[i, ..., ch], 90).squeeze(), cmap='bone',
                      interpolation=interpolation)
            xlabel = "MRI_Scan: {0}".format(i + 1)
        ax.set_xlabel(xlabel)
        ax.set_xticks([])
        ax.set_yticks([])
    if save:
        fig.savefig(path_name + label_name + '.jpg', format='jpg', bbox_inches='tight')  # , dpi=600
        plt.close(fig)
    else:
        plt.show()
| 32.9375 | 124 | 0.596639 | #! /usr/bin/python
# -*- coding: utf8 -*-
from __future__ import print_function
# Description:
"""
plot or save MRI images / masks in a screenshot
-------
Types of sequences in the data = ['Sag_T2' 'Sag_T1' 'Sag_Stir']
"""
# Imports
import os, time
import glob
import random
import pandas as pd
import numpy as np
import numpy.ma as ma
import matplotlib
import math
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def save_learning_curve(results,path, save=True):
    """
    Plot (or save as <path>Learning_curve.png) the training/validation loss and mean-IoU curves.

    Parameters
    ----------
    results = Keras History-like object exposing results.history with keys
        'loss', 'val_loss', 'mean_iou', 'val_mean_iou'
    path = destination directory/prefix for the saved figure, str
    save = if True save the figure to disk, otherwise display it interactively
    """
    fig, ax = plt.subplots(1,2, figsize=(16, 8)) # figsize=(8, 8)
    ax1, ax2 = ax.ravel()
    ax1.set_title("Learning curve\n Training and Validation - loss")
    # The first two epochs are skipped ([2:]) so the initial transient does not dominate the plot.
    ax1.plot(results.history["loss"][2:], label="loss")
    ax1.plot(results.history["val_loss"][2:], label="val_loss")
    # Mark the epoch (within the plotted range) with the lowest validation loss.
    ax1.plot( np.argmin(results.history["val_loss"][2:]),
              np.min(results.history["val_loss"][2:]),
              marker="x", color="r", label="Best loss")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel("loss")
    ax1.legend();
    ax2.set_title("Learning curve\n Training and Validation - m-IoU")
    ax2.plot(results.history["mean_iou"][2:], label="m-IoU")
    ax2.plot(results.history["val_mean_iou"][2:], label="val_m-IoU")
    # Mark the epoch (within the plotted range) with the highest validation mean IoU.
    ax2.plot( np.argmax(results.history["val_mean_iou"][2:]),
              np.max(results.history["val_mean_iou"][2:]),
              marker="x", color="r", label="Best m-IoU")
    ax2.set_xlabel("Epochs")
    ax2.set_ylabel("m-IoU")
    ax2.legend();
    if save:
        plt.savefig(path+"Learning_curve.png", bbox_inches='tight', format='png')#, dpi=600
        plt.close(fig)
    else: plt.show()
def plot_screenshot(images, mask = True, cols=4, smooth=True, ch=1, path_name = './', label_name='screenshot', save = True):
    """
    plot or save images / masks in a screenshot

    Parameters
    ----------
    images = images or mask, np.ndarray image data [slice][width][height][channel]
    mask = if True, renders with a 'jet' colormap and per-slice normalisation (mask view),
        otherwise with 'bone' (MRI view)
    cols = number of columns in the grid of sub-plots
    smooth = if True, interpolation = 'spline16', if False = interpolation = 'nearest'
    ch = channel index to display
    path_name = save path, str
    label_name = filename, str
    save = if True, save images in path_name + label_name + .jpg, otherwise show interactively

    Returns
    -------
    plot or save images / masks in a screenshot
    """
    n = np.shape(images)[0]
    # np.float was removed in NumPy 1.24; the builtin float keeps the original behaviour.
    rows = np.rint(n / float(cols)).astype(int)
    if (rows * cols) > n:
        # Pad with blank frames so the rows x cols grid is completely filled.
        # NOTE(review): np.dstack stacks along axis 2, which looks inconsistent with the
        # [slice][width][height][channel] layout documented above — verify against real data.
        cover = np.zeros((np.shape(images)[1], np.shape(images)[2]))
        for i in range(rows * cols - n):
            images = np.dstack((np.array(images), np.array(cover)))
    # Create figure with sub-plots.
    fig, axes = plt.subplots(rows, cols, figsize=(12, 10))
    # Adjust vertical spacing if we need to print ensemble and best-net.
    hspace = 0.03
    fig.subplots_adjust(hspace=hspace, wspace=0.03)
    for i, ax in enumerate(axes.flat):
        # Interpolation type.
        interpolation = 'spline16' if smooth else 'nearest'
        # NOTE(review): `rotate` is not defined or imported in this module — presumably
        # scipy.ndimage.rotate; confirm against the project sources.
        if mask:
            # Plot mask, normalised to [0, 1] per slice.
            ax.imshow(rotate((images[i, ..., ch] / np.max(images[i, ..., ch])), 90).squeeze(), cmap='jet',
                      interpolation=interpolation)
            xlabel = "Mask: {0}".format(i + 1)
        else:
            # Plot image.
            ax.imshow(rotate(images[i, ..., ch], 90).squeeze(), cmap='bone',
                      interpolation=interpolation)
            xlabel = "MRI_Scan: {0}".format(i + 1)
        ax.set_xlabel(xlabel)
        ax.set_xticks([])
        ax.set_yticks([])
    if save:
        fig.savefig(path_name + label_name + '.jpg', format='jpg', bbox_inches='tight')  # , dpi=600
        plt.close(fig)
    else:
        plt.show()
| 1,160 | 0 | 23 |
43402eb70cf833f3e370cecf3e2c8e835252e1a4 | 103 | py | Python | src/hpp/corbaserver/romeo/__init__.py | florent-lamiraux/hpp_romeo | 71ed7143e3ffe1eaeeeedb49b81f147fa2e18027 | [
"BSD-2-Clause"
] | null | null | null | src/hpp/corbaserver/romeo/__init__.py | florent-lamiraux/hpp_romeo | 71ed7143e3ffe1eaeeeedb49b81f147fa2e18027 | [
"BSD-2-Clause"
] | null | null | null | src/hpp/corbaserver/romeo/__init__.py | florent-lamiraux/hpp_romeo | 71ed7143e3ffe1eaeeeedb49b81f147fa2e18027 | [
"BSD-2-Clause"
] | 2 | 2018-12-19T15:19:16.000Z | 2019-12-06T16:25:11.000Z | #!/usr/bin/env python
# Copyright (c) 2014 CNRS
# Author: Florent Lamiraux
#
from .robot import Robot
| 14.714286 | 26 | 0.718447 | #!/usr/bin/env python
# Copyright (c) 2014 CNRS
# Author: Florent Lamiraux
#
from .robot import Robot
| 0 | 0 | 0 |
01fec71a21a3dc4ae1e01434284b6e698cc857a1 | 912 | py | Python | narcissistic.py | GGSimmons1992/CodewarsPython | 795e1e38cc9043f2228c7d5a4bf94c8446ae2727 | [
"MIT"
] | 1 | 2019-03-01T00:15:36.000Z | 2019-03-01T00:15:36.000Z | narcissistic.py | GGSimmons1992/CodewarsPython | 795e1e38cc9043f2228c7d5a4bf94c8446ae2727 | [
"MIT"
] | null | null | null | narcissistic.py | GGSimmons1992/CodewarsPython | 795e1e38cc9043f2228c7d5a4bf94c8446ae2727 | [
"MIT"
] | null | null | null | """
Gary Simmons
March 2018
Kata Prompt Description: A Narcissistic Number is a number which is the sum of its own digits, each raised to the power of the number of digits in a given base. In this Kata, we will restrict ourselves to decimal (base 10).
For example, take 153 (3 digits):
1^3 + 5^3 + 3^3 = 1 + 125 + 27 = 153
and 1634 (4 digits):
1^4 + 6^4 + 3^4 + 4^4 = 1 + 1296 + 81 + 256 = 1634
The Challenge:
Your code must return true or false depending upon whether the given number is a Narcissistic number in base 10.
Error checking for text strings or other invalid inputs is not required, only valid integers will be passed into the function.
"""
| 31.448276 | 223 | 0.701754 | """
Gary Simmons
March 2018
Kata Prompt Description: A Narcissistic Number is a number which is the sum of its own digits, each raised to the power of the number of digits in a given base. In this Kata, we will restrict ourselves to decimal (base 10).
For example, take 153 (3 digits):
1^3 + 5^3 + 3^3 = 1 + 125 + 27 = 153
and 1634 (4 digits):
1^4 + 6^4 + 3^4 + 4^4 = 1 + 1296 + 81 + 256 = 1634
The Challenge:
Your code must return true or false depending upon whether the given number is a Narcissistic number in base 10.
Error checking for text strings or other invalid inputs is not required, only valid integers will be passed into the function.
"""
def narcissistic(value):
    """Return True if *value* is a narcissistic (Armstrong) number in base 10.

    A narcissistic number equals the sum of its own digits, each raised to
    the power of the number of digits, e.g. 153 = 1**3 + 5**3 + 3**3.

    :param value: non-negative integer to test
    :return: bool
    """
    digits = str(value)
    power = len(digits)
    # Sum each digit raised to the digit-count power and compare to the input.
    return sum(int(d) ** power for d in digits) == value
#// Code away
| 221 | 0 | 22 |
ff73cfec6bbcc04feed5f59b9ae989efec03fae8 | 1,552 | py | Python | core/utils/Worker.py | xingfeT/mirageMan | 7443bce072817adc843c5e34468f1dab32cb48ae | [
"Apache-2.0"
] | null | null | null | core/utils/Worker.py | xingfeT/mirageMan | 7443bce072817adc843c5e34468f1dab32cb48ae | [
"Apache-2.0"
] | null | null | null | core/utils/Worker.py | xingfeT/mirageMan | 7443bce072817adc843c5e34468f1dab32cb48ae | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import logging
from daemonize import Daemonize
from rq import Connection, Worker
from core.utils.Executor import check_output
__workers__ = [
'alpha',
'beta',
'gamma',
'delta',
'epsilon',
'zeta',
'eta',
'theta',
'iota',
'kappa',
'lambda',
'mu',
'nu',
'xi',
'omicron',
'pi',
'rho',
'sigma',
'tau',
'upsilon',
'phi',
'chi',
'psi',
'omega'
]
def get_available_rq_worker_name() -> str:
    """
    Assign a worker name which is not already used
    :return: Name of the worker
    :raises RuntimeError: if every predefined worker name is already in use
    """
    # `rq info` lists the currently running workers; any Greek-letter name
    # missing from that output is free to use.
    out = check_output(
        cmd='rq info',
        cwd='/'
    )
    for each in __workers__:
        if each not in out:
            return each
    # Previously this fell off the end and implicitly returned None, which
    # made callers (e.g. Worker(None)) fail later with a confusing error.
    raise RuntimeError('All %d RQ worker names are already in use' % len(__workers__))
def launch_rq_worker() -> None:
    """
    Blocking function to launch a worker using Python RQ's internal API
    """
    # Open the default Redis connection for the lifetime of the worker,
    # pick a free name, and block in the work loop.
    with Connection():
        Worker(get_available_rq_worker_name()).work()
| 18.046512 | 71 | 0.567655 | #!/usr/bin/env python
import logging
from daemonize import Daemonize
from rq import Connection, Worker
from core.utils.Executor import check_output
# Pool of human-friendly RQ worker names (Greek alphabet). A name is
# considered taken when it already appears in the output of `rq info`.
__workers__ = [
    'alpha',
    'beta',
    'gamma',
    'delta',
    'epsilon',
    'zeta',
    'eta',
    'theta',
    'iota',
    'kappa',
    'lambda',
    'mu',
    'nu',
    'xi',
    'omicron',
    'pi',
    'rho',
    'sigma',
    'tau',
    'upsilon',
    'phi',
    'chi',
    'psi',
    'omega'
]
def get_available_rq_worker_name() -> str:
    """
    Assign a worker name which is not already used
    :return: Name of the worker
    :raises RuntimeError: if every predefined worker name is already in use
    """
    # `rq info` lists the currently running workers; any Greek-letter name
    # missing from that output is free to use.
    out = check_output(
        cmd='rq info',
        cwd='/'
    )
    for each in __workers__:
        if each not in out:
            return each
    # Previously this fell off the end and implicitly returned None, which
    # made callers (e.g. Worker(None)) fail later with a confusing error.
    raise RuntimeError('All %d RQ worker names are already in use' % len(__workers__))
def launch_rq_worker() -> None:
    """
    Blocking function to launch a worker using Python RQ's internal API
    """
    # Open the default Redis connection for the lifetime of the worker,
    # pick a free name, and block in the work loop.
    with Connection():
        Worker(get_available_rq_worker_name()).work()
def launch_rq_daemon():
    """Fork the current process and run an RQ worker as a background daemon.

    Picks a free worker name, wires a dedicated DEBUG-level log file and a
    pid file under /tmp for that name, then daemonizes ``launch_rq_worker``.
    """
    worker = get_available_rq_worker_name()
    pid = "/tmp/%s.pid" % worker
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Keep daemon output out of the root logger's handlers.
    logger.propagate = False
    fh = logging.FileHandler("/tmp/%s.log" % worker, "w")
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    # The log file descriptor must survive the daemonize double-fork.
    keep_fds = [fh.stream.fileno()]
    daemon = Daemonize(
        app="dune",
        pid=pid,
        action=launch_rq_worker,
        keep_fds=keep_fds
    )
    daemon.start()
| 483 | 0 | 23 |
b6c42ddd52e84260db5a55b47c1d05b955befeb3 | 7,371 | py | Python | solution/data/processors/prep.py | taeukkkim/temp | 91c90fe5da4678424d8aacacbf15773dc624021d | [
"MIT"
] | null | null | null | solution/data/processors/prep.py | taeukkkim/temp | 91c90fe5da4678424d8aacacbf15773dc624021d | [
"MIT"
] | null | null | null | solution/data/processors/prep.py | taeukkkim/temp | 91c90fe5da4678424d8aacacbf15773dc624021d | [
"MIT"
] | null | null | null | import re
import numpy as np
from solution.utils.constant import (
QUESTION_COLUMN_NAME,
CONTEXT_COLUMN_NAME,
ANSWER_COLUMN_NAME,
)
def get_extractive_features(tokenizer, mode, data_args):
""" Get extractive features for train, eval and test.
Args:
tokenizer (BERT Tokenizer): tokenizer for preprocessing
mode (str): [description] : one of train, eval, test
data_args (DataArguments): data arguments
"""
def tokenize_fn(examples):
"""Tokenize questions and contexts
Args:
examples (Dict): DatasetDict
Returns:
Dict: Tokenized examples
"""
pad_on_right = tokenizer.padding_side == "right"
max_seq_length = min(data_args.max_seq_length,
tokenizer.model_max_length)
# truncation과 padding을 통해 tokenization을 진행
# stride를 이용하여 overflow를 유지
# 각 example들은 이전의 context와 조금씩 겹침
# overflow 발생 시 지정한 batch size보다 더 많은 sample이 들어올 수 있음 -> data augmentation
tokenized_examples = tokenizer(
examples[QUESTION_COLUMN_NAME if pad_on_right else CONTEXT_COLUMN_NAME],
examples[CONTEXT_COLUMN_NAME if pad_on_right else QUESTION_COLUMN_NAME],
# 길이가 긴 context가 등장할 경우 truncation을 진행
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
# overflow 발생 시 원래 인덱스를 찾을 수 있게 mapping 가능한 값이 필요
return_overflowing_tokens=True,
# token의 캐릭터 단위 position을 찾을 수 있는 offset을 반환
# start position과 end position을 찾는데 도움을 줌
return_offsets_mapping=True,
# sentence pair가 입력으로 들어올 때 0과 1로 구분지음
return_token_type_ids=data_args.return_token_type_ids,
padding="max_length" if data_args.pad_to_max_length else False,
# return_tensors='pt'
)
return tokenized_examples
def prepare_train_features(examples):
"""
Reset for train dataset that do not have the correct answer
or where the correct answer position has changed.
Args:
examples (Dict): DatasetDict
Returns:
Dict: Tokenized examples where the answer has been reset
"""
pad_on_right = tokenizer.padding_side == "right"
tokenized_examples = tokenize_fn(examples)
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
offset_mapping = tokenized_examples.pop("offset_mapping")
# 데이터셋에 "start position", "enc position" label을 부여합니다.
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id) # cls index
# sequence id를 설정합니다 (context와 question을 구분).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# 길이가 긴 context에 대해 truncation을 진행하기 때문에
# 하나의 example이 여러 개의 span을 가질 수 있음
sample_index = sample_mapping[i]
answers = examples[ANSWER_COLUMN_NAME][sample_index]
# answer가 없을 경우 cls_index를 answer로 설정
# example에서 정답이 없는 경우가 있을 수 있음
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# text에서 정답의 start/end character index를 가져옴
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# sequence_ids는 0, 1, None의 세 값만 가짐
# None 0 0 ... 0 None 1 1 ... 1 None
# text에서 context가 시작하는 위치로 이동
token_start_index = 0
while sequence_ids[token_start_index] != context_index:
token_start_index += 1
# text에서 context가 끝나는 위치로 이동
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != context_index:
token_end_index -= 1
# 정답이 span을 벗어나는지 체크.
# 정답이 없는 경우 CLS index로 labeling (Retro일 경우 다르게 처리)
if not (
offsets[token_start_index][0] <= start_char and
offsets[token_end_index][1] >= end_char
):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# token_start_index 및 token_end_index를 answer의 끝으로 이동
# Note: answer가 마지막 단어인 경우 last offset을 따라갈 수 있음
# token_start_index를 실제 위치로 맞춰주는 과정
while (
token_start_index < len(offsets) and
offsets[token_start_index][0] <= start_char
):
token_start_index += 1
tokenized_examples["start_positions"].append(
token_start_index - 1)
# token_end_index를 실제 위치로 맞춰주는 과정
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(
token_end_index + 1)
return tokenized_examples
def prepare_validation_features(examples, retriever=None):
"""Preprocessing validation dataset for extractive model
Args:
examples (Dict): DatasetDict
retriever (Dict): DatasetDict from wiki. Defaults to None.
Returns:
Dict: Tokenized examples
"""
pad_on_right = tokenizer.padding_side == "right"
tokenized_examples = tokenize_fn(examples)
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# evaluation을 위해 prediction을 context의 substring으로 변환
# corresponding example_id를 유지하고 offset mappings을 저장
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# sequence id를 설정합니다 (context와 question을 구분).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# 하나의 example이 여러 개의 span을 가질 수 있음
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(
examples["id"][sample_index])
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
if mode == "train":
get_features_fn = prepare_train_features
elif mode == "eval":
get_features_fn = prepare_validation_features
elif mode == "test":
get_features_fn = prepare_validation_features
return get_features_fn, True
PREP_PIPELINE = {
"extractive": get_extractive_features,
}
| 36.671642 | 84 | 0.60114 | import re
import numpy as np
from solution.utils.constant import (
QUESTION_COLUMN_NAME,
CONTEXT_COLUMN_NAME,
ANSWER_COLUMN_NAME,
)
def get_extractive_features(tokenizer, mode, data_args):
    """ Get extractive features for train, eval and test.
    Args:
        tokenizer (BERT Tokenizer): tokenizer for preprocessing
        mode (str): one of "train", "eval", "test"
        data_args (DataArguments): data arguments
    """
    def tokenize_fn(examples):
        """Tokenize questions and contexts
        Args:
            examples (Dict): DatasetDict
        Returns:
            Dict: Tokenized examples
        """
        pad_on_right = tokenizer.padding_side == "right"
        max_seq_length = min(data_args.max_seq_length,
                             tokenizer.model_max_length)
        # Tokenize with truncation and padding, keeping overflowing tokens
        # via the stride so each extra span overlaps the previous context a bit.
        # On overflow, more samples than the nominal batch size can be
        # produced -> acts as data augmentation.
        tokenized_examples = tokenizer(
            examples[QUESTION_COLUMN_NAME if pad_on_right else CONTEXT_COLUMN_NAME],
            examples[CONTEXT_COLUMN_NAME if pad_on_right else QUESTION_COLUMN_NAME],
            # Truncate only the (possibly long) context, never the question.
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            # Needed to map overflowed spans back to their original example index.
            return_overflowing_tokens=True,
            # Return character-level offsets for each token; used later to
            # locate the answer's start and end positions.
            return_offsets_mapping=True,
            # Distinguish the two sentences of a pair with 0 and 1.
            return_token_type_ids=data_args.return_token_type_ids,
            padding="max_length" if data_args.pad_to_max_length else False,
            # return_tensors='pt'
        )
        return tokenized_examples
    def prepare_train_features(examples):
        """
        Reset for train dataset that do not have the correct answer
        or where the correct answer position has changed.
        Args:
            examples (Dict): DatasetDict
        Returns:
            Dict: Tokenized examples where the answer has been reset
        """
        pad_on_right = tokenizer.padding_side == "right"
        tokenized_examples = tokenize_fn(examples)
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        offset_mapping = tokenized_examples.pop("offset_mapping")
        # Assign "start position" / "end position" labels to the dataset.
        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []
        for i, offsets in enumerate(offset_mapping):
            input_ids = tokenized_examples["input_ids"][i]
            cls_index = input_ids.index(tokenizer.cls_token_id)  # cls index
            # Sequence ids distinguish the context from the question.
            sequence_ids = tokenized_examples.sequence_ids(i)
            context_index = 1 if pad_on_right else 0
            # Because long contexts are truncated into overlapping spans,
            # one example may map to several tokenized features.
            sample_index = sample_mapping[i]
            answers = examples[ANSWER_COLUMN_NAME][sample_index]
            # If the example has no gold answer, label it with the CLS index.
            if len(answers["answer_start"]) == 0:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Get the answer's start/end character index within the text.
                start_char = answers["answer_start"][0]
                end_char = start_char + len(answers["text"][0])
                # sequence_ids takes only the three values 0, 1 and None:
                # None 0 0 ... 0 None 1 1 ... 1 None
                # Advance to where the context starts in this feature.
                token_start_index = 0
                while sequence_ids[token_start_index] != context_index:
                    token_start_index += 1
                # Walk back to where the context ends in this feature.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != context_index:
                    token_end_index -= 1
                # Check whether the answer lies outside this span; if so,
                # label with the CLS index (handled differently for Retro).
                if not (
                    offsets[token_start_index][0] <= start_char and
                    offsets[token_end_index][1] >= end_char
                ):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                else:
                    # Move token_start_index and token_end_index onto the
                    # answer's boundaries. Note: if the answer is the final
                    # word, this may run up to the last offset.
                    while (
                        token_start_index < len(offsets) and
                        offsets[token_start_index][0] <= start_char
                    ):
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(
                        token_start_index - 1)
                    # Align token_end_index with the actual end position.
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(
                        token_end_index + 1)
        return tokenized_examples
    def prepare_validation_features(examples, retriever=None):
        """Preprocessing validation dataset for extractive model
        Args:
            examples (Dict): DatasetDict
            retriever (Dict): DatasetDict from wiki. Defaults to None.
        Returns:
            Dict: Tokenized examples
        """
        pad_on_right = tokenizer.padding_side == "right"
        tokenized_examples = tokenize_fn(examples)
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # For evaluation, predictions are mapped back to substrings of the
        # context: keep the corresponding example_id and the offset mappings.
        tokenized_examples["example_id"] = []
        for i in range(len(tokenized_examples["input_ids"])):
            # Sequence ids distinguish the context from the question.
            sequence_ids = tokenized_examples.sequence_ids(i)
            context_index = 1 if pad_on_right else 0
            # One example may map to several overlapping features.
            sample_index = sample_mapping[i]
            tokenized_examples["example_id"].append(
                examples["id"][sample_index])
            # Null out offsets that do not belong to the context so they are
            # ignored when extracting answer spans.
            tokenized_examples["offset_mapping"][i] = [
                (o if sequence_ids[k] == context_index else None)
                for k, o in enumerate(tokenized_examples["offset_mapping"][i])
            ]
        return tokenized_examples
    def identity(examples):
        """Pass-through preprocessing (no-op)."""
        return examples
    if mode == "train":
        get_features_fn = prepare_train_features
    elif mode == "eval":
        get_features_fn = prepare_validation_features
    elif mode == "test":
        get_features_fn = prepare_validation_features
    return get_features_fn, True
# Registry mapping reader-type name -> feature-builder factory.
PREP_PIPELINE = {
    "extractive": get_extractive_features,
}
| 26 | 0 | 27 |
71b44fa14fddd3b9884847de5e274d8e96e1a0ae | 4,258 | py | Python | tests/screensize.py | alex1712/kms-core | d197c971693738df39e33e3ecf9dff40ce7fa861 | [
"Apache-2.0"
] | 287 | 2015-01-03T18:27:51.000Z | 2022-03-24T13:48:24.000Z | tests/screensize.py | alex1712/kms-core | d197c971693738df39e33e3ecf9dff40ce7fa861 | [
"Apache-2.0"
] | 22 | 2015-06-05T10:32:16.000Z | 2022-03-14T07:24:02.000Z | tests/screensize.py | alex1712/kms-core | d197c971693738df39e33e3ecf9dff40ce7fa861 | [
"Apache-2.0"
] | 217 | 2015-01-05T14:24:57.000Z | 2022-02-24T07:14:09.000Z | #!/usr/bin/python3
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from gi.repository import GLib
from gi.repository import Gtk
if __name__ == "__main__":
main(sys.argv)
#self.pipe = Gst.parse_launch ("videotestsrc is-live=true ! capsfilter name=caps ! x264enc speed-preset=superfast ! h264parse ! decodebin ! autovideosink")
| 36.706897 | 319 | 0.716534 | #!/usr/bin/python3
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from gi.repository import GLib
from gi.repository import Gtk
def bus_callback(bus, message, not_used):
    """Watch the GStreamer bus: quit the GTK main loop on EOS, report errors.

    :param bus: the Gst.Bus the watch is attached to (unused here)
    :param message: the Gst.Message to inspect
    :param not_used: opaque user data supplied by bus.add_watch
    :return: True so the watch stays installed
    """
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        Gtk.main_quit()
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        # Errors are only reported; the loop keeps running (quitting here was
        # deliberately disabled). Removed the unused `element = message.src`.
        #Gtk.main_quit()
    return True
class MyWindow(Gtk.Window):
    """GTK test window for changing GStreamer caps (size) and encoder bitrate live.

    Builds a small control panel (width/height spin buttons, a bitrate spin
    button and two apply buttons) plus a v4l2 -> vp8 pipeline whose
    capsfilter and encoder can be reconfigured while PLAYING.
    """
    def __init__(self):
        Gtk.Window.__init__(self, title="Screen size changes test")
        # Layout: a horizontal box containing two vertical boxes
        # (labels on the left, spinners on the right) and the controls.
        hbox = Gtk.Box(spacing=10)
        hbox.set_homogeneous(False)
        vbox_left = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
        vbox_left.set_homogeneous(False)
        vbox_right = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
        vbox_right.set_homogeneous(False)
        self.add (hbox)
        hbox.pack_start(vbox_left, True, True, 0)
        hbox.pack_start(vbox_right, True, True, 0)
        # Width spinner: initial 320, range 1-1280.
        adjustment = Gtk.Adjustment(320, 1, 1280, 10, 100, 0)
        self.width_spin = Gtk.SpinButton()
        self.width_spin.set_adjustment(adjustment)
        self.width_spin.set_value (self.width_spin.get_value())
        vbox_right.pack_start(self.width_spin, True, True, 0)
        width_label = Gtk.Label("Width")
        vbox_left.pack_start(width_label, True, True, 0)
        # Height spinner: initial 240, range 1-1280.
        adjustment = Gtk.Adjustment(240, 1, 1280, 10, 100, 0)
        self.height_spin = Gtk.SpinButton()
        self.height_spin.set_adjustment(adjustment)
        self.height_spin.set_value (self.height_spin.get_value())
        vbox_right.pack_start(self.height_spin, True, True, 0)
        height_label = Gtk.Label("Height")
        vbox_left.pack_start(height_label, True, True, 0)
        caps_change = Gtk.Button(label="Change Size")
        caps_change.connect("clicked", self.on_caps_change_clicked)
        hbox.pack_start(caps_change, True, True, 0)
        # Bitrate spinner: initial 1000, range 100-10000000.
        adjustment = Gtk.Adjustment(1000, 100, 10000000, 100, 1000, 0)
        self.bitrate_spin = Gtk.SpinButton()
        self.bitrate_spin.set_adjustment(adjustment)
        self.bitrate_spin.set_value (self.bitrate_spin.get_value())
        hbox.pack_start(self.bitrate_spin, True, True, 0)
        change_bitrate = Gtk.Button(label="Change Bitrate")
        change_bitrate.connect("clicked", self.on_change_bitrate_clicked)
        hbox.pack_start(change_bitrate, True, True, 0)
        # Build the pipeline and start playing immediately.
        self.create_pipeline()
        self.pipe.set_state(Gst.State.PLAYING)
    def create_pipeline(self):
        """Create the v4l2 -> vp8 pipeline and keep handles to its capsfilter/encoder."""
        #self.pipe = Gst.parse_launch ("v4l2src ! clockoverlay ! capsfilter name=caps ! vp8enc end-usage=cbr resize-allowed=true name=enc target-bitrate=500000 deadline=200000 threads=1 cpu-used=16 ! vp8parse ! tee name=t ! queue ! vp8dec ! autovideosink sync=false t. ! queue ! webmmux ! filesink location=/tmp/test.webm")
        self.pipe = Gst.parse_launch ("v4l2src ! videoscale ! clockoverlay ! capsfilter name=caps ! vp8enc end-usage=cbr resize-allowed=true name=enc target-bitrate=500000 deadline=200000 threads=1 cpu-used=16 ! vp8parse ! vp8dec ! autovideosink sync=false ")
        bus = self.pipe.get_bus()
        # Route EOS/error messages to the module-level bus_callback.
        bus.add_watch(GLib.PRIORITY_DEFAULT, bus_callback, None)
        self.capsfilter = self.pipe.get_by_name ("caps")
        # Start from a fixed 720-pixel-high format.
        caps = Gst.caps_from_string ("video/x-raw,height=(int)720")
        self.capsfilter.set_property("caps", caps)
        self.encoder = self.pipe.get_by_name ("enc")
    def on_caps_change_clicked(self, widget):
        """Apply the width/height spinner values to the capsfilter while playing."""
        width = int(self.width_spin.get_value())
        height = int (self.height_spin.get_value())
        caps = Gst.caps_from_string ("video/x-raw,width=(int)" + str(width) + ",height=(int)" + str(height))
        print ("setting caps to: " + str(width) + ", " + str(height))
        self.capsfilter.set_property("caps", caps)
    def on_change_bitrate_clicked(self, widget):
        """Apply the bitrate spinner value to the vp8 encoder while playing."""
        bitrate = int (self.bitrate_spin.get_value())
        self.encoder.set_property ("target-bitrate", bitrate)
def main(argv):
    """Initialize GStreamer, show the demo window and run the GTK main loop.

    :param argv: command-line arguments, forwarded to Gst.init
    """
    Gst.init(argv)
    win = MyWindow()
    win.connect("delete-event", Gtk.main_quit)
    win.show_all()
    try:
        Gtk.main()
    except KeyboardInterrupt:
        # Allow Ctrl-C in the terminal to shut the demo down cleanly.
        # (Previously a bare `except:` silently swallowed every exception.)
        pass
    # Always release the pipeline so the camera/encoder are freed.
    win.pipe.set_state(Gst.State.NULL)
# Script entry point: forward CLI args so GStreamer can parse its options.
if __name__ == "__main__":
    main(sys.argv)
#self.pipe = Gst.parse_launch ("videotestsrc is-live=true ! capsfilter name=caps ! x264enc speed-preset=superfast ! h264parse ! decodebin ! autovideosink")
| 3,710 | 6 | 169 |
76e4bd02018aff8b31e0e277aab69e6a94b7a230 | 1,902 | py | Python | ChessDRF/server/views/account_views.py | Pythongor/Chess-DRF | d8dedd9fa6db85c00e6e3ee4c63729448a9bfba5 | [
"MIT"
] | null | null | null | ChessDRF/server/views/account_views.py | Pythongor/Chess-DRF | d8dedd9fa6db85c00e6e3ee4c63729448a9bfba5 | [
"MIT"
] | 3 | 2021-09-08T03:38:52.000Z | 2022-01-13T03:57:08.000Z | ChessDRF/server/views/account_views.py | Pythongor/Chess-DRF | d8dedd9fa6db85c00e6e3ee4c63729448a9bfba5 | [
"MIT"
] | null | null | null | from os import listdir
from random import choice
from django.conf import settings
from django.contrib.auth import login, authenticate
from django.contrib.auth.views import LoginView
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import TemplateView, DetailView
from django.views.generic.edit import FormView
from ..forms import ImageForm, ChessUserCreationForm
from ..models import ChessUser
| 30.677419 | 77 | 0.692955 | from os import listdir
from random import choice
from django.conf import settings
from django.contrib.auth import login, authenticate
from django.contrib.auth.views import LoginView
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import TemplateView, DetailView
from django.views.generic.edit import FormView
from ..forms import ImageForm, ChessUserCreationForm
from ..models import ChessUser
class EnterView(TemplateView):
    """Render the pre-authentication entry page (``accounts/enter.html``)."""
    template_name = 'accounts/enter.html'
class SignInView(LoginView):
    """Username/password login; on success redirect to the user's profile page."""
    template_name = 'accounts/login.html'

    def get_success_url(self):
        # The submitted form data carries the username needed for the URL.
        submitted = self.get_form_kwargs()['data']
        return 'users/' + submitted.get('username')
class SignUpView(FormView):
    """Registration page backed by ChessUserCreationForm.

    On success: saves the user, assigns a random figure image as the user's
    photo, logs the user in, and redirects to their profile page.
    """
    template_name = 'accounts/sign_up.html'
    form_class = ChessUserCreationForm
    # Set in form_valid; get_success_url uses it to build the redirect target.
    user = None
    def form_valid(self, form):
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            # Re-authenticate with the just-created credentials so login() works.
            self.user = authenticate(username=username, password=password)
            # Pick a random figure image from the static folder as the photo.
            figure = choice(listdir(f'{settings.STATIC_ROOT}/figures'))
            self.user.photo = f'/figures/{figure}'
            self.user.save()
            login(self.request, self.user)
        return redirect(self.get_success_url())
    def form_invalid(self, form):
        # NOTE(review): errors go to stdout only — consider logging instead.
        print(form.errors)
        return super().form_invalid(form)
    def get_success_url(self):
        return f'users/{self.user}'
class UserView(DetailView):
    """Public profile page for a single ChessUser."""
    model = ChessUser
    template_name = 'accounts/profile.html'

    def get_object(self, queryset=None):
        # Look the user up by the username captured from the URL kwargs.
        return get_object_or_404(ChessUser, username=self.kwargs['username'])

    def get_context_data(self, **kwargs):
        # Expose an unbound image-upload form to the template.
        ctx = super().get_context_data(**kwargs)
        ctx['form'] = ImageForm()
        return ctx
| 938 | 439 | 92 |
6e34695bf587cf6d83d1b845593c9f45af7b74d9 | 1,858 | py | Python | steinerpy/abstract_algo.py | rooshm/steinerpy | 777b55fa94527365322ba5fa675c8be090333715 | [
"MIT"
] | 3 | 2021-06-10T16:46:20.000Z | 2022-02-11T14:24:15.000Z | steinerpy/abstract_algo.py | rooshm/steinerpy | 777b55fa94527365322ba5fa675c8be090333715 | [
"MIT"
] | 12 | 2021-03-31T03:31:24.000Z | 2021-11-18T21:51:18.000Z | steinerpy/abstract_algo.py | rooshm/steinerpy | 777b55fa94527365322ba5fa675c8be090333715 | [
"MIT"
] | 1 | 2021-06-13T15:01:24.000Z | 2021-06-13T15:01:24.000Z | """A module with the class `AbstractAlgorithm` defined"""
# from abc import ABC, abstractmethod
from abc import abstractmethod
from steinerpy.library.misc.abc_utils import abstract_attribute, ABC as newABC
from steinerpy.library.graphs.graph import IGraph
from typing import List
class AbstractAlgorithm(newABC):
"""An abstract barebones superclass for each algorithm implementation.
All algorithm implementations should inhereit :py:class:: AbstractAlgorithm.
Do not instantiate this directly!
Attributes:
terminals (list): A list of tuples representing terminals on a graph.
Exact format depends on the type of graph used (see below).
graph (SquareGrid, MyGraph): Graph classes from superclass IGraph.
Created using 'GraphFactory' class from the 'graph' module
S (dict): A dictionary containing information to output Steiner Tree
'sol': is a list of tree edges, e.g. ((x1,y1),(x2,y2)) if using SquareGrid graph
'dist': is a list of each tree edge's distance cost
'path': is a list of vertices of G, that make up each tree edge
'stats': {'run_time': x, closed_nodes: y, open_nodes: z}
"""
def return_solutions(self):
"""Return solution set of final tree
Returns:
S (dict): A dictionary containing information to output Steiner Tree
"""
return self.S
@abstractmethod
def run_algorithm(self):
"""Queries the algorithm and populates solution set 'S'
This is an abstract method, which must be explicitly defined
in subclasses
"""
pass | 37.918367 | 92 | 0.655005 | """A module with the class `AbstractAlgorithm` defined"""
# from abc import ABC, abstractmethod
from abc import abstractmethod
from steinerpy.library.misc.abc_utils import abstract_attribute, ABC as newABC
from steinerpy.library.graphs.graph import IGraph
from typing import List
class AbstractAlgorithm(newABC):
"""An abstract barebones superclass for each algorithm implementation.
All algorithm implementations should inhereit :py:class:: AbstractAlgorithm.
Do not instantiate this directly!
Attributes:
terminals (list): A list of tuples representing terminals on a graph.
Exact format depends on the type of graph used (see below).
graph (SquareGrid, MyGraph): Graph classes from superclass IGraph.
Created using 'GraphFactory' class from the 'graph' module
S (dict): A dictionary containing information to output Steiner Tree
'sol': is a list of tree edges, e.g. ((x1,y1),(x2,y2)) if using SquareGrid graph
'dist': is a list of each tree edge's distance cost
'path': is a list of vertices of G, that make up each tree edge
'stats': {'run_time': x, closed_nodes: y, open_nodes: z}
"""
    def __init__(self, G, T):
        """Store the problem instance and initialise an empty solution set.

        Args:
            G: graph to search over (created via 'GraphFactory', see class doc)
            T (list): terminal vertices the Steiner tree must connect
        """
        self.terminals = T
        self.graph = G
        # Solution container: tree edges, their costs, vertex paths, run stats.
        self.S = {'sol':[], 'dist':[], 'path':[], 'stats':{}}
        # self.FLAG_STATUS_completeTree = False
def return_solutions(self):
"""Return solution set of final tree
Returns:
S (dict): A dictionary containing information to output Steiner Tree
"""
return self.S
@abstractmethod
def run_algorithm(self):
"""Queries the algorithm and populates solution set 'S'
This is an abstract method, which must be explicitly defined
in subclasses
"""
pass | 164 | 0 | 26 |
5a401582065b6cfbb9f8df5ad1f1e65bd2e6695b | 550 | py | Python | 2020/udp_server.py | aleimu/code-puzzle | 1aaa86e6b49e1fe15a2a6c6be22badd783594024 | [
"MIT"
] | null | null | null | 2020/udp_server.py | aleimu/code-puzzle | 1aaa86e6b49e1fe15a2a6c6be22badd783594024 | [
"MIT"
] | null | null | null | 2020/udp_server.py | aleimu/code-puzzle | 1aaa86e6b49e1fe15a2a6c6be22badd783594024 | [
"MIT"
] | null | null | null | from socket import *
from time import ctime
HOST = ''
PORT = 8888
BUFSIZ = 1024
ADDRESS = (HOST, PORT)
udpServerSocket = socket(AF_INET, SOCK_DGRAM)
udpServerSocket.bind(ADDRESS)  # bind the server address and port
while True:
    print("udp waiting for message...")
    data, addr = udpServerSocket.recvfrom(BUFSIZ)
    print("接收到数据:", data.decode('utf-8'))
    # Bug fix: interpolating bytes(ctime(), 'utf-8') with %s rendered the
    # bytes repr, so clients received "[b'...'] msg"; use ctime() directly.
    content = '[%s] %s' % (ctime(), data.decode('utf-8'))
    udpServerSocket.sendto(content.encode('utf-8'), addr)
    print('...received from and returned to:', addr)
# Unreachable while the loop runs forever; kept as a tidy-up marker.
udpServerSocket.close()
| 25 | 73 | 0.678182 | from socket import *
from time import ctime
HOST = ''
PORT = 8888
BUFSIZ = 1024
ADDRESS = (HOST, PORT)
udpServerSocket = socket(AF_INET, SOCK_DGRAM)
udpServerSocket.bind(ADDRESS)  # bind the server address and port
while True:
    print("udp waiting for message...")
    data, addr = udpServerSocket.recvfrom(BUFSIZ)
    print("接收到数据:", data.decode('utf-8'))
    # Bug fix: interpolating bytes(ctime(), 'utf-8') with %s rendered the
    # bytes repr, so clients received "[b'...'] msg"; use ctime() directly.
    content = '[%s] %s' % (ctime(), data.decode('utf-8'))
    udpServerSocket.sendto(content.encode('utf-8'), addr)
    print('...received from and returned to:', addr)
# Unreachable while the loop runs forever; kept as a tidy-up marker.
udpServerSocket.close()
| 0 | 0 | 0 |
8210e4bf0620acb34ec328bb601ed845570d9b37 | 2,128 | py | Python | cloudshell/devices/runners/autoload_runner.py | QualiSystems/cloudshell-networking-devices | f316cefca174975424ec21854b672335feaf8f87 | [
"Apache-2.0"
] | null | null | null | cloudshell/devices/runners/autoload_runner.py | QualiSystems/cloudshell-networking-devices | f316cefca174975424ec21854b672335feaf8f87 | [
"Apache-2.0"
] | 34 | 2016-11-28T10:52:44.000Z | 2019-10-01T08:52:59.000Z | cloudshell/devices/runners/autoload_runner.py | QualiSystems/cloudshell-networking-devices | f316cefca174975424ec21854b672335feaf8f87 | [
"Apache-2.0"
] | 1 | 2017-05-23T08:46:45.000Z | 2017-05-23T08:46:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from abc import abstractproperty, ABCMeta
from cloudshell.devices.networking_utils import command_logging
from cloudshell.devices.runners.interfaces.autoload_runner_interface import AutoloadOperationsInterface
| 32.738462 | 112 | 0.606203 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from abc import abstractproperty, ABCMeta
from cloudshell.devices.networking_utils import command_logging
from cloudshell.devices.runners.interfaces.autoload_runner_interface import AutoloadOperationsInterface
class AutoloadRunner(AutoloadOperationsInterface):
    """Base runner for device autoload (inventory discovery) operations.

    Subclasses must provide ``autoload_flow``; ``discover`` executes that
    flow and logs the discovered Vendor / Model / OS Version.

    NOTE(review): ``__metaclass__`` is the Python 2 way of declaring a
    metaclass; Python 3 ignores it, so ``@abstractproperty`` would not be
    enforced there — confirm the supported interpreter version.
    """
    __metaclass__ = ABCMeta
    def __init__(self, resource_config, logger):
        """
        Facilitate SNMP autoload
        :param resource_config:
        :param logging.Logger logger:
        """
        self.resource_config = resource_config
        self._logger = logger
    @abstractproperty
    def autoload_flow(self):
        """ Autoload flow property
        :return: AutoloadFlow object
        """
        pass
    def _log_device_details(self, details):
        """Log the root-level Vendor/Model/OS Version attributes from *details*."""
        needed_attrs = {'Vendor', 'Model', 'OS Version'}
        attrs = {}
        for attr in details.attributes:
            # Attribute names may be namespaced ("Shell.Vendor"); keep the last part.
            attr_name = attr.attribute_name.rsplit('.', 1)[-1]
            # Only root-level attributes (empty relative address) are of interest.
            if attr.relative_address == '' and attr_name in needed_attrs:
                attrs[attr_name] = attr.attribute_value
                needed_attrs.remove(attr_name)
            if not needed_attrs:
                # All three attributes found — stop scanning early.
                break
        self._logger.info('Device Vendor: "{}", Model: "{}", OS Version: "{}"'.format(
            attrs.get('Vendor', ''), attrs.get('Model', ''), attrs.get('OS Version', ''),
        ))
    @command_logging
    def discover(self):
        """Enable and disable SNMP community on the device, read its structure and attributes: chassis, modules,
        submodules, ports, port-channels and power supplies
        :return: AutoLoadDetails object
        :rtype: cloudshell.shell.core.driver_context.AutoLoadDetails
        """
        details = self.autoload_flow.execute_flow(self.resource_config.supported_os,
                                                  self.resource_config.shell_name,
                                                  self.resource_config.family,
                                                  self.resource_config.name)
        self._log_device_details(details)
        return details
| 629 | 1,221 | 23 |
f134212aed556da4d00930f3d31654d7f980e384 | 1,019 | py | Python | Chapter09/ch9_grover_ancilla.py | PacktPublishing/Quantum-Computing-in-Practice-with-IBM-Q-Experience | 91423f8ff1d039b5eb3fd18fc64bbb5967fdd5c1 | [
"MIT"
] | 24 | 2020-11-21T20:33:51.000Z | 2022-03-26T06:41:27.000Z | Chapter09/ch9_grover_ancilla.py | videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience | 938123d051c5bab72110011b3a05e515bb69ca09 | [
"MIT"
] | 2 | 2021-02-07T14:32:12.000Z | 2022-03-25T07:23:46.000Z | Chapter09/ch9_grover_ancilla.py | videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience | 938123d051c5bab72110011b3a05e515bb69ca09 | [
"MIT"
] | 16 | 2020-11-03T07:49:11.000Z | 2022-03-26T06:41:29.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi
"""
from qiskit import QuantumCircuit, Aer, execute
from IPython.core.display import display
from qiskit.tools.visualization import plot_histogram
print("Ch 9: Grover with ancilla qubits")
print("--------------------------------")
# Create 3 qubit circuit with two classical bits
qc=QuantumCircuit(3,2)
qc.h([0,1])
qc.x(2)
# Code for the oracle
qc.barrier([0,1,2])
qc.x(0)
qc.barrier([0,1,2])
# Phase kickback using the ancilla qubit
qc.h(2)
qc.ccx(0,1,2)
qc.h(2)
# End code for the oracle
qc.barrier([0,1,2])
qc.x(0)
qc.barrier([0,1,2])
# Amplifier
qc.h([0,1])
qc.x([0,1])
qc.h(1)
qc.cx(0,1)
qc.h(1)
qc.barrier([0,1,2])
qc.x([0,1])
qc.h([0,1])
# Measure two qubits
qc.measure([0,1],[0,1])
# Display circuit and execute on simulator
display(qc.draw('mpl'))
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend, shots=1)
result = job.result()
counts = result.get_counts(qc)
display(plot_histogram(counts))
| 16.983333 | 53 | 0.664377 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi

Grover search on two data qubits, using a third ancilla qubit so the oracle
can mark the target state via phase kickback.
"""
from qiskit import QuantumCircuit, Aer, execute
from IPython.core.display import display
from qiskit.tools.visualization import plot_histogram
print("Ch 9: Grover with ancilla qubits")
print("--------------------------------")
# Create 3 qubit circuit with two classical bits
qc=QuantumCircuit(3,2)
# Data qubits into uniform superposition; ancilla flipped to |1>.
qc.h([0,1])
qc.x(2)
# Code for the oracle
qc.barrier([0,1,2])
qc.x(0)
qc.barrier([0,1,2])
# Phase kickback using the ancilla qubit (H-CCX-H on the ancilla).
qc.h(2)
qc.ccx(0,1,2)
qc.h(2)
# End code for the oracle
qc.barrier([0,1,2])
qc.x(0)
qc.barrier([0,1,2])
# Amplifier (Grover diffuser: H, X, controlled-Z, X, H on the data qubits)
qc.h([0,1])
qc.x([0,1])
# Controlled-Z built from H-CX-H on qubit 1.
qc.h(1)
qc.cx(0,1)
qc.h(1)
qc.barrier([0,1,2])
qc.x([0,1])
qc.h([0,1])
# Measure two qubits
qc.measure([0,1],[0,1])
# Display circuit and execute on simulator
display(qc.draw('mpl'))
backend = Aer.get_backend('qasm_simulator')
# One shot suffices: for 1 marked state out of 4, a single Grover
# iteration amplifies the marked state to (near-)certainty.
job = execute(qc, backend, shots=1)
result = job.result()
counts = result.get_counts(qc)
display(plot_histogram(counts))
| 0 | 0 | 0 |
5ed5cc32f654e4a5cca36ab832db215acb10916a | 1,703 | py | Python | pythalesians/market/requests/backtestrequest.py | NunoEdgarGFlowHub/pythalesians | 3ea77b9f52cc704258d55369922955f4010bf4ea | [
"Apache-2.0"
] | 22 | 2017-06-04T21:05:18.000Z | 2022-01-22T19:01:43.000Z | pythalesians/market/requests/backtestrequest.py | NunoEdgarGFlowHub/pythalesians | 3ea77b9f52cc704258d55369922955f4010bf4ea | [
"Apache-2.0"
] | null | null | null | pythalesians/market/requests/backtestrequest.py | NunoEdgarGFlowHub/pythalesians | 3ea77b9f52cc704258d55369922955f4010bf4ea | [
"Apache-2.0"
] | 11 | 2016-10-06T21:25:20.000Z | 2020-07-08T19:13:35.000Z | __author__ = 'saeedamen'
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.timeseries.techind.techparams import TechParams
| 25.80303 | 85 | 0.677041 | __author__ = 'saeedamen'
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.timeseries.techind.techparams import TechParams
class BacktestRequest(TimeSeriesRequest):
    """Backtest request: extends TimeSeriesRequest with the signal name,
    technical-analysis parameters, one-way transaction cost, asset class
    and trading instrument for a backtest run.

    Fix: the asset/instrument setters used ``&`` on strings when building
    the warning message, which raises ``TypeError``; ``+`` is the intended
    concatenation.
    """

    def __init__(self):
        # NOTE(review): this invokes the initialiser *above* TimeSeriesRequest
        # in the MRO (super of TimeSeriesRequest, not of BacktestRequest),
        # skipping TimeSeriesRequest.__init__ -- confirm this is intentional.
        super(TimeSeriesRequest, self).__init__()
        self.logger = LoggerManager().getLogger(__name__)
        self.__signal_name = None
        self.__tech_params = TechParams()

    @property
    def signal_name(self):
        return self.__signal_name

    @signal_name.setter
    def signal_name(self, signal_name):
        self.__signal_name = signal_name

    @property
    def tech_params(self):
        return self.__tech_params

    @tech_params.setter
    def tech_params(self, tech_params):
        self.__tech_params = tech_params

    @property
    def spot_tc_bp(self):
        return self.__spot_tc_bp

    @spot_tc_bp.setter
    def spot_tc_bp(self, spot_tc_bp):
        # Convert a quoted spread in basis points to a one-way fractional
        # cost: half the spread (one side of the trade), then bp -> fraction.
        self.__spot_tc_bp = spot_tc_bp / (2.0 * 100.0 * 100.0)

    @property
    def asset(self):
        return self.__asset

    @asset.setter
    def asset(self, asset):
        valid_asset = ['fx', 'multi-asset']
        if not asset in valid_asset:
            # Fixed: '&' -> '+' (string concatenation); '&' raised TypeError.
            self.logger.warning(asset + " is not a defined asset.")
        self.__asset = asset

    @property
    def instrument(self):
        return self.__instrument

    @instrument.setter
    def instrument(self, instrument):
        valid_instrument = ['spot', 'futures', 'options']
        if not instrument in valid_instrument:
            # Fixed: '&' -> '+' (string concatenation); '&' raised TypeError.
            self.logger.warning(instrument + " is not a defined trading instrument.")
        self.__instrument = instrument
| 953 | 499 | 23 |
60f942e7943c7c823d5eaeabb57331b57170913c | 2,102 | py | Python | Behavioural Cloning.py | Vishal0703/Udacity_CarND_Behavioural_Cloning | 02a3c2432ce46220a39ba49ec13933269d9d7c21 | [
"MIT"
] | null | null | null | Behavioural Cloning.py | Vishal0703/Udacity_CarND_Behavioural_Cloning | 02a3c2432ce46220a39ba49ec13933269d9d7c21 | [
"MIT"
] | null | null | null | Behavioural Cloning.py | Vishal0703/Udacity_CarND_Behavioural_Cloning | 02a3c2432ce46220a39ba49ec13933269d9d7c21 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
"""Behavioural cloning training script: reads a Udacity driving log, builds a
steering-angle regression dataset from the three camera images (with a fixed
steering correction for the side cameras), augments by horizontal flipping,
then trains and saves a convolutional Keras model."""
# In[43]:
import cv2
import csv
from scipy import ndimage
import numpy as np
# In[44]:
# Read every row of the driving log CSV (image paths + telemetry).
lines = []
with open("./data/driving_log.csv") as dl:
    reader = csv.reader(dl)
    for line in reader:
        lines.append(line)
# In[45]:
# Drop the CSV header row.
lines = lines[1:]
# Steering correction applied to the left (+corr) and right (-corr) cameras.
corr = 0.2
source_path = "./data/"
images = []
measurements = []
for line in lines:
    # Column 3 is the recorded steering angle for the centre camera.
    m = float(line[3])
    m_list = [m, m+corr, m-corr]
    for i in range(3):
        image = cv2.imread(source_path + line[i].strip())
        # Convert BGR -> HLS and keep only the L and S channels ([:,:,1:]).
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)[:,:,1:]
        images.append(image)
        measurements.append(m_list[i])
# In[5]:
print(image.shape)
aug_images = []
aug_measure = []
# In[6]:
# Augmentation: add a horizontally mirrored copy of every image with the
# steering angle negated.
for image, measurement in zip(images, measurements):
    aug_images.append(image)
    aug_measure.append(measurement)
    aug_images.append(np.fliplr(image))
    aug_measure.append(-1.0*measurement)
# In[7]:
X_train = np.array(aug_images)
y_train = np.array(aug_measure)
# In[19]:
import tensorflow as tf
# In[40]:
from keras.models import Sequential
from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D
# In[42]:
# Convolutional stack (24/36/48/64/64 filters) feeding dense layers -- resembles
# the NVIDIA end-to-end driving architecture.
# NOTE(review): input_shape is (160,320,3) but the images above keep only two
# HLS channels -- confirm the channel count matches the actual data.
model = Sequential()
# Crop 60 rows from the top (sky) and 20 from the bottom (car hood).
model.add(Cropping2D(cropping=((60,20),(0,0)), input_shape = (160,320,3)))
# Normalise pixel values to [-0.5, 0.5].
model.add(Lambda(lambda x:x/255.0 - 0.5))
model.add(Conv2D(24, (5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36, (5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48, (5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(64, (3,3),activation='relu'))
model.add(Conv2D(64, (3,3),activation='relu'))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10))
# Single output: the predicted steering angle (regression).
model.add(Dense(1))
# In[13]:
model.compile(optimizer='adam', loss = 'mse')
model.fit(X_train, y_train, validation_split=0.2,shuffle=True, epochs=5)
# In[40]:
model.save('model_hls.h5')
| 17.965812 | 83 | 0.6451 | #!/usr/bin/env python
# coding: utf-8
"""Behavioural cloning training script: reads a Udacity driving log, builds a
steering-angle regression dataset from the three camera images (with a fixed
steering correction for the side cameras), augments by horizontal flipping,
then trains and saves a convolutional Keras model."""
# In[43]:
import cv2
import csv
from scipy import ndimage
import numpy as np
# In[44]:
# Read every row of the driving log CSV (image paths + telemetry).
lines = []
with open("./data/driving_log.csv") as dl:
    reader = csv.reader(dl)
    for line in reader:
        lines.append(line)
# In[45]:
# Drop the CSV header row.
lines = lines[1:]
# Steering correction applied to the left (+corr) and right (-corr) cameras.
corr = 0.2
source_path = "./data/"
images = []
measurements = []
for line in lines:
    # Column 3 is the recorded steering angle for the centre camera.
    m = float(line[3])
    m_list = [m, m+corr, m-corr]
    for i in range(3):
        image = cv2.imread(source_path + line[i].strip())
        # Convert BGR -> HLS and keep only the L and S channels ([:,:,1:]).
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)[:,:,1:]
        images.append(image)
        measurements.append(m_list[i])
# In[5]:
print(image.shape)
aug_images = []
aug_measure = []
# In[6]:
# Augmentation: add a horizontally mirrored copy of every image with the
# steering angle negated.
for image, measurement in zip(images, measurements):
    aug_images.append(image)
    aug_measure.append(measurement)
    aug_images.append(np.fliplr(image))
    aug_measure.append(-1.0*measurement)
# In[7]:
X_train = np.array(aug_images)
y_train = np.array(aug_measure)
# In[19]:
import tensorflow as tf
# In[40]:
from keras.models import Sequential
from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D
# In[42]:
# Convolutional stack (24/36/48/64/64 filters) feeding dense layers -- resembles
# the NVIDIA end-to-end driving architecture.
# NOTE(review): input_shape is (160,320,3) but the images above keep only two
# HLS channels -- confirm the channel count matches the actual data.
model = Sequential()
# Crop 60 rows from the top (sky) and 20 from the bottom (car hood).
model.add(Cropping2D(cropping=((60,20),(0,0)), input_shape = (160,320,3)))
# Normalise pixel values to [-0.5, 0.5].
model.add(Lambda(lambda x:x/255.0 - 0.5))
model.add(Conv2D(24, (5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(36, (5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(48, (5,5),strides=(2,2),activation='relu'))
model.add(Conv2D(64, (3,3),activation='relu'))
model.add(Conv2D(64, (3,3),activation='relu'))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10))
# Single output: the predicted steering angle (regression).
model.add(Dense(1))
# In[13]:
model.compile(optimizer='adam', loss = 'mse')
model.fit(X_train, y_train, validation_split=0.2,shuffle=True, epochs=5)
# In[40]:
model.save('model_hls.h5')
# In[40]:
model.save('model_hls.h5')
| 0 | 0 | 0 |
51f18c307bfe7de9cf366c1eff9e4ea3c3bde826 | 4,953 | py | Python | jupyterlab_translate/api.py | fcollonval/jupyterlab-translate | 9c518200c39c431f91ba4c16d162e56f0253ba14 | [
"BSD-3-Clause"
] | 1 | 2020-07-11T22:54:15.000Z | 2020-07-11T22:54:15.000Z | jupyterlab_translate/api.py | goanpeca/jupyterlab-translate | 9c518200c39c431f91ba4c16d162e56f0253ba14 | [
"BSD-3-Clause"
] | null | null | null | jupyterlab_translate/api.py | goanpeca/jupyterlab-translate | 9c518200c39c431f91ba4c16d162e56f0253ba14 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""
API interface.
"""
import os
import shutil
from .constants import EXTENSIONS_FOLDER
from .constants import JUPYTERLAB
from .constants import LANG_PACKS_FOLDER
from .constants import LC_MESSAGES
from .constants import LOCALE_FOLDER
from .converters import convert_catalog_to_json
from .utils import check_locale
from .utils import compile_to_mo
from .utils import compile_translations
from .utils import create_new_language_pack
from .utils import extract_translations
from .utils import update_translations
def check_locales(locales):
    """
    Validate a list of locale identifiers.

    Raises an ``Exception`` naming the first invalid locale found; returns
    ``None`` when every locale passes ``check_locale``.

    Parameters
    ----------
    locales: list
        List of locales
    """
    bad = next((loc for loc in locales if not check_locale(loc)), None)
    if bad is not None:
        raise Exception("Invalid locale '{locale}'".format(locale=bad))
def normalize_project(project):
    """
    Normalize a project name: lower-case it and turn dashes into underscores.

    Parameters
    ----------
    project: str
        Project name, e.g. ``"JupyterLab-Git"``.
    """
    lowered = project.lower()
    return lowered.replace("-", "_")
def extract_package(package_repo_dir, project):
    """
    Extract translatable strings from a single package repository.

    NOTE(review): currently an unimplemented no-op stub -- the body was
    never written.
    """
def update_package(package_repo_dir, project, locales):
    """
    Update the translation catalogs stored inside a package repository.

    Validates *locales* (when given), then runs ``update_translations``
    against the package's catalog directory; raises if that directory does
    not exist.
    """
    if locales:
        check_locales(locales)
    normalized = normalize_project(project)
    catalog_dir = os.path.join(package_repo_dir, normalized)
    if not os.path.isdir(catalog_dir):
        msg = "Output dir `{output_dir}` not found!".format(output_dir=catalog_dir)
        raise Exception(msg)
    update_translations(package_repo_dir, catalog_dir, normalized, locales)
def compile_package(package_repo_dir, project, locales):
    """
    Compile a package's translation catalogs and convert each compiled
    catalog to JSON alongside its source ``.po`` file.
    """
    if locales:
        check_locales(locales)
    normalized = normalize_project(project)
    catalog_dir = os.path.join(package_repo_dir, normalized)
    compiled = compile_translations(catalog_dir, normalized, locales)
    for po_path in compiled.values():
        convert_catalog_to_json(po_path, os.path.dirname(po_path), normalized)
def extract_language_pack(package_repo_dir, language_packs_repo_dir, project):
    """
    Extract translatable strings for *project* into the language-packs repo.

    JupyterLab itself lives at the repo root; every other project lives
    under the extensions folder.  The target directory is created if needed.
    """
    normalized = normalize_project(project)
    if normalized == JUPYTERLAB:
        target = os.path.join(language_packs_repo_dir, normalized)
    else:
        target = os.path.join(language_packs_repo_dir, EXTENSIONS_FOLDER, normalized)
    os.makedirs(target, exist_ok=True)
    extract_translations(package_repo_dir, target, normalized)
def update_language_pack(package_repo_dir, language_packs_repo_dir, project, locales):
    """
    Update the translation catalogs for *project* inside the language-packs
    repository, creating the target directory if needed.
    """
    if locales:
        check_locales(locales)
    project = normalize_project(project)
    if project == JUPYTERLAB:
        output_dir = os.path.join(language_packs_repo_dir, project)
    else:
        # Consistency fix: build the path from the shared EXTENSIONS_FOLDER
        # constant (as extract_language_pack and compile_language_pack do)
        # instead of the hard-coded "jupyterlab_extensions" literal, so the
        # directory updated here is the one the sibling functions create/read.
        output_dir = os.path.join(language_packs_repo_dir, EXTENSIONS_FOLDER, project)
    os.makedirs(output_dir, exist_ok=True)
    update_translations(package_repo_dir, output_dir, project, locales)
def compile_language_pack(language_packs_repo_dir, project, locales):
    """
    Compile *project*'s catalogs and install the resulting ``.mo``/``.json``
    files into the per-locale ``jupyterlab-language-pack-<locale>`` packages,
    creating any language pack that does not exist yet.
    """
    if locales:
        check_locales(locales)
    project = normalize_project(project)
    # Catalogs live at the repo root for JupyterLab itself, otherwise under
    # the extensions folder.
    if project == JUPYTERLAB:
        output_dir = os.path.join(language_packs_repo_dir, project)
    else:
        output_dir = os.path.join(language_packs_repo_dir, EXTENSIONS_FOLDER, project)
    po_paths = compile_translations(output_dir, project, locales)
    for locale, po_path in po_paths.items():
        output_path = os.path.dirname(po_path)
        json_path = convert_catalog_to_json(po_path, output_path, project)
        mo_path = compile_to_mo(po_path)
        # Move to language pack folder.  Naming convention: dashes in the
        # distribution name, underscores in the inner module directory.
        language_packs_dir = os.path.join(language_packs_repo_dir, LANG_PACKS_FOLDER)
        pkg_name = "jupyterlab-language-pack-{locale}".format(locale=locale).replace(
            "_", "-"
        )
        locale_language_pack_dir = os.path.join(
            language_packs_dir, pkg_name, pkg_name.replace("-", "_")
        )
        # Check if it exists, otherwise create it
        if not os.path.isdir(locale_language_pack_dir):
            create_new_language_pack(language_packs_dir, locale)
        # `output_dir` is reused: from here on it is the *destination* inside
        # the language pack, no longer the catalog source directory above.
        if project == JUPYTERLAB:
            output_dir = os.path.join(locale_language_pack_dir)
        else:
            output_dir = os.path.join(locale_language_pack_dir, EXTENSIONS_FOLDER)
        # NOTE(review): rmtree targets what appear to be *files* (basenames of
        # mo_path/json_path); rmtree expects a directory and ignore_errors=True
        # silently masks the failure -- confirm os.remove was intended.
        shutil.rmtree(
            os.path.join(output_dir, os.path.basename(mo_path)), ignore_errors=True
        )
        shutil.rmtree(
            os.path.join(output_dir, os.path.basename(json_path)), ignore_errors=True
        )
        shutil.move(mo_path, os.path.join(output_dir, os.path.basename(mo_path)))
        shutil.move(json_path, os.path.join(output_dir, os.path.basename(json_path)))
| 28.796512 | 86 | 0.68827 | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""
API interface.
"""
import os
import shutil
from .constants import EXTENSIONS_FOLDER
from .constants import JUPYTERLAB
from .constants import LANG_PACKS_FOLDER
from .constants import LC_MESSAGES
from .constants import LOCALE_FOLDER
from .converters import convert_catalog_to_json
from .utils import check_locale
from .utils import compile_to_mo
from .utils import compile_translations
from .utils import create_new_language_pack
from .utils import extract_translations
from .utils import update_translations
def check_locales(locales):
    """
    Check if a given list of locale values is valid.

    Raises an exception if an invalid locale value is found.

    Parameters
    ----------
    locales: list
        List of locales
    """
    # Fail fast on the first locale rejected by check_locale.
    for locale in locales:
        if not check_locale(locale):
            raise Exception("Invalid locale '{locale}'".format(locale=locale))
def normalize_project(project):
    """
    Normalize a project name: lower-case it and turn dashes into underscores.

    Parameters
    ----------
    project: str
        Project name, e.g. ``"JupyterLab-Git"``.
    """
    return project.lower().replace("-", "_")
def extract_package(package_repo_dir, project):
    """
    Extract translatable strings from a single package repository.

    NOTE(review): currently an unimplemented no-op stub -- the body was
    never written.
    """
def update_package(package_repo_dir, project, locales):
    """
    Update the translation catalogs stored inside a package repository.

    Validates *locales* (when given), then runs ``update_translations``
    against the package's catalog directory; raises if that directory does
    not exist.
    """
    if locales:
        check_locales(locales)
    project = normalize_project(project)
    output_dir = os.path.join(package_repo_dir, project)
    if not os.path.isdir(output_dir):
        raise Exception(
            "Output dir `{output_dir}` not found!".format(output_dir=output_dir)
        )
    update_translations(package_repo_dir, output_dir, project, locales)
def compile_package(package_repo_dir, project, locales):
    """
    Compile a package's translation catalogs and convert each compiled
    catalog to JSON alongside its source ``.po`` file.
    """
    if locales:
        check_locales(locales)
    project = normalize_project(project)
    output_dir = os.path.join(package_repo_dir, project)
    po_paths = compile_translations(output_dir, project, locales)
    for __, po_path in po_paths.items():
        output_path = os.path.dirname(po_path)
        convert_catalog_to_json(po_path, output_path, project)
def extract_language_pack(package_repo_dir, language_packs_repo_dir, project):
    """
    Extract translatable strings for *project* into the language-packs repo.

    JupyterLab itself lives at the repo root; every other project lives
    under the extensions folder.  The target directory is created if needed.
    """
    project = normalize_project(project)
    if project == JUPYTERLAB:
        output_dir = os.path.join(language_packs_repo_dir, project)
    else:
        output_dir = os.path.join(language_packs_repo_dir, EXTENSIONS_FOLDER, project)
    os.makedirs(output_dir, exist_ok=True)
    extract_translations(package_repo_dir, output_dir, project)
def update_language_pack(package_repo_dir, language_packs_repo_dir, project, locales):
    """
    Update the translation catalogs for *project* inside the language-packs
    repository, creating the target directory if needed.
    """
    if locales:
        check_locales(locales)
    project = normalize_project(project)
    if project == JUPYTERLAB:
        output_dir = os.path.join(language_packs_repo_dir, project)
    else:
        # NOTE(review): sibling functions build this path from the
        # EXTENSIONS_FOLDER constant; the hard-coded "jupyterlab_extensions"
        # literal here looks inconsistent -- confirm the constant's value.
        output_dir = os.path.join(
            language_packs_repo_dir, "jupyterlab_extensions", project
        )
    os.makedirs(output_dir, exist_ok=True)
    update_translations(package_repo_dir, output_dir, project, locales)
def compile_language_pack(language_packs_repo_dir, project, locales):
    """
    Compile *project*'s catalogs and install the resulting ``.mo``/``.json``
    files into the per-locale ``jupyterlab-language-pack-<locale>`` packages,
    creating any language pack that does not exist yet.
    """
    if locales:
        check_locales(locales)
    project = normalize_project(project)
    # Catalogs live at the repo root for JupyterLab itself, otherwise under
    # the extensions folder.
    if project == JUPYTERLAB:
        output_dir = os.path.join(language_packs_repo_dir, project)
    else:
        output_dir = os.path.join(language_packs_repo_dir, EXTENSIONS_FOLDER, project)
    po_paths = compile_translations(output_dir, project, locales)
    for locale, po_path in po_paths.items():
        output_path = os.path.dirname(po_path)
        json_path = convert_catalog_to_json(po_path, output_path, project)
        mo_path = compile_to_mo(po_path)
        # Move to language pack folder.  Naming convention: dashes in the
        # distribution name, underscores in the inner module directory.
        language_packs_dir = os.path.join(language_packs_repo_dir, LANG_PACKS_FOLDER)
        pkg_name = "jupyterlab-language-pack-{locale}".format(locale=locale).replace(
            "_", "-"
        )
        locale_language_pack_dir = os.path.join(
            language_packs_dir, pkg_name, pkg_name.replace("-", "_")
        )
        # Check if it exists, otherwise create it
        if not os.path.isdir(locale_language_pack_dir):
            create_new_language_pack(language_packs_dir, locale)
        # `output_dir` is reused: from here on it is the *destination* inside
        # the language pack, no longer the catalog source directory above.
        if project == JUPYTERLAB:
            output_dir = os.path.join(locale_language_pack_dir)
        else:
            output_dir = os.path.join(locale_language_pack_dir, EXTENSIONS_FOLDER)
        # NOTE(review): rmtree targets what appear to be *files* (basenames of
        # mo_path/json_path); rmtree expects a directory and ignore_errors=True
        # silently masks the failure -- confirm os.remove was intended.
        shutil.rmtree(
            os.path.join(output_dir, os.path.basename(mo_path)), ignore_errors=True
        )
        shutil.rmtree(
            os.path.join(output_dir, os.path.basename(json_path)), ignore_errors=True
        )
        shutil.move(mo_path, os.path.join(output_dir, os.path.basename(mo_path)))
        shutil.move(json_path, os.path.join(output_dir, os.path.basename(json_path)))
| 0 | 0 | 0 |
f87448190a9f21e196edac22d504675466d4f3e3 | 2,279 | py | Python | advent3_triangles.py | mike10004/adventofcode2016 | 2d74ae23f79c9d08366516b285f3eb1e686232b9 | [
"MIT"
] | null | null | null | advent3_triangles.py | mike10004/adventofcode2016 | 2d74ae23f79c9d08366516b285f3eb1e686232b9 | [
"MIT"
] | null | null | null | advent3_triangles.py | mike10004/adventofcode2016 | 2d74ae23f79c9d08366516b285f3eb1e686232b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# advent3_triangles.py
#
import itertools
import sys
# Sanity fixtures: triples known to be valid / invalid triangles.
_KNOWN_TRUE = ((5, 12, 13), (3, 3, 3), (200, 300, 450), (2, 1, 2))
_KNOWN_FALSE = ((1, 2, 3), (5, 10, 25))
if __name__ == '__main__':
    # Self-test against the known fixtures before parsing arguments.
    assert sum(evaluate(_KNOWN_TRUE)) == len(_KNOWN_TRUE)
    assert sum(evaluate(_KNOWN_FALSE)) == 0
    from argparse import ArgumentParser
    p = ArgumentParser()
    p.add_argument("parse_mode", choices=('rows', 'cols'), help="how triples are oriented in the input")
    p.add_argument("sidelengths", nargs="*", help="side lengths, every 3 of which are interpreted as a possible triangle; leave empty to read from stdin", default=[])
    p.add_argument("--verbose", default=False, action="store_true")
    args = p.parse_args()
    # Fall back to whitespace-separated numbers on stdin when no args given.
    if len(args.sidelengths) == 0:
        args.sidelengths = sys.stdin.read().split()
    sys.exit(main(args))
| 34.014925 | 166 | 0.637999 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# advent3_triangles.py
#
import itertools
import sys
def triad(t, oddman_index):
    """Split *t* into (pair_of_the_other_two_values, value_at_oddman_index).

    Fix: replaced the Python-2-only ``xrange`` with an ``enumerate``
    comprehension that works on both Python 2 and 3.
    """
    pair = tuple(v for i, v in enumerate(t) if i != oddman_index)
    oddman = t[oddman_index]
    return pair, oddman
def is_triangle(t):
    """Return True if the three positive side lengths in *t* form a triangle.

    Degenerate triples (two sides summing exactly to the third) return False,
    matching the original strict-inequality behaviour.  Fix: replaced the
    Python-2-only ``xrange`` loop with the equivalent sorted strict triangle
    inequality (only the longest side's comparison can fail).
    """
    a, b, c = t
    assert a > 0 and b > 0 and c > 0, "values must be positive: %s" % str(t)
    x, y, z = sorted(t)
    return x + y > z
_KNOWN_TRUE = ((5, 12, 13), (3, 3, 3), (200, 300, 450), (2, 1, 2))
_KNOWN_FALSE = ((1, 2, 3), (5, 10, 25))
def evaluate(triples):
    """Map each candidate triple to 1 (valid triangle) or 0 (not)."""
    return [int(is_triangle(candidate)) for candidate in triples]
def parse_triples_every_3(sidelengths):
    """Group a flat sequence of side lengths into consecutive triples (row order).

    Fixes/improvements: Python-2-only ``xrange`` replaced with ``range``;
    the input is materialised with ``list`` so any iterable is accepted;
    slicing replaces manual indexing (and tolerates a short trailing group
    instead of raising IndexError).
    """
    sidelengths = list(sidelengths)
    return [sidelengths[i:i + 3] for i in range(0, len(sidelengths), 3)]
def parse_triples_every_third(sidelengths):
    """Group a flat sequence into triples read down the columns of 3x3 blocks.

    Values are consumed nine at a time (three rows of three columns); each
    output triple is one column of that block.  Fix: Python-2-only ``xrange``
    replaced with ``range``; input materialised so any iterable is accepted.
    """
    sidelengths = list(sidelengths)
    triples = []
    for base in range(0, len(sidelengths), 9):
        for col in range(3):
            triples.append([sidelengths[base + col + 3 * row] for row in range(3)])
    return triples
def main(args):
    """Parse side lengths per args.parse_mode, count valid triangles, print a summary.

    Uses Python 2 print-statement syntax; this function is Python 2 only.
    """
    # 'rows': every 3 consecutive values form a triple; 'cols': triples are
    # read down the columns of 3x3 blocks.
    if args.parse_mode == 'rows':
        triples = parse_triples_every_3(map(int, args.sidelengths))
    else:
        triples = parse_triples_every_third(map(int, args.sidelengths))
    evaluations = evaluate(triples)
    if args.verbose:
        print >> sys.stderr, "evaluated", len(triples), "triples:", triples
    print "%d of %d triples represent valid triangle side lengths" % (sum(evaluations), len(triples))
    return 0
if __name__ == '__main__':
    # Self-test against the known-valid/known-invalid fixtures before
    # parsing arguments.
    assert sum(evaluate(_KNOWN_TRUE)) == len(_KNOWN_TRUE)
    assert sum(evaluate(_KNOWN_FALSE)) == 0
    from argparse import ArgumentParser
    p = ArgumentParser()
    p.add_argument("parse_mode", choices=('rows', 'cols'), help="how triples are oriented in the input")
    p.add_argument("sidelengths", nargs="*", help="side lengths, every 3 of which are interpreted as a possible triangle; leave empty to read from stdin", default=[])
    p.add_argument("--verbose", default=False, action="store_true")
    args = p.parse_args()
    # Fall back to whitespace-separated numbers on stdin when no args given.
    if len(args.sidelengths) == 0:
        args.sidelengths = sys.stdin.read().split()
    sys.exit(main(args))
| 1,237 | 0 | 150 |
41ef00c687f4ea9fe746ec56e1be8278c6eb3bf9 | 1,064 | py | Python | ParProcCo/scheduler_mode_interface.py | DiamondLightSource/ParProcCo | 75742278f567a36e7bb74bb7c00f98407270a8ac | [
"BSD-3-Clause"
] | null | null | null | ParProcCo/scheduler_mode_interface.py | DiamondLightSource/ParProcCo | 75742278f567a36e7bb74bb7c00f98407270a8ac | [
"BSD-3-Clause"
] | 5 | 2021-09-07T15:02:49.000Z | 2022-03-17T20:43:00.000Z | ParProcCo/scheduler_mode_interface.py | DiamondLightSource/ParProcCo | 75742278f567a36e7bb74bb7c00f98407270a8ac | [
"BSD-3-Clause"
] | 1 | 2021-12-07T08:50:48.000Z | 2021-12-07T08:50:48.000Z | from __future__ import annotations
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple
| 38 | 135 | 0.694549 | from __future__ import annotations
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple
class SchedulerModeInterface:
    """Interface implemented by concrete scheduler modes to drive JobScheduler.

    Subclasses must set the declared attributes and override the three
    ``NotImplementedError``-raising methods.  Fix: the ``t`` parameter of
    ``generate_output_paths`` was annotated ``datetime.datetime``, which is
    wrong given ``from datetime import datetime`` (the name is already the
    class); corrected to ``datetime``.
    """

    def __init__(self) -> None:
        # Declared-only attributes (no value assigned here); concrete modes
        # are expected to populate them.
        self.number_jobs: int
        self.cores: int
        self.program_path: Optional[Path]
        self.allowed_modules: Optional[Tuple[str, ...]] = None

    def set_parameters(self, sliced_results: List) -> None:
        """Sets parameters for generating jobscript args for use within JobScheduler"""
        raise NotImplementedError

    def generate_output_paths(self, output_dir: Optional[Path], error_dir: Path, i: int, t: datetime) -> Tuple[str, str, str]:
        """Generates output, stdout and stderr file paths for job template within JobScheduler"""
        raise NotImplementedError

    def generate_args(self, job_number: int, memory: str, cores: int, jobscript_args: List[str],
                      output_fp: str) -> Tuple[str, ...]:
        """Generates jobscript args for use within JobScheduler"""
        raise NotImplementedError
| 164 | 744 | 23 |