| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
28029416892
|
from django.shortcuts import render,redirect
from django.contrib.auth.decorators import login_required
from django.http.response import JsonResponse
from ..models import Image
import json
from django.template.loader import render_to_string
# Create your views here.
@login_required
def menu_main(request):
print('北')
params = {
'add_image_bottom':'新規追加',
}
# Fetch the image records
object_list = Image.objects.all()
# Fetch the category names
categry_name=Image.objects.values_list('category_name', flat=True)
# Remove duplicate categories and sort them
categry_list = set(categry_name)
categry_list_sort=sorted(categry_list,reverse=True)
# Store them in the template parameters
params['categry_list']=categry_list_sort
params['object_list']=object_list
if (request.method == 'POST'):
print(30)
# Check the user information
# object_list = User.objects.all()
# object_list = User.objects.get(username='test')
# username=request.POST['username']
# password=request.POST['password']
# print(username)
# print(password)
# try:
# user = User.objects.create_user(username,'', password)
# except :
# params['message'] = 'The target user could not be found'
# return redirect('login')
# if user is not None:
# login(request, user)
# return redirect('menu')
# else:
# return redirect('login')
return render(request,'menu.html',params)
def search_category(request):
# hoge = json.loads(request.POST.get("category_name"))
select_category =request.POST.get("category_name")
params = {
'a':'1',
}
# object_list = Image.objects.values(category_name=select_category)
object_list = Image.objects.filter(category_name=select_category)
params['object_list']=object_list
rendered_result = render_to_string('list.html', params)
return JsonResponse({
'html': rendered_result,
})
def delete_image(request):
# hoge = json.loads(request.POST.get("category_name"))
image_id =request.POST.get("image_id")
print(request.POST)
params = {
'a':'1',
}
# object_list = Image.objects.values(category_name=select_category)
# Delete the specified image record
Image.objects.filter(id=image_id).delete()
# object_list = Image.objects.all()
# params['object_list']=object_list
# rendered_result = render_to_string('list.html', params)
return JsonResponse({
'hoge': "hoge",
})
|
mituoka/hobby_management
|
hobby_management/main_app/views/menu.py
|
menu.py
|
py
| 2,688
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19280716334
|
import dict
# print(dict.word_dict)
words = dict.word_dict
center_letter = input("enter the center letter: ")
other_letters = []
for i in range(1, 7):
letter = input("enter other letter " + str(i) + ": ")
while letter in other_letters or letter == center_letter:
print("letter has been used ")
print("center letter: ", center_letter)
print("other letters: ", other_letters)
letter = input("enter other letter " + str(i) + ": ")
other_letters.append(letter)
layer1 = []
for word in words:
if center_letter in word:
layer1.append(word)
layer2 = []
for word in layer1:
breaker = True
for letter in word:
if letter not in other_letters and letter != center_letter:
breaker = False
break
if breaker:
layer2.append(word)
print(layer2)
print(len(layer2), "words have been found")
|
LovelyGkotta/script
|
python_spelling_bee_crack/spell.py
|
spell.py
|
py
| 889
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4459921919
|
from . import dataset
import os
import shutil
from tqdm import tqdm
import cv2
import numpy as np
def coco_data(images_path, json_annotation_path):
# list files in dir
if not os.path.exists(images_path):
raise FileNotFoundError("images path not found")
if not os.path.exists(json_annotation_path):
raise FileNotFoundError("json annotation path not found")
png_images_path = "/dataset/temp/pngimages"
try:
os.mkdir(png_images_path)
except FileExistsError:
shutil.rmtree(png_images_path)
os.mkdir(png_images_path)
dataset.batch_jpg_to_png(images_path, png_images_path)
pngmasks_path = "/dataset/temp/pngmasks"
try:
os.mkdir(pngmasks_path)
except FileExistsError:
shutil.rmtree(pngmasks_path)
os.mkdir(pngmasks_path)
dataset.CocoHandler(json_annotation_path,
images_path).convert_dataset_to_masks(pngmasks_path)
return png_images_path, pngmasks_path
def pascal_voc_data(images_path, annotation_path, labelmap_path):
dataset_path = os.path.dirname(images_path)
converted_mask_p =os.path.join(dataset_path, "temp/converted_masks")
try:
os.makedirs(converted_mask_p)
except FileExistsError:
shutil.rmtree(converted_mask_p)
os.makedirs(converted_mask_p)
png_images_path = os.path.join(dataset_path, "temp/pngimages")
try:
os.mkdir(png_images_path)
except FileExistsError:
shutil.rmtree(png_images_path)
os.mkdir(png_images_path)
dataset.batch_jpg_to_png(images_path, png_images_path)
pngmasks_path = os.path.join(dataset_path,"temp/pngmasks")
try:
os.mkdir(pngmasks_path)
except FileExistsError:
shutil.rmtree(pngmasks_path)
os.mkdir(pngmasks_path)
dataset.batch_jpg_to_png(annotation_path, pngmasks_path)
images_path = png_images_path
annotation_path = pngmasks_path
label_map = open(labelmap_path, "r")
labelmaps = label_map.readlines()
label_map.close()
labelmaps = [x.strip() for x in labelmaps]
class_names = []
class_index = []
class_color = []
for idx, labelmap in enumerate(labelmaps):
class_names.append(labelmap.split(":")[0])
class_index.append(idx)
class_color.append(labelmap.split(":")[1])
mask_paths = os.listdir(annotation_path)
mask_paths = [os.path.join(annotation_path, x) for x in mask_paths]
for mask_path in tqdm(mask_paths):
mask = cv2.imread(mask_path, 1)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
converted_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)
# converted_mask = cv2.cvtColor(converted_mask, cv2.COLOR_BGR2GRAY)
for idx, color in enumerate(class_color):
color = color.split(",")
color = [int(x) for x in color]
converted_mask[np.where((mask == color).all(axis=2))] = class_index[idx]
cv2.imwrite(os.path.join(converted_mask_p, os.path.basename(mask_path)), converted_mask)
return images_path, converted_mask_p, len(class_names)
|
virasad/semantic_segmentation_service
|
train/utils/datahandler.py
|
datahandler.py
|
py
| 3,116
|
python
|
en
|
code
| 2
|
github-code
|
6
|
32909477589
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import time
import random
import decimal
from PySide2.QtWidgets import QApplication, QMessageBox, QWidget
from PySide2 import QtCore
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import QTimer  # keep a single Qt binding (avoid mixing PyQt5 with PySide2)
marry_date = '2020-07-06'
birth_date = '2022-01-22 11:01'
birth_datetime = datetime.datetime.strptime(birth_date, '%Y-%m-%d %H:%M')
marry_datetime = datetime.datetime.strptime(marry_date, '%Y-%m-%d')
def decimal_float_number(number):
decimal.getcontext().rounding = "ROUND_HALF_UP"
res = decimal.Decimal(str(number)).quantize(decimal.Decimal("0.00"))
return str(res)
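# A quick illustrative check (values are examples only): with ROUND_HALF_UP the
# midpoint rounds away from zero, so decimal_float_number(2.675) returns "2.68"
# and decimal_float_number(2.674) returns "2.67".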
def format_date(input_date):
return datetime.datetime.strptime(input_date, '%Y-%m-%d')
def format_datetime(input_date):
return datetime.datetime.strptime(input_date, '%Y-%m-%d %H:%M')
class ComputeTools(QWidget):
def __init__(self):
super().__init__()  # initialize the QWidget base class
self.ui = QUiLoader().load('compute_day.ui')
self.timer = QTimer()
self.timer.timeout.connect(self.compute_day_second)
def compute_day_second(self):
input_date = self.ui.dateEdit.text()
input_date = format_date(input_date)
compute_second = datetime.datetime.now().timestamp() - input_date.timestamp()
compute_day = decimal_float_number(compute_second / 3600 / 24)
self.ui.secondEdit.setText(str(compute_second))
self.ui.dayEdit.setText(str(compute_day))
def msg_notifaction(self, compute_day, compute_second):
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Information)
msg_box.setText('{}秒, {}天'.format(compute_second, compute_day))
msg_box.setDetailedText('{}秒, {}天'.format(compute_second, compute_day))
msg_box.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
msg_box.setDefaultButton(QMessageBox.Ok)
msg_box.show()
msg_box.exec()
def compute_realtime_day_second(self):
self.timer.start(random.randint(10, 1000))
self.ui.computeRealtime.setEnabled(False)
def stop_realtime_compute(self):
self.timer.stop()
self.ui.computeRealtime.setEnabled(True)
def open_page(self):
self.ui.computeRealtime.clicked.connect(self.compute_realtime_day_second)
self.ui.stopCompute.clicked.connect(self.stop_realtime_compute)
self.ui.show()
if __name__ == '__main__':
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
app = QApplication([])
qt_tools = ComputeTools()
qt_tools.open_page()
app.exec_()
|
id10tttt/tools
|
qt_tools/compute_day.py
|
compute_day.py
|
py
| 2,564
|
python
|
en
|
code
| 1
|
github-code
|
6
|
36223514660
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 24 11:01:22 2023
@author: brand
"""
import math
import matplotlib.pyplot as plt
import numpy as np
timeStep = 0.01 #seconds
timeRange = np.arange(0,5,timeStep).tolist() # 20 seconds to take off
# reynolds is 200k
class Aircraft():
def __init__(self, weight, wingAvgChord, wingSpan):
self.wingArea = wingAvgChord * wingSpan # square meters
self.xVel=0
self.yVel=0
self.xPos=0
self.yPos=0
self.weight=weight
def calculateLift(self, AoA, curSpeed):
if AoA == 0:
Cl=0.4304
elif AoA == 5:
Cl = 0.9118
elif AoA== 10:
Cl = 1.2591
else:
Cl=0
print("Incorrect Angle")
self.lift=(0.5)*1.225*curSpeed*curSpeed*Cl*self.wingArea # Newtons
def calculateDrag(self, AoA, curSpeed):
if AoA == 0:
Cd=0.01004
elif AoA == 5:
Cd = 0.01213
elif AoA == 10:
Cd = 0.02158
else:
Cd=0
print("Incorrect Angle")
self.drag=(0.5)*1.225*curSpeed*curSpeed*Cd*self.wingArea # Newtons
def calculatePropThrust(self, curSpeed, propDia, propPitch, propRPM):
self.thrust = (4.392e-8)*propRPM*(pow(propDia,3.5)/pow(propPitch,0.5))*((4.233e-4)*propRPM*propPitch-curSpeed)
#print(self.thrust)
def calculateForces(self,AoA):
if self.yPos==0:
fric_force=self.weight*9.81*0.1
else:
fric_force=0
self.xForces = math.cos(math.radians(AoA))*(self.thrust-self.drag)-fric_force
self.yForces = math.cos(math.radians(AoA))*(self.lift)-self.weight*9.81
#print(self.yForces)
def calcVel(self,timeStep):
self.xVel=self.xForces*timeStep+self.xVel
self.yVel=self.yForces*timeStep+self.yVel
def calcPos(self,timeStep):
self.xPos = self.xVel*timeStep + self.xPos
self.yPos = self.yVel*timeStep + self.yPos
if self.yPos<0:
self.yPos=0
self.yVel=0
class PID():
def __init__(self,P,I,D,step):
self.pGain=P
self.iGain=I
self.dGain=D
self.step=step
self.p=0
self.i=0
self.d=0
self.errSum=0
self.errPrev=0
def gain(self,curAlt,tarAlt):
err=tarAlt-curAlt
self.i=self.errSum+err*self.step
self.d = (err-self.errPrev)/self.step
self.output=err*self.pGain + self.iGain*self.i + self.dGain*self.d
self.errPrev=err
self.errSum=self.i
self.output = max(min(22000,self.output),0)
#print(self.output)
# 0 AoA
plane = Aircraft(1.3, 0.2, 1)
control = PID(700,140/2,140/8,timeStep)
xPos=[]
yPos=[]
xVel=[]
yVel=[]
lift=[]
thrust=[]
curSpeed=0
AoA=0
RPM_l=[]
RPM=22000
for x in timeRange:
plane.calculateLift(AoA,curSpeed)
plane.calculateDrag(AoA,curSpeed)
plane.calculatePropThrust(curSpeed,7,3,RPM)
plane.calculateForces(AoA)
plane.calcVel(timeStep)
plane.calcPos(timeStep)
xPos.append(plane.xPos)
yPos.append(plane.yPos)
curSpeed=plane.xVel
xVel.append(plane.xVel)
yVel.append(plane.yVel)
lift.append(plane.yForces)
thrust.append(plane.thrust)
#RPM_l.append(control.output/1000)
#plt.plot(timeRange,yPos/199)
#plt.plot(timeRange,thrust)
#plt.plot(timeRange,lift)
#plt.plot(timeRange,xPos)
plt.plot(timeRange,yPos)
#plt.plot(timeRange,thrust)
#plt.plot(timeRange, RPM_l)
#plt.plot(timeRange,yVel)
plt.legend(['alt'])
plt.xlabel('Time (s)')
plt.ylabel('Meters')
|
Brandonh291/RC-Plane
|
Systems Engineering/Phase B - Preliminary Design and Technology Completition/Plane Take-off Sim.py
|
Plane Take-off Sim.py
|
py
| 3,633
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12025294058
|
from typing import Dict
from numbers import Number
from transformers.trainer_utils import EvalPrediction
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
def compute_sts_metrics(eval_pred: EvalPrediction) -> Dict[str, Number]:
predictions, labels = eval_pred
preds = predictions.argmax(axis=-1)
precision, recall, f1, _ = precision_recall_fscore_support(
labels, preds, average='macro')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
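# A minimal usage sketch (model and datasets are placeholders, assuming the
# standard transformers Trainer API): pass the function as `compute_metrics`
# so it is applied to every evaluation's EvalPrediction.
#
#   from transformers import Trainer, TrainingArguments
#
#   trainer = Trainer(
#       model=model,                      # assumed defined elsewhere
#       args=TrainingArguments(output_dir="out"),
#       train_dataset=train_ds,           # assumed defined elsewhere
#       eval_dataset=eval_ds,
#       compute_metrics=compute_sts_metrics,
#   )
#   trainer.evaluate()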
|
jinmang2/sts-sift
|
solution/metrics.py
|
metrics.py
|
py
| 596
|
python
|
en
|
code
| 1
|
github-code
|
6
|
9264743430
|
import numpy as np
def vec2diad(vec):
return np.outer(vec, vec)
def cross_prod_mat(vec):
identity = np.eye(3)
return np.cross(identity, vec.reshape(-1, ))
def vector_angle(a, b, normalized=False):
if normalized:
return np.arccos(np.clip(np.dot(a, b), -1., 1.))
else:
return np.arccos(np.clip(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)), -1., 1.))
def xyz2ra_dec(vec):
ra = np.arctan2(vec[1], vec[0])
sin_ra = np.sin(ra)
cos_ra = np.cos(ra)
# two branches to increase accuracy
# as if abs(sin(x)) is close to 1, its slope is small
if np.abs(sin_ra) < .8:
dec = np.arctan2(vec[2] * sin_ra, vec[1])
else:
dec = np.arctan2(vec[2] * cos_ra, vec[0])
return ra, dec
def matrix_minor(mat, i, j):
return np.delete(np.delete(mat, i, axis=0), j, axis=1)
def adjoint_matrix(mat, symmetric=False):
adjoint = np.zeros_like(mat)
num_rows, num_columns = adjoint.shape[0], adjoint.shape[1]
for i in range(num_rows):
for j in range(num_columns):
if not symmetric:
# transpose is included
adjoint[j, i] = ((-1) ** (i + j)) * np.linalg.det(matrix_minor(mat, i, j))
else:
if j < i:
continue
adjoint[j, i] = adjoint[i, j] = ((-1) ** (i + j)) * np.linalg.det(matrix_minor(mat, i, j))
return adjoint
def chol_update(L: np.ndarray, x: np.ndarray, weight: float = 1.):
"""
Rank-1 Cholesky-update of the Cholesky-factor (if x is a matrix, x_num_col updates are
carried out)
:param L: Cholesky factor (triangular matrix)
:param x: update vector (if matrix it is also handled)
:param weight: sign of the update (-: downdate/+:update)
:return:
"""
# todo: consider this version for efficiency: https://christian-igel.github.io/paper/AMERCMAUfES.pdf, Alg. 3.1
def chol_vec_update(L: np.ndarray, x: np.ndarray, vec_dim: int, weight: float = 1.):
"""
Rank-1 Cholesky-update of the Cholesky-factor
:param L: Cholesky factor (triangular matrix)
:param x: update vector
:param vec_dim: dimension of the vector
:param weight: weight of the update (-: downdate/+:update)
:return:
"""
for i in range(vec_dim):
if L[i, i] ** 2 + weight * x[i] ** 2 < 0:
# breakpoint()
# raise ValueError(f"negative value\n, {L}")
print(f"negative value\n, {L}")
pass
r = np.sqrt(L[i, i] ** 2 + weight * (x[i] ** 2))
inv_diag_i = 1 / L[i, i]
c = r * inv_diag_i
s = x[i] * inv_diag_i
L[i, i] = r
L[i, i + 1:vec_dim] = (L[i, i + 1:vec_dim] + weight * s * x[i + 1:vec_dim]) / c
x[i + 1:vec_dim] = c * x[i + 1:vec_dim] - s * L[i, i + 1:vec_dim]
if len(x.shape) == 1:
chol_vec_update(L, x, x.size, weight)
else:
vec_dim = x.shape[0]
num_updates = x.shape[1]
for i in range(num_updates):
chol_vec_update(L, x[:, i], vec_dim, weight)
return L
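# A small sanity-check sketch (illustrative values only). The row-wise update
# above follows the classic cholupdate recursion for an upper-triangular
# factor R with A = R.T @ R, so the check below uses that convention:
#
#   A = np.array([[4., 2.], [2., 5.]])
#   x = np.array([2., 0.])
#   R = np.linalg.cholesky(A).T                     # upper factor of A
#   R_new = chol_update(R.copy(), x.copy(), weight=1.)
#   assert np.allclose(R_new, np.linalg.cholesky(A + np.outer(x, x)).T)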
def separated_chol_update(pos_sigma_points, neg_sigma_points, noise_cov_sqrt):
S_hat = np.linalg.qr(np.concatenate((pos_sigma_points, noise_cov_sqrt), axis=1).T, mode="r").T
if neg_sigma_points.shape[-1] != 0:
S_hat = chol_update(S_hat, neg_sigma_points, -1.)
return S_hat
|
rpatrik96/adcs-simulation
|
src/alg_utils.py
|
alg_utils.py
|
py
| 3,468
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1293512231
|
import inspect
from onnx import defs
from onnx.backend.test.runner import BackendIsNotSupposedToImplementIt
from onnx_jax.logger import logger
class Handler(object):
ONNX_OP = None
DOMAIN = defs.ONNX_DOMAIN
VERSION = 0
SINCE_VERSION = 0
@classmethod
def check_cls(cls):
if not cls.ONNX_OP:
logger.warning(
f"{cls.__name__} doesn't have ONNX_OP. "
"Please use Handler.onnx_op decorator to register ONNX_OP."
)
@classmethod
def args_check(cls, node, **kwargs):
pass
@classmethod
def handle(cls, node, **kwargs):
ver_handle = getattr(cls, "version_{}".format(cls.SINCE_VERSION), None)
if ver_handle:
cls.args_check(node, **kwargs)
return ver_handle(node, **kwargs)
raise BackendIsNotSupposedToImplementIt(
"{} version {} is not implemented.".format(node.op_type, cls.SINCE_VERSION)
)
@classmethod
def get_versions(cls):
versions = []
for k, v in inspect.getmembers(cls, inspect.ismethod):
if k.startswith("version_"):
versions.append(int(k.replace("version_", "")))
return versions
@staticmethod
def onnx_op(op):
return Handler.property_register("ONNX_OP", op)
@staticmethod
def domain(d):
return Handler.property_register("DOMAIN", d)
@staticmethod
def property_register(name, value):
def deco(cls):
setattr(cls, name, value)
return cls
return deco
domain = Handler.domain
onnx_op = Handler.onnx_op
property_register = Handler.property_register
|
gglin001/onnx-jax
|
onnx_jax/handlers/handler.py
|
handler.py
|
py
| 1,679
|
python
|
en
|
code
| 7
|
github-code
|
6
|
37864457588
|
import subprocess
import sys
def process_exists(process_name):
if sys.platform.startswith("win32"):
# https://stackoverflow.com/questions/7787120/python-check-if-a-process-is-running-or-not
# Use tasklist to reduce package dependency.
call = "TASKLIST", "/FI", "imagename eq %s" % process_name
# use buildin check_output right away
output = subprocess.check_output(call).decode()
# check in last line for process name
last_line = output.strip().split("\r\n")[-1]
# because Fail message could be translated
return last_line.lower().startswith(process_name.lower())
else:
call = f"""pgrep "{process_name}" """
retcode = subprocess.call(call)
return retcode == 0
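# A minimal usage sketch (the process name is only a placeholder):
#
#   if process_exists("notepad.exe"):
#       print("process is running")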
|
juria90/service_ppt
|
process_exists.py
|
process_exists.py
|
py
| 768
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72532696509
|
""" Subsystem to communicate with catalog service
"""
import logging
from aiohttp import web
from pint import UnitRegistry
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
from . import _handlers
_logger = logging.getLogger(__name__)
@app_module_setup(
__name__,
ModuleCategory.ADDON,
settings_name="WEBSERVER_CATALOG",
depends=["simcore_service_webserver.rest"],
logger=_logger,
)
def setup_catalog(app: web.Application):
# ensure route names correspond to handler function names
assert all( # nosec
route_def.kwargs["name"] == route_def.handler.__name__
for route_def in _handlers.routes
)
app.add_routes(_handlers.routes)
# prepares units registry
app[UnitRegistry.__name__] = UnitRegistry()
|
ITISFoundation/osparc-simcore
|
services/web/server/src/simcore_service_webserver/catalog/plugin.py
|
plugin.py
|
py
| 801
|
python
|
en
|
code
| 35
|
github-code
|
6
|
1584126381
|
# -*- coding: utf-8 -*-
import os
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from filer.settings import FILER_STATICMEDIA_PREFIX
from cmsplugin_filer_html5video.models import FilerHTML5Video
class FilerHTML5VideoPlugin(CMSPluginBase):
model = FilerHTML5Video
name = _("HTML5 Video (Filer)")
render_template = "cmsplugin_filer_html5video/video.html"
text_enabled = True
general_fields = [
'title',
('width', 'height'),
'auto_play',
'auto_hide',
'fullscreen',
'loop',
]
fieldsets = [
(None, {
'fields': general_fields,
}),
(_('formats'), {
'fields': ('video_mp4', 'video_webm', 'video_ogv', 'image')
})
]
def render(self, context, instance, placeholder):
formats = {}
for format in ('video_mp4', 'video_webm', 'video_ogv'):
if getattr(instance, format + '_id'):
formats[format.replace('_', '/')] = getattr(instance, format).url
context.update({
'object': instance,
'placeholder':placeholder,
'formats': formats
})
return context
def icon_src(self, instance):
return os.path.normpath(u"%s/icons/video_%sx%s.png" % (FILER_STATICMEDIA_PREFIX, 32, 32,))
plugin_pool.register_plugin(FilerHTML5VideoPlugin)
|
beniwohli/cmsplugin-filer-html5video
|
cmsplugin_filer_html5video/cms_plugins.py
|
cms_plugins.py
|
py
| 1,464
|
python
|
en
|
code
| 8
|
github-code
|
6
|
29806901602
|
import os
from dataclasses import dataclass
from datetime import datetime
from fastapi.encoders import jsonable_encoder
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload
from MenuApp.src.models import Menu, Submenu
@dataclass
class ReportService:
db: AsyncSession
async def get_data(self) -> list:
"""Generate a list of all menus, submenus and dishes.
Returns:
report: a list of all menus, submenus and dishes.
"""
stmt = select(Menu).options(joinedload(Menu.submenus).joinedload(Submenu.dishes))
data = await self.db.execute(stmt)
return jsonable_encoder(data.scalars().unique().all())
@staticmethod
def formate_data(report_data) -> dict:
"""Generate a dict-template to write in xlsx-file.
Parameters:
report_data: JSON-file with all data,
Returns:
template: a dict template to be written to an xlsx file.
"""
generated_date = datetime.now().strftime("%d %B %Y at %H:%M")
timezone = datetime.now().astimezone()
description = f"Report generated {generated_date} ({timezone.tzinfo.__str__()})"
template = {
"A": [description],
"B": [""],
"C": [""],
"D": [""],
"E": [""],
"F": [""],
}
for i, menu in enumerate(report_data, 1):
template["A"].append(str(i))
template["B"].append(menu["title"])
template["C"].append(menu["description"])
template["D"].append("")
template["E"].append("")
template["F"].append("")
for j, submenu in enumerate(menu["submenus"], 1):
template["A"].append("")
template["B"].append(str(j))
template["C"].append(submenu["title"])
template["D"].append(submenu["description"])
template["E"].append("")
template["F"].append("")
for k, dish in enumerate(submenu["dishes"], 1):
template["A"].append("")
template["B"].append("")
template["C"].append(str(k))
template["D"].append(dish["title"])
template["E"].append(dish["description"])
template["F"].append(dish["price"])
return template
@staticmethod
def is_exist(file_path):
return os.path.exists(file_path)
|
Aliakseeva/MenuApp
|
MenuApp/src/services/tasks/report_service.py
|
report_service.py
|
py
| 2,538
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10138014338
|
import engine.db_structure as db_py
import os
filename = "io.vdb"
if os.path.isfile(filename):
os.remove(filename)
db = db_py.Database(False, filename)
def test_create_io():
db.create_table("vadik_table", {"zhenya1": "int", "zhenya2": "str"})
assert db.get_io_count() == 31
def test_insert_io():
db.tables[0].insert(["zhenya1", "zhenya2"], [5000, "b"])
assert db.get_io_count() == 89
def test_update_io():
db.tables[0].update(["zhenya2"], [["lovetsov"]], [db.tables[0].get_row_by_id(0)])
assert db.get_io_count() == 278
def test_delete_io():
db.tables[0].delete()
assert db.get_io_count() == 400
def test_cache_io():
db.tables[0].insert(["zhenya1", "zhenya2"], [5000, "b"])
io_count_first = db.get_io_count()
db.tables[0].get_row_by_id(0)
io_count_second = db.get_io_count()
db.tables[0].get_row_by_id(0)
io_count_third = db.get_io_count()
assert (io_count_second - io_count_first) > (io_count_third - io_count_second)
|
etozhezhenechka/VadikDB
|
engine_tests/io_tests.py
|
io_tests.py
|
py
| 993
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36416683518
|
from typing import List
class Solution:
def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
num = [0]*len(temperatures)
index = []
for i,j in enumerate(temperatures):
while len(index)!=0 and temperatures[index[-1]] < j:
i1 = index.pop()
num[i1] = i - i1
index.append(i)
return num
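# Worked example (the classic test case):
#   Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73])
#   returns [1, 1, 4, 2, 1, 1, 0, 0] -- each entry is the number of days to
#   wait for a strictly warmer temperature, or 0 if none follows.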
|
eyosiasbitsu/Competitive-programming-A2SV
|
Before BootCamp/week2/daily-temperatures.py
|
daily-temperatures.py
|
py
| 363
|
python
|
en
|
code
| 3
|
github-code
|
6
|
74189715388
|
# 3 : Write a Python program to display the first and last colors from the following list.
# Example : color_list = ["Red","Green","White" ,"Black"].
# Your list should be flexible such that it displays any color that is part of the list.
from typing import List
color_list = ["Red", "Green", "White", "Black", "Pink", "Azure", "Brown"]
newColorlist = color_list[0]
finalColorlist = color_list[-1]
print(newColorlist, finalColorlist)
|
aryashah0907/Arya_GITSpace
|
Test_Question_2.py
|
Test_Question_2.py
|
py
| 454
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2077956577
|
from odoo import models, api, fields, _
# from odoo.exceptions import UserError
from datetime import datetime
from dateutil.relativedelta import relativedelta
import json
import xlsxwriter
from odoo.exceptions import ValidationError
from odoo.exceptions import UserError
import base64
import io
try:
import xlwt
except ImportError:
xlwt = None
class billingMonthModel(models.Model):
_name = 'billing.month'
_description = 'Billing Month Model'
name = fields.Char(string='Name', required=True)
description = fields.Text(string='Description')
class AccountMoveReport(models.TransientModel):
_name = 'account.recovery.report.move.line'
billing_cycle=fields.Char('Billing Cycle')
total_issuance=fields.Integer('Total Billing (Bills Issuance)')
no_of_std=fields.Integer('#No of Students')
total_recovery=fields.Integer('Recovery')
recovery_percentage=fields.Char('Percentage of Recovery on Amount')
class RecoveryReportWizard(models.TransientModel):
_name="recovery.report.wizard"
_description='Print Recovery Wizard'
# selected_month= fields.Many2many('billing.month', string='Select Month')
from_date = fields.Date(string='From')
to_date = fields.Date(string='To')
all_branch=fields.Boolean(string=" Select All Branches")
one_branch=fields.Many2one('school.school', string= 'Select any one branch')
account_recovery_report_line=fields.Many2many('account.recovery.report.move.line', string='Account report Line')
# groups_ids = fields.Many2many('aging.invoice.group', string='Groups')
def _branch_constrains(self):
if self.all_branch and self.one_branch:
raise ValidationError(_('Sorry, You Must select only one option.'))
elif not self.one_branch and not self.all_branch:
raise ValidationError(_('Sorry, You Must select atleast one option.'))
if not self.to_date or not self.from_date:
raise ValidationError(_('Please Select the both dates.'))
def list_months(self):
next_month = self.to_date + relativedelta(months=1)
first_day_of_next_month = next_month.replace(day=1)
# Subtract one day from the first day of the next month to get the last day of the current month
last_day_of_month = first_day_of_next_month - relativedelta(days=1)
# Initialize the result list
covered_months = []
# Iterate over each month within the duration
current_month = self.from_date
while current_month <= last_day_of_month:
# Format the month as "Mon-YY" (e.g., Feb-22)
month_str = current_month.strftime("%b-%y")
# Add the formatted month to the result list
covered_months.append(month_str)
# Move to the next month
current_month += relativedelta(months=1)
return covered_months
def action_print_report(self):
lines=[]
selected_month = self.list_months()
for month in selected_month:
if self.all_branch==True:
inv_ids=self.env['account.move'].search([('move_type','=','out_invoice'),('journal_id','=',125),('state','=','posted'),('invoice_date',">=",self.from_date),('invoice_date',"<=",self.to_date)])
else:
inv_ids=self.env['account.move'].search([('move_type','=','out_invoice'),('state','=','posted'),('journal_id','=',125),('x_studio_current_branchschool','=',self.one_branch.id),('invoice_date',">=",self.from_date),('invoice_date',"<=",self.to_date)])
stud_lst=[]
month_issuance=0
month_due_amount=0
month_recovery=0
perc=0
for rec in inv_ids:
invoice_month = rec.invoice_date.strftime("%b-%y")
if invoice_month==month:
if rec.x_studio_udid_monthly_bills not in stud_lst:
stud_lst.append(rec.x_studio_udid_monthly_bills)
month_issuance=month_issuance+rec.amount_total
if rec.payment_state=='paid':
month_recovery = month_recovery+rec.amount_total
nostd=len(stud_lst)
if month_issuance !=0 :
number=(month_recovery/month_issuance)*100
perc = round(number, 2)
mvl=self.env['account.recovery.report.move.line'].create({
"billing_cycle":month,
"total_issuance":month_issuance,
"no_of_std":nostd,
"total_recovery":month_recovery,
"recovery_percentage":str(perc)+'%',
})
lines.append(mvl.id)
self.write({
"account_recovery_report_line":[(6,0,lines)]
})
def action_print_excel_recovery_report(self):
self._branch_constrains()
self.action_print_report()
if xlwt:
branch=""
if self.all_branch:
branch="All Branches"
else:
branch=self.one_branch.name
filename = str(branch)+"-"+str(self.from_date)+"-"+str(self.to_date)+".xls"
# One sheet by partner
workbook = xlwt.Workbook()
# sheet = workbook.add_sheet(report_name[:31])
worksheet = workbook.add_sheet('Recovery Report')
style_title = xlwt.easyxf(
"font:bold on,; align: vertical center,horiz center; border: top thin, bottom thin, right thin, left thin")
red_style_title = xlwt.easyxf('pattern: pattern solid, fore_colour pale_blue;'
"font:bold on,; align: vertical center,horiz center; border: top thin, bottom thin, right thin, left thin")
yellow_style_title = xlwt.easyxf('pattern: pattern solid, fore_colour yellow;'
"font:bold on,; align: vertical center,horiz center; border: top thin, bottom thin, right thin, left thin")
lime_style_title = xlwt.easyxf('pattern: pattern solid, fore_colour lime;'
"font:bold on,; align: vertical center,horiz center; border: top thin, bottom thin, right thin, left thin")
grand_heading_style = xlwt.easyxf('pattern: pattern solid, fore_colour white;'
'font: colour black, bold True;')
heading_style = xlwt.easyxf('align: vertical center,horiz center;')
date_format = xlwt.XFStyle()
date_format.num_format_str = 'dd/mm/yyyy'
# worksheet.write_merge(0, 1, 0, 5,"LACAS SCHOOL NETWORK ",style=style_title)
# worksheet.write_merge(0, 1, 6, 11, "Billing Cycle wise recovery report", style=style_title)
worksheet.write_merge(0,1,0,0,"Billing Cycle.", style=red_style_title)
worksheet.write_merge(0,1,1,1,"Total Billing (Bills Issuance)",style=red_style_title)
worksheet.write_merge(0,1,2,2,"No of Std",style=red_style_title)
worksheet.write_merge(0,1,3,3,"Recovery",style=red_style_title)
worksheet.write_merge(0,1,4,4,"Percentage of Recovery on Amount",style=red_style_title)
row=2
for rec in self.account_recovery_report_line:
if rec:
worksheet.write_merge(row,row,0,0,rec.billing_cycle, style=style_title)
worksheet.write_merge(row,row,1,1,rec.total_issuance,style=style_title)
worksheet.write_merge(row,row,2,2,rec.no_of_std,style=style_title)
worksheet.write_merge(row,row,3,3,rec.total_recovery,style=style_title)
worksheet.write_merge(row,row,4,4,rec.recovery_percentage,style=style_title)
row+=1
fp = io.BytesIO()
workbook.save(fp)
export_id = self.env['sale.day.book.report.excel'].create({'excel_file': base64.encodebytes(fp.getvalue()), 'file_name': filename})
res = {
'view_mode': 'form',
'res_id': export_id.id,
'res_model': 'sale.day.book.report.excel',
'type': 'ir.actions.act_window',
'target':'new'
}
return res
else:
raise Warning (""" You Don't have xlwt library.\n Please install it by executing this command : sudo pip3 install xlwt""")
|
Odolution/lacas
|
ol_lacas_custom_recovery_report/wizard/custom_wizard.py
|
custom_wizard.py
|
py
| 8,991
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17931901914
|
'''Average Salary Excluding the Minimum and Maximum Salary
Easy
You are given an array of unique integers salary where salary[i] is the salary of the ith employee.
Return the average salary of employees excluding the minimum and maximum salary. Answers within 10^-5 of the actual answer will be accepted.
'''
'''Approach
First we find how many numbers lie in the range [low, high],
which is simply high - low + 1.
If that count is odd, we need to check whether the starting point (low) is odd or not.
If low is odd, the answer is (count + 1) // 2;
otherwise it is count // 2.'''
class Solution:
def average(self, salary) -> float:
salary=sorted(salary)
return (sum(salary[1:len(salary)-1]))/(len(salary)-2 )
solution_object = Solution()
print(solution_object.average(salary = [4000,3000,1000,2000]))
|
LakshmiN5/leetcode_ex
|
avg_sal_exc_max_min.py
|
avg_sal_exc_max_min.py
|
py
| 988
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7266170385
|
from chaco.api import Plot
from chaco.tools.api import BetterSelectingZoom, PanTool
"""
Chaco wrapper.
"""
class ChacoPlot(Plot):
"""
A 2D Chaco plot wrapped with useful common functionality.
"""
@staticmethod
def sci_formatter(value):
"""
Convert a value to a scientific notation string as applicable.
"""
# Subtly different from g or n presentation types.
if value != 0 and (abs(value) < 1e-3 or abs(value) > 1e3):
parts = '{0:e}'.format(value).split('e')
result = parts[0].rstrip('0').rstrip('.') + 'e' + parts[1]
else:
result = '{0:f}'.format(value).rstrip('0').rstrip('.')
return result
def configure(self):
"""
Configure padding, tools, etc.
"""
# Padding.
self.padding = 20
self.padding_left = 120
self.padding_bottom = 55
# Axes.
self.index_axis.tick_label_formatter = self.sci_formatter
self.value_axis.tick_label_formatter = self.sci_formatter
# Tools.
self.tools.append(PanTool(self))
zoom = BetterSelectingZoom(self)
self.tools.append(zoom)
self.overlays.append(zoom)
@property
def x_label(self):
"""
The x axis label.
"""
return self.index_axis.title
@x_label.setter
def x_label(self, value):
self.index_axis.title = value
@property
def y_label(self):
"""
The y axis label.
"""
return self.value_axis.title
@y_label.setter
def y_label(self, value):
self.value_axis.title = value
|
0/SpanishAcquisition
|
spacq/gui/display/plot/common/chaco_plot.py
|
chaco_plot.py
|
py
| 1,396
|
python
|
en
|
code
| 1
|
github-code
|
6
|
1701692877
|
#!/usr/bin/python3
# hashtable
"""
ADT:
slot
init
private:
_load_factory  compute the load factor
_rehash        rehash the table
_hash          hash the key to an index
_find_key
_check_can_insert
_find_slot_for_insert
public:
get
add
remove
reindex: (index * 5 + 1) % _len
factory: table_real_length / table_all_length
"""
class Slot:
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable:
UNUSED = None  # slot has never been used
EMPTY = Slot(None, None)  # slot was used but its entry has been deleted
def __init__(self):
self._table = [HashTable.UNUSED] * 8  # keep the table size a power of two
self.length = 0
def __len__(self):
return self.length  # number of stored items
@property
def _load_factory(self):
# rehash once the load factor exceeds 0.8
return self.length / float(len(self._table))
def _hash(self, key):
return abs(hash(key)) % len(self._table)
def _find_key(self, key):
""" 寻找key元素返回其index, 否则为None """
index = self._hash(key) # 计算出索引
_len = len(self._table) # 表的总长度
while self._table[index] is not HashTable.UNUSED: # 使用过才能找到元素...
if self._table[index] is HashTable.EMPTY: # 被删除了
index = (index * 5 + 1) % _len # 重新计算位置
continue # 再次查找
elif self._table[index].key == key: # ok 返回索引
return index
else: # 找不到哦
index = (index * 5 + 1) % _len # 重新计算位置
return None
def __contains__(self, key):
""" Use _find_key to check whether the table contains key """
index = self._find_key(key)
return index is not None  # index 0 is a valid slot, so compare against None
def _check_can_insert(self, index):
return (self._table[index] is HashTable.EMPTY or
self._table[index] is HashTable.UNUSED)
def _find_slot_for_insert(self, key):
""" 寻找可以插入的槽位置 """
index = self._hash(key)
_len = len(self._table)
while not self._check_can_insert(index):
index = (index * 5 + 1) % _len
return index
def _rehash(self):
""" 重新hash """
old_table = self._table
new_size = len(self._table) * 2 # 扩展2倍
self._table = [HashTable.UNUSED] * new_size
self.length = 0
for slot in old_table:
if slot is not HashTable.UNUSED and slot is not HashTable.EMPTY: # 有数据
index = self._find_slot_for_insert(slot.key) # 寻找可插入的槽
self._table[index] = slot
self.length += 1
def add(self, key, value):
""" 添加元素到哈希表, 如果存在则覆盖 """
if key in self: # call __contains__
index = self._find_key(key) # 既然存在则寻找其位置
self._table[index].value = value # 重新赋值
else: # 新元素
index = self._find_slot_for_insert(key)
self._table[index] = Slot(key, value) # ok
self.length += 1
if self._load_factory >= 0.8:
self._rehash() # 重新hash
return True
def get(self, key, default=None):
""" 获取key的value, 没有则返回None """
index = self._find_key(key)
if index is None:
print("in default")
return default
else:
return self._table[index].value
def remove(self, key):
""" 移除一个key及其value """
index = self._find_key(key)
if not index:
raise KeyError("key不存在")
value = self._table[index].value
self._table[index] = HashTable.EMPTY
self.length -= 1
return value
def __iter__(self):
for s in self._table:
if s not in (HashTable.UNUSED, HashTable.EMPTY):
yield s.key
if __name__ == '__main__':
h = HashTable()
h.add('a', 0)
h.add('b', 1)
h.add('c', 2)
print('len ', len(h))
print('get a : ', h.get('a'))
print('get b : ', h.get('b'))
print('get c : ', h.get('c'))
|
ChenBaiYii/DataStructure
|
hashtable.py
|
hashtable.py
|
py
| 4,277
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7166024264
|
import pathlib
from typing import Any
import pytest
from competitive_verifier.models import (
AddtionalSource,
CommandVerification,
ConstVerification,
ResultStatus,
VerificationFile,
)
test_parse_VerificationFile_params: list[
tuple[VerificationFile, dict[str, Any], dict[str, Any]]
] = [
(
VerificationFile(),
{},
{
"dependencies": set(),
"document_attributes": {},
"verification": [],
"additonal_sources": [],
},
),
(
VerificationFile(
dependencies=set(
[
pathlib.Path("bar1"),
pathlib.Path("bar2"),
]
),
),
{
"dependencies": [
"bar1",
"bar2",
],
},
{
"dependencies": set(
[
pathlib.Path("bar1"),
pathlib.Path("bar2"),
]
),
"document_attributes": {},
"verification": [],
"additonal_sources": [],
},
),
(
VerificationFile(
document_attributes={
"title": "Bar bar",
},
),
{
"document_attributes": {
"title": "Bar bar",
},
},
{
"dependencies": set(),
"document_attributes": {
"title": "Bar bar",
},
"verification": [],
"additonal_sources": [],
},
),
(
VerificationFile(
verification=[ConstVerification(status=ResultStatus.SUCCESS)],
),
{
"verification": [
{
"type": "const",
"status": "success",
}
],
},
{
"dependencies": set(),
"document_attributes": {},
"verification": [ConstVerification(status=ResultStatus.SUCCESS)],
"additonal_sources": [],
},
),
(
VerificationFile(
verification=[ConstVerification(status=ResultStatus.SUCCESS)],
),
{
"verification": {
"type": "const",
"status": "success",
},
},
{
"dependencies": set(),
"document_attributes": {},
"verification": [ConstVerification(status=ResultStatus.SUCCESS)],
"additonal_sources": [],
},
),
(
VerificationFile(
additonal_sources=[
AddtionalSource(name="dummy", path=pathlib.Path("tmp/dummy.sh"))
]
),
{
"additonal_sources": [{"name": "dummy", "path": "tmp/dummy.sh"}],
},
{
"dependencies": set(),
"document_attributes": {},
"verification": [],
"additonal_sources": [
{"name": "dummy", "path": pathlib.Path("tmp/dummy.sh")}
],
},
),
]
@pytest.mark.parametrize(
"obj, raw_dict, output_dict",
test_parse_VerificationFile_params,
)
def test_parse_VerificationFile(
obj: VerificationFile,
raw_dict: dict[str, Any],
output_dict: dict[str, Any],
):
assert obj == VerificationFile.parse_obj(raw_dict)
assert obj.dict() == output_dict
test_is_verification_params = [
(
VerificationFile(
verification=[ConstVerification(status=ResultStatus.SUCCESS)],
),
True,
True,
),
(
VerificationFile(
verification=[
ConstVerification(status=ResultStatus.SUCCESS),
ConstVerification(status=ResultStatus.FAILURE),
],
),
True,
True,
),
(
VerificationFile(
verification=[CommandVerification(command="true")],
),
True,
False,
),
(
VerificationFile(
verification=[
ConstVerification(status=ResultStatus.SUCCESS),
CommandVerification(command="true"),
],
),
True,
False,
),
(
VerificationFile(
verification=[],
),
False,
False,
),
]
@pytest.mark.parametrize(
"obj, is_verification, is_skippable_verification", test_is_verification_params
)
def test_is_verification(
obj: VerificationFile,
is_verification: bool,
is_skippable_verification: bool,
):
assert obj.is_verification() == is_verification
assert obj.is_skippable_verification() == is_skippable_verification
|
competitive-verifier/competitive-verifier
|
tests/models/test_file.py
|
test_file.py
|
py
| 4,735
|
python
|
en
|
code
| 8
|
github-code
|
6
|
39372786389
|
# -*- coding: utf-8 -*-
'''
Server Program used to handle multiple clients in a secure manner using
certificates and SSL/TLS protocol, store data
to the database.
@author: Manish Gupta <manishthaparian.gupta@gmail.com>
'''
# Copyright (C) 2018 Manish Gupta <manishthaparian.gupta@gmail.com>;
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Manish Gupta <manishthaparian.gupta@gmail.com>"
__copyright__ = "Copyright 2018"
__credits__ = [ "Manish Gupta" ]
__license__ = "GPL"
__version__ = "1"
__maintainer__ = "Manish Gupta"
__email__ = "<manishthaparian.gupta@gmail.com>"
__status__ = "Prototype"
#!/usr/bin/python3
import socket
import ssl
import time
from threading import Thread
import queue
import threading
from collections import OrderedDict
listen_addr = '192.168.0.182'
listen_port = 8082
server_cert = 'server.crt'
server_key = 'server.key'
client_certs = 'client_combine.crt'
threads = []
BUF_SIZE = 1024
dataQueue = queue.Queue(BUF_SIZE)
nodelist = []
firmWareLocation = ""
firmWareUpdate = ""
versionNumber = 1.1
################################################################################
# There are 2 threads running to handle communication with clients and process all
# the data coming from the clients
################################################################################
# DataThread processes all the data in the queue and pushes it to the database.
# it also checks the type of packet received
class DataThread(threading.Thread):
def __init__(self, group=None, target=None, name=None,args=(), kwargs=None, verbose=None):
super(DataThread,self).__init__()
self.target = target
self.name = name
# run function of this thread
def run(self):
global firmWareUpdate
global firmWareLocation
global dataQueue
global versionNumber
idIndex = 1
commandIndex = 2
fieldIndex = 4
while True:
try:
if not dataQueue.empty():
datarequest = (dataQueue.get())
requestField = str(datarequest).split('/')
print(requestField)
if requestField[idIndex].lower().strip() == 'pingpacket':
print("It is a ping packet")
# Store into database
elif requestField[idIndex].lower().strip() == 'datapacket':
print("It is a data packet")
# Store into database
elif requestField[idIndex].lower().strip() == 'update':
print("It is an update request")
firmWareUpdate = True
firmWareLocation = requestField[commandIndex]
versionNumber = requestField[fieldIndex]
print("Current Status:",firmWareUpdate)
print("Location",firmWareLocation)
print("Version Number",versionNumber)
for node in nodelist:
print("Updating nodes status for updating required")
node['Update'] = True
print(nodelist)
if (firmWareUpdate == True):
print("Checking if all nodes have been updated")
UpdateFlag = True
for node in nodelist:
print("Actual Node Status:" ,node['Update'])
if(node['Update'] == True):
UpdateFlag = False
print("UpdateFlag",UpdateFlag)
if(UpdateFlag == True):
print("All clients have been updated:")
firmWareUpdate = False
except Exception as e:
print("Exception ------->",e)
# ClientThread handles the connection with each client by spawning a new thread
# per client connection
class ClientThread(Thread):
def __init__(self,conn,ip,port):
Thread.__init__(self)
self.ip = ip
self.port = port
self.conn = conn
self.firstcontact = int(time.time()*1000)
self.lastactivity = int(time.time()*1000)
self.connected = True
print("New server socket thread started for " + ip + ":" + str(port))
nodeStatus=OrderedDict()
nodeStatus['ip'] = self.ip
nodeStatus['port'] = self.port
nodeStatus['conn'] = self.conn
nodeStatus['Update'] = False
nodelist.append(nodeStatus)
print("List of nodes:",nodelist)
def run(self):
global firmWareUpdate
global firmWareLocation
global versionNumber
while True :
print("Waiting for data from client")
try:
data = self.conn.recv(4096)
data1 = data.decode()
if data1:
self.lastactivity = int(time.time()*1000)
print("Server received data:", data1)
print("Last activity at:",self.lastactivity)
print("thread running", self.name)
print("firmware update required:",firmWareUpdate)
if(firmWareUpdate == True):
print("Need to update client firmware")
for node in nodelist:
if(node['conn']==self.conn):
locationdata = '/Update/' + str(firmWareLocation) + '/version/' + str(versionNumber)
print("Sending firmware location" + locationdata)
self.conn.send(str(locationdata).encode())
node['Update'] = False
break
else:
self.conn.send("/Recieved".encode())
if not dataQueue.full():
dataQueue.put(data1)
else:
print("Didn't get anything")
self.connected = False
self.conn.close()
for node in nodelist:
if (node['conn']==self.conn):
nodelist.remove(node)
except Exception as error:
print(error)
self.connected = False
self.conn.close()
for node in nodelist:
if (node['conn']==self.conn):
nodelist.remove(node)
if(self.connected == False):
break
print("Exiting thread")
# Start the data thread when the program starts
datathread = DataThread(name='DataThread')
datathread.start()
#Load certificates and necessary keys to create ssl instance
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.verify_mode = ssl.CERT_REQUIRED
context.load_cert_chain(certfile=server_cert, keyfile=server_key)
context.load_verify_locations(cafile=client_certs)
#create a socket connection and start listening on the port
bindsocket = socket.socket()
bindsocket.bind((listen_addr, listen_port))
bindsocket.listen(1)
#waiting for connections from clients
while True:
try:
print("Waiting for client")
newsocket, fromaddr = bindsocket.accept()
print("Client connected: {}:{}".format(fromaddr[0], fromaddr[1]))
conn = context.wrap_socket(newsocket, server_side=True)
print("SSL established. Peer: {}".format(conn.getpeercert()))
newthread = ClientThread(conn,fromaddr[0], fromaddr[1])
newthread.start()
threads.append(newthread)
print("Active threads: ",threading.active_count())
except Exception as error:
print(error)
for t in threads:
t.join()
|
manishgupta1208/SP-home
|
home.py
|
home.py
|
py
| 8,738
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14347113756
|
import pandas as pd
def pre_processing_state_table(data_df):
"""
This function takes a pandas DataFrame as input and splits the next_state_0 and next_state_1 columns
into two columns each.
"""
data_df = data_df.applymap(lambda x: x.strip())
data_df[['next_state_0', 'output_0']] = data_df['next_state_0'].str.split(r'-|/|\s', expand=True)
data_df[['next_state_1', 'output_1']] = data_df['next_state_1'].str.split(r'-|/|\s', expand=True)
return data_df
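# Illustration (a hypothetical row, for clarity only): a cell such as "B/0" in
# `next_state_0` is split on '-', '/' or whitespace into next_state_0 = "B" and
# output_0 = "0"; a cell such as "C-1" in `next_state_1` likewise becomes ("C", "1").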
def grouping_states(data_df):
"""
This function groups the present_state, next_state_0, and next_state_1 columns
by their output values.
"""
grouped = data_df.groupby(['output_0', 'output_1']).agg({
'present_state': lambda x: list(x),
'next_state_0': lambda x: list(x),
'next_state_1': lambda x: list(x)
}).reset_index()
grouped['group'] = grouped['present_state']
grouped.drop(['present_state','output_0', 'output_1'], axis=1, inplace=True)
grouped_table = grouped.transpose()
grouped_table = grouped_table.iloc[[-1] + list(range(len(grouped_table)-1))]
return grouped_table
def create_state_map(grouped_table):
"""
This function creates a state map that maps each state to its group.
"""
state_map = pd.DataFrame(columns=['state','group'])
for col_name, col_data in grouped_table.iloc[0].items():
for data in col_data:
new_row = [data,col_name]
state_map.loc[len(state_map)] = new_row
return state_map
def group_next_states(grouped_table, state_map):
"""
This function groups the next states based on the state map.
"""
def find_group_by_state(state_map, state_group):
"""
This function finds the group for a given state (or a list of states).
"""
list_group = list(range(len(state_group)))
for index, state_value in enumerate(state_group):
list_group[index] = state_map.loc[state_map['state'] == state_value, 'group'].iloc[0]
return list_group
i = 0
while i < grouped_table.shape[1]:
grouped_table.iat[1,i] = find_group_by_state(state_map, grouped_table.iat[1,i])
grouped_table.iat[2,i] = find_group_by_state(state_map, grouped_table.iat[2,i])
i += 1
grouped_table = grouped_table.rename(index={'next_state_0': 'next_group_0', 'next_state_1': 'next_group_1'})
return grouped_table
def check_finish(grouped_table):
"""
This function checks if the state table minimization is finished.
"""
grouped_table = grouped_table.iloc[[1,2]].copy()
for row in grouped_table.itertuples():
for index in range(1,grouped_table.shape[1]+1):
cell = row[index]
if len(cell) > 1:
for j in range(1,len(cell)):
if cell[j] != cell[j-1]:
return False
return True
def split_groups(grouped_table, states_table):
"""
This function splits the groups in the state table.
"""
def split_columns(pre_list, split_position):
new_list = [pre_list[i] for i in split_position]
offset = 0
for i in split_position:
pre_list.pop(i-offset)
offset += 1
return new_list
i = 0
grouped_table = pd.concat([grouped_table, states_table], axis=0)
while i < grouped_table.shape[1]:
list1 = grouped_table.iloc[1,i]
list2 = grouped_table.iloc[2,i]
if len(list1) > 1:
split_position = []
have_split = False
for j in range(1,len(list1)):
if list1[0] != list1[j] or list2[0] != list2[j]:
split_position.append(j)
have_split = True
if have_split:
new_col = grouped_table.iloc[:,i].apply(split_columns, args=(split_position,))
# grouped_table.insert(i+1, grouped_table.shape[1], new_col)
grouped_table = pd.concat([grouped_table, pd.DataFrame(new_col)], axis=1, ignore_index=True)
i += 1
grouped_table = grouped_table.drop(['next_group_0', 'next_group_1'])
return grouped_table
def minimize_state_table(data):
# Create a dictionary to store the return data
return_data = {
"status": "success",
"message": "API executed successfully",
"data": {"steps": [],"result": {},},
"author": "nguyenhoangkhanhduy030903@gmail.com",
}
# Pre-process the state table by splitting the next state columns and removing any leading/trailing white space
state_table = pre_processing_state_table(pd.DataFrame(data))
# Group the states based on their next states and output symbols
grouped_table = grouping_states(state_table.copy())
# Extract the next states from the grouped table
next_states = grouped_table.iloc[1:].copy()
# Create a mapping between each state and its corresponding group
state_map = create_state_map(grouped_table)
# Group the next states based on their group mappings
grouped_table = group_next_states(grouped_table, state_map)
# Write step1 to the return data
return_data["data"]["steps"].append({"step1": grouped_table.to_json(orient='records')})
# Repeatedly split the groups until no further splitting is possible
step_number = 2
while check_finish(grouped_table) is not True:
# Split the groups based on their next states and output symbols
splited_table = split_groups(grouped_table,next_states)
grouped_table = splited_table
# Extract the next states from the updated grouped table
next_states = grouped_table.iloc[1:].copy()
# Update the mapping between each state and its corresponding group
state_map = create_state_map(grouped_table)
# Group the next states based on their group mappings
grouped_table = group_next_states(grouped_table, state_map)
# Write the updated groups to the return data
return_data["data"]["steps"].append({"step" + str(step_number): grouped_table.to_json(orient='records')})
step_number += 1
# Create a dictionary to map each state in a group to the group's representative state
new_name_dict = dict()
for _, cell in grouped_table.iloc[0].items():
for i in range(1,len(cell)):
new_name_dict[cell[i]] = cell[0]
# Remove duplicate states from the original state table
result_state_table = state_table.set_index("present_state")
for _, cell in grouped_table.iloc[0].items():
result_state_table = result_state_table.drop(cell[1:], axis=0)
result_state_table.reset_index(inplace=True)
# Replace the next state names with their group representative names and concatenate the output symbols
result_state_table = result_state_table.replace({'next_state_0':new_name_dict,'next_state_1':new_name_dict})
result_state_table['next_state_0'] = result_state_table.apply(lambda x: x['next_state_0']+'/'+str(x["output_0"]),axis=1)
result_state_table['next_state_1'] = result_state_table.apply(lambda x: x['next_state_1']+'/'+str(x["output_1"]),axis=1)
result_state_table = result_state_table.drop(['output_0','output_1'],axis=1)
# Write the minimized state table to the return data
return_data["data"]["result"] = result_state_table.to_json(orient="records")
return return_data
# minimize_state_table(" ")
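# Hypothetical input shape (for illustration only): `data` should build a
# DataFrame with string columns present_state, next_state_0 and next_state_1,
# where each next-state cell carries its output after '/', '-' or a space, e.g.:
#
#   data = [
#       {"present_state": "A", "next_state_0": "B/0", "next_state_1": "C/1"},
#       {"present_state": "B", "next_state_0": "A/0", "next_state_1": "C/1"},
#       {"present_state": "C", "next_state_0": "B/0", "next_state_1": "A/1"},
#   ]
#   result = minimize_state_table(data)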
|
Duynghk/LogicDesign
|
website/minimize_state_table.py
|
minimize_state_table.py
|
py
| 7,473
|
python
|
en
|
code
| 1
|
github-code
|
6
|
19882697980
|
import os
import trio
import ssl
from async_generator import asynccontextmanager
from structlog import get_logger
from typing import Optional, Union
from guardata.crypto import SigningKey
from guardata.api.transport import Transport, TransportError, TransportClosedByPeer
from guardata.api.protocol import (
DeviceID,
ProtocolError,
HandshakeError,
BaseClientHandshake,
AuthenticatedClientHandshake,
InvitedClientHandshake,
APIV1_AnonymousClientHandshake,
APIV1_AdministrationClientHandshake,
)
from guardata.client.types import (
BackendAddr,
BackendOrganizationAddr,
BackendOrganizationBootstrapAddr,
BackendInvitationAddr,
)
from guardata.client.backend_connection.exceptions import (
BackendConnectionError,
BackendNotAvailable,
BackendConnectionRefused,
BackendProtocolError,
)
logger = get_logger()
TIMEOUT_SERVER_CONNECT = 8
async def apiv1_connect(
addr: Union[BackendAddr, BackendOrganizationBootstrapAddr, BackendOrganizationAddr],
device_id: Optional[DeviceID] = None,
signing_key: Optional[SigningKey] = None,
administration_token: Optional[str] = None,
keepalive: Optional[int] = None,
) -> Transport:
"""
Raises:
BackendConnectionError
"""
handshake: BaseClientHandshake
if administration_token:
if not isinstance(addr, BackendAddr):
raise BackendConnectionError(f"Invalid url format `{addr}`")
handshake = APIV1_AdministrationClientHandshake(administration_token)
elif not device_id:
if isinstance(addr, BackendOrganizationBootstrapAddr):
handshake = APIV1_AnonymousClientHandshake(addr.organization_id)
elif isinstance(addr, BackendOrganizationAddr):
handshake = APIV1_AnonymousClientHandshake(addr.organization_id, addr.root_verify_key)
else:
raise BackendConnectionError(
f"Invalid url format `{addr}` "
"(should be an organization url or organization bootstrap url)"
)
else:
raise BackendConnectionError("Invalid v1 auth method")
return await _connect(addr.hostname, addr.port, addr.use_ssl, keepalive, handshake)
async def connect_as_invited(addr: BackendInvitationAddr, keepalive: Optional[int] = None):
handshake = InvitedClientHandshake(
organization_id=addr.organization_id, invitation_type=addr.invitation_type, token=addr.token
)
return await _connect(addr.hostname, addr.port, addr.use_ssl, keepalive, handshake)
async def connect_as_authenticated(
addr: BackendOrganizationAddr,
device_id: DeviceID,
signing_key: SigningKey,
keepalive: Optional[int] = None,
):
handshake = AuthenticatedClientHandshake(
organization_id=addr.organization_id,
device_id=device_id,
user_signkey=signing_key,
root_verify_key=addr.root_verify_key,
)
return await _connect(addr.hostname, addr.port, addr.use_ssl, keepalive, handshake)
async def _connect(
hostname: str,
port: int,
use_ssl: bool,
keepalive: Optional[int],
handshake: BaseClientHandshake,
) -> Transport:
try:
with trio.fail_after(TIMEOUT_SERVER_CONNECT):
stream = await trio.open_tcp_stream(hostname, port)
except (OSError, trio.TooSlowError) as exc:
logger.debug("Impossible to connect to backend", reason=exc)
raise BackendNotAvailable(exc) from exc
if use_ssl:
stream = _upgrade_stream_to_ssl(stream, hostname)
try:
transport = await Transport.init_for_client(stream, host=hostname)
transport.handshake = handshake
transport.keepalive = keepalive
except TransportError as exc:
logger.debug("Connection lost during transport creation", reason=exc)
raise BackendNotAvailable(exc) from exc
try:
await _do_handshake(transport, handshake)
except Exception as exc:
transport.logger.debug("Connection lost during handshake", reason=exc)
await transport.aclose()
raise
return transport
def _upgrade_stream_to_ssl(raw_stream, hostname):
# The ssl context should be generated once and stored into the config
# however this is tricky (should ssl configuration be stored per device ?)
cafile = os.environ.get("SSL_CAFILE")
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if cafile:
ssl_context.load_verify_locations(cafile)
else:
ssl_context.load_default_certs()
return trio.SSLStream(raw_stream, ssl_context, server_hostname=hostname)
async def _do_handshake(transport: Transport, handshake):
try:
challenge_req = await transport.recv()
answer_req = handshake.process_challenge_req(challenge_req)
await transport.send(answer_req)
result_req = await transport.recv()
handshake.process_result_req(result_req)
except TransportError as exc:
raise BackendNotAvailable(exc) from exc
except HandshakeError as exc:
raise BackendConnectionRefused(str(exc)) from exc
except ProtocolError as exc:
transport.logger.exception("Protocol error during handshake")
raise BackendProtocolError(exc) from exc
class TransportPool:
def __init__(self, connect_cb, max_pool):
self._connect_cb = connect_cb
self._transports = []
self._closed = False
self._lock = trio.Semaphore(max_pool)
@asynccontextmanager
async def acquire(self, force_fresh=False):
"""
Raises:
BackendConnectionError
trio.ClosedResourceError: if used after having being closed
"""
async with self._lock:
transport = None
if not force_fresh:
try:
# Fifo style to retrieve oldest first
transport = self._transports.pop(0)
except IndexError:
pass
if not transport:
if self._closed:
raise trio.ClosedResourceError()
transport = await self._connect_cb()
try:
yield transport
except TransportClosedByPeer:
raise
except Exception:
await transport.aclose()
raise
else:
self._transports.append(transport)
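# Illustrative usage sketch (not part of the original module): one way to drive the
# pool above. `connect_cb` is any zero-argument async callable returning a Transport,
# e.g. a functools.partial over connect_as_authenticated with a concrete addr,
# device_id and signing_key.
async def _example_pool_usage(connect_cb):
    pool = TransportPool(connect_cb, max_pool=4)
    async with pool.acquire() as transport:
        await transport.send(b"ping")
        return await transport.recv()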
|
bitlogik/guardata
|
guardata/client/backend_connection/transport.py
|
transport.py
|
py
| 6,416
|
python
|
en
|
code
| 9
|
github-code
|
6
|
38967040281
|
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
import re
from desk_zol.items import DeskZolItem
class BizhiSpider(scrapy.Spider):
name = 'bizhi'
start_urls = ['http://desk.zol.com.cn/nb/','http://desk.zol.com.cn/pc/']
def parse(self, response):
soup = BeautifulSoup(response.text, 'lxml')
next = soup.select('.next')
alist = soup.select('.pic-list2')[0].find_all('a')
for a in alist:
item = DeskZolItem()
item['name'] = a.span['title']
item['url']='http://desk.zol.com.cn'+a['href']
item['image_urls'] = []
yield scrapy.Request('http://desk.zol.com.cn'+a['href'] , meta={'item':item},callback=self.parse_img)
if next:
yield scrapy.Request('http://desk.zol.com.cn' +next[0]['href'], callback=self.parse)
def parse_img(self,response):
item = response.meta['item']
soup =BeautifulSoup(response.text,'lxml')
lis= soup.find('ul',id='showImg').find_all('li')
for li in lis:
img = str(li.a.img)
if re.search('srcs',img):
real_url = re.sub('144x90', '1600x900', li.a.img['srcs'])
elif re.search('src',img):
real_url = re.sub('144x90', '1600x900', li.a.img['src'])
item['image_urls'].append(real_url)
yield item
|
zaoyubo/desk_zol
|
desk_zol/spiders/bizhi.py
|
bizhi.py
|
py
| 1,401
|
python
|
en
|
code
| 0
|
github-code
|
6
|
40290878008
|
def profitTable(maxPrice):
"""Prints a table of profits from a show based on ticket price. Parameters: maxPrice: maximum price to consider Return value: None """
print('Price Income Profit')
print('------ --------- ---------')
for price in range(1, 2*maxPrice + 1):
realprice = price/2
sales = 2500 - 80 * realprice
income = sales * realprice
profit = income - 8000
formatString = '${0:>5.2f} ${1:>8.2f} ${2:8.2f}'
print(formatString.format(realprice, income, profit))
def main():
profitTable(25)
main()
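# Worked example of one table row (not in the original): at a ticket price of $10.00,
# sales = 2500 - 80*10 = 1700, income = 1700*10 = 17000 and profit = 17000 - 8000 = 9000,
# printed roughly as "$10.00 $17000.00 $ 9000.00".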
|
vivekworks/learning-to-code
|
4. Discovering Computer Science/Python/Chapter 4 - Growth And Decay/concert.py
|
concert.py
|
py
| 616
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43536075224
|
import requests
import json
import os
import sys
import logging
logger = logging.getLogger(__name__)
def gdc_read_file(file_id="11443f3c-9b8b-4e47-b5b7-529468fec098"):
data_endpt = "https://api.gdc.cancer.gov/slicing/view/{}".format(file_id)
TOKEN_FILE_PATH = os.environ.get('GDC_TOKEN')
if not TOKEN_FILE_PATH:
logger.warning("GDC_TOKEN environment variable should point to GDC token file")
sys.exit(1)
with open(TOKEN_FILE_PATH, "r") as token:
token_string = str(token.read().strip())
params = {"gencode": ["BRCA1", "BRCA2"]}
response = requests.post(
data_endpt,
data=json.dumps(params),
headers={
"Content-Type": "application/json",
"X-Auth-Token": token_string
})
return response.content
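# Illustrative usage sketch (not part of the original module): assumes the GDC_TOKEN
# environment variable points at a readable token file; the call uses the function's
# default example file_id.
if __name__ == "__main__":
    bam_bytes = gdc_read_file()
    print("received", len(bam_bytes), "bytes")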
|
neksa/mutagene
|
mutagene/io/gdc.py
|
gdc.py
|
py
| 807
|
python
|
en
|
code
| 3
|
github-code
|
6
|
9093667498
|
filename = 'input.txt'
# filename = 'test.txt'
data = ['A Y', 'B X', 'C Z']
def load_data(filename):
data = []
with open(filename) as f:
lines = f.readlines()
for line in lines:
if line != '\n':
data.append(line.strip())
return data
def calc_match_score(match):
score = 0
elf = match[0]
santa = match[2]
print(elf, santa)
if elf == 'A':
if santa == 'X':
score = 4
elif santa == 'Y':
score = 8
elif santa == 'Z':
score = 3
if elf == 'B':
if santa == 'X':
score = 1
elif santa == 'Y':
score = 5
elif santa == 'Z':
score = 9
if elf == 'C':
if santa == 'X':
score = 7
elif santa == 'Y':
score = 2
elif santa == 'Z':
score = 6
print(score)
return score
def calc_match_score_for_part2(match):
score = 0
elf = match[0]
result = match[2]
print(elf, result)
# A - Rock (1) B - Paper (2) C - Scissors
# X - lose Y - draw Z - win
if elf == 'A':
if result == 'X':
score = 3
elif result == 'Y':
score = 4
elif result == 'Z':
score = 8
if elf == 'B':
if result == 'X':
score = 1
elif result == 'Y':
score = 5
elif result == 'Z':
score = 9
if elf == 'C':
if result == 'X':
score = 2
elif result == 'Y':
score = 6
elif result == 'Z':
score = 7
print(score)
return score
def part1(data):
# A - Rock (1) B - Paper (2) C - Scissors
# X - Rock (1) Y - Paper (2) Z - Scissors
# Win - 6 Draw - 3 Lose - 0
score = 0
for match in data:
score += calc_match_score(match)
return score
def part2(data):
# A - Rock (1) B - Paper (2) C - Scissors
    # X - lose   Y - draw   Z - win
# Win - 6 Draw - 3 Lose - 0
score = 0
for match in data:
score += calc_match_score_for_part2(match)
return score
data = load_data(filename)
print(data)
print('part1 result = ', part1(data))
print('part2 result = ', part2(data))
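# Score derivation behind the hardcoded tables above (shape score + outcome score):
#   part 1: "A Y" = elf Rock vs our Paper    -> 2 (Paper) + 6 (win)  = 8
#   part 2: "A Y" = elf Rock, Y means draw   -> 1 (Rock)  + 3 (draw) = 4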
|
lapssh/advent_of_code
|
2022/day02/day02.py
|
day02.py
|
py
| 2,332
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26113028615
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "03/04/2017"
import ctypes
import numpy
from .....math.combo import min_max
from .... import _glutils as glutils
from ...._glutils import gl
from .GLPlotItem import GLPlotItem
class GLPlotTriangles(GLPlotItem):
"""Handle rendering of a set of colored triangles"""
_PROGRAM = glutils.Program(
vertexShader="""
#version 120
uniform mat4 matrix;
attribute float xPos;
attribute float yPos;
attribute vec4 color;
varying vec4 vColor;
void main(void) {
gl_Position = matrix * vec4(xPos, yPos, 0.0, 1.0);
vColor = color;
}
""",
fragmentShader="""
#version 120
uniform float alpha;
varying vec4 vColor;
void main(void) {
gl_FragColor = vColor;
gl_FragColor.a *= alpha;
}
""",
attrib0='xPos')
def __init__(self, x, y, color, triangles, alpha=1.):
"""
:param numpy.ndarray x: X coordinates of triangle corners
:param numpy.ndarray y: Y coordinates of triangle corners
:param numpy.ndarray color: color for each point
:param numpy.ndarray triangles: (N, 3) array of indices of triangles
:param float alpha: Opacity in [0, 1]
"""
super().__init__()
# Check and convert input data
x = numpy.ravel(numpy.array(x, dtype=numpy.float32))
y = numpy.ravel(numpy.array(y, dtype=numpy.float32))
color = numpy.array(color, copy=False)
# Cast to uint32
triangles = numpy.array(triangles, copy=False, dtype=numpy.uint32)
assert x.size == y.size
assert x.size == len(color)
assert color.ndim == 2 and color.shape[1] in (3, 4)
if numpy.issubdtype(color.dtype, numpy.floating):
color = numpy.array(color, dtype=numpy.float32, copy=False)
elif numpy.issubdtype(color.dtype, numpy.integer):
color = numpy.array(color, dtype=numpy.uint8, copy=False)
else:
raise ValueError('Unsupported color type')
assert triangles.ndim == 2 and triangles.shape[1] == 3
self.__x_y_color = x, y, color
self.xMin, self.xMax = min_max(x, finite=True)
self.yMin, self.yMax = min_max(y, finite=True)
self.__triangles = triangles
self.__alpha = numpy.clip(float(alpha), 0., 1.)
self.__vbos = None
self.__indicesVbo = None
self.__picking_triangles = None
def pick(self, x, y):
"""Perform picking
:param float x: X coordinates in plot data frame
:param float y: Y coordinates in plot data frame
:return: List of picked data point indices
:rtype: Union[List[int],None]
"""
if (x < self.xMin or x > self.xMax or
y < self.yMin or y > self.yMax):
return None
xPts, yPts = self.__x_y_color[:2]
if self.__picking_triangles is None:
self.__picking_triangles = numpy.zeros(
self.__triangles.shape + (3,), dtype=numpy.float32)
self.__picking_triangles[:, :, 0] = xPts[self.__triangles]
self.__picking_triangles[:, :, 1] = yPts[self.__triangles]
segment = numpy.array(((x, y, -1), (x, y, 1)), dtype=numpy.float32)
# Picked triangle indices
indices = glutils.segmentTrianglesIntersection(
segment, self.__picking_triangles)[0]
# Point indices
indices = numpy.unique(numpy.ravel(self.__triangles[indices]))
# Sorted from furthest to closest point
dists = (xPts[indices] - x) ** 2 + (yPts[indices] - y) ** 2
indices = indices[numpy.flip(numpy.argsort(dists), axis=0)]
return tuple(indices) if len(indices) > 0 else None
def discard(self):
"""Release resources on the GPU"""
if self.isInitialized():
self.__vbos[0].vbo.discard()
self.__vbos = None
self.__indicesVbo.discard()
self.__indicesVbo = None
def isInitialized(self):
return self.__vbos is not None
def prepare(self):
"""Allocate resources on the GPU"""
if self.__vbos is None:
self.__vbos = glutils.vertexBuffer(self.__x_y_color)
            # Normalization is needed for color
self.__vbos[-1].normalization = True
if self.__indicesVbo is None:
self.__indicesVbo = glutils.VertexBuffer(
numpy.ravel(self.__triangles),
usage=gl.GL_STATIC_DRAW,
target=gl.GL_ELEMENT_ARRAY_BUFFER)
def render(self, context):
"""Perform rendering
:param RenderContext context: Rendering information
"""
self.prepare()
if self.__vbos is None or self.__indicesVbo is None:
return # Nothing to display
self._PROGRAM.use()
gl.glUniformMatrix4fv(self._PROGRAM.uniforms['matrix'],
1,
gl.GL_TRUE,
context.matrix.astype(numpy.float32))
gl.glUniform1f(self._PROGRAM.uniforms['alpha'], self.__alpha)
for index, name in enumerate(('xPos', 'yPos', 'color')):
attr = self._PROGRAM.attributes[name]
gl.glEnableVertexAttribArray(attr)
self.__vbos[index].setVertexAttrib(attr)
with self.__indicesVbo:
gl.glDrawElements(gl.GL_TRIANGLES,
self.__triangles.size,
glutils.numpyToGLType(self.__triangles.dtype),
ctypes.c_void_p(0))
|
silx-kit/silx
|
src/silx/gui/plot/backends/glutils/GLPlotTriangles.py
|
GLPlotTriangles.py
|
py
| 5,702
|
python
|
en
|
code
| 106
|
github-code
|
6
|
26529515886
|
# Han-su (한수): count numbers whose digits form an arithmetic progression
N = int(input())
result = N
for i in range(1, N+1):
temp = []
while i != 0:
temp.append(i % 10)
i = i // 10
if len(temp) < 3:
continue
dif = temp[0] - temp[1]
for j in range(1, len(temp)-1):
if dif != (temp[j] - temp[j+1]):
result -= 1
break
print(result)
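# Worked example (not in the original): 123 is counted because its digits form an
# arithmetic progression (1, 2, 3), while 1024 is not (1, 0, 2, 4 has no common
# difference). Numbers shorter than 3 digits are skipped above because every 1- and
# 2-digit number is a han-su, so they never decrement `result`.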
|
Jaeheon-So/baekjoon-algorithm
|
완전탐색/1065.py
|
1065.py
|
py
| 340
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33875335541
|
import torch
import wandb
from .utils import matrix_to_dict
class Logger(object):
def __init__(self, hparams, model) -> None:
super().__init__()
self.hparams = hparams
self._setup_exp_management(model)
self.total_loss_values = None
def _setup_exp_management(self, model):
if self.hparams.use_wandb is True:
wandb.init(
entity="causal-representation-learning",
project=self.hparams.project,
notes=self.hparams.notes,
config=self.hparams,
tags=self.hparams.tags,
)
wandb.watch(model, log_freq=self.hparams.n_log_steps, log="all")
# define metrics
wandb.define_metric("total_loss", summary="min")
wandb.define_metric("lin_dis_score", summary="max")
wandb.define_metric("perm_dis_score", summary="max")
def log_jacobian(
self, dep_mat, name="gt_decoder", inv_name="gt_encoder", log_inverse=True
):
jac = dep_mat.detach().cpu()
cols = [f"a_{i}" for i in range(dep_mat.shape[1])]
gt_jacobian_dec = wandb.Table(columns=cols, data=jac.tolist())
self.log_summary(**{f"{name}_jacobian": gt_jacobian_dec})
if log_inverse is True:
gt_jacobian_enc = wandb.Table(columns=cols, data=jac.inverse().tolist())
self.log_summary(**{f"{inv_name}_jacobian": gt_jacobian_enc})
|
rpatrik96/nl-causal-representations
|
care_nl_ica/logger.py
|
logger.py
|
py
| 1,456
|
python
|
en
|
code
| 12
|
github-code
|
6
|
10368808313
|
from . import get_help
__doc__ = get_help("help_autoban")
from telethon import events
from pyUltroid.dB.base import KeyManager
from . import LOGS, asst, ultroid_bot, ultroid_cmd
Keym = KeyManager("DND_CHATS", cast=list)
def join_func(e):
return e.user_joined and Keym.contains(e.chat_id)
async def dnd_func(event):
for user in event.users:
try:
await (await event.client.kick_participant(event.chat_id, user)).delete()
except Exception as ex:
LOGS.error("Error in DND:")
LOGS.exception(ex)
await event.delete()
@ultroid_cmd(
pattern="autokick (on|off)$",
admins_only=True,
manager=True,
require="ban_users",
fullsudo=True,
)
async def _(event):
match = event.pattern_match.group(1)
if match == "on":
if Keym.contains(event.chat_id):
return await event.eor("`Chat already in do not disturb mode.`", time=3)
Keym.add(event.chat_id)
event.client.add_handler(dnd_func, events.ChatAction(func=join_func))
await event.eor("`Do not disturb mode activated for this chat.`", time=3)
elif match == "off":
if not Keym.contains(event.chat_id):
return await event.eor("`Chat is not in do not disturb mode.`", time=3)
Keym.remove(event.chat_id)
await event.eor("`Do not disturb mode deactivated for this chat.`", time=3)
if Keym.get():
ultroid_bot.add_handler(dnd_func, events.ChatAction(func=join_func))
asst.add_handler(dnd_func, events.ChatAction(func=join_func))
|
TeamUltroid/Ultroid
|
plugins/autoban.py
|
autoban.py
|
py
| 1,550
|
python
|
en
|
code
| 2,615
|
github-code
|
6
|
32060115586
|
import torch
from torch import nn
from torchvision import models, transforms
class VGG16Extractor(nn.Module):
def __init__(self):
super(VGG16Extractor, self).__init__()
vgg = models.vgg16(pretrained=True)
features = vgg.features
self.relu_1_2 = nn.Sequential()
self.relu_2_2 = nn.Sequential()
self.relu_3_3 = nn.Sequential()
self.relu_4_3 = nn.Sequential()
for x in range(4):
self.relu_1_2.add_module(str(x), features[x])
for x in range(4, 9):
self.relu_2_2.add_module(str(x), features[x])
for x in range(9, 16):
self.relu_3_3.add_module(str(x), features[x])
for x in range(16, 23):
self.relu_4_3.add_module(str(x), features[x])
for params in self.parameters():
params.requires_grad = False
def forward(self, input):
h_relu_1_2 = self.relu_1_2(input)
h_relu_2_2 = self.relu_2_2(h_relu_1_2)
h_relu_3_3 = self.relu_3_3(h_relu_2_2)
h_relu_4_3 = self.relu_4_3(h_relu_3_3)
return h_relu_1_2, h_relu_2_2, h_relu_3_3, h_relu_4_3
def gram(x):
(bs, ch, h, w) = x.size()
f = x.view(bs, ch, w*h)
f_T = f.transpose(1, 2)
G = f.bmm(f_T) / (ch * h * w)
return G
def vgg_tensor_transformer():
transformer = transforms.Compose([transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
transforms.Resize(size=(224, 224))])
return transformer
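# Minimal sketch (not part of the original module): the Gram matrix computed by gram()
# is the channel-wise feature correlation used in style losses. For feature maps of
# shape (batch, channels, h, w) it returns a (batch, channels, channels) tensor.
if __name__ == "__main__":
    feats = torch.randn(2, 64, 32, 32)
    print(gram(feats).shape)  # expected: torch.Size([2, 64, 64])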
|
harsh020/image-colorization
|
colorizer/utils.py
|
utils.py
|
py
| 1,588
|
python
|
en
|
code
| 1
|
github-code
|
6
|
9384835860
|
import argparse
from click import secho
import sys
from DNScanner.DNScanner import DNScanner
savesys = sys.stdout
# Flags
parser = argparse.ArgumentParser(description='\t Scan domains https://github.com/ChinadaCam/DNScanner')
parser.add_argument('-d', '--domain', required=True, type=str, help='Set domain (example.com)')
parser.add_argument('-cS', '--checkSubdomains', const='Others\wordlists\subdomainlist.txt', nargs='?' , help='Check subdomains and give an output if found. (Default path: Others\wordlists\subdomainlist.txt) ')
parser.add_argument('-O', '--Output', const='Others\Discovers',nargs='?', help='Output to file.\n Default is Others\Discovers, change directory with --Directory ')
parser.add_argument('-D', '--Directory', const='Others\Discovers',nargs='?', help='Define a directory to output.\n Default is Discovers')
parser.add_argument('-mx', '--mxrecords', nargs='?', const='True' ,help='Show Mail Exchanger Records (MX RECORDS)')
parser.add_argument('-ns', '--Nameserver', nargs='?', const='True' ,help='Show Nameserver Records (NS RECORDS)')
parser.add_argument('-A', '--all', nargs='?', const='True' ,help='Run all parameters (output not included)')
parser.add_argument('-cn', '--cname', nargs='?', const='True' ,help='Show Canonical Name Records(CN Records)')
parser.add_argument('-W', '--whois', nargs='?', const='True' ,help='Who is (Clean format)')
parser.add_argument('-WJ', '--whoisJ', nargs='?', const='True' ,help='Who is (JSON)')
#parser.add_argument('-geo', '--geolocation', nargs='?', const='True' ,help='Try to get coordinates')
args = parser.parse_args()
def main():
print('------------------------------------------------')
print('\t DNScanner '
'\n\tMade by Tiago Faustino'
'\n Project link: https://github.com/ChinadaCam/DNScanner ' )
print('------------------------------------------------\n')
Scanner = DNScanner(args.domain)
if args.all:
Scanner.getNS()
Scanner.whoIsJson()
Scanner.subdomainspath = 'DNScanner\Others\wordlists\subdomainlist.txt'
Scanner.subdomainbool = True
Scanner.getCN()
Scanner.getMX()
args.mxrecords = True
# check if output is used
if args.Output:
if args.Directory:
Scanner.output(args.Directory)
else:
Scanner.output(args.Output)
Scanner.start()
if args.checkSubdomains:
Scanner.subdomainspath = args.checkSubdomains
Scanner.subdomainbool = True
# Toggle mx
if args.mxrecords:
Scanner.getMX()
if args.Nameserver:
Scanner.getNS()
if args.whois:
Scanner.whoIs()
if args.whoisJ:
Scanner.whoIsJson()
if args.cname:
Scanner.getCN()
sys.stdout = savesys
secho("\n[+] Finished ", fg="green")
#Scanner.whoIs()
if __name__ == '__main__':
main()
|
ChinadaCam/DNScanner
|
start.py
|
start.py
|
py
| 2,894
|
python
|
en
|
code
| 9
|
github-code
|
6
|
38961189531
|
class Employe:
def setEmploye(self,Eid,Ename,Desig,Salary):
self.Eid=Eid
self.Ename=Ename
self.Desig=Desig
self.Salary=Salary
def PrintEmploye(self):
print("Your Id is",self.Eid)
print(self.Ename)
print(self.Desig)
print(self.Salary)
ob=Employe()
ob.setEmploye(1,"raghu","manager",15000)
ob.PrintEmploye()
|
Aswin2289/LuminarPython
|
LuminarPythonPrograms/Oops/Employe.py
|
Employe.py
|
py
| 378
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72532004029
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from copy import deepcopy
import pytest
from pytest import MonkeyPatch
from settings_library.docker_registry import RegistrySettings
MOCKED_BASE_REGISTRY_ENV_VARS: dict[str, str] = {
"REGISTRY_AUTH": "False",
"REGISTRY_USER": "usr",
"REGISTRY_PW": "pwd",
"REGISTRY_SSL": "False",
}
def _add_parameter_to_env(env: dict[str, str], key: str, value: str) -> dict[str, str]:
registry_env = deepcopy(env)
registry_env[key] = value
return registry_env
def _mock_env_vars(monkeypatch: MonkeyPatch, env_vars: dict[str, str]) -> None:
for key, value in env_vars.items():
monkeypatch.setenv(key, value)
@pytest.mark.parametrize(
"env_key, env_var",
[
("REGISTRY_PATH", "some_dev_path"),
("REGISTRY_URL", "some_prod_url"),
],
)
def test_model_ok(env_key: str, env_var: str, monkeypatch: MonkeyPatch) -> None:
registry_env_vars = _add_parameter_to_env(
MOCKED_BASE_REGISTRY_ENV_VARS, env_key, env_var
)
_mock_env_vars(monkeypatch, registry_env_vars)
registry_settings = RegistrySettings()
assert registry_settings
assert registry_settings.resolved_registry_url == env_var
def test_registry_path_none_string(monkeypatch: MonkeyPatch) -> None:
registry_env_vars = _add_parameter_to_env(
MOCKED_BASE_REGISTRY_ENV_VARS, "REGISTRY_PATH", "None"
)
registry_env_vars = _add_parameter_to_env(
registry_env_vars, "REGISTRY_URL", "some_prod_url"
)
_mock_env_vars(monkeypatch, registry_env_vars)
registry_settings = RegistrySettings()
assert registry_settings
assert registry_settings.resolved_registry_url == registry_env_vars["REGISTRY_URL"]
|
ITISFoundation/osparc-simcore
|
packages/settings-library/tests/test_docker_registry.py
|
test_docker_registry.py
|
py
| 1,754
|
python
|
en
|
code
| 35
|
github-code
|
6
|
277948458
|
import torch
import torch.nn as nn
from shapmagn.global_variable import Shape
from shapmagn.utils.obj_factory import obj_factory
from shapmagn.modules_reg.module_gradient_flow import gradient_flow_guide
from shapmagn.shape.point_sampler import point_fps_sampler
class GradFlowPreAlign(nn.Module):
def __init__(self, opt):
super(GradFlowPreAlign, self).__init__()
self.opt = opt
self.niter = opt[("niter", 10, "self iteration")]
self.rel_ftol = opt[("rel_ftol", 1e-2, "relative tolerance")]
self.plot = opt[("plot", False, "plot the shape")]
self.method_name = opt[("method_name", "affine", "affine or rigid")]
self.eval_scale_for_rigid = opt[
(
"eval_scale_for_rigid",
True,
"evaluate scale for the rigid transformation",
)
]
self.control_points = opt[
(
"control_points",
-1,
"compute prealign with # control point, points are sampled from farthest point sampling",
)
]
self.sampler = point_fps_sampler(self.control_points)
self.use_barycenter_weight = opt[
(
"use_barycenter_weight",
False,
"use barycenter weight for partial registration",
)
]
pair_feature_extractor_obj = self.opt[
("pair_feature_extractor_obj", "", "feature extraction function")
]
self.pair_feature_extractor = (
obj_factory(pair_feature_extractor_obj)
if pair_feature_extractor_obj
else None
)
self.get_correspondence_shape = self.solve_correspondence_via_gradflow()
self.solver = (
self.solve_affine if self.method_name == "affine" else self.solve_rigid
)
def set_mode(self, mode):
self.prealign = True
def solve_affine(self,x, y, w):
"""
:param x: BxNxD
:param y: BxNxD
:param w: BxNx1
:return:
"""
# Optimal affine transform: ================================================
# A = (X^T @ diag(w) @ X)^-1 @ (X^T @ diag(w) @ y)
# (B,D+1,N) (B,N,N) (B,N,D+1) (B,D+1,N) (B,N,N) (B,N,D)
#
# = Xt_wX \ Xt_yw
# (B,D+1,D+1) (B,D+1, D)
# (x, y, z, 1) array to work easily with affine transforms:
X = torch.cat((x, torch.ones_like(x[:, :, :1])), dim=2) # (B,N, D+1)
Xt_wX = X.transpose(2, 1) @ (w * X) # (B,D+1, N) @ (B,N, D+1) = (B,D+1, D+1)
Xt_wy = X.transpose(2, 1) @ (w * y) # (B,D+1, N) @ (B,N, D) = (B,D+1, D)
# Affine transformation:
A = torch.solve(Xt_wy, Xt_wX).solution # (B,D+1, D)
return A, X @ A
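    # Worked toy check of the normal equations above (not in the original): with D=1,
    # points x = [[0], [1]], targets y = [[1], [3]] and unit weights, X = [[0, 1], [1, 1]],
    # Xt_wX = [[1, 1], [1, 2]], Xt_wy = [[3], [4]], and the solve yields A = [[2], [1]],
    # i.e. the exact line y = 2*x + 1 through both points.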
def solve_rigid(self, x, y, w):
"""
:param x: BxNxD
:param y: BxNxD
:param w: BxNx1
:return:
"""
B, N, D = x.shape[0], x.shape[1], x.shape[2]
device = x.device
sum_w = w.sum(1, keepdim=True)
mu_x = (x * w).sum(1, keepdim=True) / sum_w
mu_y = (y * w).sum(1, keepdim=True) / sum_w
x_hat = x - mu_x
wx_hat = x_hat * w
y_hat = y - mu_y
wy_hat = y_hat * w
a = wy_hat.transpose(2, 1) @ wx_hat # BxDxN @ BxNxD BxDxD
u, s, v = torch.svd(a)
c = torch.ones(B, D).to(device)
c[:, -1] = torch.det(u @ v) #
r = (u * (c[..., None])) @ v.transpose(2, 1)
tr_atr = torch.diagonal(a.transpose(2, 1) @ r, dim1=-2, dim2=-1).sum(-1)
tr_xtwx = torch.diagonal(wx_hat.transpose(2, 1) @ wx_hat, dim1=-2, dim2=-1).sum(
-1
)
s = (
(tr_atr / tr_xtwx)[..., None][..., None]
if self.eval_scale_for_rigid
else 1.0
)
t = mu_y - s * (r @ mu_x.transpose(2, 1)).transpose(2, 1)
A = torch.cat([r.transpose(2, 1) * s, t], 1)
X = torch.cat((x, torch.ones_like(x[:, :, :1])), dim=2) # (B,N, D+1)
return A, X @ A
def compose_transform(self, A_prev, A_cur):
D = A_prev.shape[-1]
A_composed_matrix = A_prev[:, :D, :] @ A_cur[:, :D, :] # BxDxD
A_composed_trans = (
A_prev[:, D:, :] @ A_cur[:, :D, :] + A_cur[:, D:, :]
) # Bx1XD @ BxDxD Bx1xD
return torch.cat([A_composed_matrix, A_composed_trans], 1)
def solve_correspondence_via_gradflow(self):
from functools import partial
self.gradflow_mode = self.opt[
(
"gradflow_mode",
"grad_forward",
" 'grad_forward' if only use position info otherwise 'ot_mapping'",
)
]
self.search_init_transform = self.opt[
(
"search_init_transform",
False,
" the 16(2D)/64(3D) initial transforms (based on position and ot similarity) would be searched and return the best one ",
)
]
self.geomloss_setting = self.opt[("geomloss", {}, "settings for geomloss")]
return partial(
gradient_flow_guide(self.gradflow_mode),
geomloss_setting=self.geomloss_setting,
local_iter=torch.tensor([0]),
)
def _solve_transform(self, source, flowed):
return self.solver(source.points, flowed.points, source.weights)
def extract_point_fea(self, flowed, target, iter=-1):
flowed.pointfea = flowed.points.clone()
target.pointfea = target.points.clone()
return flowed, target
def extract_fea(self, flowed, target, iter):
if not self.pair_feature_extractor:
return self.extract_point_fea(flowed, target, iter)
else:
return self.pair_feature_extractor(flowed, target, iter)
def find_initial_transform(self, source, target):
import numpy as np
from scipy.spatial.transform import Rotation as R
source_center = source.points.mean(dim=1, keepdim=True)
target_center = target.points.mean(dim=1, keepdim=True)
max_diameter = lambda x: (x.points.max(1)[0] - x.points.min(1)[0]).max(1)[0]
scale = max_diameter(target) / max_diameter(source)
bias_center = (
target_center - source_center
) / 10 # avoid fail into the identity local minimum
D = source.points.shape[-1]
n_init = 16 if D == 2 else 64
r = None
if D == 2:
angle_comp = np.mgrid[0:271:90, 0:271:90].transpose(1, 2, 0).reshape(-1, D)
r = R.from_euler("yx", angle_comp, degrees=True)
elif D == 3:
angle_comp = (
np.mgrid[0:271:90, 0:271:90, 0:271:90]
.transpose(1, 2, 3, 0)
.reshape(-1, D)
)
r = R.from_euler("zyx", angle_comp, degrees=True)
init_rotation_matrix = torch.tensor(r.as_matrix().astype(np.float32)).to(
source.points.device
)
init_best_transformed = []
init_best_transform = []
for i, (
b_source_points,
b_target_points,
b_source_weights,
b_target_weights,
) in enumerate(
zip(source.points, target.points, source.weights, target.weights)
):
b_source_points = b_source_points.repeat(n_init, 1, 1)
b_target_points = b_target_points.repeat(n_init, 1, 1)
b_source_weights = b_source_weights.repeat(n_init, 1, 1)
b_target_weights = b_target_weights.repeat(n_init, 1, 1)
b_init_rotation_bias = bias_center[i].repeat(n_init, 1, 1)
b_transform = torch.cat(
[init_rotation_matrix * scale[i], b_init_rotation_bias], 1
)
geo_dist = obj_factory(self.geomloss_setting["geom_obj"])
b_init_transformed = (
torch.cat(
(b_source_points, torch.ones_like(b_source_points[:, :, :1])), dim=2
)
@ b_transform
)
bdist = geo_dist(
b_source_weights[..., 0],
b_init_transformed,
b_target_weights[..., 0],
b_target_points,
)
min_val, min_index = bdist.min(0)
b_init_best_transformed = b_init_transformed[min_index]
b_init_best_transform = b_transform[min_index]
print("the best init transform is {}".format(b_init_best_transform))
init_best_transformed.append(b_init_best_transformed)
init_best_transform.append(b_init_best_transform)
return torch.stack(init_best_transform, 0), Shape().set_data_with_refer_to(
torch.stack(init_best_transformed, 0), source
)
def sampling_input(self, toflow, target):
compute_at_low_res = self.control_points > 0
sampled_toflow = self.sampler(toflow) if compute_at_low_res else toflow
sampled_target = self.sampler(target) if compute_at_low_res else target
return sampled_toflow, sampled_target
def __call__(self, source, target, init_A=None):
"""
:param source: Shape with points BxNxD
:param target_batch: Shape with points BxMxD
:return: Bx(D+1)xD transform matrix
"""
source, target = self.sampling_input(source, target)
toflow = source
A_prev = init_A if init_A is not None else None
A = None
if self.search_init_transform:
A_prev, toflow = self.find_initial_transform(source, target)
for i in range(self.niter):
toflow, target = self.extract_fea(toflow, target, i)
flowed, weight_map_ratio = self.get_correspondence_shape(toflow, target)
if not self.use_barycenter_weight:
A, transforme_points = self._solve_transform(toflow, flowed)
else:
toflow_weights = toflow.weights
toflow.weights = weight_map_ratio
A, transforme_points = self._solve_transform(toflow, flowed)
toflow.weights = toflow_weights
A = self.compose_transform(A_prev, A) if A_prev is not None else A
transformed_points = (
torch.cat(
(source.points, torch.ones_like(source.points[:, :, :1])), dim=2
)
@ A
)
toflow = Shape().set_data_with_refer_to(transformed_points, source)
if i > 0 and torch.norm(A - A_prev) < self.rel_ftol:
print(
"reach relative tolerance {}".format(torch.norm(A - A_prev).item())
)
break
A_prev = A
if self.plot:
self.visualize(
source, toflow, target, weight_map_ratio, self.geomloss_setting, i
)
return A
def visualize(
self, source, transformed, target, weight_map_ratio, geomloss_setting, iter
):
from shapmagn.utils.visualizer import visualize_source_flowed_target_overlap, default_plot
from shapmagn.demos.demo_utils import get_omt_mapping
# mapped_fea = get_omt_mapping(geomloss_setting,source, target,
# source.points[0], p=2, mode="hard", confid=0.0)
weight_map_ratio = torch.log10(weight_map_ratio + 1e-8)
weight_map_ratio = (weight_map_ratio - weight_map_ratio.min()) / (
weight_map_ratio.max() - weight_map_ratio.min()
).repeat(1, 1, 1)
visualize_source_flowed_target_overlap(
source.points,
transformed.points,
target.points,
source.points,
weight_map_ratio,
target.points,
"source",
"attention",
"target",
source_plot_func=default_plot(cmap="viridis",rgb=True),
flowed_plot_func=default_plot(cmap="magma",rgb=False),
target_plot_func=default_plot(cmap="magma",rgb=True),
opacity= (0.1,"linear",0.02),
show=True,
add_bg_contrast=False,
)
|
uncbiag/shapmagn
|
shapmagn/modules_reg/module_gradflow_prealign.py
|
module_gradflow_prealign.py
|
py
| 12,227
|
python
|
en
|
code
| 94
|
github-code
|
6
|
38831023014
|
from hyperopt import hp, STATUS_OK
import numpy as np
from mne.filter import resample
from crossvalidate import crossvalidate,test_ensamble,test_naive, run_a_trial
from keras.utils import to_categorical
import keras.backend as K
import uuid
from utils import save_results,get_subj_split
from my_models import ShallowConvNet
import os
import sys
sys.path.append(os.path.join(os.path.split(os.getcwd())[0],'data_loader'))
from data import DataBuildClassifier,EEG_SAMPLE_RATE
RESULTS_DIR = "results_shallow/"
WEIGHTS_DIR = "weights_shallow/"
space = {'resample_to': hp.choice('resample_to', range(128, 501)),
'dropoutRate': hp.uniform('dropoutRate', 0, 1),
'lr': hp.loguniform('lr', -5 * np.log(10), -3 * np.log(10))
}
def build_and_train_all_subjects(params,subjects,subj_tr_val_ind,subj_tst_ind):
params_uuid = str(uuid.uuid4())[:5]
subj_val_aucs,subj_tst_aucs_ens,subj_tst_aucs_naive = {},{},{}
tmp_weights_res_path = os.path.join(WEIGHTS_DIR,params_uuid)
# for subj in subjects.keys():
for subj in [25,26]:
K.clear_session()
tr_val_ind = subj_tr_val_ind[subj]
tst_ind = subj_tst_ind[subj]
x_tr_val,y_tr_val = subjects[subj][0][tr_val_ind], to_categorical(subjects[subj][1][tr_val_ind],2)
x_tst, y_tst = subjects[subj][0][tst_ind], to_categorical(subjects[subj][1][tst_ind],2)
x_tr_val = resample(x_tr_val, up=1., down=EEG_SAMPLE_RATE/params['resample_to'], npad='auto', axis=1)
x_tst = resample(x_tst, up=1., down=EEG_SAMPLE_RATE / params['resample_to'], npad='auto', axis=1)
model_path = os.path.join(tmp_weights_res_path,str(subj))
model = ShallowConvNet(params,Chans=x_tr_val.shape[2], Samples=x_tr_val.shape[1])
x_tr_val = x_tr_val.transpose(0, 2, 1)[:,np.newaxis,:,:]
x_tst = x_tst.transpose(0, 2, 1)[:, np.newaxis, :, :]
val_aucs, val_aucs_epochs,_ = crossvalidate(x_tr_val, y_tr_val, model, model_path)
test_auc_ensemble = test_ensamble(x_tst,y_tst,model_path)
test_naive_history = test_naive(x_tr_val, y_tr_val, x_tst, y_tst, model, int(np.mean(val_aucs_epochs)), model_path)
test_auc_naive = test_naive_history['val_auc'][-1]
subj_val_aucs[subj] = np.mean(val_aucs)
subj_tst_aucs_ens[subj] = test_auc_ensemble
subj_tst_aucs_naive[subj] = test_auc_naive
median_val_aucs = np.median(list(subj_val_aucs.values()))
weights_res_path = os.path.join(WEIGHTS_DIR, '%.2f_%s' % (median_val_aucs,params_uuid))
os.rename(tmp_weights_res_path,weights_res_path)
params_res_path = os.path.join(RESULTS_DIR, '%.2f_%s' % (median_val_aucs,params_uuid))
save_results(params_res_path, subj_val_aucs,subj_tst_aucs_naive, subj_tst_aucs_ens, params)
result= {
'loss': -median_val_aucs,
'real_loss': np.mean(list(subj_tst_aucs_naive.values())),
'subj_tst_aucs_naive':subj_tst_aucs_naive,
'subj_tst_aucs_ens':subj_tst_aucs_ens,
'subj_val_aucs':subj_val_aucs,
'status': STATUS_OK
}
return result
if __name__ == '__main__':
if not os.path.exists(RESULTS_DIR):
os.makedirs(RESULTS_DIR)
if not os.path.exists(WEIGHTS_DIR):
os.makedirs(WEIGHTS_DIR)
data = DataBuildClassifier('/home/likan_blk/BCI/NewData')
subjects, subj_tr_val_ind, subj_tst_ind = get_subj_split(data)
# split_subj = lambda x, ind: {key: (x[key][0][ind[key]], x[key][1][ind[key]]) for key in x}
# subj_train_val = split_subj(subjects,subj_tr_val_ind)
# subj_test = split_subj(subjects, subj_tst_ind)
for t in range(3):
run_a_trial(subjects, subj_tr_val_ind, subj_tst_ind,RESULTS_DIR,build_and_train_all_subjects,space)
|
bkozyrskiy/NN_hyperopt_search
|
opt_shallow.py
|
opt_shallow.py
|
py
| 3,725
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11474271839
|
'''
Created on Jan 9, 2010
@author: eric
'''
import asyncore
import socket
import time
from ParsedMessage import ParsedMessage
class Connection(asyncore.dispatcher):
'''
maintains the connection to the server
'''
buffer = ""
bytesIn = 0
bytesOut = 0
connectionAttempts = 0
reconnectWait = 3
maxAttempts = 100
def __init__(self, Pyibber):
'''
ctor, pass in the Pyibber instance
'''
asyncore.dispatcher.__init__(self)
self.Pyibber = Pyibber
self.omgPoniesConnect()
"""
def omgPoniesConnect(self):
print "omgPoniesConnect called"
count = 0
while (count < self.maxAttempts):
config = self.Pyibber.config;
server = str(config.get("pyib", "serverAddress"))
port = int(config.get("pyib", "serverPort"))
self.Pyibber.logger.info('attempt %d connecting to: %s:%d' % (count, server, port))
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
x = self.connect((server, port))
print "x: (%s)" % x
count = count + 1
time.sleep(self.reconnectWait)
self.Pyibber.logger.error('Unable to connect to server after %d tries' % self.maxAttempts)
self.Pyibber.stop()
"""
def omgPoniesConnect(self):
config = self.Pyibber.config;
server = str(config.get("pyib", "serverAddress"))
port = int(config.get("pyib", "serverPort"))
self.Pyibber.logger.info('attempt %d connecting to: %s:%d' % (self.connectionAttempts, server, port))
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((server, port))
def write(self, message):
self.Pyibber.logger.debug("socket.send: [%s]" % message)
self.buffer = self.buffer + message + "\r\n"
self.bytesOut += len(self.buffer)
def handle_connect(self):
pass
def handle_close(self):
self.Pyibber.logger.debug("connection.handle_close")
self.close()
def handle_error(self):
self.Pyibber.logger.debug("connection.handle_error")
self.close()
def handle_read(self):
data = self.recv(4096)
self.Pyibber.logger.debug('socket.recv: [%s]' % data)
self.bytesIn += len(data)
lines = data.splitlines()
for line in lines:
message = ParsedMessage(line)
self.Pyibber.Commandx.createFromMessage(message)
def writable(self):
return (len(self.buffer) > 0)
def handle_write(self):
sent = self.send(self.buffer)
self.buffer = self.buffer[sent:]
"""
def arghconnect(self, server, port):
count = 0
while (count < self.maxAttempts):
server = str(config.get("pyib", "serverAddress"))
port = int(config.get("pyib", "serverPort"))
self.Pyibber.logger.info('attempt %d connecting to: %s:%d' % (count, server, port))
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((server, port))
return
except Exception, e:
#self.socket.close()
#self.socket = None
self.Pyibber.logger.warning('socket fail: %s' % e)
#time.sleep(self.reconnectWait)
sys.exit(1)
count = count + 1
if self.socket is None:
self.Pyibber.logger.error('unable to connect to server after %d tries' % self.maxAttempts)
self.Pyibber.stop()
return
"""
|
ericbutera/pyib
|
src/Pyib/Connection.py
|
Connection.py
|
py
| 3,734
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16838640248
|
import pytest
import requests_mock
from csvcubed.utils.cache import session
from csvcubed.definitions import ROOT_DIR_PATH
@pytest.fixture(scope="package", autouse=True)
def mock_http_session_qube_config_schema():
"""
Fixture which mocks the HTTP responses of the JSON qube-config schema file for testing.
"""
with session.cache_disabled(), requests_mock.Mocker(
session=session, real_http=True
) as mocker:
schema_path = (
ROOT_DIR_PATH
/ "csvcubed"
/ "schema"
/ "cube-config"
/ "v1_0"
/ "schema.json"
)
with open(schema_path) as f:
mocker.register_uri(
"GET",
"//purl.org/csv-cubed/qube-config/v1.0",
text=f.read(),
)
yield session
|
GDonRanasinghe/csvcubed-models-test-5
|
csvcubed/tests/unit/readers/cubeconfig/v1_0/conftest.py
|
conftest.py
|
py
| 844
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74883082427
|
from collections import defaultdict
class UnionFind():
def __init__(self, n):
        # Note: vertices are assumed to be numbered starting from 0
self.par = [i for i in range(n)]
def root(self, x):
if self.par[x] == x:
return x
else:
self.par[x] = self.root(self.par[x])
return self.par[x]
def same(self, x, y):
return self.root(x) == self.root(y)
def unite(self, x, y):
x = self.root(x)
y = self.root(y)
if x == y:
return
self.par[x] = y
return
def main():
N, K, L = map(int, input().split())
uf1 = UnionFind(N)
uf2 = UnionFind(N)
# O(K)
for _ in range(K):
p, q = map(int, input().split())
uf1.unite(p-1, q-1)
# O(L)
for _ in range(L):
r, s = map(int, input().split())
uf2.unite(r-1, s-1)
# O(N) x O(log_N)
    # A bit tricky: accumulate counts keyed by the pair of roots (one root per union-find)
    # => seen from a root pair, this counts the vertices that share both roots (including itself)
cnts = defaultdict(int)
for i in range(N):
pos = (uf1.root(i), uf2.root(i))
cnts[pos] += 1
ans = []
for i in range(N):
pos = (uf1.root(i), uf2.root(i))
ans.append(cnts[pos])
print(*ans)
if __name__ == '__main__':
main()
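# Illustrative sketch (not part of the original solution, never called): minimal
# behaviour of the UnionFind above.
def _union_find_demo():
    uf = UnionFind(4)
    uf.unite(0, 1)
    uf.unite(2, 3)
    assert uf.same(0, 1) and uf.same(2, 3)
    assert not uf.same(1, 2)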
|
kazuo-mu/at_coder_answers
|
ABC049/d_connectivity.py
|
d_connectivity.py
|
py
| 1,393
|
python
|
ja
|
code
| 0
|
github-code
|
6
|
42137815915
|
import numpy as np
# Set the seed for reproducibility
np.random.seed(42)
# Define the number of bulbs and the number of defective bulbs
num_bulbs = 100
num_defective = 10
# Define the sample size and the number of simulations
sample_size = 5
num_simulations = 100000
tolerance = 0.01
# Simulate drawing samples and count the number of times all samples are non-defective
count = 0
for i in range(num_simulations):
sample = np.random.choice(num_bulbs, sample_size, replace=False)
if all(sample >= num_defective):
count += 1
# Calculate the empirical probability of all samples being non-defective
empirical_prob = count / num_simulations
print("Empirical probability:", empirical_prob)
if abs(empirical_prob - 0.59) <= tolerance:
print("solution is correct")
|
gadepall/digital-communication
|
ncert/12/13/5/14/codes/verify_soln.py
|
verify_soln.py
|
py
| 782
|
python
|
en
|
code
| 7
|
github-code
|
6
|
24680635783
|
# -*- coding: utf-8 -*-
"""
@author: Yashoeep
@Roll number: 170003060
@Read Inputs
@ This function will read inputs from the Inputs folder and store them properly
@ in a dictionary which is also the return value
Return value description -->
{
...
subject : {
images: [ list of paths to images for this subject ]
templates: [ list of paths to temppaltes for this subject ]
results: [list of expected paths to the results of this subject ]
}
....
}
"""
import os
import cv2 as cv
def read_inputs():
print("{0:=^50}".format(" Reading the Inputs "))
# get the current working directory
cwd = os.getcwd()
cwd = cwd.replace("/", "\\")
# ----------------------------------------setting the correct directory -------------------------------------------
    # if the current working directory is not the input images folder then change it
temp_path = cwd.split("\\")
# check if we are operating in the main folder
if temp_path[-1] == "main":
inputs_root=os.path.join(cwd, "Inputs")
subject_folder_names = [ item for item in os.listdir(inputs_root) if os.path.isdir(os.path.join(inputs_root, item)) ]
#print (subject_folder_names)
#print("There are {} subjects in the input".format( len(subject_folder_names) ))
subject_folder_paths = [os.path.join(inputs_root, item) for item in subject_folder_names]
temp = {}
for i, subject in enumerate(subject_folder_names):
subject_folder_path = subject_folder_paths[i]
subj_images_folder_path = os.path.join(subject_folder_path, "1_Images")
subj_templates_folder_path = os.path.join(subject_folder_path, "2_Templates")
subj_results_folder_path = os.path.join(subject_folder_path, "3_Processed_Images")
subject_images_names = os.listdir(subj_images_folder_path)
subject_template_names = os.listdir(subj_templates_folder_path)
subject_results_names = ["processed_"+item for item in os.listdir(subj_images_folder_path)]
subject_images_path = [os.path.join(subj_images_folder_path, item) for item in subject_images_names]
subject_templates_path = [os.path.join(subj_templates_folder_path, item) for item in subject_template_names]
subject_results_path = [os.path.join(subj_results_folder_path, item) for item in subject_results_names]
temp[subject] = {
"images": subject_images_path,
"templates": subject_templates_path,
"results": subject_results_path
}
return (temp, True)
else:
print("{0:=^50}".format(" Reading Inputs --> Failed "))
print("{0:=^50}".format(" Make sure you are in the same directory as the main.py file "))
return ({}, False)
|
yashodeepchikte/Multi-Template-Matching
|
main/read_inputs.py
|
read_inputs.py
|
py
| 3,023
|
python
|
en
|
code
| 5
|
github-code
|
6
|
3774041977
|
#!/usr/bin/env python
import argparse
import yaml
import sys
import http.server
import http.client
import requests
configFile = 'config.yaml'
configDefault = {'server': {'host': "127.0.0.1", 'port': 2323},
'proxies': None,
'forwarder': {'host': "127.0.0.1", 'headers': ["Content-Type"]}}
config = {}
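# Illustrative config.yaml sketch (an assumption based on configDefault and the
# validation below, not taken from project docs):
#
#   server:
#     host: 127.0.0.1
#     port: 2323
#   proxies:
#     - target: https://example.org
#       proxy: http://127.0.0.1:7890
#   forwarders:
#     - host: 127.0.0.1
#       target: https://example.org
#       headers: [Content-Type]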
def GetRealHost(headers: http.client.HTTPMessage) -> str:
    '''Get the real Host value from the request headers.'''
# print(headers,flush=True)
for header, value in headers.items():
if header.lower() == 'x-forwarded-host' or header.lower() == 'host':
return value
def GetForwarder(forwardHost: str) -> dict:
    '''Look up the forwarder config entry whose host matches the given host.
    Returns None if there is no match.
    '''
for forwarder in config['forwarders']:
if forwarder['host'] == forwardHost:
return forwarder
return None
def TransformProxies(proxies: list) -> dict:
    '''Transform a [{target, proxy}] list into {target: proxy} form.'''
return {x['target']: x['proxy'] for x in proxies}
class RequestHandler(http.server.BaseHTTPRequestHandler):
def badresponse(self, msg: str):
self.send_response(http.HTTPStatus.BAD_REQUEST)
self.send_header('Content-type', 'text/plain')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(msg.encode())
def do_request(self, method: str):
targetPath = self.path
forwardHost = GetRealHost(self.headers)
forwarder = GetForwarder(forwardHost)
if forwarder is None:
self.badresponse('no matching forwarder')
return
forwardTarget = forwarder['target']
proxies = None
if not config['proxies'] is None:
proxies = TransformProxies(config['proxies'])
        # Collect the request headers that should be forwarded
forwarderHeaders = {}
for header, value in self.headers.items():
if header.lower() in [x.lower() for x in forwarder['headers']]:
forwarderHeaders[header] = value
        # Read the request body from the client
contentLength = int(self.headers.get('Content-Length', 0))
forwardBody = self.rfile.read(contentLength)
        # Send the request to the upstream target server
try:
response = None
response = requests.request(
method=method, url=f'{forwardTarget}{targetPath}', headers=forwarderHeaders, data=forwardBody, proxies=proxies)
            # Forward the response back to the client
self.send_response(response.status_code)
for header, value in response.headers.items():
if not header.lower() in ('transfer-encoding', 'content-encoding', 'content-length', 'connection', 'date', 'server'):
self.send_header(header, value)
self.send_header('Connection', 'close')
self.end_headers()
r = response.content
self.wfile.write(r)
except Exception as e:
self.badresponse(str(e))
finally:
            # Close the connection
if not response is None:
response.close()
def do_GET(self):
self.do_request("GET")
def do_POST(self):
self.do_request("POST")
def do_PUT(self):
self.do_request("PUT")
def do_DELETE(self):
self.do_request("DELETE")
def do_PATCH(self):
self.do_request("PATCH")
def do_HEAD(self):
self.do_request("HEAD")
if __name__ == "__main__":
    # Parse command-line arguments
parser = argparse.ArgumentParser(description='http forwarder')
parser.add_argument('-c', '--config', default=configFile,
help=f'config file default is {configFile}')
args = parser.parse_args()
configFile = args.config
    # Load and initialize the config file
with open(configFile) as file:
config = yaml.safe_load(file)
if config is None:
config = {}
config['server'] = config.get('server', configDefault['server'])
config['server']['host'] = config['server'].get(
'host', configDefault['server']['host'])
config['server']['port'] = config['server'].get(
'port', configDefault['server']['port'])
config['proxies'] = config.get('proxies', configDefault['proxies'])
if type(config['proxies']) == list:
for i in range(len(config['proxies'])):
if not 'target' in config['proxies'][i]:
print(f"proxies[{i}].target is not defined",
file=sys.stderr)
exit(1)
if not 'proxy' in config['proxies'][i]:
print(f"proxies[{i}].proxy is not defined",
file=sys.stderr)
exit(1)
config['forwarders'] = config.get('forwarders', [])
for i in range(len(config['forwarders'])):
if (not 'target' in config['forwarders'][i]) or (not type(config['forwarders'][i]['target']) is str):
print(f"forwarder[{i}].target is not defined", file=sys.stderr)
exit(1)
target = config['forwarders'][i]['target']
if (not target.startswith('http://')) and (not target.startswith('https://')):
print(
f"forwarder[{i}].target not startswith http:// or https://", file=sys.stderr)
exit(1)
elif target.endswith('/'):
print(
f"forwarder[{i}].target can not endswith /", file=sys.stderr)
exit(1)
config['forwarders'][i]['description'] = config['forwarders'][i].get(
'description', f'forward {target}')
config['forwarders'][i]['host'] = config['forwarders'][i].get(
'host', configDefault['forwarder']['host'])
config['forwarders'][i]['headers'] = config['forwarders'][i].get(
'headers', configDefault['forwarder']['headers'])
print(config)
host = config['server']['host']
port = config['server']['port']
    # Start the server
serverAddress = (host, port)
httpd = http.server.HTTPServer(serverAddress, RequestHandler)
print(f'Starting HTTP Forward server on {host}:{port}...', flush=True)
httpd.serve_forever()
|
ecator/http-forwarder
|
http-forwarder.py
|
http-forwarder.py
|
py
| 6,296
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4312235542
|
def print_rangoli(size):
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
"m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
line_size = 4 * size - 3
l = letters[0: size]
limit = (2 * size - 1) // 2 + 1
sb = "-"
S = []
for i in range(limit):
aux = l[-(1 + i)]
if (i != limit - 1):
string = sb + aux + sb[-1::-1]
else:
string = sb + aux + sb[-1::-1]
string = string[1:len(string) - 1]
num_of_traces = (line_size - len(string)) // 2
S.append("-" * num_of_traces + string + "-" * num_of_traces)
print(S[-1])
sb += aux + "-"
S.pop()
[print(line) for line in S[-1::-1]]
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
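# Sample output (not in the original) produced by print_rangoli(3):
#   ----c----
#   --c-b-c--
#   c-b-a-b-c
#   --c-b-c--
#   ----c----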
|
thiagojuliao/Hacker-Rank
|
Python/Strings/AlphabetRangoli.py
|
AlphabetRangoli.py
|
py
| 813
|
python
|
en
|
code
| 1
|
github-code
|
6
|
42931890464
|
class Solution:
def minSwaps(self, arr, b):
minCount = 0
for i in range(len(arr)):
if arr[i] < b:
minCount +=1
if minCount <= 1:
return 0
else:
rightCount, leftCount, count = 0, 0, 0
while rightCount < minCount:
if arr[rightCount] > b:
count += 1
rightCount += 1
ans = count
while rightCount < len(arr):
if arr[rightCount] > b:
count += 1
if arr[leftCount] > b:
count -= 1
ans = min(ans, count)
rightCount += 1
leftCount += 1
return ans
obj = Solution()
arr, b = [1, 12, 10, 3, 14, 10, 5], 8
ans = obj.minSwaps(arr, b)
print(ans)
|
shwetakumari14/Practice-Problems
|
Pythons Solutions/Minimum Swaps.py
|
Minimum Swaps.py
|
py
| 890
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26624507906
|
from livesettings import *
from django.utils.translation import ugettext_lazy as _
# this is so that the translation utility will pick up the string
gettext = lambda s: s
_strings = (gettext('CreditCard'), gettext('Credit Card'))
PAYMENT_GROUP = ConfigurationGroup('PAYMENT_AUTHORIZENET',
_('Authorize.net Payment Settings'),
ordering=101)
config_register_list(
StringValue(PAYMENT_GROUP,
'CONNECTION',
description=_("Submit to URL"),
help_text=_("""This is the address to submit live transactions."""),
default='https://secure.authorize.net/gateway/transact.dll'),
StringValue(PAYMENT_GROUP,
'CONNECTION_TEST',
description=_("Submit to Test URL"),
help_text=("""If you have a test account with authorize.net and you log in through
https://test.authorize.net/gateway/transact.dll, then you should use the default
test URL. If you do not have a test account you will get an Error 13 message
unless you change the URL to https://secure.authorize.net/gateway/transact.dll.
You will also need to login in to authorize.net and make sure your account has
test mode turned on.
"""),
default='https://test.authorize.net/gateway/transact.dll'),
BooleanValue(PAYMENT_GROUP,
'LIVE',
description=_("Accept real payments"),
help_text=_("False if you want to submit to the test urls. NOTE: If you are testing, then you can use the cc# 4222222222222 to force a bad credit card response. If you use that number and a ccv of 222, that will force a bad ccv response from authorize.net"),
default=False),
BooleanValue(PAYMENT_GROUP,
'SIMULATE',
description=_("Force a test post?"),
help_text=_("True if you want to submit to the live url using a test flag, which won't be accepted."),
default=False),
ModuleValue(PAYMENT_GROUP,
'MODULE',
description=_('Implementation module'),
hidden=True,
default = 'payment.modules.authorizenet'),
StringValue(PAYMENT_GROUP,
'KEY',
description=_("Module key"),
hidden=True,
default = 'AUTHORIZENET'),
StringValue(PAYMENT_GROUP,
'LABEL',
description=_('English name for this group on the checkout screens'),
default = 'Credit Cards',
help_text = _('This will be passed to the translation utility')),
StringValue(PAYMENT_GROUP,
'URL_BASE',
description=_('The url base used for constructing urlpatterns which will use this module'),
default = r'^credit/'),
MultipleStringValue(PAYMENT_GROUP,
'CREDITCHOICES',
description=_('Available credit cards'),
choices = (
(('American Express', 'American Express')),
(('Visa','Visa')),
(('Mastercard','Mastercard')),
(('Discover','Discover'))),
default = ('Visa', 'Mastercard', 'Discover')),
StringValue(PAYMENT_GROUP,
'LOGIN',
description=_('Your authorize.net transaction login'),
default=""),
StringValue(PAYMENT_GROUP,
'TRANKEY',
description=_('Your authorize.net transaction key'),
default=""),
BooleanValue(PAYMENT_GROUP,
'CAPTURE',
description=_('Capture Payment immediately?'),
default=True,
help_text=_('IMPORTANT: If false, a capture attempt will be made when the order is marked as shipped."')),
BooleanValue(PAYMENT_GROUP,
'EXTRA_LOGGING',
description=_("Verbose logs"),
help_text=_("Add extensive logs during post."),
default=False)
)
ARB_ENABLED = config_register(
BooleanValue(PAYMENT_GROUP,
'ARB',
description=_('Enable ARB?'),
default=False,
help_text=_('Enable ARB processing for setting up subscriptions. You must have this enabled in your Authorize account for it to work.')))
config_register(
StringValue(PAYMENT_GROUP,
'ARB_CONNECTION',
description=_("Submit to URL (ARB)"),
help_text=_("""This is the address to submit live transactions for ARB."""),
requires=ARB_ENABLED,
default='https://api.authorize.net/xml/v1/request.api'))
config_register(
StringValue(PAYMENT_GROUP,
'ARB_CONNECTION_TEST',
description=_("Submit to Test URL (ARB)"),
help_text=_("""This is the address to submit test transactions for ARB."""),
requires=ARB_ENABLED,
default='https://apitest.authorize.net/xml/v1/request.api'))
|
dokterbob/satchmo
|
satchmo/apps/payment/modules/authorizenet/config.py
|
config.py
|
py
| 4,529
|
python
|
en
|
code
| 30
|
github-code
|
6
|
34225982423
|
#This file is part of Chess-game-tracker.
#Chess-game-tracker is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Chess-game-tracker is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Chess-game-tracker. If not, see <https://www.gnu.org/licenses/>.
from threading import Thread
import imutils
import cv2
import sys
import time
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue
class Capturer:
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
self.stopped = False
self.Q = Queue(maxsize=200)
self.t = Thread(target=self.get, args=())
self.t.daemon = True
def start(self):
self.t.start()
return self
def running(self):
return self.more() or not self.stopped
def get(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
break
# otherwise, ensure the queue has room in it
if not self.Q.full():
# read the next frame from the file
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stopped = True
# if there are transforms to be done, might as well
# do them on producer thread before handing back to
# consumer thread. ie. Usually the producer is so far
# ahead of consumer that we have time to spare.
#
# Python is not parallel but the transform operations
# are usually OpenCV native so release the GIL.
#
# Really just trying to avoid spinning up additional
# native threads and overheads of additional
# producer/consumer queues since this one was generally
# idle grabbing frames.
# add the frame to the queue
self.Q.put(frame)
else:
time.sleep(0.1) # Rest for 10ms, we have a full queue
self.stream.release()
def stop(self):
self.stopped = True
self.t.join()
def more(self):
# return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
tries = 0
while self.Q.qsize() == 0 and not self.stopped and tries < 5:
time.sleep(0.1)
tries += 1
return self.Q.qsize() > 0
def read(self):
# return next frame in the queue
return self.Q.get()
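# Illustrative usage sketch (not part of the original module): a typical consumer loop
# around the threaded Capturer above. The source index 0 (default webcam) is an assumption.
def _capturer_demo():
    cap = Capturer(src=0).start()
    while cap.running():
        frame = cap.read()
        # ... process `frame` with OpenCV here ...
    cap.stop()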
|
nandovm/chess-game-tracker
|
Chess-game-tracker/multithread/Capturer.py
|
Capturer.py
|
py
| 2,793
|
python
|
en
|
code
| 1
|
github-code
|
6
|
33317389259
|
import pandas as pd
import streamlit as st
import folium
from streamlit_folium import st_folium
st.title("Peta Nilai Properti Kota Tangerang")
st.markdown("Sumber Data: Pusat Pembinaan Profesi Keuangan")
st.markdown("")
# Load data
df_tangcity = pd.read_csv('df_property.csv')
# Set up map
tangcity_map = folium.Map(location=[df_tangcity['Latitude'].mean(), df_tangcity['Longitude'].mean()], zoom_start=14)
# Create sidebar for filtering
st.sidebar.subheader("")
st.sidebar.subheader("Filter Atribut Data Properti")
selected_subdistrict = st.sidebar.selectbox("Pilih Kecamatan", df_tangcity['Kecamatan'].unique())
filtered_subdistrict = df_tangcity[df_tangcity['Kecamatan']== selected_subdistrict]
selected_village = st.sidebar.selectbox("Pilih Desa/Kelurahan", filtered_subdistrict['Desa'].unique())
filtered_village = filtered_subdistrict[filtered_subdistrict['Desa']== selected_village]
selected_valuation_objectives = [st.sidebar.selectbox("Pilih Tujuan Penilaian", df_tangcity['Tujuan Penilaian'].unique())]
filtered_valuation_objectives = filtered_village[filtered_village['Tujuan Penilaian'].isin(selected_valuation_objectives)]
selected_property_types = st.sidebar.multiselect("Pilih Jenis Properti (Bisa >1)", df_tangcity['Jenis_Objek'].unique())
filtered_data = filtered_valuation_objectives[filtered_valuation_objectives['Jenis_Objek'].isin(selected_property_types)]
selected_display = st.sidebar.multiselect("Pilih Nilai untuk Ditampilkan (Bisa >1)", options=["Nilai Tanah/m2", "Nilai Objek", "Total Nilai"], default=[])
# Set up map
if len(filtered_data) == 0:
tangcity_map = folium.Map(location=[df_tangcity['Latitude'].mean(), df_tangcity['Longitude'].mean()], zoom_start=14)
else:
tangcity_map= folium.Map(location=[filtered_data['Latitude'].mean(), filtered_data['Longitude'].mean()], zoom_start=16)
# Loop over filtered data and add markers to map
for index, row in filtered_data.iterrows():
lat = row['Latitude']
lon = row['Longitude']
nilai_tanah = row['Indikasi Nilai Tanah']
nilai_objek = row['Nilai Objek']
total_nilai = row['Total Nilai']
tanggal_penilaian = row['Tgl Penilaian']
# Construct html string based on selected values
html = ""
if "Nilai Tanah/m2" in selected_display:
html += f"Nilai Tanah: {nilai_tanah}/m<sup>2</sup><br>"
if "Nilai Objek" in selected_display:
html += f"Nilai Objek: {nilai_objek}<br>"
if "Total Nilai" in selected_display:
html += f"Total Nilai: {total_nilai}<br>"
# Always add Tanggal Penilaian as hover information
html += f"Tgl. Penilaian: {tanggal_penilaian}"
# Add marker to map with hover information
folium.Marker(
[lat, lon],
tooltip=html
).add_to(tangcity_map)
# Display the map
tangcity_data = st_folium(tangcity_map, width=725, height=450)
|
danarssidig/propertymap
|
property_map.py
|
property_map.py
|
py
| 2,935
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72197108348
|
#Look for #IMPLEMENT tags in this file. These tags indicate changes in the
#file to implement the required routines.
'''8-Puzzle STATESPACE
'''
import copy
from search import *
class eightPuzzle(StateSpace):
StateSpace.n = 0
def __init__(self, action, gval, state, parent = None):
'''Create an 8-puzzle state object.
        The parameter state represents the puzzle configuration as a list of 9 numbers in the range [0-8]
The 9 numbers specify the position of the tiles in the puzzle from the
top left corner, row by row, to the bottom right corner. E.g.:
[2, 4, 5, 0, 6, 7, 8, 1, 3] represents the puzzle configuration
|-----------|
| 2 | 4 | 5 |
|-----------|
| | 6 | 7 |
|-----------|
| 8 | 1 | 3 |
|-----------|
'''
#Note we represent the puzzle configuration in the state member.
#the list of tile positions.
StateSpace.__init__(self, action, gval, parent)
self.state = state
def successors(self) :
#IMPLEMENT
'''Implement the actions of the 8-puzzle search space.'''
# IMPORTANT. The list of successor states returned must be in the ORDER
        # move blank down, move blank up, move blank right, move blank left
# (with some successors perhaps missing if they are not available
# moves from the current state, but the remaining ones in this
# order!)
states = list()
blank_index = self.state.index(0)
#if you can move blank down
if blank_index != 6 and blank_index != 7 and blank_index != 8:
new_state_down = copy.deepcopy(self.state)
new_state_down[blank_index] = new_state_down[blank_index+3]
new_state_down[blank_index+3] = 0
states.append(eightPuzzle("Blank-Down", self.gval+1, new_state_down, self))
#if you can move blank up
if blank_index != 0 and blank_index != 1 and blank_index != 2:
new_state_up = copy.deepcopy(self.state)
new_state_up[blank_index] = new_state_up[blank_index-3]
new_state_up[blank_index-3] = 0
states.append(eightPuzzle("Blank-Up", self.gval+1, new_state_up, self))
#if you can move blank right
if blank_index != 2 and blank_index != 5 and blank_index != 8:
new_state_right = copy.deepcopy(self.state)
new_state_right[blank_index] = new_state_right[blank_index+1]
new_state_right[blank_index+1] = 0
states.append(eightPuzzle("Blank-Right", self.gval+1, new_state_right, self))
#if you can move blank left
if blank_index != 0 and blank_index != 3 and blank_index != 6:
new_state_left = copy.deepcopy(self.state)
new_state_left[blank_index] = new_state_left[blank_index-1]
new_state_left[blank_index-1] = 0
states.append(eightPuzzle("Blank-Left", self.gval+1, new_state_left, self))
return states
def hashable_state(self) :
#IMPLEMENT
return (tuple(self.state))
def print_state(self):
#DO NOT CHANGE THIS METHOD
if self.parent:
print("Action= \"{}\", S{}, g-value = {}, (From S{})".format(self.action, self.index, self.gval, self.parent.index))
else:
print("Action= \"{}\", S{}, g-value = {}, (Initial State)".format(self.action, self.index, self.gval))
print("|-----------|")
print("| {} | {} | {} |".format(self.state[0],self.state[1],self.state[2]))
print("|-----------|")
print("| {} | {} | {} |".format(self.state[3],self.state[4],self.state[5]))
print("|-----------|")
print("| {} | {} | {} |".format(self.state[6],self.state[7],self.state[8]))
print("|-----------|")
#Set up the goal.
#We allow any full configuration of the puzzle to be a goal state.
#We use the class variable "eightPuzzle.goal_state" to store the goal configuration.
#The goal test function compares a state's configuration with the goal configuration
eightPuzzle.goal_state = False
def eightPuzzle_set_goal(state):
'''set the goal state to be state. Here state is a list of 9
numbers in the same format as eightPuzzle.___init___'''
eightPuzzle.goal_state = state
def eightPuzzle_goal_fn(state):
return (eightPuzzle.goal_state == state.state)
def heur_zero(state):
'''Zero Heuristic use to make A* search perform uniform cost search'''
return 0
def h_misplacedTiles(state):
#IMPLEMENT
#return the number of tiles (NOT INCLUDING THE BLANK) in state that are not in their goal
    #position. (will need to access the class variable eightPuzzle.goal_state)
misplaced_tiles = 0
for x in range(len(state.state)):
if (state.state[x] != eightPuzzle.goal_state[x] and state.state[x] != 0):
misplaced_tiles += 1
return misplaced_tiles
def h_MHDist(state):
#return the sum of the manhattan distances each tile (NOT INCLUDING
#THE BLANK) is from its goal configuration.
#The manhattan distance of a tile that is currently in row i column j
#and that has to be in row x column y in the goal is defined to be
# abs(i - x) + abs(j - y)
array = matrixfy(state.state)
goal_state = matrixfy(eightPuzzle.goal_state)
manhattan_distance = 0
    for tile in state.state:
        if tile != 0:
            i, j = locate(tile, array)          # current (row, col) of the tile
            x, y = locate(tile, goal_state)     # goal (row, col) of the tile
            manhattan_distance += (abs(i - x) + abs(j - y))
return manhattan_distance
def matrixfy(lst):
array = []
array.append(lst[0:3])
array.append(lst[3:6])
array.append(lst[6:9])
return array
def locate(number, array):
for x in array:
if number in x:
return (array.index(x), x.index(number))
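
# --- Illustrative usage sketch (added; not part of the original file) ---
# Shows how a goal is registered and how the two heuristics compare on the sample
# configuration from the class docstring; assumes search.py (imported above)
# provides the StateSpace base class.
if __name__ == "__main__":
    eightPuzzle_set_goal([1, 2, 3, 4, 5, 6, 7, 8, 0])
    start = eightPuzzle("START", 0, [2, 4, 5, 0, 6, 7, 8, 1, 3])
    print("misplaced tiles:", h_misplacedTiles(start))
    print("manhattan distance:", h_MHDist(start))
    print("number of successors:", len(start.successors()))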
|
TylerPham/Eight_puzzle_solver
|
eightPuzzle.py
|
eightPuzzle.py
|
py
| 5,880
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26626826683
|
# Import the qrcode library
import qrcode
# Create a qr code instance
qr = qrcode.QRCode(
version = 1,
error_correction = qrcode.constants.ERROR_CORRECT_L,
box_size = 10,
border = 4,
)
# The data that you want to encode
data = "192.168.1.19:8765"
# Add the data
qr.add_data(data)
qr.make(fit=True)
# Create an image from the QR code instance
img = qr.make_image(fill_color="black", back_color="white")
# Save it somewhere, change the extension as needed:
img.save("./image_name.png")
# img.save("image_name.bmp")
# img.save("image_name.jpeg")
|
Gex-devs/val_overlay
|
ts/BackUp_Local_Api_py/QRcode.py
|
QRcode.py
|
py
| 564
|
python
|
en
|
code
| 2
|
github-code
|
6
|
41119379113
|
import random
random.seed(1)
import numpy as np
np.random.seed(1)
import tensorflow.compat.v1 as tf
tf.random.set_random_seed(1)
import gym
import os
tf.disable_v2_behavior()
env = gym.make('CartPole-v1')
class PolicyNetwork:
def __init__(self, state_size, action_size, learning_rate, name='policy_network'):
self.state_size = state_size
self.action_size = action_size
self.learning_rate = learning_rate
with tf.variable_scope(name):
self.state = tf.placeholder(tf.float32, [None, self.state_size], name="state")
self.action = tf.placeholder(tf.int32, [self.action_size], name="action")
self.td_error = tf.placeholder(tf.float32, name="td_error")
self.I = tf.placeholder(tf.float32, name="I")
self.W1 = tf.get_variable("W1", [self.state_size, 12], initializer=tf.keras.initializers.glorot_normal(seed=0))
self.b1 = tf.get_variable("b1", [12], initializer=tf.zeros_initializer())
self.W2 = tf.get_variable("W2", [12, self.action_size], initializer=tf.keras.initializers.glorot_normal(seed=0))
self.b2 = tf.get_variable("b2", [self.action_size], initializer=tf.zeros_initializer())
self.Z1 = tf.add(tf.matmul(self.state, self.W1), self.b1)
self.A1 = tf.nn.relu(self.Z1)
self.output = tf.add(tf.matmul(self.A1, self.W2), self.b2)
# Softmax probability distribution over actions
self.actions_distribution = tf.squeeze(tf.nn.softmax(self.output))
# Loss with negative log probability
self.neg_log_prob = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.output, labels=self.action)
self.loss = tf.reduce_mean(self.I * self.neg_log_prob * self.td_error)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)
class ValueNetwork:
def __init__(self, state_size, learning_rate, name='value_network'):
self.state_size = state_size
self.learning_rate = learning_rate
with tf.variable_scope(name):
self.state = tf.placeholder(tf.float32, [None, self.state_size], name="state")
self.td_error = tf.placeholder(tf.float32, name='td_error')
self.I = tf.placeholder(tf.float32, name="I")
self.W1 = tf.get_variable("W1", [self.state_size, 256], initializer=tf.keras.initializers.glorot_normal(seed=0))
self.b1 = tf.get_variable("b1", [256], initializer=tf.zeros_initializer())
self.W2 = tf.get_variable("W2", [256, 64], initializer=tf.keras.initializers.glorot_normal(seed=0))
self.b2 = tf.get_variable("b2", [64], initializer=tf.zeros_initializer())
self.W3 = tf.get_variable("W3", [64, 1], initializer=tf.keras.initializers.glorot_normal(seed=0))
self.b3 = tf.get_variable("b3", [1], initializer=tf.zeros_initializer())
self.Z1 = tf.add(tf.matmul(self.state, self.W1), self.b1)
self.A1 = tf.nn.relu(self.Z1)
self.Z2 = tf.add(tf.matmul(self.A1, self.W2), self.b2)
self.A2 = tf.nn.relu(self.Z2)
self.output = tf.add(tf.matmul(self.A2, self.W3), self.b3)
self.loss = tf.reduce_mean(-self.I * self.output * self.td_error)
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
# Define hyperparameters
state_size = 4
action_size = env.action_space.n
max_episodes = 5000
max_steps = 501
discount_factor = 0.99
critic_learning_rate = 0.002
actor_learning_rate = 0.0004
render = False
# Initialize the actor network
tf.reset_default_graph()
actor = PolicyNetwork(state_size, action_size, actor_learning_rate)
critic = ValueNetwork(state_size, critic_learning_rate)
# tensorboard logs
actor_loss_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="policy_losses", tensor=actor_loss_placeholder)
critic_loss_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="value_losses", tensor=critic_loss_placeholder)
reward_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="reward", tensor=reward_placeholder)
avg_reward_placeholder = tf.compat.v1.placeholder(tf.float32)
tf.compat.v1.summary.scalar(name="avg_reward", tensor=avg_reward_placeholder)
log_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')
if not os.path.isdir(log_path):
os.mkdir(log_path)
writer = tf.compat.v1.summary.FileWriter(log_path)
summaries = tf.compat.v1.summary.merge_all()
print('saving logs to: %s' % log_path)
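# --- Illustrative note (added; not part of the original script) ---
# The one-step actor-critic update performed inside the loop below, written out:
#   td_error = r_t + gamma * V(s_{t+1}) - V(s_t)
#   critic loss:  mean(-I * V(s_t) * td_error)            (semi-gradient TD(0))
#   actor  loss:  mean( I * (-log pi(a_t|s_t)) * td_error)
# with I multiplied by gamma after every step (discounting from the episode start).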
# Start training the agent with the one-step actor-critic algorithm
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
solved = False
episode_rewards = np.zeros(max_episodes)
average_rewards = 0.0
episode_critic_loss = []
episode_actor_loss = []
for episode in range(max_episodes):
state = env.reset()
# state = np.concatenate([state, np.asarray([0])])
state = state.reshape([1, state_size])
episode_transitions = []
I=1
for step in range(max_steps):
value = sess.run(critic.output, {critic.state: state})
actions_distribution = sess.run(actor.actions_distribution, {actor.state: state})
action = np.random.choice(np.arange(len(actions_distribution)), p=actions_distribution)
next_state, reward, done, _ = env.step(action)
# next_state = np.concatenate([next_state, np.asarray([(step + 1) / max_steps])])
next_state = next_state.reshape([1, state_size])
next_value = sess.run(critic.output, {critic.state: next_state}) if not done else 0
if render:
env.render()
action_one_hot = np.zeros(action_size)
action_one_hot[action] = 1
episode_rewards[episode] += reward
target = reward + discount_factor * next_value
td_error = target - value
value_feed_dict = {critic.state: state, critic.td_error: td_error, critic.I: I}
_, critic_loss = sess.run([critic.optimizer, critic.loss], value_feed_dict)
policy_feed_dict = {actor.state: state, actor.td_error: td_error, actor.action: action_one_hot,actor.I: I}
_, actor_loss = sess.run([actor.optimizer, actor.loss], policy_feed_dict)
state = next_state
episode_critic_loss.append(critic_loss)
episode_actor_loss.append(actor_loss)
if done:
if episode > 98:
# Check if solved
average_rewards = np.mean(episode_rewards[(episode - 99):episode+1])
print("Episode {} Reward: {} Average over 100 episodes: {}".format(episode, episode_rewards[episode], round(average_rewards, 2)))
if average_rewards > 475:
print(' Solved at episode: ' + str(episode))
solved = True
break
I = I * discount_factor
if solved:
break
avg_actor_loss = np.mean(episode_actor_loss)
avg_critic_loss = np.mean(episode_critic_loss)
summery = sess.run(summaries, feed_dict={actor_loss_placeholder: avg_actor_loss,
critic_loss_placeholder: avg_critic_loss,
reward_placeholder: episode_rewards[episode],
avg_reward_placeholder: average_rewards if episode > 98 else 0})
writer.add_summary(summery, global_step=episode)
|
eladfeld/DRL_hw
|
hw2/actor_critic.py
|
actor_critic.py
|
py
| 7,669
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23777756221
|
from conformity.fields import Dictionary, UnicodeString, List
import json
instance = Dictionary({
"title": UnicodeString(),
"url": UnicodeString(),
"about_url": UnicodeString(),
"description": UnicodeString(),
"tags": List(UnicodeString()),
}, optional_keys=["description", "tags", "about_url"])
instances = List(instance)
def test_registry():
data = json.load(open('registry.json'))
assert [] == instances.errors(data)
|
simonw/datasette-registry
|
test_registry.py
|
test_registry.py
|
py
| 451
|
python
|
en
|
code
| 1
|
github-code
|
6
|
25608742266
|
# Read n (number of rice cakes) and m (required total length of cut-off rice cake)
n, m = map(int, input().split())
# Read the list of rice cake lengths
array = list(map(int, input().split()))
# Binary-search the cutter height between 0 and the length of the longest rice cake
lt = 0
rt = max(array)
result = 0
# Binary search
while lt <= rt:
    mid = (lt + rt) // 2
    total = 0
    # For every rice cake taller than mid, add the piece that gets cut off
    for i in array:
        if i > mid:
            total += i - mid
    # If at least m was collected, mid is feasible: record it and try a higher cutter
    if total >= m:
        result = mid
        lt = mid + 1
    # Otherwise too little was collected, so the cutter must be lowered
    else:
        rt = mid - 1
# Print the maximum feasible cutter height
print(result)
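# --- Illustrative check (added; not part of the original solution) ---
# On the well-known sample "4 6" with heights 19 15 10 17, a cutter height of 15
# cuts off (19-15) + (17-15) = 6 >= m, while height 16 only yields 3 + 1 = 4,
# so the search settles on 15.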
|
seyiclover/AlgorithmStudy
|
Seyi/BinarySearch/떡볶이 떡 만들기.py
|
떡볶이 떡 만들기.py
|
py
| 767
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
9345123435
|
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
analyzer, InnerObjectWrapper, Completion, Keyword, Text
from elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=["140.143.211.106"])
class CustomAnalyzer(_CustomAnalyzer):
def get_analysis_definition(self):
return {}
ik_analyzer = CustomAnalyzer("ik_max_word", filter=["lowercase"])
class BaiduType(DocType):
suggest = Completion(analyzer=ik_analyzer)
url = Keyword()
title = Text(analyzer="ik_max_word")
summary = Text(analyzer="ik_max_word")
content = Text(analyzer="ik_max_word")
class Meta:
index = "baidu"
doc_type = "baike"
def gen_suggest(index, info_tuple):
    # Build the search-suggestion array from the given strings
    es = connections.create_connection(BaiduType._doc_type.using, hosts=["140.143.211.106"])  # connect to elasticsearch (the search engine) through the document class's _doc_type.using connection
used_words = set()
suggests = []
for text, weight in info_tuple:
if text:
# 调用es的analyze接口分析字符串,
words = es.indices.analyze(index="baidu", analyzer="ik_max_word", params={'filter': ["lowercase"]}, body=text)
anylyzed_words = set([r["token"] for r in words["tokens"] if len(r["token"])>1])
new_words = anylyzed_words - used_words
else:
new_words = set()
if new_words:
suggests.append({"input":list(new_words), "weight":weight})
return suggests
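
# --- Illustrative note (added; not part of the original module) ---
# gen_suggest() takes (text, weight) pairs and returns completion entries shaped
# like [{"input": ["token1", "token2"], "weight": 10}, ...]; a hypothetical call:
#   suggests = gen_suggest("baidu", ((title, 10), (summary, 7)))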
if __name__ == "__main__":
BaiduType.init()
|
XiaoShenLong/scrapy-search
|
baike_spider/baike/models/es_types.py
|
es_types.py
|
py
| 1,703
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33562211468
|
import cv2
import numpy as np
def contraste(inp):
f,c,color=inp.shape
c1=np.min(inp)
d=np.max(inp)
for i in range(f):
for j in range(c):
inp[i][j][0]=round((inp[i][j][0]-c1)*((255)/(d-c1)))
inp[i][j][1]=round((inp[i][j][1]-c1)*((255)/(d-c1)))
inp[i][j][2]=round((inp[i][j][2]-c1)*((255)/(d-c1)))
return inp
def multi(img1,constante):
f,c,color=img1.shape
for i in range(f):
for j in range(c):
r1=int(img1[i][j][0])*constante
r2=int(img1[i][j][1])*constante
r3=int(img1[i][j][2])*constante
if(r1<0):
img1[i][j][0]=0
elif(r1>255):
img1[i][j][0]=255
else:
img1[i][j][0]=r1
if(r2<0):
img1[i][j][1]=0
elif(r2>255):
img1[i][j][1]=255
else:
img1[i][j][1]=r2
if(r3<0):
img1[i][j][2]=0
elif(r3>255):
img1[i][j][2]=255
else:
img1[i][j][2]=r3
return img1
img1 = cv2.imread('tigre.jpeg')
img1 = cv2.resize(img1, (400, 400))
# contraste() modifies its argument in place, so pass a copy to keep the original for comparison
img2 = contraste(img1.copy())
cv2.imshow('res1', img1)
cv2.imshow('res2', img2)
# keep the windows open until a key is pressed
cv2.waitKey(0)
cv2.destroyAllWindows()
|
renzovc987/CG
|
multipliacion.py
|
multipliacion.py
|
py
| 1,313
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38938537041
|
from Cache import Cache
from Bus import Bus
class Control:
def __init__(self, number):
self.cache = Cache()
self.cache_state = self.cache.cache_mem
self.bus = Bus.get_instance()
self.bus.set_proc_control(number, self)
self.number = number
def read_data(self, dir_mem, out_request=False):
block_set = []
block = ""
i = 0
if dir_mem % 2 == 0:
block_set = self.cache_state["0"]
else:
block_set = self.cache_state["1"]
if block_set[0]["dir"] == dir_mem and block_set[0]["state"] != "I":
block = self.cache.read_data(dir_mem)
i = 0
elif block_set[1]["dir"] == dir_mem and block_set[1]["state"] != "I":
block = self.cache.read_data(dir_mem)
i = 1
if isinstance(block, str) and not out_request:
return self.check_bus_data(dir_mem, "read")
elif not isinstance(block, str) and out_request:
replacement_state = "S"
print(block)
if block["state"] == "E" or block["state"] == "M":
replacement_local_state = ""
if block["state"] == "E":
replacement_local_state = "S"
elif block["state"] == "M":
replacement_local_state = "O"
self.cache.change_state(dir_mem, replacement_local_state, i)
return (replacement_state, block["value"])
elif not isinstance(block, str):
return block["value"]
else:
return ()
def check_bus_data(self, dir_mem, flag_func):
block_set = []
block = ""
if dir_mem % 2 == 0:
block_set = self.cache_state["0"]
else:
block_set = self.cache_state["1"]
if block_set[0]["dir"] == dir_mem:
block = block_set[0]
elif block_set[1]["dir"] == dir_mem:
block = block_set[1]
block = self.bus.read_data(dir_mem, self.number)
i = self.__replacement_policie(dir_mem)
self.cache.write_data(block[1], i, dir_mem, block[0])
return block
def write_data(self, dir_mem, data):
block_set = []
block = 0
if dir_mem % 2 == 0:
block_set = self.cache_state["0"]
else:
block_set = self.cache_state["1"]
if block_set[0]["dir"] == dir_mem:
block = 0
elif block_set[1]["dir"] == dir_mem:
block = 1
if isinstance(block, str):
block = self.__replacement_policie(dir_mem)
self.bus.invalidate_all(dir_mem, self.number)
return self.cache.write_data(data, block, dir_mem, "M")
def __replacement_policie(self, dir_mem):
block_set = []
if dir_mem % 2 == 0:
block_set = self.cache_state["0"]
else:
block_set = self.cache_state["1"]
if block_set[0]["state"] == "I" or block_set[1]["state"] == "I":
i = 0
if block_set[1]["state"] == "I" and block_set[0]["state"] != "I":
i = 1
elif block_set[0]["state"] == "E" or block_set[1]["state"] == "E":
i = 0
if block_set[1]["state"] == "E" and block_set[0]["state"] != "E":
i = 1
elif block_set[0]["state"] == "S" or block_set[1]["state"] == "S":
i = 0
if block_set[1]["state"] == "S" and block_set[0]["state"] != "S":
i = 1
elif block_set[0]["state"] == "M" or block_set[1]["state"] == "M":
i = 0
if block_set[1]["state"] == "M" and block_set[0]["state"] != "M":
i = 1
value = self.cache.read_data(block_set[i]["dir"])
self.bus.write_mem_data(block_set[i]["dir"], value)
elif block_set[0]["state"] == "O" or block_set[1]["state"] == "O":
i = 0
if block_set[1]["state"] == "O" and block_set[0]["state"] != "O":
i = 1
value = self.cache.read_data(block_set[i]["dir"])
self.bus.write_mem_data(block_set[i]["dir"], value)
return i
def invalidate_bw(self, dir_mem):
block_set = []
if dir_mem % 2 == 0:
block_set = self.cache_state["0"]
else:
block_set = self.cache_state["1"]
block = ""
if block_set[0]["dir"] == dir_mem:
block = 0
elif block_set[1]["dir"] == dir_mem:
block = 1
if not isinstance(block, str):
if block_set[block]["state"] == "M" or block_set[block]["state"] == "O":
value = self.cache.read_data(block_set[block]["dir"])
self.bus.write_mem_data(block_set[block]["dir"], value)
self.cache.change_state(dir_mem, "I", block)
return True
else:
return False
|
ortegajosant/cachecoherence
|
Control.py
|
Control.py
|
py
| 4,935
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41635641329
|
'''
This program rewrites corpus files like LLL_triplets.txt into [ID \t sentence] lines.
The corpus contains ___ placeholders, which stand for protein names other than PROT1 and PROT2.
We replace each ___ with PROT3, PROT4, ...
'''
import sys, getopt
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('usage: RewriteCorpus.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('usage: RewriteCorpus.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
IN = open(inputfile, 'r')
OUT = open(outputfile, 'w')
trip_count = 0
sen_ID = []
for line in IN:
arr = line.strip().split('|')
if arr[1].strip().split('\t')[0] != sen_ID:
trip_count = 0
sen_ID = arr[1].strip().split('\t')[0]
#trip_ID = sen_ID + '_' + str(trip_count)
trip_ID = sen_ID
trip_count += 1
sen = arr[-1].strip().replace('-',' - ').replace('(',' ( ').replace(')',' ) ').replace('/',' / ').split(' ')
p_count = 3
for i, word in enumerate(sen):
if word == '':
continue
if word in '_______________________':
sen[i] = 'PROT'+str(p_count)
p_count += 1
sentence = ' '.join(sen).replace(' - ','-').replace(' ( ','(').replace(' ) ',')')
OUT.write(trip_ID + '\t' + sentence + '\n')
OUT.close()
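
# --- Illustrative sketch (added; not part of the original script) ---
# Re-implements just the ___ -> PROT3/PROT4 replacement described in the module
# docstring on a made-up sentence, independent of any input file.
def _demo_replace(sentence):
    words = sentence.split(' ')
    p_count = 3
    for i, word in enumerate(words):
        if word != '' and word in '_______________________':
            words[i] = 'PROT' + str(p_count)
            p_count += 1
    return ' '.join(words)

# _demo_replace("___ interacts with PROT1 and ___")
# -> "PROT3 interacts with PROT1 and PROT4"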
|
PeiYau-Lung/ChemProtBioCreativeVI
|
src/RewriteCorpus.py
|
RewriteCorpus.py
|
py
| 1,381
|
python
|
en
|
code
| 7
|
github-code
|
6
|
12877884293
|
from sklearn import datasets
from sklearn.preprocessing import MaxAbsScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.exceptions import ConvergenceWarning
from numpy import mean, array
from articles_to_features import vectorize, get_feature_mappings
from newspaper import Article
import warnings
import pickle
import io
import os
import requests
def load_training_data():
X, y = datasets.load_svmlight_file(open('features_file.txt', 'rb'))
true_center = mean(y)
# Preprocess
left_threshold = 7.5
right_threshold = 7.5
def discretize(val):
if val < -left_threshold:
return -1
elif val < right_threshold:
return 0
else:
return 1
return MaxAbsScaler().fit(X).transform(X), [discretize(val) for val in y]
def load_test_data():
X, y = datasets.load_svmlight_file(open('allsides_vectors.txt', 'rb'))
# Preprocess
def discretize(val):
if val <= -2:
return -1
elif val < 2:
return 0
else:
return 1
return MaxAbsScaler().fit(X).transform(X), [discretize(val) for val in y]
def load_model():
return LogisticRegression(solver='saga', random_state=0)
def load_trained_model():
if not os.path.exists('left_right_model.pkl'):
X, y = load_training_data()
model = load_model()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
pickle.dump(model, open('left_right_model.pkl', 'wb'))
else:
model = pickle.load(open('left_right_model.pkl', 'rb'))
return model
class Left_right_classifier(object):
def __init__(self):
self.__model = load_trained_model()
def classify_article_from_url(self, x_article_url):
return self.classify_html_article(requests.get(x_article_url).content)
def classify_html_article(self, x_article_html):
article = Article(url='')
article.download(input_html=x_article_html)
article.parse()
return self.classify_article(article.text, article.title)
def classify_article(self, x_article_text, x_article_title=''):
vectorized = vectorize(get_feature_mappings(), x_article_title + '\n' + x_article_text, 0)
return self.classify_vectorized_article(vectorized)
def classify_vectorized_article(self, x_vec):
if isinstance(x_vec, str):
x_vec, _ = datasets.load_svmlight_file(io.BytesIO(x_vec.encode()), n_features=len(self.__model.coef_[0]))
return self.__model.predict(x_vec)[0]
if __name__ == '__main__':
X, y = load_training_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model = load_model()
model.fit(X_train, y_train)
y_predictions = model.predict(X_test)
print(f'Model test accuracy_score={accuracy_score(y_test, y_predictions)}')
print(classification_report(y_test, y_predictions, target_names=['left', 'center', 'right']))
conf_matrix = confusion_matrix(y_test, y_predictions)
print('Confusion Matrix')
print(conf_matrix)
print(f' Left marked as right = {conf_matrix[0][2]/sum(conf_matrix[0])}')
print(f' Right marked as left = {conf_matrix[2][0]/sum(conf_matrix[2])}')
print()
print(f' Center marked as right = {conf_matrix[1][2]/sum(conf_matrix[1])}')
print(f' Center marked as left = {conf_matrix[1][0]/sum(conf_matrix[1])}')
print()
classifier = Left_right_classifier()
print(classifier.classify_article_from_url('https://www.vox.com/2020/4/20/21225016/protests-stay-at-home-orders-trump-conservative-group-michigan'))
print(classifier.classify_article_from_url('https://www.cnn.com/2020/04/20/politics/aoc-2022-senate-schumer/index.html'))
print(classifier.classify_article_from_url('https://www.vox.com/covid-19-coronavirus-us-response-trump/2020/4/19/21227175/coronavirus-trump-who-information-china-embeds-december'))
print(classifier.classify_article_from_url('https://www.vice.com/en_us/article/4agzpn/texas-anti-lockdown-protesters-are-coming-for-fauci-now'))
print(classifier.classify_article_from_url('https://www.infowars.com/trump-to-press-you-and-the-obama-administration-were-duped-for-years-by-china/'))
print(classifier.classify_article_from_url('https://www.dailywire.com/news/poll-people-have-no-idea-joe-biden-is-talking-about-coronavirus'))
print(classifier.classify_article_from_url('https://www.louderwithcrowder.com/opinion-sorry-democrats-its-not-the-republicans-who-are-nazis/'))
print(classifier.classify_article_from_url('https://dailycaller.com/2020/04/20/alexandria-ocasio-cortez-oil-drop-tweet-lost-jobs/'))
|
abhi-baireddy/IRProject
|
left_right_classifier.py
|
left_right_classifier.py
|
py
| 5,085
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18244520374
|
import json
from datetime import datetime, timedelta
from news_utilities import add_news
cache_file = '/home/pi/AlarmClockProject/AlarmClock/cache/calendars/' + 'cal_ed.json'
homeworks = None
calendar = None
notes = None
datetime_now = datetime.now()
def str_to_datetime(date_str):
if len(date_str) == 16:
datetime_time = datetime(int(date_str[:4]), # Year
int(date_str[5:7]), # Month
int(date_str[8:10]), # Day
int(date_str[11:13]) % 24, # Hour
int(date_str[14:16]) % 60) # Minute
else:
datetime_time = datetime(int(date_str[:4]), # Year
int(date_str[5:7]), # Month
int(date_str[8:10])) # Day
return(datetime_time)
def float_to_str(floating_number):
if floating_number == float(int(floating_number)):
string_number = str(int(floating_number))
else:
string_number = str(floating_number)
return string_number
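
# --- Illustrative examples (added; not part of the original module) ---
# str_to_datetime("2023-05-02 08:30") -> datetime(2023, 5, 2, 8, 30)
# str_to_datetime("2023-05-02")       -> datetime(2023, 5, 2)
# float_to_str(15.0) -> "15"          float_to_str(14.5) -> "14.5"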
def get_ed_data():
global homeworks, calendar, notes
from EcoleDirect import EcoleDirect
file_path = "/home/pi/credentials/EcoleDirecte/credentials.txt"
    # credentials file format: "id:<user>\npwd:<password>"
creds = open(file_path, "r").read().split("\n")
user = creds[0].split(':')[1]
pwd = creds[1].split(':')[1]
ed = EcoleDirect(user, pwd)
homeworks = ed.getHW() # Get HomeWork
calendar = ed.getWT() # Get WorkTime
notes = ed.getNotes() # Get Notes
def store_calendar():
if any(data is None for data in [homeworks, calendar, notes]):
get_ed_data()
home_works = []
for work_day in homeworks:
for work in homeworks[work_day]:
code = work['codeMatiere']
if not work['effectue']:
home_works.append([code, work_day])
events = []
for lesson in calendar:
dtstart = lesson['start_date']
dtend = lesson['end_date']
summary = lesson['codeMatiere']
if any(dtstart[:10] == work[1] and summary == work[0] for work in home_works):
todo = True
else:
todo = False
if summary == '':
continue
event = {'DTSTART': dtstart,
'DTEND': dtend,
'SUMMARY': summary,
'TODO': todo,
'CAL_ID': '200'}
events.append(event)
# Store the events in a new calendar file
with open(cache_file, 'w', encoding='utf-8') as jsonfile:
json.dump(events, jsonfile, ensure_ascii=False, indent=4)
def get_calendar():
with open(cache_file, "r") as json_file:
events = json.load(json_file)
return events
def get_latest_notes():
if any(data is None for data in [homeworks, calendar, notes]):
get_ed_data()
last_n_days = 10
notes_ = sorted(notes['notes'], key=lambda i: i['dateSaisie'])
news_desc = ''
notes_by_subject = {}
for note in notes_:
saisie_time = str_to_datetime(note['dateSaisie'])
if saisie_time < datetime_now-timedelta(days=last_n_days):
continue
individual_note = float(note['valeur'].replace(",", "."))
note_max = float(note['noteSur'].replace(",", "."))
class_avg = float(note['moyenneClasse'].replace(",", "."))
better_than_class = individual_note > class_avg
note_display = (float_to_str(individual_note)
+ ('+' if better_than_class else '-')
+ (float_to_str(note_max) if note_max != 20.0 else "")
+ " ")
if not note['codeMatiere'] in notes_by_subject.keys():
notes_by_subject[note['codeMatiere']] = ""
notes_by_subject[note['codeMatiere']] += note_display
for note_subject in notes_by_subject.keys():
note = notes_by_subject[note_subject]
news_desc += f"\n{note_subject} : {note}"
add_news(300, datetime_now, 'Latest notes', news_desc)
if __name__ == "__main__":
store_calendar()
get_latest_notes()
|
cg-Kdaf/RPIAlarmClock
|
src/ED_utilities.py
|
ED_utilities.py
|
py
| 4,112
|
python
|
en
|
code
| 1
|
github-code
|
6
|
33816564423
|
from pyswip.prolog import Prolog
from pyswip.easy import *
prolog = Prolog() # Global handle to interpreter
def query_the_KB(query):
price, distance, cuisine_type = query
retractall = Functor("retractall")
known = Functor("known",3)
def read_list_choice_py(A, V, Y):
if str(A) == 'price':
Y.unify(price.lower())
return True
elif str(A) == 'distance':
Y.unify(distance.lower())
return True
elif str(A) == 'cuisine_type':
Y.unify(cuisine_type.lower())
return True
read_list_choice_py.arity = 3
registerForeign(read_list_choice_py)
prolog.consult("KB.pl") # open the KB
call(retractall(known))
results = []
for result in prolog.query("restaurant(X).", maxresult=30):
results.append(result['X'])
return results
# `queries_from_GUI` is expected to be supplied by the GUI layer as a list of
# (price, distance, cuisine_type) tuples; it is not defined in this module.
for query in queries_from_GUI:
    print(query_the_KB(query))
|
AshNguyen/CS152-LBA
|
query_KB.py
|
query_KB.py
|
py
| 929
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43085107371
|
#!/usr/bin/env python
import numpy as np
from matplotlib import pyplot as plt
import argparse
import sys
import parse_ats
def load(fname, density):
dat = np.loadtxt(fname) # units s, mol/s
dat[:,0] = dat[:,0] / 86400. # convert to days
dat[:,1] = dat[:,1] / density * 86400 # convert to m^3/d
return dat
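
# --- Illustrative note (added; not part of the original script) ---
# With the default density of 55000 mol/m^3, a runoff of 1 mol/s becomes
# 1 / 55000 * 86400 ≈ 1.57 m^3/day, and a timestamp of 86400 s becomes 1 day.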
def plot(data, format='-', color='b', name=None, ax=None):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data[:,0], data[:,1], format, color=color, label=name)
ax.set_xlabel("time [days]")
ax.set_ylabel("runoff [m^3 / day]")
return ax
def load_area_rain(args):
k,t,d = parse_ats.readATS(args.directory, args.filename)
cv = d[args.area_key][k[0]][:]
area = cv.sum()
rain = np.array([(d[args.rainfall_rate_key][key][:] * cv).sum() for key in k]) * 86400
return area, t*365.25, rain # units m^2, days, m^3/s
def plot_rain(area, t, rain, format='--', color='k', ax=None):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, rain, format, color=color, label="rainfall rate")
return ax
if __name__ == "__main__":
parser = argparse.ArgumentParser("Plot discharge observation from ATS run")
parser.add_argument("runoff_filename", type=str, help="Runoff observation filename.")
parser.add_argument("-p", "--plot-rainfall", action="store_true", help="Plot rainfall rate as an asymptotic limit.")
parser.add_argument("-d", "--directory", type=str, help="Simulation output directory", default='.')
parser.add_argument("-f", "--filename", type=str, help="Simulation surface output filename", default="visdump_surface_data.h5")
parser.add_argument("-r", "--rainfall-rate-key", type=str, help="Rainfall rate variable name", default="surface-mass_source.cell.0")
parser.add_argument("-a", "--area-key", type=str, help="Surface cell area variable name", default="surface-cell_volume.cell.0")
parser.add_argument("--density", type=float, help="Density of water", default=55000.)
args = parser.parse_args()
ax = None
if args.plot_rainfall:
area, time, rain = load_area_rain(args)
ax = plot_rain(area, time, rain)
plot(load(args.runoff_filename, args.density), ax=ax)
plt.show()
sys.exit(0)
|
amanzi/ats
|
tools/utils/plot_runoff.py
|
plot_runoff.py
|
py
| 2,293
|
python
|
en
|
code
| 35
|
github-code
|
6
|
18298344467
|
import argparse
import socket
import struct
import codecs
import dns.resolver
import dns.message
import dns.query
import base64
from aes import aes
# Address of the DNS server
#dns_server = "8.8.8.8"
# DNS query message format
#dns_query = struct.pack("!6H", 0x1234, 1, 1, 0, 0, 0) + b"\x03foo\x03bar\x00\x00\x01\x00\x01"
def encrypt_message(message, key):
# Pad the message to a multiple of 16 bytes
message = message + b' ' * (16 - len(message) % 16)
# Create an AES cipher object and encrypt the message
cipher = aes.new(key, aes.MODE_ECB)
encrypted_message = cipher.encrypt(message)
# Return the base64 encoded encrypted message
return base64.b64encode(encrypted_message).decode()
def encode_message(message, key):
# Encrypt the message using AES encryption
encrypted_message = encrypt_message(message.encode(), key)
# Convert the encrypted message into the format described in the specifications
encoded_message = ''
for char in encrypted_message:
encoded_message += str(ord(char) * 2) + '.'
# Return the encoded message
return encoded_message.rstrip('.')
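
# --- Illustrative note (added; not part of the original module) ---
# encode_message() doubles the code point of every character of the base64
# ciphertext and joins them with dots, e.g. a ciphertext fragment "QUJD" would
# be emitted as "162.170.148.136" (Q=81, U=85, J=74, D=68, each times two).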
#def encode_message(message):
# # Map of characters to binary
# mapping = {chr(97 + i): format(i, '05b') for i in range(26)}
# mapping['EOF'] = '11111'
#
# # Encode message as binary
# message = ''.join(mapping[c] for c in message)
#
# # Split message into 10-bit chunks
# message = [message[i:i + 10] for i in range(0, len(message), 10)]
#
# # Convert 10-bit chunks to integer values
# message = [int(chunk, 2) for chunk in message]
#
# return message
#
def decode_message(encoded_message):
# Split the encoded message into individual values
values = encoded_message.split('.')
# Convert the values back into characters
decoded_message = ''
for value in values:
decoded_message += chr(int(value) // 2)
# Decrypt the message using AES encryption
decrypted_message = decrypt_message(decoded_message.encode(), key)
# Return the decrypted message
return decrypted_message.rstrip()
def decrypt_message(encrypted_message, key):
# Decode the base64 encoded encrypted message
encrypted_message = base64.b64decode(encrypted_message)
# Create an AES cipher object and decrypt the message
cipher = aes.new(key, aes.MODE_ECB)
decrypted_message = cipher.decrypt(encrypted_message)
# Return the decrypted message
return decrypted_message.rstrip()
def send_payload_to_target(message, domain, source):
mapping = {'00000': 'a', '00001': 'b', '00010': 'c', '00011': 'd',
'00100': 'e', '00101': 'f', '00110': 'g', '00111': 'h',
'01000': 'i', '01001': 'j', '01010': 'k', '01011': 'l',
'01100': 'm', '01101': 'n', '01110': 'o', '01111': 'p',
'10000': 'q', '10001': 'r', '10010': 's', '10011': 't',
'10100': 'u', '10101': 'v', '10110': 'w', '10111': 'x',
'11000': 'y', '11001': 'z', '11011': '0', '11100': '1',
'11101': '2', '11110': '3', '11111': '4'}
# Check if message is a string
if not isinstance(message, str):
raise ValueError("Message must be a string")
# Check if message contains only lowercase letters and numbers
for char in message:
if char not in mapping.values():
raise ValueError("Message must contain only lowercase letters and numbers")
# Convert message to binary
message = ''.join(format(ord(char) - ord('a'), '05b') for char in message)
# Pad message with EOF character to make its length a multiple of 10
message += '11011' * (10 - len(message) % 10)
# Multiply binary values by 5 to obtain larger TTL values
message = ''.join(format(int(char, 2) * 5, '05b') for char in message)
# Split data into 10-bit chunks
chunks = [message[i:i+10] for i in range(0, len(message), 10)]
# Convert 10-bit chunks to integer values
chunks = [int(chunk, 2) for chunk in chunks]
# Send DNS requests with TTL values
for chunk in chunks:
request = dns.message.make_query(domain, dns.rdatatype.A)
response = dns.query.udp(request, source, timeout=1)
if response.rcode() != dns.rcode.NOERROR:
raise Exception("DNS query failed")
ttl = response.answer[0].ttl
if ttl != chunk:
raise Exception("Unexpected TTL value")
return True
# Function to decode the covert message from the DNS reply
#def decode_message(data):
# # Map of binary to characters
# mapping = {format(i, '05b'): chr(97 + i) for i in range(26)}
# mapping['11111'] = 'EOF'
#
# # Split data into 10-bit chunks
# chunks = [data[i:i + 10] for i in range(0, len(data), 10)]
#
# # Convert 10-bit chunks to integer values
# chunks = [int(chunk, 2) for chunk in chunks]
#
# # Divide integer values by 5 to obtain original message
# chunks = [chunk // 5 for chunk in chunks]
#
# # Convert integer values to binary
# chunks = [format(chunk, '05b') for chunk in chunks]
#
# # Join binary values to form the message
# message = ''.join(chunks)
#
# # Split message into character codes
# message = [message[i:i + 5] for i in range(0, len(message), 5)]
#
# # Convert character codes to characters
# message = ''.join(mapping[code] for code in message)
#
# return message
def dns_spoof(target, source_ip, source_port, payload, aes_key=None):
try:
# Encode the message using the text only scheme
encoded_message = ''
for char in payload:
encoded_message += str((ord(char) - 97) * 26 ** 2)
# AES encryption implementation here
        encrypted_message = encrypt_message(encoded_message, aes_key) if aes_key else encoded_message
# Construct the DNS packet
packet = b''
packet += struct.pack("!H", 0x1234) # Transaction ID
packet += struct.pack("!H", 0x0100) # Flags
packet += struct.pack("!H", 1) # Questions
packet += struct.pack("!H", 0) # Answer RRs
packet += struct.pack("!H", 0) # Authority RRs
packet += struct.pack("!H", 0) # Additional RRs
packet += b'\x03\x77\x77\x77\x06\x67\x6f\x6f\x67\x6c\x65\x03\x63\x6f\x6d\x00' # Domain name
packet += struct.pack("!H", 0x0001) # Query type
packet += struct.pack("!H", 0x0001) # Query class
# Split the message into 4 character segments
message_segments = [encrypted_message[i:i+4] for i in range(0, len(encrypted_message), 4)]
# Encode the message segments into TTL values
ttl_values = []
for segment in message_segments:
ttl = 0
for char in segment:
ttl = ttl * 26 + ord(char) - 97
ttl_values.append(ttl * 5)
# Add the TTL values to the packet as answers
for ttl in ttl_values:
packet += b'\xc0\x0c' # Pointer to domain name
packet += struct.pack("!H", 0x0001) # Query type
packet += struct.pack("!H", 0x0001) # Query class
packet += struct.pack("!I", ttl) # TTL
# Create a raw socket
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
# Set the source IP and source port for spoofing
s.bind((source_ip, source_port))
# Send the packet
s.sendto(packet, (target, 53))
# Passive listening for a reply
response, addr = s.recvfrom(1024)
# Verify that the reply is from the expected target
if addr[0] == target:
# Extract the TTL values from the response
ttl_values = []
for i in range(len(response)):
if response[i:i+2] == b'\x00\x01':
                    ttl = struct.unpack("!I", response[i+10:i+14])[0]
                    ttl_values.append(ttl)
            # return the TTL values carrying the covert reply (a plausible completion of this truncated block)
            return ttl_values
except socket.error as e:
print(f"Error: {e}")
finally:
s.close()
def parse_arguments():
parser = argparse.ArgumentParser(description='Send payload over a covert DNS channel.')
parser.add_argument('payload', type=str, help='The message to send.')
parser.add_argument('target', type=str, help='The target to send the message to.')
parser.add_argument('source', type=str, help='The true client to receive the message')
parser.add_argument('-s', '--spoof', dest='spoof', action='store_true', help='Spoof the source address on the request.')
parser.add_argument('--key', type=str, default='1234567890abcdef', help='Encryption key')
return parser.parse_args()
#python covert_channel_client.py <payload> <target> [--key <key>]
if __name__ == '__main__':
args = parse_arguments()
payload = args.payload
target = args.target
source = args.source
key = args.key
spoof = args.spoof
if spoof:
print("Spoofing address on request...")
        dns_spoof(target, source, 53, payload, key)  # spoof the request's source address as the true client
# Encode the payload
encoded_payload = encode_message(payload, key)
# Send the encoded payload to the target domain
send_payload_to_target(encoded_payload, target, source)
|
unicycling-amphibian/CovertDNS
|
DNSCovert_Client.py
|
DNSCovert_Client.py
|
py
| 9,310
|
python
|
en
|
code
| 0
|
github-code
|
6
|
34408932008
|
from flask_restplus import Resource
from flask import current_app as cur_app
from flask import request
from app.main.services.story.brand_story_services import duplicate_story, get_all_draft_or_published_story, get_story, issue_story_template_before_save, post_story_publish_and_draft, remove_story_from_search, update_story_by_id, get_all_system_story
from app.main.utils.api_story_dto import BrandStoryDTO
api = BrandStoryDTO.api
_res_issue_template_before_save = BrandStoryDTO.res_issue_template_before_save
_post_req_for_draft_publish = BrandStoryDTO.post_req_for_draft_publish
_res_for_draft_publish = BrandStoryDTO.res_for_draft_publish
_req_for_draft_publish_update = BrandStoryDTO.post_req_for_draft_publish_update
_res_story_by_id = BrandStoryDTO.res_story_by_id
_res_get_all_draft_and_publish_story = BrandStoryDTO.res_get_all_draft_and_publish_story
_res_get_all_system = BrandStoryDTO.res_all_Storye_pages
@api.route("api/v1.0/brand/story/<story_id>")
class BrandStoryOpe(Resource):
@api.marshal_with(_res_story_by_id)
def get(self, story_id):
return get_story(story_id)
def delete(self, story_id):
return remove_story_from_search(story_id=story_id)
@api.route("api/v1.0/story/template/<template_id>/data")
class IssueStoryTemplate(Resource):
@api.marshal_with(_res_issue_template_before_save)
def get(self, template_id):
return issue_story_template_before_save(template_id)
@api.route("api/v1.0/brand/<brand_id>/story")
class BrandStoryOperation(Resource):
@api.expect(_post_req_for_draft_publish)
@api.marshal_with(_res_for_draft_publish)
def post(self, brand_id):
return post_story_publish_and_draft(brand_id, data=request.json)
@api.route("api/v1.0/brand/<brand_id>/story/<story_id>")
class BrandStoryOperationUpdate(Resource):
@api.expect(_req_for_draft_publish_update)
@api.marshal_with(_res_for_draft_publish)
def put(self, brand_id, story_id):
return update_story_by_id(brand_id, story_id, data=request.json)
@api.route("api/v1.0/brand/<brand_id>/storys")
class FetchStatusStory(Resource):
@api.marshal_with(_res_get_all_draft_and_publish_story)
# @token_required
def get(self, brand_id):
status = request.args.get('status')
active = request.args.get('active')
category = request.args.get('category')
args = request.args
search = args.get('search', '')
page = int(args.get('page', cur_app.config['PAGE']))
limit = int(args.get('limit', cur_app.config['LIMIT']))
return get_all_draft_or_published_story(brand_id, status, active, search, category, page, limit)
@api.route("api/v1.0/brand/<brand_id>/story/<story_id>/duplicate")
class BrandDuplicateStory(Resource):
@api.marshal_with(_res_for_draft_publish)
def get(self, brand_id, story_id):
return duplicate_story(brand_id=brand_id, story_id=story_id)
@api.route("api/v1.0/story")
class SystemBrandPages(Resource):
@api.marshal_with(_res_get_all_system)
def get(self):
args = request.args
page = int(args.get('page', cur_app.config['PAGE']))
limit = int(args.get('limit', cur_app.config['LIMIT']))
category = request.args.get('category')
return get_all_system_story(category, page, limit)
|
deepakarya09/cureas_reads
|
app/main/controllers/api_story_controller.py
|
api_story_controller.py
|
py
| 3,300
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3562737092
|
from modules import s3upload, s3transcribe, parse
import argparse
if __name__ == "__main__":
# Create Argument Parser
parser = argparse.ArgumentParser(description='Process video, create transcripts, proofread with OpenAI GPT.')
parser.add_argument('input_folder', type=str, help='Input folder with .mp4 interview video(s)')
parser.add_argument('s3_folder', type=str, help='Output folder name to save files to in S3 bucket')
args = parser.parse_args()
# Step 1: Upload videos to S3
print("Step 1: Uploading videos to S3...")
s3upload.upload_to_s3(args.input_folder, args.s3_folder)
# Step 2: Transcribe videos from S3 and download the transcriptions
print("Step 2: Transcribing videos from S3 and downloading the transcriptions...")
transcribe_folder = s3transcribe.transcribe_from_s3(args.s3_folder)
s3transcribe.download_transcripts(transcribe_folder)
# Step 3: Parse transcriptions
print("Step 3: Parsing and proofreading transcriptions...")
parse.proofread_transcripts(transcribe_folder)
print("Finished processing videos! View the resulting transcript and .docx file in the timestamped folder.")
|
uitrial/Interview-Transcribe-Proofread
|
process_transcripts.py
|
process_transcripts.py
|
py
| 1,168
|
python
|
en
|
code
| 4
|
github-code
|
6
|
42269789956
|
from .models import *
from .forms import *
from app import filtersets
import cx_Oracle
from django.http.response import Http404
from django.shortcuts import render, redirect
from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.db import connection
from functools import wraps
from django.contrib.admin.views import decorators
def staff_member_required(view_func):
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
else:
return HttpResponseRedirect('/acceso-denegado')
return wraps(view_func)(_checklogin)
decorators.staff_member_required = staff_member_required
def accesodenegado(request):
return render(request,'acceso-denegado.html')
def groups_only(*groups):
def inner(view_func):
@wraps(view_func)
def wrapper_func(request, *args, **kwargs):
if request.user.groups.filter(name__in=groups).exists():
return view_func(request, *args, **kwargs)
else:
return redirect(to='acceso-denegado')
return wrapper_func
return inner
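
# --- Illustrative usage (added; not part of the original views) ---
# A hypothetical view restricted to members of the "Profesional" group:
#
#   @groups_only('Profesional')
#   def home_prof(request):
#       return render(request, 'profesional/home-prof.html')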
@staff_member_required
def crear_grupo(request):
data = {
'form': GrupoForm
}
if request.method == 'POST':
formulario = GrupoForm(data=request.POST)
if formulario.is_valid():
gru = formulario.save()
messages.success(request, "Grupo "+gru.name+" creado correctamente!")
return redirect (to='mantenedor-usr')
else:
data["form"] = formulario
return render(request, 'registration/group.html',data )
@staff_member_required
def user_filter(request):
# https://www.youtube.com/watch?v=dkJ3uqkdCcY
#https://django-filter.readthedocs.io/en/stable/index.html
"""
filtro = filtersets.UsertFilter(
request.GET,
queryset= User.objects.all()
)
PerfilF = filtersets.PerfilFilter(
request.GET,
queryset= Perfil.objects.all()
)
page = request.GET.get('page', 1)
try:
paginator = Paginator(PerfilF, 5)
PerfilF = paginator.page(page)
except:
raise Http404
context = {
'filtro': filtro,
'entity':PerfilF,
'paginator': paginator
}
"""
filtro = filtersets.UsertFilter(
request.GET,
queryset= User.objects.all()
)
PerfilF = filtersets.PerfilFilter(
request.GET,
queryset= Perfil.objects.all()
)
context = {
'filtro': filtro,
'PerfilF':PerfilF,
}
return render(request, 'pruebas/ekisde.html', context)
@staff_member_required
def signup_view(request):
context = {'form': CustomUserCreationForm(),
'form_p':PerfilForm(),
'adminform':AdminForm(),
'proform': ProfesionalForm(),
'cliform': ClienteForm(),
}
if request.method == 'POST':
formulario = CustomUserCreationForm(data=request.POST)
formPerfil = PerfilForm(data=request.POST)
formAdm = AdminForm(data=request.POST)
formProf = ProfesionalForm(data=request.POST)
formCli = ClienteForm(data=request.POST)
if formulario.is_valid() and formPerfil.is_valid():
usuario = formulario.save()
group = request.POST.get('groups')
usuario.groups.add(group)
perfil = formPerfil.save(commit=False)
perfil.id_auth_user = usuario
perfil.save()
if perfil.tipo_perf=='1':
admin = formAdm.save(commit=False)
admin.id_perfil = perfil
admin.save()
elif perfil.tipo_perf=='2':
prof = formProf.save(commit=False)
prof.id_perfil = perfil
prof.save()
elif perfil.tipo_perf=='3':
cli = formCli.save(commit=False)
cli.id_perfil =perfil
cli.save()
messages.success(request, 'Usuario '+usuario.username+' creado correctamente')
return redirect(to="mantenedor-usr")
context = {'form': CustomUserCreationForm(),
'form_p':PerfilForm(),
'adminform':AdminForm(),
'proform':ProfesionalForm(),
'cliform': ClienteForm(),
}
return render(request, 'registration/signup.html', context)
def home(request):
return render(request, 'home.html')
@staff_member_required
def home_admin(request):
usuario = User.objects.all().order_by('id')
context = {'usuario': usuario }
return render(request,'administrador/home-adm.html', context)
@staff_member_required
def maintainer(request):
return render(request, 'administrador/mantenedor.html')
@staff_member_required
def maintainer_user(request):
return render(request, 'administrador/mantenedor-usuario.html')
@staff_member_required
def maintainer_plan(request):
return render(request, 'administrador/mantenedor-plan.html')
@staff_member_required
def maintainer_service(request):
return render(request, 'administrador/mantenedor-servicio.html')
def login_view(request):
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
user = form.get_user()
login(request, user)
return redirect(to='home')
else:
form = AuthenticationForm()
return render(request,'registration/login.html',{'form':form})
def login_filter(request):
if request.user.groups.filter(name="Administrador") or request.user.is_staff:
return redirect(to='home-adm')
elif request.user.groups.filter(name="Profesional"):
return redirect(to='home-prof')
else:
return redirect(to='home-cliente')
@staff_member_required
# maintainer (user list)
def UserLista(request):
usuario = User.objects.all().order_by('id')
page = request.GET.get('page', 1)
try:
paginator = Paginator(usuario, 5)
usuario = paginator.page(page)
except:
raise Http404
context = {'entity': usuario,
'paginator': paginator}
return render(request, 'administrador/lista.html', context)
@staff_member_required
def UserEdit(request,id):
usuario = User.objects.get(id=id)
if request.method == 'GET':
form = UserUpdateForm(instance=usuario)
else:
form = UserUpdateForm(request.POST, instance=usuario)
if form.is_valid():
form.save()
messages.success(request, "Usuario "+usuario.username+" modificado correctamente")
return redirect(to="listar")
context = {
'form':form,
}
return render(request,'administrador/editar.html', context)
@staff_member_required
def UserDelete(request,id):
usuario = User.objects.get(id=id)
usuario.is_active = 0
if request.method == 'POST':
form = UserActive(instance=usuario)
else:
form = UserActive(request.POST, instance=usuario)
if form.is_valid():
form.save()
messages.success(request, "Usuario desactivado correctamente")
return redirect(to="listar")
@staff_member_required
def UserActivate(request,id):
usuario = User.objects.get(id=id)
if request.method == 'POST':
form = UserActive(instance=usuario)
else:
form = UserActive(request.POST, instance=usuario)
if form.is_valid():
user = form.save()
user.is_active = True
user.save()
messages.success(request, "Usuario activado correctamente")
return redirect(to="listar")
@staff_member_required
##PLAN
def PlanCreate(request):
data = {
'form': PlanForm
}
if request.method == 'POST':
formulario = PlanForm(data=request.POST)
if formulario.is_valid():
formulario.save()
messages.success(request, "Plan creado correctamente!")
return redirect (to='mantenedor')
else:
data["form"] = formulario
return render(request, 'administrador/planes/agregar-plan.html', data)
@staff_member_required
def plan_lista(request):
plan = Plan.objects.all().order_by('id_plan')
page = request.GET.get('page', 1)
try:
paginator = Paginator(plan, 5)
plan = paginator.page(page)
except:
raise Http404
context = {'entity': plan,
'paginator': paginator}
return render(request, 'administrador/planes/lista-plan.html', context)
@staff_member_required
def PlanEdit(request,id_plan):
plan = Plan.objects.get(id_plan=id_plan)
if request.method == 'GET':
form = PlanUpdateForm(instance=plan)
else:
form = PlanUpdateForm(request.POST, instance=plan)
if form.is_valid():
form.save()
messages.success(request, "Plan modificado correctamente")
return redirect(to='lista-plan')
return render(request,'administrador/planes/editar-plan.html',{'form':form})
@staff_member_required
def PlanDelete(request,id):
plan = Plan.objects.get(id_plan=id)
plan.estado = 0
if request.method == 'POST':
form = PlanActive(instance=plan)
else:
form = PlanActive(request.POST, instance=plan)
if form.is_valid():
form.save()
messages.success(request, "Plan desactivado correctamente")
return redirect(to="lista-plan")
@staff_member_required
def PlanActivate(request,id):
plan = Plan.objects.get(id_plan=id)
if request.method == 'POST':
form = PlanActive(instance=plan)
else:
form = PlanActive(request.POST, instance=plan)
if form.is_valid():
plan = form.save()
plan.estado = 1
plan.save()
messages.success(request, "Plan activado correctamente")
return redirect(to="lista-plan")
## SERVICES
@staff_member_required
def ServicioCreate(request):
data = {
'form': ServicioForm
}
if request.method == 'POST':
formulario = ServicioForm(data=request.POST)
if formulario.is_valid():
formulario.save()
messages.success(request, "Servicio creado correctamente!")
return redirect (to='mantenedor')
else:
data["form"] = formulario
return render(request, 'administrador/servicios/agregar-servicio.html', data)
@staff_member_required
def Servicio_lista(request):
servicio = Servicio.objects.all().order_by('id_servicio')
page = request.GET.get('page', 1)
try:
paginator = Paginator(servicio, 5)
servicio = paginator.page(page)
except:
raise Http404
context = {'entity': servicio,
'paginator': paginator}
return render(request, 'administrador/servicios/lista-servicio.html', context)
@staff_member_required
def ServicioEdit(request,id_servicio):
servicio = Servicio.objects.get(id_servicio=id_servicio)
if request.method == 'GET':
form = ServicioUpdateForm(instance=servicio)
else:
form = ServicioUpdateForm(request.POST, instance=servicio)
if form.is_valid():
form.save()
messages.success(request, "Servicio modificado correctamente")
return redirect(to='lista-servicios')
return render(request,'administrador/servicios/editar-servicio.html',{'form':form})
@staff_member_required
def ServicioDelete(request,id):
serv = Servicio.objects.get(id_servicio=id)
serv.estado = 0
if request.method == 'POST':
form = ServicioActive(instance=serv)
else:
form = ServicioActive(request.POST, instance=serv)
if form.is_valid():
form.save()
messages.success(request, "Servicio desactivado correctamente")
return redirect(to="lista-servicios")
@staff_member_required
def ServicioActivate(request,id):
serv = Servicio.objects.get(id_servicio=id)
if request.method == 'POST':
form = ServicioActive(instance=serv)
else:
form = ServicioActive(request.POST, instance=serv)
if form.is_valid():
serv = form.save()
serv.estado = 1
serv.save()
messages.success(request, "Servicio activado correctamente")
return redirect(to="lista-servicios")
# client information (ClienteForm)
def cliente_datos():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_datos_cliente",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
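# Note: cliente_datos and the lista_* helpers further below share the same
# two-cursor pattern: one cx_Oracle cursor invokes the stored procedure while a
# second cursor is passed as the OUT parameter that receives the result rows.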
@staff_member_required
def infoCliente(request):
cliente = cliente_datos()
page = request.GET.get('page', 1)
try:
paginator = Paginator(cliente, 5)
cliente = paginator.page(page)
except:
raise Http404
context = {'entity': cliente,
'paginator': paginator,
}
return render(request, 'administrador/info_cliente/info-cliente.html',context)
# professional information (ProfesionalForm)
@staff_member_required
def infoProfesional(request):
pro = Profesional.objects.all().order_by('id_prof')
page = request.GET.get('page', 1)
try:
paginator = Paginator(pro, 5)
pro = paginator.page(page)
except:
raise Http404
context = {'entity': pro,
'paginator': paginator}
return render(request, 'administrador/info_profesional/info-profesional.html',context)
# profile information
@staff_member_required
def infoPerfil(request):
PerfilF = filtersets.PerfilFilter(request.GET,queryset= Perfil.objects.all())
page = request.GET.get('page', 1)
try:
paginator = Paginator(PerfilF, 5)
PerfilF = paginator.page(page)
except:
"""raise Http404"""
context = {
'entity':PerfilF,
'paginator': paginator
}
return render(request, 'administrador/info_perfil/info-perfil.html', context)
@staff_member_required
def modificar_perfil(request,id_perfil):
perfil = Perfil.objects.get(id_perfil=id_perfil)
if request.method == 'GET':
form = PerfilModificar(instance=perfil)
else:
form = PerfilModificar(request.POST, instance=perfil)
if form.is_valid():
form.save()
messages.success(request,"Perfil de "+perfil.id_auth_user.first_name+" "+perfil.id_auth_user.last_name+" modificado correctamente!")
else:
messages.error(request, "Ha ingresado un rut ya registrado, no se han guardado cambios.")
return redirect(to='infoPerfil')
context = {
'form':form
}
return render(request, 'administrador/info_perfil/modificar-perfil.html',context)
"""
Utilizando procedures
"""
def lista_actividades():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_actividades",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_capacitacion():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_capacitacion",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_asesoria():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_asesoria",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_visita():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_visita",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_cliente():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_cliente",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def lista_profesional():
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
    out_cur = django_cursor.connection.cursor()  # this cursor receives the result rows
cursor.callproc("sp_listar_profesional",[out_cur])
lista =[]
for fila in out_cur:
lista.append(fila)
return lista
def guardar_actividad(nombre,descripcion,tipo_act,fec_estimada,estado,id_cli,id_prof):
django_cursor = connection.cursor()
    cursor = django_cursor.connection.cursor()  # this cursor calls the stored procedure
salida = cursor.var(cx_Oracle.NUMBER)
cursor.callproc('sp_agregar_actividad_corta',[nombre,descripcion,tipo_act,fec_estimada,estado,id_cli,id_prof, salida])
return salida.getvalue()
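# Note: guardar_actividad returns the numeric status that the stored procedure
# writes to its OUT parameter; the caller below (crear_actividad) treats a
# returned value of 1 as a successful insert.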
# activities
@staff_member_required
def actividades(request):
actividad = lista_actividades()
page = request.GET.get('page', 1)
try:
paginator = Paginator(actividad, 5)
actividad = paginator.page(page)
except:
raise Http404
context = {'entity': actividad,
'paginator': paginator,
}
return render(request, 'administrador/actividades/actividades_lista.html',context)
@staff_member_required
def crear_actividad(request):
capacitacion = lista_capacitacion()
asesoria = lista_asesoria()
visita = lista_visita()
cliente = Cliente.objects.all()
profesional = Profesional.objects.all()
data = {
'capacitacion':capacitacion,
'asesoria':asesoria,
'visita':visita,
'cliente':cliente,
'profesional':profesional
}
if request.method == 'POST':
nombre = request.POST.get('nombre')
descripcion =request.POST.get('descripcion')
tipo_act = request.POST.get('tipo_act')
fec_estimada = request.POST.get('fec_estimada')
estado = request.POST.get('estado')
id_cli = request.POST.get('id_cli')
id_prof = request.POST.get('id_prof')
salida = guardar_actividad(nombre,descripcion,tipo_act,fec_estimada,estado,id_cli,id_prof)
if salida == 1:
data['mensaje'] = 'Agregado Correctamente'
return redirect(to='actividades')
else:
data['mensaje'] = 'No se a podido guardar'
return render(request, 'administrador/actividades/crear.html',data)
@staff_member_required
def actualizar_actividad(request,id_actividad):
act = Actividad.objects.get(id_actividad=id_actividad)
if request.method == 'GET':
form = ActualizarActividad(instance=act)
else:
form = ActualizarActividad(request.POST, instance=act)
if form.is_valid():
form.save()
messages.success(request, "Actividad modificada correctamente")
return redirect(to='actividades')
context = {'form':form}
return render(request, 'administrador/actividades/actualizar.html',context)
@staff_member_required
def checklist(request):
data = {
'form': listaForm
}
if request.method == 'POST':
formulario = listaForm(data=request.POST)
if formulario.is_valid():
formulario.save()
messages.success(request, "Creado correctamente!")
return redirect(to='listaCheck')
else:
data["form"] = formulario
return render(request, 'administrador/checklist/checklist.html', data)
@staff_member_required
def listaCheck(request):
lista = CliCheckPro.objects.all().order_by('id_clicheck')
page = request.GET.get('page', 1)
try:
paginator = Paginator(lista, 5)
lista = paginator.page(page)
except:
raise Http404
context = {'entity': lista,
'paginator': paginator}
return render(request, 'administrador/checklist/listado.html', context)
@staff_member_required
def modificaCheck(request,id_clicheck):
lista = CliCheckPro.objects.get(id_clicheck=id_clicheck)
if request.method == 'GET':
form = listaForm(instance=lista)
else:
form = listaForm(request.POST, instance=lista)
if form.is_valid():
form.save()
messages.success(request, "Modificado correctamente")
return redirect(to='listaCheck')
return render(request,'administrador/checklist/modificar.html',{'form':form})
|
maxquevedo/safelyWeb
|
safely/app/views.py
|
views.py
|
py
| 21,095
|
python
|
en
|
code
| 2
|
github-code
|
6
|
40687101493
|
import time
import unittest
import s1ap_types
import s1ap_wrapper
class TestMultiEnbCompleteReset(unittest.TestCase):
"""Unittest: TestMultiEnbCompleteReset"""
def setUp(self):
"""Initialize before test case execution"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
"""Cleanup after test case execution"""
self._s1ap_wrapper.cleanup()
def test_multi_enb_complete_reset(self):
"""Multi Enb Multi UE attach detach + complete reset
Note: Before execution of this test case,
        make sure that the following steps are correct
1. Configure same plmn and tac in both MME and s1ap tester
2. How to configure plmn and tac in MME:
a. Set mcc and mnc in gateway.mconfig for mme service
b. Set tac in gateway.mconfig for mme service
c. Restart MME service
3. How to configure plmn and tac in s1ap tester,
a. For multi-eNB test case, configure plmn and tac from test case.
In each multi-eNB test case, set plmn, plmn length and tac
in enb_list
b. For single eNB test case, configure plmn and tac in nbAppCfg.txt
"""
# column is an enb parameter, row is number of enbs
# Cell Id, Tac, EnbType, PLMN Id, PLMN length
enb_list = [
[1, 1, 1, "00101", 5],
[2, 1, 1, "00101", 5],
[3, 1, 1, "00101", 5],
[4, 1, 1, "00101", 5],
[5, 1, 1, "00101", 5],
]
self._s1ap_wrapper.multiEnbConfig(len(enb_list), enb_list)
print("Waiting for 2 seconds for Multiple ENBs to get configured")
time.sleep(2)
ue_ids = []
# UEs will attach to the ENBs in a round-robin fashion
        # with num_ues = 5 and five eNBs configured, each eNB serves a single UE
num_ues = 5
self._s1ap_wrapper.configUEDevice(num_ues)
for _ in range(num_ues):
req = self._s1ap_wrapper.ue_req
print(
"******************** Calling attach for UE id ",
req.ue_id,
)
self._s1ap_wrapper.s1_util.attach(
req.ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
ue_ids.append(req.ue_id)
# Trigger eNB Reset
# Add delay to ensure S1APTester sends attach complete before sending
# eNB Reset Request
time.sleep(0.5)
print("************************* Sending eNB Reset Request")
reset_req = s1ap_types.ResetReq()
reset_req.rstType = s1ap_types.resetType.COMPLETE_RESET.value
reset_req.cause = s1ap_types.ResetCause()
reset_req.cause.causeType = (
s1ap_types.NasNonDelCauseType.TFW_CAUSE_MISC.value
)
# Set the cause to MISC.hardware-failure
reset_req.cause.causeVal = 3
reset_req.r = s1ap_types.R()
reset_req.r.completeRst = s1ap_types.CompleteReset()
reset_req.r.completeRst.enbId = 2
self._s1ap_wrapper.s1_util.issue_cmd(
s1ap_types.tfwCmd.RESET_REQ,
reset_req,
)
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.RESET_ACK.value
print(
"Waiting for 3 seconds to ensure that MME has cleaned up all S1 "
"state before detaching the UE",
)
time.sleep(3)
for ue in ue_ids:
print("************************* Calling detach for UE id ", ue)
self._s1ap_wrapper.s1_util.detach(
ue,
s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
)
if __name__ == "__main__":
unittest.main()
|
magma/magma
|
lte/gateway/python/integ_tests/s1aptests/test_multi_enb_complete_reset.py
|
test_multi_enb_complete_reset.py
|
py
| 3,952
|
python
|
en
|
code
| 1,605
|
github-code
|
6
|
70078277628
|
import numpy as np
import torch
import os
import yaml
import tqdm
from addict import Dict
from collections import defaultdict
from matplotlib import pyplot as plt
import matplotlib.patches as patches
import pickle
import random
import pytorch_ssim
from skimage.measure import compare_ssim as ssim
from train import build_loaders
from scene_generation.data import imagenet_deprocess_batch
from scene_generation.metrics import jaccard
from scene_generation.model import Model
from scene_generation.args import get_args
#perceptual error
from PerceptualSimilarity import models
#from PerceptualSimilarity.util import util
GPU = 0
PRECOMPUTED = False # use precomputed samples (saved in the checkpoint) for evaluation
EVAL_ALL = True # evaluate on all bounding boxes (batch size=1)
USE_GT = True # use ground truth bounding boxes for evaluation
USE_FEATS = False #True
IGNORE_SMALL = False
SPLIT = '../sg2im/datasets/vg/test.h5'
BATCH_SIZE = 1
PRINT_EVERY = 50
SAVE_EVERY = 500
#EXPERIMENT = 'jitter_L_0.05_FixBoxes'
#EXPERIMENT = 'clean_infeats_64'
EXPERIMENT = "aGCN_SPADE"
ckpt = "checkpoint"
#EXPERIMENT = 'baseline_64_noise_250k'
CHECKPOINT = './output/Nov12_14-43-14_atnavab21/{}_with_model.pt'.format(ckpt)
#config_file = 'experiments/default/logs/{}/args.yaml'.format(EXPERIMENT)
results_file = 'test_results_{}.pickle'
def main():
if not os.path.isfile(CHECKPOINT):
print('ERROR: Checkpoint file "%s" not found' % CHECKPOINT)
return
# Read config file of the model
args = get_args()
print(args)
torch.manual_seed(1)
random.seed(1)
np.random.seed(1)
# reset some arguments
args.add_jitter_bbox = None
args.add_jitter_layout = None
args.add_jitter_feats = None
args.batch_size = BATCH_SIZE
args.test_h5 = SPLIT
device = torch.device("cuda:0") #torch.cuda.set_device(GPU)
# Load the model, with a bit of care in case there are no GPUs
map_location = 'cpu' if device == torch.device('cpu') else None
checkpoint = torch.load(CHECKPOINT, map_location=map_location)
if not PRECOMPUTED:
# initialize model and load checkpoint
kwargs = checkpoint['model_kwargs']
model = Model(**kwargs)
model.load_state_dict(checkpoint['model_state'])
model.eval()
model.to(device)
# create data loaders
_, train_loader, val_loader, test_loader = build_loaders(args, evaluating=True)
# testing model
print('Batch size: ', BATCH_SIZE)
print('Evaluating on {} set'.format(SPLIT))
eval_model(args, model, test_loader, device, use_gt=USE_GT, use_feats=USE_FEATS, filter_box=IGNORE_SMALL)
# losses, samples, avg_iou = results
else:
# sample images and scores already computed while training (only one batch)
samples = checkpoint['val_samples'][-1] # get last iteration
original_img = samples['gt_img'].cpu().numpy()
predicted_img = samples['gt_box_pred_mask'].cpu().numpy()
return
def eval_model(args, model, loader, device, use_gt=False, use_feats=False, filter_box=False):
all_losses = defaultdict(list)
all_boxes = defaultdict(list)
total_iou = []
total_boxes = 0
num_batches = 0
num_samples = 0
mae_per_image = []
mae_roi_per_image = []
roi_only_iou = []
ssim_per_image = []
ssim_rois = []
rois = 0
margin = 2
## Initializing the perceptual loss model
lpips_model = models.PerceptualLoss(model='net-lin',net='alex',use_gpu=True)
perceptual_error_image = []
perceptual_error_roi = []
# ---------------------------------------
with torch.no_grad():
for batch in tqdm.tqdm(loader):
num_batches += 1
# if num_batches > 10:
# break
batch = [tensor.to(device) for tensor in batch]
masks = None
if len(batch) == 6:
imgs, objs, boxes, triples, obj_to_img, triple_to_img = batch
elif len(batch) == 7:
imgs, objs, boxes, masks, triples, obj_to_img, triple_to_img = batch
elif len(batch) == 12:
imgs, objs, boxes, triples, obj_to_img, triple_to_img, \
objs_r, boxes_r, triples_r, obj_to_img_r, triple_to_img_r, imgs_in = batch
elif len(batch) == 13:
imgs, objs, boxes, triples, obj_to_img, triple_to_img, attributes, \
objs_r, boxes_r, triples_r, obj_to_img_r, triple_to_img_r, imgs_in = batch
else:
assert False
predicates = triples[:, 1]
# #EVAL_ALL = True
if EVAL_ALL:
imgs, imgs_in, objs, boxes, triples, obj_to_img, \
dropbox_indices, dropfeats_indices = process_batch(
imgs, imgs_in, objs, boxes, triples, obj_to_img, triple_to_img, device,
use_feats=use_feats, filter_box=filter_box)
else:
dropbox_indices = None
dropfeats_indices = None
#
# if use_gt: # gt boxes
# model_out = model(objs, triples, obj_to_img, boxes_gt=boxes, masks_gt=masks, src_image=imgs_in,
# drop_box_idx=None, drop_feat_idx=dropfeats_indices, mode='eval')
# else:
# model_out = model(objs, triples, obj_to_img, boxes_gt=boxes, src_image=imgs_in,
# drop_box_idx=dropbox_indices, drop_feats_idx=dropfeats_indices, mode='eval')
masks_gt = None
gt_train = False
attributes = torch.zeros_like(attributes)
all_features = None
# Run the model with predicted masks
model_out = model(imgs, objs, triples, obj_to_img, boxes_gt=boxes, masks_gt=masks_gt, attributes=attributes,
gt_train=gt_train, test_mode=False, use_gt_box=True, features=all_features
, drop_box_idx=dropbox_indices, drop_feat_idx= dropfeats_indices, src_image= imgs_in)
#imgs_pred, boxes_pred, masks_pred, _, layout, _ = model_out
# OUTPUT
imgs_pred, boxes_pred, masks_pred, predicate_scores, layout, _ = model_out
# --------------------------------------------------------------------------------------------------------------
#imgs_pred *= 3
#print(imgs_pred.min(), imgs_pred.max())
# Save all box predictions
all_boxes['boxes_gt'].append(boxes)
all_boxes['objs'].append(objs)
all_boxes['boxes_pred'].append(boxes_pred)
all_boxes['drop_targets'].append(dropbox_indices)
# IoU over all
total_iou.append(jaccard(boxes_pred, boxes).cpu().numpy()) #.detach()
total_boxes += boxes_pred.size(0)
# IoU over targets only
pred_dropbox = boxes_pred[dropbox_indices.squeeze() == 0, :]
gt_dropbox = boxes[dropbox_indices.squeeze() == 0, :]
roi_only_iou.append(jaccard(pred_dropbox, gt_dropbox).detach().cpu().numpy())
rois += pred_dropbox.size(0)
# assert(pred_dropbox.size(0) == imgs.size(0))
num_samples += imgs.shape[0]
imgs = imagenet_deprocess_batch(imgs).float()
imgs_pred = imagenet_deprocess_batch(imgs_pred).float()
# Uncomment to plot images (for debugging purposes)
#visualize_imgs_boxes(imgs, imgs_pred, boxes, boxes)
# MAE per image
mae_per_image.append(torch.mean(
torch.abs(imgs - imgs_pred).view(imgs.shape[0], -1), 1).cpu().numpy())
for s in range(imgs.shape[0]):
# get coordinates of target
left, right, top, bottom = bbox_coordinates_with_margin(boxes[s, :], margin, imgs)
                # compute per-sample errors restricted to the RoI
mae_roi_per_image.append(torch.mean(
torch.abs(imgs[s, :, top:bottom, left:right] - imgs_pred[s, :, top:bottom, left:right])).cpu().item())
ssim_per_image.append(
pytorch_ssim.ssim(imgs[s:s+1, :, :, :] / 255.0,
imgs_pred[s:s+1, :, :, :] / 255.0, window_size=3).cpu().item())
ssim_rois.append(
pytorch_ssim.ssim(imgs[s:s+1, :, top:bottom, left:right] / 255.0,
imgs_pred[s:s+1, :, top:bottom, left:right] / 255.0, window_size=3).cpu().item())
imgs_pred_norm = imgs_pred[s:s+1, :, :, :] / 127.5 - 1 # = util.im2tensor(imgs_pred[s:s+1, :, :, :].detach().cpu().numpy())
imgs_gt_norm = imgs[s:s+1, :, :, :] / 127.5 - 1 # util.im2tensor(imgs[s:s+1, :, :, :].detach().cpu().numpy())
#perceptual_error_roi.append(lpips_model.forward(imgs_pred_norm[:,:, top:bottom, left:right],
# imgs_gt_norm[:,:, top:bottom, left:right]))
#print(imgs_pred_norm.shape)
perceptual_error_image.append(
lpips_model.forward(imgs_pred_norm, imgs_gt_norm).detach().cpu().numpy())
if num_batches % PRINT_EVERY == 0:
calculate_scores(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
perceptual_error_image, perceptual_error_roi)
if num_batches % SAVE_EVERY == 0:
save_results(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
perceptual_error_image, perceptual_error_roi, all_boxes, num_batches)
# mean_losses = {k: np.mean(v) for k, v in all_losses.items()}
save_results(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
perceptual_error_image, perceptual_error_roi, all_boxes, 'final')
# masks_to_store = masks
# if masks_to_store is not None:
# masks_to_store = masks_to_store.data.cpu().clone()
# masks_pred_to_store = masks_pred
# if masks_pred_to_store is not None:
# masks_pred_to_store = masks_pred_to_store.data.cpu().clone()
# batch_data = {
# 'objs': objs.detach().cpu().clone(),
# 'boxes_gt': boxes.detach().cpu().clone(),
# 'masks_gt': masks_to_store,
# 'triples': triples.detach().cpu().clone(),
# 'obj_to_img': obj_to_img.detach().cpu().clone(),
# 'triple_to_img': triple_to_img.detach().cpu().clone(),
# 'boxes_pred': boxes_pred.detach().cpu().clone(),
# 'masks_pred': masks_pred_to_store
# }
# out = [mean_losses, samples, batch_data, avg_iou]
# out = [mean_losses, mean_L1, avg_iou]
return # mae_per_image, mae_roi_per_image, total_iou, roi_only_iou
def calculate_scores(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
perceptual_image, perceptual_roi):
mae_all = np.mean(np.hstack(mae_per_image), dtype=np.float64)
mae_std = np.std(np.hstack(mae_per_image), dtype=np.float64)
mae_roi = np.mean(mae_roi_per_image, dtype=np.float64)
mae_roi_std = np.std(mae_roi_per_image, dtype=np.float64)
iou_all = np.mean(np.hstack(total_iou), dtype=np.float64)
iou_std = np.std(np.hstack(total_iou), dtype=np.float64)
iou_roi = np.mean(np.hstack(roi_only_iou), dtype=np.float64)
iou_roi_std = np.std(np.hstack(roi_only_iou), dtype=np.float64)
ssim_all = np.mean(ssim_per_image, dtype=np.float64)
ssim_std = np.std(ssim_per_image, dtype=np.float64)
ssim_roi = np.mean(ssim_rois, dtype=np.float64)
ssim_roi_std = np.std(ssim_rois, dtype=np.float64)
# percept error -----------
percept_all = np.mean(perceptual_image, dtype=np.float64)
#print(perceptual_image, percept_all)
percept_all_std = np.std(perceptual_image, dtype=np.float64)
percept_roi = np.mean(perceptual_roi, dtype=np.float64)
percept_roi_std = np.std(perceptual_roi, dtype=np.float64)
# ------------------------
print()
print('MAE: Mean {:.6f}, Std {:.6f}'.format(mae_all, mae_std))
print('MAE-RoI: Mean {:.6f}, Std {:.6f}: '.format(mae_roi, mae_roi_std))
print('IoU: Mean {:.6f}, Std {:.6f}'.format(iou_all, iou_std))
print('IoU-RoI: Mean {:.6f}, Std {:.6f}'.format(iou_roi, iou_roi_std))
print('SSIM: Mean {:.6f}, Std {:.6f}'.format(ssim_all, ssim_std))
print('SSIM-RoI: Mean {:.6f}, Std {:.6f}'.format(ssim_roi, ssim_roi_std))
print('LPIPS: Mean {:.6f}, Std {:.6f}'.format(percept_all, percept_all_std))
print('LPIPS-RoI: Mean {:.6f}, Std {:.6f}'.format(percept_roi, percept_roi_std))
return
def save_results(mae_per_image, mae_roi_per_image, total_iou, roi_only_iou, ssim_per_image, ssim_rois,
perceptual_per_image, perceptual_rois, all_boxes, iter):
results = dict()
results['mae_per_image'] = mae_per_image
results['mae_rois'] = mae_roi_per_image
results['iou_per_image'] = total_iou
results['iou_rois'] = roi_only_iou
results['ssim_per_image'] = ssim_per_image
results['ssim_rois'] = ssim_rois
results['perceptual_per_image'] = perceptual_per_image
results['perceptual_rois'] = perceptual_rois
results['data'] = all_boxes
with open(results_file.format(iter), 'wb') as p:
pickle.dump(results, p)
def process_batch(imgs, imgs_in, objs, boxes, triples, obj_to_img, triples_to_img, device,
use_feats=True, filter_box=False):
num_imgs = imgs.shape[0]
imgs_stack = []
imgs_in_stack = []
boxes_stack = []
objs_stack = []
triples_stack = []
obj_to_img_new = []
candidates_stack = []
previous_idx = 0
for i in range(num_imgs):
start_idx_for_img = (obj_to_img == i).nonzero()[0]
last_idx_for_img = (obj_to_img == i).nonzero()[-1]
boxes_i = boxes[start_idx_for_img: last_idx_for_img + 1, :] # this includes the 'image' box!
objs_i = objs[start_idx_for_img: last_idx_for_img + 1]
start_idx_for_img = (triples_to_img == i).nonzero()[0]
last_idx_for_img = (triples_to_img == i).nonzero()[-1]
triples_i = triples[start_idx_for_img:last_idx_for_img + 1]
num_boxes = boxes_i.shape[0] # number of boxes in current image minus the 'image' box
if filter_box:
min_dim = 0.05 # about 3 pixels
keep = [b for b in range(boxes_i.shape[0] - 1) if
boxes_i[b, 2] - boxes_i[b, 0] > min_dim and boxes_i[b, 3] - boxes_i[b, 1] > min_dim]
print('Ignoring {} out of {} boxes'.format(boxes_i.shape[0] - len(keep), boxes_i.shape[0]))
times_to_rep = len(keep)
img_indices = torch.LongTensor(keep)
else:
times_to_rep = num_boxes - 1
img_indices = torch.arange(0, times_to_rep)
# boxes that will be dropped for each sample (always shift the index by one to get the next box)
drop_indices = img_indices * (num_boxes + 1)
# replicate things for current image
imgs_stack.append(imgs[i, :, :, :].repeat(times_to_rep, 1, 1, 1))
imgs_in_stack.append(imgs_in[i, :, :, :].repeat(times_to_rep, 1, 1, 1))
objs_stack.append(objs_i.repeat(times_to_rep)) # replicate object ids #boxes times
boxes_stack.append(boxes_i.repeat(times_to_rep, 1)) # replicate boxes #boxes times
obj_to_img_new.append(img_indices.unsqueeze(1).repeat(1, num_boxes).view(-1) + previous_idx)
previous_idx = obj_to_img_new[-1].max() + 1
triplet_offsets = (num_boxes * img_indices.unsqueeze(1).repeat(1, triples_i.size(0)).view(-1)).cuda()
triples_i = triples_i.repeat(times_to_rep, 1)
triples_i[:, 0] = triples_i[:, 0] + triplet_offsets # offset for replicated subjects
triples_i[:, 2] = triples_i[:, 2] + triplet_offsets # offset for replicated objects
triples_stack.append(triples_i)
# create index to drop for each sample
candidates = torch.ones(boxes_stack[-1].shape[0], device=device)
candidates[drop_indices] = 0 # set to zero the boxes that should be dropped
candidates_stack.append(candidates)
imgs = torch.cat(imgs_stack)
imgs_in = torch.cat(imgs_in_stack)
boxes = torch.cat(boxes_stack)
objs = torch.cat(objs_stack)
triples = torch.cat(triples_stack)
obj_to_img_new = torch.cat(obj_to_img_new)
candidates = torch.cat(candidates_stack).unsqueeze(1)
if use_feats:
feature_candidates = torch.ones((candidates.shape[0], 1), device=device)
else:
feature_candidates = candidates
return imgs, imgs_in, objs, boxes, triples, obj_to_img_new, candidates, feature_candidates
def bbox_coordinates_with_margin(bbox, margin, img):
# extract bounding box with a margin
left = max(0, bbox[0] * img.shape[3] - margin)
top = max(0, bbox[1] * img.shape[2] - margin)
right = min(img.shape[3], bbox[2] * img.shape[3] + margin)
bottom = min(img.shape[2], bbox[3] * img.shape[2] + margin)
return int(left), int(right), int(top), int(bottom)
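# Worked example (hypothetical numbers): for a 64x64 image, margin=2 and a
# normalized bbox of (0.25, 0.25, 0.75, 0.75), this returns
# left=14, right=50, top=14, bottom=50 in pixel coordinates, clamped to the image.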
def visualize_imgs_boxes(imgs, imgs_pred, boxes, boxes_pred):
nrows = imgs.size(0)
imgs = imgs.detach().cpu().numpy()
imgs_pred = imgs_pred.detach().cpu().numpy()
boxes = boxes.detach().cpu().numpy()
boxes_pred = boxes_pred.detach().cpu().numpy()
plt.figure()
for i in range(0, nrows):
# i = j//2
ax1 = plt.subplot(2, nrows, i+1)
img = np.transpose(imgs[i, :, :, :], (1, 2, 0)) / 255.
plt.imshow(img)
left, right, top, bottom = bbox_coordinates_with_margin(boxes[i, :], 0, imgs[i:i+1, :, :, :])
bbox_gt = patches.Rectangle((left, top),
width=right-left,
height=bottom-top,
linewidth=1, edgecolor='r', facecolor='none')
# Add the patch to the Axes
ax1.add_patch(bbox_gt)
plt.axis('off')
ax2 = plt.subplot(2, nrows, i+nrows+1)
pred = np.transpose(imgs_pred[i, :, :, :], (1, 2, 0)) / 255.
plt.imshow(pred)
left, right, top, bottom = bbox_coordinates_with_margin(boxes_pred[i, :], 0, imgs[i:i+1, :, :, :])
bbox_pr = patches.Rectangle((left, top),
width=right-left,
height=bottom-top,
linewidth=1, edgecolor='r', facecolor='none')
# ax2.add_patch(bbox_gt)
ax2.add_patch(bbox_pr)
plt.axis('off')
plt.show()
return
if __name__ == '__main__':
main()
|
azadef/interactive_scene_generation
|
evaluate_vg.py
|
evaluate_vg.py
|
py
| 18,847
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41118007213
|
import logging
import time
import traceback
from pathlib import Path
from requests_tracker.request import WebRequestType
from requests_tracker.session import WebSessionFactory
from requests_tracker.storage import convert_HAR_to_markdown, write_HAR_to_local_file, CookiesFileStorage
from requests_tracker.util import LogHelper
if __name__ == '__main__':
logger = LogHelper.configure(logging.DEBUG)
session_cache_path = Path(__file__).parent.parent.joinpath('session_cache')
cookies_storage = CookiesFileStorage(session_cache_path)
# creates session and pre-loads cookies from persisted local cookie storage
web_session = WebSessionFactory.create(
cookies_storage,
default_referer='https://www.jet2holidays.com',
sensitive_values=[],
sensitive_params=[],
retry_count=1,
timeout=10.00
)
try:
response1 = web_session.get('https://www.jet2holidays.com')
# print(response1.text)
time.sleep(1)
response2 = web_session.post(
url='https://www.jet2holidays.com/api/jet2/sitesearch/HotelAndRegionList',
request_type=WebRequestType.XHR,
data={
'term': 'radi',
'maxResults': 30
}
)
print(response2.text)
except Exception as ex:
logger.error(traceback.print_exc())
finally:
# persists cookies to local file
cookies_storage.save(web_session.cookies)
# writes to 'session-cache/session-DD-MM-YYYY HH-MM-SS.har' file
write_HAR_to_local_file(session_cache_path, web_session.request_session_context)
# converts HAR file to markdown file + response files in folder 'session-cache/session-DD-MM-YYYY HH-MM-SS/'
convert_HAR_to_markdown(session_cache_path, web_session.request_session_context)
|
eladeon/requests-tracker-python
|
examples/scraper.py
|
scraper.py
|
py
| 1,851
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29379124351
|
def convert(h):
return {'A': 1, 'B': 2, 'C': 3}[h]
score = 0
for round in open('strategy.txt', 'r').read().split('\n'):
foe, result = round.split(' ')
foe = convert(foe)
me = 0
if (result == 'Y'):
score += 3
me = foe
elif (result == 'Z'):
score += 6
if (foe == 1):
me = 2
elif (foe == 2):
me = 3
elif (foe == 3):
me = 1
else:
if (foe == 1):
me = 3
elif (foe == 2):
me = 1
elif (foe == 3):
me = 2
score += me
print(score)
|
patrikjanson/AoC2022
|
Day2_RockPaperScissors/part2.py
|
part2.py
|
py
| 606
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7965040405
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.collections import PolyCollection
#
# fs = 11240.
# t = 10
# time = np.arange(fs*t) / fs
# frequency = 1000.
# mysignal = np.sin(2.0 * np.pi * frequency * time)
#
# nperseg = 2**14
# noverlap = 2**13
# f, t, Sxx = signal.spectrogram(mysignal, fs, nperseg=nperseg,noverlap=noverlap)
#
# myfilter = (f>800) & (f<1200)
#
# f = f[myfilter]
# Sxx = Sxx[myfilter, ...]
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# ax.plot_surface(f[:, None], t[None, :], 10.0*np.log10(Sxx), cmap=cm.coolwarm)
# plt.show()
import dtw
def colloc(titlename,numberfilter):
va = pd.read_csv('Plot/' + titlename + '.csv')
Type = va.Type
IsSave = va.loc[(va['Type'] == numberfilter)]
return IsSave
if __name__ == '__main__':
nf = 11
savename = 'PutPhone'
namefile = 'Calling'
Calling = colloc(namefile,nf)
namefile = 'Pocket'
Pocket = colloc(namefile, nf)
namefile = 'Swinging'
Swinging = colloc(namefile, nf)
namefile = 'Texting'
Texting = colloc(namefile, nf)
aa = pd.concat([Calling,Pocket,Swinging,Texting], axis = 0)
aa.to_csv('Plot/test1.csv', index=False)
va = pd.read_csv('Plot/test1.csv')
Pose = va.Pose
posename = []
for i in range(len(Pose)):
if Pose[i] == 1:
posename.append('Calling')
elif Pose[i] == 2:
posename.append('Pocket')
elif Pose[i] == 3:
posename.append('Swinging')
elif Pose[i] == 4:
posename.append('Texting')
va['PoseName'] = posename
idarr = []
sums = 0
for i in range(len(Pose)):
idarr.append(sums)
sums+=1
va['Timestamp'] = idarr
va.to_csv('Plot/' + savename + '.csv', index=False)
print('Done')
|
phongcpt72/Unitmotion-Detection
|
Unitmotion/testplot.py
|
testplot.py
|
py
| 2,027
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7505782016
|
import printing
file_name = "game_stat.txt"
gametitle = "Diablo II"
# Export functions
def exportdata(datas):
with open("export.txt", "w") as data:
for i in range(len(datas)):
data.write(str(datas[i]) + "\n")
if __name__ == "__main__":
gamedata = printing.print_datas(file_name, gametitle)
exportdata(gamedata)
|
CodecoolMSC2017/pbwp-3rd-si-game-statistics-roskogabor
|
part2/export.py
|
export.py
|
py
| 348
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8266559866
|
import copy
import logging
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, NamedTuple, Union
import yaml
import cekit
from cekit.cekit_types import _T, RawDescriptor
from cekit.config import Config
from cekit.descriptor import (
Arg,
Descriptor,
Env,
Label,
Modules,
Osbs,
Packages,
Port,
Run,
Volume,
)
from cekit.descriptor.resource import Resource, create_resource
from cekit.errors import CekitError
from cekit.tools import get_latest_image_version
if TYPE_CHECKING:
from cekit.descriptor.modules import Install
from cekit.descriptor.overrides import Overrides
from cekit.generator.base import ModuleRegistry
logger = logging.getLogger("cekit")
config = Config()
_image_schema = yaml.safe_load(
"""
map:
name: {type: str, required: True}
version: {type: text, required: True}
schema_version: {type: int}
release: {type: text}
from: {type: str}
follow_tag: {type: str}
description: {type: text}
args: {type: any}
labels: {type: any}
envs: {type: any}
ports: {type: any}
run: {type: any}
artifacts: {type: any}
modules: {type: any}
packages: {type: any}
osbs: {type: any}
volumes: {type: any}
help:
map:
add: {type: bool}
template: {type: text}"""
)
class ImageOverrides(NamedTuple):
artifacts: Dict[str, "Resource"]
modules: Dict[str, "Install"]
def get_image_schema():
return copy.deepcopy(_image_schema)
class Image(Descriptor):
def __init__(self, descriptor: RawDescriptor, artifact_dir: str):
self._artifact_dir: str = artifact_dir
self.path: str = artifact_dir
self.schema = _image_schema.copy()
super(Image, self).__init__(descriptor)
self.skip_merging = ["description", "version", "name", "release"]
self._prepare()
def _prepare(self):
# TODO: Separating raw image descriptor from a higher level Image class would change this
# confusing code into a connector/factory.
self._descriptor["labels"] = [
Label(x) for x in self._descriptor.get("labels", [])
]
self._descriptor["envs"] = [Env(x) for x in self._descriptor.get("envs", [])]
self._descriptor["ports"] = [Port(x) for x in self._descriptor.get("ports", [])]
if "run" in self._descriptor:
self._descriptor["run"] = Run(self._descriptor["run"])
self._descriptor["artifacts"] = [
create_resource(a, directory=self._artifact_dir)
for a in self._descriptor.get("artifacts", [])
]
self._descriptor["modules"] = Modules(
self._descriptor.get("modules", {}), self.path
)
self._descriptor["packages"] = Packages(
self._descriptor.get("packages", {}), self.path
)
self._descriptor["osbs"] = Osbs(self._descriptor.get("osbs", {}), self.path)
self._descriptor["volumes"] = [
Volume(x) for x in self._descriptor.get("volumes", [])
]
# make sure image declarations override any module definitions
# TODO: Make into a NamedTuple to make types easier to reason about.
self._image_overrides = ImageOverrides(
artifacts=Image._to_dict(self.artifacts),
modules=Image._to_dict(self.modules.install),
)
self._all_artifacts: Dict[str, Resource] = Image._to_dict(self.artifacts)
def process_defaults(self):
"""Prepares default values before rendering"""
if not self.run:
self.run = Run({})
# do we want to force a user?
if "user" not in self.run:
self.run._descriptor["user"] = cekit.DEFAULT_USER
# Default package manager is yum
if not self.packages.manager:
self.packages._descriptor["manager"] = "yum"
# Default directory for supplementary files that should be copied to dist-git directory
if not self.osbs.extra_dir:
self.osbs._descriptor["extra_dir"] = "osbs_extra"
# Placing this here rather than in init as apply_image_overrides runs after that. This means
# follow_tag is applied *after* overrides.
if self.follow:
if not config.get("common", "redhat"):
raise CekitError(
"follow_tag annotation only supported with redhat flag"
)
self.base = get_latest_image_version(self.follow)
@property
def name(self) -> str:
return self.get("name")
@name.setter
def name(self, value: str):
self._descriptor["name"] = value
@property
def version(self) -> Any:
# TODO: Convert to string up front to simplify
return self.get("version")
@version.setter
def version(self, value: Any):
self._descriptor["version"] = value
# TODO: release is undocumented.
@property
def release(self) -> str:
return self.get("release")
@release.setter
def release(self, value: str):
self._descriptor["release"] = value
@property
def base(self) -> str:
return self.get("from")
@base.setter
def base(self, value: str):
self._descriptor["from"] = value
@property
def follow(self) -> str:
return self.get("follow_tag")
@follow.setter
def follow(self, value: str):
self._descriptor["follow_tag"] = value
@property
def description(self) -> str:
return self.get("description")
@description.setter
def description(self, value: str) -> None:
self._descriptor["description"] = value
@property
def labels(self) -> List[Label]:
return self.get("labels", [])
@property
def envs(self) -> List[Env]:
return self.get("envs", [])
@property
def args(self) -> List[Arg]:
return self.get("args", [])
@property
def ports(self) -> List[Port]:
return self.get("ports", [])
@property
def run(self) -> Run:
return self.get("run")
@run.setter
def run(self, value: Run):
self._descriptor["run"] = value
@property
def all_artifacts(self) -> Iterable[Resource]:
return self._all_artifacts.values()
@property
def artifacts(self) -> List[Resource]:
return self.get("artifacts", [])
@property
def modules(self) -> Modules:
return self.get("modules", Modules({}, self._artifact_dir))
@property
def packages(self) -> Packages:
return self.get("packages", Packages({}, self.path))
@property
def osbs(self) -> Osbs:
return self.get("osbs")
@osbs.setter
def osbs(self, value: Osbs):
self._descriptor["osbs"] = value
@property
def volumes(self) -> List[Volume]:
return self.get("volumes", [])
@property
def help(self) -> dict:
return self.get("help", {})
def apply_image_overrides(self, overrides: List["Overrides"]):
"""
Applies overrides to the image descriptor.
"""
if not overrides:
return
for override in overrides:
if override.name:
self.name = override.name
if override.version:
self.version = override.version
if override.base:
self.base = override.base
if override.description:
self.description = override.description
labels = Image._to_dict(self.labels)
for label in override.labels:
name = label.name
if name in labels:
labels[name] = label.merge(labels[name])
else:
labels[name] = label
self._descriptor["labels"] = list(labels.values())
envs = Image._to_dict(self.envs)
for env in override.envs:
name = env.name
if name in envs:
envs[name] = env.merge(envs[name])
else:
envs[name] = env
self._descriptor["envs"] = list(envs.values())
ports = Image._to_dict(self.ports)
for port in override.ports:
name = port.value
if name in ports:
ports[name] = port.merge(ports[name])
else:
ports[name] = port
self._descriptor["ports"] = list(ports.values())
module_repositories = Image._to_dict(self.modules.repositories)
for repository in override.modules.repositories:
name = repository.name
if name in module_repositories:
module_repositories[name] = repository.merge(
module_repositories[name]
)
else:
module_repositories[name] = repository
self.modules._descriptor["repositories"] = list(
module_repositories.values()
)
self.packages._descriptor = override.packages.merge(self.packages)
# In case content sets are provided as null values
# Remove the key entirely.
# TODO: This should be handled probably at general level, for every key
for flag in ["content_sets", "content_sets_file"]:
if flag in override.packages and override.packages[flag] is None:
self.packages._descriptor.pop("content_sets", None)
self.packages._descriptor.pop("content_sets_file", None)
# Merge override osbs items into self.
self.osbs = self.osbs.merge(override.osbs)
# Using 'or []' to avoid having to set default value in packages.py for _descriptor["remove"]
for package in override.packages.remove or []:
if package not in self.packages.remove:
self.packages.remove.append(package)
for package in override.packages.install or []:
if package not in self.packages.install:
self.packages.install.append(package)
for package in override.packages.reinstall or []:
if package not in self.packages.reinstall:
self.packages.reinstall.append(package)
artifact_overrides = self._image_overrides.artifacts
image_artifacts = Image._to_dict(self.artifacts)
for i, artifact in enumerate(override.artifacts):
name = artifact.name
# override.artifact contains override values WITH defaults.
# override.original_descriptor contains override value WITHOUT defaults.
# artifact_overrides contains original dictionary
#
# Iterating over dest / target / ...
# If we have _not_ supplied a target (check original_descriptor),
# then check artifact_overrides,
# otherwise use default from override.artifact
override_without_defaults = override.original_descriptor.get(
"artifacts"
)[i]
for key in ["dest", "target", "description"]:
if override_without_defaults.get(key):
logger.debug(
"Key ({}) found in override as {}".format(
key, override_without_defaults.get(key)
)
)
artifact[key] = override_without_defaults.get(key)
elif artifact_overrides.get(name) and artifact_overrides.get(
name
).get(key):
new_value = artifact_overrides.get(name).get(key)
logger.debug(
"Key ({}) found in original artifact as {}".format(
key, new_value
)
)
artifact[key] = new_value
# collect override so we can apply it to modules
artifact_overrides[name] = artifact
# add it to the list of everything
self._all_artifacts[name] = artifact
# Apply override to image descriptor
image_artifacts[name] = artifact
# Sort the output as it makes it easier to view and test.
logger.debug(
"Final (with override) artifact is {}".format(
sorted(artifact.items())
)
)
self._descriptor["artifacts"] = list(image_artifacts.values())
module_overrides = self._image_overrides.modules
image_modules = Image._to_dict(self.modules.install)
for module in override.modules.install:
name = module.name
# collect override so we can apply it to modules.
# this allows us to override module versions without affecting ordering.
module_overrides[name] = module
# Apply override to image descriptor
# If the module does not exists in the original descriptor, add it there
image_modules[name] = module
self.modules._descriptor["install"] = list(image_modules.values())
if override.run is not None:
if self.run:
self.run = override.run.merge(self.run)
else:
self.run = override.run
def apply_module_overrides(self, module_registry: "ModuleRegistry"):
"""
Applies overrides to included modules. This includes:
Artifact definitions
Module dependency version overrides
Also incorporates module contributed global configuration into the image:
Run specification
Package repository definitions
Required artifacts
"""
install_list: Dict[str, "Install"] = OrderedDict()
# index by name for easier access
self._package_repositories = Image._to_dict(self.packages.repositories)
# collect final 'run' value from modules
self._module_run = Run({})
# process the modules and integrate relevant bits into ourself
self.process_install_list(
self, self.modules.install, install_list, module_registry
)
# update ourself based on module declarations
# final order of modules to be installed
self.modules._descriptor["install"] = list(install_list.values())
# all package repositories required for installing packages
self.packages._descriptor["repositories"] = list(
self._package_repositories.values()
)
# final 'run' value
if self.run:
self.run = self.run.merge(self._module_run)
else:
self.run = self._module_run
def process_install_list(
self,
source: Union["Image"],
to_install_list: List["Install"],
install_list: Dict[str, "Install"],
module_registry: "ModuleRegistry",
) -> None:
# TODO: Return value is passed as parameter in `install_list`
module_overrides = self._image_overrides.modules
artifact_overrides = self._image_overrides.artifacts
for to_install in to_install_list:
logger.debug(
"Preparing module '{}' required by '{}'.".format(
to_install.name, source.name
)
)
override = module_overrides.get(to_install.name, None)
if override:
if override.version != to_install.version:
logger.debug(
"Module '{}:{}' being overridden with '{}:{}'.".format(
to_install.name,
to_install.version,
override.name,
override.version,
)
)
# apply module override
to_install = override
existing = install_list.get(to_install.name, None)
# see if we've already processed this
if existing:
# check for a version conflict
if existing.version != to_install.version:
logger.warning(
"Module version inconsistency for {}: {} requested, but {} will be used.".format(
to_install.name, to_install.version, existing.version
)
)
continue
module = module_registry.get_module(to_install.name, to_install.version)
if not module:
raise CekitError(
"Could not locate module %s version %s. Please verify that it is included in one of the "
"specified module repositories."
% (to_install.name, to_install.version)
)
# collect artifacts and apply overrides
module_artifacts = Image._to_dict(module.artifacts)
for artifact in module.artifacts:
name = artifact.name
if name in artifact_overrides:
override = artifact_overrides[name]
self._all_artifacts[name] = override
module_artifacts[name] = override
else:
self._all_artifacts[name] = artifact
module._descriptor["artifacts"] = list(module_artifacts.values())
# collect package repositories
for repo in module.packages.repositories:
name = repo.name
if name not in self._package_repositories:
self._package_repositories[name] = repo
# collect package manager
if not self.packages.manager and module.packages.manager:
logger.debug(
f"Applying module package manager of {module.packages.manager} to image"
)
self.packages._descriptor["manager"] = module.packages.manager
# incorporate run specification contributed by module
if module.run:
# we're looping in order of install, so we want the current module to override whatever we have
self._module_run = module.run.merge(self._module_run)
# prevent circular dependencies. we'll move it to the end after processing
install_list[to_install.name] = to_install
# process this modules dependencies
self.process_install_list(
module, module.modules.install, install_list, module_registry
)
# move this module to the end of the list.
install_list.pop(to_install.name)
install_list[to_install.name] = to_install
# helper to simplify merging lists of objects
@classmethod
def _to_dict(cls, named_items: Iterable[_T], key="name") -> Dict[str, _T]:
# TODO: `key` argument is never used?
# TODO: This assumes that `name` is always a string, but in fact it isn't for Port
dictionary = OrderedDict()
for item in named_items:
dictionary[item[key]] = item
return dictionary
|
cekit/cekit
|
cekit/descriptor/image.py
|
image.py
|
py
| 19,524
|
python
|
en
|
code
| 70
|
github-code
|
6
|
29876271313
|
# pylint: disable=invalid-name, unused-argument, unspecified-encoding, missing-function-docstring
"""
Implements all winreg functions
https://docs.python.org/3/library/winreg.html#functions
"""
import os
from typing import Union
from re import findall
from tempfile import TemporaryDirectory
from warnings import warn
from typing import Optional, List
from unixreg.key import RegKey
from unixreg.constants import STANDARD_RIGHTS_REQUIRED, KEY_WOW64_64KEY, KEY_WRITE, KEY_READ
KEY_TYPE = Union[str, RegKey]
SUBKEY_TYPE = Union[str, RegKey, None]
_KEY_CACHE: List[RegKey] = []
_ENV_REPLACE = {
"USERPROFILE": "HOME"
}
_CONFIG_DIR = os.getenv("XDG_CONFIG_HOME")
if not _CONFIG_DIR:
home = os.getenv("HOME")
if home:
_CONFIG_DIR = os.path.join(home, ".config")
else:
_CONFIG_DIR = TemporaryDirectory().name
if not os.getenv("TOX"):
warn(f"Could not find directory to put registry in. Falling back to {_CONFIG_DIR}")
_CONFIG_DIR = os.path.join(_CONFIG_DIR, "unixreg")
def __init_values(key: KEY_TYPE, sub_key: SUBKEY_TYPE = None, access = STANDARD_RIGHTS_REQUIRED):
if isinstance(key, str):
key = RegKey(key)
if sub_key is not None:
print(sub_key)
key = key + sub_key
key.access = access
return key
def __create_key(key: RegKey):
if _CONFIG_DIR and key and key.key:
path = os.path.join(_CONFIG_DIR, key.key)
os.makedirs(path, exist_ok=True)
def CloseKey(key: RegKey):
"""
Closes a previously opened registry key.
The key argument specifies a previously opened key.
"""
key.Close()
try:
_KEY_CACHE.remove(key)
except ValueError:
pass
def ConnectRegistry(computer: Union[str, None], key: RegKey):
"""
Opens a registry handle on another computer and returns the handle
If computer_name is None, the local computer is used, otherwise
OSError is raised to signify the function failing
"""
if not computer:
return OpenKey(key, None)
raise OSError("Not Implemented")
def OpenKeyEx(key: RegKey, sub_key: SUBKEY_TYPE, reserved=0, access=KEY_READ):
return CreateKeyEx(key, sub_key, reserved, access)
OpenKey = OpenKeyEx
def CreateKey(key: RegKey, sub_key: SUBKEY_TYPE):
return CreateKeyEx(key, sub_key)
def CreateKeyEx(key: RegKey, sub_key: SUBKEY_TYPE, reserved=0, access=KEY_WRITE):
key = __init_values(key, sub_key, access)
__create_key(key)
_KEY_CACHE.append(key)
return key
def DeleteKey(key: KEY_TYPE, sub_key: SUBKEY_TYPE):
return DeleteKeyEx(key, sub_key)
def DeleteKeyEx(key: KEY_TYPE, sub_key: SUBKEY_TYPE, access=KEY_WOW64_64KEY, reserved=0):
kkey = __init_values(key, sub_key, access)
if _CONFIG_DIR:
path = os.path.join(_CONFIG_DIR, kkey.key)
if os.path.isfile(path):
os.remove(path)
def DeleteValue(key: KEY_TYPE, value: str):
kkey = __init_values(key)
if _CONFIG_DIR:
filepath = os.path.join(_CONFIG_DIR, kkey.key, value)
try:
os.remove(filepath)
except FileNotFoundError:
pass
def EnumKey(key: KEY_TYPE, index: int):
raise NotImplementedError("Not Implemented")
def EnumValue(key: KEY_TYPE, index: int):
raise NotImplementedError("Not Implemented")
def ExpandEnvironmentStrings(env: str):
for key, val in _ENV_REPLACE.items():
env = env.replace(f"%{key}%", f"%{val}%")
match = findall(r"%(.+?)%", env)
for val in match:
valenv = os.getenv(val)
if valenv:
env = env.replace(f"%{val}%", valenv)
env.replace("\\", os.path.sep)
return env
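# Usage sketch for ExpandEnvironmentStrings (hypothetical environment): with
# HOME=/home/alice, the string "%USERPROFILE%/.config" expands to
# "/home/alice/.config", since USERPROFILE is first remapped to HOME via _ENV_REPLACE.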
def FlushKey(key: KEY_TYPE):
raise NotImplementedError("Not Implemented")
def QueryInfoKey(key: KEY_TYPE):
raise NotImplementedError("Not Implemented")
def QueryValueEx(key: KEY_TYPE, sub_key: SUBKEY_TYPE) -> str:
kkey = __init_values(key, sub_key)
if _CONFIG_DIR:
filepath = os.path.join(_CONFIG_DIR, kkey.key)
with open(filepath, "r") as file:
return file.read()
return ""
QueryValue = QueryValueEx
def LoadKey(key: RegKey, sub_key: SUBKEY_TYPE, file_name: str):
# this requires a win32 permission compatibility layer
raise OSError("Not Implemented")
def SaveKey(key: RegKey, file_name: str) -> None:
# this requires a win32 permission compatibility layer
raise OSError("Not Implemented")
def SetValue(key: KEY_TYPE, sub_key: SUBKEY_TYPE, typearg: int, value: str) -> None:
if isinstance(sub_key, RegKey):
sub_key = sub_key.key
if sub_key:
return SetValueEx(key, sub_key, 0, typearg, value)
def SetValueEx(key: KEY_TYPE, value_name: str, reserved: int, typearg: int, value: str) -> None:
kkey = __init_values(key)
if _CONFIG_DIR:
filepath = os.path.join(_CONFIG_DIR, kkey.key, value_name)
with open(filepath, "w") as file:
file.write(value)
def DisableReflectionKey(key: KEY_TYPE):
raise NotImplementedError("Not Implemented")
def EnableReflectionKey(key: KEY_TYPE):
raise NotImplementedError("Not Implemented")
def QueryReflectionKey(key: KEY_TYPE):
raise NotImplementedError("Not Implemented")
# Non winreg functions
def LoadRegFile(file_name: str) -> Optional[str]:
def _strip_quotes(val) -> str:
_QUOTE_LIST = ("\"", '\'')
if val.startswith(_QUOTE_LIST) and val.endswith(_QUOTE_LIST):
val = val[1:-1]
return val
def _strip_brackets(val) -> str:
_BRACKET_LIST = ("[", "]")
if val.startswith(_BRACKET_LIST) and val.endswith(_BRACKET_LIST):
val = val[1:-1]
return val
with open(file_name, "r") as reg:
nextline = reg.readline()
key: Optional[str] = None
while nextline:
line = nextline.strip()
nextline = reg.readline()
if len(line) == 1:
continue
split = line.split("=")
            # only bracketed lines such as [Some\Key] start a new key section
            if line.startswith("[") and line.endswith("]"):
                key = _strip_brackets(line)
elif key and len(split) == 2:
name, value = split
name = _strip_quotes(name)
value = _strip_quotes(value)
os.makedirs(key, exist_ok=True)
if _CONFIG_DIR:
with open(os.path.join(_CONFIG_DIR, key, name), "w") as regvalue:
regvalue.write(value)
print(f"[{key}] {name}={value}")
return None
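# A minimal sketch of the .reg-style layout LoadRegFile expects (key and value
# names below are hypothetical): bracketed lines open a key section and quoted
# name=value pairs beneath it are written as individual value files, e.g.
#   [Software\ExampleVendor\ExampleApp]
#   "install_dir"="/opt/example"
#   "language"="en"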
|
Jan200101/unixreg
|
unixreg/functions.py
|
functions.py
|
py
| 6,498
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70338202429
|
#!/usr/bin/env python3
"""
The main program that will be run on the Raspberry Pi,
which is the controller for the pharmacy client.
DINs of drugs on this pharmacy should be specified in din.cfg
"""
# these libraries come with python
import logging
import datetime
import struct
import asyncio
import json
import base64
# please run setup.sh first to install these libraries
import numpy as np
import cv2
import face_recognition
import aiohttp
# constant: endpoint for the web API
api_endpoint = 'https://example.com/arka'
# constant: current api version
api_version = '0'
# local drug database
din_to_motor = {}
async def dispense(din):
"""
Try to dispense a drug.
Returns true if it succeeded.
"""
motor = din_to_motor[din]
return False
async def report_dispensed(auth_token, drugs_dispensed):
"""
Reports back to the server that drugs were dispensed... later
"""
# get the timestamp NOW
ts = datetime.datetime.utcnow().timestamp()
# wait until dispensing should be done
await asyncio.sleep(30)
# if nothing was dispensed, easy
if not drugs_dispensed:
logging.log(logging.INFO, 'No drug dispensing to report')
return True
logging.log(logging.DEBUG, 'Now trying to report drug dispensed')
# start a HTTP session
async with aiohttp.ClientSession() as session:
logging.log(logging.DEBUG, 'HTTP session started from report_dispensed')
# build the json object to send
data_send = {
'version': api_version,
'id': auth_token,
'din': drugs_dispensed,
'timestamp': ts
}
# response is assumed none until we get something
data_response = None
# it's not done until we've confirmed it's done
while data_response is None:
# connect to the api!
async with session.get(
api_endpoint + '/user/pharmacy_done',
json = data_send
) as response:
# get data as json
                data_response = await response.json()
if data_response['version'] != api_version:
raise AssertionError('Incorrect API version encountered in report_dispensed')
elif not data_response['success']:
logging.log(logging.INFO, 'API endpoint said drug dispense report failed for whatever reason')
data_response = None
await asyncio.sleep(30)
logging.log(logging.INFO, 'Drug delivery report completed and confirmed')
def pack_fingerprint(fingerprint):
"""
Takes the vector which is a face fingerprint and
creates a bytes object to represent it.
Some information will be lost.
"""
# test our assumptions
    if np.any(fingerprint > 1):
        raise ValueError('Fingerprint contains value greater than 1')
    if np.any(fingerprint < -1):
        raise ValueError('Fingerprint contains value less than -1')
# convert from 64-bit float in range [-1, 1] to 16-bit int in full range
# 1 - 2^-53 is the largest double value below 1
# by scaling by this much, we prevent the edge case of boundary number 1 which can overflow to -2^15 after scaling
scale = 1 - 2 ** -53
    if scale >= 1:
        raise AssertionError('Fingerprint packing uses incorrect scaling factor')
# scale to get the 16-bit int range
scale *= 2 ** 15
# convert to the 16-bit int vector
values = np.array(np.floor(fingerprint * scale), dtype=np.int16)
# pack in bytes
# 128 values, 16-bit integer, little endian -> 256 bytes
result = struct.pack('<128h', *values)
return result
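# Minimal usage sketch (dummy data): any 128-float fingerprint in [-1, 1] packs
# into exactly 256 bytes (128 little-endian int16 values), e.g.
#   dummy = np.zeros(128)
#   assert len(pack_fingerprint(dummy)) == 256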
async def main_step(capture):
"""
Contains the code for the main loop.
A return here will act as a continue in the loop.
"""
# wait for either user to press the button or a certain number of seconds to pass
await asyncio.sleep(1)
logging.log(logging.DEBUG, 'Now trying to capture an image')
# capture an image
succeeded, pixels = capture.read()
logging.log(logging.DEBUG, 'Image capture completed, and it ' + ('succeeded' if succeeded else 'failed'))
# this line explains itself well
if not succeeded:return
# OpenCV uses BGR as its output format but we want RGB
pixels = cv2.cvtColor(pixels, cv2.COLOR_BGR2RGB)
logging.log(logging.DEBUG, 'Image colour channels changed to RGB')
# find face locations in the image
face_boxes = face_recognition.face_locations(pixels, model='hog')
num_faces = len(face_boxes)
    logging.log(logging.DEBUG, 'Found ' + str(num_faces) + ' faces in the image')
# no faces means nothing to do
if num_faces == 0:return
# TODO filter faces so only 1 is left, or else give up
# generate the 128-vector as face fingerprint
fingerprints = face_recognition.face_encodings(pixels, face_boxes)
fingerprint = fingerprints[0]
logging.log(logging.DEBUG, 'Face fingerprint was generated')
# pack the fingerprint as bytes
packed_fingerprint = pack_fingerprint(fingerprint)
logging.log(logging.INFO, 'Packed face fingerprint as ' + packed_fingerprint.hex())
# start a HTTP session
async with aiohttp.ClientSession() as session:
logging.log(logging.DEBUG, 'HTTP session started from main_step')
# build the json object to send
data_send = {
'version': api_version,
            'fingerprint': base64.b64encode(packed_fingerprint).decode('ascii')
}
# response is assumed none until we get something
data_response = None
# connect to the api!
async with session.get(
api_endpoint + '/user/pharmacy_get',
json = data_send
) as response:
logging.log(logging.DEBUG, 'Sent face fingerprint to authenticate')
# get the response as json
data_response = await response.json()
logging.log(logging.DEBUG, 'Decoded response data as JSON')
# continue if it succeeded
        if data_response is not None and data_response.get('success', None) and data_response['version'] == api_version:
logging.log(logging.DEBUG, 'Authenticated and prescription data acquired')
# the authentication token for this session
            auth_token = data_response['id']
# make a list of drugs that were dispensed
drugs_dispensed = []
            # schedule the delayed report without blocking the dispensing loop below
            report_task = asyncio.create_task(report_dispensed(auth_token, drugs_dispensed))
# loop over all valid prescriptions
            for pres in data_response['prescriptions']:
# get the DIN of the drug
din = pres['din']
# is this drug in this pharmacy?
if din in din_to_motor:
logging.log(logging.INFO, 'Attempting to dispense drug with DIN ' + din)
# try to dispense it
drug_was_dispensed = await dispense(din)
if drug_was_dispensed:
logging.log(logging.INFO, 'Drug dispense reported success')
drugs_dispensed.append(din)
else:
logging.log(logging.INFO, 'Drug dispense reported failure')
async def main_async():
"""
Actual main function to be used in production.
"""
# log timing information
logging.log(logging.INFO, 'Starting main function | Current UTC time is ' + str(datetime.datetime.utcnow()))
# set up the video capture object
capture = cv2.VideoCapture(0)
# the main loop
while True:
# log some timing information
logging.log(logging.DEBUG, 'Starting the main loop | Current UTC time is ' + str(datetime.datetime.utcnow()))
# try block to prevent errors from breaking the program
try:
# special function represents the code of the main loop
await main_step(capture)
except KeyboardInterrupt:
# the user intends to stop the program, so we respect this
logging.log(logging.INFO, 'Exiting main loop because a keyboard interrupt (SIGINT) was received')
            # a bare raise re-raises the same KeyboardInterrupt with its original traceback
            raise
except Exception as exc:
# any other error must not break the program
logging.log(logging.ERROR, exc)
# get rid of the video capture object
capture.release()
# say bye bye
logging.log(logging.WARNING, 'Exiting main function, program is ending | Current UTC time is ' + str(datetime.datetime.utcnow()))
def main():
"""
Entry point to the program.
Will first read in the local database from the config file.
Redirects to main_async.
"""
global din_to_motor
with open('din.cfg','r') as file:
for line in file:
din, motor = line.strip().split()
din_to_motor[din] = motor
asyncio.run(main_async())
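# din.cfg is assumed to be a plain text file mapping drug DINs to motor identifiers,
# one whitespace-separated pair per line, e.g. (hypothetical values):
#
#   01234567 motor_1
#   07654321 motor_2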
def main_test():
"""
Previous main function left over from testing. Will be removed when it is no longer useful.
"""
print('start of program')
cap = cv2.VideoCapture(0)
print('camera initialized')
for _ in range(1):
print('start of main loop')
# try to capture an image
# image is a 3d array: (Y, X, bgr)
ret, frame = cap.read()
print('image captured')
# reorder to RGB
# not necessary to do it this way but it works
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
print('image converted to rgb')
# we must first detect and locate faces within the image
# this is separate from the face fingerprinting
face_boxes = face_recognition.face_locations(frame,
model='hog')
print('faces detected in image')
# face_recognition library includes a premade AI
# this will spit out a 1d array with 128 floating point entries
# they seem to be within [-1, 1] and average at 0
# this fingerprint is a summary of the features of the faces
# we will later transform this vector and then send that to the server for processing
fingerprints = face_recognition.face_encodings(frame, face_boxes)
print('face fingerprints generated')
print(f'created {len(fingerprints)} fingerprints')
for index, fingerprint in enumerate(fingerprints):
print('-'*40)
print(f'data of fingerprint #{index}')
print(f'is a vector with shape {fingerprint.shape} and type {fingerprint.dtype}')
print(f'min is {np.min(fingerprint)}')
print(f'max is {np.max(fingerprint)}')
print(f'mean is {np.mean(fingerprint)}')
print('raw data')
print(fingerprint)
print('main loop exited')
print('cleaning up')
cap.release()
cv2.destroyAllWindows()
print('bye bye!')
# standard way to invoke main but only if this script is run as the program and not a library
if __name__ == '__main__':
main_test()
|
alimzhan2000/arka_project_on_python
|
drug_delivering_code.py
|
drug_delivering_code.py
|
py
| 11,430
|
python
|
en
|
code
| 1
|
github-code
|
6
|
1883485650
|
import sys
import os
import pefile
# Prints the sections of an executable
def print_sections(directory, executable):
pe = pefile.PE(executable if directory is None else directory + "/" + executable)
sections = []
for section in pe.sections:
sections.append(section.Name.decode('utf-8'))
print(executable + ": [", end="")
for i, section in enumerate(sections):
print("'{}'".format(section), end="")
if i < len(sections) - 1:
print(', ', end="")
print("]")
# Check whether the input argument is a file or a folder
if os.path.isdir(sys.argv[1]):
for filename in os.listdir(sys.argv[1]):
if filename.endswith(".exe"):
print_sections(sys.argv[1], filename)
else:
print_sections(None, sys.argv[1])
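# Example invocations (hypothetical paths; requires the pefile package):
#   python T2P2a.py samples/          -> prints the sections of every .exe in the folder
#   python T2P2a.py samples/app.exe   -> prints the sections of a single executable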
|
kkatzer/CDadosSeg
|
T2/Parte2/T2P2a.py
|
T2P2a.py
|
py
| 791
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19052604441
|
import json
import snappy
from structlog import get_logger
from jwcrypto.common import base64url_decode
from app.data_model.app_models import QuestionnaireState
from app.storage import data_access
from app.storage.storage_encryption import StorageEncryption
logger = get_logger()
class EncryptedQuestionnaireStorage:
def __init__(self, user_id, user_ik, pepper, stateless_updates_enabled=False):
if user_id is None:
raise ValueError('User id must be set')
self._user_id = user_id
self.encrypter = StorageEncryption(user_id, user_ik, pepper)
self.stateless_updates_enabled = stateless_updates_enabled
def add_or_update(self, data, version):
compressed_data = snappy.compress(data)
encrypted_data = self.encrypter.encrypt_data(compressed_data)
if self.stateless_updates_enabled:
logger.debug('saving questionnaire data', user_id=self._user_id)
questionnaire_state = QuestionnaireState(self._user_id, encrypted_data, version)
else:
questionnaire_state = self._find_questionnaire_state()
if questionnaire_state:
logger.debug('updating questionnaire data', user_id=self._user_id)
questionnaire_state.state_data = encrypted_data
questionnaire_state.version = version
else:
logger.debug('creating questionnaire data', user_id=self._user_id)
questionnaire_state = QuestionnaireState(self._user_id, encrypted_data, version)
data_access.put(questionnaire_state)
def get_user_data(self):
questionnaire_state = self._find_questionnaire_state()
if questionnaire_state:
version = questionnaire_state.version or 0
try:
# legacy data was stored in a dict, base64-encoded, and not compressed
data = json.loads(questionnaire_state.state_data)['data']
is_legacy_data = True
except ValueError:
data = questionnaire_state.state_data
is_legacy_data = False
decrypted_data = self.encrypter.decrypt_data(data)
if is_legacy_data:
decrypted_data = base64url_decode(decrypted_data.decode()).decode()
else:
decrypted_data = snappy.uncompress(decrypted_data).decode()
return decrypted_data, version
return None, None
def delete(self):
logger.debug('deleting users data', user_id=self._user_id)
questionnaire_state = self._find_questionnaire_state()
if questionnaire_state:
data_access.delete(questionnaire_state)
def _find_questionnaire_state(self):
logger.debug('getting questionnaire data', user_id=self._user_id)
return data_access.get_by_key(QuestionnaireState, self._user_id)
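# A minimal usage sketch (hypothetical values; StorageEncryption, data_access and the
# QuestionnaireState model come from the surrounding application, so this only
# illustrates the call flow rather than a standalone runnable example):
#
#   storage = EncryptedQuestionnaireStorage('user-1', 'user-ik', 'pepper')
#   storage.add_or_update('{"answers": []}', version=1)
#   data, version = storage.get_user_data()
#   storage.delete()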
|
ONSdigital/census-survey-runner
|
app/storage/encrypted_questionnaire_storage.py
|
encrypted_questionnaire_storage.py
|
py
| 2,887
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6634012203
|
def isGlodonNumber(num):
hset = set()
t = 0
while t == len(hset):
num = sum(map(lambda x: int(x)**2, list(str(num))))
if num == 1:
return True
t+=1
hset.add(num)
return False
# if 1 < num < 4:
# return False
# return True
# map(lambda x: x^2, intlist)
print(isGlodonNumber(32))
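# The check above is the classic "happy number" test: repeatedly replace the number with
# the sum of the squares of its digits, stopping when it reaches 1 (happy) or when a
# value repeats (unhappy). For 32: 3**2 + 2**2 = 13, then 1 + 9 = 10, then 1 + 0 = 1,
# so the call prints True.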
|
rh01/gofiles
|
lcode1-99/ex06/glodon.py
|
glodon.py
|
py
| 370
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14875088196
|
import bpy
import bmesh
import sys
import time
import argparse
# blender -b -P Resize.py -- --height 0.8 --inm Objects/Bed.obj --outm oBed2.obj
def get_args():
parser = argparse.ArgumentParser()
# get all script args
_, all_arguments = parser.parse_known_args()
double_dash_index = all_arguments.index('--')
script_args = all_arguments[double_dash_index + 1: ]
# add parser rules
# add parser rules
parser.add_argument('-hei', '--height', help="Final Height Dimension")
parser.add_argument('-in', '--inm', help="Original Model")
parser.add_argument('-out', '--outm', help="Rescaled output file")
parsed_script_args, _ = parser.parse_known_args(script_args)
return parsed_script_args
args = get_args()
height = float(args.height)
print(height)
input_model = str(args.inm)
print(input_model)
output_model = str(args.outm)
print(output_model)
print('\n Clearing blender scene (default garbage...)')
# deselect all
bpy.ops.object.select_all(action='DESELECT')
print('\n Beginning the process of import & export using Blender Python API ...')
bpy.ops.import_scene.obj(filepath=input_model)
print('\n Obj file imported successfully ...')
### just imported obj
print('\n Starting Resize...')
print('\n Z Dimension of the object is')
for o in bpy.data.objects:
if o.type == 'MESH':
z=o.dimensions.z
#x= bpy.data.objects[0].dimensions.x
#y=bpy.data.objects[0].dimensions.y
#z=bpy.data.objects[0].dimensions.z
# Resize the object
newscale=1
print(z)
if z != 0 :
newscale= height/z
bpy.ops.transform.resize(value=(newscale,newscale,newscale))
print('\n new scale is',newscale ,'\n')
#bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
bpy.ops.export_scene.obj(filepath=output_model)
print('\n Ending Resize...')
|
Niloofar-didar/AR-Realtime-Decimation-main
|
eAR-offline_modeling/Resize.py
|
Resize.py
|
py
| 1,784
|
python
|
en
|
code
| 2
|
github-code
|
6
|
36181808903
|
"""
Gradient descent practice
"""
import matplotlib.pyplot as plt
from scratch08.ex01 import difference_quotient, tangent, move
def g(x):
"""y = (1/3)x**3 - x"""
return x ** 3 / 3 - x
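# For reference, the analytic derivative is g'(x) = x**2 - 1, so g has a local maximum
# at x = -1 and a local minimum at x = 1; the descent loop below (step -0.1) should
# converge to x = 1 and the ascent loop (step +0.1) to x = -1.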
if __name__ == '__main__':
    # Using the functions written in ex01, plot the graph of g(x)
    # and find the extrema (local minima/maxima) with gradient descent
xs = [x / 10 for x in range(-30, 31)]
ys = [g(x) for x in xs]
plt.plot(xs, ys)
plt.axhline(y=0, color='0.3')
plt.axvline(x=0, color='0.3')
plt.axvline(x=-1, color='0.75')
plt.axvline(x=1, color='0.75')
plt.ylim(bottom=-2, top=2)
x_init = 1.9
tolerance = 0.00001
count = 0
while True:
count += 1
gradient = difference_quotient(g, x_init, 0.0001)
x_next = move(x_init, gradient, -0.1)
print(f'{count} x: {x_next}')
# ys_next = [tangent(x, gradient, x_next, g(x_next)) for x in xs]
# plt.plot(xs, ys_next)
if abs(x_init - x_next) < tolerance:
break
else:
x_init = x_next
x_init = -1.9
count = 0
while True:
count += 1
gradient = difference_quotient(g, x_init, 0.0001)
x_next = move(x_init, gradient, 0.1)
print(f'{count} x: {x_next}')
# ys_next = [tangent(x, gradient, x_next, g(x_next)) for x in xs]
# plt.plot(xs, ys_next)
if abs(x_init - x_next) < tolerance:
break
else:
x_init = x_next
plt.show()
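# The helpers imported from scratch08.ex01 are not shown in this file; a minimal sketch
# of what they are assumed to look like, inferred only from how they are called above:
#
#   def difference_quotient(f, x, h):
#       """Approximate f'(x) with the forward difference (f(x + h) - f(x)) / h."""
#       return (f(x + h) - f(x)) / h
#
#   def move(x, direction, step):
#       """Take one step of size `step` along `direction`: x + step * direction."""
#       return x + step * direction
#
#   def tangent(x, slope, a, fa):
#       """Tangent line through (a, f(a)) with the given slope, evaluated at x."""
#       return slope * (x - a) + fa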
|
lee-saint/lab-python
|
scratch08/ex02.py
|
ex02.py
|
py
| 1,510
|
python
|
en
|
code
| 0
|
github-code
|
6
|
39749765607
|
"""
This file creates all the tables and databases,
used in the in_Voice APP as class,
and also does the CRUD operations of database by using the methods.
"""
# Importing the required modules to working with database
import sqlite3
# Importing os module to work with files and folders
import os
# Importing a function to open and read the data in the url
from urllib.request import urlopen
# Importing json module to convert the data from the url into json format
import json
# Importing the required modules from the Python-Docx module to create and work with '.docx' files
from docx import Document
from docx.shared import Inches, Pt, RGBColor
from docx.enum.text import WD_ALIGN_PARAGRAPH
# Creating a class for doing all the CRUD operations in the 'userCredentials' table on the inVoice App's Database
class UserCredential():
"""
Class for Registering a New Account,
which is used for working with InVoice App.
"""
# Assigning a name for the Directory where .db files are stored
DB_directory = "./.DB"
def __init__(self, db_name="in_Voice.db"):
# Assigning 'userCredentials' table name into a variable
self.tableName = "userCredentials"
# Creating a '.DB' Directory for storing the database files
self.createHiddenDIR(self.DB_directory)
# Connecting to the App's Database, if not exists creating a new database
self.inVoice_DB = sqlite3.connect(f"{self.DB_directory}/{db_name}")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'userCredentials' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
firstName TEXT,
lastName TEXT,
email CHAR NOT NULL UNIQUE,
mobileNumber INTEGER,
userId CHAR NOT NULL PRIMARY KEY,
passWord CHAR
)""")
'''CREATE USER'''
# Function for creating new record in the 'userCredentials' table
def createUser(self, firstName, lastName, email, mobileNumber, userId, password):
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:firstName, :lastName, :email, :mobileNumber, :userId, :password)",
{
"firstName": firstName,
"lastName": lastName,
"email": email,
"mobileNumber": mobileNumber,
"userId": userId,
"password": password
})
'''UPDATE USER PASSWORD'''
    # Function for updating only a specific field from an existing record(user detail) in the 'userCredentials' table
def changePassword(self, userId, password):
# Updating only the specific field from the existing record(user detail) in the table which matches the userID
self.co.execute(f"UPDATE {self.tableName} SET password = :password WHERE userId = :userId",
{
"password": password,
"userId": userId
})
'''GET USER'''
# Function for querying the 'userCredentials' table for specific record
def getUser(self, userId):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Querying the 'userCredentials' table, to pick a matched record
self.co.execute(
f"SELECT *, oid FROM {self.tableName} WHERE userId = '{userId}'")
        # Assigning the queried records into a variable to return as response
records = self.co.fetchone()
return records
'''GET ALL USERS'''
# Function for getting all the records from the table
def getallUsers(self):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Querying the 'userCredentials' table, and picking all the available records
self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        # Assigning the queried records into a variable to return as response
records = self.co.fetchall()
return records
'''DELETE USER'''
# Function for deleting a specific record from the 'userCredentials' table
def delUser(self, userId):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Deleting a specific record from the 'userCredentials' table which matches the UserId
self.co.execute(
f"DELETE FROM {self.tableName} WHERE userId = '{userId}'")
'''CREATE HIDDEN DIRECTORY'''
# Function for Creating a Hidden Directory and Deleting the Directory and its files
def createHiddenDIR(self, dirName, status=""):
        # Checking that the Directory does not already exist
if (os.path.exists(dirName) is False):
# Creating a Directory in the name passed as parameter
os.mkdir(dirName)
# Making the Directory Hidden
os.system("attrib +h " + dirName)
elif (os.path.exists(dirName) and status == "clearCache"):
            # Deleting all the files inside the Directory
for item in os.listdir(dirName):
os.remove(f"{dirName}/{item}")
# Deleting the empty Directory
os.rmdir(dirName)
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
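# A minimal usage sketch for UserCredential (hypothetical values; in the App these come
# from the registration form):
#
#   credentials = UserCredential()
#   credentials.createUser("Jane", "Doe", "jane@example.com", 9876543210, "jane", "secret")
#   print(credentials.getUser("jane"))
#   credentials.closeDB()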
# Creating a class for doing all the CRUD operations in the 'userDetails' table on the User's Database
class UserDetail():
"""
Class for Creating a New Database,
which is used to separate all user data separate from each other,
and also from the main InVoice App.
"""
# Assigning a name for the Directory where binary files are stored
cache_directory = "./.__appcache__"
def __init__(self, userId):
# Assigning the UserId to a variable so it can be accessed by other functions inside this Class
self.userId = userId
# Assigning 'userDetails' table name into a variable
self.tableName = "userDetails"
# Connecting to the User's Database, if not exists creating a new database for each user
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'userDetails' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
userId CHAR NOT NULL PRIMARY KEY,
email CHAR NOT NULL UNIQUE,
firstName TEXT NOT NULL,
lastName TEXT NOT NULL,
profile BLOB NOT NULL
)""")
'''CONVERT IMAGE TO BINARY DATA'''
# Function for converting Binary file into a BLOB data
def convertToBinary(self, imageFile="Images/profile.png"):
# Opening the 'image' file in binary format for reading the binary data
with open(imageFile, 'rb') as file:
# Assigning the read binary data to variable and closing the file
blobData = file.read()
file.close()
return blobData
'''CONVERT BINARY DATA TO IMAGE'''
# Function for converting BLOB data into Binary file and saving it to a local folder
def convertToImage(self, blobData, fileName):
# Generating the path to file from the passed UserID
pathToImageFile = f"{self.cache_directory}/{fileName}_image.png"
count = 0
while (os.path.exists(pathToImageFile)):
count += 1
# Generating the new image path if generated path exists
pathToImageFile = f"{self.cache_directory}/{fileName}_image{count}.png"
# Opening a new 'image' file in binary format for writing the binary data
with open(pathToImageFile, 'wb') as file:
            # Writing the binary data queried from the database and closing the file
file.write(blobData)
file.close()
return pathToImageFile
'''CREATE PROFILE'''
# Function for creating new record in the 'userDetails' table
def create(self, userId, email, firstName, lastName):
# Converting binary file to BLOB data
image = self.convertToBinary()
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:userId, :email, :firstName, :lastName, :profile)",
{
"userId": userId,
"email": email,
"firstName": firstName,
"lastName": lastName,
"profile": image
})
'''GET PROFILE'''
# Function for querying the 'userDetails' table for specific record
def get(self):
# Creating a '.__appcache__' Directory for storing the binary files
UserCredential.createHiddenDIR(self, dirName=self.cache_directory)
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Querying the 'userDetails' table, to pick the binary data from matched record
self.co.execute(
f"SELECT * FROM {self.tableName} WHERE userId = '{self.userId}'")
        # Assigning the queried record into a variable to use further
record = self.co.fetchone()
# Converting the queried Blob data into a '.png' file in a specified path
pathToProfile = self.convertToImage(record[4], record[0])
# Assigning the queried list as a dictionary and passing it as a response
userRecords = {
"userId": str(record[0]),
"email": str(record[1]),
"fullName": str(record[2]) + " " + str(record[3]),
"profile": str(pathToProfile)
}
return userRecords
'''UPDATE PROFILE PICTURE'''
# Function for updating the User's profile picture in the 'userDetails' table
def updateProfilePicture(self, pathToNewPicture):
# Converting binary file to BLOB data
image = self.convertToBinary(pathToNewPicture)
# Updating the existing record in the table which matches to UserId
self.co.execute(f"UPDATE {self.tableName} SET profile = :profile WHERE userId = :userId",
{
"profile": image,
"userId": self.userId
})
# Deleting the temporary image file passed by User
os.remove(pathToNewPicture)
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'userPreferences' table on the inVoice User's Database
class UserPreference():
"""
Class for creating and updating the User's Preference for working with the App,
which includes the default for Currency, tax percentage, customer message and due date.
"""
def __init__(self, userId):
# Assigning 'userPreferences' table name into a variable
self.tableName = "userPreferences"
# Connecting to the App's Database, if not exists creating a new database
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'userPreferences' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
currencyCode TEXT NOT NULL UNIQUE,
dueDate TEXT NOT NULL,
taxInPercentage INTEGER NOT NULL
)""")
'''CREATE USER DEFAULT PREFERENCES'''
# Function for creating new record in the 'userPreferences' table
def createDefault(self):
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:currencyCode, :dueDate, :taxInPercentage)",
{
"currencyCode": "INR",
"dueDate": "Nil",
"taxInPercentage": 0
})
'''UPDATE USER DEFAULT PREFERENCES'''
# Function for updating the existing record(user preference) in the 'userPreferences' table
def updateDefault(self, currencyCode, dueDate, taxInPercentage):
# Updating the existing record in the table which matches the uniqueID
self.co.execute(f"UPDATE {self.tableName} SET currencyCode = :currencyCode, dueDate = :dueDate, taxInPercentage = :taxInPercentage WHERE oid = :uniqueID",
{
"currencyCode": currencyCode,
"dueDate": dueDate,
"taxInPercentage": taxInPercentage,
"uniqueID": "1"
})
'''GET USER PREFERENCES'''
# Function for getting all the records from the 'userPreferences' table
def all(self):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
        # Querying the 'userPreferences' table, and picking all the available records
        self.co.execute(f"SELECT * FROM {self.tableName}")
        # Assigning the queried records into a variable to return as response
records = self.co.fetchone()
return records
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'currencyRates' table on the inVoice User's Database
class CurrencyRate():
"""
Class for creating and updating the current Currency Exchange Rate to INR,
and the json data used in this process is collected from (http://www.floatrates.com).
"""
# Assigning the website URL into a variable which contains the JSON data for INR
site_URL = "http://www.floatrates.com/daily/inr.json"
# Defining a List of Currency Codes and Symbols for specific Countries as a python tuple list
CODES_and_SYMBOLS = [("aud", "$"),
("cad", "$"),
("chf", "chf"),
("eur", "€"),
("gbp", "£"),
("inr", "₹"),
("jpy", "¥"),
("nzd", "$"),
("usd", "$"),
("zar", "R")]
# Defining the App's default List for Currency Exchange Rate
default_currencyRates = [("AUD", "Australian Dollar", "$", 0.02, 55.68),
("CAD", "Canadian Dollar", "$", 0.02, 61.78),
("CHF", "Swiss Franc", "chf", 0.01, 83.49),
("EUR", "Euro", "€", 0.01, 80.73),
("GBP", "U.K. Pound Sterling", "£", 0.01, 96.06),
("INR", "Indian Rupee", "₹", 1.0, 1.0),
("JPY", "Japanese Yen", "¥", 1.70, 0.59),
("NZD", "New Zealand Dollar", "$", 0.02, 50.41),
("USD", "U.S. Dollar", "$", 0.01, 79.34),
("ZAR", "South African Rand", "R", 0.21, 4.82)]
def __init__(self, userId):
# Assigning 'currencyRates' table name into a variable
self.tableName = "currencyRates"
# Connecting to the App's Database, if not exists creating a new database
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'currencyRates' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
currencyCode TEXT NOT NULL PRIMARY KEY,
currencyName TEXT NOT NULL,
symbol TEXT NOT NULL,
INRvalue REAL NOT NULL,
exchangeValue REAL NOT NULL
)""")
'''UPDATE CURRENCY EXCHANGE RATE'''
# Function for creating new record in the 'currencyRates' table
def update(self, status="update"):
try:
# Opening the website URL
weburl = urlopen(self.site_URL, timeout=1)
resultCode = weburl.getcode()
# Creating a List to hold the Currency Exchange Rate for each Country
currencyRates = []
# Checking the result code of the website is 200
if (resultCode == 200):
# Reading the data on the website URL and assigning it into a variable
data = weburl.read()
# Parsing the JSON data available on the website and storing it on a variable
jsonData = json.loads(data)
# Iterating through each Country's currency code
for code in self.CODES_and_SYMBOLS:
                # Handling the case where the Country's currency code is missing from the JSON data (e.g. the base currency INR)
if (jsonData.get(code[0]) is None):
# Creating a tuple with the values for the each Country's Currency (Code, name, rate, exchangerate)
item = ("INR",
"Indian Rupee",
code[1],
"%.2f" % 1,
"%.2f" % 1)
# Adding the tuple into the Currency Exchange Rate List as a item
currencyRates.append(item)
else:
# Creating a tuple with the values for the each Country's Currency (Code, name, rate, exchangerate)
item = (jsonData.get(code[0])["code"],
jsonData.get(code[0])["name"],
code[1],
"%.2f" % jsonData.get(code[0])["rate"],
"%.2f" % jsonData.get(code[0])["inverseRate"])
# Adding the tuple into the Currency Exchange Rate List as a item
currencyRates.append(item)
except:
            # Querying the 'currencyRates' table, and picking all the available records(Currencies) if it has any
if (self.all() != []):
currencyRates = self.all()
else:
# Assigning the App's default Currency Exchange Rate List
currencyRates = self.default_currencyRates
finally:
# Iterating through each Country's currency code in the Currency Exchange Rate List
for item in currencyRates:
# Confirming the status is 'update'
if (status == "update"):
# Updating the existing record in the table which matches to Currency Code
self.co.execute(f"UPDATE {self.tableName} SET INRvalue = :INRvalue, exchangeValue = :exchangeValue WHERE currencyCode = :uniqueID",
{
"INRvalue": float(item[3]),
"exchangeValue": float(item[4]),
"uniqueID": item[0]
})
elif (status == "create"):
# Inserting a new record to the table for every item if status is 'create'
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:currencyCode, :currencyName, :symbol, :INRvalue, :exchangeValue)",
{
"currencyCode": item[0],
"currencyName": item[1],
"symbol": item[2],
"INRvalue": float(item[3]),
"exchangeValue": float(item[4])
})
'''GET ALL CURRENCY EXCHANGE RATES'''
# Function for getting all the records(Currencies) from the 'currencyRates' table
def all(self):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Querying the 'currencyRates' table, and picking all the available records(Currencies)
self.co.execute(f"SELECT * FROM {self.tableName}")
        # Assigning the queried records(Currencies) into a variable to return as response
records = self.co.fetchall()
return records
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
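# A minimal usage sketch for CurrencyRate (hypothetical user id; status="create" seeds
# the table once, after which the default status="update" refreshes the rates, falling
# back to the stored or built-in values when the site is unreachable):
#
#   rates = CurrencyRate("jane")
#   rates.update(status="create")
#   print(rates.all())
#   rates.closeDB()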
# Creating a class for doing all the CRUD operations in the 'dueDates' table on the inVoice User's Database
class DueDate():
"""
Class for creating and updating the due dates,
which is in the form of days count.
"""
# Defining the App's default List for Due Dates dropdown
default_dueDates = ["Nil", "7", "14", "28", "56", "84"]
def __init__(self, userId):
# Assigning 'dueDates' table name into a variable
self.tableName = "dueDates"
# Connecting to the App's Database, if not exists creating a new database
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'dueDates' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
dayCount TEXT NOT NULL
)""")
'''ADD DUE DATE'''
# Function for adding new record in the 'dueDates' table
def add(self, days=None):
if (days is None):
for day in self.default_dueDates:
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:dayCount)",
{
"dayCount": str(day)
})
elif (days is not None):
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:dayCount)",
{
"dayCount": str(days)
})
'''GET ALL DUE DATE'''
# Function for getting all the records from the 'dueDates' table
def all(self):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Querying the 'dueDates' table, and picking all the available records
self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        # Assigning the queried records into a variable to return as response
records = self.co.fetchall()
return records
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'inVoiceDetails' table on the inVoice User's Database
class InVoiceDetail():
"""
Class for Creating a New Table in the User's Database,
which contains all the Invoices created by the User for their clients.
Each Invoice holds the client details, date of purchase, due date , total amount , inVoice number which is unique,
and also has some additional data.
"""
def __init__(self, userId):
# Assigning the UserId to a variable so it can be accessed by other functions inside this Class
self.userId = userId
# Assigning 'inVoiceDetails' table name into a variable
self.tableName = "inVoiceDetails"
# Connecting to the User's Database, if not exists creating a new database for each user
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'inVoiceDetails' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
invoiceNumber CHAR NOT NULL PRIMARY KEY,
clientName TEXT NOT NULL,
currencyCode TEXT NOT NULL,
currencySymbol TEXT NOT NULL,
datePurchased DATE NOT NULL,
dueDate DATE NOT NULL,
productsPurchased BLOB NOT NULL,
customerMessage TEXT,
taxInPercentage INTEGER,
subTotal REAL NOT NULL,
calculatedTAX REAL NOT NULL,
totalAmount REAL NOT NULL,
balanceAmount REAL NOT NULL,
paymentStatus TEXT
)""")
'''CREATE INVOICE'''
# Function for creating new record in the 'inVoiceDetails' table
def create(self, inVoiceNo, clientName, currencyCode, currencySymbol, purchaseDate, dueDate, productsPurchased, customerMessage, taxInPercentage, subTotal, calculatedTAX, totalAmount, balanceAmount):
# Converting a list of tuples to json data
purchasedProducts = json.dumps(productsPurchased)
# Setting the value of payment status by checking the Balance Amount
paymentStatus = "Paid" if (int(balanceAmount) == 0) else "Pending"
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:invoiceNumber, :clientName, :currencyCode, :currencySymbol, :datePurchased, :dueDate, :productsPurchased, :customerMessage, :taxInPercentage, :subTotal, :calculatedTAX, :totalAmount, :balanceAmount, :paymentStatus)",
{
"invoiceNumber": inVoiceNo,
"clientName": clientName,
"currencyCode": currencyCode,
"currencySymbol": currencySymbol,
"datePurchased": purchaseDate,
"dueDate": dueDate,
"productsPurchased": purchasedProducts,
"customerMessage": customerMessage,
"taxInPercentage": taxInPercentage,
"subTotal": subTotal,
"calculatedTAX": calculatedTAX,
"totalAmount": totalAmount,
"balanceAmount": balanceAmount,
"paymentStatus": paymentStatus
})
'''GET INVOICE'''
# Function for getting specific record(inVoice) from the 'inVoiceDetails' table
def get(self, inVoiceId):
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Querying the 'inVoiceDetails' table, and picking the matched record(inVoice)
self.co.execute(
f"SELECT * FROM {self.tableName} WHERE invoiceNumber = '{inVoiceId}'")
        # Assigning the queried record(inVoice) into a variable to return as response
records = self.co.fetchone()
        # Creating an empty Python list to store the queried record
new_records = []
        # enumerate is used because records.index(item) returns the first matching value,
        # which misfires when two fields happen to hold equal values
        for index, item in enumerate(records):
            if (index != 6):
                # Adding data to the empty list
                new_records.append(item)
            else:
                # Converting the BLOB data to a python list and adding it to the empty list
                new_records.append(json.loads(item))
return new_records
'''UPDATE INVOICE'''
# Function for updating specific record(inVoice) in the 'inVoiceDetails' table
def update(self, inVoiceId, clientName, currencyCode, currencySymbol, purchaseDate, dueDate, productsPurchased, customerMessage, taxInPercentage, subTotal, calculatedTAX, totalAmount, balanceAmount):
# Converting a list of tuples to json data
purchasedProducts = json.dumps(productsPurchased)
# Setting the value of payment status by checking the Balance Amount
paymentStatus = "Paid" if (int(balanceAmount) == 0) else "Pending"
# Updating a specific record(inVoice) in the 'inVoiceDetails' table using its invoiceNumber
self.co.execute(f"UPDATE {self.tableName} SET clientName = :clientName, currencyCode = :currencyCode, currencySymbol = :currencySymbol, datePurchased = :datePurchased, dueDate = :dueDate, productsPurchased = :productsPurchased, customerMessage = :customerMessage, taxInPercentage = :taxInPercentage, subTotal = :subTotal, calculatedTAX = :calculatedTAX, totalAmount = :totalAmount, balanceAmount = :balanceAmount, paymentStatus = :paymentStatus WHERE invoiceNumber = :inVoiceId",
{
"clientName": clientName,
"currencyCode": currencyCode,
"currencySymbol": currencySymbol,
"datePurchased": purchaseDate,
"dueDate": dueDate,
"productsPurchased": purchasedProducts,
"customerMessage": customerMessage,
"taxInPercentage": taxInPercentage,
"subTotal": subTotal,
"calculatedTAX": calculatedTAX,
"totalAmount": totalAmount,
"balanceAmount": balanceAmount,
"paymentStatus": paymentStatus,
"inVoiceId": inVoiceId
})
'''GET ALL INVOICES'''
# Function for getting all the records(invoices) from the 'inVoiceDetails' table
def all(self):
# Querying the 'inVoiceDetails' table, and picking all the available records(invoices)
self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        # Assigning the queried records(invoices) into a variable to return as response
records = self.co.fetchall()
return records
'''DELETE INVOICE DETAILS'''
# Function for deleting a specific record(inVoice) from the 'inVoiceDetails' table
def delete(self, userId):
# Deleting a specific record(inVoice) from the 'inVoiceDetails' table which matches the record(inVoice)
self.co.execute(
f"DELETE FROM {self.tableName} WHERE invoiceNumber = '{userId}'")
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'clientDetails' table on the inVoice User's Database
class ClientDetail():
"""
Class for Creating a New Table in the User's Database,
which contains all the Clients data created by the User for using it later on creating InVoice.
Each record has the individual client details.
"""
def __init__(self, userId):
# Assigning the UserId to a variable so it can be accessed by other functions inside this Class
self.userId = userId
# Assigning 'clientDetails' table name into a variable
self.tableName = "clientDetails"
# Connecting to the User's Database, if not exists creating a new database for each user
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'clientDetails' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
clientName TEXT NOT NULL,
emailId CHAR NOT NULL,
contactNumber INTEGER NOT NULL,
addressLine1 TEXT NOT NULL,
addressLine2 TEXT,
addressLine3 TEXT,
cityName TEXT NOT NULL,
pinCode TEXT NOT NULL,
customerNote TEXT
)""")
'''ADD CLIENT'''
# Function for creating new record in the 'clientDetails' table
def add(self, clientName, emailId, contactNumber, addressLine1, addressLine2, addressLine3, cityName, pinCode, customerNote):
# Inserting a new record to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:clientName, :emailId, :contactNumber, :addressLine1, :addressLine2, :addressLine3, :cityName, :pinCode, :customerNote)",
{
"clientName": clientName,
"emailId": emailId,
"contactNumber": contactNumber,
"addressLine1": addressLine1,
"addressLine2": addressLine2,
"addressLine3": addressLine3,
"cityName": cityName,
"pinCode": pinCode,
"customerNote": customerNote
})
'''GET CLIENT'''
# Function for getting specific record(client) from the 'clientDetails' table
def get(self, clientId):
# Querying the 'clientDetails' table, and picking the matched record(client)
self.co.execute(
f"SELECT * FROM {self.tableName} WHERE oid = '{clientId}'")
        # Assigning the queried record(client) into a variable to return as response
records = self.co.fetchone()
return records
'''UPDATE CLIENT'''
# Function for updating specific record(client) in the 'clientDetails' table
def update(self, clientId, clientName, emailId, contactNumber, addressLine1, addressLine2, addressLine3, cityName, pinCode, customerNote):
# Updating a specific record(client) in the 'clientDetails' table using its oid
self.co.execute(f"UPDATE {self.tableName} SET clientName = :clientName, emailId = :emailId, contactNumber = :contactNumber, addressLine1 = :addressLine1, addressLine2 = :addressLine2, addressLine3 = :addressLine3, cityName = :cityName, pinCode = :pinCode, customerNote = :customerNote WHERE oid = :uniqueId",
{
"clientName": clientName,
"emailId": emailId,
"contactNumber": contactNumber,
"addressLine1": addressLine1,
"addressLine2": addressLine2,
"addressLine3": addressLine3,
"cityName": cityName,
"pinCode": pinCode,
"customerNote": customerNote,
"uniqueId": clientId
})
'''GET ALL CLIENTS'''
# Function for getting all the records(clients) from the 'clientDetails' table
def all(self):
# Querying the 'clientDetails' table, and picking all the available records(clients)
self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        # Assigning the queried records(clients) into a variable to return as response
records = self.co.fetchall()
return records
'''DELETE CLIENT DETAILS'''
# Function for deleting a specific record(client) from the 'clientDetails' table
def delete(self, userId):
# Deleting a specific record(client) from the 'clientDetails' table which matches the record(client)
self.co.execute(
f"DELETE FROM {self.tableName} WHERE oid = '{userId}'")
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Creating a class for doing all the CRUD operations in the 'productDetails' table on the inVoice User's Database
class ProductDetail():
"""
Class for Creating a New Table in the User's Database,
which contains all the Products created by the User for using it later on creating InVoice.
"""
def __init__(self, userId):
# Assigning the UserId to a variable so it can be accessed by other functions inside this Class
self.userId = userId
# Assigning 'productDetails' table name into a variable
self.tableName = "productDetails"
# Connecting to the User's Database, if not exists creating a new database for each user
self.inVoice_DB = sqlite3.connect(
f"{UserCredential.DB_directory}/{userId}.db")
# Creating a cursor
self.co = self.inVoice_DB.cursor()
# Creating a 'productDetails' table if not exists
self.co.execute(f"""CREATE TABLE IF NOT EXISTS {self.tableName} (
productName TEXT NOT NULL UNIQUE,
productMRP REAL,
quantity INTEGER NOT NULL,
purchaseRate REAL,
salesRate REAL NOT NULL,
reOrderQuantity INTEGER NOT NULL
)""")
'''ADD PRODUCT'''
# Function for creating new record (product) in the 'productDetails' table
def add(self, productName, productMRP, quantity, purchaseRate, salesRate, reOrderQuantity):
# Inserting a new record (product) to the table
self.co.execute(f"INSERT INTO {self.tableName} VALUES (:productName, :productMRP, :quantity, :purchaseRate, :salesRate, :reOrderQuantity)",
{
"productName": productName,
"productMRP": productMRP,
"quantity": quantity,
"purchaseRate": purchaseRate,
"salesRate": salesRate,
"reOrderQuantity": reOrderQuantity
})
'''UPDATE PRODUCT'''
# Function for updating specific record(product) in the 'productDetails' table
def update(self, productId, productName, productMRP, quantity, purchaseRate, salesRate, reOrderQuantity, getBy="oid"):
# Updating a specific record(product) in the 'productDetails' table using its oid
self.co.execute(f"UPDATE {self.tableName} SET productName = :productName, productMRP = :productMRP, quantity = :quantity, purchaseRate = :purchaseRate, salesRate = :salesRate, reOrderQuantity = :reOrderQuantity WHERE {getBy} = :uniqueId",
{
"productName": productName,
"productMRP": productMRP,
"quantity": quantity,
"purchaseRate": purchaseRate,
"salesRate": salesRate,
"reOrderQuantity": reOrderQuantity,
"uniqueId": productId
})
'''GET A PRODUCT BY NAME'''
# Function for getting a specified record(product) from the 'productDetails' table
def get(self, filterby, productName):
# Querying the 'productDetails' table, and picking the matched record(product)
self.co.execute(
f"SELECT * FROM {self.tableName} WHERE {filterby} = '{productName}'")
        # Assigning the queried record(product) into a variable to return as response
records = self.co.fetchone()
return records
'''GET ALL PRODUCTS'''
# Function for getting all the records(products) from the 'productDetails' table
def all(self):
# Querying the 'productDetails' table, and picking all the available records(products)
self.co.execute(f"SELECT *, oid FROM {self.tableName}")
        # Assigning the queried records(products) into a variable to return as response
records = self.co.fetchall()
return records
'''DELETE PRODUCT DETAILS'''
# Function for deleting a specific record(product) from the 'productDetails' table
def delete(self, userId):
# Deleting a specific record(product) from the 'productDetails' table which matches the record(product)
self.co.execute(
f"DELETE FROM {self.tableName} WHERE oid = '{userId}'")
'''CLOSE DB'''
# Function for Closing the connection to Database
def closeDB(self):
# Committing the changes to the database
self.inVoice_DB.commit()
# Closing the connection with the database
self.inVoice_DB.close()
# Function for creating the InVoice in '.docx' file format
def wordDocGenerator(inVoiceId, clientData, purchaseDate, dueDate, currencySymbol, productDetails, subTotal, calculatedTAX, totalAmount, balanceAmount, customerMessage):
    # Function for adding an empty line
def emptyLines(count):
for _ in range(count):
linespace_style = document.styles["Body Text"]
linespace_style.font.size = Pt(10)
document.add_paragraph(style=linespace_style).add_run("")
# >>
# Opening a new Word document for storing the InVoice details as human readable data
document = Document()
# -------------------------------
# Headings
# -------------------------------
# Creating the Main heading for the 'InVoice' document and Aligning it to the center
mainheading = document.add_heading()
mainheading.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Setting custom margins for the 'InVoice' document
sections = document.sections
section = sections[0]
section.top_margin = Inches(0.04)
section.bottom_margin = Inches(0.2)
section.left_margin = Inches(0.9)
section.right_margin = Inches(0.8)
# Adding the first content and styling of the Main heading
run = mainheading.add_run("in")
run.font.size = Pt(55)
run.font.name = "Magneto"
run.font.color.rgb = RGBColor(0x00, 0x00, 0x40)
# Adding the first content and styling of the Main heading
run = mainheading.add_run("Voice")
run.font.size = Pt(12)
run.font.name = "Matura MT Script Capitals"
run.font.color.rgb = RGBColor(0x46, 0x46, 0x46)
# Adding an empty line
emptyLines(1)
# -------------------------------
# InVoice-Id
# -------------------------------
# Creating the template for inVoice number
inVoiceId_container = document.add_paragraph("id: ")
inVoiceId_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
# Filling the inVoice number template with the inVoiceId's value passed through
inVoiceNumber = inVoiceId_container.add_run(f"{inVoiceId}")
inVoiceNumber.font.name = "Consolas"
inVoiceNumber.font.size = Pt(13)
inVoiceNumber.font.bold = True
# -------------------------------
# Client Details
# -------------------------------
# Creating the template for Client Details
clientdetails_container = document.add_paragraph("")
# Filling the Client Details template with the client's name, address and phone
client_name = clientdetails_container.add_run(
f"{clientData[0]}\n{clientData[3]}\n{clientData[4]}\n{clientData[6]}\n{clientData[5]}\n\n{clientData[2]}")
client_name.font.name = "Times New Roman"
# -------------------------------
# Due Date
# -------------------------------
# Creating the template for Due Date
duedate_container = document.add_paragraph("Due Date : ")
duedate_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
# Filling the Due Date template with the due date
due_date = duedate_container.add_run(f"{dueDate}")
due_date.font.size = Pt(13)
due_date.font.name = "Times New Roman"
# Adding an empty line
emptyLines(1)
# -------------------------------
# Product Details
# -------------------------------
# Creating a table for holding the product purchased details
product_table = document.add_table(1, 5)
# Creating the Table Header
heading_cells = product_table.rows[0].cells
# Populating the Table Header
heading_cells[0].text = "No"
heading_cells[1].text = "Description"
heading_cells[2].text = "Quantity"
heading_cells[3].text = f"Rate {currencySymbol}"
heading_cells[4].text = f"Amount {currencySymbol}"
# Aligning and Styling the names in Table Header
heading_cells[1].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
heading_cells[2].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
heading_cells[3].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
heading_cells[4].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
heading_cells[1].paragraphs[0].runs[0].font.bold = True
heading_cells[2].paragraphs[0].runs[0].font.bold = True
heading_cells[3].paragraphs[0].runs[0].font.bold = True
heading_cells[4].paragraphs[0].runs[0].font.bold = True
heading_cells[1].paragraphs[0].runs[0].font.size = Pt(13)
heading_cells[2].paragraphs[0].runs[0].font.size = Pt(13)
heading_cells[3].paragraphs[0].runs[0].font.size = Pt(13)
heading_cells[4].paragraphs[0].runs[0].font.size = Pt(13)
product_table.rows[0].height = Inches(0.6)
# Populating the product details inside the table
for detail in productDetails:
# Creating a new row for each product
cells = product_table.add_row().cells
# Filling the content for each field of the row
cells[0].text = str(detail[0])
cells[1].text = detail[1]
cells[2].text = str(detail[2])
cells[3].text = str(detail[3])
cells[4].text = str(detail[4])
# Aligning and Styling the each row
cells[1].width = Inches(2)
cells[2].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
cells[3].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
cells[4].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.RIGHT
# Adding an empty line
emptyLines(1)
# -------------------------------
# Sub Total
# -------------------------------
# Creating the template for Sub Total
subtotal_container = document.add_paragraph("Sub Total : ")
subtotal_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
# Filling the Sub Total template with the value of sub total
sub_total = subtotal_container.add_run(f"{subTotal}")
sub_total.font.size = Pt(14)
sub_total.font.name = "Times New Roman"
sub_total.font.bold = True
# -------------------------------
# Sales Tax
# -------------------------------
# Creating the template for Sales Tax
salestax_container = document.add_paragraph("Sales Tax : ")
salestax_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
# Filling the Sales Tax template with the value of sales tax
sales_tax = salestax_container.add_run(f"{calculatedTAX}")
sales_tax.font.size = Pt(13)
sales_tax.font.name = "Times New Roman"
sales_tax.font.bold = True
# Adding an empty line
emptyLines(1)
# -------------------------------
# Total Amount
# -------------------------------
# Creating the template for Total Amount
totalamount_container = document.add_paragraph("Total Amount : ")
totalamount_container.alignment = WD_ALIGN_PARAGRAPH.RIGHT
# Filling the Total Amount template with the value of total amount
total_amount = totalamount_container.add_run(
f"{totalAmount} {currencySymbol}")
total_amount.font.size = Pt(15)
total_amount.font.name = "Times New Roman"
total_amount.font.bold = True
# Adding an empty line
emptyLines(2)
# -------------------------------
# Customer Message
# -------------------------------
# Creating the template for Customer Message
customermsg_container = document.add_paragraph("")
customermsg_container.alignment = WD_ALIGN_PARAGRAPH.CENTER
# Filling the Customer Message template with the value of Customer Message
customer_msg = customermsg_container.add_run(f"~ {customerMessage} ~")
customer_msg.font.size = Pt(13)
customer_msg.font.name = "Times New Roman"
customer_msg.font.bold = True
# -------------------------------
# Balance Amount
# -------------------------------
# Adding an empty line
emptyLines(1)
# Creating the template for Balance Amount
balanceamount_container = document.add_paragraph("Balance Amount : ")
balanceamount_container.alignment = WD_ALIGN_PARAGRAPH.LEFT
# Setting the value of Balance Amount by checking the Balance Amount is zero
to_pay = "Nil" if (int(balanceAmount) ==
0) else f"{balanceAmount} {currencySymbol}"
# Filling the Balance Amount template with the value of balance amount
balance_amount = balanceamount_container.add_run(f"{to_pay}")
balance_amount.font.size = Pt(13)
balance_amount.font.name = "Times New Roman"
balance_amount.font.bold = True
# Setting the value of payment status by checking the Balance Amount
paymentStatus = "Paid" if (int(balanceAmount) == 0) else "Pending"
    # Setting the purchase date and payment status as the footer content
footerDate = section.footer.paragraphs[0]
footerDate.text = f"Dated On : {purchaseDate}\t\tPayment Status : {paymentStatus}"
footerDate.style = document.styles["Footer"]
# Generating the name of the document from the inVoice id
pathToDOCXFile = f"{inVoiceId}.docx"
count = 0
while (os.path.exists(pathToDOCXFile)):
count += 1
# Generating the new name of the document from the inVoice id
pathToDOCXFile = f"{inVoiceId}_{count}.docx"
# Saving the document as in the generated name from inVoice id
document.save(pathToDOCXFile)
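# A minimal call sketch for wordDocGenerator (hypothetical values; in the App these come
# from the 'clientDetails' and 'inVoiceDetails' tables, with clientData ordered like the
# 'clientDetails' columns and each productDetails row shaped as (No, Description,
# Quantity, Rate, Amount)):
#
#   wordDocGenerator("INV-0001",
#                    ("Client Name", "client@example.com", "9876543210", "12 Main Street",
#                     "Suburb", "", "Chennai", "600001", ""),
#                    "01-01-2023", "15-01-2023", "₹",
#                    [(1, "Widget", 2, 100.0, 200.0)],
#                    200.0, 10.0, 210.0, 0, "Thank you for your business")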
|
Kumara2mahe/in_Voice
|
inVoiceDB.py
|
inVoiceDB.py
|
py
| 50,993
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14349515929
|
import numpy as np
# import packages
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import re
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--image", required=True,
help="path to input image to be OCR'd")
ap.add_argument("-p", "--preprocess", type=str, default="thresh",
help="type of preprocessing to be done")
args = vars(ap.parse_args())
# load the example image (the grayscale conversion happens after sharpening, below)
image = cv2.imread(args["image"])
gray = image
#gray = gray.resize((500, 500 * height / width), Image.ANTIALIAS)
if args["preprocess"] == "thresh":
gray = cv2.threshold(gray, 0, 255,
cv2.THRESH_TOZERO)[1]
elif args["preprocess"] == "blur":
gray = cv2.medianBlur(gray, 3)
kernel = np.array([[0,-3,-3],
[-1, 14,-1],
[-2,1,-2]])
gray = cv2.filter2D(gray, -1, kernel)
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
#gray = cv2.threshold(gray, 0, 255,
# cv2.THRESH_TOZERO | cv2.THRESH_OTSU)[1]
filename = "{}.jpg".format(os.getpid())
cv2.imwrite(filename, gray)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open(filename), lang="deu")
os.remove(filename)
text = re.sub("[^a-zA-Z]+", " ", text)
print(text)
# show the output images
#cv2.imshow("Image", image)
#cv2.imshow("Output", gray)
#cv2.waitKey(0)
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials
endpoint = os.environ['ACCOUNT_ENDPOINT']
key = os.environ['ACCOUNT_KEY']
# Set credentials
credentials = CognitiveServicesCredentials(key)
# Create client
client = ComputerVisionClient(endpoint, credentials)
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Bündnis_90_-_Die_Grünen_Logo.svg/2560px-Bündnis_90_-_Die_Grünen_Logo.svg.png"
image_analysis = client.analyze_image(url,visual_features=[VisualFeatureTypes.tags])
for tag in image_analysis.tags:
print(tag)
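# Running this script needs Tesseract installed with the German language pack (the OCR
# call uses lang="deu") plus the ACCOUNT_ENDPOINT and ACCOUNT_KEY environment variables
# for the Azure Computer Vision client, e.g. (hypothetical values):
#
#   ACCOUNT_ENDPOINT=https://my-resource.cognitiveservices.azure.com/ \
#   ACCOUNT_KEY=<key> python parse.py --image poster.jpg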
|
guessthepartei/App
|
magic/parse.py
|
parse.py
|
py
| 2,186
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6547068714
|
"""
Tests for Randomized Reconstruction Commands
"""
import unittest
import requests
import sys
import os
import importlib
from pathlib import Path
import json
# Add the client folder to sys.path
CLIENT_DIR = os.path.join(os.path.dirname(__file__), "..", "client")
if CLIENT_DIR not in sys.path:
sys.path.append(CLIENT_DIR)
from fusion360gym_client import Fusion360GymClient
HOST_NAME = "127.0.0.1"
PORT_NUMBER = 8080
class TestFusion360GymRandomizedReconstruction(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = Fusion360GymClient(f"http://{HOST_NAME}:{PORT_NUMBER}")
current_dir = Path(__file__).parent
test_config_file = current_dir / "test_config.json"
if not test_config_file.exists():
print("Error: test_config.json file not found in the test directory")
with open(test_config_file, encoding="utf8") as f:
test_config = json.load(f)
dataset_dir = Path(test_config["dataset_dir"])
if not dataset_dir.exists():
print("Error: dataset_dir does not exist")
cls.data_dir = dataset_dir
cls.void_data_dir = dataset_dir.parent / "void"
cls.split_file = dataset_dir.parent / "train_test.json"
cls.void_split_file = dataset_dir.parent / "void.json"
cls.distributions_json = dataset_dir.parent / "d7_distributions.json"
cls.distributions_training_only_json = dataset_dir.parent / "d7_training_distributions.json"
def test_sample_design(self):
# Sample the whole dataset
r = self.client.sample_design(self.data_dir, filter=False)
# Sample the training data
r = self.client.sample_design(self.data_dir, filter=True, split_file=self.split_file)
def test_sample_design_invalid_data_dir(self):
# Sample from a non-existent directory
r = self.client.sample_design(self.void_data_dir, filter=False)
# Sample from a non-existent directory with the split file
r = self.client.sample_design(self.void_data_dir, filter=True, split_file=self.split_file)
# Sample from a non-existent string
r = self.client.sample_design("random_data_dir", filter=False)
def test_sample_design_invalid_split_file(self):
# the split file is void
r = self.client.sample_design(self.data_dir, filter=True, split_file=self.void_split_file)
def test_get_distributions_from_dataset(self):
import json
# distributions of the whole dataset
r = self.client.get_distributions_from_dataset(self.data_dir, filter=False)
# with open('d7_distributions.json', 'w') as outfile:
# json.dump(r, outfile)
# distributions of the training dataset
r = self.client.get_distributions_from_dataset(self.data_dir, filter=True, split_file=self.split_file)
# with open('d7_training_distributions.json', 'w') as outfile:
# json.dump(r, outfile)
def test_get_distributions_from_json(self):
# distributions of the whole dataset
r = self.client.get_distributions_from_json(self.distributions_json)
# distributions of the training dataset
r = self.client.get_distributions_from_json(self.distributions_training_only_json)
# invalid input file
r = self.client.get_distributions_from_json("void")
def test_distribution_sampling(self):
# test invalid distributions
distributions = {"invalid": "testing"}
r = self.client.distribution_sampling(distributions)
# sample all parameters
distributions = self.client.get_distributions_from_json(self.distributions_training_only_json)
r = self.client.distribution_sampling(distributions)
# test invalid parameters
r = self.client.distribution_sampling(distributions, ["invalid"])
# sample a list of selected parameters
r = self.client.distribution_sampling(distributions, ["num_faces", "num_bodies"])
def test_sample_sketch(self):
json_data, _ = self.client.sample_design(self.data_dir, filter=True, split_file=self.split_file)
        # test invalid sampling type
r = self.client.sample_sketch(json_data, "invalid")
# random sampling
r = self.client.sample_sketch(json_data, sampling_type = "random")
# deterministic sampling
r = self.client.sample_sketch(json_data, sampling_type = "deterministic")
# distributive sampling
distributions = self.client.get_distributions_from_json(self.distributions_training_only_json)
r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=distributions["sketch_areas"])
# test invalid area distribution
r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=["invalid"])
def test_sample_profiles(self):
json_data, _ = self.client.sample_design(self.data_dir, filter=True, split_file=self.split_file)
sketch_data = self.client.sample_sketch(json_data, sampling_type = "random")
# test invalid sketch data
r = self.client.sample_profiles({"data":"invalid"}, max_number_profiles = 1, sampling_type = "random")
# test invalid max number of profiles
r = self.client.sample_profiles(sketch_data, max_number_profiles = -1, sampling_type = "random")
# random sampling
r = self.client.sample_profiles(sketch_data, max_number_profiles = 2, sampling_type = "random")
# deterministic sampling
r = self.client.sample_profiles(sketch_data, max_number_profiles = 2, sampling_type = "deterministic")
# distributive sampling
distributions = self.client.get_distributions_from_json(self.distributions_training_only_json)
r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=distributions["profile_areas"])
# test invalid area distribution
r = self.client.sample_sketch(json_data, sampling_type = "distributive", area_distribution=["invalid"])
if __name__ == "__main__":
unittest.main()
|
albertotono/Fusion360GalleryDataset
|
tools/fusion360gym/test/test_fusion360gym_randomized_reconstruction.py
|
test_fusion360gym_randomized_reconstruction.py
|
py
| 6,122
|
python
|
en
|
code
| null |
github-code
|
6
|
31019892466
|
import json
import re
import os
from bs4 import BeautifulSoup
import io
import html2text
#import transformationScript
import datetime
#from pprint import pprint
class Word:
content = ""
tag = ""
# The class "constructor" - It's actually an initializer
def __init__(self, content, tag):
self.content = content
self.tag = tag
class Component:
distance = 20
startPosition = -1
position = -1
def setDistance(self, distance):
self.distance = distance
self.startPosition = -1
self.position = -1
class Premise(Component):
words = []
# The class "constructor" - It's actually an initializer
def __init__(self, words):
self.words = words
def getText(self):
words = []
for word in self.words:
words.append(word.content)
return ' '.join(words)
def getTags(self):
words = []
for word in self.words:
words.append(word.tag)
return ' '.join(words)
class Claim(Component):
words = []
# The class "constructor" - It's actually an initializer
def __init__(self, words):
self.words = words
def getText(self):
words = []
for word in self.words:
words.append(word.content)
return ' '.join(words)
def getTags(self):
words = []
for word in self.words:
words.append(word.tag)
return ' '.join(words)
class Translator:
contents = {}
def __init__(self):
self.contents = {}
def addPair(self, htmlFile, jsonfile):
self.contents[jsonfile] = htmlFile
def createAssociation(self, nodeSet):
fileName = "corpusInput/json/" + nodeSet
file = open(fileName, "r")
contents = file.read()
elements = json.loads(contents)
for node in elements['nodes']:
if 'http' in node['text']:
link = re.search("(?P<url>https?://[^\s]+)", node['text']).group("url")
link = re.sub('http://web.fe.up.pt/~ei11124/argmine_news/', '', link)
link = link[:-1]
self.addPair(link,nodeSet)
break
def createAssociations(self):
fileList = os.listdir("corpusInput/json")
for file in fileList:
self.createAssociation(file)
class TextDumper:
_nArgTag = "(O,|)"
words = []
file = ""
def __init__(self, htmlFile):
self.file = "corpusInput/html/" + htmlFile + '.html'
self.words = []
def getText(self):
words = []
for word in self.words:
words.append(word.content)
return ' '.join(words)
def stripHtml(self):
with io.open(self.file, 'r', encoding='utf8') as f:
contents = f.read()
plainText = html2text.html2text(contents)
sentences = plainText.split('\n')
maxSize = sentenceNumber = chosen = 0
for sentence in sentences:
size = len(sentence)
            if(size > maxSize):
                maxSize = size
                chosen = sentenceNumber
sentenceNumber += 1
sentences[chosen] = re.sub(r'[.]+(?![0-9])', r' .', sentences[chosen])
sentences[chosen] = re.sub(r'[:]+(?![0-9])', r' :', sentences[chosen])
sentences[chosen] = re.sub(r'[,]+(?![0-9])', r' ,', sentences[chosen])
sentences[chosen] = re.sub(r'[;]+(?![0-9])', r' ;', sentences[chosen])
sentences[chosen] = re.sub(r'[?]+(?![0-9])', r' ?', sentences[chosen])
sentences[chosen] = re.sub(r'[!]+(?![0-9])', r' !', sentences[chosen])
sentences[chosen] = re.sub(r'[…]+(?![0-9])', r' …', sentences[chosen])
sentences[chosen] = re.sub(r'[“]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[”]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'["]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[‘]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[’]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[(]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[)]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[\']+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[`]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[`]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[[]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[]]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[«]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[»]+(?![0-9])', r'', sentences[chosen])
sentences[chosen] = re.sub(r'[**]+(?![0-9])', r'', sentences[chosen])
print(sentences[chosen])
return sentences[chosen]
def wordifyText(self):
text = self.stripHtml()
originalWords = text.split(' ')
for word in originalWords:
'''if(word == '.'):
taggedWord = Word(word, '.')
self.words.append(taggedWord)
elif(word != ''):
taggedWord = Word(word, self._nArgTag)
self.words.append(taggedWord)'''
if (word != ''):
taggedWord = Word(word, self._nArgTag)
self.words.append(taggedWord)
class claimsAndPremises:
claims = []
premises = []
premisesToClaims = {}
file = ""
def __init__(self, jsonFile):
self.file = "corpusInput/json/" + jsonFile + '.json'
self.claims = []
self.premises = []
self.premisesToClaims = {}
def removeHttp(self, elements):
for node in elements['nodes']:
if 'http' in node['text']:
elements['nodes'].remove(node)
return elements
def removeInferences(self, elements):
for node in elements['nodes']:
if 'Default Inference' in node['text']:
elements['nodes'].remove(node)
return elements
def collapseEdges(self, edges, nodes):
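        # Collapse two-hop paths (premise node -> intermediate node, e.g. an
        # inference node -> claim node) into single {premiseID: claimID} pairs,
        # skipping pairs whose endpoint texts are identical.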
collapsedEdges = []
for originEdge in edges:
for destinationEdge in edges:
if ((destinationEdge['fromID'] == originEdge['toID']) & (self.getNodeText(nodes,originEdge['fromID']) != self.getNodeText(nodes,destinationEdge['toID']))):
edge = {originEdge['fromID']:destinationEdge['toID']}
collapsedEdges.append(edge)
#collapsedEdges[originEdge['fromID']] = destinationEdge['toID']
#print(collapsedEdges)
return collapsedEdges
def getNodeText(self, nodes, nodeId):
nodeText = ''
for node in nodes:
if (node['nodeID'] == nodeId):
nodeText = node['text']
nodeText = re.sub(r'[.]+(?![0-9])', r' .', nodeText)
nodeText = re.sub(r'[:]+(?![0-9])', r' :', nodeText)
nodeText = re.sub(r'[,]+(?![0-9])', r' ,', nodeText)
nodeText = re.sub(r'[;]+(?![0-9])', r' ;', nodeText)
nodeText = re.sub(r'[?]+(?![0-9])', r' ?', nodeText)
nodeText = re.sub(r'[!]+(?![0-9])', r' !', nodeText)
nodeText = re.sub(r'[…]+(?![0-9])', r' …', nodeText)
nodeText = re.sub(r'[“]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[”]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'["]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[‘]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[’]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[(]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[)]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[\']+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[`]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[`]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[[]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[]]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[«]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[»]+(?![0-9])', r'', nodeText)
nodeText = re.sub(r'[**]+(?![0-9])', r'', nodeText)
return nodeText
def tagClaimOrPremise(self, words, type):
if (type == 'premise'):
distance = '1'
else:
distance = '|'
taggedSentence = []
for wordIndex in range(0, len(words)):
word = words[wordIndex]
if (wordIndex == 0):
#tag = '(B,' + type + ',' + distance + ')'
#tag = '(B,' + type + ')'
tag = '(I,' + type + ')'
elif ((word == '.') or (word == ':') or (word == ';') or (word == '?') or (word == '!')):
#tag = '(O,|,|)'
tag = '(O,|)'
#tag = '.'
else:
#tag = '(I,' + type + ',' + distance + ')'
tag = '(I,' + type + ')'
taggedWord = Word(word,tag)
taggedSentence.append(taggedWord)
return taggedSentence
def isIntermediatePremise(self, claim, connections):
isIntermediate = False
for connection in connections:
if next(iter(connection)) == claim:
isIntermediate = True
return isIntermediate
def getPremisesAndClaims(self):
file = open(self.file, "r")
contents = file.read()
elements = self.removeHttp(json.loads(contents))
#elements = self.removeInferences(elements)
connections = self.collapseEdges(elements['edges'], elements['nodes'])
#print(self.file)
#print(connections)
nodes = elements['nodes']
for connection in connections:
claim = self.getNodeText(nodes, connection[next(iter(connection))])
claimWords = claim.split()
taggedClaim = Claim(self.tagClaimOrPremise(claimWords, 'claim'))
self.claims.append(taggedClaim)
premise = self.getNodeText(nodes, next(iter(connection)))
premiseWords = premise.split()
taggedPremise = Premise(self.tagClaimOrPremise(premiseWords, 'premise'))
self.premises.append(taggedPremise)
#print(taggedPremise.getText())
self.premisesToClaims[premise] = claim
class claimsReplacer:
processedText = []
originalText = []
existingClaimsAndPremises = []
def __init__(self, originalText, existingClaimsAndPremises):
self.originalText = originalText
self.existingClaimsAndPremises = existingClaimsAndPremises
self.processedText = originalText
def getOriginalText(self):
words = []
for word in self.originalText:
words.append(word.content)
return ' '.join(words)
def getProcessedText(self):
words = []
for word in self.processedText:
words.append(word.content)
return ' '.join(words)
def getTags(self):
tags = []
for word in self.processedText:
tags.append(word.tag)
return ' '.join(tags)
def matchText(self, wordPosition, component, textSize):
#print(textSize)
isMatch = True
for word in component.words:
#print(word.content)
if ((wordPosition >= textSize) or
(word.content.lower() != self.originalText[wordPosition].content.lower())):
isMatch = False
break
wordPosition += 1
return isMatch
def replaceText(self, wordPosition, component):
for word in component.words:
self.processedText[wordPosition] = word
wordPosition += 1
def processText(self):
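        # For each annotated claim/premise, look for its first word in the plain
        # article text; when the full word sequence matches, overwrite those
        # positions with the tagged words and record where the component starts.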
#print(self.getOriginalText())
for claim in self.existingClaimsAndPremises.claims:
wordPosition = 0
for word in self.originalText:
if (claim.words[0].content.lower() == word.content.lower()):
if(self.matchText(wordPosition, claim, len(self.originalText))):
self.replaceText(wordPosition, claim)
claim.startPosition = wordPosition
wordPosition += 1
for premise in self.existingClaimsAndPremises.premises:
wordPosition = 0
for word in self.originalText:
if (premise.words[0].content.lower() == word.content.lower()):
if(self.matchText(wordPosition, premise, len(self.originalText))):
self.replaceText(wordPosition, premise)
premise.startPosition = wordPosition
wordPosition += 1
class DistanceCalculator:
processedText = []
claimsAndPremises = []
def __init__(self, processedText, existingClaimsAndPremises):
self.processedText = processedText
self.claimsAndPremises = existingClaimsAndPremises
def getKey(self, component):
return component.startPosition
def returnUniqueComponents(self, components):
index = 0
uniqueComponents = []
nonUniqueComponents = []
        for component in components:
            unique = True
            for secondComponent in uniqueComponents:
                if component.getText() == secondComponent.getText():
                    unique = False
                    nonUniqueComponents.append(component)
            if (unique):
                #print(component.getText())
                uniqueComponents.append(component)
index += 1
return uniqueComponents
def arrangeComponents(self):
#claims = self.returnUniqueComponents(self.claimsAndPremises.claims)
#premises = self.returnUniqueComponents(self.claimsAndPremises.premises)
'''for claim in claims:
print("claim - " + claim.getText())
for premise in premises:
print("premise - " + premise.getText())'''
components = self.claimsAndPremises.claims + self.claimsAndPremises.premises
components = self.returnUniqueComponents(components)
components = sorted(components, key=self.getKey)
position = 1
for component in components:
#print("component - " + component.getText())
#print (component.startPosition)
for claim in self.claimsAndPremises.claims:
if claim.startPosition == component.startPosition:
claim.position = position
#print("premise " + premise.getText())
for premise in self.claimsAndPremises.premises:
if premise.startPosition == component.startPosition:
premise.position = position
#print(premise.position)
#print("premise " + premise.getText())
position += 1
def calculateDistances(self):
index = 0
for premise in self.claimsAndPremises.premises:
distance = self.claimsAndPremises.claims[index].position - self.claimsAndPremises.premises[index].position
premise.distance = distance
#print(distance)
index += 1
def updatePremises(self):
for premise in self.claimsAndPremises.premises:
for word in premise.words:
tag = list(word.tag)
if tag[1] != 'O':
tag[len(tag)-2] = str(premise.distance)
tag = "".join(tag)
word.tag = tag
class OutputWriter:
processedText = []
textFile = ""
tagFile = ""
file = ""
def __init__(self, processedText, file):
self.processedText = processedText
self.file = open("corpusOutputPunctuation/txt/" + file + '.txt', "w", encoding='utf-8')
#self.textFile = open("corpusOutput/txt/textsWithSentences/" + file + '.txt', "w", encoding='utf-8')
self.textFile = open("corpusOutputPunctuation/txt/texts/" + file + '.txt', "w", encoding='utf-8')
self.tagFile = open("corpusOutputPunctuation/txt/tags/" + file + '.txt', "w", encoding='utf-8')
def writeToTextFile(self):
for word in self.processedText:
content = word.content
tag = word.tag
self.textFile.write(u'' + content + '\n')
self.tagFile.write(u'' + tag + '\n')
self.file.write(u'' + content + '' + tag + '\n')
class Pipeline:
def translate(self):
translator = Translator()
translator.createAssociations()
files = translator.contents
startTime = datetime.datetime.now().replace(microsecond=0)
for (jsonFile, htmlFile) in files.items():
htmlFile = re.sub('.html', '', htmlFile)
jsonFile = re.sub('.json', '', jsonFile)
dumper = TextDumper(htmlFile)
dumper.wordifyText()
claims = claimsAndPremises(jsonFile)
claims.getPremisesAndClaims()
replacer = claimsReplacer(dumper.words, claims)
replacer.processText()
#distanceCalculator = DistanceCalculator(replacer.processedText, replacer.existingClaimsAndPremises)
#distanceCalculator.arrangeComponents()
#distanceCalculator.calculateDistances()
#distanceCalculator.updatePremises()
#replacer = claimsReplacer(dumper.words, distanceCalculator.claimsAndPremises)
#replacer.processText()
output = OutputWriter(replacer.processedText, jsonFile)
output.writeToTextFile()
endTime = datetime.datetime.now().replace(microsecond=0)
timeTaken = endTime - startTime
print("Isto demorou ")
print(timeTaken)
pipeline = Pipeline()
pipeline.translate()
|
fspring/NeuralArgMining
|
Translators/Portuguese/TransFormationScriptComplete.py
|
TransFormationScriptComplete.py
|
py
| 17,944
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35920335524
|
import requests
from bifocal import utils, models
from polo import Polo
from coindesk import Coindesk
class Blockscan(object):
@staticmethod
def _request(**kwargs):
uri = 'http://xcp.blockscan.com/api2?%s' % utils.encode_args(kwargs)
ret = requests.get(uri)
return utils.parse_json(ret)
@staticmethod
def get_tx_by_id(txid):
return Blockscan._request(
module='transaction',
action='info',
txhash=txid
)
@staticmethod
def get_address_transactions(address, asset):
data = Blockscan._request(
module='address',
action='credit_debit',
btc_address=address,
asset=asset
)
transactions = data['data']
return map(Blockscan._parse_tx, transactions)
@staticmethod
def get_tx_source(txid):
tx = Blockscan.get_tx_by_id(txid)
return tx['data'][0]['source']
@staticmethod
def get_tx_destination(txid):
tx = Blockscan.get_tx_by_id(txid)
return tx['data'][0]['destination']
@staticmethod
def _parse_tx(tx):
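        # Turn a raw Blockscan credit/debit record into a Transaction: the asset's
        # BTC rate comes from the Poloniex daily close, and the quoted price is
        # that rate multiplied by the Coindesk BTC price at the same timestamp.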
stamp = int(tx['block_time'])
pair = "BTC_%s" % tx['asset']
btc_rate = Polo.get_daily_close_price(pair, stamp)
return models.Transaction(
timestamp=stamp,
quantity=int(tx['quantity']),
asset=tx['asset'],
id=tx['event'],
price=btc_rate * Coindesk.get_price_by_timestamp(stamp),
price_in_btc=btc_rate,
source=Blockscan.get_tx_source(tx['event']),
destination=Blockscan.get_tx_destination(tx['event'])
)
|
super3/bifocal
|
bifocal/apis/blockscan.py
|
blockscan.py
|
py
| 1,672
|
python
|
en
|
code
| 1
|
github-code
|
6
|
388251844
|
favorites = ['Creme Brulee', 'Apple Pie', 'Churros', 'Tiramisú', 'Chocolate Cake']
for i in range(10):
print(f"looping {i}")
for item in favorites:
print(f"I like this {item}")
count = 0
while count < len(favorites):
print(f"I like this desert, {favorites[count]}")
count += 1
for idx, item in enumerate(favorites):
print(idx, item)
# search for churros
for dessert in favorites:
if dessert == 'Churros':
print('Yes one of my favorite desserts is', dessert)
break
else:
print('No sorry, that dessert is not on my list')
# skips over churros
for dessert in favorites:
if dessert == 'Churros':
continue
print('Other desserts I like are', dessert)
# ignore the if
for dessert in favorites:
if dessert == 'Churros':
pass
print('Other desserts I like are', dessert)
|
andrejarboe/meta
|
python/course1/1loops.py
|
1loops.py
|
py
| 847
|
python
|
en
|
code
| 0
|
github-code
|
6
|
15211930040
|
"""
Perform Outlier Rejection with MCMC
-----------------------------------
Figure 8.9
Bayesian outlier detection for the same data as shown in figure 8.8. The
top-left panel shows the data, with the fits from each model. The top-right
panel shows the 1-sigma and 2-sigma contours for the slope and intercept with
no outlier correction: the resulting fit (shown by the dotted line) is clearly
highly affected by the presence of outliers. The bottom-left panel shows the
marginalized 1-sigma and 2-sigma contours for a mixture model (eq. 8.67). The
bottom-right panel shows the marginalized 1-sigma and 2-sigma contours for a
model in which points are identified individually as "good" or "bad"
(eq. 8.68). The points which are identified by this method as bad with a
probability greater than 68% are circled in the first panel.
"""
# Author: Jake VanderPlas (adapted to PyMC3 by Brigitta Sipocz)
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
import pymc3 as pm
from matplotlib import pyplot as plt
from theano import shared as tshared
import theano.tensor as tt
from astroML.datasets import fetch_hogg2010test
from astroML.plotting.mcmc import convert_to_stdev
# ----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
np.random.seed(0)
# ------------------------------------------------------------
# Get data: this includes outliers. We need to convert them to Theano variables
data = fetch_hogg2010test()
xi = tshared(data['x'])
yi = tshared(data['y'])
dyi = tshared(data['sigma_y'])
size = len(data)
# ----------------------------------------------------------------------
# Define basic linear model
def model(xi, theta, intercept):
slope = np.tan(theta)
return slope * xi + intercept
# ----------------------------------------------------------------------
# First model: no outlier correction
with pm.Model():
# set priors on model gradient and y-intercept
inter = pm.Uniform('inter', -1000, 1000)
theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2)
y = pm.Normal('y', mu=model(xi, theta, inter), sd=dyi, observed=yi)
trace0 = pm.sample(draws=5000, tune=1000)
# ----------------------------------------------------------------------
# Second model: nuisance variables correcting for outliers
# This is the mixture model given in equation 17 in Hogg et al
def mixture_likelihood(yi, xi):
"""Equation 17 of Hogg 2010"""
sigmab = tt.exp(log_sigmab)
mu = model(xi, theta, inter)
Vi = dyi ** 2
Vb = sigmab ** 2
root2pi = np.sqrt(2 * np.pi)
L_in = (1. / root2pi / dyi * np.exp(-0.5 * (yi - mu) ** 2 / Vi))
L_out = (1. / root2pi / np.sqrt(Vi + Vb)
* np.exp(-0.5 * (yi - Yb) ** 2 / (Vi + Vb)))
return tt.sum(tt.log((1 - Pb) * L_in + Pb * L_out))
with pm.Model():
# uniform prior on Pb, the fraction of bad points
Pb = pm.Uniform('Pb', 0, 1.0, testval=0.1)
# uniform prior on Yb, the centroid of the outlier distribution
Yb = pm.Uniform('Yb', -10000, 10000, testval=0)
# uniform prior on log(sigmab), the spread of the outlier distribution
log_sigmab = pm.Uniform('log_sigmab', -10, 10, testval=5)
inter = pm.Uniform('inter', -200, 400)
theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2, testval=np.pi / 4)
y_mixture = pm.DensityDist('mixturenormal', logp=mixture_likelihood,
observed={'yi': yi, 'xi': xi})
trace1 = pm.sample(draws=5000, tune=1000)
# ----------------------------------------------------------------------
# Third model: marginalizes over the probability that each point is an outlier.
# define priors on beta = (slope, intercept)
def outlier_likelihood(yi, xi):
"""likelihood for full outlier posterior"""
sigmab = tt.exp(log_sigmab)
mu = model(xi, theta, inter)
Vi = dyi ** 2
Vb = sigmab ** 2
logL_in = -0.5 * tt.sum(qi * (np.log(2 * np.pi * Vi)
+ (yi - mu) ** 2 / Vi))
logL_out = -0.5 * tt.sum((1 - qi) * (np.log(2 * np.pi * (Vi + Vb))
+ (yi - Yb) ** 2 / (Vi + Vb)))
return logL_out + logL_in
with pm.Model():
# uniform prior on Pb, the fraction of bad points
Pb = pm.Uniform('Pb', 0, 1.0, testval=0.1)
# uniform prior on Yb, the centroid of the outlier distribution
Yb = pm.Uniform('Yb', -10000, 10000, testval=0)
# uniform prior on log(sigmab), the spread of the outlier distribution
log_sigmab = pm.Uniform('log_sigmab', -10, 10, testval=5)
inter = pm.Uniform('inter', -1000, 1000)
theta = pm.Uniform('theta', -np.pi / 2, np.pi / 2)
# qi is bernoulli distributed
qi = pm.Bernoulli('qi', p=1 - Pb, shape=size)
y_outlier = pm.DensityDist('outliernormal', logp=outlier_likelihood,
observed={'yi': yi, 'xi': xi})
trace2 = pm.sample(draws=5000, tune=1000)
# ------------------------------------------------------------
# plot the data
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.25,
bottom=0.1, top=0.95, hspace=0.2)
# first axes: plot the data
ax1 = fig.add_subplot(221)
ax1.errorbar(data['x'], data['y'], data['sigma_y'], fmt='.k', ecolor='gray', lw=1)
ax1.set_xlabel('$x$')
ax1.set_ylabel('$y$')
#------------------------------------------------------------
# Go through models; compute and plot likelihoods
linestyles = [':', '--', '-']
labels = ['no outlier correction\n(dotted fit)',
'mixture model\n(dashed fit)',
'outlier rejection\n(solid fit)']
x = np.linspace(0, 350, 10)
bins = [(np.linspace(140, 300, 51), np.linspace(0.6, 1.6, 51)),
(np.linspace(-40, 120, 51), np.linspace(1.8, 2.8, 51)),
(np.linspace(-40, 120, 51), np.linspace(1.8, 2.8, 51))]
for i, trace in enumerate([trace0, trace1, trace2]):
H2D, bins1, bins2 = np.histogram2d(np.tan(trace['theta']),
trace['inter'], bins=50)
w = np.where(H2D == H2D.max())
# choose the maximum posterior slope and intercept
slope_best = bins1[w[0][0]]
intercept_best = bins2[w[1][0]]
# plot the best-fit line
ax1.plot(x, intercept_best + slope_best * x, linestyles[i], c='k')
# For the model which identifies bad points,
# plot circles around points identified as outliers.
if i == 2:
Pi = trace['qi'].mean(0)
outlier_x = data['x'][Pi < 0.32]
outlier_y = data['y'][Pi < 0.32]
ax1.scatter(outlier_x, outlier_y, lw=1, s=400, alpha=0.5,
facecolors='none', edgecolors='red')
# plot the likelihood contours
ax = plt.subplot(222 + i)
H, xbins, ybins = np.histogram2d(trace['inter'],
np.tan(trace['theta']), bins=bins[i])
H[H == 0] = 1E-16
Nsigma = convert_to_stdev(np.log(H))
ax.contour(0.5 * (xbins[1:] + xbins[:-1]),
0.5 * (ybins[1:] + ybins[:-1]),
Nsigma.T, levels=[0.683, 0.955], colors='black')
ax.set_xlabel('intercept')
ax.set_ylabel('slope')
ax.grid(color='gray')
ax.xaxis.set_major_locator(plt.MultipleLocator(40))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.text(0.96, 0.96, labels[i], ha='right', va='top',
bbox=dict(fc='w', ec='none', alpha=0.5),
transform=ax.transAxes)
ax.set_xlim(bins[i][0][0], bins[i][0][-1])
ax.set_ylim(bins[i][1][0], bins[i][1][-1])
ax1.set_xlim(0, 350)
ax1.set_ylim(100, 700)
plt.show()
|
astroML/astroML_figures
|
book_figures/chapter8/fig_outlier_rejection.py
|
fig_outlier_rejection.py
|
py
| 8,174
|
python
|
en
|
code
| 7
|
github-code
|
6
|
13394323895
|
# -*- coding: utf-8 -*-
"""This module loads images from csv files and outputs numpy arrays"""
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import numpy as np
import tensorflow as tf
from six import string_types
import niftynet.utilities.util_csv as util_csv
from niftynet.io.image_type import ImageFactory
from niftynet.layer.base_layer import Layer, DataDependentLayer, RandomisedLayer
from niftynet.utilities.user_parameters_helper import make_input_tuple
from niftynet.utilities.util_common import print_progress_bar
# NP_TF_DTYPES = {'i': tf.int32, 'u': tf.int32, 'b': tf.int32, 'f': tf.float32}
from niftynet.utilities.niftynet_global_config import NiftyNetGlobalConfig
NP_TF_DTYPES = {'i': tf.float32,
'u': tf.float32,
'b': tf.float32,
'f': tf.float32}
def infer_tf_dtypes(image_array):
return NP_TF_DTYPES.get(image_array.dtype[0].kind, tf.float32)
class ImageReader(Layer):
"""
For a concrete example:
_input_sources define multiple modality mappings, e.g.,
_input_sources {'image': ('T1', 'T2'),
'label': ('manual_map',)}
means
'image' consists of two components, formed by
concatenating 'T1' and 'T2' input source images.
'label' consists of one component, loading from 'manual_map'
self._names: a tuple of the output names of this reader.
('image', 'labels')
self._shapes: the shapes after combining input sources
{'image': (192, 160, 192, 1, 2), 'label': (192, 160, 192, 1, 1)}
self._dtypes: store the dictionary of tensorflow shapes
{'image': tf.float32, 'label': tf.float32}
self.output_list is a list of dictionaries, with each item:
{'image': <niftynet.io.image_type.SpatialImage4D object>,
'label': <niftynet.io.image_type.SpatialImage3D object>}
"""
def __init__(self, names):
# list of file names
self._file_list = None
self._input_sources = None
self._shapes = None
self._dtypes = None
self._names = None
self.names = names
self._global_config = NiftyNetGlobalConfig()
# list of image objects
self.output_list = None
self.current_id = -1
self.preprocessors = []
super(ImageReader, self).__init__(name='image_reader')
def initialise_reader(self, data_param, task_param):
"""
task_param specifies how to combine user input modalities
e.g., for multimodal segmentation 'image' corresponds to multiple
modality sections, 'label' corresponds to one modality section
"""
if not self.names:
tf.logging.fatal('Please specify data names, this should '
'be a subset of SUPPORTED_INPUT provided '
'in application file')
raise ValueError
self._names = [name for name in self.names
if vars(task_param).get(name, None)]
self._input_sources = {name: vars(task_param).get(name)
for name in self.names}
data_to_load = {}
for name in self._names:
for source in self._input_sources[name]:
try:
data_to_load[source] = data_param[source]
except KeyError:
tf.logging.fatal(
'reader name [%s] requires [%s], however it is not '
'specified as a section in the config, '
'current input section names: %s',
name, source, list(data_param))
raise ValueError
default_data_folder = self._global_config.get_niftynet_home_folder()
self._file_list = util_csv.load_and_merge_csv_files(data_to_load, default_data_folder)
self.output_list = _filename_to_image_list(
self._file_list, self._input_sources, data_param)
for name in self.names:
tf.logging.info(
'image reader: loading [%s] from %s (%d)',
name, self.input_sources[name], len(self.output_list))
def prepare_preprocessors(self):
for layer in self.preprocessors:
if isinstance(layer, DataDependentLayer):
layer.train(self.output_list)
def add_preprocessing_layers(self, layers):
assert self.output_list is not None, \
'Please initialise the reader first, ' \
'before adding preprocessors.'
if isinstance(layers, Layer):
self.preprocessors.append(layers)
else:
self.preprocessors.extend(layers)
self.prepare_preprocessors()
# pylint: disable=arguments-differ
def layer_op(self, idx=None, shuffle=True):
"""
this layer returns a dictionary
keys: self.output_fields
values: image volume array
"""
if idx is None and shuffle:
# training, with random list output
idx = np.random.randint(len(self.output_list))
if idx is None and not shuffle:
# testing, with sequential output
# accessing self.current_id, not suitable for multi-thread
idx = self.current_id + 1
self.current_id = idx
try:
idx = int(idx)
except ValueError:
idx = -1
if idx < 0 or idx >= len(self.output_list):
return -1, None, None
image_dict = self.output_list[idx]
image_data_dict = {field: image.get_data()
for (field, image) in image_dict.items()}
interp_order_dict = {field: image.interp_order
for (field, image) in image_dict.items()}
if self.preprocessors:
preprocessors = [deepcopy(layer) for layer in self.preprocessors]
# dictionary of masks is cached
mask = None
for layer in preprocessors:
# import time; local_time = time.time()
if layer is None:
continue
if isinstance(layer, RandomisedLayer):
layer.randomise()
image_data_dict = layer(image_data_dict, interp_order_dict)
else:
image_data_dict, mask = layer(image_data_dict, mask)
# print('%s, %.3f sec'%(layer, -local_time + time.time()))
return idx, image_data_dict, interp_order_dict
@property
def shapes(self):
"""
image shapes before any preprocessing
:return: tuple of integers as image shape
"""
# to have fast access, the spatial dimensions are not accurate
# 1) only read from the first image in list
# 2) not considering effects of random augmentation layers
# but time and modality dimensions should be correct
if not self.output_list:
tf.logging.fatal("please initialise the reader first")
raise RuntimeError
if not self._shapes:
first_image = self.output_list[0]
self._shapes = {field: first_image[field].shape
for field in self.names}
return self._shapes
@property
def tf_dtypes(self):
if not self.output_list:
tf.logging.fatal("please initialise the reader first")
raise RuntimeError
if not self._dtypes:
first_image = self.output_list[0]
self._dtypes = {field: infer_tf_dtypes(first_image[field])
for field in self.names}
return self._dtypes
@property
def input_sources(self):
if not self._input_sources:
tf.logging.fatal("please initialise the reader first")
raise RuntimeError
return self._input_sources
@property
def names(self):
return self._names
@names.setter
def names(self, fields_tuple):
# output_fields is a sequence of output names
# each name might correspond to a list of multiple input sources
# this should be specified in CUSTOM section in the config
self._names = make_input_tuple(fields_tuple, string_types)
def get_subject_id(self, image_index):
return self._file_list.iloc[image_index, 0]
def _filename_to_image_list(file_list, mod_dict, data_param):
"""
converting a list of filenames to a list of image objects
useful properties (e.g. interp_order) are added to each object
"""
volume_list = []
for idx in range(len(file_list)):
print_progress_bar(idx, len(file_list),
prefix='reading datasets headers',
decimals=1, length=10, fill='*')
# combine fieldnames and volumes as a dictionary
_dict = {field: _create_image(file_list, idx, modalities, data_param)
for (field, modalities) in mod_dict.items()}
volume_list.append(_dict)
return volume_list
def _create_image(file_list, idx, modalities, data_param):
"""
data_param consists of description of each modality
This function combines modalities according to the 'modalities'
parameter and create <niftynet.io.input_type.SpatialImage*D>
"""
try:
file_path = tuple(file_list.loc[idx, mod] for mod in modalities)
interp_order = tuple(data_param[mod].interp_order for mod in modalities)
pixdim = tuple(data_param[mod].pixdim for mod in modalities)
axcodes = tuple(data_param[mod].axcodes for mod in modalities)
except KeyError:
tf.logging.fatal(
"Specified modality names %s "
"not found in config: input sections %s",
modalities, list(data_param))
raise
except AttributeError:
tf.logging.fatal(
'data params must contain: interp_order, pixdim, axcodes')
raise
image_properties = {'file_path': file_path,
'name': modalities,
'interp_order': interp_order,
'output_pixdim': pixdim,
'output_axcodes': axcodes}
return ImageFactory.create_instance(**image_properties)
|
LUYU0004/ISLES2018-1
|
lib/niftynet/io/image_reader.py
|
image_reader.py
|
py
| 10,262
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6827571219
|
""" https://adventofcode.com/2020/day/17 """
from typing import List
from copy import deepcopy
from functools import lru_cache
def part1(data: List[str]) -> int:
""" O(n) solution """
size_x = len(data[0]) + 2 * CYCLES
size_y = len(data) + 2 * CYCLES
size_z = CYCLES * 2 + 1
pocket = [[[False] * size_x for _ in range(size_y)] for _ in range(size_z)]
for y in range(len(data)):
for x in range(len(data[y])):
pocket[CYCLES][CYCLES + y][CYCLES + x] = data[y][x] == "#"
for _ in range(CYCLES):
temp = deepcopy(pocket)
for z, depth in enumerate(pocket):
for y, row in enumerate(depth):
for x, cube in enumerate(row):
position = (z, y, x)
adjacents = find_3d_adjacents(
position, len(pocket), len(pocket[0]))
active = sum([(pocket[i][j][k]) for i, j, k in adjacents])
if cube and active not in (2, 3):
temp[z][y][x] = False
elif not cube and active == 3:
temp[z][y][x] = True
pocket = deepcopy(temp)
return sum([x for z in pocket for y in z for x in y])
def part2(data: List[str]) -> int:
""" O(?) solution """
size_x = len(data[0]) + 2 * CYCLES
size_y = len(data) + 2 * CYCLES
size_z = CYCLES * 2 + 1
size_w = CYCLES * 2 + 1
pocket = [[[[False] * size_x for _ in range(size_y)]
for _ in range(size_z)] for _ in range(size_w)]
for y, _ in enumerate(data):
for x, _ in enumerate(data[y]):
pocket[CYCLES][CYCLES][CYCLES +
y][CYCLES + x] = data[y][x] == "#"
for _ in range(CYCLES):
temp = deepcopy(pocket)
for w, time in enumerate(pocket):
for z, depth in enumerate(time):
for y, row in enumerate(depth):
for x, cube in enumerate(row):
position = (w, z, y, x)
adjacents = find_4d_adjacents(position, len(
pocket), len(pocket[0]), len(pocket[0][0]))
active = sum([(pocket[i][j][k][l])
for i, j, k, l in adjacents])
if cube and active not in (2, 3):
temp[w][z][y][x] = False
elif not cube and active == 3:
temp[w][z][y][x] = True
pocket = deepcopy(temp)
return sum([x for w in pocket for z in w for y in z for x in y])
@lru_cache(maxsize=None)
def find_3d_adjacents(pos, depth, width):
z, y, x = pos
adjacents = []
for i in range(z - 1, z + 2):
for j in range(y - 1, y + 2):
for k in range(x - 1, x + 2):
if (i, j, k) != pos and - 1 < i < depth and - 1 < j < width and - 1 < k < width:
adjacents.append((i, j, k))
return adjacents
@lru_cache(maxsize=None)
def find_4d_adjacents(pos, time, depth, width):
w, z, y, x = pos
adjacents = []
for i in range(w - 1, w + 2):
for j in range(z - 1, z + 2):
for k in range(y - 1, y + 2):
for l in range(x - 1, x + 2):
if (i, j, k, l) != pos and - 1 < i < time and - 1 < j < depth and - 1 < k < width and - 1 < l < width:
adjacents.append((i, j, k, l))
return adjacents
if __name__ == "__main__":
TEST = [line.strip() for line in open("tests/d17.txt", "r")]
PUZZLE = [line.strip() for line in open("puzzles/d17.txt", "r")]
CYCLES = 6
assert part1(TEST) == 112
assert part2(TEST) == 848
print(f"Part 1: {part1(PUZZLE)}")
print(f"Part 2: {part2(PUZZLE)}")
|
pozhega/AoC
|
2020/d17.py
|
d17.py
|
py
| 3,804
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22093127538
|
import sys
import itertools
expected = open(sys.argv[1], 'r').readlines()
actual = open(sys.argv[2], 'r').readlines()
# do a pc analysis. easiest bugs to find are the ones where we simply don't run a set of code that
# the expected output shows should be run.
expected_pcs = list(map(lambda x : x.strip().split(" ")[-1], expected))
actual_pcs = list(map(lambda x : x.strip().split(" ")[-1], actual))
expected_pcs = list(set(expected_pcs))
actual_pcs = list(set(actual_pcs))
print("Running PC Analysis...")
pcs_not_found = []
for pc in expected_pcs:
if not pc in actual_pcs:
# personal constraints:
if (int(pc, 16) & 0xFF000000) == 0x00000000:
continue
if (int(pc, 16) & 0xFF000000) == 0x03000000:
continue
pcs_not_found.append(int(pc, 16))
pcs_not_found.sort()
ranged_pcs_not_found = []
previous_pc = -1
start_range = -1
range_len = -1
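# Merge consecutive missing PCs into (start, length) ranges; gaps of 2 or 4
# bytes are treated as contiguous (presumably Thumb vs. ARM instruction widths).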
for pc in pcs_not_found:
if pc == previous_pc + 2 or pc == previous_pc + 4 or pc == previous_pc:
range_len += (pc - previous_pc)
else:
ranged_pcs_not_found.append((start_range, range_len))
range_len = 0
start_range = pc
previous_pc = pc
pcs_not_found = pcs_not_found[1:]
print("\nResults:")
for r in ranged_pcs_not_found:
start = '{0:#010x}'.format(r[0])
end = '{0:#010x}'.format(r[0] + r[1])
print("PCs in Range: {} to {} were not found.".format(start, end))
print("PC Analysis Complete")
|
bmchtech/GameBeanAdvance
|
source/emu/core/diag/compare-logs.py
|
compare-logs.py
|
py
| 1,480
|
python
|
en
|
code
| 24
|
github-code
|
6
|
73819284349
|
import numpy as np
import dill
import math
import sys
sys.path.append('../')
sys.path.append('./')
from src.graph import Graph
from src.evolution_strategies import one_plus_lambda, tournament_selection
from src.population import Population
from src.arg_parser import parse_args
import cProfile
import pstats
def bool_and(x, y): return x and y
def bool_or(x, y): return x or y
def bool_nand(x, y): return not(x and y)
def bool_nor(x, y): return not(x or y)
Population.add_operation(arity=2, func=bool_and, string="AND")
Population.add_operation(arity=2, func=bool_or, string="OR")
Population.add_operation(arity=2, func=bool_nand, string="NAND")
Population.add_operation(arity=2, func=bool_nor, string="NOR")
def eleven_multiplexer(arr):
if len(arr) != 11:
print("AAAAAAAAAAAAAAAAAAAAAAA")
raise
d = arr[0:8]
a = arr[8:11]
index = (int(a[0]) * 4) + (int(a[1]) * 2) + (int(a[2]) * 1)
if d[index] == "1":
return True
return False
def create_tests(n):
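    # Build the full 2^n truth table for the even-parity problem: the expected
    # output is True exactly when the input contains an even number of 1 bits.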
tests = []
for i in range(2**n):
base_2_v = bin(i).replace("0b", "").zfill(n)
cont = 0
input_arr = []
for c in base_2_v:
inp = False
if c == "1":
cont += 1
inp = True
input_arr.append(inp)
response = True if cont%2 == 0 else False
tests.append((input_arr, [response]))
return tests
def fitness_func(individual: Graph, gen: int, tests):
fitness = 0
for t in tests:
inputs = t[0]
expected_out = t[1]
graph_out = individual.operate(inputs)
for h, y in zip(graph_out, expected_out):
if h == y:
fitness += 1
fitness = fitness/len(tests)
return np.clip(fitness, -1*(10**10), 10**10)
def main():
n = 5
args = parse_args()
Population.rng = np.random.default_rng(args["seed"])
tests = create_tests(n)
def fit_func(indv, gen): return fitness_func(indv, gen, tests)
population = Population(
population_size=args["pop_size"],
n_in=n,
n_out=1,
n_middle=args["n_middle_nodes"]
)
def t_select(): return tournament_selection(
population=population,
generations=args["max_gens"],
goal_fit=1,
fitness_func=fit_func,
minimize_fitness=False,
fit_share=args["fit_share"],
stagnation=args["stagnation"],
stag_preservation=args["stag_preservation"],
report=args["report"],
mutate_active_only=args["mut_active_only"],
mutation_rate=args["mut_rate"],
elitism=args["elitism"],
crossover_rate=args["crossover_rate"],
tournament_size=args["tourney_size"],
species_threshold=args["species_threshold"],
n_threads=args["n_threads"],
csv_file=args["csv"],
fit_partition_size=args["fit_partition"]
)
def p_lambda(): return one_plus_lambda(
population=population,
generations=args["max_gens"],
goal_fit=1,
fitness_func=fit_func,
minimize_fitness=False,
fit_share=args["fit_share"],
stagnation=args["stagnation"],
stag_preservation=args["stag_preservation"],
report=args["report"],
n_champions=args["elitism"],
mutate_active_only=args["mut_active_only"],
mutation_rate=args["mut_rate"],
species_threshold=args["species_threshold"],
n_threads=args["n_threads"],
csv_file=args["csv"],
fit_partition_size=args["fit_partition"]
)
exec_func = t_select
if args["selection_method"] == "lambda":
exec_func = p_lambda
# profile = cProfile.Profile()
# profile.runcall(exec_func)
# ps = pstats.Stats(profile)
# ps.print_stats()
# print()
exec_func()
if args["save_to"] is not None:
dill.dump(population, open(args["save_to"], mode='wb'))
if __name__ == "__main__":
main()
|
fhtanaka/CGPython
|
tests/diversity_parity_test.py
|
diversity_parity_test.py
|
py
| 3,954
|
python
|
en
|
code
| 2
|
github-code
|
6
|
16606388338
|
from rich import print
from napalm import get_network_driver
from my_devices import arista1, arista2, arista3, arista4
def main():
for device in (arista1, arista2, arista3, arista4):
driver = get_network_driver('eos')
with driver(**device) as device:
device.open()
vlans = device.get_vlans()
host = device.hostname
device.load_merge_candidate(filename='vlans.cfg')
diff = device.compare_config()
print(f'diff for host {host}:')
print(diff)
print('-'*70)
if diff:
print('committing')
device.commit_config()
else:
print('no changes')
print()
if __name__ == '__main__':
main()
|
caseymorris87/pynet_test2
|
napalm/ex2.py
|
ex2.py
|
py
| 839
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20105217581
|
#https://towardsdatascience.com/how-to-perform-lasso-and-ridge-regression-in-python-3b3b75541ad8
import numpy as np
import pandas as pd
#we only have three advertising mediums, and sales is our target variable.
DATAPATH = 'Advertising.csv'
data = pd.read_csv(DATAPATH)
print(data.head())
data.drop(['Unnamed: 0'], axis=1, inplace=True) #remove first column which has the record number
#Least square regression
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
Xs = data.drop(['sales'], axis=1)
y = data['sales'].values.reshape(-1,1)
lin_reg = LinearRegression()
MSEs = cross_val_score(lin_reg, Xs, y, scoring='neg_mean_squared_error', cv=5)
mean_MSE = np.mean(MSEs)
print("Least square MSE ",mean_MSE)
#Lasso regression
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
lasso = Lasso()
parameters = {'alpha': [1e-15, 1e-10, 1e-8, 1e-4, 1e-3,1e-2, 1, 5, 10, 20]}
#GridSearchCV. This will allow us to automatically perform 5-fold cross-validation
# with a range of different regularization parameters in order to find the optimal value of alpha.
lasso_regressor = GridSearchCV(lasso, parameters, scoring='neg_mean_squared_error', cv = 5)
lasso_regressor.fit(Xs, y)
print("Lasso best alpth value ",lasso_regressor.best_params_)
print("Lasso MSE score ",lasso_regressor.best_score_) #MSE
|
eyadwin/Machine_Learning
|
regularization_lasso_regression.py
|
regularization_lasso_regression.py
|
py
| 1,396
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26420683804
|
from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from job.config.ConfigStore import *
from job.udfs.UDFs import *
from job.graph import *
def pipeline(spark: SparkSession) -> None:
df_Source_1 = Source_1(spark)
df_FileOps_1 = FileOps_1(spark, df_Source_1)
df_rank = rank(spark, df_Source_1)
df_Reformat_5 = Reformat_5(spark, df_rank)
df_Script_1 = Script_1(spark, df_Source_1)
df_Aggregate_1 = Aggregate_1(spark, df_Source_1)
df_Reformat_6 = Reformat_6(spark, df_Aggregate_1)
df_Reformat_1 = Reformat_1(spark, df_Source_1)
df_ref = ref(spark, df_Reformat_1)
df_Reformat_3 = Reformat_3(spark, df_ref)
df_Reformat_2 = Reformat_2(spark, df_Script_1)
df_bug2 = bug2(spark, df_Source_1)
df_Reformat_4 = Reformat_4(spark, df_bug2)
df_bug = bug(spark, df_Source_1)
def main():
Utils.initializeFromArgs(Utils.parseArgs())
spark = SparkSession.builder\
.config("spark.default.parallelism", "4")\
.enableHiveSupport()\
.appName("Prophecy Pipeline")\
.getOrCreate()
pipeline(spark)
if __name__ == "__main__":
main()
|
anshuman-91/GameDay11thMay
|
code/scala/pipelines/Reformat/code/job/pipeline.py
|
pipeline.py
|
py
| 1,189
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17641215127
|
import numpy as np
def MAPE(actual, forecast):
return np.mean(np.abs((actual - forecast) / actual)) * 100
def double_exponential_smoothing(x, alpha=0.3, beta=0.5, l_zero=2, b_zero=0, mape=False):
if not (0 <= alpha <= 1):
raise ValueError("Invalid alpha")
if not (0 <= beta <= 1):
raise ValueError("Invalid beta")
n = len(x)
forecasts = np.zeros(n)
l_prev = l_zero
b_prev = b_zero
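    # Holt's linear (double) exponential smoothing:
    #   level  l_t = alpha * x_t + (1 - alpha) * (l_{t-1} + b_{t-1})
    #   trend  b_t = beta * (l_t - l_{t-1}) + (1 - beta) * b_{t-1}
    # forecasts[t] = l_t + b_t, the one-step-ahead prediction made at time t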
for t in range(1, n):
l_t = alpha * x[t] + (1 - alpha) * (l_prev + b_prev)
b_t = beta * (l_t - l_prev) + (1 - beta) * b_prev
forecasts[t] = l_t + b_t
l_prev = l_t
b_prev = b_t
forecasts[0] = np.nan
if mape:
mape_value = MAPE(x[1:], forecasts[1:])
return forecasts, mape_value
else:
return forecasts
# Sample input observations
x = np.array([2.92, 0.84, 2.69, 2.42, 1.83, 1.22,0.10,1.32,0.56,-0.35])
forecasts, mape_value = double_exponential_smoothing(x, l_zero=2, b_zero=0, alpha=0.3, beta=0.5, mape=True)
print("Forecasts:", forecasts)
print("MAPE:", mape_value)
|
akiffbaba0/Double-Exponantial-Smoothing-to-Create-Forecasts
|
untitled1.py
|
untitled1.py
|
py
| 1,122
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20665200266
|
import csv
class NameDescriptor:
"""
Дескриптор для проверки и хранения ФИО студента.
Проверяет, что каждая часть ФИО содержит только буквы и начинается с заглавной буквы.
"""
def __get__(self, instance, owner):
        return f'{instance._first_name} {instance._last_name} {instance._patronymic}'
def __set__(self, instance, value):
first_name, last_name, patronymic = value
if not all(part.isalpha() and part.istitle() for part in [first_name, last_name, patronymic]):
raise ValueError("Каждая часть ФИО должна содержать только буквы и начинаться с заглавной буквы")
instance._first_name = first_name
instance._last_name = last_name
instance._patronymic = patronymic
class Subject:
"""
Класс, представляющий предмет, с валидацией оценок и результатов тестов.
"""
def __init__(self, name):
self.name = name
self.scores = []
self.test_results = []
def add_score(self, score):
if 2 <= score <= 5:
self.scores.append(score)
else:
raise ValueError("Оценка должна быть в диапазоне от 2 до 5")
def add_test_result(self, result):
if 0 <= result <= 100:
self.test_results.append(result)
else:
raise ValueError("Результат теста должен быть в диапазоне от 0 до 100")
def average_test_score(self):
if self.test_results:
return sum(self.test_results) / len(self.test_results)
return 0
def average_score(self):
if self.scores:
return sum(self.scores) / len(self.scores)
return 0
class Student:
"""
Класс, представляющий студента.
"""
name = NameDescriptor()
def __init__(self, first_name, last_name, patronymic, file):
self.name = (first_name, last_name, patronymic)
self.subjects = self.load_subjects(file)
def load_subjects(self, csv_file):
subjects = []
with open(csv_file, 'r') as file:
reader = csv.reader(file)
subjects = [Subject(row[0]) for row in reader]
return subjects
def add_score(self, subject_name, score):
subject = self.find_subject(subject_name)
if subject:
subject.add_score(score)
else:
raise ValueError("Предмет не найден")
def add_test_result(self, subject_name, result):
subject = self.find_subject(subject_name)
if subject:
subject.add_test_result(result)
else:
raise ValueError("Предмет не найден")
def find_subject(self, subject_name):
for subject in self.subjects:
if subject.name == subject_name:
return subject
return None
def average_test_score_per_subject(self):
averages = {}
for subject in self.subjects:
averages[subject.name] = subject.average_test_score()
return averages
def average_score_all_subjects(self):
total_scores = [score for subject in self.subjects for score in subject.scores]
if total_scores:
return sum(total_scores) / len(total_scores)
return 0
subjects = 'subjects.csv'
student = Student('Иванов', 'Иван', 'Иванович', subjects)
student.add_score('Математика', 5)
student.add_score('Математика', 4)
student.add_test_result('Математика', 85)
student.add_test_result('Математика', 90)
student.add_score('Физика', 3)
student.add_score('Физика', 4)
student.add_test_result('Физика', 70)
student.add_test_result('Физика', 80)
print("Средний балл по тестам для каждого предмета:", student.average_test_score_per_subject())
print("Средний балл по оценкам всех предметов:", student.average_score_all_subjects())
|
nadia3373/GeekBrains-Python-Developer
|
Diving into Python/s12/homework.py
|
homework.py
|
py
| 4,274
|
python
|
ru
|
code
| 1
|
github-code
|
6
|
17203277817
|
#Steps to follow:
#1) Install the EELabs project repository, with .env and credentials.json in \resources_folder\google, and the conda environment
#2) Paste this script inside the repository \
#3) Run it from its location
#IMPORTANT: it does not support date-based updates; it is intended for a one-off download, not for keeping downloads up to date
import select
from utils.devices_api.eelabs_devices_api import EELabsDevicesApi
from utils.devices.tess import TESS
from utils.devices.skyglow import SkyGlow
from utils.devices.sqm import SQM
from utils.devices.astmon import ASTMON
import pandas as pd
import numpy as np
from datetime import datetime
from utils.my_utils import Utils
import config as conf #Configuration variables
from utils.filter import Filter
from datetime import date
import argparse
import os
#Input of the script
parser = argparse.ArgumentParser()
parser.add_argument('--f', '--from', type=int, help='Year from')
parser.add_argument('--to', type=int, help='Year to')
parser.add_argument('--out','--output', required=True, type=str, help='Output filename')
parser.add_argument('--filter', type=str, help='Data filtering: sun, moon, clouds, galaxy, zodiacal, sigma. Format example: [sun,galaxy,zodiacal] Write all without brackets for all filters. Format example: all')
parser.add_argument('--device',type=str,help='Format example: [LPL1_001,LPL2_033,stars1] Write the name without brackets for a single device. Format example: stars1')
parser.add_argument('--ephemeris',type=bool,help='True for ephemeris included')
args = parser.parse_args()
initial_year = args.f
final_year = args.to
output = args.out
filter = args.filter
ephemerides = args.ephemeris
if filter:
if filter[0]=='[':
filter=filter[1:-1].split(',')
else:
filter=filter
else:
filter=[]
select_devices=args.device
if select_devices:
if select_devices[0]=='[':
select_devices=set(select_devices[1:-1].split(','))
else:
select_devices=set([select_devices])
#Create the save folder
output=output+'\Photometer_data'
if not os.path.exists(output):
os.mkdir(output)
#Photometers dataset
devices=EELabsDevicesApi().get_all_devices_info()
devices=pd.DataFrame(devices)
devices=devices.drop(['sg_type','lpl','zero_point','filters','mov_sta_position','local_timezone','location','info_img','info_tess','place','tester','info_org','__v','latitude','longitude','country','city'],axis=1)
localizacion=pd.DataFrame(list(devices['info_location'])).drop(['latitude_hide','longitude_hide'],axis=1)
devices['place']=localizacion['place']
devices['town']=localizacion['town']
devices['sub_region']=localizacion['sub_region']
devices['region']=localizacion['region']
devices['country']=localizacion['country']
devices['latitude']=localizacion['latitude']
devices['longitude']=localizacion['longitude']
devices['elevation']=localizacion['elevation']
devices=devices.drop(['info_location'],axis=1)
devices.to_csv(output+'\All_devices.csv', index = False)
#Folder to save records
if not os.path.exists(output+'\Records'):
os.mkdir(output+'\Records')
#Obtain the device class regardless of its type
def Device(device_name):
devices=pd.read_csv(output+'\All_devices.csv')
type=devices[devices['name']==device_name]['TYPE'].values[0]
if type==TESS.TYPE:
device_obj=TESS(name=device_name)
elif type==SkyGlow.TYPE:
device_obj=SkyGlow(name=device_name)
elif type==SQM.TYPE:
device_obj=SQM(name=device_name)
elif type==ASTMON.TYPE:
device_obj=ASTMON(name=device_name)
return device_obj
#Obtain filtered data for a device and year
def Data(device_name,year,filter): #filter: data vector such as ['sun', 'moon'] for example.
device_obj=Device(device_name)
FIRST_DATE=pd.Timestamp(datetime(year, 1, 1, 0, 0), tz='UTC')
LAST_DATE=pd.Timestamp(datetime(year+1, 1, 1, 0, 0), tz='UTC')
df_all=None
try:
df_all = device_obj.get_all_data(date_from=FIRST_DATE, date_to=LAST_DATE,force=False)
No_data=False
except:
df_all=None
No_data=True
if No_data:
        print('The device '+device_name+' did not respond due to an error')
df_all=df_all[(df_all['mag']>conf.MAG_MIN) & (df_all['mag']<conf.MAG_MAX)] #Filter for extreme magnitudes
if __name__ == '__main__':
df_all = Utils().add_ephems(df_all, device_obj.getObserver(), parallelize=False) # The parallelize option is causing issues
V=[]
if 'sun' in filter or filter=='all':
df_all = Filter().filter_sun(df_all, max_sun_alt=conf.SUN_ALT_MAX)
else:
df_filter=Filter().filter_sun(df_all, max_sun_alt=conf.SUN_ALT_MAX)
F=np.array([True]*(df_all.index[-1]+1)) #Vector with all True for all indices
F[df_filter.index]=False #Replace remaining indices to False after filtering
df_all['sun']=F[df_all.index] #Retrieve data according to the original index
V=V+['sun']
if 'moon' in filter or filter=='all':
df_all = Filter().filter_moon(df_all, max_moon_alt=conf.MOON_ALT_MAX)
else:
df_filter=Filter().filter_moon(df_all, max_moon_alt=conf.MOON_ALT_MAX)
F=np.array([True]*(df_all.index[-1]+1))
F[df_filter.index]=False
df_all['moon']=F[df_all.index]
V=V+['moon']
if 'clouds' in filter or filter=='all':
clouds_threshold=conf.CLOUD_STD_FREQ
df_all = Filter().filter_column(df_all, device_obj.getMagSTDColname(), max=clouds_threshold)
else:
clouds_threshold=conf.CLOUD_STD_FREQ
df_filter=Filter().filter_column(df_all, device_obj.getMagSTDColname(), max=clouds_threshold)
F=np.array([True]*(df_all.index[-1]+1))
F[df_filter.index]=False
df_all['clouds']=F[df_all.index]
V=V+['clouds']
if 'galaxy' in filter or filter=='all':
df_all = Filter().filter_galactic_abs_lat(df_all, min_lat=conf.GALACTIC_LAT_MIN, max_lat=180)
else:
df_filter=Filter().filter_galactic_abs_lat(df_all, min_lat=conf.GALACTIC_LAT_MIN, max_lat=180)
F=np.array([True]*(df_all.index[-1]+1))
F[df_filter.index]=False
df_all['galaxy']=F[df_all.index]
V=V+['galaxy']
if 'zodiacal' in filter or filter=='all':
df_all = Filter().filter_column(df_all, col_name='ecliptic_f', max=conf.ECLIPTIC_F_MAX)
else:
df_filter=Filter().filter_column(df_all, col_name='ecliptic_f', max=conf.ECLIPTIC_F_MAX)
F=np.array([True]*(df_all.index[-1]+1))
F[df_filter.index]=False
df_all['zodiacal']=F[df_all.index]
V=V+['zodiacal']
if 'sigma' in filter or filter=='all':
sigma=conf.NSIGMA
df_all = Filter().filter_nsigma(df_all, col_name='mag', sigma=sigma)
else:
sigma=conf.NSIGMA
df_filter=Filter().filter_nsigma(df_all, col_name='mag', sigma=sigma)
F=np.array([True]*(df_all.index[-1]+1))
F[df_filter.index]=False
df_all['sigma']=F[df_all.index]
V=V+['sigma']
if ephemerides:
df=pd.DataFrame({'time':df_all['time'],'mag':df_all['mag'],'name':device_name,'moon_phase':df_all['moon_phase'],'moon_alt':df_all['moon_alt'],'galactic_lat':df_all['galactic_lat'],'galactic_lon':df_all['galactic_lon'],'helioecliptic_lon_abs':df_all['helioecliptic_lon_abs'],'ecliptic_lat_abs':df_all['ecliptic_lat_abs']})
else:
df=pd.DataFrame({'time':df_all['time'],'mag':df_all['mag'],'name':device_name})
for ii in V:
df[ii]=df_all[ii]
return df
#Obtain all data between two years
def Data_download(V,initial_year=None,final_year=None,iterate=True): #Iterate to prompt for enter key per iteration
#Downloaded devices
Downloaded_devices=set()
for j in range(0,1000):
try:
df_records=pd.read_csv(output+'\Records\Records_'+str(j)+'.csv')
Downloaded_devices=Downloaded_devices|set(df_records['name'])
except:
Downloaded_devices=Downloaded_devices
#Log devices
try:
df_log=pd.read_csv(output+'\Log.csv')
Log_devices=set(df_log['Devices'])
Log_exists=True
except:
Log_devices=set()
Log_exists=False
diff=Downloaded_devices-Log_devices
Downloaded_devices=Downloaded_devices|Log_devices
print(Downloaded_devices)
df_all_devices=pd.read_csv(output+'\All_devices.csv')
All_devices=set(df_all_devices['name'])
if select_devices:
Missing_devices=select_devices-Downloaded_devices
else:
        Missing_devices=All_devices-Downloaded_devices #To know which devices need to be downloaded
n_missing_devices=len(Missing_devices)
if initial_year:
i_year=initial_year
else:
i_year=2010
if final_year:
f_year=final_year
else:
f_year=date.today().year
Downloaded_missing_devices=[]
v_empty=[]
v_time=[]
#Loop where it goes device by device and then year by year
for i in Missing_devices:
df=pd.DataFrame()
empty=True
for ii in range(i_year,f_year+1):
try:
dat=Data(i,ii,V)
df=pd.concat([df,dat])
if list(dat.values)!=[]:
empty=False
except:
df=df
print('Year: '+str(ii))
#Save
#Saving with files limited to 1 GB
try:
df_records=pd.read_csv(output+'\Records\Records_1.csv')
Records_exist=True
except:
df_final=df
df_final.to_csv(output+'\Records\Records_1.csv', index = False)
Records_exist=False
        if Records_exist:
            counter=0
            for j in range(1,1000):
                try:
                    df_records=pd.read_csv(output+'\Records\Records_'+str(j)+'.csv')
                    if os.stat(output+'\Records\Records_'+str(j)+'.csv').st_size<1000000000:
                        df_final=pd.concat([df_records,df])
                        df_final.to_csv(output+'\Records\Records_'+str(j)+'.csv', index = False)
                        counter=1
                        break #Stop once the data has been appended to an existing file under 1 GB
                except:
                    if counter==0:
                        df_final=df
                        df_final.to_csv(output+'\Records\Records_'+str(j)+'.csv', index = False)
                        counter=1
                        break #Stop once the data has been written to a new records file
time=datetime.now()
v_empty=v_empty+[empty]
v_time=v_time+[time]
Downloaded_missing_devices=Downloaded_missing_devices+[i]
Log_downloaded_devices=pd.DataFrame({'Devices':Downloaded_missing_devices,'Time':v_time,'Empty':v_empty})
Log_downloaded_devices_2=pd.DataFrame({'Devices':list(diff),'Time':None,'Empty':False})
Log=pd.concat([Log_downloaded_devices_2,Log_downloaded_devices])
#Save log
if Log_exists:
Log_2=pd.concat([df_log,Log])
else:
Log_2=Log
Log_2.to_csv(output+'\Log.csv', index = False)
n_no_downloaded_missing_devices=n_missing_devices-len(Downloaded_missing_devices)
        print(str(n_no_downloaded_missing_devices)+' devices are still pending download')
if iterate:
if input('Downloaded device:'+i+'\n')=='exit':
break
else:
print('Downloaded device:'+i+'\n')
#Run
Data_download(filter,initial_year,final_year,iterate=False)
|
mt4sd/EELabs_paper
|
Download_data/Photometer_data/Download_EELabs_photometers.py
|
Download_EELabs_photometers.py
|
py
| 11,585
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26306025238
|
#!/usr/bin/python3
"""Use reddit api to get info about subredit subscribers"""
def number_of_subscribers(subreddit):
"""Return number of subscribers in subreddit given as argument"""
import requests
url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
headers = {'user-agent': 'andy'}
res = requests.get(url, headers=headers, allow_redirects=False)
if res.status_code != 200:
return (0)
else:
return (res.json()['data']['subscribers'])
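# Example usage (the subreddit name is just an illustration); prints the subscriber
# count, or 0 when the subreddit does not exist or the request gets redirected:
#     print(number_of_subscribers("programming"))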
|
AndyMSP/holbertonschool-system_engineering-devops
|
0x16-api_advanced/0-subs.py
|
0-subs.py
|
py
| 496
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21837055614
|
"""Parsing url to check its SEO and availability"""
from datetime import date
from bs4 import BeautifulSoup
def get_page_data(response):
"""Check SEO functionality of url"""
result = {'status_code': response.status_code}
page = BeautifulSoup(response.text, 'html.parser')
result['h1'] = page.h1.get_text() if page.h1 else ''
result['title'] = page.title.get_text() if page.title else ''
    description_tag = page.find('meta', {'name': 'description'})
    result['description'] = description_tag.get('content') if description_tag else ''
result['created_at'] = date.today()
return result
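# A small usage sketch (the URL is hypothetical): any object exposing `status_code`
# and `text` works, a `requests` response being the typical input:
#     import requests
#     data = get_page_data(requests.get("https://example.com", timeout=5))
#     print(data["status_code"], data["title"], data["description"])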
|
GunGalla/python-project-83
|
page_analyzer/parse_url.py
|
parse_url.py
|
py
| 628
|
python
|
en
|
code
| 0
|
github-code
|
6
|
655866557
|
from __future__ import annotations
import contextlib
import inspect
import os
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from typing import Any, Callable, Dict, Optional, Union
import numpy as np
import torch
import torch.cuda.amp as amp
from tqdm import tqdm
from .tensorboard_logger import TensorboardLogger
from .wandb_logger import WandbLogger
from ..util import auto_compile, get_constructor_arguments, is_compiled
class DefaultTrainer:
"""Trainer class for 2d/3d training on a single GPU."""
def __init__(
self,
name: Optional[str],
train_loader: torch.utils.data.DataLoader,
val_loader: torch.utils.data.DataLoader,
model: torch.nn.Module,
loss,
optimizer,
metric,
device: Union[str, torch.device],
lr_scheduler=None,
log_image_interval=100,
mixed_precision=True,
early_stopping=None,
logger=TensorboardLogger,
logger_kwargs: Optional[Dict[str, Any]] = None,
id_: Optional[str] = None,
save_root: Optional[str] = None,
compile_model: Optional[Union[bool, str]] = None,
):
if name is None and not issubclass(logger, WandbLogger):
raise TypeError("Name cannot be None if not using the WandbLogger")
if not all(hasattr(loader, "shuffle") for loader in [train_loader, val_loader]):
raise ValueError(f"{self.__class__} requires each dataloader to have 'shuffle' attribute.")
self._generate_name = name is None
self.name = name
self.id_ = id_ or name
self.train_loader = train_loader
self.val_loader = val_loader
self.model = model
self.loss = loss
self.optimizer = optimizer
self.metric = metric
self.device = device
self.lr_scheduler = lr_scheduler
self.log_image_interval = log_image_interval
self.save_root = save_root
self.compile_model = compile_model
self._iteration = 0
self._epoch = 0
self._best_epoch = 0
self.mixed_precision = mixed_precision
self.early_stopping = early_stopping
self.scaler = amp.GradScaler() if mixed_precision else None
self.logger_class = logger
self.logger_kwargs = logger_kwargs
self.log_image_interval = log_image_interval
@property # because the logger may generate and set trainer.id on logger.__init__
def checkpoint_folder(self):
assert self.id_ is not None
# save_root enables saving the checkpoints somewhere else than in the local
# folder. This is handy for filesystems with limited space, where saving the checkpoints
# and log files can easily lead to running out of space.
save_root = getattr(self, "save_root", None)
return os.path.join("./checkpoints", self.id_) if save_root is None else\
os.path.join(save_root, "./checkpoints", self.id_)
@property
def iteration(self):
return self._iteration
@property
def epoch(self):
return self._epoch
class Deserializer:
"""Determines how to deserialize the trainer kwargs from serialized 'init_data'
Examples:
            To extend the initialization process you can inherit from this Deserializer in a derived Trainer class.
Note that `DefaultTrainer.Deserializer.load_generic()` covers most cases already.
This example adds `the_answer` kwarg, which requires 'calculations' upon initialization:
>>> class MyTrainer(DefaultTrainer):
>>> def __init__(self, *args, the_answer: int, **kwargs):
>>> super().__init__(*args, **kwargs)
>>> self.the_answer = the_answer # this allows the default Serializer to save the new kwarg,
>>> # see DefaultTrainer.Serializer
>>>
>>> class Deserializer(DefaultTrainer.Deserializer):
>>> def load_the_answer(self):
>>> generic_answer = self.init_data["the_answer"]
>>> # (device dependent) special deserialization
>>> if self.trainer_kwargs["device"].type == "cpu": # accessing previously deserialized kwarg
>>> self.trainer_kwargs["the_answer"] = generic_answer + 1
>>> else:
>>> self.trainer_kwargs["the_answer"] = generic_answer * 2
"""
def __init__(self, init_data: dict, save_path: str, device: Union[str, torch.device]):
self.init_data = init_data
self.save_path = save_path
# populate with deserialized trainer kwargs during deserialization; possibly overwrite 'device'
self.trainer_kwargs: Dict[str, Any] = dict(
device=torch.device(self.init_data["device"]) if device is None else torch.device(device)
)
def load(self, kwarg_name: str, optional):
"""`optional` is True if self.trainer.__class__.__init__ specifies a default value for 'kwarg_name'"""
if kwarg_name == "device":
pass # deserialized in __init__
elif kwarg_name.endswith("_loader"):
self.load_data_loader(kwarg_name, optional)
else:
load = getattr(self, f"load_{kwarg_name}", self.load_generic)
load(kwarg_name, optional=optional)
def load_data_loader(self, loader_name, optional) -> None:
ds = self.init_data.get(loader_name.replace("_loader", "_dataset"))
if ds is None and optional:
return
loader_kwargs = self.init_data[f"{loader_name}_kwargs"]
loader = torch.utils.data.DataLoader(ds, **loader_kwargs)
            # monkey patch the shuffle attribute from the loader kwargs onto the loader
loader.shuffle = loader_kwargs.get("shuffle", False)
self.trainer_kwargs[loader_name] = loader
def load_generic(
self,
kwarg_name: str,
*dynamic_args,
optional: bool,
only_class: bool = False,
dynamic_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
if kwarg_name in self.init_data:
self.trainer_kwargs[kwarg_name] = self.init_data[kwarg_name]
return
this_cls = self.init_data.get(f"{kwarg_name}_class", None)
if this_cls is None:
if optional:
return
else:
raise RuntimeError(f"Could not find init data for {kwarg_name} in {self.save_path}")
assert isinstance(this_cls, str), this_cls
assert "." in this_cls, this_cls
cls_p, cls_m = this_cls.rsplit(".", 1)
this_cls = getattr(import_module(cls_p), cls_m)
if only_class:
self.trainer_kwargs[kwarg_name] = this_cls
else:
self.trainer_kwargs[kwarg_name] = this_cls(
*dynamic_args, **self.init_data.get(f"{kwarg_name}_kwargs", {}), **(dynamic_kwargs or {})
)
def load_name(self, kwarg_name: str, optional: bool):
self.trainer_kwargs[kwarg_name] = os.path.split(os.path.dirname(self.save_path))[1]
def load_optimizer(self, kwarg_name: str, optional: bool):
self.load_generic(kwarg_name, self.trainer_kwargs["model"].parameters(), optional=optional)
def load_lr_scheduler(self, kwarg_name: str, optional: bool):
self.load_generic(kwarg_name, self.trainer_kwargs["optimizer"], optional=optional)
# todo: remove and rename kwarg 'logger' to 'logger_class'
def load_logger(self, kwarg_name: str, optional: bool):
assert kwarg_name == "logger"
self.load_generic("logger", optional=optional, only_class=True)
@staticmethod
def _get_save_dict(save_path, device):
if not os.path.exists(save_path):
raise ValueError(f"Cannot find checkpoint {save_path}")
return torch.load(save_path, map_location=device)
@classmethod
def from_checkpoint(cls, checkpoint_folder, name="best", device=None):
save_path = os.path.join(checkpoint_folder, f"{name}.pt")
# make sure the correct device is set if we don't have access to CUDA
if not torch.cuda.is_available():
device = "cpu"
save_dict = cls._get_save_dict(save_path, device)
deserializer = cls.Deserializer(save_dict["init"], save_path, device)
has_kwargs = False
deserialized = []
for name, parameter in inspect.signature(cls).parameters.items():
if name == "kwargs":
has_kwargs = True
continue
deserializer.load(name, optional=parameter.default is not inspect.Parameter.empty)
deserialized.append(name)
        # to deserialize kwargs we can't rely on inspecting the signature, so we
        # go through the remaining kwarg names in init data instead
if has_kwargs:
kwarg_names = list(set(deserializer.init_data.keys()) - set(deserialized))
for name in kwarg_names:
if name.endswith("_kwargs"):
continue
elif name.endswith("_dataset"):
deserializer.load(name.replace("dataset", "loader"), optional=False)
elif name.endswith("_class"):
deserializer.load(name.replace("_class", ""), optional=False)
else:
deserializer.load(name, optional=False)
trainer = cls(**deserializer.trainer_kwargs)
trainer._initialize(0, save_dict)
trainer._is_initialized = True
return trainer
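    # A usage sketch for the classmethod above (checkpoint folder and name are
    # hypothetical): it rebuilds the trainer from the serialized init data and
    # lets training be resumed directly.
    #     trainer = DefaultTrainer.from_checkpoint("./checkpoints/my-run", name="latest")
    #     trainer.fit(iterations=10_000)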
class Serializer:
"""Implements how to serialize trainer kwargs from a trainer instance
Examples:
            To extend the serialization process you can inherit from this Serializer in a derived Trainer class.
Note that the methods `dump_generic_builtin()`, `dump_generic_class()` and `dump_generic_instance()`
called by the `dump()` method when appropriate cover most cases already.
This example adds `the_answer` kwarg, which requires extra steps on dumping only because we don't keep a
'the_answer' attribute:
>>> class MyTrainer(DefaultTrainer):
>>> def __init__(self, *args, the_answer: int, **kwargs):
>>> super().__init__(*args, **kwargs)
>>> # self.the_answer = the_answer # this would allow the default Serializer to save the new kwarg,
>>> # but let's make things more interesting...
>>> self.the = the_answer // 10
>>> self.answer = the_answer % 10
>>>
>>> class Serializer(DefaultTrainer.Serializer):
>>> trainer: MyTrainer
>>> def dump_the_answer(self, kwarg_name: str) -> None: # custom dump method for 'the_answer' kwarg
>>> assert kwarg_name == "the_answer"
>>> # populate self.init_data with the serialized data required by Deserializer
>>> # to restore the trainer kwargs
>>> self.init_data["the_answer"] = self.trainer.the * 10 + self.trainer.answer
This example with both Serializer and Deserializer adds `the_answer` kwarg,
while saving it in two separate entries 'the' and 'answer'
>>> class MyTrainer(DefaultTrainer):
>>> def __init__(self, *args, the_answer: int, **kwargs):
>>> super().__init__(*args, **kwargs)
>>> self.the_answer = the_answer
>>>
>>> class Serializer(DefaultTrainer.Serializer):
>>> trainer: MyTrainer
>>> def dump_the_answer(self, kwarg_name: str):
>>> assert kwarg_name == "the_answer"
>>> self.init_data.update({
>>> "the": self.trainer.the_answer // 10,
>>> "answer": self.trainer.the_answer % 10
>>> })
>>>
>>> class Deserializer(DefaultTrainer.Deserializer):
>>> def load_the_answer(self, kwarg_name: str, optional: bool):
>>> assert kwarg_name == "the_answer"
>>> # 'optional' is True if MyTrainer.__init__ specifies a default value for 'kwarg_name'
>>> self.trainer_kwargs[kwarg_name] = self.init_data["the"] * 10 + self.init_data["answer"]
"""
def __init__(self, trainer: DefaultTrainer):
self.trainer = trainer
self.init_data = {} # to be populated during serialization process
def dump(self, kwarg_name: str) -> None:
dumper = getattr(self, f"dump_{kwarg_name}", None)
if dumper is not None:
dumper(kwarg_name)
elif kwarg_name.endswith("_loader"):
self.dump_data_loader(kwarg_name)
elif kwarg_name.endswith("_class"):
self.dump_generic_class(kwarg_name)
elif not hasattr(self.trainer, kwarg_name):
raise AttributeError(
f"{self.trainer.__class__} missing attribute '{kwarg_name}' "
f"or special dump method {self.trainer.__class__}.Serializer.dump_{kwarg_name}()"
)
else:
assert hasattr(self.trainer, kwarg_name)
obj = getattr(self.trainer, kwarg_name)
if obj is None or type(obj) in (
bool,
bytearray,
bytes,
dict,
float,
frozenset,
int,
list,
set,
str,
tuple,
):
self.dump_generic_builtin(kwarg_name)
else:
self.dump_generic_instance(kwarg_name)
def dump_generic_builtin(self, kwarg_name: str) -> None:
assert hasattr(self.trainer, kwarg_name)
self.init_data[kwarg_name] = getattr(self.trainer, kwarg_name)
def dump_generic_class(self, kwarg_name: str) -> None:
assert hasattr(self.trainer, kwarg_name)
assert kwarg_name.endswith("_class")
obj = getattr(self.trainer, kwarg_name)
self.init_data[kwarg_name] = None if obj is None else f"{obj.__module__}.{obj.__name__}"
def dump_generic_instance(self, kwarg_name: str) -> None:
assert hasattr(self.trainer, kwarg_name)
instance = getattr(self.trainer, kwarg_name)
self.init_data.update(
{
f"{kwarg_name}_class": f"{instance.__class__.__module__}.{instance.__class__.__name__}",
f"{kwarg_name}_kwargs": get_constructor_arguments(instance),
}
)
def dump_device(self, kwarg_name: str):
assert hasattr(self.trainer, kwarg_name)
self.init_data[kwarg_name] = str(getattr(self.trainer, kwarg_name))
def dump_data_loader(self, kwarg_name: str) -> None:
assert hasattr(self.trainer, kwarg_name)
loader = getattr(self.trainer, kwarg_name)
if loader is None:
return
self.init_data.update(
{
f"{kwarg_name.replace('_loader', '_dataset')}": loader.dataset,
f"{kwarg_name}_kwargs": get_constructor_arguments(loader),
}
)
def dump_logger(self, kwarg_name: str): # todo: remove and rename kwarg 'logger' to 'logger_class'
self.dump_generic_class(f"{kwarg_name}_class")
def dump_model(self, kwarg_name: str):
if is_compiled(self.trainer.model):
self.init_data.update(
{
"model_class": self.trainer._model_class,
"model_kwargs": self.trainer._model_kwargs,
}
)
else:
self.dump_generic_instance("model")
def _build_init(self) -> Dict[str, Any]:
serializer = self.Serializer(self)
for name in inspect.signature(self.__class__).parameters:
# special rules to serialize kwargs
# if a trainer class inherits from DefaultTrainer and has **kwargs
# they need to be saved in self._kwargs
if name == "kwargs":
if not hasattr(self, "_kwargs"):
msg = "The trainer class has **kwargs in its signature, but is missing the _kwargs attribute. " +\
"Please add self._kwargs to its __init__ function"
raise RuntimeError(msg)
kwargs = getattr(self, "_kwargs")
for kwarg_name in kwargs:
serializer.dump(kwarg_name)
continue
serializer.dump(name)
return serializer.init_data
def _initialize(self, iterations, load_from_checkpoint, epochs=None):
assert self.train_loader is not None
assert self.val_loader is not None
assert self.model is not None
assert self.loss is not None
assert self.optimizer is not None
assert self.metric is not None
assert self.device is not None
if load_from_checkpoint is not None:
self.load_checkpoint(load_from_checkpoint)
if sum((iterations is not None, epochs is not None)) != 1:
raise ValueError(
"Exactly one of 'iterations' or 'epochs' has to be specified to initialize the trainer."
f"You have passed 'iterations'={iterations} and 'epochs'={epochs}"
)
if epochs is None:
epochs = int(np.ceil(float(iterations) / len(self.train_loader)))
else:
iterations = epochs * len(self.train_loader)
self.max_iteration = self._iteration + iterations
self.max_epoch = self._epoch + epochs
if not getattr(self, "_is_initialized", False):
# check if we compile the model (only supported by pytorch 2)
# to enable (de)serialization of compiled models, we keep track of the model class and kwargs
if is_compiled(self.model):
warnings.warn(
"You have passed a compiled model to the trainer."
"It will not be possible to (de)serialize the trainer with it."
"If you want to be able to do this please pass the normal model."
"It can be automatically compiled by setting 'compile_model' to True"
)
self._model_class = f"{self.model.__class__.__module__}.{self.model.__class__.__name__}"
self._model_kwargs = get_constructor_arguments(self.model)
self.model = auto_compile(self.model, self.compile_model)
self.model.to(self.device)
self.loss.to(self.device)
# this saves all the information that is necessary
# to fully load the trainer from the checkpoint
self.init_data = self._build_init()
if self.logger_class is None:
self.logger = None
else:
# may set self.name if self.name is None
save_root = getattr(self, "save_root", None)
self.logger = self.logger_class(self, save_root, **(self.logger_kwargs or {}))
try:
os.makedirs(self.checkpoint_folder, exist_ok=True)
except PermissionError:
warnings.warn(
f"The checkpoint folder at {self.checkpoint_folder} could not be created."
"The most likely reason for this is that you copied the checkpoint somewhere else,"
"so we skip this error to enable loading the model from this checkpoint."
)
pass
best_metric = np.inf
return best_metric
def save_checkpoint(self, name, best_metric, **extra_save_dict):
save_path = os.path.join(self.checkpoint_folder, f"{name}.pt")
extra_init_dict = extra_save_dict.pop("init", {})
save_dict = {
"iteration": self._iteration,
"epoch": self._epoch,
"best_epoch": self._best_epoch,
"best_metric": best_metric,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"init": self.init_data | extra_init_dict,
}
save_dict.update(**extra_save_dict)
if self.scaler is not None:
save_dict.update({"scaler_state": self.scaler.state_dict()})
if self.lr_scheduler is not None:
save_dict.update({"scheduler_state": self.lr_scheduler.state_dict()})
torch.save(save_dict, save_path)
def load_checkpoint(self, checkpoint="best"):
if isinstance(checkpoint, str):
save_path = os.path.join(self.checkpoint_folder, f"{checkpoint}.pt")
if not os.path.exists(save_path):
warnings.warn(f"Cannot load checkpoint. {save_path} does not exist.")
return
save_dict = torch.load(save_path)
elif isinstance(checkpoint, dict):
save_dict = checkpoint
else:
raise RuntimeError
self._iteration = save_dict["iteration"]
self._epoch = save_dict["epoch"]
self._best_epoch = save_dict["best_epoch"]
self.best_metric = save_dict["best_metric"]
model_state = save_dict["model_state"]
# to enable loading compiled models
compiled_prefix = "_orig_mod."
model_state = OrderedDict(
[(k[len(compiled_prefix):] if k.startswith(compiled_prefix) else k, v) for k, v in model_state.items()]
)
self.model.load_state_dict(model_state)
# we need to send the network to the device before loading the optimizer state!
self.model.to(self.device)
self.optimizer.load_state_dict(save_dict["optimizer_state"])
if self.scaler is not None:
self.scaler.load_state_dict(save_dict["scaler_state"])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(save_dict["scheduler_state"])
return save_dict
def fit(self, iterations=None, load_from_checkpoint=None, epochs=None, save_every_kth_epoch=None):
"""Run neural network training.
Exactly one of 'iterations' or 'epochs' has to be passed.
Parameters:
iterations [int] - how long to train, specified in iterations (default: None)
load_from_checkpoint [str] - path to a checkpoint from where training should be continued (default: None)
epochs [int] - how long to train, specified in epochs (default: None)
save_every_kth_epoch [int] - save checkpoints after every kth epoch separately.
The corresponding checkpoints will be saved with the naming scheme 'epoch-{epoch}.pt'. (default: None)
"""
best_metric = self._initialize(iterations, load_from_checkpoint, epochs)
print(
"Start fitting for",
self.max_iteration - self._iteration,
"iterations / ",
self.max_epoch - self._epoch,
"epochs",
)
print("with", len(self.train_loader), "iterations per epoch")
if self.mixed_precision:
train_epoch = self._train_epoch_mixed
validate = self._validate_mixed
print("Training with mixed precision")
else:
train_epoch = self._train_epoch
validate = self._validate
print("Training with single precision")
progress = tqdm(
total=epochs * len(self.train_loader) if iterations is None else iterations,
desc=f"Epoch {self._epoch}", leave=True
)
msg = "Epoch %i: average [s/it]: %f, current metric: %f, best metric: %f"
train_epochs = self.max_epoch - self._epoch
for _ in range(train_epochs):
# run training and validation for this epoch
t_per_iter = train_epoch(progress)
current_metric = validate()
# perform all the post-epoch steps:
# apply the learning rate scheduler
if self.lr_scheduler is not None:
self.lr_scheduler.step(current_metric)
# save this checkpoint as the new best checkpoint if
# it has the best overall validation metric
if current_metric < best_metric:
best_metric = current_metric
self._best_epoch = self._epoch
self.save_checkpoint("best", best_metric)
# save this checkpoint as the latest checkpoint
self.save_checkpoint("latest", best_metric)
# if we save after every k-th epoch then check if we need to save now
if save_every_kth_epoch is not None and (self._epoch + 1) % save_every_kth_epoch == 0:
self.save_checkpoint(f"epoch-{self._epoch + 1}", best_metric)
# if early stopping has been specified then check if the stopping condition is met
if self.early_stopping is not None:
epochs_since_best = self._epoch - self._best_epoch
if epochs_since_best > self.early_stopping:
print("Stopping training because there has been no improvement for", self.early_stopping, "epochs")
break
self._epoch += 1
progress.set_description(msg % (self._epoch, t_per_iter, current_metric, best_metric), refresh=True)
print(f"Finished training after {self._epoch} epochs / {self._iteration} iterations.")
print(f"The best epoch is number {self._best_epoch}.")
if self._generate_name:
self.name = None
# TODO save the model to wandb if we have the wandb logger
if isinstance(self.logger, WandbLogger):
self.logger.get_wandb().finish()
def _backprop(self, loss):
loss.backward()
self.optimizer.step()
def _backprop_mixed(self, loss):
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
def _train_epoch(self, progress):
return self._train_epoch_impl(progress, contextlib.nullcontext, self._backprop)
def _train_epoch_mixed(self, progress):
return self._train_epoch_impl(progress, amp.autocast, self._backprop_mixed)
def _forward_and_loss(self, x, y):
pred = self.model(x)
if self._iteration % self.log_image_interval == 0:
if pred.requires_grad:
pred.retain_grad()
loss = self.loss(pred, y)
return pred, loss
def _train_epoch_impl(self, progress, forward_context, backprop: Callable[[torch.Tensor], None]):
self.model.train()
n_iter = 0
t_per_iter = time.time()
for x, y in self.train_loader:
x, y = x.to(self.device), y.to(self.device)
self.optimizer.zero_grad()
with forward_context():
pred, loss = self._forward_and_loss(x, y)
backprop(loss)
lr = [pm["lr"] for pm in self.optimizer.param_groups][0]
if self.logger is not None:
self.logger.log_train(self._iteration, loss, lr, x, y, pred, log_gradients=True)
self._iteration += 1
n_iter += 1
if self._iteration >= self.max_iteration:
break
progress.update(1)
t_per_iter = (time.time() - t_per_iter) / n_iter
return t_per_iter
def _validate(self):
return self._validate_impl(contextlib.nullcontext)
def _validate_mixed(self):
return self._validate_impl(amp.autocast)
def _validate_impl(self, forward_context):
self.model.eval()
metric_val = 0.0
loss_val = 0.0
with torch.no_grad():
for x, y in self.val_loader:
x, y = x.to(self.device), y.to(self.device)
with forward_context():
pred, loss = self._forward_and_loss(x, y)
metric = self.metric(pred, y)
loss_val += loss.item()
metric_val += metric.item()
metric_val /= len(self.val_loader)
loss_val /= len(self.val_loader)
if self.logger is not None:
self.logger.log_validation(self._iteration, metric_val, loss_val, x, y, pred)
return metric_val
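# A minimal, hypothetical sketch of how this trainer is typically wired up (model,
# loaders, loss and metric are placeholders, not part of this module); note that both
# data loaders must expose a `shuffle` attribute, as checked in `__init__`:
#     trainer = DefaultTrainer(
#         name="example-run",
#         train_loader=train_loader, val_loader=val_loader,
#         model=model, loss=torch.nn.MSELoss(), metric=torch.nn.MSELoss(),
#         optimizer=torch.optim.Adam(model.parameters(), lr=1e-4),
#         device="cuda" if torch.cuda.is_available() else "cpu",
#     )
#     trainer.fit(epochs=5)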
|
constantinpape/torch-em
|
torch_em/trainer/default_trainer.py
|
default_trainer.py
|
py
| 29,151
|
python
|
en
|
code
| 42
|
github-code
|
6
|
14095456258
|
# Aditya Halder // @AdityaHalder
import os
import aiofiles
import aiohttp
import ffmpeg
import requests
from os import path
from asyncio.queues import QueueEmpty
from typing import Callable
from pyrogram import Client, filters
from pyrogram.types import Message, Voice, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram.errors import UserAlreadyParticipant
from modules.cache.admins import set
from modules.clientbot import clientbot, queues
from modules.clientbot.clientbot import client as USER
from modules.helpers.admins import get_administrators
from youtube_search import YoutubeSearch
from modules import converter
from modules.downloaders import youtube
from modules.config import que, SUDO_USERS
from modules.cache.admins import admins as a
from modules.helpers.command import commandpro
from modules.helpers.filters import command, other_filters
from modules.helpers.decorators import errors, sudo_users_only
from modules.helpers.errors import DurationLimitError
from modules.helpers.gets import get_url, get_file_name
from pytgcalls import StreamType
from pytgcalls.types.input_stream import InputStream
from pytgcalls.types.input_stream import InputAudioStream
# plus
chat_id = None
useer = "NaN"
def transcode(filename):
ffmpeg.input(filename).output(
"input.raw", format="s16le", acodec="pcm_s16le", ac=2, ar="48k"
).overwrite_output().run()
os.remove(filename)
# Convert seconds to mm:ss
def convert_seconds(seconds):
seconds = seconds % (24 * 3600)
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%02d:%02d" % (minutes, seconds)
# Convert hh:mm:ss to seconds
def time_to_seconds(time):
stringt = str(time)
return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(":"))))
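# For example, convert_seconds(75) returns "01:15" and time_to_seconds("1:15") returns 75,
# so the two helpers are inverses of each other for durations under one hour.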
@Client.on_message(
commandpro(["ply"])
& filters.group
& ~filters.edited
& ~filters.forwarded
& ~filters.via_bot
)
@errors
@sudo_users_only
async def play(_, message: Message):
global que
global useer
await message.delete()
lel = await message.reply("**🔄 Ƥɤøƈɘssɩɳʛ ...**")
administrators = await get_administrators(message.chat)
chid = message.chat.id
audio = (
(message.reply_to_message.audio or message.reply_to_message.voice)
if message.reply_to_message
else None
)
url = get_url(message)
if audio:
file_name = get_file_name(audio)
title = file_name
thumb_name = "https://te.legra.ph/file/ed6920a2f0ab5af3fd55d.png"
thumbnail = thumb_name
duration = round(audio.duration / 60)
views = "Locally added"
requested_by = message.from_user.first_name
file_path = await converter.convert(
(await message.reply_to_message.download(file_name))
if not path.isfile(path.join("downloads", file_name))
else file_name
)
elif url:
try:
results = YoutubeSearch(url, max_results=1).to_dict()
# print results
title = results[0]["title"]
duration = results[0]["duration"]
url_suffix = results[0]["url_suffix"]
views = results[0]["views"]
durl = url
durl = durl.replace("youtube", "youtubepp")
secmul, dur, dur_arr = 1, 0, duration.split(":")
for i in range(len(dur_arr) - 1, -1, -1):
dur += int(dur_arr[i]) * secmul
secmul *= 60
except Exception as e:
title = "NaN"
thumb_name = "https://te.legra.ph/file/ed6920a2f0ab5af3fd55d.png"
duration = "NaN"
views = "NaN"
requested_by = message.from_user.first_name
file_path = await converter.convert(youtube.download(url))
else:
if len(message.command) < 2:
return await lel.edit(
"**🤖 Wɦɑʈ 🙃 Yøʋ 💿 Wɑŋʈ ðŸ˜�\n💞 Ƭø 🔊 ƤÉÉ‘yâ�“ ...**"
)
await lel.edit("**🔎 Sɘɑɤƈɦɩɳʛ ...**")
query = message.text.split(None, 1)[1]
# print(query)
await lel.edit("**🔄 Ƥɤøƈɘssɩɳʛ ...**")
try:
results = YoutubeSearch(query, max_results=1).to_dict()
url = f"https://youtube.com{results[0]['url_suffix']}"
# print results
title = results[0]["title"]
duration = results[0]["duration"]
url_suffix = results[0]["url_suffix"]
views = results[0]["views"]
durl = url
durl = durl.replace("youtube", "youtubepp")
secmul, dur, dur_arr = 1, 0, duration.split(":")
for i in range(len(dur_arr) - 1, -1, -1):
dur += int(dur_arr[i]) * secmul
secmul *= 60
except Exception as e:
await lel.edit(
"**🔊 Ɱʋsɩƈ 😕 �øʈ 📵 Føʋɳɗ��\n💞 Ƭɤy ♨� Ʌɳøʈɦɘɤ 🌷...**"
)
print(str(e))
return
requested_by = message.from_user.first_name
file_path = await converter.convert(youtube.download(url))
ACTV_CALLS = []
chat_id = message.chat.id
for x in clientbot.pytgcalls.active_calls:
ACTV_CALLS.append(int(x.chat_id))
if int(chat_id) in ACTV_CALLS:
position = await queues.put(chat_id, file=file_path)
await lel.edit("**💥 ƘɑɑÉ🤞Ʌɗɗɘɗ 💿 Søɳʛâ�—ï¸�\n🔊 Ʌʈ 💞 Ƥøsɩʈɩøɳ » `{}` 🌷 ...**".format(position),
)
else:
await clientbot.pytgcalls.join_group_call(
chat_id,
InputStream(
InputAudioStream(
file_path,
),
),
stream_type=StreamType().local_stream,
)
await lel.edit("**💥 ƘɑɑÉ🤞MÊ‹sɩƈ 🎸 Nøω 💞\n🔊 ƤÉÉ‘yɩɳʛ ðŸ˜� ØƤ 🥀 ...**".format(),
)
return await lel.delete()
@Client.on_message(commandpro(["pse"]) & other_filters)
@errors
@sudo_users_only
async def pause(_, message: Message):
await message.delete()
await clientbot.pytgcalls.pause_stream(message.chat.id)
    pase = await message.reply_text("**▶️ Ƥɑʋsɘɗ 🌷 ...**")
await pase.delete()
@Client.on_message(commandpro(["rsm"]) & other_filters)
@errors
@sudo_users_only
async def resume(_, message: Message):
await message.delete()
await clientbot.pytgcalls.resume_stream(message.chat.id)
    rsum = await message.reply_text("**Ʀɘsʋɱɘɗ 🌷 ...**")
await rsum.delete()
@Client.on_message(commandpro(["skp", "nxt"]) & other_filters)
@errors
@sudo_users_only
async def skip(_, message: Message):
global que
await message.delete()
ACTV_CALLS = []
chat_id = message.chat.id
for x in clientbot.pytgcalls.active_calls:
ACTV_CALLS.append(int(x.chat_id))
if int(chat_id) not in ACTV_CALLS:
        novc = await message.reply_text("**💥 Ɲøʈɦɩɳʛ 🔇 Ƥɭɑyɩɳʛ 🌷 ...**")
await novc.delete()
else:
queues.task_done(chat_id)
if queues.is_empty(chat_id):
empt = await message.reply_text("**🥀 Empty Queue, Leaving VC ✨ ...**")
await empt.delete()
await clientbot.pytgcalls.leave_group_call(chat_id)
else:
            next = await message.reply_text("**Sƙɩƥƥɘɗ 🌷 ...**")
await next.delete()
await clientbot.pytgcalls.change_stream(
chat_id,
InputStream(
InputAudioStream(
clientbot.queues.get(chat_id)["file"],
),
),
)
@Client.on_message(commandpro(["end", "stp"]) & other_filters)
@errors
@sudo_users_only
async def stop(_, message: Message):
await message.delete()
try:
clientbot.queues.clear(message.chat.id)
except QueueEmpty:
pass
await clientbot.pytgcalls.leave_group_call(message.chat.id)
    leav = await message.reply_text("**Sʈøƥƥɘɗ 🌷 ...**")
await leav.delete()
|
ndika22/KaalMusic
|
plugins/vcbot.py
|
vcbot.py
|
py
| 8,167
|
python
|
en
|
code
| 0
|
github-code
|
6
|