hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4eeba8c507dbd8f435dcd16c1fc5b07ff0fd0417 | 818 | py | Python | src/uglier/colored_logger.py | MonliH/uglier | 0922ddfab52677f959290400de578953b59e4597 | [
"MIT"
] | 1 | 2022-02-14T02:54:50.000Z | 2022-02-14T02:54:50.000Z | src/uglier/colored_logger.py | MonliH/uglier | 0922ddfab52677f959290400de578953b59e4597 | [
"MIT"
] | null | null | null | src/uglier/colored_logger.py | MonliH/uglier | 0922ddfab52677f959290400de578953b59e4597 | [
"MIT"
] | null | null | null | # Credits to https://stackoverflow.com/a/56944256/9470078
# I wanted a colored logger without dependencies, so here it is!
import logging
class ColoredFormatter(logging.Formatter):
    """Logging formatter that wraps each record in an ANSI color escape
    sequence chosen by the record's level."""

    # ANSI escape sequences for the supported colors.
    grey = "\x1b[38;20m"
    yellow = "\x1b[33;20m"
    red = "\x1b[31;20m"
    blue = "\x1b[34;20m"
    bold_red = "\x1b[31;1m"
    reset = "\x1b[0m"
    # NOTE: this string attribute is only used while the class body executes
    # (to build FORMATS below); once the ``format`` method is defined it
    # shadows this attribute on the class.
    format = "%(name)s: %(levelname)s - %(message)s"
    # Level -> color-wrapped format string, computed once at class creation.
    FORMATS = {
        logging.DEBUG: grey + format + reset,
        logging.INFO: blue + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
        logging.CRITICAL: bold_red + format + reset,
    }

    def format(self, record):
        """Format *record* with the color-wrapped format for its level.

        Unknown levels fall through: ``dict.get`` returns ``None`` and
        ``logging.Formatter(None)`` uses logging's default format.
        """
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt)
        return formatter.format(record)
| 30.296296 | 64 | 0.625917 | 677 | 0.827628 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.284841 |
4eec746d820c53cfe1acfbbe561209eb0112a801 | 207 | py | Python | tests/SHA256/run.py | weikengchen/Virgo | 73b924f33e18c017bafa3ed37da84a7b0d80ec5d | [
"Apache-2.0"
] | 9 | 2020-06-02T04:54:08.000Z | 2021-12-07T12:54:09.000Z | tests/SHA256/run.py | weikengchen/Virgo | 73b924f33e18c017bafa3ed37da84a7b0d80ec5d | [
"Apache-2.0"
] | 2 | 2021-01-07T18:34:10.000Z | 2021-03-22T20:29:43.000Z | tests/SHA256/run.py | weikengchen/Virgo | 73b924f33e18c017bafa3ed37da84a7b0d80ec5d | [
"Apache-2.0"
] | 5 | 2020-06-08T09:11:37.000Z | 2021-07-29T12:15:47.000Z | import os
# Ensure the output directory for the proof transcripts exists.
os.system('mkdir -p LOG')
# Run the zero-knowledge prover once per SHA256 Merkle circuit variant
# (files are numbered 1..8), logging each run under LOG/.
for i in range(8):
    os.system('./zk_proof SHA256_64_merkle_' + str(i + 1) + '_circuit.txt SHA256_64_merkle_' + str(i + 1) + '_meta.txt LOG/SHA256_' + str(i + 1) + '.txt')
| 41.4 | 151 | 0.647343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.507246 |
4eee40c453e96ec4d8088f99f82cf3eed41c437d | 1,539 | py | Python | projects/07/src/Parser.py | danibachar/Nand2Tetris | fdce0e87a4869213f3eeb0004ba2af688ea1b1cc | [
"MIT"
] | null | null | null | projects/07/src/Parser.py | danibachar/Nand2Tetris | fdce0e87a4869213f3eeb0004ba2af688ea1b1cc | [
"MIT"
] | null | null | null | projects/07/src/Parser.py | danibachar/Nand2Tetris | fdce0e87a4869213f3eeb0004ba2af688ea1b1cc | [
"MIT"
] | null | null | null | from Lex import Lex, ARITHMETIC_COMMANDS, PUSH_OR_POP_COMMANDS
class Parser:
    """Reads a Hack VM source file and exposes its commands sequentially.

    The constructor performs a first pass over the file, discarding blank
    lines and comments; the surviving command lines are then consumed one
    at a time via ``has_more_command()`` / ``advance()``.
    """

    def __init__(self, src_file_name):
        """Load and pre-clean the VM source file *src_file_name*."""
        self._line_index = 0
        self._lines = []
        # Use a context manager so the file handle is always released
        # (the original opened the file and never closed it).
        with open(src_file_name) as f:
            # First assessment of the source: keep only meaningful lines.
            for line in f:
                strip_line = line.lstrip()
                # Skip blank lines and full-line comments.
                if len(strip_line) == 0 or strip_line[0:2] == '//':
                    continue
                l = strip_line.replace('\n', '')  # Removing new line
                l = l.replace('\t', '')           # Removing tabs
                l = l.split('/')[0]               # Removing inline comments
                self._lines.append(l)

    def _current_line(self):
        """Return the raw text of the current command line."""
        return self._lines[self._line_index]

    def current_command(self):
        """Return the command keyword (first token) of the current line."""
        return self._current_line().split(" ")[0]

    def advance(self):
        """Move on to the next command."""
        self._line_index += 1

    def has_more_command(self):
        """Return True while there are still unconsumed commands."""
        return len(self._lines) > self._line_index

    def command_type(self):
        """Classify the current command as push/pop or arithmetic.

        Raises:
            Exception: if the command keyword is not recognised.
        """
        command = self.current_command()
        if command in PUSH_OR_POP_COMMANDS:
            return Lex.C_PUSH_OR_POP
        if command in ARITHMETIC_COMMANDS:
            return Lex.C_ARITMETIC
        # Bug fix: the original referenced the undefined name ``curr_line``
        # here, so an unknown command raised NameError instead of this
        # intended, descriptive exception.
        raise Exception("Command Type not found in line = {}".format(self._current_line()))

    def arg1(self):
        """Return the first argument (second token) of the current line."""
        return self._current_line().split(" ")[1]

    def arg2(self):
        """Return the second argument (third token) of the current line."""
        return self._current_line().split(" ")[2]
| 29.596154 | 80 | 0.591293 | 1,474 | 0.957765 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.149448 |
4eef1320c0e0691a8298a782442f1c3ab4a42e10 | 40 | py | Python | testing/examples/import_error.py | dry-python/dependencies | 1a8bba41ab42d0b5249b36471f5300d9faba81e7 | [
"BSD-2-Clause"
] | 175 | 2018-07-21T13:04:44.000Z | 2020-05-27T15:31:06.000Z | tests/helpers/examples/import_error.py | proofit404/dependencies | 204e0cfadca801d64857f24aa4c74e7939ed9af0 | [
"BSD-2-Clause"
] | 325 | 2016-05-16T11:16:11.000Z | 2022-03-04T00:45:57.000Z | testing/examples/import_error.py | dry-python/dependencies | 1a8bba41ab42d0b5249b36471f5300d9faba81e7 | [
"BSD-2-Clause"
] | 18 | 2018-06-17T09:33:16.000Z | 2020-05-20T18:12:30.000Z | from astral import Vision # noqa: F401
| 20 | 39 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.3 |
4eef83ede604e40066824025d8e5da2b23f0f80b | 1,297 | py | Python | pywxwork/contact/async.py | renqiukai/pywxwork | 980283b4c89bf60f998071f30551b8802f1cdbf9 | [
"MIT"
] | 2 | 2021-05-22T11:16:47.000Z | 2021-05-27T12:42:38.000Z | pywxwork/contact/async.py | renqiukai/pywxwork | 980283b4c89bf60f998071f30551b8802f1cdbf9 | [
"MIT"
] | null | null | null | pywxwork/contact/async.py | renqiukai/pywxwork | 980283b4c89bf60f998071f30551b8802f1cdbf9 | [
"MIT"
] | null | null | null | from loguru import logger
from ..base import base
# NOTE(review): ``async`` became a reserved keyword in Python 3.7, so this
# class name only parses on Python <= 3.6; renaming it would change the
# public API, so it is documented but left unchanged here.
class async(base):
    """WeChat Work (WeCom) contact batch/async API wrapper."""

    def __init__(self, token) -> None:
        super().__init__(token)

    def syncuser(self, data):
        """Incremental member sync (batch/syncuser).

        ``data`` is presumably the JSON payload described by the WeCom API
        docs — verify against the endpoint documentation.
        """
        api_name = "batch/syncuser"
        response = self.request(
            api_name=api_name, method="post", json=data)
        logger.debug(response)
        return response

    def replaceuser(self, data):
        """Full (replace-all) member sync.

        https://open.work.weixin.qq.com/api/doc/90000/90135/90981
        """
        api_name = "batch/replaceuser"
        response = self.request(
            api_name=api_name, method="post", json=data)
        logger.debug(response)
        return response

    def replaceparty(self, data):
        """Full (replace-all) department sync.

        https://open.work.weixin.qq.com/api/doc/90000/90135/90982
        """
        api_name = "batch/replaceparty"
        response = self.request(
            api_name=api_name, method="post", json=data)
        logger.debug(response)
        return response

    def getresult(self, data):
        """Fetch the result of a previously submitted async batch job.

        https://open.work.weixin.qq.com/api/doc/90000/90135/90983
        """
        api_name = "batch/getresult"
        response = self.request(
            api_name=api_name, method="get", params=data)
        logger.debug(response)
        return response
| 28.822222 | 65 | 0.59522 | 1,284 | 0.960359 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.297681 |
4eefbfc6df35a9a798314233bdb1d9a5b18867b2 | 551 | py | Python | saleor/University/migrations/0012_auto_20191220_1226.py | pandeyroshan/Saleor | 9f391d7a9d58626d3696781c169b2c8a059f1c0b | [
"CC-BY-4.0"
] | null | null | null | saleor/University/migrations/0012_auto_20191220_1226.py | pandeyroshan/Saleor | 9f391d7a9d58626d3696781c169b2c8a059f1c0b | [
"CC-BY-4.0"
] | null | null | null | saleor/University/migrations/0012_auto_20191220_1226.py | pandeyroshan/Saleor | 9f391d7a9d58626d3696781c169b2c8a059f1c0b | [
"CC-BY-4.0"
] | null | null | null | # Generated by Django 2.2 on 2019-12-20 06:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('University', '0011_auto_20191219_1913'),
]
operations = [
migrations.RemoveField(
model_name='university',
name='user',
),
migrations.RemoveField(
model_name='university',
name='userEmail',
),
migrations.RemoveField(
model_name='university',
name='userPhone',
),
]
| 21.192308 | 50 | 0.555354 | 468 | 0.849365 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.264973 |
4ef0b50ef3594f7350bca88a70048229aa579679 | 1,539 | py | Python | app/places/migrations/0001_initial.py | aykutgk/nearapp-web | 469b9878cf7434278c78a733eeac40713dcc40a4 | [
"MIT"
] | null | null | null | app/places/migrations/0001_initial.py | aykutgk/nearapp-web | 469b9878cf7434278c78a733eeac40713dcc40a4 | [
"MIT"
] | null | null | null | app/places/migrations/0001_initial.py | aykutgk/nearapp-web | 469b9878cf7434278c78a733eeac40713dcc40a4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 23:40
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Name of the business or place')),
('point', django.contrib.gis.db.models.fields.PointField(srid=4326, verbose_name='Latitude/Longitude on Map')),
('google_place_id', models.CharField(max_length=60, unique=True, verbose_name='Google place id')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True, verbose_name='Is this business or place still active?')),
('google_map_url', models.URLField(blank=True, default=None, null=True, verbose_name='Google map url')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner')),
],
),
]
| 43.971429 | 141 | 0.660169 | 1,272 | 0.826511 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.201429 |
4ef201c0a10d56d741c0b5148b73d6e3ae7ebce4 | 143 | py | Python | acmgnyr2011/b.py | AnAverageHuman/competitive | 4c4b9bdbe91fde1c52f731426f9a53bff97796e1 | [
"BSD-3-Clause"
] | null | null | null | acmgnyr2011/b.py | AnAverageHuman/competitive | 4c4b9bdbe91fde1c52f731426f9a53bff97796e1 | [
"BSD-3-Clause"
] | null | null | null | acmgnyr2011/b.py | AnAverageHuman/competitive | 4c4b9bdbe91fde1c52f731426f9a53bff97796e1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
P = int(input())
for _ in range(P):
N, n, m = [int(i) for i in input().split()]
print(f"{N} {(n - m) * m + 1}")
| 20.428571 | 47 | 0.48951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.314685 |
4ef25edc19b260d4a3119393c3b4c0a16a1f9130 | 462 | py | Python | preprocessing/importlib_read.py | Kunal614/Machine-Learning | 26b3e0f3397ddb524c96c5b6c99b173b6fc80501 | [
"MIT"
] | null | null | null | preprocessing/importlib_read.py | Kunal614/Machine-Learning | 26b3e0f3397ddb524c96c5b6c99b173b6fc80501 | [
"MIT"
] | null | null | null | preprocessing/importlib_read.py | Kunal614/Machine-Learning | 26b3e0f3397ddb524c96c5b6c99b173b6fc80501 | [
"MIT"
] | null | null | null | #importing lib
import pandas as pd
import numpy as np
#Take data
df = pd.DataFrame({"Name":['Kunal' , 'Mohit' , 'Rohit' ] ,"age":[np.nan , 23, 45] , "sex":['M' , np.nan , 'M']})
#check for nnull value
print(df.isnull().sum())
print(df.describe())
# ignore the nan rows
print(len(df.dropna()) , df.dropna())
#for ignoring columns
print(len(df.dropna(axis=1)) , df.dropna(axis=1))
print(len(df) , df)
#All the rows are ignore
print(df.isnull().sum())
| 15.931034 | 112 | 0.634199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.335498 |
4ef2b218f2e39c030635631f9cea78b7d43ed7b8 | 2,774 | py | Python | isw2-master/src/app/ui/ajustarReloj.py | marlanbar/academic-projects | bcdc8ca36b6984ab3f83c10b8a3ed45576ecfca1 | [
"MIT"
] | null | null | null | isw2-master/src/app/ui/ajustarReloj.py | marlanbar/academic-projects | bcdc8ca36b6984ab3f83c10b8a3ed45576ecfca1 | [
"MIT"
] | null | null | null | isw2-master/src/app/ui/ajustarReloj.py | marlanbar/academic-projects | bcdc8ca36b6984ab3f83c10b8a3ed45576ecfca1 | [
"MIT"
] | null | null | null | import datetime
from .consola import Consola
from .uiscreen import UIScreen
from ..core.reloj import Reloj
class AjustarReloj(UIScreen):
def __init__(self, unMain, unUsuario):
super().__init__(unMain)
self.usuario = unUsuario
def run(self):
self.consola.prnt("")
self.consola.prnt(" Ahora: %s" % self.main.getReloj().getFechaYTiempo().strftime("%d/%m/%Y %H:%M:%S"))
self.consola.prnt("===========================================================")
self.consola.prnt("1. Ajustar fecha")
self.consola.prnt("2. Ajustar tiempo")
self.consola.prnt("3. Avanzar una cantidad de dias")
self.consola.prnt("4. Correr procesos dependientes del tiempo")
self.consola.prnt("-----------------------------------------------------------")
self.consola.prnt("9. Volver a la pantalla anterior")
self.consola.prnt("")
opt = self.consola.askInput("Ingrese el número de la opcion que le interesa: ")
if opt == "1":
self.consola.prnt("-----------------------------------------------------------")
self.consola.prnt("")
inputDate = self.consola.askInput("Ingrese la fecha en formato DD/MM/YYYY: ")
try:
date = datetime.datetime.strptime(inputDate, "%d/%m/%Y")
self.consola.clear()
self.main.getReloj().resetFechaYTiempo(datetime.datetime.combine(date, self.main.getReloj().getTiempo()))
except:
self.consola.clear()
self.consola.prnt("[ERROR] La fecha ingresada es inválida")
elif opt == "2":
self.consola.prnt("-----------------------------------------------------------")
self.consola.prnt("")
inputTime = self.consola.askInput("Ingrese el tiempo en formato HH:MM:SS: ")
try:
time = datetime.datetime.strptime(inputTime, "%H:%M:%S")
self.consola.clear()
self.main.getReloj().resetFechaYTiempo(datetime.datetime.combine(self.main.getReloj().getFecha(), time.time()))
except:
self.consola.clear()
self.consola.prnt("[ERROR] El tiempo ingresado es inválido")
elif opt == "3":
self.consola.prnt("-----------------------------------------------------------")
self.consola.prnt("")
inputDays = self.consola.askInput("Ingrese la cantidad de dias: ")
try:
days = datetime.timedelta(days=int(inputDays))
except:
days = None
self.consola.clear()
self.consola.prnt("[ERROR] La cantidad ingresada es inválida")
if days is not None:
self.consola.clear()
self.main.getReloj().resetFechaYTiempo(self.main.getReloj().getFechaYTiempo() + days)
elif opt == "4":
self.consola.clear()
self.main.getReloj().notificar()
self.consola.prnt("[MSG] Procesos ejecutados")
elif opt == "9":
from .homeScreen import HomeScreen
self.consola.clear()
self.main.setScreen(HomeScreen(self.main, self.usuario))
else:
self.consola.clear()
| 37.486486 | 115 | 0.612112 | 2,667 | 0.960043 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.309215 |
4ef40c95ccc6546b171182631d74d1be2a580530 | 2,359 | py | Python | signin/jd.py | nujabse/simpleSignin | e818f9880d97d39beddcadccb53dc23e18fe6d8e | [
"MIT"
] | 11 | 2018-03-07T04:13:05.000Z | 2019-11-28T04:43:55.000Z | signin/jd.py | nujabse/simpleSignin | e818f9880d97d39beddcadccb53dc23e18fe6d8e | [
"MIT"
] | 2 | 2018-03-04T15:08:01.000Z | 2018-05-28T07:42:47.000Z | signin/jd.py | nujabse/simpleSignin | e818f9880d97d39beddcadccb53dc23e18fe6d8e | [
"MIT"
] | 15 | 2018-01-25T10:54:06.000Z | 2019-11-05T07:09:20.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import traceback
from selenium.webdriver import ChromeOptions
from signin.chrome import find_chrome_driver_path, JdSession
from signin.jd_job import jobs_all
from lib.log import logger
from lib.settings import PC_UA
from lib.settings import MOBILE_UA
class JDUser:
    """Per-user sign-in configuration: credentials, user-agent strings and
    the list of job class names to skip."""

    def __init__(self, username, password, jobs_skip=None):
        self.headless = True  # run the browser without a visible window
        self.logger = logger
        self.ua_pc = PC_UA    # desktop User-Agent string (from settings)
        self.ua = MOBILE_UA   # mobile User-Agent string (from settings)
        self.username = username
        self.password = password
        # Names of job classes to exclude from the run (defaults to none).
        self.jobs_skip = jobs_skip or []
class JD:
    """Runs the configured JD sign-in jobs for one user and reports a
    pass/fail summary."""

    def __init__(self, username, password):
        self.user = JDUser(username, password)
        self.session = self.make_session()
        # Every job from jobs_all whose class name is not in the skip list.
        self.job_list = [job for job in jobs_all if job.__name__ not in self.user.jobs_skip]

    def sign(self):
        """Run every job in ``job_list``; return True iff all succeeded."""
        jobs_failed = []
        for job_class in self.job_list:
            job = job_class(self)
            # Use the mobile User-Agent by default, otherwise the PC one.
            # if job.is_mobile:
            #     job.session.headers.update({
            #         'User-Agent': self.user.ua
            #     })
            # else:
            #     job.session.headers.update({
            #         'User-Agent': self.user.ua_pc})
            try:
                job.run()
            except Exception as e:
                logger.error('# 任务运行出错: ' + repr(e))
                traceback.print_exc()
            # A job may also fail without raising; check its success flag.
            if not job.job_success:
                jobs_failed.append(job.job_name)

        print('=================================')
        print('= 任务数: {}; 失败数: {}'.format(len(self.job_list), len(jobs_failed)))
        if jobs_failed:
            print('= 失败的任务: {}'.format(jobs_failed))
        else:
            print('= 全部成功 ~')
        print('=================================')
        return len(jobs_failed) == 0

    def make_session(self) -> JdSession:
        """Build a Chrome-backed JdSession, headless if configured."""
        chrome_path = find_chrome_driver_path()
        session = JdSession(webdriver_path=str(chrome_path),
                            browser='chrome',
                            webdriver_options=ChromeOptions())
        session.webdriver_options.add_argument('lang=zh_CN.UTF-8')
        if self.user.headless:
            session.webdriver_options.add_argument('headless')
        return session
return session
if __name__ == '__main__':
    # No standalone behaviour; this module is used via its JD class.
    pass
| 30.24359 | 92 | 0.561679 | 2,082 | 0.857849 | 0 | 0 | 0 | 0 | 0 | 0 | 501 | 0.206428 |
4ef48cca62f034a2764603154909770a46970bca | 284 | py | Python | tests/tasks/test_aws_athena_task.py | jezd-axyl/platsec-aws-scanner | bc2b064c87ac2f77fab49c1e1eb3782d6de685b2 | [
"Apache-2.0"
] | null | null | null | tests/tasks/test_aws_athena_task.py | jezd-axyl/platsec-aws-scanner | bc2b064c87ac2f77fab49c1e1eb3782d6de685b2 | [
"Apache-2.0"
] | 4 | 2021-05-06T12:36:46.000Z | 2022-02-11T09:47:57.000Z | tests/tasks/test_aws_athena_task.py | jezd-axyl/platsec-aws-scanner | bc2b064c87ac2f77fab49c1e1eb3782d6de685b2 | [
"Apache-2.0"
] | 2 | 2021-04-21T04:48:47.000Z | 2022-01-14T04:29:17.000Z | from unittest import TestCase
from unittest.mock import Mock
from tests.test_types_generator import athena_task
class TestAwsAthenaTask(TestCase):
    """Verify the base Athena task leaves ``_run_task`` unimplemented."""

    def test_run_task(self) -> None:
        # The abstract base task must raise NotImplementedError when asked
        # to run; subclasses are expected to provide _run_task.
        self.assertRaises(NotImplementedError, lambda: athena_task()._run_task(Mock()))
| 25.818182 | 52 | 0.757042 | 168 | 0.591549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4ef53f10b384a2cdfc48e2d923372b8c68776225 | 1,966 | py | Python | Post-Exploitation/LaZagne/Linux/lazagne/softwares/wallet/gnome.py | FOGSEC/TID3xploits | b57d8bae454081a3883a5684679e2a329e72d6e5 | [
"MIT"
] | 5 | 2018-01-15T13:58:40.000Z | 2022-02-17T02:38:58.000Z | Post-Exploitation/LaZagne/Linux/lazagne/softwares/wallet/gnome.py | bhattsameer/TID3xploits | b57d8bae454081a3883a5684679e2a329e72d6e5 | [
"MIT"
] | null | null | null | Post-Exploitation/LaZagne/Linux/lazagne/softwares/wallet/gnome.py | bhattsameer/TID3xploits | b57d8bae454081a3883a5684679e2a329e72d6e5 | [
"MIT"
] | 4 | 2019-06-21T07:51:11.000Z | 2020-11-04T05:20:09.000Z | #!/usr/bin/env python
import os
from lazagne.config.write_output import print_debug
from lazagne.config.moduleInfo import ModuleInfo
class Gnome(ModuleInfo):
    # NOTE(review): Python 2 code (``except Exception,e`` / ``has_key``);
    # it will not parse on Python 3 and is documented as-is.
    def __init__(self):
        # Registers the "-g" command-line switch for the Gnome Keyring module.
        options = {'command': '-g', 'action': 'store_true', 'dest': 'gnomeKeyring', 'help': 'Gnome Keyring'}
        ModuleInfo.__init__(self, 'gnomeKeyring', 'wallet', options)

    def run(self, software_name = None):
        """Enumerate every keyring item via the gnomekeyring D-Bus API and
        return a list of dicts with the stored attributes and secrets."""
        # The keyring daemon belongs to the desktop session, so running as
        # root would not see the user's keyrings.
        if os.getuid() == 0:
            print_debug('WARNING', 'Do not run it with root privileges\n')
            return

        try:
            import gnomekeyring
            if len(gnomekeyring.list_keyring_names_sync()) > 0:
                pwdFound = []
                for keyring in gnomekeyring.list_keyring_names_sync():
                    for id in gnomekeyring.list_item_ids_sync(keyring):
                        values = {}
                        item = gnomekeyring.item_get_info_sync(keyring, id)
                        attr = gnomekeyring.item_get_attributes_sync(keyring, id)
                        # Copy only the attributes that are present.
                        if attr:
                            if item.get_display_name():
                                values["Item"] = item.get_display_name()
                            if attr.has_key('server'):
                                values["Host"] = attr['server']
                            if attr.has_key('protocol'):
                                values["Protocol"] = attr['protocol']
                            if attr.has_key('unique'):
                                values["Unique"] = attr['unique']
                            if attr.has_key('domain'):
                                values["Domain"] = attr['domain']
                            if attr.has_key('origin_url'):
                                values["URL"] = attr['origin_url']
                            if attr.has_key('username_value'):
                                values["Login"] = attr['username_value']
                            if attr.has_key('user'):
                                values["User"] = attr['user']
                            if item.get_secret():
                                values["Password"] = item.get_secret()
                            # write credentials into a text file
                            if len(values) != 0:
                                pwdFound.append(values)
                return pwdFound
            else:
                print_debug('WARNING', 'The Gnome Keyring wallet is empty')
        except Exception,e:
            print_debug('ERROR', 'An error occurs with the Gnome Keyring wallet: {0}'.format(e))
| 31.206349 | 102 | 0.619023 | 1,828 | 0.929807 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.256867 |
4ef55661668508edfc12cbfc3fabe43b080c3a01 | 810 | py | Python | clearsky/main.py | NathanStern/ClearSky | 979583de2c44dcd3f62b96b9b43ba6369dff8a71 | [
"MIT"
] | null | null | null | clearsky/main.py | NathanStern/ClearSky | 979583de2c44dcd3f62b96b9b43ba6369dff8a71 | [
"MIT"
] | null | null | null | clearsky/main.py | NathanStern/ClearSky | 979583de2c44dcd3f62b96b9b43ba6369dff8a71 | [
"MIT"
] | null | null | null | from . import db
from flask import Flask, current_app
from . import create_app
import os
from . import db
app = create_app()
with app.app_context():
    # First-run setup: write a template config file with empty API keys so
    # the operator can fill them in.
    if os.path.exists("clearsky/config.json"):
        pass
    else:
        with open('clearsky/config.json', 'w') as configuration:
            print("Opened config file")
            configuration.write("""
            {
            "OpenWeather-url": "https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&exclude=minutely,daily,alerts&appid={}&units=imperial",
            "OpenWeather-key": "",
            "Radar.io-url": "https://api.radar.io/v1/geocode/forward?query={}",
            "Radar.io-key": ""
            }
            """)
    # Create the SQLite database on first run.
    if not os.path.exists(current_app.instance_path + "/" + ('clearsky.sqlite')):
        print("Initializing database for first-time use")
        db.init_db()
| 28.928571 | 141 | 0.630864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.51358 |
4ef5f5eab5f2ada2e47d1bb10c80337bb76665c1 | 6,966 | py | Python | logger.py | despargy/Shade | 8ed54c80154448c2cdc29623c2798a01d594995e | [
"MIT"
] | 3 | 2019-12-28T09:04:09.000Z | 2020-04-19T15:04:49.000Z | logger.py | despargy/Shade | 8ed54c80154448c2cdc29623c2798a01d594995e | [
"MIT"
] | null | null | null | logger.py | despargy/Shade | 8ed54c80154448c2cdc29623c2798a01d594995e | [
"MIT"
] | 1 | 2022-03-25T14:10:45.000Z | 2022-03-25T14:10:45.000Z | import logging
from abc import ABC, abstractmethod
from file_read_backwards import FileReadBackwards
import threading
import os
class Logger(ABC):
    """Base class for the file-backed loggers.

    Each logger writes to ``Logs/<filename>`` and tags every record with a
    monotonically increasing ``log_id`` so the ground station can request
    exactly the rows it has not received yet.
    """

    def __init__(self, filename):
        self.lock = threading.Lock()  # serialises log_id increments
        self.dir = "Logs"
        if(not os.path.isdir(self.dir)):
            os.mkdir(self.dir)
        # Bug fix: the format template previously ignored the ``filename``
        # keyword (it contained no matching placeholder), so every logger
        # ended up pointing at the same file.
        self.file_name = "{dir}/{filename}".format(dir=self.dir, filename=filename)
        last_index = self.get_last_index()
        self.last_sended_index = str(last_index)
        # Resume numbering from the last persisted id (0 for a fresh file).
        self.log_id = 0 if last_index == 1 else last_index

    def set_up_logger(self, formatter, name):
        """Set formatter and name of logger.

        Arguments:
            formatter {string} -- The message formatter
            name {string} -- The name of logger
        """
        self.formatter = logging.Formatter(formatter)
        self.handler = logging.FileHandler(self.file_name)
        self.handler.setFormatter(self.formatter)
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(self.handler)

    @abstractmethod
    def get_instance(self):
        """Return the singleton instance of the logger.
        Every concrete logger must override this.
        """
        pass

    def set_last_sended_index(self, index):
        """Set value of variable last_sended_index.

        Arguments:
            index {int} -- The last index that ground station received
        """
        self.last_sended_index = index

    def get_last_index(self):
        """Get index from last valid row.

        Returns:
            [int] -- The index from last row, or 1 if there is no file.
        """
        if(self.isSafeToRead()):
            with FileReadBackwards(self.file_name, encoding="utf-8") as log_file:
                for line in log_file:
                    try:
                        return int(line.split(',')[0])
                    except ValueError:
                        # Skip malformed rows (e.g. partial writes); the
                        # original bare ``except`` also hid real errors.
                        continue
        return 1

    def isSafeToRead(self):
        """Checks if it is safe to read the log file.

        Returns:
            [boolean] -- True: dir and filename exist
                         False: the dir or the filename doesn't exist
        """
        return os.path.isdir(self.dir) and os.path.exists(self.file_name)

    def get_unsend_data(self):
        """Get the logs that haven't been sent to the ground station.

        Returns:
            [list: unsend_logs] -- The unsent logs
            [int: total_logs] -- The total count of unsent logs
        """
        unsend_logs = []
        total_logs = 0
        with FileReadBackwards(self.file_name, encoding="utf-8") as log_file:
            for line in log_file:
                line_id = line.split(',')[0]
                if line_id == self.last_sended_index:
                    # Bug fix: ``line_id`` is a string, so the original
                    # comparison with the int ``1`` was always False and the
                    # very first row was never included when nothing had
                    # been sent yet.
                    if line_id == '1':
                        total_logs += 1
                        unsend_logs.insert(0, line)
                    break
                total_logs += 1
                unsend_logs.insert(0, line)
        return unsend_logs, total_logs

    def inc_log_id(self):
        """Safely increase the log id and return the new value.

        Returning the value under the lock lets callers use it without a
        racy read of ``self.log_id`` after the lock is released.
        """
        with self.lock:
            self.log_id += 1
            return self.log_id

    def write_info(self, message):
        """Logs info message.

        Arguments:
            message {string} -- the log message
        """
        self.logger.info(message,
                         extra={'log_id': self.inc_log_id()})

    def write_error(self, message):
        """Logs error message.

        Arguments:
            message {string} -- the log message
        """
        self.logger.error(message,
                          extra={'log_id': self.inc_log_id()})

    def write_warning(self, message):
        """Logs warning message.

        Arguments:
            message {string} -- the log message
        """
        self.logger.warning(message,
                            extra={'log_id': self.inc_log_id()})

    def write_debug(self, message):
        """Logs debug message.

        Arguments:
            message {string} -- the log message
        """
        self.logger.debug(message,
                          extra={'log_id': self.inc_log_id()})

    def write_critical(self, message):
        """Logs critical message.

        Arguments:
            message {string} -- the log message
        """
        self.logger.critical(message,
                             extra={'log_id': self.inc_log_id()})

    def write_exception(self, message):
        """Logs exception message.

        Arguments:
            message {string} -- the log message
        """
        self.logger.exception(message,
                              extra={'log_id': self.inc_log_id()})
"""
Class for Logging ADCS action so you can
recover your system.
"""
class AdcsLogger(Logger):
    """Singleton logger recording ADCS actions (plain message format)."""

    # Class-level singleton holder.
    __instance = None

    def __init__(self, filename = 'adcs.log'):
        if AdcsLogger.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            super(AdcsLogger, self).__init__(filename)
            formatter = '%(message)s'
            self.set_up_logger(formatter,'adcs_logger')
            AdcsLogger.__instance = self

    def get_instance(self):
        # NOTE(review): defined as an instance method, so an instance must
        # already exist to call it this way; the sibling DataLogger defines
        # it without ``self`` — confirm the intended call style.
        if AdcsLogger.__instance == None:
            AdcsLogger()
        return AdcsLogger.__instance
class InfoLogger(Logger):
    """Singleton logger for general status rows
    ("<log_id>,<timestamp> <level> <message>")."""

    # Class-level singleton holder.
    __instance = None

    def __init__(self, filename = 'info.log'):
        if InfoLogger.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            super(InfoLogger, self).__init__(filename)
            formatter = '%(log_id)s,%(asctime)s %(levelname)s %(message)s'
            self.set_up_logger(formatter,'info_logger')
            InfoLogger.__instance = self

    def get_instance(self):
        # Lazily creates the singleton on first use (instance method; see
        # the same pattern in AdcsLogger).
        if InfoLogger.__instance == None:
            InfoLogger()
        return InfoLogger.__instance
class DataLogger(Logger):
    """Singleton logger for data rows ("<log_id>,<message>")."""

    # Class-level singleton holder.
    __instance = None

    def __init__(self, filename = 'data.log'):
        if DataLogger.__instance != None:
            raise Exception("This class is a singleton!")
        else:
            super(DataLogger, self).__init__(filename)
            formatter = '%(log_id)s,%(message)s'
            self.set_up_logger(formatter,'data_logger')
            DataLogger.__instance = self

    def get_instance():
        # NOTE(review): unlike the sibling loggers this is defined without
        # ``self``, so it only works when called on the class itself, e.g.
        # ``DataLogger.get_instance()`` — confirm the intended usage.
        if DataLogger.__instance == None:
            DataLogger()
        return DataLogger.__instance
class GroundLogger(Logger):
    """Logger for traffic with the ground station (plain message format).

    Unlike the other loggers, GroundLogger is not a singleton.
    """

    def __init__(self, filename = 'elink.info.log'):
        super(GroundLogger, self).__init__(filename)
        formatter = '%(message)s'
        # Bug fix: the logger-name template previously contained no
        # placeholder, so the ``filename`` keyword passed to format() was
        # ignored and every GroundLogger shared one logging channel name.
        self.set_up_logger(formatter, 'logger_{filename}'.format(filename=filename))

    def get_instance(self):
        # Not a singleton; nothing to return.
        pass
| 27.864 | 83 | 0.557852 | 6,747 | 0.968562 | 0 | 0 | 171 | 0.024548 | 0 | 0 | 2,164 | 0.310652 |
4ef6051cf73af32c482601c57498a5943fed63f6 | 5,387 | py | Python | nydus/db/base.py | Elec/nydus | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | [
"Apache-2.0"
] | 102 | 2015-01-12T20:07:37.000Z | 2022-02-21T10:00:07.000Z | nydus/db/base.py | Elec/nydus | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | [
"Apache-2.0"
] | 4 | 2015-02-18T04:04:47.000Z | 2021-06-09T12:14:50.000Z | nydus/db/base.py | Elec/nydus | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | [
"Apache-2.0"
] | 9 | 2015-07-28T15:16:04.000Z | 2021-06-08T20:31:04.000Z | """
nydus.db.base
~~~~~~~~~~~~~
:copyright: (c) 2011-2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
__all__ = ('LazyConnectionHandler', 'BaseCluster')
import collections
from nydus.db.map import DistributedContextManager
from nydus.db.routers import BaseRouter, routing_params
from nydus.utils import apply_defaults
def iter_hosts(hosts):
    """Yield ``(index, host_settings)`` pairs from *hosts*.

    NOTE(review): Python 2 era code — ``iteritems`` and the pre-3.3
    ``collections.Mapping`` alias do not exist on modern Python 3.
    """
    # this can either be a dictionary (with the key acting as the numeric
    # index) or it can be a sorted list.
    if isinstance(hosts, collections.Mapping):
        return hosts.iteritems()
    return enumerate(hosts)
def create_connection(Connection, num, host_settings, defaults):
# host_settings can be an iterable or a dictionary depending on the style
# of connection (some connections share options and simply just need to
# pass a single host, or a list of hosts)
if isinstance(host_settings, collections.Mapping):
return Connection(num, **apply_defaults(host_settings, defaults or {}))
elif isinstance(host_settings, collections.Iterable):
return Connection(num, *host_settings, **defaults or {})
return Connection(num, host_settings, **defaults or {})
class BaseCluster(object):
    """
    Holds a cluster of connections.

    NOTE(review): Python 2 code (``xrange``, ``except ..., e`` syntax,
    ``iterkeys``/``itervalues``); it will not parse on Python 3.
    """
    class MaxRetriesExceededError(Exception):
        # Raised when a retryable command keeps failing after
        # ``max_connection_retries`` attempts.
        pass

    def __init__(self, hosts, backend, router=BaseRouter, max_connection_retries=20, defaults=None):
        # Build connection number -> connection instance.
        self.hosts = dict(
            (conn_number, create_connection(backend, conn_number, host_settings, defaults))
            for conn_number, host_settings
            in iter_hosts(hosts)
        )
        self.max_connection_retries = max_connection_retries
        self.install_router(router)

    def __len__(self):
        return len(self.hosts)

    def __getitem__(self, name):
        return self.hosts[name]

    def __getattr__(self, name):
        # Unknown attributes become lazily-routed command proxies.
        return CallProxy(self, name)

    def __iter__(self):
        for name in self.hosts.iterkeys():
            yield name

    def install_router(self, router):
        """Attach a router instance bound to this cluster."""
        self.router = router(self)

    def execute(self, path, args, kwargs):
        """Resolve *path* on each routed connection and invoke it, retrying
        retryable failures up to ``max_connection_retries`` times."""
        connections = self.__connections_for(path, args=args, kwargs=kwargs)

        results = []
        for conn in connections:
            for retry in xrange(self.max_connection_retries):
                # Walk the dotted path down to the bound callable.
                func = conn
                for piece in path.split('.'):
                    func = getattr(func, piece)
                try:
                    results.append(func(*args, **kwargs))
                except tuple(conn.retryable_exceptions), e:
                    if not self.router.retryable:
                        raise e
                    elif retry == self.max_connection_retries - 1:
                        raise self.MaxRetriesExceededError(e)
                    else:
                        # Ask the router for a replacement connection.
                        conn = self.__connections_for(path, retry_for=conn.num, args=args, kwargs=kwargs)[0]
                else:
                    break

        # If we only had one db to query, we simply return that res
        if len(results) == 1:
            return results[0]
        else:
            return results

    def disconnect(self):
        """Disconnects all connections in cluster"""
        for connection in self.hosts.itervalues():
            connection.disconnect()

    def get_conn(self, *args, **kwargs):
        """
        Returns a connection object from the router given ``args``.

        Useful in cases where a connection cannot be automatically determined
        during all steps of the process. An example of this would be
        Redis pipelines.
        """
        connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)

        if len(connections) is 1:
            return connections[0]
        else:
            return connections

    def map(self, workers=None, **kwargs):
        """Return a context manager that batches commands across workers."""
        return DistributedContextManager(self, workers, **kwargs)

    @routing_params
    def __connections_for(self, attr, args, kwargs, **fkwargs):
        # Translate the router's connection numbers into connection objects.
        return [self[n] for n in self.router.get_dbs(attr=attr, args=args, kwargs=kwargs, **fkwargs)]
class CallProxy(object):
    """Callable stand-in for a command on a cluster.

    An instance remembers a dotted command ``path``; calling it forwards
    the call to ``cluster.execute``, and attribute access extends the
    dotted path with a fresh proxy.
    """

    def __init__(self, cluster, path):
        self._cluster = cluster
        self._path = path

    def __call__(self, *args, **kwargs):
        return self._cluster.execute(self._path, args, kwargs)

    def __getattr__(self, name):
        extended = '.'.join((self._path, name))
        return CallProxy(self._cluster, extended)
class LazyConnectionHandler(dict):
    """
    Maps clusters of connections within a dictionary.

    Cluster construction is deferred: ``reload`` builds every cluster
    from ``conf_callback()`` on the first lookup.
    """
    def __init__(self, conf_callback):
        # Callable returning {alias: cluster settings}.
        self.conf_callback = conf_callback
        self.conf_settings = {}
        self.__is_ready = False

    def __getitem__(self, key):
        # Lazily build the clusters on first access.
        if not self.is_ready():
            self.reload()
        return super(LazyConnectionHandler, self).__getitem__(key)

    def is_ready(self):
        return self.__is_ready

    def reload(self):
        from nydus.db import create_cluster
        # ``items()`` instead of the Python 2-only ``iteritems()`` so
        # this works on both Python 2 and 3.
        for conn_alias, conn_settings in self.conf_callback().items():
            self[conn_alias] = create_cluster(conn_settings)
        # Bug fix: this previously assigned ``self._is_ready`` — a
        # different attribute from the name-mangled ``self.__is_ready``
        # — so ``is_ready()`` never became True and every lookup re-ran
        # ``reload``.
        self.__is_ready = True

    def disconnect(self):
        """Disconnects all connections in cluster"""
        for connection in self.values():
            connection.disconnect()
| 32.451807 | 108 | 0.633748 | 4,196 | 0.778912 | 85 | 0.015779 | 181 | 0.033599 | 0 | 0 | 1,059 | 0.196584 |
4ef8c03f9234184a71c8d62d34f7bd0e16bc0579 | 1,212 | py | Python | pytest_voluptuous/plugin.py | F-Secure/pytest-voluptuous | d71da445e49d3c17c959391d56857a468d344f4b | [
"Apache-2.0"
] | 27 | 2018-01-11T09:21:40.000Z | 2021-11-17T06:23:49.000Z | pytest_voluptuous/plugin.py | F-Secure/pytest-voluptuous | d71da445e49d3c17c959391d56857a468d344f4b | [
"Apache-2.0"
] | 8 | 2018-02-16T10:56:36.000Z | 2020-06-12T12:35:49.000Z | pytest_voluptuous/plugin.py | F-Secure/pytest-voluptuous | d71da445e49d3c17c959391d56857a468d344f4b | [
"Apache-2.0"
] | 7 | 2018-02-13T23:05:15.000Z | 2020-05-18T11:59:46.000Z | from __future__ import absolute_import
from voluptuous import MultipleInvalid
from pytest_voluptuous.voluptuous import S
def pytest_assertrepr_compare(op, left, right):
    """Pytest hook: pretty-print voluptuous schema comparison failures.

    Produces one explanation line per validation error when an ``S``
    schema appears on the left of ``<=``/``==`` or on the right of
    ``==``; returns None otherwise so pytest uses its default output.
    """
    schema_on_left = isinstance(left, S) and op in ('<=', '==')
    schema_on_right = isinstance(right, S) and op == '=='
    if not (schema_on_left or schema_on_right):
        return None
    if isinstance(left, S):
        source, data = left, right
    else:
        source, data = right, left
    error = source.error
    if isinstance(error, MultipleInvalid):
        lines = [format_error(e, data) for e in error.errors]
    else:
        lines = [format_error(error, data)]
    return ['failed due to validation error(s):'] + lines
def format_error(error, data):
    """Render one voluptuous error as a '- path: message (actual: ...)' line.

    ``error`` needs ``path`` (list of keys/indices) and ``msg``
    attributes, as on ``voluptuous.Invalid``.
    """
    if error.path:
        prefix = '.'.join(map(str, error.path)) + ': '
        try:
            value = get_value(data, error.path)
            suffix = (' (actual: ' + repr(value) + ')')
        except Exception:
            # The path may not resolve in ``data`` (e.g. a missing
            # required key); omit the actual value. ``except Exception``
            # replaces the old bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.
            suffix = ''
    else:
        prefix = ''
        suffix = ''
    return '- {}{}{}'.format(prefix, error.msg, suffix)


def get_value(data, path):
    """Walk ``path`` (a sequence of keys/indices) into nested ``data``."""
    value = data
    for key in path:
        value = value[key]
    return value
| 24.24 | 97 | 0.55198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.070957 |
4ef95f448285155ef51a0543042976937c9cfbc9 | 1,487 | py | Python | amftrack/pipeline/scripts/post_processing/make_small_exp.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | 1 | 2021-06-10T02:51:53.000Z | 2021-06-10T02:51:53.000Z | amftrack/pipeline/scripts/post_processing/make_small_exp.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | null | null | null | amftrack/pipeline/scripts/post_processing/make_small_exp.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | null | null | null | from path import path_code_dir
import sys
sys.path.insert(0, path_code_dir)
from amftrack.pipeline.functions.image_processing.extract_width_fun import *
from amftrack.pipeline.functions.image_processing.experiment_class_surf import Experiment, save_graphs, load_graphs
from amftrack.util import get_dates_datetime, get_dirname
import pickle
import networkx as nx
import pandas as pd
from amftrack.pipeline.paths.directory import directory_scratch
from path import path_code_dir
import os
import json
from datetime import datetime
from pymatreader import read_mat
import cv2
import matplotlib.pyplot as plt
from IPython.display import clear_output
from amftrack.plotutil import plot_t_tp1
from amftrack.notebooks.analysis.util import directory_scratch
import imageio
# Per-task post-processing worker: loads one experiment described in a
# pipeline run manifest (selected by CLI indices) and re-saves it with
# its graphs stored separately.
directory = str(sys.argv[1])
# HACK: eval() on a CLI argument executes arbitrary code — unsafe on
# untrusted input; consider ast.literal_eval. Kept as-is (flagged only).
overwrite = eval(sys.argv[2])
i = int(sys.argv[-1])
op_id = int(sys.argv[-2])
# The scheduler drops a JSON manifest and a pickled (functions, args)
# pair under <scratch>/temp/<op_id>.*
run_info = pd.read_json(f'{directory_scratch}temp/{op_id}.json')
list_f,list_args = pickle.load(open(f'{directory_scratch}temp/{op_id}.pick', "rb"))
folder_list = list(run_info['folder_analysis'])
directory_name = folder_list[i]
# Manifest row describing this worker's plate/experiment.
select = run_info.loc[run_info['folder_analysis'] == directory_name]
row = [row for index, row in select.iterrows()][0]
plate_num = row['Plate']
path_exp = f'{directory}{row["path_exp"]}'
exp = pickle.load(open(path_exp, "rb"))
exp.dates.sort()
# Persist the graphs separately, then drop them from the experiment
# object so the re-pickled experiment stays small.
save_graphs(exp)
exp.nx_graph = None
dirName = exp.save_location
exp.pickle_save(f"{dirName}/")
| 36.268293 | 116 | 0.778749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.114997 |
4efaee537f830f9cc1209c150188c0e51e4634b7 | 12,476 | py | Python | src/style_transfer/style_transfer_v2_gluon.py | gilbertfrancois/mxnet-cookbook | 95add0415d7c1ddfc4bcdd3d3a41c2e44162f888 | [
"Apache-2.0"
] | 1 | 2020-02-28T03:00:19.000Z | 2020-02-28T03:00:19.000Z | src/style_transfer/style_transfer_v2_gluon.py | gilbertfrancois/mxnet-cookbook | 95add0415d7c1ddfc4bcdd3d3a41c2e44162f888 | [
"Apache-2.0"
] | null | null | null | src/style_transfer/style_transfer_v2_gluon.py | gilbertfrancois/mxnet-cookbook | 95add0415d7c1ddfc4bcdd3d3a41c2e44162f888 | [
"Apache-2.0"
] | 1 | 2020-10-14T20:26:19.000Z | 2020-10-14T20:26:19.000Z | # Copyright 2019 Gilbert Francois Duivesteijn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on the explanation in the book:
# "Dive into deep learning", A. Zhang, Z.C. Lipton, M. Li, A.J. Smola
import os
import time
import mxnet as mx
import cv2 as cv
import numpy
from mxnet import gluon
from mxnet import autograd
from mxnet import image
from mxnet import init
from mxnet import np, npx
from mxnet.gluon import nn
import glob
import matplotlib.pyplot as plt
from datetime import timedelta
from cartonifier import Cartonifier
# %%
# -- Settings
npx.set_np()
def find_root_folder(project_folder):
    """Return the current working directory truncated at (and including)
    the first occurrence of ``project_folder``.

    If ``project_folder`` does not occur in the cwd, it is appended to
    the full cwd path instead (matching the original behavior).
    """
    parts = os.getcwd().split(sep="/")
    try:
        cut = parts.index(project_folder)
    except ValueError:
        cut = len(parts)
    kept = parts[:cut] + [project_folder]
    return "/" + os.path.join(*kept)
class GeneratedImage(nn.Block):
    """Trainable image: a Gluon block whose only parameter is the image
    itself, so the optimizer updates the pixels directly."""
    def __init__(self, img_shape, **kwargs):
        super(GeneratedImage, self).__init__(**kwargs)
        # The image lives in a Parameter so gradients flow into it.
        self.weight = self.params.get('weight', shape=img_shape)

    def forward(self):
        # No input: the "output" is the current image parameter.
        return self.weight.data()
class StyleTransferGF:
    """Gatys-style neural style transfer using a pretrained VGG-19.

    Optimizes the pixels of a generated image so that its VGG content
    features match ``content_image`` while its Gram-matrix style
    statistics match ``style_image``, plus a total-variation term for
    smoothness.
    """

    def __init__(self, content_image, style_image, image_size, content_weight=1.0, style_weight=1.0e4, tv_weight=10.0,
                 lr=0.1, out_image_filepath=None):
        """
        Args:
            content_image: RGB image as a numpy array or an image file path.
            style_image: RGB image as a numpy array or an image file path.
            image_size: (width, height) both images are resized to.
            content_weight / style_weight / tv_weight: loss term weights.
            lr: Adam learning rate for the pixel optimizer.
            out_image_filepath: optional path the result is written to.
        """
        super(StyleTransferGF, self).__init__()
        self.IMAGE_SIZE = image_size
        self.N_EPOCHS = 600
        # ImageNet normalization statistics expected by pretrained VGG.
        self.RGB_MEAN = np.array([0.485, 0.456, 0.406])
        self.RGB_STD = np.array([0.229, 0.224, 0.225])
        # VGG-19 layer indices: shallow+deep layers for style, one deep
        # layer for content.
        self.style_layers = [0, 5, 10, 19, 28]
        self.content_layers = [25]
        self.LR = lr
        self.LR_DECAY_EPOCH = 300
        self.CONTENT_WEIGHT = content_weight
        self.STYLE_WEIGHT = style_weight
        self.TV_WEIGHT = tv_weight
        self.mx_ctx = mx.gpu(0)
        self.out_image_filepath = out_image_filepath
        # Load and prepare images
        if isinstance(content_image, numpy.ndarray):
            self.content_image = self.as_nd_np(content_image)
        elif isinstance(content_image, str):
            self.content_image = image.imread(content_image)
        else:
            raise TypeError("Only numpy array or str are supported.")
        if isinstance(style_image, numpy.ndarray):
            self.style_image = self.as_nd_np(style_image)
        elif isinstance(style_image, str):
            self.style_image = image.imread(style_image)
        else:
            raise TypeError("Only numpy array or str are supported.")
        # Keep only the VGG feature layers up to the deepest one we use.
        pretrained_net = gluon.model_zoo.vision.vgg19(pretrained=True)
        self.net = nn.Sequential()
        for i in range(max(self.content_layers + self.style_layers) + 1):
            self.net.add(pretrained_net.features[i])

    def smooth(self, src: mx.numpy.ndarray, d: int, sigma_color: int, sigma_space: int):
        """Resize ``src`` and apply an edge-preserving bilateral filter."""
        img = image.imresize(src, *self.IMAGE_SIZE)
        dst = cv.bilateralFilter(img.asnumpy(), d, sigma_color, sigma_space)
        dst = self.as_nd_np(dst)
        return dst

    def as_nd_np(self, img):
        # Convert a numpy image into an mxnet `np` ndarray (int32).
        return mx.nd.array(img, dtype=np.int32).as_np_ndarray()

    def preprocess(self, img):
        """Resize, normalize with ImageNet stats, and reshape to NCHW."""
        img = image.imresize(img, *self.IMAGE_SIZE)
        img = (img.astype('float32') / 255 - self.RGB_MEAN) / self.RGB_STD
        return np.expand_dims(img.transpose(2, 0, 1), axis=0)

    def postprocess(self, img):
        """Undo the normalization and return an HWC image in [0, 1]."""
        img = img[0].as_in_ctx(self.RGB_STD.ctx)
        return (img.transpose(1, 2, 0) * self.RGB_STD + self.RGB_MEAN).clip(0, 1)

    def extract_features(self, x):
        """Run ``x`` through VGG, collecting style and content activations."""
        contents = []
        styles = []
        for i in range(len(self.net)):
            x = self.net[i](x)
            if i in self.style_layers:
                styles.append(x)
            if i in self.content_layers:
                contents.append(x)
        return contents, styles

    def get_contents(self):
        """Preprocess the content image and return (input, content features)."""
        content_x = self.preprocess(self.content_image).copyto(self.mx_ctx)
        contents_y, _ = self.extract_features(content_x)
        return content_x, contents_y

    def get_styles(self):
        """Preprocess the style image and return (input, style features)."""
        style_x = self.preprocess(self.style_image).copyto(self.mx_ctx)
        _, styles_y = self.extract_features(style_x)
        return style_x, styles_y

    def get_inits(self, x, styles_y):
        """Create the trainable image (initialized to ``x``), its Adam
        trainer, and the target style Gram matrices."""
        gen_img = GeneratedImage(x.shape)
        gen_img.initialize(init.Constant(x), ctx=self.mx_ctx, force_reinit=True)
        trainer = gluon.Trainer(gen_img.collect_params(), 'adam', {'learning_rate': self.LR})
        styles_y_gram = [self.gram(y) for y in styles_y]
        return gen_img(), styles_y_gram, trainer

    @staticmethod
    def content_loss(y_hat, y):
        # Bug fix: the original called ``np.square(y_hat, y)``, which
        # passes ``y`` as the *out* argument (squaring y_hat in-place
        # into the target features) instead of comparing the two maps.
        # Content loss is the mean squared difference of the features.
        return np.square(y_hat - y).mean()

    @staticmethod
    def gram(x):
        """Gram matrix (channels x channels) of feature map ``x``,
        normalized by channel count and spatial size."""
        num_channels = x.shape[1]
        n = x.size // x.shape[1]
        x = x.reshape(num_channels, n)
        return np.dot(x, x.T) / (num_channels * n)

    @staticmethod
    def style_loss(y_hat, gram_y):
        """Mean squared difference between Gram matrices."""
        return np.square(StyleTransferGF.gram(y_hat) - gram_y).mean()

    @staticmethod
    def tv_loss(y_hat):
        """Total variation: mean absolute difference between neighboring
        pixels, penalizing high-frequency noise."""
        return 0.5 * (np.abs(y_hat[:, :, 1:, :] - y_hat[:, :, :-1, :]).mean() +
                      np.abs(y_hat[:, :, :, 1:] - y_hat[:, :, :, :-1]).mean())

    def compute_loss(self, x, contents_y_hat, styles_y_hat, contents_y, styles_y_gram):
        """Weighted content, style and TV losses plus their sum."""
        contents_l = [StyleTransferGF.content_loss(y_hat, y) * self.CONTENT_WEIGHT for y_hat, y in
                      zip(contents_y_hat, contents_y)]
        styles_l = [StyleTransferGF.style_loss(y_hat, y) * self.STYLE_WEIGHT for y_hat, y in
                    zip(styles_y_hat, styles_y_gram)]
        tv_l = StyleTransferGF.tv_loss(x) * self.TV_WEIGHT
        l = sum(styles_l + contents_l + [tv_l])
        return contents_l, styles_l, tv_l, l

    def train(self):
        """Optimize the generated image; return it as a uint8 RGB array
        (also written to ``out_image_filepath`` when set)."""
        self.net.collect_params().reset_ctx(self.mx_ctx)
        content_x, contents_y = self.get_contents()
        _, styles_y = self.get_styles()
        # get_inits already computes the style Gram matrices; the
        # previous duplicate recomputation here was removed.
        x, styles_y_gram, trainer = self.get_inits(content_x, styles_y)
        for epoch in range(self.N_EPOCHS):
            with autograd.record():
                contents_y_hat, styles_y_hat = self.extract_features(x)
                contents_l, styles_l, tv_l, l = self.compute_loss(x, contents_y_hat, styles_y_hat, contents_y,
                                                                  styles_y_gram)
            l.backward()
            trainer.step(1)
            npx.waitall()
            # NOTE: this also triggers at epoch 0, so the effective
            # starting rate is lr * 0.3 (kept to preserve behavior).
            if epoch % self.LR_DECAY_EPOCH == 0:
                trainer.set_learning_rate(trainer.learning_rate * 0.3)
            if epoch % 100 == 0:
                msg = [
                    f"Size: {self.IMAGE_SIZE}",
                    f"Epoch: {epoch}",
                    f"contents_l: {float(sum(contents_l)):0.3f}",
                    f"style_l: {float(sum(styles_l)):0.3f}",
                    f"tv_l: {float(tv_l):0.3f}",
                    f"total_l: {float(l):0.3f}"
                ]
                msg = ", ".join(msg)
                print(msg)
        out = self.postprocess(x).asnumpy()
        out = (out * 255).astype(numpy.uint8)
        if self.out_image_filepath is not None:
            cv.imwrite(self.out_image_filepath, cv.cvtColor(out, cv.COLOR_RGB2BGR))
        return out
# %%
# -- Train (continued)
def get_output_filepath(content_image_filepath, style_image_filepath, cw, sw, tw, output_folder):
    """Build '<content>_<style>_<cw>_<sw>_<tw>.png' inside ``output_folder``."""
    content_stem, _ = os.path.splitext(os.path.basename(content_image_filepath))
    style_stem, _ = os.path.splitext(os.path.basename(style_image_filepath))
    filename = "_".join([content_stem, style_stem, str(cw), str(sw), str(tw)]) + ".png"
    return os.path.join(output_folder, filename)
def process_image(content_image_filepath, style_image_filepath, content_weight, style_weight, tv_weight, output_folder,
                  timestamp):
    # Coarse-to-fine style transfer: run the optimization at increasing
    # resolutions, re-blending each upscaled result with the original
    # content image before the next pass, then write the final image.
    # NOTE(review): ``timestamp`` is currently unused here.
    print(f"[ ] Processing {os.path.basename(content_image_filepath)} with settings: {content_weight} {style_weight} {tv_weight}")
    alpha = 0.90
    scales = ((200, 150), (283, 212), (400, 300), (566, 424), (800, 600))
    lr_list = (0.7, 0.6, 0.5, 0.5, 0.5)
    # Prepare content image.
    original_image = cv.cvtColor(cv.imread(content_image_filepath), cv.COLOR_BGR2RGB)
    shape = original_image.shape
    ratio = shape[1] / shape[0]
    # Portrait images are rotated to landscape for processing and
    # rotated back at the end.
    if ratio < 1:
        original_image = cv.rotate(original_image, cv.ROTATE_90_CLOCKWISE)
        is_rotated = True
    else:
        is_rotated = False
    content_image = cv.resize(original_image, scales[0], cv.INTER_CUBIC)
    # Prepare style image.
    original_style_image = cv.cvtColor(cv.imread(style_image_filepath), cv.COLOR_BGR2RGB)
    shape = original_style_image.shape
    ratio = shape[1] / shape[0]
    if ratio < 1:
        original_style_image = cv.rotate(original_style_image, cv.ROTATE_90_CLOCKWISE)
    style_image = cv.resize(original_style_image, scales[0], cv.INTER_CUBIC)
    index = 0
    for index, scale in enumerate(scales):
        if index > 0:
            # Upscale the previous result, denoise it, and blend it with
            # the freshly-resized original content (weight ``alpha``).
            src1 = cv.resize(original_image, dsize=scale, interpolation=cv.INTER_CUBIC)
            src2 = cv.resize(content_image, dsize=scale, interpolation=cv.INTER_CUBIC)
            src2 = cv.medianBlur(src2, ksize=3)
            src3 = cv.addWeighted(src2, alpha, src1, 1.0 - alpha, 0)
            content_image = src3
        style_image = cv.resize(original_style_image, dsize=scale, interpolation=cv.INTER_CUBIC)
        output_filepath = None
        # NOTE(review): ``lr`` is computed but never passed to
        # StyleTransferGF (which therefore uses its default lr=0.1) —
        # confirm whether lr=lr was intended.
        lr = lr_list[index]
        style_transfer_gf = StyleTransferGF(content_image, style_image, scale, content_weight=content_weight,
                                            style_weight=style_weight, tv_weight=tv_weight,
                                            out_image_filepath=output_filepath)
        content_image = style_transfer_gf.train()
        del style_transfer_gf
        # Give the GPU a moment to free memory between scales.
        time.sleep(3)
    if is_rotated:
        content_image = cv.rotate(content_image, cv.ROTATE_90_COUNTERCLOCKWISE)
    output_filepath = get_output_filepath(content_image_filepath, style_image_filepath, content_weight, style_weight, tv_weight, output_folder)
    cv.imwrite(output_filepath, cv.cvtColor(content_image, cv.COLOR_RGB2BGR))
def main():
    """Run style transfer for every (content, style, weight) combination.

    Results are written to ``<repo root>/data/output``. Raises
    FileNotFoundError when an input image path disappears between
    globbing and processing.
    """
    root_folder = find_root_folder("mxnet-cookbook")
    output_folder = os.path.join(root_folder, "data", "output")
    os.makedirs(output_folder, exist_ok=True)
    timestamp = str(int(time.time()))
    content_weight_list = [1.0]
    style_weight_list = [1e4]
    tv_weight_list = [10]
    content_image_filepath_list = sorted(glob.glob(os.path.join(root_folder, "data", "input", "IMG_*")))
    # Only the first content image is processed.
    content_image_filepath_list = [content_image_filepath_list[0]]
    style_image_filepath_list = sorted(glob.glob(os.path.join(root_folder, "data", "style_transfer", "*.jpeg")))
    for style_weight in style_weight_list:
        for content_weight in content_weight_list:
            for tv_weight in tv_weight_list:
                for content_image_filename in content_image_filepath_list:
                    for style_image_filename in style_image_filepath_list:
                        tic = time.time()
                        if not os.path.exists(style_image_filename):
                            raise FileNotFoundError(f"Cannot find {style_image_filename}")
                        if not os.path.exists(content_image_filename):
                            raise FileNotFoundError(f"Cannot find {content_image_filename}")
                        process_image(content_image_filename, style_image_filename, content_weight, style_weight,
                                      tv_weight, output_folder, timestamp)
                        toc = time.time()
                        # Bug fix: the original f-string contained a stray
                        # literal 'f' ("Elapsed time: f0:00:12").
                        print(f"Elapsed time: {timedelta(seconds=(toc - tic))}")


if __name__ == '__main__':
    main()
| 41.311258 | 143 | 0.636181 | 6,536 | 0.523886 | 0 | 0 | 588 | 0.04713 | 0 | 0 | 1,622 | 0.13001 |
4efc7761548ac36e29fdc7dad0bfff5e1335bdbc | 6,980 | py | Python | torncache/connection.py | shipci/torncache-sample | 790a46812f3a503d2f30eefdd89c974a929c2a4a | [
"Apache-2.0"
] | null | null | null | torncache/connection.py | shipci/torncache-sample | 790a46812f3a503d2f30eefdd89c974a929c2a4a | [
"Apache-2.0"
] | null | null | null | torncache/connection.py | shipci/torncache-sample | 790a46812f3a503d2f30eefdd89c974a929c2a4a | [
"Apache-2.0"
] | null | null | null | # -*- mode: python; coding: utf-8 -*-
"""
Torncache Connection
"""
from __future__ import absolute_import
import os
import stat
import socket
import time
import numbers
import logging
import functools
from tornado import iostream
from tornado import stack_context
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
class ConnectionError(Exception):
    """Base class for all client connection errors."""
class ConnectionTimeoutError(ConnectionError):
    """Raised when connecting, or a running operation, times out."""
class Connection(object):
    """A client connection to a server (TCP ``host:port`` or unix socket).

    Wraps a tornado IOStream, with connect/request timeouts and a
    "dead server" quarantine (``mark_dead`` / ``dead_retry``).
    """

    def __init__(self, host, ioloop=None, serializer=None, deserializer=None,
                 connect_timeout=5, timeout=1, no_delay=True, ignore_exc=False,
                 dead_retry=30):
        # Parse host conf and weight
        self.weight = 1
        if isinstance(host, tuple):
            host, self.weight = host
        # By default, assume unix socket
        self.ip, self.port, self.path = None, None, host
        # Parse host port
        if ":" in host:
            self.ip, _, self.port = host.partition(":")
            self.port = int(self.port)
            self.path = None
        # Check that it's a valid path to an unix socket, fail otherwise.
        # Bug fix: the message used a %-placeholder with str.format, so
        # the path was never interpolated.
        if self.path and not os.path.exists(self.path):
            raise ConnectionError("Invalid unix socket '{0}'".format(self.path))
        # Protected data
        self._ioloop = ioloop or IOLoop.instance()
        self._ignore_exc = ignore_exc
        # Timeouts
        self._timeout = None
        self._request_timeout = timeout
        self._connect_timeout = connect_timeout
        # Data
        self._serializer = serializer
        self._deserializer = deserializer
        # Connection properties
        self._stream = None
        self._no_delay = no_delay
        self._dead_until = 0
        self._dead_retry = dead_retry
        self._connect_callbacks = []

    def __str__(self):
        # Bug fix: for unix-socket connections ip/port are None and the
        # old "%s:%d" formatting raised TypeError; show the path instead.
        if self.path is not None:
            retval = self.path
        else:
            retval = "%s:%d" % (self.ip, self.port)
        if self._dead_until:
            retval += " (dead until %d)" % self._dead_until
        return retval

    def __del__(self):
        self.close()

    def _add_timeout(self, reason, timeout=None):
        """Add a timeout handler that marks the server dead when it fires."""
        def on_timeout():
            self._timeout = None
            self.mark_dead(reason)
            raise ConnectionTimeoutError(reason)

        # Allow to override default timeout per call
        if not isinstance(timeout, numbers.Integral):
            timeout = self._request_timeout
        if timeout:
            self._clear_timeout()
            self._timeout = self._ioloop.add_timeout(
                time.time() + timeout,
                stack_context.wrap(on_timeout))

    def _clear_timeout(self):
        # Cancel a pending timeout, if any.
        if self._timeout is not None:
            self._ioloop.remove_timeout(self._timeout)
            self._timeout = None

    def mark_dead(self, reason):
        """Quarantine the server for ``dead_retry`` seconds."""
        if self._dead_until < time.time():
            logging.warning("Marking dead %s: '%s'" % (self, reason))
            self._dead_until = time.time() + self._dead_retry
            self._clear_timeout()
            self.close()

    def is_alive(self):
        """Check if server is alive (not currently quarantined)."""
        return self._dead_until < time.time()

    def connect(self, callback=None):
        """Open a connection to the server; ``callback`` fires when connected."""
        def on_timeout(reason):
            self._timeout = None
            self.mark_dead(reason)
            raise ConnectionTimeoutError(reason)

        def on_close():
            self._clear_timeout()
            if self._stream and self._stream.error:
                error = self._stream.error
                self._stream = None
                if self._connect_callbacks:
                    self._connect_callbacks = None
                    raise error
                # Bug fix: previously logged ``self._stream.error`` after
                # ``self._stream`` had been set to None (AttributeError).
                logging.error(error)

        def on_connect():
            self._clear_timeout()
            for callback in self._connect_callbacks:
                callback and callback(self)
            self._connect_callbacks = None

        # Check if server is dead
        if self._dead_until > time.time():
            msg = "Server {0} will stay dead next {1} secs"
            msg = msg.format(self, self._dead_until - time.time())
            raise ConnectionError(msg)
        self._dead_until = 0
        # Check we are already connected
        if self._connect_callbacks is None:
            callback and callback(self)
            return
        self._connect_callbacks.append(callback)
        if self._stream and not self._stream.closed():
            return
        # Connection closed. clean and start again
        self.close()
        # Set timeout
        if self._connect_timeout:
            timeout_func = functools.partial(on_timeout, "Connection Timeout")
            self._timeout = self._ioloop.add_timeout(
                time.time() + self._connect_timeout,
                stack_context.wrap(timeout_func))
        # now connect to host...
        if self.path is None:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if self._no_delay:
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            connect = (self.ip, self.port)
        # or unix socket
        else:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            set_close_exec(sock.fileno())
            st = os.stat(self.path)
            # Bug fix: the old message passed two args to ValueError
            # (never formatted) and referenced the removed ``file``
            # builtin (NameError on Python 3).
            if not stat.S_ISSOCK(st.st_mode):
                raise ValueError("File %s exists and is not a socket" % self.path)
            connect = self.path
        self._stream = iostream.IOStream(sock, io_loop=self._ioloop)
        self._stream.set_close_callback(on_close)
        self._stream.connect(connect, callback=on_connect)

    def send(self, cmd, callback):
        """Send a command line (CRLF is appended)."""
        self._stream.write(cmd + "\r\n", callback)

    def write(self, data, callback):
        """Write raw bytes."""
        self._stream.write(data, callback)

    def read(self, rlen=None, callback=None):
        """Read one CRLF-terminated line (``rlen`` None) or exactly
        ``rlen`` bytes; the trailing CRLF is stripped either way."""
        on_response = lambda x: callback(x[:-2])
        if rlen is None:
            self._stream.read_until("\r\n", on_response)
        else:
            # Read and strip CRLF
            rlen = rlen + 2  # CRLF
            self._stream.read_bytes(rlen, on_response)

    def readline(self, callback):
        """Read a line including the CRLF terminator."""
        self._stream.read_until("\r\n", callback)

    def close(self):
        """Close the connection to the server."""
        try:
            self._stream and self._stream.close()
        except RuntimeError:
            # IOPool is closing, so a close call is not needed
            # after all. Ignore this.
            pass

    def closed(self):
        """Check connection status (True when no open stream)."""
        if not self._stream:
            return True
        return self._stream and self._stream.closed()
| 31.441441 | 79 | 0.593123 | 6,620 | 0.948424 | 0 | 0 | 0 | 0 | 0 | 0 | 1,144 | 0.163897 |
4efd701d6b8e98e84a695daad78682b43535205a | 10,025 | py | Python | rl-toolkit/rlf/rl/model.py | clvrai/goal_prox_il | 7c809b2ee575a69a14997068db06f3c1f3c8bd08 | [
"MIT"
] | 4 | 2021-11-17T20:19:34.000Z | 2022-03-31T04:21:26.000Z | rl-toolkit/rlf/rl/model.py | clvrai/goal_prox_il | 7c809b2ee575a69a14997068db06f3c1f3c8bd08 | [
"MIT"
] | null | null | null | rl-toolkit/rlf/rl/model.py | clvrai/goal_prox_il | 7c809b2ee575a69a14997068db06f3c1f3c8bd08 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlf.rl.loggers import sanity_checker
def weight_init(module, weight_init, bias_init, gain=1):
    """Apply ``weight_init``/``bias_init`` in-place to a module's parameters.

    NOTE: the ``weight_init`` parameter shadows this function's own
    name. ``weight_init`` must accept a ``gain`` keyword (e.g.
    ``nn.init.orthogonal_``). Returns the module so calls can be inlined
    when building layers.
    """
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module
def no_bias_weight_init(m):
    """Orthogonal weights and zero bias for Linear layers; other module
    types pass through untouched. Intended for ``Module.apply``."""
    if not isinstance(m, nn.Linear):
        return m
    nn.init.orthogonal_(m.weight.data)
    if hasattr(m.bias, "data"):
        m.bias.data.fill_(0.0)
    return m
def def_mlp_weight_init(m):
    """Default MLP init: orthogonal weights with gain sqrt(2), zero bias."""
    return weight_init(
        m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2)
    )
def reg_mlp_weight_init(m):
    """Does not weight init, defaults to whatever pytorch does."""
    # Identity on purpose: used where PyTorch's default init is desired.
    return m
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class ConcatLayer(nn.Module):
    """Concatenate a pair of tensors along a fixed dimension."""

    def __init__(self, concat_dim):
        super().__init__()
        # Dimension along which the pair is joined.
        self.concat_dim = concat_dim

    def forward(self, ab):
        first, second = ab
        return torch.cat((first, second), dim=self.concat_dim)
class BaseNet(nn.Module):
    """Common base for policy/critic feature extractors.

    Optionally wraps a GRU so subclasses can be recurrent, and exposes
    the hidden-size / output-shape bookkeeping the surrounding code
    expects.
    """
    def __init__(self, recurrent, recurrent_input_size, hidden_size):
        super().__init__()
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal weights / zero biases for all GRU parameters.
            for name, param in self.gru.named_parameters():
                if "bias" in name:
                    nn.init.constant_(param, 0)
                elif "weight" in name:
                    nn.init.orthogonal_(param)

    @property
    def is_recurrent(self):
        return self._recurrent

    @property
    def recurrent_hidden_state_size(self):
        # Per-step RNN state size; 1 when non-recurrent so callers can
        # still allocate a (dummy) state tensor.
        if self._recurrent:
            return self._hidden_size
        return 1

    @property
    def hidden_size(self):
        return self._hidden_size

    @property
    def output_shape(self):
        return (self._hidden_size,)

    def _forward_gru(self, x, hidden_state, masks):
        """Run the GRU over ``x``, resetting state where ``masks`` is 0.

        Handles the single-step case (x has one row per env) and the
        flattened rollout case (x is (T*N, -1)); ``hidden_state`` is a
        dict updated in place under the "rnn_hxs" key.
        """
        rnn_hxs = hidden_state["rnn_hxs"]
        if x.size(0) == rnn_hxs.size(0):
            # Single step for N parallel envs; masks zero out state at
            # episode boundaries.
            x, rnn_hxs = self.gru(x.unsqueeze(0), (rnn_hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            rnn_hxs = rnn_hxs.squeeze(0)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = rnn_hxs.size(0)
            T = int(x.size(0) / N)
            # unflatten
            x = x.view(T, N, x.size(1))
            # Same deal with masks
            masks = masks.view(T, N)
            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = (masks[1:] == 0.0).any(dim=-1).nonzero().squeeze().cpu()
            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]
            rnn_hxs = rnn_hxs.unsqueeze(0)
            outputs = []
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]
                rnn_scores, rnn_hxs = self.gru(
                    x[start_idx:end_idx], rnn_hxs * masks[start_idx].view(1, -1, 1)
                )
                outputs.append(rnn_scores)
            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)
            # flatten
            x = x.view(T * N, -1)
            rnn_hxs = rnn_hxs.squeeze(0)
        hidden_state["rnn_hxs"] = rnn_hxs
        return x, hidden_state
class IdentityBase(BaseNet):
    """Non-recurrent base that returns observations unchanged."""
    def __init__(self, input_shape):
        super().__init__(False, None, None)
        self.input_shape = input_shape

    def net(self, x):
        # Identity mapping; kept for interface parity with other bases.
        return x

    @property
    def output_shape(self):
        return self.input_shape

    def forward(self, inputs, hxs, masks):
        # NOTE(review): returns None for the hidden state rather than
        # echoing ``hxs`` — confirm callers ignore it for this base.
        return inputs, None
class PassThroughBase(BaseNet):
    """
    If recurrent=True, will apply RNN layer, otherwise will just pass through
    """

    def __init__(self, input_shape, recurrent, hidden_size):
        # Only flat observations can feed the GRU.
        if len(input_shape) != 1:
            raise ValueError("Possible RNN can only work on flat")
        super().__init__(recurrent, input_shape[0], hidden_size)
        self.input_shape = input_shape

    def net(self, x):
        # Identity; inputs pass straight through (modulo the GRU).
        return x

    @property
    def output_shape(self):
        # GRU output size when recurrent, otherwise the raw input shape.
        if self.is_recurrent:
            return (self._hidden_size,)
        else:
            return self.input_shape

    def forward(self, inputs, hxs, masks):
        x = inputs
        if self.is_recurrent:
            x, hxs = self._forward_gru(x, hxs, masks)
        return x, hxs
class CNNBase(BaseNet):
    """Nature-DQN style CNN torso for image observations.

    Assumes inputs are pixel images in [0, 255]; the 32*7*7 flatten
    implies 84x84 inputs — TODO confirm.
    """
    def __init__(self, num_inputs, recurrent, hidden_size):
        super().__init__(recurrent, hidden_size, hidden_size)
        # Orthogonal init with ReLU gain, zero bias, for every layer.
        init_ = lambda m: weight_init(
            m,
            nn.init.orthogonal_,
            lambda x: nn.init.constant_(x, 0),
            nn.init.calculate_gain("relu"),
        )
        self.net = nn.Sequential(
            init_(nn.Conv2d(num_inputs, 32, 8, stride=4)),
            nn.ReLU(),
            init_(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            init_(nn.Conv2d(64, 32, 3, stride=1)),
            nn.ReLU(),
            Flatten(),
            init_(nn.Linear(32 * 7 * 7, hidden_size)),
            nn.ReLU(),
        )
        self.train()

    def forward(self, inputs, hxs, masks):
        # Scale pixels from [0, 255] to [0, 1] before the conv stack.
        x = self.net(inputs / 255.0)
        if self.is_recurrent:
            x, hxs = self._forward_gru(x, hxs, masks)
        return x, hxs
class MLPBase(BaseNet):
    """Configurable MLP feature extractor, optionally recurrent."""
    def __init__(
        self,
        num_inputs,
        recurrent,
        hidden_sizes,
        weight_init=def_mlp_weight_init,
        get_activation=lambda: nn.Tanh(),
        no_last_act=False,
    ):
        """
        - hidden_sizes: widths of the hidden layers (must be non-empty);
          the last entry is the output width.
        - weight_init: per-layer init function applied to each Linear.
        - get_activation: factory returning a fresh activation module.
        - no_last_act: if True the activation will not be applied on the final
          output.
        """
        super().__init__(recurrent, num_inputs, hidden_sizes[-1])
        assert len(hidden_sizes) > 0
        layers = [weight_init(nn.Linear(num_inputs, hidden_sizes[0])), get_activation()]
        # Minus one for the input layer
        for i in range(len(hidden_sizes) - 1):
            layers.append(weight_init(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1])))
            # i == len(hidden_sizes) - 2 is the final Linear layer.
            if not (no_last_act and i == len(hidden_sizes) - 2):
                layers.append(get_activation())
        self.net = nn.Sequential(*layers)
        self.train()

    def forward(self, inputs, hxs, masks):
        x = inputs
        if self.is_recurrent:
            x, hxs = self._forward_gru(x, hxs, masks)
        hidden_actor = self.net(x)
        return hidden_actor, hxs
class MLPBasic(MLPBase):
    """Convenience MLP: ``num_layers`` equal-width, non-recurrent layers."""
    def __init__(
        self,
        num_inputs,
        hidden_size,
        num_layers,
        weight_init=def_mlp_weight_init,
        get_activation=lambda: nn.Tanh(),
    ):
        super().__init__(
            num_inputs, False, [hidden_size] * num_layers, weight_init, get_activation
        )
class TwoLayerMlpWithAction(BaseNet):
    """Two-layer MLP over the concatenation of observation and action
    (typical Q-function torso)."""
    def __init__(
        self,
        num_inputs,
        hidden_sizes,
        action_dim,
        weight_init=def_mlp_weight_init,
        get_activation=lambda: nn.Tanh(),
    ):
        assert len(hidden_sizes) == 2, "Only two hidden sizes"
        super().__init__(False, num_inputs, hidden_sizes[-1])
        self.net = nn.Sequential(
            weight_init(nn.Linear(num_inputs + action_dim, hidden_sizes[0])),
            get_activation(),
            weight_init(nn.Linear(hidden_sizes[0], hidden_sizes[1])),
            get_activation(),
        )
        self.train()

    def forward(self, inputs, actions, hxs, masks):
        # Actions are appended to the observation features.
        return self.net(torch.cat([inputs, actions], dim=-1)), hxs
class InjectNet(nn.Module):
    """Wrap base/head networks around an optional mid-network injection.

    When ``should_inject`` is True, an extra tensor is concatenated onto
    the base network's output before the Linear+Tanh bridge that feeds
    the head network; otherwise the bridge takes the base output alone.
    """

    def __init__(
        self, base_net, head_net, in_dim, hidden_dim, inject_dim, should_inject
    ):
        super().__init__()
        self.base_net = base_net
        self.head_net = head_net
        # Ignore the injection width entirely when injection is disabled.
        bridge_in = in_dim + (inject_dim if should_inject else 0)
        self.inject_layer = nn.Sequential(
            nn.Linear(bridge_in, hidden_dim), nn.Tanh()
        )
        self.should_inject = should_inject

    def forward(self, x, inject_x):
        hidden = self.base_net(x)
        if self.should_inject:
            hidden = torch.cat([hidden, inject_x], dim=-1)
        bridged = self.inject_layer(hidden)
        return self.head_net(bridged)
class DoubleQCritic(BaseNet):
    """
    Twin Q-networks over (obs, action), as used in SAC/TD3 to reduce
    overestimation bias. Code from https://github.com/denisyarats/pytorch_sac.
    """

    def __init__(self, obs_dim, action_dim, hidden_dim, hidden_depth):
        super().__init__(False, None, hidden_dim)
        # ``hidden_depth`` equal-width layers plus a scalar output head.
        dims = [hidden_dim] * hidden_depth
        dims.append(1)
        self.Q1 = MLPBase(
            obs_dim + action_dim,
            False,
            dims,
            weight_init=reg_mlp_weight_init,
            get_activation=lambda: nn.ReLU(inplace=True),
            no_last_act=True,
        )
        self.Q2 = MLPBase(
            obs_dim + action_dim,
            False,
            dims,
            weight_init=reg_mlp_weight_init,
            get_activation=lambda: nn.ReLU(inplace=True),
            no_last_act=True,
        )
        # Apply the weight init exactly the same way as @denisyarats
        self.apply(no_bias_weight_init)

    @property
    def output_shape(self):
        # Two scalar Q-values per input.
        return (2,)

    def forward(self, obs, action):
        assert obs.size(0) == action.size(0)
        obs_action = torch.cat([obs, action], dim=-1)
        q1, _ = self.Q1(obs_action, None, None)
        q2, _ = self.Q2(obs_action, None, None)
        return q1, q2
| 27.924791 | 88 | 0.569875 | 9,270 | 0.924688 | 0 | 0 | 627 | 0.062544 | 0 | 0 | 999 | 0.099651 |
4efe3309dbe459b7936bf0bef1d7d022f2a55e49 | 4,407 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/apphosting/verify.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/apphosting/verify.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/apphosting/verify.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z | ''' Common Verify functions for IOX / app-hosting '''
import logging
import time
log = logging.getLogger(__name__)
# Import parser
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
def verify_app_requested_state(device, app_list=None, requested_state='RUNNING', max_time=120, interval=10):
    '''
    verify_app_requested_state
    Check show app-hosting list and confirm the requested state of the passed in list of appids
    Args:
        device ('obj') : Device object
        app_list ('list') : list of appids
        requested_state ('str') : requested state of appid
        max_time ('int') : max time to wait
        interval ('int') : interval timer
    Returns:
        True
        False
    Raises:
        None
    '''
    all_apps_achieved_requested_state = False
    if app_list is None:
        app_list = []
    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        try:
            # Temporarily raise the execute timeout: this show command
            # can be slow on loaded devices.
            old_timeout = device.execute.timeout
            device.execute.timeout = 120
            output = device.parse('show app-hosting list')
            device.execute.timeout = old_timeout
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        # Bug fix: the original updated the success flag per app inside
        # the loop, so a later app already in the requested state
        # overwrote the False recorded for an earlier app and the check
        # could pass while some apps were still down. It also slept once
        # per failing app. Evaluate all apps together instead; an app
        # missing from the parser output counts as not ready.
        pending = [
            app for app in app_list
            if output.get('app_id', {}).get(app, {}).get('state') != requested_state
        ]
        if not pending:
            all_apps_achieved_requested_state = True
            break
        for app in pending:
            log.info("App name %s not in the requested state %s yet, wait" % (app, requested_state))
        all_apps_achieved_requested_state = False
        timeout.sleep()
    if all_apps_achieved_requested_state:
        log.info("All Apps achieved the requested state!")
    else:
        log.error("Not all apps achieved the requested state!")
    return all_apps_achieved_requested_state
def verify_iox_enabled(device, max_time=600, interval=10):
    '''
    verify_iox_enabled
    Check show iox and confirm all services are up and running
    Args:
        device ('obj') : Device object
        max_time ('int') : max time to wait
        interval ('int') : interval timer
    Returns:
        True
        False
    Raises:
        None
    '''
    # Every one of these services must report "Running" for IOX to be enabled.
    required_services = ('caf_service', 'ha_service', 'ioxman_service',
                         'libvirtd', 'dockerd')
    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        try:
            parsed = device.parse("show iox")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        if all(parsed.get(service, '').strip().lower() == 'running'
               for service in required_services):
            log.info("IOX is enabled")
            return True
        timeout.sleep()
    log.info("IOX was not enabled!")
    return False
def verify_iox_disabled(device, max_time=600, interval=10, redundancy=False):
    '''
    verify_iox_disabled
    Check show iox and confirm all services are not running
    Args:
        device ('obj') : Device object
        max_time ('int') : max time to wait
        interval ('int') : interval timer
        redundancy ('bool') : currently unused; kept for interface compatibility
    Returns:
        True
        False
    Raises:
        None
    '''
    # Every one of these services must report "Not Running"; note that
    # 'libvirtd' is not part of this check (unlike the enable check).
    required_services = ('caf_service', 'ha_service', 'ioxman_service', 'dockerd')
    timeout = Timeout(max_time=max_time, interval=interval)
    while timeout.iterate():
        try:
            parsed = device.parse("show iox")
        except SchemaEmptyParserError:
            timeout.sleep()
            continue
        if all(parsed.get(service, '').strip().lower() == 'not running'
               for service in required_services):
            log.info("IOX is disabled")
            return True
        timeout.sleep()
    log.info("IOX was not disabled!")
    return False
| 33.386364 | 109 | 0.568187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,653 | 0.375085 |
4efef6a46d52e7f4dd5e178790df0f2e99b29bf4 | 15,350 | py | Python | memo/models/neural_nets.py | feloundou/memo | dc988d38dc6bcbcbb6851b78fb90ac77da4ad1b5 | [
"Apache-2.0"
] | null | null | null | memo/models/neural_nets.py | feloundou/memo | dc988d38dc6bcbcbb6851b78fb90ac77da4ad1b5 | [
"Apache-2.0"
] | null | null | null | memo/models/neural_nets.py | feloundou/memo | dc988d38dc6bcbcbb6851b78fb90ac77da4ad1b5 | [
"Apache-2.0"
] | null | null | null | # import scipy.signal
from gym.spaces import Box, Discrete
import numpy as np
import torch
from torch import nn
import IPython
# from torch.nn import Parameter
import torch.nn.functional as F
from torch.distributions import Independent, OneHotCategorical, Categorical
from torch.distributions.normal import Normal
# # from torch.distributions.categorical import Categorical
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a feed-forward stack: Linear layers of the given widths, each
    followed by `activation`, with `output_activation` after the last layer."""
    last_hidden = len(sizes) - 2
    layers = []
    for idx, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        layers.append(nn.Linear(n_in, n_out))
        layers.append(activation() if idx < last_hidden else output_activation())
    return nn.Sequential(*layers)
class Actor(nn.Module):
    """Base policy: subclasses define the action distribution and its log-prob."""

    def _distribution(self, obs):
        """Return an action distribution for the given observations."""
        raise NotImplementedError

    def _log_prob_from_distribution(self, pi, act):
        """Return the log-likelihood of `act` under distribution `pi`."""
        raise NotImplementedError

    def forward(self, obs, act=None):
        """Build the action distribution for `obs`; when `act` is provided,
        also return its log-likelihood under that distribution."""
        pi = self._distribution(obs)
        logp_a = self._log_prob_from_distribution(pi, act) if act is not None else None
        return pi, logp_a
class MLPCategoricalActor(Actor):
    """Discrete-action policy: an MLP produces one logit per action."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        # Maps an observation to unnormalised action logits.
        self.logits_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)

    def _distribution(self, obs):
        return Categorical(logits=self.logits_net(obs))

    def _log_prob_from_distribution(self, pi, act):
        return pi.log_prob(act)
class MLPGaussianActor(Actor):
    """Continuous-action policy: diagonal Gaussian with an MLP mean network."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        # State-independent log standard deviation, learned, initialised at -0.5.
        init_log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
        self.log_std = nn.Parameter(torch.as_tensor(init_log_std))
        self.mu_net = mlp([obs_dim] + list(hidden_sizes) + [act_dim], activation)

    def _distribution(self, obs):
        return Normal(self.mu_net(obs), torch.exp(self.log_std))

    def _log_prob_from_distribution(self, pi, act):
        # Normal is per-dimension; sum over the action axis for a joint log-prob.
        return pi.log_prob(act).sum(axis=-1)
class MLPCritic(nn.Module):
    """State-value approximator: MLP mapping an observation to a scalar value."""

    def __init__(self, obs_dim, hidden_sizes, activation):
        super().__init__()
        self.v_net = mlp([obs_dim] + list(hidden_sizes) + [1], activation)

    def forward(self, obs):
        # Drop the trailing singleton dimension so values have shape (batch,).
        return torch.squeeze(self.v_net(obs), -1)
class MLPActorCritic(nn.Module):
    """Actor-critic bundle: one policy plus reward (`v`) and cost (`vc`) critics."""

    def __init__(self, observation_space, action_space,
                 hidden_sizes=(64, 64), activation=nn.Tanh):
        super().__init__()
        obs_dim = observation_space.shape[0]

        # Choose the policy head matching the action-space type.
        if isinstance(action_space, Box):
            self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation)
        elif isinstance(action_space, Discrete):
            self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation)

        # Separate critics for reward value and cost value.
        self.v = MLPCritic(obs_dim, hidden_sizes, activation)
        self.vc = MLPCritic(obs_dim, hidden_sizes, activation)

    def step(self, obs):
        """Sample an action; return (action, value, cost value, log-prob) as numpy arrays."""
        with torch.no_grad():
            pi = self.pi._distribution(obs)
            a = pi.sample()
            logp_a = self.pi._log_prob_from_distribution(pi, a)
            v = self.v(obs)
            vc = self.vc(obs)
        return a.numpy(), v.numpy(), vc.numpy(), logp_a.numpy()

    def act(self, obs):
        """Sample an action only (first element of step())."""
        return self.step(obs)[0]
class MEMOActor(nn.Module):
    """Gaussian policy head used by MEMO: MLP mean with a learned log-std."""

    def __init__(self, state_dim, hidden_size, action_dim, activation=nn.Tanh):
        super(MEMOActor, self).__init__()
        # State-independent log standard deviation, initialised at -0.5.
        init_log_std = -0.5 * np.ones(action_dim, dtype=np.float32)
        self.log_std = nn.Parameter(torch.as_tensor(init_log_std))
        self.mu_net = mlp([state_dim] + hidden_size + [action_dim], activation)

    def _distribution(self, obs):
        return Normal(self.mu_net(obs), torch.exp(self.log_std))

    def forward(self, obs):
        # Identical to _distribution(); kept so the module is directly callable.
        return self._distribution(obs)
# the critic is error here would be: reward + gamma*V(s_t+1)-V(s_t)
# http://incompleteideas.net/book/first/ebook/node66.html
class MEMO(nn.Module):
    """Multiple Experts, Multiple Objectives;

    VQ-VAE-style model over state deltas: an encoder, a vector quantizer that
    assigns each delta to one of `latent_modes` discrete contexts, a decoder
    that reconstructs the delta, and a Gaussian action decoder conditioned on
    the state plus the inferred context.
    """
    def __init__(self, obs_dim, out_dim, encoder_hidden, decoder_hidden, actor_hidden, latent_modes):
        '''
        :param obs_dim: observation dimension (also used as the embedding width)
        :param out_dim: action dimension
        :param encoder_hidden: encoder hidden sizes; last entry is the post-net output width
        :param decoder_hidden: hidden sizes of the state (reconstruction) decoder
        :param actor_hidden: hidden sizes of the action decoder
        :param latent_modes: number of discrete latent contexts (codebook size)
        '''
        super(MEMO, self).__init__()
        self.found_contexts = []  # context labels seen in the latest batch
        self.latent_modes = latent_modes
        self.num_embeddings = self.latent_modes
        self.embedding_dim = obs_dim
        self.vq_encoder = VQEncoder(obs_dim, self.embedding_dim) # original
        self.prenet = nn.Linear(self.embedding_dim, self.embedding_dim)
        self.vector_quantizer = VectorQuantizer(self.num_embeddings, self.embedding_dim)
        self.postnet = nn.Linear(self.embedding_dim, encoder_hidden[-1])
        self.vq_decoder = VQDecoder(encoder_hidden[-1], decoder_hidden, obs_dim)
        # Actor input is the state concatenated with a context vector of width
        # latent_modes (see compute_quantized_loss / act).
        self.action_decoder = MEMOActor(state_dim=obs_dim + self.latent_modes, hidden_size=actor_hidden, action_dim=out_dim)
        # self.action_gaussian = GaussianActor(obs_dim=obs_dim + self.latent_modes, act_dim=out_dim,
        #                               hidden_sizes=[128]*4, activation=nn.LeakyReLU)
        self.action_vq_dist = None

    def compute_quantized_loss(self, state, delta_state, actions):
        '''
        Encode the state delta, quantize it to a discrete context, reconstruct
        the delta, and build the context-conditioned action distribution.

        :param state: observation batch [B, OBS_DIM]
        :param delta_state: state-difference batch [B, OBS_DIM]
        :param actions: action batch (currently unused here; consumed in forward)
        :return: (encoder_output, quantized, reconstruction, context labels, action distribution)
        '''
        delta_state_enc = self.vq_encoder(delta_state)  # In: [B, OBS_DIM]; Out: # [B, OBS_DIM]
        encoder_output = self.prenet(delta_state_enc)  # In: [B, OBS_DIM]; Out: # [B, OBS_DIM]
        quantized, categorical_proposal, categorical_proposal_prob = self.vector_quantizer(encoder_output)

        # update the set of known contexts
        self.found_contexts = set([t.data.item() for t in categorical_proposal])

        # Straight Through Estimator (Some Magic): forward pass uses the
        # quantized value, backward pass copies gradients to encoder_output.
        st_quantized = encoder_output + (quantized - encoder_output).detach()
        post_quantized = self.postnet(st_quantized)
        # print("Post Quantized: ", post_quantized)
        reconstruction = self.vq_decoder(post_quantized)
        # print("Reconstruction: ", reconstruction)
        categorical_proposal_reshape = torch.reshape(categorical_proposal, (-1, 1))
        categorical_proposal_onehot = F.one_hot(categorical_proposal_reshape, self.latent_modes).squeeze().float()
        # total_max = torch.tensor(0.)
        # print("distances max: ", max(total_max, torch.max(categorical_proposal_prob)))

        # NOTE(review): the actor is conditioned on the raw distance vector
        # (categorical_proposal_prob), not the one-hot labels computed above --
        # confirm this is intentional (the one-hot variant is left commented out).
        # concat_state_vq = torch.cat([state, categorical_proposal_onehot], dim=-1)
        concat_state_vq = torch.cat([state, categorical_proposal_prob], dim=-1)
        action_vq_dist = self.action_decoder(concat_state_vq)
        return encoder_output, quantized, reconstruction, categorical_proposal, action_vq_dist
        # return encoder_output, quantized, reconstruction, categorical_proposal, action_mse

    def act(self, state, context_label):
        """Sample an action for `state` under a caller-supplied context label/vector."""
        concat_state_vq = torch.cat([state, torch.reshape(torch.as_tensor(context_label), (-1,))], dim=-1)
        action_vq_dist = self.action_decoder(concat_state_vq)
        action = action_vq_dist.sample()
        return action

    def forward(self, X, Delta_X, A, kl_beta=1., recon_gamma=1.):
        """
        Given input tensor, forward propagate, compute the loss, and backward propagate.
        Represents the lifecycle of a single iteration

        :param X: Raw state tensor
        :param Delta_X: State difference tensor
        :param A: Action tensor
        :param kl_beta: KL divergence temperance factor (commitment-loss weight)
        :param recon_gamma: State weights (currently unused)
        : Important to note that both recon and context loss cannot be negative.
        """
        encoder_output, quantized, reconstruction, vq_latent_labels, action_vq_dist =\
            self.compute_quantized_loss(X, Delta_X, A)
        vq_criterion = VQCriterion(beta=kl_beta)
        vq_total_loss, recons_loss, vq_loss, commitment_loss = vq_criterion(Delta_X, encoder_output, quantized, reconstruction)

        # original formula: inverse-likelihood action penalty, smoothed by 0.1
        # to avoid division by zero for near-zero-probability actions.
        loss_pi = (torch.tensor(1.)/(torch.exp(action_vq_dist.log_prob(A)) + torch.tensor(0.1))).sum(axis=-1)
        loss = loss_pi * vq_total_loss
        return loss, loss_pi, X, vq_latent_labels, vq_total_loss
class VQEncoder(nn.Module):
    """Two-layer tanh encoder mapping a state delta into the embedding space."""

    def __init__(self, in_dim, out_dim):
        super(VQEncoder, self).__init__()
        bottleneck = out_dim // 2
        self.net = nn.Sequential(
            nn.Linear(in_dim, bottleneck),
            nn.Tanh(),
            nn.Linear(bottleneck, out_dim),
            nn.Tanh(),
        )

    def forward(self, input):
        return self.net(input)
class Clamper(nn.Module):
    """Module wrapper around torch.clamp with fixed lower/upper bounds."""

    def __init__(self, min=None, max=None):
        super().__init__()
        # Either bound may be None, meaning unbounded on that side.
        self.min = min
        self.max = max

    def forward(self, input):
        return input.clamp(self.min, self.max)
class VQDecoder(nn.Module):
    """Decoder reconstructing a state delta from a (post-processed) quantized code."""

    def __init__(self, obs_dim, hidden_dim, out_dim, activation=nn.Tanh):
        super().__init__()
        # Extra tanh squashing applied to the input before the MLP body.
        self.initial_act = nn.Tanh()
        self.net = mlp([obs_dim] + hidden_dim + [out_dim], activation)

    def forward(self, input):
        squashed = self.initial_act(input)
        return self.net(squashed)
class VectorQuantizer(nn.Module):
    """Codebook lookup: snap each encoder output to its nearest embedding vector."""

    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.num_embeddings = num_embeddings  # codebook size (E_N)
        self.embedding_dim = embedding_dim    # embedding width (E_D)
        self.embeddings = nn.Embedding(num_embeddings, embedding_dim)
        # Uniform init in [-1/E_N, 1/E_N].
        self.scale = 1. / self.num_embeddings
        print("Quantizer Scale: ", self.scale)
        nn.init.uniform_(self.embeddings.weight, -self.scale, self.scale)

    def proposal_distribution(self, input):
        """Return (nearest codebook index per row [B], full distance matrix [B, E_N])."""
        flat = input.flatten(end_dim=-2).contiguous()  # [B, E_D]
        # Squared Euclidean distances via ||x||^2 + ||e||^2 - 2*x.e
        distances = (flat ** 2).sum(dim=1, keepdim=True)            # [B, 1]
        distances = distances + (self.embeddings.weight ** 2).sum(dim=1)  # [B, E_N]
        distances -= 2 * flat @ self.embeddings.weight.t()          # [B, E_N]
        nearest = torch.argmin(distances, dim=-1)                   # [B]
        return nearest, distances

    def forward(self, input):
        proposal, proposal_prob = self.proposal_distribution(input)  # [B]
        quantized = self.embeddings(proposal).contiguous()           # [B, E_D]
        return quantized, proposal, proposal_prob
class VQCriterion(nn.Module):
    r"""
    Loss for the vector-quantized autoencoder:
    reconstruction + codebook (vq) loss + beta * commitment loss.

    vq_loss: \| \text{sg}[I(x, e)] * e - \text{sg}[z_e(x)] \|_2^2
    """

    def __init__(self, beta):
        super().__init__()
        self.beta = beta  # commitment-loss weight

    def forward(self, input, encoder_output, quantized, reconstruction):
        enc_flat = encoder_output.flatten(end_dim=-2)
        quant_flat = quantized.flatten(end_dim=-2)

        reconstruction_loss = F.mse_loss(input, reconstruction)
        # Codebook loss: pull embeddings toward (detached) encoder outputs.
        vq_loss = F.mse_loss(enc_flat.detach(), quant_flat)
        # Commitment loss: keep encoder outputs close to (detached) embeddings.
        commitment_loss = F.mse_loss(enc_flat, quant_flat.detach())

        total_loss = reconstruction_loss + vq_loss + self.beta * commitment_loss
        return total_loss, reconstruction_loss, vq_loss, commitment_loss
class VDB(nn.Module):
    """Variational Discriminator Bottleneck: encode input to a latent z,
    then discriminate on a reparameterized sample of z."""

    def __init__(self, num_inputs, args):
        super(VDB, self).__init__()
        # Encoder trunk plus the two heads producing mu and logvar.
        self.fc1 = nn.Linear(num_inputs, args.hidden_size)
        self.fc2 = nn.Linear(args.hidden_size, args.z_size)
        self.fc3 = nn.Linear(args.hidden_size, args.z_size)
        # Discriminator operating on the latent sample.
        self.fc4 = nn.Linear(args.z_size, args.hidden_size)
        self.fc5 = nn.Linear(args.hidden_size, 1)
        # Shrink the output layer's initial weights and zero its bias.
        self.fc5.weight.data.mul_(0.1)
        self.fc5.bias.data.mul_(0.0)

    def encoder(self, x):
        hidden = torch.tanh(self.fc1(x))
        return self.fc2(hidden), self.fc3(hidden)

    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps, with eps ~ N(0, I).
        std = torch.exp(logvar / 2)
        return mu + std * torch.randn_like(std)

    def discriminator(self, z):
        hidden = torch.tanh(self.fc4(z))
        return torch.sigmoid(self.fc5(hidden))

    def forward(self, x):
        mu, logvar = self.encoder(x)
        z = self.reparameterize(mu, logvar)
        return self.discriminator(z), mu, logvar
###########################################################################3
from torch.autograd import Variable
from torch.distributions import Distribution, Normal
class TanhNormal(torch.distributions.Distribution):
    """
    Represent distribution of X where
        X ~ tanh(Z)
        Z ~ N(mean, std)

    Note: this is not very numerically stable.
    """
    def __init__(self, normal_mean, normal_std, epsilon=1e-6):
        """
        :param normal_mean: Mean of the normal distribution
        :param normal_std: Std of the normal distribution
        :param epsilon: Numerical stability epsilon when computing log-prob.
        """
        self.normal_mean = normal_mean
        self.normal_std = normal_std
        self.normal = Normal(normal_mean, normal_std)
        self.epsilon = epsilon

    def sample_n(self, n, return_pre_tanh_value=False):
        # Draw n samples of Z and squash through tanh; optionally also return
        # the pre-tanh value Z (useful for exact log-prob computation).
        # NOTE(review): Normal.sample_n is deprecated in modern torch --
        # confirm the targeted torch version still supports it.
        z = self.normal.sample_n(n)
        if return_pre_tanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)

    def log_prob(self, value, pre_tanh_value=None):
        """
        :param value: some value, x
        :param pre_tanh_value: arctanh(x)
        :return: log-density of x under the tanh-Normal
        """
        if pre_tanh_value is None:
            # arctanh(x) = 0.5 * log((1 + x) / (1 - x)); diverges as |x| -> 1.
            pre_tanh_value = torch.log(
                (1+value) / (1-value)
            ) / 2
        # Change of variables: log p_X(x) = log p_Z(z) - log|d tanh(z)/dz|,
        # with d tanh(z)/dz = 1 - tanh(z)^2; epsilon guards log(0) at |x| -> 1.
        return self.normal.log_prob(pre_tanh_value) - torch.log(
            1 - value * value + self.epsilon
        )

    def sample(self, return_pretanh_value=False):
        # Non-reparameterized (no-gradient) sample.
        z = self.normal.sample()
        if return_pretanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)

    def rsample(self, return_pretanh_value=False):
        # Reparameterized sample: mean + std * standard-normal noise, so
        # gradients flow through normal_mean and normal_std.
        # NOTE(review): wraps a numpy-shaped standard Normal in the legacy
        # Variable API -- Variable is a no-op on modern torch; confirm.
        z = (
            self.normal_mean +
            self.normal_std *
            Variable(Normal(
                np.zeros(self.normal_mean.size()),
                np.ones(self.normal_std.size())
            ).sample())
        )
        # z.requires_grad_()

        if return_pretanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)
| 36.032864 | 127 | 0.641238 | 14,358 | 0.935375 | 0 | 0 | 0 | 0 | 0 | 0 | 3,184 | 0.207427 |
4eff2ab37a18ee36fb3fd5444900984659555e54 | 855 | py | Python | sprocket/util/extfrm.py | zhouming-hfut/sprocket | 68d4005284b72a891d0c0f81afabea087fc45960 | [
"MIT"
] | 500 | 2017-09-25T14:04:50.000Z | 2022-03-24T22:21:51.000Z | sprocket/util/extfrm.py | zhouming-hfut/sprocket | 68d4005284b72a891d0c0f81afabea087fc45960 | [
"MIT"
] | 73 | 2017-09-25T13:59:38.000Z | 2022-01-12T11:35:58.000Z | sprocket/util/extfrm.py | zhouming-hfut/sprocket | 68d4005284b72a891d0c0f81afabea087fc45960 | [
"MIT"
] | 116 | 2017-09-26T15:54:13.000Z | 2022-03-05T08:40:14.000Z | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
def extfrm(data, npow, power_threshold=-20):
    """Extract frame over the power threshold

    Parameters
    ----------
    data: array, shape (`T`, `dim`)
        Array of input data
    npow : array, shape (`T`)
        Vector of normalized power sequence.
    power_threshold : float, optional
        Value of power threshold [dB]
        Default set to -20

    Returns
    -------
    data: array, shape (`T_ext`, `dim`)
        Remaining data after extracting frame
        `T_ext` <= `T`

    Raises
    ------
    ValueError
        If `data` and `npow` do not contain the same number of frames.
    """
    T = data.shape[0]
    if T != len(npow):
        # The original code raised a bare string, which is itself a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError("Length of two vectors is different.")

    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T

    return extdata
| 22.5 | 64 | 0.607018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.604678 |
f60095130f09208c0cc014869141663b3698d4a5 | 9,317 | py | Python | tickets_handler/models.py | ChanTerelLy/partnerweb3 | 4f37f032242cbc67d57b7161ecb5960d9cfde76d | [
"MIT"
] | null | null | null | tickets_handler/models.py | ChanTerelLy/partnerweb3 | 4f37f032242cbc67d57b7161ecb5960d9cfde76d | [
"MIT"
] | null | null | null | tickets_handler/models.py | ChanTerelLy/partnerweb3 | 4f37f032242cbc67d57b7161ecb5960d9cfde76d | [
"MIT"
] | null | null | null | import uuid
from django.db import models
from partnerweb_parser.manager import NewDesign, Ticket
import re
from partnerweb_parser import system, mail, manager
import json
import datetime
from partnerweb_parser.date_func import dmYHM_to_datetime
from tickets_handler.tasks import update_date_for_assigned
class Modify(models.Model):
    """Abstract base model adding a UUID primary key and create/update timestamps."""
    # Client-side generated UUID primary key; not editable in forms.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save

    class Meta:
        abstract = True
class Workers(models.Model):
    """Sales agent/operator record mirrored from the partner web service."""
    name = models.CharField(max_length=250, unique=True)
    number = models.CharField(max_length=50, unique=True)  # phone number, the natural lookup key
    master = models.CharField(max_length=250)  # supervisor's name (see Employer.find_master)
    status = models.BooleanField()
    url = models.URLField()
    # NOTE(review): auto_now=True refreshes this on every save, which does not
    # match the name "hiring_date" -- auto_now_add may have been intended
    # (changing it requires a migration, so it is only flagged here).
    hiring_date = models.DateField(auto_now=True, blank=True)

    @classmethod
    @system.my_timer
    def replace_num_worker(cls, tickets):
        """Attach the operator's name to each ticket based on its operator number.

        Tickets whose operator number is unknown are left untouched.
        """
        if tickets is not None:
            for ticket in tickets:
                try:
                    worker = cls.objects.get(number=ticket.operator)
                except cls.DoesNotExist:
                    # Unknown operator number: keep the raw number on the ticket.
                    # (Previously a bare `except` that also hid real errors.)
                    continue
                ticket.name_operator = worker.name
        return tickets

    def natural_key(self):
        """Serialize this worker by name (Django natural-key protocol)."""
        return self.name

    @classmethod
    def update_workers(cls, auth):
        """Sync the Workers table with the operators parsed from the partner site."""
        for worker in manager.Worker.get_workers(auth):
            operator = cls.objects.filter(number=worker.number)
            if not operator:
                # New operator: create the row.
                cls(name=worker.name, number=worker.number, master=worker.master, status=worker.status,
                    url=worker.url).save()
                continue
            # Existing operator: refresh mutable fields in bulk.
            operator.update(name=worker.name, master=worker.master, status=worker.status, url=worker.url)

    def __str__(self):
        return self.name
class ChiefInstaller(models.Model):
    """Lead installer contact details."""
    full_name = models.CharField(max_length=250, unique=True)
    number = models.CharField(max_length=50, unique=True)  # phone number
class Installer(models.Model):
    """Field installer contact details, scraped from ticket comments."""
    full_name = models.CharField(max_length=250, unique=True)
    number = models.CharField(max_length=50, unique=True)  # phone number

    @classmethod
    def parse_installers(cls, auth):
        """Scan switched tickets on the partner site and upsert any installer
        mentioned in their comments."""
        login = NewDesign(auth['login'], auth['operator'], auth['password'])
        tickets = login.retrive_tickets()
        sw_tickets, sw_today = login.switched_tickets(tickets)
        for ticket in sw_tickets:
            info_data = login.ticket_info(ticket.id)
            name, phone = cls.find_installer_in_text(info_data.comments)
            try:
                installer, created = cls.objects.update_or_create(full_name=name)
                if not created:
                    installer.number = phone
                    installer.save()
                else:
                    cls(full_name=name, number=phone).save()
            except:
                # Best effort: skips tickets with no installer match
                # (name/phone are None) or rows that fail to save.
                continue

    @staticmethod
    def find_installer_in_text(comments):
        """Return (name, phone) of the assigned service engineer found in the
        ticket comments, or (None, None) if no comment matches the pattern."""
        for comment in comments:
            # Pattern (Russian): "Assigned service engineer <name>, phone <11 digits>".
            data = re.search(r'Назначен Сервис - инженер (\w* \w* \w*), телефон (\d{11})', comment['text'])
            if (data):
                name = data.group(1)
                phone = data.group(2)
                return name, phone
        return None, None

    def __str__(self):
        return f'{self.full_name} {self.number}'
class AdditionalTicket(models.Model):
    """Manual correction (add/remove) to an operator's monthly switched-ticket set."""
    number = models.IntegerField()  # ticket number being adjusted
    positive = models.BooleanField() # add or remove ticket
    operator = models.ForeignKey(Workers, on_delete=models.CASCADE)
    datetime = models.DateTimeField(auto_now=True)

    @classmethod
    def add(cls, payload):
        """Record an adjustment from a JSON request body (raw bytes)."""
        payload = json.loads(payload.decode('utf-8'))
        cls(number=payload['number'], positive=payload['positive'],
            operator=Workers.objects.get(number=int(payload['operator']))).save()

    @classmethod
    def show(cls, operator):
        """Return the adjustments recorded for the given operator number.

        Fix: Workers has no ``phone`` field (the field is ``number``), so the
        original lookup raised FieldError; it also discarded the queryset
        instead of returning it.
        """
        return cls.objects.filter(operator=Workers.objects.get(number=operator))

    @classmethod
    def clear_switched_tickets(cls, sw_tickets, all_tickets):
        """Apply this month's manual adjustments to the switched-ticket list.

        Positive adjustments append the matching ticket from ``all_tickets``;
        negative ones remove the matching ticket from ``sw_tickets``.
        """
        for t in cls.objects.filter(datetime__month=datetime.datetime.now().month):
            if t.positive:
                for all_t in all_tickets:
                    if isinstance(all_t.ticket_paired_info, Ticket):
                        if all_t.ticket_paired_info.number == t.number or all_t.number == t.number:
                            sw_tickets.append(all_t)
                            continue
            elif not t.positive:
                for sw_t in sw_tickets:
                    try:
                        if t.number == sw_t.ticket_paired_info.number or sw_t.number == t.number:
                            sw_tickets.remove(sw_t)
                            break
                    except:
                        # ticket_paired_info may not carry a number; skip it.
                        continue
        return sw_tickets

    def __str__(self):
        return str(self.number)
class Employer(models.Model):
    """Employee profile holding credentials for the partner systems."""
    profile_name = models.TextField()
    name = models.TextField()
    email = models.EmailField()
    phone = models.CharField(max_length=10)
    position = models.TextField()
    operator = models.ForeignKey(Workers, on_delete=models.CASCADE)
    operator_password = models.TextField()
    supervisor_password = models.CharField(max_length=50)

    @classmethod
    def find_master(cls, phone):
        """Return the Employer record of the master (supervisor) of the worker
        with the given phone number.

        NOTE(review): returns an Employer instance when found but an empty
        queryset otherwise -- callers must tolerate the mixed return type.
        Also note Workers.objects.get raises DoesNotExist for unknown numbers.
        """
        master = Workers.objects.get(number=phone).master if Workers.objects.get(number=phone) else None
        master_obj = cls.objects.get(name=master) if master else cls.objects.none()
        return master_obj

    def __str__(self):
        return self.name
class Reminder(models.Model):
    """Scheduled call-back reminder attached to a ticket."""
    ticket_number = models.TextField()
    client_name = models.TextField()
    client_number = models.CharField(max_length=10)
    timer = models.DateTimeField()  # when the reminder should fire
    operator = models.ForeignKey(Workers, on_delete=models.CASCADE)
    link = models.URLField(null=True, blank=True)  # optional link shown with the reminder
    recipient = models.TextField()
class TicketSource(models.Model):
    """Marketing source recorded once per ticket, attributed to an agent."""
    ticket_number = models.CharField(max_length=20, unique=True)
    source = models.CharField(max_length=50)
    agent = models.ForeignKey(Workers, on_delete=models.CASCADE)
    date = models.DateField(auto_now=True, blank=True)

    @classmethod
    def add_source(cls, ticket_number, source, operator):
        """Create or update the source for a ticket (upsert keyed by ticket number)."""
        try:
            data = cls.objects.get(ticket_number=ticket_number)
        except cls.DoesNotExist:
            # No record yet for this ticket: create one attributed to the
            # operator. (Previously a bare `except` that also masked save()
            # failures and could attempt a duplicate insert.)
            cls(ticket_number=ticket_number, source=source,
                agent=Workers.objects.get(number=operator)).save()
        else:
            data.source = source
            data.save()

    @classmethod
    def find_source(cls, ticket_number):
        """Return the stored source for a ticket; raises DoesNotExist if absent."""
        return cls.objects.get(ticket_number=ticket_number).source
class ACL(models.Model):
    """Access code with an expiry date."""
    code = models.CharField(max_length=50)
    date_end = models.DateField()  # last day the code is valid
class AssignedTickets(models.Model):
    """Snapshot of a ticket assigned for a call, synced from the parser."""
    ticket_number = models.IntegerField()
    when_assigned = models.DateTimeField(null=True, blank=True)  # assignment timestamp, if known
    client_address = models.CharField(max_length=200)
    client_name = models.CharField(max_length=150)
    phones = models.CharField(max_length=150)
    assigned_date = models.DateTimeField()  # scheduled call time
    agent = models.ForeignKey(Workers, null=True, blank=True, on_delete=models.CASCADE)

    @classmethod
    def update(cls, ticket, *args, **kwargs):
        """Create or refresh the stored snapshot for a parsed ticket.

        On first insert this also e-mails the agent's master about the
        assignment. Returns None in both branches (Model.save() returns None).
        """
        if(kwargs.get('satelit_type')):
            # Satellite tickets carry the real data on the paired ticket.
            ticket = ticket.ticket_paired_info
        db_ticket = cls.objects.filter(ticket_number=ticket.number).first()
        if db_ticket:
            # Refresh every mutable field from the freshly parsed ticket.
            if ticket.assigned_date:
                db_ticket.when_assigned = dmYHM_to_datetime(ticket.assigned_date)
            db_ticket.client_address = ticket.address
            db_ticket.client_name = ticket.name
            db_ticket.phones = ticket.phones
            db_ticket.assigned_date = dmYHM_to_datetime(ticket.call_time)
            db_ticket.agent = Workers.objects.filter(number=ticket.operator).first()
            return db_ticket.save()
        else:
            assigned_date = dmYHM_to_datetime(ticket.call_time)
            agent = Workers.objects.filter(number=ticket.operator).first()
            # save() returns None, so `result` is always None (kept as-is).
            result = cls(ticket_number=ticket.number,
                         when_assigned=dmYHM_to_datetime(ticket.assigned_date) if ticket.assigned_date else None,
                         client_address=ticket.address,
                         phones=ticket.phones,
                         assigned_date=assigned_date,
                         agent=agent,
                         client_name=ticket.name).save()
            # Notify the agent's master about the new assignment by e-mail.
            db_ticket = ticket.__dict__
            db_ticket['mail_to'] = Employer.find_master(ticket.operator).email
            db_ticket['link'] = ''
            mail.EmailSender().agent_assign_ticket(db_ticket)

    def update_date(self):
        # Delegates to the task that refreshes assignment dates in bulk.
        update_date_for_assigned()
class AUP(models.Model):
    """Administrative staff contact entry."""
    name = models.CharField(max_length=150)
    position = models.CharField(max_length=100)
    email = models.EmailField()
    phone = models.IntegerField()

    def __str__(self):
        return f' {self.name} - {self.position}'
class FirebaseNotification(Modify):
    """Per-ticket push-notification state (inherits UUID pk and timestamps from Modify)."""
    ticket_number = models.IntegerField()
    today_count_notification = models.IntegerField(default=0)  # notifications sent today
    last_call_time = models.DateTimeField(null=True, blank=True)
    last_ticket_status = models.CharField(max_length=255)
    worker = models.ForeignKey(Workers, on_delete=models.CASCADE, null=True, blank=True)
f6029db9fa8774e3c51f5cd2c301f93c77ad18fa | 1,184 | py | Python | photos/views.py | Otybrian/personal-gallery | d9492b5d29600da4a2999a9cc4914279e55f87ee | [
"MIT"
] | null | null | null | photos/views.py | Otybrian/personal-gallery | d9492b5d29600da4a2999a9cc4914279e55f87ee | [
"MIT"
] | null | null | null | photos/views.py | Otybrian/personal-gallery | d9492b5d29600da4a2999a9cc4914279e55f87ee | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
import datetime as dt
from django.views import View
from photos.models import Image, category
# Create your views here.
def welcome(request):
    """Render the gallery landing page."""
    return render(request, 'welcome.html')
def display_page(request):
    """Render the page listing every image in the gallery."""
    return render(request, 'all.html', {'image': Image.objects.all()})
def viewDetails(request):
    """Render the details page with every image available to the template."""
    return render(request, 'details.html', {'image': Image.objects.all()})
def my_category(request):
    """Render the category listing page."""
    return render(request, 'category.html', {'categorys': category.objects.all()})
def search_results(request):
    """Render search results for a category name, or a prompt when no term was given."""
    search_term = request.GET.get("category_name")
    if not search_term:
        # Missing or empty query parameter: ask the user for a term.
        return render(request, 'search.html', {"message": "You haven't searched for any term"})
    searched_category_name = category.search_by_category(search_term)
    message = f"{search_term}"
    return render(request, 'search.html',
                  {"message": message, "category_name": searched_category_name})
| 27.534884 | 105 | 0.699324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.217905 |
f602a97c790f5ae9f623af20f687cb178aa58aa5 | 2,530 | py | Python | sem/constants.py | YoannDupont/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 22 | 2016-11-13T21:08:58.000Z | 2021-04-26T07:04:54.000Z | sem/constants.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 15 | 2016-11-15T10:21:07.000Z | 2021-11-08T10:08:05.000Z | sem/constants.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 8 | 2016-11-15T10:21:41.000Z | 2022-03-04T21:28:05.000Z | # -*- coding: utf-8 -*-
"""
file: constants.py
Description: some useful constants that could be of some use in SEM and
beyond.
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re

#
# Chunking flags.
#

BEGIN = "B" # begin flags
IN = "I" # in flags
LAST = "LE" # end flags
SINGLE = "US" # single flags
OUT = "O"

#
# Some useful constants for tries.
#

NUL = u""  # empty string marks the end-of-word sentinel

#
# Some useful regular expressions.
#

# see: http://www.ietf.org/rfc/rfc1738.txt
# URLs recognition. Validating URL is both hard and all urls may not be
# valid when analysing textual information. Hence, validity checking is
# kept to bare minimum, covering being more important.
# Recognised URI schemes (RFC 1738, section 3).
protocol = u"(?:http|ftp|news|nntp|telnet|gopher|wais|file|prospero)"
mailto = u"mailto"
# URL body: any non-space run that ends in an alphanumeric or '/', so
# trailing punctuation is not captured.
url_body = u"\S+[0-9A-Za-z/]"
# A URL is <scheme>:// or mailto: or a bare www. prefix, optionally in <...>.
url = u"<?(?:{0}://|{1}:|www\.){2}>?".format(protocol, mailto, url_body)
url_re = re.compile(url, re.I)  # case-insensitive URL matcher

# email addresses recognition. See URLs.
# Local part: RFC-ish atom characters; dots allowed inside but not at the edges.
localpart_border = u"[A-Za-z0-9!#$%&'*+\-/=?^_`{|}~]"
localpart_inside = u"[A-Za-z0-9!#$%&'*+\-/=?^_`{|}~.]"
localpart = u"{0}{1}*".format(localpart_border, localpart_inside)
# Domain labels: start with a letter, end with a letter/digit, hyphens inside.
subdomain_start = u"[A-Za-z]"
subdomain_inside = u"[A-Za-z0-9\\-]"
subdomain_end = u"[A-Za-z0-9]"
subdomain = u"{0}{1}*{2}".format(subdomain_start, subdomain_inside, subdomain_end)
domain = u"{0}(?:\\.{1})*".format(subdomain, subdomain)
email_str = u"{0}@{1}".format(localpart, domain)
email_re = re.compile(email_str)  # full e-mail address matcher
| 32.857143 | 89 | 0.705929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,936 | 0.765217 |
f6039a035e0c79c81407aabe0db10a251b036b44 | 2,383 | py | Python | newsbreak.py | mitchsmith/news_munger | e9c8362dd3085acb2b1f0cfd4cb313ef9f6bf06d | [
"MIT"
] | null | null | null | newsbreak.py | mitchsmith/news_munger | e9c8362dd3085acb2b1f0cfd4cb313ef9f6bf06d | [
"MIT"
] | null | null | null | newsbreak.py | mitchsmith/news_munger | e9c8362dd3085acb2b1f0cfd4cb313ef9f6bf06d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" This module provides a command line interface to news_munger. """
import datetime
import random
import argparse
from munger import DocumentCatalog, Munger
# Parse command-line arguments (none are defined yet; this still provides -h).
parser = argparse.ArgumentParser()
parser.parse_args()
## Classes ##
class MadLib(Munger):
    """Real soon now. """
    # Placeholder subclass: the mad-lib building strategy is not implemented yet.
    def build(self):
        pass

    def __repr__(self):
        return "<MadLib: {}>".format(self.headline)
class ExquisiteCorpse(Munger):
    """
    A fake news article composed of sentence fragments gleaned from the day's
    headlines, in the style of surrealist party game 'Exquisite Corpse'.

    See: https://en.wikipedia.org/wiki/Exquisite_corpse
    """

    def __init__(self, documents):
        """Initialize super; and declare corpse list. """
        super().__init__(documents)
        self.corpses = []  # finished cadavres: dicts with 'title' and 'sentences'

    def build(self):
        """Munge news stories to create an esquisite cadavre.

        Picks a random base article and rewrites each of its sentences with a
        munge strategy chosen by the sentence's root verb; the result is
        appended to self.corpses and printed.
        """
        text = ""
        base_index = random.randrange(len(self._documents))
        base = self._documents[base_index]
        sentences = []
        for i, sent in enumerate(base.sents):
            # (document index, sentence index, root lemma, sentence span)
            stuple = (base_index, i, sent.root.lemma_, sent)
            # Dispatch on the root verb's lemma to a munge method from Munger.
            if stuple[2] == "say":
                sentence = self.munge_sayings(stuple)
            elif stuple[2] in ["be", "do", "have"]:
                sentence = self.munge_children(stuple)
            else:
                sentence = self.munge_on_roots(stuple)
            sentences.append(sentence)
        self.corpses.append({"title": base._.title, "sentences": sentences})
        # Each munged sentence tuple ends with the sentence span itself.
        text += "\n".join([sent[-1].text_with_ws for sent in sentences])
        print(text)

    def save(self, cadavre=None):
        """ Write the cadavre(s) to a file.

        Appends to one file per day (tmp/exq_YYYYMMDD.txt). If `cadavre` is
        given, only that one is written; otherwise all built corpses are.
        """
        filename = datetime.datetime.today().strftime("tmp/exq_%Y%m%d.txt")
        if cadavre:
            corpses = [cadavre]
        else:
            corpses = self.corpses
        with open(filename, "a+") as file:
            for corpse in corpses:
                file.write(f"{corpse['title']}\n\n")
                for sent in corpse["sentences"]:
                    file.write(sent[-1].text_with_ws)
                file.write("\n******\n\n")

    def __repr__(self):
        return "<ExquisiteCorpse: {}>".format(self.headline)
if __name__ == "__main__":
    # Build today's article catalog when the module is run as a script.
    catalog = DocumentCatalog()

# Unit Tests #
| 26.775281 | 77 | 0.586655 | 2,015 | 0.845573 | 0 | 0 | 0 | 0 | 0 | 0 | 688 | 0.288712 |
f6076a72ecf2f080ef45e72da4240a2e7661a299 | 8,246 | py | Python | pipeline/dag.py | OpenSourceEconomics/pipeline | cdd9679c62af32f52a07aba800b752e90af08396 | [
"BSD-3-Clause"
] | 3 | 2020-03-23T12:15:19.000Z | 2020-05-12T11:52:21.000Z | pipeline/dag.py | OpenSourceEconomics/pipeline | cdd9679c62af32f52a07aba800b752e90af08396 | [
"BSD-3-Clause"
] | 22 | 2020-04-06T22:24:18.000Z | 2020-05-13T11:44:57.000Z | pipeline/dag.py | OpenSourceEconomics/pipeline | cdd9679c62af32f52a07aba800b752e90af08396 | [
"BSD-3-Clause"
] | null | null | null | """This module contains the code related to the DAG and the scheduler."""
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from networkx.drawing import nx_pydot
from pipeline.shared import ensure_list
BLUE = "#547482"
YELLOW_TO_RED = ["#C8B05C", "#C89D64", "#F1B05D", "#EE8445", "#C87259", "#6C4A4D"]
class Scheduler:
    """This class allows to schedule tasks.
    The functionality is inspired by :func:`networkx.topological_sort` which allows to
    loop over a directed acyclic graph such that all preceding nodes are executed before
    a dependent node.
    The scheduler keeps track of all unfinished tasks and their dependencies in the
    `task_dict`. If a task has no dependencies, it is eligible to be executed. All
    submitted tasks are removed from `task_dict`. If a task finishes, it is removed as a
    dependency from all tasks in `task_dict`.
    The scheduler can take task priorities into account and proposes only tasks
    with the highest priorities.
    """
    def __init__(self, dag, unfinished_tasks, priority):
        # The full task graph; nodes carry "depends_on" and, when priority
        # scheduling is on, a "priority" attribute.
        self.dag = dag
        # Maps unfinished task id -> set of unfinished predecessor task ids.
        self.task_dict = self._create_task_dependency_dict(unfinished_tasks)
        # Tasks handed out via propose() and not yet reported finished.
        self.submitted_tasks = set()
        # If truthy, propose() orders candidates by node priority.
        self.priority = priority
    def _create_task_dependency_dict(self, unfinished_tasks):
        """Create a task-dependency dictionary.
        For each unfinished task, this function collects the tasks which have to be
        executed in advance.
        """
        task_dict = {}
        for id_ in unfinished_tasks:
            # A task's "depends_on" entries are dependency nodes; the tasks
            # producing them are those nodes' graph predecessors.
            task_dict[id_] = {
                preceding_task
                for dependency in ensure_list(self.dag.nodes[id_].get("depends_on", []))
                for preceding_task in self.dag.predecessors(dependency)
                if preceding_task in unfinished_tasks
            }
        return task_dict
    def propose(self, n_proposals=1):
        """Propose a number of tasks.
        This function proposes tasks which can be executed. If a task is proposed,
        remove it from the `task_dict`.
        Parameters
        ----------
        n_proposals : int
            Number of tasks which should be proposed. For any nonnegative number, return
            a set of task ids. For `-1` return all possible tasks.
        Returns
        -------
        proposals : set
            A set of task ids which should be executed.
        """
        # Get task candidates: tasks whose dependency sets have been emptied.
        candidates = [id_ for id_ in self.task_dict if len(self.task_dict[id_]) == 0]
        if self.priority:
            # Highest-priority candidates first.
            candidates = sorted(
                candidates,
                key=lambda id_: self.dag.nodes[id_]["priority"],
                reverse=True,
            )
        if 0 <= n_proposals:
            proposals = set(candidates[:n_proposals])
        elif n_proposals == -1:
            proposals = set(candidates)
        else:
            raise NotImplementedError
        self.submitted_tasks = self.submitted_tasks.union(proposals)
        # Proposed tasks leave the bookkeeping dict; they are now "in flight".
        for id_ in proposals:
            del self.task_dict[id_]
        return proposals
    def process_finished(self, finished_tasks):
        """Process finished tasks.
        The executor passes an id or a list of ids of finished tasks back to the
        scheduler. The scheduler removes the ids from the set of submitted tasks and
        removes the finished tasks from the dependency sets of all unfinished tasks in
        `task_dict`.
        Parameters
        ----------
        finished_tasks : str or list
            An id or a list of ids of finished tasks.
        """
        finished_tasks = ensure_list(finished_tasks)
        for id_ in finished_tasks:
            self.submitted_tasks.remove(id_)
            # Unblock every task that was waiting on this one.
            for id__ in self.task_dict:
                self.task_dict[id__].discard(id_)
    @property
    def are_tasks_left(self):
        # True while anything is still waiting to run or currently running.
        return len(self.task_dict) != 0 or len(self.submitted_tasks) != 0
def create_dag(tasks, config):
    """Create a directed acyclic graph (DAG) capturing dependencies between functions.
    Parameters
    ----------
    tasks : dict
        Dictionary containing tasks.
    config : dict
        Project configuration; supplies the priority settings used by
        ``_assign_priority_to_nodes`` and the output paths used by ``_draw_dag``.
    Returns
    -------
    dag : nx.DiGraph
        The directed acyclic graph.
    """
    dag_dict = _create_dag_dict(tasks)
    # dag_dict edges point from a node to its dependencies; reverse them so
    # edges run dependency -> dependent (execution order).
    dag = nx.DiGraph(dag_dict).reverse()
    dag = _insert_tasks_in_dag(dag, tasks)
    dag = _assign_priority_to_nodes(dag, config)
    # Side effect: renders the graph to disk.
    _draw_dag(dag, config)
    return dag
def _create_dag_dict(tasks):
    """Map every task id to its dependency list, and every produced target
    back to the task that creates it."""
    dag_dict = {}
    for task_id, info in tasks.items():
        # A task depends on its declared inputs, its templates, and its config.
        dependencies = list(ensure_list(info.get("depends_on", [])))
        dependencies += ensure_list(info.get("template", []))
        dependencies += [info["config"]]
        dag_dict[task_id] = dependencies
        # Outputs become nodes of their own, produced by this task.
        for produced in ensure_list(info.get("produces", [])):
            dag_dict[produced] = [task_id]
    return dag_dict
def _insert_tasks_in_dag(dag, tasks):
for id_ in dag.nodes:
if id_ in tasks:
dag.nodes[id_].update(**tasks[id_], _is_task=True)
else:
dag.nodes[id_].update(_is_task=False)
return dag
def _assign_priority_to_nodes(dag, config):
    """Assign a priority to each task node.

    Task priorities trickle down from the last nodes in the DAG to the first
    nodes: the total priority of a task is its own priority plus the
    discounted sum of the priorities of the tasks consuming its outputs.
    """
    if not config["priority_scheduling"]:
        # Nothing to compute when priority scheduling is disabled.
        return dag
    discount_factor = config["priority_discount_factor"]
    reversed_dag = dag.reverse()
    # Walk the graph targets-first so each task's downstream consumers have
    # already accumulated their totals when we reach it.
    for node in nx.topological_sort(reversed_dag):
        if not reversed_dag.nodes[node]["_is_task"]:
            continue
        # task -> produced file -> consuming task is two predecessor hops in
        # the reversed graph.
        sum_priorities = 0
        for produced in reversed_dag.predecessors(node):
            for downstream_task in reversed_dag.predecessors(produced):
                sum_priorities += dag.nodes[downstream_task].get("priority", 0)
        dag.nodes[node]["priority"] = (
            dag.nodes[node].get("priority", 0) + discount_factor * sum_priorities
        )
    return dag
def _draw_dag(dag, config):
    """Render the task graph to ``<hidden_build_directory>/.dag.png``.
    File nodes are drawn in plain blue; task nodes are sized and colored by
    priority when priority scheduling is enabled.
    """
    fig, ax = plt.subplots(figsize=(16, 12))
    fig.suptitle("Task Graph", fontsize=24)
    # Relabel absolute paths to path names.
    project_directory = Path(config["project_directory"])
    mapping = {
        node: Path(node).relative_to(project_directory)
        for node in dag.nodes
        if Path(node).is_absolute()
    }
    dag = nx.relabel_nodes(dag, mapping)
    # "dot" produces a layered top-down layout suited to DAGs.
    layout = nx_pydot.pydot_layout(dag, prog="dot")
    nx.draw_networkx_edges(dag, pos=layout, ax=ax)
    nx.draw_networkx_labels(dag, pos=layout, ax=ax)
    # Draw non-task nodes.
    non_task_nodes = [node for node in dag.nodes if not dag.nodes[node]["_is_task"]]
    nx.draw_networkx_nodes(
        dag, pos=layout, nodelist=non_task_nodes, node_color=BLUE, ax=ax
    )
    task_nodes = [node for node in dag.nodes if dag.nodes[node]["_is_task"]]
    if config["priority_scheduling"]:
        # Min-max-normalize priorities, then scale node sizes into [300, 1300].
        node_size = np.array([dag.nodes[node]["priority"] for node in task_nodes])
        node_size_demeaned = node_size - node_size.min()
        node_size_relative = node_size_demeaned / node_size_demeaned.max()
        node_size = node_size_relative * 1_000 + 300
        cmap = LinearSegmentedColormap.from_list("cmap", YELLOW_TO_RED)
        priority_kwargs = {
            "node_size": node_size,
            "node_color": node_size_relative,
            "cmap": cmap,
        }
    else:
        priority_kwargs = {"node_color": BLUE}
    im = nx.draw_networkx_nodes(
        dag, pos=layout, nodelist=task_nodes, **priority_kwargs, ax=ax
    )
    if config["priority_scheduling"]:
        # Attach a colorbar legend for the priority colormap.
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="3%", pad=0.1)
        fig.colorbar(im, cax=cax, orientation="vertical")
        cax.set_title("Priority")
    path = Path(config["hidden_build_directory"], ".dag.png")
    path.parent.mkdir(parents=True, exist_ok=True)
    plt.savefig(path)
    plt.close()
| 31.96124 | 88 | 0.642251 | 3,604 | 0.43706 | 0 | 0 | 113 | 0.013704 | 0 | 0 | 2,955 | 0.358356 |
f607f4917d87af5bd9f2a106689f14808e678707 | 2,459 | py | Python | windyquery/validator/fullname_json.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 51 | 2019-05-13T10:51:23.000Z | 2021-09-12T08:11:56.000Z | windyquery/validator/fullname_json.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 2 | 2020-10-08T16:28:45.000Z | 2021-06-23T03:27:42.000Z | windyquery/validator/fullname_json.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 3 | 2021-05-10T13:08:21.000Z | 2021-06-20T19:58:30.000Z | from windyquery.provider._base import JSONB
from ._base import _rule
from .fullname import Fullname
from .number import Number
from .operators.minus import Minus
class FullnameJson(Fullname, Number, Minus):
    """PLY grammar for fully-qualified column names followed by a chain of
    JSON accessors, e.g. ``col->'a'->0->>'b'``.  ``->`` keeps the JSONB kind,
    ``->>`` extracts text; negative numeric indices index from the end."""
    # Merge reserved words, token lists, and precedence of the mixin grammars.
    reserved = {**Fullname.reserved, **Number.reserved, **Minus.reserved}
    tokens = Fullname.tokens + Number.tokens + \
        Minus.tokens + ('ARROW', 'DARROW',)
    precedence = Fullname.precedence + Minus.precedence
    # Tokens
    t_ARROW = r'->'
    t_DARROW = r'->>'
    # rules
    _start = 'fullname_json'
    @_rule('fullname_json : fullname attribute')
    def p_fullname_json(self, p):
        # Concatenate the column name with its (possibly empty) accessor chain
        # and remember the full access path.
        p[0] = self.provider.new_record(f'{p[1]}{p[2]}', p[2].kind)
        p[0].path = [p[1]] + p[2].path
    @_rule('attribute : ARROW NAME attribute')
    def p_attribute(self, p):
        p2 = self.sanitize_literal(p[2])
        # If a deeper accessor follows, it decides the result kind; a bare
        # ``->`` access yields JSONB.
        if p[3].value:
            kind = p[3].kind
            path = [p[2]] + p[3].path
        else:
            kind = JSONB
            path = [p[2]]
        p[0] = self.provider.new_record(f'->{p2}{p[3]}', kind)
        p[0].path = path
    @_rule('attribute : ARROW NUMBER attribute')
    def p_attribute_num(self, p):
        # Numeric array index; same kind logic as the NAME variant.
        if p[3].value:
            kind = p[3].kind
            path = [f'{p[2]}'] + p[3].path
        else:
            kind = JSONB
            path = [f'{p[2]}']
        p[0] = self.provider.new_record(f'->{p[2]}{p[3]}', kind)
        p[0].path = path
    @_rule('attribute : ARROW MINUS NUMBER attribute')
    def p_attribute_minus_num(self, p):
        # Negative array index (counting from the end).
        if p[4].value:
            kind = p[4].kind
            path = [f'-{p[3]}'] + p[4].path
        else:
            kind = JSONB
            path = [f'-{p[3]}']
        p[0] = self.provider.new_record(f'->-{p[3]}{p[4]}', kind)
        p[0].path = path
    @_rule('attribute : DARROW NAME')
    def p_attribute_darrow(self, p):
        # ``->>`` extracts text, so it must terminate the accessor chain.
        p2 = self.sanitize_literal(p[2])
        p[0] = self.provider.new_record(f'->>{p2}')
        p[0].path = [p[2]]
    @_rule('attribute : DARROW NUMBER')
    def p_attribute_darrow_num(self, p):
        p[0] = self.provider.new_record(f'->>{p[2]}')
        p[0].path = [f'{p[2]}']
    @_rule('attribute : DARROW MINUS NUMBER')
    def p_attribute_darrow_minus_num(self, p):
        p[0] = self.provider.new_record(f'->>-{p[3]}')
        p[0].path = [f'-{p[3]}']
    @ _rule('attribute : empty')
    def p_attribute_empty(self, p):
        # Base case: no accessor at all (a bare column name).
        p[0] = self.provider.new_record('')
        p[0].path = []
| 30.7375 | 73 | 0.542497 | 2,294 | 0.9329 | 0 | 0 | 1,881 | 0.764945 | 0 | 0 | 467 | 0.189915 |
f609ea1edf133c69b084cabfbd8b3cb1f4199f52 | 37 | py | Python | heatmap/__init__.py | Bilal-Yousaf/heatmap | 789907301f9663feca72fb84dffbe2de08869975 | [
"MIT"
] | 5 | 2020-03-25T20:31:48.000Z | 2021-04-23T09:53:50.000Z | heatmap/__init__.py | Bilal-Yousaf/HeatMap | 789907301f9663feca72fb84dffbe2de08869975 | [
"MIT"
] | null | null | null | heatmap/__init__.py | Bilal-Yousaf/HeatMap | 789907301f9663feca72fb84dffbe2de08869975 | [
"MIT"
] | null | null | null | from .heatmap import generate_heatmap | 37 | 37 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f60a3f4c653a3d461182600bc1efb2e3052bf960 | 1,575 | py | Python | download-files.py | CrowderSoup/download-files | 2abc35aecd010dd246e0b9b8af7880f51ea47b7e | [
"Unlicense"
] | null | null | null | download-files.py | CrowderSoup/download-files | 2abc35aecd010dd246e0b9b8af7880f51ea47b7e | [
"Unlicense"
] | null | null | null | download-files.py | CrowderSoup/download-files | 2abc35aecd010dd246e0b9b8af7880f51ea47b7e | [
"Unlicense"
] | null | null | null | import os, os.path, urllib.request, sys, getopt
def main(argv):
    """Download every URL listed (one per line) in an input file.

    Command line options:
      -i / --input-file     file of URLs, defaults to ``urls.txt``
      -d / --download-dir   target directory, defaults to ``<cwd>/downloads``

    Files already present in the download directory are skipped; individual
    download failures are reported and skipped.
    """
    print(argv)
    input_file = ''
    download_dir = ''
    usage = 'download-files.py -i <input-file> -d <download-directory>'
    try:
        opts, _args = getopt.getopt(argv,
                                    "hi:d:",
                                    ["input-file=", "download-dir="])
    except getopt.GetoptError as err:
        print(err)
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ("-i", "--input-file"):
            input_file = arg
        elif opt in ("-d", "--download-dir"):
            download_dir = arg
    if input_file != '':
        print("Opening input file...")
    else:
        print("No Input file specified, trying default...")
        input_file = "urls.txt"
    # Resolve the download directory once, outside the loop.  The original
    # built the default with '\downloads' (an invalid '\d' escape sequence and
    # non-portable); use os.path.join instead.
    if download_dir == '':
        print("Download Directory not provided... using default.")
        download_dir = os.path.join(os.getcwd(), 'downloads')
    # ``with`` guarantees the URL list file is closed (it never was before).
    with open(input_file, 'r') as links:
        for link in links:
            link = link.strip()
            if not link:
                continue  # skip blank lines
            name = link.rsplit('/', 1)[-1]
            filename = os.path.join(download_dir, name)
            if not os.path.isfile(filename):
                print('Downloading: ' + filename)
                try:
                    urllib.request.urlretrieve(link, filename)
                except Exception as inst:
                    # Best-effort: report the failure and move on.
                    print(inst)
                    print('Continuing...')
if __name__ == "__main__":
main(sys.argv[1:])
| 30.288462 | 78 | 0.510476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.248889 |
f60af4235aef343759e761d60c46b44ec31d12af | 929 | py | Python | pynif3d/pipeline/base_pipeline.py | pfnet/pynif3d | da3680cce7e8fc4c194f13a1528cddbad9a18ab0 | [
"MIT"
] | 66 | 2021-08-18T01:30:17.000Z | 2022-02-19T10:35:52.000Z | pynif3d/pipeline/base_pipeline.py | pfnet/pynif3d | da3680cce7e8fc4c194f13a1528cddbad9a18ab0 | [
"MIT"
] | 6 | 2021-08-18T01:36:31.000Z | 2021-09-02T11:18:01.000Z | pynif3d/pipeline/base_pipeline.py | pfnet/pynif3d | da3680cce7e8fc4c194f13a1528cddbad9a18ab0 | [
"MIT"
] | 3 | 2021-11-22T23:06:20.000Z | 2022-02-10T06:24:40.000Z | import os
import gdown
import torch
import yaml
from pynif3d.common.verification import check_in_options, check_path_exists
from pynif3d.log.log_funcs import func_logger
class BasePipeline(torch.nn.Module):
    """Base class for pipelines: a ``torch.nn.Module`` plus cached download
    and loading of pretrained weights registered in a YAML catalog."""
    @func_logger
    def __init__(self):
        super().__init__()
    def load_pretrained_model(self, yaml_file, model_name, cache_directory="."):
        """Download (MD5-checked, via gdown) and load pretrained weights.

        *yaml_file* maps model names to ``{"url": ..., "md5": ...}`` entries;
        *model_name* must be one of those keys.  The checkpoint is cached as
        ``model.pt`` inside *cache_directory* and its ``"model_state"`` dict
        is loaded into this module.
        """
        check_path_exists(yaml_file)
        with open(yaml_file) as stream:
            data = yaml.safe_load(stream)
        pretrained_models = list(data.keys())
        check_in_options(model_name, pretrained_models, "model_name")
        model_path = os.path.join(cache_directory, "model.pt")
        url = data[model_name]["url"]
        md5 = data[model_name]["md5"]
        # gdown skips the download when the cached copy matches the MD5.
        gdown.cached_download(url, model_path, md5)
        check_path_exists(model_path)
        state_dict = torch.load(model_path)["model_state"]
        self.load_state_dict(state_dict)
| 28.151515 | 80 | 0.698601 | 754 | 0.811625 | 0 | 0 | 63 | 0.067815 | 0 | 0 | 48 | 0.051668 |
f60b7b13f9cee75fd4af4cab97ad49f4feeb9a82 | 392 | py | Python | search.py | joyfulflyer/billboard-spotify | a6561e4718ea671721b2a8f900ecfbf49a9f6c1b | [
"MIT"
] | null | null | null | search.py | joyfulflyer/billboard-spotify | a6561e4718ea671721b2a8f900ecfbf49a9f6c1b | [
"MIT"
] | null | null | null | search.py | joyfulflyer/billboard-spotify | a6561e4718ea671721b2a8f900ecfbf49a9f6c1b | [
"MIT"
] | null | null | null | from spotify_auth import auth
from urllib import parse
import json
def search(track_name, artist, type='track'):
    """Search the Spotify API for *track_name* by *artist*.

    Builds an ``artist:... track:...`` query, URL-encodes it, and returns the
    decoded JSON response body as a dict.

    Bug fix: the original referenced ``query`` before assigning it (NameError)
    and never used the encoded ``parsed`` value in the request URL.
    """
    query = "artist:{} track:{}".format(artist, track_name)
    parsed = parse.quote_plus(query)
    response = auth.get(
        'https://api.spotify.com/v1/search?q={}&type={}'.format(parsed, type))
    response_object = json.loads(response.text)
    return response_object
| 30.153846 | 77 | 0.701531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.196429 |
f60b96e5a2ce996ad44a1fdf8fb2152dfc983fc2 | 9,910 | py | Python | fastccd_support_ioc/fastccd_support_ioc.py | lbl-camera/fastccd_support_ioc | 80b3820744e9aec7923af6adec0a66a0c51b2c21 | [
"BSD-3-Clause"
] | null | null | null | fastccd_support_ioc/fastccd_support_ioc.py | lbl-camera/fastccd_support_ioc | 80b3820744e9aec7923af6adec0a66a0c51b2c21 | [
"BSD-3-Clause"
] | 1 | 2020-08-07T22:22:25.000Z | 2020-08-07T22:22:25.000Z | fastccd_support_ioc/fastccd_support_ioc.py | lbl-camera/fastccd_support_ioc | 80b3820744e9aec7923af6adec0a66a0c51b2c21 | [
"BSD-3-Clause"
] | 1 | 2021-02-08T22:06:05.000Z | 2021-02-08T22:06:05.000Z | from caproto.server import PVGroup, SubGroup, pvproperty, get_pv_pair_wrapper
from caproto import ChannelType
from . import utils, pvproperty_with_rbv, wrap_autosave, FastAutosaveHelper
from textwrap import dedent
import sys
from caproto.server import ioc_arg_parser, run
from caproto.sync.client import write, read
from caproto.asyncio.client import Context
DEFAULT_ACQUIRETIME = 1
DEFAULT_ACQUIREPERIOD = 1.1
class FCCDSupport(PVGroup):
"""
A support IOC to initialize, shutdown, and configure the ALS FastCCD; complements ADFastCCD
"""
# ACQUIRE_POLL_PERIOD = 0.1
def __init__(self, *args, camera_prefix, shutter_prefix, hdf5_prefix, **kwargs):
self.camera_prefix = camera_prefix
self.shutter_prefix = shutter_prefix
self.hdf5_prefix = hdf5_prefix
super(FCCDSupport, self).__init__(*args, **kwargs)
@SubGroup(prefix='cam1:')
class Cam(PVGroup):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._capture_goal = 0
self._active_subprocess = None
self._subprocess_completion_state = None
self.num_captured_rbv_pv = None
async def fccd_shutdown(self, instance, value):
# Note: all the fccd scripts are injected into the utils module; you can call them like so:
print("shutting down")
await self.State.write("Shutting Down...")
async def fccd_initialize(self, instance, value):
print("initializing")
await self.State.write("Initializing...")
async def _fccd_initialize(self, instance, value):
# utils.scripts.fccd_auto_start()
if self._active_subprocess:
raise RuntimeError('Another background process is still running.')
self._active_subprocess = utils.scripts.cosmic_fccd_auto_start(str(self.TestFrameMode.value))
self._subprocess_completion_state = "Initialized"
async def _fccd_shutdown(self, instance, value):
if self._active_subprocess:
raise RuntimeError('Another background process is still running.')
self._active_subprocess = utils.scripts.auto_power_down_script()
self._subprocess_completion_state = "Off"
State = pvproperty(dtype=ChannelType.ENUM,
enum_strings=["Unknown", "Initialized", "Initializing...", "Shutting Down...", "Off", ],
value="Unknown")
@State.startup
async def State(self, instance, async_lib):
self._context = Context()
if self.num_captured_rbv_pv:
await self.num_captured_rbv_pv.unsubscribe_all()
self.num_captured_rbv_pv, = await self._context.get_pvs(self.parent.hdf5_prefix + 'NumCaptured_RBV')
self.num_captured_rbv_sub = self.num_captured_rbv_pv.subscribe(data_type=ChannelType.INT)
self.num_captured_rbv_sub.add_callback(self.check_finished)
self.num_capture_pv, = await self._context.get_pvs(self.parent.hdf5_prefix + 'NumCapture')
await self.num_capture_pv.unsubscribe_all()
self.num_capture_sub = self.num_capture_pv.subscribe(data_type=ChannelType.INT)
self.num_capture_sub.add_callback(self.set_goal)
async def check_finished(self, pv, response):
# todo: make sure this is a falling edge
print('num_captured:', response.data[0])
if response.data[0] == self._capture_goal:
print('finished!')
await self.AdjustedAcquire.write(0)
async def set_goal(self, pv, response):
print('num_capture (goal):', response.data[0])
self._capture_goal = response.data[0]
@State.getter
async def State(self, instance):
return instance.value
@State.putter
async def State(self, instance, value):
# if value != instance.value:
# print("setting state:", value)
if value == "Initializing...":
await self._fccd_initialize(None, None)
elif value == "Shutting Down...":
await self._fccd_shutdown(None, None)
return value
#
# @State.scan(period=1)
# async def State(self, instance, async_lib):
# if not check_FOPS() and instance.value == "Initialized": # we can check any state here; if any of them go down during init, all of them go down
# await instance.write("Off")
autosave_helper = SubGroup(FastAutosaveHelper)
Initialize = pvproperty(value=0, dtype=int, put=fccd_initialize)
Shutdown = pvproperty(value=0, dtype=int, put=fccd_shutdown)
AdjustedAcquireTime = wrap_autosave(pvproperty_with_rbv(value=DEFAULT_ACQUIRETIME, dtype=float,
precision=3, units='s'))
AdjustedAcquirePeriod = wrap_autosave(pvproperty_with_rbv(value=DEFAULT_ACQUIREPERIOD, dtype=float,
precision=3, units='s'))
AdjustedAcquire = pvproperty(value=0, dtype=int)
TestFrameMode = pvproperty(value=False, dtype=bool)
@AdjustedAcquire.startup
async def AdjustedAcquire(self, instance, async_lib):
# write to Acquire to start the camera up in tv mode
write(self.parent.camera_prefix + 'Acquire', [1])
self.async_lib = async_lib
@AdjustedAcquire.putter
async def AdjustedAcquire(self, instance, value):
# Wait one pulse width; this assures that the first frame is always a full frame,
# and that the dark frame is always a full dark frame
await self.async_lib.library.sleep(self.AdjustedAcquirePeriod.readback.value)
# self._capture_goal = read(self.hdf5_prefix + 'NumCapture').data
# write(self.shutter_prefix + 'TriggerEnabled', [int(value)])
# if value == 1:
write(self.parent.hdf5_prefix + 'Capture', [value])
# print(f'comparing: {value} {instance.value}')
# toggle Acquire pv; this closes the current file and is necessary to inform bluesky that the HDF plugin is
# finished writing
if value != instance.value:
write(self.parent.camera_prefix + 'Acquire', [0])
await self.async_lib.library.sleep(.1)
write(self.parent.camera_prefix + 'Acquire', [1])
await self.async_lib.library.sleep(.1)
return value
@AdjustedAcquireTime.setpoint.putter
async def AdjustedAcquireTime(self, instance, value):
open_delay = read(self.parent.parent.shutter_prefix + 'ShutterOpenDelay_RBV').data[0]
close_delay = read(self.parent.parent.shutter_prefix + 'ShutterCloseDelay_RBV').data[0]
if not open_delay + value + close_delay <= self.parent.AdjustedAcquirePeriod.readback.value:
await self.parent.AdjustedAcquirePeriod.setpoint.write(open_delay + value + close_delay)
write(self.parent.parent.camera_prefix + 'AcquireTime', value + close_delay + open_delay)
write(self.parent.parent.shutter_prefix + 'ShutterTime', value + open_delay)
await self.readback.write(value)
return value
@AdjustedAcquirePeriod.setpoint.putter
async def AdjustedAcquirePeriod(self, instance, value):
readout_time = self.parent.ReadoutTime.value
open_delay = read(self.parent.parent.shutter_prefix + 'ShutterOpenDelay_RBV').data[0]
close_delay = read(self.parent.parent.shutter_prefix + 'ShutterCloseDelay_RBV').data[0]
if not value - open_delay - close_delay >= self.parent.AdjustedAcquireTime.readback.value:
await self.parent.AdjustedAcquireTime.setpoint.write(value - open_delay - close_delay)
write(self.parent.parent.camera_prefix + 'AcquirePeriod', value)
write(self.parent.parent.shutter_prefix + 'TriggerRate', 1. / value)
await self.readback.write(value)
return value
@Initialize.scan(period=1)
async def Initialize(self, instance, async_lib):
if self._active_subprocess:
print(f'checking subprocess: {self._active_subprocess}')
return_code = self._active_subprocess.poll()
if return_code is not None:
completion_state = self._subprocess_completion_state
if return_code == 0:
print('Successful background process')
await self.State.write(completion_state)
await self.State.startup(None, None)
elif return_code > 0:
error = self._active_subprocess.stderr.read().decode()
print(error)
await self.ErrorStatus.write(error)
await self.State.write('Off')
self._active_subprocess = None
self._subprocess_completion_state = None
ReadoutTime = pvproperty(dtype=float, value=.080)
ErrorStatus = pvproperty(dtype=str, value="Unknown", read_only=True)
def main():
"""Console script for fastccd_support_ioc."""
ioc_options, run_options = ioc_arg_parser(
default_prefix='ES7011:FastCCD:',
desc=dedent(FCCDSupport.__doc__))
ioc = FCCDSupport(camera_prefix='ES7011:FastCCD:cam1:',
shutter_prefix='ES7011:ShutterDelayGenerator:',
hdf5_prefix='ES7011:FastCCD:HDF1:',
**ioc_options)
run(ioc.pvdb, **run_options)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 43.656388 | 157 | 0.629566 | 8,955 | 0.903633 | 0 | 0 | 8,505 | 0.858224 | 6,430 | 0.64884 | 1,926 | 0.194349 |
f60c351db3d540f17c4848153f58c4bf3898b80c | 241 | py | Python | infrastructure-dashboard/nccid-redirect/lambda/lambda-handler.py | uk-gov-mirror/NHSX.covid-chest-imaging-database | 77799a97193d09e9267182d18fbb79d604bbb038 | [
"MIT"
] | 56 | 2020-04-08T12:40:28.000Z | 2021-10-02T22:57:16.000Z | infrastructure-dashboard/nccid-redirect/lambda/lambda-handler.py | uk-gov-mirror/NHSX.covid-chest-imaging-database | 77799a97193d09e9267182d18fbb79d604bbb038 | [
"MIT"
] | 111 | 2020-04-02T13:23:06.000Z | 2022-03-30T13:23:28.000Z | infrastructure-dashboard/nccid-redirect/lambda/lambda-handler.py | uk-gov-mirror/NHSX.covid-chest-imaging-database | 77799a97193d09e9267182d18fbb79d604bbb038 | [
"MIT"
def handler(event, context):
    """AWS Lambda handler returning an HTTP 302 (Found) redirect to the
    NCCID page on the NHSX website.  *event* and *context* are unused."""
    location = (
        "https://www.nhsx.nhs.uk/covid-19-response/data-and-covid-19/"
        "national-covid-19-chest-imaging-database-nccid/"
    )
    return {"statusCode": 302, "headers": {"Location": location}}
| 30.125 | 133 | 0.589212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.580913 |
f60c8d54e5e796dde011b4726884df78f2198799 | 1,529 | py | Python | MachineLearning/SparkPREDICT_E2E_MLFLOW_Xgboost.py | AjAgr/Synapse | b009b6479c3b9797a12e24dcd110b71ca8566212 | [
"MIT"
] | null | null | null | MachineLearning/SparkPREDICT_E2E_MLFLOW_Xgboost.py | AjAgr/Synapse | b009b6479c3b9797a12e24dcd110b71ca8566212 | [
"MIT"
] | null | null | null | MachineLearning/SparkPREDICT_E2E_MLFLOW_Xgboost.py | AjAgr/Synapse | b009b6479c3b9797a12e24dcd110b71ca8566212 | [
"MIT"
] | 1 | 2022-02-22T11:24:41.000Z | 2022-02-22T11:24:41.000Z | #!/usr/bin/env python
# coding: utf-8
# ## E2E Xgboost MLFLOW
# In[45]:
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, pandas_udf,udf,lit
import azure.synapse.ml.predict as pcontext
import azure.synapse.ml.predict.utils._logger as synapse_predict_logger
import numpy as np
import pandas as pd
import xgboost as xgb
import mlflow
# In[46]:
spark.conf.set("spark.synapse.ml.predict.enabled","true")
# ## Train and Save Model
# ### Training
# In[47]:
data = np.random.rand(5, 10) # 5 entities, each contains 10 features
label = np.random.randint(1, size=5) # binary target
dtrain = xgb.DMatrix(data, label=label)
xgr = xgb.XGBRFRegressor(objective='reg:linear', n_estimators=10, seed=123)
xgr.fit(data, label)
# In[48]:
xgr.save_model('./model.json')
# In[49]:
mlflow.pyfunc.save_model(
data_path='./model.json',
path='./xgboost_pyfunc_model_path',
loader_module='mlflow.xgboost')
# In[50]:
MODEL_URI = './xgboost_pyfunc_model_path'
RETURN_TYPES = 'float'
# In[51]:
model = pcontext.bind_model(
return_types = RETURN_TYPES,
runtime = 'mlflow',
model_alias = 'xgb_model',
model_uri = MODEL_URI,).register()
# In[52]:
type(model)
# In[53]:
data = np.random.rand(5, 10)
df = spark.createDataFrame(pd.DataFrame(data))
df.createOrReplaceTempView("data")
df.show()
# In[54]:
predictions = spark.sql(
"""
SELECT PREDICT('xgb_model', *) AS predict FROM data
"""
).show()
| 15.602041 | 75 | 0.666449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 527 | 0.34467 |
f60d8908048f4ecdd577d5074d14eeb34f78c5bb | 2,930 | py | Python | models/gaze_rnn77.py | yj-yu/Recurrent_Gaze_Prediction | cf366095f19cd8c9ce271930d194db12a7d45018 | [
"MIT"
] | 9 | 2019-06-06T14:55:56.000Z | 2022-01-27T09:35:17.000Z | models/gaze_rnn77.py | yj-yu/Recurrent_Gaze_Prediction | cf366095f19cd8c9ce271930d194db12a7d45018 | [
"MIT"
] | 1 | 2021-06-01T22:34:37.000Z | 2021-06-01T22:34:37.000Z | models/gaze_rnn77.py | yj-yu/Recurrent_Gaze_Prediction | cf366095f19cd8c9ce271930d194db12a7d45018 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
"""
gaze_rnn7.py
Implement a simple recurrent gaze prediction model based on RNN(GRU).
In this version, the gaze DIM is REDUCED to 7x7 dimension.
"""
# TODO separate pupil from gazemaps, AWFUL design
import numpy as np
import os
import sys
import time
from PIL import Image
import tensorflow as tf
rnn_cell = tf.nn.rnn_cell
from collections import OrderedDict
import cPickle as pkl
import crc_input_data_seq
from util import log, override
from models.base import ModelBase, BaseModelConfig
from models.saliency_shallownet import SaliencyModel
from models.model_util import tf_normalize_map, normalize_probability_map
from models.model_util import tf_softmax_2d, tf_softmax_cross_entropy_with_logits_2d
from evaluation_metrics import saliency_score, AVAILABLE_METRICS
from easydict import EasyDict as E
# Fixed geometry of the model's inputs/outputs: input frames are 98x98 px,
# the predicted gaze map is reduced to 7x7 in this variant, and the saliency
# map stays 49x49.
CONSTANTS = E()
CONSTANTS.image_width = 98
CONSTANTS.image_height = 98
CONSTANTS.gazemap_width = 7
CONSTANTS.gazemap_height = 7
CONSTANTS.saliencymap_width = 49
CONSTANTS.saliencymap_height = 49
# Configuration object; intended to be passed in as a parameter later.
class GRUModelConfig(BaseModelConfig):
    """Hyper-parameters for the GRU gaze-prediction model."""
    def __init__(self):
        super(GRUModelConfig, self).__init__()
        # Number of timesteps the RNN is unrolled over.
        self.n_lstm_steps = 35
        self.batch_size = 7 # XXX XXX XXX XXX
        self.dim_feature = 1024 #1024
        self.dim_sal = 1024*49 #196
        self.dim_sal_proj = 1024
        # use adam by default
        self.optimization_method = 'adam'
        self.loss_type = 'l2'
from gaze_rnn import GazePredictionGRU as GazePredictionGRU4949
class GazePredictionGRU(GazePredictionGRU4949):
    """7x7-gazemap variant of the 49x49 GRU gaze predictor: reuses the parent
    implementation but overrides the configuration and output geometry."""
    # TODO: not yet compatible with model.py; mount that onto this model.
    def __init__(self,
                 session,
                 data_sets,
                 config=GRUModelConfig()
                 ):
        # NOTE(review): the default ``config=GRUModelConfig()`` is a mutable
        # default evaluated once at import time and shared by all callers that
        # omit it -- confirm this sharing is intended.
        self.session = session
        self.data_sets = data_sets
        self.config = config
        #assert isinstance(self.config, GRUModelConfig)
        super(GazePredictionGRU, self).__init__(session, data_sets, config)
        # other configuration (copied locally from config for convenience)
        self.batch_size = config.batch_size
        self.n_lstm_steps = config.n_lstm_steps
        self.dim_feature = config.dim_feature
        self.dim_sal = config.dim_sal # 49 * 49
        self.dim_sal_proj = config.dim_sal_proj # 14 * 14 = 196
        self.dim_cnn_proj = 32
        self.initial_learning_rate = config.initial_learning_rate
        self.learning_rate_decay = config.learning_rate_decay
        self.max_grad_norm = config.max_grad_norm
        self.gazemap_height = CONSTANTS.gazemap_height # 7
        self.gazemap_width = CONSTANTS.gazemap_width # 7
        self.image_height = CONSTANTS.image_height
        self.image_width = CONSTANTS.image_width
        # Finally, build the model and optimizer.  NOTE(review):
        # tf.initialize_all_variables() is the legacy (pre-1.0 TensorFlow)
        # initializer -- this file targets old TF; do not modernize blindly.
        self.build_model()
        #self.build_generator()
        self.build_train_op()
        self.session.run(tf.initialize_all_variables())
| 28.173077 | 84 | 0.713652 | 1,790 | 0.610922 | 0 | 0 | 0 | 0 | 0 | 0 | 542 | 0.184983 |
f60df9264d987445f7433e60e82a4593c3bbdb6a | 1,218 | py | Python | py/dcp/problems/graph/max_edges.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/dcp/problems/graph/max_edges.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/dcp/problems/graph/max_edges.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | from collections import defaultdict
from .common import IGraph
''' Remove edges to create even trees.
You are given a tree with an even number of nodes. Consider each connection between a parent and child node to be an "edge". You
would like to remove some of these edges, such that the disconnected subtrees that remain each have an even number of nodes.
For example, suppose your input is the following tree:
1
/ \
2 3
/ \
4 5
/ | \
6 7 8
In this case, if we remove the edge (3, 4), both resulting subtrees will be even.
Write a function that returns the maximum number of edges you can remove while still satisfying this requirement.
'''
def max_edges1(graph):
def traverse(graph : IGraph, cur, result):
descendants = 0
for child in graph.neighbors(cur):
num_nodes, result = traverse(graph, child, result)
result[child] += num_nodes - 1
descendants += num_nodes
return descendants + 1, result
start = graph.root()
vertices = defaultdict(int)
_, descendants = traverse(graph, start, vertices)
return len([val for val in descendants.values() if val % 2 == 1]) | 28.325581 | 128 | 0.656814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.529557 |
f60e8f8a52c502e39de2e2fbda338251bd902c5a | 1,362 | py | Python | test_pytrain/test_KNN/test_KNN.py | pytrain/pytrain-shallow | c873a6f11f1dd940da12e7c9a3c961507d064d9a | [
"MIT"
] | 20 | 2016-09-03T10:56:06.000Z | 2020-08-21T01:43:47.000Z | test_pytrain/test_KNN/test_KNN.py | pytrain/pytrain | c873a6f11f1dd940da12e7c9a3c961507d064d9a | [
"MIT"
] | 8 | 2016-11-14T12:33:38.000Z | 2017-07-14T15:43:53.000Z | test_pytrain/test_KNN/test_KNN.py | pytrain/pytrain | c873a6f11f1dd940da12e7c9a3c961507d064d9a | [
"MIT"
] | 7 | 2017-02-09T16:50:37.000Z | 2022-01-02T01:18:56.000Z | #
# test KNN
#
# @ author becxer
# @ email becxer87@gmail.com
#
from test_pytrain import test_Suite
from pytrain.KNN import KNN
from pytrain.lib import autotest
from pytrain.lib import dataset
import numpy as np
class test_KNN_iris(test_Suite):
def __init__(self, logging = True):
test_Suite.__init__(self, logging)
def test_process(self):
iris_mat_train, iris_label_train = dataset.load_iris("sample_data", "training")
iris_mat_test, iris_label_test = dataset.load_iris("sample_data", "testing")
knn = KNN(iris_mat_train, iris_label_train, 3, 'manhattan')
error_rate = autotest.eval_predict(knn, iris_mat_test, iris_label_test, self.logging)
self.tlog("iris predict (with basic knn) error rate :" + str(error_rate))
class test_KNN_mnist(test_Suite):
    """Smoke-test KNN (k=10, euclidean metric) on the MNIST sample data."""

    def __init__(self, logging = True):
        test_Suite.__init__(self, logging)

    def test_process(self):
        # Train on the sample training split, evaluate on the test split.
        train_x, train_y = dataset.load_mnist("sample_data", "training")
        test_x, test_y = dataset.load_mnist("sample_data", "testing")
        classifier = KNN(train_x, train_y, 10, 'euclidean')
        err = autotest.eval_predict(classifier, test_x, test_y, self.logging)
        self.tlog("digit predict (with basic knn) error rate :" + str(err))
| 34.05 | 95 | 0.709251 | 1,136 | 0.834068 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.190162 |
f60f32aa489881ec2e8225930b221b8c8e96f54f | 2,416 | py | Python | python_data_utils/spark/ml/randomforest.py | surajiyer/python-data-utils | d6e9bf81204a01545a3edb165c5724eb24f37c18 | [
"MIT"
] | 4 | 2019-01-06T00:09:21.000Z | 2022-01-28T06:03:13.000Z | python_data_utils/spark/ml/randomforest.py | surajiyer/python-data-utils | d6e9bf81204a01545a3edb165c5724eb24f37c18 | [
"MIT"
] | null | null | null | python_data_utils/spark/ml/randomforest.py | surajiyer/python-data-utils | d6e9bf81204a01545a3edb165c5724eb24f37c18 | [
"MIT"
] | null | null | null | __all__ = ['RandomForestBinaryModel']
from pyspark.sql import DataFrame
from pyspark.ml.classification import RandomForestClassifier
from python_data_utils.spark.evaluation.multiclass import MulticlassEvaluator
from python_data_utils.spark.ml.base import BinaryClassCVModel, Metrics
class RandomForestBinaryModel(BinaryClassCVModel):
    """Binary classification model backed by a Spark RandomForestClassifier.

    Supplies a default estimator, a weighted-F-measure evaluator, and a
    default hyper-parameter grid for the cross-validation machinery in
    ``BinaryClassCVModel``.
    """

    def __init__(
            self, *, estimator=None, evaluator=None,
            label_col: str = 'label', params_map=None):
        """Build the model.

        :param estimator: optional pre-configured RandomForestClassifier;
            a default one using ``label_col`` is created if omitted.
        :param evaluator: optional evaluator; defaults to weighted F-measure.
        :param label_col: label column name used only for the default estimator.
        :param params_map: optional hyper-parameter grid
            ``{param_name: [values]}``; a default grid is used if omitted.
        """
        estimator = RandomForestClassifier(labelCol=label_col)\
            if not estimator else estimator
        # Only random forests are supported: feature_importances below reads
        # featureImportances off the fitted last pipeline stage.
        assert isinstance(estimator, RandomForestClassifier)
        evaluator = MulticlassEvaluator(metricName='weightedFMeasure')\
            if not evaluator else evaluator
        super().__init__(estimator=estimator, evaluator=evaluator)
        # Default CV search grid (used when caller passes no params_map).
        self.params_map = {
            'maxDepth': [5, 10, 20],
            'numTrees': [20, 30, 40, 50],
            'minInstancesPerNode': [1, 2, 3]
        } if not params_map else params_map

    # Registered as a named metric with the Metrics registry so it can be
    # computed alongside other evaluation metrics.
    @Metrics.register('feature_importances')
    def feature_importances(self, predictions):
        """Return ``[(feature_name, importance), ...]`` sorted descending.

        Assumes ``self.best_model`` is a fitted Pipeline whose last stage is
        the random forest, and that ``self.features`` (set by the base class
        -- TODO confirm) lists feature names in vector order.  The
        ``predictions`` argument is unused here but required by the metric
        interface.
        """
        self.logger.info('Get feature importances')
        feature_importance = self.best_model.stages[-1].featureImportances
        feature_importance = sorted([
            (self.features[i], fi)
            for i, fi in enumerate(feature_importance)]
            , key=lambda x: -x[1])
        return feature_importance

    def get_feature_importances(self, df: DataFrame):
        """Map the fitted forest's feature importances to column names.

        Uses the ``ml_attr`` metadata on the features column of *df* to
        recover the original column names, and returns a pandas DataFrame
        sorted by importance (descending).

        Params
        ------
        df: DataFrame
            Example dataframe with the same schema as the model input
            (must carry ``ml_attr`` metadata on the features column,
            e.g. produced by a VectorAssembler).

        Usage
        ----------
        >>> model = RandomForestBinaryModel()
        >>> # ... fit so that model.best_model is populated ...
        >>> model.get_feature_importances(train)
        """
        # Local import keeps pandas optional for callers that never use this.
        import pandas as pd
        featureImp = self.best_model.stages[-1].featureImportances
        featuresCol = self.estimator.getFeaturesCol()
        features = df.schema[featuresCol].metadata["ml_attr"]["attrs"]
        list_extract = [features[i] for i in features]
        varlist = pd.DataFrame(list_extract)
        varlist['score'] = varlist['idx'].apply(lambda x: featureImp[x])
        return varlist.sort_values('score', ascending=False)
| 39.606557 | 101 | 0.657699 | 2,128 | 0.880795 | 0 | 0 | 409 | 0.169288 | 0 | 0 | 632 | 0.261589 |
f610b3662a3cf10a10d6b9f3bf736444d51ee13b | 2,815 | py | Python | predict.py | zahrabashir98/SmileDetection | 66700dfbd2809f057d224d438a75bef99edffc56 | [
"MIT"
] | 17 | 2019-02-19T15:14:43.000Z | 2021-07-09T14:36:19.000Z | predict.py | zahrabashir98/SmileDetection | 66700dfbd2809f057d224d438a75bef99edffc56 | [
"MIT"
] | 1 | 2021-01-07T02:51:35.000Z | 2021-01-07T02:51:35.000Z | predict.py | zahrabashir98/SmileDetection | 66700dfbd2809f057d224d438a75bef99edffc56 | [
"MIT"
] | 2 | 2019-02-01T17:46:39.000Z | 2020-06-17T12:10:52.000Z | import sys
import cv2
from keras.models import load_model
from matplotlib import pyplot as plt
import time
model = load_model("models/model.h5")
def find_faces(image):
    """Detect faces in a grayscale image with OpenCV's Haar cascade.

    Returns the (x, y, w, h) rectangles reported by detectMultiScale.
    """
    detector = cv2.CascadeClassifier('data/haarcascade_frontalface_default.xml')
    return detector.detectMultiScale(image, scaleFactor=1.1, minNeighbors=22)
def load_image(filepath):
    """Read an image file; return (RGB image, grayscale image)."""
    bgr = cv2.imread(filepath)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    return rgb, gray
def predict(gray_image):
    """Yield one JSON-serializable dict per face found in *gray_image*.

    Each dict has the face bounding box, the raw class scores, and the
    winning emotion label.
    """
    for (x, y, w, h) in find_faces(gray_image):
        # Crop, resize to the model's 48x48 input and add batch/channel dims.
        face = cv2.resize(gray_image[y:y + h, x:x + w], (48, 48))
        scores = model.predict(face.reshape((1, 48, 48, 1)))[0]
        # NOTE(review): the label test uses scores[1] for happiness while the
        # dict below maps happiness to scores[0] -- looks inconsistent;
        # confirm the model's output ordering.
        label = 'happiness' if scores[1] > scores[0] else 'neutral'
        yield dict(
            border = dict(
                x = float(x),
                y = float(y),
                width = float(w),
                height = float(h),
            ),
            prediction = {'happiness': float(scores[0]), 'neutral': float(scores[1])},
            emotion = label
        )
def put_text(image, rect, text):
    """Draw *text* centered inside *rect* on *image* (modifies image in place)."""
    x, y, w, h = rect
    typeface = cv2.FONT_HERSHEY_SIMPLEX
    # Scale the font with the rectangle height.
    scale = h / 30.0
    thickness = int(round(scale * 1.5))
    (text_w, text_h), _ = cv2.getTextSize(text, typeface, scale, thickness)
    # Lower-left corner that centers the rendered text in the rectangle.
    origin = (x + (w // 2) - (text_w // 2), y + (h // 2) + (text_h // 2))
    cv2.putText(image, text, origin, typeface, scale, (0, 255, 0), thickness)
def draw_face_info(image, face_info):
    """Draw the bounding box and emotion label from *face_info* onto *image*."""
    border = face_info['border']
    x, y = int(border['x']), int(border['y'])
    w, h = int(border['width']), int(border['height'])
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # Label goes in the top fifth of the face rectangle.
    put_text(image, (x, y, w, h // 5), face_info['emotion'])
def show_image(image, title='Result'):
    """Display *image* in a matplotlib window with the given title."""
    plt.subplot(111)
    plt.imshow(image)
    plt.title(title)
    plt.show()
if __name__ == '__main__':
    # Time the full pipeline: load image, detect + classify faces, annotate.
    start_time = time.time()
    image, gray_image = load_image(sys.argv[1])
    for face_info in predict(gray_image):
        print(face_info)
        draw_face_info(image, face_info)
    # Stop the clock before the (blocking) display window opens.
    end_time = time.time()
    show_image(image)
    response_time = end_time - start_time
    print(response_time)
| 26.308411 | 110 | 0.621314 | 0 | 0 | 779 | 0.276732 | 0 | 0 | 0 | 0 | 236 | 0.083837 |
f612b87c818bc244cde8457fdae9a749fd1f7f0f | 1,088 | py | Python | src/brute_force.py | tcysin/tsp-solver | 5361aefb7e2dc4d851320cd1cc8ac33aad50d76b | [
"MIT"
] | 2 | 2020-10-10T18:21:33.000Z | 2021-05-29T12:53:45.000Z | src/brute_force.py | DishanMdr/tsp-solver | 5361aefb7e2dc4d851320cd1cc8ac33aad50d76b | [
"MIT"
] | 4 | 2019-10-01T19:31:54.000Z | 2019-10-07T09:53:16.000Z | src/brute_force.py | AlekseiTcysin/tsp-solver | 5361aefb7e2dc4d851320cd1cc8ac33aad50d76b | [
"MIT"
] | 1 | 2021-05-29T12:53:47.000Z | 2021-05-29T12:53:47.000Z | """
Implementation of Brute Force algorithm.
Checks all possible tours and selects the shortest one.
"""
from itertools import permutations
def brute_force(graph):
    """Calculates and returns shortest tour using brute force approach.

    Enumerates every permutation of the nodes, so this runs in O(n!);
    it provides an exact solution.  A candidate tour is abandoned early
    as soon as its partial length reaches the best length found so far.

    Args:
        graph: instance of a Graph.

    Returns:
        list: sequence of nodes constituting shortest tour.
    """
    best_tour = None
    best_length = float('inf')

    for candidate in permutations(graph.nodes()):
        total = 0
        abandoned = False
        for src, dest in graph._edges_from_tour(candidate):
            total += graph.distance(src, dest)
            # No point finishing a tour that is already too long.
            if total >= best_length:
                abandoned = True
                break
        if not abandoned and total < best_length:
            best_length = total
            best_tour = candidate

    return list(best_tour)
| 23.652174 | 71 | 0.613051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.453125 |
f6141ec62f73733e8e0035ee98f16f938d62b41d | 5,414 | py | Python | web_app.py | akshitagupta23/Udacity_DS_ND_Capstone_Project | 1c500765f84160312935c48f9c1432256cf5df06 | [
"MIT"
] | null | null | null | web_app.py | akshitagupta23/Udacity_DS_ND_Capstone_Project | 1c500765f84160312935c48f9c1432256cf5df06 | [
"MIT"
] | null | null | null | web_app.py | akshitagupta23/Udacity_DS_ND_Capstone_Project | 1c500765f84160312935c48f9c1432256cf5df06 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# coding=utf-8
import streamlit as st
import numpy as np
import pandas as pd
import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from churn_model_train import missing_values_table
# Page chrome: wide layout, heading and banner image.
st.set_page_config(layout = "wide")
st.title('Customer Churn')
st.markdown("<h3></h3>", unsafe_allow_html=True)
st.image('images/customer_churn.png', caption=None, width=None, use_column_width=True)

# Full column schema of the raw telco-churn dataset.
# NOTE(review): _FEATURES is not referenced elsewhere in this script; it
# appears to document the expected CSV columns -- confirm before removing.
_FEATURES=['customerID', 'gender', 'SeniorCitizen', 'Partner', 'Dependents', 'tenure', 'PhoneService', 'MultipleLines',
           'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV',
           'StreamingMovies', 'Contract', 'PaperlessBilling','PaymentMethod', 'MonthlyCharges', 'TotalCharges']

# Pre-trained model pipeline serialized with joblib.
model = joblib.load('model.joblib')
def predict_churn(gender, SeniorCitizen, Partner,tenure, PhoneService,
                InternetService, OnlineSecurity, OnlineBackup, DeviceProtection, TechSupport, StreamingTV,
                StreamingMovies, Contract, MonthlyCharges, TotalCharges):
    """Score a single customer record with the loaded churn model.

    Builds a one-row feature list from the 15 arguments (in signature
    order) and returns ``model.predict`` on it.

    NOTE(review): this online path feeds 15 features (including
    PhoneService) while the batch path in run() drops PhoneService and
    keeps 14 columns -- one of the two is likely inconsistent with the
    model's training schema; confirm against the training pipeline.
    """
    prediction=model.predict([[gender, SeniorCitizen, Partner,tenure, PhoneService,
        InternetService, OnlineSecurity, OnlineBackup, DeviceProtection, TechSupport, StreamingTV,
        StreamingMovies, Contract, MonthlyCharges, TotalCharges]])
    print(prediction)
    return prediction
# Sidebar control: single-record form input ("Online") vs CSV upload
# ("Batch").  The chosen mode is a module-level global read by run().
predict_mode = st.sidebar.radio(
    "Choose mode to predict?",
    ("Online", "Batch"))
def run():
    """Render the Streamlit UI and score input in the selected mode.

    Online mode lays out a form of selectboxes/sliders (one widget per
    feature) and scores a single record via predict_churn(); Batch mode
    accepts a CSV upload, drops identifier/unused columns, and scores
    every row with the loaded model.
    """
    if predict_mode == 'Online':
        # Collected but not passed to the model (identifier only).
        customerID = st.text_input('customerID')
        col1, col2, col3, col4 = st.beta_columns(4)
        with col1:
            gender = st.selectbox('gender', ['Male', 'Female'])
        with col2:
            SeniorCitizen = st.selectbox('SeniorCitizen', [0, 1])
        with col3:
            Partner = st.selectbox('Partner', ['Yes', 'No'])
        with col4:
            Dependents = st.selectbox('Dependents', ['Yes', 'No'])
        col5, col6, col7, col8 = st.beta_columns(4)
        with col5:
            tenure = st.slider('tenure', 0, 50, 10)
        with col6:
            PhoneService = st.selectbox('PhoneService', ['Yes', 'No'])
        with col7:
            MultipleLines = st.selectbox('MultipleLines', ['No', 'Yes', 'No phone service'])
        with col8:
            InternetService = st.selectbox('InternetService', ['Fiber optic', 'DSL', 'No'])
        col9, col10, col11, col12 = st.beta_columns(4)
        with col9:
            OnlineSecurity = st.selectbox('OnlineSecurity', ['No', 'Yes', 'No phone service'])
        with col10:
            OnlineBackup = st.selectbox('OnlineBackup', ['No', 'Yes', 'No internet service'])
        with col11:
            DeviceProtection = st.selectbox('DeviceProtection', ['No', 'Yes', 'No internet service'])
        with col12:
            TechSupport = st.selectbox('TechSupport', ['No', 'Yes', 'No internet service'])
        col13, col14, col15, col16= st.beta_columns(4)
        with col13:
            StreamingTV = st.selectbox('StreamingTV', ['No', 'Yes', 'No internet service'])
        with col14:
            StreamingMovies = st.selectbox('StreamingMovies', ['No', 'Yes', 'No internet service'])
        with col15:
            Contract = st.selectbox('Contract', ['Month-to-month', 'Two Year', 'One Year'])
        with col16:
            PaperlessBilling= st.selectbox('PaperlessBilling', ['Yes', 'No'])
        col17, col18, col19 = st.beta_columns(3)
        with col17:
            PaymentMethod = st.selectbox('PaymentMethod', ['Electronic check', 'Mailed check', 'Bank transfer (automatic)', 'Credit card (automatic)'])
        with col18:
            MonthlyCharges = st.slider('MonthlyCharges', 0, 200, 50)
        with col19:
            TotalCharges = st.slider('TotalCharges', 0, 10000, 2000)
        predict=""
        if st.button("Predict"):
            # Note: only a subset of the collected widgets is forwarded;
            # Dependents/MultipleLines/PaperlessBilling/PaymentMethod are
            # collected but not passed to the model.
            predict = predict_churn(gender, SeniorCitizen, Partner,tenure, PhoneService,
                InternetService, OnlineSecurity, OnlineBackup, DeviceProtection, TechSupport, StreamingTV,
                StreamingMovies, Contract, MonthlyCharges, TotalCharges)
        # Shown on every rerun; displays "" until Predict is pressed.
        st.success('Customer will churn: {}'.format(predict))
    if predict_mode == 'Batch':
        file_upload = st.file_uploader("Upload file", type=["csv"])
        if file_upload is not None:
            input_df = pd.read_csv(file_upload)
            #input_df = missing_values_table(input_df)
            # Drop identifier and unused columns before scoring.
            input_df = input_df.drop(['customerID','Dependents','PhoneService','MultipleLines', 'PaperlessBilling','PaymentMethod'], axis = 1)
            predict = model.predict(input_df)
            st.write(predict)
if __name__ == '__main__':
    # Streamlit executes this script top-to-bottom; run() builds the UI.
    run()
| 42.629921 | 152 | 0.634281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,473 | 0.272072 |
f614a355b5c9fc85356cc143f4b8958e225a2420 | 9,315 | py | Python | evaluation.py | Sockeye-Project/decl-power-seq | b3eed4b7abfdac647b8ff7653806665436e3e54d | [
"MIT"
] | null | null | null | evaluation.py | Sockeye-Project/decl-power-seq | b3eed4b7abfdac647b8ff7653806665436e3e54d | [
"MIT"
] | null | null | null | evaluation.py | Sockeye-Project/decl-power-seq | b3eed4b7abfdac647b8ff7653806665436e3e54d | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import argparse
import random
import timeit
import copy
from sequence_generation import State_Search_Flags, Topology, topological_sort
from enzian_descriptions import enzian_nodes, enzian_wires, enzian_nodes_EVAL3
# Benchmark state-search problems.  Each tuple is
# (name, consumer power states, required rail states as
#  {wire: [(low, high)]}, count) where count is the number of solutions
# requested from the all-solutions search in run_eval1_m3.
# NOTE(review): the voltage pairs are presumed millivolt (min, max) bounds;
# confirm against enzian_descriptions.
problems = [
    ("p1", {"cpu" : "POWERED_ON", "fpga": "POWERED_ON"}, {"vdd_ddrcpu13" : [(1500, 1500)], "vdd_ddrcpu24" : [(1500, 1500)], "vdd_ddrfpga13" : [(1200, 1200)], "vdd_ddrfpga24" : [(1200, 1200)]}, 256),
    ("p2", {"cpu" : "POWERED_ON", "fpga": "POWERED_DOWN"}, {"vdd_ddrcpu13" : [(1500, 1500)], "vdd_ddrcpu24" : [(1500, 1500)]}, 6),
    ("p3", {"cpu" : "POWERED_DOWN", "fpga": "POWERED_DOWN"}, {}, 8),
]

# Power-state transitions timed by run_eval2: (name, initial consumer
# states, target consumer states).
transitions = [
    ("p1", {"cpu" : "POWERED_DOWN", "fpga": "POWERED_DOWN"}, {"cpu" : "POWERED_ON", "fpga": "POWERED_ON"}),
    ("p2", {"cpu" : "POWERED_DOWN", "fpga": "POWERED_DOWN"}, {"cpu" : "POWERED_ON", "fpga": "POWERED_DOWN"}),
    ("p3", {"cpu" : "POWERED_ON", "fpga": "POWERED_DOWN"}, {"cpu" : "POWERED_ON", "fpga": "POWERED_ON"}),
    ("p4", {"cpu" : "POWERED_ON", "fpga": "POWERED_DOWN"}, {"cpu" : "POWERED_DOWN", "fpga": "POWERED_DOWN"}),
    ("p5", {"cpu" : "POWERED_ON", "fpga": "POWERED_ON"}, {"cpu" : "POWERED_ON", "fpga": "POWERED_DOWN"}),
    ("p6", {"cpu" : "POWERED_ON", "fpga": "POWERED_ON"}, {"cpu" : "POWERED_DOWN", "fpga": "POWERED_DOWN"}),
]
def run_eval1_m1():
    """Compare three search configurations on each benchmark problem.

    For every problem, runs 100 trials with randomly shuffled wire and
    constraint orderings, timing: (1) search without advanced
    backtracking, (2) the default search, (3) the z3-based search.
    Appends "time1,time2,time3" rows to results/eval1_m1_<name>.csv.
    """
    enzian = Topology(enzian_nodes, enzian_wires)
    for (name, node_states, state_dict, number) in problems:
        # Line-buffered so partial results survive an interrupted run.
        result_file = open("results/eval1_m1_%s.csv"%name, 'a', buffering=1)
        flags1 = State_Search_Flags(all_solutions= False, advanced_backtracking=False)
        flags2 = State_Search_Flags(all_solutions=False)
        flags3 = State_Search_Flags(use_z3=True)
        for i in range(100):
            print(i)
            enzian.current_node_state = node_states
            # Randomize the orderings the search iterates over so the
            # timings average over orderings rather than one fixed order.
            random.shuffle(enzian.sorted_wires)
            for w in enzian.wires.values():
                random.shuffle(w.constraints)
            #required since z3 keeps state and since we have permuted every conductor's state possibilities: index of which are hardcoded into the z3 problem instance!
            enzian.generate_z3_solver()
            # Variant 1 is timed once (it is the slow baseline); the others
            # are averaged over three runs.
            time1 = timeit.timeit(lambda: enzian.parametrized_state_search({}, flags1, 1), number = 1)
            time2 = timeit.timeit(lambda: enzian.parametrized_state_search({}, flags2, 1), number = 3) / 3
            time3 = timeit.timeit(lambda: enzian.parametrized_state_search({}, flags3, 1), number = 3) / 3
            print(time1)
            print(time2)
            print(time3)
            result_file.write(str(time1) + "," + str(time2) + "," + str(time3) + "\n")
def run_eval1_m2():
    """Time the default single-solution state search over 500 shuffled
    orderings per problem; append one time per line to
    results/eval1_m2_<name>.csv."""
    topology = Topology(enzian_nodes, enzian_wires)
    for (name, node_states, state_dict, number) in problems:
        out = open("results/eval1_m2_%s.csv"%name, 'a')
        search_flags = State_Search_Flags(all_solutions=False)
        for trial in range(500):
            print(trial)
            topology.current_node_state = node_states
            # Randomize iteration orders so timings average over orderings.
            random.shuffle(topology.sorted_wires)
            for wire in topology.wires.values():
                random.shuffle(wire.constraints)
            elapsed = timeit.timeit(
                lambda: topology.parametrized_state_search({}, search_flags, 1),
                number = 3) / 3
            print(elapsed)
            out.write(str(elapsed) + "\n")
def run_eval1_m3():
    """Time the all-solutions search (with each problem's target rail
    states and requested solution count) over 500 shuffled orderings;
    append one time per line to results/eval1_m3_<name>.csv."""
    topology = Topology(enzian_nodes, enzian_wires)
    for (name, node_states, state_dict, number) in problems:
        out = open("results/eval1_m3_%s.csv"%name, 'a')
        search_flags = State_Search_Flags(all_solutions=True)
        for trial in range(500):
            print(trial)
            topology.current_node_state = node_states
            # Randomize iteration orders so timings average over orderings.
            random.shuffle(topology.sorted_wires)
            for wire in topology.wires.values():
                random.shuffle(wire.constraints)
            elapsed = timeit.timeit(
                lambda: topology.parametrized_state_search(state_dict, search_flags, number),
                number = 3) / 3
            print(elapsed)
            out.write(str(elapsed) + "\n")
def run_eval1_m4():
    """Compare an unconstrained search against a rail-state-constrained
    search, 500 shuffled trials per problem; appends "time1,time2" rows
    to results/eval1_m4_<name>.csv.
    """
    enzian = Topology(enzian_nodes, enzian_wires)
    for (name, node_states, state_dict, number) in problems:
        result_file = open("results/eval1_m4_%s.csv"%name, 'a')
        flags = State_Search_Flags(all_solutions=False)
        for i in range(500):
            print(i)
            enzian.current_node_state = node_states
            # Randomize iteration orders so timings average over orderings.
            random.shuffle(enzian.sorted_wires)
            for w in enzian.wires.values():
                random.shuffle(w.constraints)
            # time1: no target rail states; time2: the problem's targets.
            time1 = timeit.timeit(lambda: enzian.parametrized_state_search({}, flags, 1), number = 3) / 3
            time2 = timeit.timeit(lambda: enzian.parametrized_state_search(state_dict, flags, 1), number = 3) / 3
            print(time1)
            print(time2)
            result_file.write(str(time1) + "," + str(time2) + "\n")
def run_eval2():
    """Time each power-state transition in `transitions` (average of 3
    runs on a fresh Topology each time) and append "name,time" rows to
    results/eval2.csv.
    """
    result_file = open("results/eval2.csv", 'a', buffering=1)
    for (problem, initial, end) in transitions:
        time = 0
        #since consumer transitions update the virtual platform state (especially the initial consumer states), we must manually time it thrice
        for i in range(3):
            # Fresh topology per run; deep-copy the initial states because
            # the update mutates them.
            enzian = Topology(enzian_nodes, enzian_wires)
            enzian.current_node_state = copy.deepcopy(initial)
            time = timeit.timeit(lambda: enzian.stateful_node_update(end, flags = State_Search_Flags(all_solutions=False, visualize=False)), number = 1) + time
        print(initial)
        print(end)
        time = time / 3
        print(time)
        result_file.write(problem + "," + str(time) + "\n")
def run_eval3():
    """Evaluate how similar the generated power-on sequence is to the
    manually written one.

    Phase 1 generates the power-on command sequence and the per-step
    event graphs G1..G19 (written to results/).  Phase 2 loads the
    manually constructed event graph G, sanity-checks it, and for each
    generated Gi checks that the union Gi ∪ G is still acyclic, writing
    the verdicts to results/eval3.txt.

    NOTE(review): graphs are deserialized with eval() on local result
    files; acceptable only because the files are produced by this tool /
    the author -- never point this at untrusted input.
    """
    #Collect data, store sequence to commands.py and G1 to G19 (remove comments to perform)
    #############################################################
    enzian = Topology(enzian_nodes_EVAL3, enzian_wires)
    enzian.apply_changes({}, flags= State_Search_Flags(all_solutions = False))
    enzian.commands = enzian.commands + "# code from here:\n"
    # writes event graphs to files G1 to G19:
    enzian.stateful_node_update({"cpu": "POWERED_ON", "fpga": "POWERED_ON"}, flags= State_Search_Flags(all_solutions=True, return_graph=True, prefer_concurrent_interleaving=False))
    enzian.done("results/eval3_sequence.py") #writes command sequence to results/eval3_sequence.py -> must press Enter to continue the evaluation
    ##############################################################
    #Evaluate similarity of order as described in thesis: store results in file "result_eval3.txt":
    ##############################################################
    #G (event graph of manual solution was manually constructed and stored in manual_sequence_event_graph.txt)
    enzian = Topology(enzian_nodes, enzian_wires)
    graph_file = open("manual_sequence_event_graph.txt", 'r')
    graph = eval(graph_file.read())
    graph_file.close()
    #assert that all conductors in G were spelled correctly (since they were manually typed)
    for (c, conductors) in graph.items():
        for w in conductors | {c}:
            name = w
            if w[:4] == "set_":
                name = w[4:]
            if not name in enzian.wires:
                print("%s not in wires" % name)
    #assert that G is acyclic
    print("G is acyclic: %s"% str(not topological_sort(graph) is None))
    result_file = open("results/eval3.txt", 'w')
    #G1 is empty because of the call to apply_changes that put the platform into the appropriate state
    for i in range(1, 20):
        graph_file = open("results/eval3_G%s.txt"%str(i), 'r')
        graph2 = eval(graph_file.read())
        graph_file.close()
        # Merge G into Gi.  BUGFIX: set.union() returns a new set, so the
        # previous bare `graph2[c].union(conductors)` discarded the result
        # and shared keys were never merged; assign the union back instead.
        # (Assignment, not update(), so sets aliased from `graph` in the
        # else-branch are never mutated across iterations.)
        for (c, conductors) in graph.items():
            if c in graph2:
                graph2[c] = graph2[c].union(conductors)
            else:
                graph2[c] = conductors
        string = "Union of G%s and G is acyclic : %s" %(str(i), str(not topological_sort(graph2) is None))
        print(string)
        result_file.write(string + "\n")
    result_file.close()
# Dispatch table mapping CLI flag names to experiment runners; iterated by
# the __main__ block below (all experiments run when no flag is given).
experiments={
    "e1m1" : run_eval1_m1,
    "e1m2" : run_eval1_m2,
    "e1m3" : run_eval1_m3,
    "e1m4" : run_eval1_m4,
    "e2" : run_eval2,
    "e3" : run_eval3
}
if __name__ == "__main__":
    # One append_const flag per experiment; with no flags, run them all.
    parser = argparse.ArgumentParser(description="Run evaluation, by default all experiments are run.")
    parser.add_argument("--e1m1", dest="experiments", action="append_const", const="e1m1",
        help="Run measurement 1 of evaluation 1"
    )
    parser.add_argument("--e1m2", dest="experiments", action="append_const", const="e1m2",
        help="Run measurement 2 of evaluation 1"
    )
    parser.add_argument("--e1m3", dest="experiments", action="append_const", const="e1m3",
        help="Run measurement 3 of evaluation 1"
    )
    parser.add_argument("--e1m4", dest="experiments", action="append_const", const="e1m4",
        help="Run measurement 4 of evaluation 1"
    )
    parser.add_argument("--e2", dest="experiments", action="append_const", const="e2",
        help="Run evaluation 2"
    )
    # BUGFIX: help text previously said "Run evaluation 2" (copy-paste).
    parser.add_argument("--e3", dest="experiments", action="append_const", const="e3",
        help="Run evaluation 3"
    )
    args = parser.parse_args()
    if args.experiments is None:
        es = experiments.keys()
    else:
        es = args.experiments
    for e in es:
        experiments[e]()
| 45.661765 | 198 | 0.620183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,738 | 0.293935 |
f614d1aa5bffc66394d94ec1d27b271cbd750435 | 3,647 | py | Python | venv/Lib/site-packages/gensim/test/test_probability_estimation.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/gensim/test/test_probability_estimation.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/gensim/test/test_probability_estimation.py | arnoyu-hub/COMP0016miemie | 59af664dcf190eab4f93cefb8471908717415fea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for probability estimation algorithms in the probability_estimation module.
"""
import logging
import unittest
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.hashdictionary import HashDictionary
from gensim.topic_coherence import probability_estimation
class BaseTestCases:
    """Namespace wrapper so unittest discovery does not collect the abstract
    base class itself; only the concrete subclasses below are run."""

    class ProbabilityEstimationBase(unittest.TestCase):
        """Shared tests for the probability_estimation functions; subclasses
        supply the dictionary implementation via setup_dictionary()."""

        # Toy corpus used by all tests.
        texts = [
            ['human', 'interface', 'computer'],
            ['eps', 'user', 'interface', 'system'],
            ['system', 'human', 'system', 'eps'],
            ['user', 'response', 'time'],
            ['trees'],
            ['graph', 'trees']
        ]
        # Set by subclasses in setup_dictionary().
        dictionary = None

        def build_segmented_topics(self):
            """Construct self.segmented_topics (and cache the token ids)."""
            # Suppose the segmented topics from s_one_pre are:
            token2id = self.dictionary.token2id
            computer_id = token2id['computer']
            system_id = token2id['system']
            user_id = token2id['user']
            graph_id = token2id['graph']
            self.segmented_topics = [
                [
                    (system_id, graph_id),
                    (computer_id, graph_id),
                    (computer_id, system_id)
                ], [
                    (computer_id, graph_id),
                    (user_id, graph_id),
                    (user_id, computer_id)
                ]
            ]
            self.computer_id = computer_id
            self.system_id = system_id
            self.user_id = user_id
            self.graph_id = graph_id

        def setup_dictionary(self):
            # Abstract hook: concrete subclasses must assign self.dictionary.
            raise NotImplementedError

        def setUp(self):
            self.setup_dictionary()
            self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]
            self.build_segmented_topics()

        def test_p_boolean_document(self):
            """Test p_boolean_document(): each token maps to the set of
            document indices containing it."""
            accumulator = probability_estimation.p_boolean_document(
                self.corpus, self.segmented_topics)
            obtained = accumulator.index_to_dict()
            expected = {
                self.graph_id: {5},
                self.user_id: {1, 3},
                self.system_id: {1, 2},
                self.computer_id: {0}
            }
            self.assertEqual(expected, obtained)

        def test_p_boolean_sliding_window(self):
            """Test p_boolean_sliding_window(): per-token window counts."""
            # Test with window size as 2. window_id is zero indexed.
            accumulator = probability_estimation.p_boolean_sliding_window(
                self.texts, self.segmented_topics, self.dictionary, 2)
            self.assertEqual(1, accumulator[self.computer_id])
            self.assertEqual(3, accumulator[self.user_id])
            self.assertEqual(1, accumulator[self.graph_id])
            self.assertEqual(4, accumulator[self.system_id])
class TestProbabilityEstimation(BaseTestCases.ProbabilityEstimationBase):
    """Run the shared probability-estimation tests with a HashDictionary."""
    def setup_dictionary(self):
        self.dictionary = HashDictionary(self.texts)
class TestProbabilityEstimationWithNormalDictionary(BaseTestCases.ProbabilityEstimationBase):
    """Run the shared probability-estimation tests with a plain Dictionary."""
    def setup_dictionary(self):
        self.dictionary = Dictionary(self.texts)
        # Build the reverse id->token map, which Dictionary does not
        # populate by default.
        self.dictionary.id2token = {v: k for k, v in self.dictionary.token2id.items()}
if __name__ == '__main__':
    # Quiet INFO-level noise from gensim, then run the test suite.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| 35.754902 | 94 | 0.587332 | 3,035 | 0.832191 | 0 | 0 | 0 | 0 | 0 | 0 | 630 | 0.172745 |
f6157ffabb60051dc58e33f25b48c08c00d0742e | 5,877 | py | Python | src/geomm/centering.py | stxinsite/geomm | 05ce2404614e3a594c5d5e059b957140f4983bc3 | [
"MIT"
] | 3 | 2018-05-01T16:26:15.000Z | 2019-06-19T21:17:47.000Z | src/geomm/centering.py | stxinsite/geomm | 05ce2404614e3a594c5d5e059b957140f4983bc3 | [
"MIT"
] | null | null | null | src/geomm/centering.py | stxinsite/geomm | 05ce2404614e3a594c5d5e059b957140f4983bc3 | [
"MIT"
] | 1 | 2021-09-28T15:17:16.000Z | 2021-09-28T15:17:16.000Z | import numpy as np
from geomm.centroid import centroid
def center(coords, center_point):
    """Translate *coords* so that *center_point* moves to the origin.

    Parameters
    ----------
    coords : arraylike of shape (n_points, 3)
        Coordinates of a single frame.
    center_point : arraylike of 3 floats
        Point subtracted from every coordinate.

    Returns
    -------
    centered_coords : arraylike
        New array of translated coordinates.
    """
    assert len(coords.shape) == 2, (
        "coordinates should be rank 2 array, "
        "this function operates on individual frames not trajectories.")
    assert coords.shape[1] == 3, "coordinates are not of 3 dimensions"
    assert len(center_point) == 3, "center point is not of 3 dimensions"

    return coords - center_point
def center_around(coords, idxs, weights=None):
    """Translate *coords* so the centroid of the points in *idxs* is at
    the origin.

    The centroid is computed only from the selected points (weighted, if
    *weights* is given), but the translation is applied to all points.

    Parameters
    ----------
    coords : arraylike of shape (n_points, 3)
        The coordinates you wish to center.
    idxs : arraylike of int
        Indices of the points whose centroid defines the translation.
    weights : arraylike of float, optional
        Weights for a weighted centroid ('center of mass').

    Returns
    -------
    centered_coords : arraylike
        New array of translated coordinates.
    """
    assert len(coords.shape) == 2, (
        "coordinates should be rank 2 array, "
        "this function operates on individual frames not trajectories.")
    assert coords.shape[1] == 3, "coordinates are not of 3 dimensions"
    assert len(idxs) > 0, "Must provide some idxs to compute a center of."

    reference_point = centroid(coords[idxs], weights=weights)
    return center(coords, reference_point)
def apply_rectangular_pbcs(coords, unitcell_side_lengths, center_point=(0., 0., 0.,)):
    """Apply rectangular Periodic Boundary Conditions (PBCs) given the
    lengths of the unitcell and a center point position of the box in
    the coordinate space.  The default center point is (0, 0, 0), which
    is the OpenMM convention but not that of all MD systems.

    Each coordinate component strictly more than half a box length from
    the center is shifted by exactly one box length back toward the box
    (i.e. components are wrapped at most once, as in the original
    per-index implementation).

    Parameters
    ----------
    coords : arraylike of shape (n_points, 3)
        The coordinate array of the particles you will be transforming.
    unitcell_side_lengths : arraylike of shape (3,)
        The lengths of the sides of a rectangular unitcell.
    center_point : arraylike of 3 floats, optional
        Position of the box center.

    Returns
    -------
    wrapped_coords : arraylike
        New array of wrapped coordinates; the input is not modified.

    Warning
    -------
    This method does not understand molecular topologies and will
    "break" bonds when moving molecules through boundaries.
    """
    # check to make sure everything looks okay
    assert len(coords.shape) == 2, (
        "coordinates should be rank 2 array, "
        "this function operates on individual frames not trajectories.")
    assert coords.shape[1] == 3, "coordinates are not of 3 dimensions"
    assert len(center_point) == 3, "center point is not of 3 dimensions"
    assert len(unitcell_side_lengths) == 3, "Unitcell side lengths are not of dimension 3"

    # cast the center point to an array
    center_point = np.array(center_point)

    # Calculate half box sizes
    unitcell_half_lengths = unitcell_side_lengths * 0.5

    # Boolean masks of the components strictly outside the box on either side.
    over = coords > center_point + unitcell_half_lengths
    under = coords < center_point - unitcell_half_lengths

    # Shift each out-of-box component by one box length (vectorized
    # replacement of the original per-index Python loops; same semantics).
    shift = unitcell_side_lengths * (under.astype(coords.dtype)
                                     - over.astype(coords.dtype))
    return coords + shift
def center_complex(coords, complex_idxs):
    """For a system with periodic boundary conditions move all members of
    a complex to the same image of the unitcell.

    Centers the coordinates on the mean of the per-member centroids.

    Parameters
    ----------
    coords : arraylike of shape (n_points, 3)
        The coordinate array of the particles you will be transforming.
    complex_idxs : list of arraylikes of int of rank 1
        One index collection per member of the complex.

    Returns
    -------
    centered_coords : arraylike
        Transformed coordinates.
    """
    # Mean position of each member of the complex.
    member_centroids = np.array(
        [coords[member_idxs].mean(axis=0) for member_idxs in complex_idxs])

    # Centroid of the member centroids; center everything on it.
    complex_centroid = member_centroids.mean(axis=0)
    return center(coords, complex_centroid)
| 31.095238 | 90 | 0.680619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,846 | 0.654416 |
f61742ad1e6b7bd4951030a62b2c0b4b421ab907 | 4,082 | py | Python | xigt/exporters/itsdb.py | lizcconrad/xigt | 2fd3e107dc2b5e8bf6fa4e6b13cf67028ff89093 | [
"MIT"
] | null | null | null | xigt/exporters/itsdb.py | lizcconrad/xigt | 2fd3e107dc2b5e8bf6fa4e6b13cf67028ff89093 | [
"MIT"
] | null | null | null | xigt/exporters/itsdb.py | lizcconrad/xigt | 2fd3e107dc2b5e8bf6fa4e6b13cf67028ff89093 | [
"MIT"
] | null | null | null | import logging
from os.path import isfile, join as pjoin
from os import environ
try:
from delphin import tsdb
except ImportError:
raise ImportError(
'Could not import pyDelphin module. Get it from here:\n'
' https://github.com/goodmami/pydelphin'
)
# ECC 2021-07-26: the lambda for i-comment assumes there will be a translation, but it's not always present,
# so this is a helper function for the lambda to use
def build_comment(igt):
    """Build the i-comment cell value for an IGT.

    The comment is the space-joined contents of the first glosses tier,
    followed by ``" // "`` and the first translation when one exists.
    """
    # glosses tier may be absent: next() falls back to an empty iterable
    comment = " ".join(item.get_content() for item in next(igt.select(type="glosses"), []))
    try:
        comment += " // " + str(next(igt.select(type="translations"), [""])[0].get_content())
    except (AttributeError, IndexError):
        # AttributeError: no translations tier, so the [""] fallback's ""
        # has no get_content(); IndexError: the tier exists but is empty.
        # (Was a bare `except:`, which also hid real bugs and even
        # KeyboardInterrupt -- narrowed to the two expected failure modes.)
        pass
    return comment
# EMB 2019-04-05 Previously, the lamba part was in prepare_config, but in that case, the last mapper was used for all keys, and I couldn't figure out why. Nor could I see why the lambas weren't called right away. Moving that into DEFAULT_CELLS solved the problem, so I could hae both i-input and i-comment filled in.
DEFAULT_CELLS = [
    # i-input: prefer the phrase tier with id="p" (FLEx-converted data keeps
    # the language line in the tier with id="l", so the *first* phrases tier
    # is not always the right one -- KPH 2019-09-30), then fall back to any
    # phrases tier, and finally to joining the words tier.
    # The eval('...') wrappers of literal strings were removed: evaluating
    # the expressions directly has identical semantics (`igt` is in scope)
    # and avoids eval entirely.
    ('i-input', lambda igt: next(igt.select(id="p"), [""])[0].value() or
                            next(igt.select(type="phrases"), [""])[0].value() or
                            " ".join(item.get_content()
                                     for item in next(igt.select(type="words"), []))),
    # i-comment: the glosses concatenated, followed by the translation
    ('i-comment', lambda igt: build_comment(igt)),
    # i-wf: well-formedness flag; 0 when a "judgment" meta is present, else 1
    ('i-wf', lambda igt: 0 if igt.get_meta("judgment") else 1),
]
def xigt_export(xc, outpath, config=None):
    """Export the Xigt corpus *xc* as a [incr tsdb()] test suite at *outpath*.

    Requires a relations (schema) file in the config; without one the
    export is aborted after logging an error.
    """
    config = prepare_config(config)
    relations_path = config.get('relations')
    if not relations_path or not isfile(relations_path):
        logging.error('Relations file required for [incr tsdb()] export.')
        return
    # ECC 2021-07-26: updated for the new pydelphin API.
    # Read the schema, flatten the corpus into item rows, create the
    # profile directory, then write the "item" relation.
    config['schema'] = tsdb.read_schema(relations_path)
    rows = export_corpus(xc, config)
    tsdb.initialize_database(outpath, config['schema'], files=False)
    tsdb.write(outpath, 'item', rows)
def prepare_config(config):
    """Fill in export defaults on *config* (creating it when None) and return it."""
    config = {} if config is None else config
    for key, default in (('i-id_start', 0), ('i-id_skip', 10)):
        config.setdefault(key, default)
    # no relations file supplied: try the conventional LOGON checkout location
    if 'relations' not in config and 'LOGONROOT' in environ:
        rel_path = pjoin(
            environ['LOGONROOT'],
            'lingo/lkb/src/tsdb/skeletons/english/Relations'
        )
        if isfile(rel_path):
            logging.info('Attempting to get relations file from {}'
                         .format(rel_path))
            config['relations'] = rel_path
    config['cells'] = DEFAULT_CELLS
    return config
def export_corpus(xc, config):
    """Convert every IGT in corpus *xc* into a tsdb item record."""
    start, skip = config['i-id_start'], config['i-id_skip']
    item_fields = config['schema']['item']
    records = []
    for index, igt in enumerate(xc):
        # export_igt reads the current i-id back out of the config dict
        config['__i-id_current__'] = start + index * skip
        logging.debug('Exporting {}'.format(str(igt.id)))
        records.append(tsdb.make_record(export_igt(igt, config), item_fields))
    return records
def export_igt(igt, config):
    """Map one IGT to a row dict using the configured cell mappers.

    ``i-id`` comes from the config; ``i-length`` is derived from the
    whitespace-tokenized ``i-input`` cell.
    """
    row = {'i-id': config['__i-id_current__']}
    for key, mapper in config['cells']:
        try:
            row[key] = mapper(igt)
        except SyntaxError:
            # eval()-style mapper expressions may be malformed; report which
            # cell failed before re-raising
            logging.error('Malformed cell mapper expression for {}'
                          .format(key))
            raise
    row['i-length'] = len(row['i-input'].split())
    return row
| 40.415842 | 316 | 0.638902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,061 | 0.5049 |
f6185b7b68831ed7849329b53e5d282146c91eaa | 1,072 | py | Python | spectrum_test.py | PeiKaLunCi/rectangular_RIE | 29afdde1798ac8c4fb5f162c2033269ac18e1583 | [
"MIT"
] | 2 | 2022-03-19T08:07:00.000Z | 2022-03-22T08:35:06.000Z | spectrum_test.py | PeiKaLunCi/rectangular_RIE | 29afdde1798ac8c4fb5f162c2033269ac18e1583 | [
"MIT"
] | null | null | null | spectrum_test.py | PeiKaLunCi/rectangular_RIE | 29afdde1798ac8c4fb5f162c2033269ac18e1583 | [
"MIT"
] | 1 | 2022-03-19T08:06:51.000Z | 2022-03-19T08:06:51.000Z | import numpy as np
import matplotlib.pyplot as plt
import g_functions as g_f
# Model parameters: R1/R2 are aspect-ratio-like parameters of the rectangular
# ensemble, M the matrix dimension, Delta the noise level.
# (Semantics inferred from names -- TODO confirm against g_functions.)
R1 = 2
R2 = .6
M = 500
Delta = .1
NB_POINTS = 2**10
EPSILON_IMAG = 1e-8
parameters = {
    'M' : M,
    'R1' : R1,
    'R2' : R2,
    'NB_POINTS' : NB_POINTS,
    'EPSILON_IMAG' : EPSILON_IMAG,
    'verbosity' : 1,
    'ENSAMBLE' : 'Wishart'
}
# Compute sample
S, Y = g_f.make_sample(parameters, Delta)
# Compute rho from theory
rho_theory = g_f.find_rho(parameters, Delta)
# Compute denoising function from theory
denoiser_plot = np.zeros(parameters["NB_POINTS"])
for (i_z, z) in enumerate(rho_theory["zs"]):
    denoiser_plot[i_z] = g_f.denoiser(z, parameters, Delta)
# Overlay the empirical singular-value histogram with the theoretical density
plt.hist(g_f.find_spectrum(Y), 80, density=True)
# plt.hist(g_f.find_spectrum(g_f.denoise_sample(Y, parameters, Delta)), 160, density=True)
plt.plot(rho_theory['zs'],rho_theory['rho'],color='red')
# plt.plot(rho_theory['zs'],denoiser_plot)
plt.title(f"R2 = {parameters['R2']}, R1 = {parameters['R1']}")
plt.ylabel("Frequency")
plt.xlabel("Singular value")
plt.show()
f618776d83cae749bd8dfd7a18734f7ba04c9b06 | 1,129 | py | Python | Part_2_intermediate/mod_2/lesson_2/homework_1/homework.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_2_intermediate/mod_2/lesson_2/homework_1/homework.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_2_intermediate/mod_2/lesson_2/homework_1/homework.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null |
# Create classes representing a Product, an Order, Apples and Potatoes.
# Create a few apple and potato objects and print their type with the built-in type() function.
# Create a list containing 5 orders, plus a dictionary whose keys are product names
# and whose values are instances of the Product class.
class Product:
    """Placeholder for a store product (exercise stub)."""
    pass
class Order:
    """Placeholder for a customer order (exercise stub)."""
    pass
class Apple:
    """Placeholder for an apple product (exercise stub)."""
    pass
class Potato:
    """Placeholder for a potato product (exercise stub)."""
    pass
if __name__ == '__main__':
    # create a few apples and report each one's runtime type
    for shade, apple in (("green", Apple()), ("red", Apple()), ("fresh", Apple())):
        print(shade + " apple type:", type(apple))
    # same for a couple of potatoes
    for age, potato in (("old", Potato()), ("young", Potato())):
        print(age + " potato type:", type(potato))
    # a list of five orders
    orders = [Order() for _ in range(5)]
    print(orders)
    # product name -> Product instance (keys are user-facing Polish names)
    products = {
        "Jabłko": Product(),
        "Ziemniak": Product(),
        "Marchew": Product(),
        "Ciastka": Product(),
    }
    print(products)
| 22.58 | 101 | 0.644818 | 87 | 0.075652 | 0 | 0 | 0 | 0 | 0 | 0 | 521 | 0.453043 |
f61930fbe75a973bf027da1a6d037d44481d82ff | 2,625 | py | Python | examples/validate_ndc.py | almarklein/visvis2 | 728a4b7434879d3606f32836eda3971ed73905f8 | [
"BSD-2-Clause"
] | 7 | 2019-12-20T14:15:53.000Z | 2020-05-03T17:43:02.000Z | examples/validate_ndc.py | almarklein/visvis2 | 728a4b7434879d3606f32836eda3971ed73905f8 | [
"BSD-2-Clause"
] | 30 | 2020-02-10T08:59:30.000Z | 2020-06-16T21:21:33.000Z | examples/validate_ndc.py | almarklein/visvis2 | 728a4b7434879d3606f32836eda3971ed73905f8 | [
"BSD-2-Clause"
] | null | null | null | """
Example (and test) for the NDC coordinates. Draws a square that falls partly out of visible range.
* The scene should show a band from the bottom left to the upper right.
* The bottom-left (NDC -1 -1) must be green, the upper-right (NDC 1 1) blue.
* The other corners must be black, cut off at exactly half way: the depth is 0-1.
"""
# test_example = true
from wgpu.gui.auto import WgpuCanvas, run
from pygfx.renderers.wgpu._shadercomposer import Binding, WorldObjectShader
import pygfx as gfx
class Square(gfx.WorldObject):
    """World object rendered as a hard-coded NDC quad (carries no geometry)."""
    pass
class SquareMaterial(gfx.Material):
    """Marker material used to select the square render function."""
    pass
class SquareShader(WorldObjectShader):
    """Shader for Square objects: emits a fixed 4-vertex triangle strip in NDC."""
    def get_code(self):
        """Concatenate the complete WGSL program (definitions, helpers, stages)."""
        return (
            self.get_definitions()
            + self.common_functions()
            + self.vertex_shader()
            + self.fragment_shader()
        )
    def vertex_shader(self):
        """WGSL vertex stage: positions/colors come from constant arrays.

        Depth values intentionally fall outside [0, 1] on two corners so the
        example validates NDC depth clipping.
        """
        return """
        @stage(vertex)
        fn vs_main(@builtin(vertex_index) index: u32) -> Varyings {
            var positions = array<vec3<f32>, 4>(
                vec3<f32>(-1.0, -1.0, 0.5), vec3<f32>(-1.0, 1.0, 1.5), vec3<f32>(1.0, -1.0, -0.5), vec3<f32>(1.0, 1.0, 0.5)
            );
            var colors = array<vec3<f32>, 4>(
                vec3<f32>(0.0, 1.0, 0.0), vec3<f32>(0.0, 0.5, 0.5), vec3<f32>(0.0, 0.5, 0.5), vec3<f32>(0.0, 0.0, 1.0)
            );
            var varyings: Varyings;
            varyings.position = vec4<f32>(positions[index], 1.0);
            varyings.color = vec4<f32>(colors[index], 1.0);
            return varyings;
        }
        """
    def fragment_shader(self):
        """WGSL fragment stage: pass the interpolated vertex color through."""
        return """
        @stage(fragment)
        fn fs_main(varyings: Varyings) -> FragmentOutput {
            var out: FragmentOutput;
            out.color = varyings.color;
            return out;
        }
        """
@gfx.renderers.wgpu.register_wgpu_render_function(Square, SquareMaterial)
def square_render_function(render_info):
    """Build the wgpu pipeline description for a Square with SquareMaterial."""
    shader = SquareShader(render_info)
    # the only binding: the standard per-frame uniform buffer at slot (0, 0)
    binding = Binding("u_stdinfo", "buffer/uniform", render_info.stdinfo_uniform)
    shader.define_binding(0, 0, binding)
    return [
        {
            "render_shader": shader,
            "primitive_topology": "triangle-strip",
            "indices": range(4),
            "bindings0": {0: binding},
        },
    ]
# %% Setup scene
canvas = WgpuCanvas()
renderer = gfx.WgpuRenderer(canvas)
scene = gfx.Scene()
# the Square supplies its own NDC coordinates, so no geometry is passed
t1 = Square(None, SquareMaterial())
scene.add(t1)
camera = gfx.NDCCamera()  # This example does not even use the camera
canvas.request_draw(lambda: renderer.render(scene, camera))
if __name__ == "__main__":
    print(__doc__)
    run()
| 28.225806 | 123 | 0.602667 | 1,297 | 0.494095 | 0 | 0 | 477 | 0.181714 | 0 | 0 | 1,413 | 0.538286 |
f61a2ffd3ec4c8094b826196a15aa36e177ad663 | 122 | py | Python | myy/accounts/admin.py | ramadevim/Travel-website | b2bec8d7ea847b5105c181fe1099bb85950dbd76 | [
"MIT"
] | null | null | null | myy/accounts/admin.py | ramadevim/Travel-website | b2bec8d7ea847b5105c181fe1099bb85950dbd76 | [
"MIT"
] | null | null | null | myy/accounts/admin.py | ramadevim/Travel-website | b2bec8d7ea847b5105c181fe1099bb85950dbd76 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Register
admin.site.register(Register) | 20.333333 | 32 | 0.811475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.229508 |
f61b418707bd8d1c9d2bec14e8a8db0e5f4a45ab | 12,572 | py | Python | ipy/nuisancelib.py | chrispycheng/nuisance | 4ba140b2b8760a45a594cd49451b6d4a7e1cc3db | [
"Apache-2.0"
] | null | null | null | ipy/nuisancelib.py | chrispycheng/nuisance | 4ba140b2b8760a45a594cd49451b6d4a7e1cc3db | [
"Apache-2.0"
] | 8 | 2020-06-29T13:01:22.000Z | 2021-01-21T02:05:24.000Z | ipy/nuisancelib.py | chrispycheng/nuisance | 4ba140b2b8760a45a594cd49451b6d4a7e1cc3db | [
"Apache-2.0"
] | 1 | 2020-08-11T14:09:25.000Z | 2020-08-11T14:09:25.000Z | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import datetime as dt
from statsmodels.stats.multitest import fdrcorrection
from pylab import savefig
# FUNCTIONS YOU CAN USE:
# analyses(filepath) spits out a nifty heatmap to let you check correlation between variables
#
# regress(option, df) churns out a saucy graph of the linear regression for the variables you provided, where
# option is 'snr_total' or 'tsnr', whichever you want to make the dependent variable of your model
# df is the pandas DataFrame containing your data. To modify which variables you want in your model, you'll
# have to directly modify the regress function
# NOTABLE FILENAMES
# ../data/extractions/p2_BOLD.csv - all dates for p2_BOLD
# ../data/extractions/p2Xs4X35mm_BOLD.csv - all dates for p2Xs4X35mm_BOLD
# ../data/extractions/anat.csv - all possible dates for anatomical data
def filter(option, df):
    """Return the rows of *df* matching one acquisition filetype.

    ``option`` is ``'p2'`` or ``'x'``; any other value returns None.
    NOTE(review): this deliberately keeps the original name, which shadows
    the builtin ``filter``.
    """
    masks = {
        'p2': df['Filetype'] == "task-rest_acq-p2_bold.json",
        'x': df['Filetype'] == "task-rest_acq-p2Xs4X35mm_bold.json",
    }
    mask = masks.get(option)
    return None if mask is None else df[mask]
def analyses(filepath):
    """Run two sanity checks on the QA csv at *filepath*.

    1. Verify column 7 (conversion software version) is identical across rows.
    2. Render a correlation heatmap of all numeric columns to heatmap.svg.
    """
    files = pd.read_csv(filepath)
    # FIRST CHECK: every row must report the same conversion software
    reference = files.iloc[0, 7]
    valid = all(files.iloc[i, 7] == reference for i in files.index)
    print("All Conversion Softwares are the same: " + str(valid))
    # SECOND CHECK: correlation heatmap, saved as an SVG
    palette = sns.diverging_palette(h_neg=240, h_pos=10, n=9, sep=1, center="dark")
    figure = sns.heatmap(files.corr(), cmap=palette, center=0)
    figure.get_figure().savefig('heatmap.svg', pad_inches=0.1)
def add_seasonal_simple(df, col='Date', start='2017-01-01'):
    """Add simplistic yearly-seasonality regressors to *df* in place.

    Two new columns, 'Seasonal (sin)' and 'Seasonal (cos)', encode the phase
    of the year of ``df[col]`` relative to *start* (period 365.25 days).
    """
    days_elapsed = (df[col] - np.datetime64(start)).apply(lambda d: d.days)
    # keep the original operation order so the floats are bit-identical
    phase = days_elapsed * 2 * np.pi / 365.25
    df['Seasonal (sin)'] = np.sin(phase)
    df['Seasonal (cos)'] = np.cos(phase)
def Ftest(model, var_prefix, queue, prints=False):
    """Joint F-test that all coefficients starting with *var_prefix* are zero.

    When the test is significant (p < 0.05) and the prefix is "Shim" or
    "IOPD", the corresponding numbered variable names are appended to
    *queue*.  Returns the f_test result, or None when the model has no
    matching coefficients.
    """
    matching = [name for name in model.params.index if name.startswith(var_prefix)]
    if not matching:
        if prints:
            print("No %s variables in the model" % var_prefix)
        return None
    result = model.f_test(' = '.join(matching) + " = 0")
    if result.pvalue < 0.05:
        # the scanner has 8 shim channels and 6 IOPD components
        count = {"Shim": 8, "IOPD": 6}.get(var_prefix, 0)
        queue.extend(var_prefix + str(n) for n in range(1, count + 1))
    if prints:
        print("%s F-test: %s" % (var_prefix, result))
    return result
# copy pasted from nipy function, renamed from _orthogonalize
def orthogonalize(X):
    """ Orthogonalize every column of design `X` w.r.t preceding columns
    Parameters
    ----------
    X: array of shape(n, p), the data to be orthogonalized
    Returns
    -------
    X: after orthogonalization
    Notes
    -----
    X is changed in place. the columns are not normalized
    """
    # single column (or 1-D input): nothing to orthogonalize against
    if X.size == X.shape[0]:
        return X
    for col in range(1, X.shape[1]):
        preceding = X[:, :col]
        # subtract this column's projection onto the span of the preceding
        # columns (projection matrix built via the pseudo-inverse)
        X[:, col] -= np.dot(X[:, col], np.dot(preceding, np.linalg.pinv(preceding)))
    return X
def regress(target_variable, model_df, plot=True, print_summary=True, add_qa=True, add_seasonal=True, real_data=False):
    """
    creates a regression graph plotted against actual data from certain QA metrics
    Parameters
    ----------
    target_variable: takes str value of either snr_total or tsnr to model against
    model_df : takes pandas DataFrame with data to be used for predictive modeling
    plot : boolean to turn the plotted graph on/off
    print_summary : boolean to turn the printed summary of OLS regression on/off
    add_qa : boolean to add/not add snr_total_qa into list of variables to be modeled
    add_seasonal : boolean to add/not add seasonal variables into list of variables to be modeled
    real_data : boolean to indicate whether or not the pandas DataFrame being fed in is from real data or not
    Returns
    -------
    The fitted statsmodels OLS results object (also produces prints and,
    unless plot=False, a figure saved to test.svg).  If model_df is not a
    DataFrame an error string is returned instead of raising.
    """
    if type(model_df) is not pd.core.frame.DataFrame:
        return "DataFrame must be of type pandas.core.frame.DataFrame"
    ########## adding seasonal curves to the model
    # NOTE(review): mutates the caller's frame (copy happens only below)
    add_seasonal_simple(model_df)
    ########## Converting date to a format that can be parsed by statsmodels API
    model_df = model_df.copy()
    date_df = model_df['Date']
    model_df['Date'] = pd.to_datetime(model_df['Date'], format="%Y%m%d")
    model_df['Date'] = model_df['Date'].map(lambda x: x.toordinal())
    # variable groups handled via joint F-tests rather than per-column t-tests
    f_tests_todo = ['IOPD']
    excluded_cols = ['Date', 'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6', 'Seasonal (sin)', 'Seasonal (cos)']
    seasonal_cols = ['Seasonal (sin)', 'Seasonal (cos)',]
    cols = ['Date']
    if not real_data:
        # preparing model_df for orthogonalization
        cols += ['AcquisitionTime', 'SAR', 'TxRefAmp',
                 'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6']
        if add_seasonal:
            cols += seasonal_cols
    else:
        cols += ['age', 'sex_male', 'PatientWeight',]
        if add_seasonal:
            cols += seasonal_cols
        if add_qa:
            cols += ['snr_total_qa']
        cols += ['IOPD1_real', 'IOPD2_real', 'IOPD3_real', 'IOPD4_real', 'IOPD5_real', 'IOPD6_real']
        if add_seasonal:
            f_tests_todo += ['Seasonal']
    cols.append(target_variable)
    model_df = model_df[cols]
    # There is apparently a sample date (20170626) with SAR being unknown None/NaN
    # For now we will just filter out those samples
    if 'SAR' in model_df.columns:
        finite_SAR = np.isfinite(model_df['SAR'])
        if not np.all(finite_SAR):
            print("Following dates didn't have SAR, excluding them: %s" % str(model_df['Date'][~finite_SAR]))
            model_df = model_df[finite_SAR]
    orthogonalized_df = model_df.drop(target_variable, axis=1) # avoid orthogonalizing target variable
    cols = cols[:-1] # remove target variable from column list
    # orthogonalize dataframe after its conversion to NumPy array, then convert back and replace in original model_df
    model_array = orthogonalize(orthogonalized_df.to_numpy())
    orthogonalized_df = pd.DataFrame(model_array)
    # wrapping cols in a list yields 1-tuples as column labels; they are
    # unwrapped again a few lines below with x[0]
    orthogonalized_df.columns = [cols]
    orthogonalized_df[target_variable] = pd.Series(model_df[target_variable])
    model_df = orthogonalized_df
    # add datetime64[ns] formatted date time
    model_df.columns=[x[0] for x in model_df.columns]
    model_df['Date'] = pd.to_datetime(model_df['Date'])
    model_df = model_df.drop('Date', axis=1)
    model_df['Date'] = date_df
    ########## Assigning independent and dependent variables
    # NOTE(review): Series.iteritems() was removed in pandas 2.0; this
    # requires pandas < 2 (use .items() on upgrade)
    model_vars = []
    for item in model_df.std().iteritems():
        if item[0] != 'Date' and item[0] != target_variable:
            model_vars.append(item[0])
    X = model_df[model_vars]
    y = model_df[target_variable]
    # mean-center the regressors, then add the intercept column
    X = X.sub(X.mean())
    X = sm.add_constant(X)
    model_df = sm.add_constant(model_df)
    ########## modeling predictions
    model = sm.OLS(y, X).fit()
    predictions = model.predict(X)
    ################ CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
    significant_variables = []
    # Ftest appends the grouped variable names to significant_variables as a
    # side effect when the joint test is significant
    F_tests_pvals = {
        v: float(Ftest(model, v, significant_variables).pvalue)
        for v in f_tests_todo
    }
    # get p-values
    for key, value in dict(model.pvalues).items():
        if key not in significant_variables and value < 0.05 or key.lower() == 'const':
            # identify statistically insignificant variables in df
            significant_variables.append(key)
    ######## set statistically insignificant variables to 0, then predict
    partial_fits = {} # partial_fits = {}
    for variable in significant_variables:
        X2 = X.copy(True) # prepare for mods
        for col in X2:
            if col != variable:
                X2[col] = 0
        partial_fits[str(variable)] = model.predict(X2)
    if print_summary:
        print("Statistically significant variables: " + str(significant_variables))
    ################ END CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
    # Functionality for carrying out FDR correction
    outvars = {} # dict containing all predictive variables and their p values from the model
    for var in cols:
        # variables covered by a joint F-test are skipped here and added
        # below from F_tests_pvals instead
        is_f_test = False
        for f_test in f_tests_todo:
            if var.startswith(f_test):
                is_f_test = True
                break
        if is_f_test:
            continue
        if var not in excluded_cols:
            var_pvalue = getattr(model.pvalues, var)
            outvars[var] = var_pvalue
    outvars.update(F_tests_pvals) # add previously conducted F test p values to the outvars
    FDR_tuple = fdrcorrection(list(outvars.values())) # actual FDR test conduct
    t_f = list(FDR_tuple[0]) # split tuple into true/false array
    FDR_pvals = list(FDR_tuple[1]) # split tuple into p value array
    print("FDR-corrected p-values:")
    for (var, value), fdr_pval, is_sign in zip(outvars.items(), FDR_pvals, t_f):
        print("%15s | Original p-value: %8.3g" % (var, value) +
              " | FDR-corrected p-value: %8.3g%s" % (fdr_pval, '**' if is_sign else ''))
    print("\n")
    # giving additional data
    if print_summary:
        print(model.summary())
        print("AIC: " + str(model.aic))
        print("BIC: " + str(model.bic))
    if not plot:
        return model
    ######### converting the above predictions to a format that can be plotted
    plot_df = predictions.to_frame() # new DataFrame containing only data needed for the plot
    plot_df.columns = ['full fit']
    plot_df = plot_df.join(model_df['Date'])
    plot_df = plot_df.join(model_df[target_variable])
    # sum the single-variable partial fits into one "partial fit" column
    summation_df = None
    for key, value in partial_fits.items():
        column = value.to_frame()
        column.columns = ['partial fit']
        if summation_df is None:
            summation_df = column # used to add up the values
        else:
            summation_df = summation_df.add(column, axis=1)
    plot_df = pd.concat([plot_df, summation_df], axis=1)
    # plotting the graph
    plt.figure(figsize=(15, 6))
    ax = sns.lineplot(x="Date", y=target_variable, data=plot_df, color="#000000")
    # plotting partial fit
    ax_partial = plt.twinx()
    sns.lineplot(x="Date", y="full fit", data=plot_df, color="r", ax=ax)
    if partial_fits:
        sns.lineplot(x="Date", y="partial fit", data=plot_df, color="#ffcccc", ax=ax_partial)
        plt.ylim(145, 305)
        ax_partial.legend(['partial fit'])
    ax.legend(['actual', 'full fit'], loc='upper left')
    plt.savefig("test.svg")
    return model
def scrape_var_significance(targets, p_var, df):
    """For every target metric, fit the regression and collect significance.

    For each target in *targets* the model is fit on the real-data columns
    and the p-value of *p_var* is recorded (the joint Seasonal F-test when
    p_var == 'Seasonal'), together with the model R^2.  The returned frame
    also carries an FDR-corrected p-value column.
    """
    dummy = []  # throwaway queue argument for the Seasonal F-test call
    result = pd.DataFrame(columns=['Variable', p_var + ' p value', 'R2 value'])
    raw_pvals = []
    for target in targets:
        input_df = pd.DataFrame(df, columns=['Date', 'sid', 'ses', target, 'age', 'tsnr',
                                'snr_total_qa', 'IOPD1_real', 'IOPD2_real', 'IOPD3_real',
                                'IOPD4_real', 'IOPD5_real', 'IOPD6_real', 'sex_male', 'PatientWeight'])
        model = regress(target, input_df, plot=False, print_summary=False, real_data=True)
        if p_var == 'Seasonal':
            pval = Ftest(model, 'Seasonal', dummy).pvalue
        else:
            pval = model.pvalues[p_var]
        result.loc[len(result)] = [target, pval, model.rsquared]
        raw_pvals.append(pval)
    fdr_df = pd.DataFrame({'FDR-corrected': fdrcorrection(raw_pvals)[1].tolist()})
    return result.join(fdr_df)
| 35.817664 | 124 | 0.613427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,849 | 0.385698 |
f61b72c600c72677f6d1712bc62795a8c1da3213 | 7,482 | py | Python | tests/test_config_defaults.py | nmichlo/eunomia | 5c6ef13b5af5335d4437867953b5eaa872ac56bb | [
"MIT"
] | 3 | 2021-02-07T17:55:46.000Z | 2021-02-15T11:58:36.000Z | tests/test_config_defaults.py | nmichlo/eunomia | 5c6ef13b5af5335d4437867953b5eaa872ac56bb | [
"MIT"
] | null | null | null | tests/test_config_defaults.py | nmichlo/eunomia | 5c6ef13b5af5335d4437867953b5eaa872ac56bb | [
"MIT"
] | null | null | null | import pytest
from eunomia.config._default import Default
from eunomia.config.nodes import ConfigNode
from tests.test_backend_obj import _make_config_group
# ========================================================================= #
# Test YAML & Custom Tags #
# ========================================================================= #
def _resolver(string):
    """Resolve a ConfigNode to its config value; pass any other value through."""
    if not isinstance(string, ConfigNode):
        return string
    return string.get_config_value({}, {}, {})
def _resolve_default(group, default):
    """Resolve *default* against *group*; return (group path, option paths, package).

    `Default.to_resolved_components` is the function under test here.
    """
    grp, options, pkg, _is_self = default.to_resolved_components(group, _resolver)
    return grp.abs_path, [opt.abs_path for opt in options], pkg
def test_defaults():
    """All equivalent spellings of a default (string, node, dict, list) resolve
    to the same (group path, option paths, package) triple, and invalid
    combinations raise KeyError."""
    root = _make_config_group(suboption=None, suboption2=None, package1='<option>', package2='asdf.fdsa')
    d = root.get_option_recursive('default')
    s1 = root.get_group_recursive('subgroup')
    s1o1 = root.get_option_recursive('subgroup/suboption1')
    s1o2 = root.get_option_recursive('subgroup/suboption2')
    s2 = root.get_group_recursive('subgroup2')
    s2s3 = root.get_group_recursive('subgroup2/subgroup3')
    s2s3o1 = root.get_option_recursive('subgroup2/subgroup3/suboption1')
    s2s3o2 = root.get_option_recursive('subgroup2/subgroup3/suboption2')
    # multiple different versions
    assert _resolve_default(root, Default(d)) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({root: d})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({root: [d]})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': 'default'})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': '/default'})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': ['/default']})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': ['default']})) == ('/', ['/default'], ())
    # these should throw errors, option points to option
    with pytest.raises(KeyError, match='key .* is not a group'): _resolve_default(root, Default({'/default': ['default']}))
    with pytest.raises(KeyError, match='key .* is not a group'): _resolve_default(root, Default({d: d}))
    with pytest.raises(KeyError, match='key .* is not a group'): _resolve_default(root, Default({d: [d]}))
    # allow group to represent all suboptions
    assert _resolve_default(root, Default('')) == ('/', ['/default'], ())  # technically this is valid, its just confusing... should it be disabled?
    assert _resolve_default(root, Default('default')) == ('/', ['/default'], ())  # we want relative support in case we use group.absorb for example
    assert _resolve_default(root, Default('/')) == ('/', ['/default'], ())
    assert _resolve_default(root, Default('/default')) == ('/', ['/default'], ())
    assert _resolve_default(root, Default(root)) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': '/'})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': '*'})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({root: '*'})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({root: root})) == ('/', ['/default'], ())
    assert _resolve_default(root, Default({'/': root})) == ('/', ['/default'], ())
    # these should throw errors, group points to group in list
    with pytest.raises(KeyError, match='value in list .* is not an option'): _resolve_default(root, Default({'/': ['subgroup']}))
    with pytest.raises(KeyError, match='value in list .* is not an option'): _resolve_default(root, Default({'/': ['default', 'subgroup']}))
    # check parents -- resolution is independent of the group it starts from
    assert _resolve_default(root, Default(d)) == ('/', ['/default'], ())
    assert _resolve_default(s1, Default(d)) == ('/', ['/default'], ())
    assert _resolve_default(s2, Default(d)) == ('/', ['/default'], ())
    assert _resolve_default(s2s3, Default(d)) == ('/', ['/default'], ())
    # check others
    assert _resolve_default(root, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(s1, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(s2, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(s2s3, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(root, Default({s1: '*'})) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(root, Default({s1: s1})) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    # strings
    assert _resolve_default(root, Default('subgroup')) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(root, Default('/subgroup')) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(root, Default(s1)) == ('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))
    assert _resolve_default(root, Default(s1o1)) == ('/subgroup', ['/subgroup/suboption1'], ('subgroup',))
    assert _resolve_default(root, Default({'/subgroup': 'suboption1'})) == ('/subgroup', ['/subgroup/suboption1'], ('subgroup',))
    assert _resolve_default(root, Default({'subgroup': 'suboption1'})) == ('/subgroup', ['/subgroup/suboption1'], ('subgroup',))
    # relative keys resolve against the *current* group, so these must fail
    with pytest.raises(KeyError, match="Group '/subgroup2/subgroup3' does not have child 'subgroup'"):
        _resolve_default(s2s3, Default({'subgroup': 'suboption1'}))
    with pytest.raises(KeyError, match="Group '/subgroup2' does not have child 'subgroup'"):
        _resolve_default(s2, Default({'subgroup': 'suboption1'}))
    with pytest.raises(KeyError, match="Group '/subgroup' does not have child 'subgroup'"):
        _resolve_default(s1, Default({'subgroup': 'suboption1'}))
def test_defaults_advanced():
    """Entry-point defaults of generated config groups resolve to the expected
    (group path, option paths, package) triples."""
    def resolve_entry_defaults(group):
        # resolve every default declared on the group's 'default' option
        entry_option = group.get_option('default')
        return [_resolve_default(group, d) for d in entry_option.get_unresolved_defaults()]
    assert resolve_entry_defaults(_make_config_group(suboption='suboption1')) == [('/subgroup', ['/subgroup/suboption1'], ('subgroup',))]
    assert resolve_entry_defaults(_make_config_group(suboption='suboption2')) == [('/subgroup', ['/subgroup/suboption2'], ('subgroup',))]
    assert resolve_entry_defaults(_make_config_group(suboption=['suboption2'])) == [('/subgroup', ['/subgroup/suboption2'], ('subgroup',))]
    assert resolve_entry_defaults(_make_config_group(suboption=['suboption1', 'suboption2'])) == [('/subgroup', ['/subgroup/suboption1', '/subgroup/suboption2'], ('subgroup',))]
    assert resolve_entry_defaults(_make_config_group(suboption=None, suboption2='suboption1')) == [('/subgroup2/subgroup3', ['/subgroup2/subgroup3/suboption1'], ('subgroup2', 'subgroup3'))]
    assert resolve_entry_defaults(_make_config_group(suboption=None, suboption2='suboption2')) == [('/subgroup2/subgroup3', ['/subgroup2/subgroup3/suboption2'], ('subgroup2', 'subgroup3'))]
| 66.212389 | 189 | 0.62911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,838 | 0.37931 |
f61ba368dd6ce3592ab58a543cda6b7e4d14ce8d | 3,499 | py | Python | rj_gameplay/stp/coordinator.py | RoboJackets/robocup-software | ae2920b8b98213e625d0565dd67005e7a8595fac | [
"Apache-2.0"
] | 200 | 2015-01-26T01:45:34.000Z | 2022-03-19T13:05:31.000Z | rj_gameplay/stp/coordinator.py | RoboJackets/robocup-software | ae2920b8b98213e625d0565dd67005e7a8595fac | [
"Apache-2.0"
] | 1,254 | 2015-01-03T01:57:35.000Z | 2022-03-16T06:32:21.000Z | rj_gameplay/stp/coordinator.py | RoboJackets/robocup-software | ae2920b8b98213e625d0565dd67005e7a8595fac | [
"Apache-2.0"
] | 206 | 2015-01-21T02:03:18.000Z | 2022-02-01T17:57:46.000Z | """This module contains the implementation of the coordinator."""
from typing import Any, Dict, Optional, Type, List, Callable
import stp.play
import stp.rc as rc
import stp.role.assignment as assignment
import stp.situation
import stp.skill
from rj_msgs import msg
NUM_ROBOTS = 16
class Coordinator:
    """The coordinator is responsible for using SituationAnalyzer to select the best
    play to run, calling tick() on the play to get the list of skills, then ticking
    all of the resulting skills."""
    __slots__ = [
        "_play_selector",
        "_prev_situation",
        "_prev_play",
        "_prev_role_results",
        "_props",
        "_debug_callback",
    ]
    _play_selector: stp.situation.IPlaySelector
    _prev_situation: Optional[stp.situation.ISituation]
    _prev_play: Optional[stp.play.IPlay]
    _prev_role_results: assignment.FlatRoleResults
    _props: Dict[Type[stp.play.IPlay], Any]
    # TODO(1585): Properly handle type annotations for props instead of using Any.
    def __init__(
            self,
            play_selector: stp.situation.IPlaySelector,
            debug_callback: Optional[Callable[
                [stp.play.IPlay, List[stp.skill.ISkill]], None]] = None):
        """Create a coordinator.

        :param play_selector: Selects the play to run for a given world state.
        :param debug_callback: Optional hook called each tick with the active
            play and its skills; may be None (previously a None default was
            accepted but calling it in tick() raised TypeError).
        """
        self._play_selector = play_selector
        self._props = {}
        self._prev_situation = None
        self._prev_play = None
        self._prev_role_results = {}
        self._debug_callback = debug_callback
    def tick(self, world_state: rc.WorldState) -> List[msg.RobotIntent]:
        """Performs 1 ticks of the STP system:
            1. Selects the best play to run given the passed in world state.
            2. Ticks the best play, collecting the list of skills to run.
            3. Ticks the list of skills.
        :param world_state: The current state of the world.
        :return: One RobotIntent per robot (empty motion command when unused).
        """
        # Call situational analysis to see which play should be running.
        cur_situation, cur_play = self._play_selector.select(world_state)
        cur_play_type: Type[stp.play.IPlay] = type(cur_play)
        # Update the props.
        cur_play_props = cur_play.compute_props(self._props.get(cur_play_type, None))
        # Keep running the previous play while it is of the same type and not
        # done.  (On the first tick _prev_play is None, so isinstance against
        # type(None) is False and this branch is skipped.)
        if isinstance(cur_play, type(
                self._prev_play)) and not self._prev_play.is_done(world_state):
            cur_play = self._prev_play
        # This should be checked here or in the play selector, so we can restart a play easily
        # Collect the list of skills from the play.
        new_role_results, skills = cur_play.tick(
            world_state, self._prev_role_results, cur_play_props
        )
        # BUG FIX: _debug_callback defaults to None; guard before calling so a
        # coordinator constructed without a callback does not crash here.
        if self._debug_callback is not None:
            self._debug_callback(cur_play, [entry.skill for entry in skills])
        # Get the list of actions from the skills
        intents = [msg.RobotIntent() for i in range(NUM_ROBOTS)]
        intents_dict = {}
        for skill in skills:
            robot = new_role_results[skill][0].role.robot
            intents_dict.update(skill.skill.tick(robot, world_state, intents[robot.id]))
        # Get the list of robot intents from the actions; robots with no skill
        # get an explicit empty motion command.
        for i in range(NUM_ROBOTS):
            if i in intents_dict.keys():
                intents[i] = intents_dict[i]
            else:
                intents[i].motion_command.empty_command = [msg.EmptyMotionCommand()]
        # Update _prev_*.
        self._prev_situation = cur_situation
        self._prev_play = cur_play
        self._prev_role_results = new_role_results
        self._props[cur_play_type] = cur_play_props
        return intents
| 36.447917 | 98 | 0.654473 | 3,212 | 0.917977 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.300943 |
f61c2aa8d4b0c6d527e4128c6b11bb2329c17ab9 | 651 | py | Python | apf/core/templates/step/package/step.py | alercebroker/APF | d71cec49ee7f4a1667210b70c48501d036d5c08b | [
"MIT"
] | 2 | 2020-03-12T17:27:11.000Z | 2020-05-28T21:21:51.000Z | apf/core/templates/step/package/step.py | alercebroker/APF | d71cec49ee7f4a1667210b70c48501d036d5c08b | [
"MIT"
] | 15 | 2019-11-25T19:51:39.000Z | 2022-03-04T19:00:31.000Z | apf/core/templates/step/package/step.py | alercebroker/APF | d71cec49ee7f4a1667210b70c48501d036d5c08b | [
"MIT"
] | 2 | 2020-06-21T07:55:58.000Z | 2022-02-08T11:09:05.000Z | from apf.core.step import GenericStep
import logging
# NOTE: this file is a code-generation template; the `{{step_name}}`
# placeholders are substituted when a new step project is generated, so
# the file is not importable as-is.
class {{step_name}}(GenericStep):
    """{{step_name}} Description

    Parameters
    ----------
    consumer : GenericConsumer
        Description of parameter `consumer`.
    **step_args : type
        Other args passed to step (DB connections, API requests, etc.)
    """
    def __init__(self,consumer = None, config = None,level = logging.INFO,**step_args):
        # delegate consumer/config/logging setup to the apf GenericStep base class
        super().__init__(consumer,config=config, level=level)

    def execute(self,message):
        # called once per consumed message; subclasses generated from this
        # template implement their per-message processing here
        ################################
        #   Here comes the Step Logic  #
        ################################
        pass
| 26.04 | 87 | 0.560676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.513057 |
f61d22ab24870ca8c56c224b5de2063d76fef120 | 5,107 | py | Python | NLPFeatures.py | Nik0l/UTemPr | cc8c64dd01910194d1f18530a687595dceff30df | [
"MIT"
] | 2 | 2016-02-24T19:36:12.000Z | 2017-08-04T07:46:06.000Z | NLPFeatures.py | Nik0l/UTemPr | cc8c64dd01910194d1f18530a687595dceff30df | [
"MIT"
] | 1 | 2016-03-03T09:18:07.000Z | 2016-03-03T09:18:07.000Z | NLPFeatures.py | Nik0l/UTemPr | cc8c64dd01910194d1f18530a687595dceff30df | [
"MIT"
] | 2 | 2017-04-18T09:56:30.000Z | 2019-11-22T01:40:10.000Z | __author__ = 'nb254'
#requires question_title_length.csv for processing: TITLE, BODY, POSTID, USERID
import nltk, csv
import pandas as pd
import Features as features
#test = "<p>I have an original Arduino UNO R3 that I bought and an <a href='http://arduino.cc/en/Main/ArduinoBoardSerialSingleSided3' rel='nofollow'>Arduino Severino (S3V3)</a> that I"'"ve built.</p><p>I have no problems uploading sketches to the UNO, but sometimes, when uploading to the Severino board, I have to hard reset it at a specific time during the upload process, when the IDE says something like this below:</p><pre><code>avrdude: Version 5.11, compiled on Sep 2 2011 at 19:38:36 Copyright (c) 2000-2005 Brian Dean, http://www.bdmicro.com/ Copyright (c) 2007-2009 Joerg Wunsch System wide configuration file is C:\arduino-1.0.3\hardware/tools/avr/etc avrdude.conf Using Port:.\COM1 Using Programmer : arduino Overriding Baud Rate : 115200 avrdude: Send: 0 [30] [20] avrdude: Send: 0 [30] [20] avrdude: Send: 0 [30] [20] </code></pre> <p>If I don"'"t reset it when one of the <code>Send</code> messages are being displayed, I get the <code>not in sync</code> message, as below:</p><pre><code>avrdude: Recv: avrdude: stk500_getsync(): not in sync: resp=0x00</code></pre><p>Other times, if I'm lucky, I can upload to the Severino board without having to reset it.</p><p>So, my questions are:</p><ol><li><p><strong>Why does that happen? Why Severino needs a hard reset during upload?</strong></p></li><li><p><strong>Why is the problem intermitent?</strong> Why does it happen sometimes and others it doesn't?</p></li><li><p><strong>How can I fix that problem?</strong> Is there a simple change to the Severino design that would fix that?</p></li></ol>"
def NLPExtract(data, file_name):
    """Extract NLP features for every row of `data` and write them to a CSV file.

    data      -- pandas DataFrame with Q_Title, Q_Body, PostId and UserId columns
    file_name -- path of the CSV file to create
    """
    HEADER = features.KEYS + features.NLP_FEATURES
    # the file where all 'wh' and '?' will be saved; a context manager
    # guarantees the handle is flushed and closed even on errors
    # (NOTE: 'wb' is the Python 2 csv convention kept for this Python 2 file)
    with open(file_name, 'wb') as out_file:
        csv_writer = csv.writer(out_file)
        csv_writer.writerow(HEADER)
        i = 0
        for index, row in data.iterrows():
            # TODO: delete the second condition
            # NOTE(review): `i > 0` skips the very first row -- confirm intended
            if i > 0:
                print(row)
                res = NLPFeatures(row)
                csv_writer.writerow(res)
            i = i + 1
# counts words of a particular type, for example, 'WP' - 'wh' words
def NLPFeatures(row):
    """Return [PostId, UserId] followed by the 12 NLP features for one row.

    On a UnicodeDecodeError the 12 feature slots are zero-filled.
    """
    ids = [row['PostId'], row['UserId']]
    try:
        return ids + NLPFeaturesCalc(row['Q_Title'], row['Q_Body'])
    except UnicodeDecodeError:
        # TODO: convert to unicode, currently just ignoring the error
        print('UnicodeDecodeError: ' + row['Q_Title'])
        return ids + [0] * 12
def NLPFeaturesCalc(title, body):
    """Compute 12 NLP features from a question's title and body.

    Returns, in order: wh-word counts (title, body), question-mark counts
    (title, body), active-verb counts (title, body), self-reference counts
    (title, body), URL count, image count, code-block count and total code
    length in characters (the last four computed on the body only).
    """
    # these scalar initializations are immediately overwritten by the
    # two-element lists built below
    num_qm = 0
    num_wp = 0
    symbols = ['VBG', 'VBZ', 'VBP', 'VB', 'VBD', 'VBN'] #verbs
    selfRef = ['I','my','myself', 'we', 'We', 'My', 'Myself', 'i'] # self-reference nouns
    # tokens of the title and the body
    tokens = [nltk.word_tokenize(title), nltk.word_tokenize(body)]
    # part-of-speech tags for title and body tokens
    tagged = [nltk.pos_tag(tokens[0]), nltk.pos_tag(tokens[1])]
    ############### construct features ############################
    num_qm = [Count(tokens[0], '?'), Count(tokens[1], '?')] # number of question marks
    num_wp = [CountP(tagged[0], 'WP'), CountP(tagged[1], 'WP')]# number of 'wh' words
    num_av = [CountV(tagged[0], symbols), CountV(tagged[1], symbols)] #number of active verbs
    num_sr = [CountM(tokens[0], selfRef), CountM(tokens[1], selfRef)] #self-reference
    num_url = body.count('<a href=') # how many url links are there
    num_img = body.count('<img') # how many images are there
    num_cst = body.count('<code>') # how many start code blocks
    num_cen = body.count('</code>') # how many end code blocks
    cod_len = CodeLength(body, num_cst, num_cen) # total length of code in chars
    res = [num_wp[0], num_wp[1], num_qm[0], num_qm[1], num_av[0], num_av[1], num_sr[0], num_sr[1], num_url, num_img, num_cst, cod_len]
    return res
def CountP(data, TYPE):
    """Count the (token, tag) pairs in data whose tag equals TYPE."""
    return sum(1 for pair in data if pair[1] == TYPE)
# counts a number of particular words or symbols, for example, '?'
def Count(data, symbol):
    """Count how many elements of data equal symbol (e.g. '?')."""
    return sum(1 for element in data if element == symbol)
# counts a number of multiple symbols
def CountM(data, symbols):
    """Total occurrences in data of every symbol in symbols."""
    return sum(Count(data, sym) for sym in symbols)
# counts a number of multiple symbols
def CountV(data, symbols):
    """Total count of tagged pairs in data whose tag is any of symbols."""
    return sum(CountP(data, sym) for sym in symbols)
# checks if it is a substring
def SubString(data, text):
    """Return 1 when text occurs inside data, otherwise 0."""
    return 1 if text in data else 0
def CodeLength(text, num_cst, num_cen):
    """Total length in characters of the text inside <code>...</code> blocks.

    text    -- the post body
    num_cst -- number of '<code>' opening tags found in text
    num_cen -- number of '</code>' closing tags found in text

    Returns 0 when the tag counts are mismatched or either is zero.
    NOTE: the `<>` operators of the original were replaced with `!=`,
    which is valid in both Python 2 and Python 3; behavior is unchanged.
    """
    cod_len = 0
    if num_cst != num_cen or num_cst * num_cen == 0:
        return cod_len
    else:
        # strip all spaces, then turn each tag into a single space so that
        # splitting alternates non-code / code segments; the odd-indexed
        # segments are the code bodies
        s = text.replace(" ", "")
        s = s.replace("</code>", " ")
        s = s.replace("<code>", " ")
        s = s.split()
        i = 0
        for line in s:
            if i % 2 != 0:
                cod_len = cod_len + len(line)
            i = i + 1
        return cod_len
| 45.598214 | 1,568 | 0.649305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,693 | 0.527315 |
f61e64e60bae831530e8e51ff7a2f490426146d5 | 4,925 | py | Python | Linux/.local/share/ulauncher/extensions/com.github.ulauncher.ulauncher-kill/main.py | altdx/dotfiles | ff058d343fc2c247f6cb3dfa53edd43f36c279cd | [
"MIT"
] | null | null | null | Linux/.local/share/ulauncher/extensions/com.github.ulauncher.ulauncher-kill/main.py | altdx/dotfiles | ff058d343fc2c247f6cb3dfa53edd43f36c279cd | [
"MIT"
] | null | null | null | Linux/.local/share/ulauncher/extensions/com.github.ulauncher.ulauncher-kill/main.py | altdx/dotfiles | ff058d343fc2c247f6cb3dfa53edd43f36c279cd | [
"MIT"
] | null | null | null | import os
import logging
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Notify', '0.7')
from locale import atof, setlocale, LC_NUMERIC
from gi.repository import Notify
from itertools import islice
from subprocess import check_output, check_call, CalledProcessError
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionSmallResultItem import ExtensionSmallResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
logger = logging.getLogger(__name__)
ext_icon = 'images/icon.png'
exec_icon = 'images/executable.png'
dead_icon = 'images/dead.png'
class ProcessKillerExtension(Extension):
    """Ulauncher extension that lists processes and kills the selected one."""

    def __init__(self):
        super().__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
        # use the OS default locale so atof() parses ps's %CPU column
        setlocale(LC_NUMERIC, '')

    def show_notification(self, title, text=None, icon=ext_icon):
        """Display a desktop notification with the given title, body and icon."""
        logger.debug('Show notification: %s' % text)
        full_icon_path = os.path.join(os.path.dirname(__file__), icon)
        Notify.init("KillerExtension")
        Notify.Notification.new(title, text, full_icon_path).show()
class KeywordQueryEventListener(EventListener):
    """Renders up to 15 running processes matching the typed query."""

    def on_event(self, event, extension):
        return RenderResultListAction(list(islice(self.generate_results(event), 15)))

    def generate_results(self, event):
        """Yield one result item per process, filtered by the query argument."""
        query = event.get_argument()
        for (pid, cpu, cmd) in get_process_list():
            # when a query was typed, show only commands containing it
            if query and query not in cmd:
                continue
            # prepend the CPU usage only when it is noticeable
            label = '[%s%% CPU] %s' % (cpu, cmd) if cpu > 1 else cmd
            enter_data = {'alt_enter': False, 'pid': pid, 'cmd': cmd}
            alt_enter_data = dict(enter_data, alt_enter=True)
            yield ExtensionSmallResultItem(
                icon=exec_icon,
                name=label,
                on_enter=ExtensionCustomAction(enter_data),
                on_alt_enter=ExtensionCustomAction(alt_enter_data, keep_app_open=True))
class ItemEnterEventListener(EventListener):
    """Handles Enter (kill with chosen signal) and Alt+Enter (pick a signal)."""

    def kill(self, extension, pid, signal):
        """Send `signal` to process `pid` via the `kill` command and notify the user."""
        cmd = ['kill', '-s', signal, pid]
        logger.info(' '.join(cmd))
        try:
            # check_call raises CalledProcessError on a non-zero exit status,
            # so comparing its return value against 0 (as the original did)
            # was a discarded, dead expression
            check_call(cmd)
            extension.show_notification("Done", "It's dead now", icon=dead_icon)
        except CalledProcessError as e:
            extension.show_notification("Error", "'kill' returned code %s" % e.returncode)
        except Exception as e:
            logger.error('%s: %s' % (type(e).__name__, e))
            extension.show_notification("Error", "Check the logs")
            raise

    def show_signal_options(self, data):
        """Render the list of kill signals (TERM/KILL/HUP) for the selected process."""
        result_items = []
        options = [('TERM', '15 TERM (default)'), ('KILL', '9 KILL'), ('HUP', '1 HUP')]
        for sig, name in options:
            on_enter = data.copy()
            on_enter['alt_enter'] = False
            on_enter['signal'] = sig
            result_items.append(ExtensionSmallResultItem(icon=ext_icon,
                                                         name=name,
                                                         highlightable=False,
                                                         on_enter=ExtensionCustomAction(on_enter)))
        return RenderResultListAction(result_items)

    def on_event(self, event, extension):
        """Dispatch: Alt+Enter shows signal options, Enter kills with the chosen signal."""
        data = event.get_data()
        if data['alt_enter']:
            return self.show_signal_options(data)
        else:
            self.kill(extension, data['pid'], data.get('signal', 'TERM'))
def get_process_list():
    """Yield (pid, cpu, cmd) tuples for running processes, highest CPU first.

    pid is a string, cpu a float parsed with the OS locale (atof), and cmd
    the full command line joined back into one string.
    """
    ps_env = dict(os.environ, COLUMNS='200')
    output = check_output(['ps', '-eo', 'pid,%cpu,cmd', '--sort', '-%cpu'],
                          env=ps_env).decode('utf8')
    for row in output.split('\n'):
        fields = row.split()
        try:
            int(fields[0])
        except (ValueError, IndexError):
            # header or blank line -- first column is not a PID
            continue
        command = ' '.join(fields[2:])
        if 'top -bn' in command:
            # NOTE(review): 'top -bn' commands are filtered out -- presumably
            # a monitoring helper's own process; confirm the rationale
            continue
        yield (fields[0], atof(fields[1]), command)
if __name__ == '__main__':
    # start the extension's event loop when the script is run directly
    ProcessKillerExtension().run()
| 38.476563 | 120 | 0.596954 | 3,398 | 0.689949 | 1,671 | 0.339289 | 0 | 0 | 0 | 0 | 552 | 0.112081 |
f61eff0aa27ea8fbb3e6d4a3ba4c910c4db8c5f0 | 2,037 | py | Python | toutiao-backend/toutiao/resources/user/profile.py | weiyunfei520/toutiao | f1fdb4dd88aff0f67d11e19b13bd1a90a26ac642 | [
"MIT"
] | null | null | null | toutiao-backend/toutiao/resources/user/profile.py | weiyunfei520/toutiao | f1fdb4dd88aff0f67d11e19b13bd1a90a26ac642 | [
"MIT"
] | null | null | null | toutiao-backend/toutiao/resources/user/profile.py | weiyunfei520/toutiao | f1fdb4dd88aff0f67d11e19b13bd1a90a26ac642 | [
"MIT"
] | null | null | null | from flask import current_app
from flask import g
from flask import request
from flask_restful.reqparse import RequestParser
from flask_restful import Resource
from models import db
from models.user import User
from utils.decorators import login_required
from utils.parser import image_file
from utils.storage import upload_image
class PhotoResource(Resource):
    """PATCH endpoint that updates the current user's profile photo."""
    # require an authenticated user for every method on this resource
    method_decorators = [login_required]

    def patch(self):
        # removed leftover debug `print(request.__dict__)` that dumped the
        # request internals to stdout on every call
        # parse and validate the uploaded file from the multipart form data
        rp = RequestParser()
        rp.add_argument('photo', type=image_file, required=True, location='files')
        args_dict = rp.parse_args()
        # file-like object of the uploaded image
        photo = args_dict['photo']
        # upload to Qiniu cloud storage; the returned key is the image's URL name
        file_name = upload_image(photo.read())
        # persist the photo key on the user record
        User.query.filter(User.id == g.user_id).update({'profile_photo': file_name})
        db.session.commit()
        # respond with the full public URL of the photo
        ret_dict = {
            'photo_url': '{}/{}'.format(current_app.config['QINIU_DOMAIN'], file_name)
        }
        return ret_dict
from cache.user import UserProfileCache
class CurrentUserResource(Resource):
    """GET endpoint returning the profile of the currently logged-in user."""
    # require an authenticated user; the request hook
    # utils.middlewares.jwt_authentication has already stored the token's
    # user_id in g.user_id
    method_decorators = [login_required]

    def get(self):
        # fetch the profile from the cache layer (backed by persistent storage);
        # removed the leftover debug prints of the cached payload
        ret = UserProfileCache(user_id=g.user_id).get()
        ret_dict = {
            'user_id': g.user_id,
            'user_name': ret['name'],
            'user_mobile': ret['mobile'],
            'user_photo': ret['profile_photo'],
            'certificate': ret['certificate'],
            'introduction': ret['introduction'],
            # NOTE(review): counts are hard-coded to 0 -- presumably not
            # implemented yet; confirm before relying on them
            'arts_count': 0,
            'following_count': 0
        }
        return ret_dict
def delete(self):
ret = UserProfileCache(user_id=g.user_id).exists()
if ret:
UserProfileCache(user_id=g.user_id).clear()
return {'message': 'ok'} | 31.338462 | 86 | 0.634757 | 1,906 | 0.835598 | 0 | 0 | 0 | 0 | 0 | 0 | 700 | 0.306883 |
f620b14338e61be682d35b0543506d9c7f9a1c6d | 905 | py | Python | manim/utils/unit.py | PhotonSpheres/manim | 7399c24b33095e29633fd75460d13eae5703cba9 | [
"MIT"
] | 9,497 | 2020-05-19T04:05:51.000Z | 2022-03-31T19:00:02.000Z | manim/utils/unit.py | PhotonSpheres/manim | 7399c24b33095e29633fd75460d13eae5703cba9 | [
"MIT"
] | 2,052 | 2020-05-19T03:35:26.000Z | 2022-03-31T16:18:06.000Z | manim/utils/unit.py | PhotonSpheres/manim | 7399c24b33095e29633fd75460d13eae5703cba9 | [
"MIT"
] | 1,016 | 2020-05-20T01:16:11.000Z | 2022-03-30T16:47:14.000Z | """Implement the Unit class."""
import numpy as np
from .. import config, constants
__all__ = ["Pixels", "Degrees", "Munits", "Percent"]
class _PixelUnits:
    """Unit converting a pixel count into scene units.

    Multiplying a number by an instance (on either side) scales it by the
    frame-width-to-pixel-width ratio from the global config.
    """

    def __mul__(self, val):
        return val * config.frame_width / config.pixel_width

    __rmul__ = __mul__
class Percent:
    """Unit representing a percentage of the frame along a given axis.

    Multiplying a number by a Percent instance converts that percentage of
    the frame's width (X axis) or height (Y axis) into scene units; the Z
    axis has no defined length and raises NotImplementedError.
    """

    def __init__(self, axis):
        if np.array_equal(axis, constants.X_AXIS):
            self.length = config.frame_width
        elif np.array_equal(axis, constants.Y_AXIS):
            self.length = config.frame_height
        elif np.array_equal(axis, constants.Z_AXIS):
            raise NotImplementedError("length of Z axis is undefined")

    def __mul__(self, val):
        return val / 100 * self.length

    __rmul__ = __mul__
# singleton instance: `5 * Pixels` converts 5 pixels into scene units
Pixels = _PixelUnits()
# multiplying an angle in degrees by this constant yields radians
Degrees = constants.PI / 180
# one Munit is the base scene unit, so the conversion factor is 1
Munits = 1
| 24.459459 | 70 | 0.653039 | 694 | 0.766851 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.106077 |
f6228da345431023f7834e4201a3bb44d8f2ebe1 | 112 | py | Python | backend/auth_app/serializers/__init__.py | nitinmehra/TodoApp | e1e8938330df6b59b8b064ac1a2dde61744d8392 | [
"MIT"
] | null | null | null | backend/auth_app/serializers/__init__.py | nitinmehra/TodoApp | e1e8938330df6b59b8b064ac1a2dde61744d8392 | [
"MIT"
] | null | null | null | backend/auth_app/serializers/__init__.py | nitinmehra/TodoApp | e1e8938330df6b59b8b064ac1a2dde61744d8392 | [
"MIT"
] | null | null | null | from .auth_serializer import MyTokenObtainPairSerializer
from .register_serializer import UserRegisterSerializer | 56 | 56 | 0.919643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f6232a7604d0160345749d0571f7cdfb8e44d3c3 | 3,137 | py | Python | src/sensors.py | andrew-chang-dewitt/rpi-pir2mqtt-docker | 3da860fed40cd0ec7395ac4864e38d5eb38e0634 | [
"MIT"
] | 1 | 2020-04-13T13:20:50.000Z | 2020-04-13T13:20:50.000Z | src/sensors.py | andrew-chang-dewitt/rpi-security-gpio2mqtt | 3da860fed40cd0ec7395ac4864e38d5eb38e0634 | [
"MIT"
] | 17 | 2020-01-31T21:07:31.000Z | 2020-02-11T15:16:58.000Z | src/sensors.py | andrew-chang-dewitt/rpi-security-gpio2mqtt | 3da860fed40cd0ec7395ac4864e38d5eb38e0634 | [
"MIT"
] | null | null | null | """A module for defining Sensor types.
Classes:
Sensor -- Base Sensor class, all unknown types default to this.
MotionSensor -- Subclass of Sensor, for HC-SR501 type PIR sensors.
ReedSwitch -- Subclass of Sensor, for basic door/window reed switches.
Functions:
build_sensor -- Build & return a Sensor or subclass based from dict.
"""
from typing import Type, Callable
class Sensor:
    """Base Sensor class; unknown sensor types default to this.

    Attributes:
        name -- identifies the sensor in user-facing applications
        type -- config key used to pick a subclass for behavior
        group -- grouping label used in user-facing applications
        pin -- the sensor's GPIO pin number (Broadcom numbering)
        topic -- 'group/type/name', used for routing messages

    Methods:
        determine_state -- report whether the sensor is 'TRIPPED' or 'OK'.

    Properties:
        pull_up -- read-only, whether a pull up resistor is required.
        pull_down -- read-only, whether a pull down resistor is required.
    """

    def __init__(self, data: dict):
        """Populate attributes from the keys of a config dict."""
        self.name = data['name']  # type: str
        self.type = data['type']  # type: str
        self.group = data['group']  # type: str
        self.pin = data['pin']  # type: int
        self.topic = '/'.join([self.group, self.type, self.name])  # type: str

    def determine_state(
            self, check_state_callback: Callable[[int], bool]) -> str:
        """Report 'TRIPPED' when the callback reads the pin as active, else 'OK'."""
        if check_state_callback(self.pin):
            return "TRIPPED"
        return "OK"

    @property
    def pull_up(self):
        """Read-only; base sensors need no pull up resistor."""
        return False

    @property
    def pull_down(self):
        """Read-only; base sensors need no pull down resistor."""
        return False
class MotionSensor(Sensor):
    """Subclass of Sensor, for HC-SR501 type PIR sensors.

    Currently has no behavior different from Sensor; it exists so the
    'motion' sensor type maps to a distinct class in build_sensor.
    """
class ReedSwitch(Sensor):
    """Subclass of Sensor, for basic door/window reed switches.

    Differs from Sensor in two ways:
    1. State reporting is inverted ('OK' where Sensor would say 'TRIPPED').
    2. It overrides the pull_down property to True.
       NOTE(review): the original docs said "requires a pull up resistor"
       while the code overrides pull_down -- confirm which resistor is meant.
    """

    def determine_state(self, check_state_callback):
        """See Sensor.determine_state; reed switch readings are inverted."""
        if check_state_callback(self.pin):
            return "OK"
        return "TRIPPED"

    @property
    def pull_down(self):
        """See Sensor.pull_down."""
        return True
def build_sensor(sensor: dict) -> Sensor:
    """Build & return a Sensor (or subclass) instance from a config dict.

    Arguments:
        sensor -- dict with sensor configuration; its 'type' key is
                  normalized in place to 'default' when missing or None
                  (Sensor.__init__ reads it back from the dict).

    The 'type' value selects the class: 'motion' -> MotionSensor,
    'door'/'window' -> ReedSwitch, anything else -> Sensor.

    Note: the return annotation was corrected from Type[Sensor] to Sensor --
    the function returns an *instance*, not a class.
    """
    if sensor.get('type') is None:
        sensor['type'] = 'default'
    types = {
        'motion': MotionSensor,
        'door': ReedSwitch,
        'window': ReedSwitch,
    }
    return types.get(sensor['type'], Sensor)(sensor)
| 31.37 | 79 | 0.634364 | 2,256 | 0.719158 | 0 | 0 | 356 | 0.113484 | 0 | 0 | 1,975 | 0.629582 |
f62464c4d4e238629f145959573630850492be9a | 1,940 | py | Python | main.py | TwoShock/Graphics-Card-Web-Scraper | 7161a63dca103826906388c61dbaecaff2f73238 | [
"MIT"
] | null | null | null | main.py | TwoShock/Graphics-Card-Web-Scraper | 7161a63dca103826906388c61dbaecaff2f73238 | [
"MIT"
] | null | null | null | main.py | TwoShock/Graphics-Card-Web-Scraper | 7161a63dca103826906388c61dbaecaff2f73238 | [
"MIT"
] | null | null | null | from urllib.request import urlopen
from bs4 import BeautifulSoup as soup
import re
import pandas as pd
def getContainerInfo(container):
    """Extract (name, brand, rating, rating count, prices) from one item cell.

    container is a BeautifulSoup tag for a Newegg 'item-container' div.
    rating and ratingCount are None when the item has no rating anchor.
    price is the list of number strings found in the price cell -- presumably
    the current price first, optionally followed by a second amount
    (TODO confirm against the live page markup).
    """
    name = container.img['title']
    itemInfo = container.find('div',class_='item-info')
    itemBranding = itemInfo.find('div',class_ = 'item-branding')
    brandName = itemBranding.img['title']
    ratingTag = itemBranding.find("a",class_="item-rating")
    # the rating anchor's title contains a digit 0-5; absent anchor -> None
    rating = re.search('[0-5]',ratingTag['title']).group() if ratingTag != None else None
    # review count is the first run of digits in the rating-count span
    ratingCount = re.search('\d+',itemBranding.find("span",class_='item-rating-num').get_text()).group() if ratingTag != None else None
    priceContainer = itemInfo.find("div",class_="item-action").ul.find("li",class_="price-current")
    # pull every number with optional thousands separators and two decimals
    price = re.findall('\d{1,3}(?:[.,]\d{3})*(?:[.,]\d{2})?',priceContainer.get_text())
    return name,brandName,rating,ratingCount,price
def convertToPandasDF(data, columns):
    """Convert scraped item tuples into a DataFrame with the given column names.

    data    -- iterable of (name, brand, rating, rating_count, price_list)
               tuples, where price_list holds the current price first and,
               optionally, a second amount.
    columns -- six column labels, applied in that order.
    """
    frame_data = {
        columns[0]: [row[0] for row in data],
        columns[1]: [row[1] for row in data],
        columns[2]: [row[2] for row in data],
        columns[3]: [row[3] for row in data],
        columns[4]: [row[4][0] for row in data],
        # a second price entry is treated as the offer amount; otherwise None
        columns[5]: [row[4][1] if len(row[4]) == 2 else None for row in data],
    }
    return pd.DataFrame(frame_data)
def main():
    """Scrape the Newegg graphics-card listing and write item data to out.xlsx."""
    url = 'https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics%20card'
    # fetch and parse the listing page
    response = urlopen(url)
    html = response.read()
    parsedHtml = soup(html,"html.parser")
    # one 'item-container' div per product cell
    containerDivs = parsedHtml.find_all("div",class_= "item-container")
    data = [getContainerInfo(container) for container in containerDivs]
    columns = ['Product Name','Brand Name','Average User Rating','User Count','Price','Offer Count']
    df = convertToPandasDF(data,columns)
    df.to_excel("out.xlsx")
if __name__ == "__main__":
main() | 40.416667 | 135 | 0.655155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.206186 |
f62731a6aa290c859ecde18b54feaa791a6e0a5d | 1,228 | py | Python | python/qisrc/actions/checkout.py | aldebaran/qibuild | efea6fa3744664348717fe5e8df708a3cf392072 | [
"BSD-3-Clause"
] | 51 | 2015-01-05T14:35:13.000Z | 2021-07-27T06:46:59.000Z | python/qisrc/actions/checkout.py | aldebaran/qibuild | efea6fa3744664348717fe5e8df708a3cf392072 | [
"BSD-3-Clause"
] | 104 | 2015-04-09T10:48:42.000Z | 2020-09-16T16:33:29.000Z | python/qisrc/actions/checkout.py | aldebaran/qibuild | efea6fa3744664348717fe5e8df708a3cf392072 | [
"BSD-3-Clause"
] | 46 | 2015-01-05T14:35:16.000Z | 2022-02-13T20:39:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
Change the branch of the manifest
Also, checkout the correct branch for every git project in the worktree
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import sys
import qisrc.parsers
def configure_parser(parser):
""" Configure Parser """
qisrc.parsers.worktree_parser(parser)
group = parser.add_argument_group("checkout options")
group.add_argument("branch")
group.add_argument("-f", "--force", action="store_true", dest="force",
help="Discard local changes. Use with caution")
parser.set_defaults(force=False)
def do(args):
""" Main Entry Point """
branch = args.branch
git_worktree = qisrc.parsers.get_git_worktree(args)
manifest = git_worktree.manifest
groups = manifest.groups
branch = args.branch
git_worktree.configure_manifest(manifest.url, groups=groups, branch=branch)
ok = git_worktree.checkout(branch, force=args.force)
if not ok:
sys.exit(1)
| 31.487179 | 84 | 0.717427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 453 | 0.368893 |
f62827a67ce6f1302ffe03104ae4a729a1a0c806 | 961 | py | Python | experiments/calc_avg_bohb_time.py | automl/learning_environments | c1c7cec4506ba140b19c215f4bf7a8bd222b32a0 | [
"MIT"
] | 11 | 2021-02-06T15:26:12.000Z | 2022-03-05T11:24:17.000Z | experiments/calc_avg_bohb_time.py | automl/learning_environments | c1c7cec4506ba140b19c215f4bf7a8bd222b32a0 | [
"MIT"
] | 2 | 2022-02-10T00:01:37.000Z | 2022-03-12T01:07:59.000Z | experiments/calc_avg_bohb_time.py | automl/learning_environments | c1c7cec4506ba140b19c215f4bf7a8bd222b32a0 | [
"MIT"
] | 1 | 2022-03-14T02:37:48.000Z | 2022-03-14T02:37:48.000Z | import statistics
import hpbandster.core.result as hpres
# smallest value is best -> reverse_loss = True
# largest value is best -> reverse_loss = False
REVERSE_LOSS = True
EXP_LOSS = 1
OUTLIER_PERC_WORST = 0.1
OUTLIER_PERC_BEST = 0.0
def analyze_bohb(log_dir):
# load the example run from the log files
result = hpres.logged_results_to_HBS_result(log_dir)
# get all executed runs
all_runs = result.get_all_runs()
if __name__ == '__main__':
# load the example run from the log files
result = hpres.logged_results_to_HBS_result('../results/GTNC_evaluate_cmc_subopt_2021-01-21-09_5')
# get all executed runs
all_runs = result.get_all_runs()
t_arr = []
for dat in result.data.values():
for time_stamp in dat.time_stamps.values():
ts = time_stamp['started']
te = time_stamp['finished']
if te-ts > 60:
t_arr.append(te-ts)
print(statistics.mean(t_arr)) | 25.972973 | 102 | 0.680541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.316337 |
f62b330cff46dc5c4c549afab05c1b96b01df850 | 2,926 | py | Python | radio/preprocessing/augmented_batch.py | dmitrysarov/radio-1 | c0162e7e2f39018d6ca8cf7d8ef2e37c22dd40fc | [
"Apache-2.0"
] | null | null | null | radio/preprocessing/augmented_batch.py | dmitrysarov/radio-1 | c0162e7e2f39018d6ca8cf7d8ef2e37c22dd40fc | [
"Apache-2.0"
] | null | null | null | radio/preprocessing/augmented_batch.py | dmitrysarov/radio-1 | c0162e7e2f39018d6ca8cf7d8ef2e37c22dd40fc | [
"Apache-2.0"
] | null | null | null | """ Contains CTImagesAugmentedBatch: masked ct-batch with some augmentation actions """
import numpy as np
from .ct_masked_batch import CTImagesMaskedBatch
from ..dataset.dataset import action, Sampler # pylint: disable=no-name-in-module
from .mask import insert_cropped
class CTImagesAugmentedBatch(CTImagesMaskedBatch):
""" Masked ct-batch with augmenting actions.
Adds cutout, additive/multiplicative noise - augmentations.
"""
@action
def init_with_ones(self, shape=(32, 64, 64)):
""" Loader for tests, fills batch with ones.
"""
self.images = np.ones(shape=(len(self) * shape[0], *shape[1:]))
self._bounds = np.cumsum((0, ) + (shape[0], ) * len(self))
return self
@action
def cutout(self, positions, sizes, components='images', fill_with=0):
""" Fill a box from each scan with some density-value.
Parameters:
-----------
positions : ndarray
array of starting positions of boxes, has shape (len(batch), 3).
sizes : ndarray
array of box-sizes, has shape (len(batch), 3).
components : str or list
names of components to apply cutout
fill_with : ndarray, float or string
value or filling scheme. Value can be float or an array of the shape,
that can be broadcasted to box-shape. When string, can be either scan-wise
mean ('mean') or scan-wise minimum/maximum ('min', 'max').
"""
if isinstance(components, str):
components = [components]
for i in range(len(self)):
size, position = sizes[i].astype(np.int64), positions[i].astype(np.int64)
for component in components:
item = self.get(i, component)
# parse filling scheme
fill_with = getattr(np, fill_with)(item) if isinstance(fill_with, str) else fill_with
filled = np.ones(shape=size) * fill_with
# perform insertion
insert_cropped(item, filled, position)
return self
@action
def apply_noise(self, noise, op='+', component='images'):
""" For each item apply the noise to the item using op.
Parameters:
-----------
noise : Sampler/ndarray
1d-sampler/ndarray of shape=(len(batch), item.shape).
op : str
operation to perform on item. Can be either '+', '-', '*'.
component : str
component to add noise to.
"""
# prepare noise-array
all_items = getattr(self, component)
noise = noise.sample(size=all_items.size).reshape(all_items.shape) if isinstance(noise, Sampler) else noise
# parse and apply op in-place
op_dict = {'+': '__add__', '*': '__mul__', '-': '__sub__'}
op = op_dict[op]
all_items[:] = getattr(all_items, op)(noise)
return self
| 37.512821 | 115 | 0.598086 | 2,650 | 0.905673 | 0 | 0 | 2,461 | 0.84108 | 0 | 0 | 1,459 | 0.498633 |
f62d98d9378ef48e987146d478ae9033a3dcf113 | 9,155 | py | Python | tf2/ThickCylinder_DEM.py | ISM-Weimar/DeepEnergyMethods | 3a51131e0827446bf5986d698aaac396c7fa5037 | [
"MIT"
] | 15 | 2019-09-11T01:35:03.000Z | 2022-03-25T03:17:59.000Z | tf2/ThickCylinder_DEM.py | ISM-Weimar/DeepEnergyMethods | 3a51131e0827446bf5986d698aaac396c7fa5037 | [
"MIT"
] | 2 | 2020-10-16T19:06:07.000Z | 2022-01-06T16:23:49.000Z | tf2/ThickCylinder_DEM.py | ISM-Weimar/DeepEnergyMethods | 3a51131e0827446bf5986d698aaac396c7fa5037 | [
"MIT"
] | 15 | 2019-09-11T01:35:05.000Z | 2022-02-18T21:18:55.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
2D linear elasticity example
Solve the equilibrium equation -\nabla \cdot \sigma(x) = f(x) for x\in\Omega
with the strain-displacement equation:
\epsilon = 1/2(\nabla u + \nabla u^T)
and the constitutive law:
\sigma = 2*\mu*\epsilon + \lambda*(\nabla\cdot u)I,
where \mu and \lambda are Lame constants, I is the identity tensor.
Dirichlet boundary conditions: u(x)=\hat{u} for x\in\Gamma_D
Neumann boundary conditions: \sigma n = \hat{t} for x\in \Gamma_N,
where n is the normal vector.
For this example:
\Omega is a quarter annulus in the 1st quadrant, centered at origin
with inner radius 1, outer radius 4
Symmetry (Dirichlet) boundary conditions on the bottom and left
u_x(x,y) = 0 for x=0
u_y(x,y) = 0 for y=0
and pressure boundary conditions for the curved boundaries:
\sigma n = P_int n on the interior boundary with P_int = 10 MPa
\sigma n = P_ext n on the exterior boundary with P_ext = 0 MPa.
Use DEM
"""
import tensorflow as tf
import numpy as np
import time
from utils.tfp_loss import tfp_function_factory
from utils.Geom_examples import QuarterAnnulus
from utils.Solvers import Elasticity2D_DEM_dist
from utils.Plotting import plot_field_2d
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
#make figures bigger on HiDPI monitors
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
np.random.seed(42)
tf.random.set_seed(42)
class Elast_ThickCylinder(Elasticity2D_DEM_dist):
    '''
    Class including the symmetry boundary conditions for the thick cylinder problem
    '''
    def __init__(self, layers, train_op, num_epoch, print_epoch, model_data, data_type):
        # delegate network/optimizer setup to the DEM elasticity base class
        super().__init__(layers, train_op, num_epoch, print_epoch, model_data, data_type)

    @tf.function
    def dirichletBound(self, X, xPhys, yPhys):
        # multiply by x,y for strong imposition of boundary conditions:
        # u_x vanishes on the x=0 edge and u_y on the y=0 edge, matching the
        # symmetry conditions described in the module docstring
        u_val = X[:,0:1]
        v_val = X[:,1:2]
        u_val = xPhys*u_val
        v_val = yPhys*v_val
        return u_val, v_val
#define the model properties
model_data = dict()
model_data["radius_int"] = 1.
model_data["radius_ext"] = 4.
model_data["E"] = 1e2
model_data["nu"] = 0.3
model_data["state"] = "plane strain"
model_data["inner_pressure"] = 10.
model_data["outer_pressure"] = 0.
# generate the model geometry
geomDomain = QuarterAnnulus(model_data["radius_int"], model_data["radius_ext"])
# define the input and output data set
numElemU = 10
numElemV = 10
numGauss = 5
#xPhys, yPhys = myQuad.getRandomIntPts(numPtsU*numPtsV)
xPhys, yPhys, Wint = geomDomain.getQuadIntPts(numElemU, numElemV, numGauss)
data_type = "float32"
Xint = np.concatenate((xPhys,yPhys),axis=1).astype(data_type)
Wint = np.array(Wint).astype(data_type)
# prepare boundary points in the fromat Xbnd = [Xcoord, Ycoord, norm_x, norm_y] and
# Wbnd for boundary integration weights and
# Ybnd = [trac_x, trac_y], where Xcoord, Ycoord are the x and y coordinates of the point,
# norm_x, norm_y are the x and y components of the unit normals
# trac_x, trac_y are the x and y components of the traction vector at each point
# inner curved boundary, include both x and y directions
xPhysBnd, yPhysBnd , xNorm, yNorm, Wbnd = geomDomain.getQuadEdgePts(numElemV, numGauss, 4)
Xbnd = np.concatenate((xPhysBnd, yPhysBnd), axis=1).astype(data_type)
Wbnd = np.array(Wbnd).astype(data_type)
plt.scatter(xPhys, yPhys, s=0.1)
plt.scatter(xPhysBnd, yPhysBnd, s=1, c='red')
plt.title("Boundary and interior integration points")
plt.show()
# define loading
Ybnd_x = -model_data["inner_pressure"]*xNorm
Ybnd_y = -model_data["inner_pressure"]*yNorm
Ybnd = np.concatenate((Ybnd_x, Ybnd_y), axis=1).astype(data_type)
#define the model
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(20, "swish")
l2 = tf.keras.layers.Dense(20, "swish")
l3 = tf.keras.layers.Dense(20, "swish")
l4 = tf.keras.layers.Dense(2, None)
train_op = tf.keras.optimizers.Adam()
train_op2 = "TFP-BFGS"
num_epoch = 1000
print_epoch = 100
pred_model = Elast_ThickCylinder([l1, l2, l3, l4], train_op, num_epoch,
print_epoch, model_data, data_type)
#convert the training data to tensors
Xint_tf = tf.convert_to_tensor(Xint)
Wint_tf = tf.convert_to_tensor(Wint)
Xbnd_tf = tf.convert_to_tensor(Xbnd)
Wbnd_tf = tf.convert_to_tensor(Wbnd)
Ybnd_tf = tf.convert_to_tensor(Ybnd)
#training
t0 = time.time()
print("Training (ADAM)...")
pred_model.network_learn(Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
t1 = time.time()
print("Time taken (ADAM)", t1-t0, "seconds")
print("Training (TFP-BFGS)...")
loss_func = tfp_function_factory(pred_model, Xint_tf, Wint_tf, Xbnd_tf, Wbnd_tf, Ybnd_tf)
# convert initial model parameters to a 1D tf.Tensor
init_params = tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables)
# train the model with L-BFGS solver
results = tfp.optimizer.bfgs_minimize(
value_and_gradients_function=loss_func, initial_position=init_params,
max_iterations=10000, tolerance=1e-14)
# after training, the final optimized parameters are still in results.position
# so we have to manually put them back to the model
loss_func.assign_new_model_parameters(results.position)
t2 = time.time()
print("Time taken (BFGS)", t2-t1, "seconds")
print("Time taken (all)", t2-t0, "seconds")
def cart2pol(x, y):
    """Convert Cartesian coordinates (x, y) to polar (rho, phi)."""
    x_arr = np.array(x)
    y_arr = np.array(y)
    radius = np.sqrt(x_arr**2 + y_arr**2)
    angle = np.arctan2(y, x)
    return radius, angle
# define the exact displacements
def exact_disp(x,y,model):
    """Exact displacement (ux, uy) for a pressurized thick-walled cylinder
    (Lamé solution).

    Args:
        x, y: arrays of Cartesian coordinates of the evaluation points
        model: dict with keys "nu", "E", "radius_int", "radius_ext",
            "inner_pressure", "outer_pressure"

    Returns:
        (ux, uy): displacement components at each point
    """
    nu = model["nu"]
    a = model["radius_int"]
    b = model["radius_ext"]
    mu = model["E"]/(2*(1+nu))   # shear modulus
    p1 = model["inner_pressure"]
    p0 = model["outer_pressure"]
    r = np.hypot(x, y)
    # radial displacement divided by r; multiplying by x (or y) yields the
    # corresponding Cartesian component
    radial_factor = 1/(2*mu*(b**2-a**2))*((1-2*nu)*(p1*a**2-p0*b**2)
                                          + (p1-p0)*a**2*b**2/r**2)
    return x*radial_factor, y*radial_factor
#define the exact stresses
def exact_stresses(x,y,model):
    """Exact Cartesian stresses (sigma_xx, sigma_yy, sigma_xy) for the
    pressurized thick-walled cylinder (Lamé solution)."""
    a = model["radius_int"]
    b = model["radius_ext"]
    p_in = model["inner_pressure"]
    p_out = model["outer_pressure"]
    r = np.hypot(x, y)
    scale = a**2*b**2/(b**2 - a**2)
    # isotropic part plus an x/y-dependent quartic correction
    base = p_in/b**2 - p_out/a**2 + (p_in - p_out)/r**2
    quartic = 2*(p_in - p_out)/r**4
    sigma_xx = scale*(base - quartic*x**2)
    sigma_yy = scale*(base - quartic*y**2)
    sigma_xy = scale*(-quartic*x*y)
    return sigma_xx, sigma_yy, sigma_xy
print("Testing...")
# evaluate on a uniform grid twice as fine as the training quadrature grid
numPtsUTest = 2*numElemU*numGauss
numPtsVTest = 2*numElemV*numGauss
xPhysTest, yPhysTest = geomDomain.getUnifIntPts(numPtsUTest, numPtsVTest, [1,1,1,1])
XTest = np.concatenate((xPhysTest,yPhysTest),axis=1).astype(data_type)
XTest_tf = tf.convert_to_tensor(XTest)
YTest = pred_model(XTest_tf).numpy()   # columns: x-displacement, y-displacement
xPhysTest = xPhysTest.astype(data_type)
yPhysTest = yPhysTest.astype(data_type)
# stresses predicted by the network through the constitutive equation
stress_xx_comp, stress_yy_comp, stress_xy_comp = pred_model.constitutiveEq(xPhysTest, yPhysTest)
stress_xx_comp = stress_xx_comp.numpy()
stress_yy_comp = stress_yy_comp.numpy()
stress_xy_comp = stress_xy_comp.numpy()
# plot the displacement
plot_field_2d(XTest, YTest[:,0], numPtsUTest, numPtsVTest, title="Computed x-displacement")
plot_field_2d(XTest, YTest[:,1], numPtsUTest, numPtsVTest, title="Computed y-displacement")
# comparison with exact solution: relative L2 error of the displacement field
ux_exact, uy_exact = exact_disp(xPhysTest, yPhysTest, model_data)
ux_test = YTest[:,0:1]
uy_test = YTest[:,1:2]
err_norm = np.sqrt(np.sum((ux_exact-ux_test)**2+(uy_exact-uy_test)**2))
ex_norm = np.sqrt(np.sum(ux_exact**2 + uy_exact**2))
rel_err_l2 = err_norm/ex_norm
print("Relative L2 error: ", rel_err_l2)
# compare the computed stresses against the exact (Lamé) solution and
# accumulate the relative error in the energy norm
stress_xx_exact, stress_yy_exact, stress_xy_exact = exact_stresses(xPhysTest,
                                                                   yPhysTest, model_data)
stress_xx_err = stress_xx_exact - stress_xx_comp
stress_yy_err = stress_yy_exact - stress_yy_comp
# bug fix: the shear error previously re-used the xx components
# (stress_xx_exact - stress_xx_comp) instead of the xy ones
stress_xy_err = stress_xy_exact - stress_xy_comp
C_inv = np.linalg.inv(pred_model.Emat.numpy())   # inverse constitutive matrix
energy_err = 0.
energy_norm = 0.
numPts = len(xPhysTest)
for i in range(numPts):
    err_pt = np.array([stress_xx_err[i,0],stress_yy_err[i,0],stress_xy_err[i,0]])
    norm_pt = np.array([stress_xx_exact[i,0],stress_yy_exact[i,0],stress_xy_exact[i,0]])
    energy_err = energy_err + err_pt@C_inv@err_pt.T
    energy_norm = energy_norm + norm_pt@C_inv@norm_pt.T
print("Relative energy error: ", np.sqrt(energy_err/energy_norm))
# plot pointwise displacement errors against the exact solution
plot_field_2d(XTest, ux_exact-YTest[:,0:1], numPtsUTest, numPtsVTest, title="Error for x-displacement")
plot_field_2d(XTest, uy_exact-YTest[:,1:2], numPtsUTest, numPtsVTest, title="Error for y-displacement")
# plot the stresses
plot_field_2d(XTest, stress_xx_comp, numPtsUTest, numPtsVTest, title="Computed sigma_xx")
plot_field_2d(XTest, stress_yy_comp, numPtsUTest, numPtsVTest, title="Computed sigma_yy")
plot_field_2d(XTest, stress_xy_comp, numPtsUTest, numPtsVTest, title="Computed sigma_xy")
# plot the stress errors
plot_field_2d(XTest, stress_xx_err, numPtsUTest, numPtsVTest, title="Error for sigma_xx")
plot_field_2d(XTest, stress_yy_err, numPtsUTest, numPtsVTest, title="Error for sigma_yy")
plot_field_2d(XTest, stress_xy_err, numPtsUTest, numPtsVTest, title="Error for sigma_xy")
| 37.830579 | 103 | 0.72343 | 642 | 0.070126 | 0 | 0 | 286 | 0.03124 | 0 | 0 | 2,975 | 0.324959 |
f62fd3c1a6c3577c375970042756089fd3f9e611 | 1,224 | py | Python | src/trw/utils/clamp_n.py | civodlu/trw | b9a1cf045f61d6df9c65c014ef63b4048972dcdc | [
"MIT"
] | 3 | 2019-07-04T01:20:41.000Z | 2020-01-27T02:36:12.000Z | src/trw/utils/clamp_n.py | civodlu/trw | b9a1cf045f61d6df9c65c014ef63b4048972dcdc | [
"MIT"
] | null | null | null | src/trw/utils/clamp_n.py | civodlu/trw | b9a1cf045f61d6df9c65c014ef63b4048972dcdc | [
"MIT"
] | 2 | 2020-10-19T13:46:06.000Z | 2021-12-27T02:18:10.000Z | from typing import Sequence, Any
import torch
def clamp_n(tensor: torch.Tensor, min_values: Sequence[Any], max_values: Sequence[Any]) -> torch.Tensor:
    """
    Clamp a tensor with axis-dependent bounds.

    Args:
        tensor: a N-d torch.Tensor
        min_values: 1D (or 1xN) torch.Tensor of per-axis lower bounds
        max_values: 1D (or 1xN) torch.Tensor of per-axis upper bounds

    Returns:
        tensor with values clamped to min_values and max_values

    Examples:
        >>> t = torch.LongTensor([[1, 2, 3], [4, 5, 6]])
        >>> min_values = torch.LongTensor([3, 2, 4])
        >>> max_values = torch.LongTensor([3, 4, 8])
        >>> clamped_t = clamp_n(t, min_values, max_values)
    """
    assert isinstance(min_values, torch.Tensor)
    assert isinstance(max_values, torch.Tensor)
    assert min_values.shape == max_values.shape
    if min_values.dim() == 1:
        # promote the bounds to a (1, N) row so they broadcast over rows
        min_values = min_values.unsqueeze(dim=0)
        max_values = max_values.unsqueeze(dim=0)
    else:
        assert min_values.shape[0] == 1, 'must be broadcastable to tensor shape'
        assert max_values.shape[0] == 1, 'must be broadcastable to tensor shape'
    capped = torch.min(tensor, max_values)
    return torch.max(capped, min_values)
| 37.090909 | 104 | 0.660131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.515523 |
f6300d71500ac859671e30287246eb5ec27f0524 | 437 | py | Python | bin2dec.py | Alba3k/BinToDec | 4a4874b40cdd69b732532308484eeb976e158d35 | [
"MIT"
] | null | null | null | bin2dec.py | Alba3k/BinToDec | 4a4874b40cdd69b732532308484eeb976e158d35 | [
"MIT"
] | null | null | null | bin2dec.py | Alba3k/BinToDec | 4a4874b40cdd69b732532308484eeb976e158d35 | [
"MIT"
] | null | null | null | def bin2dec(binNumber):
decNumber = 0
index = 1
binNumber = binNumber[::-1]
for i in binNumber:
number = int(i) * index
decNumber = decNumber + number
index = index * 2
return decNumber
# Interactive entry point: read an 8-bit binary number and print its decimal value.
print('ВВЕДИТЕ ДВОИЧНОЕ 8-БИТНОЕ ЧИСЛО')
binNumber = input('>')
if len(binNumber) != 8:
    print('ВВЕДИТЕ ПРАВИЛЬНОЕ 8-БИТНОЕ ЧИСЛО')
elif not set(binNumber) <= {'0', '1'}:
    # bug fix: isdigit() also accepted digits 2-9, which are not binary digits
    print('ВВЕДИТЕ ПРАВИЛЬНОЕ 8-БИТНОЕ ЧИСЛО')
else:
    print(bin2dec(binNumber))
f6302daf57e1897d3b623be1dae10ac2892a5ddb | 1,227 | py | Python | taskmanager/src/modules/tasks/domain/task.py | acostapazo/event-manager | 614c91af19fad39f766ffa1b9a3d1e783f06c72e | [
"MIT"
] | null | null | null | taskmanager/src/modules/tasks/domain/task.py | acostapazo/event-manager | 614c91af19fad39f766ffa1b9a3d1e783f06c72e | [
"MIT"
] | 1 | 2020-04-20T11:20:22.000Z | 2020-04-20T11:20:22.000Z | taskmanager/src/modules/tasks/domain/task.py | acostapazo/event-manager | 614c91af19fad39f766ffa1b9a3d1e783f06c72e | [
"MIT"
] | null | null | null | from typing import Any, Dict
from meiga import Result, Error, Success
from petisco import AggregateRoot
from datetime import datetime
from taskmanager.src.modules.tasks.domain.description import Description
from taskmanager.src.modules.tasks.domain.events import TaskCreated
from taskmanager.src.modules.tasks.domain.task_id import TaskId
from taskmanager.src.modules.tasks.domain.title import Title
class Task(AggregateRoot):
    """Aggregate root representing a task with a title and a description."""

    def __init__(
        self, task_id: TaskId, title: str, description: str, created_at: datetime
    ):
        self.task_id = task_id
        self.title = title
        self.description = description
        self.created_at = created_at
        super().__init__()

    @staticmethod
    def create(task_id: TaskId, title: Title, description: Description):
        """Build a new Task stamped with the current UTC time and record
        a TaskCreated domain event on it."""
        task = Task(task_id, title, description, datetime.utcnow())
        task.record(TaskCreated(task_id))
        return task

    def to_result(self) -> Result[Any, Error]:
        """Wrap this task in a successful Result."""
        return Success(self)

    def to_dict(self) -> Dict:
        """Serialize the task to a plain dictionary (ISO-8601 timestamp)."""
        return dict(
            task_id=self.task_id,
            title=self.title,
            description=self.description,
            created_at=self.created_at.isoformat(),
        )
| 31.461538 | 81 | 0.682967 | 822 | 0.669927 | 0 | 0 | 216 | 0.176039 | 0 | 0 | 41 | 0.033415 |
f630f6ea5762c04cfcd310931e443ab55f0888fb | 4,085 | py | Python | CSD_API/get_from_author.py | andrewtarzia/cage_collect | e5e68dc23ec197eceff3b56de6725d996730b8ac | [
"MIT"
] | null | null | null | CSD_API/get_from_author.py | andrewtarzia/cage_collect | e5e68dc23ec197eceff3b56de6725d996730b8ac | [
"MIT"
] | null | null | null | CSD_API/get_from_author.py | andrewtarzia/cage_collect | e5e68dc23ec197eceff3b56de6725d996730b8ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Script to search for and collect CIFs using a list of authors.
Author: Andrew Tarzia
Date Created: 1 Mar 2019
"""
import ccdc.search
import sys
import CSD_f
def write_entry(file, author, number, DOI, CSD, solvent, disorder):
    """
    Append one comma-separated structure record to the CIF DB file.
    """
    record = f'{author},{number},{DOI},{CSD},{solvent},{disorder}\n'
    with open(file, 'a') as handle:
        handle.write(record)
def write_REFCODES(file, CSD):
    """
    Append a single CSD REFCODE line to the given file.
    """
    with open(file, 'a') as handle:
        handle.write(CSD + '\n')
def main():
    """Search the CSD for each listed author and record matching structures.

    Reads three CLI arguments (author_file, cage_type, output_prefix) and
    writes a CSV summary (<prefix>.txt) plus a REFCODE list (<prefix>.gcd).

    Bug fix: the function body used Python 2 print statements while the
    helpers above use f-strings (Python 3.6+), so the file could not run
    under either interpreter.  All prints are converted to Python 3 calls.
    """
    if (not len(sys.argv) == 4):
        print("""
Usage: get_from_author.py author_file cage_type output_prefix

    author_file (str) -
        file with list of authors

    cage_type (str) -
        organic if organic cages, metal if is_organometallic

        organic: sets is_organometallic is False
        metal: sets is_organometallic is True
        anything else: passes this test

    output_prefix (str) - prefix of .txt and .gcd file to output

    """)
        sys.exit()
    else:
        author_file = sys.argv[1]
        cage_type = sys.argv[2]
        output_prefix = sys.argv[3]
    out_txt = output_prefix+'.txt'
    out_gcd = output_prefix+'.gcd'
    authors = []
    for line in open(author_file, 'r'):
        authors.append(line.rstrip())
    # write headers / truncate the output files
    with open(out_txt, 'w') as f:
        f.write('author,number,DOI,CSD,solvent,disorder\n')
    with open(out_gcd, 'w') as f:
        f.write('')
    count = 0       # structures written
    count_no = 0    # authors searched
    idents = []     # REFCODEs already written (deduplication)
    for i, author in enumerate(authors):
        # break at '-----'
        if '-----' in author:
            break
        count_no += 1
        query = ccdc.search.TextNumericSearch()
        query.add_author(author)
        hits = query.search(database='CSD')
        print(author + ': ' + str(len(hits)))
        if len(hits) == 0:
            print(author)
        for hit in hits:
            # (removed unused author_list / crystal locals; the author_list
            # lookup could raise for entries without a publication)
            # skip polymeric structures
            if hit.entry.chemical_name is not None:
                if 'catena' in hit.entry.chemical_name:
                    continue
            if hit.entry.is_polymeric is True:
                continue
            # skip if structure is powder study
            if hit.entry.is_powder_study is True:
                continue
            if cage_type == 'organic':
                # skip structures that are NOT purely organic
                if hit.entry.is_organometallic is True:
                    continue
            elif cage_type == 'metal':
                # skip structures that are purely organic
                if hit.entry.is_organometallic is False:
                    continue
            else:
                # do not skip any
                pass
            # note structures with solvent (multi-word chemical names)
            solvent = 'n'
            if hit.entry.chemical_name is not None:
                if len(hit.entry.chemical_name.split(' ')) > 1:
                    solvent = 'y'
            # note structures with disorder
            disorder = 'n'
            if hit.entry.has_disorder is True:
                disorder = 'y'
            # write REFCODE to file, once per unique identifier
            if hit.identifier not in idents:
                idents.append(hit.identifier)
                write_entry(
                    out_txt,
                    author,
                    str(hit.entry.ccdc_number),
                    hit.entry.doi,
                    hit.identifier,
                    solvent,
                    disorder
                )
                write_REFCODES(out_gcd, hit.identifier)
                count += 1
    print(str(count) + ' cifs found from ' + str(count_no) + ' authors')


if __name__ == "__main__":
    main()
| 28.566434 | 68 | 0.520685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,315 | 0.321909 |
f6316922d218565500b02ad1c78331fdf583ec07 | 641 | py | Python | src/datastructure/stacks_ex1.py | Valeeswaran/tutorials | 71b43cad46f4d7d2d67d3ff4be61bdaaade2a36a | [
"MIT"
] | null | null | null | src/datastructure/stacks_ex1.py | Valeeswaran/tutorials | 71b43cad46f4d7d2d67d3ff4be61bdaaade2a36a | [
"MIT"
] | null | null | null | src/datastructure/stacks_ex1.py | Valeeswaran/tutorials | 71b43cad46f4d7d2d67d3ff4be61bdaaade2a36a | [
"MIT"
] | null | null | null | import stacks1
def is_match(ch1, ch2):
    """Return True when closing bracket ch1 pairs with opening bracket ch2.

    Raises KeyError if ch1 is not a recognised closing bracket.
    """
    openers = {")": "(", "]": "[", "}": "{"}
    return openers[ch1] == ch2
def is_balanced(s):
    """Return True when the brackets in s are balanced (non-bracket
    characters are ignored)."""
    stack = stacks1.Stack()
    for ch in s:
        if ch in '({[':
            stack.push(ch)
        if ch in ')}]':
            # a closer with nothing open, or the wrong opener, fails
            if stack.size() == 0:
                return False
            if not is_match(ch, stack.pop()):
                return False
    # balanced only if every opener was consumed
    return stack.size() == 0
# quick manual checks; expected output: False, True, False, True
print(is_balanced("))((a+b}{"))
print(is_balanced("((a+b))"))
print(is_balanced("))"))
print(is_balanced("[a+b]*(x+2y)*{hh+kk}"))
f631fd5dc1a814f372da186dbc180a4eadeb0274 | 2,100 | py | Python | recommend.py | Srini96/Market-Basket-Analysis-with-Customer-Profiling-and-Exploratory-Analysis-using-Python | 293291157a4d91c217c4008e058e01b1b930b923 | [
"MIT"
] | 1 | 2021-02-01T02:15:48.000Z | 2021-02-01T02:15:48.000Z | recommend.py | Srini96/Market-Basket-Analysis-with-Customer-Profiling-and-Exploratory-Analysis-using-Python | 293291157a4d91c217c4008e058e01b1b930b923 | [
"MIT"
] | null | null | null | recommend.py | Srini96/Market-Basket-Analysis-with-Customer-Profiling-and-Exploratory-Analysis-using-Python | 293291157a4d91c217c4008e058e01b1b930b923 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 18:30:29 2018
@author: Koushik
"""
import pandas as pd
from IPython.display import display
import sys
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 19:04:35 2018
@author: Koushik
"""
#Python 2.x program for Speech Recognition
import re
#enter the name of usb microphone that you found
#using lsusb
#the following name is only used as an example
#mic_name = "USB Device 0x46d:0x825: Audio (hw:1, 0)"
#device_id = "MMDEVAPI\AudioEndpoints"
#Sample rate is how often values are recorded
#mic_list = sr.Microphone.list_microphone_names()
#the following loop aims to set the device ID of the mic that
#we specifically want to use to avoid ambiguity.
#for i, microphone_name in enumerate(mic_list):
# if microphone_name == mic_name:
# device_id = i
#use the microphone as source for input. Here, we also specify
#which device ID to specifically look for incase the microphone
#is not working, an error will pop up saying "device_id undefined"
products = pd.read_csv('products.csv')
products = products['product_name'].tolist();
for i in range(0,len(products)):
products[i] = products[i].lower()
#print(products)
text = ' '.join(sys.argv[1:])
text = re.split(' and |order |some | like | love |',text)
#print(text)
new_text = ''
for i in text:
print(i)
if i in products:
new_text+=i+","
new_text = new_text.split(',')
#print(new_text)
analysis = pd.read_csv("output.csv")
analysis["itemA"]=analysis['itemA'].str.lower()
#item_name = ("The Red ONe: Squished Fruit Smoothies","Santa Fe Extra Lean Veggie Burger")
#print(analysis['itemA'])
#columns = ["itemB"]
#df[df['B']==3]['A']
analysis_specific_data = pd.DataFrame()
for i in new_text:
analysis_specific = analysis.loc[analysis['itemA']==i]
analysis_specific_data = analysis_specific_data.append(analysis_specific)
#print(analysis_specific)
analysis_specific_data = analysis_specific_data.sort_values('lift',ascending = False)
analysis_specific_data.to_html('recommend_table.html')
| 30.882353 | 91 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,298 | 0.618095 |
f6321b2979bddd05bf08689d0cd5e7faac1097ef | 584 | py | Python | image_pipeline/stages/__init__.py | MarcoGlauser/image_pipeline | a33b8a64773e8a0189155ae625d3060e15fb8def | [
"MIT"
] | null | null | null | image_pipeline/stages/__init__.py | MarcoGlauser/image_pipeline | a33b8a64773e8a0189155ae625d3060e15fb8def | [
"MIT"
] | null | null | null | image_pipeline/stages/__init__.py | MarcoGlauser/image_pipeline | a33b8a64773e8a0189155ae625d3060e15fb8def | [
"MIT"
] | null | null | null | from image_pipeline.stages.crop_stage import CropStage
from image_pipeline.stages.jpeg_lossless_compression import JPEGLossLessCompressionStage
from image_pipeline.stages.jpeg_lossy_compression import JPEGLossyCompressionStage
from image_pipeline.stages.resize_stage import ResizeStage
pre_stages = [
ResizeStage,
CropStage,
] # List[_Stage]
compression_stages = [
JPEGLossyCompressionStage,
JPEGLossLessCompressionStage,
] # List[_Stage]
post_stages = [
# PlaceholderGenerationStage,
] # List[_Stage]
stages = pre_stages + compression_stages + post_stages | 27.809524 | 88 | 0.813356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.121575 |
f634dcfcb0026b1b4daab85dd3790ec59612e959 | 538 | py | Python | pyunitwizard/tests/test_get_form.py | dprada/pyunitwizard | dbfd0c015f9359c76e7e59a5c16ab469b86aed34 | [
"MIT"
] | 2 | 2021-07-01T14:33:58.000Z | 2022-03-19T19:19:09.000Z | pyunitwizard/tests/test_get_form.py | dprada/pyunitwizard | dbfd0c015f9359c76e7e59a5c16ab469b86aed34 | [
"MIT"
] | 15 | 2021-02-11T18:54:16.000Z | 2022-03-18T17:38:03.000Z | pyunitwizard/tests/test_get_form.py | dprada/pyunitwizard | dbfd0c015f9359c76e7e59a5c16ab469b86aed34 | [
"MIT"
] | 2 | 2021-06-17T18:56:02.000Z | 2022-03-08T05:02:17.000Z | import pytest
import pyunitwizard as puw
def test_string():
puw.configure.reset()
assert puw.get_form('1 meter')=='string'
def test_pint_quantity():
puw.configure.reset()
puw.configure.load_library(['pint'])
ureg = puw.forms.api_pint.ureg
q = ureg.Quantity(1.0,'meter')
assert puw.get_form(q)=='pint'
def test_pint_unit():
puw.configure.reset()
puw.configure.load_library(['pint'])
ureg = puw.forms.api_pint.ureg
q = puw.forms.api_pint.ureg.Unit('meter')
assert puw.get_form(q)=='pint'
| 24.454545 | 45 | 0.678439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.10223 |
f63545e957985ca6c1cbcde078c84fd5739a594f | 4,393 | py | Python | package/awesome_panel/express/bootstrap/modal.py | slamer59/awesome-panel | 91c30bd6d6859eadf9c65b1e143952f7e64d5290 | [
"Apache-2.0"
] | 179 | 2019-12-04T14:54:53.000Z | 2022-03-30T09:08:38.000Z | package/awesome_panel/express/bootstrap/modal.py | slamer59/awesome-panel | 91c30bd6d6859eadf9c65b1e143952f7e64d5290 | [
"Apache-2.0"
] | 62 | 2019-12-14T16:51:28.000Z | 2022-03-19T18:47:12.000Z | package/awesome_panel/express/bootstrap/modal.py | slamer59/awesome-panel | 91c30bd6d6859eadf9c65b1e143952f7e64d5290 | [
"Apache-2.0"
] | 35 | 2019-12-08T13:19:53.000Z | 2022-03-25T10:33:02.000Z | """In this module we provide the functionality of a Modal.
The Modal can be used to focus some kind of information like text, images, chart or an interactive
dashboard.
The implementation is inspired by
- https://css-tricks.com/considerations-styling-modal/
- https://codepen.io/henchmen/embed/preview/PzQpvk
- https://getbootstrap.com/docs/4.3/components/modal/
"""
import panel as pn
import param
_CSS = """
.bk.modal {
/* This way it could be display flex or grid or whatever also. */
display: block;
max-width: 100%;
max-height: 100%;
position: fixed!important;
z-index: 100;
left: 0!important;
top: 0!important;
bottom: 0!important;
right: 0!important;
margin: auto!important;
box-shadow: 5px 5px 20px grey;
box-shadow: 0 0 60px 10px rgba(0, 0, 0, 0.9);
border: 1px solid rgba(0,0,0,.125);
border-radius: 0.25rem;
}
.closed {
display: none!important;
}
.modal-overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 50;
background: rgba(0, 0, 0, 0.6);
}
.modal-body {
overflow: auto;
}
"""
class Modal(param.Parameterized):
"""The Modal can be used to focus some kind of information like text, images, chart or an
interactive dashboard.
In order to use this modal you need to
- Instantiate the Modal
- Add the CSS from the get_css function to the app
- using `pn.config.raw_css.append` or
- directly in your template
The implementation is inspired by
- https://css-tricks.com/considerations-styling-modal/
- https://codepen.io/henchmen/embed/preview/PzQpvk
- https://getbootstrap.com/docs/4.3/components/modal/
"""
title = param.String(default="Modal")
body = param.List()
def __init__(self, **params):
super().__init__(**params)
self.modal_overlay = pn.pane.HTML('<div class="modal-overlay" id="modal-overlay"></div>')
self.close_button = pn.widgets.Button(
name="X",
css_classes=["close-modal-button"],
width=50,
)
self.close_button.js_on_click(
code="""
var modal = document.querySelector(".bk.modal");
var modalOverlay = document.querySelector("#modal-overlay");
modal.classList.toggle("closed");
modalOverlay.classList.toggle("closed");
"""
)
self._modal_title = pn.pane.Markdown("# " + self.title)
self._modal_body = pn.Column(*self.body) # pylint: disable=not-an-iterable
self.modal = pn.Column(
pn.Column(
pn.Row(
self._modal_title,
pn.layout.HSpacer(),
self.close_button,
),
pn.layout.Divider(),
self._modal_body,
sizing_mode="stretch_width",
margin=10,
),
background="white",
width=400,
height=400,
css_classes=["modal"],
)
@staticmethod
def get_open_modal_button(name: str = "Open Modal", **kwargs) -> pn.widgets.Button:
"""A Button to open the modal with"""
open_modal_button = pn.widgets.Button(
name=name, css_classes=["open-modal-button"], **kwargs
)
open_modal_button.js_on_click(
code="""
var modal = document.querySelector(".modal");
var modalOverlay = document.querySelector("#modal-overlay");
modal.classList.toggle("closed");
modalOverlay.classList.toggle("closed");
"""
)
return open_modal_button
@staticmethod
def get_css() -> str:
"""Add the CSS from this function to the app
- using `pn.config.raw_css.append` or
- directly in your template
Returns:
str: The css string
"""
return _CSS
@param.depends(
"title",
watch=True,
)
def set_modal_title(
self,
):
"""Updates the title of the modal"""
self._modal_title.object = "# " + self.title
@param.depends(
"body",
watch=True,
)
def set_modal_body(
self,
):
"""Updates the body of the modal"""
self._modal_body[:] = self.body
| 27.117284 | 99 | 0.572046 | 3,256 | 0.741179 | 0 | 0 | 1,269 | 0.288869 | 0 | 0 | 2,585 | 0.588436 |
f636a313757854652da404412374580257d3fe53 | 5,696 | py | Python | python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 17,085 | 2016-11-18T06:40:52.000Z | 2022-03-31T22:52:32.000Z | python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 29,769 | 2016-11-18T06:35:22.000Z | 2022-03-31T16:46:15.000Z | python/paddle/distributed/fleet/meta_optimizers/fp16_allreduce_optimizer.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 4,641 | 2016-11-18T07:43:33.000Z | 2022-03-31T15:15:02.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from paddle.fluid import core, framework, unique_name
from .meta_optimizer_base import MetaOptimizerBase
__all__ = []
class FP16AllReduceOptimizer(MetaOptimizerBase):
def __init__(self, optimizer):
super(FP16AllReduceOptimizer, self).__init__(optimizer)
self.inner_opt = optimizer
# we do not allow meta optimizer to be inner optimizer currently
self.meta_optimizers_white_list = [
"LarsOptimizer",
"LambOptimizer",
"RecomputeOptimizer",
"LocalSGDOptimizer",
"GradientMergeOptimizer",
"GraphExecutionOptimizer",
"AdaptiveLocalSGDOptimizer",
]
self.meta_optimizers_black_list = ["DGCOptimizer"]
def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
user_defined_strategy):
super(FP16AllReduceOptimizer, self)._set_basic_info(
loss, role_maker, user_defined_optimizer, user_defined_strategy)
def _can_apply(self):
if not self.role_maker._is_collective:
return False
if self.user_defined_strategy.fp16_allreduce:
return True
return False
def _disable_strategy(self, dist_strategy):
dist_strategy.fp16_allreduce = False
def _enable_strategy(self, dist_strategy, context=None):
dist_strategy.fp16_allreduce = True
@staticmethod
def fp16_compression(param_and_grads):
"""
Compress fp32 gradients to fp16 during allreduce.
"""
op_maker = core.op_proto_and_checker_maker
new_param_and_grads = [] # param, grad, is_cast
# cast grad from fp32->fp16 before allreduce,
for param, grad in param_and_grads:
if grad is None or grad.dtype != core.VarDesc.VarType.FP32:
new_param_and_grads.append((param, grad, False))
continue
op = grad.op
block = grad.block
var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]
if param.name not in var_attr:
new_param_and_grads.append((param, grad, False))
continue
# remove (param, grad) from op_role_var
var_attr.remove(param.name)
var_attr.remove(grad.name)
if len(var_attr) > 1:
op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)
else:
op._remove_attr(op_maker.kOpRoleVarAttrName())
new_grad = block.create_var(
name=unique_name.generate(grad.name + ".cast_fp16"),
dtype=core.VarDesc.VarType.FP16,
persistable=False,
stop_gradient=True)
with block.program._backward_role_guard():
cast_op = block.append_op(
type="cast",
inputs={"X": grad},
outputs={"Out": new_grad},
attrs={
"in_dtype": core.VarDesc.VarType.FP32,
"out_dtype": core.VarDesc.VarType.FP16
},
stop_gradient=True)
backward = op_maker.OpRole.Backward
cast_op._set_attr(op_maker.kOpRoleAttrName(), backward)
cast_op._set_attr(op_maker.kOpRoleVarAttrName(),
[param.name, new_grad.name])
new_grad.op = cast_op
new_param_and_grads.append((param, new_grad, True))
ret_param_and_grads = []
# cast grad from fp16->fp32 after allreduce.
# NOTE. Now we split fp16 compression into two for loops,
# if we do not separate them, fuse allreduce will wrong.
# This must be the problem of fuse allreduce pass, need
# fixed in future.
for param, grad, cast in new_param_and_grads:
if not cast:
ret_param_and_grads.append((param, grad))
continue
block = grad.block
new_grad = block.create_var(
name=unique_name.generate(grad.name + ".cast_fp32"),
dtype=core.VarDesc.VarType.FP32,
persistable=False,
stop_gradient=True)
with block.program._optimized_guard(
[param, grad]), framework.name_scope('fp16_allreduce'):
cast_op = block.append_op(
type="cast",
inputs={"X": grad},
outputs={"Out": new_grad},
attrs={
"in_dtype": core.VarDesc.VarType.FP16,
"out_dtype": core.VarDesc.VarType.FP32
},
stop_gradient=True)
ret_param_and_grads.append((param, new_grad))
return ret_param_and_grads
def apply_optimize(self, loss, startup_program, params_grads):
new_params_grads = self.fp16_compression(params_grads)
return self.inner_opt.apply_optimize(
loss,
startup_program=startup_program,
params_grads=new_params_grads)
| 38.228188 | 76 | 0.59849 | 4,994 | 0.876756 | 0 | 0 | 3,435 | 0.603055 | 0 | 0 | 1,309 | 0.22981 |
f636a882b94f8b074d76f3398dd62f93844dc2ba | 245 | py | Python | factory_djoy/__about__.py | jamescooke/factory_djoy | 97cf68a068b831f2830688a91e7b143b97bc1326 | [
"MIT"
] | 26 | 2016-05-27T19:00:34.000Z | 2022-02-14T14:22:20.000Z | factory_djoy/__about__.py | jamescooke/factory_djoy | 97cf68a068b831f2830688a91e7b143b97bc1326 | [
"MIT"
] | 37 | 2016-05-27T18:58:51.000Z | 2021-10-10T11:35:08.000Z | factory_djoy/__about__.py | jamescooke/factory_djoy | 97cf68a068b831f2830688a91e7b143b97bc1326 | [
"MIT"
] | null | null | null | __name__ = 'factory_djoy'
__version__ = '2.2.0'
__author__ = 'James Cooke'
__copyright__ = '2021, {}'.format(__author__)
__description__ = 'Factories for Django, creating valid model instances every time.'
__email__ = 'github@jamescooke.info'
| 27.222222 | 84 | 0.755102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.546939 |
f6385c22b94a6e2e267dcaa60df50ad19fa4a166 | 3,928 | py | Python | garageofcode/sat/fifteen_puzzle.py | tpi12jwe/garageofcode | 3cfaf01f6d77130bb354887e6ed9921c791db849 | [
"MIT"
] | 2 | 2020-02-11T10:32:06.000Z | 2020-02-11T17:00:47.000Z | garageofcode/sat/fifteen_puzzle.py | tpi12jwe/garageofcode | 3cfaf01f6d77130bb354887e6ed9921c791db849 | [
"MIT"
] | null | null | null | garageofcode/sat/fifteen_puzzle.py | tpi12jwe/garageofcode | 3cfaf01f6d77130bb354887e6ed9921c791db849 | [
"MIT"
] | null | null | null | from collections import defaultdict
from sugarrush.solver import SugarRush
from garageofcode.common.utils import flatten_simple
N = 3  # side length of the square board (N=3 is the 8-puzzle variant)
def get_state(solver):
    """Create an N x N board of one-hot SAT variables.

    Each tile gets N**2 variables, one per possible value, with an
    exactly-one constraint per tile.
    """
    board = [
        [[solver.var() for _ in range(N**2)] for _ in range(N)]
        for _ in range(N)
    ]
    # exactly one value may be true per tile
    for tile_vars in flatten_simple(board):
        solver.add(solver.equals(tile_vars, 1))
    return board
def get_transition(solver, X0, X1):
    """Return CNF clauses encoding one legal move from state X0 to state X1.

    A boolean 'swap' variable is created for every pair of horizontally or
    vertically adjacent tiles.  Exactly one swap is active per turn; it must
    touch the empty tile (value index 0), it exchanges the two tiles'
    one-hot values, and every other tile keeps its value.
    """
    ij2swaps = defaultdict(list)   # tile (i, j) -> swap variables touching it
    swap2ijs = {}                  # swap variable -> the two tiles it exchanges
    for i in range(N):
        for j in range(N):
            if j < N - 1:
                # horizontal neighbour
                swap = solver.var()
                swap2ijs[swap] = [(i, j), (i, j+1)]
                ij2swaps[(i, j)].append(swap)
                ij2swaps[(i, j+1)].append(swap)
            if i < N - 1:
                # vertical neighbour
                swap = solver.var()
                swap2ijs[swap] = [(i, j), (i+1, j)]
                ij2swaps[(i, j)].append(swap)
                ij2swaps[(i+1, j)].append(swap)
    cnf = []
    for i in range(N):
        for j in range(N):
            hot = X0[i][j][0]
            # if the empty square is on (i, j) (is 'hot'),
            # then one of the adjacent swaps must be used
            cnf.append([-hot] + ij2swaps[(i, j)])
            # the non-adjacent swaps must be 0
            for swap in swap2ijs:
                if swap not in ij2swaps[(i, j)]:
                    cnf.append([-hot, -swap])
    for swap, ijs in swap2ijs.items():
        # if a swap is used, one of the adjacent
        # squares must be hot
        #cnf.append([-swap] + swap2ijs[swap])
        # if swap is true, then the adjacent tiles should swap values
        (il, jl), (ir, jr) = ijs # left/right
        for x0l, x1r in zip(X0[il][jl], X1[ir][jr]):
            # swap => x0l == x1r
            cnf.extend([[-swap, x0l, -x1r], [-swap, -x0l, x1r]])
        for x0r, x1l in zip(X0[ir][jr], X1[il][jl]):
            # swap => x0r == x1l
            cnf.extend([[-swap, x0r, -x1l], [-swap, -x0r, x1l]])
        for ij in ij2swaps:
            # if tile is not adjacent to swap,
            # then X1 = X0 in that tile
            if ij not in ijs:
                i, j = ij
                for x0, x1 in zip(X0[i][j], X1[i][j]):
                    # swap => x0 == x1
                    cnf.extend([[-swap, x0, -x1], [-swap, -x0, x1]])
    swaps = list(swap2ijs.keys())
    cnf.extend(solver.equals(swaps, 1)) # only one swap per turn
    return cnf
def set_state(X0, ij2k=None):
    """Return unit clauses pinning state X0 to the assignment in ij2k.

    Args:
        X0: N x N board of one-hot variable lists (from get_state).
        ij2k: mapping (i, j) -> tile value k.  Defaults to the solved
            row-major configuration; previously a missing argument was
            dereferenced and raised TypeError.

    Returns:
        list of unit clauses forcing exactly the given assignment.
    """
    if ij2k is None:
        # bug fix: the old default (None) crashed on ij2k[(i, j)]
        ij2k = {(i, j): i * N + j for i in range(N) for j in range(N)}
    cnf = []
    for i in range(N):
        for j in range(N):
            for k in range(N**2):
                if k == ij2k[(i, j)]:
                    cnf.append([X0[i][j][k]])
                else:
                    cnf.append([-X0[i][j][k]])
    return cnf
def print_solve(solver, Xr):
    """Print the solved board: each tile shows the index of its hot
    one-hot bit (i.e. the tile's value)."""
    for row in Xr:
        decoded = []
        for tile in row:
            values = [solver.solution_value(var) for var in tile]
            decoded.append(values.index(1))
        print(decoded)
def main():
    """Search for the smallest even move count that swaps tiles 1 and 2.

    Tries horizons r = 4, 6, ..., 22 and stops at the first satisfiable
    one, printing every intermediate board state.
    """
    for r in range(4, 24, 2):
        solver = SugarRush()
        #r = 5 # note: r is number of steps i.e. num states minus one
        X = [get_state(solver) for _ in range(r+1)]
        # start from the solved configuration (row-major numbering)
        ij2k = {(i, j): i * N + j for j in range(N) for i in range(N)}
        cnf = set_state(X[0], ij2k)
        solver.add(cnf)
        # chain consecutive states with single-move transition constraints
        for X0, X1 in zip(X, X[1:]):
            cnf = get_transition(solver, X0, X1)
            solver.add(cnf)
        # goal: same as the start but with the values at (0, 1)/(0, 2) swapped
        ij2k[(0, 1)] = 2
        ij2k[(0, 2)] = 1
        cnf = set_state(X[-1], ij2k)
        solver.add(cnf)
        satisfiable = solver.solve()
        if satisfiable:
            print(r)
            for x in X:
                print_solve(solver, x)
                print()
            return
        else:
            print(r, "not satisfiable")

if __name__ == '__main__':
    main()
f639470dab223c0a61d56c8eec0fdc664b9c6792 | 3,435 | py | Python | normalize.py | Zoomerhimmer/NT-Textual-Toolkit | c7b236972f6dcef45fd0596b06f113c503dda6d2 | [
"MIT"
] | null | null | null | normalize.py | Zoomerhimmer/NT-Textual-Toolkit | c7b236972f6dcef45fd0596b06f113c503dda6d2 | [
"MIT"
] | null | null | null | normalize.py | Zoomerhimmer/NT-Textual-Toolkit | c7b236972f6dcef45fd0596b06f113c503dda6d2 | [
"MIT"
] | null | null | null | import re
import sys
import os
# Lists of same characters
# Normalisation tables: every entry of a *_equiv / *_subscripted list is
# folded to the single base character named in its trailing comment.
alpha_equiv = ['Α','Ά','ά','ὰ','ά','ἀ','ἁ','ἂ','ἃ','ἄ','ἅ','ἆ','ἇ','Ἀ','Ἁ','Ἂ','Ἃ','Ἄ','Ἅ','Ἆ','Ἇ','ᾶ','Ᾰ','Ᾱ','Ὰ','Ά','ᾰ','ᾱ'] #Converts to α
alpha_subscripted = ['ᾀ','ᾁ','ᾂ','ᾃ','ᾄ','ᾅ','ᾆ','ᾇ','ᾈ','ᾉ','ᾊ','ᾋ','ᾌ','ᾍ','ᾎ','ᾏ','ᾲ','ᾴ','ᾷ','ᾼ','ᾳ'] #Converts to ᾳ
epsilon_equiv = ['Ε','Έ','έ','ὲ','έ','ἐ','ἑ','ἒ','ἓ','ἔ','ἕ','Ἐ','Ἑ','Ἒ','Ἓ','Ἔ','Ἕ'] #Converts to ε
eta_equiv = ['Η','Ή','ή','ὴ','ή','ἠ','ἡ','ἢ','ἣ','ἤ','ἥ','ἦ','ἧ','Ἠ','Ἡ','Ἢ','Ἣ','Ἤ','Ἥ','Ἦ','Ἧ','Ὲ','Έ','Ὴ','Ή','ῆ'] #Converts to η
# BUG FIX: 'ᾞ''ᾟ' was missing a comma, so the adjacent string literals
# concatenated into a single two-character entry and standalone 'ᾞ' / 'ᾟ'
# were never normalised.
eta_subscripted = ['ᾐ','ᾑ','ᾒ','ᾓ','ᾔ','ᾕ','ᾖ','ᾗ','ᾘ','ᾙ','ᾚ','ᾛ','ᾜ','ᾝ','ᾞ','ᾟ','ῂ','ῄ','ῇ','ῌ','ῃ'] #Converts to ῃ
iota_equiv = ['Ι','Ί','ΐ','Ϊ','ί','ϊ','ὶ','ί','ἰ','ἱ','ἲ','ἳ','ἴ','ἵ','ἶ','ἷ','Ἰ','Ἱ','Ἲ','Ἳ','Ἴ','Ἵ','Ἶ','Ἷ','ῐ','ῑ','ῒ','ΐ','ῖ','ῗ','Ῐ','Ῑ','Ὶ','Ί'] #Converts to ι
omicron_equiv = ['Ο','Ό','ό','ὸ','ό','ὀ','ὁ','ὂ','ὃ','ὄ','ὅ','Ὀ','Ὁ','Ὂ','Ὃ','Ὄ','Ὅ'] #Converts to ο
upsilon_equiv = ['Υ','Ύ','Ϋ','ΰ','ϋ','ύ','ὺ','ύ','ὐ','ὑ','ὒ','ὓ','ὔ','ὕ','ὖ','ὗ','Ὑ','Ὓ','Ὕ','Ὗ','ΰ','ῦ','ῧ','Ῠ','Ῡ','Ὺ','Ύ'] #Converts to υ
omega_equiv = ['Ω','Ώ','ώ','ὼ','ώ','ὠ','ὡ','ὢ','ὣ','ὤ','ὥ','ὦ','ὧ','Ὠ','Ὡ','Ὢ','Ὣ','Ὤ','Ὥ','Ὦ','Ὧ','ῶ','Ὸ','Ό','Ὼ','Ώ'] #Converts to ω
omega_subscripted = ['ᾠ','ᾡ','ᾢ','ᾣ','ᾤ','ᾥ','ᾦ','ᾧ','ᾨ','ᾩ','ᾪ','ᾫ','ᾬ','ᾭ','ᾮ','ᾯ','ῲ','ῴ','ῷ','ῼ','ῳ'] #Converts to ῳ
rho_equiv = ['Ρ','ῤ','ῥ','Ῥ'] #Converts to ρ
uppercase = {'Β':'β','Γ':'γ','Δ':'δ','Ζ':'ζ','Θ':'θ','Κ':'κ','Λ':'λ','Μ':'μ','Ν':'ν','Ξ':'ξ','Π':'π','Σ':'σ','Τ':'τ','Φ':'φ','Χ':'χ','Ψ':'ψ'}
def normalizer(char_list, normal_char, string):
    """Replace every occurrence of each entry of char_list with normal_char."""
    result = string
    for variant in char_list:
        result = result.replace(variant, normal_char)
    return result
def normalize(data, ignore_subscript=True):
    """Strip brackets and fold Greek character variants to base letters.

    When ignore_subscript is true, iota-subscripted forms collapse to the
    plain base vowel; otherwise they collapse to the subscripted lowercase
    form.  Finally, uppercase consonants are lowercased.
    """
    # Remove editorial brackets before normalising characters.
    data = re.sub(r'(\[|\])', '', data)
    base_tables = (
        (alpha_equiv, 'α'),
        (epsilon_equiv, 'ε'),
        (eta_equiv, 'η'),
        (iota_equiv, 'ι'),
        (omicron_equiv, 'ο'),
        (upsilon_equiv, 'υ'),
        (omega_equiv, 'ω'),
        (rho_equiv, 'ρ'),
    )
    for table, target in base_tables:
        data = normalizer(table, target, data)
    if ignore_subscript:
        subscript_tables = (
            (alpha_subscripted, 'α'),
            (eta_subscripted, 'η'),
            (omega_subscripted, 'ω'),
        )
    else:
        subscript_tables = (
            (alpha_subscripted, 'ᾳ'),
            (eta_subscripted, 'ῃ'),
            (omega_subscripted, 'ῳ'),
        )
    for table, target in subscript_tables:
        data = normalizer(table, target, data)
    # Lowercase everything
    for cap, low in uppercase.items():
        data = data.replace(cap, low)
    return data
def main():
    """CLI entry point: normalize the file given in argv[1] and write the
    result next to it as <name>_normalized.txt.

    argv[2] == 'false' (case-insensitive) keeps iota subscripts distinct;
    anything else (or no second argument) folds them away.
    """
    if len(sys.argv) == 1:
        sys.exit('Program needs a file to process.')
    # Ignore iota subscripts by default
    try:
        ignore_subscript = sys.argv[2].lower() != 'false'
    except IndexError:
        # Previously a bare except: — catch only the missing-argument case.
        ignore_subscript = True
        print('Ignoring iota subscripts.')
    out_path = os.path.splitext(sys.argv[1])[0] + '_normalized.txt'
    # Context managers close the files even on error; the original also
    # shadowed the builtin `input` with its file handle.
    with open(sys.argv[1]) as source:
        data = source.read()
    with open(out_path, 'w') as target:
        target.write(normalize(data, ignore_subscript))
if __name__ == '__main__':
    main()
| 43.481013 | 165 | 0.52198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,844 | 0.466363 |
f63a003b49d1a2ecd8e8f98df7bb4d1aa0a1fd33 | 10,756 | py | Python | tests/test_conveniences.py | CSC-DPR/eopf-cpm | 6af10c0905eec876e8ab884ce62d5b74d52cb5a3 | [
"Apache-2.0"
] | null | null | null | tests/test_conveniences.py | CSC-DPR/eopf-cpm | 6af10c0905eec876e8ab884ce62d5b74d52cb5a3 | [
"Apache-2.0"
] | 171 | 2022-01-29T09:38:27.000Z | 2022-03-30T08:17:35.000Z | tests/test_conveniences.py | CSC-DPR/eopf-cpm | 6af10c0905eec876e8ab884ce62d5b74d52cb5a3 | [
"Apache-2.0"
] | null | null | null | import datetime
import os
import sys
from cmath import inf
from typing import Any
import hypothesis.extra.numpy as xps
import hypothesis.strategies as st
import numpy
import pytest
from hypothesis import assume, given
from eopf.product.utils import (
apply_xpath,
conv,
convert_to_unix_time,
is_date,
parse_xml,
reverse_conv,
translate_structure,
)
@pytest.fixture
def tree(EMBEDED_TEST_DATA_FOLDER: str):
    """XML tree parsed from the embedded xfdumanifest snippet."""
    manifest_path = os.path.join(EMBEDED_TEST_DATA_FOLDER, "snippet_xfdumanifest.xml")
    with open(manifest_path) as manifest:
        return parse_xml(manifest)
@st.composite
def value_with_type(draw, elements=st.integers(), expected_type=int, expected_container_type=None):
    """Composite strategy yielding (value, expected_type[, container_type]).

    Either type argument may itself be a strategy, in which case a concrete
    type is drawn from it; the container type is only included when given.
    """
    if isinstance(expected_type, st.SearchStrategy):
        expected_type = draw(expected_type)
    if expected_container_type is not None:
        if isinstance(expected_container_type, st.SearchStrategy):
            expected_container_type = draw(expected_container_type)
        return (draw(elements), expected_type, expected_container_type)
    return (draw(elements), expected_type)
@st.composite
def numpy_value(draw, dtype_st=xps.scalar_dtypes(), allow_infinity=True, allow_nan=True):
    """Draw a scalar numpy value whose dtype is itself drawn from *dtype_st*."""
    return draw(xps.from_dtype(draw(dtype_st), allow_infinity=allow_infinity, allow_nan=allow_nan))
@pytest.mark.unit
def test_parse_xml(tree):
    """Given an input xml,
    the output of the function must match the expected output"""
    result = ""
    display_namespaces = True
    # Flatten the tree in document order: tag, namespace map (first element
    # only), attributes, then any non-blank text content.
    for element in tree.iter():
        tag = element.tag
        result += f"{tag}\n"
        if display_namespaces:
            display_namespaces = False
            for key, value in element.nsmap.items():
                result += f"{key} : {value}\n"
        attributes = element.attrib
        for key, value in attributes.items():
            result += f"{key} : {value}\n"
        textual_content = element.text
        if textual_content and textual_content.strip():
            result += textual_content + "\n"
    # Compare the flattened dump against the pre-recorded snapshot file.
    file_path = os.path.join(os.path.abspath("tests/data"), "solutions.txt")
    with open(file_path, "r") as f:
        expected = f.read()
    assert result == expected
@pytest.mark.unit
def test_translate_structure(tree):
    """Given an input xml,
    the output of the function must match the expected output"""
    # Mapping of output attribute -> XPath expression (or quoted literal).
    MAP = {
        "title": "concat('',metadataSection/metadataObject[@ID='generalProductInformation']/metadataWrap/xmlData/"
        "sentinel3:generalProductInformation/sentinel3:productName/text())",
        "Conventions": "'CF-1.9'",
    }
    NAMESPACES = {
        "xfdu": "urn:ccsds:schema:xfdu:1",
        "gml": "http://www.opengis.net/gml",
        "sentinel-safe": "http://www.esa.int/safe/sentinel/1.1",
        "sentinel3": "http://www.esa.int/safe/sentinel/sentinel-3/1.0",
        "olci": "http://www.esa.int/safe/sentinel/sentinel-3/olci/1.0",
    }
    result = translate_structure(MAP, tree, NAMESPACES)
    assert result == {
        "title": "S3A_OL_1_EFR____20220116T092821_20220116T093121_20220117T134858_0179_081_036_2160_LN1_O_NT_002.SEN3",
        "Conventions": "CF-1.9",
    }
@pytest.mark.unit
def test_apply_xpath(tree):
    """Given an input xml,
    the output of the function must match the expected output"""
    # Same mapping as test_translate_structure, but evaluated attribute by
    # attribute through apply_xpath.
    MAP = {
        "title": "concat('',metadataSection/metadataObject[@ID='generalProductInformation']/metadataWrap/xmlData/"
        "sentinel3:generalProductInformation/sentinel3:productName/text())",
        "Conventions": "'CF-1.9'",
    }
    NAMESPACES = {
        "xfdu": "urn:ccsds:schema:xfdu:1",
        "gml": "http://www.opengis.net/gml",
        "sentinel-safe": "http://www.esa.int/safe/sentinel/1.1",
        "sentinel3": "http://www.esa.int/safe/sentinel/sentinel-3/1.0",
        "olci": "http://www.esa.int/safe/sentinel/sentinel-3/olci/1.0",
    }
    result = {attr: apply_xpath(tree, MAP[attr], NAMESPACES) for attr in MAP}
    assert result == {
        "title": "S3A_OL_1_EFR____20220116T092821_20220116T093121_20220117T134858_0179_081_036_2160_LN1_O_NT_002.SEN3",
        "Conventions": "CF-1.9",
    }
@pytest.mark.unit
def test_is_date():
    """is_date must accept Zulu/GMT-offset strings and stringified datetimes,
    and reject arbitrary text."""
    zulu_string = "2020-03-31T17:19:29.230522Z"  # Zulu time
    offset_string = "2020-03-31T17:19:29.230522GMT+3"  # GMT+3 Time
    not_a_date = "some_random_string"
    as_datetime = datetime.datetime(2020, 3, 31, 17, 19, 29, 230522)
    for candidate in (zulu_string, offset_string, str(as_datetime)):
        assert is_date(candidate)
    assert not is_date(not_a_date)
@pytest.mark.unit
def test_convert_unix_time():
    """convert_to_unix_time must agree for equivalent string and datetime
    inputs, honour time-zone offsets, and raise ValueError on bad input."""
    import pytz
    # A datetime-like string and the equivalent aware datetime must convert
    # to the same unix time in microseconds.
    string_date = "2020-03-31T17:19:29.230522Z"
    dt_date = datetime.datetime(2020, 3, 31, 17, 19, 29, 230522, pytz.UTC)
    expected_unix_time = 1585675169230522
    assert convert_to_unix_time(string_date) == convert_to_unix_time(dt_date) == expected_unix_time
    # A GMT-3 timestamp denotes a different instant, so it must not match.
    string_date = "2020-03-31T17:19:29.230522GMT-3"
    assert convert_to_unix_time(string_date) != convert_to_unix_time(dt_date)
    assert convert_to_unix_time(string_date) != expected_unix_time
    # BUG FIX: the original try/except with `assert True` passed even when
    # no exception was raised; pytest.raises fails if ValueError is absent.
    with pytest.raises(ValueError):
        convert_to_unix_time("a string that is not a valid date")
@pytest.mark.unit
@given(
    value_and_types=st.one_of(
        value_with_type(
            st.lists(elements=st.floats(allow_infinity=False, allow_nan=False), unique=True, min_size=10),
            float,
            list,
        ),
        value_with_type(st.lists(elements=st.integers(), unique=True, min_size=10), int, list),
        value_with_type(st.lists(elements=st.booleans(), unique=True, min_size=2), int, list),
        value_with_type(st.sets(elements=st.floats(allow_infinity=False, allow_nan=False), min_size=10), float, set),
        value_with_type(st.sets(elements=st.integers(), min_size=10), int, set),
        value_with_type(st.sets(elements=st.booleans(), min_size=2), int, set),
        value_with_type(st.dictionaries(st.text(), st.integers(), min_size=10), int, dict),
        value_with_type(st.dictionaries(st.text(), st.booleans(), min_size=10), int, dict),
        value_with_type(
            st.dictionaries(st.text(), st.floats(allow_infinity=False, allow_nan=False), min_size=10),
            float,
            dict,
        ),
        value_with_type(xps.arrays(xps.floating_dtypes(), 10, unique=True), float, list),
        value_with_type(xps.arrays(xps.integer_dtypes(), 10, unique=True), int, list),
        value_with_type(xps.arrays(xps.boolean_dtypes(), 10, unique=True), int, list),
    ),
)
def test_conv_sequences(value_and_types: tuple[Any, type, type]):
    """conv on a container must keep the container type and size and convert
    each element to the expected scalar type (numpy arrays become lists)."""
    values, type_, container_type = value_and_types
    assume(inf not in values)
    converted_list = conv(values)
    assert isinstance(converted_list, container_type)
    # Check if size of converted value doesn't change
    assert len(converted_list) == len(values)
    # Check if type of each item from converted value is correct
    if isinstance(converted_list, dict):
        iterator = converted_list.values()
        original = values.values()
    else:
        iterator = converted_list
        original = values
    for converted_value, value in zip(sorted(iterator), sorted(original)):
        assert isinstance(converted_value, type_)
        conv_value = conv(value)
        # check if converted values are the same or both are nan
        assert converted_value == conv_value or (converted_value != converted_value and conv_value != conv_value)
@pytest.mark.unit
@pytest.mark.parametrize("EPSILON", [0.1])
@given(value=numpy_value(xps.floating_dtypes(), allow_infinity=False, allow_nan=False))
def test_epsilon_on_fp_conv(value, EPSILON):
    """Converting a floating-point scalar must stay within EPSILON of it."""
    converted_value = conv(value)
    # Two one-sided checks, together |value - converted_value| < EPSILON.
    assert value - converted_value < EPSILON
    assert converted_value - value < EPSILON
@pytest.mark.unit
@given(
    value_and_type=st.one_of(
        value_with_type(
            elements=numpy_value(xps.floating_dtypes(), allow_infinity=False, allow_nan=False),
            expected_type=float,
        ),
        value_with_type(
            elements=numpy_value(xps.integer_dtypes(), allow_infinity=False, allow_nan=False),
            expected_type=int,
        ),
        value_with_type(
            elements=st.datetimes(),
            expected_type=int,
        ),
    ),
)
def test_conv(value_and_type):
    """conv must map numpy floats to float, numpy ints to int, and
    datetimes to int."""
    value, expected_type = value_and_type
    converted_value = conv(value)
    assert isinstance(converted_value, expected_type)
@pytest.mark.unit
@pytest.mark.parametrize(
    "sysmax, maxint",
    [
        (numpy.int64(sys.maxsize), numpy.int64(9223372036854775807)),
    ],
)
def test_maxint_conv(sysmax, maxint):
    """conv must survive the largest signed 64-bit value without overflow."""
    # Robustness
    assert conv(sysmax) == maxint
@pytest.mark.unit
@given(
    value_and_types=st.one_of(
        value_with_type(
            st.integers(min_value=numpy.iinfo("int64").min, max_value=numpy.iinfo("int64").max),
            int,
            xps.integer_dtypes(endianness="=", sizes=(64,)),
        ),
        value_with_type(
            st.integers(min_value=numpy.iinfo("int32").min, max_value=numpy.iinfo("int32").max),
            int,
            xps.integer_dtypes(endianness="=", sizes=(32,)),
        ),
        value_with_type(
            st.integers(min_value=numpy.iinfo("int16").min, max_value=numpy.iinfo("int16").max),
            int,
            xps.integer_dtypes(endianness="=", sizes=(16,)),
        ),
        value_with_type(
            st.integers(min_value=numpy.iinfo("int8").min, max_value=numpy.iinfo("int8").max),
            int,
            xps.integer_dtypes(endianness="=", sizes=(8,)),
        ),
        value_with_type(st.floats(width=16), float, xps.floating_dtypes(endianness="=", sizes=(16,))),
        value_with_type(st.floats(width=32), float, xps.floating_dtypes(endianness="=", sizes=(32,))),
        value_with_type(st.floats(width=64), float, xps.floating_dtypes(endianness="=", sizes=(64,))),
    ),
)
def test_reverse_conv(value_and_types):
    """reverse_conv must convert a native int/float into the requested
    in-range numpy dtype, producing a genuinely new (numpy) type."""
    value, current_type, data_type = value_and_types
    # verify if the current data type is as expected (int or float)
    assert isinstance(value, current_type)
    # convert value to given data type (int64, int32, float64 etc .. )
    converted_value = reverse_conv(data_type, value)
    # check if conversion is performed according to given data (int -> numpy.int64, float -> numpy.float64)
    assert numpy.issubdtype(type(converted_value), data_type)
    # check if converted data type is changed and not match with old one
    assert type(converted_value) != current_type
| 37.873239 | 119 | 0.676088 | 0 | 0 | 0 | 0 | 10,338 | 0.961138 | 0 | 0 | 2,487 | 0.23122 |
f63a76556115387c030b8e4755467f5a63c5c6d2 | 1,227 | py | Python | src/controller/src/depth_hold.py | Fzeak/sauvc-2019 | 573dcb351d0f87f9b7605667c570a5003bedb224 | [
"MIT"
] | null | null | null | src/controller/src/depth_hold.py | Fzeak/sauvc-2019 | 573dcb351d0f87f9b7605667c570a5003bedb224 | [
"MIT"
] | null | null | null | src/controller/src/depth_hold.py | Fzeak/sauvc-2019 | 573dcb351d0f87f9b7605667c570a5003bedb224 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
# BUG FIX: the ROS standard-message package module is std_msgs.msg; the
# previous "std_msgs.msgs" does not exist and broke the import at load time.
from std_msgs.msg import UInt16, Float32, String
from mavros_msgs.msg import Mavlink
from struct import pack, unpack
def listener():
    """Initialise the depth_listener node, create the publishers, and spin
    on /mavlink/from until shutdown."""
    # BUG FIX: the publishers must be module-level globals — callback()
    # references pub_pressure/pub_throttle, which were previously locals
    # here and raised NameError on the first incoming message.
    global pub_pressure, pub_throttle
    rospy.init_node('depth_listener', anonymous=True)
    pub_pressure = rospy.Publisher("depth_listener/pressure_diff", Float32, queue_size=10)
    pub_throttle = rospy.Publisher("BlueRov2/rc_channel3/set_pwm", UInt16, queue_size=10)
    rospy.Subscriber("/mavlink/from", Mavlink, callback)
    rospy.spin()
def callback(data):
    """Handle one /mavlink/from message: decode SCALED_PRESSURE2 (msgid 137)
    and publish a throttle PWM correction plus the raw pressure diff."""
    # msgid for SCALED_PRESSURE2 is 137 (Bar30 Sensor)
    # msgid for SCALED_PRESSURE is 29 (Pixhawk internal pressure sensor)
    # https://mavlink.io/en/messages/common.html
    if data.msgid == 137:
        rospy.loginfo(rospy.get_caller_id() + "Package: %s", data)
        # Re-pack the 64-bit payload words and unpack the message fields:
        # uint32 time_boot_ms, float press_abs, float press_diff,
        # int16 temperature, 2 pad bytes.
        # NOTE(review): pack("QQ", ...) assumes data.payload64 has exactly
        # two elements — confirm against the Mavlink serialisation in use.
        p = pack("QQ", *data.payload64)
        time_boot_ms, press_abs, press_diff, temperature = unpack("Iffhxx", p)
        # Pressure is in hectopascal (hPa)
        # 100 hPa = 1.0197 m of H20
        # 50 hPa = 0.5 m of H20
        # Threshold-based correction around the 47-53 hPa dead band.
        # NOTE(review): pub_throttle/pub_pressure must exist as module-level
        # names when this runs — verify listener() publishes them globally.
        if press_diff > 70:
            pub_throttle.publish(1550)
        elif press_diff > 53:
            pub_throttle.publish(1510)
        elif press_diff < 30:
            pub_throttle.publish(1450)
        elif press_diff < 47:
            pub_throttle.publish(1490)
        pub_pressure.publish(press_diff)
if __name__ == "__main__":
    # Run the node directly; listener() blocks in rospy.spin().
    listener()
| 32.289474 | 87 | 0.740831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.320293 |
f63af088aba40e29ad224e5c65268768345c6e09 | 6,354 | py | Python | energy/lopy_serial_external_sd/LoPy4_sd/main.py | niclabs/water-monitoring | d18405e7536c985a400816263e80f73318e304b6 | [
"Apache-2.0"
] | 2 | 2020-05-21T03:47:12.000Z | 2021-02-01T16:48:01.000Z | energy/lopy_serial_external_sd/LoPy4_sd/main.py | niclabs/water-monitoring | d18405e7536c985a400816263e80f73318e304b6 | [
"Apache-2.0"
] | 9 | 2021-03-19T16:09:25.000Z | 2021-05-24T22:55:16.000Z | energy/lopy_serial_external_sd/LoPy4_sd/main.py | niclabs/water-monitoring | d18405e7536c985a400816263e80f73318e304b6 | [
"Apache-2.0"
] | 1 | 2021-01-13T14:45:14.000Z | 2021-01-13T14:45:14.000Z | def get_lorawan_maximum_payload_size(dr):
mac_payload_size_dic = {'0':59, '1':59, '2':59, '3':123, '4':230, '5':230, '6':230}
fhdr_size = 7 #in bytes. Assuming that FOpts length is zero
fport_size = 1 #in bytes
frm_payload_size = mac_payload_size_dic.get(str(dr)) - fhdr_size - fport_size
return frm_payload_size
def get_data_pointers(buff):
    """Split *buff* into 51-byte blocks and return (start, end) index pairs
    for the data portion of each block whose checksum passes.

    Each block is 49 data bytes followed by 2 checksum bytes, so the end
    index excludes the checksum; the final block may be shorter.
    """
    block_size = 51 # include 2 bytes for Checksum
    n_block = math.ceil(len(buff)/block_size)
    ptrs = list()
    for i in range(n_block):
        ptr_start = block_size*i
        if i == n_block-1:
            # Last block: run to the end of the buffer, minus checksum.
            ptr_end = len(buff)-2
        else:
            ptr_end = block_size*(i+1)-2
        #if check_fletcher16(buff[ptr_start:ptr_end+2]):
        # NOTE(review): mula() always returns True, so the Fletcher-16
        # verification (commented out above) is effectively disabled.
        if mula(buff[ptr_start:ptr_end+2]):
            print("LoPy4 - PROCESS - Checksum OK")
            ptrs.append((ptr_start,ptr_end))
        else:
            print("LoPy4 - PROCESS - Checksum Error")
    print(ptrs)
    return ptrs
def mula(buff):
    # Stand-in for check_fletcher16 in get_data_pointers: accepts every
    # block unconditionally, so checksum validation is disabled.
    # NOTE(review): restore the real Fletcher-16 check before relying on
    # data integrity.
    return True
def get_file_size(filename):
    """Return the size of *filename* in bytes.

    Index 6 of the os.stat() result is st_size; indexing (rather than the
    attribute) keeps this working on MicroPython's tuple result.
    See https://docs.pycom.io/firmwareapi/micropython/uos/
    """
    stat_fields = os.stat(filename)
    return stat_fields[6]
def get_free_space(path):
    """Return the available space on *path* in KiB.

    os.getfree is MicroPython/Pycom-specific; it does not exist in CPython.
    """
    return os.getfree(path)
def file_exists_in_folder(filename, folder):
    """Case-insensitively check whether *filename* is an entry of *folder*."""
    target = filename.lower()
    return any(entry.lower() == target for entry in os.listdir(folder))
def save_text_file_sd(readings: list, index: int):
    '''Append readings to data<index>.csv on the SD card, rolling over to
    data<index+1>.csv when the current file exceeds MAX_FILE_SIZE.

    Each reading is an (int, int, float) tuple written as "a;b;c.c".
    Returns the (possibly incremented) file index.

    Usage:
        file_index = save_text_file_sd(decoded_data, file_index)
    '''
    if get_free_space(SD_MOUNT_POINT) < 2:
        print("LoPy4 - SD - Too litle space available!")
    filename = ('data%d' % index) + '.csv'
    filepath = SD_MOUNT_POINT + '/' + filename
    # BUG FIX: the size must be checked on the full path; the bare filename
    # is relative to the current directory, not the SD mount point.
    if file_exists_in_folder(filename, SD_MOUNT_POINT) and get_file_size(filepath) > MAX_FILE_SIZE:
        index += 1
        filepath = SD_MOUNT_POINT + ('/data%d' % index) + '.csv'
    print("LoPy4 - SD - Writing in", filepath)
    with open(filepath, 'a') as f:
        for reading in readings:
            row = '%d;%d;%.1f\n' % reading
            f.write(row)
        f.flush()
        os.sync()
    return index
# Main loop: periodically wake, drain the UART buffer, read the Arduino
# payload, persist the decoded readings to SD, then forward the raw blocks
# over LoRaWAN in MTU-sized batches.
print("LoPy4 - Initializing communication values...")
m_dr = DATA_RATE
print("LoPy4 - LoRaWAN - Initial Data Rate: ", m_dr)
lorawan_mtu = get_lorawan_maximum_payload_size(m_dr)
print("LoPy4 - LoRaWAN - Initial Maximum Payload Size: %d bytes" % lorawan_mtu)
print("LoPy4 - Starting Loop...")
a = 1
while True:
    print("*****************************")
    # Discard anything that accumulated on the UART while we were busy.
    print("LoPy4 - SERIAL_RX - Cleaning Serial Rx buffer")
    n_bytes = uart.any()
    buff = uart.read(n_bytes)
    print("LoPy4 - SERIAL_RX - num bytes recv and dropped: %d" % n_bytes)
    print("LoPy4 - Loop %d" % a)
    a=a+1
    print("LoPy4 - Going to sleep...")
    time.sleep(3)
    machine.sleep(50 * 1000) # 50 s
    print("Wake up...")
    # Read the serial message coming from the Arduino: poll the UART for
    # up to 3 seconds, accumulating whatever arrives.
    start = time.ticks_ms()
    recv_bytes = bytes()
    n_bytes = 0
    recv_len = -1
    while True:
        if time.ticks_diff(time.ticks_ms(), start) > 3000:
            break
        n_bytes = uart.any()
        if n_bytes != 0:
            recv_bytes = recv_bytes + uart.read(n_bytes)
    recv_len = len(recv_bytes)
    print("LoPy4 - SERIAL_RX - num bytes recv: %d" % n_bytes)
    print("LoPy4 - SERIAL_RX - bytes recv: %s" % ubinascii.hexlify(recv_bytes))
    print("LoPy4 - SERIAL_RX - total recv_bytes: %d" % recv_len)
    if(recv_len != 0):
        print("LoPy4 - SERIAL_RX - Dropping 1st byte (Dummy RX)")
        recv_bytes = recv_bytes[1:] # now both sides hold the same message
        print("LoPy4 - SERIAL_RX - actual recv msg (%d bytes): %s" % (recv_len, ubinascii.hexlify(recv_bytes)))
        print("LoPy4 - DECODED : ", end='')
        decoded_data = decode_payload(recv_bytes)
        print(decoded_data)
        file_index = save_text_file_sd(decoded_data, file_index)
        # Recompute the maximum payload size available for a LoRaWAN message.
        print("LoPy4 - LoRaWAN - Data Rate: ", m_dr)
        lorawan_mtu = get_lorawan_maximum_payload_size(m_dr)
        print("LoPy4 - LoRaWAN - Maximum Payload Size: %d bytes" % lorawan_mtu)
        ptrs = get_data_pointers(recv_bytes)
        block_size = 49 # bytes
        blocks_per_mtu = lorawan_mtu//block_size # number of blocks per LoRaWAN message
        n_lorawan_messages = math.ceil(len(ptrs)/blocks_per_mtu)
        n_blocks_in_full_lorawan_msg = blocks_per_mtu * (len(ptrs)//blocks_per_mtu)
        print("LoPy4 - LoRaWAN - The current LoRaWAN MTU supports %d blocks" % blocks_per_mtu)
        print("LoPy4 - LoRaWAN - The %d blocks are sent in multiple (%d) LoRaWAN messages" % (len(ptrs),n_lorawan_messages))
        print("LoPy4 - LoRaWAN - blocks_per_mtu: %d" % blocks_per_mtu)
        print("LoPy4 - LoRaWAN - len(ptrs): %d" % len(ptrs))
        print("LoPy4 - LoRaWAN - n_blocks_in_full_lorawan_msg: %d" % n_blocks_in_full_lorawan_msg)
        # Send full batches of blocks_per_mtu blocks each.
        i = 0
        while i < n_blocks_in_full_lorawan_msg:
            aux = bytearray()
            for _ in range(blocks_per_mtu):
                aux += recv_bytes[ptrs[i][0]:ptrs[i][1]]
                i += 1
            try:
                s.setblocking(True)
                print("LoPy4 - LoRaWAN - Sending %d bytes" % len(aux))
                s.send(aux)
                s.setblocking(False)
            except OSError as e:
                # errno 11 == EAGAIN (would block).
                if e.args[0] == 11:
                    s.setblocking(False)
                print("LoPy4 - LoRaWAN_ERROR - It can probably be a duty cycle problem")
        # Send the remaining partial batch, if any.
        if n_blocks_in_full_lorawan_msg != len(ptrs):
            aux = bytearray()
            for i in range(n_blocks_in_full_lorawan_msg, len(ptrs)):
                aux += recv_bytes[ptrs[i][0]:ptrs[i][1]]
            try:
                s.setblocking(True)
                print("LoPy4 - LoRaWAN - Sending %d bytes" % len(aux))
                s.send(aux)
                s.setblocking(False)
            except OSError as e:
                if e.args[0] == 11:
                    s.setblocking(False)
                print("LoPy4 - LoRaWAN_ERROR - It can probably be a duty cycle problem")
| 37.597633 | 125 | 0.586245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,885 | 0.296477 |
f63c2ee035cc31867910d8a464ee16f8c9b19ab6 | 741 | py | Python | forms_app/views.py | cs-fullstack-fall-2018/django-forms2-bachmanryan | e0318516f86afe99e991e6796b5e66d8b3e7a728 | [
"Apache-2.0"
] | null | null | null | forms_app/views.py | cs-fullstack-fall-2018/django-forms2-bachmanryan | e0318516f86afe99e991e6796b5e66d8b3e7a728 | [
"Apache-2.0"
] | null | null | null | forms_app/views.py | cs-fullstack-fall-2018/django-forms2-bachmanryan | e0318516f86afe99e991e6796b5e66d8b3e7a728 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render, redirect
from .models import FormModel
from datetime import datetime
def index(request):
    """Render the index template with every FormModel instance."""
    return render(
        request,
        'forms_app/index.html',
        {'form_list': FormModel.objects.all()},
    )
def post_new(request):
    """Handle a new-entry POST; otherwise re-render the index page."""
    if request.method == "POST":
        # NOTE(review): FormModel is imported from .models (a model, not a
        # Form subclass) yet it is constructed from request.POST and asked
        # for is_valid()/save() — this looks like it was meant to be a
        # ModelForm; confirm against forms_app.models/forms.
        form = FormModel(request.POST)
        if form.is_valid():
            post = form.save()
            # NOTE(review): name/recipe/timeCook are all set to request.user
            # and dateCreated to the datetime *module* (not datetime.now());
            # these assignments likely need revisiting.
            post.name = request.user
            post.recipe = request.user
            post.timeCook = request.user
            post.dateCreated = datetime
            post.save()
            # NOTE(review): index() takes no pk argument — this redirect
            # will fail URL reversal; verify the URLconf.
            return redirect('index', pk=post.pk)
    else:
        form = FormModel()
    return render(request, 'forms_app/index.html')
| 28.5 | 59 | 0.626181 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.091768 |
f63c8b35a69bb35bff0b3baecdbc71a71d1a0a8f | 342 | py | Python | figurefirst/__init__.py | clbarnes/figurefirst | ed38e246a96f28530bf663eb6920da1c3ccee610 | [
"MIT"
] | 1 | 2020-05-07T01:17:51.000Z | 2020-05-07T01:17:51.000Z | figurefirst/__init__.py | clbarnes/figurefirst | ed38e246a96f28530bf663eb6920da1c3ccee610 | [
"MIT"
] | 1 | 2020-05-07T01:25:27.000Z | 2020-05-28T08:10:19.000Z | figurefirst/__init__.py | clbarnes/figurefirst | ed38e246a96f28530bf663eb6920da1c3ccee610 | [
"MIT"
] | null | null | null | from . import svg_to_axes
# Package initialisation: expose the figurefirst submodules and the main
# FigureLayout entry point at package level.
#reload(svg_to_axes)
from . import mpl_functions
from .svg_to_axes import FigureLayout
from . import mpl_fig_to_figurefirst_svg
from . import svg_util
from . import deprecated_regenerate
import sys
# The regenerate submodule is only importable on Python 3.
if sys.version_info[0] > 2: # regenerate uses importlib.utils, which requires python 3?
    from . import regenerate
| 28.5 | 87 | 0.804094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.230994 |
f63eceb66bf981ddf69e6d539baa4da5ebac9226 | 3,152 | py | Python | dedsecuritybot.py | dedsecurity/DedSecuritySearch | 294e2e1ed9254499fecc3db7b419f283027361fa | [
"MIT"
] | null | null | null | dedsecuritybot.py | dedsecurity/DedSecuritySearch | 294e2e1ed9254499fecc3db7b419f283027361fa | [
"MIT"
] | null | null | null | dedsecuritybot.py | dedsecurity/DedSecuritySearch | 294e2e1ed9254499fecc3db7b419f283027361fa | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import pandas as pd
import json
import nltk
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import Input, Embedding, LSTM , Dense,GlobalMaxPooling1D,Flatten
from tensorflow.keras.preprocessing.sequence import pad_sequences
from transformers import BertModel, BertTokenizer
from tensorflow.keras.models import Model
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import string
import torch
import random
# Load the intent definitions: each intent has a tag, example inputs, and
# candidate responses.
with open('./links.json') as content:
  datali = json.load(content)
tags = []
inputs = []
responses={}
for intent in datali['intents']:
  responses[intent['tag']]=intent['responses']
  for lines in intent['input']:
    inputs.append(lines)
    tags.append(intent['tag'])
# Build a (input, tag) frame, shuffle it, and strip punctuation/lowercase.
data = pd.DataFrame({"inputs":inputs,
                     "tags":tags})
data
data = data.sample(frac=1)
data['inputs'] = data['inputs'].apply(lambda wrd:[ltrs.lower() for ltrs in wrd if ltrs not in string.punctuation])
data['inputs'] = data['inputs'].apply(lambda wrd: ''.join(wrd))
data
# Tokenize and pad the inputs; label-encode the intent tags.
tokenizer = Tokenizer(num_words=2000)
tokenizer.fit_on_texts(data['inputs'])
train = tokenizer.texts_to_sequences(data['inputs'])
x_train = pad_sequences(train)
le = LabelEncoder()
y_train = le.fit_transform(data['tags'])
input_shape = x_train.shape[1]
print(input_shape)
vocabulary = len(tokenizer.word_index)
print("number of unique words : ",vocabulary)
output_length = le.classes_.shape[0]
print("output length: ",output_length)
# Small Embedding -> LSTM -> Dense softmax intent classifier.
i = Input(shape=(input_shape,))
x = Embedding(vocabulary+1,10)(i)
x = LSTM(10,return_sequences=True)(x)
x = Flatten()(x)
x = Dense(output_length,activation="softmax")(x)
model = Model(i,x)
model.compile(loss="sparse_categorical_crossentropy",optimizer='adam',metrics=['accuracy'])
train = model.fit(x_train,y_train,epochs=300)
model1 = BertModel.from_pretrained('bert-base-uncased')
tokenizer1 = BertTokenizer.from_pretrained('bert-base-uncased')
# Interactive loop: read a query, classify its intent, print a response.
while True:
  import random  # redundant: random is already imported at module level
  print("\033[32mDed Security Search \033[m")
  texts_p = []
  prediction_input = input('\033[36mSearch: \033[m')
  # BERT-encode the raw query.
  # NOTE(review): hidden_rep/cls_head are never used afterwards, and tuple
  # unpacking of model1(...) assumes an older transformers version that
  # returns tuples (newer ones return a ModelOutput) — confirm the pin.
  tokens = tokenizer1.tokenize(prediction_input)
  tokens = ['[CLS]'] + tokens + ['[SEP]']
  tokens = tokens + ['[PAD]'] + ['[PAD]']
  attention_mask = [1 if i!= '[PAD]' else 0 for i in tokens]
  token_ids = tokenizer1.convert_tokens_to_ids(tokens)
  token_ids = torch.tensor(token_ids).unsqueeze(0)
  attention_mask = torch.tensor(attention_mask).unsqueeze(0)
  hidden_rep, cls_head = model1(token_ids, attention_mask = attention_mask)
  # Preprocess the query the same way as the training data, then classify.
  prediction_input = [letters.lower() for letters in prediction_input if letters not in string.punctuation]
  prediction_input = ''.join(prediction_input)
  texts_p.append(prediction_input)
  prediction_input = tokenizer.texts_to_sequences(texts_p)
  prediction_input = np.array(prediction_input).reshape(-1)
  prediction_input = pad_sequences([prediction_input],input_shape)
  output = model.predict(prediction_input)
  output = output.argmax()
  response_tag = le.inverse_transform([output])[0]
  print(random.choice(responses[response_tag]))
f641186c18365ae96c79dd45058d2e7e2b96430d | 5,069 | py | Python | tests/html/inline_builder_test.py | a-pompom/Python-markdownParser | 2525ba9c25dc3f77fb70af4ba9af423114940077 | [
"MIT"
] | null | null | null | tests/html/inline_builder_test.py | a-pompom/Python-markdownParser | 2525ba9c25dc3f77fb70af4ba9af423114940077 | [
"MIT"
] | 29 | 2021-07-25T02:06:16.000Z | 2022-03-20T10:55:16.000Z | tests/html/inline_builder_test.py | a-pompom/Python-markdownParser | 2525ba9c25dc3f77fb70af4ba9af423114940077 | [
"MIT"
] | null | null | null | import pytest
from app.html.inline_builder import InlineBuilder, LinkBuilder, CodeBuilder, ImageBuilder
from app.markdown.inline_parser import InlineParser, LinkParser, CodeParser, ImageParser
from app.settings import setting
class TestInlineBuilder:
    """Verify that an Inline element is rendered to the expected HTML string."""
    # HTML building
    @pytest.mark.parametrize(
        ('inline_text', 'expected'),
        [
            (
                'plain text',
                'plain text'
            ),
            (
                '[参考リンク](https://docs.python.org/3/)',
                f'<a href="https://docs.python.org/3/" class="{setting["class_name"]["a"]}">参考リンク</a>'
            ),
            (
                # NOTE(review): the image input is an empty string here;
                # the markdown image syntax may have been lost — confirm.
                '',
                '<img src="image.png" alt="awesome image">'
            ),
            (
                '`DependencyInjection`',
                f'<code class="{setting["class_name"]["code"]}">DependencyInjection</code>'
            ),
        ],
        ids=['plain', 'link', 'image', 'code'])
    def test_build(self, inline_text: str, expected: str):
        # GIVEN
        sut = InlineBuilder()
        inline = InlineParser().parse(inline_text)[0]
        # WHEN
        actual = sut.build(inline)
        # THEN
        assert actual == expected
class TestLinkBuilder:
    """Verify that a LinkInline element yields the matching <a> tag HTML."""
    # Target detection
    @pytest.mark.parametrize(
        ('inline_text', 'expected'),
        [
            ('[this is a link](url)', True),
            ('plain text', False),
            ('[参考リンク](https://www.google.com/)', True)
        ],
        ids=['target', 'not target', 'normal link'])
    def test_target(self, inline_text: str, expected: bool):
        # GIVEN
        sut = LinkBuilder()
        inline = InlineParser().parse(inline_text)[0]
        # WHEN
        actual = sut.is_target(inline)
        # THEN
        assert actual == expected
    # HTML building
    @pytest.mark.parametrize(
        ('inline_text', 'expected'),
        [
            (
                '[this is a link](url)',
                f'<a href="url" class="{setting["class_name"]["a"]}">this is a link</a>'
            ),
            (
                '[参考リンク](https://www.google.com/)',
                f'<a href="https://www.google.com/" class="{setting["class_name"]["a"]}">参考リンク</a>'
            )
        ],
        ids=['url', 'google'])
    def test_build(self, inline_text: str, expected: str):
        # GIVEN
        sut = LinkBuilder()
        inline = LinkParser().parse(inline_text)
        # WHEN
        actual = sut.build(inline)
        # THEN
        assert actual == expected
class TestCodeBuilder:
    """Verify that a CodeInline element yields the matching <code> tag HTML."""
    # Target detection
    @pytest.mark.parametrize(
        ('inline_text', 'expected'), [
            ('`#`', True),
            ('[this is a link](url)', False),
        ],
        ids=['target', 'not target'])
    def test_target(self, inline_text: str, expected: bool):
        # GIVEN
        sut = CodeBuilder()
        inline = InlineParser().parse(inline_text)[0]
        # WHEN
        actual = sut.is_target(inline)
        # THEN
        assert actual == expected
    # HTML building
    @pytest.mark.parametrize(
        ('inline_text', 'expected'), [
            (
                '`plain text`',
                f'<code class="{setting["class_name"]["code"]}">plain text</code>'
            ),
            (
                '`codeタグ`',
                f'<code class="{setting["class_name"]["code"]}">codeタグ</code>'
            ),
        ],
        ids=['plain', 'full width'])
    def test_build(self, inline_text: str, expected: str):
        # GIVEN
        sut = CodeBuilder()
        inline = CodeParser().parse(inline_text)
        # WHEN
        actual = sut.build(inline)
        # THEN
        assert actual == expected
class TestImageBuilder:
    """Verify that an ImageInline element yields the matching <img> tag HTML."""
    # Target detection
    @pytest.mark.parametrize(
        ('inline_text', 'expected'),
        [
            # NOTE(review): the "target" input is an empty string; the
            # markdown image syntax may have been lost — confirm fixture.
            ('', True),
            ('`code text`', False),
        ],
        ids=['target', 'not target'])
    def test_target(self, inline_text: str, expected: bool):
        # GIVEN
        sut = ImageBuilder()
        inline = InlineParser().parse(inline_text)[0]
        # WHEN
        actual = sut.is_target(inline)
        # THEN
        assert actual == expected
    # HTML building
    @pytest.mark.parametrize(
        ('inline_text', 'expected'),
        [
            (
                '',
                '<img src="images/dog.png" alt="わんこ">'
            ),
            (
                '',
                '<img src="http://localhost/image.png" alt="画像">'
            ),
        ],
        ids=['path_expression', 'url_expression'])
    def test_build(self, inline_text, expected: str):
        # GIVEN
        sut = ImageBuilder()
        inline = ImageParser().parse(inline_text)
        # WHEN
        actual = sut.build(inline)
        # THEN
        assert actual == expected
| 29.300578 | 102 | 0.50582 | 5,126 | 0.955452 | 0 | 0 | 4,488 | 0.836533 | 0 | 0 | 1,944 | 0.362349 |
f6414d22c3496a2fc287bc2eb9d1d63bb35b880e | 5,728 | py | Python | lab8/src/main.py | YaelBenShalom/Intro-to-AI | 37df1fc9316544338b8acfa5264316c4d5ce5915 | [
"MIT"
] | null | null | null | lab8/src/main.py | YaelBenShalom/Intro-to-AI | 37df1fc9316544338b8acfa5264316c4d5ce5915 | [
"MIT"
] | null | null | null | lab8/src/main.py | YaelBenShalom/Intro-to-AI | 37df1fc9316544338b8acfa5264316c4d5ce5915 | [
"MIT"
] | null | null | null | import common
import student_code
import array
class bcolors:
    """ANSI colour escape codes for terminal output."""
    RED = "\x1b[31m"
    GREEN = "\x1b[32m"
    NORMAL = "\x1b[0m"
def read_data(training_data, test_data1, gold_data1, filename):
    """Load the binary dataset from *filename* into the given buffers and
    mirror the whole dataset to a sibling .csv file.

    training_data / test_data1 / gold_data1 are pre-sized lists filled in
    place.  The file layout is rows of DATA_DIM features plus a label,
    training rows first; the test rows' label slot is replaced by -1 and
    the true label stored in gold_data1.
    """
    data = array.array('f')
    test = array.array('f')
    with open(filename, 'rb') as fd:
        data.fromfile(fd, common.constants.TRAINING_SIZE *
                      (common.constants.DATA_DIM+1))
        test.fromfile(fd, common.constants.TEST_SIZE *
                      (common.constants.DATA_DIM+1))
    for i in range(common.constants.TRAINING_SIZE):
        for j in range(common.constants.DATA_DIM+1):
            training_data[i][j] = data[i*(common.constants.DATA_DIM+1)+j]
    for i in range(common.constants.TEST_SIZE):
        for j in range(common.constants.DATA_DIM):
            test_data1[i][j] = test[i*(common.constants.DATA_DIM+1)+j]
        test_data1[i][common.constants.DATA_DIM] = -1
        gold_data1[i] = test[i *
                             (common.constants.DATA_DIM+1)+common.constants.DATA_DIM]
    # Use a context manager so the CSV mirror is closed (and flushed) even
    # if a write fails; the original left the handle open on error.
    csv_path = filename.split('.dat')[0] + '.csv'
    with open(csv_path, 'w') as f:
        for i in training_data:
            f.write(str(i[0]) + ',' + str(i[1]) + ',' + str(i[2]) + '\n')
        for i in range(len(test_data1)):
            f.write(str(test_data1[i][0]) + ',' + str(test_data1[i][1]) +
                    ',' + str(gold_data1[i]) + '\n')
def read_data_csv(training_data, test_data1, gold_data1, filename):
    """Load a CSV dataset into the caller-supplied buffers.

    The first TRAINING_SIZE rows fill `training_data` (features + label);
    the next TEST_SIZE rows are split into `test_data1` (features, label
    slot overwritten with -1) and `gold_data1` (true labels).
    """
    with open(filename, 'r') as fd:
        lines = fd.readlines()
    n_train = common.constants.TRAINING_SIZE
    n_test = common.constants.TEST_SIZE
    width = common.constants.DATA_DIM + 1
    train_vals = []
    test_vals = []
    for row in range(n_train + n_test):
        parsed = [float(tok) for tok in lines[row].strip().split(',')]
        if row < n_train:
            train_vals += parsed
        else:
            test_vals += parsed
    for row in range(n_train):
        for col in range(width):
            training_data[row][col] = train_vals[row * width + col]
    for row in range(n_test):
        for col in range(width - 1):
            test_data1[row][col] = test_vals[row * width + col]
        # blank the label slot so classifiers cannot peek at the answer
        test_data1[row][width - 1] = -1
        gold_data1[row] = test_vals[row * width + width - 1]
def run_experiment1(filename):
    """Run the part-one (linear) classifier on `filename` and grade it.

    Prints the number of misclassified test points and a coloured
    SUCCESS/FAIL verdict; the pass threshold is 5% of TEST_SIZE.
    :return: True when the error count is within the threshold.
    """
    gold_data = [0 for _ in range(common.constants.TEST_SIZE)]
    test_data = [[0, 0, 0] for _ in range(common.constants.TEST_SIZE)]
    training_data = [[0, 0, 0] for _ in range(common.constants.TRAINING_SIZE)]
    # generating data should be hidden from students!
    read_data_csv(training_data, test_data, gold_data, filename)
    # this is one of the two student functions
    student_code.part_one_classifier(training_data, test_data)
    # part 1 grading: count predictions that differ from the gold labels
    error = 0
    for i in range(common.constants.TEST_SIZE):
        if test_data[i][common.constants.DATA_DIM] != gold_data[i]:
            error += 1
    print("Incorrect classification is "+str(error) +
          " out of " + str(common.constants.TEST_SIZE))
    success = True
    if error <= float(common.constants.TEST_SIZE)*.05:
        print("(" + bcolors.GREEN + "SUCCESS" + bcolors.NORMAL + ")")
    else:
        success = False
        print("(" + bcolors.RED + "FAIL" + bcolors.NORMAL + ") maximum " +
              str(float(common.constants.TEST_SIZE)*.05))
    # was a bare `print` (Python 2 statement): a no-op expression under
    # Python 3 -- print() actually emits the intended blank separator line
    print()
    return success
def run_experiment2(filename):
    """Run the part-two (accelerometer) classifier on `filename` and grade it.

    Prints the number of misclassified test points and a coloured
    SUCCESS/FAIL verdict; the pass threshold is 5% of TEST_SIZE.
    :return: True when the error count is within the threshold.
    """
    gold_data = [0 for _ in range(common.constants.TEST_SIZE)]
    test_data = [[0, 0, 0] for _ in range(common.constants.TEST_SIZE)]
    training_data = [[0, 0, 0] for _ in range(common.constants.TRAINING_SIZE)]
    # generating data should be hidden from students!
    read_data_csv(training_data, test_data, gold_data, filename)
    # this is one of the two student functions
    student_code.part_two_classifier(training_data, test_data)
    # part 2 grading: count predictions that differ from the gold labels
    error = 0
    for i in range(common.constants.TEST_SIZE):
        if test_data[i][common.constants.DATA_DIM] != gold_data[i]:
            error += 1
    print("Incorrect classification is "+str(error) +
          " out of " + str(common.constants.TEST_SIZE))
    success = True
    if error <= float(common.constants.TEST_SIZE)*.05:
        print("(" + bcolors.GREEN + "SUCCESS" + bcolors.NORMAL + ")")
    else:
        success = False
        print("(" + bcolors.RED + "FAIL" + bcolors.NORMAL + ") maximum " +
              str(float(common.constants.TEST_SIZE)*.05))
    # was a bare `print` (Python 2 statement): a no-op expression under
    # Python 3 -- print() actually emits the intended blank separator line
    print()
    return success
all_passed = True

# (runner, dataset path, banner) triples -- same datasets, order and
# printed banners as the original unrolled sequence of calls.
_experiments = [
    (run_experiment1, "../data1.csv", "Linear Classifier : Dataset 1"),
    (run_experiment1, "../data2.csv", "Linear Classifier : Dataset 2"),
    (run_experiment1, "../data3.csv", "Linear Classifier : Dataset 3"),
    (run_experiment1, "../data4.csv", "Linear Classifier : Dataset 4"),
    (run_experiment2, "../datar1.csv", "Accelerometer : Dataset 1"),
    (run_experiment2, "../datar2.csv", "Accelerometer : Dataset 2"),
    (run_experiment2, "../datar3.csv", "Accelerometer : Dataset 3"),
]
for _runner, _path, _banner in _experiments:
    print(_banner)
    all_passed = _runner(_path) and all_passed

# exit status 0 only when every dataset met its error threshold
if all_passed:
    exit(0)
else:
    exit(1)
| 36.025157 | 86 | 0.622905 | 84 | 0.014665 | 0 | 0 | 0 | 0 | 0 | 0 | 848 | 0.148045 |
f64190e807ca83a7c40b302396ec9aa0416a19ee | 7,670 | py | Python | webapp/api/Model/basic.py | SCcagg5/My_Youtube | 6234b655ceb18a85d9bdd2c5837ce625c495d07b | [
"MIT"
] | 3 | 2020-05-16T19:24:42.000Z | 2020-09-26T17:05:19.000Z | webapp/api/Model/basic.py | SCcagg5/My_Youtube | 6234b655ceb18a85d9bdd2c5837ce625c495d07b | [
"MIT"
] | 30 | 2020-05-16T16:01:17.000Z | 2020-10-08T23:05:08.000Z | webapp/api/Model/basic.py | SCcagg5/My_Youtube | 6234b655ceb18a85d9bdd2c5837ce625c495d07b | [
"MIT"
] | null | null | null | from bottle import request, response, HTTPResponse
import os, datetime, re
import json as JSON
import jwt
class auth:
    """Shared-secret token helper built on PyJWT.

    NOTE(review): the embedded digest uses Python's built-in hash(); for
    str inputs hash() is randomized per process (PYTHONHASHSEED), so
    tokens presumably only verify inside the same interpreter process --
    confirm before relying on cross-process authentication.
    """
    def gettoken(mypass):
        # Issue a 24h token when the supplied password matches API_PASS.
        secret = str(os.getenv('API_SCRT', '!@ws4RT4ws212@#%'))
        password = str(os.getenv('API_PASS', 'password'))
        if mypass == password:
            exp = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
            # .decode('utf-8'): jwt.encode returns bytes under PyJWT 1.x
            ret = jwt.encode({'exp': exp, 'password': hash(password + secret)}, secret).decode('utf-8')
            return [True, {'exp': str(exp), "token": str(ret)}, None, {"token": str(ret)}]
        return [False, "Invalid password", 403]
    def verify(token):
        # Validate signature, expiry (10s leeway) and the password digest.
        secret = str(os.getenv('API_SCRT', '!@ws4RT4ws212@#%'))
        password = str(os.getenv('API_PASS', 'password'))
        try:
            decoded = jwt.decode(token, secret, leeway=10, algorithms=['HS256'])
            if decoded["password"] != hash(password + secret):
                # bare raise with no active exception raises RuntimeError,
                # which the blanket except below reports as an invalid token
                raise
        except jwt.ExpiredSignature:
            # NOTE(review): jwt.ExpiredSignature is the legacy PyJWT alias;
            # newer releases expose ExpiredSignatureError -- confirm the
            # pinned PyJWT version.
            return [False, "Signature expired", 403]
        except:
            return [False, "Invalid token", 403]
        return [True, None, None]
class ret:
    """Mutable builder for the API's uniform JSON response envelope.

    The envelope echoes the original query details ('queryInfos'), plus an
    HTTP-like 'status', an 'error' message, the payload 'data', a
    'succes' flag (sic) and the anonymisation mode in 'mod'.
    """
    def __init__(self, route = None, params=None, header = None, cookie = None, anon = None) :
        # Snapshot of the incoming request, echoed back unless anonymised.
        self.data = {
            'queryInfos' : {
                'route': route,
                'params': params,
                'header': header,
                'cookie': cookie
            },
            'status' : 200,
            'error' : None,
            'data' : None,
            'succes' : False,
            'mod' : None
        }
        self.err = False
        # Anonymisation mode override; falls back to the API_MOD env var.
        self.anon = anon
    def get(self):
        # Raw access to the envelope, without finalisation/anonymisation.
        return self.data
    def ret(self):
        # Finalise: apply anonymisation, then mark success if no error set.
        self.__anon()
        self.data['mod'] = self.anon
        if self.data['error'] is None :
            self.data['succes'] = True
            self.data['status'] = 200
        return self.data
    def __anon(self):
        # Strip request details per anonymisation level: 0 (TEST) keeps
        # everything, 2 (PROD) drops 'queryInfos' entirely, 1 (DEV) only
        # removes the well-known transport/header noise listed below.
        level = self.__getannon()
        if level == 0 :
            return
        if level == 2 :
            if "queryInfos" in self.data:
                del self.data["queryInfos"]
            return
        forb = ["content-type", "connection", "x-real-ip", "x-forwarded-for",
                "x-forwarded-proto", "x-forwarded-ssl", "x-forwarded-port",
                "user-agent", "accept", "cache-control", "accept-encoding",
                "cookie", "content-length"]
        for i in self.data["queryInfos"]:
            if i is None:
                continue
            for i2 in forb:
                if self.data["queryInfos"][i] is None or i2 not in self.data["queryInfos"][i]:
                    continue
                del self.data["queryInfos"][i][i2]
    def add_error(self, error = None, code = None):
        # Record an error and return the finalised envelope; returns 1
        # (after recording a 500) when called with missing arguments.
        self.data['error'] = error
        self.data['status'] = code
        self.data['data'] = None
        if code is None:
            self.add_error("Bad code input", 500)
            return 1
        if error is None:
            self.add_error("Bad error input", 500)
            return 1
        self.err = True
        return self.ret()
    def add_data(self, data = None):
        # Attach a success payload; 0 on success, 1 when data is missing.
        self.data['data'] = data
        self.set_code(200)
        if data is None:
            self.add_error("Bad data input", 500)
            return 1
        return 0
    def set_code(self, code = None):
        # Set the response status; 0 on success, 1 when code is missing.
        self.data['status'] = code
        if code is None:
            self.add_error("Bad code input", 500)
            return 1
        return 0
    def __getannon(self):
        # Resolve the effective mode once and cache it on the instance.
        self.anon = self.anon if self.anon is not None else str(os.getenv('API_MOD', 'PROD'))
        return self.__anon_to_lvl(self.anon)
    def __anon_to_lvl(self, anon = 'PROD'):
        # Map a mode name to its numeric level; unknown names -> PROD (2).
        mod = {
            "PROD": 2,
            "DEV" : 1,
            "TEST": 0
        }
        if anon in mod:
            return mod[anon]
        return 2
class check:
    """Request-parsing and payload-validation helpers.

    The methods take no `self` and are always invoked as `check.xxx(...)`,
    i.e. they behave like namespaced plain functions.
    """
    def contain(json, array, type = "body"):
        """Validate that `json` holds every key listed in `array`.

        Entries of `array` are either plain key names (required, non-null)
        or lists of alternatives where at least one must be present
        (missing alternatives are back-filled with None).
        :return: [ok, payload_or_message, http_status]
        """
        type = type.upper()
        if json is None:
            return [False, "Invalid json received ", 400]
        for i in array:
            if isinstance(i, list):
                if not check.contain_opt(json, i):
                    # NOTE: "on of" typo kept as-is -- the message is a
                    # runtime string callers/tests may compare against
                    return [False, "[" + type +"] Missing on of parameters: " + JSON.dumps(i), 400]
                json = check.setnoneopt(json, i)
            elif i not in json:
                return [False, "[" + type +"] Missing parameter : " + i, 400]
            elif json[i] is None:
                return [False, "[" + type +"] Null parameter : " + i, 400]
        return [True, json, 200]
    def contain_opt(json, arr_opt):
        """Return True when at least one entry of `arr_opt` is satisfied."""
        for i in arr_opt:
            if isinstance(i, list):
                # BUG FIX: check.contain returns a [ok, ...] list, which is
                # always truthy -- the success flag is its first element.
                if check.contain(json, i)[0]:
                    return True
            elif i in json:
                return True
        return False
    def setnoneopt(json, arr_opt):
        """Back-fill every key of `arr_opt` missing from `json` with None."""
        for i in arr_opt:
            if i not in json:
                json[i] = None
        return json
    def json(request):
        """Return the parsed JSON body of a bottle request."""
        res = {}
        #try:
        res = request.json
        #except:
        #    pass
        return res
    def head_json(request, cookie = None):
        """Merge `cookie` entries and request headers into one lower-cased dict."""
        res = {}
        try:
            for i in cookie:
                res[i.lower()] = cookie[i]
            for i in request.headers.keys():
                res[i.lower()] = request.headers.raw(i)
        except:
            # best effort: tolerate missing cookie dict / header access errors
            pass
        return res
    def cookies_json(request):
        """Parse the raw Cookie header into a {name: value} dict."""
        res = {}
        try:
            cookie = request.headers.raw("Cookie")
            for i in cookie.split(";"):
                i = i.split("=")
                res[i[0].strip()] = i[1]
        except:
            # no Cookie header (or malformed pair) -> empty dict
            pass
        return res
    def route_json(request):
        """Turn the URL path into {segment: next_segment} pairs."""
        res = {}
        dat = request.path[1:].split('/')
        i = 0
        while i < len(dat) - 1:
            res[str(dat[i])] = str(dat[i + 1])
            i += 1
        return res
class callnext:
    """Middleware-style chain runner for bottle request handlers.

    __init__ parses the incoming request once; `call` then walks a list of
    handler callables, each of which reports back through `call_next` with
    a result list shaped [ok, payload_or_error, status, (optional) cookies].
    """
    def __init__(self, req, resp = None, err = None, anonlvl = None):
        # Pre-parsed views of the request: body, cookies, headers, route, query.
        self.pr = check.json(req)
        self.ck = check.cookies_json(req)
        self.hd = check.head_json(req, self.ck)
        self.rt = check.route_json(req)
        self.get = dict(req.query.decode())
        self.private = {}  # scratch space shared between handlers
        self.cookie = {}   # cookies queued to be set on the response
        self.toret = ret(req.path, self.pr, self.hd, self.ck, anonlvl)
        self.req = req
        self.resp = resp
        self.err = err
    def call(self, nextc):
        # Entry point: short-circuit CORS preflight, otherwise start the chain.
        if self.req.method == 'OPTIONS':
            return {}
        if len(nextc) == 0:
            return self.ret()
        return nextc[0](self, nextc)
    def call_next(self, nextc, err = [True]):
        # NOTE(review): mutable default argument; safe only because the
        # default list is never mutated -- confirm before refactoring.
        if not err[0]:
            # a handler failed: surface its error and stop the chain
            self.resp.status = err[2]
            return self.toret.add_error(err[1], err[2])
        nextc.pop(0)
        if len(nextc) == 0:
            # last handler: optionally merge cookies, attach payload, finish
            if len(err) >= 4 and err[3] is not None:
                self.__merge_cookie(err[3])
            self.toret.add_data(err[1])
            return self.ret()
        return nextc[0](self, nextc)
    def ret(self):
        # Flush queued cookies, content type and status onto the bottle
        # response, then return the finalised JSON envelope.
        if self.resp is not None:
            for cookie in self.cookie:
                self.resp.set_cookie(cookie, self.cookie[cookie], path='/')
            self.resp.content_type = 'application/json'
            self.resp.status = self.toret.data['status']
            return self.toret.ret()
        return self.toret.ret()
    def __merge_cookie(self, cookies):
        # Later values win over previously queued cookies.
        self.cookie = merged = {**self.cookie, **cookies}
| 32.638298 | 104 | 0.484876 | 7,541 | 0.983181 | 0 | 0 | 0 | 0 | 0 | 0 | 899 | 0.11721 |
f6429da6a835c587318716d02c57de897093290c | 304 | py | Python | data_sniffer/urls.py | thefedoration/django-data-sniffer | c691dbf42e69d6190ab8fb4fe3bfc3cb646716f3 | [
"MIT"
] | null | null | null | data_sniffer/urls.py | thefedoration/django-data-sniffer | c691dbf42e69d6190ab8fb4fe3bfc3cb646716f3 | [
"MIT"
] | null | null | null | data_sniffer/urls.py | thefedoration/django-data-sniffer | c691dbf42e69d6190ab8fb4fe3bfc3cb646716f3 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.conf import settings
from .views import data_sniffer_health_check
# Expose the single health-check route only when the sniffer is enabled;
# the captured slug is handed to the view as `key`.
urlpatterns = (
    [url(r'^(?P<key>[-\w]+)', data_sniffer_health_check, name="data_sniffer_health_check")]
    if settings.DATA_SNIFFER_ENABLED
    else []
)
| 25.333333 | 94 | 0.726974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.151316 |
f642c7514997e4f67f9ec1097b2c4722eab6d372 | 2,430 | py | Python | crtauth/client.py | spotify/crtauth | 5df8960f81f0c3a7ae3119b59221390707749cac | [
"Apache-2.0"
] | 90 | 2015-01-30T16:19:38.000Z | 2022-03-26T17:53:03.000Z | crtauth/client.py | spotify/crtauth | 5df8960f81f0c3a7ae3119b59221390707749cac | [
"Apache-2.0"
] | 14 | 2015-04-23T08:08:00.000Z | 2020-09-29T12:15:04.000Z | crtauth/client.py | spotify/crtauth | 5df8960f81f0c3a7ae3119b59221390707749cac | [
"Apache-2.0"
] | 20 | 2015-01-25T07:02:18.000Z | 2022-03-19T10:23:21.000Z | # Copyright (c) 2011-2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import msgpack
from crtauth import ssh, protocol, msgpack_protocol, exceptions
def create_request(username):
    """
    Create a request for a challenge with your username encoded.
    :param username: the username of the user to authenticate as
    :return: a request message
    """
    packed = io.BytesIO()
    # msgpack triplet: protocol version, message type 'q' (request), username
    for field in (1, ord('q'), username):
        msgpack.pack(field, packed)
    return ssh.base64url_encode(packed.getvalue())
def create_response(challenge, server_name, signer_plug=None):
    """Called by a client with the challenge provided by the server
    to generate a response using the local ssh-agent"""
    b = ssh.base64url_decode(challenge)
    # The first byte selects the wire format.
    # NOTE(review): b[0] is compared against str literals, i.e. Python 2
    # bytes semantics; on Python 3 b[0] would be an int -- confirm the
    # supported interpreter before porting.
    if b[0] == 'v':
        # this is version 0 challenge
        hmac_challenge = protocol.VerifiablePayload.deserialize(b)
        challenge = protocol.Challenge.deserialize(hmac_challenge.payload)
        to_sign = hmac_challenge.payload
        version_1 = False
    elif b[0] == '\x01':
        # version 1
        challenge = msgpack_protocol.Challenge.deserialize(b)
        to_sign = b
        version_1 = True
    else:
        raise exceptions.ProtocolError("invalid first byte of challenge")
    # Refuse challenges minted for a different server (MITM protection).
    if challenge.server_name != server_name:
        s = ("Possible MITM attack. Challenge originates from '%s' "
             "and not '%s'" % (challenge.server_name, server_name))
        raise exceptions.InvalidInputException(s)
    # Default to signing with the local ssh-agent.
    if not signer_plug:
        signer_plug = ssh.AgentSigner()
    signature = signer_plug.sign(to_sign, challenge.fingerprint)
    signer_plug.close()
    # Wrap the signature in the response type matching the challenge version.
    if version_1:
        response = msgpack_protocol.Response(challenge=b, signature=signature)
    else:
        response = protocol.Response(
            signature=signature, hmac_challenge=hmac_challenge)
    return ssh.base64url_encode(response.serialize())
| 33.75 | 78 | 0.702058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,015 | 0.417695 |
f643843dc611bf1e9914238b418113f9bed6e4cd | 836 | py | Python | editor/api/terminal/endpoints/terminal.py | dporr/skf-editor | 51b660d1356f283629a7f9e9121fa948830b5579 | [
"MIT"
] | 3 | 2021-08-23T16:32:21.000Z | 2021-09-04T02:40:59.000Z | editor/api/terminal/endpoints/terminal.py | dporr/skf-editor | 51b660d1356f283629a7f9e9121fa948830b5579 | [
"MIT"
] | null | null | null | editor/api/terminal/endpoints/terminal.py | dporr/skf-editor | 51b660d1356f283629a7f9e9121fa948830b5579 | [
"MIT"
] | 1 | 2021-07-08T13:40:01.000Z | 2021-07-08T13:40:01.000Z | from flask import request
from flask_restplus import Resource
from editor.api.terminal.business import get_terminal_output
from editor.api.terminal.serializers import terminal_response, terminal_cmd
from editor.api.restplus import api
ns = api.namespace('terminal', description='Operations related to terminal')
# POST /terminal/command: runs a terminal command and returns its output.
@ns.route('/command')
@api.doc(terminal_cmd)
@api.response(404, 'Validation error', terminal_response)
class TerminalCMD(Resource):
    @api.expect(terminal_cmd)
    @api.marshal_with(terminal_response)
    @api.response(400, 'No results found', terminal_response)
    def post(self):
        """
        Returns a terminal command output.
        * Privileges required: **none**
        """
        data = request.json  # parsed JSON body of the request
        cmd = data.get('command')  # command string to execute
        # NOTE(review): cmd is handed straight to the terminal helper;
        # presumably only trusted users reach this endpoint -- confirm.
        result = get_terminal_output(cmd)
return result, 200 | 34.833333 | 76 | 0.720096 | 419 | 0.501196 | 0 | 0 | 522 | 0.624402 | 0 | 0 | 195 | 0.233254 |
f64680efdbc765125c942cd42b9be9b6e892d55e | 400 | py | Python | mediumwave/migrations/0011_transmitter_iso.py | soundelec/mwradio | b1a7f6d8c24469c13c5336e292cc316980fdea2d | [
"BSD-3-Clause"
] | null | null | null | mediumwave/migrations/0011_transmitter_iso.py | soundelec/mwradio | b1a7f6d8c24469c13c5336e292cc316980fdea2d | [
"BSD-3-Clause"
] | null | null | null | mediumwave/migrations/0011_transmitter_iso.py | soundelec/mwradio | b1a7f6d8c24469c13c5336e292cc316980fdea2d | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.1.2 on 2018-10-19 14:46
from django.db import migrations, models
# Auto-generated schema migration: adds the `iso` field to Transmitter.
class Migration(migrations.Migration):
    # Must be applied after 0010_auto_20181017_1937.
    dependencies = [
        ('mediumwave', '0010_auto_20181017_1937'),
    ]
    operations = [
        # New optional CharField of up to 3 characters (presumably an
        # ISO country code, as the name suggests -- confirm against how
        # `iso` is populated elsewhere).
        migrations.AddField(
            model_name='transmitter',
            name='iso',
            field=models.CharField(blank=True, max_length=3),
        ),
    ]
| 21.052632 | 61 | 0.605 | 307 | 0.7675 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.255 |
f647836ba9f9d26035e89229613150a256f9838c | 953 | py | Python | 3_MapReduce Programming on MovieLens Data/solution/MapReduce/code/userReducer.py | minakoyang/YY_Distributed_Cluster_Computing | 0284569e4a6d0312590acf9ead5cf23bbcd167c9 | [
"MIT"
] | null | null | null | 3_MapReduce Programming on MovieLens Data/solution/MapReduce/code/userReducer.py | minakoyang/YY_Distributed_Cluster_Computing | 0284569e4a6d0312590acf9ead5cf23bbcd167c9 | [
"MIT"
] | null | null | null | 3_MapReduce Programming on MovieLens Data/solution/MapReduce/code/userReducer.py | minakoyang/YY_Distributed_Cluster_Computing | 0284569e4a6d0312590acf9ead5cf23bbcd167c9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import csv
import json
# Reducer state: the user with the most rating entries seen so far.
mostRatingUser = None
mostRatingCount = 0
mostRatingInfo = None

# Each stdin line is "<user>\t<json list of genre lists>"; keep the user
# whose line carries the largest number of genre lists.
for raw_line in sys.stdin:
    user, genre_json = raw_line.strip().split("\t", 1)
    genre_lists = json.loads(genre_json)
    if not mostRatingUser or len(genre_lists) > mostRatingCount:
        mostRatingUser, mostRatingCount, mostRatingInfo = (
            user, len(genre_lists), genre_lists)

# Tally how often each genre occurs for the winning user.
genreDict = {}
for genre_list in mostRatingInfo:
    for genre in genre_list:
        genreDict[genre] = genreDict.get(genre, 0) + 1

# Pick the genre with the highest tally (first winner kept on ties).
mostRatedCount = 0
mostRatedGenre = None
for genre, tally in genreDict.items():
    if tally > mostRatedCount:
        mostRatedGenre, mostRatedCount = genre, tally
print("%s -- Total Rating Counts: %d -- Most Rated Genre: %s - %d" % (mostRatingUser, mostRatingCount, mostRatedGenre, mostRatedCount)) | 25.078947 | 135 | 0.672613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.114376 |
f64788dcdb43c2e6980587df505c8707237a9135 | 2,901 | py | Python | src/MockingBirdOnlyForUse/logger.py | Diaosi1111/MockingBirdOnlyForUse | 243092621eff36c13418e2648b3909bed6b1ae2d | [
"MIT"
] | null | null | null | src/MockingBirdOnlyForUse/logger.py | Diaosi1111/MockingBirdOnlyForUse | 243092621eff36c13418e2648b3909bed6b1ae2d | [
"MIT"
] | null | null | null | src/MockingBirdOnlyForUse/logger.py | Diaosi1111/MockingBirdOnlyForUse | 243092621eff36c13418e2648b3909bed6b1ae2d | [
"MIT"
] | null | null | null | import logging
import os
import sys
import time
LOG_LEVEL = logging.INFO
OPEN_CONSOLE_LOG = True
OPEN_FILE_LOG = False
LOG_FILE_PATH = None
LOG_NAME = "null"
###############################################################################################################
# Logger initialisation
def _create_logger(
    level=LOG_LEVEL,
    open_console=OPEN_CONSOLE_LOG,
    open_file=OPEN_FILE_LOG,
    path=LOG_FILE_PATH,
):
    """Build (or rebuild) the shared "MockingBird" logger.

    :param level: logging threshold applied to the logger and handlers.
    :param open_console: attach a StreamHandler when True.
    :param open_file: attach a daily FileHandler under `path` when True.
    :param path: directory for the log files (required if open_file).
    :return: the configured ``logging.Logger`` instance.
    """
    logger = logging.getLogger("MockingBird")
    # getLogger returns the same object on every call; drop stale handlers
    # so repeated (re)configuration does not produce duplicated log lines.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    logger.setLevel(level)
    formatter = logging.Formatter("[%(asctime)s][%(levelname)s]: %(message)s")
    if open_console:
        hterm = logging.StreamHandler()
        hterm.setLevel(level)
        hterm.setFormatter(formatter)
        logger.addHandler(hterm)
    if open_file:
        # makedirs(exist_ok=True) also creates missing parents and avoids
        # the check-then-create race of os.path.exists + os.mkdir.
        os.makedirs(path, exist_ok=True)
        hfile = logging.FileHandler(
            path + "/" + time.strftime("%Y-%m-%d", time.localtime()) + ".log",
            encoding="utf-8",
        )
        hfile.setLevel(level)
        hfile.setFormatter(formatter)
        logger.addHandler(hfile)
    return logger
class Logger:
    """Facade over the shared "MockingBird" logger that prefixes every
    message with the caller's file name, function and line number."""
    def __init__(
        self,
        level=LOG_LEVEL,
        open_console=OPEN_CONSOLE_LOG,
        open_file=OPEN_FILE_LOG,
        path=LOG_FILE_PATH,
    ) -> None:
        self.logger = _create_logger(level, open_console, open_file, path)
    def debug(self, msg):
        # sys._getframe(1) refers to this method's *direct* caller; do not
        # extract this formatting into a helper or the depth would be wrong.
        try:
            msg = "[{}][{}][{}] {}".format(
                os.path.basename(sys._getframe(1).f_code.co_filename),
                sys._getframe(1).f_code.co_name,
                sys._getframe(1).f_lineno,
                msg,
            )
        except:
            # best effort: fall back to the raw message on introspection failure
            pass
        self.logger.debug(msg)
        return
    def info(self, msg):
        # Same caller-prefix scheme as debug(); see the frame-depth note above.
        try:
            msg = "[{}][{}][{}] {}".format(
                os.path.basename(sys._getframe(1).f_code.co_filename),
                sys._getframe(1).f_code.co_name,
                sys._getframe(1).f_lineno,
                msg,
            )
        except:
            pass
        self.logger.info(msg)
        return
    def error(self, msg):
        # Same caller-prefix scheme as debug(); see the frame-depth note above.
        try:
            msg = "[{}][{}][{}] {}".format(
                os.path.basename(sys._getframe(1).f_code.co_filename),
                sys._getframe(1).f_code.co_name,
                sys._getframe(1).f_lineno,
                msg,
            )
        except:
            pass
        self.logger.error(msg)
        return
    def setlogger(
        self,
        level=LOG_LEVEL,
        open_console=OPEN_CONSOLE_LOG,
        open_file=OPEN_FILE_LOG,
        path=LOG_FILE_PATH,
    ):
        # Rebuild the underlying logger with new settings.
        self.logger = _create_logger(level, open_console, open_file, path)
logger = Logger()
def creat_logger(
    level=LOG_LEVEL,
    open_console=OPEN_CONSOLE_LOG,
    open_file=OPEN_FILE_LOG,
    path=LOG_FILE_PATH,
):
    """Reconfigure the module-level `logger` singleton.

    NOTE(review): the name looks like a typo of `create_logger`, but it is
    part of the public API -- renaming would break callers.
    """
    global logger
    logger.setlogger(level, open_console, open_file, path)
| 25.226087 | 111 | 0.541537 | 1,582 | 0.543456 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.091721 |
f647a1e251a956e51f190c8dadc2f437ea0fcb0e | 2,083 | py | Python | tools/GetImages.py | vicyangworld/WaterDispenserEye | 6b2191f99d85dfe89dc9ec4161959e8f908dd16f | [
"MIT"
] | null | null | null | tools/GetImages.py | vicyangworld/WaterDispenserEye | 6b2191f99d85dfe89dc9ec4161959e8f908dd16f | [
"MIT"
] | null | null | null | tools/GetImages.py | vicyangworld/WaterDispenserEye | 6b2191f99d85dfe89dc9ec4161959e8f908dd16f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import cv2
import sys
import os
# Helper: rotate an image by a given angle
def rotate(image, angle, center=None, scale=1.0):
    """Rotate `image` by `angle` degrees around `center`.

    :param image: image array; only the first two shape dims (h, w) are used.
    :param angle: rotation angle in degrees.
    :param center: (x, y) pivot point; defaults to the image centre.
    :param scale: isotropic scale factor applied with the rotation.
    :return: the rotated image, same size as the input.
    """
    # image size (was followed by a stray debug print(image.shape) -- removed)
    (h, w) = image.shape[:2]
    # if no pivot was given, rotate around the image centre
    if center is None:
        center = (w / 2, h / 2)
    # build the affine rotation matrix and apply it
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    # return the rotated image
    return rotated
def GetImagesFromVideo(windowName, picNum, savePath, cameraID=0, videoInput="", bRotate=True,value=-90):
    """Harvest up to `picNum` face crops from a camera or a video file.

    Frames are read from camera `cameraID` (or from `videoInput` when it
    is non-empty), optionally rotated by `value` degrees, scanned with a
    Haar frontal-face cascade, and each detected face is cropped and
    written as a JPEG under `savePath`.  Press 'q' to stop early.
    """
    cv2.namedWindow(windowName)
    if videoInput=="":
        cap = cv2.VideoCapture(cameraID)
    else:
        cap = cv2.VideoCapture(videoInput) # read frames from the video file
    classfier = cv2.CascadeClassifier("/usr/local/lib/python3.6/dist-packages/cv2/data/haarcascade_frontalface_alt2.xml")
    recgColor = (0, 255, 0)  # rectangle colour drawn around detected faces
    count = 0  # number of face crops saved so far
    while cap.isOpened():
        ok,frame = cap.read()
        if not ok:
            break
        if bRotate:
            frame = rotate(frame,value)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faceRects = classfier.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(32,32))
        if len(faceRects) > 0:
            for faceRect in faceRects:
                x,y,w,h = faceRect
                imgName = os.path.join(savePath,"A"+str(count)+".jpg")
                # imgName = savePath+"/"+str(count)+".jpg"
                # NOTE(review): for faces within 15px of the frame edge the
                # negative index wraps around -- confirm intended behaviour.
                image = frame[y-15:y+h+15,x-15:x+w+15] # crop the face with a 15px margin
                cv2.imwrite(imgName, image)# write the crop to savePath
                count = count + 1
                if count > picNum:
                    break
                cv2.rectangle(frame,(x-15,y-15),(x+w+15,y+h+15),recgColor, 3)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame,str(count)+"/"+str(picNum),(x+15,y+15),font,1,(255,0,255),3)
        if count > picNum:
            break
        cv2.imshow(windowName, frame)
        kb = cv2.waitKey(1)
        if kb&0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    picNum = 2000         # number of face crops to collect
    savePath = '../data/' # output directory for the JPEG crops
    #video = "/home/yagnwk/Projects/linzhiling.mp4"
    # Two modes: capture faces from a live camera,
    # or harvest them from a video file.
    GetImagesFromVideo("Get Images", picNum, savePath, cameraID=1, bRotate=False)
    #GetImagesFromVideo("Get Images", picNum, savePath, videoInput=video,bRotate=False)
| 27.051948 | 118 | 0.670667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 625 | 0.276426 |
f6483079842b52a30927c51d2b4ba1bbe9984614 | 503 | py | Python | tests/test_padondehoy.py | kengru/Giru | 93f1b8dc6db35109a58a350218f7a69dcf2e788d | [
"MIT"
] | 3 | 2018-05-29T16:44:14.000Z | 2021-01-13T12:46:05.000Z | tests/test_padondehoy.py | kengru/Giru | 93f1b8dc6db35109a58a350218f7a69dcf2e788d | [
"MIT"
] | 52 | 2018-05-29T17:07:03.000Z | 2021-09-01T18:33:17.000Z | tests/test_padondehoy.py | kengru/Giru | 93f1b8dc6db35109a58a350218f7a69dcf2e788d | [
"MIT"
] | 2 | 2018-06-02T22:06:16.000Z | 2018-06-03T19:01:31.000Z | from unittest import TestCase
from giru.core.commands import PaDondeHoy
from tests.mocks import MockBot, MockUpdate
class TestPaDondeHoy(TestCase):
    """Regression test for the PaDondeHoy command."""
    def test_catalogue_response_same_chat_same_day(self):
        # Invoking the command twice for the same chat on the same day
        # must produce the identical catalogued reply.
        bot = MockBot()
        update = MockUpdate()
        PaDondeHoy(bot, update)
        first_reply = bot.last_message[update.message.chat_id]
        PaDondeHoy(bot, update)
        second_reply = bot.last_message[update.message.chat_id]
        self.assertEqual(first_reply, second_reply)
| 26.473684 | 61 | 0.723658 | 383 | 0.761431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f649717f99a11da8f0544eda17de8a09d6562732 | 2,395 | py | Python | ircConnection.py | mutexlox/NO-FIFTH-GLYPH | 4acb5b8e584093ae7faa9f03c86c0e8d686df8f9 | [
"MIT"
] | 1 | 2019-04-16T10:59:38.000Z | 2019-04-16T10:59:38.000Z | ircConnection.py | mutexlox/NO-FIFTH-GLYPH | 4acb5b8e584093ae7faa9f03c86c0e8d686df8f9 | [
"MIT"
] | null | null | null | ircConnection.py | mutexlox/NO-FIFTH-GLYPH | 4acb5b8e584093ae7faa9f03c86c0e8d686df8f9 | [
"MIT"
] | null | null | null | import socket
import select
import config
class IRCConnection:
    """Thin wrapper over a non-blocking TCP socket speaking the IRC protocol."""
    def __init__(self, serverName, port=6667):
        """Open a non-blocking TCP connection to the given IRC server."""
        self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connection.connect((serverName, port))
        self.connection.setblocking(0)
    def sendMessage(self, toSend):
        '''Helper function that sends the given
        string as an IRC message.
        '''
        # Echo outgoing traffic for debugging, except the noisy PONG replies.
        # Was `print toSend` (Python 2-only statement, a SyntaxError on
        # Python 3); the parenthesised form behaves the same on both.
        if not toSend.startswith("PONG"):
            print(toSend)
        # NOTE(review): socket.send expects bytes on Python 3; this str
        # concatenation presumes a Python 2 runtime -- confirm before porting.
        self.connection.send(str(toSend) + "\r\n")
    def receive(self):
        '''Recieve 512 bytes from the connection (512 bytes == 1 message)
        '''
        # time out after a reasonable period of time so we revoice quickly
        ready = select.select([self.connection], [], [], 0.2)
        if ready[0]:
            return str(self.connection.recv(512))
        else:
            return None
    def setNick(self, nick):
        '''Sets the nick to given string.
        '''
        self.sendMessage("NICK " + nick)
    def setUser(self, userName, hostName, serverName, realName):
        '''Set the user info as given.
        '''
        self.sendMessage("USER " + userName + " " +
                         hostName + " " +
                         serverName + " :" +
                         realName)
    def authenticate(self, password):
        '''Authenticate with NickServ with given password.
        '''
        self.sendMessage("PRIVMSG NickServ IDENTIFY " + password)
    def setBot(self, nick):
        '''Tell the server that we're a bot. (Note: This is network-dependent!)
        '''
        config.botIdentify(self, botNick=nick)
    def reply(self, toSend, nick, chan, isPM):
        '''Send a PRIVMSG to the user (for PMs) or to the channel.
        '''
        sendTo = nick if isPM else chan
        self.sendMessage("PRIVMSG " + sendTo + " :" + toSend)
    def quit(self, quitMessage):
        '''Disconnect from the server, with an optional message.
        '''
        if quitMessage == "":
            self.sendMessage("QUIT")
        else:
            self.sendMessage("QUIT :" + quitMessage)
    def part(self, partMessage, chan):
        '''Leave the given channel, with an optional message.
        '''
        if chan != "":
            if partMessage == "":
                self.sendMessage("PART " + chan)
            else:
                self.sendMessage("PART " + chan + " :" + partMessage)
    def join(self, chan):
        '''Join the given channel.
        '''
        self.sendMessage("JOIN " + chan)
    def close(self):
        '''Close the connection.
        '''
        self.connection.close()
| 31.103896 | 79 | 0.548643 | 2,350 | 0.981211 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.259708 |
f64a19aff3d9a4d44aac824d38c6aaef4eeb9f77 | 99 | py | Python | lesson-08/test_m.py | rafaelmartinsbuck/ai-for-trading | 51234e408c94ccdeee9b06301a2f63bd170243e3 | [
"MIT"
] | 1 | 2020-05-15T09:41:14.000Z | 2020-05-15T09:41:14.000Z | lesson-08/test_m.py | rafaelmartinsbuck/ai-for-trading | 51234e408c94ccdeee9b06301a2f63bd170243e3 | [
"MIT"
] | null | null | null | lesson-08/test_m.py | rafaelmartinsbuck/ai-for-trading | 51234e408c94ccdeee9b06301a2f63bd170243e3 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
# Small two-column demo frame; built from a named dict for readability.
_columns = {"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}
df = pd.DataFrame(_columns)
print(df) | 19.8 | 49 | 0.616162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.060606 |
f64b72c278e266e29f2b5f8013262577a44a9069 | 1,314 | py | Python | services/swimmer_service.py | Design-Patterns-Project-Group/swimming-mangement-in-python | c0acd0efb5f5536d7890801eed3a7bf995744bde | [
"MIT"
] | null | null | null | services/swimmer_service.py | Design-Patterns-Project-Group/swimming-mangement-in-python | c0acd0efb5f5536d7890801eed3a7bf995744bde | [
"MIT"
] | null | null | null | services/swimmer_service.py | Design-Patterns-Project-Group/swimming-mangement-in-python | c0acd0efb5f5536d7890801eed3a7bf995744bde | [
"MIT"
] | null | null | null | import sys, os.path
sys.path.append(os.path.abspath('..'))
from models import *
from . import AbstractSwimmerService
class SwimmerService(AbstractSwimmerService):
    """In-memory SwimmerService backed by a hard-coded list of records."""
    def __init__(self):
        # assume this is a database
        self._data_store = [
            {
                'swimmer_id': 1,
                'first_name' : 'Abebe',
                'last_name' : 'Bekila'
            },
            {
                'swimmer_id': 2,
                'first_name' : 'Abebe2',
                'last_name' : 'Bekila2'
            },
            {
                'swimmer_id': 3,
                'first_name' : 'Abebe3',
                'last_name' : 'Bekila3'
            },
            {
                'swimmer_id': 4,
                'first_name' : 'Abebe4',
                'last_name' : 'Bekila4'
            },
            {
                'swimmer_id': 5,
                'first_name' : 'Abebe5',
                'last_name' : 'Bekila5'
            }
        ]
    def getById(self, swimmer_id):
        """Return the Swimmer whose record matches `swimmer_id`.

        :raises Exception: when no record matches.
        """
        # was: list(filter(...)) followed by a bare `except:` around the
        # [0] lookup, which also swallowed unrelated errors; scan lazily
        # and make the not-found case explicit instead
        entry = next(
            (record for record in self._data_store
             if record['swimmer_id'] == swimmer_id),
            None,
        )
        if entry is None:
            raise Exception('could not find a swimmer by that id')
        return Swimmer(**entry)
| 26.28 | 100 | 0.444444 | 1,188 | 0.90411 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.25723 |