Dataset schema, one row per source file (⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
**Record 1 · hexsha `9514c9647a31509619c43b943b315ef73a1f481a`**
- size: 1,192 · ext: py · lang: Python
- path: `tests/test_hw02.py` · repo: `timm/sinless-swe` @ `b331b9bf4d27fdf357ce8a5ce54f9858103fd64f` · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: 2 (2021-08-29T19:26:19.000Z to 2021-09-20T17:44:27.000Z)

import os
import sys
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
from src.hw2 import csv_reader
def testCsvReader():
expectedResult = [['outlook', 'Temp', '?Humidity', 'windy', 'Wins+', 'Play-'],
['sunny', 85, 85, 'FALSE', 10, 20],
['sunny', 80, 90, 'TRUE', 12, 40],
['overcast', 83, 86, 'FALSE', 40, 40],
['rainy', 70, 96, 'FALSE', 40, 50],
['rainy', 65, 70, 'TRUE', 4, 10],
['overcast', 64, 65, 'TRUE', 30, 60],
['sunny', 72, 95, 'FALSE', 7, 20],
['sunny', 69, 70, 'FALSE', 70, 70],
['rainy', 75, 80, 'FALSE', 80, 40],
['sunny', 75, 70, 'TRUE', 30, 50],
['overcast', 72, 90, 'TRUE', 60, 50],
['overcast', 81, 75, 'FALSE', 30, 60],
['rainy', 71, 91, 'TRUE', 50, 40]]
dataPath = os.path.dirname(os.path.abspath(__file__))
dataPath = dataPath[:dataPath.rindex("/")]
    result = csv_reader(dataPath + "/data/windy.csv")
for i,row in enumerate(result):
assert row == expectedResult[i]

- avg_line_length: 45.846154 · max_line_length: 82 · alphanum_fraction: 0.452181
- classes: 0 (score 0) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 259 (score 0.217282)
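The test above imports `csv_reader` from `src.hw2`, which is not included in this record. A minimal sketch of a reader that would satisfy the expected rows (integer-like fields coerced to `int`, everything else kept as a string) could look like this; the module path and exact behavior are assumptions inferred from the test:

```python
# hypothetical src/hw2.py, inferred from the expectations in tests/test_hw02.py
import csv

def csv_reader(file_name):
    """Yield each CSV row as a list, converting integer-like fields to int."""
    with open(file_name, newline='') as f:
        for row in csv.reader(f):
            yield [int(cell) if cell.strip().lstrip('-').isdigit() else cell.strip()
                   for cell in row]
```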
**Record 2 · hexsha `9514f668db331c946ecbf660cfa6375f54adec5b`**
- size: 2,462 · ext: py · lang: Python
- path: `hyperdeck.py` · repo: `FlantasticDan/hyperdeck-replay` @ `5d5a62c9342c4e552e6a2d44dbe85cb3dba49f28` · licenses: ["MIT"]
- max_stars_count: 1 (2021-09-06T15:02:34.000Z to 2021-09-06T15:02:34.000Z) · max_issues_count: null · max_forks_count: null

from telnetlib import Telnet
from threading import Thread
class Hyperdeck:
def __init__(self, ip_address, id) -> None:
self.deck = Telnet(ip_address, 9993)
self.id = id
self.thread = Thread(target=self.listener)
self.thread.start()
def listener(self):
while True:
message = self.deck.read_some()
print(f'//{self.id}//')
print(message)
def identify_standard_command(self, command):
if command == 'live':
return 'preview: enable: true'
elif command == 'clip':
return 'preview: enable: false\r\nplayrange clear'
elif command == 'record':
return 'record'
elif command == 'play':
return 'play: single clip: true'
elif command == 'stop':
return 'stop'
elif command == 'previous':
return 'goto: clip id: -1'
elif command == 'next':
return 'goto: clip id: +1'
elif command == 'beginning':
return 'goto: clip: start'
elif command == 'end':
return 'goto: clip: end'
def identify_granular_command(self, command, direction):
if direction == 'forward':
sign = '+'
else:
sign = '-'
if command == '10%':
return f'play: single clip: true speed: {sign}10'
elif command == '25%':
return f'play: single clip: true speed: {sign}25'
elif command == '50%':
return f'play: single clip: true speed: {sign}50'
elif command == '75%':
return f'play: single clip: true speed: {sign}75'
elif command == '10s':
return f'jog: timecode: {sign}00:00:10:00'
elif command == '5s':
return f'jog: timecode: {sign}00:00:05:00'
elif command == '1s':
return f'jog: timecode: {sign}00:00:01:00'
elif command == '1f':
return f'jog: timecode: {sign}00:00:00:01'
def send_standard_command(self, command):
identified_command = self.identify_standard_command(command)
query = bytes(f'{identified_command}\r\n', 'ascii')
self.deck.write(query)
def send_granular_command(self, command, direction):
identified_command = self.identify_granular_command(command, direction)
query = bytes(f'{identified_command}\r\n', 'ascii')
self.deck.write(query)

- avg_line_length: 34.676056 · max_line_length: 79 · alphanum_fraction: 0.553209
- classes: 2,398 (score 0.974005) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 687 (score 0.279041)
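A minimal usage sketch for the `Hyperdeck` class above; the IP address and timing are placeholders, and a real Blackmagic HyperDeck must be listening on TCP port 9993 for this to do anything:

```python
# hypothetical driver script for the Hyperdeck class above
import time
from hyperdeck import Hyperdeck

deck = Hyperdeck('192.168.1.50', id=1)        # opens the Telnet session, starts the listener thread
deck.send_standard_command('live')            # preview: enable: true
deck.send_standard_command('record')          # start recording
time.sleep(5.0)
deck.send_standard_command('stop')            # stop recording
deck.send_granular_command('50%', 'forward')  # replay at +50 speed
```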
**Record 3 · hexsha `95150abc9ac26ff15d14447cfaa884078a1c20b0`**
- size: 2,215 · ext: py · lang: Python
- stars: `tensorwatch/repeated_timer.py` · repo: `sytelus/longview` @ `686e43cf187eaf55df18949359fd63d57dc337b2` · licenses: ["MIT"] · max_stars_count: 3,453 (2019-05-22T15:01:23.000Z to 2022-03-31T07:50:41.000Z)
- issues: `tensorwatch/repeated_timer.py` · repo: `wgxcow/tensorwatch` @ `142f83a7cb8c54e47e9bab06cb3a1ef8ae225422` · licenses: ["MIT"] · max_issues_count: 69 (2019-05-22T17:11:20.000Z to 2022-03-03T09:32:38.000Z)
- forks: `tensorwatch/repeated_timer.py` · repo: `wgxcow/tensorwatch` @ `142f83a7cb8c54e47e9bab06cb3a1ef8ae225422` · licenses: ["MIT"] · max_forks_count: 375 (2019-05-22T17:10:33.000Z to 2022-03-24T07:43:07.000Z)

# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import threading
import time
import weakref
class RepeatedTimer:
class State:
Stopped=0
Paused=1
Running=2
def __init__(self, secs, callback, count=None):
self.secs = secs
self.callback = weakref.WeakMethod(callback) if callback else None
self._thread = None
self._state = RepeatedTimer.State.Stopped
self.pause_wait = threading.Event()
self.pause_wait.set()
self._continue_thread = False
self.count = count
def start(self):
self._continue_thread = True
self.pause_wait.set()
        if self._thread is None or not self._thread.is_alive():
self._thread = threading.Thread(target=self._runner, name='RepeatedTimer', daemon=True)
self._thread.start()
self._state = RepeatedTimer.State.Running
def stop(self, block=False):
self.pause_wait.set()
self._continue_thread = False
        if block and not (self._thread is None or not self._thread.is_alive()):
self._thread.join()
self._state = RepeatedTimer.State.Stopped
def get_state(self):
return self._state
def pause(self):
if self._state == RepeatedTimer.State.Running:
self.pause_wait.clear()
self._state = RepeatedTimer.State.Paused
# else nothing to do
    def unpause(self):
        if self._state == RepeatedTimer.State.Paused:
            self.pause_wait.set()
            self._state = RepeatedTimer.State.Running
        # else nothing to do
def _runner(self):
while (self._continue_thread):
if self.count:
                self.count -= 1
if not self.count:
self._continue_thread = False
if self._continue_thread:
self.pause_wait.wait()
if self.callback and self.callback():
self.callback()()
if self._continue_thread:
time.sleep(self.secs)
self._thread = None
self._state = RepeatedTimer.State.Stopped

- avg_line_length: 31.197183 · max_line_length: 99 · alphanum_fraction: 0.602709
- classes: 2,096 (score 0.946275) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 126 (score 0.056885)
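A usage sketch for `RepeatedTimer`. Because the callback is held as a `weakref.WeakMethod`, it must be a bound method whose owner stays alive; `Printer` here is a placeholder:

```python
# usage sketch for the RepeatedTimer class above
import time

class Printer:
    def tick(self):
        print('tick', time.time())

p = Printer()                                # keep a strong reference so the weakref stays valid
timer = RepeatedTimer(1.0, p.tick, count=5)  # fire roughly once a second, five times
timer.start()
time.sleep(6)
timer.stop(block=True)
```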
**Record 4 · hexsha `9515d87797c5883ffb46a5046c9382bbdb71bc8f`**
- size: 1,037 · ext: py · lang: Python
- path: `pushpy_examples/client/tasks/schedule/c_local_schedule.py` · repo: `briangu/push-examples` @ `3acf00d9f63523010ee3b70f3117d1be686c3335` · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null

import time
from pushpy_examples.client.ex_push_manager import ExamplePushManager
m = ExamplePushManager()
m.connect()
class ScheduleTask:
def apply(self, control):
import schedule
import time
def job():
print(f"I'm working...{time.time()}")
schedule.clear()
schedule.every(5).seconds.do(job)
# schedule.every(10).minutes.do(job)
# schedule.every().hour.do(job)
# schedule.every().day.at("10:30").do(job)
# schedule.every(5).to(10).minutes.do(job)
# schedule.every().monday.do(job)
# schedule.every().wednesday.at("13:15").do(job)
# schedule.every().minute.at(":17").do(job)
while control.running:
schedule.run_pending()
time.sleep(1)
repl_code_store = m.repl_code_store()
repl_code_store.set("schedule_task", ScheduleTask, sync=True)
dt = m.local_tasks()
dt.stop("schedule_task")
dt.run("daemon", src="schedule_task", name="schedule_task")
time.sleep(30)
dt.stop("schedule_task")

- avg_line_length: 24.690476 · max_line_length: 69 · alphanum_fraction: 0.633558
- classes: 662 (score 0.63838) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 388 (score 0.374156)
**Record 5 · hexsha `951662a92b08b48e3775881d06dfdde6053f3486`**
- size: 453 · ext: py · lang: Python
- path: `leetcode/weekly154/balloons.py` · repo: `jan25/code_sorted` @ `f405fd0898f72eb3d5428f9e10aefb4a009d5089` · licenses: ["Unlicense"]
- max_stars_count: 2 (2018-01-18T11:01:36.000Z to 2021-12-20T18:14:48.000Z) · max_issues_count: null · max_forks_count: null

'''
https://leetcode.com/contest/weekly-contest-154/problems/maximum-number-of-balloons/
'''
class Solution:
def maxNumberOfBalloons(self, text: str) -> int:
m = {}
for c in text:
if c not in m: m[c] = 0
m[c] += 1
ans = len(text)
for c in 'lo':
if c in m: m[c] //= 2
for c in 'balon':
if c in m: ans = min(ans, m[c])
else: ans = 0
return ans

- avg_line_length: 28.3125 · max_line_length: 84 · alphanum_fraction: 0.479029
- classes: 360 (score 0.794702) · generators: 0 (score 0) · decorators: 0 (score 0) · async functions: 0 (score 0) · documentation: 103 (score 0.227373)
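A quick check of the counting logic, using the examples from the LeetCode problem statement ("balloon" needs b, a, n once each and l, o twice):

```python
sol = Solution()
assert sol.maxNumberOfBalloons("loonbalxballpoon") == 2
assert sol.maxNumberOfBalloons("leetcode") == 0
```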
**Record 6 · hexsha `9516843db83caf5de14579548efc7a35483c1024`**
- size: 3,100 · ext: py · lang: Python
- path: `app/cache/basic.py` · repo: `JunyongYao/flask-backend-seed` @ `9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d` · licenses: ["MIT"]
- max_stars_count: 9 (2017-10-20T09:26:09.000Z to 2021-01-28T02:54:43.000Z) · max_issues_count: 2 (2018-03-06T06:27:53.000Z to 2018-04-19T01:47:38.000Z) · max_forks_count: 2 (2019-07-18T22:32:28.000Z to 2020-06-15T14:10:29.000Z)

# -*- coding: utf-8 -*-
import logging
import pickle
from abc import ABCMeta, abstractmethod
from app import redis
from app.cache import set_dict_if_key_expire, set_data_if_key_expire, set_redis_dict_with_timeout, \
set_redis_data_with_timeout
from task.asyncTask import refresh_cache
class CacheABC(object, metaclass=ABCMeta):
def __init__(self):
        # To avoid a repeated call to the parent class's __init__ resetting the value to 0,
        # only set the value when it has not been set before.
try:
getattr(self, "_threshold_value")
except AttributeError:
self._threshold_value = 0
@property
@abstractmethod
def cache_key(self):
raise NotImplementedError()
@property
@abstractmethod
def expire_in_seconds(self):
raise NotImplementedError()
@property
def auto_refresh_threshold(self):
        # In case self._threshold_value was not set before, use a default one.
return getattr(self, "_threshold_value", 0)
@auto_refresh_threshold.setter
def auto_refresh_threshold(self, value):
if value >= self.expire_in_seconds:
            raise ValueError("Given threshold {} cannot be bigger than expire value {}".format(
value, self.expire_in_seconds))
self._threshold_value = value
@abstractmethod
def data_retrieve_ops(self):
raise NotImplementedError()
@abstractmethod
def get(self) -> dict:
raise NotImplementedError()
@abstractmethod
def refresh(self):
raise NotImplementedError()
def clear(self):
redis.delete(self.cache_key)
def apply_async_task_if_possible(self):
# From Redis 2.8, if the key does not exist, return -2; if it has no ttl, return -1.
left_seconds = redis.ttl(self.cache_key)
if 0 < left_seconds < self.auto_refresh_threshold:
logging.info("Apply async task for refresh {}".format(self.cache_key))
refresh_cache.apply_async(args=[pickle.dumps(self)], serializer='pickle')
class DictCacheABC(CacheABC):
def get(self) -> dict:
self.apply_async_task_if_possible()
return set_dict_if_key_expire(self.cache_key, self.expire_in_seconds, self.data_retrieve_ops)
def refresh(self):
dict_data = self.data_retrieve_ops()
# only set data which is not None
if dict_data:
set_redis_dict_with_timeout(self.cache_key, dict_data, self.expire_in_seconds)
else:
logging.error("Cannot set dict data for {}".format(self.cache_key))
class DataCacheABC(CacheABC):
def get(self):
self.apply_async_task_if_possible()
return set_data_if_key_expire(self.cache_key, self.expire_in_seconds, self.data_retrieve_ops)
def refresh(self):
dict_data = self.data_retrieve_ops()
# only set data which is not None
if dict_data:
set_redis_data_with_timeout(self.cache_key, dict_data, self.expire_in_seconds)
else:
logging.error("Cannot set dict data for {}".format(self.cache_key))

- avg_line_length: 32.978723 · max_line_length: 120 · alphanum_fraction: 0.684516
- classes: 2,800 (score 0.903226) · generators: 0 (score 0) · decorators: 907 (score 0.292581) · async functions: 0 (score 0) · documentation: 572 (score 0.184516)
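A minimal sketch of a concrete cache built on the abstract classes above; the key name, TTL, and data source are placeholders, and the surrounding `app`/`task` modules are assumed to be importable as in the original project:

```python
# hypothetical concrete cache based on DictCacheABC
class UserProfileCache(DictCacheABC):
    @property
    def cache_key(self):
        return "cache:user_profiles"   # placeholder Redis key

    @property
    def expire_in_seconds(self):
        return 600                     # 10-minute TTL

    def data_retrieve_ops(self):
        return {"alice": "admin", "bob": "user"}   # placeholder for a real database query

cache = UserProfileCache()
cache.auto_refresh_threshold = 60   # refresh asynchronously during the last minute of the TTL
profiles = cache.get()              # served from Redis, repopulated on expiry
```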
**Record 7 · hexsha `9518a93eb1a74edc2a091b88692ed0896329bfe9`**
- size: 38,343 · ext: py · lang: Python
- path: `fraudbot.py` · repo: `DocGrishka/tetstsss` @ `9e594333306e6ea8c13f0c81aa5ccb05bc7e9e5e` · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null

import discord
import sqlite3
import random
import requests
import pymorphy2
from itertools import product
# database that stores the points users earn and the bot's relationship state with each user -
# whether it is playing something with them (and what), or just chatting
class Bnc:
def __init__(self):
random.seed(self.generate_answer())
self.attempt = self.k = 0
        # build the list of all possible numbers.
self.everything = ["".join(x) for x in product('0123456789', repeat=4)
if len(set(x)) == len(x)]
self.answer = self.generate_answer()
        # this also shuffles all the numbers; besides, it is easier to remove them from this collection.
self.guess_space = set(self.everything)
        # the bot's guess history is stored here.
self.historys = []
        # and the player's guess history is stored here.
self.history = []
def is_compatible(self, guess):
        # check whether this number is consistent with all previous attempts.
return all(self.bulls_n_cows(guess, previous_guess) == (bulls, cows)
for previous_guess, bulls, cows in self.historys)
@staticmethod
    # returns bulls and cows for a comparison of two numbers
def bulls_n_cows(attempt, answer):
bulls = sum(1 for x, y in zip(attempt, answer) if x == y)
cows = len(set(attempt) & set(answer)) - int(bulls)
return bulls, cows
@staticmethod
def bulls_n_cows_morph(bulls, cows):
        # returns bulls and cows in a form more convenient to present to the player.
morph = pymorphy2.MorphAnalyzer()
cows = str(cows) + ' ' + morph.parse('корова')[0].make_agree_with_number(int(cows)).word
bulls = str(bulls) + ' ' + morph.parse('бык')[0].make_agree_with_number(int(bulls)).word
return bulls, cows
@staticmethod
    # generates a number
def generate_answer():
n = [i for i in range(10)]
number = []
for _ in range(4):
a = n.pop(random.choice(range(len(n))))
number.append(str(a))
return ''.join(number)
def cheat(self, player_try):
max_score = 0
best_answer = self.answer
for new_answer in self.everything:
score = 12.0
error = True
while error:
if self.history:
for i in self.history:
                        if self.bulls_n_cows(i[0], new_answer) != (i[1], i[2]):
score = 0
error = False
break
error = False
else:
break
bulls, cows = self.bulls_n_cows(new_answer, player_try)
score -= bulls * 3 + cows
if bulls + cows == 0:
score -= 5.1
if max_score < score:
best_answer = new_answer
max_score = score
return best_answer
class Fraudbot(discord.Client):
def __init__(self, **options):
super().__init__(**options)
        # the database stores user data: user_id, points, state.
        # The first is the identifier, the second is the points, and the third is what the bot is currently doing with the player.
self.con = sqlite3.connect("users.db")
        # all available games, in the format: command that starts the game - description of the game
games = '/быки и коровы - математическая игра, в двух изданиях: в одиночку и против бота\n' \
'/крестики-нолики - классические крестики-нолики с 3 уровнями сложности\n' \
'/сапер - классический сапер, размер поля варьируется от 5 на 5, до 26 на 26 клеток\n' \
'/камень-ножницы-бумага - классические... камень-ножницы-бумага!\n' \
'/кости - вы делаете ставки на сумму выброшенных ботом костей\n\n' \
'Более подробные правила игр описаны внутри каждой из них. Пусть Фортуна будет благосклонна' \
' к вам!'
        # the lookup table we take reactions to various phrases from.
self.dialog_base = {'/игры': 'Вот список моих игр: \n' + games,
'/привет': 'Здравствуйте! Я Fraudbot. Я представляю математические игры, то есть игры,'
' где используется математическое мышление. Команда "/игры" -- '
'здесь описаны мои игры и команды для их вызова.\nКоманда "/помощь" -- '
'используйте ее, если возникнут вопросы или проблемы.',
'/помощь': 'Если у вас возник вопрос, проблема, или у вас есть какая-то идея'
' -- пишите на адрес fraudbot.help@mail.ru'}
self.commands = ['/помощь', '/игры', '/привет'] + [g.split(' - ')[0] for g in games.split('\n')]
        # after a restart, the bot has to warn users that all of their dialogs were terminated.
self.reconnect = {}
async def on_message(self, message):
        # don't reply to ourselves
if message.author == self.user:
return
        # user_gambler is a Member object; it is used by the check in check(m).
user_gambler = message.author
        # user_player is the user's identifier, needed to address them and to store them in the database.
user_player = str(user_gambler).replace('#', '')
        # user_channel is the channel the conversation was started on.
user_channel = message.channel
        # the database stores the guild name and the channel. If the user talks to the bot in direct messages,
        # there is no guild, so only the channel is recorded.
try:
user_chan_guild = str(user_channel.guild.id) + str(user_channel.id)
except AttributeError:
user_chan_guild = str(user_channel.id)
        # stops all interaction with the bot on command.
if message.content == '/стоп':
await self.db_edit(user_player, 'empty')
await message.channel.send(user_player + ", вы прервали все взаимодействия с ботом.")
        # if the bot was started for the first time, or restarted
if user_player in self.reconnect and self.reconnect[user_player]:
            # the first condition checks that the user has talked to the bot before
await message.channel.send(f"Извините, {user_player}, произошел перезапуск бота. Приносим извинения"
f" за причиненные неудобства. Все диалоги были досрочно прекращены.")
        # if the user is not in the database
if self.user_status(user_player) == 'None':
            # greet the new user and add them to the database. The insertion into the database happens automatically,
await message.channel.send(f'Приветствую, {user_player}! Я Fraudbot и у меня 3 основных команды:\n\t/'
f'привет\t|\t/игры\t|\t/помощь\nВы можете отправить любую из них. Более '
f'подробное приветствие уже отправлено вам в личные сообщения.')
            # also send them a message in their private channel.
await self.pm_greet(user_gambler)
            # along with the status change at the end of the function. But the user could send a command as their very
            # first message, which is why the status is changed before checking whether the message is a command.
await self.db_edit(user_player, 'empty')
        # if the user is "free" of our games and dialogs
if self.user_status(user_player) == 'empty':
for i in self.dialog_base:
if message.content == i:
await message.channel.send(self.dialog_base[i])
        # if the player is not "free" and writes from a different channel, tell them so
        # and don't let them start another process.
else:
            # also check that they didn't simply write in another chat for unrelated reasons (look for one of our commands)
if self.user_status(user_player, get_channel=True) != "None" and user_chan_guild != \
self.user_status(user_player, get_channel=True) and message.content in self.commands:
await message.channel.send(user_player + ', вы уже ведете диалог с ботом на другом канале.'
' Завершите его, или прервите командой "/стоп".')
            # don't let the loop start again, even if the command was issued from the same server
            # where the player is "busy".
return
def check(m):
            # make sure the message really is from our player and that they didn't just hit enter;
            # this also prevents mix-ups across multiple channels.
return len(m.content) != 0 and m.author == user_gambler and m.channel == user_channel
        # start of the "Bulls and Cows" game
if message.content == '/быки и коровы':
await self.db_edit(message.author.name + message.author.discriminator, 'bnc', user_chan_guild)
            # this is needed to track messages from this specific user
await message.channel.send('Хорошо, ' + user_player + '!\nУгадывающий называет число, а '
'загадывающий специальным образом отвечает, '
'сколько цифр совпало с ответом.\nЕсли в назван'
'ном числе цифра какого-то раз'
'ряда совпала с цифрой в том же разряде правил'
'ьного ответа, '
'это называется "быком". Если указанная цифра '
'есть в ответе, но на неверной'
' позиции, это "корова". Загадывающий отвечает,'
' сколько "быков" и "коров" '
'в числе угадывающего.\nПример -- числа\n8536\n'
'6573\nУ них 1 "бык" (это цифра 5) и 2 "коровы"'
' (это цифры 3 и 6).\n\n'
'Вы собираетесь просто отгадывать; играть'
' против бота(одновременно загадывать свое число '
'и отгадывать его);'
' или вы не собираетесь играть?\nЧтобы ответи'
'ть, введите'
' один из следующих вариантов: '
' 1 | 2 | /стоп\n'
'\nЕсли вы '
'пожелаете прекратить игру, то в любой'
' момент введите команду "/стоп"')
async def bnc_user_input(history=None):
                # user input for the bulls and cows game
user_try = await self.wait_for('message', check=check)
user_try = user_try.content
                # digit combinations that start with 0 are kept here.
zero_digitalis = ['0' + str(digital) for digital in range(100, 1000)]
while user_try != '/стоп' and (len(set(list(user_try))) != 4 or user_try not in
(zero_digitalis + [str(d) for d in range(1000, 10000)])):
if history is not None and user_try == '/история':
history_read = ''
for p in history:
b, c = Bnc.bulls_n_cows_morph(p[1], p[2])
                            # an f-string cannot call split() with a literal '\n' argument directly,
                            # so a variable holding '\n' serves as the argument
delimiter = '\n'
history_read += f'\nПопытка {str(len(history_read.split(delimiter)))}.' \
f' Ваше число {str(p[0])} -- {b} и {c}.'
await message.channel.send(user_player + ', это история ваших попыток.' + history_read)
await message.channel.send(user_player + ', введите четырехзначное число'
' с неповторяющимися цифрами или команду "/стоп",'
' чтобы прекратить игру.')
user_try = await self.wait_for('message', check=check)
user_try = user_try.content
return user_try
choice = await self.wait_for('message', check=check)
while choice.content not in ('1', '2', '/стоп'):
await message.channel.send(user_player + ', чтобы ответить,'
' введите один из следующих вариантов: \n1\n2\n/стоп')
choice = await self.wait_for('message', check=check)
if choice.content == '/стоп':
                # the player declined to play. Their status changes automatically at the end of the game block.
pass
elif choice.content == '1':
                # generate a number and report bulls and cows until the player wins
answer = Bnc.generate_answer()
await message.channel.send('Вы в одиночной игре, ' + user_player + '! Бот уже загадал число,'
' попробуйте угадать его.'
' Введите четырехзначное число'
' с неповторяющимися цифрами.')
win = False
number = 1
user_input = await bnc_user_input()
                # number of attempts
while not win:
if user_input == '/стоп':
break
bulls_count, cows_count = Bnc.bulls_n_cows(user_input, answer)
bulls, cows = Bnc.bulls_n_cows_morph(bulls_count, cows_count)
await message.channel.send(user_player + f"\n{number} попытка. Ваше число {user_input}."
f" У вас {bulls} и {cows}.")
if bulls_count == 4:
win = True
break
else:
await message.channel.send('Введите четырехзначное число с неповторяющимися цифрами.')
user_input = await bnc_user_input()
number += 1
if win:
morph = pymorphy2.MorphAnalyzer()
await message.channel.send('Невероятная победа, ' + user_player + '! Вы сделали это'
' всего за ' + str(number)
+ ' ' +
morph.parse('попытку')[0].make_agree_with_number(number).word + '.')
else:
await message.channel.send(user_player + ', вы играете против бота. Для того, чтобы решить,'
' кто будет ходить первым, бот использует бинарную'
' монетку. Выберите 0 или 1.\nВо время вашего хода также'
' будет доступна команда "/история", эта команда покажет'
' все ваши попытки и ответы соперника.')
                # decides who moves first.
bin_coin = str(random.choice((0, 1)))
choice = await self.wait_for('message', check=check)
while choice.content not in ('1', '0', '/стоп'):
await message.channel.send(user_player + ', выберите\n0\tили\t1\n Для прекращения игры '
'напишите команду "/стоп"')
choice = await self.wait_for('message', check=check)
                # a Bulls and Cows object; the game against the bot uses all of its functions.
game = Bnc()
                # 0 means the game is in progress, 1 that the player has won, 2 that the bot has won,
                # -1 that the game was aborted because the player cheated, or because they stopped it.
playing = 0
                # True if it is currently the player's turn
player_turn = False
                # keeps count of the player's attempts
if choice.content == '/стоп':
playing = -1
elif choice.content == bin_coin:
player_turn = True
await message.channel.send('Вы угадали, ' + user_player + '.')
else:
await message.channel.send('Вы не угадали, ' + user_player + '. ')
                # the game runs until it is stopped by a command or one side wins
while playing == 0:
if player_turn:
await message.channel.send(user_player + ', введите четырехзначное число '
'с неповторяющимися цифрами. Также вы можете'
' ввести команду "/история".')
user_input = await bnc_user_input(history=game.history)
if user_input == '/стоп':
playing = -1
break
bulls_count, cows_count = game.bulls_n_cows(user_input, game.answer)
                        # count bulls and cows and, if they trip the condition, regenerate the number,
                        # staying consistent with the attempt history.
if bulls_count >= 2 or cows_count >= 3 or bulls_count + cows_count in (4, 0):
game.cheat(user_input)
bulls_count, cows_count = game.bulls_n_cows(user_input, game.answer)
                        # add the attempt and its results to the history
game.history.append([user_input, bulls_count, cows_count])
bulls, cows = game.bulls_n_cows_morph(bulls_count, cows_count)
await message.channel.send(user_player + f"\nВаша {len(game.history)} попытка. Ваше число"
f" {user_input}. У вас {bulls} и {cows}.")
if bulls_count == 4:
                            # the player won
await message.channel.send('Вы победили, ' + user_player + '! Я загадал число '
+ str(game.answer))
playing = 1
player_turn = False
else:
guess = None
while True:
if len(game.guess_space) == 0:
await message.channel.send(user_player + ', вы попытались обмануть бота. '
'Вы проиграли.')
playing = -1
break
guess = random.choice(list(game.guess_space))
game.guess_space.remove(guess)
if game.is_compatible(guess):
break
                        # if the bot has detected the player cheating, abort the game
if playing != 0:
break
await message.channel.send(user_player + ', я думаю, что вы загадали число '
+ str(guess) + '\nВведите через пробел количество быков и коров.'
' (например -- 0 2)')
bulls_n_cows = await self.wait_for('message', check=check)
bulls_n_cows = bulls_n_cows.content.split(' ')
while len(bulls_n_cows) != 2 or not all(j in [str(d) for d in range(0, 5)]
for j in bulls_n_cows) \
or sum([int(c) for c in bulls_n_cows]) > 4:
if bulls_n_cows == ['/стоп']:
playing = -1
break
await message.channel.send(user_player + ', введите через пробел количество'
' "быков" и "коров".\nЕсли в названном числе '
'цифра какого-то разряда совпала с цифрой'
' в том же разряде правильного ответа, эт'
'о называется "быком". Если указанная циф'
'ра есть в ответе, но на неверной позиции,'
' это "корова". Пример -- у чисел 1234 и 5631 '
' 1 "бык" (это цифра 3) и 1 "корова"'
' (это цифра 1). Сумма "быков" и "коров" не может'
' быть больше 4.')
bulls_n_cows = await self.wait_for('message', check=check)
bulls_n_cows = bulls_n_cows.content.split(' ')
                        # this condition has to be duplicated because during the bot's turn the game can be aborted
                        # in two ways: 1 - the player cheated, 2 - the player stopped the game. In both cases the
                        # game must end immediately.
if playing != 0:
break
game.historys.append((guess, int(bulls_n_cows[0]), int(bulls_n_cows[1])))
bulls, cows = game.bulls_n_cows_morph(bulls_n_cows[0], bulls_n_cows[1])
await message.channel.send(user_player + f"\nМоя {len(game.history) + 1} попытка. Мое число"
f" {guess}. У меня {bulls} и {cows}.")
                        if int(bulls_n_cows[0]) == 4:
                            # the bot won
await message.channel.send('Бот победил, ' + user_player + '! Вы загадали число '
+ str(guess))
playing = 2
player_turn = True
if playing != -1:
await message.channel.send('Спасибо за игру! Если вы желаете еще поиграть --'
' введите команду "/игры".')
await message.channel.send(f'Игра окончена, {user_player}. Если желаете еще раз сыграть в эту или'
f' иную игру -- введите команду "/игры".')
        # start of the "Dice" game
elif message.content == '/кости':
            # status change.
await self.db_edit(message.author.name + message.author.discriminator, 'dices', user_chan_guild)
            # explain the rules of the game
await message.channel.send('Хорошо, ' + user_player + '! Правила таковы -- у вас ровно 100 монет. Вам нужно'
' увеличить их количество. На каждый бросок можно с'
'делать ставку, от 5 до 20 монет. Ставка делается '
'на сумму цифр, которые будет на верхн(их/ей) гран(я'
'х/и) кост(ей/и) после броска. Также вы можете '
'выбрать какие кости будете бросать. Кости каждый р'
'аз выбираются случайно, из следующих вариантов:'
'\n\tодна шестигранная кость, коэффициент ставки - 3.'
'\n\tдве шестигранные кости коэффициент ставки - 6'
'\n\tодна восьмигранная кость, коэффициент ставки - '
'4\n\tдве восьмигранные кости, коэффициент ставки - '
'8\n\tодна двадцатигранная кость,'
' коэффициент ставки - 10\nТакже вам всегда будет д'
'оступна моентка со стабильным коэффициентом 2.\n'
'Коэффициент ставки - это то число, на которое '
'будет умножена ваша ставка. При проигрыше у вас '
'вычтут вашу ставку. Но есть одно условие - ,'
' все коэффициенты, кроме стабильного, варируются'
' от 2 до самих себя.\nЕсли вы будете'
' играть, то выберите число, которого хотите '
'достигнуть, из нижеперечисленных. В противном случ'
'ае, напишите команду "/стоп"\n'
'200 | 300 | 500 | 1000 | /стоп')
choice = await self.wait_for('message', check=check)
            # validate the input
while choice.content not in ('200', '300', '/стоп', '500', '1000'):
await message.channel.send(user_player + ', чтобы ответить,'
' введите один из следующих вариантов: \n200\n300\n500\n100'
'0\n/стоп')
choice = await self.wait_for('message', check=check)
if choice.content == '/стоп':
                # the player declined to play. Their status changes automatically at the end of the game block.
pass
else:
start_cash = 100
end_cash = int(choice.content)
                # initial and target amounts; a dict of dice names and their bet multipliers.
dash_set = {'один шестигранник': 3,
'два шестигранника': 6,
'один восьмигранник': 4,
'два восьмигранника': 8,
'один двадцатигранник': 10}
                # all possible roll results for the different dice sets.
values = {'один шестигранник': range(1, 7),
'два шестигранника': range(2, 13),
'один восьмигранник': range(1, 9),
'два восьмигранника': range(2, 17),
'один двадцатигранник': range(1, 21),
'монета': range(1, 3)}
                # whether the coin was used last round.
d2_used = False
                # until the player loses or wins.
                while start_cash > 0 and start_cash != end_cash:
                    # this particular way of reseeding was settled on experimentally
random.seed(random.randint(10 ** 10, 10 ** 20))
                    # the dice sets that will be offered to the player this round.
cur_set = [random.choice([d for d in dash_set.keys()]) for _ in range(2)]
for i in range(len(cur_set)):
                        # remove and replace duplicates.
while cur_set.count(cur_set[i]) > 1:
del cur_set[i]
cur_set.append(random.choice([d for d in dash_set.keys()]))
cur_set[i] = f'{i + 1}){cur_set[i]} -- {str(random.randint(2, dash_set[cur_set[i]]))}'
if not d2_used:
cur_set.append('3)монета -- 2')
else:
d2_used = False
await message.channel.send(user_player + f'. Ваши монеты: {start_cash}, осталось набрать ещё '
f'{end_cash - start_cash} монет.\n Вы можете кинуть '
f'следующие кости:\n\t' + '\n\t'.join(cur_set)
+ '\nМожно ввести или наименование варианта, или его номер.')
user_move = await self.wait_for('message', check=check)
                    # validate the input.
while all([user_move.content != c.split(' -- ')[0][2:] for
c in cur_set]) and user_move.content not in ['1', '2', '3'] + ['/стоп']:
await message.channel.send(user_player + ', чтобы ответить, введите наименование одного из'
' следующих вариантов:\n\t' + '\n\t'.join(cur_set) +
'\nили номер варианта, от 1 до 3.\nТакже вы можете прервать игру'
' командой "/стоп"')
user_move = await self.wait_for('message', check=check)
dice = user_move.content
if dice == '/стоп':
break
if dice not in ['1', '2', '3']:
                        # if a name was given, look up its number.
dice = str([d.split(' -- ')[0][2:] == dice for d in cur_set].index(True) + 1)
if dice == '3':
d2_used = True
                    coefficient = int(cur_set[int(dice) - 1].split(' -- ')[1])
await message.channel.send(user_player + ', теперь выберите число, на которое будете делать ставку.'
' Число не может превышать максимальную сумму цифр костей'
', или быть меньше 1 (или 2 если костей две).')
digit = await self.wait_for('message', check=check)
                    # collect all the numbers that can be bet on.
sums = [str(b) for b in values[cur_set[int(dice) - 1].split(' -- ')[0][2:]]]
                    # validate the input
while digit.content not in sums and digit.content != '/стоп':
await message.channel.send(user_player + ', выберите число, на которое будете делать ставку.'
' Введите любое число из следуюших: ' +
', '.join(sums) + '\nТакже вы можете прервать игру командой '
'"/стоп"')
digit = await self.wait_for('message', check=check)
if digit.content == '/стоп':
break
await message.channel.send(f'Отлично, {user_player}, а теперь введите ставку. Ставкой может быть '
f'любое число от 5 до 20 включительно.')
bet = await self.wait_for('message', check=check)
                    # validate the bet. It is possible to place a bet and go negative,
                    # in full accordance with the rules that were shown to the user.
while bet.content not in [str(b) for b in range(5, 21)] and bet.content != '/стоп':
await message.channel.send(user_player + ', введите ставку. Ставкой может быть любое число из'
' следующих: ' + ', '.join([str(g) for g in
range(5, 21)]))
bet = await self.wait_for('message', check=check)
if bet.content == '/стоп':
break
                    # roll the dice.
cast = random.choice(sums)
await message.channel.send(f'{user_player}, вы сделали ставку {bet.content} монет на число '
f'{digit.content}. Бот бросает кости...\nИ выбрасывает число'
f' {cast}.')
if digit.content != cast:
await message.channel.send(f'Жаль, {user_player}, вы не угадали и лишились {bet.content} монет.')
start_cash -= int(bet.content)
else:
await message.channel.send(f'Вы угадали, {user_player}! Ваш выигрыш составляет '
f'{coefficient * int(bet.content)} монет(а).')
start_cash += coefficient * int(bet.content)
if start_cash <= 0:
await message.channel.send(f'Вы проиграли, {user_player}. Но это не повод для огорчения,'
f' ведь смысл этой игры не в победах или поражениях, а в самой игре.'
f' Каждый проигрыш или победа чему-то учат.')
if start_cash == end_cash:
await message.channel.send(f'Поздравляю, {user_player}, вы победили!')
await message.channel.send(f'Игра окончена, {user_player}. Если вы желаете сыграть еще '
f'-- введите команду "/игры".')
await self.db_edit(user_player, 'empty')
async def db_edit(self, user_id, status, channel='None'):
        # inserts the player into the database, or updates their status if they are already there.
cur = self.con.cursor()
        # on a server the identifier contains '#', in direct messages it does not. Avoid duplicate records.
user = cur.execute("Select * from users WHERE user_id=?", (user_id,)).fetchone()
if user is None:
cur.execute('INSERT INTO users(user_id, state, channel) VALUES(?, ?, ?)', (str(user_id), status, channel))
else:
cur.execute(f'UPDATE users SET state = "{status}", channel = "{channel}" WHERE user_id = "'
+ str(user_id) + '"')
self.con.commit()
def user_status(self, user_id, get_channel=False):
        # fetch the user's status.
cur = self.con.cursor()
user = cur.execute("Select * from users WHERE user_id=?", (user_id.replace('#', ''),)).fetchone()
if user is None:
return 'None'
if get_channel:
return user[2]
return user[1]
async def on_ready(self):
        # on restart all statuses are reset; on the very first start nothing happens,
        # since the database holds no users yet.
cur = self.con.cursor()
users = cur.execute("Select * from users").fetchall()
for i in users:
cur.execute('UPDATE users SET state = "empty", channel = "None" WHERE user_id = "' + str(i[0]) + '"')
self.reconnect[i[0]] = True
self.con.commit()
async def on_member_join(self, member):
        # send a message to a user who has just joined the server.
await self.pm_greet(member)
async def pm_greet(self, member):
        # the greeting is sent only if the user is not in the database.
if self.user_status(str(member)) == 'None':
await member.create_dm()
await member.dm_channel.send(self.dialog_base['/привет'])
await member.dm_channel.send('Вы можете общаться со мной как на общем канале, так и здесь. Eще у меня'
' есть команда "/помощь". Отправьте ее мне, если понадобится помощь.')
client = Fraudbot()
client.run(open('token.txt', 'r').readline())

- avg_line_length: 68.469643 · max_line_length: 122 · alphanum_fraction: 0.463683
- classes: 46,927 (score 0.990627) · generators: 0 (score 0) · decorators: 1,046 (score 0.022081) · async functions: 40,365 (score 0.852104) · documentation: 22,532 (score 0.47565)
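The bot's core scoring routine can be exercised on its own; this reproduces the example quoted in the bot's rules text (8536 and 6573 share one bull, the 5, and two cows, the 3 and 6):

```python
assert Bnc.bulls_n_cows('8536', '6573') == (1, 2)
assert Bnc.bulls_n_cows('1234', '1234') == (4, 0)   # exact match: four bulls
```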
**Record 8 · hexsha `9518dbb4f02a3d9f4f06a63e879638510aa4fe07`**
- size: 31,698 · ext: py · lang: Python
- path: `iocage/lib/ioc_json.py` · repo: `project-fifo/iocage` @ `1b8669bc2119718dbea8f2707a4eb4c92197c0f0` · licenses: ["BSD-2-Clause"]
- max_stars_count: null · max_issues_count: null · max_forks_count: 1 (2022-03-06T10:09:18.000Z to 2022-03-06T10:09:18.000Z)

"""Convert, load or write JSON."""
import json
import logging
import os
import re
import sys
from os import geteuid, path
from subprocess import CalledProcessError, PIPE, Popen, STDOUT, check_call
from iocage.lib.ioc_common import checkoutput, get_nested_key, open_atomic
def _get_pool_and_iocroot():
"""For internal setting of pool and iocroot."""
pool = IOCJson().json_get_value("pool")
iocroot = IOCJson(pool).json_get_value("iocroot")
return (pool, iocroot)
class IOCJson(object):
"""
    Migrates old iocage configurations (UCL and ZFS props) to the new JSON
    format; also sets and gets properties.
"""
def __init__(self, location="", silent=False, cli=False):
self.location = location
self.lgr = logging.getLogger('ioc_json')
self.cli = cli
if silent:
self.lgr.disabled = True
def json_convert_from_ucl(self):
"""Convert to JSON. Accepts a location to the ucl configuration."""
if geteuid() != 0:
raise RuntimeError("You need to be root to convert the"
" configurations to the new format!")
with open(self.location + "/config", "r") as conf:
lines = conf.readlines()
key_and_value = {}
for line in lines:
line = line.partition("=")
key = line[0].rstrip()
value = line[2].replace(";", "").replace('"', '').strip()
key_and_value[key] = value
self.json_write(key_and_value)
def json_convert_from_zfs(self, uuid, skip=False):
"""Convert to JSON. Accepts a jail UUID"""
pool, _ = _get_pool_and_iocroot()
dataset = "{}/iocage/jails/{}".format(pool, uuid)
jail_zfs_prop = "org.freebsd.iocage:jail_zfs_dataset"
if geteuid() != 0:
raise RuntimeError("You need to be root to convert the"
" configurations to the new format!")
cmd = ["zfs", "get", "-H", "-o", "property,value", "all", dataset]
regex = re.compile("org.freebsd.iocage")
zfs_get = Popen(cmd, stdout=PIPE).communicate()[0].decode(
"utf-8").split("\n")
# Find each of the props we want to convert.
props = [p for p in zfs_get if re.search(regex, p)]
key_and_value = {"host_domainname": "none"}
for prop in props:
prop = prop.partition(":")
key = prop[2].split("\t")[0]
value = prop[2].split("\t")[1].strip()
if key == "type":
if value == "basejail":
# These were just clones on master.
value = "jail"
key_and_value["basejail"] = "yes"
key_and_value[key] = value
if not skip:
# Set jailed=off and move the jailed dataset.
checkoutput(["zfs", "set", "jailed=off",
"{}/root/data".format(dataset)])
checkoutput(["zfs", "rename", "-f",
"{}/root/data".format(dataset),
"{}/data".format(dataset)])
checkoutput(["zfs", "set",
"{}=iocage/jails/{}/data".format(
jail_zfs_prop, uuid),
"{}/data".format(dataset)])
checkoutput(["zfs", "set", "jailed=on",
"{}/data".format(dataset)])
key_and_value["jail_zfs_dataset"] = "iocage/jails/{}/data".format(uuid)
self.json_write(key_and_value)
def json_load(self):
"""Load the JSON at the location given. Returns a JSON object."""
version = self.json_get_version()
skip = False
try:
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
except (IOError, OSError):
if path.isfile(self.location + "/config"):
self.json_convert_from_ucl()
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
else:
dataset = self.location.split("/")
for d in dataset:
if len(d) == 36:
uuid = d
elif len(d) == 8:
# Hack88 migration to a perm short UUID.
pool, iocroot = _get_pool_and_iocroot()
from iocage.lib.ioc_list import IOCList
full_uuid = checkoutput(
["zfs", "get", "-H", "-o",
"value",
"org.freebsd.iocage:host_hostuuid",
self.location]).rstrip()
jail_hostname = checkoutput(
["zfs", "get", "-H", "-o",
"value",
"org.freebsd.iocage:host_hostname",
self.location]).rstrip()
short_uuid = full_uuid[:8]
full_dataset = "{}/iocage/jails/{}".format(
pool, full_uuid)
short_dataset = "{}/iocage/jails/{}".format(
pool, short_uuid)
self.json_convert_from_zfs(full_uuid)
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
self.lgr.info("hack88 is no longer supported."
"\n{} is being converted to {} "
"permanently.".format(full_dataset,
short_dataset))
status, _ = IOCList().list_get_jid(full_uuid)
if status:
self.lgr.info("Stopping jail to migrate UUIDs.")
from iocage.lib.ioc_stop import IOCStop
IOCStop(full_uuid, conf["tag"], self.location,
conf, silent=True)
jail_zfs_prop = "org.freebsd.iocage:jail_zfs_dataset"
uuid_prop = "org.freebsd.iocage:host_hostuuid"
host_prop = "org.freebsd.iocage:host_hostname"
# Set jailed=off and move the jailed dataset.
checkoutput(["zfs", "set", "jailed=off",
"{}/data".format(full_dataset)])
# We don't want to change a real hostname.
if jail_hostname == full_uuid:
checkoutput(["zfs", "set", "{}={}".format(
host_prop, short_uuid), full_dataset])
checkoutput(["zfs", "set", "{}={}".format(
uuid_prop, short_uuid), full_dataset])
checkoutput(["zfs", "set",
"{}=iocage/jails/{}/data".format(
jail_zfs_prop, short_uuid),
"{}/data".format(full_dataset)])
checkoutput(["zfs", "rename", "-f", full_dataset,
short_dataset])
checkoutput(["zfs", "set", "jailed=on",
"{}/data".format(short_dataset)])
uuid = short_uuid
self.location = "{}/jails/{}".format(iocroot,
short_uuid)
skip = True
self.json_convert_from_zfs(uuid, skip=skip)
with open(self.location + "/config.json", "r") as conf:
conf = json.load(conf)
try:
conf_version = conf["CONFIG_VERSION"]
if version != conf_version:
conf = self.json_check_config(conf, version)
except KeyError:
conf = self.json_check_config(conf, version)
return conf
def json_write(self, data, _file="/config.json"):
"""Write a JSON file at the location given with supplied data."""
with open_atomic(self.location + _file, 'w') as out:
json.dump(data, out, sort_keys=True, indent=4,
ensure_ascii=False)
def json_get_value(self, prop):
"""Returns a string with the specified prop's value."""
old = False
if prop == "pool":
match = 0
zpools = Popen(["zpool", "list", "-H", "-o", "name"],
stdout=PIPE).communicate()[0].decode(
"utf-8").split()
for zfs in zpools:
dataset = Popen(["zfs", "get", "-H", "-o", "value",
"org.freebsd.ioc:active", zfs],
stdout=PIPE).communicate()[0].decode(
"utf-8").strip()
old_dataset = Popen(["zpool", "get", "-H", "-o", "value",
"comment", zfs],
stdout=PIPE).communicate()[0].decode(
"utf-8").strip()
if dataset == "yes":
_dataset = zfs
match += 1
elif old_dataset == "iocage":
_dataset = zfs
match += 1
old = True
if match == 1:
pool = _dataset
if old:
if os.geteuid() != 0:
raise RuntimeError("Run as root to migrate old pool"
" activation property!")
check_call(["zpool", "set", "comment=-", pool],
stderr=PIPE, stdout=PIPE)
check_call(["zfs", "set", "org.freebsd.ioc:active=yes",
pool], stderr=PIPE, stdout=PIPE)
return pool
elif match >= 2:
if "deactivate" not in sys.argv[1:]:
self.lgr.error("Pools:")
for zpool in zpools:
self.lgr.error(" {}".format(zpool))
raise RuntimeError("You have {} ".format(match) +
"pools marked active for iocage "
"usage.\n"
"Run \"iocage deactivate ZPOOL\" on"
" {} of the".format(match - 1) +
" pools.\n")
else:
if len(sys.argv) >= 2 and "activate" in sys.argv[1:]:
pass
else:
# We use the first zpool the user has, they are free to
# change it.
cmd = ["zpool", "list", "-H", "-o", "name"]
zpools = Popen(cmd, stdout=PIPE).communicate()[0].decode(
"utf-8").split()
if os.geteuid() != 0:
raise RuntimeError("Run as root to automatically "
"activate the first zpool!")
self.lgr.info("Setting up zpool [{}] for iocage usage\n"
"If you wish to change please use "
"\"iocage activate\"".format(zpools[0]))
Popen(["zfs", "set", "org.freebsd.ioc:active=yes",
zpools[0]]).communicate()
return zpools[0]
elif prop == "iocroot":
# Location in this case is actually the zpool.
try:
loc = "{}/iocage".format(self.location)
mount = checkoutput(["zfs", "get", "-H", "-o", "value",
"mountpoint", loc]).strip()
return mount
except CalledProcessError:
raise RuntimeError("{} not found!".format(self.location))
elif prop == "all":
conf = self.json_load()
return conf
else:
conf = self.json_load()
if prop == "last_started" and conf[prop] == "none":
return "never"
else:
return conf[prop]
def json_set_value(self, prop, create_func=False):
"""Set a property for the specified jail."""
# Circular dep! Meh.
from iocage.lib.ioc_list import IOCList
from iocage.lib.ioc_create import IOCCreate
key, _, value = prop.partition("=")
conf = self.json_load()
old_tag = conf["tag"]
uuid = conf["host_hostuuid"]
status, jid = IOCList.list_get_jid(uuid)
conf[key] = value
sysctls_cmd = ["sysctl", "-d", "security.jail.param"]
jail_param_regex = re.compile("security.jail.param.")
sysctls_list = Popen(sysctls_cmd, stdout=PIPE).communicate()[0].decode(
"utf-8").split()
jail_params = [p.replace("security.jail.param.", "").replace(":", "")
for p in sysctls_list if re.match(jail_param_regex, p)]
single_period = ["allow_raw_sockets", "allow_socket_af",
"allow_set_hostname"]
if not create_func:
if key == "tag":
conf["tag"] = IOCCreate("", prop, 0).create_link(
conf["host_hostuuid"], value, old_tag=old_tag)
tag = conf["tag"]
if key == "template":
pool, iocroot = _get_pool_and_iocroot()
old_location = "{}/iocage/jails/{}".format(pool, uuid)
new_location = "{}/iocage/templates/{}".format(pool, old_tag)
if status:
raise RuntimeError(f"{uuid} ({old_tag}) is running.\nPlease"
"stop it first!")
jails, paths = IOCList("uuid").list_datasets()
for j in jails:
_uuid = jails[j]
_path = f"{paths[j]}/root"
t_old_path = f"{old_location}/root@{_uuid}"
t_path = f"{new_location}/root@{_uuid}"
if _uuid == uuid:
continue
origin = checkoutput(["zfs", "get", "-H", "-o", "value",
"origin", _path]).rstrip()
if origin == t_old_path or origin == t_path:
_status, _ = IOCList.list_get_jid(_uuid)
if _status:
raise RuntimeError(f"CHILD: {_uuid} ({j}) is"
f" running.\nPlease stop it first!")
if value == "yes":
try:
checkoutput(["zfs", "rename", "-p", old_location,
new_location], stderr=STDOUT)
conf["type"] = "template"
                    self.location = new_location[len(pool):].replace(
                        "/iocage", iocroot)
except CalledProcessError as err:
raise RuntimeError("ERROR: {}".format(
err.output.decode("utf-8").rstrip()))
self.lgr.info("{} ({}) converted to a template.".format(uuid,
old_tag))
self.lgr.disabled = True
elif value == "no":
try:
checkoutput(["zfs", "rename", "-p", new_location,
old_location], stderr=STDOUT)
conf["type"] = "jail"
                    self.location = old_location[len(pool):].replace(
                        "/iocage", iocroot)
except CalledProcessError as err:
raise RuntimeError("ERROR: {}".format(
err.output.decode("utf-8").rstrip()))
self.lgr.info("{} ({}) converted to a jail.".format(uuid,
old_tag))
self.lgr.disabled = True
self.json_check_prop(key, value, conf)
self.json_write(conf)
self.lgr.info(
"Property: {} has been updated to {}".format(key, value))
# Used for import
if not create_func:
if key == "tag":
return tag
# We can attempt to set a property in realtime to jail.
if status:
if key in single_period:
key = key.replace("_", ".", 1)
else:
key = key.replace("_", ".")
if key in jail_params:
try:
checkoutput(["jail", "-m", "jid={}".format(jid),
"{}={}".format(key, value)], stderr=STDOUT)
except CalledProcessError as err:
raise RuntimeError("ERROR: {}".format(
err.output.decode("utf-8").rstrip()))
@staticmethod
    def json_get_version():
        """Returns the iocage configuration version."""
version = "5"
return version
def json_check_config(self, conf, version):
"""
Takes JSON as input and checks to see what is missing and adds the
new keys with their default values if missing.
"""
if geteuid() != 0:
raise RuntimeError("You need to be root to convert the"
" configurations to the new format!")
_, iocroot = _get_pool_and_iocroot()
# Version 2 keys
try:
sysvmsg = conf["sysvmsg"]
sysvsem = conf["sysvsem"]
sysvshm = conf["sysvshm"]
except KeyError:
sysvmsg = "new"
sysvsem = "new"
sysvshm = "new"
# Set all keys, even if it's the same value.
conf["sysvmsg"] = sysvmsg
conf["sysvsem"] = sysvsem
conf["sysvshm"] = sysvshm
# Version 3 keys
try:
release = conf["release"]
cloned_release = conf["cloned_release"]
except KeyError:
try:
freebsd_version = f"{iocroot}/releases/{conf['release']}" \
"/root/bin/freebsd-version"
except (IOError, OSError):
freebsd_version = f"{iocroot}/templates/{conf['tag']}" \
"/root/bin/freebsd-version"
if conf["release"][:4].endswith("-"):
# 9.3-RELEASE and under don't actually have this binary.
release = conf["release"]
else:
with open(freebsd_version, "r") as r:
for line in r:
if line.startswith("USERLAND_VERSION"):
release = line.rstrip().partition("=")[2].strip(
'"')
cloned_release = conf["release"]
# Set all Version 3 keys
conf["release"] = release
conf["cloned_release"] = cloned_release
# Version 4 keys
try:
basejail = conf["basejail"]
except KeyError:
basejail = "no"
# Set all keys, even if it's the same value.
conf["basejail"] = basejail
# Version 5 keys
try:
comment = conf["comment"]
except KeyError:
comment = "none"
# Set all keys, even if it's the same value.
conf["comment"] = comment
conf["CONFIG_VERSION"] = version
self.json_write(conf)
return conf
def json_check_prop(self, key, value, conf):
"""
Checks if the property matches known good values, if it's the
CLI, deny setting any properties not in this list.
"""
props = {
# Network properties
"interfaces" : (":", ","),
"host_domainname" : ("string",),
"host_hostname" : ("string",),
"exec_fib" : ("string",),
"ip4_addr" : ("|",),
"ip4_saddrsel" : ("0", "1",),
"ip4" : ("new", "inherit", "none"),
"ip6_addr" : ("|",),
"ip6_saddrsel" : ("0", "1"),
"ip6" : ("new", "inherit", "none"),
"defaultrouter" : ("string",),
"defaultrouter6" : ("string",),
"resolver" : ("string",),
"mac_prefix" : ("string",),
"vnet0_mac" : ("string",),
"vnet1_mac" : ("string",),
"vnet2_mac" : ("string",),
"vnet3_mac" : ("string",),
# Jail Properties
"devfs_ruleset" : ("string",),
"exec_start" : ("string",),
"exec_stop" : ("string",),
"exec_prestart" : ("string",),
"exec_poststart" : ("string",),
"exec_prestop" : ("string",),
"exec_poststop" : ("string",),
"exec_clean" : ("0", "1"),
"exec_timeout" : ("string",),
"stop_timeout" : ("string",),
"exec_jail_user" : ("string",),
"exec_system_jail_user": ("string",),
"exec_system_user" : ("string",),
"mount_devfs" : ("0", "1"),
"mount_fdescfs" : ("0", "1"),
"enforce_statfs" : ("0", "1", "2"),
"children_max" : ("string",),
"login_flags" : ("string",),
"securelevel" : ("string",),
"sysvmsg" : ("new", "inherit", "disable"),
"sysvsem" : ("new", "inherit", "disable"),
"sysvshm" : ("new", "inherit", "disable"),
"allow_set_hostname" : ("0", "1"),
"allow_sysvipc" : ("0", "1"),
"allow_raw_sockets" : ("0", "1"),
"allow_chflags" : ("0", "1"),
"allow_mount" : ("0", "1"),
"allow_mount_devfs" : ("0", "1"),
"allow_mount_nullfs" : ("0", "1"),
"allow_mount_procfs" : ("0", "1"),
"allow_mount_tmpfs" : ("0", "1"),
"allow_mount_zfs" : ("0", "1"),
"allow_quotas" : ("0", "1"),
"allow_socket_af" : ("0", "1"),
# RCTL limits
"cpuset" : ("off", "on"),
"rlimits" : ("off", "on"),
"memoryuse" : ":",
"memorylocked" : ("off", "on"),
"vmemoryuse" : ("off", "on"),
"maxproc" : ("off", "on"),
"cputime" : ("off", "on"),
"pcpu" : ("off", "on"),
"datasize" : ("off", "on"),
"stacksize" : ("off", "on"),
"coredumpsize" : ("off", "on"),
"openfiles" : ("off", "on"),
"pseudoterminals" : ("off", "on"),
"swapuse" : ("off", "on"),
"nthr" : ("off", "on"),
"msgqqueued" : ("off", "on"),
"msgqsize" : ("off", "on"),
"nmsgq" : ("off", "on"),
"nsemop" : ("off", "on"),
"nshm" : ("off", "on"),
"shmsize" : ("off", "on"),
"wallclock" : ("off", "on"),
# Custom properties
"tag" : ("string",),
"bpf" : ("off", "on"),
"dhcp" : ("off", "on"),
"boot" : ("off", "on"),
"notes" : ("string",),
"owner" : ("string",),
"priority" : str(tuple(range(1, 100))),
"hostid" : ("string",),
"jail_zfs" : ("off", "on"),
"jail_zfs_dataset" : ("string",),
"jail_zfs_mountpoint" : ("string",),
"mount_procfs" : ("0", "1"),
"mount_linprocfs" : ("0", "1"),
"vnet" : ("off", "on"),
"template" : ("no", "yes"),
"comment" : ("string",)
}
zfs_props = {
# ZFS Props
"compression" : "lz4",
"origin" : "readonly",
"quota" : "none",
"mountpoint" : "readonly",
"compressratio": "readonly",
"available" : "readonly",
"used" : "readonly",
"dedup" : "off",
"reservation" : "none",
}
if key in zfs_props.keys():
pool, _ = _get_pool_and_iocroot()
if conf["template"] == "yes":
_type = "templates"
uuid = conf["tag"] # I know, but it's easier this way.
else:
_type = "jails"
uuid = conf["host_hostuuid"]
checkoutput(["zfs", "set", f"{key}={value}",
f"{pool}/iocage/{_type}/{uuid}"])
return
if key in props.keys():
# Either it contains what we expect, or it's a string.
for k in props[key]:
if k in value:
return
if props[key][0] == "string":
return
else:
err = f"{value} is not a valid value for {key}.\n"
if self.cli:
self.lgr.error(f"ERROR: {err}")
else:
err = f"ERROR: {err}"
if key not in ("interfaces", "ip4_addr", "ip6_addr",
"memoryuse"):
msg = f"Value must be {' or '.join(props[key])}"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "ip4_addr":
msg = "IP address must contain both an interface and IP " \
"address.\nEXAMPLE: em0|192.168.1.10"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "ip6_addr":
msg = "IP address must contain both an interface and IP " \
"address.\nEXAMPLE: em0|fe80::5400:ff:fe54:1"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "interfaces":
msg = "Interfaces must be specified as a pair.\n" \
"EXAMPLE: vnet0:bridge0, vnet1:bridge1"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
elif key == "memoryuse":
msg = "memoryuse requires at minimum a pair.EXAMPLE: " \
"8g:log"
if not self.cli:
msg = err + msg
raise RuntimeError(msg)
else:
if self.cli:
exit(1)
else:
if self.cli:
raise RuntimeError(
f"ERROR: {key} cannot be changed by the user.")
else:
if key not in conf.keys():
raise RuntimeError(
f"WARNING: {key} is not a valid property!")
def json_plugin_load(self):
try:
with open("{}/plugin/settings.json".format(
self.location), "r") as settings:
settings = json.load(settings)
except (IOError, OSError):
raise RuntimeError(
"No settings.json exists in {}/plugin!".format(self.location))
return settings
def json_plugin_get_value(self, prop):
from iocage.lib.ioc_exec import IOCExec
pool, iocroot = _get_pool_and_iocroot()
conf = self.json_load()
uuid = conf["host_hostuuid"]
tag = conf["tag"]
_path = checkoutput(["zfs", "get", "-H", "-o", "value", "mountpoint",
"{}/iocage/jails/{}".format(pool,
uuid)]).rstrip()
# Plugin variables
settings = self.json_plugin_load()
serviceget = settings["serviceget"]
prop_error = ".".join(prop)
if "options" in prop:
_prop = prop[1:]
else:
_prop = prop
prop_cmd = "{},{}".format(serviceget, ",".join(_prop)).split(",")
try:
if prop[0] != "all":
if len(_prop) > 1:
return get_nested_key(settings, prop)
else:
return IOCExec(prop_cmd, uuid, tag, _path).exec_jail()
else:
return settings
except KeyError:
raise RuntimeError(
"Key: \"{}\" does not exist!".format(prop_error))
def json_plugin_set_value(self, prop):
from iocage.lib.ioc_exec import IOCExec
from iocage.lib.ioc_list import IOCList
pool, iocroot = _get_pool_and_iocroot()
conf = self.json_load()
uuid = conf["host_hostuuid"]
tag = conf["tag"]
_path = checkoutput(["zfs", "get", "-H", "-o", "value", "mountpoint",
"{}/iocage/jails/{}".format(pool,
uuid)]).rstrip()
status, _ = IOCList().list_get_jid(uuid)
# Plugin variables
settings = self.json_plugin_load()
serviceset = settings["serviceset"]
servicerestart = settings["servicerestart"].split()
keys, _, value = ".".join(prop).partition("=")
prop = keys.split(".")
restart = False
if "options" in prop:
prop = keys.split(".")[1:]
prop_cmd = "{},{},{}".format(serviceset, ",".join(prop), value).split(
",")
setting = settings["options"]
try:
while prop:
current = prop[0]
key = current
prop.remove(current)
if not prop:
if setting[current]:
try:
restart = setting[current]["requirerestart"]
except KeyError:
pass
else:
setting = setting[current]
if status:
# IOCExec will not show this if it doesn't start the jail.
self.lgr.info("Command output:")
IOCExec(prop_cmd, uuid, tag, _path).exec_jail()
if restart:
self.lgr.info("\n-- Restarting service --")
self.lgr.info("Command output:")
IOCExec(servicerestart, uuid, tag, _path).exec_jail()
self.lgr.info("\nKey: {} has been updated to {}".format(keys,
value))
except KeyError:
raise RuntimeError("Key: \"{}\" does not exist!".format(key))
| 39.573034
| 81
| 0.430942
| 31,213
| 0.984699
| 0
| 0
| 139
| 0.004385
| 0
| 0
| 8,348
| 0.26336
|
951a6328f58a32b162e3ef00d555a91633c30955
| 6,913
|
py
|
Python
|
FP/V46_faraday_effect/plot.py
|
nsalewski/laboratory
|
e30d187a3f5227d5e228b0132c3de4d426d85ffb
|
[
"MIT"
] | 1
|
2021-05-05T23:00:28.000Z
|
2021-05-05T23:00:28.000Z
|
FP/V46_faraday_effect/plot.py
|
nsalewski/laboratory
|
e30d187a3f5227d5e228b0132c3de4d426d85ffb
|
[
"MIT"
] | null | null | null |
FP/V46_faraday_effect/plot.py
|
nsalewski/laboratory
|
e30d187a3f5227d5e228b0132c3de4d426d85ffb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#coding:utf8
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
from modules.table import textable
import scipy.constants as const
import math as math
from modules.plot import axislabel as axis
#arr1=[0.4,0.75,1.4]
#arr2=[2,3,4]
#textable.latex_tab(data=[arr1,arr2],names=[r"title column 1",r"title column 2"], filename=r"example.tex",caption=r"Beautiful caption",label=r"important_label",dec_points=[2,0])
def manipulate(arr):
for elem in range(len(arr)):
if arr[elem-1]<180:
arr[elem-1]=arr[elem-1]+180
else: arr[elem-1]=arr[elem-1]-180
return arr
def theorie(x,a,mu,b):
return ((a*np.exp(-((x-mu)**2)/(b))))
def winkel(grad,sec):
sec=sec*1/60
grad=grad+sec
return grad
def lin(x,a):
return a*x
def eff_mass(a,B,N):
return unp.sqrt(((e0)**3*N*B)/(8*np.pi**2*eps*c**3*n*a))
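# Effective-mass relation implemented above (a note, not part of the original
# script):
#   m* = sqrt(e0**3 * N * B / (8 * pi**2 * eps0 * c**3 * n * a))
# where a is the fitted slope of delta_theta versus lambda**2, N the doping
# concentration, n the refractive index and B the magnetic flux density.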
# import the measured data
b,z=np.genfromtxt("data/b_feld.txt",unpack=True)
f1,d1_hin,d1_hins,d1_rueck,d1_ruecks=np.genfromtxt("data/1_probe.txt",unpack=True)
f2,d2_hin,d2_hins,d2_rueck,d2_ruecks=np.genfromtxt("data/2_probe.txt",unpack=True)
f3,d3_hin,d3_hins,d3_rueck,d3_ruecks=np.genfromtxt("data/3_probe.txt",unpack=True)
f1=f1*10**(-6)
f2=f2*10**(-6)
f3=f3*10**(-6)
l1=1.296*10**(-3)
l2=1.36*10**(-3)
l3=5.11*10**(-3)
# add the arc-second readings to the degree readings
grad1_hin=winkel(d1_hin,d1_hins)
grad1_rueck=winkel(d1_rueck,d1_ruecks)
grad2_hin=winkel(d2_hin,d2_hins)
grad2_rueck=winkel(d2_rueck,d2_ruecks)
grad3_hin=winkel(d3_hin,d3_hins)
grad3_rueck=winkel(d3_rueck,d3_ruecks)
# convert to a common reference point
grad1_hin=manipulate(grad1_hin)
grad1_rueck=manipulate(grad1_rueck)
grad2_hin=manipulate(grad2_hin)
grad2_rueck=manipulate(grad2_rueck)
grad3_hin=manipulate(grad3_hin)
grad3_rueck=manipulate(grad3_rueck)
grad1=(1/(2*l1)*(grad1_rueck-grad1_hin)*2*np.pi/360)
grad2=(1/(2*l2)*(grad2_rueck-grad2_hin)*2*np.pi/360)
grad3=(1/(2*l3)*(grad3_rueck-grad3_hin)*2*np.pi/360)
# compute delta theta
delta1=grad1-grad3
delta2=grad2-grad3
textable.latex_tab(data=[f1*10**6,grad3,grad1,grad2,delta1,delta2],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_{\mathrm{und}}$/$\si{\radian\per\meter}$",r"$\theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$",r"$\theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$",r"$\Delta \theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$",r"$\Delta \theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$"], filename=r"tables/eff_mass.tex",caption=r"Values of $\Delta \theta$ between undoped and doped $\ce{GaAs}$ used to determine the effective mass of the crystal electrons",label=r"eff_mass",dec_points=[2,2,2,2,2,2],tableformat=4.2)
# tables for theta
textable.latex_tab(data=[f1*10**6,grad1_hin,grad1_rueck,grad1],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe1.tex",caption=r"Measured values of the Faraday rotation for the doped sample $\ce{GaAs}_{d1}$",label=r"probe1",dec_points=[2,2,2,2],tableformat=4.2)
textable.latex_tab(data=[f2*10**6,grad2_hin,grad2_rueck,grad2],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe2.tex",caption=r"Measured values of the Faraday rotation for the doped sample $\ce{GaAs}_{d2}$",label=r"probe2",dec_points=[2,2,2,2],tableformat=4.2)
textable.latex_tab(data=[f3*10**6,grad3_hin,grad3_rueck,grad3],names=[r"$\lambda$/$\si{\micro\meter}$",r"$\theta_1$/$\si{\degree}$",r"$\theta_2$/$\si{\degree}$",r"$\theta$/$\si{\radian\per\meter}$"], filename=r"tables/probe3.tex",caption=r"Measured values of the Faraday rotation for the undoped sample $\ce{GaAs}_{und}$",label=r"probe3",dec_points=[2,2,2,2],tableformat=4.2)
# table for the magnetic field
textable.latex_tab(data=[z-3.1,b],names=[r"$z$/$\si{\centi\meter}$",r"$B$/$\si{\milli\tesla}$"], filename=r"tables/magnetfeld.tex",caption=r"Measurement of the magnetic field as a function of position $z$ (the sample sits at roughly $\SI{3.1}{\centi\meter}$)",label=r"magnetfeld",dec_points=[2,0],tableformat=3.2)
z_theo=np.linspace(0,6,50)
# Gaussian fit of the magnetic field
params, covariance = curve_fit(theorie,z-3.1,b)
errors = np.sqrt(np.diag(covariance))
print(params,errors)
print("Erwartungswert",params[1],errors[1])
delta1_calc=np.delete(delta1,[0,3,7])
f1_calc1=np.delete(f1,[0,3,7])
delta2_calc=np.delete(delta2,[6,7])
f1_calc2=np.delete(f1,[6,7])
# linear fits delta_theta = a * lambda**2 (outlier points removed by index above)
paramsd1, covarianced1 = curve_fit(lin,(f1_calc1**2),delta1_calc*10**(-6))
errorsd1 = np.sqrt(np.diag(covarianced1))
paramsd2, covarianced2 = curve_fit(lin,(f1_calc2)**2,delta2_calc*10**(-6))
errorsd2 = np.sqrt(np.diag(covarianced2))
a1=ufloat(paramsd1[0],errorsd1[0])*10**(6)
a2=ufloat(paramsd2[0],errorsd2[0])*10**(6)
n=3.3
e0=const.e
eps=const.epsilon_0
c=const.c
B=377.5*10**(-3)
print("Delta_1 Steigung", a1)
print("Delta_2 Steigung", a2)
print("Effektive Masse 1",eff_mass(a1,B,2.8*10**18*10**6),eff_mass(a1,B,2.8*10**18*10**6)/const.m_e)
print("Effektive Masse 2",eff_mass(a2,B,1.2*10**18*10**6),eff_mass(a2,B,1.2*10**18*10**6)/const.m_e)
# plot the magnetic field
plt.plot((params[1],params[1]),(-20,400), 'r--', label="expected value \n of the Gaussian fit")
plt.plot(z-3.1,b, 'rx', label="measured values $B$")
plt.ylabel(r"$B/\si{\milli\tesla}$")
plt.xlabel(r"z/\si{\centi\meter}")
plt.legend(loc='best')
plt.ylim(-20,400)
axis.labels()
plt.tight_layout()
plt.savefig('pictures/B_feld.pdf')
plt.clf()
#Plot theta
plt.plot(f1*10**6,grad1, 'rx', label=r"measured $\theta_{\mathrm{d1}}$")
plt.plot(f2*10**6,grad2, 'gx', label=r"measured $\theta_{\mathrm{d2}}$")
plt.plot(f3*10**6,grad3, 'bx', label=r"measured $\theta_{\mathrm{und}}$")
plt.ylabel(r"$\theta$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda$/$\si{\micro\meter}$")
plt.legend(loc='lower right')
plt.tight_layout()
axis.labels()
plt.xlim(1,3.5)
plt.savefig('pictures/winkel_gg_wellenlaenge.pdf')
plt.clf()
f_theo=np.linspace(0,np.max(f1)+0.1*np.max(f1))
#plot delta
plt.plot((f1)**2*10**11,delta1, 'rx', label=r"$\Delta \theta_{\mathrm{d1}}$")
plt.plot((f_theo)**2*10**11,lin((f_theo)**2,*paramsd1*10**6), 'b-', label="best-fit line")
plt.ylabel(r"$\Delta \theta_{\mathrm{d1}}$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda^{2}$/$\si{\square\meter}\cdot \num{e-11}$")
plt.legend(loc='best')
axis.labels()
plt.xlim(0,1.1)
plt.tight_layout()
plt.savefig('pictures/delta1.pdf')
plt.clf()
plt.plot((f1)**2*10**11,delta2, 'rx', label=r"$\Delta \theta_{\mathrm{d2}}$")
plt.plot((f_theo)**2*10**11,lin(f_theo**2,*paramsd2*10**6), 'b-', label="best-fit line")
plt.ylabel(r"$\Delta \theta_{\mathrm{d2}}$/$\si{\radian\per\meter}$")
plt.xlabel(r"$\lambda^{2}$/$\si{\square\meter}\cdot\num{e-11}$")
axis.labels()
plt.legend(loc='best')
plt.tight_layout()
plt.xlim(0,1.1)
plt.savefig('pictures/delta2.pdf')
plt.clf()
| 43.20625
| 613
| 0.707363
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,730
| 0.39468
|
951a6b980e66f06393b5c53d18d14db57345b12d
| 2,256
|
py
|
Python
|
hackzurich_py/test_hist_threshold.py
|
ejoebstl/hackzurich16
|
81a3b302050a4a464e2191c1d0912f8038c26ed9
|
[
"MIT"
] | null | null | null |
hackzurich_py/test_hist_threshold.py
|
ejoebstl/hackzurich16
|
81a3b302050a4a464e2191c1d0912f8038c26ed9
|
[
"MIT"
] | null | null | null |
hackzurich_py/test_hist_threshold.py
|
ejoebstl/hackzurich16
|
81a3b302050a4a464e2191c1d0912f8038c26ed9
|
[
"MIT"
] | null | null | null |
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
filedir = '/Users/gabrielfior/Dropbox/Hackzurich16/pupils_cutout/'
readbgr = filedir+'left_pupil232.bmp'
frame = plt.imread(readbgr)
white=plt.imread('/Users/gabrielfior/Dropbox/Hackzurich16/pupils_bw/right_pupil61.bmp')
black=plt.imread('/Users/gabrielfior/Dropbox/Hackzurich16/pupils_bw/right_pupil203.bmp')
# convert to HSV (note: plt.imread returns RGB, so cv2.COLOR_BGR2HSV is
# applied here with the channels in RGB order)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
plt.figure(1)
plt.clf()
img = cv2.imread(readbgr)
color = ('b','g','r')
b = img[:,:,0]
g = img[:,:,1]
r = img[:,:,2]
for i,col in enumerate(color):
histr = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
plt.figure(2)
plt.clf()
plt.subplot(211)
ret,th1 = cv2.threshold(img[:,:,0],40,60,cv2.THRESH_BINARY)
plt.imshow(th1)
plt.subplot(212)
plt.imshow(hsv)
#Compare blue channel (when it is smaller than red channel)
#plt.figure(3)
new_mask = np.zeros_like(b)
for i in range(b.shape[0]):
for j in range(b.shape[1]):
#if b < r, put 1 else 0
if (img[:,:,0])[i][j] < (img[:,:,2])[i][j]:
new_mask[i][j]=1
plt.figure(3)
plt.clf()
plt.imshow(new_mask)
plt.figure(4)
plt.subplot(211)
plt.title('white')
for i,col in enumerate(color):
histr = cv2.calcHist([white],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.subplot(212)
plt.title('black')
for i,col in enumerate(color):
histr = cv2.calcHist([black],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
#################
#Compute diff
mask_white = np.zeros_like(white[:,:,0])
for i in range(white.shape[0]):
for j in range(white.shape[1]):
#if b < r, put 1 else 0
if (white[:,:,0])[i][j] < (white[:,:,2])[i][j]:
mask_white[i][j]=1
mask_black = np.zeros_like(black[:,:,0])
for i in range(black.shape[0]):
for j in range(black.shape[1]):
#if b < r, put 1 else 0
if (black[:,:,0])[i][j] < (black[:,:,2])[i][j]:
mask_black[i][j]=1
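# A vectorized equivalent of the per-pixel loops above (a sketch relying on
# numpy broadcasting; verifies against the loop-built masks):
assert (mask_white == (white[:, :, 0] < white[:, :, 2]).astype(mask_white.dtype)).all()
assert (mask_black == (black[:, :, 0] < black[:, :, 2]).astype(mask_black.dtype)).all()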
#Plot masks
plt.figure(5)
plt.subplot(211)
plt.title('white')
plt.imshow(mask_white)
plt.subplot(212)
plt.title('black')
plt.imshow(mask_black)
plt.show()
#Flat fill
| 23.747368
| 88
| 0.626773
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 459
| 0.203457
|
951fd4c03bbcd55fdd4eaa4cf1d74e5f3dba25ea
| 496
|
py
|
Python
|
Lyft-Dental/Django-WebRtc/home/views.py
|
Abhik1998/Lyft-sample_project
|
3f9a79fb86c7abee713ae37245f5e7971be09139
|
[
"MIT"
] | 1
|
2021-01-09T08:42:24.000Z
|
2021-01-09T08:42:24.000Z
|
Lyft-Dental/Django-WebRtc/home/views.py
|
Abhik1998/Lyft-sample_project
|
3f9a79fb86c7abee713ae37245f5e7971be09139
|
[
"MIT"
] | null | null | null |
Lyft-Dental/Django-WebRtc/home/views.py
|
Abhik1998/Lyft-sample_project
|
3f9a79fb86c7abee713ae37245f5e7971be09139
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from chat.models import *
# Create your views here.
def home(request):
chat = ChatMessage.objects.all()
return render(request,'common/home.html', {'chat':chat})
def video(request):
return render(request,'video.html')
def video3(request):
return render(request,'video3.html')
def video5(request):
return render(request,'video5.html')
def video6(request):
return render(request,'video6.html')
def call(request):
return render(request,'final.html')
| 20.666667
| 57
| 0.745968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.225806
|
9520fdc9ead572486f8211683471cb168ee795b7
| 6,113
|
py
|
Python
|
Spatial_Scripts/2_gtfs_arnold_stops.py
|
VolpeUSDOT/gtfs-measures
|
0530d3c7193f10d591edd446d7e4985d03a7c48a
|
[
"CC0-1.0"
] | 3
|
2019-08-29T13:31:14.000Z
|
2021-06-18T06:10:06.000Z
|
Spatial_Scripts/2_gtfs_arnold_stops.py
|
VolpeUSDOT/gtfs-measures
|
0530d3c7193f10d591edd446d7e4985d03a7c48a
|
[
"CC0-1.0"
] | null | null | null |
Spatial_Scripts/2_gtfs_arnold_stops.py
|
VolpeUSDOT/gtfs-measures
|
0530d3c7193f10d591edd446d7e4985d03a7c48a
|
[
"CC0-1.0"
] | null | null | null |
#-------------------------------------------------------------------------------
# Name: GTFS_Arnold_Stops
#
# Purpose: Associate stops with the route shapes that have already been snapped to ARNOLD
#
# Author: Alex Oberg and Gary Baker
#
# Created: 10/17/2016
#
# Last updated 6/15/2017
#-------------------------------------------------------------------------------
# CONFIG
#-------------------------------------------------------------------------------
#MBTA MODEL
sqlite_file = r"C:\tasks\2016_09_12_GTFS_ingest\Model\MBTA\GTFS-MBTA.sqlite"
output_dir = r"c:\tasks\2016_09_12_GTFS_ingest\Model\MBTA\Output"
# SETUP
#-------------------------------------------------------------------------------
import datetime
import sqlite3
import arcpy
import os
#out_file = os.path.join(output_dir, 'test.txt')
#wf = open(out_file, 'w')
#wf.write("shape_id, trip_id, stop_lat, stop_lon, milepost\n")
start_time = datetime.datetime.now()
print('\nStart at ' + str(start_time))
print "Started Step 2: Snapping Stops to Routes"
print "GTFS database being processed: " + sqlite_file
output_gdb = "gtfs_arnold_prelim.gdb"
full_path_to_output_gdb = os.path.join(output_dir, output_gdb)
arcpy.env.workspace = full_path_to_output_gdb
arcpy.env.overwriteOutput = True
WGS84 = arcpy.SpatialReference(4326)
ALBERS_PRJ = arcpy.SpatialReference(102039)
traversed_oid_dict = {}
con = sqlite3.connect(sqlite_file)
# Prepare the output file
# -----------------------
out_lrs_file = os.path.join(output_dir, 'rtshp_lr_stops.txt')
with open(out_lrs_file, 'w') as wf:
wf.write("ROUTE_SHAPE,MP,STOP_ID\n")
#Add dummy values so ArcGIS doesn't mis-identify the field types
with open(out_lrs_file, 'a') as wf:
wf.write("randomtext,0.00,randomtext2\nrandomtext,0.00,randomtext3\nrandomtext,0.00,randomtext4\nrandomtext,0.00,randomtext5\n")
# FOR EACH ROUTE SHAPE ID (AKA CONSTRUCTED ROUTE)
# -----------------------------------------
print "Retrieving stops for each route shape ID..."
sql_shape = '''
select distinct shape_id
from trips t
join routes r on t.route_id = r.route_id
where r.route_type = 3 AND shape_id <> ""
'''
cur_shape_id = con.cursor()
for shape_row in cur_shape_id.execute(sql_shape):
#Cast as string otherwise non-numeric characters in shape_ID can cause many issues (e.g. some can come across as scientific notation).
shape_id = str(shape_row[0])
#print 'processing shape id {}'.format(shape_id)
#Testing on individual route shapes
#if not shape_id == '34E0040':
#continue
#if not shape_id == '850026':
#continue
# GET THE THE CONSTRUCTED ROUTE GEOMETRY FOR THE current ROUTE SHAPE ID
# --------------------------------------------------------
arcpy.MakeFeatureLayer_management ("route_results", "route_results_lyr")
route_results_query = 'name = \'{}\''.format(shape_id)
arcpy.SelectLayerByAttribute_management ("route_results_lyr", "NEW_SELECTION", route_results_query)
if int(arcpy.GetCount_management("route_results_lyr").getOutput(0)) != 1:
    print 'Can\'t process route shape {} because it doesn\'t have a single geography'.format(shape_id)
    continue
route_geometry = None
with arcpy.da.SearchCursor("route_results_lyr", ["SHAPE@"]) as scursor:
row = scursor.next()
route_geometry = row[0]
# All stops every seen on the current route shape
# ------------------------------------------------
#Note that tick marks have to be added to __SHAPE_ID__ to work with shape IDs that contain text.
sql_stops = '''
select stop_id, stop_lat, stop_lon
from stops
where stop_id in (
select distinct stop_id
from stop_times
where trip_id in (
select trip_id from trips where shape_id = '__SHAPE_ID__'
)
)
'''
sql_stops = sql_stops.replace('__SHAPE_ID__', (shape_id))
#print sql_stops
with open(out_lrs_file, 'a') as wf:
point = arcpy.Point()
cur_stops = con.cursor()
for stop_row in cur_stops.execute(sql_stops):
stop_id, stop_lat, stop_lon = stop_row
#print '\n{}, {}, {}'.format(stop_id, stop_lat, stop_lon)
point.X = stop_lon
point.Y = stop_lat
point_geom = arcpy.PointGeometry(point, WGS84).projectAs(ALBERS_PRJ)
result = route_geometry.queryPointAndDistance(point_geom, False)
#print result
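# queryPointAndDistance(point, False) returns a 4-tuple:
# (nearest point on the line, distance along the line, distance from the
# line, right-side flag); distances are in the geometry's linear unit,
# i.e. meters for Albers 102039.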
result_geom = result[0] # TODO make layer from this for use in itegrate step below
#Adding code to deal with milepost rounding issue
if result[1] <> 0:
milepost = result[1]-.01
else:
milepost = result[1]
wf.write('{},{:.2f},{}\n'.format(shape_id, milepost, stop_id))
# Linear reference the stops
print "Linear referencing the stops with the route results..."
arcpy.MakeRouteEventLayer_lr ("route_results", "Name" , out_lrs_file, "ROUTE_SHAPE POINT MP", "stop_events")
# Create a layer from them
arcpy.CopyFeatures_management("stop_events", "stops_lrs_temp")
arcpy.MakeFeatureLayer_management ("stops_lrs_temp", "stops_lrs_temp_lyr")
arcpy.SelectLayerByAttribute_management(in_layer_or_view="stops_lrs_temp_lyr", selection_type="NEW_SELECTION", where_clause="ROUTE_SHAPE <> 'randomtext'")
arcpy.CopyFeatures_management("stops_lrs_temp_lyr", "stops_lrs")
arcpy.Delete_management("stops_lrs_temp")
# Combine stops together that are within a certain distance of each other
print "Integrating stops that are near each other..."
arcpy.Integrate_management(in_features="stops_lrs #", cluster_tolerance="3 Meters")
# Split network by those integrated points (TODO segregate network that had routes from network that didn't and only split them?)
print "Splitting network at stops..."
arcpy.SplitLineAtPoint_management("network/arnold_split_nw","stops_lrs","network/arnold_split_stops_nw","1 Meters")
end_time = datetime.datetime.now()
total_time = end_time - start_time
print ("\nEnd at {}. Total run time {}".format(end_time, total_time))
| 33.773481
| 154
| 0.648454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,559
| 0.582202
|
9521b11ea24c3b1975d9331d56438810a026e0f3
| 14,298
|
py
|
Python
|
tensorflow_federated/python/research/baselines/emnist/models.py
|
khramtsova/federated
|
88b3ca65204a9922696ccefd774ece03ebf5cc8e
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:19:52.000Z
|
2019-10-10T06:19:52.000Z
|
tensorflow_federated/python/research/baselines/emnist/models.py
|
khramtsova/federated
|
88b3ca65204a9922696ccefd774ece03ebf5cc8e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/research/baselines/emnist/models.py
|
khramtsova/federated
|
88b3ca65204a9922696ccefd774ece03ebf5cc8e
|
[
"Apache-2.0"
] | 2
|
2019-10-10T06:19:41.000Z
|
2021-01-28T03:06:55.000Z
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build a model for EMNIST classification."""
import functools
import tensorflow as tf
def create_conv_dropout_model(only_digits=True):
"""Recommended model to use for EMNIST experiments.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
input_shape = [28, 28, 1]
model = tf.keras.models.Sequential([
tf.keras.layers.Reshape(input_shape=(28 * 28,), target_shape=input_shape),
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
input_shape=input_shape,
data_format=data_format),
tf.keras.layers.Conv2D(
64, kernel_size=(3, 3), activation='relu', data_format=data_format),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629.
The number of parameters when `only_digits=True` is (1,663,370), which matches
what is reported in the paper.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 28, 28, 32) 832
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 14, 14, 64) 51264
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 3136) 0
_________________________________________________________________
dense (Dense) (None, 512) 1606144
_________________________________________________________________
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 1,663,370
Trainable params: 1,663,370
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
input_shape = [28, 28, 1]
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.Reshape(input_shape=(28 * 28,), target_shape=input_shape),
conv2d(filters=32, input_shape=input_shape),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def create_two_hidden_layer_model(only_digits=True, hidden_units=200):
"""Create a two hidden-layer fully connected neural network.
Args:
only_digits: A boolean that determines whether to only use the digits in
EMNIST, or the full EMNIST-62 dataset. If True, uses a final layer with 10
outputs, for use with the digit-only EMNIST dataset. If False, uses 62
outputs for the larger dataset.
hidden_units: An integer specifying the number of units in the hidden layer.
Returns:
A `tf.keras.Model`.
"""
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(
hidden_units, activation=tf.nn.relu, input_shape=(28 * 28,)),
tf.keras.layers.Dense(hidden_units, activation=tf.nn.relu),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
# Defining global constants for ResNet model
L2_WEIGHT_DECAY = 2e-4
def _residual_block(input_tensor, kernel_size, filters, base_name):
"""A block of two conv layers with an identity residual connection.
Args:
input_tensor: The input tensor for the residual block.
kernel_size: An integer specifying the kernel size of the convolutional
layers in the residual blocks.
filters: A list of two integers specifying the filters of the conv layers in
the residual blocks. The first integer specifies the number of filters on
the first conv layer within each residual block, the second applies to the
remaining conv layers within each block.
base_name: A string used to generate layer names.
Returns:
The output tensor of the residual block evaluated at the input tensor.
"""
filters1, filters2 = filters
x = tf.keras.layers.Conv2D(
filters1,
kernel_size,
padding='same',
use_bias=False,
name='{}_conv_1'.format(base_name))(
input_tensor)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
name='{}_conv_2'.format(base_name))(
x)
x = tf.keras.layers.add([x, input_tensor])
x = tf.keras.layers.Activation('relu')(x)
return x
def _conv_residual_block(input_tensor,
kernel_size,
filters,
base_name,
strides=(2, 2)):
"""A block of two conv layers with a convolutional residual connection.
Args:
input_tensor: The input tensor for the residual block.
kernel_size: An integer specifying the kernel size of the convolutional
layers in the residual blocks.
filters: A list of two integers specifying the filters of the conv layers in
the residual blocks. The first integer specifies the number of filters on
the first conv layer within each residual block, the second applies to the
remaining conv layers within each block.
base_name: A string used to generate layer names.
strides: A tuple of integers specifying the strides lengths in the first
conv layer in the block.
Returns:
The output tensor of the residual block evaluated at the input tensor.
"""
filters1, filters2 = filters
x = tf.keras.layers.Conv2D(
filters1,
kernel_size,
strides=strides,
padding='same',
use_bias=False,
name='{}_conv_1'.format(base_name))(
input_tensor)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size,
padding='same',
use_bias=False,
name='{}_conv_2'.format(base_name))(
x)
shortcut = tf.keras.layers.Conv2D(
filters2, (1, 1),
strides=strides,
use_bias=False,
name='{}_conv_shortcut'.format(base_name))(
input_tensor)
x = tf.keras.layers.add([x, shortcut])
x = tf.keras.layers.Activation('relu')(x)
return x
def _resnet_block(input_tensor,
size,
kernel_size,
filters,
stage,
conv_strides=(2, 2)):
"""A block which applies multiple residual blocks to a given input.
The resnet block applies a single conv residual block followed by multiple
identity residual blocks to a given input.
Args:
input_tensor: The input tensor for the resnet block.
size: An integer specifying the number of residual blocks. A conv residual
block is applied once, followed by (size - 1) identity residual blocks.
kernel_size: An integer specifying the kernel size of the convolutional
layers in the residual blocks.
filters: A list of two integers specifying the filters of the conv layers in
the residual blocks. The first integer specifies the number of filters on
the first conv layer within each residual block, the second applies to the
remaining conv layers within each block.
stage: An integer representing the position of the resnet block within
the resnet. Used for generating layer names.
conv_strides: A tuple of integers specifying the strides in the first conv
layer within each conv residual block.
Returns:
The output tensor of the resnet block evaluated at the input tensor.
"""
x = _conv_residual_block(
input_tensor,
kernel_size,
filters,
base_name='res_{}_block_0'.format(stage),
strides=conv_strides)
for i in range(size - 1):
x = _residual_block(
x,
kernel_size,
filters,
base_name='res_{}_block_{}'.format(stage, i + 1))
return x
def create_resnet(num_blocks=5, only_digits=True):
"""Instantiates a ResNet model for EMNIST classification.
Instantiates the ResNet architecture from https://arxiv.org/abs/1512.03385.
The ResNet contains 3 stages of ResNet blocks with each block containing one
conv residual block followed by (num_blocks - 1) identity residual blocks. Each
residual block has 2 convolutional layers. With the input convolutional
layer and the final dense layer, this brings the total number of trainable
layers in the network to (6*num_blocks + 2). This number is often used to
identify the ResNet, so for example ResNet56 has num_blocks = 9.
Args:
num_blocks: An integer representing the number of residual blocks within
each ResNet block.
only_digits: A boolean that determines whether to only use the digits in
EMNIST, or the full EMNIST-62 dataset. If True, uses a final layer with 10
outputs, for use with the digit-only EMNIST dataset. If False, uses 62
outputs for the larger dataset.
Returns:
A `tf.keras.Model`.
"""
num_classes = 10 if only_digits else 62
target_shape = (28, 28, 1)
img_input = tf.keras.layers.Input(shape=(28 * 28,))
x = img_input
x = tf.keras.layers.Reshape(
target_shape=target_shape, input_shape=(28 * 28,))(
x)
x = tf.keras.layers.ZeroPadding2D(padding=(1, 1), name='initial_pad')(x)
x = tf.keras.layers.Conv2D(
16, (3, 3),
strides=(1, 1),
padding='valid',
use_bias=False,
name='initial_conv')(
x)
x = tf.keras.layers.Activation('relu')(x)
x = _resnet_block(
x,
size=num_blocks,
kernel_size=3,
filters=[16, 16],
stage=2,
conv_strides=(1, 1))
x = _resnet_block(
x,
size=num_blocks,
kernel_size=3,
filters=[32, 32],
stage=3,
conv_strides=(2, 2))
x = _resnet_block(
x,
size=num_blocks,
kernel_size=3,
filters=[64, 64],
stage=4,
conv_strides=(2, 2))
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
num_classes,
activation=tf.nn.softmax,
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
bias_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
name='fully_connected')(
x)
inputs = img_input
model = tf.keras.models.Model(
inputs, x, name='resnet{}'.format(6 * num_blocks + 2))
return model
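# A minimal smoke test (an assumption, not part of the original module):
# build each model variant and print its parameter count.
if __name__ == '__main__':
  for build_fn in (create_conv_dropout_model, create_original_fedavg_cnn_model,
                   create_two_hidden_layer_model, create_resnet):
    model = build_fn(only_digits=True)
    print(model.name, model.count_params())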
| 34.873171
| 80
| 0.665268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,832
| 0.617709
|
9522282432e0e76392916180e81134140fe248cd
| 893
|
py
|
Python
|
iterdeciser/loader.py
|
mpavlase/responses-form-evaluator
|
d0066a44c078ece458ae44577afc207583116638
|
[
"MIT"
] | 1
|
2020-02-19T00:39:10.000Z
|
2020-02-19T00:39:10.000Z
|
iterdeciser/loader.py
|
mpavlase/responses-form-evaluator
|
d0066a44c078ece458ae44577afc207583116638
|
[
"MIT"
] | null | null | null |
iterdeciser/loader.py
|
mpavlase/responses-form-evaluator
|
d0066a44c078ece458ae44577afc207583116638
|
[
"MIT"
] | null | null | null |
import csv
from iterdeciser import models
def data_loader(filename):
with open(filename, newline='') as fd:
reader = csv.reader(fd, delimiter=',', quotechar='"')
# remove all previous entries
models.Answer.objects.all().delete()
models.Question.objects.all().delete()
models.Response.objects.all().delete()
header = next(reader)
questions = []
for question in header:
q = models.Question(title=question)
q.save()
questions.append(q)
for row in reader:
response = models.Response()
response.save()
for index, column in enumerate(row):
answer = models.Answer()
answer.title = column
answer.question = questions[index]
answer.response = response
answer.save()
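# Typical invocation (a sketch; assumes a CSV whose header row holds the
# question titles and each following row holds one response's answers):
# data_loader('responses.csv')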
| 27.060606
| 61
| 0.555431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.041433
|
95258effa24ad7ea4b397bc2159a4af1349e68bd
| 6,146
|
py
|
Python
|
adapter.py
|
jain-harshil/Adapter-BERT
|
fd74ed0eea21b13034f9a834244191846de6b8d5
|
[
"Apache-2.0"
] | 4
|
2021-03-14T23:02:14.000Z
|
2022-02-14T10:10:12.000Z
|
adapter.py
|
jain-harshil/Adapter-BERT
|
fd74ed0eea21b13034f9a834244191846de6b8d5
|
[
"Apache-2.0"
] | null | null | null |
adapter.py
|
jain-harshil/Adapter-BERT
|
fd74ed0eea21b13034f9a834244191846de6b8d5
|
[
"Apache-2.0"
] | 2
|
2020-10-12T09:04:55.000Z
|
2021-11-13T03:54:55.000Z
|
import torch
from torch import nn
from transformers.modeling_bert import BertIntermediate, BertOutput, BertLayer, BertEncoder, BertModel, BertForSequenceClassification
def get_nonlin_func(nonlin):
if nonlin == "tanh":
return torch.tanh
elif nonlin == "relu":
return torch.relu
elif nonlin == "gelu":
return nn.functional.gelu
elif nonlin == "sigmoid":
return torch.sigmoid
else:
raise ValueError("Unsupported nonlinearity!")
### Bottleneck Adapter
class BottleneckAdapterLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.adapter_input_size = config.hidden_size
self.adapter_latent_size = config.adapter_latent_size
self.non_linearity = get_nonlin_func(config.adapter_non_linearity)
self.residual = config.adapter_residual
# down projection
self.down_proj = nn.Linear(self.adapter_input_size, self.adapter_latent_size)
# up projection
self.up_proj = nn.Linear(self.adapter_latent_size, self.adapter_input_size)
self.init_weights()
def init_weights(self):
""" Initialize the weights -> so that initially we the whole Adapter layer is a near-identity function """
self.down_proj.weight.data.normal_(mean=0.0, std=0.02)
self.down_proj.bias.data.zero_()
self.up_proj.weight.data.normal_(mean=0.0, std=0.02)
self.up_proj.bias.data.zero_()
def forward(self, x):
output = self.up_proj(self.non_linearity(self.down_proj(x)))
if self.residual:
output = x + output
return output
### BERT
class AdapterBertIntermediate(BertIntermediate):
def __init__(self, config, layer_index):
super().__init__(config)
self.add_adapter = layer_index in config.layers_to_adapt and config.add_intermediate_adapter
if self.add_adapter:
self.intermediate_adapter = BottleneckAdapterLayer(config)
def forward(self, hidden_states):
# adapter extension
if self.add_adapter:
hidden_states = self.intermediate_adapter(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class AdapterBertOutput(BertOutput):
def __init__(self, config, layer_index):
super().__init__(config)
self.add_adapter = layer_index in config.layers_to_adapt
if self.add_adapter:
self.output_adapter = BottleneckAdapterLayer(config)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
# adapter extension
if self.add_adapter:
hidden_states = self.output_adapter(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class AdapterBertLayer(BertLayer):
def __init__(self, config, layer_index):
super().__init__(config)
self.intermediate = AdapterBertIntermediate(config, layer_index)
self.output = AdapterBertOutput(config, layer_index)
class AdapterBertEncoder(BertEncoder):
def __init__(self, config):
super().__init__(config)
self.layer = nn.ModuleList([AdapterBertLayer(config, i) for i in range(config.num_hidden_layers)])
class AdapterBertModel(BertModel):
def __init__(self, config):
super().__init__(config)
self.encoder = AdapterBertEncoder(config)
self.freeze_original_params(config)
def freeze_original_params(self, config):
for param in self.parameters():
param.requires_grad = False
for i in range(config.num_hidden_layers):
if i in config.layers_to_adapt:
for param in self.encoder.layer[i].intermediate.intermediate_adapter.parameters():
param.requires_grad = True
for param in self.encoder.layer[i].output.output_adapter.parameters():
param.requires_grad = True
def unfreeze_original_params(self, config):
for param in self.parameters():
param.requires_grad = True
class AdapterBertForSequenceClassification(BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
self.bert = AdapterBertModel(config)
self.bert.unfreeze_original_params(config)
### Parallel Adapter
class ParallelAdapterBertModel(BertModel):
def __init__(self, config):
super().__init__(config)
# parallel, adapter-BERT
self.parabert = BertModel(config.parabert_config)
# freezing the pre-trained BERT
self.freeze_original_params()
def freeze_original_params(self):
for param in self.parameters():
param.requires_grad = False
for param in self.parabert.parameters():
param.requires_grad = True
def forward(self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
outputs_main = super().forward(input_ids, attention_mask, token_type_ids)
outputs_adapter = self.parabert(input_ids, attention_mask, token_type_ids)
outs_cls = []
outs_cls.append(outputs_main[1])
outs_cls.append(outputs_adapter[1])
concat_cls = torch.cat(outs_cls, dim = 1)
outs_tok = []
outs_tok.append(outputs_main[0])
outs_tok.append(outputs_adapter[0])
concat_tok = torch.cat(outs_tok, dim = 2)
outputs = (concat_tok, concat_cls)
return outputs
class ParallelAdapterBertForSequenceClassification(BertForSequenceClassification):
def __init__(self, config):
super().__init__(config)
self.bert = ParallelAdapterBertModel(config)
self.classifier = nn.Linear(config.hidden_size + config.parabert_config.hidden_size, self.config.num_labels)
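# A minimal usage sketch (an assumption, not part of the original module;
# the extra attributes below mirror what the adapter classes read off config):
# from transformers import BertConfig
# config = BertConfig.from_pretrained('bert-base-uncased')
# config.adapter_latent_size = 64
# config.adapter_non_linearity = 'gelu'
# config.adapter_residual = True
# config.add_intermediate_adapter = True
# config.layers_to_adapt = list(range(config.num_hidden_layers))
# model = AdapterBertForSequenceClassification(config)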
### XLM-R
| 35.94152
| 134
| 0.678653
| 5,560
| 0.904653
| 0
| 0
| 0
| 0
| 0
| 0
| 344
| 0.055971
|
9527282622ce1b8a8057c23be87132dc48225952
| 125
|
py
|
Python
|
test/integration_test/exampleProject/test_module.py
|
thusoy/grunt-pylint
|
1911144b76b144c991e721c794640c06101a8bf1
|
[
"MIT"
] | 9
|
2015-03-04T22:35:49.000Z
|
2018-08-16T00:51:24.000Z
|
test/integration_test/exampleProject/test_module.py
|
thusoy/grunt-pylint
|
1911144b76b144c991e721c794640c06101a8bf1
|
[
"MIT"
] | 10
|
2015-03-05T14:09:53.000Z
|
2019-04-13T21:48:05.000Z
|
test/integration_test/exampleProject/test_module.py
|
thusoy/grunt-pylint
|
1911144b76b144c991e721c794640c06101a8bf1
|
[
"MIT"
] | 5
|
2015-03-04T16:25:05.000Z
|
2018-08-13T10:49:47.000Z
|
""" This module is used for integration testing. """
# pylint: disable=locally-disabled,unused-import
import venv_exclusive
| 25
| 52
| 0.776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.8
|
95277c92e91076992bcacdf611aab098dd6f15f0
| 3,837
|
py
|
Python
|
models/pixelpick/networks/deeplab.py
|
martafdezmAM/lessen_supervision
|
630dfea2e396b9b6f61a3ad6786bb3ee169da3fd
|
[
"MIT"
] | 49
|
2021-04-08T07:45:13.000Z
|
2022-03-08T03:20:30.000Z
|
networks/deeplab.py
|
leiyu1980/PixelPick
|
f0ae7d35f62c1dda70f5bff1689177a513ab6259
|
[
"MIT"
] | 5
|
2021-04-21T02:13:47.000Z
|
2022-03-30T12:06:36.000Z
|
networks/deeplab.py
|
leiyu1980/PixelPick
|
f0ae7d35f62c1dda70f5bff1689177a513ab6259
|
[
"MIT"
] | 15
|
2021-04-14T01:15:06.000Z
|
2022-03-25T05:05:36.000Z
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from .aspp import ASPP
from .decoders import SegmentHead
from .mobilenet_v2 import MobileNetV2
class DeepLab(nn.Module):
def __init__(self,
args,
backbone='mobilenet',
output_stride=16):
super(DeepLab, self).__init__()
self.backbone = MobileNetV2(output_stride, nn.BatchNorm2d, mc_dropout=args.use_mc_dropout)
self.aspp = ASPP(backbone, output_stride, nn.BatchNorm2d)
# low level features
low_level_inplanes = 24
self.low_level_conv = nn.Sequential(nn.Conv2d(low_level_inplanes, 48, 1, bias=False),
nn.BatchNorm2d(48),
nn.ReLU())
# segment
self.seg_head = SegmentHead(args)
self.return_features = False
self.return_attention = False
def turn_on_dropout(self):
for m in self.modules():
if isinstance(m, torch.nn.Dropout):
m.train()
def turn_off_dropout(self):
for m in self.modules():
if isinstance(m, torch.nn.Dropout):
m.eval()
def forward(self, inputs):
backbone_feat, low_level_feat = self.backbone(inputs) # 1/16, 1/4;
x = self.aspp(backbone_feat) # 1/16 -> aspp -> 1/16
# low + high features
low_level_feat_ = self.low_level_conv(low_level_feat) # 256->48
x = F.interpolate(x, size=low_level_feat_.size()[2:], mode='bilinear', align_corners=True) # 1/4
second_to_last_features = torch.cat((x, low_level_feat_), dim=1) # 304 = 256 + 48
# segment
dict_outputs = self.seg_head(second_to_last_features)
pred = F.interpolate(dict_outputs['pred'], size=inputs.size()[2:], mode='bilinear', align_corners=True)
dict_outputs['pred'] = pred
emb = F.interpolate(dict_outputs['emb'], size=inputs.size()[2:], mode='bilinear', align_corners=True)
dict_outputs['emb'] = emb
return dict_outputs
def set_return_features(self, return_features): # True or False
self.return_features = return_features
def set_return_attention(self, return_attention): # True or False
self.return_attention = return_attention
def get_1x_lr_params(self):
modules = [self.backbone]
for i in range(len(modules)):
for m in modules[i].named_modules():
if isinstance(m[1], (nn.Conv2d, nn.BatchNorm2d)):
for p in m[1].parameters():
if p.requires_grad:
yield p
def get_10x_lr_params(self):
modules = [self.aspp, self.low_level_conv, self.seg_head]
if getattr(self, 'with_mask', False):  # mask_head only exists on variants that define it
    modules.append(self.mask_head)
for i in range(len(modules)):
for m in modules[i].named_modules():
if isinstance(m[1], (nn.Conv2d, nn.BatchNorm2d)):
for p in m[1].parameters():
if p.requires_grad:
yield p
def load_pretrain(self, pretrained):
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location='cpu')['state_dict']
print('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}  # do not load the final head's parameters
# for k, v in pretrained_dict.items():
# print('=> loading {} | {}'.format(k, v.size()))
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
else:
print('No such file {}'.format(pretrained))
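# A minimal forward-pass sketch (an assumption, not part of the original
# module; args must provide use_mc_dropout and whatever SegmentHead expects):
# net = DeepLab(args, backbone='mobilenet', output_stride=16)
# out = net(torch.randn(1, 3, 513, 513))
# print(out['pred'].shape, out['emb'].shape)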
| 37.990099
| 111
| 0.584832
| 3,677
| 0.954321
| 787
| 0.204256
| 0
| 0
| 0
| 0
| 405
| 0.105113
|
95291ef04782317ff7c65177e450a86cba814b66
| 1,224
|
py
|
Python
|
examples/top_view.py
|
ryan-mooore/anvil-parser
|
f2da8e0b7ca84ace49da8c6784363d914b2ca93d
|
[
"MIT"
] | 70
|
2019-08-12T18:46:09.000Z
|
2022-02-22T12:37:29.000Z
|
examples/top_view.py
|
ryan-mooore/anvil-parser
|
f2da8e0b7ca84ace49da8c6784363d914b2ca93d
|
[
"MIT"
] | 24
|
2020-01-20T04:15:59.000Z
|
2022-03-13T20:49:55.000Z
|
examples/top_view.py
|
ryan-mooore/anvil-parser
|
f2da8e0b7ca84ace49da8c6784363d914b2ca93d
|
[
"MIT"
] | 33
|
2019-12-06T19:22:10.000Z
|
2022-03-28T17:08:56.000Z
|
"""
Generates a image of the top view of a chunk
Needs a textures folder with a block folder inside
"""
import sys
if len(sys.argv) < 4:
    print('You must give a region file, a chunk x and a chunk z')
exit()
else:
region = sys.argv[1]
chx = int(sys.argv[2])
chz = int(sys.argv[3])
import os
from PIL import Image
import _path
import anvil
chunk = anvil.Chunk.from_region(region, chx, chz)
img = Image.new('RGBA', (16*16,16*16))
grid = [[None for i in range(16)] for j in range(16)]
for y in reversed(range(256)):
for z in range(16):
for x in range(16):
b = chunk.get_block(x, y, z).id
if b == 'air' or grid[z][x] is not None:
continue
grid[z][x] = b
texturesf = os.listdir('textures/block')
textures = {}
for z in range(16):
for x in range(16):
b = grid[z][x]
if b is None:
continue
if b not in textures:
if b+'.png' not in texturesf:
print(f'Skipping {b}')
textures[b] = None
continue
textures[b] = Image.open(f'textures/block/{b}.png')
if textures[b] is None:
continue
img.paste(textures[b], box=(x*16, z*16))
img.show()
| 26.042553
| 63
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.167484
|
95293f8eba3bae03a2ebdf267114cb3e46a7731e
| 2,468
|
py
|
Python
|
readthedocs/worker.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,054
|
2015-01-01T00:58:07.000Z
|
2019-06-28T05:50:49.000Z
|
readthedocs/worker.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,282
|
2015-01-01T21:38:49.000Z
|
2019-06-28T15:41:00.000Z
|
readthedocs/worker.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 3,224
|
2015-01-01T07:38:45.000Z
|
2019-06-28T09:19:10.000Z
|
"""Celery worker application instantiation."""
import os
from celery import Celery
from django.conf import settings
from django_structlog.celery.steps import DjangoStructLogInitStep
def create_application():
"""Create a Celery application using Django settings."""
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE',
'readthedocs.settings.dev',
)
application = Celery(settings.CELERY_APP_NAME)
application.config_from_object('django.conf:settings')
application.autodiscover_tasks(None)
# A step to initialize django-structlog
application.steps['worker'].add(DjangoStructLogInitStep)
return application
def register_renamed_tasks(application, renamed_tasks):
"""
Register renamed tasks into the Celery registry.
When a task is renamed (changing the function's name or moving it to a
different module) and there are old instances running in production, they
will trigger tasks using the old name. However, the new instances won't
have those tasks registered.
This function re-registers the new tasks under the old names to work around
this problem. New instances will then execute the code for the new task,
even when it is called under the old name.
This function *must be called after renamed tasks with new names were
already registered/load by Celery*.
When using this function, think about the order in which the ASGs will be
deployed. Deploying webs first will require one type of re-registration and
deploying builds may require a different one.
A good way to test this locally is with a code similar to the following:
In [1]: # Register a task with the old name
In [2]: @app.task(name='readthedocs.projects.tasks.update_docs_task')
...: def mytask(*args, **kwargs):
...: return True
...:
In [3]: # Trigger the task
In [4]: mytask.apply_async([99], queue='build:default')
In [5]: # Check it's executed by the worker with the new code
:param application: Celery Application
:param renamed_tasks: Mapping with the old name of the task as its key
    and the new name as its value.
:type renamed_tasks: dict
:type application: celery.Celery
:returns: Celery Application
"""
for oldname, newname in renamed_tasks.items():
application.tasks[oldname] = application.tasks[newname]
return application
app = create_application() # pylint: disable=invalid-name
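# A minimal usage sketch for task renames (hypothetical task names, not from
# this codebase):
# app = register_renamed_tasks(app, {
#     'old.module.task_name': 'new.module.task_name',
# })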
| 32.473684
| 77
| 0.715559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,796
| 0.727715
|
952983a05bf28fe82e2cd622f5d71bbde9e46c7c
| 876
|
py
|
Python
|
tr_converter.py
|
EFatihAydin/contverter_error_utf8
|
971035644425af69d48b869d0de1668127843f01
|
[
"MIT"
] | null | null | null |
tr_converter.py
|
EFatihAydin/contverter_error_utf8
|
971035644425af69d48b869d0de1668127843f01
|
[
"MIT"
] | null | null | null |
tr_converter.py
|
EFatihAydin/contverter_error_utf8
|
971035644425af69d48b869d0de1668127843f01
|
[
"MIT"
] | null | null | null |
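# Repairs double-encoded (UTF-8 bytes decoded as CP1252/Latin-1) Turkish
# characters in data.txt and writes the lowercased result to dosya_out.txt.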
file = open("./data.txt" , encoding = 'utf-8')
data = file.readlines()
liste=[]
for string in data:
string=string.replace('ü','ü')
string=string.replace('ÅŸ','ş')
string=string.replace('ÄŸ','ğ')
string=string.replace('ç','ç')
string=string.replace('ı','ı')
string=string.replace('ö','ö')
string=string.replace('Ü','Ü')
string=string.replace('Ö','Ö')
string=string.replace('İ','İ')
string=string.replace('Å','Ş')
string=string.replace('Ä','Ğ')
string=string.replace('Ç','Ç')
string=string.replace("ý","ı")
string=string.replace("ð","ğ")
string=string.replace("þ","ş")
string=string.replace("Ð","Ğ")
string=string.replace("Ý","İ")
string=string.replace("Þ","Ş")
string=string.lower()
liste.append(string)
with open('./dosya_out.txt' , 'w' , encoding = 'utf-8') as fl:
for i in liste:
fl.write(str(i))
| 27.375
| 63
| 0.615297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 216
| 0.233261
|
952d81863666bd0aa65ead158b3c1300284fe4e6
| 1,485
|
py
|
Python
|
example/simple_example/example_models.py
|
kun-fang/avro-data-model
|
1a657e20e666b534d0196888ae580ad7caddadeb
|
[
"MIT"
] | 9
|
2019-03-28T16:31:33.000Z
|
2022-02-18T03:22:50.000Z
|
example/simple_example/example_models.py
|
kun-fang/avro-data-model
|
1a657e20e666b534d0196888ae580ad7caddadeb
|
[
"MIT"
] | 3
|
2019-06-17T17:09:38.000Z
|
2021-05-14T03:06:00.000Z
|
example/simple_example/example_models.py
|
kun-fang/avro-data-model
|
1a657e20e666b534d0196888ae580ad7caddadeb
|
[
"MIT"
] | 2
|
2019-04-11T18:26:52.000Z
|
2022-02-18T03:22:52.000Z
|
import datetime
import os
from avro_models import avro_schema, AvroModelContainer
EXAMPLE_NAMES = AvroModelContainer(default_namespace="example.avro")
DIRNAME = os.path.dirname(os.path.realpath(__file__))
@avro_schema(
EXAMPLE_NAMES,
schema_file=os.path.join(DIRNAME, "Date.avsc"))
class Date(object):
def __init__(self, value):
if isinstance(value, datetime.date):
value = {
'year': value.year,
'month': value.month,
'day': value.day
}
super().__init__(value)
def date(self):
return datetime.date(self.year, self.month, self.day)
def validate(self, data):
print("validate", data)
return super().validate(data) \
and datetime.date(data['year'], data['month'], data['day'])
def __str__(self):
return str(self.date())
@avro_schema(
EXAMPLE_NAMES,
schema_file=os.path.join(DIRNAME, "Occupation.avsc"))
class Occupation(object):
pass
@avro_schema(
EXAMPLE_NAMES,
schema_file=os.path.join(DIRNAME, "User.avsc"))
class User(object):
def fullname(self):
return "{} {}".format(self.firstName, self.lastName)
def __repr__(self):
return "User({})".format(self.fullname())
@avro_schema(
EXAMPLE_NAMES,
schema_file=os.path.join(DIRNAME, "Employee.avsc"))
class Employee(object):
pass
@avro_schema(EXAMPLE_NAMES, full_name="com.test.Name")
class Name(object):
pass
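# A minimal usage sketch (an assumption about the avro_models wrapper API,
# not part of the original module):
# today = Date(datetime.date(2019, 3, 28))
# print(today.year, today)  # schema fields are exposed as attributes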
| 23.203125
| 71
| 0.641077
| 857
| 0.577104
| 0
| 0
| 1,262
| 0.849832
| 0
| 0
| 146
| 0.098316
|
952e3eae671c4397df0072361e08791772e8f4d1
| 5,401
|
py
|
Python
|
src/lib/Server/Reports/settings.py
|
pcmxgti/bcfg2
|
33aaf9c6bbeb0d20eef084b1347a0fce42086663
|
[
"mpich2"
] | null | null | null |
src/lib/Server/Reports/settings.py
|
pcmxgti/bcfg2
|
33aaf9c6bbeb0d20eef084b1347a0fce42086663
|
[
"mpich2"
] | null | null | null |
src/lib/Server/Reports/settings.py
|
pcmxgti/bcfg2
|
33aaf9c6bbeb0d20eef084b1347a0fce42086663
|
[
"mpich2"
] | null | null | null |
import django
import sys
# Compatibility import
from Bcfg2.Bcfg2Py3k import ConfigParser
# Django settings for bcfg2 reports project.
c = ConfigParser.ConfigParser()
if len(c.read(['/etc/bcfg2.conf', '/etc/bcfg2-web.conf'])) == 0:
raise ImportError("Please check that bcfg2.conf or bcfg2-web.conf exists "
"and is readable by your web server.")
try:
DEBUG = c.getboolean('statistics', 'web_debug')
except:
DEBUG = False
if DEBUG:
print("Warning: Setting web_debug to True causes extraordinary memory "
"leaks. Only use this setting if you know what you're doing.")
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Root', 'root'),
)
MANAGERS = ADMINS
try:
db_engine = c.get('statistics', 'database_engine')
except ConfigParser.NoSectionError:
e = sys.exc_info()[1]
raise ImportError("Failed to determine database engine: %s" % e)
db_name = ''
if c.has_option('statistics', 'database_name'):
db_name = c.get('statistics', 'database_name')
if db_engine == 'sqlite3' and db_name == '':
db_name = "%s/etc/brpt.sqlite" % c.get('server', 'repository')
DATABASES = {
'default': {
'ENGINE': "django.db.backends.%s" % db_engine,
'NAME': db_name
}
}
if db_engine != 'sqlite3':
DATABASES['default']['USER'] = c.get('statistics', 'database_user')
DATABASES['default']['PASSWORD'] = c.get('statistics', 'database_password')
DATABASES['default']['HOST'] = c.get('statistics', 'database_host')
try:
DATABASES['default']['PORT'] = c.get('statistics', 'database_port')
except: # An empty string tells Django to use the default port.
DATABASES['default']['PORT'] = ''
if django.VERSION[0] == 1 and django.VERSION[1] < 2:
DATABASE_ENGINE = db_engine
DATABASE_NAME = DATABASES['default']['NAME']
if DATABASE_ENGINE != 'sqlite3':
DATABASE_USER = DATABASES['default']['USER']
DATABASE_PASSWORD = DATABASES['default']['PASSWORD']
DATABASE_HOST = DATABASES['default']['HOST']
DATABASE_PORT = DATABASES['default']['PORT']
# Local time zone for this installation. All choices can be found here:
# http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
try:
TIME_ZONE = c.get('statistics', 'time_zone')
except:
if django.VERSION[0] == 1 and django.VERSION[1] > 2:
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/site_media'
if c.has_option('statistics', 'web_prefix'):
MEDIA_URL = c.get('statistics', 'web_prefix').rstrip('/') + MEDIA_URL
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'eb5+y%oy-qx*2+62vv=gtnnxg1yig_odu0se5$h0hh#pc*lmo7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'Bcfg2.Server.Reports.urls'
# Authentication Settings
# Use NIS authentication backend defined in backends.py
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
'Bcfg2.Server.Reports.backends.NISBackend')
# The NIS group authorized to log in to BCFG2's reporting system
AUTHORIZED_GROUP = ''
#create login url area:
try:
import django.contrib.auth
except ImportError:
raise ImportError('Import of Django module failed. Is Django installed?')
django.contrib.auth.LOGIN_URL = '/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates".
# Always use forward slashes, even on Windows.
'/usr/share/python-support/python-django/django/contrib/admin/templates/',
'Bcfg2.Server.Reports.reports'
)
if django.VERSION[0] == 1 and django.VERSION[1] < 2:
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request'
)
else:
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'Bcfg2.Server.Reports.reports'
)
| 33.339506
| 79
| 0.695797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,256
| 0.602851
|
95300a9bbee2d9246ae4298544114b63521e0cfa
| 2,851
|
py
|
Python
|
arachne/lingo.py
|
Darumin/arachne
|
ddae1c9f47e177941a6d6deed84357cbf41ad116
|
[
"MIT"
] | 1
|
2020-08-24T05:19:05.000Z
|
2020-08-24T05:19:05.000Z
|
arachne/lingo.py
|
Darumin/arachne
|
ddae1c9f47e177941a6d6deed84357cbf41ad116
|
[
"MIT"
] | null | null | null |
arachne/lingo.py
|
Darumin/arachne
|
ddae1c9f47e177941a6d6deed84357cbf41ad116
|
[
"MIT"
] | null | null | null |
from enum import Enum
import arachne.nouns as a
nouns = (
    a.Container,
    a.Item,
    a.Door,
    a.Room,
    a.Key
)
class Verb(Enum):
# basic player actions
LOOK = 'im rotating it in my mind'
TAKE = 'the act of picking things up'
DROP = 'the act of putting things down'
PUT = 'the act of placing things where you want them'
EXAMINE = 'when you want to really see something'
INVENTORY = 'when you really want to see your somethings'
USE = 'when you want to spend your somethings'
# room limitation actions
OPEN = 'open a container or door'
CLOSE = 'close a container or door'
UNLOCK = 'unlock a container or door'
LOCK = 'lock a container or door'
# social and personal actions
THINK = 'can be used as an objective tracker'
    ASK = 'ask someone about something'
    TELL = 'tell someone about something'
    SAY = 'say something aloud'
    GIVE = 'give something to someone'
    SHOW = 'show something to someone'
    # sequence actions
    WAIT = 'wait for time to pass'
    REPEAT = 'repeat the last command'
# case of bad verb
NULL = 'invalid input'
# this is an arachne object, in the English grammar sense,
# not to be confused with object types.
class Object(Enum):
FOUND = 'there is something like that nearby'
MULTIPLE = 'there is more than one thing like that nearby'
NONEXISTENT = 'there is nothing like that nearby'
UNSPECIFIED = 'there is nothing'
POSSESSED = 'not in a scary sense, but in a carry sense'
ALL = 'every loose item nearby'
class Prep(Enum):
WITHIN = 'put the toy in the box'
ATOP = 'place the toy on the box'
SETTING = 'turn the dial to ten'
NONE = 'no prep specified'
class Compass(Enum):
NORTH = 'north'
EAST = 'east'
WEST = 'west'
SOUTH = 'south'
NORTHEAST = 'northeast'
NORTHWEST = 'northwest'
SOUTHEAST = 'southeast'
SOUTHWEST = 'southwest'
UP = 'going up'
DOWN = 'coming down'
# encompasses all known in-game vocabulary; unmatched vocab always defaults to type Object
lexicon = (
('ARTICLES', '^the$|^a$|^an$|^some$'),
(Compass.NORTH, '^north$|^n$'),
(Compass.EAST, '^east$|^e$'),
(Compass.WEST, '^west$|^w$'),
(Compass.SOUTH, '^south$|^s$'),
(Compass.NORTHEAST, '^northeast$|^ne$'),
(Compass.NORTHWEST, '^northwest$|^nw$'),
(Compass.SOUTHEAST, '^southeast$|^se$'),
(Compass.SOUTHWEST, '^southwest$|^sw$'),
(Compass.UP, '^up$|^u$'),
(Compass.DOWN, '^down$|^d$'),
(Verb.LOOK, '^look$'),
(Verb.TAKE, '^take$|^get$'),
(Verb.DROP, '^drop$'),
(Verb.PUT, '^put$|^store$|^place$'),
(Verb.EXAMINE, '^x$|^check$|^examine$'),
(Verb.INVENTORY, '^i$|^inv$|^inventory$'),
(Verb.USE, '^use$|^consume$|^spend$'),
(Verb.OPEN, '^open$'),
(Verb.CLOSE, '^close$'),
(Verb.UNLOCK, '^unlock$'),
(Verb.LOCK, '^lock$'),
(Prep.WITHIN, '^in$|^inside$|^into$'),
(Prep.ATOP, '^on$|^above$'),
(Prep.SETTING, '^at$|^to$')
)
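# A minimal usage sketch (added for illustration; not part of the original
# module): classify a raw token by scanning the lexicon regexes in order.
# The sample tokens below are assumptions.
if __name__ == '__main__':
    import re
    def classify(token):
        for meaning, pattern in lexicon:
            if re.match(pattern, token.lower()):
                return meaning
        return None  # unmatched vocab; the real parser presumably treats this as an Object
    print(classify('take'))  # Verb.TAKE
    print(classify('ne'))    # Compass.NORTHEAST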
| 24.577586
| 89
| 0.591371
| 1,579
| 0.553841
| 0
| 0
| 0
| 0
| 0
| 0
| 1,504
| 0.527534
|
9531452916d8af98d79a18cfcf7c243ec86f577d
| 488
|
py
|
Python
|
src/hera/host_alias.py
|
bchalk101/hera-workflows
|
a3e9262f996ba477a35850c7e4b18ce3d5749687
|
[
"MIT"
] | 84
|
2021-10-20T17:20:22.000Z
|
2022-03-31T17:20:06.000Z
|
src/hera/host_alias.py
|
bchalk101/hera-workflows
|
a3e9262f996ba477a35850c7e4b18ce3d5749687
|
[
"MIT"
] | 84
|
2021-10-31T16:05:51.000Z
|
2022-03-31T14:25:25.000Z
|
src/hera/host_alias.py
|
bchalk101/hera-workflows
|
a3e9262f996ba477a35850c7e4b18ce3d5749687
|
[
"MIT"
] | 18
|
2021-11-01T04:34:39.000Z
|
2022-03-29T03:48:19.000Z
|
from typing import List
from argo_workflows.models import HostAlias as ArgoHostAlias
from pydantic import BaseModel
class HostAlias(BaseModel):
"""mapping between IP and hostnames
Notes
-----
See https://github.com/argoproj/argo-workflows/blob/master/sdks/python/client/docs/HostAlias.md
"""
hostnames: List[str]
ip: str
@property
def argo_host_alias(self) -> ArgoHostAlias:
return ArgoHostAlias(hostnames=self.hostnames, ip=self.ip)
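# A brief usage sketch (added; the host names and IP are illustrative assumptions):
#
#     alias = HostAlias(hostnames=['db.local', 'cache.local'], ip='10.0.0.5')
#     argo_alias = alias.argo_host_alias  # argo_workflows HostAlias, ready for a workflow spec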
| 23.238095
| 103
| 0.715164
| 368
| 0.754098
| 0
| 0
| 124
| 0.254098
| 0
| 0
| 168
| 0.344262
|
9532e0a3625fbfa97cee2a3c1c1ac08b02e54bbb
| 1,297
|
py
|
Python
|
legacy/lua_data/lua_data_converter.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | 1
|
2021-09-21T01:42:05.000Z
|
2021-09-21T01:42:05.000Z
|
legacy/lua_data/lua_data_converter.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | null | null | null |
legacy/lua_data/lua_data_converter.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | null | null | null |
from slpp import slpp as lua
import json
class LuaConverter:
def parse(self, luafile):
with open(luafile, 'r') as to_convert:
to_convert = str(to_convert.read())
            to_convert = to_convert.replace('data:extend(\n{\n {', '').replace('})\n', '')  # massage into a form slpp can parse
            to_convert = to_convert.replace(' },\n\n', ' },\n')  # some entries are irregularly double-spaced.
item_info_list = to_convert.split('\n },\n {')
returndict = {}
            for each_item in item_info_list:  # iterate over each item separately
                each_item = ' {' + each_item + '\n },'
                each_item_dict = lua.decode(each_item)  # use slpp, a Lua-to-Python data conversion library
                returndict[each_item_dict['name']] = each_item_dict  # nest the dict returned by slpp under the item name
return returndict
def write(self, infile, outfile):
towrite = json.dumps(self.parse(infile), sort_keys=False, indent=4)
towrite = infile.replace('.lua', '') + '_info = ' + towrite + '\n'
towrite = towrite.replace('true', 'True').replace('false', 'False')
outfilefulld = '../data/' + outfile
with open(outfilefulld, 'w') as outf:
outf.write(towrite)
print(infile + ' converted to ' + outfilefulld)
'''
Usage:
lc = LuaConverter()
lc.write('fluid.lua', 'fluid_dict.py')
'''
| 36.027778
| 112
| 0.591365
| 1,309
| 0.914745
| 0
| 0
| 0
| 0
| 0
| 0
| 482
| 0.336827
|
9533f3d3d51a5a32d60d0e2337d926980cff5177
| 839
|
py
|
Python
|
odette/scripts/collect_iso_codes.py
|
mdelhoneux/oDETTE
|
1b09bb3a950eb847c409de48c466d6559a010bd8
|
[
"Unlicense"
] | 2
|
2017-04-18T13:31:37.000Z
|
2017-07-12T21:00:10.000Z
|
odette/scripts/collect_iso_codes.py
|
mdelhoneux/oDETTE
|
1b09bb3a950eb847c409de48c466d6559a010bd8
|
[
"Unlicense"
] | null | null | null |
odette/scripts/collect_iso_codes.py
|
mdelhoneux/oDETTE
|
1b09bb3a950eb847c409de48c466d6559a010bd8
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
#==============================================================================
#author :Miryam de Lhoneux
#email :miryam.de_lhoneux@lingfil.uu.se
#date :2015/12/30
#version :1.0
#description :collect iso codes in UD directories
#usage :python scripts/collect_iso_codes.py
#Python version :2.7.6
#==============================================================================
import os
import sys
import pprint
#generate a dictionary of iso_codes from ud treebank directory
codes = {}
ud_dir = sys.argv[1]
for language in os.listdir(ud_dir):
ldir = ud_dir + "/" + language
    for f in os.listdir(ldir):
        parts = f.split(".")
        if len(parts) > 1 and parts[1] == "conllu":
            iso_code = f.split("-")[0]
            codes[language] = iso_code
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(codes)
| 28.931034
| 79
| 0.54112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 479
| 0.570918
|
20f86d70eb09a90cb1a4b918de25a5f97e226d8c
| 5,696
|
py
|
Python
|
airtest/core/ios/mjpeg_cap.py
|
Cache-Cloud/Airtest
|
4f831977a32c2b120dee631631c1154407b34d32
|
[
"Apache-2.0"
] | null | null | null |
airtest/core/ios/mjpeg_cap.py
|
Cache-Cloud/Airtest
|
4f831977a32c2b120dee631631c1154407b34d32
|
[
"Apache-2.0"
] | null | null | null |
airtest/core/ios/mjpeg_cap.py
|
Cache-Cloud/Airtest
|
4f831977a32c2b120dee631631c1154407b34d32
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
import socket
import traceback
from airtest import aircv
from airtest.utils.snippet import reg_cleanup, on_method_ready, ready_method
from airtest.core.ios.constant import ROTATION_MODE, DEFAULT_MJPEG_PORT
from airtest.utils.logger import get_logger
from airtest.utils.safesocket import SafeSocket
LOGGING = get_logger(__name__)
class SocketBuffer(SafeSocket):
def __init__(self, sock: socket.socket):
super(SocketBuffer, self).__init__(sock)
def _drain(self):
_data = self.sock.recv(1024)
if _data is None or _data == b"":
raise IOError("socket closed")
self.buf += _data
return len(_data)
def read_until(self, delimeter: bytes) -> bytes:
""" return without delimeter """
while True:
index = self.buf.find(delimeter)
if index != -1:
_return = self.buf[:index]
self.buf = self.buf[index + len(delimeter):]
return _return
self._drain()
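    # Illustrative note (added): with self.buf == b"abc\r\ndef", read_until(b"\r\n")
    # returns b"abc" and leaves b"def" buffered for the next call.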
def read_bytes(self, length: int) -> bytes:
while length > len(self.buf):
self._drain()
_return, self.buf = self.buf[:length], self.buf[length:]
return _return
def write(self, data: bytes):
return self.sock.sendall(data)
class MJpegcap(object):
def __init__(self, instruct_helper=None, ip='localhost', port=None, ori_function=None):
self.instruct_helper = instruct_helper
self.port = int(port or DEFAULT_MJPEG_PORT)
self.ip = ip
        # If a port is given, wda's port 9100 has already been forwarded to it, so no extra local forwarding is needed
self.port_forwarding = True if self.port == DEFAULT_MJPEG_PORT and ip in ('localhost', '127.0.0.1') else False
self.ori_function = ori_function
self.sock = None
self.buf = None
self._is_running = False
@ready_method
def setup_stream_server(self):
if self.port_forwarding:
self.port, _ = self.instruct_helper.setup_proxy(9100)
self.init_sock()
reg_cleanup(self.teardown_stream)
def init_sock(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.ip, self.port))
self.buf = SocketBuffer(self.sock)
self.buf.write(b"GET / HTTP/1.0\r\nHost: localhost\r\n\r\n")
self.buf.read_until(b'\r\n\r\n')
self._is_running = True
LOGGING.info("mjpegsock is ready")
except ConnectionResetError:
            # Disconnecting tidevice or unplugging the cable triggers this error; just exit
LOGGING.error("mjpegsock connection error")
raise
@on_method_ready('setup_stream_server')
def get_frame_from_stream(self):
if self._is_running is False:
self.init_sock()
try:
while True:
line = self.buf.read_until(b'\r\n')
if line.startswith(b"Content-Length"):
length = int(line.decode('utf-8').split(": ")[1])
break
while True:
if self.buf.read_until(b'\r\n') == b'':
break
imdata = self.buf.read_bytes(length)
return imdata
except IOError:
            # If mjpegsock data is not consumed for a while it may disconnect; close self.buf
            # and temporarily return a blank screen, reconnecting the next time a frame is needed
LOGGING.debug("mjpegsock is closed")
self._is_running = False
self.buf.close()
return self.get_blank_screen()
def get_frame(self):
        # grab a single screenshot
return self.get_frame_from_stream()
def snapshot(self, ensure_orientation=True, *args, **kwargs):
"""
Take a screenshot and convert it into a cv2 image object
        !!! Note: the frame returned here may not be the newest in the queue; unless
        frames are consumed continuously it may be a stale image, so use with care
Args:
ensure_orientation: True or False whether to keep the orientation same as display
Returns: numpy.ndarray
"""
screen = self.get_frame_from_stream()
try:
screen = aircv.utils.string_2_img(screen)
except Exception:
# may be black/locked screen or other reason, print exc for debugging
traceback.print_exc()
return None
if ensure_orientation:
if self.ori_function:
display_info = self.ori_function()
orientation = next(key for key, value in ROTATION_MODE.items() if value == display_info["orientation"])
screen = aircv.rotate(screen, -orientation, clockwise=False)
return screen
def get_blank_screen(self):
"""
        Generate a black placeholder image, returned in place of the screen when the connection is lost
Returns:
"""
if self.ori_function:
display_info = self.ori_function()
width, height = display_info['width'], display_info['height']
if display_info["orientation"] in [90, 270]:
width, height = height, width
else:
width, height = 1080, 1920
img = numpy.zeros((width, height, 3)).astype('uint8')
img_string = aircv.utils.img_2_string(img)
return img_string
def teardown_stream(self):
if self.port_forwarding:
self.instruct_helper.remove_proxy(self.port)
if self.buf:
self.buf.close()
self.port = None
if __name__ == "__main__":
import wda
from airtest.core.ios.instruct_cmd import InstructHelper
addr = "http://localhost:8100"
driver = wda.Client(addr)
info = driver.info
instruct_helper = InstructHelper(info['uuid'])
mjpeg_server = MJpegcap(instruct_helper)
print(len(mjpeg_server.get_frame()))
| 33.309942
| 119
| 0.607619
| 5,388
| 0.881256
| 0
| 0
| 1,172
| 0.191691
| 0
| 0
| 1,437
| 0.235034
|
20fa7eb3a7346661e1dcc5a7aa474c9102b7df4b
| 3,342
|
py
|
Python
|
happy.py
|
xiaoqcn/LearnLinuxViaPython
|
3c591471bbceefab44161aedb8ff67c2009b8ec0
|
[
"Apache-2.0"
] | null | null | null |
happy.py
|
xiaoqcn/LearnLinuxViaPython
|
3c591471bbceefab44161aedb8ff67c2009b8ec0
|
[
"Apache-2.0"
] | null | null | null |
happy.py
|
xiaoqcn/LearnLinuxViaPython
|
3c591471bbceefab44161aedb8ff67c2009b8ec0
|
[
"Apache-2.0"
] | null | null | null |
import time
import datetime
import os
import sys
import atexit
import signal
from multiprocessing import Pool
from threading import Thread
class HappyScrum:
def __init__(
self,
pid_path,
pool_size=4,
busy_wait=90,
idle_wait=300,
say_hi_wait=1800,
is_debug=False,
):
self.pid_path = pid_path
self.busy_wait = busy_wait
self.idle_wait = idle_wait
self.say_hi_wait = say_hi_wait
self.exception_wait = 300
self.pool_size = pool_size
self.is_debug = is_debug
if self.is_debug:
self.busy_wait = 5
self.idle_wait = 5
self.say_hi_wait = 8
self.round = 0
self.is_busy = True
self.born_utc = datetime.datetime.utcnow()
self.born = datetime.datetime.now()
self.daemon_t = Thread(target=self.sen, daemon=True)
self.dev = lambda x: x
self.po = lambda x: x
def sen(self):
while True:
time.sleep(self.say_hi_wait)
if self.round >= 10000:
print(
f"-DOG [{os.getpid()}]:", datetime.datetime.now(), file=sys.stderr
)
self.round = 0
def run_forever(self):
if os.path.exists(self.pid_path):
            raise ValueError(f"pid file already exists: {self.pid_path}")
with open(self.pid_path, mode="w", encoding="utf-8") as f:
f.write(str(os.getpid()))
        print(
            f"==================\nMAIN [{os.getpid()}]: starting", file=sys.stderr, flush=True
        )
self.daemon_t.start()
while True:
self.round += 1
try:
self.run_round()
except Exception as ex:
print(
f"MAIN [{os.getpid()}]: HS_ERR: {str(ex)}",
file=sys.stderr,
flush=True,
)
time.sleep(self.exception_wait)
def run_round(self):
if self.is_busy:
print(
f"MAIN [{os.getpid()}]: ROUND: {self.round} BUSY {datetime.datetime.now()}",
file=sys.stderr,
)
time.sleep(self.busy_wait)
else:
print(
f"MAIN [{os.getpid()}]: ROUND: {self.round} IDLE {datetime.datetime.now()}",
file=sys.stderr,
)
time.sleep(self.idle_wait)
_task_list = self.po()
if len(_task_list) == 0:
self.is_busy = False
return
self.do_work(_task_list)
def do_work(self, task_list):
_feature_list = []
_pool = Pool(self.pool_size)
for i in task_list:
_f = _pool.apply_async(self.dev, args=(i,))
_feature_list.append(_f)
_pool.close()
_pool.join()
for r in _feature_list:
print(f"MAIN[{os.getpid()}]: HS_DOD", r.get())
pass
def register_po(self, po_tpl):
self.po = po_tpl
def register_dev(self, dev_tpl):
self.dev = dev_tpl
@classmethod
def register_dispose(cls, func_dispose):
atexit.register(func_dispose)
signal.signal(signal.SIGTERM, func_dispose)
signal.signal(signal.SIGINT, func_dispose)
signal.signal(signal.SIGQUIT, func_dispose)
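# A hedged usage sketch (added; the path and the callables below are illustrative assumptions):
#
#     def po():          # "product owner": returns the current task backlog
#         return []
#     def dev(task):     # "developer": processes a single task
#         return task
#     hs = HappyScrum('/tmp/happy.pid', pool_size=2, is_debug=True)
#     hs.register_po(po)
#     hs.register_dev(dev)
#     hs.run_forever()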
| 28.084034
| 92
| 0.529623
| 3,210
| 0.957637
| 0
| 0
| 250
| 0.074582
| 0
| 0
| 339
| 0.101134
|
20fa9357a93d7d86c13beaf0a8a806393d553ed4
| 526
|
py
|
Python
|
functional_tests/test_gallery.py
|
atypicalrobot/igor_personal_site
|
8fd788bc43884792b786abeb34e9fec9e79492f1
|
[
"MIT"
] | null | null | null |
functional_tests/test_gallery.py
|
atypicalrobot/igor_personal_site
|
8fd788bc43884792b786abeb34e9fec9e79492f1
|
[
"MIT"
] | null | null | null |
functional_tests/test_gallery.py
|
atypicalrobot/igor_personal_site
|
8fd788bc43884792b786abeb34e9fec9e79492f1
|
[
"MIT"
] | null | null | null |
from .base import *
class GalleryPageTests(SeleniumTestCase):
def test_gallery_items(self):
browser = self.browser
browser.get('http://127.0.0.1:8000/gallery/')
assert "we don't have any Galleries" not in browser.page_source
def test_gallery_images(self):
browser = self.browser
browser.get('http://127.0.0.1:8000/gallery/')
link = browser.find_element_by_tag_name("center")
link.click()
assert "No images are tagged" not in browser.page_source
| 29.222222
| 71
| 0.659696
| 505
| 0.960076
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.23384
|
20fb6d839493dfeb4698c4e202a1cd7ca0226dba
| 784
|
py
|
Python
|
plates.py
|
winksaville/cq-plates
|
fb175522fae991a8d88cdf26afad273a4b8b9098
|
[
"MIT"
] | null | null | null |
plates.py
|
winksaville/cq-plates
|
fb175522fae991a8d88cdf26afad273a4b8b9098
|
[
"MIT"
] | null | null | null |
plates.py
|
winksaville/cq-plates
|
fb175522fae991a8d88cdf26afad273a4b8b9098
|
[
"MIT"
] | null | null | null |
import cadquery as cq # type: ignore
nd = 0.4 # Nozzle Diameter
length = 50
width = 20
gap = 5
p1 = (
cq.Workplane("XY", origin=(-(width + gap), 0, 0))
.rect(width, length)
.extrude(nd/2)
)
#show_object(p1)
p2 = (
cq.Workplane("XY", origin=(0, 0, 0))
.rect(width, length)
.extrude(nd)
)
#show_object(p2)
p3 = (
cq.Workplane("XY", origin=(width + gap, 0, 0))
.rect(width, length)
.extrude(nd * 2)
)
#show_object(p3)
# Combine the objects so they can all be selected and exported to STL.
#
# Note: you must use .val(); otherwise the following generates
# an "AttributeError: 'Workplane' object has no attribute 'wrapped'"
# compound = cq.Compound.makeCompound([p1, p2, p3])
compound = cq.Compound.makeCompound([p1.val(), p2.val(), p3.val()])
show_object(compound)
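# A hedged sketch (added): outside CQ-editor, where show_object() is not defined,
# the compound could be written to disk instead; the filename is an assumption.
# cq.exporters.export(compound, 'plates.stl')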
| 21.189189
| 68
| 0.626276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.422194
|
20fe1adaa92216baa26b834b33664cd9c78ae67b
| 2,430
|
py
|
Python
|
tests/tonalmodel_tests/test_chromatic_scale.py
|
dpazel/music_rep
|
2f9de9b98b13df98f1a0a2120b84714725ce527e
|
[
"MIT"
] | 1
|
2021-05-06T19:45:54.000Z
|
2021-05-06T19:45:54.000Z
|
tests/tonalmodel_tests/test_chromatic_scale.py
|
dpazel/music_rep
|
2f9de9b98b13df98f1a0a2120b84714725ce527e
|
[
"MIT"
] | null | null | null |
tests/tonalmodel_tests/test_chromatic_scale.py
|
dpazel/music_rep
|
2f9de9b98b13df98f1a0a2120b84714725ce527e
|
[
"MIT"
] | null | null | null |
import unittest
import logging
from tonalmodel.chromatic_scale import ChromaticScale
class TestChromaticScale(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_frequencies(self):
assert is_close(ChromaticScale.get_frequency((4, 9)), 440.0), \
"Error A:4 = {0} should be 440.0".format(ChromaticScale.get_frequency((4, 9)))
assert is_close(ChromaticScale.get_frequency((4, 0)), 261.625565301), \
"Error C:4 = {0} should be 261.625565301".format(ChromaticScale.get_frequency((4, 0)))
def test_parse_chromatic_location(self):
for i in range(0, 12):
s = str(4) + ':' + str(i)
location = ChromaticScale.parse_notation(s)
assert location[0] == 4 and location[1] == i
def test_location_to_index(self):
for i in range(1, 4):
for j in range(0, 12):
index = ChromaticScale.location_to_index((i, j))
assert index == 12 * i + j
def test_index_to_location(self):
for i in range(12, 47):
location = ChromaticScale.index_to_location(i)
logging.info(location)
assert location[0] == i // 12 and location[1] == i % 12
def test_scale(self):
scale = ChromaticScale.get_chromatic_scale(ChromaticScale.parse_notation("0:9"),
ChromaticScale.parse_notation("8:0"))
start = ChromaticScale.location_to_index((0, 9))
end = ChromaticScale.location_to_index((8, 0)) + 1
for i in range(start, end):
            logging.info('{0} {1} {2}'.format(i, ChromaticScale.index_to_location(i), scale[i - start]))
assert is_close(scale[ChromaticScale.location_to_index((4, 9)) - start], 440.0), \
"Error A:4 = {0} should be 440.0".format(scale[ChromaticScale.location_to_index((4, 9)) - start])
assert is_close(scale[ChromaticScale.location_to_index((4, 0)) - start], 261.625565301), \
"Error C:4 = {0} should be 261.625565301".format(scale[ChromaticScale.location_to_index((4, 0)) - start])
def is_close(value_a, value_b):
return abs(value_a - value_b) < 0.0001
def is_close_in_bounds(value_a, value_b, tolerance):
return abs(value_a - value_b) < tolerance
if __name__ == "__main__":
unittest.main()
| 38.571429
| 117
| 0.60535
| 2,115
| 0.87037
| 0
| 0
| 0
| 0
| 0
| 0
| 185
| 0.076132
|
20feae08b04eeba7945d6473eedc0730006c75f9
| 3,093
|
py
|
Python
|
beeseyes/pycode/sampling.py
|
sosi-org/scientific-code
|
395bae0f95fbccb936dc01145c797dc22a1c99a0
|
[
"Unlicense"
] | null | null | null |
beeseyes/pycode/sampling.py
|
sosi-org/scientific-code
|
395bae0f95fbccb936dc01145c797dc22a1c99a0
|
[
"Unlicense"
] | null | null | null |
beeseyes/pycode/sampling.py
|
sosi-org/scientific-code
|
395bae0f95fbccb936dc01145c797dc22a1c99a0
|
[
"Unlicense"
] | null | null | null |
import numpy as np
import math
import polygon_sampler
nan_rgb = np.zeros((3,)) + np.NaN
# sampler session: texture, W_,H_,W,H
'''
Used by `sample_colors_squarepixels()`.
Samples a single point, using square pixels:
maps (u, v) in [0,1) to integer pixel indices in [0, W),
i.e. [0, ..., W-1] inclusive.
'''
def sample1(um,vm, texture, W_,H_,W,H):
if np.isnan(um) or np.isnan(vm):
rgb = nan_rgb
else:
# sample
py = math.floor(um * H_)
px = math.floor(vm * W_)
if px < 0 or py < 0 or px >= W or py >= H:
rgb = nan_rgb
else:
rgb = texture[py,px]
return rgb
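# Worked example (added for illustration): with a 100x100 texture, W_ = H_ = 100 - EPS,
# so (um, vm) = (0.5, 0.25) samples texture[floor(0.5*H_), floor(0.25*W_)] == texture[49, 24].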
'''
Simple "pixel at centroid" sampler (slow):
one pixel is taken for each region, via `sample1`.
If regions is None, a different order is used (see `sample_colors`).
'''
def sample_colors_squarepixels(uv, regions, texture):
# print('uv.shape', uv.shape)
if texture.shape[2] == 4:
texture = texture[:,:, 0:3]
#print('uv', uv)
#print('regions', regions)
#exit()
EPS = 0.00000001
    # (H,W): move to the slow part.
(H,W) = texture.shape[0:2]
# print('W,H', W,H)
W_ = (W - EPS)
H_ = (H - EPS)
nf = len(regions)
uvm_for_debug = np.zeros((nf,2),dtype=float)
regions_rgb = np.zeros((nf,3),dtype=float)
for i in range(nf):
# temporary solution: sample at center only
#if np.isnan(uv[regions[i], 0]):
um = np.mean(uv[regions[i], 0])
vm = np.mean(uv[regions[i], 1])
uvm_for_debug[i, :] = [um, vm]
rgb = sample1(um,vm, texture, W_,H_,W,H)
regions_rgb[i] = rgb
return regions_rgb, uvm_for_debug
def sample_colors_squarepixels_pointwise(uv, texture):
'''
    Based on `sample_colors_squarepixels` but without regions.
A simple point-wise sampling.
uv:shape => (6496, 2)
'''
if texture.shape[2] == 4:
texture = texture[:,:, 0:3]
EPS = 0.00000001
(H,W) = texture.shape[0:2]
W_ = (W - EPS)
H_ = (H - EPS)
print('uv.shape', uv.shape)
nf = uv.shape[0]
uvm_for_debug = np.zeros((nf,2),dtype=float)
regions_rgb = np.zeros((nf,3),dtype=float)
for i in range(nf):
um = uv[i, 0]
vm = uv[i, 1]
uvm_for_debug[i, :] = [um, vm]
rgb = sample1(um,vm, texture, W_,H_,W,H)
regions_rgb[i] = rgb
assert np.allclose(uvm_for_debug, uv, equal_nan=True)
return regions_rgb, uvm_for_debug
'''
Choice of sampler method.
Choose your hexagon sampler here:
regions=None => pointwise, simply sample the uv points
regions=not None => forms regions from these points and samples those regions from the texture
(for now, it is the mean point of each region/facet)
'''
def sample_colors(uv, regions, texture):
if regions is not None:
        # Acceptable speed. Samples a single point per region. Beware of aliasing: no Monte-Carlo, integration, or downsampling.
        return sample_colors_squarepixels(uv, regions, texture)
else:
return sample_colors_squarepixels_pointwise(uv, texture)
# extremely slow. Unusable
#return polygon_sampler.sample_colors_polygons (uv, regions, texture)
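# A minimal smoke test (added; the texture shape and uv value are illustrative assumptions):
if __name__ == '__main__':
    texture_demo = np.zeros((100, 100, 3), dtype=float)
    texture_demo[49, 24] = [1.0, 0.5, 0.25]
    uv_demo = np.array([[0.5, 0.25]])
    rgb, uvm = sample_colors(uv_demo, None, texture_demo)
    print(rgb)  # expected: [[1.   0.5  0.25]]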
| 25.991597
| 154
| 0.6172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,189
| 0.384416
|
20fedbf1080a9f144951aee297b7d6f393e3751d
| 5,237
|
py
|
Python
|
src/ui/workspace_view.py
|
weijiang1994/iPost
|
008e767c23691bd9ba802eab1e405f98094cce4c
|
[
"MIT"
] | 2
|
2021-10-18T01:24:04.000Z
|
2021-12-14T01:29:22.000Z
|
src/ui/workspace_view.py
|
weijiang1994/iPost
|
008e767c23691bd9ba802eab1e405f98094cce4c
|
[
"MIT"
] | null | null | null |
src/ui/workspace_view.py
|
weijiang1994/iPost
|
008e767c23691bd9ba802eab1e405f98094cce4c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'workspace_view.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(889, 684)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 9, 0, -1)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(6, -1, -1, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.new_pushButton = QtWidgets.QPushButton(Form)
self.new_pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.new_pushButton.setObjectName("new_pushButton")
self.horizontalLayout.addWidget(self.new_pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.line = QtWidgets.QFrame(Form)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(6, -1, -1, -1)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.workspace_listWidget = QtWidgets.QListWidget(Form)
self.workspace_listWidget.setMinimumSize(QtCore.QSize(90, 0))
self.workspace_listWidget.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.workspace_listWidget.setObjectName("workspace_listWidget")
item = QtWidgets.QListWidgetItem()
self.workspace_listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
self.workspace_listWidget.addItem(item)
self.horizontalLayout_2.addWidget(self.workspace_listWidget)
self.workspace_stackedWidget = QtWidgets.QStackedWidget(Form)
self.workspace_stackedWidget.setObjectName("workspace_stackedWidget")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.workspace_stackedWidget.addWidget(self.page)
self.page_2 = QtWidgets.QWidget()
self.page_2.setObjectName("page_2")
self.workspace_stackedWidget.addWidget(self.page_2)
self.horizontalLayout_2.addWidget(self.workspace_stackedWidget)
self.horizontalLayout_2.setStretch(0, 2)
self.horizontalLayout_2.setStretch(1, 3)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.line_3 = QtWidgets.QFrame(Form)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.horizontalLayout_3.addWidget(self.line_3)
self.tabWidget = QtWidgets.QTabWidget(Form)
self.tabWidget.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.tabWidget.addTab(self.tab_2, "")
self.horizontalLayout_3.addWidget(self.tabWidget)
self.horizontalLayout_3.setStretch(0, 1)
self.horizontalLayout_3.setStretch(2, 3)
self.gridLayout.addLayout(self.horizontalLayout_3, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label.setText(_translate("Form", "Workspace"))
self.new_pushButton.setText(_translate("Form", "New"))
__sortingEnabled = self.workspace_listWidget.isSortingEnabled()
self.workspace_listWidget.setSortingEnabled(False)
item = self.workspace_listWidget.item(0)
item.setText(_translate("Form", "Collections"))
item = self.workspace_listWidget.item(1)
        item.setText(_translate("Form", "History"))
self.workspace_listWidget.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Form", "Tab 1"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Form", "Tab 2"))
| 50.355769
| 114
| 0.715104
| 4,987
| 0.952263
| 0
| 0
| 0
| 0
| 0
| 0
| 520
| 0.099293
|
20ff397b31725a7c336cc66646521d603dc8bb92
| 389
|
py
|
Python
|
task_queuing/tasks/custom.py
|
joejcollins/lieutenant-dean
|
eea536a146fb89b2feca244d5c4cf68e662cf2f2
|
[
"MIT"
] | null | null | null |
task_queuing/tasks/custom.py
|
joejcollins/lieutenant-dean
|
eea536a146fb89b2feca244d5c4cf68e662cf2f2
|
[
"MIT"
] | null | null | null |
task_queuing/tasks/custom.py
|
joejcollins/lieutenant-dean
|
eea536a146fb89b2feca244d5c4cf68e662cf2f2
|
[
"MIT"
] | null | null | null |
"""Custom celery task to capitalize text"""
import task_queuing.celery_app as app
class Capitalize(app.queue_broker.Task):
"""Custom task without the decorator"""
def run(self, text):
capitalized = text.upper()
return capitalized
@app.queue_broker.task(base=Capitalize)
def shit(x):
print('shit')
return "val"
# app.queues.tasks.register(Capitalize)
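# A hedged usage sketch (added): with a broker and worker running, the task
# can be queued asynchronously via Celery's standard calling API, e.g.:
#
#     result = shit.delay('hello')  # returns an AsyncResult
#     result.get()                  # -> 'val'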
| 19.45
| 43
| 0.694087
| 172
| 0.442159
| 0
| 0
| 87
| 0.22365
| 0
| 0
| 132
| 0.339332
|
1f00bbb4cb26e6889fa5994c748463440e235c8e
| 654
|
py
|
Python
|
migrations/versions/d805931e1abd_add_topics.py
|
cyberinnovationhub/lunch-roulette
|
0b0b933188c095b6e3778ee7de9d4e21cd7caae5
|
[
"BSD-3-Clause"
] | 4
|
2020-12-03T19:24:20.000Z
|
2022-03-16T13:45:11.000Z
|
migrations/versions/d805931e1abd_add_topics.py
|
cyberinnovationhub/lunch-roulette
|
0b0b933188c095b6e3778ee7de9d4e21cd7caae5
|
[
"BSD-3-Clause"
] | 3
|
2020-08-24T08:05:11.000Z
|
2021-11-07T06:14:36.000Z
|
migrations/versions/d805931e1abd_add_topics.py
|
cyberinnovationhub/lunch-roulette
|
0b0b933188c095b6e3778ee7de9d4e21cd7caae5
|
[
"BSD-3-Clause"
] | 3
|
2020-08-27T13:58:53.000Z
|
2022-03-09T14:09:06.000Z
|
"""add topics
Revision ID: d805931e1abd
Revises: 9430b6bc8d1a
Create Date: 2018-09-18 15:11:45.922659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd805931e1abd'
down_revision = '9430b6bc8d1a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('topics', sa.String(length=140), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'topics')
# ### end Alembic commands ###
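# Hedged note (added): migrations like this one are typically applied with the
# Alembic CLI, e.g.:
#   alembic upgrade head    # apply up to and including this revision
#   alembic downgrade -1    # step back one revision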
| 22.551724
| 84
| 0.689602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 385
| 0.588685
|
1f01e1d172c08c2fafb69829e4c50d4807643989
| 726
|
py
|
Python
|
1-50/031NextPermutation.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
1-50/031NextPermutation.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
1-50/031NextPermutation.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-03-24
Algorithm idea: next permutation
"""
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
        # scan from the right for the first ascent nums[idx-1] < nums[idx]
        for idx in range(len(nums)-1, 0, -1):
            if nums[idx] > nums[idx-1]:
                break
        else:
            # already the last permutation: wrap around to the sorted (first) one
            nums.sort()
            return nums
        # swap nums[idx-1] with the rightmost element that is larger than it
        # (the smallest such element, since the suffix is descending)
        for i in range(len(nums)-1, idx-1, -1):
            if nums[i] > nums[idx-1]:
                nums[i], nums[idx-1] = nums[idx-1], nums[i]
                break
        tmp = nums[idx:]
        tmp.sort()
        nums[idx:] = tmp
        return nums
if __name__ == '__main__':
    print(Solution().nextPermutation([1, 3, 2]))
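# Worked example (added): for [1, 3, 2] the first ascent from the right is at
# nums[0]=1 < nums[1]=3; swapping 1 with the smallest larger element to its
# right (2) gives [2, 3, 1]; sorting the suffix yields the answer [2, 1, 3].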
| 24.2
| 74
| 0.508264
| 558
| 0.747989
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.321716
|
1f04128726942205094994e2b681a53cdfe743aa
| 64
|
py
|
Python
|
h1st/tuner/__init__.py
|
vophihungvn/h1st
|
d421995bb0b8de6a5a76788261efef5b26bc7c12
|
[
"Apache-2.0"
] | null | null | null |
h1st/tuner/__init__.py
|
vophihungvn/h1st
|
d421995bb0b8de6a5a76788261efef5b26bc7c12
|
[
"Apache-2.0"
] | null | null | null |
h1st/tuner/__init__.py
|
vophihungvn/h1st
|
d421995bb0b8de6a5a76788261efef5b26bc7c12
|
[
"Apache-2.0"
] | null | null | null |
from h1st.tuner.hyperparameter_tuner import HyperParameterTuner
| 32
| 63
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1f0432871a66053bea5e2a19da56fe363bea9cb9
| 78,296
|
py
|
Python
|
allesfitter/basement.py
|
pierfra-ro/allesfitter
|
a6a885aaeb3253fec0d924ef3b45e8b7c473b181
|
[
"MIT"
] | null | null | null |
allesfitter/basement.py
|
pierfra-ro/allesfitter
|
a6a885aaeb3253fec0d924ef3b45e8b7c473b181
|
[
"MIT"
] | null | null | null |
allesfitter/basement.py
|
pierfra-ro/allesfitter
|
a6a885aaeb3253fec0d924ef3b45e8b7c473b181
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 00:17:06 2018
@author:
Dr. Maximilian N. Günther
European Space Agency (ESA)
European Space Research and Technology Centre (ESTEC)
Keplerlaan 1, 2201 AZ Noordwijk, The Netherlands
Email: maximilian.guenther@esa.int
GitHub: mnguenther
Twitter: m_n_guenther
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: modules
import numpy as np
import os
import sys
import fnmatch
import collections
from datetime import datetime
from multiprocessing import cpu_count
import warnings
warnings.formatwarning = lambda msg, *args, **kwargs: f'\n! WARNING:\n {msg}\ntype: {args[0]}, file: {args[1]}, line: {args[2]}\n'
warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
warnings.filterwarnings('ignore', category=np.RankWarning)
from scipy.stats import truncnorm
#::: allesfitter modules
from .exoworlds_rdx.lightcurves.index_transits import index_transits, index_eclipses, get_first_epoch, get_tmid_observed_transits
from .priors.simulate_PDF import simulate_PDF
from .utils.mcmc_move_translator import translate_str_to_move
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
###############################################################################
#::: 'Basement' class, which contains all the data, settings, etc.
###############################################################################
class Basement():
'''
The 'Basement' class contains all the data, settings, etc.
'''
###############################################################################
#::: init
###############################################################################
def __init__(self, datadir, quiet=False):
'''
Inputs:
-------
datadir : str
the working directory for allesfitter
must contain all the data files
output directories and files will also be created inside datadir
fast_fit : bool (optional; default is False)
if False:
use all photometric data for the plot
if True:
only use photometric data in an 8h window around the transit
requires a good initial guess of the epoch and period
Returns:
--------
All the variables needed for allesfitter
'''
print('Filling the Basement')
self.quiet = quiet
self.now = "{:%Y-%m-%d_%H-%M-%S}".format(datetime.now())
self.datadir = datadir
self.outdir = os.path.join(datadir,'results')
if not os.path.exists( self.outdir ): os.makedirs( self.outdir )
print('')
self.logprint('\nallesfitter version')
self.logprint('---------------------')
self.logprint('v1.2.8')
self.load_settings()
self.load_params()
self.load_data()
if self.settings['shift_epoch']:
try:
self.change_epoch()
except:
warnings.warn('\nCould not shift epoch (you can peacefully ignore this warning if no period was given)\n')
if self.settings['fit_ttvs']:
self.prepare_ttv_fit()
#::: external priors (e.g. stellar density)
self.external_priors = {}
self.load_stellar_priors()
#::: if baseline model == sample_GP, set up a GP object for photometric data
# self.setup_GPs()
#::: translate limb darkening codes from params.csv (int) into str for ellc
self.ldcode_to_ldstr = ["none",# : 0,
"lin",# : 1,
"quad",# : 2,
"sing",# : 3,
"claret",# : 4,
"log",# : 5,
"sqrt",# : 6,
"exp",# : 7,
"power-2",#: 8,
"mugrid"]# : -1
#::: check if the input is consistent
for inst in self.settings['inst_phot']:
key='flux'
if (self.settings['baseline_'+key+'_'+inst] in ['sample_GP_Matern32', 'sample_GP_SHO']) &\
(self.settings['error_'+key+'_'+inst] != 'sample'):
raise ValueError('If you want to use '+self.settings['baseline_'+key+'_'+inst]+', you will want to sample the jitters, too!')
###############################################################################
#::: print function that prints into console and logfile at the same time
###############################################################################
def logprint(self, *text):
if not self.quiet:
print(*text)
original = sys.stdout
with open( os.path.join(self.outdir,'logfile_'+self.now+'.log'), 'a' ) as f:
sys.stdout = f
print(*text)
sys.stdout = original
else:
pass
###############################################################################
#::: load settings
###############################################################################
def load_settings(self):
'''
For the full list of options see www.allesfitter.com
'''
def set_bool(text):
if text.lower() in ['true', '1']:
return True
else:
return False
def is_empty_or_none(key):
return (key not in self.settings) or (str(self.settings[key]).lower() == 'none') or (len(self.settings[key])==0)
def unique(array):
uniq, index = np.unique(array, return_index=True)
return uniq[index.argsort()]
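        # Illustrative note (added): unlike plain np.unique, unique() preserves
        # first-appearance order, e.g. unique(['b', 'a', 'b']) -> array(['b', 'a']).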
rows = np.genfromtxt( os.path.join(self.datadir,'settings.csv'),dtype=None,encoding='utf-8',delimiter=',' )
#::: make backwards compatible
for i, row in enumerate(rows):
# print(row)
name = row[0]
if name[:7]=='planets':
rows[i][0] = 'companions'+name[7:]
warnings.warn('You are using outdated keywords. Automatically renaming '+name+' ---> '+rows[i][0]+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
if name[:6]=='ld_law':
rows[i][0] = 'host_ld_law'+name[6:]
warnings.warn('You are using outdated keywords. Automatically renaming '+name+' ---> '+rows[i][0]+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
# self.settings = {r[0]:r[1] for r in rows}
self.settings = collections.OrderedDict( [('user-given:','')]+[ (r[0],r[1] ) for r in rows ]+[('automatically set:','')] )
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Main settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for key in ['companions_phot', 'companions_rv', 'inst_phot', 'inst_rv', 'inst_rv2']:
if key not in self.settings:
self.settings[key] = []
elif len(self.settings[key]):
self.settings[key] = str(self.settings[key]).split(' ')
else:
self.settings[key] = []
self.settings['companions_all'] = list(np.unique(self.settings['companions_phot']+self.settings['companions_rv'])) #sorted by b, c, d...
self.settings['inst_all'] = list(unique( self.settings['inst_phot']+self.settings['inst_rv']+self.settings['inst_rv2'] )) #sorted like user input
if len(self.settings['inst_phot'])==0 and len(self.settings['companions_phot'])>0:
raise ValueError('No photometric instrument is selected, but photometric companions are given.')
if len(self.settings['inst_rv'])==0 and len(self.settings['companions_rv'])>0:
raise ValueError('No RV instrument is selected, but RV companions are given.')
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: General settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'print_progress' in self.settings:
self.settings['print_progress'] = set_bool(self.settings['print_progress'] )
else:
self.settings['print_progress'] = True
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Epoch settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'shift_epoch' in self.settings:
self.settings['shift_epoch'] = set_bool(self.settings['shift_epoch'] )
else:
self.settings['shift_epoch'] = True
for companion in self.settings['companions_all']:
if 'inst_for_'+companion+'_epoch' not in self.settings:
self.settings['inst_for_'+companion+'_epoch'] = 'all'
if self.settings['inst_for_'+companion+'_epoch'] in ['all','none']:
self.settings['inst_for_'+companion+'_epoch'] = self.settings['inst_all']
else:
if len(self.settings['inst_for_'+companion+'_epoch']):
self.settings['inst_for_'+companion+'_epoch'] = str(self.settings['inst_for_'+companion+'_epoch']).split(' ')
else:
self.settings['inst_for_'+companion+'_epoch'] = []
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Multiprocess settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
self.settings['multiprocess'] = set_bool(self.settings['multiprocess'])
if 'multiprocess_cores' not in self.settings.keys():
self.settings['multiprocess_cores'] = cpu_count()-1
elif self.settings['multiprocess_cores'] == 'all':
self.settings['multiprocess_cores'] = cpu_count()-1
else:
self.settings['multiprocess_cores'] = int(self.settings['multiprocess_cores'])
if self.settings['multiprocess_cores'] == cpu_count():
string = 'You are pushing your luck: you want to run on '+str(self.settings['multiprocess_cores'])+' cores, but your computer has only '+str(cpu_count())+'. I will let you go through with it this time...'
warnings.warn(string)
if self.settings['multiprocess_cores'] > cpu_count():
string = 'Oops, you want to run on '+str(self.settings['multiprocess_cores'])+' cores, but your computer has only '+str(cpu_count())+'. Maybe try running on '+str(cpu_count()-1)+'?'
raise ValueError(string)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Phase variations
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('phase_variations' in self.settings.keys()) and len(self.settings['phase_variations']):
warnings.warn('You are using outdated keywords. Automatically renaming "phase_variations" ---> "phase_curve".'+'. Please fix this before the Duolingo owl comes to get you.')
self.settings['phase_curve'] = self.settings['phase_variations']
if ('phase_curve' in self.settings.keys()) and len(self.settings['phase_curve']):
self.settings['phase_curve'] = set_bool(self.settings['phase_curve'])
if self.settings['phase_curve']==True:
                # self.logprint('The user set phase_curve==True. Automatically set fast_fit=False and secondary_eclipse=True, and overwrite other settings.')
self.settings['fast_fit'] = 'False'
self.settings['secondary_eclipse'] = 'True'
else:
self.settings['phase_curve'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Fast fit
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('fast_fit' in self.settings.keys()) and len(self.settings['fast_fit']):
self.settings['fast_fit'] = set_bool(self.settings['fast_fit'])
else:
self.settings['fast_fit'] = False
if ('fast_fit_width' in self.settings.keys()) and len(self.settings['fast_fit_width']):
            self.settings['fast_fit_width'] = float(self.settings['fast_fit_width'])
else:
self.settings['fast_fit_width'] = 8./24.
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Host stellar density prior
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'use_host_density_prior' in self.settings:
self.settings['use_host_density_prior'] = set_bool(self.settings['use_host_density_prior'] )
else:
self.settings['use_host_density_prior'] = True
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Host stellar density prior
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'use_tidal_eccentricity_prior' in self.settings:
self.settings['use_tidal_eccentricity_prior'] = set_bool(self.settings['use_tidal_eccentricity_prior'] )
else:
self.settings['use_tidal_eccentricity_prior'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: TTVs
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('fit_ttvs' in self.settings.keys()) and len(self.settings['fit_ttvs']):
self.settings['fit_ttvs'] = set_bool(self.settings['fit_ttvs'])
if (self.settings['fit_ttvs']==True) and (self.settings['fast_fit']==False):
                raise ValueError('fit_ttvs==True, but fast_fit==False. '+\
                                 'Currently, you can only fit for TTVs if fast_fit==True. '+\
                                 'Please choose different settings.')
else:
self.settings['fit_ttvs'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Secondary eclipse
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if ('secondary_eclipse' in self.settings.keys()) and len(self.settings['secondary_eclipse']):
self.settings['secondary_eclipse'] = set_bool(self.settings['secondary_eclipse'])
else:
self.settings['secondary_eclipse'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: MCMC settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'mcmc_pre_run_loops' not in self.settings:
self.settings['mcmc_pre_run_loops'] = 0
if 'mcmc_pre_run_steps' not in self.settings:
self.settings['mcmc_pre_run_steps'] = 0
if 'mcmc_nwalkers' not in self.settings:
self.settings['mcmc_nwalkers'] = 100
if 'mcmc_total_steps' not in self.settings:
self.settings['mcmc_total_steps'] = 2000
if 'mcmc_burn_steps' not in self.settings:
self.settings['mcmc_burn_steps'] = 1000
if 'mcmc_thin_by' not in self.settings:
self.settings['mcmc_thin_by'] = 1
if 'mcmc_moves' not in self.settings:
self.settings['mcmc_moves'] = 'DEMove'
#::: make sure these are integers
for key in ['mcmc_nwalkers','mcmc_pre_run_loops','mcmc_pre_run_steps',
'mcmc_total_steps','mcmc_burn_steps','mcmc_thin_by']:
self.settings[key] = int(self.settings[key])
#::: luser proof
if self.settings['mcmc_total_steps'] <= self.settings['mcmc_burn_steps']:
raise ValueError('Your setting for mcmc_total_steps must be larger than mcmc_burn_steps (check your settings.csv).')
#::: translate the mcmc_move string into a list of emcee commands
self.settings['mcmc_moves'] = translate_str_to_move(self.settings['mcmc_moves'])
# N_evaluation_samples = int( 1. * self.settings['mcmc_nwalkers'] * (self.settings['mcmc_total_steps']-self.settings['mcmc_burn_steps']) / self.settings['mcmc_thin_by'] )
        # self.logprint('\nAnticipating ' + str(N_evaluation_samples) + ' MCMC evaluation samples.\n')
        # if N_evaluation_samples>200000:
        # answer = input('It seems like you are asking for ' + str(N_evaluation_samples) + ' MCMC evaluation samples (calculated as mcmc_nwalkers * (mcmc_total_steps-mcmc_burn_steps) / mcmc_thin_by).'+\
        #                'That is an awful lot of samples. '+\
# 'What do you want to do?\n'+\
# '1 : continue at any sacrifice\n'+\
# '2 : abort and increase the mcmc_thin_by parameter in settings.csv (do not do this if you continued an old run!)\n')
# if answer==1:
# pass
# else:
# raise ValueError('User aborted the run.')
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Nested Sampling settings
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'ns_modus' not in self.settings:
self.settings['ns_modus'] = 'static'
if 'ns_nlive' not in self.settings:
self.settings['ns_nlive'] = 500
if 'ns_bound' not in self.settings:
self.settings['ns_bound'] = 'single'
if 'ns_sample' not in self.settings:
self.settings['ns_sample'] = 'rwalk'
if 'ns_tol' not in self.settings:
self.settings['ns_tol'] = 0.01
self.settings['ns_nlive'] = int(self.settings['ns_nlive'])
self.settings['ns_tol'] = float(self.settings['ns_tol'])
# if self.settings['ns_sample'] == 'auto':
# if self.ndim < 10:
# self.settings['ns_sample'] = 'unif'
# print('Using ns_sample=="unif".')
# elif 10 <= self.ndim <= 20:
# self.settings['ns_sample'] = 'rwalk'
# print('Using ns_sample=="rwalk".')
# else:
# self.settings['ns_sample'] = 'slice'
# print('Using ns_sample=="slice".')
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: host & companion grids, limb darkening laws, shapes, etc.
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for companion in self.settings['companions_all']:
for inst in self.settings['inst_all']:
if 'host_grid_'+inst not in self.settings:
self.settings['host_grid_'+inst] = 'default'
if companion+'_grid_'+inst not in self.settings:
self.settings[companion+'_grid_'+inst] = 'default'
if is_empty_or_none('host_ld_law_'+inst):
self.settings['host_ld_law_'+inst] = None
if is_empty_or_none(companion+'_ld_law_'+inst):
self.settings[companion+'_ld_law_'+inst] = None
if is_empty_or_none('host_ld_space_'+inst):
self.settings['host_ld_space_'+inst] = 'q'
if is_empty_or_none(companion+'_ld_space_'+inst):
self.settings[companion+'_ld_space_'+inst] = 'q'
if 'host_shape_'+inst not in self.settings:
self.settings['host_shape_'+inst] = 'sphere'
if companion+'_shape_'+inst not in self.settings:
self.settings[companion+'_shape_'+inst] = 'sphere'
for companion in self.settings['companions_rv']:
for inst in list(self.settings['inst_rv']) + list(self.settings['inst_rv2']):
if companion+'_flux_weighted_'+inst in self.settings:
self.settings[companion+'_flux_weighted_'+inst] = set_bool(self.settings[companion+'_flux_weighted_'+inst])
else:
self.settings[companion+'_flux_weighted_'+inst] = False
if 'exact_grav' in self.settings:
self.settings['exact_grav'] = set_bool(self.settings['exact_grav'])
else:
self.settings['exact_grav'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Phase curve styles
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if is_empty_or_none('phase_curve_style'):
self.settings['phase_curve_style'] = None
if self.settings['phase_curve_style'] not in [None, 'sine_series', 'sine_physical', 'ellc_physical', 'GP']:
raise ValueError("The setting 'phase_curve_style' must be one of [None, 'sine_series', 'sine_physical', 'ellc_physical', 'GP'], but was '"+str(self.settings['phase_curve_style'])+"'.")
if (self.settings['phase_curve'] is True) and (self.settings['phase_curve_style'] is None):
raise ValueError("You chose 'phase_curve=True' but did not select a 'phase_curve_style'; please select one of ['sine_series', 'sine_physical', 'ellc_physical', 'GP'].")
if (self.settings['phase_curve'] is False) and (self.settings['phase_curve_style'] in ['sine_series', 'sine_physical', 'ellc_physical', 'GP']):
raise ValueError("You chose 'phase_curve=False' but also selected a 'phase_curve_style'; please double check and set 'phase_curve_style=None' (or remove it).")
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Stellar variability
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for key in ['flux', 'rv', 'rv2']:
if ('stellar_var_'+key not in self.settings) or (self.settings['stellar_var_'+key] is None) or (self.settings['stellar_var_'+key].lower()=='none'):
self.settings['stellar_var_'+key] = 'none'
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Baselines
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
if inst in self.settings['inst_phot']: key='flux'
elif inst in self.settings['inst_rv']: key='rv'
elif inst in self.settings['inst_rv2']: key='rv2'
if 'baseline_'+key+'_'+inst not in self.settings:
self.settings['baseline_'+key+'_'+inst] = 'none'
elif self.settings['baseline_'+key+'_'+inst] == 'sample_GP':
warnings.warn('You are using outdated keywords. Automatically renaming sample_GP ---> sample_GP_Matern32.'+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
self.settings['baseline_'+key+'_'+inst] = 'sample_GP_Matern32'
if 'baseline_'+key+'_'+inst+'_against' not in self.settings:
self.settings['baseline_'+key+'_'+inst+'_against'] = 'time'
if self.settings['baseline_'+key+'_'+inst+'_against'] not in ['time','custom_series']:
raise ValueError("The setting 'baseline_'+key+'_'+inst+'_against' must be one of ['time', custom_series'], but was '" + self.settings['baseline_'+key+'_'+inst+'_against'] + "'.")
# for inst in self.settings['inst_phot']:
# for key in ['flux']:
# if 'baseline_'+key+'_'+inst not in self.settings:
# self.settings['baseline_'+key+'_'+inst] = 'none'
# elif self.settings['baseline_'+key+'_'+inst] == 'sample_GP':
# warnings.warn('You are using outdated keywords. Automatically renaming sample_GP ---> sample_GP_Matern32.'+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
# self.settings['baseline_'+key+'_'+inst] = 'sample_GP_Matern32'
# for inst in self.settings['inst_rv']:
# for key in ['rv']:
# if 'baseline_'+key+'_'+inst not in self.settings:
# self.settings['baseline_'+key+'_'+inst] = 'none'
# elif self.settings['baseline_'+key+'_'+inst] == 'sample_GP':
# warnings.warn('You are using outdated keywords. Automatically renaming sample_GP ---> sample_GP_Matern32.'+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
# self.settings['baseline_'+key+'_'+inst] = 'sample_GP_Matern32'
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Errors
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
if inst in self.settings['inst_phot']: key='flux'
elif inst in self.settings['inst_rv']: key='rv'
elif inst in self.settings['inst_rv2']: key='rv2'
if 'error_'+key+'_'+inst not in self.settings:
self.settings['error_'+key+'_'+inst] = 'sample'
# for inst in self.settings['inst_phot']:
# for key in ['flux']:
# if 'error_'+key+'_'+inst not in self.settings:
# self.settings['error_'+key+'_'+inst] = 'sample'
# for inst in self.settings['inst_rv']:
# for key in ['rv']:
# if 'error_'+key+'_'+inst not in self.settings:
# self.settings['error_'+key+'_'+inst] = 'sample'
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Color plot
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'color_plot' not in self.settings.keys():
self.settings['color_plot'] = False
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Companion colors
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for i, companion in enumerate( self.settings['companions_all'] ):
self.settings[companion+'_color'] = sns.color_palette()[i]
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Plot zoom window
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'zoom_window' not in self.settings:
            self.settings['zoom_window'] = 8./24. #8h window around transit/eclipse midpoint by default
else:
self.settings['zoom_window'] = float(self.settings['zoom_window'])
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Exposure time interpolation
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
#::: if t_exp is given
if 't_exp_'+inst in self.settings.keys() and len(self.settings['t_exp_'+inst]):
t_exp = self.settings['t_exp_'+inst].split(' ')
                #if a single float
                if len(t_exp)==1:
                    self.settings['t_exp_'+inst] = float(t_exp[0])
                #if an array
                else:
                    self.settings['t_exp_'+inst] = np.array([ float(t) for t in t_exp ])
#::: if not given / given as an empty field
else:
self.settings['t_exp_'+inst] = None
#::: if t_exp_n_int is given
if 't_exp_'+inst in self.settings \
and 't_exp_n_int_'+inst in self.settings \
and len(self.settings['t_exp_n_int_'+inst]):
self.settings['t_exp_n_int_'+inst] = int(self.settings['t_exp_n_int_'+inst])
if self.settings['t_exp_n_int_'+inst] < 1:
raise ValueError('"t_exp_n_int_'+inst+'" must be >= 1, but is given as '+str(self.settings['t_exp_n_int_'+inst])+' in params.csv')
else:
self.settings['t_exp_n_int_'+inst] = None
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Number of spots
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
for inst in self.settings['inst_all']:
if 'host_N_spots_'+inst in self.settings and len(self.settings['host_N_spots_'+inst]):
self.settings['host_N_spots_'+inst] = int(self.settings['host_N_spots_'+inst])
else:
self.settings['host_N_spots_'+inst] = 0
for companion in self.settings['companions_all']:
if companion+'_N_spots_'+inst in self.settings:
self.settings[companion+'_N_spots_'+inst] = int(self.settings[companion+'_N_spots_'+inst])
else:
self.settings[companion+'_N_spots_'+inst] = 0
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: Number of flares
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if 'N_flares' in self.settings and len(self.settings['N_flares'])>0:
self.settings['N_flares'] = int(self.settings['N_flares'])
else:
self.settings['N_flares'] = 0
###############################################################################
#::: load params
###############################################################################
def load_params(self):
'''
For the full list of options see www.allesfitter.com
'''
#==========================================================================
#::: load params.csv
#==========================================================================
buf = np.genfromtxt(os.path.join(self.datadir,'params.csv'), delimiter=',',comments='#',dtype=None,encoding='utf-8',names=True)
#==========================================================================
#::: function to assure backwards compatibility
#==========================================================================
def backwards_compatibility(key_new, key_deprecated):
if key_deprecated in np.atleast_1d(buf['name']):
warnings.warn('You are using outdated keywords. Automatically renaming '+key_deprecated+' ---> '+key_new+'. Please fix this before the Duolingo owl comes to get you.') #, category=DeprecationWarning)
ind = np.where(buf['name'] == key_deprecated)[0]
np.atleast_1d(buf['name'])[ind] = key_new
#==========================================================================
#::: luser-proof: backwards compatibility
# (has to happen first, and directly on buf['name'])
#==========================================================================
for inst in self.settings['inst_all']:
backwards_compatibility(key_new='host_ldc_q1_'+inst, key_deprecated='ldc_q1_'+inst)
backwards_compatibility(key_new='host_ldc_q2_'+inst, key_deprecated='ldc_q2_'+inst)
backwards_compatibility(key_new='host_ldc_q3_'+inst, key_deprecated='ldc_q3_'+inst)
backwards_compatibility(key_new='host_ldc_q4_'+inst, key_deprecated='ldc_q4_'+inst)
backwards_compatibility(key_new='ln_err_flux_'+inst, key_deprecated='log_err_flux_'+inst)
backwards_compatibility(key_new='ln_jitter_rv_'+inst, key_deprecated='log_jitter_rv_'+inst)
backwards_compatibility(key_new='baseline_gp_matern32_lnsigma_flux_'+inst, key_deprecated='baseline_gp1_flux_'+inst)
backwards_compatibility(key_new='baseline_gp_matern32_lnrho_flux_'+inst, key_deprecated='baseline_gp2_flux_'+inst)
backwards_compatibility(key_new='baseline_gp_matern32_lnsigma_rv_'+inst, key_deprecated='baseline_gp1_rv_'+inst)
backwards_compatibility(key_new='baseline_gp_matern32_lnrho_rv_'+inst, key_deprecated='baseline_gp2_rv_'+inst)
#==========================================================================
#::: luser-proof: check for allowed keys to catch typos etc.
#==========================================================================
#TODO
#==========================================================================
#::: set up stuff
#==========================================================================
self.allkeys = np.atleast_1d(buf['name']) #len(all rows in params.csv)
self.labels = np.atleast_1d(buf['label']) #len(all rows in params.csv)
self.units = np.atleast_1d(buf['unit']) #len(all rows in params.csv)
if 'truth' in buf.dtype.names:
self.truths = np.atleast_1d(buf['truth']) #len(all rows in params.csv)
else:
self.truths = np.nan * np.ones(len(self.allkeys))
self.params = collections.OrderedDict() #len(all rows in params.csv)
self.params['user-given:'] = '' #just for pretty printing
for i,key in enumerate(self.allkeys):
#::: if it's not a "coupled parameter", then use the given value
if np.atleast_1d(buf['value'])[i] not in list(self.allkeys):
self.params[key] = float(np.atleast_1d(buf['value'])[i])
#::: if it's a "coupled parameter", then write the string of the key it is coupled to
else:
self.params[key] = np.atleast_1d(buf['value'])[i]
#==========================================================================
#::: function to automatically set default params if they were not given
#==========================================================================
def validate(key, default, default_min, default_max):
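# Fill in `default` if the key is absent; otherwise reject values outside [default_min, default_max].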
if (key in self.params) and (self.params[key] is not None):
if (self.params[key] < default_min) or (self.params[key] > default_max):
raise ValueError("User input for "+key+" is "+self.params+" but must lie within ["+str(default_min)+","+str(default_max)+"].")
if (key not in self.params):
self.params[key] = default
#==========================================================================
#::: luser-proof: make sure the limb darkening values are uniquely
#::: from either the u- or q-space
#==========================================================================
def check_ld(obj, inst):
if self.settings[obj+'_ld_space_'+inst] == 'q':
matches = fnmatch.filter(self.allkeys, obj+'_ldc_u*_'+inst)
if len(matches) > 0:
raise ValueError("The following user input is inconsistent:\n"+\
"Setting: '"+key+"' = 'q'\n"+\
"Parameters: {}".format(matches))
elif self.settings[obj+'_ld_space_'+inst] == 'u':
matches = fnmatch.filter(self.allkeys, obj+'_ldc_q*_'+inst)
if len(matches) > 0:
raise ValueError("The following user input is inconsistent:\n"+\
"Setting: '"+key+"' = 'u'\n"+\
"Parameters: {}".format(matches))
for inst in self.settings['inst_all']:
for obj in ['host'] + self.settings['companions_all']:
check_ld(obj, inst)
#==========================================================================
#::: validate that initial guess params have reasonable values
#==========================================================================
self.params['automatically set:'] = '' #just for pretty printing
for companion in self.settings['companions_all']:
for inst in self.settings['inst_all']:
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: ellc defaults
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: frequently used parameters
validate(companion+'_rr', None, 0., np.inf)
validate(companion+'_rsuma', None, 0., np.inf)
validate(companion+'_cosi', 0., 0., 1.)
validate(companion+'_epoch', 0., -np.inf, np.inf)
validate(companion+'_period', 0., 0., np.inf)
validate(companion+'_sbratio_'+inst, 0., 0., np.inf)
validate(companion+'_K', 0., 0., np.inf)
validate(companion+'_f_s', 0., -1, 1)
validate(companion+'_f_c', 0., -1, 1)
validate('dil_'+inst, 0., -np.inf, np.inf)
#::: limb darkenings, u-space
validate('host_ldc_u1_'+inst, None, 0, 1)
validate('host_ldc_u2_'+inst, None, 0, 1)
validate('host_ldc_u3_'+inst, None, 0, 1)
validate('host_ldc_u4_'+inst, None, 0, 1)
validate(companion+'_ldc_u1_'+inst, None, 0, 1)
validate(companion+'_ldc_u2_'+inst, None, 0, 1)
validate(companion+'_ldc_u3_'+inst, None, 0, 1)
validate(companion+'_ldc_u4_'+inst, None, 0, 1)
#::: limb darkenings, q-space
validate('host_ldc_q1_'+inst, None, 0, 1)
validate('host_ldc_q2_'+inst, None, 0, 1)
validate('host_ldc_q3_'+inst, None, 0, 1)
validate('host_ldc_q4_'+inst, None, 0, 1)
validate(companion+'_ldc_q1_'+inst, None, 0, 1)
validate(companion+'_ldc_q2_'+inst, None, 0, 1)
validate(companion+'_ldc_q3_'+inst, None, 0, 1)
validate(companion+'_ldc_q4_'+inst, None, 0, 1)
#::: catch exceptions
if self.params[companion+'_period'] is None:
self.settings['do_not_phase_fold'] = True
#::: advanced parameters
validate(companion+'_a', None, 0., np.inf)
validate(companion+'_q', 1., 0., np.inf)
validate('didt_'+inst, None, -np.inf, np.inf)
validate('domdt_'+inst, None, -np.inf, np.inf)
validate('host_gdc_'+inst, None, 0., 1.)
validate('host_rotfac_'+inst, 1., 0., np.inf)
validate('host_hf_'+inst, 1.5, -np.inf, np.inf)
validate('host_bfac_'+inst, None, -np.inf, np.inf)
validate('host_heat_'+inst, None, -np.inf, np.inf)
validate('host_lambda', None, -np.inf, np.inf)
validate('host_vsini', None, -np.inf, np.inf)
validate(companion+'_gdc_'+inst, None, 0., 1.)
validate(companion+'_rotfac_'+inst, 1., 0., np.inf)
validate(companion+'_hf_'+inst, 1.5, -np.inf, np.inf)
validate(companion+'_bfac_'+inst, None, -np.inf, np.inf)
validate(companion+'_heat_'+inst, None, -np.inf, np.inf)
validate(companion+'_lambda', None, -np.inf, np.inf)
validate(companion+'_vsini', None, -np.inf, np.inf)
#::: special parameters (list type)
if 'host_spots_'+inst not in self.params:
self.params['host_spots_'+inst] = None
if companion+'_spots_'+inst not in self.params:
self.params[companion+'_spots_'+inst] = None
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: errors and jitters
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#TODO: add validations for all errors / jitters
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: baselines (and backwards compability)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#TODO: add validations for all baseline params
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: phase curve style: sine_series
# all in ppt
# A1 (beaming)
# B1 (atmospheric), can be split in thermal and reflected
# B2 (ellipsoidal)
# B3 (ellipsoidal 2nd order)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# if (self.settings['phase_curve_style'] == 'sine_series') and (inst in self.settings['inst_phot']):
if (inst in self.settings['inst_phot']):
validate(companion+'_phase_curve_A1_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_B1_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B1t_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1t_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B1r_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1r_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B2_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B3_'+inst, None, -np.inf, 0.)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: phase curve style: sine_physical
# A1 (beaming)
# B1 (atmospheric), can be split in thermal and reflected
# B2 (ellipsoidal)
# B3 (ellipsoidal 2nd order)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# if (self.settings['phase_curve_style'] == 'sine_physical') and (inst in self.settings['inst_phot']):
if (inst in self.settings['inst_phot']):
validate(companion+'_phase_curve_beaming_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_atmospheric_thermal_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_thermal_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_atmospheric_reflected_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_reflected_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_ellipsoidal_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_ellipsoidal_2nd_'+inst, None, 0., np.inf)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: to avoid a bug/feature in ellc, if either property is >0, set the other to 1e-15 (not 0):
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if self.params[companion+'_heat_'+inst] is not None:
if (self.params[companion+'_sbratio_'+inst] == 0) and (self.params[companion+'_heat_'+inst] > 0):
self.params[companion+'_sbratio_'+inst] = 1e-15 #this is to avoid a bug/feature in ellc
if (self.params[companion+'_sbratio_'+inst] > 0) and (self.params[companion+'_heat_'+inst] == 0):
self.params[companion+'_heat_'+inst] = 1e-15 #this is to avoid a bug/feature in ellc
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: luser proof: avoid conflicting/degenerate phase curve commands
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if (inst in self.settings['inst_phot']) and (self.settings['phase_curve'] == True):
phase_curve_model_1 = (self.params[companion+'_phase_curve_B1_'+inst] is not None)
phase_curve_model_2 = ((self.params[companion+'_phase_curve_B1t_'+inst] is not None) or (self.params[companion+'_phase_curve_B1r_'+inst] is not None))
phase_curve_model_3 = (self.params[companion+'_phase_curve_atmospheric_'+inst] is not None)
phase_curve_model_4 = ((self.params[companion+'_phase_curve_atmospheric_thermal_'+inst] is not None) or (self.params[companion+'_phase_curve_atmospheric_reflected_'+inst] is not None))
phase_curve_model_5 = ((self.params['host_bfac_'+inst] is not None) or (self.params['host_heat_'+inst] is not None) or \
(self.params['host_gdc_'+inst] is not None) or (self.settings['host_shape_'+inst]!='sphere') or \
(self.params[companion+'_bfac_'+inst] is not None) or (self.params[companion+'_heat_'+inst] is not None) or \
(self.params[companion+'_gdc_'+inst] is not None) or (self.settings[companion+'_shape_'+inst]!='sphere'))
if (phase_curve_model_1 + phase_curve_model_2 + phase_curve_model_3 + phase_curve_model_4 + phase_curve_model_5) > 1:
raise ValueError('You can use either\n'\
+'1) the sine_series phase curve model with "*_phase_curve_B1_*",\n'\
+'2) the sine_series phase curve model with "*_phase_curve_B1t_*" and "*_phase_curve_B1r_*", or\n'\
+'3) the sine_physical phase curve model with "*_phase_curve_atmospheric_*",\n'\
+'4) the sine_physical phase curve model with "*_phase_curve_atmospheric_thermal_*" and "*_phase_curve_atmospheric_reflected_*", or\n'\
+'5) the ellc_physical phase curve model with "*_bfac_*", "*_heat_*", "*_gdc_*" etc.\n'\
+'but you shall not pass with a mix&match.')
#==========================================================================
#::: coupled params
#==========================================================================
if 'coupled_with' in buf.dtype.names:
self.coupled_with = buf['coupled_with']
else:
self.coupled_with = [None]*len(self.allkeys)
for i, key in enumerate(self.allkeys):
if isinstance(self.coupled_with[i], str) and (len(self.coupled_with[i])>0):
self.params[key] = self.params[self.coupled_with[i]] #luser proof: automatically set the values of the params coupled to another param
buf['fit'][i] = 0 #luser proof: automatically set fit=0 for the params coupled to another param
#==========================================================================
#::: mark to be fitted params
#==========================================================================
self.ind_fit = (buf['fit']==1) #len(all rows in params.csv)
self.fitkeys = buf['name'][ self.ind_fit ] #len(ndim)
self.fitlabels = self.labels[ self.ind_fit ] #len(ndim)
self.fitunits = self.units[ self.ind_fit ] #len(ndim)
self.fittruths = self.truths[ self.ind_fit ] #len(ndim)
self.theta_0 = buf['value'][ self.ind_fit ] #len(ndim)
if 'init_err' in buf.dtype.names:
self.init_err = buf['init_err'][ self.ind_fit ] #len(ndim)
else:
self.init_err = 1e-8
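#::: each entry of the "bounds" column is a space-separated string, e.g. "uniform 0 1", "normal 0.5 0.1", or "trunc_normal 0 1 0.5 0.1"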
self.bounds = [ str(item).split(' ') for item in buf['bounds'][ self.ind_fit ] ] #len(ndim)
for i, item in enumerate(self.bounds):
if item[0] in ['uniform', 'normal']:
self.bounds[i] = [ item[0], float(item[1]), float(item[2]) ]
elif item[0] in ['trunc_normal']:
self.bounds[i] = [ item[0], float(item[1]), float(item[2]), float(item[3]), float(item[4]) ]
else:
raise ValueError('Bounds have to be "uniform", "normal" or "trunc_normal". Input from "params.csv" was "'+self.bounds[i][0]+'".')
self.ndim = len(self.theta_0) #len(ndim)
#==========================================================================
#::: luser proof: check if all initial guesses lie within their bounds
#==========================================================================
#TODO: make this part of the validate() function
for th, b, key in zip(self.theta_0, self.bounds, self.fitkeys):
#:::: test bounds
if (b[0] == 'uniform') and not (b[1] <= th <= b[2]):
raise ValueError('The initial guess for '+key+' lies outside of its bounds.')
elif (b[0] == 'normal') and ( np.abs(th - b[1]) > 3*b[2] ):
answer = input('The initial guess for '+key+' lies more than 3 sigma from its prior\n'+\
'What do you want to do?\n'+\
'1 : continue at any sacrifice \n'+\
'2 : stop and let me fix the params.csv file \n')
if answer=='1': #input() returns a string
pass
else:
raise ValueError('User aborted the run.')
elif (b[0] == 'trunc_normal') and not (b[1] <= th <= b[2]):
raise ValueError('The initial guess for '+key+' lies outside of its bounds.')
elif (b[0] == 'trunc_normal') and ( np.abs(th - b[3]) > 3*b[4] ):
answer = input('The initial guess for '+key+' lies more than 3 sigma from its prior\n'+\
'What do you want to do?\n'+\
'1 : continue at any sacrifice \n'+\
'2 : stop and let me fix the params.csv file \n')
if answer=='1': #input() returns a string
pass
else:
raise ValueError('User aborted the run.')
###############################################################################
#::: load data
###############################################################################
def load_data(self):
'''
Example:
-------
A lightcurve is stored as
data['TESS']['time'], data['TESS']['flux']
An RV curve is stored as
data['HARPS']['time'], data['HARPS']['rv']
'''
self.fulldata = {}
self.data = {}
#======================================================================
#::: photometry
#======================================================================
for inst in self.settings['inst_phot']:
try:
time, flux, flux_err, custom_series = np.genfromtxt(os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, flux, flux_err = np.genfromtxt(os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if any(np.isnan(time*flux*flux_err*custom_series)):
raise ValueError('There are NaN values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(flux_err==0):
raise ValueError('There are uncertainties with values of 0 in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(flux_err<0):
raise ValueError('There are uncertainties with negative values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if not all(np.diff(time)>=0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please make sure the file is not corrupted, then sort it by time and restart.')
elif not all(np.diff(time)>0):
warnings.warn('There are repeated time stamps in the time array in "'+inst+'.csv". Please make sure the file is not corrupted (e.g. insufficient precision in your time stamps).')
# overwrite = str(input('There are repeated time stamps in the time array in "'+inst+'.csv". Please make sure the file is not corrupted (e.g. insufficient precision in your time stamps).'+\
# 'What do you want to do?\n'+\
# '1 : continue and hope for the best; no risk, no fun; #yolo\n'+\
# '2 : abort\n'))
# if (overwrite == '1'):
# pass
# else:
# raise ValueError('User aborted operation.')
self.fulldata[inst] = {
'time':time,
'flux':flux,
'err_scales_flux':flux_err/np.nanmean(flux_err),
'custom_series':custom_series
}
if (self.settings['fast_fit']) and (len(self.settings['inst_phot'])>0):
time, flux, flux_err, custom_series = self.reduce_phot_data(time, flux, flux_err, custom_series=custom_series, inst=inst)
self.data[inst] = {
'time':time,
'flux':flux,
'err_scales_flux':flux_err/np.nanmean(flux_err),
'custom_series':custom_series
}
#======================================================================
#::: RV
#======================================================================
for inst in self.settings['inst_rv']:
try:
time, rv, rv_err, custom_series = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, rv, rv_err = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if any(np.isnan(time*rv*rv_err*custom_series)):
raise ValueError('There are NaN values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
#aCkTuaLLLyy rv_err=0 is ok, since we add a jitter term here anyway (instead of scaling)
# if any(rv_err==0):
# raise ValueError('There are uncertainties with values of 0 in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(rv_err<0):
raise ValueError('There are uncertainties with negative values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if not all(np.diff(time)>0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please sort it by time and restart.')
self.data[inst] = {
'time':time,
'rv':rv,
'white_noise_rv':rv_err,
'custom_series':custom_series
}
#======================================================================
#::: RV2 (for detached binaries)
#======================================================================
for inst in self.settings['inst_rv2']:
try:
time, rv, rv_err, custom_series = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, rv, rv_err = np.genfromtxt( os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if not all(np.diff(time)>0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please sort it by time and restart.')
self.data[inst] = {
'time':time,
'rv2':rv,
'white_noise_rv2':rv_err,
'custom_series':custom_series
}
#======================================================================
#::: also save the combined time series
#::: for cases where all instruments are treated together
#::: e.g. for stellar variability GPs
#======================================================================
self.data['inst_phot'] = {'time':[],'flux':[],'flux_err':[],'inst':[]}
for inst in self.settings['inst_phot']:
self.data['inst_phot']['time'] += list(self.data[inst]['time'])
self.data['inst_phot']['flux'] += list(self.data[inst]['flux'])
self.data['inst_phot']['flux_err'] += list(np.nan*self.data[inst]['flux']) #errors will be sampled/derived later
self.data['inst_phot']['inst'] += [inst]*len(self.data[inst]['time'])
ind_sort = np.argsort(self.data['inst_phot']['time'])
self.data['inst_phot']['ind_sort'] = ind_sort
self.data['inst_phot']['time'] = np.array(self.data['inst_phot']['time'])[ind_sort]
self.data['inst_phot']['flux'] = np.array(self.data['inst_phot']['flux'])[ind_sort]
self.data['inst_phot']['flux_err'] = np.array(self.data['inst_phot']['flux_err'])[ind_sort]
self.data['inst_phot']['inst'] = np.array(self.data['inst_phot']['inst'])[ind_sort]
self.data['inst_rv'] = {'time':[],'rv':[],'rv_err':[],'inst':[]}
for inst in self.settings['inst_rv']:
self.data['inst_rv']['time'] += list(self.data[inst]['time'])
self.data['inst_rv']['rv'] += list(self.data[inst]['rv'])
self.data['inst_rv']['rv_err'] += list(np.nan*self.data[inst]['rv']) #errors will be sampled/derived later
self.data['inst_rv']['inst'] += [inst]*len(self.data[inst]['time'])
ind_sort = np.argsort(self.data['inst_rv']['time'])
self.data['inst_rv']['ind_sort'] = ind_sort
self.data['inst_rv']['time'] = np.array(self.data['inst_rv']['time'])[ind_sort]
self.data['inst_rv']['rv'] = np.array(self.data['inst_rv']['rv'])[ind_sort]
self.data['inst_rv']['rv_err'] = np.array(self.data['inst_rv']['rv_err'])[ind_sort]
self.data['inst_rv']['inst'] = np.array(self.data['inst_rv']['inst'])[ind_sort]
self.data['inst_rv2'] = {'time':[],'rv2':[],'rv2_err':[],'inst':[]}
for inst in self.settings['inst_rv2']:
self.data['inst_rv2']['time'] += list(self.data[inst]['time'])
self.data['inst_rv2']['rv2'] += list(self.data[inst]['rv2'])
self.data['inst_rv2']['rv2_err'] += list(np.nan*self.data[inst]['rv2']) #errors will be sampled/derived later
self.data['inst_rv2']['inst'] += [inst]*len(self.data[inst]['time'])
ind_sort = np.argsort(self.data['inst_rv2']['time'])
self.data['inst_rv2']['ind_sort'] = ind_sort
self.data['inst_rv2']['time'] = np.array(self.data['inst_rv2']['time'])[ind_sort]
self.data['inst_rv2']['rv2'] = np.array(self.data['inst_rv2']['rv2'])[ind_sort]
self.data['inst_rv2']['rv2_err'] = np.array(self.data['inst_rv2']['rv2_err'])[ind_sort]
self.data['inst_rv2']['inst'] = np.array(self.data['inst_rv2']['inst'])[ind_sort]
###############################################################################
#::: change epoch
###############################################################################
def my_truncnorm_isf(q,a,b,mean,std):
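# scipy.stats.truncnorm expects its clip points in units of the standard normal,
# so the data-space bounds (a, b) are standardized before calling isf.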
a_scipy = 1.*(a - mean) / std
b_scipy = 1.*(b - mean) / std
return truncnorm.isf(q,a_scipy,b_scipy,loc=mean,scale=std)
def change_epoch(self):
'''
Shift the epoch given in params.csv into the middle of the observed time range.
'''
self.logprint('\nShifting epochs into the data center')
self.logprint('------------------------------------')
#::: for all companions
for companion in self.settings['companions_all']:
self.logprint('Companion',companion)
self.logprint('\tinput epoch:',self.params[companion+'_epoch'])
#::: get data time range
alldata = []
for inst in self.settings['inst_for_'+companion+'_epoch']:
alldata += list(self.data[inst]['time'])
start = np.nanmin( alldata )
end = np.nanmax( alldata )
#::: get the given values
user_epoch = 1.*self.params[companion+'_epoch']
period = 1.*self.params[companion+'_period']
# buf = self.bounds[ind_e].copy()
#::: calculate the true first_epoch
if 'fast_fit_width' in self.settings and self.settings['fast_fit_width'] is not None:
width = self.settings['fast_fit_width']
else:
width = 0
first_epoch = get_first_epoch(alldata, self.params[companion+'_epoch'], self.params[companion+'_period'], width=width)
#::: calculate the mid_epoch (in the middle of the data set)
N = int(np.round((end-start)/2./period))
self.settings['mid_epoch'] = first_epoch + N * period
#::: calculate how much the user_epoch has to be shifted to get the mid_epoch
N_shift = int(np.round((self.settings['mid_epoch']-user_epoch)/period))
#::: set the new initial guess (and truth)
self.params[companion+'_epoch'] = 1.*self.settings['mid_epoch']
#::: also shift the truth (implies that the truth epoch is set where the initial guess is)
try:
ind_e = np.where(self.fitkeys==companion+'_epoch')[0][0]
ind_p = np.where(self.fitkeys==companion+'_period')[0][0]
N_truth_shift = int(np.round((self.settings['mid_epoch']-self.fittruths[ind_e])/self.fittruths[ind_p]))
self.fittruths[ind_e] += N_truth_shift * self.fittruths[ind_p]
except:
pass
#::: if a fit param, also update the bounds accordingly
if (N_shift != 0) and (companion+'_epoch' in self.fitkeys):
ind_e = np.where(self.fitkeys==companion+'_epoch')[0][0]
ind_p = np.where(self.fitkeys==companion+'_period')[0][0]
# print('\n')
# print('############################################################################')
# print('user_epoch', user_epoch, self.bounds[ind_e])
# print('user_period', period, self.bounds[ind_p])
# print('----------------------------------------------------------------------------')
#::: set the new initial guess
self.theta_0[ind_e] = 1.*self.settings['mid_epoch']
#::: get the bounds / errors
#::: if the epoch and period priors are both uniform
if (self.bounds[ind_e][0] == 'uniform') & (self.bounds[ind_p][0] == 'uniform'):
if N_shift > 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][1] #lower bound
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][2] #upper bound
elif N_shift < 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][2] #lower bound; period bounds switched if N_shift is negative
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][1] #upper bound; period bounds switched if N_shift is negative
#::: if the epoch and period priors are both normal
elif (self.bounds[ind_e][0] == 'normal') & (self.bounds[ind_p][0] == 'normal'):
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][1] #mean (in case the prior-mean is not the initial-guess-mean)
self.bounds[ind_e][2] = np.sqrt( self.bounds[ind_e][2]**2 + N_shift**2 * self.bounds[ind_p][2]**2 ) #std (in case the prior-mean is not the initial-guess-mean)
#::: if the epoch and period priors are both trunc_normal
elif (self.bounds[ind_e][0] == 'trunc_normal') & (self.bounds[ind_p][0] == 'trunc_normal'):
if N_shift > 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][1] #lower bound
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][2] #upper bound
elif N_shift < 0:
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * self.bounds[ind_p][2] #lower bound; period bounds switched if N_shift is negative
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * self.bounds[ind_p][1] #upper bound; period bounds switched if N_shift is negative
self.bounds[ind_e][3] = self.bounds[ind_e][3] + N_shift * self.bounds[ind_p][3] #mean (in case the prior-mean is not the initial-guess-mean)
self.bounds[ind_e][4] = np.sqrt( self.bounds[ind_e][4]**2 + N_shift**2 * self.bounds[ind_p][4]**2 ) #std (in case the prior-mean is not the initial-guess-mean)
#::: if the epoch prior is uniform and period prior is normal
elif (self.bounds[ind_e][0] == 'uniform') & (self.bounds[ind_p][0] == 'normal'):
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * (period + self.bounds[ind_p][2]) #lower bound epoch + Nshift * period + Nshift * std_period
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * (period + self.bounds[ind_p][2]) #upper bound + Nshift * period + Nshift * std_period
#::: if the epoch prior is uniform and period prior is trunc_normal
elif (self.bounds[ind_e][0] == 'uniform') & (self.bounds[ind_p][0] == 'trunc_normal'):
self.bounds[ind_e][1] = self.bounds[ind_e][1] + N_shift * (period + self.bounds[ind_p][4]) #lower bound epoch + Nshift * period + Nshift * std_period
self.bounds[ind_e][2] = self.bounds[ind_e][2] + N_shift * (period + self.bounds[ind_p][4]) #upper bound + Nshift * period + Nshift * std_period
elif (self.bounds[ind_e][0] == 'normal') & (self.bounds[ind_p][0] == 'uniform'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
elif (self.bounds[ind_e][0] == 'normal') & (self.bounds[ind_p][0] == 'trunc_normal'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
elif (self.bounds[ind_e][0] == 'trunc_normal') & (self.bounds[ind_p][0] == 'uniform'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
elif (self.bounds[ind_e][0] == 'trunc_normal') & (self.bounds[ind_p][0] == 'normal'):
raise ValueError('shift_epoch with different priors for epoch and period is not yet implemented.')
else:
raise ValueError('Parameters "bounds" have to be "uniform", "normal" or "trunc_normal".')
self.logprint('\tshifted epoch:',self.params[companion+'_epoch'])
self.logprint('\tshifted by',N_shift,'periods')
###############################################################################
#::: reduce_phot_data
###############################################################################
def reduce_phot_data(self, time, flux, flux_err, custom_series=None, inst=None):
ind_in = []
for companion in self.settings['companions_phot']:
epoch = self.params[companion+'_epoch']
period = self.params[companion+'_period']
width = self.settings['fast_fit_width']
if self.settings['secondary_eclipse']:
ind_ecl1x, ind_ecl2x, ind_outx = index_eclipses(time,epoch,period,width,width) #TODO: currently this assumes width_occ == width_tra
ind_in += list(ind_ecl1x)
ind_in += list(ind_ecl2x)
self.fulldata[inst][companion+'_ind_ecl1'] = ind_ecl1x
self.fulldata[inst][companion+'_ind_ecl2'] = ind_ecl2x
self.fulldata[inst][companion+'_ind_out'] = ind_outx
else:
ind_inx, ind_outx = index_transits(time,epoch,period,width)
ind_in += list(ind_inx)
self.fulldata[inst][companion+'_ind_in'] = ind_inx
self.fulldata[inst][companion+'_ind_out'] = ind_outx
ind_in = np.sort(np.unique(ind_in))
self.fulldata[inst]['all_ind_in'] = ind_in
self.fulldata[inst]['all_ind_out'] = np.delete( np.arange(len(self.fulldata[inst]['time'])), ind_in )
if len(ind_in)==0:
raise ValueError(inst+'.csv does not contain any in-transit data. Check that your epoch and period guesses are correct.')
time = time[ind_in]
flux = flux[ind_in]
flux_err = flux_err[ind_in]
if custom_series is None:
return time, flux, flux_err
else:
custom_series = custom_series[ind_in]
return time, flux, flux_err, custom_series
###############################################################################
#::: prepare TTV fit (if chosen)
###############################################################################
def prepare_ttv_fit(self):
'''
this must be run *after* reduce_phot_data()
'''
for companion in self.settings['companions_phot']:
all_times = []
all_flux = []
for inst in self.settings['inst_phot']:
all_times += list(self.data[inst]['time'])
all_flux += list(self.data[inst]['flux'])
self.data[companion+'_tmid_observed_transits'] = get_tmid_observed_transits(all_times,self.params[companion+'_epoch'],self.params[companion+'_period'],self.settings['fast_fit_width'])
#::: plots
# if self.settings['fit_ttvs']:
# flux_min = np.nanmin(all_flux)
# flux_max = np.nanmax(all_flux)
# N_days = int( np.max(all_times) - np.min(all_times) )
# figsizex = np.min( [1, int(N_days/20.)] )*5
# fig, ax = plt.subplots(figsize=(figsizex, 4)) #figsize * 5 for every 20 days
# for inst in self.settings['inst_phot']:
# ax.plot(self.data[inst]['time'], self.data[inst]['flux'],ls='none',marker='.',label=inst)
# ax.plot( self.data[companion+'_tmid_observed_transits'], np.ones_like(self.data[companion+'_tmid_observed_transits'])*0.995*flux_min, 'k^' )
# for i, tmid in enumerate(self.data[companion+'_tmid_observed_transits']):
# ax.text( tmid, 0.9925*flux_min, str(i+1), ha='center' )
# ax.set(ylim=[0.99*flux_min, flux_max], xlabel='Time (BJD)', ylabel='Relative Flux')
# if not os.path.exists( os.path.join(self.datadir,'results') ):
# os.makedirs(os.path.join(self.datadir,'results'))
# ax.legend()
# fname = os.path.join(self.datadir,'results','preparation_for_TTV_fit_'+companion+'.pdf')
# if os.path.exists(fname):
# overwrite = str(input('Figure "preparation_for_TTV_fit_'+companion+'.pdf" already exists.\n'+\
# 'What do you want to do?\n'+\
# '1 : overwrite it\n'+\
# '2 : skip it and move on\n'))
# if (overwrite == '1'):
# fig.savefig(fname, bbox_inches='tight' )
# else:
# pass
# plt.close(fig)
width = self.settings['fast_fit_width']
for inst in self.settings['inst_phot']:
time = self.data[inst]['time']
for i, t in enumerate(self.data[companion+'_tmid_observed_transits']):
ind = np.where((time >= (t - width/2.)) & (time <= (t + width/2.)))[0]
self.data[inst][companion+'_ind_time_transit_'+str(i+1)] = ind
self.data[inst][companion+'_time_transit_'+str(i+1)] = time[ind]
###############################################################################
#::: stellar priors
###############################################################################
def load_stellar_priors(self, N_samples=10000):
if os.path.exists(os.path.join(self.datadir,'params_star.csv')) and (self.settings['use_host_density_prior'] is True):
buf = np.genfromtxt( os.path.join(self.datadir,'params_star.csv'), delimiter=',', names=True, dtype=None, encoding='utf-8', comments='#' )
radius = simulate_PDF(buf['R_star'], buf['R_star_lerr'], buf['R_star_uerr'], size=N_samples, plot=False) * 6.957e10 #in cgs
mass = simulate_PDF(buf['M_star'], buf['M_star_lerr'], buf['M_star_uerr'], size=N_samples, plot=False) * 1.9884754153381438e+33 #in cgs
volume = (4./3.)*np.pi*radius**3 #in cgs
density = mass / volume #in cgs
self.params_star = {'R_star_median':buf['R_star'],
'R_star_lerr':buf['R_star_lerr'],
'R_star_uerr':buf['R_star_uerr'],
'M_star_median':buf['M_star'],
'M_star_lerr':buf['M_star_lerr'],
'M_star_uerr':buf['M_star_uerr']
}
self.external_priors['host_density'] = ['normal', np.median(density), np.max( [np.median(density)-np.percentile(density,16), np.percentile(density,84)-np.median(density)] ) ] #in cgs
| 58.04003
| 224
| 0.481008
| 76,578
| 0.978045
| 0
| 0
| 0
| 0
| 0
| 0
| 35,484
| 0.453197
|
1f08e87bb685c5de27a28a6c0f75d6ba70a73d31
| 3,334
|
py
|
Python
|
schematron/ssk.py
|
SarahTV/SSK
|
ac7f5b7b1f1c02aefcb706abd80178f86c216cf7
|
[
"CC-BY-4.0"
] | null | null | null |
schematron/ssk.py
|
SarahTV/SSK
|
ac7f5b7b1f1c02aefcb706abd80178f86c216cf7
|
[
"CC-BY-4.0"
] | null | null | null |
schematron/ssk.py
|
SarahTV/SSK
|
ac7f5b7b1f1c02aefcb706abd80178f86c216cf7
|
[
"CC-BY-4.0"
] | null | null | null |
#coding: utf-8
import re
import os
from lxml import etree as ET
from bs4 import BeautifulSoup
import csv
class schSSK:
def create_directory(self, directory):
"""Create a new directory.
:param directory: path to new directory
:type directory: string
"""
if not os.path.exists(directory):
os.makedirs(directory)
# Manage input files to handle
def get_files(self, d):
filesList = [] # list of input files
for fileName in os.listdir(d):
if fileName.endswith(".xml"):
filesList.append(d + "/" + fileName)
return filesList
def loadBS(self, xmlfile):
with open(xmlfile) as file:
testedFile = BeautifulSoup(file, 'xml')
return testedFile
def loadTree(self, xmlfile):
parser = ET.XMLParser(ns_clean=True)
tree = ET.parse(xmlfile, parser)
return tree
def parseSVRL(self, svrl, tree):
diagnostic = []
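# SVRL encodes Schematron results as <failed-assert> and <successful-report> elements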
fired = svrl.find_all('failed-assert')
successfulReports = svrl.find_all('successful-report')
fired.extend(successfulReports)
for fire in fired:
location = self.getLocations(fire.attrs['location'], tree)
if location[1] is not None:
lineNumber = location[1].sourceline
tagName = location[1].tag
tagText = location[1].text
else:
lineNumber = ""
tagName = ""
tagText = ""
role = fire.attrs['role']
message = " ".join(fire.text.split())
rule = {
# "context": fire.findPrevious('fired-rule')['context'],
#"test": fire['test'],
"location": location[0],
"line": lineNumber,
"role" : role,
#"tag" : tagName,
# "attributes" : location[1].attrib,
#"nodeText": tagText,
"message": message
}
diagnostic.append(rule)
return diagnostic
def getLocations(self, assertLocation, tree):
# patterns to normalize the XPaths reported in the SVRL output
pattern1 = re.compile(r'/\*:')
pattern2 = re.compile(r'\[namespace-uri\(\)=\'http://www\.tei\-c\.org/ns/1\.0\'\]')
pattern3 = re.compile(r'/')
location1 = re.sub(pattern1, '/', assertLocation)
location2 = re.sub(pattern2, '', location1)
# Different processing depending on whether the context node is the root or not
if len(location2) > 7:
locationNorm = re.sub(pattern3, '/{http://www.tei-c.org/ns/1.0}', location2[7:])[1:]
else:
locationNorm = re.sub(pattern3, '/{http://www.tei-c.org/ns/1.0}', location2)[1:]
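# ElementTree expects namespace-qualified tags of the form {uri}tag, hence the rewriting above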
location = tree.getroot().find(locationNorm)
return location2, location
def writeCSV(self, diagnostic, report, reportFolder):
keys = diagnostic[0].keys()
reportFile = re.search(r'/(.+?)\.xml', report).group(1) + "_report.csv"
csvFile = reportFolder + "/" + os.path.basename(os.path.normpath(reportFile))
with open(csvFile, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(diagnostic)
| 35.468085
| 96
| 0.54889
| 3,226
| 0.967606
| 0
| 0
| 0
| 0
| 0
| 0
| 694
| 0.208158
|
1f098e212077f84f0f80919da194e6c3605bd4fb
| 14,798
|
py
|
Python
|
src/01_eigenprogression_transform.py
|
lostanlen/nemisig2018
|
2868da84c938ff6db98936d81a830b838eef1131
|
[
"MIT"
] | 1
|
2018-09-27T09:07:05.000Z
|
2018-09-27T09:07:05.000Z
|
src/01_eigenprogression_transform.py
|
lostanlen/nemisig2018
|
2868da84c938ff6db98936d81a830b838eef1131
|
[
"MIT"
] | null | null | null |
src/01_eigenprogression_transform.py
|
lostanlen/nemisig2018
|
2868da84c938ff6db98936d81a830b838eef1131
|
[
"MIT"
] | null | null | null |
import localmodule
import datetime
import h5py
import math
import music21 as m21
import numpy as np
import os
import scipy
import scipy.fftpack
import scipy.linalg
import sys
import time
# Parse arguments
args = sys.argv[1:]
composer_str = args[0]
track_str = args[1]
# Define constants.
J_tm = 8
N = 2**10
n_octaves = 8
midi_octave_offset = 2
quantization = 2.0
xi = 0.25
sigma = 0.1
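# xi and sigma are the center frequency and bandwidth of the mother Morlet
# wavelet (in normalized frequency); both are halved at every dyadic scale j.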
# Print header.
start_time = int(time.time())
print(str(datetime.datetime.now()) + " Start.")
print("Eigenprogression transform.")
print("Composer: " + composer_str + ".")
print("Piece: " + track_str + ".")
print("")
print("h5py version: {:s}".format(h5py.__version__))
print("music21 version: {:s}".format(m21.__version__))
print("numpy version: {:s}".format(np.__version__))
print("scipy version: {:s}".format(scipy.__version__))
print("")
############################# (1) PARSING ##################################
# Start clock.
parsing_start_time = int(time.time())
# Parse Kern score with music21.
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
kern_name = "_".join([dataset_name, "kern"])
kern_dir = os.path.join(data_dir, kern_name)
composer_dir = os.path.join(kern_dir, composer_str)
track_name = track_str + ".krn"
track_path = os.path.join(composer_dir, track_name)
score = m21.converter.parse(track_path)
pianoroll_parts = []
n_parts = len(score.parts)
n_semitones = 12 * n_octaves
# Loop over parts to extract piano rolls.
for part_id in range(n_parts):
part = score.parts[part_id]
pianoroll_part = np.zeros((n_semitones, N), dtype=np.float32)
# Get the measure offsets
measure_offset = {}
for el in part.recurse(classFilter=('Measure',)):
measure_offset[el.measureNumber] = el.offset
# Loop over notes
for note in part.recurse(classFilter=('Note',)):
note_start = int(math.ceil(
(measure_offset[note.measureNumber] +\
note.offset) *\
quantization))
note_end = int(math.ceil((
measure_offset[note.measureNumber] +\
note.offset +\
note.duration.quarterLength) *\
quantization))
pianoroll_part[
note.midi - midi_octave_offset * 12,
note_start:note_end] = 1
pianoroll_parts.append(pianoroll_part)
# Stack parts into piano roll.
mtrack_pianoroll = np.stack(pianoroll_parts, 2)
pianoroll = mtrack_pianoroll.max(axis=2)
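# Taking the max over parts merges all voices into a single binary piano roll.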
# Print elapsed time.
elapsed_time = time.time() - int(parsing_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Parsing took " + elapsed_str + " seconds.")
####################### (2) WAVELET TRANSFORM ##############################
# Start clock.
wavelet_start_time = int(time.time())
# Setup wavelet filter bank over time.
wavelet_filterbank_ft = np.zeros((1, N, J_tm), dtype=np.float32)
for j in range(J_tm-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi_ft = localmodule.morlet(center, den, N, n_periods=4)
wavelet_filterbank_ft[0, :, -1 - j] = psi_ft
# Append scaling function phi (average).
wavelet_filterbank_ft[0, 0, 0] = 1
# Convolve pianoroll with filterbank.
pianoroll_ft = scipy.fftpack.fft(pianoroll, axis=1)
pianoroll_ft = np.expand_dims(pianoroll_ft, axis=2)
wavelet_transform_ft = pianoroll_ft * wavelet_filterbank_ft
wavelet_transform = scipy.fftpack.ifft(wavelet_transform_ft, axis=1)
# Print elapsed time.
elapsed_time = time.time() - int(wavelet_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Wavelet transform took " + elapsed_str + " seconds.")
####################### (3) EIGENTRIAD TRANSFORM ###########################
# Start clock.
eigentriad_start_time = int(time.time())
# Reshape MIDI axis to chromagram
chromagram = np.reshape(wavelet_transform,
(12, -1, wavelet_transform.shape[1], wavelet_transform.shape[2]), 'F')
# Construct eigentriads
cosine_basis = np.array([[np.cos(2*np.pi*omega*t/3)
for omega in range(3)] for t in range(3)]).T
sine_basis = np.array([[np.sin(2*np.pi*omega*t/3)
for omega in range(3)] for t in range(3)]).T
fourier_basis = cosine_basis + 1.0j * sine_basis
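# fourier_basis is the 3x3 DFT matrix, entry (t, omega) = exp(2j*pi*omega*t/3);
# projecting a triad onto its columns yields the three eigentriad components.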
major_template = [0, 4, 7]
minor_template = [0, 3, 7]
major_eigentriads = np.zeros((12, 3), dtype=np.complex64)
minor_eigentriads = np.zeros((12, 3), dtype=np.complex64)
for omega in range(3):
for t, p in enumerate(major_template):
major_eigentriads[p, omega] = fourier_basis[t, omega]
for t, p in enumerate(minor_template):
minor_eigentriads[p, omega] = fourier_basis[t, omega]
eigentriads = np.stack(
(major_eigentriads, minor_eigentriads), axis=1)
eigentriads = eigentriads.astype(np.complex64)
# Convolve chromagram with eigentriads
chromagram_ft = scipy.fftpack.fft(chromagram, axis=0)
chromagram_ft = chromagram_ft[:, np.newaxis, :, :, :, np.newaxis]
eigentriads_ft = scipy.fftpack.fft(eigentriads, axis=0)
eigentriads_ft = eigentriads_ft[:, :, np.newaxis,
np.newaxis, np.newaxis, :]
eigentriad_transform_ft = chromagram_ft * eigentriads_ft
eigentriad_transform = scipy.fftpack.ifft(
eigentriad_transform_ft, axis=0)
# Apply modulus nonlinearity
eigentriad_transform_modulus = np.abs(eigentriad_transform)
# Print elapsed time.
elapsed_time = time.time() - int(eigentriad_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigentriad transform took " + elapsed_str + " seconds.")
####################### (4) SCATTERING TRANSFORM ###########################
# Start clock.
scattering_start_time = int(time.time())
# Setup scattering filter bank over time.
scattering_filterbank_ft = np.zeros((1, N, 2*J_tm-1), dtype=np.float32)
for j in range(J_tm-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * N
den = 2 * sigma_j * sigma_j * N * N
psi_ft = localmodule.morlet(center, den, N, n_periods=4)
conj_psi_ft = np.roll(psi_ft, -1)[::-1]
scattering_filterbank_ft[0, :, -1 - 2*j] = psi_ft
scattering_filterbank_ft[0, :, -1 - (2*j+1)] = conj_psi_ft
scattering_filterbank_ft[0, 0, 0] = 1
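# The bank interleaves each wavelet with its complex conjugate so that both
# positive and negative frequencies are covered; slot 0 again holds the lowpass.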
# Convolve eigentriad transform with filterbank again.
# This is akin to a scattering transform.
# We remove the finest scale (last two coefficients).
eigentriad_transform_modulus_ft =\
scipy.fftpack.fft(eigentriad_transform_modulus, axis=3)
eigentriad_transform_modulus_ft =\
eigentriad_transform_modulus_ft[:, :, :, :, :, :, np.newaxis]
scattering_filterbank_ft =\
wavelet_filterbank_ft[:, np.newaxis, np.newaxis, :,
np.newaxis, np.newaxis, :-2]
scattering_transform_ft =\
eigentriad_transform_modulus_ft * scattering_filterbank_ft
scattering_transform = scipy.fftpack.ifft(scattering_transform_ft, axis=3)
# Print elapsed time.
elapsed_time = time.time() - int(scattering_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Scattering transform took " + elapsed_str + " seconds.")
###################### (5) EIGENPROGRESSION TRANSFORM ######################
# Start clock.
eigenprogression_start_time = int(time.time())
# Reshape chroma and quality into a chord axis
sc_shape = scattering_transform.shape
tonnetz_shape = (
sc_shape[0]*sc_shape[1], sc_shape[2],
sc_shape[3], sc_shape[4], sc_shape[5],
sc_shape[6])
tonnetz = np.reshape(scattering_transform,
tonnetz_shape, 'F')
# Build adjacency matrix for Tonnetz graph
# (1/3) Major to minor transitions.
major_edges = np.zeros((12,), dtype=np.float32)
# Parallel minor (C major to C minor)
major_edges[0] = 1
# Relative minor (C major to A minor)
major_edges[9] = 1
# Leading tone minor (C major to E minor)
major_edges[4] = 1
# (2/3) Minor to major transitions
minor_edges = np.zeros((12,), dtype=np.float32)
# Parallel major (C minor to C major)
minor_edges[0] = 1
# Relative major (C minor to Eb major)
minor_edges[3] = 1
# Leading tone major (C minor to Ab major)
minor_edges[8] = 1
# (3/3) Build the full adjacency matrix from 4 blocks.
major_adjacency = scipy.linalg.toeplitz(major_edges, minor_edges)
minor_adjacency = scipy.linalg.toeplitz(minor_edges, major_edges)
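# scipy.linalg.toeplitz(c, r) builds the matrix with first column c and first
# row r, which encodes transposition invariance of the chord transitions.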
tonnetz_adjacency = np.zeros((24, 24), dtype=np.float32)
tonnetz_adjacency[:12, 12:] = minor_adjacency
tonnetz_adjacency[12:, :12] = major_adjacency
# Define Laplacian on the Tonnetz graph.
tonnetz_laplacian = 3 * np.eye(24, dtype=np.float32) - tonnetz_adjacency
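# Every chord has exactly three neighbors (the transitions above), so the
# combinatorial graph Laplacian is L = D - A = 3*I - A.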
# Diagonalize the Laplacian: its eigenvectors are the eigenprogressions.
eigvals, eigvecs = np.linalg.eig(tonnetz_laplacian)
sorting_indices = np.argsort(eigvals)
eigvals = eigvals[sorting_indices]
eigvecs = eigvecs[:, sorting_indices]
# Key invariance
phi = eigvecs[:, 0]
# Tonic invariance with quality covariance
psi_quality = eigvecs[:, 23]
# C -> C# -> D ... simultaneously with Cm -> C#m -> ...
# Major third periodicity.
psi_chromatic = eigvecs[:, 1] + 1j * eigvecs[:, 2]
# Major keys: pentatonic pattern (C D F G A) moving up a minor third.
# Minor keys: minor seventh pattern (B D E A) moving down a minor third.
psi_pentatonic_up = eigvecs[:, 3] + 1j * eigvecs[:, 4]
# Cm -> B -> Bm -> Bb -> Am -> ...
# Minor third periodicity
psi_Cm_B_Bm_Bb = eigvecs[:, 5] + 1j * eigvecs[:, 6]
# C -> Am -> A -> Cm -> C ...
# Relative (R) followed by parallel (P).
# Major third periodicity
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_RP = eigvecs[:, 7] + j * eigvecs[:, 8] + jbar * eigvecs[:, 9]
# C -> Bm -> Bb -> Am -> Ab -> ...
psi_C_Bm_Bb_Am = eigvecs[:, 10] + 1j * eigvecs[:, 11]
# Upwards minor third. Qualities in phase opposition.
psi_minorthird_quality = eigvecs[:, 12] + 1j * eigvecs[:, 13]
# Ab is simultaneous with Am.
# Abstract notion of "third" degree with quality invariance?
# Tritone periodicity
j = complex(np.cos(2*np.pi/3), np.sin(2*np.pi/3))
jbar = complex(np.cos(-2*np.pi/3), np.sin(-2*np.pi/3))
psi_third_tritone = eigvecs[:, 14] + j * eigvecs[:, 15] + jbar * eigvecs[:, 16]
# C -> C#m -> D -> D#m -> ...
# Minor third periodicity.
psi_C_Dbm_D_Ebm = eigvecs[:, 17] + 1j * eigvecs[:, 18]
# Major keys: pentatonic pattern (C D F G A) moving down a minor third.
# Minor keys: minor seventh pattern (B D E A) moving up a minor third.
psi_pentatonic_down = eigvecs[:, 19] + 1j * eigvecs[:, 20]
# C is simultaneous with Dm.
# Abstract notion of minor key?
# Major third periodicity.
psi_minorkey = eigvecs[:, 21] + 1j * eigvecs[:, 22]
# Concatenate eigenprogressions.
eigenprogressions = np.stack((
phi,
psi_quality,
psi_chromatic,
psi_pentatonic_up,
psi_Cm_B_Bm_Bb,
psi_RP,
psi_C_Bm_Bb_Am,
psi_minorthird_quality,
psi_third_tritone,
psi_C_Dbm_D_Ebm,
psi_pentatonic_down,
psi_minorkey), axis=-1)
eigenprogressions = np.reshape(eigenprogressions, (12, 2, -1), 'F')
eigenprogressions = eigenprogressions.astype(np.complex64)
# Apply eigenprogression transform.
scattering_transform_ft = scipy.fftpack.fft(scattering_transform, axis=0)
scattering_transform_ft = scattering_transform_ft[:, :, :, :, :, :, :, np.newaxis]
eigenprogressions_ft = scipy.fftpack.fft(eigenprogressions, axis=0)
eigenprogressions_ft = eigenprogressions_ft[
:, :, np.newaxis, np.newaxis, np.newaxis, np.newaxis, np.newaxis]
eigenprogression_transform_ft = scattering_transform_ft * eigenprogressions_ft
eigenprogression_transform = scipy.fftpack.ifft(eigenprogression_transform_ft, axis=0)
# Print elapsed time.
elapsed_time = time.time() - int(eigenprogression_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Eigenprogression transform took " + elapsed_str + " seconds.")
###################### (5) SPIRAL TRANSFORM ######################
# Start clock.
spiral_start_time = int(time.time())
# Setup wavelet filter bank across octaves.
# This is comparable to a spiral scattering transform.
J_oct = 3
octave_filterbank_ft = np.zeros((n_octaves, 2*J_oct-1), dtype=np.float32)
for j in range(J_oct-1):
xi_j = xi * 2**(-j)
sigma_j = sigma * 2**(-j)
center = xi_j * n_octaves
den = 2 * sigma_j * sigma_j * n_octaves * n_octaves
psi_ft = localmodule.morlet(center, den, n_octaves, n_periods=4)
conj_psi_ft = np.roll(psi_ft, -1)[::-1]
octave_filterbank_ft[:, -1 - 2*j] = psi_ft
octave_filterbank_ft[:, -1 - (2*j+1)] = conj_psi_ft
octave_filterbank_ft[0, 0] = 1
octave_filterbank_ft = octave_filterbank_ft[
np.newaxis, np.newaxis, :,
np.newaxis, np.newaxis,
np.newaxis, np.newaxis, np.newaxis]
# Apply octave transform.
eigenprogression_transform_ft = scipy.fftpack.fft(
eigenprogression_transform, axis=2)
eigenprogression_transform_ft = eigenprogression_transform_ft[
:, :, :, :, :, :, :, :, np.newaxis]
spiral_transform_ft =\
eigenprogression_transform_ft * octave_filterbank_ft
spiral_transform = scipy.fftpack.ifft(
spiral_transform_ft, axis=2)
# Print elapsed time.
elapsed_time = time.time() - int(spiral_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Spiral transform took " + elapsed_str + " seconds.")
######################## (6) MODULUS AND AVERAGING #########################
modulus_start_time = time.time()
# Apply second-order modulus nonlinearity.
U2 = np.abs(spiral_transform)
# Average over chroma, quality, octave, and time.
S2 = np.sum(U2, axis=(0, 1, 2, 3))
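# Summing over chroma, quality, octave, and time makes S2 invariant to
# transposition, mode, octave shifts, and time shifts.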
# Print elapsed time.
elapsed_time = time.time() - int(modulus_start_time)
elapsed_str = "{:>05.2f}".format(elapsed_time)
print("Averaging took " + elapsed_str + " seconds.")
############################### (7) STORAGE #################################
# Store to HDF5 container
hdf5_name = "_".join([dataset_name, "eigenprogression-transforms"])
hdf5_dir = os.path.join(data_dir, hdf5_name)
os.makedirs(hdf5_dir, exist_ok=True)
composer_dir = os.path.join(hdf5_dir, composer_str)
os.makedirs(composer_dir, exist_ok=True)
out_path = os.path.join(composer_dir,
"_".join([
dataset_name,
"eigenprogression-transform",
composer_str,
track_str + ".hdf5"]))
out_file = h5py.File(out_path, 'w')
hdf5_dataset_size = S2.shape
hdf5_dataset_key = "_".join([
"eigenprogression-transform",
composer_str,
track_str])
hdf5_dataset = out_file.create_dataset(hdf5_dataset_key, hdf5_dataset_size)
hdf5_dataset[:] = S2
out_file.close()
# Print elapsed time.
print(str(datetime.datetime.now()) + " Finish.")
elapsed_time = time.time() - int(start_time)
elapsed_hours = int(elapsed_time / (60 * 60))
elapsed_minutes = int((elapsed_time % (60 * 60)) / 60)
elapsed_seconds = elapsed_time % 60.
elapsed_str = "{:>02}:{:>02}:{:>05.2f}".format(elapsed_hours,
elapsed_minutes,
elapsed_seconds)
print("Total elapsed time: " + elapsed_str + ".")
| 34.334107
| 86
| 0.68462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,970
| 0.268279
|
1f09cb31eceadc76ff93699e82ee70df317cae82
| 983
|
py
|
Python
|
src/spaceone/monitoring/manager/plugin_manager.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 5
|
2020-06-04T23:01:30.000Z
|
2020-09-09T08:58:51.000Z
|
src/spaceone/monitoring/manager/plugin_manager.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 9
|
2022-02-10T00:58:28.000Z
|
2022-03-23T11:12:47.000Z
|
src/spaceone/monitoring/manager/plugin_manager.py
|
jean1042/monitoring
|
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
|
[
"Apache-2.0"
] | 7
|
2020-06-10T01:56:35.000Z
|
2021-12-02T05:36:21.000Z
|
import logging
from spaceone.core.manager import BaseManager
from spaceone.core.connector.space_connector import SpaceConnector
_LOGGER = logging.getLogger(__name__)
class PluginManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.plugin_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='plugin')
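# The connector is created once here and reused by get_plugin_endpoint below.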
def get_plugin_endpoint(self, plugin_info, domain_id):
response = self.plugin_connector.dispatch(
'Plugin.get_plugin_endpoint',
{
'plugin_id': plugin_info['plugin_id'],
'version': plugin_info.get('version'),
'upgrade_mode': plugin_info.get('upgrade_mode', 'AUTO'),
'domain_id': domain_id
}
)
return response['endpoint'], response.get('updated_version')
| 35.107143
| 110
| 0.666328
| 812
| 0.826043
| 0
| 0
| 0
| 0
| 0
| 0
| 188
| 0.191251
|
1f0a14df894f78200ec160dd56d1194d86c6d8d9
| 1,107
|
py
|
Python
|
Segmentation/bins/hist_label_portarit.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
Segmentation/bins/hist_label_portarit.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
Segmentation/bins/hist_label_portarit.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
# @file name : hist_label_portrait.py
# @author : JLChen
# @date : 2020-03-11
# @brief : count the number of pixels per class
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import pylab as pl
import cv2
def cal_cls_nums(path, t=0.78):
label_img = cv2.imread(path)
label_img = cv2.cvtColor(label_img, cv2.COLOR_BGR2GRAY)
label_img[label_img > t] = 1
label_img[label_img <= t] = 0
label_img = label_img.flatten()
    count = np.bincount(label_img, minlength=2)  # per-class pixel counts (0/1)
return count
if __name__ == '__main__':
data_dir = r"G:\deep_learning_data\EG_dataset\dataset\training"
counter = np.zeros((2,))
    # Iterate over every label image and accumulate the per-class counts
file_names = [n for n in os.listdir(data_dir) if n.endswith('_matte.png')]
for i, name in enumerate(file_names):
path_img = os.path.join(data_dir, name)
        counter += cal_cls_nums(path_img)  # accumulate counts into counter
# https://pytorch.org/docs/stable/generated/torch.nn.BCEWithLogitsLoss.html?highlight=pos_weight
    # set pos_weight to (number of negative samples / number of positive samples)
print(counter, counter[0] / counter[1])
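
    # A minimal sketch of feeding the ratio into a loss function (assumption:
    # PyTorch is installed; it is not used elsewhere in this script):
    import torch
    pos_weight = torch.tensor([counter[0] / counter[1]])
    criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    print(criterion.pos_weight)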
| 26.357143
| 100
| 0.67299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 468
| 0.393608
|
1f0a726404191dd0a8ef9e2cd1c7c33d9b482f77
| 7,924
|
py
|
Python
|
yoapi/contexts.py
|
YoApp/yo-api
|
a162e51804ab91724cc7ad3e7608410329da6789
|
[
"MIT"
] | 1
|
2021-12-17T03:25:34.000Z
|
2021-12-17T03:25:34.000Z
|
yoapi/contexts.py
|
YoApp/yo-api
|
a162e51804ab91724cc7ad3e7608410329da6789
|
[
"MIT"
] | null | null | null |
yoapi/contexts.py
|
YoApp/yo-api
|
a162e51804ab91724cc7ad3e7608410329da6789
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Client context module."""
import pytz
import time
from flask import current_app
from datetime import datetime, timedelta
from mongoengine import DoesNotExist, ValidationError
from .ab_test import get_enrolled_experiments
from .core import cache
from .errors import APIError
from .helpers import assert_valid_time
from .models import GifPhrase
from .constants.context import DEFAULT_CONTEXTS, ALL_CONTEXT_IDS, LOCATION_CTX, DEFAULT_CTX, AUDIO_CTX, CAMERA_CTX
import semver
from yoapi.models import Yo
from yoapi.notification_endpoints import get_useragent_profile
def get_contexts(user, request=None):
"""Gets the contexts associated with the supplied user"""
default_context = current_app.config.get('DEFAULT_CONTEXT')
if user is None:
return [LOCATION_CTX, DEFAULT_CTX, CAMERA_CTX, AUDIO_CTX], default_context
week_ago = datetime.now() - timedelta(days=27)
week_ago_unix = int(time.mktime(week_ago.timetuple()) * 1e6)
if Yo.objects.filter(sender=user, created__gt=week_ago_unix, context_id='gif').count() > 0:
return ALL_CONTEXT_IDS, default_context
if Yo.objects.filter(sender=user, created__gt=week_ago_unix, context_id='emoji').count() > 0:
return ALL_CONTEXT_IDS, default_context
try:
if request and semver.match(get_useragent_profile(request).get('app_version'), '>=2.5.0'):
return [LOCATION_CTX, DEFAULT_CTX, CAMERA_CTX, AUDIO_CTX], default_context
    except Exception:
pass
experiments = get_enrolled_experiments(user, dimension='context')
if experiments:
experiment = experiments[0]
contexts = DEFAULT_CONTEXTS[:]
assignments = experiment.get_params()
exp_context = assignments.get('context')
exp_context_position = assignments.get('context_position')
exp_default_context = assignments.get('default_context')
if exp_context:
if (exp_context_position is not None and
exp_context_position < len(DEFAULT_CONTEXTS) and
exp_context_position >= 0):
contexts.insert(exp_context_position, exp_context)
else:
contexts.append(exp_context)
if exp_default_context:
default_context = exp_default_context
if not experiment.ab_test.debug:
experiment.log_event('context_ab_test_enrolled',
extras={'dimension': 'context'})
return contexts, default_context
if current_app.config.get('ENABLE_ALL_CONTEXTS'):
return ALL_CONTEXT_IDS, default_context
return DEFAULT_CONTEXTS, default_context
def update_gif_phrases(payload):
items = []
for item in payload:
item = item.copy()
items.append(item)
phrase_id = item.get('id')
is_new = False
if phrase_id:
try:
phrase = get_gif_phrase_by_id(phrase_id)
except DoesNotExist:
item.update({'update_result': 'discarded'})
continue
if item.get('delete'):
phrase.delete()
item.update({'update_result': 'deleted'})
continue
else:
phrase = GifPhrase()
is_new = True
if item.get('delete'):
item.update({'update_result': 'skipped'})
continue
end_time = item.get('end_time')
start_time = item.get('start_time')
keyword = item.get('keyword')
header = item.get('header')
day = item.get('day')
date = item.get('date')
default = item.get('is_default')
default = bool(default)
# Parse the iso8601 dates that google spreadsheets provide.
if date:
try:
date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
date = date.strftime('%-m/%-d/%y')
            except (ValueError, TypeError):
raise APIError('Invalid date format')
else:
date = None
try:
start_time = datetime.strptime(start_time, '%H:%M')
start_time = start_time.strftime('%H:%M')
        except (ValueError, TypeError):
raise APIError('Invalid start_time format')
try:
end_time = datetime.strptime(end_time, '%H:%M')
end_time = end_time.strftime('%H:%M')
        except (ValueError, TypeError):
raise APIError('Invalid end_time format')
if default and date:
raise APIError('defaults cannot have a date')
if default and not day:
raise APIError('defaults must have a day')
if default != phrase.is_default:
phrase.is_default = default
if start_time != phrase.start_time:
phrase.start_time = start_time
if end_time != phrase.end_time:
phrase.end_time = end_time
if keyword != phrase.keyword:
phrase.keyword = keyword
if header != phrase.header:
phrase.header = header
if day != phrase.day:
if day:
day = day.lower()
try:
assert_valid_time(day, time_format='%A')
except ValueError:
raise APIError('invalid day of the week')
else:
day = None
phrase.day = day
if date != phrase.date:
phrase.date = date
if is_new:
item.update({'update_result': 'created'})
elif phrase._changed_fields:
item.update({'update_result': 'updated'})
else:
item.update({'update_result': 'nochange'})
continue
try:
phrase.save()
except ValidationError:
item.update({'update_result': 'discarded'})
message = 'Tried to update gif phrase with invalid information.'
current_app.log_error({'message': message, 'item': item})
continue
item.update({'id': phrase.phrase_id})
if phrase.is_default:
clear_get_default_phrase_cache(phrase.day)
clear_get_phrases_cache()
return {'items': items}
def clear_get_phrases_cache(date=None):
if date:
# This is a hack to make sure dates are NEVER 0 padded
# when dealing with them in cache.
ts = time.strptime(date, '%m/%d/%y')
date = datetime(*ts[:6]).strftime('%-m/%-d/%y')
cache.delete_memoized(_get_all_phrases, date)
else:
cache.delete_memoized(_get_all_phrases)
def clear_get_default_phrase_cache(day):
day = str(day).lower()
cache.delete_memoized(_get_default_phrases, day)
def get_gif_phrase_by_id(phrase_id):
return GifPhrase.objects(id=phrase_id).get()
@cache.memoize()
def _get_default_phrases(day):
phrases = GifPhrase.objects(day=day, is_default=True).all()
return list(phrases)
# Timeout after 2 days.
@cache.memoize(timeout=60*60*24*2)
def _get_all_phrases(date):
phrases = GifPhrase.objects(date=date).all()
return list(phrases)
def get_gif_phrase(user):
if user.timezone:
zone = pytz.timezone(user.timezone)
current_datetime = datetime.now(zone)
else:
zone = pytz.utc
current_datetime = datetime.now(zone)
current_time = current_datetime.strftime('%H:%M')
current_date = current_datetime.strftime('%-m/%-d/%y')
current_day = current_datetime.strftime('%A').lower()
phrases = _get_all_phrases(current_date)
for phrase in phrases:
if (current_time >= phrase.start_time and
current_time <= phrase.end_time):
return phrase
phrases = _get_default_phrases(current_day)
for phrase in phrases:
if (current_time >= phrase.start_time and
current_time <= phrase.end_time):
return phrase
return GifPhrase(keyword=current_app.config.get('GIPHY_PHRASE'),
header=current_app.config.get('GIPHY_TEXT'))
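
# A minimal usage sketch (assumption: called inside a Flask request context
# with a loaded user document):
#
#     contexts, default_context = get_contexts(user, request)
#     phrase = get_gif_phrase(user)
#     print(contexts, phrase.keyword)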
| 32.342857
| 114
| 0.617365
| 0
| 0
| 0
| 0
| 272
| 0.034326
| 0
| 0
| 1,075
| 0.135664
|
1f0acc1fb7d824f01253e231a80bcc928842ee31
| 4,180
|
py
|
Python
|
coyote_framework/config/abstract_config.py
|
vaibhavrastogi1988/python_testing_framework
|
583a2286479ed0ccda309c866a403dc92fa1bb3b
|
[
"MIT"
] | null | null | null |
coyote_framework/config/abstract_config.py
|
vaibhavrastogi1988/python_testing_framework
|
583a2286479ed0ccda309c866a403dc92fa1bb3b
|
[
"MIT"
] | null | null | null |
coyote_framework/config/abstract_config.py
|
vaibhavrastogi1988/python_testing_framework
|
583a2286479ed0ccda309c866a403dc92fa1bb3b
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
import json
import fnmatch
import os
__author__ = 'justin@shapeways.com'
TEST_RUN_SETTING_CONFIG = 'TEST_RUN_SETTING_CONFIG'
config_dict = {}
class NullConfigAttribute(object):
def __init__(self, description, default_value=None):
self.description = description
self.default_value = default_value
class ConfigBase(object):
"""The config base; do not inherit from ConfigParser because it is an old-style class"""
def __init__(self, section):
        if section not in config_dict:
self.section = section
self.parser = ConfigParser()
self._readall()
            config_dict[section] = self
else:
            this_config = config_dict[section]
self.section = section
self.parser = this_config.parser
def get(self, key):
return self.parser.get(self.section, key)
def getbool(self, key):
return bool(self.parser.getboolean(self.section, key))
def getint(self, key):
return int(self.get(key))
def getfloat(self, key):
return float(self.get(key))
def getjson(self, key):
raw = self.get(key)
if not raw:
raw = '{}'
return json.loads(raw)
def _readall(self):
"""Read configs from all available configs. It will read files in the following order:
1.) Read all default settings:
These are located under: `<project_root>/config/*/default.cfg`
2.) Read the user's config settings:
This is located on the path: `~/.aftrc`
3.) Read all config files specified by the config string in the environment variable TEST_RUN_SETTING_CONFIG
A config string such as "browser.headless,scripts.no_ssh" will read paths:
`<project_root>/config/browser/headless.cfg`
`<project_root>/config/scripts/no_ssh.cfg`
OR a config string such as "<project_root>/config/browser/headless.cfg" will load that path directly
"""
# First priority -- read all default configs
config_path = os.path.dirname(__file__)
config_defaults = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(config_path)
for f in fnmatch.filter(files, 'default.cfg')]
# Second priority -- read the user overrides
user_config = os.path.expanduser('~/.aftrc')
# Third priority -- read the environment variable overrides
override_filenames = []
if TEST_RUN_SETTING_CONFIG in os.environ:
for test_config in os.environ[TEST_RUN_SETTING_CONFIG].split(','):
                if os.path.exists(test_config):  # is this a file path?
                    override_filenames.append(test_config)
                elif "." in test_config and not test_config.endswith('.cfg'):  # else it might be in xxxx.yyyy format
                    config_parts = test_config.split('.')
                    config_parts[-1] += '.cfg'  # add file ext to last part, which should be the file
                    filename = os.path.join(config_path, *config_parts)
                    override_filenames.append(filename)
                else:  # unknown format; might raise an exception here
                    pass
all_configs = config_defaults + [user_config] + override_filenames
return self.parser.read(all_configs)
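
# A minimal usage sketch (assumption: a [browser] section with a `headless`
# key exists in one of the discovered .cfg files):
#
#     browser_config = ConfigBase('browser')
#     headless = browser_config.getbool('headless')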
def load_config_vars(target_config, source_config):
"""Loads all attributes from source config into target config
@type target_config: TestRunConfigManager
@param target_config: Config to dump variables into
@type source_config: TestRunConfigManager
@param source_config: The other config
@return: True
"""
# Overwrite all attributes in config with new config
for attr in dir(source_config):
# skip all private class attrs
if attr.startswith('_'):
continue
val = getattr(source_config, attr)
if val is not None:
setattr(target_config, attr, val)
| 35.726496
| 134
| 0.617943
| 3,351
| 0.801675
| 0
| 0
| 0
| 0
| 0
| 0
| 1,630
| 0.389952
|
1f0aed11f4fb176f853b2e44dfcbd9b5e33ba39f
| 16,468
|
py
|
Python
|
src/model_transforms.py
|
pmclSF/DeepCompress
|
6fc51aa0e9b34fb89f97877ad56da6345f93b929
|
[
"MIT"
] | 8
|
2021-03-11T10:56:35.000Z
|
2022-02-24T12:44:33.000Z
|
src/model_transforms.py
|
pmclSF/DeepCompress
|
6fc51aa0e9b34fb89f97877ad56da6345f93b929
|
[
"MIT"
] | null | null | null |
src/model_transforms.py
|
pmclSF/DeepCompress
|
6fc51aa0e9b34fb89f97877ad56da6345f93b929
|
[
"MIT"
] | null | null | null |
from enum import Enum
import tensorflow.compat.v1 as tf
from tensorflow.keras.layers import Layer, Conv3D, Conv3DTranspose, AveragePooling3D
from tensorflow_core.python.keras.utils import conv_utils
import tensorflow_compression as tfc
import tensorflow.keras as keras
def relu():
return keras.activations.relu
def CGDN():
return tfc.GDN(alpha_parameter=1, epsilon_parameter=1)
# ad-hoc alert: specify the activation using this:
#ACTIVATION = relu
# define a function similar to relu() to instantiate a GDN with alternative parameters
ACTIVATION = tfc.GDN
#ACTIVATION = CGDN
def get_channel_axis(data_format):
return 1 if data_format == 'channels_first' else -1
class SequentialLayer(Layer):
def __init__(self, layers, *args, **kwargs):
super(SequentialLayer, self).__init__(*args, **kwargs)
self._layers = layers
def call(self, tensor, **kwargs):
for layer in self._layers:
tensor = layer(tensor)
return tensor
class ResidualLayer(Layer):
def __init__(self, layers, residual_mode='add', data_format=None, *args, **kwargs):
super(ResidualLayer, self).__init__(*args, **kwargs)
assert residual_mode in ('add', 'concat')
self._layers = layers
self.residual_mode = residual_mode
self.data_format = conv_utils.normalize_data_format(data_format)
def call(self, tensor, **kwargs):
tensor = self._layers[0](tensor)
tensor1 = tensor
for layer in self._layers[1:]:
tensor = layer(tensor)
if self.residual_mode == 'add':
return tensor1 + tensor
else:
return tf.concat((tensor, tensor1), get_channel_axis(self.data_format))
class AnalysisTransformV1(SequentialLayer):
def __init__(self, filters, data_format=None, activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'strides': (2, 2, 2), 'padding': 'same', 'data_format': data_format, 'filters': filters}
layers = [Conv3D(kernel_size=(9, 9, 9), use_bias=True, activation=activation, **params),
Conv3D(kernel_size=(5, 5, 5), use_bias=True, activation=activation, **params),
Conv3D(kernel_size=(5, 5, 5), use_bias=False, activation=None, **params)]
super(AnalysisTransformV1, self).__init__(layers, *args, **kwargs)
class SynthesisTransformV1(SequentialLayer):
def __init__(self, filters, data_format=None, activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'strides': (2, 2, 2), 'padding': 'same', 'data_format': data_format, 'use_bias': True,
'activation': activation}
layers = [Conv3DTranspose(filters, (5, 5, 5), **params),
Conv3DTranspose(filters, (5, 5, 5), **params),
Conv3DTranspose(1, (9, 9, 9), **params)]
super(SynthesisTransformV1, self).__init__(layers, *args, **kwargs)
class AnalysisBlock(ResidualLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), strides=(2, 2, 2), activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format,
'filters': filters, 'kernel_size': kernel_size, 'use_bias': True}
        layers = [Conv3D(strides=strides, activation=activation(), **params),
Conv3D(activation=activation(), **params),
Conv3D(activation=activation(), **params)]
super(AnalysisBlock, self).__init__(layers, *args, data_format=data_format, **kwargs)
class SynthesisBlock(ResidualLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), strides=(2, 2, 2), activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format, 'use_bias': True, 'activation': activation,
'filters': filters, 'kernel_size': kernel_size}
layers = [Conv3DTranspose(strides=strides, **params),
Conv3DTranspose(**params),
Conv3DTranspose(**params)]
super(SynthesisBlock, self).__init__(layers, *args, data_format=data_format, **kwargs)
class AnalysisBlockV3(ResidualLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), strides=(2, 2, 2), activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format,
'filters': filters, 'kernel_size': kernel_size, 'use_bias': True}
        layers = [Conv3D(strides=strides, activation=activation(), **params),
AnalysisBlockV3Base(filters=filters, data_format=data_format),
AnalysisBlockV3Base(filters=filters, data_format=data_format)]
super(AnalysisBlockV3, self).__init__(layers, *args, data_format=data_format, **kwargs)
class AnalysisBlockV3Base(SequentialLayer):
def __init__(self, filters, data_format=None, activation=ACTIVATION, *args, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format, 'use_bias': True}
self.paths = [[Conv3D(kernel_size=(1,1,1), activation=activation(), filters=filters // 4, **params)],
[Conv3D(kernel_size=(1,1,1), activation=activation(), filters=filters // 2, **params),
[Conv3D(kernel_size=(1,1,3), activation=activation(), filters=filters // 4, **params), Conv3D(kernel_size=(3,3,1), activation=activation(), filters=filters // 4, **params),
Conv3D(kernel_size=(3,1,1), activation=activation(), filters=filters // 4, **params), Conv3D(kernel_size=(1,3,3), activation=activation(), filters=filters // 4, **params),
Conv3D(kernel_size=(1,3,1), activation=activation(), filters=filters // 4, **params), Conv3D(kernel_size=(3,1,3), activation=activation(), filters=filters // 4, **params)]]]
        super(AnalysisBlockV3Base, self).__init__(self.paths, *args, **kwargs)
def call(self, tensor, **kwargs):
path_outs = [tensor, tensor]
for p in self.paths[0]:
path_outs[0] = p(path_outs[0])
path_outs[1] = self.paths[1][0](path_outs[1])
sub_outs = [path_outs[1], path_outs[1], path_outs[1]]
sub_outs[0] = self.paths[1][1][0](sub_outs[0])
sub_outs[0] = self.paths[1][1][1](sub_outs[0])
sub_outs[1] = self.paths[1][1][2](sub_outs[1])
sub_outs[1] = self.paths[1][1][3](sub_outs[1])
sub_outs[2] = self.paths[1][1][4](sub_outs[2])
sub_outs[2] = self.paths[1][1][5](sub_outs[2])
return tf.concat((path_outs[0], sub_outs[0], sub_outs[1], sub_outs[2]), get_channel_axis(self.data_format))
class ResidualLayerV2(Layer):
def __init__(self, layers, residual_mode='add', data_format=None, *args, **kwargs):
super(ResidualLayerV2, self).__init__(*args, **kwargs)
assert residual_mode in ('add', 'concat')
self._layers = layers
self.residual_mode = residual_mode
self.data_format = conv_utils.normalize_data_format(data_format)
def call(self, tensor, **kwargs):
tensor = self._layers[0](tensor)
tensor1 = tensor
layer = self._layers[1]
tensor = layer(tensor)
tensor2 = tensor
layer = self._layers[2]
tensor = layer(tensor + tensor1)
return tensor1 + tensor2 + tensor
class AnalysisBlockV4(ResidualLayerV2):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), strides=(2, 2, 2), activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format,
'filters': filters, 'kernel_size': kernel_size, 'use_bias': True}
        layers = [Conv3D(strides=strides, activation=activation(), **params),
AnalysisBlockV3Base(filters=filters, data_format=data_format),
AnalysisBlockV3Base(filters=filters, data_format=data_format)]
super(AnalysisBlockV4, self).__init__(layers, *args, data_format=data_format, **kwargs)
class AnalysisTransformV2(SequentialLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), activation=tf.nn.relu, residual_mode='add',
*args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'kernel_size': kernel_size, 'activation': activation, 'data_format': data_format,
'residual_mode': residual_mode}
layers = [AnalysisBlock(filters // 2, **params),
AnalysisBlock(filters, **params),
AnalysisBlock(filters, **params),
Conv3D(filters, kernel_size, padding="same", use_bias=False, activation=None,
data_format=data_format)]
super(AnalysisTransformV2, self).__init__(layers, *args, **kwargs)
class SynthesisTransformV2(SequentialLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), activation=tf.nn.relu, residual_mode='add',
*args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'kernel_size': kernel_size, 'activation': activation, 'data_format': data_format,
'residual_mode': residual_mode}
layers = [SynthesisBlock(filters, **params),
SynthesisBlock(filters, **params),
SynthesisBlock(filters // 2, **params),
Conv3DTranspose(1, kernel_size, padding="same", use_bias=True, activation=activation,
data_format=data_format)]
super(SynthesisTransformV2, self).__init__(layers, *args, **kwargs)
class AnalysisTransformProgressiveV2(SequentialLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), activation=ACTIVATION, residual_mode='add',
*args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'kernel_size': kernel_size, 'activation': activation, 'data_format': data_format,
'residual_mode': residual_mode}
layers = [AnalysisBlock(filters // 4, **params),
AnalysisBlock(filters // 2, **params),
AnalysisBlock(filters, **params),
Conv3D(filters, kernel_size, padding="same", use_bias=False, activation=None,
data_format=data_format)]
super(AnalysisTransformProgressiveV2, self).__init__(layers, *args, **kwargs)
class SynthesisTransformProgressiveV2(SequentialLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), activation=tf.nn.relu, residual_mode='add',
*args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'kernel_size': kernel_size, 'activation': activation, 'data_format': data_format,
'residual_mode': residual_mode}
layers = [SynthesisBlock(filters, **params),
SynthesisBlock(filters // 2, **params),
SynthesisBlock(filters // 4, **params),
Conv3DTranspose(1, kernel_size, padding="same", use_bias=True, activation=activation,
data_format=data_format)]
super(SynthesisTransformProgressiveV2, self).__init__(layers, *args, **kwargs)
class AnalysisTransformProgressiveV3(SequentialLayer):
def __init__(self, filters, data_format=None, activation=ACTIVATION, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'activation': activation, 'data_format': data_format}
layers = [AnalysisBlock(filters // 4, **params),
AnalysisBlock(filters // 2, **params),
AnalysisBlockV3(filters, **params),
Conv3D(filters, (3, 3, 3), padding="same", use_bias=False, activation=None,
data_format=data_format)]
super(AnalysisTransformProgressiveV3, self).__init__(layers, *args, **kwargs)
class AnalysisTransformProgressiveV4(SequentialLayer):
def __init__(self, filters, data_format=None, activation=ACTIVATION, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'activation': activation, 'data_format': data_format}
layers = [AnalysisBlockV4(filters // 4, **params),
AnalysisBlockV4(filters // 2, **params),
AnalysisBlockV4(filters, **params),
Conv3D(filters, (3, 3, 3), padding="same", use_bias=False, activation=None,
data_format=data_format)]
super(AnalysisTransformProgressiveV4, self).__init__(layers, *args, **kwargs)
class AnalysisTransformProgressiveV5(SequentialLayer):
def __init__(self, filters, data_format=None, activation=ACTIVATION, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'activation': activation, 'data_format': data_format}
layers = [AnalysisBlock(filters // 4, **params),
AnalysisBlock(filters // 2, **params),
AnalysisBlockV4(filters, **params),
Conv3D(filters, (3, 3, 3), padding="same", use_bias=False, activation=None,
data_format=data_format)]
super(AnalysisTransformProgressiveV5, self).__init__(layers, *args, **kwargs)
class AnalysisTransformProgressiveV6(SequentialLayer):
def __init__(self, filters, data_format=None, activation=ACTIVATION, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'activation': activation, 'data_format': data_format}
layers = [AnalysisBlock(filters // 4, **params),
AnalysisBlockV4(filters // 2, **params),
AnalysisBlockV4(filters, **params),
Conv3D(filters, (3, 3, 3), padding="same", use_bias=False, activation=None,
data_format=data_format)]
super(AnalysisTransformProgressiveV6, self).__init__(layers, *args, **kwargs)
class HyperAnalysisTransform(SequentialLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format, 'filters': filters, 'kernel_size': kernel_size}
layers = [Conv3D(use_bias=True, activation=activation, **params),
Conv3D(use_bias=True, activation=activation, strides=(2, 2, 2), **params),
Conv3D(use_bias=False, activation=None, **params)]
super(HyperAnalysisTransform, self).__init__(layers, *args, **kwargs)
class HyperSynthesisTransform(SequentialLayer):
def __init__(self, filters, data_format=None, kernel_size=(3, 3, 3), activation=tf.nn.relu, *args, **kwargs):
data_format = conv_utils.normalize_data_format(data_format)
params = {'padding': 'same', 'data_format': data_format, 'activation': activation, 'use_bias': True,
'filters': filters, 'kernel_size': kernel_size}
layers = [Conv3DTranspose(**params),
Conv3DTranspose(strides=(2, 2, 2), **params),
Conv3DTranspose(**params)]
super(HyperSynthesisTransform, self).__init__(layers, *args, **kwargs)
class TransformType(Enum):
AnalysisTransformV1 = AnalysisTransformV1
AnalysisTransformV2 = AnalysisTransformV2
AnalysisTransformProgressiveV2 = AnalysisTransformProgressiveV2
AnalysisTransformProgressiveV3 = AnalysisTransformProgressiveV3
AnalysisTransformProgressiveV4 = AnalysisTransformProgressiveV4
AnalysisTransformProgressiveV5 = AnalysisTransformProgressiveV5
AnalysisTransformProgressiveV6 = AnalysisTransformProgressiveV6
SynthesisTransformV1 = SynthesisTransformV1
SynthesisTransformV2 = SynthesisTransformV2
SynthesisTransformProgressiveV2 = SynthesisTransformProgressiveV2
HyperAnalysisTransform = HyperAnalysisTransform
HyperSynthesisTransform = HyperSynthesisTransform
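
# A minimal pairing sketch (assumption: a channels-last 5-D voxel tensor `x`
# of shape [batch, depth, height, width, 1]):
#
#     analysis = TransformType.AnalysisTransformV1.value(filters=32)
#     synthesis = TransformType.SynthesisTransformV1.value(filters=32)
#     y = analysis(x)        # strided 3-D convolutions -> latent
#     x_hat = synthesis(y)   # transposed convolutions -> reconstruction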
| 55.076923
| 197
| 0.660189
| 15,736
| 0.95555
| 0
| 0
| 0
| 0
| 0
| 0
| 1,126
| 0.068375
|
1f0d5b67c4f91743453ccb056ca36b102ec5a878
| 6,485
|
py
|
Python
|
src/main.py
|
matthewb96/NetSpeedGraphs
|
51e6f6d4f24845e50f34ed56452a4fa454db189b
|
[
"MIT"
] | null | null | null |
src/main.py
|
matthewb96/NetSpeedGraphs
|
51e6f6d4f24845e50f34ed56452a4fa454db189b
|
[
"MIT"
] | 4
|
2021-06-08T21:23:15.000Z
|
2022-03-12T00:29:23.000Z
|
src/main.py
|
matthewb96/NetSpeedGraphs
|
51e6f6d4f24845e50f34ed56452a4fa454db189b
|
[
"MIT"
] | null | null | null |
"""
Main module for running NetSpeedGraphs.
"""
##### IMPORTS #####
# Standard imports
from pathlib import Path
from datetime import datetime, timedelta
from argparse import ArgumentParser
# Third party imports
import speedtest
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, save
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.models import (ColumnDataSource, DataTable, TableColumn,
NumberFormatter, DateFormatter)
from bokeh.layouts import grid
##### CONSTANTS #####
DATA_HEADER = ['Time', 'Ping (ms)', 'Download Speed (Mbs)',
'Upload Speed (Mbs)']
##### FUNCTIONS #####
def allTests():
""" Runs ping, download and upload speed tests.
Returns
-------
ping: float
        Ping value in milliseconds.
download: float
Download speed in Mbs.
upload: float
Upload speed in Mbs.
"""
st = speedtest.Speedtest()
server = st.get_best_server()
down = st.download()
up = st.upload()
return server['latency'], down / 1e6, up / 1e6
def plotGraph(data, path):
""" Plots a graph with the download and upload speeds and pings.
Parameters
----------
data: pandas.DataFrame
DataFrame containing 4 columns:
- Time: datetime objects for the time the test was ran.
- Ping: ping in milliseconds.
- Download: download speed in megabits per second.
- Upload: upload speed in megabits per second.
path: pathlib Path
Path to html file for outputting plots to.
See Also
--------
readResults
"""
# output to static HTML file
output_file(path)
# Use the pandas dataframe as the source
source = ColumnDataSource(data)
# Create a new plot and set x-axis type as datetime
netPlot = figure(title="Network Speeds", x_axis_type='datetime',
x_axis_label='Time of Test',
y_axis_label='Speed (Mbs) / Ping (ms)',
tools=['xpan', 'xwheel_zoom', 'box_select', 'reset'],
active_drag='xpan', active_scroll='xwheel_zoom',
sizing_mode='stretch_both')
# Change x axis tick format depending on zoom level
netPlot.xaxis.formatter = DatetimeTickFormatter(hours = ['%H:%M'],
days = ['%d/%m/%Y'],
months = ['%m/%Y'])
# Add the lines to the plot in different colours
WIDTH = 2
netPlot.line(x='Time', y='Ping', source=source, legend_label='Ping',
line_color='orange', line_width=WIDTH)
netPlot.line(x='Time', y='Download', source=source, legend_label='Download',
line_color='blue', line_width=WIDTH)
netPlot.line(x='Time', y='Upload', source=source, legend_label='Upload',
line_color='green', line_width=WIDTH)
# Create table
numFormatter = NumberFormatter(format='0.00')
columns = [
TableColumn(field="Time", title='Time',
formatter=DateFormatter(format="%Y-%m-%d %H:%M")),
TableColumn(field='Ping', title='Ping (ms)', formatter=numFormatter),
TableColumn(field="Download", title='Download Speed (Mbs)',
formatter=numFormatter),
TableColumn(field='Upload', title='Upload Speed (Mbs)',
formatter=numFormatter)
]
table = DataTable(source=source, columns=columns, width=400,
sizing_mode='stretch_height')
# Add plot to grid layout
layout = grid([netPlot, table], ncols=2)
# show the results
save(layout)
return
def storeResults(results, path):
""" Save the network speed results to CSV containing all results.
Will create a CSV if it doesn't exist, or append results to it if it does.
Parameters
----------
results: list-like of floats
The results from a single run of the network test in the following
        order: ping (milliseconds), download speed (Mbs) and upload speed (Mbs).
path: pathlib Path
Path to csv file for saving results to.
See Also
--------
allTests
"""
# Get current time of results
now = datetime.now()
# Create row of results
row = [now.isoformat(), *[str(i) for i in results]]
# Check if file exists and create it with header if not
# then append current results to it
header = not path.exists()
with open(path, 'at') as out:
if header:
out.writelines(','.join(DATA_HEADER) + '\n')
out.write(','.join(row) + '\n')
return
def readResults(path):
""" Read the csv containing all the results into a DataFrame.
The `DATA_PATH` and `DATA_HEADER` constants are used when reading the csv.
Parameters
----------
path: pathlib Path
Path to csv file for reading from.
Returns
-------
data: pandas.DataFrame
DataFrame containing 4 columns:
- Time: datetime objects for the time the test was ran.
- Ping: ping in milliseconds.
- Download: download speed in megabits per second.
- Upload: upload speed in megabits per second.
"""
data = pd.read_csv(path, usecols=DATA_HEADER, parse_dates=[0])
rename = {i: i.split()[0].strip().capitalize() for i in DATA_HEADER}
data = data.rename(columns=rename)
return data
def argParser():
""" Creates an ArgumentParser to get output locations.
Returns
-------
parser: argparse ArgumentParser
Parser to get the output file locations from the arguments.
"""
parser = ArgumentParser(description='Run a network test and update html plots.')
parser.add_argument('data_file', type=Path,
help='csv file for storing all network test results.')
parser.add_argument('html_file', type=Path,
help='html file for saving the output plots to.')
return parser
def main():
""" Runs the network test to get results then updates the csv and graphs. """
# Get file locations from arguments
parser = argParser()
args = parser.parse_args()
# Run a test and update the graphs
netRes = allTests()
storeResults(netRes, args.data_file)
results = readResults(args.data_file)
plotGraph(results, args.html_file)
return
##### MAIN #####
if __name__ == '__main__':
main()
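
# Example invocation (paths are illustrative), e.g. from a cron job:
#
#     python main.py results.csv plots.html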
| 33.25641
| 84
| 0.612336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,361
| 0.518273
|
1f0da5b719cc8ed4639299b06648e3a470d196da
| 7,478
|
py
|
Python
|
tests/planar_tests/test_region_in_multiregion.py
|
lycantropos/orient
|
01f4f67a717c5ee911d83756d455cc35e85ce817
|
[
"MIT"
] | 2
|
2020-11-01T00:25:09.000Z
|
2021-04-07T10:13:59.000Z
|
tests/planar_tests/test_region_in_multiregion.py
|
lycantropos/orient
|
01f4f67a717c5ee911d83756d455cc35e85ce817
|
[
"MIT"
] | null | null | null |
tests/planar_tests/test_region_in_multiregion.py
|
lycantropos/orient
|
01f4f67a717c5ee911d83756d455cc35e85ce817
|
[
"MIT"
] | null | null | null |
from typing import Tuple
from ground.base import Relation
from hypothesis import given
from orient.hints import (Multiregion,
Region)
from orient.planar import (contour_in_multiregion,
region_in_multiregion,
region_in_region)
from tests.utils import (MULTIPART_COMPOUND_RELATIONS,
equivalence,
implication,
reverse_contour,
reverse_contour_coordinates,
reverse_multiregion,
reverse_multiregion_coordinates,
reverse_multiregion_regions,
sequence_rotations)
from . import strategies
@given(strategies.multiregions_with_contours)
def test_basic(multiregion_with_region: Tuple[Multiregion, Region]) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
assert isinstance(result, Relation)
assert result in MULTIPART_COMPOUND_RELATIONS
@given(strategies.multiregions)
def test_self(multiregion: Multiregion) -> None:
assert all(region_in_multiregion(region, multiregion) is Relation.COMPONENT
for region in multiregion)
@given(strategies.size_three_or_more_multiregions_with_contours)
def test_step(multiregion_with_region: Tuple[Multiregion, Region]) -> None:
multiregion, region = multiregion_with_region
first_region, *rest_multiregion = multiregion
result = region_in_multiregion(region, rest_multiregion)
next_result = region_in_multiregion(region, multiregion)
relation_with_first_region = region_in_region(region, first_region)
assert equivalence(next_result is Relation.DISJOINT,
result is relation_with_first_region
is Relation.DISJOINT)
assert equivalence(next_result is Relation.TOUCH,
result is Relation.DISJOINT
and relation_with_first_region is Relation.TOUCH
or result is Relation.TOUCH
and relation_with_first_region in (Relation.DISJOINT,
Relation.TOUCH))
assert equivalence(next_result is Relation.COMPONENT,
result is Relation.COMPONENT
or bool(rest_multiregion)
and relation_with_first_region is Relation.EQUAL)
assert equivalence(next_result is Relation.OVERLAP,
result is Relation.OVERLAP
or relation_with_first_region is Relation.OVERLAP
or (bool(rest_multiregion)
and result is Relation.DISJOINT
or result is Relation.TOUCH)
and relation_with_first_region in (Relation.COVER,
Relation.ENCLOSES)
or result in (Relation.COVER, Relation.ENCLOSES)
and relation_with_first_region is Relation.DISJOINT)
assert equivalence(next_result is Relation.COVER,
(not rest_multiregion or result is Relation.COVER)
and relation_with_first_region is Relation.COVER)
assert equivalence(next_result is Relation.ENCLOSES,
result is Relation.ENCLOSES
and relation_with_first_region in (Relation.ENCLOSES,
Relation.COVER)
or (not rest_multiregion or result is Relation.COVER)
and relation_with_first_region is Relation.ENCLOSES)
assert equivalence(next_result is Relation.EQUAL,
not rest_multiregion
and relation_with_first_region is Relation.EQUAL)
assert equivalence(next_result is Relation.ENCLOSED,
result is Relation.ENCLOSED
or relation_with_first_region is Relation.ENCLOSED)
assert equivalence(next_result is Relation.WITHIN,
result is Relation.WITHIN
or relation_with_first_region is Relation.WITHIN)
@given(strategies.multiregions_with_contours)
def test_reversals(multiregion_with_region: Tuple[Multiregion, Region]
) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
assert result is region_in_multiregion(reverse_contour(region),
multiregion)
assert result is region_in_multiregion(region,
reverse_multiregion(multiregion))
assert result is region_in_multiregion(
region, reverse_multiregion_regions(multiregion))
assert result is region_in_multiregion(
reverse_contour_coordinates(region),
reverse_multiregion_coordinates(multiregion))
@given(strategies.multiregions_with_contours)
def test_rotations(multiregion_with_region: Tuple[Multiregion, Region]
) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
assert all(result is region_in_multiregion(region, rotated)
for rotated in sequence_rotations(multiregion))
@given(strategies.multiregions_with_contours)
def test_connection_with_contour_in_multiregion(multiregion_with_region
: Tuple[Multiregion, Region]
) -> None:
multiregion, region = multiregion_with_region
result = region_in_multiregion(region, multiregion)
contour_relation = contour_in_multiregion(region, multiregion)
assert implication(result is Relation.DISJOINT
or result is Relation.COVER,
contour_relation is Relation.DISJOINT)
assert implication(contour_relation is Relation.DISJOINT,
result is Relation.DISJOINT
or result is Relation.OVERLAP
or result is Relation.COVER)
assert implication(result is Relation.TOUCH
or result is Relation.ENCLOSES
or result is Relation.COMPOSITE,
contour_relation is Relation.TOUCH)
assert implication(contour_relation is Relation.TOUCH,
result is Relation.TOUCH
or result is Relation.ENCLOSES
or result is Relation.OVERLAP
or result is Relation.COMPOSITE)
assert implication(result is Relation.OVERLAP,
contour_relation is Relation.DISJOINT
or contour_relation is Relation.CROSS
or contour_relation is Relation.TOUCH)
assert implication(contour_relation is Relation.CROSS,
result is Relation.OVERLAP)
assert equivalence(result is Relation.COMPONENT
or result is Relation.EQUAL,
contour_relation is Relation.COMPONENT)
assert equivalence(result is Relation.ENCLOSED,
contour_relation is Relation.ENCLOSED)
assert equivalence(result is Relation.WITHIN,
contour_relation is Relation.WITHIN)
| 47.329114
| 79
| 0.622626
| 0
| 0
| 0
| 0
| 6,697
| 0.89556
| 0
| 0
| 0
| 0
|
1f0ed2213b59cdb0f244b760bfd1759ed4538c6a
| 11,676
|
py
|
Python
|
gui/src/core/parse_qca.py
|
retallickj/qca-embedding
|
96fd37a3ecd4beacb04ad1cb193d65d0b48ceab2
|
[
"MIT"
] | 1
|
2017-11-02T20:38:20.000Z
|
2017-11-02T20:38:20.000Z
|
gui/src/core/parse_qca.py
|
retallickj/qca-embedding
|
96fd37a3ecd4beacb04ad1cb193d65d0b48ceab2
|
[
"MIT"
] | null | null | null |
gui/src/core/parse_qca.py
|
retallickj/qca-embedding
|
96fd37a3ecd4beacb04ad1cb193d65d0b48ceab2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#---------------------------------------------------------
# Name: parse_qca.py
# Purpose: Parsing functions for QCADesigner files
# Author: Jacob Retallick
# Created: 2015.10.22
# Last Modified: 2015.10.22
#---------------------------------------------------------
# NOTE
# the original parse script no longer seems to work (change in networkx?)
# for the purposes of the embedder, we don't need to consider clock zones so
# I have simplified the parsing script to remove that functionality.
import re
import networkx as nx
import numpy as np
from auxil import getEk, CELL_FUNCTIONS, CELL_MODES
from itertools import combinations
from functools import reduce  # `reduce` is no longer a builtin in Python 3
## general global parameters
R_MAX = 2.1 # max cell-cell interaction range (rel to grid spacing)
EK_THRESH = 1e-3 # threshold for included Ek, relative to max(abs(Ek))
X_ROUND = 4 # places to round to when deciding if cell is rotated
### FILE PROCESSING
def build_hierarchy(fn):
'''Build a dict hierarchy containing all objects, their parameters, and
    children.'''
fp = open(fn, 'r')
linemap = lambda s: s.replace(',', '.')
# general re expression. may need to change if future format changes
    re_start = re.compile(r'^\[.+\]$')
    re_term = re.compile(r'^\[#.+\]$')
hier = {'label': 'Hierarchy', 'children': [], 'vars': {}}
key_stack = ['Hierarchy'] # stack of active keys, pop of top of stack
dict_stack = [hier] # stack of corresponding dict objects.
line_cnt = 0
for line in fp:
line = linemap(line)
line_cnt += 1
line = line.strip() # remove endline and possible whitespace
# must check object termination first
if re_term.match(line):
key = line[2:-1]
if key_stack[-1] == key:
d = dict_stack.pop()
key_stack.pop()
try:
dict_stack[-1]['children'].append(d)
                except IndexError:
print('Somehow over-popped dict_stack...')
return None
else:
print('Start-end mismatch in line {0}'.format(line_cnt))
return None
# for a new object, create a new dict template
elif re_start.match(line):
key = line[1:-1]
key_stack.append(key)
d = {'label': key, 'children': [], 'vars': {}}
dict_stack.append(d)
# otherwise check for new variable to add to most recent dict
else:
if '=' in line:
var, val = line.split('=')
dict_stack[-1]['vars'][var] = val
fp.close()
return hier
def proc_hierarchy(hier):
'''Process the extracted data hierarchy to extract useful information. In
    the current implementation, we are interested in the overall cell grid spacing
    (for deciding on the range of included cells) and the properties of each
cell in the circuit'''
cells = []
spacing = None
# hierarchy should only have two children: VERSION and TYPE:DESIGN. The
    # former might be useful in later implementations for selecting formatting
# options but for now all we care about are the DESIGN objects
hier = [child for child in hier['children']
if child['label'] == 'TYPE:DESIGN'][0]
# for now assert that there can be only one cell layer, no vertical x-over
layers = [child for child in hier['children']
if child['label'] == 'TYPE:QCADLayer']
# isolate cell layers
cell_layers = [layer for layer in layers if layer['vars']['type'] == '1']
# merge cell layers, will lead to qdot conflict if vertical x-over
cell_dicts = [layer['children'] for layer in cell_layers]
cell_dicts = reduce(lambda x, y: x+y, cell_dicts)
# get grid-spacing (average cell bounding box)
cx = float(cell_dicts[0]['vars']['cell_options.cxCell'])
cy = float(cell_dicts[0]['vars']['cell_options.cyCell'])
spacing = np.sqrt(cx*cy)
# create cell objects
cells = []
for cd in cell_dicts:
cell = {}
# cell type
cell['cf'] = CELL_FUNCTIONS[cd['vars']['cell_function']]
cell['cm'] = CELL_MODES[cd['vars']['cell_options.mode']]
cell['clk'] = int(cd['vars']['cell_options.clock'])
# just for show sol
cell['cx'] = float(cd['vars']['cell_options.cxCell'])
cell['cy'] = float(cd['vars']['cell_options.cyCell'])
# position, first child will be the QCADesignObject
design_object = cd['children'][0]
cell['x'] = float(design_object['vars']['x'])
cell['y'] = float(design_object['vars']['y'])
# quantum dots
qdot_dicts = [child for child in cd['children']
if child['label'] == 'TYPE:CELL_DOT']
qdots = []
for d in qdot_dicts:
dot = {}
dot['x'] = float(d['vars']['x'])
dot['y'] = float(d['vars']['y'])
dot['c'] = float(d['vars']['charge'])
qdots.append(dot)
cell['qdots'] = qdots
# determine if cell is rotated, will have three x values
x = set([round(dt['x'], X_ROUND) for dt in qdots])
if len(x) == 3:
cell['rot'] = True
elif len(x) == 2:
cell['rot'] = False
else:
print('Could not decide cell rotation')
cell['rot'] = False
# keep track of polarization if cell is fixed: don't rely on labels
if cell['cf'] == CELL_FUNCTIONS['QCAD_CELL_FIXED']:
pol = qdots[0]['c']+qdots[2]['c']-qdots[1]['c']-qdots[3]['c']
pol /= qdots[0]['c']+qdots[2]['c']+qdots[1]['c']+qdots[3]['c']
cell['pol'] = pol
cells.append(cell)
return cells, spacing
## CIRCUIT PROCESSING
def build_J(cells, spacing, r_max=R_MAX):
'''Build the J matrix for the given circuit. Restricts the interaction
    distance to r_max but does not apply any adjacency constraints'''
N = len(cells)
    # construct connectivity matrix
J = np.zeros([N, N], dtype=float)
DR = r_max*spacing
for i,j in combinations(range(N), 2):
Ek = getEk(cells[i], cells[j], DR=DR)
if Ek:
J[i,j] = J[j,i] = Ek
# remove very weak interactions
J = J*(np.abs(J) >= np.max(np.abs(J)*EK_THRESH))
return J
def zone_cells(cells, spacing, show=False):
'''Split cells into clock zones. Distinguishes disjoint zones with the
same zone index'''
N = len(cells) # number of cells
# construct connectivity matrix
J = np.zeros([N, N], dtype=float)
DR = R_MAX*spacing
    for i in range(N-1):
        for j in range(i+1, N):
Ek = getEk(cells[i], cells[j], DR=DR)
if Ek:
J[i, j] = Ek
J[j, i] = Ek
# remove very weak interactions
J = J * (np.abs(J) >= np.max(np.abs(J)*EK_THRESH))
# make full cell connectivity Graph
G = nx.Graph(J)
# if show:
# plt.figure(0)
# plt.clf()
# nx.draw_graphviz(G)
# plt.show()
# get indices for each clock index
clk = [cell['clk'] for cell in cells]
clk_ind = list(set(clk)) # will sort by default
inds = [[i for i, x in enumerate(clk) if x == ind] for ind in clk_ind]
# split graph into sub-graphs with the same clock indices
sub_G = {ind: G.subgraph(inds[ind]) for ind in clk_ind}
# split disconnected components for each label graph
    # keep components as sorted lists (networkx 2.x yields sets)
    sub_ind = {ind: [sorted(c) for c in nx.connected_components(sub_G[ind])]
               for ind in clk_ind}
## find zone order
# create abstract zone connectivity graph
G = nx.DiGraph()
# nodes
for clk in clk_ind:
        for i in range(len(sub_ind[clk])):
key = (clk, i)
G.add_node(key, inds=sub_ind[clk][i])
# edges
for clk in clk_ind:
adj_clk = 3 if clk == 0 else clk-1
if not adj_clk in sub_ind:
continue
        for i in range(len(sub_ind[clk])):
            k1 = (clk, i)
            for j in range(len(sub_ind[adj_clk])):
k2 = (adj_clk, j)
                if np.any(J[G.nodes[k1]['inds'], :][:, G.nodes[k2]['inds']]):
G.add_edge(k2, k1)
# if show:
# plt.figure(1)
# plt.clf()
# nx.draw_graphviz(G)
# plt.show()
# find input nodes, have no predecessors
    predecs = {n: len(list(G.predecessors(n))) for n in G.nodes()}
    inputs = [ky for ky, val in predecs.items() if val == 0]
# expand from inputs
visited = {key: False for key in G.nodes()}
nodes = inputs
order = [nodes]
while nodes:
new_nodes = set()
for node in nodes:
new_nodes.update(G.successors(node))
visited[node] = True
# remove already visited nodes from new nodes
new_nodes = [node for node in new_nodes if not visited[node]]
nodes = new_nodes
if nodes:
order.append(nodes)
# find feedback interactions
feedback = {}
    for n in G.nodes():
for p in G.predecessors(n):
pshell = 0
nshell = 0
pzone = 0
nzone = 0
for shell in order:
if p in shell:
pshell = order.index(shell)
pzone = shell.index(p)
if n in shell:
nshell = order.index(shell)
nzone = shell.index(n)
if pshell > nshell:
if (pshell,pzone) in feedback:
feedback[(pshell,pzone)].append((nshell,nzone))
else:
feedback[(pshell,pzone)] = [(nshell,nzone)]
# reformat order list to contain zone indices
form_func = lambda n: sub_ind[n[0]][n[1]]
order = [[form_func(zone) for zone in shell] for shell in order]
return order, J, feedback
def reorder_cells(cells, J, flipy=False):
'''Renumber cells by position rather than the default QCADesigner placement
order. Cells ordered by the tuple (zone, y, x)'''
keys = {}
ysgn = -1 if flipy else 1
# assign sortable tuples for each cell
for ind, cell in enumerate(cells):
keys[ind] = (ysgn*cell['y'], cell['x'])
    order = list(zip(*sorted([(keys[i], i) for i in keys])))[1]
# relabel cells and reorder the J matrix
cells = [cells[i] for i in order]
J = J[order, :][:, order]
for i in range(len(cells)):
cells[i]['num'] = i
cells[i]['number'] = i
return cells, J
## MAIN FUNCTION
def parse_qca_file(fn, verbose=False):
'''Parse a QCADesigner file to extract cell properties. Returns an ordered
list of cells, the QCADesigner grid spacing in nm, a list structure of the
indices of each clock zone (propogating from inputs), and a coupling matrix
J which contains the Ek values for cells within a radius of R_MAX times the
grid spacing'''
# build data hierarchy
hier = build_hierarchy(fn)
# extract useful information from data hierarchy
cells, spacing = proc_hierarchy(hier)
if verbose:
print('Parsed QCA file...')
for cell in cells:
cell['clk'] = 0
# construct J matrix
J = build_J(cells, spacing)
# reorder cells by zone and position
cells, J = reorder_cells(cells, J)
return cells, spacing, J
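
## EXAMPLE USAGE
# A minimal usage sketch; 'circuit.qca' is an assumed QCADesigner file path.
if __name__ == '__main__':
    cells, spacing, J = parse_qca_file('circuit.qca', verbose=True)
    print('{0} cells, grid spacing {1:.2f} nm'.format(len(cells), spacing))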
| 32.34349
| 80
| 0.554642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,603
| 0.394227
|
1f0fed3d680bffcc5eeafee6ce65b7395cfecca1
| 7,391
|
py
|
Python
|
docs/Tutorial/1-glm/plot_1_LinearRegression.py
|
bbayukari/abess
|
3b21b0a58cac6c1464ec9403ffbe4902fee7b890
|
[
"Intel"
] | null | null | null |
docs/Tutorial/1-glm/plot_1_LinearRegression.py
|
bbayukari/abess
|
3b21b0a58cac6c1464ec9403ffbe4902fee7b890
|
[
"Intel"
] | null | null | null |
docs/Tutorial/1-glm/plot_1_LinearRegression.py
|
bbayukari/abess
|
3b21b0a58cac6c1464ec9403ffbe4902fee7b890
|
[
"Intel"
] | null | null | null |
"""
=================
Linear Regression
=================
In this tutorial, we are going to demonstrate how to use the ``abess`` package to carry out best subset selection
in linear regression with both simulated data and real data.
"""
###############################################################################
#
# Our package ``abess`` implements a polynomial algorithm in the following best-subset selection problem:
#
# .. math::
# \min_{\beta\in \mathbb{R}^p} \frac{1}{2n} ||y-X\beta||^2_2,\quad \text{s.t.}\ ||\beta||_0\leq s,
#
#
# where :math:`\| \cdot \|_2` is the :math:`\ell_2` norm, :math:`\|\beta\|_0=\sum_{i=1}^pI( \beta_i\neq 0)`
# is the :math:`\ell_0` norm of :math:`\beta`, and the sparsity level :math:`s`
# is an unknown non-negative integer to be determined.
# Next, we present an example to show that the ``abess`` package can obtain an optimal estimate.
#
# Toward optimality: adaptive best-subset selection
# ^^^^^^^^^^^^^^^^^^^^^^
#
# Synthetic dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We generate a design matrix :math:`X` containing :math:`n = 300` observations and each observation has :math:`p = 1000` predictors.
# The response variable :math:`y` is linearly related to the first, second, and fifth predictors in :math:`X`:
#
# .. math::
# y = 3X_1 + 1.5X_2 + 2X_5 + \epsilon,
#
# where :math:`\epsilon` is a standard normal random variable.
import numpy as np
from abess.datasets import make_glm_data
np.random.seed(0)
n = 300
p = 1000
true_support_set = [0, 1, 4]
true_coef = np.array([3, 1.5, 2])
real_coef = np.zeros(p)
real_coef[true_support_set] = true_coef
data1 = make_glm_data(n=n, p=p, k=len(true_coef), family="gaussian", coef_=real_coef)
print(data1.x.shape)
print(data1.y.shape)
# %%
# This dataset is high-dimensional and poses a large challenge for subset selection.
# As a typical example, it mimics the data that appear in real-world modern scientific research and data mining,
# and serves as a good quick example for demonstrating the power of the ``abess`` library.
#
# Optimality
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The optimality of subset selection means:
#
# - ``true_support_set`` (i.e. ``[0, 1, 4]``) can be exactly identified;
# - the estimated coefficients are the `ordinary least squares (OLS) estimator <https://en.wikipedia.org/wiki/Ordinary_least_squares>`__ under the true subset, and are therefore very close to ``true_coef = np.array([3, 1.5, 2])``.
#
# To understand the second criterion, we take a look on the estimation given by ``scikit-learn`` library:
from sklearn.linear_model import LinearRegression as SKLLinearRegression
sklearn_lr = SKLLinearRegression()
sklearn_lr.fit(data1.x[:, [0, 1, 4]], data1.y)
print("OLS estimator: ", sklearn_lr.coef_)
# %%
# The fitted coefficients ``sklearn_lr.coef_`` are the OLS estimator
# when the true support set is known.
# They are very close to ``true_coef``, and are hard to improve upon under a finite sample size.
# %%
# Adaptive Best Subset Selection
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The adaptive best subset selection (ABESS) algorithm is a very powerful method for selecting the best subset.
# We will illustrate its power by showing that it can reach optimality.
#
# The following code shows the simple syntax for using ABESS algorithm via ``abess`` library.
from abess import LinearRegression
model = LinearRegression()
model.fit(data1.x, data1.y)
# %%
# The ``LinearRegression`` class in ``abess`` is designed for selecting the best subset under the linear model,
# and can be imported by: ``from abess import LinearRegression``.
# Following syntax similar to ``scikit-learn``'s, we can fit the data via the ABESS algorithm.
#
# Next, we are going to see that the above approach can successfully recover the true set ``np.array([0, 1, 4])``.
# The fitted coefficients are stored in ``model.coef_``.
# We use ``np.nonzero`` function to find the selected subset of ``abess``,
# and we can extract the non-zero entries in ``model.coef_`` which is the coefficients estimation for the selected predictors.
#
ind = np.nonzero(model.coef_)
print("estimated non-zero: ", ind)
print("estimated coef: ", model.coef_[ind])
# %%
# From the result, we know that ``abess`` exactly found the true set ``np.array([0, 1, 4])`` among all 1000 predictors.
# Besides, their estimated coefficients are quite close to the real ones,
# and are exactly the same as the estimation ``sklearn_lr.coef_`` given by ``scikit-learn``.
###############################################################################
# Real data example
# ^^^^^^^^^^^^^^^^^
#
# Hitters Dataset
# ~~~~~~~~~~~~~~~
# Now we focus on real data on the `Hitters dataset <https://www.kaggle.com/floser/hitters>`__.
# We hope to use several predictors related to the performance of
# the baseball athletes last year to predict their salary.
#
# First, let's have a look at this dataset. There are 19 variables except
# `Salary` and 322 observations.
import os
import pandas as pd
data2 = pd.read_csv(os.path.join(os.getcwd(), 'Hitters.csv'))
print(data2.shape)
print(data2.head(5))
# %%
# Since the dataset contains some missing values, we simply drop those rows with missing values.
# Then we have 263 observations remain:
data2 = data2.dropna()
print(data2.shape)
# %%
# What is more, before fitting, we need to convert the categorical
# variables into dummy variables:
data2 = pd.get_dummies(data2)
data2 = data2.drop(['League_A', 'Division_E', 'NewLeague_A'], axis=1)
print(data2.shape)
print(data2.head(5))
###############################################################################
# Model Fitting
# ~~~~~~~~~~~~~
# As with the simulated data, an adaptive best subset model can be fitted
# easily:
x = np.array(data2.drop('Salary', axis=1))
y = np.array(data2['Salary'])
model = LinearRegression(support_size=range(20))
model.fit(x, y)
# %%
# The result can be shown as follows:
ind = np.nonzero(model.coef_)
print("non-zero:\n", data2.columns[ind])
print("coef:\n", model.coef_)
# %%
# Automatically, variables `Hits`, `CRBI`, `PutOuts`, `League\_N` are
# chosen in the model (the chosen sparsity level is 4).
###############################################################################
# More on the results
# ~~~~~~~~~~~~~~~~~~~
# We can also plot the path of abess process:
import matplotlib.pyplot as plt
coef = np.zeros((20, 19))
ic = np.zeros(20)
for s in range(20):
model = LinearRegression(support_size=s)
model.fit(x, y)
coef[s, :] = model.coef_
ic[s] = model.ic_
for i in range(19):
plt.plot(coef[:, i], label=i)
plt.xlabel('support_size')
plt.ylabel('coefficients')
plt.title('ABESS Path')
plt.show()
# %%
# Besides, we can also generate a graph about the tuning parameter.
# Remember that we used the default EBIC to tune the support size.
plt.plot(ic, 'o-')
plt.xlabel('support_size')
plt.ylabel('EBIC')
plt.title('Model selection via EBIC')
plt.show()
# %%
# Under the EBIC criterion, a subset with support size 3 has the lowest value,
# so the process adaptively chooses 3 variables.
# Note that under other information criteria, the result may be different.
###############################################################################
# R tutorial
# ^^^^^^^^^^
# For R tutorial, please view
# https://abess-team.github.io/abess/articles/v01-abess-guide.html.
| 35.028436
| 221
| 0.64998
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,723
| 0.77432
|
1f11e9df2b051fcb60ef9a9128d6a058c4f210e2
| 2,386
|
py
|
Python
|
pyppeteer/tracing.py
|
cr1pt/pypyteer
|
b3aade3741b385f2e1dde600b501776f1f5e8479
|
[
"MIT"
] | null | null | null |
pyppeteer/tracing.py
|
cr1pt/pypyteer
|
b3aade3741b385f2e1dde600b501776f1f5e8479
|
[
"MIT"
] | null | null | null |
pyppeteer/tracing.py
|
cr1pt/pypyteer
|
b3aade3741b385f2e1dde600b501776f1f5e8479
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tracing module."""
import asyncio
from pathlib import Path
from typing import Any, Awaitable
from pyppeteer.connection import Session
class Tracing(object):
"""Tracing class."""
def __init__(self, client: Session) -> None:
"""Make new tracing object."""
self._client = client
self._recording = False
self._path = ''
async def start(self, options: dict = None, **kwargs: Any) -> None:
"""Start."""
options = options or dict()
options.update(kwargs)
categoriesArray = [
'-*', 'devtools.timeline', 'v8.execute',
'disabled-by-default-devtools.timeline',
'disabled-by-default-devtools.timeline.frame', 'toplevel',
'blink.console', 'blink.user_timing', 'latencyInfo',
'disabled-by-default-devtools.timeline.stack',
'disabled-by-default-v8.cpu_profiler',
]
if 'screenshots' in options:
categoriesArray.append('disabled-by-default-devtools.screenshot')
self._path = options.get('path', '')
self._recording = True
await self._client.send('Tracing.start', {
'transferMode': 'ReturnAsStream',
'categories': ','.join(categoriesArray),
})
async def stop(self) -> Awaitable:
"""Stop."""
contentPromise = asyncio.get_event_loop().create_future()
self._client.once(
'Tracing.tracingComplete',
lambda event: asyncio.ensure_future(
self._readStream(event.get('stream'), self._path)
).add_done_callback(
lambda fut: contentPromise.set_result(
fut.result()) # type: ignore
)
)
await self._client.send('Tracing.end')
self._recording = False
return await contentPromise
    async def _readStream(self, handle: str, path: str) -> None:
        """Drain the tracing result stream, saving it to ``path`` if given."""
        eof = False
        f = Path(path).open('w') if path else None
        try:
            while not eof:
                response = await self._client.send('IO.read', {
                    'handle': handle
                })
                eof = response.get('eof', False)
                if f:
                    f.write(response.get('data', ''))
        finally:
            if f:
                f.close()
        await self._client.send('IO.close', {'handle': handle})
| 32.684932
| 77
| 0.559933
| 2,196
| 0.920369
| 0
| 0
| 0
| 0
| 1,956
| 0.819782
| 632
| 0.264878
|
1f126ef87161ba2d8fbb4e598c5bbb09c32019bd
| 2,627
|
py
|
Python
|
src/wai/annotations/isp/map_labels/component/_MapLabels.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | null | null | null |
src/wai/annotations/isp/map_labels/component/_MapLabels.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | 3
|
2021-06-30T23:42:47.000Z
|
2022-03-01T03:45:07.000Z
|
src/wai/annotations/isp/map_labels/component/_MapLabels.py
|
waikato-ufdl/wai-annotations-core
|
bac3429e9488efb456972c74f9d462f951c4af3d
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Dict
from wai.common.adams.imaging.locateobjects import LocatedObjects
from wai.common.cli.options import TypedOption
from ....core.component import ProcessorComponent
from ....core.stream import ThenFunction, DoneFunction
from ....core.stream.util import RequiresNoFinalisation
from ....core.util import InstanceState
from ....domain.image.object_detection import ImageObjectDetectionInstance
from ....domain.image.object_detection.util import get_object_label, set_object_label
class MapLabels(
RequiresNoFinalisation,
ProcessorComponent[ImageObjectDetectionInstance, ImageObjectDetectionInstance]
):
"""
Processes a stream of object-detection instances, mapping labels
from one set to another.
"""
label_mapping = TypedOption(
"-m", "--mapping",
type=str,
metavar="old=new", action='concat',
help="mapping for labels, for replacing one label string with another (eg when fixing/collapsing labels)"
)
@InstanceState
def label_table(self) -> Dict[str, str]:
label_table = {}
for map_string in self.label_mapping:
old, new = map_string.split("=")
# Make sure we don't double-map a label
if old in label_table:
raise ValueError(f"Multiple mappings specified for label '{old}': "
f"{label_table[old]}, {new}")
label_table[old] = new
return label_table
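    # Illustrative example (hypothetical labels): with the options
    # "-m cat=feline -m dog=canine" on the command line, label_table
    # resolves to {"cat": "feline", "dog": "canine"}.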
def process_element(
self,
element: ImageObjectDetectionInstance,
then: ThenFunction[ImageObjectDetectionInstance],
done: DoneFunction
):
# Apply the label mapping
self.apply_label_mapping(element.annotations)
then(element)
def apply_label_mapping(self, located_objects: LocatedObjects):
"""
Maps the labels in the located objects from their current value to
their new value.
:param located_objects: The parsed objects
"""
# Do nothing if no mapping provided
if len(self.label_table) == 0:
return
# Process each object
for located_object in located_objects:
# Get the object's current label
label: Optional[str] = get_object_label(located_object, None)
# If the object doesn't have a label, skip it
if label is None:
continue
# If there is a mapping for this label, change it
if label in self.label_table:
set_object_label(located_object, self.label_table[label])
| 33.679487
| 113
| 0.650552
| 2,113
| 0.80434
| 0
| 0
| 474
| 0.180434
| 0
| 0
| 739
| 0.281309
|
1f12f143e8af312aabb54b8c1b32c57f8566b95e
| 793
|
py
|
Python
|
python/tHome/msgHub/__init__.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 18
|
2016-04-17T19:39:28.000Z
|
2020-11-19T06:55:20.000Z
|
python/tHome/msgHub/__init__.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 11
|
2018-09-07T18:34:41.000Z
|
2021-05-02T04:44:54.000Z
|
python/tHome/msgHub/__init__.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 12
|
2016-10-31T12:29:08.000Z
|
2021-12-28T12:18:28.000Z
|
#===========================================================================
#
# msgHub package
#
#===========================================================================
__doc__ = """Zero-MQ Message Hub
The msgHub is a pub/sub forwarder. All of the various data producers
send messages to the msgHub as a single point of contact for the
producers. Consumers of the messages read from the hub as a single
point of contact for the consumers.
Logging object name: tHome.msgHub
"""
#===========================================================================
#===========================================================================
from . import cmdLine
from . import config
from .start import start
#===========================================================================
| 28.321429
| 76
| 0.387137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 699
| 0.881463
|
1f14096bca569e364e31b3699b308c6507e8fe1b
| 8,221
|
py
|
Python
|
nlg/app.py
|
samrudh/gramex-nlg
|
fb1b1ce14347947c8644adda7bd63856dcb2ce3d
|
[
"MIT"
] | null | null | null |
nlg/app.py
|
samrudh/gramex-nlg
|
fb1b1ce14347947c8644adda7bd63856dcb2ce3d
|
[
"MIT"
] | null | null | null |
nlg/app.py
|
samrudh/gramex-nlg
|
fb1b1ce14347947c8644adda7bd63856dcb2ce3d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Module for gramex exposure. This shouldn't be imported anywhere, only for use
with gramex.
"""
import glob
import json
import os
import os.path as op
import pandas as pd
from six.moves.urllib import parse
from tornado.template import Template
from gramex.apps.nlg import grammar
from gramex.apps.nlg import nlgutils as utils
from gramex.apps.nlg import templatize
from gramex.config import variables
DATAFILE_EXTS = {'.csv', '.xls', '.xlsx', '.tsv'}
nlg_path = op.join(variables['GRAMEXDATA'], 'nlg')
if not op.isdir(nlg_path):
os.mkdir(nlg_path)
def clean_anonymous_files():
"""Remove all files uploaded by anonymous users.
This may be used at startup when deploying the app."""
import shutil
anon_dir = op.join(nlg_path, 'anonymous')
if op.isdir(anon_dir):
shutil.rmtree(anon_dir)
def is_user_authenticated(handler):
"""Check if the current user is authenticated."""
current_user = getattr(handler, 'current_user', False)
return bool(current_user)
def get_user_dir(handler):
if is_user_authenticated(handler):
dirpath = op.join(nlg_path, handler.current_user.id)
else:
dirpath = op.join(nlg_path, 'anonymous')
return dirpath
def render_live_template(handler):
"""Given a narrative ID and df records, render the template."""
payload = json.loads(handler.request.body)
orgdf = get_original_df(handler)
nrid = payload['nrid']
if not nrid.endswith('.json'):
nrid += '.json'
df = pd.DataFrame.from_records(payload['data'])
nrpath = op.join(nlg_path, handler.current_user.id, nrid)
with open(nrpath, 'r') as fout: # noqa: No encoding for json
templates = json.load(fout)
narratives = []
for t in templates['config']:
tmpl = utils.add_html_styling(t['template'], payload['style'])
s = Template(tmpl).generate(df=df, fh_args=t.get('fh_args', {}),
G=grammar, U=utils, orgdf=orgdf)
rendered = s.decode('utf8')
narratives.append(rendered)
return '\n'.join(narratives)
def get_original_df(handler):
"""Get the original dataframe which was uploaded to the webapp."""
data_dir = get_user_dir(handler)
with open(op.join(data_dir, 'meta.cfg'), 'r') as fout: # noqa: No encoding for json
meta = json.load(fout)
dataset_path = op.join(data_dir, meta['dsid'])
return pd.read_csv(dataset_path, encoding='utf-8')
def render_template(handler):
"""Render a set of templates against a dataframe and formhandler actions on it."""
orgdf = get_original_df(handler)
payload = json.loads(handler.request.body.decode('utf8'))
fh_args = payload['args']
templates = payload['template']
df = pd.DataFrame.from_records(payload['data'])
# fh_args = {k: [x.lstrip('-') for x in v] for k, v in fh_args.items()}
resp = []
for t in templates:
rendered = Template(t).generate(
orgdf=orgdf, df=df, fh_args=fh_args, G=grammar, U=utils).decode('utf8')
rendered = rendered.replace('-', '')
# grmerr = utils.check_grammar(rendered)
resp.append({'text': rendered}) # , 'grmerr': grmerr})
return json.dumps(resp)
def process_text(handler):
"""Process English text in the context of a df and formhandler arguments
to templatize it."""
payload = json.loads(handler.request.body.decode('utf8'))
df = pd.DataFrame.from_records(payload['data'])
args = payload.get('args', {}) or {}
resp = []
for t in payload['text']:
# grammar_errors = yield utils.check_grammar(t)
replacements, t, infl = templatize(t, args.copy(), df)
resp.append({
'text': t, 'tokenmap': replacements, 'inflections': infl,
'fh_args': args
# 'grmerr': json.loads(grammar_errors.decode('utf8'))['matches']
})
return json.dumps(resp)
def read_current_config(handler):
"""Read the current data and narrative IDs written to the session file."""
user_dir = get_user_dir(handler)
meta_path = op.join(user_dir, 'meta.cfg')
if not op.isdir(user_dir):
os.mkdir(user_dir)
if not op.isfile(meta_path):
return {}
with open(meta_path, 'r') as fout: # noqa: No encoding for json
meta = json.load(fout)
return meta
def get_dataset_files(handler):
"""Get all filenames uploaded by the user.
Parameters
----------
handler : tornado.RequestHandler
Returns
-------
list
List of filenames.
"""
files = glob.glob('{}/*'.format(get_user_dir(handler)))
return [f for f in files if op.splitext(f)[-1].lower() in DATAFILE_EXTS]
def get_narrative_config_files(handler):
"""Get list of narrative config files generated by the user.
Parameters
----------
handler : tornado.RequestHandler
Returns
-------
list
List of narrative configurations.
"""
return glob.glob('{}/*.json'.format(get_user_dir(handler)))
def save_config(handler):
"""Save the current narrative config.
(to $GRAMEXDATA/{{ handler.current_user.id }})"""
payload = {}
for k in ['config', 'name', 'dataset']:
payload[k] = parse.unquote(handler.args[k][0])
payload['config'] = json.loads(payload['config'])
nname = payload['name']
if not nname.endswith('.json'):
nname += '.json'
payload['dataset'] = parse.unquote(handler.args['dataset'][0])
fpath = op.join(nlg_path, handler.current_user.id, nname)
with open(fpath, 'w') as fout: # noqa: No encoding for json
json.dump(payload, fout, indent=4)
def get_gramopts(handler):
"""Find all Grammar and token inflection options from the NLG library.
Primarily used for creating the select box in the template settings dialog."""
funcs = {}
for attrname in dir(grammar):
obj = getattr(grammar, attrname)
if getattr(obj, 'gramopt', False):
funcs[obj.fe_name] = {'source': obj.source, 'func_name': attrname}
return funcs
def init_form(handler):
"""Process input from the landing page and write the current session config."""
meta = {}
data_dir = get_user_dir(handler)
if not op.isdir(data_dir):
os.makedirs(data_dir)
# handle dataset
data_file = handler.request.files.get('data-file', [{}])[0]
if data_file:
# TODO: Unix filenames may not be valid Windows filenames.
outpath = op.join(data_dir, data_file['filename'])
with open(outpath, 'wb') as fout:
fout.write(data_file['body'])
else:
dataset = handler.args['dataset'][0]
outpath = op.join(data_dir, dataset)
# shutil.copy(outpath, fh_fpath)
meta['dsid'] = op.basename(outpath)
# handle config
config_name = handler.get_argument('narrative', '')
if config_name:
config_path = op.join(data_dir, config_name)
# shutil.copy(config_path, op.join(local_data_dir, 'config.json'))
meta['nrid'] = op.basename(config_path)
# write meta config
with open(op.join(data_dir, 'meta.cfg'), 'w') as fout: # NOQA
json.dump(meta, fout, indent=4)
def edit_narrative(handler):
"""Set the handler's narrative and dataset ID to the current session."""
user_dir = op.join(nlg_path, handler.current_user.id)
dataset_name = handler.args.get('dsid', [''])[0]
narrative_name = handler.args.get('nrid', [''])[0] + '.json'
with open(op.join(user_dir, 'meta.cfg'), 'w') as fout: # NOQA: no encoding for JSON
json.dump({'dsid': dataset_name, 'nrid': narrative_name}, fout, indent=4)
def get_init_config(handler):
"""Get the initial default configuration for the current user."""
user_dir = get_user_dir(handler)
metapath = op.join(user_dir, 'meta.cfg')
if op.isfile(metapath):
with open(metapath, 'r') as fout: # NOQA: no encoding for JSON
meta = json.load(fout)
config_file = op.join(user_dir, meta.get('nrid', ''))
if op.isfile(config_file):
with open(config_file, 'r') as fout: # NOQA: no encoding for JSON
meta['config'] = json.load(fout)
return meta
return {}
| 33.555102
| 88
| 0.643961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,759
| 0.335604
|
1f14219d12c0adf9ade099f871dd4550e114601e
| 3,682
|
py
|
Python
|
data/main_data_flow.py
|
SterArcher/OHCA-registry-Slovenia
|
ad8278a28039503ab6a75d48ffea314de9a759ba
|
[
"MIT"
] | 1
|
2022-02-28T13:02:14.000Z
|
2022-02-28T13:02:14.000Z
|
data/main_data_flow.py
|
SterArcher/dispatch
|
ad8278a28039503ab6a75d48ffea314de9a759ba
|
[
"MIT"
] | 1
|
2022-03-20T10:51:17.000Z
|
2022-03-21T07:52:57.000Z
|
data/main_data_flow.py
|
SterArcher/OHCA-registry-Slovenia
|
ad8278a28039503ab6a75d48ffea314de9a759ba
|
[
"MIT"
] | null | null | null |
import plotly.graph_objects as go
import plotly as plt
import random
# Uncomment the names you want the diagram to show
# Names in english
# sta = "Statistical Office"
# si = "Emergency call admission" #"sprejem intervencij"
# pni = "Emergency intervention report" #"poročilo/protokol nujne intervencije"
# pnrv = "Emergency protocol of the out-of-hospital EMS" # "protokol nujnega reševalnega vozila"
# ppo = "Out-of-hospital CPR" #"predbolnišnično oživljanje"
# utst = "Supplementary Utstein protocol"
# nijz = "National Institute of Public Health" #"NIJZ (v primeru smrti)"
# hosp = "Hospitals" # Večinoma v obliki protokola triaže,statusa/anamneze/rezultatov diagnostike in odpustnice
# disp = "Dispatch service"
# ppp = "First responders"
# comp = "IT system provider" #"Computel"
# api = "API"
# api_csv = "API/CSV"
# db = "Utstein database"
# title_text = "Representation of data flow for the Slovenian OHCA registry based on the Utstein protocol."
# Names in Slovene
si = "Sprejem intervencij" #"sprejem intervencij"
pni = "Protokol nujne intervencije" #"poročilo/protokol nujne intervencije"
pnrv = "Protokol nujnega reševalnega vozila" # "protokol nujnega reševalnega vozila"
ppo = "Protokol predbolnišničnega oživljanja" #"predbolnišnično oživljanje"
utst = "Dodatni protokol Utstein"
nijz = "NIJZ" #"NIJZ (v primeru smrti)"
hosp = "Bolnišnice" # Večinoma v obliki protokola triaže,statusa/anamneze/rezultatov diagnostike in odpustnice
disp = "Dispečerska služba zdravstva"
ppp = "Protokol prvih posredovalcev"
comp = "Ponudnik informacijske tehnologije" #"Computel"
sta = "Statistični urad"
api = "API"
api_csv = "API/CSV"
db = "Baza podatkov Utstein"
title_text = "Prikaz pretoka podatkov za Register slovenskih predbolnišničnih srčnih dogodkov v skladu s protokolom Utstein."
def random_color_generator():
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
return [r, g, b]
colors, colors_conn = [], []
for i in range(25):
[r, g, b] = random_color_generator()
colors.append("rgba(" + str(r) + "," + str(g) + "," + str(b) + "," + str(0.9) + ")")
colors_conn.append("rgba(" + str(r) + "," + str(g) + "," + str(b) + "," + str(0.5) + ")")
elements = [si, pni, pnrv, ppo, utst, nijz, hosp, disp, ppp, comp, api, api_csv, db]
labels, counter = dict(), 0
for elt in elements:
labels[elt] = counter
counter += 1
labels[sta] = counter
protocols, rest = [si, pni, pnrv, ppo, utst], [nijz, hosp, disp, ppp]
connections = dict()
for protocol in protocols:
connections[(labels[protocol], labels[comp])] = 1
for elt in rest:
connections[(labels[elt], labels[api_csv])] = 1
connections[(labels[comp], labels[api])] = len(protocols)
connections[(labels[api_csv], labels[db])] = len(rest)
connections[(labels[api], labels[db])] = len(protocols)
connections[(labels[sta], labels[db])] = 1
label = list(labels.keys())
sources, targets, values = [], [], []
for key in connections:
sources.append(key[0])
targets.append(key[1])
values.append(connections[key])
fig = go.Figure(data = [go.Sankey(
valueformat = ".0f",
valuesuffix = "TWh",
node = dict(pad = 15,
thickness = 20,
line = dict(color="black", width = 0.5),
label = label,
color = colors),
link = dict(source = sources,
target = targets,
value = values,
#label = 'label',
color = colors_conn))]) # 'rgb(220,220,220)'
fig.update_layout(title=dict(text=title_text, font=dict(size = 20, color = 'gray')),
font=dict(size = 12, color = 'black'),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)")
fig.show()
| 35.066667
| 125
| 0.67409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,737
| 0.468573
|
1f14b7efeb28f5b978cdbfde55415ea03866d4ca
| 569
|
py
|
Python
|
tests/Exscript/memtest_host.py
|
saveshodhan/exscript
|
72718eee3e87b345d5a5255be9824e867e42927b
|
[
"MIT"
] | 226
|
2015-01-20T19:59:06.000Z
|
2022-01-02T11:13:01.000Z
|
tests/Exscript/memtest_host.py
|
saveshodhan/exscript
|
72718eee3e87b345d5a5255be9824e867e42927b
|
[
"MIT"
] | 155
|
2015-01-02T07:56:27.000Z
|
2022-01-09T20:56:19.000Z
|
tests/Exscript/memtest_host.py
|
saveshodhan/exscript
|
72718eee3e87b345d5a5255be9824e867e42927b
|
[
"MIT"
] | 114
|
2015-01-03T11:48:17.000Z
|
2022-01-26T02:50:43.000Z
|
from __future__ import print_function
from builtins import str
from builtins import range
# This script is not meant to provide a fully automated test, it's
# merely a hack/starting point for investigating memory consumption
# manually. The behavior also depends heavily on the version of meliae.
from meliae import scanner, loader
from Exscript import Account, Host
hostlist = [Host(str(i)) for i in range(1, 10000)]
# accountlist = [Account(str(i)) for i in range(1, 10000)]
scanner.dump_all_objects('test.dump')
om = loader.load('test.dump')
print(om.summarize())
| 35.5625
| 71
| 0.773286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 284
| 0.499121
|
1f1586c55bd8026b70c428056979527a8012b8fd
| 8,468
|
py
|
Python
|
apcadastros.py
|
Alexsussa/ap-cadastros
|
9b5e9b57970a6a044ebde071a68403e0d513e89b
|
[
"MIT"
] | null | null | null |
apcadastros.py
|
Alexsussa/ap-cadastros
|
9b5e9b57970a6a044ebde071a68403e0d513e89b
|
[
"MIT"
] | null | null | null |
apcadastros.py
|
Alexsussa/ap-cadastros
|
9b5e9b57970a6a044ebde071a68403e0d513e89b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
__developer__ = 'Alex Pinheiro'
__version__ = 1.4
__build__ = 6
import sqlite3
from tkinter.ttk import *
from tkinter.filedialog import *
from threading import Thread
from utils import Utils
from login import Login
u = Utils
# Lists
estados = ['AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS', 'MG', 'PA',
           'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC', 'SP', 'SE', 'TO']
cidades = []
cpfcnpjs = ['CPF', 'CNPJ']
# Main window
class Clientes(Thread, Tk):
def __init__(self, master=None):
Thread.__init__(self)
banco = 'banco/dados.db'
conexao = sqlite3.connect(banco)
c = conexao.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS clientes (id INTEGER PRIMARY KEY AUTOINCREMENT,
cliente TEXT VARCHAR(30) UNIQUE NOT NULL, cpf_cnpj TINYINT(18) UNIQUE NOT NULL, telefone TINYINT(15) NOT NULL,
        cep TINYINT(10) NOT NULL, endereco TEXT VARCHAR(30) NOT NULL, numero TINYINT(5) NOT NULL,
bairro TEXT VARCHAR(20) NOT NULL, cidade TEXT VARCHAR(15) NOT NULL, estado TEXT VARCHAR(2) NOT NULL)''')
conexao.commit()
self.c0 = Frame(master)
self.c0.pack(pady=20)
self.c1 = Frame(master)
self.c1.pack(pady=10)
self.c2 = Frame(master)
self.c2.pack(pady=10)
self.c3 = Frame(master)
self.c3.pack(pady=10)
self.c4 = Frame(master)
self.c4.pack()
        # Top menu bar - not yet fully implemented
self.menuBar = Menu(janela, bd=0, bg='#d9d9d9')
self.menuArquivo = Menu(self.menuBar, tearoff=0)
self.menuArquivo.add_command(label='Produtos', command=self.produtos, accelerator='Ctrl+P')
self.menuArquivo.add_command(label='Salvar', command=lambda: u.cadastrarClientes(self), accelerator='Ctrl+S')
self.menuArquivo.add_command(label='Atualizar', command=lambda: u.atualizar(self), accelerator='Ctrl+U')
self.menuArquivo.add_command(label='Deletar', command=lambda: u.deletar(self), accelerator='Ctrl+D')
self.menuArquivo.add_separator()
self.menuArquivo.add_command(label='Sair', command=janela.destroy, accelerator='Ctrl+Q')
self.menuBar.add_cascade(label='Arquivo', menu=self.menuArquivo)
self.menuAjuda = Menu(self.menuBar, tearoff=0)
self.menuAjuda.add_command(label='Sobre', command=lambda: u.sobre(self, window=janela), accelerator='Ctrl+H')
self.menuBar.add_cascade(label='Ajuda', menu=self.menuAjuda)
janela.config(menu=self.menuBar)
self.lbid = Label(self.c1, text='ID:', width=3)
self.lbid.pack(side=LEFT)
self.txtid = Combobox(self.c1, width=8, background='white', foreground='black',
values=u.listaID(self))
self.txtid.pack(side=LEFT)
        self.btnlupa = Button(self.c1, width=20, height=20, bg='white', command=lambda: u.lupaID(self))
        self.lupa = PhotoImage(file='imagens/lupa.png')
        self.btnlupa.config(image=self.lupa)
        self.btnlupa.image = self.lupa
        self.btnlupa.pack(side=LEFT, padx=2)
self.lbcliente = Label(self.c1, text='CLIENTE:', width=8)
self.lbcliente.pack(side=LEFT)
self.txtcliente = Entry(self.c1, width=30, background='white', foreground='black')
self.txtcliente.pack(side=LEFT)
self.lbcpfcnpj = Combobox(self.c1, text='CPF/CNPJ:', width=5, values=cpfcnpjs)
self.lbcpfcnpj.pack(side=LEFT, padx=3)
self.lbcpfcnpj.set(cpfcnpjs[0])
self.lbcpfcnpj.bind('<<ComboboxSelected>>', lambda e: u.maskCampos(self))
self.txtcpfcnpj = Entry(self.c1, width=18, background='white', foreground='black')
self.txtcpfcnpj.pack(side=LEFT)
self.btnlupa = Button(self.c1, width=20, height=20, bg='white', command=lambda: u.lupaCPF(self))
self.lupa = PhotoImage(file='imagens/lupa.png')
self.btnlupa.config(image=self.lupa)
self.btnlupa.image = self.lupa
self.btnlupa.pack(side=LEFT, padx=2)
self.lbtelcel = Label(self.c2, text='TEL/CEL:', width=8)
self.lbtelcel.pack(side=LEFT)
self.txttelcel = Entry(self.c2, text='Telefone ou Celular...', width=15, bg='white', fg='black')
self.txttelcel.pack(side=LEFT)
self.lbcep = Label(self.c2, text='CEP:', width=4)
self.lbcep.pack(side=LEFT)
self.txtcep = Entry(self.c2, width=10, bg='white', fg='black')
self.txtcep.pack(side=LEFT)
self.btnlupa = Button(self.c2, width=20, height=20, bg='white', command=lambda: u.buscaCep(self))
self.lupa = PhotoImage(file='imagens/lupa.png')
self.btnlupa.config(image=self.lupa)
self.btnlupa.image = self.lupa
self.btnlupa.pack(side=LEFT, padx=2)
self.lbendereco = Label(self.c2, text='ENDEREÇO:', width=10)
self.lbendereco.pack(side=LEFT)
self.txtendereco = Entry(self.c2, width=30, bg='white', fg='black')
self.txtendereco.pack(side=LEFT)
self.lbnumero = Label(self.c2, text='Nº:', width=3)
self.lbnumero.pack(side=LEFT)
self.txtnumero = Entry(self.c2, width=5, bg='white', fg='black')
self.txtnumero.pack(side=LEFT)
self.lbbairro = Label(self.c3, text='BAIRRO:', width=7)
self.lbbairro.pack(side=LEFT)
self.txtbairro = Entry(self.c3, width=30, bg='white', fg='black')
self.txtbairro.pack(side=LEFT)
self.lbcidade = Label(self.c3, text='CIDADE:', width=7)
self.lbcidade.pack(side=LEFT)
self.txtcidade = Entry(self.c3, width=20, background='white', foreground='black')
self.txtcidade.pack(side=LEFT)
self.lbestado = Label(self.c3, text='ESTADO:', width=7)
self.lbestado.pack(side=LEFT)
self.txtestado = Combobox(self.c3, width=3, background='white', foreground='black',
values=sorted(estados))
self.txtestado.pack(side=LEFT)
self.logo = Label(self.c4, image=imglogo)
self.logo.pack()
###############################################################################
        # Mouse context menu
self.MenuMouse = Menu(tearoff=0)
self.MenuMouse.add_command(label='Cortar')
self.MenuMouse.add_command(label='Copiar')
self.MenuMouse.add_command(label='Colar')
janela.bind('<Button-3><ButtonRelease-3>', self.MostrarMenuMouse)
# Binds
self.txtid.bind('<<ComboboxSelected>>', lambda e: u.lupaID(self))
janela.bind('<Button-1>', lambda e: u.maskCampos(self))
janela.bind('<Control-S>', lambda e: u.cadastrarClientes(self))
janela.bind('<Control-s>', lambda e: u.cadastrarClientes(self))
janela.bind('<Control-U>', lambda e: u.atualizar(self))
janela.bind('<Control-u>', lambda e: u.atualizar(self))
janela.bind('<Control-D>', lambda e: u.deletar(self))
janela.bind('<Control-d>', lambda e: u.deletar(self))
janela.bind('<Control-L>', lambda e: u.limpar(self))
janela.bind('<Control-l>', lambda e: u.limpar(self))
janela.bind('<Control-Q>', lambda e: janela.destroy())
janela.bind('<Control-q>', lambda e: janela.destroy())
janela.bind('<Control-P>', lambda e: self.produtos())
janela.bind('<Control-p>', lambda e: self.produtos())
janela.bind('<Control-H>', lambda e: u.sobre(self, window=janela))
janela.bind('<Control-h>', lambda e: u.sobre(self, window=janela))
def MostrarMenuMouse(self, event):
w = event.widget
self.MenuMouse.entryconfigure('Cortar', command=lambda: w.event_generate('<<Cut>>'))
self.MenuMouse.entryconfigure('Copiar', command=lambda: w.event_generate('<<Copy>>'))
self.MenuMouse.entryconfigure('Colar', command=lambda: w.event_generate('<<Paste>>'))
self.MenuMouse.tk.call('tk_popup', self.MenuMouse, event.x_root, event.y_root)
def produtos(self):
from produtos import jan
janela.iconify()
if jan.withdraw:
jan.deiconify()
jan.focus_force()
else:
jan.withdraw()
janela.deiconify()
# End of the clients window
janela = Tk()
imglogo = PhotoImage(file='imagens/logo.png')
iconejanela = PhotoImage(file='imagens/iconejanela.png')
Clientes(janela)
janela.tk.call('wm', 'iconphoto', janela._w, iconejanela)
janela.title('AP CADASTROS - CLIENTES')
janela.geometry('800x450')
janela.resizable(False, False)
janela.mainloop()
| 41.920792
| 118
| 0.62931
| 7,609
| 0.898135
| 0
| 0
| 0
| 0
| 0
| 0
| 1,716
| 0.20255
|
1f17a8045e62dbb0dc195713bf778e0658496213
| 131
|
py
|
Python
|
src/pricehist/beanprice/alphavantage.py
|
chrisberkhout/pricehist
|
a54da85a6fae15e2f771e8612aed089407ec5c22
|
[
"MIT"
] | 4
|
2021-09-15T03:23:10.000Z
|
2022-02-08T23:31:10.000Z
|
src/pricehist/beanprice/alphavantage.py
|
chrisberkhout/pricehist
|
a54da85a6fae15e2f771e8612aed089407ec5c22
|
[
"MIT"
] | null | null | null |
src/pricehist/beanprice/alphavantage.py
|
chrisberkhout/pricehist
|
a54da85a6fae15e2f771e8612aed089407ec5c22
|
[
"MIT"
] | null | null | null |
from pricehist import beanprice
from pricehist.sources.alphavantage import AlphaVantage
Source = beanprice.source(AlphaVantage())
| 26.2
| 55
| 0.847328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1f18bfa4fe6a76d3156288890c8bbdb080595ab5
| 12,846
|
py
|
Python
|
example/hit_path.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | null | null | null |
example/hit_path.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | null | null | null |
example/hit_path.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | 3
|
2020-06-19T15:41:19.000Z
|
2020-06-29T12:47:05.000Z
|
import math
import sys
from scipy.interpolate import interp2d
from scipy.ndimage import rotate, center_of_mass
from scipy.spatial import distance
from skimage.feature import canny
from skimage.filters import rank, gaussian
from skimage.measure import subdivide_polygon
from skimage.morphology import medial_axis, square, erosion, disk
from skimage.segmentation import active_contour
from skimage.transform import probabilistic_hough_line, rescale
from sklearn.linear_model import LinearRegression
from credo_cf import load_json, progress_and_process_image, group_by_id, GRAY, nkg_mark_hit_area, NKG_MASK, nkg_make_track, NKG_PATH, NKG_DIRECTION, \
NKG_DERIVATIVE, ID, NKG_THRESHOLD, NKG_UPSCALE, NKG_SKELETON, point_to_point_distance, center_of_points, NKG_MASKED, NKG_REGRESSION, NKG_PATH_FIT, \
store_png, IMAGE
import matplotlib.pyplot as plt
from numpy import unravel_index, ma
import numpy as np
import itertools
from scipy.sparse import csr_matrix
from scipy.sparse.dok import dok_matrix
from scipy.sparse.csgraph import dijkstra
# prepare dataset: hits - JSON's objects, and grays - numpy grayscale images 60x60
from credo_cf.classification.preprocess.nkg_processings import search_longest_path_dijkstra, bitmap_to_graph, analyse_path
objects, count, errors = load_json('../data/manual.json', progress_and_process_image)
by_id = group_by_id(objects)
used_hits1 = {4711435, 6234182, 9152349, 4913621, 5468291, 7097636, 4976474, 5206452, 4876475, 5951007, 4714801, 4819239, 4660572, 4705446, 8280225, 8459656,
8471578, 9124308, 9314789, 4813841}
used_hits2 = [7741225, 7238971, 5973441, 4892405, 17432760,
17432645, 4731298, 6229582, 17571002, 17368987,
7148947, 4899235, 18349704, 18250739, 6908292,
9129139, 17771578, 17861029, 17337669, 7470695]
used_hits3 = [7741225, 4580817, 5973441, 4892405, 17432760,
17432645, 4731298, 6229582, 17571002, 17368987,
7148947, 4899235, 18349704, 18250739, 6908292,
9129139, 17771578, 17861029, 17337669, 7470695,
4711435, 6234182, 9152349, 4913621, 5468291,
7097636, 4976474, 5206452, 4876475, 5951007,
4714801, 4819239, 4660572, 4705446, 8280225,
8459656, 8471578, 9124308, 9314789, 4813841]
used_hits = used_hits3
hits = []
for u in used_hits:
hits.append(by_id[u][0])
grays = list(map(lambda x: x['gray'], hits))
# utils
def display(img):
plt.matshow(img)
plt.colorbar()
plt.show()
def display_all(values):
f, axs = plt.subplots(4, 5, constrained_layout=True, figsize=(32, 24))
i = 0
for ax in axs.flat:
im = ax.matshow(values[i])
i += 1
# f.colorbar(im, ax=axs.flat)
plt.show()
def display_all_from(hits, _from, title_func=None, scale=6):
cols = 5
rows = int(math.ceil(len(hits) / cols))
f, axs = plt.subplots(rows, cols, constrained_layout=True, figsize=(4*scale, 3*scale*rows/4))
i = 0
for ax in axs.flat:
if len(hits) <= i:
break
im = ax.matshow(hits[i].get(_from))
if title_func is not None:
ax.title.set_text(title_func(hits[i]))
i += 1
# f.colorbar(im, ax=axs.flat)
plt.show()
# a 1/8 slice (one octant) of the probing-ray search path
ray_way_octet = [
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
],
]
ray_way_octet2 = [
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0],
],
]
fit_mask = [
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
]
def build_ray_way(octet):
ret = []
for r in range(0, 4):
for i in range(0, len(octet)):
o = octet[i]
oct = np.array(o)
angle = r * 90.0 + 45.0 / (len(octet) - 1) * i - 180
ret.append({'way': np.rot90(oct, r), 'angle': angle})
for i in range(1, len(octet) - 1):
o = octet[-(i + 1)]
oct = np.array(o)
fl = np.flip(oct, axis=0)
angle = r * 90.0 + 45.0 / (len(octet) - 1) * i + 45 - 180
ret.append({'way': np.rot90(rotate(fl, angle=90), r), 'angle': angle})
return ret
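# build_ray_way expands the octant kernels above into a full 360-degree set of
# directional kernels, each tagged with its angle. An illustrative check
# (with the 5x5 octet of 4 kernels this yields 4*4 + 4*2 = 24 directions):
#   assert len(build_ray_way(ray_way_octet)) == 24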
def way_next_point(way, step=1):
w, h = way.shape
a = np.zeros(way.shape)
a[step:w-step, step:h-step] = way[step:w-step, step:h-step]
a[step+1:w-step-1, step+1:h-step-1] = 0
next = unravel_index(a.argmax(), a.shape)
return next[0] - (w - 1) / 2, next[1] - (h - 1) / 2
def calc_ways(img, pos, ways):
w = int((ways[0]['way'].shape[0] - 1) / 2)
cut = img[pos[0]-w:pos[0]+w+1, pos[1]-w:pos[1]+w+1]
sums = []
for way in ways:
calc = cut * way['way']
dw = np.ones(calc.shape)
dw[1:4, 1:4] = calc[1:4, 1:4]
        calc = calc + dw  # cells nearest the centre are counted twice
        # calc = calc * dw  # alternative: weight the nearest cells quadratically
s = np.sum(calc)
sums.append({**way, 'value': s})
return sums
def calc_pos(a, b):
return int(a[0]+b[0]), int(a[1]+b[1])
def normalize_angle(angle):
n = angle % 360
return n if n <= 180 else n - 360
def in_angle(a, b, v):
pa = normalize_angle(a) + 360
pb = normalize_angle(b) + 360
pv = normalize_angle(v) + 360
if pa <= pb:
return pa <= pv <= pb
else:
return not (pa >= pv >= pb)
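# A few illustrative checks of the circular-interval logic above
# (hypothetical values, documenting the wrap-around handling):
#   assert in_angle(-45, 45, 10)       # interval crossing 0 degrees
#   assert in_angle(135, -135, 180)    # interval crossing +/-180 degrees
#   assert not in_angle(-45, 45, 90)   # value outside the interval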
def nkg_pather_step(img, next_pos, angle, threshold, fov, step):
path = []
while img[next_pos] > threshold:
path.append(next_pos)
img[next_pos] = 0
try:
calc = calc_ways(img, next_pos, ray_way)
except:
# edge achieved
break
filtered = list(filter(lambda x: in_angle(angle - fov / 2, angle + fov / 2, x['angle']) and img[calc_pos(next_pos, way_next_point(x['way'], step))] > threshold, calc))
if len(filtered) == 0:
break
direction = max(filtered, key=lambda x: x['value'])
next_pos = calc_pos(next_pos, way_next_point(direction['way'], step))
angle = direction['angle']
return path
def nkg_pather(img, threshold, fov=90, step=1):
# mask = np.zeros(img.shape)
img = img.copy()
start = unravel_index(img.argmax(), img.shape)
try:
        calc = calc_ways(img, start, ray_way)
except:
return np.array([])
direction = max(calc, key=lambda x: x['value'])
next_pos = calc_pos(start, way_next_point(direction['way'], step))
angle = direction['angle']
next_angle = direction['angle'] - 180
path = nkg_pather_step(img, next_pos, angle, threshold, fov, step)
angle = next_angle
next_pos = start
path2 = nkg_pather_step(img, next_pos, angle, threshold, fov, step)
return np.array([*reversed(path2), *path])
def line_to_mask(img, path, scale=1, value=1, create_new_mask=False):
if create_new_mask:
mask = np.zeros(img.shape)
else:
mask = img
for a in path:
if scale > 1:
mask[round(a[0] * scale + scale/2.0), round(a[1] * scale + scale/2.0)] = value
else:
mask[round(a[0]), round(a[1])] = value
return ma.masked_array(img, mask)
def path_to_center_of_weight(img, fm, path):
path2 = []
fit_mask = np.array(fm)
w = fit_mask.shape[0]
h = fit_mask.shape[1]
fit = img.copy()
for i in path:
x1 = int(i[0] - (w - 1) / 2)
x2 = int(i[0] + (w + 1) / 2)
y1 = int(i[1] - (h - 1) / 2)
y2 = int(i[1] + (h + 1) / 2)
# cut = fit[i[0]-2:i[0]+3, i[1]-2:i[1]+3]
cut = fit[x1:x2, y1:y2]
if cut.shape[0] != 5 or cut.shape[1] != 5:
continue
m = cut * fit_mask
new_i = center_of_mass(m)
path2.append([new_i[0] + x1, new_i[1] + y1])
path2 = optimize_path(path2, 0.5)
path2 = np.array(path2)
# if path2.shape[0] > 1:
# return subdivide_polygon(path2)
return path2
def optimize_path(path, max_distance, max_passes=20):
working = path
for i in range(0, max_passes):
used = False
path2 = [working[0]]
for pos in range(1, len(working)):
dist = point_to_point_distance(working[pos - 1], working[pos])
if dist <= max_distance:
new_point = center_of_points([working[pos - 1], working[pos]])
if path2[-1][0] == working[pos - 1][0] and path2[-1][1] == working[pos - 1][1]:
path2[-1] = new_point
else:
path2.append(new_point)
used = True
else:
path2.append(working[pos])
working = path2
if not used:
break
return working
def nkg_path_analysis(detection: dict, fov=90, step=1):
h = detection
path = nkg_pather(h.get(GRAY), h.get(NKG_THRESHOLD), fov, step)
h[NKG_PATH] = path
h[NKG_MASKED] = line_to_mask(h.get(GRAY), path, create_new_mask=True)
path_fit = path_to_center_of_weight(h.get(GRAY), fit_mask, path) if len(path) else []
h[NKG_PATH_FIT] = path_fit
if len(path_fit) == 0:
h[NKG_REGRESSION] = 0
return h
X = path_fit[:,0].reshape(-1, 1)
y = path_fit[:,1].reshape(-1, 1)
reg = LinearRegression().fit(X, y)
score = reg.score(X, y)
h[NKG_REGRESSION] = score
return h
ray_way = build_ray_way(ray_way_octet)
www = way_next_point(ray_way[0]['way'])
# mark_all(vs, used_kernel, requrence_mark)
# display_all(grays)
display_all_from(hits, GRAY)
for h in hits:
nkg_mark_hit_area(h)
display_all_from(hits, NKG_MASK)
for h in hits:
img = h.get(GRAY).copy()
img = gaussian(img, 0.5) # rank.mean(img, selem=disk(1))
h['smooth'] = img
display_all_from(hits, 'smooth')
for h in hits:
nkg_path_analysis(h, 90)
h['masked'] = line_to_mask(h.get(GRAY), h[NKG_PATH], create_new_mask=True)
h['score'] = '%s: %0.3f/%d' % (str(h.get(ID)), h[NKG_REGRESSION], len(h[NKG_PATH_FIT]))
img = rescale(h.get(GRAY), 8, order=0, preserve_range=True, anti_aliasing=False)
mask = np.zeros(img.shape)
h['masked2'] = line_to_mask(img, h.get(NKG_PATH_FIT), scale=8, create_new_mask=True)
display_all_from(hits, 'masked', lambda x:str(x['score']))
display_all_from(hits, 'masked2', lambda x:str(x['score']), scale=10)
#def measure_angle(fn: str):
# hits, count, errors = load_json('../data/%s' % fn, progress_and_process_image)
# for h in hits:
# nkg_mark_hit_area(h)
# nkg_path_analysis(h)
# store_png('/tmp/credo', [fn], '%0.3f_%s' % (h.get(NKG_REGRESSION), str(h.get(ID))), h.get(IMAGE))
#def main():
# measure_angle('hits_votes_4_class_2.json')
# measure_angle('hits_votes_4_class_3.json')
#if __name__ == '__main__':
# main()
# sys.exit(0) # not always close
# for o in objects:
# print('%s;%f' % (str(o.get(ID)), o.get(NKG_REGRESSION)))
| 28.738255
| 175
| 0.541725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,281
| 0.099681
|
1f18f11b5d9f381e25d945aa36634594b061dc4c
| 3,749
|
py
|
Python
|
exps/supp-synthetic/notebooks/hp_analysis.py
|
Viktour19/overlap-code
|
f5c6e63146a00f65710c38b9181bb9d12de6454f
|
[
"MIT"
] | 2
|
2020-07-09T03:15:58.000Z
|
2022-03-09T11:57:17.000Z
|
exps/supp-synthetic/notebooks/hp_analysis.py
|
Viktour19/overlap-code
|
f5c6e63146a00f65710c38b9181bb9d12de6454f
|
[
"MIT"
] | null | null | null |
exps/supp-synthetic/notebooks/hp_analysis.py
|
Viktour19/overlap-code
|
f5c6e63146a00f65710c38b9181bb9d12de6454f
|
[
"MIT"
] | 1
|
2021-05-18T11:55:04.000Z
|
2021-05-18T11:55:04.000Z
|
#!/usr/bin/env python
# coding: utf-8
from sacred.observers import TinyDbReader
import pdb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def get_exclusion_metadata(
d,
ideal_rule = [['i0', 'not', ''], ['i1', 'not', '']],
w_lb=1e-8):
r = dict(d)
r['rule_avg_coverage'] = np.mean([rule['p_covered'] for rule in r['rule_stats']])
r['rule_n_perfect'] = np.sum([rule['n_covered'] == 0 for rule in r['rule_stats']])
r['rule_n_total'] = len(r['rule_stats'])
r['rule_avg_length'] = np.mean([len(rule) for rule in r['rules']])
ideal_literal_idx = np.array([i in ideal_rule for i in r['z_index']])
dirty_rules_idx = r['z_values'][ideal_literal_idx].sum(axis=0) == len(ideal_rule)
clean_rules_idx = np.logical_and(
dirty_rules_idx,
r['z_values'][~ideal_literal_idx].sum(axis=0) == 0
)
# Make sure dirty rules exclude the clean rule
dirty_rules_idx = np.logical_xor(dirty_rules_idx, clean_rules_idx)
other_rules_idx = np.logical_not(np.logical_or(dirty_rules_idx, clean_rules_idx))
assert sum(clean_rules_idx) <= 1
# Rules considered (i.e., they show up in W at all)
r['n_lp_rules_considered_dirty'] = dirty_rules_idx.sum()
r['n_lp_rules_considered_clean'] = clean_rules_idx.sum()
r['n_lp_rules_considered_other'] = other_rules_idx.sum()
# Rules used (i.e., non-zero values in W)
r['n_lp_coeff_above_lb_dirty'] = np.logical_and(
dirty_rules_idx, r['w'] > w_lb).sum()
r['n_lp_coeff_above_lb_clean'] = np.logical_and(
clean_rules_idx, r['w'] > w_lb).sum()
r['n_lp_coeff_above_lb_other'] = np.logical_and(
other_rules_idx, r['w'] > w_lb).sum()
# Average value of coefficients
# r['lp_coeff_avg_value_dirty'] = np.nan if dirty_rules_idx.sum() == 0 else np.mean(r['w'][dirty_rules_idx])
# r['lp_coeff_avg_value_clean'] = np.nan if clean_rules_idx.sum() == 0 else np.mean(r['w'][clean_rules_idx])
# r['lp_coeff_avg_value_other'] = np.nan if other_rules_idx.sum() == 0 else np.mean(r['w'][other_rules_idx])
r['n_rounded_rules_considered_clean'] = sum(this_r == ideal_rule for this_r in r['rules'])
r['n_rounded_rules_considered_dirty'] = \
sum([np.all(np.array([i in ideal_rule for i in this_r])) for this_r in r['rules']]) - \
sum(this_r == ideal_rule for this_r in r['rules'])
r['n_lp_rules_viewed'] = r['z_values'].shape[1]
del r['rules']
del r['w']
del r['z_index']
del r['z_values']
del r['rule_stats']
return r
def rename_filter_df(df):
return df.rename(columns={'n_rounded_rules_considered_clean': 'id_exclusion_rr',
'n_lp_rules_considered_clean' : 'id_exclusion_lp',
'reference_coverage': 'ref_coverage',
'literals': 'n_rules_literals'})[['B', 'K', 'alpha', 'lambda0', 'lambda1', 'n_ref_mult',
'lp_obj', 'rounded_obj', 'ref_coverage',
'n_lp_rules_viewed', 'id_exclusion_lp', 'id_exclusion_rr',
'n_rules', 'rule_n_perfect', 'rule_avg_coverage', 'rule_avg_length']]
def get_data(data_path, verbose=False):
reader = TinyDbReader(data_path)
meta = reader.fetch_metadata(exp_name='synthetic_removal')
if verbose:
print("{} / {} experiments completed".format(
len([d['status'] for d in meta if d['status'] == 'COMPLETED']),
len([d['status'] for d in meta])))
info = [d['info'] for d in meta if d['status'] == 'COMPLETED']
data = [get_exclusion_metadata(d) for d in info]
df = rename_filter_df(pd.DataFrame(data))
return df, info
| 41.197802
| 112
| 0.628434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,517
| 0.404641
|
1f1ad2122b60088ba3e024758a2e26d9b7b4784d
| 9,282
|
py
|
Python
|
bin/punydecode.py
|
mutedmouse/punydecode
|
547900823b874f581a842379a4472b2a27d64c1c
|
[
"Apache-2.0"
] | 1
|
2021-03-24T00:22:40.000Z
|
2021-03-24T00:22:40.000Z
|
bin/punydecode.py
|
mutedmouse/punydecode
|
547900823b874f581a842379a4472b2a27d64c1c
|
[
"Apache-2.0"
] | null | null | null |
bin/punydecode.py
|
mutedmouse/punydecode
|
547900823b874f581a842379a4472b2a27d64c1c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2005-2017 Splunk Inc. All Rights Reserved. Version 6.x
# Author: Andrew Quill
import sys,splunk.Intersplunk
import string
import getpass
import re
def replace_xns(field):
try:
punydecode = field.encode("idna").decode("idna")
except:
punydecode = field
return punydecode
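# Illustrative behaviour (hypothetical input):
#   replace_xns("xn--mnchen-3ya")  ->  u"m\xfcnchen"  (Munich with u-umlaut)
# Fields that fail to decode are returned unchanged.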
def chardetect(field1, detection):
vargroup = []
isXN = re.compile("^[Xx][Nn]\--.*$")
isAlpha = re.compile('[\w\d]')
for xngroup in re.split('[\\\.]', field1):
try:
vargroup.append(str(xngroup.lower().encode("idna").decode("idna")))
except:
detection.append("Wide Mode Unicode")
#roll through the var groups only checking the xn domains
for chgroup in vargroup:
dgbcounter = 0
for character in list(chgroup.decode()):
#make an umodified backup for plain chars
backupchar = character
try:
character = repr(character.decode()).lstrip('\'\\\u').rstrip("'")
if int(character, 16) < 0x0080:
detection.append("English Latin Base")
elif int(character, 16) < 0x02AF:
detection.append("English Latin Extended")
elif int(character, 16) < 0x036F:
detection.append("Diacritical Marks")
elif int(character, 16) < 0x03FF:
detection.append("Greek and Coptic")
elif int(character, 16) < 0x052F:
detection.append("Cyrillic")
elif int(character, 16) < 0x058F:
detection.append("Armenian")
                elif int(character, 16) < 0x05FF:
                    detection.append("Hebrew")
                elif int(character, 16) < 0x06FF:
                    detection.append("Arabic")
elif int(character, 16) < 0x074F:
detection.append("Syriac")
elif int(character, 16) < 0x07BF:
detection.append("Thaana")
elif int(character, 16) < 0x097F:
detection.append("Devanagari")
elif int(character, 16) < 0x09FF:
detection.append("Bengali")
elif int(character, 16) < 0x0A7F:
detection.append("Gurmukhi")
elif int(character, 16) < 0x0AFF:
detection.append("Gujarati")
elif int(character, 16) < 0x0B7F:
detection.append("Oriya")
elif int(character, 16) < 0x0BFF:
detection.append("Tamil")
elif int(character, 16) < 0x0C7F:
detection.append("Telugu")
elif int(character, 16) < 0x0CFF:
detection.append("Kannada")
elif int(character, 16) < 0x0D7F:
detection.append("Malayalam")
elif int(character, 16) < 0x0DFF:
detection.append("Sinhala")
elif int(character, 16) < 0x0E7F:
detection.append("Thai")
elif int(character, 16) < 0x0EFF:
detection.append("Lao")
elif int(character, 16) < 0x0FFF:
detection.append("Tibetan")
elif int(character, 16) < 0x109F:
detection.append("Myanmar")
elif int(character, 16) < 0x10FF:
detection.append("Georgian")
elif int(character, 16) < 0x11FF:
detection.append("Hangul")
elif int(character, 16) < 0x137F:
detection.append("Ethiopic")
elif int(character, 16) < 0x13FF:
detection.append("Cherokee")
elif int(character, 16) < 0x167F:
detection.append("Canadian Aboriginal")
elif int(character, 16) < 0x169F:
detection.append("Ogham")
elif int(character, 16) < 0x16FF:
detection.append("Runic")
elif int(character, 16) < 0x171F:
detection.append("Tagalog")
elif int(character, 16) < 0x173F:
detection.append("Hanunoo")
elif int(character, 16) < 0x175F:
detection.append("Buhid")
elif int(character, 16) < 0x177F:
detection.append("Tagbanwa")
elif int(character, 16) < 0x17FF:
detection.append("Khmer")
elif int(character, 16) < 0x18AF:
detection.append("Mongolian")
elif int(character, 16) < 0x194F:
detection.append("Limbu")
elif int(character, 16) < 0x197F:
detection.append("Tai Le")
elif int(character, 16) < 0x19FF:
detection.append("Khmer")
elif int(character, 16) < 0x1EFF:
detection.append("English Latin Extended")
elif int(character, 16) < 0x1FFF:
detection.append("Greek")
elif int(character, 16) < 0x206F:
detection.append("English Latin Base")
elif int(character, 16) < 0x209F:
detection.append("Super and Subscripts")
elif int(character, 16) < 0x20CF:
detection.append("Currency Symbols")
elif int(character, 16) < 0x21FF:
detection.append("Letterlike Symbols / Number Forms / Arrows")
elif int(character, 16) < 0x2BFF:
detection.append("Symbols and Shapes")
                elif int(character, 16) > 0x2E80 and int(character, 16) < 0x2FFF:
                    detection.append("CJK Combined")
elif int(character, 16) > 0x3040 and int(character, 16) < 0x309F:
detection.append("Hiragana")
elif int(character, 16) > 0x30A0 and int(character, 16) < 0x30FF:
detection.append("Katakana")
                elif int(character, 16) > 0x4E00 and int(character, 16) < 0x9FFF:
                    detection.append("CJK Combined")
elif int(character, 16) > 0xAC00 and int(character, 16) < 0xD7AF:
detection.append("Hangul")
elif int(character, 16) > 0xFB50 and int(character, 16) < 0xFDFF:
detection.append("Arabic")
elif int(character, 16) > 0xFE20 and int(character, 16) < 0xFE6F:
detection.append("CJK Combined")
elif int(character, 16) > 0xFE50 and int(character, 16) < 0xFE6F:
detection.append("English Latin Base")
elif int(character, 16) > 0xFE70 and int(character, 16) < 0xFEFF:
detection.append("Arabic")
elif int(character, 16) > 0x2F800 and int(character, 16) < 0x2FA1F:
detection.append("CJK Combined")
else:
detection.append("Unidentified Other")
except:
try:
alphaVal = repr(backupchar.decode()).lstrip('\'\\\u').rstrip("'")
if re.match(isAlpha, alphaVal) is not None:
detection.append("English Latin Base")
except:
detection.append("Extended Unicode (Emoji, Symbol or Other)")
return sorted(set(detection))
def main():
results = []
output_results = []
keywords,options = splunk.Intersplunk.getKeywordsAndOptions()
results,dummyresults,settings = splunk.Intersplunk.getOrganizedResults()
if not options.has_key('field'):
output_result = splunk.Intersplunk.generateErrorResults("Usage: punydecode field=<field-to-decode> [detection]")
splunk.Intersplunk.outputResults( output_result )
exit(0)
field = options.get('field', None)
detect = None
if 'detection' in keywords:
detect = True
try:
for r in results:
match = re.compile(r'[Xx][Nn]\-\-[\w\d\-\_]*')
if field in r.keys():
r['punydecoded'] = r[field]
for item in re.findall(match, r[field]):
r['punydecoded'] = r['punydecoded'].replace(item, replace_xns(item.lower()))
if detect:
r['detection'] = []
for item in re.findall(match, r[field].lower()):
r['detection'] = chardetect(item, r['detection'])
r['detection'] = sorted(set(r['detection']))
output_results.append(r)
except:
import traceback
stack = traceback.format_exc()
output_results = splunk.Intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
splunk.Intersplunk.outputResults( output_results )
if __name__ == "__main__":
main()
| 45.058252
| 120
| 0.5195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,309
| 0.141026
|
1f1e0b869b9f01994358b74334809a1ece521ead
| 774
|
py
|
Python
|
345_ReverseVowelsOfAString.py
|
satwiksabharwal01/LeetcodeProblemSolutions
|
c08fb77b76519f9c543d74f84cb2c0477aeddcd9
|
[
"MIT"
] | 1
|
2020-06-03T22:00:54.000Z
|
2020-06-03T22:00:54.000Z
|
345_ReverseVowelsOfAString.py
|
AmiGandhi/leetcode
|
238186f1e4dd7f243caab47173ebc2511ae5902e
|
[
"MIT"
] | null | null | null |
345_ReverseVowelsOfAString.py
|
AmiGandhi/leetcode
|
238186f1e4dd7f243caab47173ebc2511ae5902e
|
[
"MIT"
] | null | null | null |
# Write a function that takes a string as input and reverse only the vowels of a string.
# Example 1:
# Input: "hello"
# Output: "holle"
# Example 2:
# Input: "leetcode"
# Output: "leotcede"
class Solution:
def reverseVowels(self, s: str) -> str:
vowels = set(list("aeiouAEIOU"))
s = list(s)
left, right = 0, len(s)-1
while left<right:
if s[left] in vowels and s[right] in vowels:
s[left], s[right] = s[right], s[left]
left, right = left + 1, right -1
if s[left] not in vowels:
left += 1
if s[right] not in vowels:
right -= 1
return ''.join(s)
if __name__ == "__main__":
s = "hello"
print(Solution().reverseVowels(s))
| 26.689655
| 88
| 0.536176
| 496
| 0.640827
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.277778
|
1f1f88fe67e806539b890092e9e0d182702100b7
| 574
|
py
|
Python
|
script/run_basic_slackbot.py
|
imperial-genomics-facility/IGFSlackBot
|
2692460e907381cea067b674a560cacef6fff981
|
[
"Apache-2.0"
] | null | null | null |
script/run_basic_slackbot.py
|
imperial-genomics-facility/IGFSlackBot
|
2692460e907381cea067b674a560cacef6fff981
|
[
"Apache-2.0"
] | null | null | null |
script/run_basic_slackbot.py
|
imperial-genomics-facility/IGFSlackBot
|
2692460e907381cea067b674a560cacef6fff981
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from slackbot.basic.igfbasicslackbot import IgfBasicSlackBot
parser=argparse.ArgumentParser()
parser.add_argument('-s','--slack_config', required=True, help='Slack configuration json file')
parser.add_argument('-p','--project_data', required=True, help='Project data CSV file')
args=parser.parse_args()
slack_config=args.slack_config
project_data=args.project_data
try:
igf_bot=IgfBasicSlackBot(slack_config_json=slack_config, \
project_data_file=project_data)
igf_bot.start_igfslackbot()
except Exception as e:
print(e)
| 31.888889
| 95
| 0.771777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.163763
|
1f201209100b12ce6a3d7fab4f4d25eb9b397c33
| 258
|
py
|
Python
|
python/Exercises105Bonus.py
|
TerryRPatterson/Coursework
|
633d15b9ceb4e5a3cc4c74d0ca44ab91d7faa7c7
|
[
"Apache-2.0"
] | null | null | null |
python/Exercises105Bonus.py
|
TerryRPatterson/Coursework
|
633d15b9ceb4e5a3cc4c74d0ca44ab91d7faa7c7
|
[
"Apache-2.0"
] | null | null | null |
python/Exercises105Bonus.py
|
TerryRPatterson/Coursework
|
633d15b9ceb4e5a3cc4c74d0ca44ab91d7faa7c7
|
[
"Apache-2.0"
] | 2
|
2018-02-16T00:44:16.000Z
|
2018-02-16T18:44:45.000Z
|
def countDown(start,message):
import time
if start > 20:
        raise OverflowError("countDown cannot accept numbers bigger than 20.")
for i in range(start,0,-1):
time.sleep(1)
print(i)
print(message)
countDown(20,"Blastoff!🚀")
| 25.8
| 75
| 0.631783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.233716
|
1f2103ff16477b77dbb801e6f1f09baa26d1ea3b
| 1,170
|
py
|
Python
|
bgbl/management/commands/fix_glyphs.py
|
okfde/api.offenegesetze.de
|
85bc0a1a65dfa77651b7319eb0fccde1a27ba193
|
[
"MIT"
] | 16
|
2018-12-10T11:59:44.000Z
|
2020-06-28T21:37:15.000Z
|
bgbl/management/commands/fix_glyphs.py
|
bundestag/api.offenegesetze.de
|
280673b9995a8a5c1fd01b1cb14dc0046599530f
|
[
"MIT"
] | 21
|
2020-02-11T23:17:52.000Z
|
2022-01-05T13:58:20.000Z
|
bgbl/management/commands/fix_glyphs.py
|
bundestag/api.offenegesetze.de
|
280673b9995a8a5c1fd01b1cb14dc0046599530f
|
[
"MIT"
] | 1
|
2018-12-11T20:17:09.000Z
|
2018-12-11T20:17:09.000Z
|
from glob import glob
import os
import shutil
from django.core.management.base import BaseCommand
from bgbl.pdf_utils import fix_glyphs, remove_watermark
class Command(BaseCommand):
help = 'Fix glyphs pdfs'
def add_arguments(self, parser):
parser.add_argument('doc_path', type=str)
def handle(self, *args, **options):
doc_path = options['doc_path']
if doc_path.endswith('.pdf'):
filenames = [doc_path]
else:
pattern = os.path.join(doc_path, '**/*.pdf')
filenames = glob(pattern, recursive=True)
for original_filename in filenames:
if original_filename.endswith(('_original.pdf', '_watermarked.pdf')):
continue
print('Fix glyphs', original_filename)
fixed_filename = fix_glyphs(original_filename)
real_filename = fixed_filename.replace('_fixed.pdf', '.pdf')
if os.path.exists(real_filename):
os.remove(real_filename)
shutil.move(fixed_filename, real_filename)
print('Adding meta data', real_filename)
remove_watermark(real_filename, force=True)
| 30.789474
| 81
| 0.638462
| 1,011
| 0.864103
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.11453
|
1f228e4d5652a96220edc4fa67e8ff6e9ecc91ac
| 657
|
py
|
Python
|
catalog/bindings/csw/time_topology_complex_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/time_topology_complex_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/time_topology_complex_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
from bindings.csw.abstract_time_complex_type import AbstractTimeComplexType
from bindings.csw.time_topology_primitive_property_type import (
TimeTopologyPrimitivePropertyType,
)
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class TimeTopologyComplexType(AbstractTimeComplexType):
"""
A temporal topology complex.
"""
primitive: List[TimeTopologyPrimitivePropertyType] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"min_occurs": 1,
},
)
| 26.28
| 75
| 0.703196
| 350
| 0.532725
| 0
| 0
| 361
| 0.549467
| 0
| 0
| 138
| 0.210046
|
1f251d95dc8853c21e444177f77e27a265f912f3
| 1,534
|
py
|
Python
|
maro/cli/grass/lib/services/node_api_server/blueprints/containers.py
|
yangboz/maro
|
0973783e55ca07bf8e177910c9d47854117a4ea8
|
[
"MIT"
] | 598
|
2020-09-23T00:50:22.000Z
|
2022-03-31T08:12:54.000Z
|
maro/cli/grass/lib/services/node_api_server/blueprints/containers.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 235
|
2020-09-22T10:20:48.000Z
|
2022-03-31T02:10:03.000Z
|
maro/cli/grass/lib/services/node_api_server/blueprints/containers.py
|
gx9702/maro
|
38c796f0a7ed1e0f64c299d96c6e0df032401fa9
|
[
"MIT"
] | 116
|
2020-09-22T09:19:04.000Z
|
2022-02-12T05:04:07.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from flask import Blueprint, abort, request
from ...utils.docker_controller import DockerController
from ...utils.exception import CommandExecutionError
# Flask related.
blueprint = Blueprint(name="container", import_name=__name__)
URL_PREFIX = "/v1/containers"
# Api functions.
@blueprint.route(f"{URL_PREFIX}", methods=["POST"])
def create_container():
"""Create a container, aka 'docker run'.
Returns:
None.
"""
try:
create_config = request.json
return DockerController.create_container_with_config(create_config=create_config)
except CommandExecutionError:
abort(400)
@blueprint.route(f"{URL_PREFIX}/<container_name>", methods=["DELETE"])
def delete_container(container_name: str):
"""Delete a container, aka 'docker rm'.
Args:
container_name (str): Name of the container.
Returns:
None.
"""
try:
DockerController.remove_container(container_name=container_name)
return {}
except CommandExecutionError:
abort(400)
@blueprint.route(f"{URL_PREFIX}/<container_name>:stop", methods=["POST"])
def stop_container(container_name: str):
"""Stop a container, aka 'docker stop'.
Args:
container_name (str): Name of the container.
Returns:
None.
"""
try:
DockerController.stop_container(container_name=container_name)
return {}
except CommandExecutionError:
abort(400)
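# Usage sketch (illustrative; assumes the blueprint is registered on a Flask app
# elsewhere, e.g. app.register_blueprint(blueprint)):
#   POST   /v1/containers                create a container from a JSON config
#   DELETE /v1/containers/<name>         remove a container
#   POST   /v1/containers/<name>:stop    stop a container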
| 22.895522
| 89
| 0.683833
| 0
| 0
| 0
| 0
| 1,167
| 0.760756
| 0
| 0
| 588
| 0.383312
|
1f25e26a1245bec906b2d218d9c2eb30d569b680
| 384
|
py
|
Python
|
examples/testlogger.py
|
ev3dev-python-tools/ev3devlogging
|
b98904d9d93059159637d87e5ebcab8ca093b0f1
|
[
"MIT"
] | null | null | null |
examples/testlogger.py
|
ev3dev-python-tools/ev3devlogging
|
b98904d9d93059159637d87e5ebcab8ca093b0f1
|
[
"MIT"
] | null | null | null |
examples/testlogger.py
|
ev3dev-python-tools/ev3devlogging
|
b98904d9d93059159637d87e5ebcab8ca093b0f1
|
[
"MIT"
] | null | null | null |
# Print to the log file, not shown on screen (written to stderr, which the ev3dev OS puts in the log file)
from ev3devlogger import log
#from ev3devlogger import timedlog as log
log("starwars song(log)")
from ev3devlogger import timedlog
timedlog("starwars song(timedlog)")
print("starwars (print)")
log("starwars song")
from ev3devlogger import timedlog as log
log("starwars song (timedlog as log)")
| 27.428571
| 86
| 0.770833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 238
| 0.619792
|
1f26b6eb6d6dcadfa381edb1a417fab9d0a51f97
| 5,305
|
py
|
Python
|
python_graphs/instruction_test.py
|
reshinthadithyan/python-graphs
|
1234c448cb38af44c963d5ef7f8d99f678028104
|
[
"Apache-2.0"
] | 53
|
2021-04-12T14:20:16.000Z
|
2022-03-29T02:47:30.000Z
|
python_graphs/instruction_test.py
|
reshinthadithyan/python-graphs
|
1234c448cb38af44c963d5ef7f8d99f678028104
|
[
"Apache-2.0"
] | 2
|
2021-09-08T16:37:34.000Z
|
2022-03-15T17:32:36.000Z
|
python_graphs/instruction_test.py
|
reshinthadithyan/python-graphs
|
1234c448cb38af44c963d5ef7f8d99f678028104
|
[
"Apache-2.0"
] | 14
|
2021-05-08T04:34:46.000Z
|
2022-01-16T12:58:16.000Z
|
# Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for instruction module."""
from absl.testing import absltest
import gast as ast
from python_graphs import instruction as instruction_module
def create_instruction(source):
node = ast.parse(source)
node = instruction_module._canonicalize(node)
return instruction_module.Instruction(node)
class InstructionTest(absltest.TestCase):
def test_instruction(self):
self.assertIsNotNone(instruction_module.Instruction)
def test_represent_same_program_basic_positive_case(self):
program1 = ast.parse('x + 1')
program2 = ast.parse('x + 1')
self.assertTrue(
instruction_module.represent_same_program(program1, program2))
def test_represent_same_program_basic_negative_case(self):
program1 = ast.parse('x + 1')
program2 = ast.parse('x + 2')
self.assertFalse(
instruction_module.represent_same_program(program1, program2))
def test_represent_same_program_different_contexts(self):
full_program1 = ast.parse('y = x + 1') # y is a write
program1 = full_program1.body[0].targets[0] # 'y'
program2 = ast.parse('y') # y is a read
self.assertTrue(
instruction_module.represent_same_program(program1, program2))
def test_get_accesses(self):
instruction = create_instruction('x + 1')
self.assertEqual(instruction.get_read_names(), {'x'})
self.assertEqual(instruction.get_write_names(), set())
instruction = create_instruction('return x + y + z')
self.assertEqual(instruction.get_read_names(), {'x', 'y', 'z'})
self.assertEqual(instruction.get_write_names(), set())
instruction = create_instruction('fn(a, b, c)')
self.assertEqual(instruction.get_read_names(), {'a', 'b', 'c', 'fn'})
self.assertEqual(instruction.get_write_names(), set())
instruction = create_instruction('c = fn(a, b, c)')
self.assertEqual(instruction.get_read_names(), {'a', 'b', 'c', 'fn'})
self.assertEqual(instruction.get_write_names(), {'c'})
def test_get_accesses_augassign(self):
instruction = create_instruction('x += 1')
self.assertEqual(instruction.get_read_names(), {'x'})
self.assertEqual(instruction.get_write_names(), {'x'})
instruction = create_instruction('x *= y')
self.assertEqual(instruction.get_read_names(), {'x', 'y'})
self.assertEqual(instruction.get_write_names(), {'x'})
def test_get_accesses_augassign_subscript(self):
instruction = create_instruction('x[0] *= y')
# This is not currently considered a write of x. It is a read of x.
self.assertEqual(instruction.get_read_names(), {'x', 'y'})
self.assertEqual(instruction.get_write_names(), set())
def test_get_accesses_augassign_attribute(self):
instruction = create_instruction('x.attribute *= y')
# This is not currently considered a write of x. It is a read of x.
self.assertEqual(instruction.get_read_names(), {'x', 'y'})
self.assertEqual(instruction.get_write_names(), set())
def test_get_accesses_subscript(self):
instruction = create_instruction('x[0] = y')
# This is not currently considered a write of x. It is a read of x.
self.assertEqual(instruction.get_read_names(), {'x', 'y'})
self.assertEqual(instruction.get_write_names(), set())
def test_get_accesses_attribute(self):
instruction = create_instruction('x.attribute = y')
# This is not currently considered a write of x. It is a read of x.
self.assertEqual(instruction.get_read_names(), {'x', 'y'})
self.assertEqual(instruction.get_write_names(), set())
def test_access_ordering(self):
instruction = create_instruction('c = fn(a, b + c, d / a)')
access_names_and_kinds = [(instruction_module.access_name(access),
instruction_module.access_kind(access))
for access in instruction.accesses]
self.assertEqual(access_names_and_kinds, [('fn', 'read'), ('a', 'read'),
('b', 'read'), ('c', 'read'),
('d', 'read'), ('a', 'read'),
('c', 'write')])
instruction = create_instruction('c += fn(a, b + c, d / a)')
access_names_and_kinds = [(instruction_module.access_name(access),
instruction_module.access_kind(access))
for access in instruction.accesses]
self.assertEqual(access_names_and_kinds, [('fn', 'read'), ('a', 'read'),
('b', 'read'), ('c', 'read'),
('d', 'read'), ('a', 'read'),
('c', 'read'), ('c', 'write')])
if __name__ == '__main__':
absltest.main()
| 42.782258
| 77
| 0.6541
| 4,374
| 0.824505
| 0
| 0
| 0
| 0
| 0
| 0
| 1,346
| 0.253723
|
1f26c8fd4ac1dfad9af1cf8e92f70fe641af8f00
| 6,521
|
py
|
Python
|
src/licensedcode/saneyaml.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/licensedcode/saneyaml.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/licensedcode/saneyaml.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from collections import OrderedDict
from functools import partial
import yaml
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader
from yaml import SafeDumper
"""
Wrapper around PyYAML to provide sane defaults, ensuring that dump/load does not
damage content, keeps ordering, always uses block style with four-space
indents for readable YAML, and quotes and folds text in a sane way.
Use the `load` function to get a primitive type from a YAML string and the
`dump` function to get a YAML string from a primitive type.
Load and dump rely on subclasses of SafeLoader and SafeDumper respectively doing
all the dirty bidding to get PyYAML straight.
"""
# Check:
# https://github.com/ralienpp/reyaml/blob/master/reyaml/__init__.py
# https://pypi.python.org/pypi/PyYAML.Yandex/3.11.1
# https://pypi.python.org/pypi/ruamel.yaml/0.9.1
# https://pypi.python.org/pypi/yaml2rst/0.2
def load(s):
"""
Return an object safely loaded from YAML string `s`. `s` must be unicode
or be a string that converts to unicode without errors.
"""
return yaml.load(s, Loader=SaneLoader)
def dump(obj):
"""
Return a safe YAML unicode string representation from `obj`.
"""
return yaml.dump(
obj,
Dumper=SaneDumper,
default_flow_style=False,
default_style=None,
canonical=False,
allow_unicode=True,
# do not encode as Unicode
encoding=None,
indent=4,
width=90,
line_break='\n',
explicit_start=False,
explicit_end=False,
)
class SaneLoader(SafeLoader):
pass
def string_loader(loader, node):
"""
Ensure that a scalar type (a value) is returned as a plain unicode string.
"""
return loader.construct_scalar(node)
SaneLoader.add_constructor(u'tag:yaml.org,2002:str', string_loader)
# Load most scalar types as strings: nulls, ints (such as in version 01),
# floats (such as version 2.20) and timestamps are all loaded as unicode
# strings. This avoids unwanted type conversions for unquoted strings and the
# resulting content damage. It overrides the implicit resolvers. Callers must
# handle type conversion explicitly, from unicode to other types, in the
# loaded objects.
SaneLoader.add_constructor(u'tag:yaml.org,2002:null', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:timestamp', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:float', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:int', string_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:null', string_loader)
# keep boolean conversion
# SaneLoader.add_constructor(u'tag:yaml.org,2002:boolean', string_loader)
def ordered_loader(loader, node):
"""
Ensure that YAML maps ordered is preserved and loaded in an OrderedDict.
"""
assert isinstance(node, yaml.MappingNode)
omap = OrderedDict()
yield omap
for key, value in node.value:
key = loader.construct_object(key)
value = loader.construct_object(value)
omap[key] = value
SaneLoader.add_constructor(u'tag:yaml.org,2002:map', ordered_loader)
SaneLoader.add_constructor(u'tag:yaml.org,2002:omap', ordered_loader)
class SaneDumper(SafeDumper):
"""
Ensure that lists items are always indented.
"""
def increase_indent(self, flow=False, indentless=False):
return super(SaneDumper, self).increase_indent(flow, indentless=False)
def ordered_dumper(dumper, data):
"""
Ensure that maps are always dumped in the items order.
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items())
SaneDumper.add_representer(OrderedDict, ordered_dumper)
def null_dumper(dumper, value):
"""
Always dump nulls as empty string.
"""
return dumper.represent_scalar(u'tag:yaml.org,2002:null', u'')
SaneDumper.add_representer(type(None), null_dumper)
def string_dumper(dumper, value, _tag=u'tag:yaml.org,2002:str'):
"""
Ensure that all scalars are dumped as UTF-8 unicode, folded and quoted in
the sanest and most readable way.
"""
if not isinstance(value, basestring):
value = repr(value)
if isinstance(value, str):
value = value.decode('utf-8')
style = None
multilines = '\n' in value
if multilines:
literal_style = '|'
style = literal_style
return dumper.represent_scalar(_tag, value, style=style)
SaneDumper.add_representer(str, string_dumper)
SaneDumper.add_representer(unicode, string_dumper)
SaneDumper.add_representer(int, partial(string_dumper, _tag=u'tag:yaml.org,2002:int'))
SaneDumper.add_representer(float, partial(string_dumper, _tag=u'tag:yaml.org,2002:float'))
def boolean_dumper(dumper, value):
"""
Dump booleans as yes or no.
"""
value = u'yes' if value else u'no'
style = None
return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style)
SaneDumper.add_representer(bool, boolean_dumper)
| 33.441026
| 90
| 0.73455
| 272
| 0.041711
| 363
| 0.055666
| 0
| 0
| 0
| 0
| 3,724
| 0.571078
|
1f272919a0358c21a01d9a8008881e0d63626d7a
| 14,383
|
py
|
Python
|
tsutsuji/gui_tsutsuji.py
|
konawasabi/tsutsuji-trackcomputer
|
04469a8a9872e8bad3d661c5911b9c881fab8ca9
|
[
"Apache-2.0"
] | 1
|
2022-03-14T00:35:05.000Z
|
2022-03-14T00:35:05.000Z
|
tsutsuji/gui_tsutsuji.py
|
konawasabi/tsutsuji-trackcomputer
|
04469a8a9872e8bad3d661c5911b9c881fab8ca9
|
[
"Apache-2.0"
] | null | null | null |
tsutsuji/gui_tsutsuji.py
|
konawasabi/tsutsuji-trackcomputer
|
04469a8a9872e8bad3d661c5911b9c881fab8ca9
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021-2022 konawasabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
'''
import sys
import pathlib
import os
import webbrowser
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog as filedialog
import tkinter.simpledialog as simpledialog
import tkinter.font as font
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib import rcParams
import matplotlib.gridspec
from PIL import Image
import numpy as np
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Sans', 'Yu Gothic', 'Meiryo', 'Takao', 'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']
from . import track_control
from . import drawcursor
from . import backimg
from . import measure
from ._version import __version__
class Catcher: # Catch exceptions raised inside tkinter callbacks
def __init__(self, func, subst, widget):
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except Exception as e:
            if not __debug__: # In debug mode (-O), pass the exception through; pdb will start
raise e
else:
                print(e) # In normal mode, show a dialog instead
tk.messagebox.showinfo(message=e)
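# Catcher takes effect once installed as tk.CallWrapper (done in main() below);
# tkinter then routes widget-callback exceptions through Catcher.__call__.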
class mainwindow(ttk.Frame):
def __init__(self, master):
super().__init__(master, padding='3 3 3 3')
self.master.title('Tsutsuji')
self.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
master.protocol('WM_DELETE_WINDOW', self.ask_quit)
self.backimgctrl = backimg.BackImgControl(self)
self.cursor = drawcursor.cursor(self)
self.measurewindow = measure.interface(self)
self.trackcontrol = track_control.TrackControl()
self.create_widgets()
self.create_menubar()
self.bind_keyevent()
def create_widgets(self):
font_title = font.Font(weight='bold',size=10)
        # Plot frame
self.canvas_frame = ttk.Frame(self, padding='3 3 3 3')
self.canvas_frame.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.fig_plane = plt.figure(figsize=(9,7),tight_layout=True)
gs1 = self.fig_plane.add_gridspec(nrows=1,ncols=1)
self.ax_plane = self.fig_plane.add_subplot(gs1[0])
self.plt_canvas_base = tk.Canvas(self.canvas_frame, bg="white", width=900, height=700)
self.plt_canvas_base.grid(row = 0, column = 0)
def on_canvas_resize(event):
self.plt_canvas_base.itemconfigure(self.fig_frame_id, width=event.width, height=event.height)
#print(event)
self.fig_frame = tk.Frame(self.plt_canvas_base)
self.fig_frame_id = self.plt_canvas_base.create_window((0, 0), window=self.fig_frame, anchor="nw")
self.fig_frame.columnconfigure(0, weight=1)
self.fig_frame.rowconfigure(0, weight=1)
self.plt_canvas_base.bind("<Configure>", on_canvas_resize)
self.fig_canvas = FigureCanvasTkAgg(self.fig_plane, master=self.fig_frame)
self.fig_canvas.draw()
self.fig_canvas.get_tk_widget().grid(row=0, column=0, sticky='news')
self.canvas_frame.columnconfigure(0, weight=1)
#self.canvas_frame.columnconfigure(1, weight=1)
self.canvas_frame.rowconfigure(0, weight=1)
#self.canvas_frame.rowconfigure(1, weight=1)
        # Button frame
self.button_frame = ttk.Frame(self, padding='3 3 3 3')
self.button_frame.grid(column=1, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
# ---
self.replot_btn = ttk.Button(self.button_frame, text="Replot", command = self.drawall)
self.replot_btn.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E))
self.plotarea_frame = ttk.Frame(self.button_frame, padding='3 3 3 3')
self.plotarea_frame.grid(column=0, row=1, sticky=(tk.N, tk.W, tk.E, tk.S))
self.plotarea_val_frame = ttk.Frame(self.plotarea_frame, padding='3 3 3 3')
self.plotarea_val_frame.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.viewpos_v = [tk.DoubleVar(value=0),tk.DoubleVar(value=0)]
self.viewp_scale_v = tk.DoubleVar(value=1000)
self.view_whole_v = tk.StringVar()
self.view_whole_v.set('False')
self.aspectratio_v = tk.DoubleVar(value=1)
self.viewp_x_l = ttk.Label(self.plotarea_val_frame, text='x')
self.viewp_y_l = ttk.Label(self.plotarea_val_frame, text='y')
self.viewp_sc_l = ttk.Label(self.plotarea_val_frame, text='scale')
self.viewp_asr_l = ttk.Label(self.plotarea_val_frame, text='Y mag.')
self.viewp_x_l.grid(column=0, row=0, sticky=(tk.E,tk.W))
self.viewp_y_l.grid(column=2, row=0, sticky=(tk.E,tk.W))
self.viewp_sc_l.grid(column=0, row=1, sticky=(tk.E,tk.W))
self.viewp_asr_l.grid(column=2, row=1, sticky=(tk.E,tk.W))
self.viewp_x_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.viewpos_v[0],width=5)
self.viewp_y_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.viewpos_v[1],width=5)
self.viewp_sc_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.viewp_scale_v,width=5)
self.view_whole_e = ttk.Checkbutton(self.plotarea_val_frame, text='Whole', variable=self.view_whole_v, onvalue='True', offvalue='False')
self.viewp_asr_e = ttk.Entry(self.plotarea_val_frame, textvariable=self.aspectratio_v,width=5)
self.viewp_x_e.grid(column=1, row=0, sticky=(tk.E,tk.W))
self.viewp_y_e.grid(column=3, row=0, sticky=(tk.E,tk.W))
self.viewp_sc_e.grid(column=1, row=1, sticky=(tk.E,tk.W))
self.viewp_asr_e.grid(column=3, row=1, sticky=(tk.E,tk.W))
self.view_whole_e.grid(column=0, row=3, sticky=(tk.E,tk.W))
# ---
self.plotmove_frame = ttk.Frame(self.plotarea_frame, padding='3 3 3 3')
self.plotmove_frame.grid(column=0, row=1, sticky=(tk.N, tk.W, tk.E, tk.S))
self.plotmove_btn_up = ttk.Button(self.plotmove_frame, text="↑", command = lambda: self.move_xy(0,-1))
self.plotmove_btn_down = ttk.Button(self.plotmove_frame, text="↓", command = lambda: self.move_xy(0,1))
self.plotmove_btn_left = ttk.Button(self.plotmove_frame, text="←", command = lambda: self.move_xy(-1,0))
self.plotmove_btn_right = ttk.Button(self.plotmove_frame, text="→", command = lambda: self.move_xy(1,0))
self.plotmove_btn_up.grid(column=1, row=0, sticky=(tk.E,tk.W))
self.plotmove_btn_down.grid(column=1, row=2, sticky=(tk.E,tk.W))
self.plotmove_btn_left.grid(column=0, row=1, sticky=(tk.E,tk.W))
self.plotmove_btn_right.grid(column=2, row=1, sticky=(tk.E,tk.W))
# ---
self.measure_btn = ttk.Button(self.button_frame, text="Measure", command = self.measure)
self.measure_btn.grid(column=0, row=2, sticky=(tk.N, tk.W, tk.E))
self.getrelrad_btn = ttk.Button(self.button_frame, text="Generate", command = self.get_relativepos_rad)
self.getrelrad_btn.grid(column=0, row=3, sticky=(tk.N, tk.W, tk.E))
if not __debug__:
self.printtracks_btn = ttk.Button(self.button_frame, text="P. Tracks", command = self.trackcontrol.dump_trackdata)
self.printtracks_btn.grid(column=0, row=4, sticky=(tk.N, tk.W, tk.E))
self.printpos_btn = ttk.Button(self.button_frame, text="P. Pos", command = self.draw_tracks_cp)
self.printpos_btn.grid(column=0, row=5, sticky=(tk.N, tk.W, tk.E))
        # Settings for window resizing
self.columnconfigure(0, weight=1)
#self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=1)
def create_menubar(self):
self.master.option_add('*tearOff', False)
self.menubar = tk.Menu(self.master)
self.menu_file = tk.Menu(self.menubar)
self.menu_backimg = tk.Menu(self.menubar)
self.menu_help = tk.Menu(self.menubar)
self.menubar.add_cascade(menu=self.menu_file, label='ファイル')
self.menubar.add_cascade(menu=self.menu_backimg, label='背景画像')
self.menubar.add_cascade(menu=self.menu_help, label='ヘルプ')
self.menu_file.add_command(label='開く...', command=self.opencfg, accelerator='Control+O')
self.menu_file.add_command(label='リロード', command=self.reloadcfg, accelerator='F5')
self.menu_file.add_separator()
self.menu_file.add_command(label='終了', command=self.ask_quit, accelerator='Alt+F4')
self.menu_backimg.add_command(label='Window...', command=self.backimgctrl.create_window)
self.menu_backimg.add_separator()
self.menu_backimg.add_command(label='Load...', command=self.backimgctrl.load_setting)
self.menu_backimg.add_command(label='Save...', command=self.backimgctrl.save_setting)
self.menu_help.add_command(label='ヘルプ...', command=self.open_webdocument)
self.menu_help.add_command(label='Tsutsujiについて...', command=self.aboutwindow)
self.master['menu'] = self.menubar
def bind_keyevent(self):
self.bind_all("<Control-o>", self.opencfg)
self.bind_all("<F5>", self.reloadcfg)
self.bind_all("<Alt-F4>", self.ask_quit)
def ask_quit(self, event=None, ask=True):
if ask:
if tk.messagebox.askyesno(message='Tsutsuji を終了しますか?'):
self.quit()
else:
self.quit()
def opencfg(self, event=None, in_dir=None):
        inputdir = filedialog.askopenfilename() if in_dir is None else in_dir
print('loading',inputdir)
self.trackcontrol.loadcfg(inputdir)
self.trackcontrol.loadmap()
if self.trackcontrol.conf.general['backimg'] is not None:
self.backimgctrl.load_setting(path = self.trackcontrol.conf.general['backimg'])
elif self.backimgctrl.conf_path is not None:
self.backimgctrl.load_setting(path = self.backimgctrl.conf_path)
self.measurewindow.reload_trackkeys()
self.drawall()
def reloadcfg(self, event=None):
if self.trackcontrol.path is not None:
self.opencfg(event=event,in_dir=self.trackcontrol.path)
def draw2dplot(self):
self.ax_plane.cla()
self.trackcontrol.plot2d(self.ax_plane)
self.fig_canvas.draw()
def drawall(self):
self.ax_plane.cla()
self.trackcontrol.plot2d(self.ax_plane)
self.measurewindow.drawall()
if self.view_whole_v.get() == 'True':
imgarea = self.backimgctrl.imgsarea()
imgarea = self.trackcontrol.drawarea(imgarea)
self.ax_plane.set_xlim(imgarea[0],imgarea[1])
self.ax_plane.set_ylim(imgarea[2],imgarea[3])
else:
center = [self.viewpos_v[0].get(),self.viewpos_v[1].get()]
            #windowratio = self.ax_plane.bbox.height/self.ax_plane.bbox.width # Get the aspect ratio of the plan view
windowratio = 1/self.aspectratio_v.get()*7/9
scalex = self.viewp_scale_v.get()
scaley = windowratio * scalex
self.ax_plane.set_xlim(center[0]-scalex/2, center[0]+scalex/2)
self.ax_plane.set_ylim(center[1]-scaley/2, center[1]+scaley/2)
for i in self.backimgctrl.imgs.keys():
self.backimgctrl.imgs[i].show(self.ax_plane,as_ratio=7/9,ymag=self.aspectratio_v.get())
self.ax_plane.invert_yaxis()
self.fig_canvas.draw()
def move_xy(self,x,y):
nowpos = [self.viewpos_v[0].get(),self.viewpos_v[1].get()]
windowratio = 1/self.aspectratio_v.get()*7/9
scalex = self.viewp_scale_v.get()
scaley = windowratio * scalex
self.viewpos_v[0].set(nowpos[0] + x*scalex/5)
self.viewpos_v[1].set(nowpos[1] + y*scaley/5)
self.drawall()
def measure(self):
self.measurewindow.create_widgets()
def draw_tracks_cp(self):
self.trackcontrol.plot_controlpoints(self.ax_plane)
self.fig_canvas.draw()
def get_relativepos_rad(self):
self.trackcontrol.generate_mapdata()
def aboutwindow(self, event=None):
msg = 'Tsutsuji trackcomputer\n'
msg += 'Version '+__version__+'\n\n'
msg += 'Copyright © 2022 konawasabi\n'
msg += 'Released under the Apache License, Version 2.0 .\n'
msg += 'https://www.apache.org/licenses/LICENSE-2.0'
tk.messagebox.showinfo(message=msg)
def open_webdocument(self, event=None):
webbrowser.open('https://konawasabi.github.io/tsutsuji-trackcomputer/')
def sendtopmost(self,event=None):
self.master.lift()
self.master.focus_force()
def main():
if not __debug__:
        # If an error occurs, launch the debugger: https://gist.github.com/podhmo/5964702e7471ccaba969105468291efa
def info(type, value, tb):
if hasattr(sys, "ps1") or not sys.stderr.isatty():
# You are in interactive mode or don't have a tty-like
# device, so call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, pdb
# You are NOT in interactive mode; print the exception...
traceback.print_exception(type, value, tb)
# ...then start the debugger in post-mortem mode
pdb.pm()
sys.excepthook = info
print('Debug mode')
tk.CallWrapper = Catcher
root = tk.Tk()
app = mainwindow(master=root)
if len(sys.argv)>1:
app.opencfg(in_dir=sys.argv[1])
app.mainloop()
| 43.453172
| 144
| 0.643398
| 12,374
| 0.84326
| 0
| 0
| 0
| 0
| 0
| 0
| 2,320
| 0.158103
|
1f2ccc8e4139330b0b1a1e4de76035b03e5fa0d0
| 1,011
|
py
|
Python
|
extra/uniq.py
|
JarryShaw/darc
|
0fc8782bb2f641ca3734c94666cbc36e3d9cb09f
|
[
"BSD-3-Clause"
] | 24
|
2020-07-08T06:16:52.000Z
|
2022-02-19T00:33:34.000Z
|
extra/uniq.py
|
JarryShaw/darc
|
0fc8782bb2f641ca3734c94666cbc36e3d9cb09f
|
[
"BSD-3-Clause"
] | 42
|
2020-05-29T12:56:10.000Z
|
2022-03-07T17:12:08.000Z
|
extra/uniq.py
|
JarryShaw/darc
|
0fc8782bb2f641ca3734c94666cbc36e3d9cb09f
|
[
"BSD-3-Clause"
] | 7
|
2020-07-11T18:57:24.000Z
|
2022-02-01T21:46:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tempfile
def is_in(line: str, dest: str) -> bool:
if os.path.isfile(dest):
with open(dest) as file:
for content in filter(None, map(lambda s: s.strip(), file)):
if line == content:
return True
return False
def uniq(path: str, tempdir: str) -> None:
name = os.path.split(path)[1]
dest = os.path.join(tempdir, '%s.tmp' % name)
with open(path) as file:
for line in filter(None, map(lambda s: s.strip(), file)):
if line.startswith('#'):
continue
if is_in(line, dest):
continue
with open(dest, 'at') as out_file:
print(line, file=out_file)
os.rename(dest, path)
def main() -> int:
with tempfile.TemporaryDirectory() as tempdir:
for path in sys.argv[1:]:
uniq(path, tempdir)
return 0
if __name__ == "__main__":
sys.exit(main())
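# Usage sketch: deduplicates one or more text files in place, e.g.
#   python uniq.py list1.txt list2.txt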
| 24.071429
| 72
| 0.547972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.069238
|
1f2d5d68906150aa022de6e4c0b468cf3688673c
| 353
|
py
|
Python
|
tests.py
|
B1Z0N/turingmachine
|
4c6761ee52fd05071d675a8cab8558025a5c26d9
|
[
"MIT"
] | null | null | null |
tests.py
|
B1Z0N/turingmachine
|
4c6761ee52fd05071d675a8cab8558025a5c26d9
|
[
"MIT"
] | 3
|
2020-03-24T16:53:31.000Z
|
2021-02-02T21:58:25.000Z
|
tests.py
|
B1Z0N/turingmachine
|
4c6761ee52fd05071d675a8cab8558025a5c26d9
|
[
"MIT"
] | null | null | null |
"""
Script that runs all tests written
"""
import os
import pathlib
import pytest
cwd = pathlib.Path.cwd
os.chdir(cwd() / "tests")
def subfolders(dir):
return [x[0] for x in os.walk(dir)][1:] # without current directory
for subf in subfolders(cwd()):
if not subf.endswith("__pycache__"):
os.chdir(subf)
pytest.main()
| 13.576923
| 72
| 0.645892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.25779
|
1f2da5398cfdb995da864f3b7f84a89bc1c2fda5
| 7,933
|
py
|
Python
|
sandbox/straws/loadstraws.py
|
mustaric/lambda-tess-search
|
1d48133f32c8a073cba5d221f30c2d44e8d06e4b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-26T14:35:22.000Z
|
2020-05-12T15:26:27.000Z
|
sandbox/straws/loadstraws.py
|
mustaric/lambda-tess-search
|
1d48133f32c8a073cba5d221f30c2d44e8d06e4b
|
[
"BSD-3-Clause"
] | 7
|
2019-06-26T20:52:14.000Z
|
2020-12-16T21:08:20.000Z
|
sandbox/straws/loadstraws.py
|
mustaric/lambda-tess-search
|
1d48133f32c8a073cba5d221f30c2d44e8d06e4b
|
[
"BSD-3-Clause"
] | 2
|
2019-06-26T20:24:11.000Z
|
2020-05-12T19:36:04.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2018 Orbital Insight Inc., all rights reserved.
# Contains confidential and trade secret information.
# Government Users: Commercial Computer Software - Use governed by
# terms of Orbital Insight commercial license agreement.
"""
Created on Tue Oct 22 21:22:36 2019
@author: fergal
"""
from __future__ import print_function
from __future__ import division
import boto3
import numpy as np
import json
import os
import io
import common
class LoadTessCube(object):
"""
Load a datacube of TESS imagery from straws stored on disk.
"""
def __init__(self, path, sector):
#Set path to None for some testing
if path is not None:
self.path = path
self.sector = sector
self.loadMetadata()
def __repr__(self):
return "<TessCube object for sector %s. Data at %s>" %(self.sector, self.path)
def __call__(self, camera, ccd, col, row):
return self.get(camera, ccd, col, row, 20)
def loadMetadata(self):
"""Load metadata on the straws stored in `path`
Metadata is stored in a json file and contains details like ccd sizes,
number of cadences, strawsize, etc.
"""
sectorStr = "sector%02i" %(self.sector)
fn = os.path.join(self.path, sectorStr, common.METADATA_FILE)
with open(fn) as fp:
props = json.load(fp)
assert self.sector == props['sector']
self.setMetadataFromDict(props)
def setMetadataFromDict(self, props):
self.__dict__.update(props)
self.nCols, self.nRows = self.nColsRows
self.nCadences = len(self.datestampList)
def getMidTimestamps(self):
"""Return the cadence mid times as stored in the metadata
See make straws for the details of how this value is calculated
"""
try:
timestamps = self.midtimes_tbjd
except AttributeError:
raise AttributeError("metadata doesn't contain timestamps")
return np.array(timestamps)
def getRelativeCadenceNumbers(self):
"""Return a integers from zero to length of datacube"""
return np.arange(self.nCadences, dtype=int)
def get(self, camera, ccd, col, row, min_size_pix=None):
"""Get a data cube
        The data cube is guaranteed to be square and at least `min_size_pix`
        on a side. However, because it constructs a datacube whose bounding
        box aligns with the straws it is reading data from, the actual size
may be larger than `min_size_pix`, and the requested (`col`, `row`)
may not be at the centre of the image.
Inputs
-------------
camera, ccd, col, row
(int) Properties of the straw. col and row refer to coordinates of
the bottom-left corner of the straw.
Optional Inputs
-----------------
min_size_pix
(int) Minimum width and height of the returned datacube
Returns
-----------
cube
(np 3d array) of shape (nCadence, nRows, nCols)
target_col, target_row
(float) The index in `image` corresponding to (`col`, `row`).
For example, if the request is for a 30x30 pixel stamp around
        the position cr = 301, 602, the resulting target_col, _row might be
(1,2)
"""
if min_size_pix is None:
min_size_pix = self.strawSize
c0, c1, r0, r1 = self.pickBbox(col, row, min_size_pix)
colSize = c1 - c0
rowSize = r1 - r0
image = np.empty( (self.nCadences, rowSize, colSize) )
ds = self.strawSize
for i in range(c0, c1, ds):
for j in range(r0, r1, ds):
straw = self.getStraw(camera, ccd, i, j)
assert straw.shape == (self.nCadences, ds, ds)
dCol = i - c0
dRow = j - r0
sc = slice(dCol, dCol + ds)
sr = slice(dRow, dRow + ds)
image[:, sr, sc] = straw
target_col = col - c0
target_row = row - r0
return image, target_col, target_row
def pickBbox(self, col, row, size_pix):
"""Pick the bounding box around (col, row) for the returned data cube
The bounding box will be
* square
* The width will be > `size_pix`
        * The width will be an integer multiple of the `strawSize`
Inputs
-------
col, row
(float) Location of centre of region of interest
size_pix
(int) Minimum size of returned bounding box. The bounding box
will probably be bigger than this request.
Returns
----------
4-tuple of col and row values defining the bounding box.
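        Example
        ----------
        Illustrative only: assuming a strawSize of 50,
        pickBbox(301, 602, 30) rounds outward to straw boundaries
        and returns (250, 350, 550, 650).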
"""
if not self.isInBounds(col, row):
raise ValueError("Requested col,row (%g, %g) is out of bounds" %(col, row))
assert(size_pix > 0)
ds = .5 * size_pix
c0 = common.roundToNearestBelow(max(col-ds, 0), self.strawSize)
c1 = common.roundToNearestAbove(min(col+ds, self.nCols), self.strawSize)
r0 = common.roundToNearestBelow(max(row-ds, 0), self.strawSize)
r1 = common.roundToNearestAbove(min(row+ds, self.nRows), self.strawSize)
return c0, c1, r0, r1
def isInBounds(self, col, row):
"""Test if the requested col,row actually fall on disk
Inputs
-------------
col, row
(int)
Returns
----------
boolean
"""
if col < 0 or col >= self.nCols:
return False
if row < 0 or row >= self.nRows:
return False
return True
def getStraw(self, camera, ccd, col, row):
""" Load a straw from disk
Inputs
-------------
camera, ccd, col, row
(int) Properties of the straw. col and row refer to coordinates of
the bottom-left corner of the straw.
"""
longPath, fn = common.makeStrawName(self.path,
self.sector,
camera,
ccd,
col,
row)
straw = self.loadStrawFromUri(longPath, fn)
return straw
def loadStrawFromUri(self, strawPath, fn):
if not os.path.exists(strawPath):
raise IOError("Path %s not found" %(strawPath))
fn = os.path.join(strawPath, fn)
if not os.path.exists(fn):
raise IOError("File %s not found" %(fn))
return np.load(fn)
class LoadTessCubeS3(LoadTessCube):
"""Load straws from S3 instead of a local disk"""
def __init__(self, bucket, path, sector, region='us-east-1'):
#bucket is a string. self.bucket is an object
self.bucketName = bucket
self.s3 = boto3.resource('s3', region_name=region)
self.path = path
self.sector = sector
self.loadMetadata()
def loadStrawFromUri(self, strawPath, fn):
#boto stuff goes here
uri = os.path.join(strawPath, fn)
obj = self.s3.Object(self.bucketName, uri)
thebytes = obj.get()['Body'].read()
return np.load(io.BytesIO(thebytes))
def loadMetadata(self):
"""Load metadata on the straws stored in `path`
Metadata is stored in a json file and contains details like ccd sizes,
number of cadences, strawsize, etc.
"""
uri = os.path.join(self.path, "sector%02i" % self.sector, common.METADATA_FILE)
print(uri)
obj = self.s3.Object(self.bucketName, uri)
print(obj)
thebytes = obj.get()['Body'].read()
props = json.loads(thebytes)
assert self.sector == props['sector']
self.setMetadataFromDict(props)
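# Usage sketch (illustrative bucket/path/sector values):
#   cube = LoadTessCubeS3('my-tess-bucket', 'straws', sector=14)
#   image, col, row = cube.get(camera=1, ccd=2, col=301, row=602, min_size_pix=30)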
| 30.511538
| 87
| 0.573427
| 7,446
| 0.938611
| 0
| 0
| 0
| 0
| 0
| 0
| 3,474
| 0.437918
|
1f2febb0be1e8f771610a3b24e1a5429336b73dd
| 508
|
py
|
Python
|
vbench/scripts/vb_run_benchmarks.py
|
vene/vbench
|
77989fa0d3c45e63f576968d206021ffee72a24c
|
[
"MIT"
] | 1
|
2017-08-12T03:11:42.000Z
|
2017-08-12T03:11:42.000Z
|
vbench/scripts/vb_run_benchmarks.py
|
vene/vbench
|
77989fa0d3c45e63f576968d206021ffee72a24c
|
[
"MIT"
] | null | null | null |
vbench/scripts/vb_run_benchmarks.py
|
vene/vbench
|
77989fa0d3c45e63f576968d206021ffee72a24c
|
[
"MIT"
] | null | null | null |
import sys
import traceback
import cPickle as pickle
if len(sys.argv) != 3:
print 'Usage: script.py input output'
sys.exit()
in_path, out_path = sys.argv[1:]
benchmarks = pickle.load(open(in_path))
results = {}
for bmk in benchmarks:
try:
res = bmk.run()
results[bmk.checksum] = res
except Exception:
print >> sys.stderr, 'Exception in benchmark %s:' % bmk.name
traceback.print_exc()
continue
pickle.dump(results, open(out_path, 'w'))
| 22.086957
| 68
| 0.647638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.122047
|
1f2ff6c7d9e8ddf98bfe8be97cd8f3e7685911e4
| 6,445
|
py
|
Python
|
problems/kitchencombinatorics/data/secret/gendata.py
|
stoman/CompetitiveProgramming
|
0000b64369b50e31c6f48939e837bdf6cece8ce4
|
[
"MIT"
] | 2
|
2020-12-22T13:21:25.000Z
|
2021-12-12T22:26:26.000Z
|
problems/kitchencombinatorics/data/secret/gendata.py
|
stoman/CompetitiveProgramming
|
0000b64369b50e31c6f48939e837bdf6cece8ce4
|
[
"MIT"
] | null | null | null |
problems/kitchencombinatorics/data/secret/gendata.py
|
stoman/CompetitiveProgramming
|
0000b64369b50e31c6f48939e837bdf6cece8ce4
|
[
"MIT"
] | null | null | null |
import sys
import random
MAX_R = 1000
MAX_D = 25
MAX_K = 20
MAX_C = 2000
MAX_B = 100
case_no = 1
def next_file(suffix=None, desc=None):
global case_no
basename = '%02d' % case_no if suffix is None else '%02d-%s' % (case_no, suffix)
f = open(basename + '.in', 'w')
if desc is not None:
with open(basename + '.desc', 'w') as df:
df.write('%s\n' % desc)
case_no += 1
return f
def write_case(ingreds, starters, mains, desserts, conflicts, suffix=None, desc=None):
f = next_file(suffix, desc)
f.write('%d %d %d %d %d\n' % (len(ingreds), len(starters), len(mains), len(desserts), len(conflicts)))
f.write('%s\n' % ' '.join(map(str, ingreds)))
for d in starters + mains + desserts:
f.write('%d %s\n' % (len(d), ' '.join(map(str, d))))
for (a,b) in conflicts:
f.write('%d %d\n' % (a,b))
f.close()
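# Illustrative shape of the file written by write_case (values are examples):
#   r s m d c        counts of ingredients, starters, mains, desserts, conflicts
#   b_1 ... b_r      ingredient costs
#   k i_1 ... i_k    one line per starter, main and dessert: size, then indices
#   a b              one line per conflicting pair of dishes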
def random_dish(r, min_k, max_k):
idxs = range(1,r+1)
k = random.randint(min_k, max_k)
random.shuffle(idxs)
return idxs[:k]
def gen_random(r, s, m, d, c, max_b, max_k, min_b=1, min_k=1):
ingreds = [random.randint(min_b, max_b) for i in range(r)]
dishes = [random_dish(r, min_k, max_k) for i in range(s+m+d)]
conf = []
for i in range(c):
while True:
t1 = random.randint(0,2)
t2 = (t1+random.randint(1,2)) % 3
a = random.randint(1, [s,m,d][t1]) + [0,s,s+m][t1]
b = random.randint(1, [s,m,d][t2]) + [0,s,s+m][t2]
if (a,b) not in conf and (b,a) not in conf:
break
conf.append((a,b))
write_case(ingreds, dishes[:s], dishes[s:s+m], dishes[s+m:], conf, suffix='random', desc='random case with %d ingreds, %d starters, %d mains, %d desserts, %d conflicts' % (r, s, m, d, c))
def gen_special():
# answer = 10^18
ingreds = [5]*18 + [2]*18
random.shuffle(ingreds)
s = m = d = 1
starters = [list(set(range(1,13) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(s)]
mains = [list(set(range(13,25) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(m)]
desserts = [list(set(range(25,37) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(d)]
write_case(ingreds, starters, mains, desserts, [], suffix='maxans', desc='answer = 10^18')
# answer = 19*10^18 (19 terms of value 10^18, should take out some overflow errs)
ingreds = [5]*18 + [2]*18
random.shuffle(ingreds)
    ingreds = ingreds + [1]*42
s = 1
m = 1
d = 19
starters = [list(set(range(1,13) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(s)]
mains = [list(set(range(13,25) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(m)]
desserts = [list(set(range(25,37) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(d)]
write_case(ingreds, starters, mains, desserts, [], suffix='overflow', desc='answer = 19*10^18 -- 19 terms of value 10^18')
# answer = 10^18 (but with 25x25x25 dish combos)
# note this has a bunch of constants hard-coded to MAX_D=25
ingreds = [5]*12 + [2]*18
random.shuffle(ingreds)
ingreds = ingreds + [1]*123
s = m = d = MAX_D
starters = [list(set(range(1,11) + random_dish(len(ingreds), 1, MAX_K-10))) for i in range(s)]
mains = [list(set(range(11,21) + random_dish(len(ingreds), 1, MAX_K-10))) for i in range(m)]
desserts = [list(set(range(21,31) + random_dish(len(ingreds), 1, MAX_K-10))) for i in range(d)]
write_case(ingreds, starters, mains, desserts, [], suffix='maxans', desc='ans = 10^18, coming from 25^3 different dish combos')
# answer = 10^18+1
ingreds = [5]*18 + [2]*18
random.shuffle(ingreds)
ingreds = ingreds + [1]*(3*MAX_K)
s = m = d = 1
starters = [list(set(range(1,13) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(s)] + [range(37, 37+MAX_K)]
mains = [list(set(range(13,25) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(m)] + [range(37+MAX_K, 37+2*MAX_K)]
desserts = [list(set(range(25,37) + random_dish(len(ingreds), 1, MAX_K-12))) for i in range(d)] + [range(37+2*MAX_K, 37+3*MAX_K)]
conf = [(1,4), (6, 3), (2, 5)]
write_case(ingreds, starters, mains, desserts, conf, suffix='overflow', desc='ans = 10^18 + 1')
gen_1875()
# overflow, but all partial products when multiplied as longs are
# within range
ingreds = [92, 92, 92, 92, 79, 92, 92, 92, 92, 92]
starters = [random_dish(len(ingreds), 1, MAX_K)]
mains = [range(1, len(ingreds)+1)]
desserts = [random_dish(len(ingreds), 1, MAX_K)]
write_case(ingreds, starters, mains, desserts, [], suffix='overflow', desc='overflow, but when computing with signed 64-bit integers,\neach partial product is larger than the previous, and smaller than 10^18')
# similar case as before but kills a solution that (for whatever
    # reason...) multiplies the numbers from largest to smallest
ingreds = [38, 38, 38, 38, 38, 80, 38, 38, 38, 38, 38, 38]
write_case(ingreds, starters, mains, desserts, [], suffix='overflow', desc='overflow, but when computing with signed 64-bit integers,\neach partial product is larger than the previous, and smaller than 10^18')
def gen_1875():
# answer = 0
ingreds = [1]*1
starters = [[1]]*25
mains = starters
desserts = starters
conf=[];
for i in range(1,26):
for j in range(1,26):
conf.extend([(i,j+25), (i,j+50), (i+25, j+50)])
write_case(ingreds, starters, mains, desserts, conf, suffix='maxconf', desc='all pairs of dishes in conflict')
random.seed(42)
gen_special()
gen_random(17, 5, 9, 8, 23, 5, 11)
for i in range(25):
r = random.randint(1, MAX_R)
s = random.randint(1, MAX_D)
m = random.randint(1, MAX_D)
d = random.randint(1, MAX_D)
max_c = min(s*m+m*d+s*d, MAX_C)
c = random.randint(0, max_c)
max_b = random.randint(1, 20)
max_k = random.randint(1, MAX_K)
gen_random(r, s, m, d, c, max_b, max_k)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, 0, 2, MAX_K, min_k=MAX_K)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, 0, MAX_B, MAX_K, min_k=MAX_K, min_b=MAX_B)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3/2, 2, MAX_K, min_k=MAX_K)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3/2, MAX_B, MAX_K, min_k=MAX_K, min_b=MAX_B)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3-3*MAX_D, 2, MAX_K, min_k=MAX_K)
gen_random(MAX_R, MAX_D, MAX_D, MAX_D, MAX_D*MAX_D*3-3*MAX_D, MAX_B, MAX_K, min_k=MAX_K, min_b=MAX_B)
| 42.401316
| 213
| 0.621257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,106
| 0.171606
|
1f30e7564e6c5decd42ff9ef937b6271af7e25ce
| 8,797
|
py
|
Python
|
MISC/opt_omega_ip.py
|
PHOTOX/photoxrepo
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
[
"MIT"
] | 4
|
2015-03-27T09:12:44.000Z
|
2022-01-18T08:45:29.000Z
|
MISC/opt_omega_ip.py
|
PHOTOX/photoxrepo
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
[
"MIT"
] | 5
|
2015-01-06T22:08:58.000Z
|
2021-04-12T07:56:34.000Z
|
MISC/opt_omega_ip.py
|
PHOTOX/photoxrepo
|
83ad3813e9c52926e6387afc76813e99d430a5f3
|
[
"MIT"
] | 2
|
2019-09-02T11:43:32.000Z
|
2022-01-18T08:45:30.000Z
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.getcwd())
import abinitio_driver as driver
from abinitio_driver import AUtoEV
import scipy.optimize as opt
from scipy.interpolate import interp1d
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
pass
# This is the driver script for omega tuning of long-range functionals such as BNL or wPBE
# The interface to ab initio programs is in separate file abinitio_driver.py
# and currently supports QCHEM and TeraChem
# Initial input files for ground and ionized state should be in files:
# optomega_gs.inp and optomega_is.inp
# OR
# optomega_scf.inp and optomega_na.inp in case you choose the "QCHEM_IEDC" PROGRAM option
# This file can be directly submitted to the queue: qsub -V -cwd opt_omega_ip.py aq/nq
#For further details see our wiki pages...
####### USER INPUT PARAMETERS ############################
#PROGRAM = "QCHEM"
PROGRAM = "QCHEM_PCM"
#PROGRAM = "QCHEM_IEDC"
#PROGRAM = "QCHEM_IEDC_PCM"
#PROGRAM = "TERACHEM"
METHOD = 1
# 0 - minimization
# 1 - interpolation
# 2 - read omega-deltaIP function from file omegas.dat and interpolate
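# For METHOD 2, omegas.dat is expected to hold two whitespace-separated columns
# (illustrative values; an optional leading comment line is allowed):
#   # omega  deltaIP
#   200   0.0123
#   300  -0.0045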
# Options for interpolation
MIN_OMEGA = 200
BEST_GUESS = 300
MAX_OMEGA = 400
STEP = 20
# for interpolation, one needs at least 2 starting points
# i.e. (MAX_OMEGA-MIN_OMEGA)/STEP >=2
# of course, this inequality should hold as well: MIN_OMEGA < BEST_GUESS < MAX_OMEGA
# OPTIONS for minimizer
# accuracy and maximum iterations for the minimizer
THR_OMEGA = 10.000 # absolute accuracy, omega*1000
MAXITER = 20
# These are bounds for the minimizer, can be tighter if you know where to look
MIN_OMEGA_DEF = 10
MAX_OMEGA_DEF = 250
####### END OF USER INPUT #########################################
# Whether to check SCF convergence (implemented only for TC at the moment)
driver.CHECK_SCF = True
if BEST_GUESS <= MIN_OMEGA or BEST_GUESS >= MAX_OMEGA:
print("ERROR:Incorrect input value for BEST_GUESS")
sys.exit(1)
if METHOD == 1 and (MAX_OMEGA-MIN_OMEGA)/STEP < 1:
print("ERROR: Wrong initial interpolation interval. I need at least 2 initial points")
print("Adjust MIN_OMEGA or MAX_OMEGA or STEP")
sys.exit(1)
def minimize(min_omega, max_omega, thr_omega):
"""Minimization of a general univariate function"""
# http://docs.scipy.org/doc/scipy/reference/optimize.html
try:
res = opt.minimize_scalar(f_optomega_ip,method="bounded",bounds=(MIN_OMEGA_DEF, MAX_OMEGA_DEF), \
options={"xatol":thr_omega,"maxiter": MAXITER,"disp": True})
except NameError:
print("Whoops, you probably have old version of SciPy that does not have minimize_scalar!")
print("Use interpolation instead and comment out this code!")
raise
print(res)
if "success" in res:
suc = res.success # older scipy versions do not have this attribute
else:
suc = True
if suc == True:
return res.x
else:
print("Minimization probably did not converge! Check results carefully.")
sys.exit(2)
def f_optomega_ip(omega):
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
elif PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
elif PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
elif PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
elif PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
f = (IP_dscf - IP_koop)**2
return f
def interpolate(min_omega, max_omega, step, best_guess):
"""Interpolate for fixed omega range using cubic spline
Then find the root."""
omega = min_omega
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
elif PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
elif PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
elif PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
elif PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
deltaIP = []
omegas = []
# Initial points for interpolation, determined by the user via MAX_OMEGA, MIN_OMEGA and STEP
while omega <= max_omega:
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.append(IP_dscf-IP_koop)
omegas.append(omega)
omega += step
# Check whether deltaIP crosses zero
# If not, extend the interpolation interval
# This assumes a monotonic dependence of deltaIP on omega
while deltaIP[0] * deltaIP[-1] > 0:
if (deltaIP[-1] < deltaIP[-2] and deltaIP[-1] > 0) \
or (deltaIP[-1] > deltaIP[-2] and deltaIP[-1] < 0):
best_guess = omegas[-1] + step / 2.0
omega = omegas[-1] + step
omegas.append(omega)
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.append(IP_dscf-IP_koop)
else:
best_guess = omegas[0] - step / 2.0
omega = omegas[0] - step
omegas.insert(0,omega)
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
deltaIP.insert(0,IP_dscf-IP_koop)
# Interpolate the computed points
if len(omegas) >=4:
f_omega = interp1d(omegas, deltaIP, kind='cubic')
elif len(omegas) == 3:
f_omega = interp1d(omegas, deltaIP, kind='quadratic')
elif len(omegas) == 2:
f_omega = interp1d(omegas, deltaIP, kind='linear')
else:
print("ERROR: I need at least 2 points for interpolation, and I only got "+str(len(omegas)))
sys.exit(1)
# Plot the interpolated function for later inspection
try:
x = [ x + omegas[0] for x in range((omegas[-1]-omegas[0]))]
plt.plot(omegas, deltaIP, 'o', x, f_omega(x), "-")
plt.savefig("omega-deltaIP.png")
except:
pass
# Find the root of interpolated function deltaIP(omega)
# Brent method should be superior to newton
# It is also guaranteed not to step out of a given interval,
# which is crucial here, since f_omega function throws an exception in that case
res = opt.brentq(f_omega, omegas[0], omegas[-1])
return res
def interpolate_read(min_omega, max_omega, step, best_guess):
"""Interpolate for fixed omega range using cubic spline
Then find the root. Read omegas from s file"""
deltaIP = []
omegas = []
with open("omegas.dat","r") as f:
comm_first = True
for line in f:
l = line.split()
if not len(l):
continue
if l[0][0] == '#':
if comm_first:
comm_first = False
continue
else:
break
else:
omegas.append(float(l[0]))
deltaIP.append(float(l[1]))
# Check whether deltaIP crosses zero. If not, exit
# This assumes a monotonic dependence of deltaIP on omega
if deltaIP[0] * deltaIP[-1] > 0:
print("ERROR:could not find optimal omega for a computed range.")
sys.exit(1)
# Interpolate the computed points
if len(omegas) >=4:
f_omega = interp1d(omegas, deltaIP, kind='cubic')
elif len(omegas) == 3:
f_omega = interp1d(omegas, deltaIP, kind='quadratic')
elif len(omegas) == 2:
f_omega = interp1d(omegas, deltaIP, kind='linear')
else:
print("ERROR: I need at least 2 points for interpolation, and I only got "+str(len(omegas)))
sys.exit(1)
# Plot the interpolated function for later inspection
try:
x = [ x + omegas[0] for x in range((omegas[-1]-omegas[0]))]
plt.plot(omegas, deltaIP, 'o', x, f_omega(x), "-")
plt.savefig("omega-deltaIP.png")
except:
pass
# Find the root of interpolated function deltaIP(omega)
res = opt.brentq(f_omega, omegas[0], omegas[-1])
return res
#### Actual calculation starts here!
if METHOD == 0:
omega = minimize(MIN_OMEGA, MAX_OMEGA, THR_OMEGA)
elif METHOD == 1:
omega = interpolate(MIN_OMEGA, MAX_OMEGA, STEP, BEST_GUESS)
elif METHOD == 2:
omega = interpolate_read(MIN_OMEGA, MAX_OMEGA, STEP, BEST_GUESS)
print("Final tuned omega = ",omega)
if METHOD == 2:
sys.exit(0)
# This can be skipped if you want to save time
print("Recomputing with final omega...")
if PROGRAM == "TERACHEM":
dr = driver.Abinitio_driver_terachem()
if PROGRAM == "QCHEM":
dr = driver.Abinitio_driver_qchem()
if PROGRAM == "QCHEM_PCM":
dr = driver.Abinitio_driver_qchem_pcm()
if PROGRAM == "QCHEM_IEDC":
dr = driver.Abinitio_driver_qchem_IEDC_gas()
if PROGRAM == "QCHEM_IEDC_PCM":
dr = driver.Abinitio_driver_qchem_IEDC_pcm()
IP_dscf, IP_koop = dr.compute_ip(omega/1000.)
err = IP_dscf - IP_koop
print("Final IP_dscf:",IP_dscf*AUtoEV)
print("Final IP_exc_na:",IP_koop*AUtoEV)
print("Final deltaIP:",err*AUtoEV)
| 32.581481
| 103
| 0.665909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,605
| 0.409799
|
1f36e08746ee116943eb44bc9ccc08813b7b6dbe
| 415
|
py
|
Python
|
test/test_pbp.py
|
Galtozzy/basketball_reference_scraper
|
fb0081f2ae146f3a7da3a17d4e30af0c0dc1124a
|
[
"MIT"
] | 191
|
2020-01-14T19:32:54.000Z
|
2022-03-29T17:57:19.000Z
|
test/test_pbp.py
|
Galtozzy/basketball_reference_scraper
|
fb0081f2ae146f3a7da3a17d4e30af0c0dc1124a
|
[
"MIT"
] | 59
|
2020-01-14T18:55:09.000Z
|
2022-03-03T21:10:03.000Z
|
test/test_pbp.py
|
Galtozzy/basketball_reference_scraper
|
fb0081f2ae146f3a7da3a17d4e30af0c0dc1124a
|
[
"MIT"
] | 76
|
2020-01-08T19:50:31.000Z
|
2022-03-31T18:52:06.000Z
|
import unittest
from basketball_reference_scraper.pbp import get_pbp
class TestPbp(unittest.TestCase):
def test_pbp(self):
df = get_pbp('2020-01-06', 'DEN', 'ATL')
expected_columns = ['QUARTER', 'TIME_REMAINING', 'DENVER_ACTION', 'ATLANTA_ACTION', 'DENVER_SCORE', 'ATLANTA_SCORE']
self.assertListEqual(list(df.columns), expected_columns)
if __name__ == '__main__':
unittest.main()
| 34.583333
| 124
| 0.710843
| 296
| 0.713253
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.281928
|
1f376809bd6d755cb0caead50017abc148fc244a
| 978
|
py
|
Python
|
bin/grep.py
|
Blindfold/pk-mod
|
24f958b0d501a3b5d9393dcad1e69987c2448968
|
[
"Apache-2.0"
] | 1
|
2019-04-03T20:02:40.000Z
|
2019-04-03T20:02:40.000Z
|
bin/grep.py
|
Blindfold-Games/pk-mod
|
24f958b0d501a3b5d9393dcad1e69987c2448968
|
[
"Apache-2.0"
] | 3
|
2015-01-03T23:56:51.000Z
|
2015-01-15T09:16:46.000Z
|
bin/grep.py
|
Blindfold-Games/pk-mod
|
24f958b0d501a3b5d9393dcad1e69987c2448968
|
[
"Apache-2.0"
] | null | null | null |
import re
import os
from sys import argv
def grep(match):
def _do_grep_wrapper(match):
def _do_grep(lines):
if match(lines):
yield lines
return _do_grep
return _do_grep_wrapper(match)
def find(what, where, depth=True):
"""
    :param what: str Regular expression to search for (compiled with re.M)
    :param where: str Directory to start the search in
    :param depth: bool If True, search subdirectories recursively; otherwise search only 'where'
    :return: list of paths of files whose contents match
"""
r = re.compile(what, re.M)
res = []
for root, sub_dirs, files in os.walk(where, True):
if (not depth) and (root != where):
continue
for file_name in files:
f = open(os.path.join(root, file_name), 'r')
data = f.read()
if r.search(data):
res.append(os.path.join(root, file_name))
return res
if __name__ == '__main__':
if len(argv) > 2:
print(list(find(argv[1], argv[2], True)))
| 27.166667
| 96
| 0.5818
| 0
| 0
| 194
| 0.198364
| 0
| 0
| 0
| 0
| 227
| 0.232106
|
1f39f0a7bb12ceef46b29fb32101f2f558a75220
| 2,023
|
py
|
Python
|
solution.py
|
nandita16gupta/CSV-Reading-using-Dynamic-Programming
|
793f9a9b23c2b1ea45d9ec71ea7070690932f9aa
|
[
"Apache-2.0"
] | null | null | null |
solution.py
|
nandita16gupta/CSV-Reading-using-Dynamic-Programming
|
793f9a9b23c2b1ea45d9ec71ea7070690932f9aa
|
[
"Apache-2.0"
] | null | null | null |
solution.py
|
nandita16gupta/CSV-Reading-using-Dynamic-Programming
|
793f9a9b23c2b1ea45d9ec71ea7070690932f9aa
|
[
"Apache-2.0"
] | null | null | null |
import csv
def inner(cell, spreadsheet):
try:
parts = cell.split()
if len(parts) == 0:
return 0.0
stack = []
for part in parts:
if part[0].isalpha():
col = ord(part[0]) - ord('a')
row = int(part[1:]) - 1
cell = spreadsheet[row][col]
value = solve(cell, spreadsheet)
if value == "#ERR":
return "#ERR"
stack.append(value)
elif part[0].isdigit() or part[0] == '.':
value = float(part)
stack.append(value)
elif part in ('+', '-', '*', '/'):
a = stack.pop()
b = stack.pop()
if part == '+':
stack.append(a + b)
elif part == '-':
stack.append(b - a)
elif part == '*':
stack.append(a * b)
elif part == '/':
stack.append(b / a)
else:
return "#ERR"
if len(stack) != 1:
return "#ERR"
return stack.pop()
except:
return "#ERR"
visited = {}
def solve(cell, spreadsheet):
if cell in visited:
computed = visited[cell]
if computed is None:
# cycle detected
return "#ERR"
return computed
visited[cell] = None
value = inner(cell, spreadsheet)
visited[cell] = value
return value
if __name__ == "__main__":
rows = []
with open('input.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
rows.append(row)
output_rows = []
for row in rows:
output_row = []
for cell in row:
output_row.append(solve(cell, rows))
output_rows.append(output_row)
with open('solution_csv_write.csv', 'w') as f:
writer = csv.writer(f)
for row in output_rows:
writer.writerow(row)
| 24.373494
| 53
| 0.44439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 130
| 0.064261
|
1f3ab12d8151036391c9ff1e4b3cbf52e4305e09
| 1,147
|
py
|
Python
|
slacm/exceptions.py
|
SLAcM/SLAcM
|
62943f4a68725674b103c73fcbcd25bb9cb5890e
|
[
"Apache-2.0"
] | 1
|
2022-01-13T03:19:24.000Z
|
2022-01-13T03:19:24.000Z
|
slacm/exceptions.py
|
SLAcM/SLAcM
|
62943f4a68725674b103c73fcbcd25bb9cb5890e
|
[
"Apache-2.0"
] | null | null | null |
slacm/exceptions.py
|
SLAcM/SLAcM
|
62943f4a68725674b103c73fcbcd25bb9cb5890e
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Sep 19, 2020
@author: esdev
'''
class SlacmException(Exception):
'''
    Base class for all SLAcM exceptions
'''
def __init__(self, message):
super().__init__(message)
class NotYetImplemented(SlacmException):
def __init__(self, message):
super().__init__(message)
class LoadError(SlacmException):
def __init__(self, message):
super().__init__(message)
class PeerOperationError(SlacmException):
def __init__(self, message):
super().__init__(message)
class PortOperationError(SlacmException):
def __init__(self, message):
super().__init__(message)
class UndefinedOperation(SlacmException):
def __init__(self, message):
super().__init__(message)
class InvalidOperation(SlacmException):
def __init__(self, message):
super().__init__(message)
class UndefinedHandler(SlacmException):
def __init__(self, message):
super().__init__(message)
class ParameterLoadError(SlacmException):
def __init__(self, message):
super().__init__(message)
| 24.404255
| 41
| 0.652136
| 1,007
| 0.877942
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.087184
|
1f3ca2663f904f54aa3ffae1453e96545934c8ab
| 959
|
py
|
Python
|
tests/test_auth.py
|
ChukwuEmekaAjah/buycoins_python
|
86547aa742364a0e308b1dfb5f7c73b4467b1e06
|
[
"MIT"
] | 1
|
2021-03-25T19:28:48.000Z
|
2021-03-25T19:28:48.000Z
|
tests/test_auth.py
|
ChukwuEmekaAjah/buycoins_python
|
86547aa742364a0e308b1dfb5f7c73b4467b1e06
|
[
"MIT"
] | null | null | null |
tests/test_auth.py
|
ChukwuEmekaAjah/buycoins_python
|
86547aa742364a0e308b1dfb5f7c73b4467b1e06
|
[
"MIT"
] | null | null | null |
from buycoins_client import Auth
import unittest
class TestAuthMethods(unittest.TestCase):
    def test_invalid_secret_key_setup(self):
        """
        Should throw an exception for an invalid secret key
        """
        with self.assertRaises(Exception) as context:
            Auth.setup("name", 3)
        self.assertEqual(str(context.exception), "Invalid secret key. Secret key should be a string")
    def test_invalid_public_key_setup(self):
        """
        Should throw an exception for an invalid public key
        """
        with self.assertRaises(Exception) as context:
            Auth.setup(1, 3)
        self.assertEqual(str(context.exception), "Invalid public key. Public key should be a string")
    def test_valid_auth_setup(self):
        """
        Should return True when setup succeeds with valid public and secret keys
        """
auth = Auth.setup("buycoins", "africa")
self.assertEqual(auth, True)
if __name__ == '__main__':
unittest.main()
| 26.638889
| 89
| 0.605839
| 858
| 0.894682
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.397289
|
1f3d20a100b0201057cb5b8f77818cba1ad9e63b
| 6,328
|
py
|
Python
|
icn/plc/main.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | 3
|
2021-01-20T00:54:09.000Z
|
2021-06-02T01:54:02.000Z
|
icn/plc/main.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | null | null | null |
icn/plc/main.py
|
PMaynard/ndn-water-treatment-testbed
|
926db68237b06f43f6e736f035201ed71fc153bc
|
[
"MIT"
] | null | null | null |
# from ui import UI
# from ui import UI_Element
import sys
import time
import threading
import socket
from plcrpcservice import PLCRPCClient
import pyndn
from pyndn import Name
from pyndn import Face
from pyndn import Interest
from pyndn.security import KeyChain
from pyndn.security.identity import IdentityManager
from pyndn.security import AesKeyParams
from pyndn import Data
from pyndn import MetaInfo
from pyndn.util.common import Common
import logging
logging.basicConfig()
log = logging.getLogger('PLC')
log.setLevel(logging.DEBUG)
class store(object):
def __init__(self, slaveid, register, address, value):
self.slaveid = slaveid
self.register = register
self.address = address
self.value = value
    def __str__(self):
        return "{} {} {} {}".format(self.slaveid, self.register, self.address, self.value)
class PLC(object):
def __init__(self, name=None):
# PLC Simulation
self.slaveid = 0x00
self.name = name
if not name:
self.name = socket.gethostname()
self.plcrpcclient = PLCRPCClient(rpc_server="0.0.0.0", rpc_port=8000, plc=self.name)
self.registered = False
self.speed = 0.2
self.db = {}
# NDN
self._callbackCount = 0
self.primary_prefix = "/example"
self.names = []
self.freshnessPeriod = 2000 # in milliseconds (2000 = 2s).
self.identify_manager = IdentityManager()
self.keyChain = KeyChain(self.identify_manager)
    def _get_sensor_data(self):
        sensor_data = self.plcrpcclient.readSensors()
        for sensor in sensor_data:
            register = sensor_data[sensor]['register_type']
            address = int(sensor_data[sensor]['data_address'])
            if register in ['c', 'd']:
                value = bool(sensor_data[sensor]['value'])
            elif register in ['h', 'i']:
                value = int(sensor_data[sensor]['value'])
                address = address + 1  # section 4.4 of specification
            else:
                continue  # unknown register type; skip rather than store an unbound value
            self.db[sensor] = store(self.slaveid, register, address, value)
def _registerPLC(self):
self.slaveid = self.plcrpcclient.registerPLC()
self.registered = True
log.debug("[PLC][%s] Registered on scadasim rpc" % self.name)
return True
def update(self):
# log.debug("[PLC][%s] Updating PLC values with sensor values" % self)
# while not self.queue.empty():
# # Update scadasim with any new values from Master
# fx, address, values = self.queue.get()
# log.debug("[PLC][%s] setting fx: %s register:%s to value:%s" %
# (self.name, fx, address, values))
# self.plcrpcclient.setValues(fx=fx, address=address, values=values)
self._get_sensor_data()
delay = (-time.time() % self.speed)
t = threading.Timer(delay, self.update, ())
t.daemon = True
t.start()
def set_speed(self, speed):
self.speed = speed
def __repr__(self):
return "%s" % self.name
def main(self):
log.debug("[PLC][%s] Initialized" % self.name)
while not self.registered:
log.debug(
"[PLC][%s] Trying to register with scadasim rpc" % self.name)
try:
self._registerPLC()
except KeyError:
                log.warning(
                    "[PLC][%s] PLC not found within scadasim. Verify Docker "
                    "Compose container names match the list of plcs in the "
                    "scadasim config", self.name)
time.sleep(1)
log.debug("[PLC][%s] Starting update service" % self.name)
self.update()
log.debug("[PLC][%s] Starting NDN Producer" % self.name)
# TODO: Move this setup stuff into a function and make dynamic.
log.info("Listening on: ")
for n in self.db:
# /ndn/plc2-site/plc2
name = Name("{0}/{1}-site/{1}/{2}".format(self.primary_prefix, self.name, n))
log.info("\t{}".format(name))
name_identiy = self.keyChain.createIdentityAndCertificate(name, self.keyChain.getDefaultKeyParams())
log.debug("Name Identify: {}".format(name_identiy))
self.face.setCommandSigningInfo(self.keyChain, name_identiy)
self.face.registerPrefix(name, self.onInterest, self.onRegisterFailed)
# log.debug("Registered Prefix: {} {}", str(self.primary_prefix), str(n))
# END LOOP
# Keep Running unless error.
while self._callbackCount < 1:
self.face.processEvents()
time.sleep(0.01)
self.face.shutdown()
# NDN Stuff
def ndnInit(self):
Interest.setDefaultCanBePrefix(True)
# TODO: Bug? Does not auto retry TCP if unix socket fails as says in docs.
# self.face = Face("localhost", 6363)
self.face = Face()
self.primary_prefix = "/ndn"
def onInterest(self, prefix, interest, face, interestFilterId, filter):
self._callbackCount = 0
# log.debug("prefix: '{}'".format(prefix))
# log.debug("interest: '{}'".format(interest))
# log.debug("face: '{}'".format(face))
# log.debug("interestFilterId: '{}'".format(interestFilterId))
# log.debug("filter: '{}'".format(filter))
data = Data()
#
# log.debug("----")
# for n in self.db:
# log.debug(n)
# log.debug(self.db[n].value)
# log.debug("----")
#
n = str(prefix).split("/")[-1]
log.debug("{} value '{}' ({})".format(prefix, self.db[n].value, self.freshnessPeriod))
data.setContent(str(self.db[n].value)) # TODO: Why does this need to be converted to string?
data.setName(prefix)
meta = MetaInfo()
meta.setFreshnessPeriod(self.freshnessPeriod)
data.setMetaInfo(meta)
self.keyChain.sign(data)
face.putData(data)
def onRegisterFailed(self, prefix):
self._callbackCount += 1
dump("Unable to register", prefix)
#
try:
plc = PLC(sys.argv[1])
except:
plc = PLC()
# Keep trying until we get a connection.
while True:
plc.ndnInit()
plc.main()
time.sleep(5)
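# Name-scheme sketch: given the primary_prefix "/ndn" set in ndnInit(), a PLC
# named "plc2" with a sensor "flow1" registers the prefix below ("plc2" and
# "flow1" are illustrative; real names come from the scadasim RPC server):
#     /ndn/plc2-site/plc2/flow1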
| 31.326733
| 112
| 0.588021
| 5,617
| 0.887642
| 0
| 0
| 0
| 0
| 0
| 0
| 1,824
| 0.288243
|
1f3eb22adbac011762c8a0158ac669343f090557
| 2,876
|
py
|
Python
|
cogs/administration.py
|
tigersharkpr13/AnsuraBot
|
035797121d8e7952bc38e32834cdb655c15cb703
|
[
"Unlicense"
] | null | null | null |
cogs/administration.py
|
tigersharkpr13/AnsuraBot
|
035797121d8e7952bc38e32834cdb655c15cb703
|
[
"Unlicense"
] | null | null | null |
cogs/administration.py
|
tigersharkpr13/AnsuraBot
|
035797121d8e7952bc38e32834cdb655c15cb703
|
[
"Unlicense"
] | null | null | null |
from typing import Union
import discord
from discord.ext import commands
import cogs.gamertags
from ansura.ansurabot import AnsuraBot
from ansura.ansuracontext import AnsuraContext
class Administration(commands.Cog):
    def error(self, title, message=None, color=0xff0000):
        e = discord.Embed()
        e.colour = color
        e.title = title
        if message:
            for k in message.keys():
                e.add_field(name=k, value=message[k])
        return e
def __init__(self, bot: AnsuraBot):
self.bot = bot
@commands.is_owner()
@commands.command(aliases=["sgv"])
async def setgtval(self, ctx: AnsuraContext,
typ: str, user: Union[discord.Member, discord.User],
val: str):
ch: discord.TextChannel = \
ctx.channel
if not ch.permissions_for(ctx.author).administrator:
await ctx.send(embed=self.error("Permission error",
message={
"Message":
"You must have administrator permission"
})
)
return
if typ not in "xbox,mojang,youtube,twitch,mixer".split(","):
await ctx.send(embed=self.error("Invalid gametag type"))
await self.bot.get_cog("Help").help_(ctx, "setgtval")
return
util: cogs.gamertags.Util = self.bot.get_cog("Util")
db = util.db
if typ == "xbox":
typ = "xboxlive"
rec = db.lookup_gaming_record(user.id)
e = discord.Embed()
e.colour = user.color
e.title = user.display_name + " before"
e.add_field(name="XBox", value=rec[2] if rec[2] is not None else "N/A")
e.add_field(name="Mojang", value=rec[1] if rec[1] is not None else "N/A")
e.add_field(name="Youtube", value=rec[3] if rec[3] is not None else "N/A")
e.add_field(name="Twitch", value=rec[4] if rec[4] is not None else "N/A")
e.add_field(name="Mixer", value=rec[5] if rec[5] is not None else "N/A")
await ctx.send(embed=e)
db.set_gaming_record(user.id, typ, val)
rec = db.lookup_gaming_record(user.id)
e = discord.Embed()
e.colour = user.color
e.title = user.display_name + " after"
e.add_field(name="XBox", value=rec[2] if rec[2] is not None else "N/A")
e.add_field(name="Mojang", value=rec[1] if rec[1] is not None else "N/A")
e.add_field(name="Youtube", value=rec[3] if rec[3] is not None else "N/A")
e.add_field(name="Twitch", value=rec[4] if rec[4] is not None else "N/A")
e.add_field(name="Mixer", value=rec[5] if rec[5] is not None else "N/A")
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(Administration(bot))
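# Command-usage sketch (Discord chat, assuming a "!" prefix; the mention and
# gamertag are illustrative):
#     !setgtval xbox @SomeUser MyGamertag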
| 40.507042
| 92
| 0.561544
| 2,635
| 0.916203
| 0
| 0
| 2,297
| 0.798679
| 2,233
| 0.776426
| 312
| 0.108484
|
1f3f0f56c3a1c070b48e8fbce26fe6e40715c8ef
| 357
|
py
|
Python
|
project/celery.py
|
kunugoda/jobbrd
|
19debcac7673a85eda4a8d1eb00e5537268bd601
|
[
"MIT"
] | 1
|
2020-06-17T05:25:42.000Z
|
2020-06-17T05:25:42.000Z
|
project/celery.py
|
kunugoda/jobbrd
|
19debcac7673a85eda4a8d1eb00e5537268bd601
|
[
"MIT"
] | null | null | null |
project/celery.py
|
kunugoda/jobbrd
|
19debcac7673a85eda4a8d1eb00e5537268bd601
|
[
"MIT"
] | null | null | null |
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
app = Celery('jobboard')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0}'.format(self.request))
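# Usage sketch: with a worker running (e.g. "celery -A project worker",
# assuming the usual app import in project/__init__.py), the task can be
# queued from anywhere in the Django project:
#     from project.celery import debug_task
#     debug_task.delay()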
| 23.8
| 67
| 0.787115
| 0
| 0
| 0
| 0
| 87
| 0.243697
| 0
| 0
| 88
| 0.246499
|
1f3fa004ef7cd2d4e23f674d416525c0f3924172
| 128
|
py
|
Python
|
rotatelist/rotatelist.py
|
tanaytoshniwal/Python-Programs
|
6553a2dcef5d26aad5a818cafd3c678ef59e8b82
|
[
"MIT"
] | 1
|
2019-01-29T18:46:39.000Z
|
2019-01-29T18:46:39.000Z
|
rotatelist/rotatelist.py
|
AlphaBAT69/Python-Programs
|
6553a2dcef5d26aad5a818cafd3c678ef59e8b82
|
[
"MIT"
] | null | null | null |
rotatelist/rotatelist.py
|
AlphaBAT69/Python-Programs
|
6553a2dcef5d26aad5a818cafd3c678ef59e8b82
|
[
"MIT"
] | null | null | null |
def rotatelist(l, k):
    # rotate the list right by one position, k times
    for _ in range(k):
        l = l[-1:] + l[:-1]
    return l
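# Example: rotating right by 2 (inputs are illustrative):
#     rotatelist([1, 2, 3, 4, 5], 2)  # -> [4, 5, 1, 2, 3]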
| 21.333333
| 50
| 0.484375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1f406fa1d479f75e59d5f05a3c4ac549424cb9b7
| 214
|
py
|
Python
|
filer/models/__init__.py
|
pbs/django-filer
|
c862a84d4e1d86c14eeb509e341f6a7d39a421bf
|
[
"BSD-3-Clause"
] | 1
|
2015-03-03T15:49:14.000Z
|
2015-03-03T15:49:14.000Z
|
filer/models/__init__.py
|
pbs/django-filer
|
c862a84d4e1d86c14eeb509e341f6a7d39a421bf
|
[
"BSD-3-Clause"
] | 10
|
2015-04-08T14:16:52.000Z
|
2021-12-15T16:17:57.000Z
|
filer/models/__init__.py
|
pbs/django-filer
|
c862a84d4e1d86c14eeb509e341f6a7d39a421bf
|
[
"BSD-3-Clause"
] | null | null | null |
#-*- coding: utf-8 -*-
from .mixins import *
from .filemodels import *
from .clipboardmodels import *
from .imagemodels import *
from .foldermodels import *
from .virtualitems import *
from .archivemodels import *
| 23.777778
| 30
| 0.742991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.102804
|
1f407417b73374a6afc645fcceeb6ced94f54f5e
| 2,388
|
py
|
Python
|
EGGS_labrad/clients/cryovac_clients/fma1700a_client.py
|
EGGS-Experiment/EGGS_Control
|
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
|
[
"MIT"
] | 2
|
2021-12-26T05:00:54.000Z
|
2021-12-30T17:15:49.000Z
|
EGGS_labrad/clients/cryovac_clients/fma1700a_client.py
|
EGGS-Experiment/EGGS_Control
|
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
|
[
"MIT"
] | null | null | null |
EGGS_labrad/clients/cryovac_clients/fma1700a_client.py
|
EGGS-Experiment/EGGS_Control
|
c29b3ab0e30dcb6e01d1ca3212ac64ad1506143b
|
[
"MIT"
] | null | null | null |
from time import time
from datetime import datetime
from twisted.internet.defer import inlineCallbacks
from EGGS_labrad.clients import GUIClient
from EGGS_labrad.clients.cryovac_clients.fma1700a_gui import fma1700a_gui
class fma1700a_client(GUIClient):
name = 'FMA1700A Client'
FLOWID = 877920
servers = {'fma': 'FMA1700A Server'}
def getgui(self):
if self.gui is None:
self.gui = fma1700a_gui()
return self.gui
@inlineCallbacks
def initClient(self):
# set recording stuff
self.c_record = self.cxn.context()
self.recording = False
# connect to signals
yield self.fma.signal__flow_update(self.FLOWID)
yield self.fma.addListener(listener=self.updateFlow, source=None, ID=self.FLOWID)
# start device polling if not already started
poll_params = yield self.fma.polling()
if not poll_params[0]:
yield self.fma.polling(True, 5.0)
def initGUI(self):
self.gui.record_button.toggled.connect(lambda status: self.record_flow(status))
# SLOTS
@inlineCallbacks
def record_flow(self, status):
"""
Creates a new dataset to record flow and
tells polling loop to add data to data vault.
"""
# set up datavault
self.recording = status
if self.recording:
self.starttime = time()
date = datetime.now()
year = str(date.year)
month = '{:02d}'.format(date.month)
trunk1 = '{0:s}_{1:s}_{2:02d}'.format(year, month, date.day)
trunk2 = '{0:s}_{1:02d}:{2:02d}'.format(self.name, date.hour, date.minute)
yield self.dv.cd(['', year, month, trunk1, trunk2], True, context=self.c_record)
yield self.dv.new('FMA1700A Flowmeter', [('Elapsed time', 't')],
[('Flowmeter', 'Flow rate', 'L/min')], context=self.c_record)
@inlineCallbacks
def updateFlow(self, c, flow):
"""
Updates GUI when values are received from server.
"""
self.gui.flow_display.setText(str(flow))
if self.recording:
elapsedtime = time() - self.starttime
yield self.dv.add(elapsedtime, flow, context=self.c_record)
if __name__ == "__main__":
from EGGS_labrad.clients import runClient
runClient(fma1700a_client)
| 33.166667
| 100
| 0.620603
| 2,059
| 0.862228
| 1,615
| 0.676298
| 1,678
| 0.70268
| 0
| 0
| 471
| 0.197236
|
1f41c0a4ab7d43a2dea653442a426c4d1174eff9
| 1,653
|
py
|
Python
|
tests/test_model.py
|
camisatx/flasker
|
affc9fc13742e18434961b8ec15f133cc246dcd5
|
[
"MIT"
] | 3
|
2020-12-12T01:30:08.000Z
|
2021-10-13T16:47:56.000Z
|
tests/test_model.py
|
camisatx/flasker
|
affc9fc13742e18434961b8ec15f133cc246dcd5
|
[
"MIT"
] | null | null | null |
tests/test_model.py
|
camisatx/flasker
|
affc9fc13742e18434961b8ec15f133cc246dcd5
|
[
"MIT"
] | 1
|
2020-12-12T01:30:13.000Z
|
2020-12-12T01:30:13.000Z
|
import sys
import unittest
sys.path.append('../')
from app import db # NOQA
from app.models import User # NOQA
from tests.base import BaseTestCase # NOQA
class UserModelCase(BaseTestCase):
def test_model_password_hashing(self):
"""Test the password hashing."""
u = User(username='susan')
u.set_password('cat')
self.assertFalse(u.check_password('dog'))
self.assertTrue(u.check_password('cat'))
def test_model_follow(self):
"""Test the user following mechanic."""
u1 = User(username='josh', email='josh@example.com', public_id='1',
group='user')
u1.set_password('cat')
u2 = User(username='sara', email='sara@example.com', public_id='2',
group='user')
u2.set_password('cat')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
self.assertEqual(u1.followed.all(), [])
self.assertEqual(u2.followed.all(), [])
# Test the follow mechanic
u1.follow(u2)
db.session.commit()
self.assertTrue(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 1)
self.assertEqual(u1.followed.first().username, 'sara')
self.assertEqual(u2.followers.count(), 1)
self.assertEqual(u2.followers.first().username, 'josh')
# Test the unfollow mechanic
u1.unfollow(u2)
db.session.commit()
self.assertFalse(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 0)
self.assertEqual(u2.followers.count(), 0)
if __name__ == '__main__':
unittest.main()
| 31.788462
| 75
| 0.603146
| 1,409
| 0.85239
| 0
| 0
| 0
| 0
| 0
| 0
| 268
| 0.162129
|
1f41eb6162eb883a714a301cdb2d992b3f4f957d
| 119
|
py
|
Python
|
examples/__init__.py
|
DiMoser/PyPinT
|
3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
DiMoser/PyPinT
|
3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73
|
[
"MIT"
] | null | null | null |
examples/__init__.py
|
DiMoser/PyPinT
|
3cba394d0fd87055ab412d35fe6dbf4a3b0dbe73
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""Examples demonstrating usage of PyPinT
.. moduleauthor:: Torbjörn Klatt <t.klatt@fz-juelich.de>
"""
| 19.833333
| 56
| 0.722689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 118
| 0.983333
|
1f43bc58bc8f57d5639beefb900d57b125412748
| 1,440
|
py
|
Python
|
cd4ml/app.py
|
joragupra/CD4ML-Scenarios
|
8c8886388260147cd5248dfa1945f60ebabfaacc
|
[
"MIT"
] | 1
|
2020-12-24T19:52:58.000Z
|
2020-12-24T19:52:58.000Z
|
cd4ml/app.py
|
joragupra/CD4ML-Scenarios
|
8c8886388260147cd5248dfa1945f60ebabfaacc
|
[
"MIT"
] | null | null | null |
cd4ml/app.py
|
joragupra/CD4ML-Scenarios
|
8c8886388260147cd5248dfa1945f60ebabfaacc
|
[
"MIT"
] | 1
|
2020-05-04T18:21:41.000Z
|
2020-05-04T18:21:41.000Z
|
from flask import Flask, render_template, request
import cd4ml.app_utils as utils
from cd4ml.fluentd_logging import FluentdLogger
app = Flask(__name__, template_folder='webapp/templates',
static_folder='webapp/static')
fluentd_logger = FluentdLogger()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/replace_model', methods=["POST"])
def replace_model():
content = request.get_data(as_text=False)
utils.replace_model_file(content)
return "OK", 200
@app.route('/replace_encoder', methods=["POST"])
def replace_encoder():
content = request.get_data(as_text=False)
utils.replace_encoder_file(content)
return "OK", 200
@app.route('/prediction')
def get_prediction():
date_string = request.args.get('date')
item_nbr = request.args.get("item_nbr")
prediction_tuple = utils.get_prediction(item_nbr, date_string)
status = prediction_tuple[0]
prediction = prediction_tuple[1]
log_payload = {
'prediction': prediction,
'itemid': item_nbr,
'item_name': utils.get_product_name_from_id(item_nbr),
'date_string': date_string
}
log_prediction_console(log_payload)
fluentd_logger.log('prediction', log_payload)
if status == "ERROR":
return prediction, 503
else:
return "%d" % prediction, 200
def log_prediction_console(log_payload):
print('logging {}'.format(log_payload))
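# Request sketch using Flask's built-in test client (the date and item
# number values are illustrative):
#     client = app.test_client()
#     resp = client.get("/prediction?date=2019-01-01&item_nbr=42")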
| 25.263158
| 66
| 0.702083
| 0
| 0
| 0
| 0
| 1,075
| 0.746528
| 0
| 0
| 210
| 0.145833
|
1f44b877c61b52c5169fcc3dc901630593e11752
| 1,456
|
py
|
Python
|
api/crud/events.py
|
cnuland/tbml
|
8dca907011971a8ad21dfc5b5d6bec1ddbff0818
|
[
"MIT"
] | null | null | null |
api/crud/events.py
|
cnuland/tbml
|
8dca907011971a8ad21dfc5b5d6bec1ddbff0818
|
[
"MIT"
] | null | null | null |
api/crud/events.py
|
cnuland/tbml
|
8dca907011971a8ad21dfc5b5d6bec1ddbff0818
|
[
"MIT"
] | null | null | null |
from fastapi import HTTPException
from tortoise.exceptions import DoesNotExist
from db.models import Events
from schemas.events import EventsOutSchema
async def get_events():
return await EventsOutSchema.from_queryset(Events.all())
async def get_event(event_id) -> EventsOutSchema:
return await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
async def create_event(event) -> EventsOutSchema:
event_dict = event.dict(exclude_unset=True)
event_obj = await Events.create(**event_dict)
return await EventsOutSchema.from_tortoise_orm(event_obj)
async def update_event(event_id, event) -> EventsOutSchema:
try:
db_event = await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"Event {event_id} not found")
await Events.filter(id=event_id).update(**event.dict(exclude_unset=True))
return await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
async def delete_event(event_id):
try:
db_event = await EventsOutSchema.from_queryset_single(Events.get(id=event_id))
except DoesNotExist:
raise HTTPException(status_code=404, detail=f"Event {event_id} not found")
deleted_count = await Events.filter(id=event_id).delete()
if not deleted_count:
raise HTTPException(status_code=404, detail=f"Event {event_id} not found")
return f"Event {event_id} deleted"
| 35.512195
| 86
| 0.76511
| 0
| 0
| 0
| 0
| 0
| 0
| 1,290
| 0.885989
| 114
| 0.078297
|
1f46b6c5cc84e0b05c2f63e339fe44af56c4515e
| 29,428
|
py
|
Python
|
sfx/sfx.py
|
Terry14/sfx
|
16bcf401ba3251b0de211276d97153469499515d
|
[
"MIT"
] | null | null | null |
sfx/sfx.py
|
Terry14/sfx
|
16bcf401ba3251b0de211276d97153469499515d
|
[
"MIT"
] | null | null | null |
sfx/sfx.py
|
Terry14/sfx
|
16bcf401ba3251b0de211276d97153469499515d
|
[
"MIT"
] | null | null | null |
import asyncio
import os
import unicodedata
import aiohttp
import discord
import lavalink
import unidecode
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import pagify
from redbot.core.utils.predicates import MessagePredicate
from .api import generate_urls
try:
from redbot.core.utils._dpy_menus_utils import dpymenu
DPY_MENUS = True
except ImportError:
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
DPY_MENUS = False
from .voices import voices
class SFX(commands.Cog):
"""Plays uploaded sounds or text-to-speech."""
__version__ = "2.0.0"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=134621854878007296)
self.session = aiohttp.ClientSession()
user_config = {"voice": "Clara", "speed": 5}
guild_config = {"sounds": {}, "channels": []}
global_config = {"sounds": {}, "schema_version": 0}
self.config.register_user(**user_config)
self.config.register_guild(**guild_config)
self.config.register_global(**global_config)
lavalink.register_event_listener(self.ll_check)
self.bot.loop.create_task(self.check_config_version())
self.bot.loop.create_task(self.fill_channel_cache())
self.last_track_info = {}
self.current_sfx = {}
self.channel_cache = {}
# lag_time to compensate for skipping lavalink lag
self.lag_time = 1000
self.repeat_state = {}
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
lavalink.unregister_event_listener(self.ll_check)
def format_help_for_context(self, ctx):
"""Thanks Sinbad"""
pre_processed = super().format_help_for_context(ctx)
return f"{pre_processed}\nCog Version: {self.__version__}"
async def check_config_version(self):
schema_version = await self.config.schema_version()
if schema_version == 0:
await self.config.clear_all_users()
await self.config.sounds.clear()
all_guilds = await self.config.all_guilds()
for guild in all_guilds:
await self.config.guild_from_id(guild).sounds.clear()
await self.config.schema_version.set(1)
async def fill_channel_cache(self):
all_guilds = await self.config.all_guilds()
for guild in all_guilds:
try:
self.channel_cache[guild] = all_guilds[guild]["channels"]
except KeyError:
pass # no channels set
# full credits to kable
# https://github.com/kablekompany/Kable-Kogs/blob/master/decancer/decancer.py#L67
@staticmethod
def decancer_text(text):
text = unicodedata.normalize("NFKC", text)
text = unicodedata.normalize("NFD", text)
text = unidecode.unidecode(text)
text = text.encode("ascii", "ignore")
text = text.decode("utf-8")
if text == "":
return
return text
@commands.command()
@commands.cooldown(
rate=1, per=1, type=discord.ext.commands.cooldowns.BucketType.guild
)
@commands.guild_only()
async def tts(self, ctx, *, text):
"""
Plays the given text as TTS in your current voice channel.
"""
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
author_data = await self.config.user(ctx.author).all()
author_voice = author_data["voice"]
author_speed = author_data["speed"]
text = self.decancer_text(text)
if text is None:
await ctx.send("That's not a valid message, sorry.")
return
char_number = len(text)
if char_number > 1000:
await ctx.send(
f"Sorry, I limit TTS to 1000 characters to avoid abuse. ({char_number}/1000)"
)
return
urls = generate_urls(author_voice, text, author_speed)
        try:
            await self.play_sfx(ctx.author.voice.channel, ctx.channel, urls)
        except Exception:
            await ctx.send(
                "Oops, an error occurred. If this continues please use the contact command to inform the bot owner."
            )
@commands.command()
@commands.cooldown(
rate=1, per=1, type=discord.ext.commands.cooldowns.BucketType.guild
)
@commands.guild_only()
async def sfx(self, ctx, sound: str):
"""
Plays an existing sound in your current voice channel.
If a guild SFX exists with the same name as a global one, the guild SFX will be played.
"""
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
if sound not in guild_sounds.keys() and sound not in global_sounds.keys():
await ctx.send(
f"Sound **{sound}** does not exist. Try `{ctx.clean_prefix}listsfx` for a list."
)
return
if sound in guild_sounds.keys():
link = guild_sounds[sound]
else:
link = global_sounds[sound]
try:
await self.play_sfx(ctx.author.voice.channel, ctx.channel, [link])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command()
@commands.cooldown(
rate=1, per=1, type=discord.ext.commands.cooldowns.BucketType.guild
)
@commands.guild_only()
async def qsfx(self, ctx, sound: str):
"""
Queues an existing sound in your current voice channel.
If a guild SFX exists with the same name as a global one, the guild SFX will be played.
"""
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
if sound not in guild_sounds.keys() and sound not in global_sounds.keys():
await ctx.send(
f"Sound **{sound}** does not exist. Try `{ctx.clean_prefix}listsfx` for a list."
)
return
if sound in guild_sounds.keys():
link = guild_sounds[sound]
else:
link = global_sounds[sound]
try:
await self.queue_sfx(ctx.author.voice.channel, ctx.channel, [link])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command()
@commands.admin_or_permissions(manage_guild=True)
@commands.guild_only()
async def addsfx(self, ctx, name: str, link: str = None):
"""
Adds a new SFX to this guild.
Either upload the file as a Discord attachment or use a link.
Syntax:`[p]addsfx <name>` or `[p]addsfx <name> <link>`.
"""
guild_sounds = await self.config.guild(ctx.guild).sounds()
attachments = ctx.message.attachments
if len(attachments) > 1 or (attachments and link):
await ctx.send("Please only try to add one SFX at a time.")
return
url = ""
filename = ""
if attachments:
attachment = attachments[0]
url = attachment.url
elif link:
url = "".join(link)
else:
await ctx.send(
"You must provide either a Discord attachment or a direct link to a sound."
)
return
filename = "".join(url.split("/")[-1:]).replace("%20", "_")
file_name, file_extension = os.path.splitext(filename)
if file_extension not in [".wav", ".mp3"]:
await ctx.send(
"Sorry, only SFX in .mp3 and .wav format are supported at this time."
)
return
if name in guild_sounds.keys():
await ctx.send(
f"A sound with that filename already exists. Either choose a new name or use {ctx.clean_prefix}delsfx to remove it."
)
return
guild_sounds[name] = url
await self.config.guild(ctx.guild).sounds.set(guild_sounds)
await ctx.send(f"Sound **{name}** has been added.")
@commands.command()
@commands.admin_or_permissions(manage_guild=True)
@commands.guild_only()
async def delsfx(self, ctx, soundname: str):
"""
Deletes an existing sound.
"""
guild_sounds = await self.config.guild(ctx.guild).sounds()
if soundname not in guild_sounds.keys():
await ctx.send(
f"Sound **{soundname}** does not exist. Try `{ctx.prefix}listsfx` for a list."
)
return
del guild_sounds[soundname]
await self.config.guild(ctx.guild).sounds.set(guild_sounds)
await ctx.send(f"Sound **{soundname}** deleted.")
@commands.command()
@commands.guild_only()
async def addglobalsfx(self, ctx, name: str, link: str = None):
"""
Adds a new SFX to this the bot globally.
Either upload the file as a Discord attachment or use a link.
Syntax:`[p]addsfx <name>` or `[p]addsfx <name> <link>`.
"""
global_sounds = await self.config.sounds()
attachments = ctx.message.attachments
if len(attachments) > 1 or (attachments and link):
await ctx.send("Please only try to add one SFX at a time.")
return
url = ""
if attachments:
attachment = attachments[0]
url = attachment.url
elif link:
url = "".join(link)
else:
await ctx.send(
"You must provide either a Discord attachment or a direct link to a sound."
)
return
filename = "".join(url.split("/")[-1:]).replace("%20", "_")
file_name, file_extension = os.path.splitext(filename)
if file_extension not in [".wav", ".mp3"]:
await ctx.send(
"Sorry, only SFX in .mp3 and .wav format are supported at this time."
)
return
if name in global_sounds.keys():
await ctx.send(
f"A sound with that filename already exists. Either choose a new name or use {ctx.clean_prefix}delglobalsfx to remove it."
)
return
        global_sounds[name] = url
await self.config.sounds.set(global_sounds)
await ctx.send(f"Sound **{name}** has been added.")
@commands.command()
@checks.is_owner()
async def delglobalsfx(self, ctx, name: str):
"""
Deletes an existing global sound.
"""
global_sounds = await self.config.sounds()
if name not in global_sounds.keys():
await ctx.send(
f"Sound **{name}** does not exist. Try `{ctx.prefix}listsfx` for a list."
)
return
del global_sounds[name]
await self.config.sounds.set(global_sounds)
await ctx.send(f"Sound **{name}** deleted.")
@commands.command()
@commands.guild_only()
async def listsfx(self, ctx):
"""
Lists all available sounds for this server.
"""
guild_sounds = await self.config.guild(ctx.guild).sounds()
global_sounds = await self.config.sounds()
if (len(guild_sounds.items()) + len(global_sounds.items())) == 0:
await ctx.send(f"No sounds found. Use `{ctx.prefix}addsfx` to add one.")
return
txt = ""
if guild_sounds:
txt += "**Guild Sounds**:\n"
for sound in guild_sounds:
txt += sound + "\n"
if global_sounds:
txt += "\n**Global Sounds**:\n"
for sound in global_sounds:
                if guild_sounds and sound in guild_sounds:
                    txt += sound + " (disabled)\n"
                else:
                    txt += sound + "\n"
pages = [p for p in pagify(text=txt, delims="\n")]
for page in pages:
await ctx.send(page)
@commands.command()
@commands.guild_only()
async def fplay(self, ctx, link: str = None):
"""
Adds a file to the music queue.
Either upload the file as a Discord attachment or use a link.
Syntax:`[p]fplay` or `[p]fplay <link>`.
"""
attachments = ctx.message.attachments
if len(attachments) > 1 or (attachments and link):
await ctx.send("Please only try to add one file at a time.")
return
url = ""
filename = ""
if attachments:
attachment = attachments[0]
url = attachment.url
elif link:
url = "".join(link)
else:
await ctx.send(
"You must provide either a Discord attachment or a direct link to a sound."
)
return
filename = "".join(url.split("/")[-1:]).replace("%20", "_")
file_name, file_extension = os.path.splitext(filename)
if file_extension not in [".wav", ".mp3"]:
await ctx.send(
"Sorry, only files in .mp3 and .wav format are supported at this time."
)
return
if not ctx.author.voice or not ctx.author.voice.channel:
await ctx.send("You are not connected to a voice channel.")
return
try:
await self.queue_sfx(ctx.author.voice.channel, ctx.channel, [url])
except Exception:
await ctx.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
@commands.command(aliases=["setvoice"])
async def myvoice(self, ctx, voice: str = None):
"""
Changes your TTS voice.
Type `[p]listvoices` to view all possible voices.
If no voice is provided, it will show your current voice.
"""
current_voice = await self.config.user(ctx.author).voice()
if voice is None:
await ctx.send(f"Your current voice is **{current_voice}**")
return
voice = voice.title()
if voice in voices.keys():
await self.config.user(ctx.author).voice.set(voice)
await ctx.send(f"Your new TTS voice is: **{voice}**")
else:
await ctx.send(
f"Sorry, that's not a valid voice. You can view voices with the `{ctx.clean_prefix}listvoices` command."
)
@commands.command(aliases=["setspeed"])
async def myspeed(self, ctx, speed: int = None):
"""
Changes your TTS speed.
If no speed is provided, it will show your current speed.
The speed range is 0-10 (higher is faster, 5 is normal.)
"""
author_data = await self.config.user(ctx.author).all()
current_speed = author_data["speed"]
current_voice = author_data["voice"]
support_speed = voices[current_voice]["speed"]
if speed is None:
await ctx.send(f"Your current speed is **{current_speed}**")
return
if speed < 0:
await ctx.send("Your speed must be greater than or equal to 0.")
return
if speed > 10:
await ctx.send("Your speed must be less than or equal to 10.")
return
await self.config.user(ctx.author).speed.set(speed)
if support_speed:
await ctx.send(f"Your new speed is **{speed}**.")
else:
await ctx.send(
f"Your new speed is **{speed}**. "
"Keep in mind your current voice doesn't support speed changes, "
"so you won't see a difference until you change your voice to one that supports speed."
)
@commands.command()
async def listlangs(self, ctx):
"""
List all the valid language codes for TTS voices.
"""
langs = sorted(
set([voices[voice]["languageCode"] for voice in voices.keys()] + ["all"])
)
embed = discord.Embed(
title="Valid Language Codes",
color=await ctx.embed_color(),
description=", ".join(langs),
)
await ctx.send(embed=embed)
@commands.command()
async def listvoices(self, ctx, lang="en"):
"""
Lists all the TTS voices in the selected language.
        If no language is provided, it will list the voices in English.
Use 'all' as the language code to view all voices.
"""
langs = set([voices[voice]["languageCode"] for voice in voices.keys()])
ALL_VOICES = False
        if lang not in langs:
            if lang == "all":
                ALL_VOICES = True
            else:
                await ctx.send(
                    f"Sorry, that's not a valid language code. You can view all valid language codes with the `{ctx.clean_prefix}listlangs` command."
                )
                return
if ALL_VOICES:
voice_data = voices
else:
voice_data = {
voice: voices[voice]
for voice in voices.keys()
if voices[voice]["languageCode"] == lang
}
qs = {"low": [], "medium": [], "high": []}
for voice in voice_data:
embed = discord.Embed(color=await ctx.embed_color(), title=voice)
embed.description = (
"```yaml\n"
f"Gender: {voice_data[voice]['gender']}\n"
f"Language: {voice_data[voice]['languageName']}\n"
f"Quality: {voice_data[voice]['quality']}\n"
f"Supports Speed: {voice_data[voice]['speed']}\n"
f"Translates: {voice_data[voice]['translates']}\n"
f"Provider: {voice_data[voice]['provider']}"
"```"
)
q = voice_data[voice]["quality"].lower()
qs[q].append(embed)
pages = qs["high"] + qs["medium"] + qs["low"]
for index, embed in enumerate(pages):
if len(pages) > 1:
embed.set_footer(text=f"Voice {index + 1}/{len(pages)} | {lang} voices")
if DPY_MENUS:
await dpymenu(ctx, pages, timeout=60)
else:
if len(pages) == 1:
await ctx.send(embed=pages[0])
else:
await menu(ctx, pages, DEFAULT_CONTROLS, timeout=60)
@commands.group()
@commands.guild_only()
@commands.admin_or_permissions(manage_guild=True)
async def ttschannel(self, ctx):
"""
Configures automatic TTS channels.
"""
pass
@ttschannel.command()
async def add(self, ctx, channel: discord.TextChannel):
"""
Adds a channel for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if channel.id not in channel_list:
channel_list.append(channel.id)
await self.config.guild(ctx.guild).channels.set(channel_list)
self.channel_cache[ctx.guild.id] = channel_list
await ctx.send(
f"Okay, {channel.mention} will now be used as a TTS channel."
)
else:
await ctx.send(
f"{channel.mention} is already a TTS channel, did you mean use the `{ctx.clean_prefix}ttschannel remove` command?"
)
@ttschannel.command(aliases=["delete", "del"])
async def remove(self, ctx, channel: discord.TextChannel):
"""
Removes a channel for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if channel.id in channel_list:
channel_list.remove(channel.id)
await self.config.guild(ctx.guild).channels.set(channel_list)
self.channel_cache[ctx.guild.id] = channel_list
await ctx.send(f"Okay, {channel.mention} is no longer a TTS channel.")
else:
await ctx.send(
f"{channel.mention} isn't a TTS channel, did you mean use the `{ctx.clean_prefix}ttschannel add` command?"
)
@ttschannel.command()
async def clear(self, ctx):
"""
Removes all the channels for automatic TTS.
"""
channel_list = await self.config.guild(ctx.guild).channels()
if not channel_list:
await ctx.send("There's no channels in the config.")
else:
try:
await ctx.send(
"Are you sure you want to clear all this server's TTS channels? Respond with yes or no."
)
                predicate = MessagePredicate.yes_or_no(ctx, user=ctx.author)
                await ctx.bot.wait_for("message", check=predicate, timeout=30)
            except asyncio.TimeoutError:
                await ctx.send(
                    "You never responded, please use the command again to clear all of this server's TTS channels."
                )
                return
            if predicate.result:
await self.config.guild(ctx.guild).channels.clear()
del self.channel_cache[ctx.guild.id]
await ctx.send("Okay, I've cleared all TTS channels for this server.")
else:
await ctx.send("Okay, I won't clear any TTS channels.")
@ttschannel.command()
async def list(self, ctx):
"""
Shows all the channels for automatic TTS.
"""
try:
channel_list = self.channel_cache[ctx.guild.id]
except KeyError:
channel_list = None
if not channel_list:
await ctx.send("This server doesn't have any TTS channels set up.")
else:
text = "".join(
"<#" + str(channel) + "> - " + str(channel) + "\n"
for channel in channel_list
)
pages = [p for p in pagify(text=text, delims="\n")]
embeds = []
for index, page in enumerate(pages):
embed = discord.Embed(
title="Automatic TTS Channels",
color=await ctx.embed_colour(),
description=page,
)
                if len(pages) > 1:
embed.set_footer(text=f"Page {index+1}/{len(pages)}")
embeds.append(embed)
if DPY_MENUS:
await dpymenu(ctx, embeds, timeout=60)
else:
if len(pages) == 1:
await ctx.send(embed=embeds[0])
else:
await menu(ctx, embeds, DEFAULT_CONTROLS, timeout=60)
@commands.Cog.listener()
async def on_message_without_command(self, message: discord.Message):
if not message.guild:
return
if message.author.bot:
return
if not message.channel.permissions_for(message.guild.me).send_messages:
return
if await self.bot.allowed_by_whitelist_blacklist(who=message.author) is False:
return
if await self.bot.cog_disabled_in_guild(self, message.guild):
return
try:
channel_list = self.channel_cache[message.guild.id]
except KeyError:
return
if not channel_list:
return
if message.channel.id not in channel_list:
return
if not message.author.voice or not message.author.voice.channel:
await message.channel.send("You are not connected to a voice channel.")
return
author_data = await self.config.user(message.author).all()
author_voice = author_data["voice"]
author_speed = author_data["speed"]
text = self.decancer_text(message.clean_content)
if text is None:
await message.channel.send("That's not a valid message, sorry.")
return
char_number = len(text)
if char_number > 1000:
await message.channel.send(
f"Sorry, I limit TTS to 1000 characters to avoid abuse. ({char_number}/1000)"
)
return
urls = generate_urls(author_voice, text, author_speed)
try:
await self.play_sfx(message.author.voice.channel, message.channel, urls)
except Exception:
await message.channel.send(
"Oops, an error occured. If this continues please use the contact command to inform the bot owner."
)
async def play_sfx(self, vc, channel, link):
try:
player = lavalink.get_player(vc.guild.id)
        except IndexError:  # no Lavalink node has been initialised yet
            if channel:
await channel.send(
"Either the Audio cog is not loaded or lavalink has not been initialized yet. If this continues to happen, please contact the bot owner."
)
return
except KeyError:
player = await lavalink.connect(vc)
link = link[0] # could be rewritten to add ALL links
tracks = await player.load_tracks(query=link)
if not tracks.tracks:
await channel.send(
"Something went wrong. Either the SFX is invalid, or the TTS host is down."
)
return
track = tracks.tracks[0]
self.repeat_state[vc.guild.id] = player.repeat
player.repeat = False
if player.current is None and not player.queue:
player.queue.append(track)
self.current_sfx[vc.guild.id] = track
await player.play()
return
try:
csfx = self.current_sfx[vc.guild.id]
except KeyError:
csfx = None
if csfx is not None:
player.queue.insert(0, track)
await player.skip()
self.current_sfx[player.guild.id] = track
return
self.last_track_info[player.guild.id] = (player.current, player.position)
self.current_sfx[player.guild.id] = track
player.queue.insert(0, track)
player.queue.insert(1, player.current)
await player.skip()
async def queue_sfx(self, vc, channel, link):
try:
player = lavalink.get_player(vc.guild.id)
        except IndexError:  # no Lavalink node has been initialised yet
            if channel:
await channel.send(
"Either the Audio cog is not loaded or lavalink has not been initialized yet. If this continues to happen, please contact the bot owner."
)
return
except KeyError:
player = await lavalink.connect(vc)
link = link[0] # could be rewritten to add ALL links
tracks = await player.load_tracks(query=link)
if not tracks.tracks:
await channel.send(
"Something went wrong. Either the SFX is invalid, or the TTS host is down."
)
return
track = tracks.tracks[0]
if player.current is None and not player.queue:
player.queue.append(track)
self.current_sfx[vc.guild.id] = track
await player.play()
return
player.queue.append(track)
return
async def ll_check(self, player, event, reason):
try:
csfx = self.current_sfx[player.guild.id]
except KeyError:
csfx = None
try:
lti = self.last_track_info[player.guild.id]
except KeyError:
lti = None
if csfx is None and lti is None:
return
if (
event == lavalink.LavalinkEvents.TRACK_EXCEPTION
and csfx is not None
or event == lavalink.LavalinkEvents.TRACK_STUCK
and csfx is not None
):
del self.current_sfx[player.guild.id]
return
if (
event == lavalink.LavalinkEvents.TRACK_END
and player.current is None
and csfx is not None
):
del self.current_sfx[player.guild.id]
return
if (
event == lavalink.LavalinkEvents.TRACK_END
and lti is not None
and player.current is not None
and player.current.track_identifier == lti[0].track_identifier
):
if player.guild.id in self.current_sfx:
del self.current_sfx[player.guild.id]
await player.pause()
await player.seek(lti[1] + self.lag_time)
await player.pause(False)
if player.guild.id in self.last_track_info:
del self.last_track_info[player.guild.id]
if player.guild.id in self.repeat_state:
player.repeat = self.repeat_state[player.guild.id]
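# Command-usage sketch (Discord chat, assuming a "!" prefix; the text and
# sound name are illustrative):
#     !tts hello there        # speaks "hello there" in your voice channel
#     !sfx airhorn            # plays a previously added sound named "airhorn"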
| 35.327731
| 157
| 0.571259
| 28,904
| 0.982194
| 0
| 0
| 22,239
| 0.755709
| 25,700
| 0.873318
| 7,504
| 0.254995
|
1f477811434f0fbba0fb2564e885e5ce2cde1027
| 581
|
py
|
Python
|
norm_files.py
|
jncraton/ipeds-data
|
e17b051bac3d4d112d83a85f38dc1422d4fb584b
|
[
"MIT"
] | null | null | null |
norm_files.py
|
jncraton/ipeds-data
|
e17b051bac3d4d112d83a85f38dc1422d4fb584b
|
[
"MIT"
] | null | null | null |
norm_files.py
|
jncraton/ipeds-data
|
e17b051bac3d4d112d83a85f38dc1422d4fb584b
|
[
"MIT"
] | null | null | null |
"""
Normalizes contents for all data files.
- Converts column names to uppercase
- Converts data values to uppercase
- Converts to Unix line endings
- Removes trailing whitespace from all lines
"""
import os
csvs = ['data/' + f for f in os.listdir('data') if f.endswith('.csv')]
for f in csvs:
lf = f.lower()
os.rename(f,lf)
print(lf)
content = ''
with open(lf,'r',encoding='cp1252') as fr:
content = fr.read()
    content = '\n'.join([l.rstrip() for l in content.splitlines()])
with open(lf,'w',encoding='cp1252') as fw:
fw.write(content.upper())
| 19.366667
| 70
| 0.650602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 246
| 0.423408
|
1f4a0b28023170a365cdcc71fab67b6fb8261389
| 2,929
|
py
|
Python
|
admin/sandbox.py
|
liuhh02/NERDA
|
7488d0c5adf3eb5296b1b276b412b95131557097
|
[
"MIT"
] | 123
|
2020-12-19T09:36:28.000Z
|
2022-03-20T11:01:02.000Z
|
admin/sandbox.py
|
liuhh02/NERDA
|
7488d0c5adf3eb5296b1b276b412b95131557097
|
[
"MIT"
] | 35
|
2021-02-14T09:46:11.000Z
|
2022-03-31T09:57:38.000Z
|
admin/sandbox.py
|
liuhh02/NERDA
|
7488d0c5adf3eb5296b1b276b412b95131557097
|
[
"MIT"
] | 30
|
2021-01-28T11:02:31.000Z
|
2022-03-06T19:58:27.000Z
|
from NERDA.models import NERDA
from NERDA.datasets import get_conll_data, get_dane_data
from transformers import AutoTokenizer
trans = 'bert-base-multilingual-uncased'
tokenizer = AutoTokenizer.from_pretrained(trans, do_lower_case = True)
data = get_dane_data('train')
sents = data.get('sentences')
out = []
for sent in sents:
tokens = []
for word in sent:
tokens.extend(tokenizer.tokenize(word))
out.append(tokens)
lens = [len(x) for x in out]
max(lens)
sents[3595]
from transformers import AutoTokenizer, AutoModel, AutoConfig
t = 'google/electra-small-discriminator'
cfg = AutoConfig.from_pretrained(t)
#trn = get_conll_data('train')
#idx_min = 3110
#idx_max = 3115
#valid = get_conll_data('valid')
#valid['sentences'] = valid['sentences'][idx_min:idx_max+1]
#valid['tags'] = valid['tags'][idx_min:idx_max+1]
#trn['sentences'] = trn['sentences'][idx_min:idx_max+1]
#trn['tags'] = trn['tags'][idx_min:idx_max+1]
# model = NERDA(dataset_training=trn,
# dataset_validation = valid)
#model.train()
#k=0
#trn['sentences'][3111]
#from transformers import AutoTokenizer
#t = AutoTokenizer.from_pretrained('bert-base-multilingual-uncased')
#valid = get_conll_data('valid')
filename = 'en_bert_ml.pkl'
# pickle.dump(model, open(filename, 'wb'))
import pickle
file = open(filename,'rb')
model = pickle.load(file)
test = get_conll_data('test')
model.evaluate_performance(test, batch_size = 10)
#for entry in range(3120,3140):
# print(entry)
# sent = trn['sentences'][entry]
# [t.tokenize(word) for word in sent]
test = get_conll_data('test')
idx_min = 202
idx_max = 202
# valid = get_conll_data('valid')
#valid['sentences'] = valid['sentences'][idx_min:idx_max+1]
#valid['tags'] = valid['tags'][idx_min:idx_max+1]
test['sentences'] = test['sentences'][idx_min:idx_max+1]
test['tags'] = test['tags'][idx_min:idx_max+1]
model.evaluate_performance(test)
# model = NERDA(dataset_training=trn,
# dataset_validation = valid)
#model.train()
#k=0
#trn['sentences'][3111]
#from transformers import AutoTokenizer
#t = AutoTokenizer.from_pretrained('bert-base-multilingual-uncased')
#valid = get_conll_data('valid')
transformer = "google/electra-small-discriminator"
from transformers import AutoTokenizer, AutoModel, AutoConfig
trans = AutoConfig.from_pretrained(transformer)
def tester():
    try:
        model = AutoModel.from_pretrained('google/electra-small-discriminator')
    except Exception as exc:
        print("Oops!", type(exc).__name__, "occurred.")
        model = None
    return model
from NERDA.datasets import get_dane_data
trn = get_conll_data('train', 5)
valid = get_conll_data('dev', 5)
transformer = 'bert-base-multilingual-uncased'
model = NERDA(transformer = transformer,
dataset_training = trn,
dataset_validation = valid)
| 26.627273
| 79
| 0.714237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,408
| 0.48071
|
1f4f35f139c907b889c9bfaceea5f2502d416ae2
| 2,099
|
py
|
Python
|
ironic/tests/drivers/test_base.py
|
Tehsmash/ironic
|
a34c351639e960af92a3608fbc9249dfce5c6057
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/drivers/test_base.py
|
Tehsmash/ironic
|
a34c351639e960af92a3608fbc9249dfce5c6057
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/drivers/test_base.py
|
Tehsmash/ironic
|
a34c351639e960af92a3608fbc9249dfce5c6057
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ironic.common import exception
from ironic.drivers import base as driver_base
from ironic.tests import base
class FakeVendorInterface(driver_base.VendorInterface):
def get_properties(self):
pass
@driver_base.passthru(['POST'])
def noexception(self):
return "Fake"
@driver_base.passthru(['POST'])
def ironicexception(self):
raise exception.IronicException("Fake!")
@driver_base.passthru(['POST'])
def normalexception(self):
raise Exception("Fake!")
def validate(self, task, **kwargs):
pass
def driver_validate(self, **kwargs):
pass
class PassthruDecoratorTestCase(base.TestCase):
def setUp(self):
super(PassthruDecoratorTestCase, self).setUp()
self.fvi = FakeVendorInterface()
driver_base.LOG = mock.Mock()
def test_passthru_noexception(self):
result = self.fvi.noexception()
self.assertEqual("Fake", result)
def test_passthru_ironicexception(self):
self.assertRaises(exception.IronicException,
self.fvi.ironicexception, mock.ANY)
driver_base.LOG.exception.assert_called_with(
mock.ANY, 'ironicexception')
def test_passthru_nonironicexception(self):
self.assertRaises(exception.VendorPassthruException,
self.fvi.normalexception, mock.ANY)
driver_base.LOG.exception.assert_called_with(
mock.ANY, 'normalexception')
| 30.867647
| 78
| 0.697475
| 1,332
| 0.634588
| 0
| 0
| 286
| 0.136255
| 0
| 0
| 698
| 0.332539
|
1f51deb831801cb409889edbf97a6deabae048a4
| 1,368
|
py
|
Python
|
quantiphyse/packages/core/analysis/__init__.py
|
physimals/quantiphyse
|
bd3be0098b9929b1987fe0f23e515fa70674b3d4
|
[
"Apache-2.0"
] | 9
|
2021-02-01T06:44:31.000Z
|
2022-01-17T15:46:40.000Z
|
quantiphyse/packages/core/analysis/__init__.py
|
ibme-qubic/quantiphyse
|
34f40424941414ce139c4612a903de3f24883576
|
[
"Apache-2.0"
] | 34
|
2019-02-04T10:47:02.000Z
|
2020-08-13T09:36:52.000Z
|
quantiphyse/packages/core/analysis/__init__.py
|
ibme-qubic/quantiphyse
|
34f40424941414ce139c4612a903de3f24883576
|
[
"Apache-2.0"
] | 2
|
2019-05-25T08:52:05.000Z
|
2020-06-18T13:59:00.000Z
|
"""
Quantiphyse - Analysis widgets
Copyright (c) 2013-2020 University of Oxford
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .widgets import MultiVoxelAnalysis, DataStatistics, RoiAnalysisWidget, SimpleMathsWidget, VoxelAnalysis, MeasureWidget
from .processes import CalcVolumesProcess, ExecProcess, DataStatisticsProcess, OverlayStatsProcess
from .tests import DataStatisticsTest, MultiVoxelAnalysisTest, VoxelAnalysisTest, MeasureWidgetTest
from .process_tests import AnalysisProcessTest
QP_MANIFEST = {
"widgets" : [MultiVoxelAnalysis, DataStatistics, RoiAnalysisWidget, SimpleMathsWidget, VoxelAnalysis, MeasureWidget],
"widget-tests" : [DataStatisticsTest, MultiVoxelAnalysisTest, VoxelAnalysisTest, MeasureWidgetTest],
"process-tests" : [AnalysisProcessTest],
"processes" : [CalcVolumesProcess, ExecProcess, OverlayStatsProcess, DataStatisticsProcess],
}
| 47.172414
| 123
| 0.811404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 658
| 0.480994
|
1f528ea187b09ea162a161716dde9aff8b7b565d
| 1,042
|
py
|
Python
|
examples/chain_mdp.py
|
kngwyu/rlpy
|
329166de28d311d8f87358a62c38f40a7318fe07
|
[
"BSD-3-Clause"
] | 3
|
2019-12-07T13:34:02.000Z
|
2021-03-29T10:20:05.000Z
|
examples/chain_mdp.py
|
kngwyu/rlpy
|
329166de28d311d8f87358a62c38f40a7318fe07
|
[
"BSD-3-Clause"
] | 14
|
2019-09-29T03:09:09.000Z
|
2022-01-13T03:17:48.000Z
|
examples/chain_mdp.py
|
kngwyu/rlpy3
|
329166de28d311d8f87358a62c38f40a7318fe07
|
[
"BSD-3-Clause"
] | null | null | null |
import click
from rlpy.domains import ChainMDP
from rlpy.tools.cli import run_experiment
import methods
def select_domain(chain_size):
return ChainMDP(chain_size=chain_size)
def select_agent(name, domain, max_steps, seed, **kwargs):
if name is None or name == "lspi":
return methods.tabular_lspi(domain, max_steps)
elif name == "nac":
return methods.tabular_nac(domain)
elif name == "tabular-q":
return methods.tabular_q(domain, initial_learn_rate=0.1)
elif name == "ifddk-q":
return methods.ifddk_q(domain, initial_learn_rate=0.1)
elif name == "psrl":
return methods.tabular_psrl(domain, seed=seed)
else:
raise NotImplementedError("Method {} is not supported".format(name))
if __name__ == "__main__":
run_experiment(
select_domain,
select_agent,
default_max_steps=10000,
default_num_policy_checks=10,
default_checks_per_policy=50,
other_options=[click.Option(["--chain-size"], type=int, default=4)],
)
| 28.944444
| 76
| 0.681382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.085413
|
1f5495f299c3cac72ffba7fb46905bf9c811295d
| 694
|
py
|
Python
|
migrations/versions/6e2656ef034b_.py
|
haichungcn/h-ticketbox
|
37d3a3054a92fbb3702cac10f87621762b68bae2
|
[
"Apache-2.0"
] | null | null | null |
migrations/versions/6e2656ef034b_.py
|
haichungcn/h-ticketbox
|
37d3a3054a92fbb3702cac10f87621762b68bae2
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:42:03.000Z
|
2021-06-02T00:42:03.000Z
|
migrations/versions/6e2656ef034b_.py
|
haichungcn/h-ticketbox
|
37d3a3054a92fbb3702cac10f87621762b68bae2
|
[
"Apache-2.0"
] | null | null | null |
"""empty message
Revision ID: 6e2656ef034b
Revises: f8f949ce4522
Create Date: 2019-11-26 11:05:54.376467
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6e2656ef034b'
down_revision = 'f8f949ce4522'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('tickettypes_name_key', 'tickettypes', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('tickettypes_name_key', 'tickettypes', ['name'])
# ### end Alembic commands ###
| 23.931034
| 80
| 0.708934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 444
| 0.639769
|
1f54a8fce56dc2266fdcba4960db2d6b32f72f6a
| 1,940
|
py
|
Python
|
python/heterocl/mlir/context.py
|
chhzh123/heterocl
|
856e9b8ad877d11280a7e457e91ca89803c05570
|
[
"Apache-2.0"
] | null | null | null |
python/heterocl/mlir/context.py
|
chhzh123/heterocl
|
856e9b8ad877d11280a7e457e91ca89803c05570
|
[
"Apache-2.0"
] | null | null | null |
python/heterocl/mlir/context.py
|
chhzh123/heterocl
|
856e9b8ad877d11280a7e457e91ca89803c05570
|
[
"Apache-2.0"
] | null | null | null |
from contextvars import ContextVar
from hcl_mlir.dialects import hcl as hcl_d
from hcl_mlir.ir import *
ImperativeLoopNestCount = ContextVar("ImperativeLoopNestCount", default=1)
ImperativeLoopDepth = ContextVar("ImperativeLoopDepth", default=0)
StageName = ContextVar("StageName", default="")
NestedCompute = ContextVar("NestedCompute", default=0)
class UniqueName(object):
scalar_idx = 0
loop_idx = 0
tensor_idx = 0
stage_idx = 0
schedule_idx = 0
reduction_axis_idx = 0
def __init__(self):
pass
@classmethod
def get(cls, case="stage"):
if case == "stage":
# Imperative computing stage
name = "stage_" + str(cls.stage_idx)
cls.stage_idx += 1
elif case == "loop":
name = "loop_" + str(cls.loop_idx)
cls.loop_idx += 1
elif case == "scalar":
name = "scalar_" + str(cls.scalar_idx)
cls.scalar_idx += 1
elif case == "tensor":
name = "compute_" + str(cls.tensor_idx)
cls.tensor_idx += 1
elif case == "schedule":
name = "schedule_" + str(cls.schedule_idx)
cls.schedule_idx += 1
elif case == "reduction_axis":
name = "reduction_axis_" + str(cls.loop_idx)
cls.reduction_axis_idx += 1
else:
raise RuntimeError(f"Unrecognized case in get_unique_name: {case}")
return name
class GlobalContext(object):
def __init__(self):
self.ctx = None
self.loc = None
def get_context(self):
return self.ctx
def set_context(self):
self.ctx = Context()
hcl_d.register_dialect(self.ctx)
self.loc = Location.unknown(self.ctx)
def get_location(self):
return self.loc
global_ctx = GlobalContext()
get_context = global_ctx.get_context
set_context = global_ctx.set_context
get_location = global_ctx.get_location
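# Usage sketch: names are handed out sequentially per category (exact indices
# depend on prior calls):
#     UniqueName.get("tensor")  # -> "compute_0"
#     UniqueName.get("tensor")  # -> "compute_1"
#     UniqueName.get("loop")    # -> "loop_0"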
| 27.714286
| 79
| 0.619588
| 1,439
| 0.741753
| 0
| 0
| 900
| 0.463918
| 0
| 0
| 273
| 0.140722
|
1f572ce92d4767535e92d6069a13c0b878ad4d2b
| 1,216
|
py
|
Python
|
378. Kth Smallest Element in a Sorted Matrix.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
378. Kth Smallest Element in a Sorted Matrix.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
378. Kth Smallest Element in a Sorted Matrix.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
from typing import List
import heapq
# 排序
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
res = sorted(sum(matrix,[]))
return res[k-1]
# 最小堆维护归并排序
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
n = len(matrix)
hpq = [(matrix[i][0], i, 0) for i in range(n)]
heapq.heapify(hpq)
for i in range(k-1):
num, x, y = heapq.heappop(hpq)
if y != n-1:
heapq.heappush(hpq, (matrix[x][y+1], x, y+1))
return heapq.heappop(hpq)[0]
# 二分法
class Solution:
def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
def check(mid):
i, j = n-1, 0
num = 0
while i >= 0 and j < n:
if matrix[i][j] <= mid:
num += i + 1
j += 1
else:
i -= 1
return num >= k
n = len(matrix)
left, right = matrix[0][0], matrix[-1][-1]
while left<right:
mid = (left+right)//2
if check(mid):
right = mid
else:
left = mid+1
return left
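# Worked example shared by all three approaches (the classic LeetCode case):
#     matrix = [[1, 5, 9], [10, 11, 13], [12, 13, 15]]
#     Solution().kthSmallest(matrix, 8)  # -> 13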
| 27.022222
| 66
| 0.449836
| 1,151
| 0.925241
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.038585
|