max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
imagersite/imager_images/views.py | famavott/django-imager | 0 | 12765451 | """Views for imager_images."""
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, ListView, TemplateView, UpdateView
from imager_images.forms import AlbumForm, PhotoForm
from imager_images.models import Album, Photo
class LibraryView(TemplateView):
    """Render the user's library page listing all of their photos and albums."""

    template_name = "imager_images/library.html"

    def get_context_data(self, **kwargs):
        """Add the requesting user's photos and albums to the template context."""
        context = super(LibraryView, self).get_context_data(**kwargs)
        # NOTE(review): assumes request.user is authenticated and has a related
        # ``profile``; anonymous users would raise here — confirm a login guard.
        user = self.request.user.profile
        context['photos'] = user.photo.all()
        context['albums'] = user.album.all()
        return context
class AlbumsView(ListView):
    """List every album belonging to the requesting user."""

    template_name = 'imager_images/albums.html'
    model = Album
    context_object_name = 'albums'

    def get_queryset(self):
        """Restrict the queryset to the requesting user's albums."""
        # NOTE(review): assumes an authenticated user with a profile.
        user = self.request.user.profile
        return user.album.all()
class PhotosView(ListView):
    """List every photo belonging to the requesting user."""

    template_name = 'imager_images/photos.html'
    model = Photo
    context_object_name = 'photos'

    def get_queryset(self):
        """Restrict the queryset to the requesting user's photos."""
        # NOTE(review): assumes an authenticated user with a profile.
        user = self.request.user.profile
        return user.photo.all()
class AlbumInfo(DetailView):
    """Detail view for a single album (docstring previously said "photo")."""

    template_name = 'imager_images/album_info.html'
    model = Album
class PhotoInfo(DetailView):
    """Detail view for a single photo."""

    template_name = 'imager_images/photo_info.html'
    model = Photo
class CreatePhoto(CreateView):
    """Create a new photo owned by the requesting user."""

    template_name = 'imager_images/photo_form.html'
    model = Photo
    form_class = PhotoForm
    success_url = reverse_lazy('library')

    def form_valid(self, form):
        """Attach the requesting user's profile as owner before saving."""
        form.instance.user = self.request.user.profile
        return super(CreatePhoto, self).form_valid(form)
class CreateAlbum(CreateView):
    """Create a new album owned by the requesting user."""

    template_name = 'imager_images/album_form.html'
    model = Album
    form_class = AlbumForm
    success_url = reverse_lazy('library')

    def form_valid(self, form):
        """Attach the requesting user's profile as owner before saving."""
        form.instance.user = self.request.user.profile
        return super(CreateAlbum, self).form_valid(form)
class EditPhoto(UpdateView):
    """Edit an existing photo; ownership is (re)assigned to the requester."""

    template_name = 'imager_images/photo_edit.html'
    model = Photo
    form_class = PhotoForm
    success_url = reverse_lazy('library')

    def form_valid(self, form):
        """Attach the requesting user's profile as owner before saving."""
        form.instance.user = self.request.user.profile
        return super(EditPhoto, self).form_valid(form)
class EditAlbum(UpdateView):
    """Edit an existing album; ownership is (re)assigned to the requester."""

    template_name = 'imager_images/album_edit.html'
    model = Album
    form_class = AlbumForm
    success_url = reverse_lazy('library')

    def form_valid(self, form):
        """Attach the requesting user's profile as owner before saving."""
        form.instance.user = self.request.user.profile
        return super(EditAlbum, self).form_valid(form)
| 2.703125 | 3 |
setup.py | xrmx/uwsgiit-py | 2 | 12765452 | from setuptools import setup, find_packages
import os
import uwsgiit
# PyPI trove classifiers describing the supported environment and license.
CLASSIFIERS = [
    'Environment :: Console',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

# Package metadata: version comes from uwsgiit.__version__, the long
# description from the README.md next to this file.
setup(
    author="<NAME>",
    author_email="<EMAIL>",
    name='uwsgiit-py',
    version=uwsgiit.__version__,
    description='Library for uwsgi.it api',
    long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
    url="https://github.com/xrmx/uwsgiit-py",
    license='BSD License',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    install_requires=[
        'requests>=2',
    ],
    test_suite='uwsgiit.tests',
    packages=find_packages(exclude=["test_project", "example.*"]),
    include_package_data=True,
    zip_safe = False,
)
| 1.375 | 1 |
130_html_to_csv/190_mkcsv_t_result_odds.py | takobouzu/BOAT_RACE_DB | 6 | 12765453 | <reponame>takobouzu/BOAT_RACE_DB<filename>130_html_to_csv/190_mkcsv_t_result_odds.py
'''
【システム】BOAT_RACE_DB2
【ファイル】180_mkcsv_t_result_odds.py
【機能仕様】レース結果HTMLファイルからレース結果オッズテーブル「t_result_odds」のインポートCSVファイルを作成する
【動作環境】macOS 11.1/Raspbian OS 10.4/python 3.9.1/sqlite3 3.32.3
【来 歴】2021.02.01 ver 1.00
'''
import re
import os
import datetime
from bs4 import BeautifulSoup
# Installation base directory
BASE_DIR = '/home/pi/BOAT_RACE_DB'
'''
[Function] mkcsv_t_result_odds
[Purpose]  Build the import CSV for the race-result odds table
           "t_result_odds" from the downloaded race-result HTML files.
[Args]     none
[Returns]  none
'''
def mkcsv_t_result_odds():
    print('レース結果オッズテーブル「t_result_odds」のインポートCSVファイル 開始')
    in_path = BASE_DIR + '/200_html/result'
    out_file = BASE_DIR + '/210_csv/t_result_odds.csv'
    fw = open(out_file, 'w')
    for item in os.listdir(path=in_path):
        if item != '.html' and item != '.DS_Store':
            in_file = in_path + '/' + item
            print("==> 処理中[%s]" % (in_file))
            fb = open(in_file, 'r')
            html = fb.read()
            fb.close()
            # Data-existence check: skip pages with no data or cancelled races
            flg = 0
            if 'データがありません。' in html:
                flg = 1
            if 'レース中止' in html:
                flg = 1
            if flg == 0:
                # Initialize the CSV record fields shared by every row
                t_result_oods_yyyymmdd = ''  # race date
                t_result_oods_pool_code = ''  # venue code
                t_result_oods_race_no = ''  # race number
                # Extract CSV record fields from the HTML file
                soup = BeautifulSoup(html, 'html.parser')
                # The file name encodes date (8 chars), venue (2) and race no (2)
                t_result_oods_yyyymmdd = item[0:8]
                t_result_oods_pool_code = item[8:10]
                t_result_oods_race_no = item[10:12]
                tag1 = soup.find_all('div', class_='grid_unit')
                soup = BeautifulSoup(str(tag1[2]), 'html.parser')
                n = 0
                for tag1 in soup.find_all('tr', class_='is-p3-0'):
                    n = n + 1
                    focus_list = []
                    t_result_oods_ticket_type = ''  # bet type
                    t_result_oods_focus = ''  # boat combination
                    t_result_oods_dividend = ''  # payout
                    t_result_oods_popularity = ''  # popularity rank
                    nn = 0
                    for tag2 in str(tag1).splitlines():
                        nn = nn + 1
                        if 'numberSet1_number' in str(tag2):
                            # Extract a combination number
                            wk = str(tag2)
                            wk = wk.strip()
                            wk = wk.replace('-->','')
                            wk = wk.replace('<!--','')
                            wk_arry = wk.split('>')
                            wk_arry = str(wk_arry[1]).split('<')
                            focus_list.append(str(wk_arry[0]))
                        if 'is-payout1' in str(tag2):
                            # Extract the payout amount (strip yen sign and commas)
                            wk = str(tag2)
                            wk = wk.strip()
                            wk = wk.replace('¥','')
                            wk = wk.replace(',','')
                            wk_arry = wk.split('>')
                            wk_arry = str(wk_arry[2]).split('<')
                            t_result_oods_dividend = str(wk_arry[0])
                        # Extract the popularity rank (a bare 1-3 digit <td>)
                        m = re.match(r"<td>([1-9][0-9][0-9]|[1-9][0-9]|[1-9])</td>", str(tag2))
                        if m:
                            wk = str(tag2)
                            wk = wk.strip()
                            wk_arry = wk.split('>')
                            wk_arry = str(wk_arry[1]).split('<')
                            t_result_oods_popularity = str(wk_arry[0])
                    # When combination numbers were found, build and write the record;
                    # row index n fixes the bet type (layout-dependent mapping).
                    if len(focus_list) > 0:
                        if n == 1 or n == 2:
                            t_result_oods_ticket_type = '三連単'
                            t_result_oods_focus = focus_list[0] + "-" + focus_list[1] + "-" + focus_list[2]
                        if n == 3 or n == 4:
                            t_result_oods_ticket_type = '三連複'
                            t_result_oods_focus = focus_list[0] + "=" + focus_list[1] + "=" + focus_list[2]
                        if n == 5 or n == 6:
                            t_result_oods_ticket_type = '二連単'
                            t_result_oods_focus = focus_list[0] + "-" + focus_list[1]
                        if n == 7 or n == 8:
                            t_result_oods_ticket_type = '二連複'
                            t_result_oods_focus = focus_list[0] + "=" + focus_list[1]
                        if n == 9 or n == 10 or n == 11 or n == 12 or n == 13:
                            t_result_oods_ticket_type = '拡連複'
                            t_result_oods_focus = focus_list[0] + "=" + focus_list[1]
                        if n == 14 or n == 15:
                            t_result_oods_ticket_type = '単勝'
                            t_result_oods_focus = focus_list[0]
                        if n == 16 or n == 17 or n == 18:
                            t_result_oods_ticket_type = '複勝'
                            t_result_oods_focus = focus_list[0]
                        # Build the CSV record
                        t_result_oods_outrec = ''
                        t_result_oods_outrec = t_result_oods_outrec + '"' + t_result_oods_yyyymmdd + '"'  # race date
                        t_result_oods_outrec = t_result_oods_outrec + ',"' + t_result_oods_pool_code + '"'  # venue code
                        t_result_oods_outrec = t_result_oods_outrec + ',"' + t_result_oods_race_no + '"'  # race number
                        t_result_oods_outrec = t_result_oods_outrec + ',"' + t_result_oods_ticket_type + '"'  # bet type
                        t_result_oods_outrec = t_result_oods_outrec + ',"' + t_result_oods_focus + '"'  # combination
                        t_result_oods_outrec = t_result_oods_outrec + ',' + t_result_oods_dividend  # payout
                        t_result_oods_outrec = t_result_oods_outrec + ',' + t_result_oods_popularity  # popularity
                        # Write the CSV record
                        fw.write(t_result_oods_outrec + '\n')
    fw.close()
    print('レース結果オッズ「t_result_odds」のインポートCSVファイル 完了')


# Main processing
mkcsv_t_result_odds()  # build the import CSV for the race-result odds table "t_result_odds"
| 2.484375 | 2 |
self_supervised/tensorboard/embedding_projector.py | 36000/myow | 19 | 12765454 | import torch
import tensorflow as tf
import tensorboard as tb
# fix a bug with tensorboard
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
def log_representation(net, inputs, metadata, writer, step, tag='representation', metadata_header=None,
                       inputs_are_images=False):
    r"""Encode ``inputs`` with ``net`` and log the flattened representations
    to tensorboard's embedding projector.

    Args:
        net (torch.nn.Module): Encoder.
        inputs (torch.Tensor): Inputs fed to the encoder.
        metadata (torch.Tensor or list): One label per input, stringified by tensorboard.
        writer (torch.writer.SummaryWriter): Summary writer.
        step (int): Global step value to record.
        tag (string, optional): Name for the embedding. (default: :obj:`representation`)
        metadata_header (list, optional): Metadata header. (default: :obj:`None`)
        inputs_are_images (boolean, optional): When :obj:`True`, the inputs are
            also attached as thumbnail images. (default: :obj:`False`)
    """
    # Forward pass without autograd bookkeeping; gradients are never needed here.
    with torch.no_grad():
        embeddings = net(inputs).detach()

    # Flatten everything past the batch dimension for the projector.
    flat = embeddings.view(embeddings.shape[0], -1)

    writer.add_embedding(
        flat,
        metadata,
        tag=tag,
        global_step=step,
        metadata_header=metadata_header,
        label_img=inputs if inputs_are_images else None,
    )
| 2.640625 | 3 |
app/users/views.py | prapeller/phoneauth_api | 0 | 12765455 | <gh_stars>0
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import mixins, viewsets, generics
from rest_framework import status
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.settings import api_settings
from users.serializers import UserSerializer, AuthTokenSerializer, UserPhoneSerializer
User = get_user_model()
class ListUserViewSet(mixins.ListModelMixin,
                      viewsets.GenericViewSet,
                      ):
    """List all users."""

    queryset = User.objects.all()
    serializer_class = UserSerializer
class RetrieveUpdateDestroyUserView(mixins.RetrieveModelMixin,
                                    mixins.UpdateModelMixin,
                                    mixins.DestroyModelMixin,
                                    viewsets.GenericViewSet):
    """Retrieve / Update / Destroy a single user."""

    queryset = User.objects.all()
    serializer_class = UserSerializer
class CreateRetrieveUserByPhoneViewSet(mixins.RetrieveModelMixin,
                                       mixins.CreateModelMixin,
                                       viewsets.GenericViewSet,
                                       ):
    """Retrieve an existing user by phone number, creating one when absent.

    A successful retrieve also generates and stores a fresh one-time
    password on the user before returning the serialized data.
    """

    lookup_field = 'phone'
    queryset = User.objects.all()
    serializer_class = UserPhoneSerializer

    def retrieve(self, request, *args, **kwargs):
        """Return the user for the normalized phone, creating it if missing."""
        phone = User.objects.normalize_phone(kwargs.get('phone'))
        try:
            user = User.objects.get(phone=phone)
        except ObjectDoesNotExist:
            # Unknown phone: fall through to the standard create flow.
            return self.create(request, *args, **kwargs)
        # Existing user: issue and persist a new one-time password.
        otp = user.generate_otp()
        user.set_otp(otp)
        user.save()
        serializer = self.get_serializer(user)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_200_OK, headers=headers)
class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for a user."""

    serializer_class = AuthTokenSerializer
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ActivateInviteCodeViewSet(viewsets.ModelViewSet):
    """Apply a partial update to a user, used to activate an invite code."""

    renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def activate(self, request, *args, **kwargs):
        """Partially update the target user; 201 on success, 400 on invalid data."""
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 2.140625 | 2 |
tests/test_parametric_components/test_PortCutterRectangular.py | moatazharb/paramak | 1 | 12765456 |
import unittest
import paramak
class TestPortCutterRectangular(unittest.TestCase):
    """Unit tests for the paramak.PortCutterRectangular component."""

    def test_creation(self):
        """Checks a PortCutterRectangular creation."""
        # A cutter with a fillet and several azimuth placements should still
        # produce a valid CAD solid.
        test_component = paramak.PortCutterRectangular(
            distance=3,
            z_pos=0,
            height=0.2,
            width=0.4,
            fillet_radius=0.02,
            azimuth_placement_angle=[0, 45, 90, 180]
        )

        assert test_component.solid is not None
| 3.046875 | 3 |
scripts/common/css/uri/authority.py | appcelerator-archive/titanium_mobile_tooling | 2 | 12765457 | # -*- coding: utf-8 -*-
def userinfo(authority, otherwise=''):
    """Return the user-info (``user:pass``) part preceding '@' in *authority*,
    or *otherwise* when no '@' is present."""
    at = authority.find('@')
    return otherwise if at == -1 else authority[:at]
def location(authority):
    """Return the location (``host:port``) part following '@' in *authority*,
    or the whole string when no '@' is present."""
    at = authority.find('@')
    return authority if at == -1 else authority[at + 1:]
| 3.359375 | 3 |
editor/content_table_editor.py | LordKBX/EbookCollection | 1 | 12765458 | # This Python file uses the following encoding: utf-8
import os, sys
import traceback
from PyQt5.QtWidgets import *
import PyQt5.QtCore
import PyQt5.QtGui
import PyQt5.uic
from PyQt5.uic import *
from xml.dom import minidom
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
from common import lang
import common.common, common.files, common.dialog, common.qt
from common.vars import *
from common.books import *
class IndexNameWindow(QDialog):
    """Modal dialog asking the user for a table-of-contents entry name."""

    def __init__(self, parent):
        super(IndexNameWindow, self).__init__(parent, QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint)
        PyQt5.uic.loadUi(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'files_name.ui'.replace('/', os.sep), self)  # Load the .ui file
        # Localize labels and buttons from the parent's language table.
        lng = parent.lang
        self.setWindowTitle(lng['Editor']['ContentTableWindow']['NameWindowTitle'])
        self.label.setText(lng['Editor']['ContentTableWindow']['NameWindowLabel'])
        self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setText(lng['Editor']['ContentTableWindow']['btnOk'])
        self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setText(lng['Editor']['ContentTableWindow']['btnCancel'])

    def open_exec(self, text: str = None):
        """Show the dialog (optionally pre-filled with *text*) and return the
        entered name, or None when the dialog is cancelled."""
        try:
            if text is not None:
                self.line_edit.setText(text)
            ret = self.exec_()
            if ret == 1:
                print('name = ', self.line_edit.text())
                return self.line_edit.text()
            else:
                return None
        except Exception:
            traceback.print_exc()
class ContentTableWindow(QDialog):
    """Dialog for editing an e-book's table of contents.

    Entries are kept in ``list_content``; each QListWidgetItem stores the
    chapter name under data role 97 and its source path under role 98.
    """

    def __init__(self, parent, folder: str):
        super(ContentTableWindow, self).__init__(parent, QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint)
        PyQt5.uic.loadUi(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'content_table_editor.ui'.replace('/', os.sep), self)  # Load the .ui file
        self.BDD = parent.BDD
        self.style = self.BDD.get_param('style')
        # Localize every widget from the stored language parameter.
        lng = lang.Lang()
        lng.set_lang(self.BDD.get_param('lang'))
        self.lang = lng
        self.setStyleSheet(get_style_var(self.style,'QDialog'))
        self.setWindowTitle(lng['Editor']['ContentTableWindow']['WindowTitle'])
        self.list_label.setText(lng['Editor']['ContentTableWindow']['ListLabel'])
        self.addindex_label.setText(lng['Editor']['ContentTableWindow']['AddIndexLabel'])
        self.addindex_line_edit.setPlaceholderText(lng['Editor']['ContentTableWindow']['AddIndexPlaceholder'])
        self.modify_index_label.setText(lng['Editor']['ContentTableWindow']['ModifyIndexLabel'])
        self.btn_rename.setText(lng['Editor']['ContentTableWindow']['BtnRename'])
        self.btn_delete.setText(lng['Editor']['ContentTableWindow']['BtnDelete'])
        self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setText(lng['Editor']['ContentTableWindow']['btnOk'])
        self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setText(lng['Editor']['ContentTableWindow']['btnCancel'])
        self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setStyleSheet(get_style_var(self.style, 'fullAltButton'))
        self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setStyleSheet(get_style_var(self.style, 'fullAltButton'))
        # self.list_content = QtWidgets.QListWidget()
        self.addindex_btn.clicked.connect(self.new_index)
        self.btn_rename.clicked.connect(self.rename)
        self.btn_delete.clicked.connect(self.delete)
        self.folder = folder
        self.selected_folder = ''
        self.list_data = dict()
        self.files = []

    def open_exec(self, text: str = None, url: str = None):
        """Populate the dialog from the book folder, run it modally and return
        the edited content table as a list of ``{'name', 'url'}`` dicts, or
        None when the dialog is cancelled."""
        try:
            self.list_content.clear()
            self.addindex_combobox.clear()
            # Candidate target files for new entries: all (x)html documents.
            self.files = common.files.list_directory_tree(self.folder, 'html|xhtml')
            files = common.files.list_directory(self.folder, 'html|xhtml')
            self.addindex_combobox.addItem("")
            print(self.files)
            for file in files:
                self.addindex_combobox.addItem(file.replace(self.folder, ""))
            # Parse the existing TOC out of the book's OPF package file.
            li = common.files.list_directory(self.folder, "opf")
            data = ''
            with open(li[0]) as myfile:
                data = myfile.read()
            toc_type, chapters = parse_content_table(
                data,
                li[0].replace(self.folder, '').replace(li[0][li[0].rindex(os.sep) + 1:], '').replace(os.sep, '/'),
                self.folder
            )
            for chapter in chapters:
                try:
                    item = QtWidgets.QListWidgetItem()
                    item.setText(chapter['name'] + " (" + chapter['src'] + ")")
                    item.setData(97, chapter['name'])
                    item.setData(98, chapter['src'])
                    self.list_content.addItem(item)
                except Exception:
                    traceback.print_exc()
            ret = self.exec_()
            # Collect the (possibly edited) entries back out of the widget.
            content_table = []
            max = self.list_content.count()
            i = 0
            while i < max:
                child = self.list_content.item(i)
                content_table.append({'name': child.data(97), 'url': child.data(98).replace("\\", "/")})
                i += 1
            print(content_table)
            if ret == 1:
                return content_table
            else:
                return None
        except Exception:
            traceback.print_exc()

    def new_index(self):
        """Append a new TOC entry built from the name field and file combo."""
        # self.addindex_line_edit = QLineEdit()
        # self.addindex_combobox = QComboBox()
        name = self.addindex_line_edit.text().strip()
        url = self.addindex_combobox.currentText().strip()
        if name == "" or name is None or url == "" or url is None:
            return
        item = QListWidgetItem()
        item.setData(97, name)
        item.setData(98, url)
        item.setText(name + " (" + url + ")")
        # self.list_content = QListWidget()
        self.list_content.insertItem(self.list_content.count(), item)
        # Reset the input widgets for the next entry.
        self.addindex_combobox.setCurrentIndex(0)
        self.addindex_line_edit.setText("")

    def rename(self):
        """Prompt for a new name for the selected entry and apply it."""
        try:
            if self.list_content.currentIndex().row() == -1:
                return
            # self.list_content = QListWidget()
            wn = IndexNameWindow(self)
            url = self.list_content.item(self.list_content.currentIndex().row()).data(98)
            tx = self.list_content.item(self.list_content.currentIndex().row()).data(97)
            name = wn.open_exec(tx)
            if name is not None:
                self.list_content.item(self.list_content.currentIndex().row()).setData(97, name)
                self.list_content.item(self.list_content.currentIndex().row()).setText(name + " (" + url + ")")
        except Exception:
            traceback.print_exc()

    def delete(self):
        """Remove the selected entry from the content table list."""
        try:
            if self.list_content.currentIndex().row() == -1:
                return
            # self.list_content = QListWidget()
            self.list_content.takeItem(self.list_content.currentIndex().row())
            # self.list_content.removeItemWidget(self.list_content.item(self.list_content.currentIndex().row()))
        except Exception:
            traceback.print_exc()
| 2.28125 | 2 |
Rest Django Framework/myproject/webapp/admin.py | PaulMarcelo/Python | 0 | 12765459 | from django.contrib import admin
from . models import employees
# Expose the employees model in the Django admin site.
admin.site.register(employees)
| 1.28125 | 1 |
Interview-Preparation/Facebook/Mock/OnSite-03-Design-Add-and-Search-Words-Data-Structures.py | shoaibur/SWE | 1 | 12765460 | <gh_stars>1-10
class TrieNode:
    """One trie node: per-character children plus an end-of-word flag."""

    # __slots__ drops the per-instance __dict__; a trie allocates many nodes,
    # so this is a significant memory saving with identical behavior.
    __slots__ = ('children', 'isEnd')

    def __init__(self):
        self.children = {}  # char -> TrieNode
        self.isEnd = False  # True when a stored word ends at this node


class WordDictionary:
    """Trie-backed word store; search supports '.' as a one-letter wildcard."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = TrieNode()

    def addWord(self, word: str) -> None:
        """
        Adds a word into the data structure.
        """
        currNode = self.root
        for char in word:
            # setdefault creates the missing child with a single dict lookup.
            currNode = currNode.children.setdefault(char, TrieNode())
        currNode.isEnd = True

    def search(self, word: str) -> bool:
        """
        Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
        """
        # Iterative DFS over (node, remaining suffix) pairs. Branch order
        # mirrors the original: a literal match is tried before the '.'
        # wildcard fan-out.
        stack = [(self.root, word)]
        while stack:
            node, rest = stack.pop()
            if not rest:
                if node.isEnd:
                    return True
            elif rest[0] in node.children:
                stack.append((node.children[rest[0]], rest[1:]))
            elif rest[0] == '.':
                for child in node.children.values():
                    stack.append((child, rest[1:]))
        return False


# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
| 3.84375 | 4 |
simplerepresentations/utils.py | adrienrenaud/simplerepresentations | 31 | 12765461 | <reponame>adrienrenaud/simplerepresentations
from multiprocessing import Pool
import torch
from tqdm.auto import tqdm
from torch.utils.data import TensorDataset
from simplerepresentations.input_features import InputFeatures
def examples_to_dataset(examples, tokenizer, max_seq_length, process_count, chunksize, verbose=1):
    """
    Converts a list of InputExample objects to a TensorDataset containing InputFeatures.

    Args:
        examples: InputExample objects with ``text_a`` (and optional ``text_b``).
        tokenizer: Pretrained tokenizer used to split and encode the text.
        max_seq_length (int): Maximum token length of each encoded example.
        process_count (int): Worker processes used for feature conversion.
        chunksize (int): Chunk size handed to the multiprocessing pool.
        verbose (int): When 1, print progress information.

    Returns:
        torch.utils.data.TensorDataset of (input_ids, input_mask, segment_ids).
    """
    # (A redundant ``tokenizer = tokenizer`` self-assignment was removed.)
    if verbose == 1:
        print('Converting to features started.')
    features = convert_examples_to_features(
        examples=examples,
        max_seq_length=max_seq_length,
        tokenizer=tokenizer,
        process_count=process_count,
        chunksize=chunksize,
        # XLNet has a CLS token at the end
        # NOTE(review): ``'bert' in ['xlnet']`` is always False, so the
        # XLNet/RoBERTa branches below can never trigger; a model-type
        # variable was presumably intended — confirm upstream.
        cls_token_at_end=bool('bert' in ['xlnet']),
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=2 if 'bert' in ['xlnet'] else 0,
        sep_token=tokenizer.sep_token,
        # RoBERTa uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
        sep_token_extra=bool('bert' in ['roberta']),
        # PAD on the left for XLNet
        pad_on_left=bool('bert' in ['xlnet']),
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=4 if 'bert' in ['xlnet'] else 0,
        verbose=verbose
    )

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)

    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
    return dataset
def convert_examples_to_features(
    examples,
    max_seq_length,
    tokenizer,
    process_count,
    chunksize,
    cls_token_at_end=False,
    sep_token_extra=False,
    pad_on_left=False,
    cls_token='[CLS]',
    sep_token='[SEP]',
    pad_token=0,
    sequence_a_segment_id=0,
    sequence_b_segment_id=1,
    cls_token_segment_id=1,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    verbose=1
):
    """
    Loads a data file into a list of `InputBatch`s
    `cls_token_at_end` define the location of the CLS token:
        - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` define the segment id associated to the CLS token:
        - 0 for BERT
        - 2 for XLNet

    Conversion is fanned out across ``process_count`` worker processes with
    ``multiprocessing.Pool.imap`` (chunked by ``chunksize``); tqdm renders
    progress unless ``verbose`` is 0.
    """
    # Bundle each example with every conversion option, since the pooled
    # worker function receives a single tuple argument.
    examples = [
        (
            example,
            max_seq_length,
            tokenizer,
            cls_token_at_end,
            cls_token,
            sep_token,
            cls_token_segment_id,
            pad_on_left,
            pad_token_segment_id,
            sep_token_extra
        ) for example in examples
    ]

    with Pool(process_count) as p:
        features = list(
            tqdm(
                p.imap(
                    convert_example_to_feature,
                    examples,
                    chunksize=chunksize
                ),
                total=len(examples),
                disable=(verbose == 0)
            )
        )

    return features
def convert_example_to_feature(
    example_row,
    pad_token=0,
    sequence_a_segment_id=0,
    sequence_b_segment_id=1,
    cls_token_segment_id=1,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    sep_token_extra=False
):
    """Convert one bundled example tuple into an InputFeatures instance.

    ``example_row`` is the 10-tuple built by ``convert_examples_to_features``;
    it is unpacked below and its values shadow the same-named keyword
    parameters (the keyword defaults only matter when calling this directly).
    Returns InputFeatures(input_ids, input_mask, segment_ids), each of length
    ``max_seq_length``.
    """
    example, \
        max_seq_length, \
        tokenizer, \
        cls_token_at_end, \
        cls_token, \
        sep_token, \
        cls_token_segment_id, \
        pad_on_left, \
        pad_token_segment_id, \
        sep_token_extra = example_row

    tokens_a = tokenizer.tokenize(example.text_a)

    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with '- 3'. '- 4' for RoBERTa.
        special_tokens_count = 4 if sep_token_extra else 3
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
    else:
        # Account for [CLS] and [SEP] with '- 2' and with '- 3' for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens_a) > max_seq_length - special_tokens_count:
            tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids:   0   0   0   0  0     0   0
    #
    # Where 'type_ids' are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the 'sentence vector'. Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = tokens_a + [sep_token]
    segment_ids = [sequence_a_segment_id] * len(tokens)

    if tokens_b:
        tokens += tokens_b + [sep_token]
        segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)

    if cls_token_at_end:
        tokens = tokens + [cls_token]
        segment_ids = segment_ids + [cls_token_segment_id]
    else:
        tokens = [cls_token] + tokens
        segment_ids = [cls_token_segment_id] + segment_ids

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

    # Zero-pad up to the sequence length.
    padding_length = max_seq_length - len(input_ids)
    if pad_on_left:
        input_ids = ([pad_token] * padding_length) + input_ids
        input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
        segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
    else:
        input_ids = input_ids + ([pad_token] * padding_length)
        input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
        segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    return InputFeatures(
        input_ids=input_ids,
        input_mask=input_mask,
        segment_ids=segment_ids
    )
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length: break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
| 2.453125 | 2 |
logs/views.py | NGXTDN/webvirtcloud | 1 | 12765462 | import json
from django.http import HttpResponse
from admin.decorators import superuser_only
from instances.models import Instance
from logs.models import Logs
def addlogmsg(user, instance, message):
    """
    Persist a single log entry.

    :param user: username the action is attributed to
    :param instance: instance name the message relates to
    :param message: human-readable log text
    :return: None
    """
    add_log_msg = Logs(user=user, instance=instance, message=message)
    add_log_msg.save()
@superuser_only
def vm_logs(request, vname):
    """
    Return the log entries for one instance as JSON, newest first.

    :param request: HTTP request (view is restricted to superusers)
    :param vname: instance name
    :return: HttpResponse containing a JSON list of log dicts
    """
    vm = Instance.objects.get(name=vname)
    # Only logs recorded since the instance was (re)created are relevant.
    logs_ = Logs.objects.filter(instance=vm.name, date__gte=vm.created).order_by("-date")
    logs = []
    for l in logs_:
        log = dict()
        log["user"] = l.user
        log["instance"] = l.instance
        log["message"] = l.message
        # NOTE(review): "%x %X" formatting is locale-dependent — confirm the
        # frontend expects the server locale's date format.
        log["date"] = l.date.strftime("%x %X")
        logs.append(log)
    return HttpResponse(json.dumps(logs))
| 2.15625 | 2 |
evaluation.py | LanceaKing/aasist | 36 | 12765463 | <filename>evaluation.py
import sys
import os
import numpy as np
def calculate_tDCF_EER(cm_scores_file,
                       asv_score_file,
                       output_file,
                       printout=True):
    """Compute the countermeasure EER and the minimum tandem DCF.

    Args:
        cm_scores_file: whitespace-separated file with columns
            (utt_id, source/attack_id, key['bonafide'|'spoof'], score).
        asv_score_file: whitespace-separated file with columns
            (source, key['target'|'nontarget'|'spoof'], score).
        output_file: path the textual report is written to when ``printout``.
        printout: when True, also compute the per-attack EER breakdown and
            write (and cat) the report file.

    Returns:
        (eer_cm_percent, min_tDCF) tuple.
    """
    # Fix tandem detection cost function (t-DCF) parameters
    Pspoof = 0.05
    cost_model = {
        'Pspoof': Pspoof,  # Prior probability of a spoofing attack
        'Ptar': (1 - Pspoof) * 0.99,  # Prior probability of target speaker
        'Pnon': (1 - Pspoof) * 0.01,  # Prior probability of nontarget speaker
        'Cmiss': 1,  # Cost of ASV system falsely rejecting target speaker
        'Cfa': 10,  # Cost of ASV system falsely accepting nontarget speaker
        'Cmiss_asv': 1,  # Cost of ASV system falsely rejecting target speaker
        'Cfa_asv': 10,  # Cost of ASV system falsely accepting nontarget speaker
        'Cmiss_cm': 1,  # Cost of CM system falsely rejecting target speaker
        'Cfa_cm': 10,  # Cost of CM system falsely accepting spoof
    }

    # Load organizers' ASV scores.
    # BUGFIX: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the documented replacement (same float64 dtype).
    asv_data = np.genfromtxt(asv_score_file, dtype=str)
    asv_keys = asv_data[:, 1]
    asv_scores = asv_data[:, 2].astype(float)

    # Load CM scores
    cm_data = np.genfromtxt(cm_scores_file, dtype=str)
    cm_sources = cm_data[:, 1]
    cm_keys = cm_data[:, 2]
    cm_scores = cm_data[:, 3].astype(float)

    # Extract target, nontarget, and spoof scores from the ASV scores
    tar_asv = asv_scores[asv_keys == 'target']
    non_asv = asv_scores[asv_keys == 'nontarget']
    spoof_asv = asv_scores[asv_keys == 'spoof']

    # Extract bona fide (real human) and spoof scores from the CM scores
    bona_cm = cm_scores[cm_keys == 'bonafide']
    spoof_cm = cm_scores[cm_keys == 'spoof']

    # EERs of the standalone systems; the ASV operating point is fixed to
    # its own EER threshold.
    eer_asv, asv_threshold = compute_eer(tar_asv, non_asv)
    eer_cm = compute_eer(bona_cm, spoof_cm)[0]

    attack_types = [f'A{_id:02d}' for _id in range(7, 20)]
    if printout:
        # Per-attack EER breakdown (only needed for the report).
        spoof_cm_breakdown = {
            attack_type: cm_scores[cm_sources == attack_type]
            for attack_type in attack_types
        }
        eer_cm_breakdown = {
            attack_type: compute_eer(bona_cm,
                                     spoof_cm_breakdown[attack_type])[0]
            for attack_type in attack_types
        }

    [Pfa_asv, Pmiss_asv,
     Pmiss_spoof_asv] = obtain_asv_error_rates(tar_asv, non_asv, spoof_asv,
                                               asv_threshold)

    # Compute t-DCF
    tDCF_curve, CM_thresholds = compute_tDCF(bona_cm,
                                             spoof_cm,
                                             Pfa_asv,
                                             Pmiss_asv,
                                             Pmiss_spoof_asv,
                                             cost_model,
                                             print_cost=False)

    # Minimum t-DCF over all CM thresholds
    min_tDCF_index = np.argmin(tDCF_curve)
    min_tDCF = tDCF_curve[min_tDCF_index]

    if printout:
        with open(output_file, "w") as f_res:
            f_res.write('\nCM SYSTEM\n')
            f_res.write('\tEER\t\t= {:8.9f} % '
                        '(Equal error rate for countermeasure)\n'.format(
                            eer_cm * 100))
            f_res.write('\nTANDEM\n')
            f_res.write('\tmin-tDCF\t\t= {:8.9f}\n'.format(min_tDCF))
            f_res.write('\nBREAKDOWN CM SYSTEM\n')
            for attack_type in attack_types:
                _eer = eer_cm_breakdown[attack_type] * 100
                f_res.write(
                    f'\tEER {attack_type}\t\t= {_eer:8.9f} % (Equal error rate for {attack_type}\n'
                )
        os.system(f"cat {output_file}")

    return eer_cm * 100, min_tDCF
def obtain_asv_error_rates(tar_asv, non_asv, spoof_asv, asv_threshold):
    """Return (Pfa, Pmiss, Pmiss_spoof) of the ASV system at ``asv_threshold``."""
    # False acceptance: nontarget scores at or above the threshold.
    Pfa_asv = np.sum(non_asv >= asv_threshold) / non_asv.size
    # False rejection: target scores below the threshold.
    Pmiss_asv = np.sum(tar_asv < asv_threshold) / tar_asv.size
    # Spoof rejection rate is undefined when there are no spoof trials.
    Pmiss_spoof_asv = None if spoof_asv.size == 0 else np.sum(
        spoof_asv < asv_threshold) / spoof_asv.size
    return Pfa_asv, Pmiss_asv, Pmiss_spoof_asv
def compute_det_curve(target_scores, nontarget_scores):
    """Return (frr, far, thresholds) arrays describing the DET curve.

    Each array has ``len(target_scores) + len(nontarget_scores) + 1`` points;
    the leading point is the all-accept operating point just below the
    smallest score.
    """
    num_trials = target_scores.size + nontarget_scores.size
    scores = np.concatenate((target_scores, nontarget_scores))
    labels = np.concatenate(
        (np.ones(target_scores.size), np.zeros(nontarget_scores.size)))

    # Stable sort keeps tied scores in a deterministic order.
    order = np.argsort(scores, kind='mergesort')
    labels = labels[order]

    # Running target count below each threshold, and the complementary
    # nontarget count at or above it.
    tar_below = np.cumsum(labels)
    non_above = nontarget_scores.size - (np.arange(1, num_trials + 1) - tar_below)

    # False rejection / false acceptance rates, prefixed with the
    # all-accept point (frr=0, far=1).
    frr = np.concatenate((np.atleast_1d(0), tar_below / target_scores.size))
    far = np.concatenate((np.atleast_1d(1), non_above / nontarget_scores.size))

    # Thresholds are the sorted scores, prefixed with a value just below them.
    thresholds = np.concatenate(
        (np.atleast_1d(scores[order[0]] - 0.001), scores[order]))

    return frr, far, thresholds
def compute_eer(target_scores, nontarget_scores):
    """Return the equal error rate (EER) and the threshold where it occurs.

    The EER is approximated at the DET-curve operating point where
    |FRR - FAR| is minimal, as the mean of the two rates there.
    """
    frr, far, thresholds = compute_det_curve(target_scores, nontarget_scores)
    crossover = np.argmin(np.abs(frr - far))
    eer = np.mean((frr[crossover], far[crossover]))
    return eer, thresholds[crossover]
def compute_tDCF(bonafide_score_cm, spoof_score_cm, Pfa_asv, Pmiss_asv,
                 Pmiss_spoof_asv, cost_model, print_cost):
    """
    Compute Tandem Detection Cost Function (t-DCF) [1] for a fixed ASV system.
    In brief, t-DCF returns a detection cost of a cascaded system of this form,
        Speech waveform -> [CM] -> [ASV] -> decision
    where CM stands for countermeasure and ASV for automatic speaker
    verification. The CM is therefore used as a 'gate' to decided whether or
    not the input speech sample should be passed onwards to the ASV system.
    Generally, both CM and ASV can do detection errors. Not all those errors
    are necessarily equally cost, and not all types of users are necessarily
    equally likely. The tandem t-DCF gives a principled with to compare
    different spoofing countermeasures under a detection cost function
    framework that takes that information into account.
    INPUTS:
      bonafide_score_cm   A vector of POSITIVE CLASS (bona fide or human)
                          detection scores obtained by executing a spoofing
                          countermeasure (CM) on some positive evaluation trials.
                          trial represents a bona fide case.
      spoof_score_cm      A vector of NEGATIVE CLASS (spoofing attack)
                          detection scores obtained by executing a spoofing
                          CM on some negative evaluation trials.
      Pfa_asv             False alarm (false acceptance) rate of the ASV
                          system that is evaluated in tandem with the CM.
                          Assumed to be in fractions, not percentages.
      Pmiss_asv           Miss (false rejection) rate of the ASV system that
                          is evaluated in tandem with the spoofing CM.
                          Assumed to be in fractions, not percentages.
      Pmiss_spoof_asv     Miss rate of spoof samples of the ASV system that
                          is evaluated in tandem with the spoofing CM. That
                          is, the fraction of spoof samples that were
                          rejected by the ASV system.
      cost_model          A struct that contains the parameters of t-DCF,
                          with the following fields.
                          Ptar        Prior probability of target speaker.
                          Pnon        Prior probability of nontarget speaker (zero-effort impostor)
                          Psoof       Prior probability of spoofing attack.
                          Cmiss_asv   Cost of ASV falsely rejecting target.
                          Cfa_asv     Cost of ASV falsely accepting nontarget.
                          Cmiss_cm    Cost of CM falsely rejecting target.
                          Cfa_cm      Cost of CM falsely accepting spoof.
      print_cost          Print a summary of the cost parameters and the
                          implied t-DCF cost function?
    OUTPUTS:
      tDCF_norm           Normalized t-DCF curve across the different CM
                          system operating points; see [2] for more details.
                          Normalized t-DCF > 1 indicates a useless
                          countermeasure (as the tandem system would do
                          better without it). min(tDCF_norm) will be the
                          minimum t-DCF used in ASVspoof 2019 [2].
      CM_thresholds       Vector of same size as tDCF_norm corresponding to
                          the CM threshold (operating point).
    NOTE:
    o     In relative terms, higher detection scores values are assumed to
          indicate stronger support for the bona fide hypothesis.
    o     You should provide real-valued soft scores, NOT hard decisions. The
          recommendation is that the scores are log-likelihood ratios (LLRs)
          from a bonafide-vs-spoof hypothesis based on some statistical model.
          This, however, is NOT required. The scores can have arbitrary range
          and scaling.
    o     Pfa_asv, Pmiss_asv, Pmiss_spoof_asv are in fractions, not percentages.
    References:
      [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
          <NAME>, <NAME>, <NAME>: "t-DCF: a Detection
          Cost Function for the Tandem Assessment of Spoofing Countermeasures
          and Automatic Speaker Verification", Proc. Odyssey 2018: the
          Speaker and Language Recognition Workshop, pp. 312--319, Les Sables d'Olonne,
          France, June 2018 (https://www.isca-speech.org/archive/Odyssey_2018/pdfs/68.pdf)
      [2] ASVspoof 2019 challenge evaluation plan
          TODO: <add link>
    """
    # Sanity check of cost parameters
    if cost_model['Cfa_asv'] < 0 or cost_model['Cmiss_asv'] < 0 or \
            cost_model['Cfa_cm'] < 0 or cost_model['Cmiss_cm'] < 0:
        print('WARNING: Usually the cost values should be positive!')
    # Priors must form a probability distribution over {target, nontarget, spoof}.
    if cost_model['Ptar'] < 0 or cost_model['Pnon'] < 0 or cost_model['Pspoof'] < 0 or \
            np.abs(cost_model['Ptar'] + cost_model['Pnon'] + cost_model['Pspoof'] - 1) > 1e-10:
        sys.exit(
            'ERROR: Your prior probabilities should be positive and sum up to one.'
        )
    # Unless we evaluate worst-case model, we need to have some spoof tests against asv
    if Pmiss_spoof_asv is None:
        sys.exit(
            'ERROR: you should provide miss rate of spoof tests against your ASV system.'
        )
    # Sanity check of scores
    combined_scores = np.concatenate((bonafide_score_cm, spoof_score_cm))
    if np.isnan(combined_scores).any() or np.isinf(combined_scores).any():
        sys.exit('ERROR: Your scores contain nan or inf.')
    # Sanity check that inputs are scores and not decisions
    # (fewer than 3 distinct values strongly suggests binary decisions).
    n_uniq = np.unique(combined_scores).size
    if n_uniq < 3:
        sys.exit(
            'ERROR: You should provide soft CM scores - not binary decisions')
    # Obtain miss and false alarm rates of CM
    Pmiss_cm, Pfa_cm, CM_thresholds = compute_det_curve(
        bonafide_score_cm, spoof_score_cm)
    # Constants - see ASVspoof 2019 evaluation plan.
    # C1 weights CM misses (rejecting bona fide), C2 weights CM false alarms
    # (passing a spoof on to the ASV).
    C1 = cost_model['Ptar'] * (cost_model['Cmiss_cm'] - cost_model['Cmiss_asv'] * Pmiss_asv) - \
        cost_model['Pnon'] * cost_model['Cfa_asv'] * Pfa_asv
    C2 = cost_model['Cfa_cm'] * cost_model['Pspoof'] * (1 - Pmiss_spoof_asv)
    # Sanity check of the weights
    if C1 < 0 or C2 < 0:
        sys.exit(
            'You should never see this error but I cannot evalute tDCF with negative weights - please check whether your ASV error rates are correctly computed?'
        )
    # Obtain t-DCF curve for all thresholds
    tDCF = C1 * Pmiss_cm + C2 * Pfa_cm
    # Normalized t-DCF: divide by the default cost (min of the two weights),
    # so that a useless CM has normalized t-DCF == 1.
    tDCF_norm = tDCF / np.minimum(C1, C2)
    # Everything should be fine if reaching here.
    if print_cost:
        print('t-DCF evaluation from [Nbona={}, Nspoof={}] trials\n'.format(
            bonafide_score_cm.size, spoof_score_cm.size))
        print('t-DCF MODEL')
        print('   Ptar         = {:8.5f} (Prior probability of target user)'.
              format(cost_model['Ptar']))
        print(
            '   Pnon         = {:8.5f} (Prior probability of nontarget user)'.
            format(cost_model['Pnon']))
        print(
            '   Pspoof       = {:8.5f} (Prior probability of spoofing attack)'.
            format(cost_model['Pspoof']))
        print(
            '   Cfa_asv      = {:8.5f} (Cost of ASV falsely accepting a nontarget)'
            .format(cost_model['Cfa_asv']))
        print(
            '   Cmiss_asv    = {:8.5f} (Cost of ASV falsely rejecting target speaker)'
            .format(cost_model['Cmiss_asv']))
        print(
            '   Cfa_cm       = {:8.5f} (Cost of CM falsely passing a spoof to ASV system)'
            .format(cost_model['Cfa_cm']))
        print(
            '   Cmiss_cm     = {:8.5f} (Cost of CM falsely blocking target utterance which never reaches ASV)'
            .format(cost_model['Cmiss_cm']))
        print(
            '\n   Implied normalized t-DCF function (depends on t-DCF parameters and ASV errors), s=CM threshold)'
        )
        # Report the curve in terms of the dominant weight.
        if C2 == np.minimum(C1, C2):
            print(
                '   tDCF_norm(s) = {:8.5f} x Pmiss_cm(s) + Pfa_cm(s)\n'.format(
                    C1 / C2))
        else:
            print(
                '   tDCF_norm(s) = Pmiss_cm(s) + {:8.5f} x Pfa_cm(s)\n'.format(
                    C2 / C1))
    return tDCF_norm, CM_thresholds
| 2.375 | 2 |
minecraft/migrations/0002_category_date.py | Tosmel2/Python_Django | 0 | 12765464 | <reponame>Tosmel2/Python_Django
# Generated by Django 4.0.1 on 2022-01-24 18:38
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: adds a non-null 'date' field to Category.

    The concrete datetime default below is a one-off value used only to
    back-fill existing rows; preserve_default=False removes it afterwards.
    """

    dependencies = [
        ('minecraft', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='date',
            field=models.DateField(default=datetime.datetime(2022, 1, 24, 18, 38, 36, 587401, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
| 1.929688 | 2 |
main/migrations/0004_delete_bugreport.py | josylad/RoomScout | 0 | 12765465 | <reponame>josylad/RoomScout<gh_stars>0
# Generated by Django 2.2.7 on 2019-12-11 19:23
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: deletes the BugReport model added in 0003."""

    dependencies = [
        ('main', '0003_bugreport'),
    ]
    operations = [
        migrations.DeleteModel(
            name='BugReport',
        ),
    ]
| 1.390625 | 1 |
src/mobot_client/tests/factories/__init__.py | mobilecoinofficial/mobot | 6 | 12765466 | # Copyright (c) 2021 MobileCoin. All rights reserved.
from decimal import Decimal
import factory
import pytz
from django.utils import timezone
from datetime import timedelta
from faker.factory import Factory
from faker.providers import date_time, internet, phone_number, lorem
# Shared Faker instance used by the factory classes below.
Faker = Factory.create
fake = Faker()
fake.add_provider(date_time)
fake.add_provider(internet)
fake.add_provider(phone_number)
fake.add_provider(lorem)
# Fixed seed so generated fixture data is reproducible across test runs.
fake.seed(0)
# NOTE(review): 'dt' is computed here but not referenced anywhere in this
# module's visible code — confirm it is still needed.
dt = fake.date_time_between(start_date='+5d', end_date='+10d', tzinfo=pytz.utc)
from mobot_client.models import (
Drop,
Store,
DropSession,
SessionState,
Customer,
CustomerStorePreferences,
Order,
BonusCoin,
DropType,
Item,
Sku,
)
class StoreFactory(factory.django.DjangoModelFactory):
    """Factory for Store rows; reuses an existing row with the same phone number."""

    class Meta:
        model = Store
        django_get_or_create = ('phone_number',)

    id = factory.Faker('pyint')
    name = factory.Sequence(lambda n: f"Mobot Store #{n}")
    # Sequential, unique UK-style numbers so get_or_create never collides.
    phone_number = factory.Sequence(lambda n: "+448211" + "%06d" % (n + 100000))
    description = fake.paragraph(nb_sentences=10)
    privacy_policy_url = factory.Sequence(lambda n: f"https://example.com/privacy_{n}")
class DropFactory(factory.django.DjangoModelFactory):
    """Factory for an airdrop-type Drop that is currently live (±2 days)."""

    class Meta:
        model = Drop

    drop_type = DropType.AIRDROP
    store = factory.SubFactory(StoreFactory)
    id = factory.Sequence(lambda n: n)
    pre_drop_description = factory.Sequence(lambda n: f"Item drop {n}")
    advertisment_start_time = fake.date_time_between(start_date='-2d', end_date='+10d', tzinfo=pytz.utc)
    start_time = timezone.now() - timedelta(days=2)
    end_time = timezone.now() + timedelta(days=2)
    number_restriction = factory.Iterator(['+44', '+1'])
    # NOTE(review): this class attribute shadows the imported django
    # 'timezone' module inside the class body — confirm that is intentional.
    timezone = 'PST'
    # NOTE(review): the format spec ':4f' (no dot) is unusual — ':.4f' may
    # have been intended; verify the resulting string is what the model expects.
    initial_coin_amount_mob = Decimal(f"{float(0.2):4f}")

    @factory.lazy_attribute
    def store_id(self):
        # Mirror the generated store's primary key into the FK id column.
        return self.store.pk

    @factory.lazy_attribute
    def item_id(self):
        # Only set when an 'item' was passed to the factory; otherwise None.
        if hasattr(self, 'item'):
            return self.item.pk
class OldDropFactory(DropFactory):
    """Variant of DropFactory whose drop has already ended (entirely in the past)."""
    start_time = timezone.now() - timedelta(days=3)
    end_time = timezone.now() - timedelta(days=1)
    advertisment_start_time = fake.date_time_between(start_date='-15d', end_date='-1d', tzinfo=pytz.UTC)
class ItemFactory(factory.django.DjangoModelFactory):
    """Factory for Item rows, automatically registered with their store."""

    class Meta:
        model = Item

    @factory.post_generation
    def add_items_to_store(obj, created, *args, **kwargs):
        # Keep the store's reverse item collection in sync with the new item.
        obj.store.items.add(obj)

    id = factory.Faker('pyint')
    # Fixed: the original f-string interpolated factory.Faker declaration
    # objects at class-definition time, so every item got the literal repr of
    # the declarations instead of generated text.  LazyAttribute defers the
    # call and draws fresh values from the module-level `fake` per instance.
    name = factory.LazyAttribute(lambda o: f"{fake.name()} {fake.sentence(nb_words=5)}")
    price_in_mob = factory.Faker('pydecimal', positive=True, left_digits=3, right_digits=6)
    description = factory.Faker('sentence', nb_words=50)
    short_description = factory.Faker('sentence', nb_words=10)
    image_link = factory.Sequence(lambda n: f"https://img.com/image{n}")
    store = factory.SubFactory(StoreFactory)

    @factory.lazy_attribute
    def store_id(self):
        # Mirror the generated store's primary key into the FK id column.
        return self.store.id
class SkuFactory(factory.django.DjangoModelFactory):
    """Factory for Sku rows with a random string identifier."""

    class Meta:
        model = Sku

    identifier = factory.Faker('pystr')
class CustomerFactory(factory.django.DjangoModelFactory):
    """Factory for Customer rows; reuses an existing row with the same phone number."""

    class Meta:
        model = Customer
        django_get_or_create = ('phone_number',)

    # Sequential, unique UK-style numbers so get_or_create never collides.
    phone_number = factory.Sequence(lambda n: f"+447911" + "%06d" % (n + 100000))
class BonusCoinFactory(factory.django.DjangoModelFactory):
    """Factory for a BonusCoin pool attached to a fresh airdrop."""

    class Meta:
        model = BonusCoin

    drop = factory.SubFactory(DropFactory, drop_type=DropType.AIRDROP)
    amount_mob = factory.Faker('pydecimal', positive=True, left_digits=3, right_digits=6)
    number_available_at_start = 10
class ItemDropFactory(DropFactory):
    """DropFactory specialized to item drops."""
    drop_type = DropType.ITEM
class AirDropFactory(DropFactory):
    """DropFactory specialized to airdrops (same as the base default, made explicit)."""
    drop_type = DropType.AIRDROP
class DropSessionFactory(factory.django.DjangoModelFactory):
    """Factory pairing a generated customer with a generated (live) drop."""

    class Meta:
        model = DropSession

    customer = factory.SubFactory(CustomerFactory)
    drop = factory.SubFactory(DropFactory)
class OldDropSessionFactory(DropSessionFactory):
    """DropSessionFactory whose drop has already expired."""
    drop = factory.SubFactory(OldDropFactory)
class OrderFactory(factory.django.DjangoModelFactory):
    """Factory for Order rows; the customer is taken from the drop session."""

    class Meta:
        model = Order
        inline_args = ('sku',)

    drop_session = factory.SubFactory(DropSessionFactory)

    @factory.lazy_attribute
    def customer(self):
        # Orders always belong to the customer who owns the drop session.
        return self.drop_session.customer
class GenericItemDropFactory(factory.django.DjangoModelFactory):
    # NOTE(review): empty placeholder with no Meta.model — instantiating it
    # will fail; confirm whether it is still needed or should be removed.
    pass
| 2.078125 | 2 |
speedex/adminzone/refgen.py | 97-63/Django-project | 0 | 12765467 | <gh_stars>0
import random#modeule generating random nos
def getref():
    """Generate a pseudo-random parcel reference: 'sp' followed by 6 digits.

    Two rounds (range(1, 3) yields i = 1 and 2) each append three digits
    drawn from the ranges [1, 5), [0, 7) and [3, 9) respectively.
    """
    digits = []
    for _ in range(1, 3):
        digits.append(str(random.randrange(1, 5)))
        digits.append(str(random.randrange(0, 7)))
        digits.append(str(random.randrange(3, 9)))
    return 'sp' + ''.join(digits)
loss/IQA/watson.py | milesgray/CALAE | 0 | 12765468 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..dct2d import Dct2d
EPS = 1e-10
def softmax(a, b, factor=1):
    """Differentiable element-wise soft-maximum of two tensors.

    Stacks `a` and `b` on a new trailing axis, applies a softmax (scaled by
    `factor`) over that axis, and returns the resulting convex combination.
    The output lies between the element-wise values of `a` and `b` and
    approaches max(a, b) as `factor` grows.

    Generalized from the original hard-coded 4-D indexing to tensors of any
    rank via `...` indexing; behavior for 4-D inputs is unchanged.
    """
    stacked = torch.cat([a.unsqueeze(-1), b.unsqueeze(-1)], dim=-1)
    weights = F.softmax(stacked * factor, dim=-1)
    return a * weights[..., 0] + b * weights[..., 1]
class WatsonDistance(nn.Module):
    """
    Loss function based on Watsons perceptual distance.
    Based on DCT quantization
    """
    def __init__(self, blocksize=8, trainable=False, reduction='sum'):
        """
        Parameters:
        blocksize: int, size of the Blocks for discrete cosine transform
        trainable: bool, if True parameters of the loss are trained and dropout is enabled.
        reduction: 'sum' or 'none', determines return format
        """
        super().__init__()

        # input mapping
        blocksize = torch.as_tensor(blocksize)

        # module to perform 2D blockwise DCT
        self.add_module('dct', Dct2d(blocksize=blocksize.item(), interleaving=False))

        # parameters, initialized with values from watson paper
        self.blocksize = nn.Parameter(blocksize, requires_grad=False)
        if self.blocksize == 8:
            # init with Jpeg QM
            # Sensitivity/quantization matrix stored log-scaled so that
            # exp(t_tild) is always positive regardless of training updates.
            self.t_tild = nn.Parameter(torch.log(torch.tensor(  # log-scaled weights
                [[1.40, 1.01, 1.16, 1.66, 2.40, 3.43, 4.79, 6.56],
                 [1.01, 1.45, 1.32, 1.52, 2.00, 2.71, 3.67, 4.93],
                 [1.16, 1.32, 2.24, 2.59, 2.98, 3.64, 4.60, 5.88],
                 [1.66, 1.52, 2.59, 3.77, 4.55, 5.30, 6.28, 7.60],
                 [2.40, 2.00, 2.98, 4.55, 6.15, 7.46, 8.71, 10.17],
                 [3.43, 2.71, 3.64, 5.30, 7.46, 9.62, 11.58, 13.51],
                 [4.79, 3.67, 4.60, 6.28, 8.71, 11.58, 14.50, 17.29],
                 [6.56, 4.93, 5.88, 7.60, 10.17, 13.51, 17.29, 21.15]]
            )), requires_grad=trainable)
        else:
            # init with uniform QM
            self.t_tild = nn.Parameter(torch.zeros((self.blocksize, self.blocksize)), requires_grad=trainable)

        # other default parameters
        self.alpha = nn.Parameter(torch.tensor(0.649), requires_grad=trainable) # luminance masking
        w = torch.tensor(0.7) # contrast masking
        # Stored as the logit of w so sigmoid(w_tild) stays in (0, 1).
        self.w_tild = nn.Parameter(torch.log(w / (1- w)), requires_grad=trainable) # inverse of sigmoid
        self.beta = nn.Parameter(torch.tensor(4.), requires_grad=trainable) # pooling

        # dropout for training
        self.dropout = nn.Dropout(0.5 if trainable else 0)

        # reduction
        self.reduction = reduction
        if reduction not in ['sum', 'none']:
            raise Exception('Reduction "{}" not supported. Valid values are: "sum", "none".'.format(reduction))

    @property
    def t(self):
        # returns QM (exponentiated log-weights, guaranteed positive)
        qm = torch.exp(self.t_tild)
        return qm

    @property
    def w(self):
        # return luminance masking parameter, mapped back into (0, 1)
        return torch.sigmoid(self.w_tild)

    def forward(self, input, target):
        """Compute the Watson perceptual distance between input and target.

        Both arguments are image batches consumed by the blockwise Dct2d
        module; the DCT output is unpacked as (N, K, B, B) where K is the
        number of blocks and B the blocksize.
        """
        # dct
        c0 = self.dct(target)
        c1 = self.dct(input)
        N, K, B, B = c0.shape

        # luminance masking: scale thresholds by each block's DC coefficient
        # relative to the batch-average luminance.
        avg_lum = torch.mean(c0[:,:,0,0])
        t_l = self.t.view(1, 1, B, B).expand(N, K, B, B)
        t_l = t_l * (((c0[:,:,0,0] + EPS) / (avg_lum + EPS)) ** self.alpha).view(N, K, 1, 1)

        # contrast masking: raise thresholds where the target has energy
        # (soft maximum keeps this differentiable).
        s = softmax(t_l, (c0.abs() + EPS)**self.w * t_l**(1 - self.w))

        # pooling: beta-norm of threshold-normalized coefficient differences
        watson_dist = (((c0 - c1) / s).abs() + EPS) ** self.beta
        watson_dist = self.dropout(watson_dist) + EPS
        watson_dist = torch.sum(watson_dist, dim=(1,2,3))
        watson_dist = watson_dist ** (1 / self.beta)

        # reduction
        if self.reduction == 'sum':
            watson_dist = torch.sum(watson_dist)

        return watson_dist
| 2.6875 | 3 |
paper_example.py | PhysiCell-Tools/DAPT-example | 1 | 12765469 | import os, platform
import dapt
# Load the run configuration and open the CSV table of parameter sets, then
# wrap them in a DAPT Param object that hands out one pending set at a time.
config = dapt.Config(path='config.json')
db = dapt.db.Delimited_file('parameters.csv', delimiter=',')
params = dapt.Param(db, config=config)
p = params.next_parameters()
while p is not None:
    # Render this parameter set into the PhysiCell settings XML.
    dapt.tools.create_XML(p, default_settings="PhysiCell_settings_default.xml", save_settings="PhysiCell_settings.xml")
    params.update_status(p["id"], 'running simulation')
    # The PhysiCell binary name differs per platform.
    if platform.system() == 'Windows':
        os.system("biorobots.exe")
    else:
        os.system("./biorobots")
    # Mark the set finished and fetch the next one (None when table is done).
    params.successful(p["id"])
    p = params.next_parameters()
| 2.125 | 2 |
examples/ssd1306_pillow_openweather.py | coding-world/SSD1306-python | 0 | 12765470 | <gh_stars>0
import socket
import fcntl
import struct
import board
import digitalio
import requests
import time
import os.path
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
from io import BytesIO
import cairosvg
# set pins and init oled
RESET_PIN = digitalio.DigitalInOut(board.D4)
i2c = board.I2C()
# 128x64 monochrome SSD1306 on the default I2C address 0x3C.
oled = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3C, reset=RESET_PIN)
# clear screen
oled.fill(0)
oled.show()
# load fonts
# Three sizes (10/14/16 pt) of the same DejaVu face for the layouts below.
font1 = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 10)
font2 = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 14)
font3 = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 16)
def get_icon(id, size=32):
    """Load the OpenWeatherMap icon `id` from ./icons/<id>.svg.

    The SVG is rasterized to PNG in memory, resized to `size`x`size` and
    converted to 1-bit mode for the monochrome OLED.  A blank image of the
    same size is returned when the icon file is missing.

    NOTE(review): the parameter name `id` shadows the builtin; renaming it
    would be an interface change, so it is only flagged here.
    """
    url = './icons/'+id+'.svg'
    if os.path.isfile(url):
        return Image.open(
            BytesIO(cairosvg.svg2png(url=url))  # convert svg to png
        ).resize((size, size)).convert('1')  # return resized image
    else:
        return Image.new("1", (size, size))  # return empty image
while True:
    # openweather api request
    # SECURITY NOTE: the API key is embedded in source (see inline comment);
    # move it to an environment variable before publishing.
    data = requests.get(
        url='https://api.openweathermap.org/data/2.5/onecall'
            '?appid=d3355b38ac0d56b2e91cefcd5fd744fb'  # should be changed to own api key
            '&units=metric'  # units
            '&lang=de'  # referred language
            '&lat=54.788'  # location (latitude)
            '&lon=9.43701',  # location (longitude)
        timeout=10
    ).json()

    # display hourly data: one screen per forecast step, 4 s each
    for step in [
        {'title': 'Jetzt:', 'data': data['current']},
        {'title': 'in einer Stunde:', 'data': data['hourly'][1]},
        {'title': 'in zwei Stunden:', 'data': data['hourly'][2]},
        {'title': 'in 3 Stunden:', 'data': data['hourly'][3]},
        {'title': 'in 6 Stunden:', 'data': data['hourly'][6]},
    ]:
        image = Image.new("1", (oled.width, oled.height))
        draw = ImageDraw.Draw(image)
        draw.text((0, 0), step['title'], font=font3, fill=255)
        draw.text((0, 16), step['data']['weather'][0]['description'], font=font2, fill=255)
        draw.text((48, 32), str(step['data']['temp']) + '°C', font=font3, fill=255)
        draw.text((48, 48), str(step['data']['humidity']) + '%', font=font3, fill=255)
        image.paste(get_icon(step['data']['weather'][0]['icon']), (8, 32))
        oled.image(image)
        oled.show()
        time.sleep(4)

    # summary screen: day/night temperatures for the next three days, 8 s
    image = Image.new("1", (oled.width, oled.height))
    draw = ImageDraw.Draw(image)
    draw.text((0, 0), 'nächste Tage', font=font3, fill=255)
    for i in range(1, 4):
        draw.text((24, 16 * i), str(data['daily'][i]['temp']['day'])[:4] + '°C', font=font2, fill=255)
        draw.text((76, 16 * i), str(data['daily'][i]['temp']['night'])[:4] + '°C', font=font2, fill=255)
        image.paste(get_icon(data['daily'][i]['weather'][0]['icon'], 16), (0, 16 * i))
    oled.image(image)
    oled.show()
    time.sleep(8)
| 2.328125 | 2 |
main.py | Draichi/Bitcoin-Trader-RL | 0 | 12765471 | <gh_stars>0
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from env.BitcoinTradingEnv import BitcoinTradingEnv
import pandas as pd
# Hourly ETH/BTC candles; sort chronologically so the env steps forward in time.
train_df = pd.read_csv('./datasets/bot_train_ETHBTC_700_hour.csv')
train_df = train_df.sort_values('Date')
test_df = pd.read_csv('./datasets/bot_rollout_ETHBTC_700_hour.csv')
test_df = test_df.sort_values('Date')
# stable-baselines requires a vectorized env, even for a single instance.
train_env = DummyVecEnv(
    [lambda: BitcoinTradingEnv(train_df, serial=True)])
model = PPO2(MlpPolicy, train_env, verbose=1, tensorboard_log="./tensorboard/")
model.learn(total_timesteps=5000)
test_env = DummyVecEnv(
    [lambda: BitcoinTradingEnv(test_df, serial=True)])
# Roll the trained policy out on the held-out data, rendering each step.
obs = test_env.reset()
for i in range(50000):
    action, _states = model.predict(obs)
    obs, rewards, done, info = test_env.step(action)
    test_env.render(mode="human", title="BTC")
test_env.close()
| 2.0625 | 2 |
RecommenderModule/evaluation/evaluation_item_based.py | amir-rahim/BookClubSocialNetwork | 4 | 12765472 | <filename>RecommenderModule/evaluation/evaluation_item_based.py
from RecommenderModule.recommenders.item_based_recommender import ItemBasedRecommender
from RecommenderModule.evaluation.evaluator import Evaluator
"""This class evaluates item-based collaborative filtering recommenders with
different parameters, in order for the developer to pick the best parameters
for the app's item-based recommender."""
class EvaluationItemBased:
    """Grid-evaluates item-based collaborative-filtering recommenders so the
    developer can pick the best parameters for the app's recommender."""

    def run_evaluations(self):
        """Evaluate every parameter combination and print the insights."""
        parameter_grid = {
            'min_support': [1, 2, 5, 7, 8],
            'model_function_name': ['cosine', 'msd', 'pearson', 'pearson_baseline'],
        }
        Evaluator().evaluate_all_combinations(ItemBasedRecommender(), parameter_grid)
| 2.765625 | 3 |
Predictions/Flinc_theory_inputs/inputs/MakeNofZForCosmosis_function.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 7 | 12765473 | <reponame>KiDS-WL/Cat_to_Obs_K1000_P1
#NZDATA = T // This sentinel marks the extension as n(z) data
#EXTNAME = NAME // The name of this n(z) kernel.
#NBIN = 5 // Integer number of tomographic bins
#NZ = 100 // Integer number of histogram bins
#The extension must then contain these data columns:
#Z_LOW 8-byte real // Real, the z value for the lower end of each redshift histogram bin
#Z_MID 8-byte real // Real, the z value for a middle point of each redshift histogram bin
#Z_HIGH 8-byte real // Real, the z value for the upper end of each redshift histogram bin
#BIN1 8-byte real // Real, the n(z) value for this histogram bin for the first tomographic bin
#etc. BIN2, BIN3, etc.
import sys
import os
import numpy as np
import pylab as plt
from matplotlib.ticker import ScalarFormatter
import pyfits
def MakeNofz_fits(input_files,outputfileName,OneBin_nofzFileName,neff,type='lowerEdge',suffix='SAMPLE'):
    """Write tomographic n(z) histograms to CosmoSIS-style FITS files.

    Parameters
    ----------
    input_files : list of str
        One two-column ASCII file (z, n(z)) per tomographic bin.
    outputfileName : str
        Output FITS file holding every tomographic bin as BIN1..BINn.
    OneBin_nofzFileName : str
        Output FITS file holding the neff-weighted sum of all bins as BIN1.
    neff : sequence of float
        Effective number densities weighting each bin in the combined n(z).
    type : {'lowerEdge', 'middle', 'upperEdge'}
        Which histogram-bin edge the z column of the input files refers to.
    suffix : str
        Appended to 'NZ_' to form the EXTNAME of the output extensions.

    Fixes relative to the original:
    - column name 'Z_lOW' (lowercase L) corrected to the documented 'Z_LOW';
    - NBIN header now reflects the actual number of bins (was hard-coded 5);
    - the BIN column is appended for every bin, including the first (the
      append previously sat in the bin1 != 0 branch, so BIN1 was dropped);
    - input files are opened via 'with' (no leaked handle, no shadowed
      builtin 'file').
    """
    nBins = len(input_files)
    print('I got '+str(nBins)+' files from input. Type is set to '+type)
    cols = []
    for bin1 in range(nBins):
        with open(input_files[bin1]) as f_in:
            nofZ = np.loadtxt(f_in, comments='#')
        if bin1 == 0:
            z_vec = nofZ[:, 1] * neff[bin1]
            DeltaZ = nofZ[1, 0] - nofZ[0, 0]
            # Derive lower/mid/upper edges from the stated edge convention.
            if type == 'lowerEdge':
                Z_LOW = nofZ[:, 0]
                Z_HIGH = nofZ[:, 0] + DeltaZ
                Z_MID = Z_LOW + DeltaZ / 2.
            elif type == 'middle':
                Z_MID = nofZ[:, 0]
                Z_LOW = nofZ[:, 0] - DeltaZ / 2.
                Z_HIGH = nofZ[:, 0] + DeltaZ / 2.
            elif type == 'upperEdge':
                Z_HIGH = nofZ[:, 0]
                Z_MID = nofZ[:, 0] - DeltaZ / 2.
                Z_LOW = nofZ[:, 0] - DeltaZ
            else:
                print('not a recognised bin type, exiting now ...')
                exit(1)
            cols.append(pyfits.Column(name='Z_LOW', format='D', array=Z_LOW))
            cols.append(pyfits.Column(name='Z_HIGH', format='D', array=Z_HIGH))
            cols.append(pyfits.Column(name='Z_MID', format='D', array=Z_MID))
        else:
            z_vec += nofZ[:, 1] * neff[bin1]
        # One histogram column per tomographic bin, first bin included.
        cols.append(pyfits.Column(name='BIN'+str(bin1+1), format='D', array=nofZ[:, 1]))
    new_cols = pyfits.ColDefs(cols)
    hdulist_new = pyfits.BinTableHDU.from_columns(new_cols)
    hdulist_new.header['NZDATA'] = True
    hdulist_new.header['EXTNAME'] = 'NZ_'+suffix
    hdulist_new.header['NBIN'] = nBins
    hdulist_new.header['NZ'] = len(Z_LOW)
    hdulist_new.writeto(outputfileName)
    # now one bin: the neff-weighted combination of all tomographic bins
    cols = []
    cols.append(pyfits.Column(name='Z_LOW', format='D', array=Z_LOW))
    cols.append(pyfits.Column(name='Z_HIGH', format='D', array=Z_HIGH))
    cols.append(pyfits.Column(name='Z_MID', format='D', array=Z_MID))
    cols.append(pyfits.Column(name='BIN1', format='D', array=z_vec))
    new_cols = pyfits.ColDefs(cols)
    hdulist_new = pyfits.BinTableHDU.from_columns(new_cols)
    hdulist_new.header['NZDATA'] = True
    hdulist_new.header['EXTNAME'] = 'NZ_'+suffix
    hdulist_new.header['NBIN'] = 1
    hdulist_new.header['NZ'] = len(Z_LOW)
    outputfileName = OneBin_nofzFileName
    hdulist_new.writeto(outputfileName)
# --- driver: build the combined lens+source n(z) and the source-only n(z) ---
nBins_lens=2
nBins_source=5
# neff=[1.5099137858524687,1.5413199175708638 , 1.6196461014383043, 0.8268988140297754]
# Unit weights: the combined single-bin n(z) is a plain sum of the bins.
neff=[1,1,1,1,1,1,1]
OutputFileName='lens_and_source_flinc_nofz.fits'
OneBin_nofzFileName='lens_and_source_flinc_nofz_1bin.fits'
input_files=[]
for bin1 in range(nBins_lens):
    fileNameInput='numida/lens/'+'nOfZ_hist_BOSSA_tomo'+str(bin1)+'.dat'
    input_files.append(fileNameInput)
for bin1 in range(nBins_source):
    fileNameInput='numida/source/'+'nOfZ_hist_KiDSVDA_tomo'+str(bin1)+'.dat'
    input_files.append(fileNameInput)
MakeNofz_fits(input_files,OutputFileName,OneBin_nofzFileName,neff,type='lowerEdge',suffix='lens_flinc')
# neff=[1.5099137858524687,1.5413199175708638 , 1.6196461014383043, 0.8268988140297754]
neff=[1,1,1,1,1]
OutputFileName='source_flinc_nofz.fits'
OneBin_nofzFileName='source_flinc_nofz_1bin.fits'
input_files=[]
for bin1 in range(nBins_source):
    fileNameInput='numida/source/'+'nOfZ_hist_KiDSVDA_tomo'+str(bin1)+'.dat'
    input_files.append(fileNameInput)
MakeNofz_fits(input_files,OutputFileName,OneBin_nofzFileName,neff,type='lowerEdge',suffix='source_flinc')
| 2.375 | 2 |
jsparse/weiboUserInfo/__init__.py | PyDee/Spiders | 6 | 12765474 | user_info = {
"ok": 1,
"data": {
"avatar_guide": [],
"isStarStyle": 0,
"userInfo": {
"id": 3112824195,
"screen_name": "琪琪琪咩Sickey",
"profile_image_url": "https:\/\/tvax3.sinaimg.cn\/crop.0.0.996.996.180\/b989ed83ly8gcdemg2y21j20ro0rotas.jpg?KID=imgbed,tva&Expires=1594729063&ssig=6+Ph+Rvab8",
"profile_url": "https:\/\/m.weibo.cn\/u\/3112824195?uid=3112824195&luicode=10000011&lfid=1005053112824195",
"statuses_count": 12,
"verified": False,
"verified_type": -1,
"close_blue_v": False,
"description": "为未来的我而努力💚",
"gender": "f",
"mbtype": 0,
"urank": 14,
"mbrank": 0,
"follow_me": False,
"following": False,
"followers_count": 51,
"follow_count": 158,
"cover_image_phone": "https:\/\/tva2.sinaimg.cn\/crop.0.0.640.640.640\/a1d3feabjw1ecasunmkncj20hs0hsq4j.jpg",
"avatar_hd": "https:\/\/wx3.sinaimg.cn\/orj480\/b989ed83ly8gcdemg2y21j20ro0rotas.jpg",
"like": False,
"like_me": False,
"toolbar_menus": [
{
"type": "profile_follow",
"name": "关注",
"pic": "",
"params": {
"uid": 3112824195
}
},
{
"type": "link",
"name": "聊天",
"pic": "http:\/\/h5.sinaimg.cn\/upload\/2015\/06\/12\/2\/toolbar_icon_discuss_default.png",
"params": {
"scheme": "sinaweibo:\/\/messagelist?uid=3112824195&nick=琪琪琪咩Sickey"
},
"scheme": "https:\/\/passport.weibo.cn\/signin\/welcome?entry=mweibo&r=https://m.weibo.cn/api/container/getIndex?type=uid&value=3112824195"
},
{
"type": "link",
"name": "文章",
"pic": "",
"params": {
"scheme": "sinaweibo:\/\/cardlist?containerid=2303190002_445_3112824195_WEIBO_ARTICLE_LIST_DETAIL&count=20"
},
"scheme": "https:\/\/m.weibo.cn\/p\/index?containerid=2303190002_445_3112824195_WEIBO_ARTICLE_LIST_DETAIL&count=20&luicode=10000011&lfid=1005053112824195"
}
]
},
"fans_scheme": "https:\/\/m.weibo.cn\/p\/index?containerid=231051_-_fans_intimacy_-_3112824195&luicode=10000011&lfid=1005053112824195",
"follow_scheme": "https:\/\/m.weibo.cn\/p\/index?containerid=231051_-_followersrecomm_-_3112824195&luicode=10000011&lfid=1005053112824195",
"tabsInfo": {
"selectedTab": 1,
"tabs": [
{
"id": 1,
"tabKey": "profile",
"must_show": 1,
"hidden": 0,
"title": "主页",
"tab_type": "profile",
"containerid": "2302833112824195"
},
{
"id": 2,
"tabKey": "weibo",
"must_show": 1,
"hidden": 0,
"title": "微博",
"tab_type": "weibo",
"containerid": "1076033112824195",
"apipath": "\/profile\/statuses",
"url": "\/index\/my"
},
{
"id": 10,
"tabKey": "album",
"must_show": 0,
"hidden": 0,
"title": "相册",
"tab_type": "album",
"containerid": "1078033112824195"
}
]
},
"scheme": "sinaweibo:\/\/userinfo?uid=3112824195&type=uid&value=3112824195&luicode=10000011&lfid=1005051005051004933462&v_p=42",
"showAppTips": 0
}
}
| 1.515625 | 2 |
snake_OOP/score.py | M4rkopolo/turtle_OOP_inheritance | 0 | 12765475 | <reponame>M4rkopolo/turtle_OOP_inheritance
from turtle import Turtle
class Score(Turtle):
    """Scoreboard turtle: draws the current and all-time-high snake score.

    The high score is persisted to the text file "hi score.txt" in the
    working directory.
    NOTE(review): __init__ crashes if that file does not exist yet — confirm
    it is created before the first run.
    """

    def __init__(self):
        super().__init__()
        self.score = 0
        with open("hi score.txt", mode="r") as file: #reading the best score
            self.hi_score = int(file.read())
        self.color("white")
        self.penup()
        self.hideturtle()
        # Park the (invisible) turtle at the top of the screen for the banner.
        self.goto(0,270)
        self.add_points()

    def add_points(self):
        """Redraw the score banner with the current values."""
        self.clear()
        self.write(f"score: {self.score}. hi score: {self.hi_score}", align="center", font=('Aria', 24, 'normal'))

    def game_over(self): #change score to GAME OVER sentence
        """Write GAME OVER in the middle of the screen."""
        self.goto(0, 0)
        self.write(f"GAME OVER", align="center", font=('Aria', 24, 'normal'))

    def reset(self):
        """Persist a new high score if beaten, then restart the counter."""
        if self.score > self.hi_score:
            self.hi_score = self.score
            with open("hi score.txt", mode="w") as file: #saving the best result if the old one is worse
                file.write(f"{self.hi_score}")
        self.score = 0 #resetting current score
        self.add_points()

    def increase_score(self):
        """Add one point and refresh the banner."""
        self.score += 1
        self.add_points()
pyravendb/tests/jvm_migrated_tests/test_crud.py | poissoncorp/ravendb-python-client | 0 | 12765476 | <reponame>poissoncorp/ravendb-python-client
from pyravendb.tests.test_base import TestBase, User
class TestCrud(TestBase):
    """CRUD regression tests for the RavenDB session (migrated from the JVM client)."""

    def setUp(self):
        super(TestCrud, self).setUp()

    def test_update_property_from_null_to_object(self):
        """A None property must be updatable to an object across sessions."""
        poc = Poc("jacek", None)
        with self.store.open_session() as session:
            session.store(poc, "pocs/1")
            session.save_changes()
        with self.store.open_session() as session:
            poc = session.load("pocs/1", Poc)
            self.assertIsNone(poc.user)
            # Replace the null with a (blank) User and persist the change.
            poc.user = User(None, None)
            session.save_changes()
        with self.store.open_session() as session:
            poc = session.load("pocs/1", Poc)
            self.assertIsNotNone(poc)
class Poc:
    """Minimal document type pairing a name with an optional user reference."""

    def __init__(self, name, user):
        """Store both constructor arguments as-is."""
        self.user = user
        self.name = name
src/reader.py | epogrebnyak/data-rosstat-boo-light | 0 | 12765477 | <reponame>epogrebnyak/data-rosstat-boo-light
from itertools import islice
from collections import OrderedDict
import os
import pandas as pd
import settings
import streams
import row_parser
from inspect_columns import Columns
from logs import print_elapsed_time
# Column layout of the raw CSV; rows of any other width are malformed.
COLUMNS = Columns.COLUMNS
VALID_ROW_WIDTH = len(COLUMNS)
def _raw_rows(year):
    """Yield unfiltered CSV rows for *year* from the locally downloaded file."""
    path = settings.url_local_path(year)
    return streams.yield_csv_rows(path)
def has_valid_length(_row, n=VALID_ROW_WIDTH):
    """Return True when *_row* has exactly *n* fields (a well-formed row)."""
    return len(_row) == n
def raw_rows(year):
    """Yield only the rows of the expected width for *year*."""
    return filter(has_valid_length, _raw_rows(year))
def as_dict(row, columns=COLUMNS):
    """Pair *row* values with *columns*, preserving column order."""
    return OrderedDict(zip(columns, row))
def _raw_dicts(year):
    """Yield each valid-length row of *year* as an ordered dict."""
    return map(as_dict, _raw_rows(year))
def has_inn(_dict):
    """Return the truthiness of the row's 'inn' field (empty string is falsy)."""
    return _dict['inn']
def raw_dicts(year):
    """Yield row dicts for *year*, dropping rows without an INN."""
    return filter(has_inn, _raw_dicts(year))
# NOTE(review): these import-time smoke checks read the real data files on
# every import of this module (and are stripped under `python -O`); consider
# moving them into the test suite.
assert next(raw_rows(2012))
assert next(raw_dicts(2017))
class Dataset:
    """Parsed view of one year's raw CSV with CSV export and pandas loading."""

    # Parsing metadata shared by all instances.
    dtypes = row_parser.DTYPES
    colnames = row_parser.COLNAMES

    def __init__(self, year: int):
        self.year = year

    # FIXME: this is untrivial - the function accepts a dict and produces a list
    def rows(self):
        """Yield each parsed row as a list ordered like *colnames*."""
        gen = raw_dicts(self.year)
        return map(row_parser.parse_row_to_list, gen)

    def dicts(self):
        """Yield each parsed row as a dict."""
        gen = raw_dicts(self.year)
        return map(row_parser.parse_row_to_dict, gen)

    # FIXME: make separate functions
    # @staticmethod
    # def nth(gen, n):
    #     return next(islice(gen, n, n + 1))
    #
    # def nth_row(self, n=0):
    #     return self.nth(self.rows(), n)
    #
    # def nth_dict(self, n=0):
    #     return self.nth(self.dicts(), n)

    @property
    def path(self):
        """Location of the processed CSV for this year."""
        return settings.csv_path_processed(self.year)

    def to_csv(self):
        """Write the parsed rows to *path*, skipping if the file already exists."""
        if not os.path.exists(self.path):
            print(f"{self.year}: Saving large file to", self.path)
            streams.rows_to_csv(path = self.path,
                                stream = self.rows(),
                                cols = self.colnames)
        else:
            print(f"{self.year}: File already exists:", self.path)

    @print_elapsed_time
    def read_dataframe(self):
        """Load the processed CSV into a pandas DataFrame with typed columns."""
        print("Reading {} dataframe...".format(self.year))
        with open(self.path, 'r', encoding='utf-8') as f:
            return pd.read_csv(f, dtype=self.dtypes)
#class Subset:
# def __init__(self, year: int, inns: list):
# self.dataset = Dataset(year)
# self.inns = [str(x) for x in inns]
#
# def dicts(self):
# for d in self.dataset.dicts():
# inn = str(d['inn'])
# if inn in self.inns:
# self.inns.remove(inn)
# yield d
# if not self.inns:
# break
#
# def not_found(self):
# return "\n".join(sorted(k.inns))
#
# def to_csv(self, filename):
# path = tempfile(filename)
# if not os.path.exists(path):
# dicts_to_csv(path = path,
# dict_stream = self.dicts(),
# column_names = self.dataset.colnames)
# return path
#if __name__ == "__main__":
# # create model dataset
# stream = list(islice(RawDataset(2012).rows(), 0, 500))
# path = tempfile('reference_dataset.txt')
# to_csv(path, stream, cols=None)
# # TODO: place at
#
#
# #Subset(2015, 'test1').to_csv()
# d = Dataset(2012)
# a = next(Dataset(2016).dicts())
# z = next(RawDataset(2016).get_rows())
# import random
# ix = [random.choice(range(100)) for _ in range(5)]
# inns = [d.nth_dict(i)['inn'] for i in ix]
# inns = ['2224102690', '2204026804', '2222057509', '2204026730', '2207007165']
# s = Subset(2012, inns)
# #gen = s.dicts()
# #print(list(gen))
# s.to_csv("sample5.csv")
#
# #df = Dataset(2016).read_dataframe()
# #Dataset(2016).to_csv()
# # FIXME: results in MemoryError
#
# doc = """6125021399
#6165111610
#5501092795
#3252005997
#2617013243
#0214005782
#6125028404
#7840322535
#2723127073
#7726311464
#6432005430
#2460222454
#2009002493
#2460205089
#7707049388
#7713591359
#4027083322
#7601000640
#7702347870
#1627005779
#6135006840
#2320102816
#5007035121
#7801499923
#2502039781
#2465102746
#7709756135
#7614005035
#2721162072
#7725027605
#7704753638
#2310119472
#7709758887
#6234028965
#6312034863
#7727541830
#2312153550
#7328063237
#1661028712
#7734046851
#4501122913
#7701897582
#1834051678
#4003034171
#2317044843
#7714175986
#7606053324
#7735128151
#7206025040
#6320002223
#2420002597
#1327000226
#6125022025
#3327823181
#1646021952
#1650161470
#4703038767
#7710884741
#7713730490
#1650206314
#2320153289
#2317010611
#5029140480
#7830002705
#2320126091
#6313036408
#2325014338
#4807013380
#7813173683
#6906011193
#4715019631
#2721167592
#5030062677
#7425756540
#2319037591
#7116145872
#5010032360
#6163082392
#1659032038
#7712094033
#5029006702
#2130001337
#7707327050
#7611020204
#7724791423
#7714005350
#1434045743
#7706273281
#7731084175
#4713008017
#6315376946
#7817312063
#7708624200
#7714046028
#6167081833
#4214018010
#3013015987
#0522016027
#2277011020
#7743816842
#7801435581
#7718532879
#5614023224
#1216015989
#7718226550
#7705620334
#7707131554
#4027077632
#5307006883
#2342016712
#7701513162
#5614054173
#2127007427
#3815011264
#2130009512
#6453010174
#2130181337
#6450079058
#7707296041
#8300005580
#7105514574
#5032172562
#0710005596
#2709001880
#3663075863
#5402480282
#3904612524
#6123015784
#7724674670
#7708320240
#4214000252
#5040066582
#6453076256
#3917016350
#7842012360
#5604009492
#7705514093
#6230004963
#5616009708
#7702334864
#5032124142
#5613001002
#3437006665
#5040058775
#2703000858
#2011002420
#7730589568
#3837049102
#5614018560
#1616016850
#6623029538
#7730052050
#7731644035
#7839395419
#7731644035
#6659190900
#2902060361
#7327016379
#7709413138
#7708710924
#7725638925
#7708304859
#7717163097
#7724736609
#7714619159
#5032178356
#7728278043
#3663029916
#7702326045
#7729355614
#7722787661
#9909391333
#9909391291
#9909391260
#7708201998
#9909001382
#9909378244
#9909439151
#9909012056"""
#
# inns = doc.split("\n")
# k = Subset(2016, inns)
# k.to_csv('179.csv')
#
# """Not finished:
#
# subsets as Excel files
# manageable, smaller files
# Expert 200
#
#"""
#
#
#
| 2.625 | 3 |
wrench/endmodel/bert_model.py | Stranger469/ARS2 | 0 | 12765478 | <reponame>Stranger469/ARS2
import logging
from typing import Any, Optional, Union, Callable
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm.auto import trange
from transformers import AdamW, get_linear_schedule_with_warmup, AutoTokenizer
from ..backbone import BackBone
from ..basemodel import BaseTorchClassModel
from ..dataset import BaseDataset
from ..utils import cross_entropy_with_probs, get_bert_model_class, get_bert_torch_dataset_class, construct_collate_fn_trunc_pad
logger = logging.getLogger(__name__)
collate_fn = construct_collate_fn_trunc_pad('mask')
class BertClassifierModel(BaseTorchClassModel):
    """Fine-tunable BERT-style text classifier with gradient accumulation and
    optional early stopping on a validation set."""

    def __init__(self,
                 model_name: Optional[str] = 'bert-base-cased',
                 lr: Optional[float] = 3e-5,
                 l2: Optional[float] = 0.0,
                 max_tokens: Optional[int] = 512,
                 batch_size: Optional[int] = 16,
                 real_batch_size: Optional[int] = 16,
                 test_batch_size: Optional[int] = 16,
                 n_steps: Optional[int] = 10000,
                 fine_tune_layers: Optional[int] = -1,
                 binary_mode: Optional[bool] = False,
                 ):
        """Store hyper-parameters and load the matching HuggingFace tokenizer.

        `batch_size` is the effective (logical) batch; `real_batch_size` is what
        fits in memory per forward pass - gradients are accumulated to bridge the gap.
        """
        super().__init__()
        self.hyperparas = {
            'model_name'      : model_name,
            'fine_tune_layers': fine_tune_layers,
            'lr'              : lr,
            'l2'              : l2,
            'max_tokens'      : max_tokens,
            'batch_size'      : batch_size,
            'real_batch_size' : real_batch_size,
            'test_batch_size' : test_batch_size,
            'n_steps'         : n_steps,
            'binary_mode'     : binary_mode,
        }
        self.model: Optional[BackBone] = None
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

    def fit(self,
            dataset_train: BaseDataset,
            y_train: Optional[np.ndarray] = None,
            dataset_valid: Optional[BaseDataset] = None,
            y_valid: Optional[np.ndarray] = None,
            sample_weight: Optional[np.ndarray] = None,
            evaluation_step: Optional[int] = 10,
            metric: Optional[Union[str, Callable]] = 'acc',
            direction: Optional[str] = 'auto',
            patience: Optional[int] = 10,
            tolerance: Optional[float] = -1.0,
            device: Optional[torch.device] = None,
            verbose: Optional[bool] = True,
            **kwargs: Any):
        """Fine-tune the model for up to n_steps optimizer steps.

        :param y_train: optional (soft) labels; defaults to dataset_train.labels.
        :param sample_weight: optional per-example loss weights (defaults to 1).
        :param evaluation_step: validate every this many optimizer steps.
        :returns: dict mapping optimizer step -> training/validation metrics.
        """
        if not verbose:
            logger.setLevel(logging.ERROR)
        self._update_hyperparas(**kwargs)
        hyperparas = self.hyperparas
        n_steps = hyperparas['n_steps']
        # Clamp the physical batch to the logical one when unset (-1) or larger.
        if hyperparas['real_batch_size'] == -1 or hyperparas['batch_size'] < hyperparas['real_batch_size']:
            hyperparas['real_batch_size'] = hyperparas['batch_size']
        accum_steps = hyperparas['batch_size'] // hyperparas['real_batch_size']

        torch_dataset = get_bert_torch_dataset_class(dataset_train)(dataset_train, self.tokenizer, self.hyperparas['max_tokens'],
                                                                    n_data=n_steps * hyperparas['batch_size'])
        train_dataloader = DataLoader(torch_dataset, batch_size=hyperparas['real_batch_size'], shuffle=True, collate_fn=collate_fn)

        if y_train is None:
            y_train = dataset_train.labels
        # torch.Tensor yields float labels, as expected by cross_entropy_with_probs.
        y_train = torch.Tensor(y_train).to(device)

        if sample_weight is None:
            sample_weight = np.ones(len(dataset_train))
        sample_weight = torch.FloatTensor(sample_weight).to(device)

        n_class = dataset_train.n_class
        model = get_bert_model_class(dataset_train)(
            n_class=n_class,
            **hyperparas
        ).to(device)
        self.model = model

        optimizer = AdamW(model.parameters(), lr=hyperparas['lr'], weight_decay=hyperparas['l2'])

        # Set up the learning rate scheduler
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=n_steps)

        valid_flag = self._init_valid_step(dataset_valid, y_valid, metric, direction, patience, tolerance)

        history = {}
        last_step_log = {}
        try:
            with trange(n_steps, desc=f"[FINETUNE] {hyperparas['model_name']} Classifier", unit="steps", disable=not verbose, ncols=150, position=0, leave=True) as pbar:
                cnt = 0   # micro-batches since last optimizer step
                step = 0  # completed optimizer steps
                model.train()
                optimizer.zero_grad()
                for batch in train_dataloader:
                    outputs = model(batch)
                    batch_idx = batch['ids'].to(device)
                    target = y_train[batch_idx]
                    loss = cross_entropy_with_probs(outputs, target, reduction='none')
                    loss = torch.mean(loss * sample_weight[batch_idx])
                    loss.backward()
                    cnt += 1

                    # Apply accumulated gradients once per logical batch.
                    if cnt % accum_steps == 0:
                        # Clip the norm of the gradients to 1.0.
                        nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                        optimizer.step()
                        scheduler.step()
                        optimizer.zero_grad()
                        step += 1

                        if valid_flag and step % evaluation_step == 0:
                            metric_value, early_stop_flag, info = self._valid_step(step)
                            if early_stop_flag:
                                logger.info(info)
                                break

                            history[step] = {
                                'loss'              : loss.item(),
                                f'val_{metric}'     : metric_value,
                                f'best_val_{metric}': self.best_metric_value,
                                'best_step'         : self.best_step,
                            }
                            last_step_log.update(history[step])

                        last_step_log['loss'] = loss.item()
                        pbar.update()
                        pbar.set_postfix(ordered_dict=last_step_log)

                        if step >= n_steps:
                            break
        except KeyboardInterrupt:
            logger.info(f'KeyboardInterrupt! do not terminate the process in case need to save the best model')
        # Restores the best checkpoint when validation was enabled.
        self._finalize()
        return history
| 1.960938 | 2 |
modules/2.79/bpy/types/WoodTexture.py | cmbasnett/fake-bpy-module | 0 | 12765479 | <reponame>cmbasnett/fake-bpy-module<filename>modules/2.79/bpy/types/WoodTexture.py
# Auto-generated API stub attribute placeholder; no runtime behavior.
WoodTexture.users_object_modifier = None
| 0.941406 | 1 |
rdftools/tools/rdf2rdf.py | cosminbasca/rdftools | 4 | 12765480 | #
# author: <NAME>
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from multiprocessing import Pool, cpu_count
from rdftools.raptorutil import rdf_ext
from rdftools.tools.base import RdfTool
from rdftools.tools.jvmrdftools import run_rdf2rdf_converter
__author__ = 'basca'
def dest_file_name(src, dst_format):
    """Return the destination file name for converting *src* to *dst_format*.

    Returns None when the file already carries the destination extension or
    when *dst_format* is not a known RDF format. Previously an unknown format
    produced a bogus '<base>.None' file name.
    """
    base, ext = os.path.splitext(src)
    dst_ext = rdf_ext.get(dst_format, [None])[0]
    if dst_ext is None:
        # unknown destination format: nothing sensible to produce
        return None
    if ext == '.%s' % dst_ext:
        # already in the destination format - no conversion needed
        return None
    return '%s.%s' % (base, dst_ext)
def to_process(src, dst_format):
    """True when *src* is a regular file that still needs conversion to *dst_format*."""
    return (not os.path.isdir(src)) and dest_file_name(src, dst_format) is not None
def convert_file(source, dst_format, clr_src=False):
    # Module-level helper so it can be pickled for multiprocessing workers.
    # NOTE(review): Rdf2Rdf._run passes the destination *file name* as this
    # dst_format argument - confirm what run_rdf2rdf_converter actually expects.
    rdf2rdf = Rdf2Rdf()
    rdf2rdf.convert(source, dst_format, clear_source=clr_src)
class Rdf2Rdf(RdfTool):
    """Converts RDF files between serialisation formats, optionally in parallel."""

    def __init__(self, *args, **kwargs):
        super(Rdf2Rdf, self).__init__(*args, **kwargs)

    def convert(self, source, destination_format, clear_source=False):
        """
        convert source rdf files to destination format
        :param source: the source file(s)
        :param destination_format: the destination format
        :param clear_source: if set delete the source files, default = False
        :return: None
        """
        run_rdf2rdf_converter(source, destination_format)
        if clear_source:
            self._log.warn('REMOVE: {0}'.format(source))
            os.remove(source)

    def _run(self, source, destination_format, clear_source=False, workers=-1):
        """
        parallel version of the `convert` method
        :param source: (rdf) files to convert (source path)
        :param destination_format: the destination format
        :param clear_source: if set, delete the source files. Default = False
        :param workers: worker process count; values outside (0, cpu_count) fall back to cpu_count
        :return: None
        """
        files = []
        src = os.path.abspath(source)
        if os.path.isdir(src):
            # NOTE(review): to_process receives the bare name 'f', not the joined path.
            files = [os.path.join(src, f) for f in os.listdir(src) if to_process(f, destination_format)]
        elif os.path.exists(src):
            files = [src]
        self._log.info('to process: {0}'.format(files))
        if clear_source:
            self._log.warn('will remove original files after conversion')

        def job_finished(res):
            # progress dot per finished conversion (Python 2 print statement)
            print '.',
            sys.stdout.flush()

        num_cpus = cpu_count()
        num_workers = workers if 0 < workers < num_cpus else num_cpus
        pool = Pool(processes=num_workers)
        for src in files:
            dst = dest_file_name(src, destination_format)
            if dst:
                # NOTE(review): 'dst' (a file name) is passed where convert_file
                # expects a destination format - verify against run_rdf2rdf_converter.
                pool.apply_async(convert_file, (src, dst, clear_source), callback=job_finished)
        pool.close()
        pool.join()
| 2.015625 | 2 |
c/tools/shaderdesc.py | gberthou/incremental-voronoi | 0 | 12765481 | <reponame>gberthou/incremental-voronoi
import sys
import re
import extractglsl
# Regexes classifying GLSL type names.
typevec = re.compile(r"([biud])?vec([234])")  # e.g. vec3, ivec2, bvec4
typemat = re.compile(r"(d)?mat([234])x?([234])?")  # e.g. mat4, mat3x2, dmat2
typesampler = re.compile(r"([iu])?sampler(.+)")  # e.g. sampler2D, isampler3D

# Shader stage identifiers used in shader descriptions.
STYPE_VERT = "vert"
STYPE_FRAG = "frag"
STYPE_GEO = "geo"
STYPE_TCS = "tcs"
STYPE_TES = "tes"

SHADER_TYPES = ["vert", "frag", "geo", "tcs", "tes"]

# Stage identifier -> OpenGL shader-type enum name.
STYPE_TO_GL = {
    STYPE_VERT: "GL_VERTEX_SHADER",
    STYPE_FRAG: "GL_FRAGMENT_SHADER",
    STYPE_GEO : "GL_GEOMETRY_SHADER",
    STYPE_TCS : "GL_TESS_CONTROL_SHADER",
    STYPE_TES : "GL_TESS_EVALUATION_SHADER"
}
def indent(s, n):
    """Prefix every line of *s* (split on '\\n') with *n* levels of indentation."""
    pad = " " * n
    return "\n".join(pad + line for line in s.split("\n"))
def matdims(mmat):
    """Return (cols, rows) from a typemat regex match; square when only one dim given."""
    first = int(mmat.group(2))
    second = mmat.group(3)
    return (first, int(second)) if second else (first, first)
def matdim(mmat):
    """Total number of components of the matched matrix type (cols * rows)."""
    cols, rows = matdims(mmat)
    return cols * rows
def is_scalar(_type):
    """True unless *_type* names a GLSL vector or matrix type."""
    # Todo: arrays?
    return typevec.match(_type) is None and typemat.match(_type) is None
def Ctype_of(_type):
    """Map a GLSL type name to the C++ type used in generated setter signatures.

    Scalars map to GL typedefs, vectors/matrices to std::array, samplers to
    GLint (texture unit index). Raises for unsupported type names.
    """
    if _type == "uint" or _type == "bool":
        return "GLuint"
    if _type == "int":
        return "GLint"
    if _type == "double" or _type == "float":
        return "GLfloat"
    mvec = typevec.match(_type)
    mmat = typemat.match(_type)
    msampler = typesampler.match(_type)
    if mvec:
        __type = mvec.group(1)  # optional b/i/u/d prefix
        if __type == "b":
            subtype = "GLboolean"
        elif __type == "i":
            subtype = "GLint"
        elif __type == "u":
            subtype = "GLuint"
        else: # What of "d"?
            subtype = "GLfloat"
        return "std::array<" + subtype + ", " + mvec.group(2) + ">"
    if mmat:
        if mmat.group(1):
            subtype = "double"
        else:
            subtype = "float"
        dim = matdim(mmat)  # flattened element count (cols * rows)
        return "std::array<" + subtype + ", " + str(dim) + ">"
    if msampler:
        # samplers are referenced by texture unit index
        return "GLint"
    raise Exception("Ctype_of: unsupported type \"%s\"" % _type)
def uniform_type_of(_type):
    """Map a GLSL type name to the glUniform* call suffix used to upload it.

    Returns None for unsupported type names. Fix: all sampler variants
    (sampler*, isampler*, usampler*) are bound to a texture unit with
    glUniform1i; previously i-/u-samplers fell through and returned None,
    which produced broken 'glUniformNone(...)' code in C_setters_source.
    """
    mvec = typevec.match(_type)
    mmat = typemat.match(_type)
    msampler = typesampler.match(_type)
    if _type == "uint" or _type == "bool":
        return "1ui"
    if _type == "int":
        return "1i"
    if _type == "float" or _type == "double":
        return "1f"
    if mvec:
        __type = mvec.group(1)
        if __type == "b" or __type == "u":
            return mvec.group(2) + "uiv"
        if __type == "i":
            return mvec.group(2) + "iv"
        # plain and "d" vectors use the float upload path
        return mvec.group(2) + "fv"
    elif mmat:
        x, y = matdims(mmat)
        if x != y:
            return "Matrix" + str(x) + "x" + str(y) + "fv"
        return "Matrix" + str(x) + "fv"
    elif msampler:
        # every sampler uniform is an integer texture unit index
        return "1i"
    return None
def type_arity(_type):
    """Component count of a GLSL vector type; implicitly None for other types."""
    mvec = typevec.match(_type)
    if mvec:
        return int(mvec.group(2))
def default_vertex_type(_type):
    """GL component-type enum for a vertex attribute type.

    Only plain float vectors are handled (returns "GL_FLOAT"); everything
    else implicitly yields None.
    """
    mvec = typevec.match(_type)
    if mvec:
        __type = mvec.group(1)
        if __type == None:
            return "GL_FLOAT"
def C_member(couple):
    """C++ member declaration holding a GL location for a (name, type) couple."""
    name = couple[0]
    return "GLint {0};".format(name)
def C_setter_prototype(couple, prefix = ""):
    """C++ prototype for the uniform setter of a (name, glsl_type) couple.

    *prefix* may be a 'ClassName::' qualifier for out-of-class definitions.
    """
    name, _type = couple
    return "void {0}Set{1} (const {2} &_{1}) const".format(prefix, name, Ctype_of(_type))
def C_bind_prototype(prefix = ""):
    """C++ prototype of the Bind method, optionally qualified by *prefix*."""
    return "void {0}Bind() const".format(prefix)
def C_code_arg(shadertype):
    """Constructor argument name carrying the GLSL source for *shadertype*."""
    return "code{0}".format(shadertype)
def C_assign_attribute(couple):
    """C++ statement caching the attribute location for a (name, type) couple."""
    name = couple[0]
    return '{0} = glGetAttribLocation(program, "{0}");'.format(name)
def C_assign_uniform(couple):
    """C++ statement caching the uniform location for a (name, type) couple."""
    name = couple[0]
    return '{0} = glGetUniformLocation(program, "{0}");'.format(name)
def C_default_attrib_member(couple):
    """Struct member declaration for a vertex attribute (name, glsl_type).

    Only plain float vectors are supported; matrices and any other type
    implicitly yield None.
    """
    name, _type = couple
    mvec = typevec.match(_type)
    mmat = typemat.match(_type)
    if mvec:
        __type = mvec.group(1)
        if __type == None:
            return "GLfloat " + name + "[" + mvec.group(2) + "];"
    elif mmat:
        # matrix attributes not implemented yet
        pass
def C_VertexAttribPointer(classname, attributes):
    """Emit one glVertexAttribPointer call per attribute, computing stride and
    offset from the generated <classname>::Vertex struct."""
    tmp = ""
    for name, _type in attributes.items():
        #stride = "sizeof(" + classname + "::Vertex) - sizeof(((" + classname + "::Vertex*)0)->" + name + ")"
        stride = "sizeof(" + classname + "::Vertex)"
        offset = "(void*) offsetof(" + classname + "::Vertex, " + name + ")"
        tmp += "glVertexAttribPointer(" + name + ", " + str(type_arity(_type)) + ", " + default_vertex_type(_type) + ", GL_FALSE, " + stride + ", " + offset + ");\n"
    return tmp
class Shader:
    """Generates a C++ Program subclass (header + source) wrapping a GLSL
    shader program: constructor, destructor, Bind() and one typed setter per
    uniform. String-concatenation output is deliberate and must stay exact."""

    def __init__(self, name, shadertypes, attributes, merged_uniforms):
        self.name = name
        # keep only known stages, in canonical SHADER_TYPES order
        self.shadertypes = list(i for i in SHADER_TYPES if i in shadertypes)
        self.attributes = attributes
        self.merged_uniforms = merged_uniforms

    def module_name(self):
        """Generated class/module name, e.g. 'ProgramFoo'."""
        return "Program" + self.name

    def header_name(self):
        return self.module_name() + ".h"

    def source_name(self):
        return self.module_name() + ".cpp"

    def C_name(self):
        return self.module_name()

    def C_constructor_args(self, appendix = "", omittypes = False):
        """Comma-separated constructor argument list (one per shader stage)."""
        if not omittypes:
            prefix = "const std::string &"
        else:
            prefix = ""
        return ", ".join(prefix + C_code_arg(i) + appendix for i in self.shadertypes)

    def C_constructor(self, implem = False):
        """Constructor prototype; class-qualified when *implem* is True."""
        args = self.C_constructor_args()
        tmp = self.C_name() + "(" + args + ")"
        if implem:
            return self.C_name() + "::" + tmp
        return tmp

    def C_destructor(self, implem = False):
        """Destructor prototype; class-qualified when *implem* is True."""
        tmp = "~" + self.C_name() + "()"
        if implem:
            return self.C_name() + "::" + tmp
        return tmp

    def C_constructor_source(self):
        """Full constructor definition: VAO/VBO setup, shader compilation,
        location caching and vertex attribute wiring."""
        emplace = "\n".join("EmplaceShader(" + STYPE_TO_GL[i] + ", " + C_code_arg(i) + ");" for i in self.shadertypes)
        attributes = "\n".join(map(C_assign_attribute, self.attributes.items()))
        uniforms = "\n".join(map(C_assign_uniform, self.merged_uniforms.items()))
        return self.C_constructor(True) + "\n{\n" \
             + indent( \
                 "glGenVertexArrays(1, &vao);\n" \
                 + "glGenBuffers(1, &vbo);\n\n" \
                 + emplace \
                 + "\nFinalize();\n\n" \
                 + attributes + "\n" \
                 + uniforms + "\n\n" \
                 + "glBindVertexArray(vao);\n" \
                 + "glBindBuffer(GL_ARRAY_BUFFER, vbo);\n" \
                 + C_VertexAttribPointer(self.module_name(), self.attributes) \
                 + "\n" \
                 + "\n".join("glEnableVertexAttribArray(" + name + ");" for name, _ in self.attributes.items()) \
                 + "\n\nglBindVertexArray(0);\n"
             , 1) \
             + "\n}\n"

    def C_destructor_source(self):
        """Destructor definition releasing the VAO and VBO."""
        return self.C_destructor(True) + "\n{\n" \
             + indent( \
                 "glDeleteVertexArrays(1, &vao);\n" \
                 + "glDeleteBuffers(1, &vbo);" \
             , 1) \
             + "\n}\n"

    def C_members(self):
        """Private member declarations: VAO/VBO plus one GLint per location."""
        return "GLuint vao;\nGLuint vbo;\n\n" \
             + "\n".join(map(C_member, self.attributes.items())) \
             + "\n\n" \
             + "\n".join(map(C_member, self.merged_uniforms.items()))

    def C_setters(self):
        """Setter prototypes, one per uniform (for the class body)."""
        return "\n".join(map(lambda x: C_setter_prototype(x) + ";", self.merged_uniforms.items()))

    def C_setters_source(self):
        """Setter definitions dispatching to the right glUniform* variant."""
        tmp = ""
        for name, _type in self.merged_uniforms.items():
            uniform_type = uniform_type_of(_type)
            scalar = is_scalar(_type)
            args = "(" + name + ", "
            if not scalar:
                # vector/matrix uploads take a count argument
                args += "1, "
            if uniform_type[0] == "M":
                # matrix uploads take a transpose flag
                args += "GL_FALSE, "
            args += "_" + name
            if not scalar:
                args += ".data()"
            args += ")"
            tmp += C_setter_prototype((name, _type), self.C_name() + "::") \
                 + "\n{\n" \
                 + indent("glUniform" + uniform_type_of(_type) + args + ";", 1) \
                 + "\n}\n"
        return tmp

    def C_bind_source(self):
        """Bind() definition: binds the program's VAO and VBO."""
        return C_bind_prototype(self.C_name() + "::") \
             + "\n{\n glBindVertexArray(vao);\n glBindBuffer(GL_ARRAY_BUFFER, vbo);\n}\n"

    def C_class(self):
        """Whole class declaration (Vertex struct, ctor/dtor, Bind, setters, members)."""
        v = Vertex(self.attributes)
        return "class " + self.C_name() + " : public glutils::Program\n" \
             + "{\n public:\n" \
             + indent(v.C_source(), 2) + "\n" \
             + indent(self.C_constructor(), 2) + ";\n" \
             + indent(self.C_destructor(), 2) + ";\n" \
             + indent(C_bind_prototype(), 2) + ";\n" \
             + indent(self.C_setters(), 2) + "\n" \
             + " private:\n" \
             + indent(self.C_members(), 2) + "\n" \
             + "};"

    def C_header_body(self):
        return self.C_class()

    def C_header(self):
        """Complete header file with include guard."""
        CPP_VARIABLE = self.module_name().upper()
        return "#ifndef " + CPP_VARIABLE + "\n" \
             + "#define " + CPP_VARIABLE + "\n\n" \
             + "#include <string>\n#include <array>\n#include <glutils.h>\n" \
             + self.C_header_body() \
             + "\n#endif\n"

    def C_source_body(self):
        return self.C_constructor_source() \
             + self.C_destructor_source() \
             + self.C_bind_source() \
             + self.C_setters_source()

    def C_source(self):
        """Complete implementation file."""
        return "#include <gl.h>\n#include \"" + self.header_name() + "\"\n\n" + self.C_source_body() + "\n"
class Vertex:
    """Generates the C++ 'struct Vertex' declaration for a set of attributes."""

    def __init__(self, attributes):
        self.attributes = attributes

    def C_source(self):
        """Struct declaration with one member per supported attribute."""
        return "struct Vertex\n{\n" \
             + indent("\n".join(map(C_default_attrib_member, self.attributes.items())), 1) \
             + "\n};\n"
def to_shader(name, shader_desc, types):
    """Build a Shader from a {stage: glsl_file} description.

    Attributes are collected from the vertex stage only; uniforms are merged
    across all stages.
    """
    attributes_and_types = list()
    uniforms_and_types = list()
    for _type, filename in shader_desc.items():
        uniforms, attributes = extractglsl.uniforms_and_attributes_of(filename)
        if _type == STYPE_VERT:
            attributes_and_types.append(extractglsl.types_of(attributes))
        uniforms_and_types.append(extractglsl.types_of(uniforms))
    shader = Shader(name, types, extractglsl.merge(attributes_and_types), extractglsl.merge(uniforms_and_types))
    return shader
| 2.171875 | 2 |
setup.py | apnarm/autodock-cron | 0 | 12765482 | <filename>setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages
def parse_requirements(filename):
    """Yield requirement specifiers from *filename*.

    Skips blank lines, comments and editable installs. Fixes two bugs in the
    original: `line[:2] not in ("#", "-e")` compared a two-character slice
    against "#" and therefore never filtered comment lines, and blank lines
    (truthy because of their trailing newline) were yielded as empty strings.
    """
    with open(filename, "r") as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith(("#", "-e")):
                yield line
# Package metadata; install_requires is read from requirements.txt at build time.
setup(
    name="autodock-cron",
    version="0.0.1",
    description="autodock cron plugin",
    long_description=open("README.rst", "r").read(),
    author="<NAME>",
    author_email="<NAME>, prologic at shortcircuit dot net dot au",
    url="https://github.com/prologic/autodock-cron",
    download_url="https://github.com/prologic/autodock-cron/archive/master.zip",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: POSIX :: BSD",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    license="MIT",
    keywords="autodock cron plugin docker",
    platforms="POSIX",
    packages=find_packages("."),
    install_requires=list(parse_requirements("requirements.txt")),
    scripts=["autodock-cron"]
)
| 2.03125 | 2 |
books/templatetags/books.py | Nuurek/HomeLibrary | 0 | 12765483 | from django.template import Library
from django.forms.models import model_to_dict
from django.contrib.auth.models import User
from books.models import Book
from libraries.models import BookCopy, Lending, Reading
register = Library()
@register.inclusion_tag('books/tags/book_tag.html')
def render_book(book: Book):
    """Render a Book through the book_tag template; context is its field dict."""
    return model_to_dict(book)
@register.inclusion_tag('books/tags/google_book_tag.html')
def render_google_book(book: dict):
    """Render a Google Books API result dict through the google_book_tag template."""
    return book
@register.inclusion_tag('books/tags/book_copy_tag.html')
def render_book_copy(copy: BookCopy, user: User, **kwargs):
    """Build the template context for rendering a single book copy.

    Keyword args: clean (render without actions), only_description, and
    library (the library page currently being viewed).

    Fix: 'is_read' was assigned three times - first a never-evaluated queryset,
    then the real boolean, then the same boolean again inside a redundant
    conditional. Only the effective assignment is kept (behavior unchanged).
    """
    context = book_copy_to_dict(copy)
    clean = kwargs.get('clean', False)
    context['clean'] = clean
    if clean:
        context['only_description'] = True
    else:
        context['only_description'] = kwargs.get('only_description', False)
    library = kwargs.get('library')
    context['user_library'] = user.userprofile.home_library.pk
    context['is_owner'] = library == user.userprofile.home_library
    is_book_owner = copy.library == user.userprofile.home_library
    context['is_book_owner'] = is_book_owner
    is_kept_by_user = copy.is_kept_by(user.userprofile)
    context['is_kept_by_user'] = is_kept_by_user
    # True when the current user has an ongoing (not completed) reading of this copy.
    context['is_read'] = Reading.objects.filter(copy=copy, reader=user.userprofile, is_completed=False).exists()
    try:
        lending = copy.lending_set.get(is_completed=False)
        context['lending'] = lending
        library = kwargs.get('library')
        if library == lending.borrower:
            context['borrowed'] = True
            context['lender'] = copy.library.owner.user.username if copy.library else None
        else:
            context['lent'] = True
            context['borrower'] = lending.borrower.owner.user.username if lending.borrower else None
        # Owner or the borrower themselves may return the copy.
        context['is_return_available'] = is_book_owner or (lending.borrower and user == lending.borrower.owner.user)
    except Lending.DoesNotExist:
        # No active lending: the owner may lend it out.
        context['is_lending_available'] = is_book_owner
    return context
def book_copy_to_dict(copy: BookCopy):
    """Flatten a BookCopy and its Book into one dict (the book's own id is dropped)."""
    merged = model_to_dict(copy)
    book_fields = model_to_dict(copy.book)
    book_fields.pop('id')
    merged.update(book_fields)
    return merged
| 2.25 | 2 |
koocook_core/views/__init__.py | KooCook/koocook-dj | 1 | 12765484 | <filename>koocook_core/views/__init__.py
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from .comments import *
from .posts import *
from .recipes import *
from .handlers import *
from .ingredients import *
from .forms import RecipeForm
from .profile import UserProfileInfoView, UserSettingsInfoView
from ..models import Author, Recipe
from ..models.user import KoocookUser
@receiver(post_save, sender=User)
def dispatch(sender, instance: User, created, **kwargs):
    """post_save hook: create the KoocookUser profile and its Author record
    whenever a new auth User is created (no-op on updates)."""
    if created:
        kc_user = KoocookUser(user=instance)
        kc_user.save()
        author = Author(name=kc_user.name, user=kc_user)
        author.save()
| 2.078125 | 2 |
tests/apps/courses/test_cms_toolbars.py | lunika/richie | 0 | 12765485 | <filename>tests/apps/courses/test_cms_toolbars.py
"""
Test suite of the toolbar extension for organization pages
"""
from django.test.utils import override_settings
from cms.api import create_page
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.items import Menu, ModalItem
from richie.apps.core.factories import UserFactory
from richie.apps.courses.factories import CourseFactory, OrganizationFactory
from richie.apps.persons.tests.utils import CheckToolbarMixin
# pylint: disable=too-many-ancestors
class OrganizationCMSToolbarTestCase(CheckToolbarMixin, CMSTestCase):
    """Testing the integration of organization page extensions in the toolbar"""

    @override_settings(CMS_PERMISSION=False)
    def test_cms_toolbars_course_has_page_extension_settings_item(self):
        """
        Validate that a new item to edit the course is available only when visiting the page
        in edit mode and for users with permission to edit the page.
        """
        course = CourseFactory()
        self.check_toolbar_item(course, "Course settings...")

    @override_settings(CMS_PERMISSION=False)
    def test_cms_toolbars_organization_has_page_extension_settings_item(self):
        """
        Validate that a new item to edit the organization is available only when visiting the page
        in edit mode and for users with permission to edit the page.
        """
        organization = OrganizationFactory()
        self.check_toolbar_item(organization, "Organization settings...")

    @override_settings(CMS_PERMISSION=False)
    def test_cms_toolbars_no_page_extension(self):
        """
        The toolbar should not include any item to edit a page extension on a page not related
        to any page extension.
        """
        # Testing with a superuser proves our point
        superuser = UserFactory(is_staff=True, is_superuser=True)

        # Create a page not related to any page extension
        page = create_page("A page", template="richie/fullwidth.html", language="en")

        # (is_staff, is_superuser) combinations under which the items must stay absent
        cases = [[False, False], [False, True], [True, False]]

        for args in cases:
            toolbar = self.get_toolbar_for_page(page, superuser, *args)
            page_menu = toolbar.find_items(Menu, name="Page")[0].item

            # Check that the course item is absent
            results = page_menu.find_items(ModalItem, name="Course settings...")
            self.assertEqual(results, [])

            # Check that the organization item is absent
            results = page_menu.find_items(ModalItem, name="Organization settings...")
            self.assertEqual(results, [])
| 1.9375 | 2 |
tests/test_subprocess_test_proxy.py | ustudio/nose_connection_report | 1 | 12765486 | <filename>tests/test_subprocess_test_proxy.py
import mock
import os
import os.path
from nose_connection_report import SubprocessTestProxy
import subprocess
import sys
import unittest
class TestSubprocessTestProxy(unittest.TestCase):
    """Unit tests for SubprocessTestProxy: command construction and strace parsing."""

    @mock.patch("subprocess.Popen")
    def test_calls_test_inside_strace(self, mock_popen_class):
        """The proxy must wrap the test invocation in `strace -e trace=connect`."""
        plugin = mock.MagicMock()
        test = mock.MagicMock()
        test.address.return_value = ("foo.py", "tests", "TestFoo.test_something")

        base_command = sys.argv[0]
        cwd = os.getcwd()

        mock_popen = mock_popen_class.return_value
        mock_popen.stdout.read.return_value = ""

        proxy = SubprocessTestProxy(plugin, test)
        proxy(None)

        mock_popen_class.assert_called_with(
            [
                "strace",
                "-e", "trace=connect",
                base_command,
                "--with-process-isolation-reporter",
                "foo.py:TestFoo.test_something"
            ],
            cwd=cwd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )

    @mock.patch("subprocess.Popen")
    def test_parses_strace_output_when_done(self, mock_popen_class):
        """After the subprocess finishes, the proxy parses stderr with parse_strace."""
        mock_popen = mock_popen_class.return_value
        mock_popen.stdout.read.return_value = ''

        plugin = mock.MagicMock()
        test = mock.MagicMock()
        test.address.return_value = ("foo.py", "tests", "TestFoo.test_something")

        proxy = SubprocessTestProxy(plugin, test)

        # This is a goofy way to test this, but I don't feel like
        # pulling apart the existing IPC parsing from the Popen calls
        with mock.patch.object(proxy, "parse_strace") as mock_parse_strace:
            proxy(None)
            mock_parse_strace.assert_called_with(mock_popen.stderr)

    def test_parses_strace_for_connect_calls(self):
        """parse_strace must extract host/port pairs from AF_INET connect lines only."""
        strace_output = """connect(3, {sa_family=AF_FILE, path="/var/run/nscd/socket"}, 110) = -1 ENOENT (No such file or directory)
connect(3, {sa_family=AF_INET, sin_port=htons(8080), sin_addr=inet_addr("127.0.0.1")}, 16) = -1 ECONNREFUSED (Connection refused)
connect(3, {sa_family=AF_INET, sin_port=htons(8000), sin_addr=inet_addr("127.0.0.1")}, 16) = -1 ECONNREFUSED (Connection refused)
"""
        plugin = mock.MagicMock()
        test = mock.MagicMock()

        proxy = SubprocessTestProxy(plugin, test)
        proxy.parse_strace(strace_output.split("\n"))

        plugin.add_test_connections.assert_called_with(
            test,
            [
                {
                    "host": "127.0.0.1",
                    "port": 8080,
                },
                {
                    "host": "127.0.0.1",
                    "port": 8000
                }
            ]
        )
| 2.390625 | 2 |
mailpimp.py | kradalby/mailpimp | 0 | 12765487 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import email
import logging
import logging.config
import os
import sys
from list import ListManager
from mailgun import MailGunSMTP
logger = logging.getLogger(__name__)
# Config file lives next to this script.
CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.ini')

# File-based logging (mailpimp.log next to this script) for this module and 'list'.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
            'datefmt': '%d/%b/%Y %H:%M:%S'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mailpimp.log'),
            'formatter': 'verbose'
        },
    },
    'loggers': {
        __name__: {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
        'list': {
            'handlers': ['file'],
            'level': 'DEBUG',
        },
    }
}

logging.config.dictConfig(LOGGING)
class MailPimp():
    """Distributes one inbound mail to a mailing list when the sender is authorized.

    Fixes: duplicated `return files` in get_attachments removed (the early
    return made the trailing one redundant), and locals shadowing the builtins
    `list`/`file` renamed. Behavior is unchanged.
    """

    def __init__(self, sender, recipient, mail):
        self.config = configparser.ConfigParser()
        self.config.read(CONFIG_FILE)
        self.lm = ListManager(self.config['list']['list_file'])
        self.mg = MailGunSMTP(
            self.config["mailgun"]["user"],
            self.config["mailgun"]["password"]
        )
        logger.debug(self.lm.get_lists())
        self.sender = sender
        self.recipient = recipient
        self.mail = mail

    def allowed(self):
        """Return True when the recipient list exists and the sender may post to it."""
        mailing_list = self.lm.get_list(self.recipient)
        if mailing_list and self.sender in mailing_list.get_senders():
            return True
        return False

    def distribute(self):
        """Forward the mail to every list recipient if the sender is authorized."""
        if self.allowed():
            logger.info('Sender %s, is authorized to send to %s' %
                        (self.sender, self.recipient))
            mailing_list = self.lm.get_list(self.recipient)
            self.mg.send_message(
                self.mail["From"],
                mailing_list.get_recipients(),
                self.mail
            )
        else:
            logger.info('Sender %s, is not authorized to send to %s' %
                        (self.sender, self.recipient))

    def get_attachments(self):
        """Return (filename, payload) pairs for each MIME part; [] for non-multipart mail.

        NOTE(review): every part is reported, including plain-text bodies
        (get_filename() is None for them) - confirm whether body parts should
        be filtered out.
        """
        files = []
        if self.mail.is_multipart():
            for part in self.mail.get_payload():
                files.append((part.get_filename(), part.get_payload()))
        return files
if __name__ == '__main__':
    # Invoked as a mail pipe: raw message on stdin, sender/recipient as argv.
    try:
        mail = email.message_from_binary_file(sys.stdin.buffer)
        sender = sys.argv[1]
        recipient = sys.argv[2]
        logger.debug("######################")
        logger.debug("To: %s" % recipient)
        logger.debug("From: %s" % sender)
        logger.debug("Subject: %s" % mail["Subject"])
        logger.debug("######################\n")
        mp = MailPimp(sender, recipient, mail)
        mp.distribute()
    except Exception as e:
        # Log and exit non-zero so the MTA can report the failure.
        logger.exception(e)
        sys.exit(1)
| 2.296875 | 2 |
Chapter 04/4.01/model.py | ACsBlack/Tkinter-GUI-Application-Development-Blueprints-Second-Edition | 120 | 12765488 | <filename>Chapter 04/4.01/model.py<gh_stars>100-1000
"""
Code illustration: 4.01
@ Tkinter GUI Application Development Blueprints
"""
from configurations import *
class Model():
    """Placeholder model class for this code illustration (no state yet)."""
    def __init__(self):
        pass
| 1.539063 | 2 |
notifications_utils/letter_timings.py | alphagov-mirror/notifications-utils | 0 | 12765489 | <filename>notifications_utils/letter_timings.py
from collections import namedtuple
from datetime import datetime, time, timedelta
import pytz
from govuk_bank_holidays.bank_holidays import BankHolidays
from notifications_utils.countries.data import Postage
from notifications_utils.timezones import (
convert_utc_to_bst,
utc_string_to_aware_gmt_datetime,
)
# Letters uploaded after this local time are processed on the next working day.
LETTER_PROCESSING_DEADLINE = time(17, 30)

# Letter statuses for which a job's letters can still be cancelled.
CANCELLABLE_JOB_LETTER_STATUSES = [
    'created', 'cancelled', 'virus-scan-failed', 'validation-failed', 'technical-failure', 'pending-virus-check'
]

# DVLA prints Monday-Friday: Saturday (5) and Sunday (6) are non-working.
non_working_days_dvla = BankHolidays(
    use_cached_holidays=True,
    weekend=(5, 6),
)

non_working_days_royal_mail = BankHolidays(
    use_cached_holidays=True,
    weekend=(6,)  # Only Sunday (day 6 of the week) is a non-working day
)
def set_gmt_hour(day, hour):
    """Return *day* pinned to ``hour``:00 UK local time, converted back to UTC."""
    london = pytz.timezone('Europe/London')
    local = day.astimezone(london).replace(hour=hour, minute=0)
    return local.astimezone(pytz.utc)
def get_next_work_day(date, non_working_days):
    """Return the first working day strictly after *date*.

    Working days are defined by the given BankHolidays calendar, using
    the England and Wales division.
    """
    candidate = date
    while True:
        candidate += timedelta(days=1)
        if non_working_days.is_work_day(
            date=candidate.date(),
            division=BankHolidays.ENGLAND_AND_WALES,
        ):
            return candidate
def get_next_dvla_working_day(date):
    """
    Printing takes place monday to friday, excluding bank holidays
    (returns the next such day strictly after *date*)
    """
    return get_next_work_day(date, non_working_days=non_working_days_dvla)


def get_next_royal_mail_working_day(date):
    """
    Royal mail deliver letters on monday to saturday
    (returns the next such day strictly after *date*)
    """
    return get_next_work_day(date, non_working_days=non_working_days_royal_mail)
def get_delivery_day(date, *, days_to_deliver):
    """Walk forward *days_to_deliver* Royal Mail working days from *date*."""
    delivery_day = date
    for _ in range(days_to_deliver):
        delivery_day = get_next_royal_mail_working_day(delivery_day)
    return delivery_day
def get_min_and_max_days_in_transit(postage):
    """Return (min, max) full days a letter spends in transit for *postage*.

    Raises KeyError for an unknown postage class.
    """
    return {
        # first class post is printed earlier in the day, so will
        # actually transit on the printing day, and be delivered the next
        # day, so effectively spends no full days in transit
        'first': (0, 0),
        'second': (1, 2),
        Postage.EUROPE: (3, 5),
        Postage.REST_OF_WORLD: (5, 7),
    }[postage]
def get_earliest_and_latest_delivery(print_day, postage):
    """Yield the earliest, then the latest, delivery date for a letter
    printed on *print_day* with the given *postage* class."""
    min_transit, max_transit = get_min_and_max_days_in_transit(postage)
    for days_to_transit in (min_transit, max_transit):
        yield get_delivery_day(print_day, days_to_deliver=1 + days_to_transit)
def get_letter_timings(upload_time, postage):
    """Work out print/delivery timings for a letter uploaded at *upload_time*.

    Args:
        upload_time: UTC timestamp string of the upload.
        postage: postage class ('first', 'second', or a Postage value).

    Returns:
        LetterTimings namedtuple of timezone-aware datetimes plus the
        is_printed flag.
    """
    LetterTimings = namedtuple(
        'LetterTimings',
        'printed_by, is_printed, earliest_delivery, latest_delivery'
    )

    # shift anything after 5:30pm to the next day: adding 6h30m maps an
    # upload at exactly 17:30 onto midnight of the following day
    processing_day = utc_string_to_aware_gmt_datetime(upload_time) + timedelta(hours=6, minutes=30)
    print_day = get_next_dvla_working_day(processing_day)
    earliest_delivery, latest_delivery = get_earliest_and_latest_delivery(print_day, postage)

    # print deadline is 3pm BST
    printed_by = set_gmt_hour(print_day, hour=15)
    now = datetime.utcnow().replace(tzinfo=pytz.utc).astimezone(pytz.timezone('Europe/London'))

    return LetterTimings(
        printed_by=printed_by,
        is_printed=(now > printed_by),
        # deliveries are reported as 4pm UK local time on the delivery day
        earliest_delivery=set_gmt_hour(earliest_delivery, hour=16),
        latest_delivery=set_gmt_hour(latest_delivery, hour=16),
    )
def letter_can_be_cancelled(notification_status, notification_created_at):
    '''
    If letter does not have status of created or pending-virus-check
    => can't be cancelled (it has already been processed)

    If it's after 5.30pm local time and the notification was created today before 5.30pm local time
    => can't be cancelled (it will already be zipped up to be sent)
    '''
    if notification_status not in ('created', 'pending-virus-check'):
        return False
    if _after_letter_processing_deadline() and _notification_created_before_today_deadline(notification_created_at):
        return False
    time_created_at = convert_utc_to_bst(notification_created_at)
    day_created_on = time_created_at.date()
    current_time = convert_utc_to_bst(datetime.utcnow())
    current_day = current_time.date()
    # Created before a *previous* day's deadline => already sent for printing.
    if _notification_created_before_that_day_deadline(notification_created_at) and day_created_on < current_day:
        return False
    # Anything more than one day old has certainly been processed.
    if (current_day - day_created_on).days > 1:
        return False
    return True
def _after_letter_processing_deadline():
    """True once the current UK-local time has reached the 17:30 deadline."""
    now_bst = convert_utc_to_bst(datetime.utcnow())
    return now_bst.time() >= LETTER_PROCESSING_DEADLINE


def _notification_created_before_today_deadline(notification_created_at):
    """True if the letter was created at or before *today's* 17:30 BST deadline."""
    deadline_today = convert_utc_to_bst(datetime.utcnow()).replace(
        hour=LETTER_PROCESSING_DEADLINE.hour,
        minute=LETTER_PROCESSING_DEADLINE.minute,
    )
    created_bst = convert_utc_to_bst(notification_created_at)
    return created_bst <= deadline_today


def _notification_created_before_that_day_deadline(notification_created_at):
    """True if the letter was created at or before the 17:30 BST deadline of
    the day it was created on."""
    created_bst = convert_utc_to_bst(notification_created_at)
    deadline_that_day = created_bst.replace(
        hour=LETTER_PROCESSING_DEADLINE.hour,
        minute=LETTER_PROCESSING_DEADLINE.minute,
    )
    return created_bst <= deadline_that_day
| 2.609375 | 3 |
AtC_Gra_Con_001-010/AGC001/B.py | yosho-18/AtCoder | 0 | 12765490 | n, x = map(int, input().split())
if (x > n // 2 and n % 2 == 0) or (x > (n + 1) // 2 and n % 2 == 1):
x = n - x
A = n - x
B = x
k = 0
m = -1
ans = n
while m != 0:
k = A // B
m = A % B
ans += B * k * 2
if m == 0:
ans -= B
A = B
B = m
print(ans) | 2.8125 | 3 |
simpleapi/message/formatter.py | ghuntley/simpleapi | 1 | 12765491 | # -*- coding: utf-8 -*-
import cPickle
from common import json
try:
import yaml
has_yaml = True
except ImportError:
has_yaml = False
from py2xml import PythonToXML
from sajson import SimpleAPIEncoder, SimpleAPIDecoder
__all__ = ('formatters', 'Formatter')
class FormattersSingleton(object):
    """This singleton takes care of all registered formatters. You can easily
    register your own formatter for use in both the Namespace and python client.
    """
    # Shared registry: format name -> Formatter subclass.
    _formatters = {}

    def __new__(cls):
        # Classic singleton: the single instance is cached on the class.
        it = cls.__dict__.get("__it__")
        if it is not None:
            return it
        cls.__it__ = it = object.__new__(cls)
        return it

    def register(self, name, formatter, override=False):
        """Register the given formatter. If there's already a formatter with
        the given `name`, you can override by setting `override` to ``True``.

        Raises TypeError for non-Formatter classes and AttributeError for
        duplicate names without `override`.
        """
        # Instantiating with (None, None) verifies the Formatter interface.
        if not isinstance(formatter(None, None), Formatter):
            raise TypeError(u"You can only register a Formatter not a %s" % formatter)

        if name in self._formatters and not override:
            raise AttributeError(u"%s is already a valid format type, try a new name" % name)

        self._formatters[name] = formatter

    def get_defaults(self):
        # Formatters may opt out of default activation by setting
        # __active_by_default__ = False (e.g. PickleFormatter).
        result = filter(lambda item: getattr(item[1], '__active_by_default__', True),
            self._formatters.items())
        return dict(result).keys()

    def copy(self):
        # Shallow copy of the registry as a plain dict.
        return dict(**self._formatters)

    def __contains__(self, value):
        return value in self._formatters

    def __getitem__(self, name):
        # Returns None for unknown names (dict.get semantics).
        return self._formatters.get(name)

    def __setitem__(self, *args):
        # Direct assignment is forbidden; use register() instead.
        raise AttributeError

formatters = FormattersSingleton()
class Formatter(object):
    """Baseclass for Formatter-implementations.

    Subclasses must implement build(), parse() and kwargs().
    """

    def __init__(self, sapi_request, callback):
        """A Formatter takes the original http request (Django's one) and a
        callback name, e. g. for JSONP."""
        self.sapi_request = sapi_request
        self.callback = callback

    def build(self, value):
        """Takes care of the building process and returns the encoded data."""
        raise NotImplementedError

    def kwargs(self, value, action='build'):
        """Is called within ``simpleapi``. This method invokes either the
        parse or build function depending on *action*."""
        raise NotImplementedError

    def parse(self, value):
        """Takes care of the parsing process and returns the decoded data."""
        raise NotImplementedError
class JSONFormatter(Formatter):
    """Formatter for the JSON format. Used by default by the python client
    and by many Javascript frameworks."""
    __mime__ = "application/json"

    def build(self, value):
        """Encode *value* as a JSON string using the simpleapi encoder."""
        return json.dumps(value, cls=SimpleAPIEncoder)

    def parse(self, value):
        """Decode a JSON string back into python data."""
        return json.loads(value, cls=SimpleAPIDecoder)

    def kwargs(self, value, action='build'):
        """Dispatch to build/parse depending on *action*."""
        if action == 'parse':
            return self.parse(value)
        elif action == 'build':
            return self.build(value)
class JSONPFormatter(Formatter):
    """Formatter for JSONP-format. Used for cross-domain requests. If `callback`
    isn't provided, `simpleapiCallback` is used."""
    __mime__ = "application/javascript"

    def build(self, value):
        """Wrap the JSON-encoded *value* in a javascript callback invocation.

        Uses SimpleAPIEncoder for consistency with JSONFormatter.
        """
        func = self.callback or 'simpleapiCallback'
        result = u'%(func)s(%(data)s)' % {
            'func': func.decode("utf-8"),
            'data': json.dumps(value, cls=SimpleAPIEncoder),
        }
        return result.encode("utf-8")

    def kwargs(self, value, action='build'):
        """Dispatch to build/parse depending on *action*.

        Bug fix: the original signature omitted the ``action`` parameter it
        referenced, so every call raised NameError; it also bypassed build()
        and therefore never applied the JSONP callback wrapping.
        """
        if action == 'build':
            return self.build(value)
        elif action == 'parse':
            return self.parse(value)

    def parse(self, value):
        """Decode a JSON string back into python data."""
        return json.loads(value, cls=SimpleAPIDecoder)
class ValueFormatter(Formatter):
    """Basic formatter for simple, fast and tiny transports (it has a lot
    of limitations, though)."""
    __mime__ = "text/html"

    def build(self, value):
        """Return *value* unchanged."""
        return value

    def parse(self, value):
        """Coerce the incoming value to unicode text."""
        return unicode(value)

    def kwargs(self, value, action='build'):
        """Dispatch to build/parse depending on *action*."""
        if action == 'parse':
            return self.parse(value)
        elif action == 'build':
            return self.build(value)
class PickleFormatter(Formatter):
    """Formatter for use the cPickle python module which supports python object
    serialization. It has the fewest limitations (ie. it can also serialize
    datetime objects), but is a security risk and should only be used in a
    trusted environment. It's strongly recommended that you use authentication
    mechanismen to protect your namespace. The formatter is not activated by
    default and can be enabled by putting 'pickle' into Namespace's ``__input__``
    and ``__output__`` configuration. """
    __mime__ = "application/octet-stream"
    __active_by_default__ = False  # opt-in only, see the security note above

    def build(self, value):
        # Serialize any picklable python object to a byte string.
        return cPickle.dumps(value)

    def kwargs(self, value, action='build'):
        # Dispatch to build/parse depending on *action*.
        if action == 'build':
            return self.build(value)
        elif action == 'parse':
            return self.parse(value)

    def parse(self, value):
        # SECURITY: cPickle.loads executes arbitrary code when fed untrusted
        # data -- never expose this formatter without authentication.
        if isinstance(value, unicode):
            value = value.encode("utf-8")
        return cPickle.loads(value)
class XMLFormatter(Formatter):
    """Formatter serialising python data to and from XML via PythonToXML."""
    __mime__ = "text/xml"

    def build(self, value):
        """Serialise *value* to an XML string."""
        return PythonToXML().build(value)

    def parse(self, value):
        """Deserialise an XML string back into python data."""
        return PythonToXML().parse(value)

    def kwargs(self, value, action='build'):
        """Dispatch to build/parse depending on *action*."""
        if action == 'parse':
            return self.parse(value)
        elif action == 'build':
            return self.build(value)
class YAMLFormatter(Formatter):
    """Formatter for YAML payloads; only registered when PyYAML is installed."""
    __mime__ = "application/x-yaml"

    def build(self, value):
        """Serialise *value* to a YAML document (safe dumper)."""
        return yaml.safe_dump(value)

    def parse(self, value):
        """Parse a YAML document back into python data (safe loader)."""
        return yaml.safe_load(value)

    def kwargs(self, value, action='build'):
        """Dispatch to build/parse depending on *action*."""
        if action == 'parse':
            return self.parse(value)
        elif action == 'build':
            return self.build(value)
# Built-in formatters; 'yaml' is only available when PyYAML is installed.
formatters.register('json', JSONFormatter)
formatters.register('jsonp', JSONPFormatter)
formatters.register('value', ValueFormatter)
formatters.register('pickle', PickleFormatter)
formatters.register('xml', XMLFormatter)

if has_yaml:
    formatters.register('yaml', YAMLFormatter)
| 2.515625 | 3 |
streammorphology/__init__.py | adrn/StreamMorphology | 0 | 12765492 | <reponame>adrn/StreamMorphology
import os
# Absolute path of the repository root (two levels above this package).
project_path = os.path.split(os.path.split(__file__)[0])[0]
del os  # keep the package namespace clean

# Three orbits:
# NOTE(review): the mapping actually holds four named orbits despite the name.
import numpy as np
from collections import OrderedDict
three_orbits = OrderedDict([
    # Each value is a 6-component initial-condition array; the component
    # ordering is defined by the consuming experiment code -- TODO confirm.
    ('near-resonant', np.array([17.0, 0.0, 26.060606060606062, 0.0, 0.12912205829404055, 0.0])), # resonant
    ('non-resonant', np.array([17.0, 0.0, 23.03030303030303, 0.0, 0.15198454276899373, 0.0])), # non-resonant
    ('weak-chaos', np.array([17.0, 0.0, 25.353535353535353, 0.0, 0.1346704105535305, 0.0])), # weak chaos
    ('strong-chaos', np.array([17.0, 0.0, 28.686868686868685, 0.0, 0.10691643457775891, 0.0])) # strong chaos
])

# Single-letter labels for the four orbits (usage defined elsewhere).
name_map = dict([
    ('near-resonant','A'),
    ('non-resonant', 'B'),
    ('weak-chaos', 'C'),
    ('strong-chaos', 'D')
])
from .experimentrunner import *
from .freqmap import Freqmap
from .lyapunov import Lyapmap
from .freqvar import FreqVariance
from .ensemble import Ensemble
from .ensemblefreqvar import EnsembleFreqVariance
| 1.835938 | 2 |
amrlib/alignments/faa_aligner/faa_aligner.py | plandes/amrlib | 103 | 12765493 | <filename>amrlib/alignments/faa_aligner/faa_aligner.py
import os
import sys
import json
import subprocess
import logging
import tarfile
from .preprocess import preprocess_infer
from .postprocess import postprocess
from .get_alignments import GetAlignments
from ..penman_utils import to_graph_line
from ...defaults import data_dir
logger = logging.getLogger(__name__)
this_dir = os.path.dirname(os.path.realpath(__file__))
class FAA_Aligner(object):
    """Word/AMR aligner backed by the external fast_align (FAA) binaries.

    On construction, extracts the bundled model if needed and verifies the
    fast_align / atools binaries are available.
    """
    def __init__(self, **kwargs):
        self.model_dir = kwargs.get('model_dir', os.path.join(data_dir, 'model_aligner_faa'))
        self.model_tar_fn = kwargs.get('model_tar_fn', os.path.join(this_dir, 'model_aligner_faa.tar.gz'))
        self.setup_model_dir()
        self.aligner = TrainedAligner(self.model_dir, **kwargs)
        try:
            self.aligner.check_for_binaries() # Will raise FileNotFoundError if binaries can't be found
        except FileNotFoundError:
            logger.critical('No binaries for fast_algin (https://github.com/clab/fast_align) found. ' \
                'These must be installed to use the faa_aligner. See the amrlib docs for details.')
            raise

    # Input space_tok_sents is a list of space tokenized strings
    # graph_strings is a list and amr graph strings, the same size.
    def align_sents(self, space_tok_sents, graph_strings):
        """Align sentences to their AMR graphs.

        Returns (amr_surface_aligns, alignment_strings), parallel to the
        inputs; entries whose preprocessed text was empty yield ''.
        """
        assert len(space_tok_sents) == len(graph_strings)
        graph_strings = [to_graph_line(g) for g in graph_strings]
        data = preprocess_infer(space_tok_sents, graph_strings, skip_empty_check=True)
        # Filter lines for empty strings. The aligner doesn't return a value for blanks on either eng or amr
        skips, eng_lines, amr_lines = set(), [], []
        for i, (eng_l, amr_l) in enumerate(zip(data.eng_preproc_lines, data.amr_preproc_lines)):
            eng_l, amr_l = eng_l.strip(), amr_l.strip()
            if not eng_l or not amr_l:
                skips.add(i)
            else:
                eng_lines.append(eng_l)
                amr_lines.append(amr_l)
        model_out_lines = self.aligner.align(eng_lines, amr_lines)
        assert len(model_out_lines) == len(eng_lines)
        # Add back in blanks for skipped lines
        final_astrings = [''] * len(data.eng_preproc_lines)
        for i in range(len(final_astrings)):
            if i not in skips:
                final_astrings[i] = model_out_lines.pop(0)
        data.model_out_lines = final_astrings
        amr_surface_aligns, alignment_strings = postprocess(data)
        return amr_surface_aligns, alignment_strings

    # check the model directory, if it doesn't have the metadata file try to create
    # the directory from the tar.gz file
    def setup_model_dir(self):
        """Ensure the model directory exists; returns True when usable."""
        # Check for the metadata and if so, consider the model ready to go
        if os.path.isfile(os.path.join(self.model_dir, 'amrlib_meta.json')):
            return True
        # if there's a local copy, etract it
        elif os.path.isfile(self.model_tar_fn):
            # NOTE(review): tarfile.extractall on an untrusted archive allows
            # path traversal; the archive here ships with the package.
            tar = tarfile.open(self.model_tar_fn)
            tar.extractall(path=data_dir)
            logger.info('Extracting a local copy of model')
            if os.path.isfile(os.path.join(self.model_dir, 'amrlib_meta.json')):
                return True
            else:
                return False
        else:
            logger.critical('No model in model_dir and no local version available to extract')
            return False
# Code adapted from https://github.com/clab/fast_align/blob/master/src/force_align.py
class TrainedAligner:
    """Drives the fast_align / atools executables over pipes to produce
    symmetrized word alignments from a pretrained model."""
    def __init__(self, model_in_dir, **kwargs):
        # If the bin_dir is not provided, get it from the environment, but default
        # to '' which means it must be in the path
        bin_dir = os.environ.get('FABIN_DIR', '')
        bin_dir = kwargs.get('bin_dir', bin_dir)
        self.fast_align = os.path.join(bin_dir, 'fast_align')
        self.atools = os.path.join(bin_dir, 'atools')
        fwd_params_fn = os.path.join(model_in_dir, 'fwd_params')
        rev_params_fn = os.path.join(model_in_dir, 'rev_params')
        # Get the parameters from the metadata
        with open(os.path.join(model_in_dir, 'amrlib_meta.json')) as f:
            meta = json.load(f)
        p = meta['train_params']
        # timeout the exe to exit
        self.timeout = kwargs.get('timeout', 1.0)
        # Create the actual commands to execute
        # NOTE(review): rev_cmd reuses p['fwd_T'] / p['fwd_m']; confirm there
        # are no separate rev_T / rev_m parameters intended here.
        fwd_cmd = '%s -i - -d -q %f -a %f -T %f -m %f -f %s' % \
            (self.fast_align, p['q'], p['a'], p['fwd_T'], p['fwd_m'], fwd_params_fn)
        rev_cmd = '%s -i - -d -q %f -a %f -T %f -m %f -f %s -r' % \
            (self.fast_align, p['q'], p['a'], p['fwd_T'], p['fwd_m'], rev_params_fn)
        tools_cmd = '%s -i - -j - -c %s' % (self.atools, p['heuristic'])
        self.fwd_cmd = fwd_cmd.split()
        self.rev_cmd = rev_cmd.split()
        self.tools_cmd = tools_cmd.split()

    # Open a connection to the subprocess in text mode
    @staticmethod
    def popen_io(cmd):
        return subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, text=True)

    def align(self, eng_td_lines, amr_td_lines):
        """Run forward and reverse alignment, then symmetrize with atools.

        Returns one alignment string per input line pair.
        """
        # Combine lines into fast align input format
        lines = ['%s ||| %s' % (el, al) for el, al in zip(eng_td_lines, amr_td_lines)]
        # Open connections to the alignment binaries
        self.fwd_align = self.popen_io(self.fwd_cmd)
        self.rev_align = self.popen_io(self.rev_cmd)
        self.tools = self.popen_io(self.tools_cmd)
        # Input to fast_align
        fa_in = '\n'.join([l.strip() for l in lines])
        fwd_out, fwd_err = self.fwd_align.communicate(fa_in, timeout=self.timeout)
        # NOTE(review): 'fwd_err' is overwritten here (should likely be
        # rev_err); neither stderr value is used.
        rev_out, fwd_err = self.rev_align.communicate(fa_in, timeout=self.timeout)
        # output is f words ||| e words ||| links ||| score
        fwd_lines = [l.split('|||')[2].strip() for l in fwd_out.splitlines() if l]
        rev_lines = [l.split('|||')[2].strip() for l in rev_out.splitlines() if l]
        # Input to atools
        # be sure to put a line-feed at the end or you'll get a duplicate line in the output
        at_in = '\n'.join(['%s\n%s' % (fl, rl) for fl, rl in zip(fwd_lines, rev_lines)]) + '\n'
        at_out, at_err = self.tools.communicate(at_in, timeout=self.timeout)
        at_lines = [l.strip() for l in at_out.splitlines()]
        return at_lines

    # This will raise FileNotFoundError if either call fails
    # Note that both commands trigger the help message and will produce a return-code of 1
    # which is typically considered and error
    def check_for_binaries(self):
        ret_fa = subprocess.run(self.fast_align, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        ret_tool = subprocess.run(self.atools, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
| 2.21875 | 2 |
day6/part2/lantern_fish_256.py | kreako/advent-of-code-2021 | 0 | 12765494 | from collections import defaultdict
class Fishes(object):
    """Lantern-fish population tracked as counts bucketed by timer value."""

    def __init__(self, ages):
        # Bucket fish by internal-timer value rather than tracking individuals.
        self.ages = defaultdict(int)
        for timer in ages:
            self.ages[timer] += 1

    def next_generation(self):
        """Advance the simulation one day: timers tick down, zeros spawn."""
        spawning = self.ages[0]
        # Shift every bucket down by one day.
        for timer in range(8):
            self.ages[timer] = self.ages[timer + 1]
        # Parents reset to 6; each offspring starts at 8.
        self.ages[6] += spawning
        self.ages[8] = spawning

    def count(self):
        """Total number of fish alive."""
        return sum(self.ages.values())
if __name__ == "__main__":
little = "3,4,3,1,2"
fishes = Fishes([int(a) for a in little.split(",")])
for _ in range(256):
fishes.next_generation()
print(fishes.count())
with open("../input", "r") as f:
fishes = Fishes([int(a) for a in f.read().split(",")])
for i in range(256):
fishes.next_generation()
print(fishes.count())
| 3.453125 | 3 |
examples/rkhs.py | gautam1858/autograd | 6,119 | 12765495 | <filename>examples/rkhs.py
"""
Inferring a function from a reproducing kernel Hilbert space (RKHS) by taking
gradients of eval with respect to the function-valued argument
"""
from __future__ import print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.extend import primitive, defvjp, defjvp, VSpace, Box
from autograd.util import func
from autograd import grad
class RKHSFun(object):
    """A function in the RKHS, represented as a kernel expansion
    sum_i alpha_i * k(., x_i) with coefficients stored in `alphas`."""
    def __init__(self, kernel, alphas={}):
        # NOTE(review): the mutable default {} is shared across calls; it is
        # never mutated here (only replaced), so this is currently benign.
        self.alphas = alphas
        self.kernel = kernel
        self.vs = RKHSFunVSpace(self)

    @primitive
    def __call__(self, x):
        # Evaluate the kernel expansion at x.
        return sum([a * self.kernel(x, x_repr)
                    for x_repr, a in self.alphas.items()], 0.0)

    def __add__(self, f): return self.vs.add(self, f)
    def __mul__(self, a): return self.vs.scalar_mul(self, a)

# TODO: add vjp of __call__ wrt x (and show it in action)
# The gradient of evaluation-at-x w.r.t. the function is the representer
# k(., x) scaled by the incoming cotangent g.
defvjp(func(RKHSFun.__call__),
       lambda ans, f, x: lambda g: RKHSFun(f.kernel, {x : 1}) * g)

class RKHSFunBox(Box, RKHSFun):
    """autograd Box so RKHSFun values can flow through traced computations."""
    @property
    def kernel(self): return self._value.kernel
RKHSFunBox.register(RKHSFun)

class RKHSFunVSpace(VSpace):
    """Vector-space operations (add, scale, inner product) for RKHSFun."""
    def __init__(self, value):
        self.kernel = value.kernel

    def zeros(self): return RKHSFun(self.kernel)
    def randn(self):
        # These arbitrary vectors are not analogous to randn in any meaningful way
        N = npr.randint(1,3)
        return RKHSFun(self.kernel, dict(zip(npr.randn(N), npr.randn(N))))

    def _add(self, f, g):
        assert f.kernel is g.kernel
        return RKHSFun(f.kernel, add_dicts(f.alphas, g.alphas))

    def _scalar_mul(self, f, a):
        return RKHSFun(f.kernel, {x : a * a_cur for x, a_cur in f.alphas.items()})

    def _inner_prod(self, f, g):
        # The RKHS inner product: sum_ij a_i b_j k(x_i, x_j).
        assert f.kernel is g.kernel
        return sum([a1 * a2 * f.kernel(x1, x2)
                    for x1, a1 in f.alphas.items()
                    for x2, a2 in g.alphas.items()], 0.0)
RKHSFunVSpace.register(RKHSFun)
def add_dicts(d1, d2):
    """Return a new dict mapping each key to the sum of its values in d1/d2.

    Keys present in only one of the dicts keep their value unchanged.
    Inputs are not mutated.

    Bug fix: the original used ``d1.items() + d2.items()``, which raises
    TypeError on Python 3 where dict views do not support ``+``.
    """
    out = dict(d1)
    for k, v in d2.items():
        out[k] = out[k] + v if k in out else v
    return out
if __name__=="__main__":
def sq_exp_kernel(x1, x2): return np.exp(-(x1-x2)**2)
xs = range(5)
ys = [1, 2, 3, 2, 1]
def logprob(f, xs, ys):
return -sum((f(x) - y)**2 for x, y in zip(xs, ys))
f = RKHSFun(sq_exp_kernel)
for i in range(100):
f = f + grad(logprob)(f, xs, ys) * 0.01
for x, y in zip(xs, ys):
print('{}\t{}\t{}'.format(x, y, f(x)))
| 3.046875 | 3 |
Scripts/core/native/animation/__init__.py | velocist/TS4CheatsInfo | 0 | 12765496 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\native\animation\__init__.py
# Compiled at: 2019-04-24 01:24:31
# Size of source mod 2**32: 13189 bytes
from _math import Vector3, Quaternion, Transform
from _resourceman import Key
import collections
from native.animation.arb import NativeArb, BoundaryConditionInfo
import api_config, sims4
logger = sims4.log.Logger('Animation(Native)')
try:
    from _animation import AsmBase
    from _animation import _ASM_ACTORTYPE_INVALID as ASM_ACTORTYPE_INVALID
    from _animation import _ASM_ACTORTYPE_OBJECT as ASM_ACTORTYPE_OBJECT
    from _animation import _ASM_ACTORTYPE_SIM as ASM_ACTORTYPE_SIM
    from _animation import _ASM_ACTORTYPE_PROP as ASM_ACTORTYPE_PROP
    from _animation import _ASM_REQUESTRESULT_SUCCESS as ASM_REQUESTRESULT_SUCCESS
    from _animation import _ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND as ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND
    from _animation import _ASM_REQUESTRESULT_TARGET_JUMPED_TO_TARGET_STATE as ASM_REQUESTRESULT_TARGET_JUMPED_TO_TARGET_STATE
except:
    # Native _animation module unavailable (e.g. running outside the game
    # client): fall back to the known constant values.
    ASM_REQUESTRESULT_SUCCESS = 0
    ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND = 1
    ASM_REQUESTRESULT_TARGET_JUMPED_TO_TARGET_STATE = 2
    class AsmBase:
        # Pure-python fallback for the native AsmBase when _animation cannot
        # be imported: every operation is a no-op reporting failure.

        def __init__(self, key):
            pass

        def _request(self, to_state, arb, request_id=0, interrupt=False):
            return ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND

        def _traverse(self, from_state, to_state, arb, request_id=0, from_boundary_conditions=False):
            return False

        def _set_actor(self, actor_name, actor_id, suffix):
            return False

        def _clear_actor(self, actor_name):
            return False

        def _add_virtual_actor(self, actor_name, actor_id, suffix):
            return False

        def _remove_virtual_actor(self, actor_name, actor_id, suffix):
            return False

        def _set_parameter(self, parameter_name, value):
            return False

        def _set_actor_parameter(self, actor_name, actor_id, parameter_name, value):
            return False

        def _set_single_actor_parameter_if_possible(self, actor_name, parameter_name, value):
            return False

        def _add_actor_instance_namespace_override(self, actor_name, actor_id, actor_suffix, namespace, target_id, target_suffix):
            return False

        def _enter(self):
            return False

        def _exit(self, arb, request_id=0):
            return ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND

        def _schedule_exit_content(self, arb):
            pass

        def _set_current_state(self, state_name):
            return False

        def _get_supported_postures_for_actor(self, actor_name):
            return False

        def _get_resource_key_for_actor(self, actor_name):
            return False

        def _get_props_in_traversal(self, from_state, to_state):
            return False

        def _get_actor_definition(self, actor_name):
            pass
class NativeAsm(AsmBase):
    """Python-side convenience wrapper around the native ASM (animation
    state machine) primitives inherited from AsmBase."""
    _BASE_ROOT_STRING = 'b__subroot__'

    class ActorDescription(collections.namedtuple('_ActorDescription', ('actor_name', 'actor_name_hash', 'actor_type',
        'is_master', 'is_virtual', 'prop_resource_key'))):
        # NOTE(review): 'slots' is likely a decompiler artifact of '__slots__'.
        slots = []

    def set_actor(self, name, actor, rig_key=None, suffix=None):
        # Passing actor=None clears the slot instead.
        # NOTE(review): rig_key is accepted but unused here.
        if actor is not None:
            return self._set_actor(name, actor.id, suffix)
        return self._clear_actor(name)

    def set_reaction_actor(self, name):
        return self._set_reaction_actor(name)

    def add_virtual_actor(self, name, actor, suffix=None):
        return self._add_virtual_actor(name, actor.id, suffix)

    def remove_virtual_actor(self, name, actor, suffix=None):
        return self._remove_virtual_actor(name, actor.id, suffix)

    def get_actor_name(self):
        return '<unknown>'

    def set_parameter(self, parameter, value):
        return self._set_parameter(parameter, value)

    def set_actor_parameter(self, actor, instance, parameter, value, suffix=None):
        return self._set_actor_parameter(actor, instance.id, parameter, value, suffix)

    def specialize_virtual_actor_relationship(self, actor_name, actor, actor_suffix, namespace, target, target_suffix):
        return self._add_actor_instance_namespace_override(actor_name, actor.id, actor_suffix, namespace, target.id, target_suffix)

    def request(self, state_name, arb_instance, request_id=0, interrupt=False):
        return self._request(state_name, arb_instance, request_id, interrupt)

    def traverse(self, from_state_name, to_state_name, arb_instance, request_id=0, from_boundary_conditions=False):
        return self._traverse(from_state_name, to_state_name, arb_instance, request_id, from_boundary_conditions)

    def set_current_state(self, state_name):
        self._set_current_state(state_name)

    def get_supported_postures_for_actor(self, actor_name):
        # Merge actor-specific postures with the ASM-wide defaults
        # (queried via actor_name=None).
        postures_actor = self._get_supported_postures_for_actor(actor_name)
        postures_default = self._get_supported_postures_for_actor(None)
        if postures_default is not None:
            if postures_actor is not None:
                combined_postures = set(postures_actor)
                combined_postures.update(postures_default)
                return combined_postures
            return postures_default
        return postures_actor

    def get_resource_key_for_actor(self, actor_name):
        return self._get_resource_key_for_actor(actor_name)

    def get_props_in_traversal(self, from_state, to_state):
        return self._get_props_in_traversal(from_state, to_state)

    def get_actor_definition(self, actor_name):
        # Returns None when the native layer has no definition for the actor.
        description_args = self._get_actor_definition(actor_name)
        if not description_args:
            return
        return (self.ActorDescription)(*description_args)

    def enter(self):
        self._enter()

    def exit(self, arb_instance, request_id=0):
        return self._exit(arb_instance, request_id)

    def schedule_exit_content(self, arb_instance):
        return self._schedule_exit_content(arb_instance)

    def set_param_sequence(self, param_dict):
        # Keys may be plain parameter names or (parameter, actor_name) tuples.
        if param_dict is not None:
            for key, value in param_dict.items():
                if isinstance(key, tuple):
                    param = key[0]
                    actor = key[1]
                    if actor is not None:
                        self._set_single_actor_parameter_if_possible(actor, param, value)
                    else:
                        self.set_parameter(param, value)
                else:
                    self.set_parameter(key, value)

    def get_initial_offset(self, actor, to_state_name, from_state_name='entry'):
        # Build a throwaway ARB from boundary conditions to read the offset.
        arb = NativeArb()
        self.traverse(from_state_name, to_state_name, arb, from_boundary_conditions=True)
        offset = arb.get_initial_offset(actor)
        return Transform(Vector3(*offset[0]), Quaternion(*offset[1]))

    def get_boundary_conditions(self, actor, to_state_name, from_state_name='entry'):
        arb = NativeArb()
        self.traverse(from_state_name, to_state_name, arb, from_boundary_conditions=True)
        return arb.get_boundary_conditions(actor)


# Public alias used by the rest of the codebase.
Asm = NativeAsm
def get_joint_transform_from_rig(rig_key, joint_name):
    """Return the transform of *joint_name* from the rig resource *rig_key*.

    Logs and re-raises any failure from the native lookup. The original
    decompiler-artifact nested try/finally (``exe = None; del exe``) is
    replaced by a plain log-and-reraise with identical behavior.
    """
    import _animation
    try:
        return _animation.get_joint_transform_from_rig(rig_key, joint_name)
    except Exception:
        logger.error('Failed to get transform from rig: {}, {}'.format(rig_key, joint_name))
        raise
def get_joint_name_for_hash_from_rig(rig_key, joint_name_hash):
    # Thin wrapper over the native _animation call of the same name.
    import _animation
    return _animation.get_joint_name_for_hash_from_rig(rig_key, joint_name_hash)

def get_joint_name_for_index_from_rig(rig_key, joint_index):
    # Thin wrapper over the native _animation call of the same name.
    import _animation
    return _animation.get_joint_name_for_index_from_rig(rig_key, joint_index)

def get_mirrored_joint_name_hash(rig_key, joint_name):
    # Thin wrapper over the native _animation call of the same name.
    import _animation
    return _animation.get_mirrored_joint_name_hash(rig_key, joint_name)

def update_post_condition_arb(post_condition, content):
    # Thin wrapper over the native _animation call of the same name.
    import _animation
    return _animation.update_post_condition_arb(post_condition, content)

def enable_native_reaction_event_handling(enabled):
    # Thin wrapper over the native _animation call of the same name.
    import _animation
    return _animation.enable_native_reaction_event_handling(enabled)
adventofcode/2020/10/b.py | nevivurn/cp | 0 | 12765497 | #!/usr/bin/env python3
import sys

# AoC 2020 day 10 part 2: count valid adapter arrangements.
# Sort all adapter joltages, with the outlet (0) prepended and the device
# (max + 3) appended.
nums = list(sorted([0] + [int(line.rstrip()) for line in sys.stdin]))
nums.append(nums[-1] + 3)

# dp[i] = number of distinct ways to reach adapter i from the outlet.
dp = [1]
while len(dp) < len(nums):
    cur = len(dp)
    i = cur-1
    cum = 0
    # Sum ways over every predecessor within a 3-jolt reach.
    while i >= 0 and nums[cur]-nums[i] <= 3:
        cum += dp[i]
        i -= 1
    dp.append(cum)

print(dp[-1])
| 3.09375 | 3 |
usenester.py | ColorPenBoy/PythonClass | 0 | 12765498 | import colorpennester
print ("------------------我是分割线-------------------")
movies = ["The Holy Grail", 1975, "<NAME> & <NAME>", 91, ["<NAME>", ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]]]
# 调用函数
# 函数前面需要加上命名空间 -> 名字与包同名
colorpennester.printListMethod(movies,True,0)
| 3.3125 | 3 |
Chapter01/concurrent_image.py | ibiscum/Learning-Concurrency-in-Python | 0 | 12765499 | <filename>Chapter01/concurrent_image.py
""" Learning Concurrency in Python - Chapter 01 - concurrent image """
import threading
import urllib.request
import time
def download_image(image_path, file_name):
    """ Download image.

    Fetches *image_path* over HTTP and writes it to *file_name*
    (network side effect; blocks until the transfer completes).
    """
    print("Downloading Image from ", image_path)
    urllib.request.urlretrieve(image_path, file_name)
    print("Completed Download")
def execute_thread(i):
    """Worker body: download one sample image into temp/, named by index *i*."""
    destination = f"temp/image-{i}.jpg"
    download_image("http://lorempixel.com/400/200/sports", destination)
def main():
    """ Concurrent image download.

    Spawns 10 threads, each downloading one image, and reports the total
    wall-clock time once all have joined.
    """
    t_0 = time.time()

    # create an array which will store a reference to
    # all of our threads
    threads = []

    # create 10 threads, append them to our array of threads
    # and start them off
    for i in range(10):
        thread = threading.Thread(target=execute_thread, args=(i,))
        threads.append(thread)
        thread.start()

    # ensure that all the threads in our array have completed
    # their execution before we log the total time to complete
    # NOTE(review): 'i' here is the thread object, shadowing the integer
    # loop variable above; consider renaming.
    for i in threads:
        i.join()

    # calculate the total execution time
    t_1 = time.time()
    total_time = t_1 - t_0
    print(f"Total Execution Time {total_time}")


if __name__ == '__main__':
    main()
| 3.953125 | 4 |
phresh/backend/app/db/tasks.py | khayweee/docker-fastapi-postgres | 0 | 12765500 | <gh_stars>0
"""
To Establish database connection and handle
any additional configurations
"""
from fastapi import FastAPI
from databases import Database
from app.core.config import DATABASE_URL
import logging
import os
logger = logging.getLogger(__name__)
async def connect_to_db(app: FastAPI) -> None:
    """Create the database connection pool and attach it to ``app.state._db``.

    A separate "<DATABASE_URL>_test" database is used when the TESTING
    environment variable is set (e.g. under pytest). Connection failures
    are logged but not re-raised.
    """
    DB_URL = f"{DATABASE_URL}_test" if os.environ.get("TESTING") else DATABASE_URL
    database = Database(DB_URL, min_size=2, max_size=10)  # these can be configured in config as well

    try:
        await database.connect()
        app.state._db = database
        logger.info("Successfully loaded DB")
    except Exception as e:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning("--- DB CONNECTION ERROR ---")
        logger.warning(e)
        logger.warning("--- DB CONNECTION ERROR ---")
async def close_db_connection(app: FastAPI) -> None:
    """Disconnect the pool stored on ``app.state._db``; failures are logged
    but not re-raised."""
    try:
        await app.state._db.disconnect()
    except Exception as e:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning("--- DB DISCONNECT ERROR ---")
        logger.warning(e)
        logger.warning("--- DB DISCONNECT ERROR ---")
| 2.703125 | 3 |
hive/utils/loggers.py | chandar-lab/RLHive | 81 | 12765501 | import abc
import copy
import os
from typing import List
import torch
import wandb
from hive.utils.registry import Registrable, registry
from hive.utils.schedule import ConstantSchedule, Schedule, get_schedule
from hive.utils.utils import Chomp, create_folder
class Logger(abc.ABC, Registrable):
    """Abstract class for logging in hive."""

    def __init__(self, timescales=None):
        """Constructor for base Logger class. Every Logger must call this constructor
        in its own constructor

        Args:
            timescales (str | list(str)): The different timescales at which logger
                needs to log. If only logging at one timescale, it is acceptable to
                only pass a string.

        Raises:
            ValueError: if timescales is neither None, a str, nor a list.
        """
        if timescales is None:
            self._timescales = []
        elif isinstance(timescales, str):
            # A single timescale may be passed as a bare string.
            self._timescales = [timescales]
        elif isinstance(timescales, list):
            self._timescales = timescales
        else:
            raise ValueError("Need string or list of strings for timescales")

    def register_timescale(self, timescale):
        """Register a new timescale with the logger.

        Args:
            timescale (str): Timescale to register.
        """
        # Note: duplicates are not checked; registering twice adds twice.
        self._timescales.append(timescale)

    @abc.abstractmethod
    def log_config(self, config):
        """Log the config.

        Args:
            config (dict): Config parameters.
        """
        pass

    @abc.abstractmethod
    def log_scalar(self, name, value, prefix):
        """Log a scalar variable.

        Args:
            name (str): Name of the metric to be logged.
            value (float): Value to be logged.
            prefix (str): Prefix to append to metric name.
        """
        pass

    @abc.abstractmethod
    def log_metrics(self, metrics, prefix):
        """Log a dictionary of values.

        Args:
            metrics (dict): Dictionary of metrics to be logged.
            prefix (str): Prefix to append to metric name.
        """
        pass

    @abc.abstractmethod
    def save(self, dir_name):
        """Saves the current state of the log files.

        Args:
            dir_name (str): Name of the directory to save the log files.
        """
        pass

    @abc.abstractmethod
    def load(self, dir_name):
        """Loads the log files from given directory.

        Args:
            dir_name (str): Name of the directory to load the log file from.
        """
        pass

    @classmethod
    def type_name(cls):
        # Key used by the hive Registrable machinery for this class family.
        return "logger"
class ScheduledLogger(Logger):
    """Abstract class that manages a schedule for logging.

    The update_step method should be called for each step in the loop to update
    the logger's schedule. The should_log method can be used to check whether
    the logger should log anything.

    This schedule is not strictly enforced! It is still possible to log something
    even if should_log returns false. These functions are just for the purpose
    of convenience.
    """

    def __init__(self, timescales=None, logger_schedules=None):
        """
        Any timescales not assigned schedule from logger_schedules will be assigned
        a ConstantSchedule(True).

        Args:
            timescales (str|list[str]): The different timescales at which logger needs
                to log. If only logging at one timescale, it is acceptable to only pass
                a string.
            logger_schedules (Schedule|list|dict): Schedules used to keep track of when
                to log. If a single schedule, it is copied for each timescale. If a
                list of schedules, the schedules are matched up in order with the list
                of timescales provided. If a dictionary, the keys should be the
                timescale and the values should be the schedule.
        """
        super().__init__(timescales)
        if logger_schedules is None:
            logger_schedules = ConstantSchedule(True)
        if isinstance(logger_schedules, dict):
            self._logger_schedules = logger_schedules
        elif isinstance(logger_schedules, list):
            # Pair schedules with timescales positionally; extra entries on
            # either side are silently ignored (defaults fill the gaps below).
            self._logger_schedules = {
                self._timescales[idx]: logger_schedules[idx]
                for idx in range(min(len(logger_schedules), len(self._timescales)))
            }
        elif isinstance(logger_schedules, Schedule):
            # deepcopy so each timescale advances its schedule independently.
            self._logger_schedules = {
                timescale: copy.deepcopy(logger_schedules)
                for timescale in self._timescales
            }
        else:
            raise ValueError(
                "logger_schedule must be a dict, list of Schedules, or Schedule object"
            )
        # Schedules may also be given as config dicts ({"name", "kwargs"});
        # materialize those into real Schedule objects before filling defaults.
        for timescale, schedule in self._logger_schedules.items():
            if isinstance(schedule, dict):
                self._logger_schedules[timescale] = get_schedule(
                    schedule["name"], schedule["kwargs"]
                )
        # Any timescale without an explicit schedule always logs.
        for timescale in self._timescales:
            if timescale not in self._logger_schedules:
                self._logger_schedules[timescale] = ConstantSchedule(True)
        # Per-timescale step counters, advanced by update_step().
        self._steps = {timescale: 0 for timescale in self._timescales}

    def register_timescale(self, timescale, schedule=None):
        """Register a new timescale.

        Args:
            timescale (str): Timescale to register.
            schedule (Schedule): Schedule to use for this timescale. Defaults
                to a ConstantSchedule(True) (always log).
        """
        super().register_timescale(timescale)
        if schedule is None:
            schedule = ConstantSchedule(True)
        self._logger_schedules[timescale] = schedule
        self._steps[timescale] = 0

    def update_step(self, timescale):
        """Update the step and schedule for a given timescale.

        Args:
            timescale (str): A registered timescale.

        Returns:
            Whether the logger should log at this timescale now.
        """
        self._steps[timescale] += 1
        self._logger_schedules[timescale].update()
        return self.should_log(timescale)

    def should_log(self, timescale):
        """Check if you should log for a given timescale.

        Args:
            timescale (str): A registered timescale.
        """
        return self._logger_schedules[timescale].get_value()

    def save(self, dir_name):
        # Persist timescales, schedules and step counters so a run can resume.
        logger_state = Chomp()
        logger_state.timescales = self._timescales
        logger_state.schedules = self._logger_schedules
        logger_state.steps = self._steps
        logger_state.save(os.path.join(dir_name, "logger_state.p"))

    def load(self, dir_name):
        # Restore the state written by save(); overwrites in-memory values.
        logger_state = Chomp()
        logger_state.load(os.path.join(dir_name, "logger_state.p"))
        self._timescales = logger_state.timescales
        self._logger_schedules = logger_state.schedules
        self._steps = logger_state.steps
class NullLogger(ScheduledLogger):
    """Logger that drops everything on the floor.

    Useful when a component requires a logger but no output is wanted;
    schedules and steps are still tracked, nothing is ever written.
    """

    def __init__(self, timescales=None, logger_schedules=None):
        """See :class:`ScheduledLogger` for the arguments."""
        super().__init__(timescales, logger_schedules)

    def log_config(self, config):
        """Discard the config."""

    def log_scalar(self, name, value, timescale):
        """Discard the scalar."""

    def log_metrics(self, metrics, timescale):
        """Discard the metrics."""

    def save(self, dir_name):
        """Persist nothing (overrides ScheduledLogger.save)."""

    def load(self, dir_name):
        """Restore nothing (overrides ScheduledLogger.load)."""
class WandbLogger(ScheduledLogger):
    """A Wandb logger.

    This logger can be used to log to wandb. It assumes that wandb is configured
    locally on your system. Multiple timescales/loggers can be implemented by
    instantiating multiple loggers with different logger_names. These should still
    have the same project and run names.

    Check the wandb documentation for more details on the parameters.
    """

    def __init__(
        self,
        timescales=None,
        logger_schedules=None,
        project=None,
        name=None,
        dir=None,
        mode=None,
        id=None,
        resume=None,
        start_method=None,
        **kwargs,
    ):
        """
        Args:
            timescales (str|list[str]): The different timescales at which logger needs
                to log. If only logging at one timescale, it is acceptable to only pass
                a string.
            logger_schedules (Schedule|list|dict): Schedules used to keep track of when
                to log. If a single schedule, it is copied for each timescale. If a
                list of schedules, the schedules are matched up in order with the list
                of timescales provided. If a dictionary, the keys should be the
                timescale and the values should be the schedule.
            project (str): Name of the project. Wandb's dash groups all runs with
                the same project name together.
            name (str): Name of the run. Used to identify the run on the wandb dash.
            dir (str): Local directory where wandb saves logs.
            mode (str): The mode of logging. Can be "online", "offline" or "disabled".
                In offline mode, writes all data to disk for later syncing to a server,
                while in disabled mode, it makes all calls to wandb api's noop's, while
                maintaining core functionality.
            id (str, optional): A unique ID for this run, used for resuming.
                It must be unique in the project, and if you delete a run you can't
                reuse the ID.
            resume (bool, str, optional): Sets the resuming behavior.
                Options are the same as mentioned in Wandb's doc.
            start_method (str): The start method to use for wandb's process. See
                https://docs.wandb.ai/guides/track/launch#init-start-error.
            **kwargs: You can pass any other arguments to wandb's init method as
                keyword arguments. Note, these arguments can't be overriden from the
                command line.
        """
        super().__init__(timescales, logger_schedules)
        settings = None
        if start_method is not None:
            settings = wandb.Settings(start_method=start_method)
        wandb.init(
            project=project,
            name=name,
            dir=dir,
            mode=mode,
            id=id,
            resume=resume,
            settings=settings,
            **kwargs,
        )

    def log_config(self, config):
        """Log the config to wandb.

        List-valued parameters are converted to index-keyed nested dicts
        (wandb's config view handles dicts better than lists).

        Fix: the conversion is now done on a new dict instead of mutating
        the caller's ``config`` in place.
        """
        converted = {
            key: (
                {idx: param for idx, param in enumerate(val)}
                if isinstance(val, list)
                else val
            )
            for key, val in config.items()
        }
        wandb.config.update(converted)

    def log_scalar(self, name, value, prefix):
        """Log one scalar plus the current step of every timescale."""
        metrics = {f"{prefix}/{name}": value}
        metrics.update(
            {
                f"{timescale}_step": self._steps[timescale]
                for timescale in self._timescales
            }
        )
        wandb.log(metrics)

    def log_metrics(self, metrics, prefix):
        """Log a dict of metrics plus the current step of every timescale."""
        metrics = {f"{prefix}/{name}": value for (name, value) in metrics.items()}
        metrics.update(
            {
                f"{timescale}_step": self._steps[timescale]
                for timescale in self._timescales
            }
        )
        wandb.log(metrics)
class ChompLogger(ScheduledLogger):
    """This logger uses the Chomp data structure to store all logged values which
    are then directly saved to disk.
    """

    def __init__(self, timescales=None, logger_schedules=None):
        super().__init__(timescales, logger_schedules)
        self._log_data = Chomp()

    def log_config(self, config):
        """Store the run config under the "config" key."""
        self._log_data["config"] = config

    def log_scalar(self, name, value, prefix):
        """Append one value together with the current step of every timescale.

        Each metric is stored as ``[values, steps]`` parallel lists under the
        key ``"{prefix}/{name}"``; torch tensors are unwrapped to Python
        scalars before storage.
        """
        metric_name = f"{prefix}/{name}"
        if metric_name not in self._log_data:
            self._log_data[metric_name] = [[], []]
        if isinstance(value, torch.Tensor):
            value = value.item()
        self._log_data[metric_name][0].append(value)
        self._log_data[metric_name][1].append(
            {timescale: self._steps[timescale] for timescale in self._timescales}
        )

    def log_metrics(self, metrics, prefix):
        """Append every metric in the dict.

        Delegates to :meth:`log_scalar` so both entry points share a single
        storage path (previously the logic was duplicated here).
        """
        for name, value in metrics.items():
            self.log_scalar(name, value, prefix)

    def save(self, dir_name):
        """Save scheduler state and the accumulated log data."""
        super().save(dir_name)
        self._log_data.save(os.path.join(dir_name, "log_data.p"))

    def load(self, dir_name):
        """Load scheduler state and previously accumulated log data."""
        super().load(dir_name)
        self._log_data.load(os.path.join(dir_name, "log_data.p"))
class CompositeLogger(Logger):
    """Fan-out logger that forwards every call to a list of sub-loggers.

    Lets callers use several loggers through a single handle. Timescale
    updates are forwarded to every component logger; log calls go to every
    component as well (a component that is a ScheduledLogger may still
    decide internally not to write for the timescale).
    """

    def __init__(self, logger_list: List[Logger]):
        super().__init__([])
        self._logger_list = logger_list

    def register_timescale(self, timescale, schedule=None):
        """Register the timescale with every sub-logger (with the schedule
        only for ScheduledLoggers, which accept one)."""
        for sub in self._logger_list:
            if isinstance(sub, ScheduledLogger):
                sub.register_timescale(timescale, schedule)
            else:
                sub.register_timescale(timescale)

    def log_config(self, config):
        """Forward the config to every sub-logger."""
        for sub in self._logger_list:
            sub.log_config(config)

    def log_scalar(self, name, value, prefix):
        """Forward the scalar to every sub-logger."""
        for sub in self._logger_list:
            sub.log_scalar(name, value, prefix)

    def log_metrics(self, metrics, prefix):
        """Forward the metrics dict to every sub-logger."""
        for sub in self._logger_list:
            sub.log_metrics(metrics, prefix=prefix)

    def update_step(self, timescale):
        """Update the step and schedule for a given timescale for every
        ScheduledLogger.

        Args:
            timescale (str): A registered timescale.
        """
        for sub in self._logger_list:
            if isinstance(sub, ScheduledLogger):
                sub.update_step(timescale)
        return self.should_log(timescale)

    def should_log(self, timescale):
        """Check if you should log for a given timescale. True when any
        sub-logger is unscheduled or is scheduled to log now.

        Args:
            timescale (str): A registered timescale.
        """
        return any(
            not isinstance(sub, ScheduledLogger) or sub.should_log(timescale)
            for sub in self._logger_list
        )

    def save(self, dir_name):
        """Save every sub-logger into its own ``logger_<idx>`` subfolder."""
        for idx, sub in enumerate(self._logger_list):
            sub_dir = os.path.join(dir_name, f"logger_{idx}")
            create_folder(sub_dir)
            sub.save(sub_dir)

    def load(self, dir_name):
        """Load every sub-logger from its ``logger_<idx>`` subfolder."""
        for idx, sub in enumerate(self._logger_list):
            sub.load(os.path.join(dir_name, f"logger_{idx}"))
# Register all concrete loggers with hive's global registry so they can be
# constructed from config by name.
registry.register_all(
    Logger,
    {
        "NullLogger": NullLogger,
        "WandbLogger": WandbLogger,
        "ChompLogger": ChompLogger,
        "CompositeLogger": CompositeLogger,
    },
)
# Convenience alias: ``get_logger`` == ``registry.get_logger`` (name derived
# from Logger.type_name()).
get_logger = getattr(registry, f"get_{Logger.type_name()}")
| 2.625 | 3 |
roomfinder_router/roomfinder_router/router.py | GuillaumeMorini/roomfinder | 14 | 12765502 | #!/usr/bin/env python2.7
import pika, os, sys, json, requests, datetime
import base64, urllib, unicodedata, re
import os, json, requests, sys, re, base64
try:
from BeautifulSoup import BeautifulSoup
from HTMLParser import HTMLParser
except ImportError:
from bs4 import BeautifulSoup
from html.parser import HTMLParser
def guest(firstName,lastName,email):
base_url="https://internet.cisco.com"
uri1=":8443/sponsorportal/LoginCheck.action"
uri2=":8443/sponsorportal/guest_accounts/AddGuestAccount.action"
user=dir_user
pwd=dir_<PASSWORD>
s = requests.Session()
r=s.get(base_url)
# print(r.text)
parsed_html = BeautifulSoup(r.text)
token=parsed_html.body.find('input', attrs={'id':'FORM_SECURITY_TOKEN'}).get("value")
print "Token: "+str(token)
# token="<PASSWORD>"
try:
response1 = s.post(
url=base_url+uri1,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30",
"Referer": "https://internet.cisco.com:8443/sponsorportal/Logout.action",
"Origin": "https://internet.cisco.com:8443",
# "Cookie": "JSESSIONID=B640D7B33CCDFAE3E1FEF98048303FB9",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
},
data={
"sponsorUser.loginUsername": user,
"FORM_SECURITY_TOKEN": token,
"L_T": "",
"sponsorUser.password": <PASSWORD>,
},
)
date=datetime.datetime.now().strftime("%m/%d/%Y")
startTime=datetime.datetime.now().strftime("T%I:%M:%S")
try:
response2 = s.post(
url=base_url+uri2,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30",
"Referer": "https://internet.cisco.com:8443/sponsorportal/guest_accounts/GetAddAccountPage.action",
"Origin": "https://internet.cisco.com:8443",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
},
data={
"guestUser.groupRole.id": "2f15d2c0-be86-11e1-ba69-0050568e002b",
"guestUser.endDate": date,
"guestUser.startTime": startTime,
"serverDate": date,
"serverTime": datetime.datetime.utcnow().strftime("%I:%M"),
"guestUser.endLimit": "7",
"guestUser.timezone": "GMT++02:00+Europe/Paris",
"FORM_SECURITY_TOKEN": token,
"guestUser.timeProfileDuration": "86400000",
"guestUser.notifyByEmail": "true",
"guestUser.endTime": startTime,
"guestUser.startLimit": "30",
"guestUser.timeProfile": "Valid_for_1_Day_after_initial_login",
"guestUser.languageNotification": "English",
"guestUser.firstName": firstName,
"guestUser.timeProfileType": "FromFirstLogin",
"guestUser.company": "Roomfinder",
"emailMandatory": "true",
"guestUser.startDate": date,
"guestUser.emailAddress": email,
"guestUser.lastName": lastName,
"nameToUse": "",
},
)
return "Guest account created"
except requests.exceptions.RequestException:
print('HTTP Request failed')
except requests.exceptions.RequestException:
print('HTTP Request failed')
return "Error during guest account creation"
def find_dir(cco):
    """Look up a person in the Cisco LDAP directory by CCO id or by name.

    Returns either a semicolon-separated HTML snippet (name;title;manager;
    phones;base64 photo;link) for a single match, a "did you mean" list for
    multiple matches, or a plain error string.
    """
    # Timeout for requests get and post
    TIMEOUT=1
    headers = {}
    headers["Content-type"] = "application/json; charset=utf-8"
    # First URL searches by cn (CCO id), second by description (display name).
    dir_server="https://collab-tme.cisco.com/ldap/?have=cn&want=cn,description,manager,telephonenumber,mobile,title,co,ciscoitbuilding,postofficebox&values="
    dir_server_2="https://collab-tme.cisco.com/ldap/?have=description&want=cn,description,manager,telephonenumber,mobile,title,co,ciscoitbuilding,postofficebox&values="
    dir_detail_server="http://wwwin-tools.cisco.com/dir/reports/"
    pic_server="http://wwwin.cisco.com/dir/photo/zoom/"
    index=cco
    cco=cco.encode('utf-8')
    sys.stderr.write('cco: '+str(cco)+'\n')
    # Wildcard search on the CCO id first.
    u = dir_server + urllib.pathname2url('*'+cco+'*')
    try:
        r=requests.get(u, timeout=TIMEOUT)
    except requests.exceptions.RequestException as e:
        sys.stderr.write('Timeout looking for exact CCO. Exception: '+str(e)+'\n')
        return 'Timeout looking for exact CCO\n'
    reply=r.json()
    if 'responseCode' not in reply :
        sys.stderr.write('no responseCode looking for exact CCO\n')
        return 'no responseCode looking for exact CCO\n'
    if reply["responseCode"] != 0:
        # No CCO match: fall back to a name (description) search.
        sys.stderr.write("Exact CCO not found !\n")
        u = dir_server_2 + urllib.pathname2url('*'+cco+'*')
        try:
            r=requests.get(u, timeout=TIMEOUT)
        except requests.exceptions.RequestException as e:
            sys.stderr.write('Timeout looking for name. Exception: '+str(e)+'\n')
            return 'Timeout looking for name\n'
        reply=r.json()
        if 'responseCode' not in reply or reply["responseCode"] != 0:
            if reply["responseCode"] == 2:
                sys.stderr.write('Nobody found with this name or CCO in the directory\n')
                return 'Nobody found with this name or CCO in the directory\n'
            else:
                sys.stderr.write('Connection error to the directory\n')
                return 'Connection error to the directory\n'
        else:
            # Name search keys results by name; copy the key into "description".
            for r in reply["results"]:
                index=r
                reply["results"][r]["description"]=index
    else:
        # CCO search keys results by CCO; copy the key into "cn".
        sys.stderr.write("Exact CCO found !\n")
        for r in reply["results"]:
            reply["results"][r]["cn"]=r
    if "results" not in reply :
        sys.stderr.write('no results\n')
        return 'no results\n'
    l=reply["results"]
    sys.stderr.write(str(l)+"\n")
    if cco not in l:
        if len(l) > 1:
            # Several candidates: return a disambiguation list instead of a card.
            sys.stderr.write("List of CCO found !\n")
            sys.stderr.write("reply: "+str(l)+"\n")
            txt="Are you looking for one of these people:"
            for e in l:
                cco=l[e]["cn"]
                name=l[e]["description"]
                txt+="\n * "+str(name.encode('utf-8'))+" ("+str(cco)+")"
            return txt
        else:
            sys.stderr.write("One person found !\n")
            index=list(l.keys())[0]
            cco=l[index]["cn"]
            #reply["results"][index]["description"]=index
    else:
        index=cco
    sys.stderr.write("cco2: "+str(cco)+"\n")
    sys.stderr.write("index: "+str(index.encode('utf-8'))+"\n")
    # Add picture URL from picture server
    reply["results"][index]["pic"]=pic_server+cco+".jpg"
    response = requests.get(reply["results"][index]["pic"], stream=True)
    encoded_string = base64.b64encode(response.raw.read())
    # Add Link to directory
    reply["results"][index]["link"]=dir_detail_server+cco
    # Add manager name (manager field is a DN like "cn=<cco>,ou=...").
    if "manager" in reply["results"][index] and reply["results"][index]["manager"] is not None:
        sys.stderr.write("Manager found\n")
        manager_cco=reply["results"][index]["manager"].split(',')[0].split('=')[1]
        u = dir_server + manager_cco
        try:
            r=requests.get(u, timeout=TIMEOUT)
        except requests.exceptions.RequestException:
            sys.stderr.write('Timeout looking for manager name\n')
            return 'Timeout looking for manager name\n'
        reply2=r.json()
        if 'responseCode' not in reply2 or reply2["responseCode"] != 0 or "results" not in reply2 :
            sys.stderr.write('no responseCode or results looking for manager name\n')
            return 'no responseCode or results looking for manager name\n'
        sys.stderr.write("Manager: "+str(reply2["results"])+"\n")
        reply["results"][index]["manager_name"]=reply2["results"][manager_cco]["description"]+" ("+manager_cco+")"
    else:
        sys.stderr.write("Manager not found\n")
        # No manager
        pass
    # Add building (postofficebox looks like "<building>/<rest>" -- TODO confirm).
    location=reply["results"][index]["postofficebox"]
    if location is None:
        sys.stderr.write("Location not found\n")
        reply["results"][index]["building"]="None"
    else:
        sys.stderr.write("Location found\n")
        end = location.find('/')
        if end > 0 :
            reply["results"][index]["building"]=location[0:end]
        else:
            reply["results"][index]["building"]="None"
    sys.stderr.write("reply: "+str(reply["results"])+"\n")
    # Assemble the semicolon-separated HTML card consumed by the bot frontend.
    text = ""
    if reply["results"][index]["description"] != None:
        text+=reply["results"][index]["description"]+"<br>;"
    else:
        text+=";"
    if reply["results"][index]["title"] != None:
        text+=reply["results"][index]["title"]+"<br>;"
    else:
        text+=";"
    if reply["results"][index]["manager"] != None and "manager_name" in reply["results"][index]:
        text+=reply["results"][index]["manager_name"]+"<br>;"
    else:
        text+=";"
    phone_text=""
    if reply["results"][index]["telephonenumber"] != None:
        phone_text+="<b>Work</b>: "+reply["results"][index]["telephonenumber"]+"<br>"
    if reply["results"][index]["mobile"] != None:
        phone_text+="<b>Mobile</b>: "+reply["results"][index]["mobile"]+"<br>"
    text+=phone_text+";"+encoded_string+";"+"<b>Internal directory</b>: <a href=\"http://wwwin-tools.cisco.com/dir/details/"+reply["results"][index]["cn"]+"\">link</a>"
    return text.encode('utf-8')
def building(name):
    """Find Cisco buildings matching ``name`` (Python 2).

    Downloads the global building catalog (behind SSO), then substring-matches
    the upper-cased query against country, state, city, campus, building id
    and building name.

    Returns:
        str: A "did you mean" list (max 15 hits) or an error message.
    """
    name=name.upper()
    sys.stderr.write("Looking for building with name: "+name+"\n")
    url="http://wwwin.cisco.com/c/dam/cec/organizations/gbs/wpr/serverBuildingOnlineDetail.txt"
    try:
        s = requests.Session()
        r=s.get(url)
        # The first GET redirects to SSO; authenticate with the directory
        # credentials to obtain the JSON catalog.
        headers={'Content-type': 'application/x-www-form-urlencoded'}
        # NOTE(review): restored from a redacted placeholder; assumes the SSO
        # password is the directory password configured at startup -- confirm.
        data="userid="+dir_user+"&password="+dir_pass+"&target=&login-button=Log+In&login-button-login=Next&login-button-login=Log+in"
        response=s.post(sso_url,data,headers)
        if response.status_code==200:
            if response.text:
                buildings=json.loads(response.text)
                found=[]
                # Walk the theater -> region -> country -> state -> city ->
                # campus -> building hierarchy.
                for theater,t in buildings.iteritems():
                    for region in t["regions"]:
                        for country in region["countries"]:
                            for state in country["states"]:
                                for city in state["cities"]:
                                    for campus in city["campuses"]:
                                        for building in campus["buildings"]:
                                            # `re` is imported at module level;
                                            # the redundant in-loop import was removed.
                                            # Skip ids that are not letter+alnum.
                                            if re.match('[a-zA-Z][a-zA-Z0-9]', building["buildingId"]):
                                                if (
                                                    country["countryName"].find(name) >= 0 or
                                                    state["stateName"].find(name) >= 0 or
                                                    city["cityName"].find(name) >= 0 or
                                                    campus["campusName"].find(name) >= 0 or
                                                    building["buildingId"].find(name) >= 0 or
                                                    building["buildingName"].find(name) >= 0
                                                ) :
                                                    sys.stderr.write("Found "+name+" in one of "+country["countryName"]+" "+state["stateName"]+" "+city["cityName"]+" "+campus["campusName"]+" "+building["buildingName"]+"\n")
                                                    found.append({"id": building["buildingId"],"name": building["buildingName"]})
                if len(found)>15:
                    return "Sorry too much building ! Please refine your request !"
                elif len(found)==0:
                    return "Sorry not found !"
                else:
                    txt="Are you looking for one of these buildings:"
                    for e in found:
                        id=e["id"]
                        name=e["name"]
                        txt+="\n * "+str(id.encode('utf-8'))+" ("+str(name.encode('utf-8'))+")"
                    return txt
        return "Connection error to building server"
    except Exception as e:
        # Log the reason instead of swallowing it silently.
        sys.stderr.write("Building lookup failed: "+str(e)+"\n")
        return "Connection error to building server"
def map(floor):
    """Return the floor-plan PDF for a floor id like "ILM-5" (Python 2).

    Builds .../FloorPlans/<BLDG>-AFP-<FLOOR>.pdf, authenticates through SSO
    with the directory credentials, and returns a JSON payload with the
    base64-encoded PDF, or an error string.

    NOTE(review): shadows the ``map`` builtin; name kept for callers.
    """
    s=floor.split('-')
    if len(s)!=2:
        return "Not Found"
    else:
        url="http://wwwin.cisco.com/c/dam/cec/organizations/gbs/wpr/FloorPlans/"+s[0].replace(' ','')+"-AFP-"+s[1]+".pdf"
        sys.stderr.write("Getting map on url: "+url+"\n")
        try:
            # Distinct name so the split result above is not clobbered.
            session = requests.Session()
            r=session.get(url)
            # The first GET redirects to SSO; authenticate to get the PDF.
            headers={'Content-type': 'application/x-www-form-urlencoded'}
            # NOTE(review): restored from a redacted placeholder; assumes the
            # SSO password is the directory password -- confirm.
            data="userid="+dir_user+"&password="+dir_pass+"&target=&login-button=Log+In&login-button-login=Next&login-button-login=Log+in"
            response=session.post(sso_url,data,headers, stream=True)
            if response.headers["Content-Type"] == "application/pdf":
                sys.stderr.write("Content is a pdf file\n")
                encoded_string = base64.b64encode(response.raw.read())
                sys.stderr.write("Encoded string:\n"+str(encoded_string)+"\n")
                return json.dumps({"text": "Here is the map !\n", "pdf": encoded_string })
            else:
                return "Connection error to map server"
        except requests.exceptions.ConnectionError:
            return "Connection error to map server"
def on_request(ch, method, props, body):
    """RabbitMQ RPC consumer: dispatch a JSON command and reply with text.

    The message body is ``{"cmd": <str>, "data": <dict>}``; the reply is
    published to ``props.reply_to`` with the original correlation id.
    """
    sys.stderr.write(" [x] Received %r\n" % body)
    #sys.stderr.write("Method: {}\n".format(method))
    #sys.stderr.write("Properties: {}\n".format(properties))
    data = json.loads(body)
    cmd=data['cmd']
    request_data=data["data"]
    sys.stderr.write("Command: {}\n".format(cmd))
    sys.stderr.write("Data: {}\n".format(request_data))
    if cmd == "book":
        # Booking is delegated to the roomfinder book server over HTTP.
        sys.stderr.write("Request booking of a room to %s\n" % book_server)
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        page = requests.post(book_server+'/book',data = json.dumps(request_data),headers=headers)
        txt=page.text
        sys.stderr.write("txt: {}\n".format(txt.encode('utf-8')))
    elif cmd == "dir":
        # Directory lookup is handled locally by find_dir().
        cco= request_data["cco"]
        sys.stderr.write("Request directory entry in %s for %s\n" % (str(dir_server.encode('utf-8')), str(cco.encode('utf-8'))))
        print "dir_server: "+dir_server
        txt=find_dir(cco).decode('utf-8')
        sys.stderr.write("txt: {}\n".format(txt.encode('utf-8')))
    elif cmd == "map":
        floor = request_data["floor"]
        sys.stderr.write("Request map for %s\n" % str(floor.encode('utf-8')) )
        txt=map(floor).encode('utf-8')
        #sys.stderr.write("txt: {}\n".format(txt))
    elif cmd == "building":
        b = request_data["building"]
        sys.stderr.write("Request building lookup for %s\n" % str(b.encode('utf-8')) )
        txt=building(b) #.encode('utf-8')
        sys.stderr.write("txt: {}\n".format(txt))
    elif cmd == "sr":
        # NOTE(review): "sr" leaves ``txt`` unassigned, so basic_publish below
        # raises NameError if this command is ever received -- confirm unused.
        pass
    elif cmd == "dispo":
        sys.stderr.write("Request dispo of a room to %s\n" % book_server)
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        page = requests.post(book_server+'/dispo',data = json.dumps(request_data),headers=headers)
        txt=page.text.encode('utf-8')
        sys.stderr.write("txt: {}\n".format(txt))
    elif cmd == "where":
        sys.stderr.write("Request where is a room to %s\n" % book_server)
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        page = requests.post(book_server+'/where',data = json.dumps(request_data),headers=headers)
        txt=page.text.encode('utf-8')
        sys.stderr.write("txt: {}\n".format(txt))
    elif cmd == "guest":
        sys.stderr.write("Request for a guest account creation\n")
        firstName= request_data["firstName"]
        lastName= request_data["lastName"]
        email= request_data["email"]
        sys.stderr.write("Request guest account for %s %s <%s>\n" % (firstName.encode('utf-8'), lastName.encode('utf-8'), email.encode('utf-8')) )
        txt=guest(firstName,lastName,email).encode('utf-8')
        sys.stderr.write("txt: {}\n".format(txt))
    # Reply on the caller's private queue, echoing the correlation id, then
    # ack the request so RabbitMQ can dispatch the next one.
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id = props.correlation_id),
                     body=txt)
    ch.basic_ack(delivery_tag = method.delivery_tag)
    return txt
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser("Room Finder Router Service")
    parser.add_argument("-r","--rabbitmq", help="IP or hostname for rabbitmq server, e.g. 'rabbit.domain.com'.")
    parser.add_argument("-p","--port", help="tcp port for rabitmq server, e.g. '2765'.")
    parser.add_argument("-b","--book", help="URL for roomfinder book server, e.g. 'http://book.domain.com:1234'.")
    parser.add_argument("-d", "--dir", help="Address of directory server", required=False)
    parser.add_argument("-e", "--dir-detail", help="Address of detailed directory server", required=False)
    parser.add_argument(
        "-i", "--photo", help="Address of photo directory server", required=False
    )
    parser.add_argument("-u","--user", help="URL for user of directory server.")
    parser.add_argument("-k","--password", help="URL for password of directory server.")
    parser.add_argument("-s","--sso", help="URL for SSO.")
    args = parser.parse_args()

    # Every setting falls back from CLI flag -> environment variable
    # (-> interactive prompt for the mandatory ones).
    rabbitmq = args.rabbitmq
    if rabbitmq is None:
        rabbitmq = os.getenv("roomfinder_rabbitmq_server")
        if rabbitmq is None:
            rabbitmq = raw_input("What is the rabbitmq server IP or hostname? ")
    rabbitmq_port = args.port
    if rabbitmq_port is None:
        rabbitmq_port = os.getenv("roomfinder_rabbitmq_port")
        if rabbitmq_port is None:
            rabbitmq_port = raw_input("What is the rabbitmq TCP port? ")
    book_server = args.book
    if book_server is None:
        book_server = os.getenv("roomfinder_book_server")
        if book_server is None:
            book_server = raw_input("What is the book server URL? ")
    dir_server = args.dir
    if dir_server is None:
        dir_server = os.getenv("roomfinder_dir_server")
    sys.stderr.write("Directory Server: " + str(dir_server) + "\n")
    dir_detail_server = args.dir_detail
    if dir_detail_server is None:
        dir_detail_server = os.getenv("roomfinder_dir_detail_server")
    dir_user = args.user
    if dir_user is None:
        dir_user = os.getenv("roomfinder_dir_user")
    sys.stderr.write("Directory User: " + str(dir_user) + "\n")
    dir_pass = args.password
    if dir_pass is None:
        dir_pass = os.getenv("roomfinder_dir_pass")
    # SECURITY: the directory password is intentionally no longer echoed to
    # stderr (it previously was).
    sso_url = args.sso
    if sso_url is None:
        sso_url = os.getenv("roomfinder_sso_url")
    sys.stderr.write("SSO URL: " + str(sso_url) + "\n")

    sys.stderr.write("Connecting to "+rabbitmq+" on port "+rabbitmq_port+"\n")
    # BUG FIX: the host/port gathered above were ignored and the client always
    # connected to localhost:5672; use the configured values.
    connection = pika.BlockingConnection(pika.ConnectionParameters(
        host=rabbitmq, port=int(rabbitmq_port)))
    channel = connection.channel()
    channel.queue_declare(queue='rpc_queue')
    # Only dispatch one unacked message at a time to this worker.
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(on_request, queue='rpc_queue')
    sys.stderr.write(' [*] Waiting for messages. To exit press CTRL+C\n')
    channel.start_consuming()
| 2.703125 | 3 |
utilize.py | YangLi1221/CoRA | 9 | 12765503 | import tensorflow as tf
from functools import reduce
from operator import mul
def assert_rank(tensor, expected_rank, name=None):
    """Raises an exception if the tensor rank is not of the expected rank.

    Args:
        tensor: A tf.Tensor to check the rank of.
        expected_rank: Python integer or list of integers, expected rank.
        name: Optional name of the tensor for the error message.

    Raises:
        ValueError: If the expected shape doesn't match the actual shape.
    """
    if name is None:
        name = tensor.name

    # BUG FIX: the original tested ``isinstance(expected_rank,
    # six.integer_types)`` but ``six`` is never imported in this module; on
    # Python 3 the integer types are simply ``int``.
    expected_rank_dict = {}
    if isinstance(expected_rank, int):
        expected_rank_dict[expected_rank] = True
    else:
        for x in expected_rank:
            expected_rank_dict[x] = True

    actual_rank = tensor.shape.ndims
    if actual_rank not in expected_rank_dict:
        scope_name = tf.get_variable_scope().name
        raise ValueError(
            "For the tensor `%s` in scope `%s`, the actual rank "
            "`%d` (shape = %s) is not equal to the expected rank `%s`" %
            (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
    """Apply ``tf.nn.dropout`` to ``x`` when appropriate.

    Dropout is applied when ``keep_prob < 1.0`` and either ``is_train`` is
    ``None`` (the caller handles train/eval switching) or ``is_train`` is
    truthy; otherwise ``x`` is returned unchanged.

    Args:
        x: Input tensor.
        keep_prob: Probability of keeping each element.
        is_train: None or a Python boolean training flag.
        noise_shape: Optional shape of the dropout noise, passed through.
        seed: Optional random seed, passed through.
        name: Optional name for the surrounding name scope.
    """
    with tf.name_scope(name or "dropout"):
        wants_dropout = keep_prob < 1.0 and (is_train is None or is_train)
        if wants_dropout:
            return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
        return x
def get_shape_list(tensor, expected_rank=None, name=None):
    """Return the shape of ``tensor`` as a list, preferring static dimensions.

    Static dimensions come back as Python ints; each dynamic (None) dimension
    is replaced with the corresponding scalar from ``tf.shape``.

    Args:
        tensor: A tf.Tensor object to find the shape of.
        expected_rank: (optional) int or list of ints. If given, the tensor's
            rank is validated via ``assert_rank`` first.
        name: Optional name of the tensor for the error message.

    Returns:
        A list mixing Python ints (static dims) and tf.Tensor scalars
        (dynamic dims).
    """
    if name is None:
        name = tensor.name
    if expected_rank is not None:
        assert_rank(tensor, expected_rank, name)

    shape = tensor.shape.as_list()
    dynamic_indexes = [i for i, dim in enumerate(shape) if dim is None]
    if not dynamic_indexes:
        return shape

    dyn_shape = tf.shape(tensor)
    for i in dynamic_indexes:
        shape[i] = dyn_shape[i]
    return shape
def selu(x):
    """Scaled Exponential Linear Unit activation (self-normalizing nets)."""
    # Fixed-point constants from the SELU paper (Klambauer et al., 2017).
    _ALPHA = 1.6732632423543772848170429916717
    _SCALE = 1.0507009873554804934193349852946
    with tf.name_scope('elu'):  # scope name kept from the original code
        return _SCALE * tf.where(x >= 0.0, x, _ALPHA * tf.nn.elu(x))
def gelu(x):
    """Gaussian Error Linear Unit (exact erf form).

    A smoother alternative to ReLU: multiplies the input by the standard
    normal CDF evaluated at the input.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor to perform activation.

    Returns:
        ``x`` with the GELU activation applied.
    """
    normal_cdf = 0.5 * (1.0 + tf.erf(x / tf.sqrt(2.0)))
    return x * normal_cdf
def swish(x):
    """Swish activation: ``x * sigmoid(x)``."""
    return tf.nn.sigmoid(x) * x
def activation_name_to_func(activation_name):
    """Map an activation name (or callable) to the activation function.

    Args:
        activation_name: Either the string name of a supported activation
            ('linear', 'relu', 'elu', 'selu', 'sigmoid', 'tanh', 'exp',
            'log', 'gelu', 'swish', 'lrelu') or an already-callable
            activation function, which is returned unchanged.

    Returns:
        A callable activation function.

    Raises:
        AttributeError: If the name is unknown, or the argument is neither
            a string nor callable.
    """
    # BUG FIX: the original started with ``assert isinstance(activation_name,
    # str)``, which made the callable branch below unreachable; the assert is
    # removed so callables are accepted as documented.
    if isinstance(activation_name, str):
        if activation_name == 'linear':
            act_fn = tf.identity
        elif activation_name == 'relu':
            act_fn = tf.nn.relu
        elif activation_name == 'elu':
            act_fn = tf.nn.elu
        elif activation_name == 'selu':
            act_fn = selu
        elif activation_name == 'sigmoid':
            act_fn = tf.nn.sigmoid
        elif activation_name == 'tanh':
            act_fn = tf.nn.tanh
        elif activation_name == 'exp':
            act_fn = tf.exp
        elif activation_name == 'log':
            act_fn = tf.log
        elif activation_name == 'gelu':
            act_fn = gelu
        elif activation_name == 'swish':
            act_fn = swish
        elif activation_name == 'lrelu':
            act_fn = tf.nn.leaky_relu
        else:
            raise AttributeError('no activation function named as %s' % activation_name)
    elif hasattr(activation_name, '__call__'):  # callable: use as-is
        act_fn = activation_name
    else:
        raise AttributeError
    return act_fn
def act_name2fn(afn):
    """Short alias for :func:`activation_name_to_func`."""
    return activation_name_to_func(afn)
def bn_dense_layer_v2(
        input_tensor, hn, bias, bias_start=0.0, scope=None,
        activation='relu', enable_bn=False,
        wd=0., keep_prob=1.0, is_train=None, dup_num=1, merge_var=False
):
    """Dense (fully connected) layer with dropout and optional batch norm.

    The input may have any rank >= 2; all leading dims are flattened for
    the matmul and restored afterwards.

    Args:
        input_tensor: float Tensor, e.g. [bs, sl, hn].
        hn: int, output hidden size per duplicate projection.
        bias: bool, whether to add a bias term.
        bias_start: float, constant initializer value for the bias.
        scope: optional variable-scope name (defaults to 'bn_dense_layer').
        activation: activation name or callable (see activation_name_to_func).
        enable_bn: bool, apply batch normalization before the activation.
        wd: float; if truthy the weight is added to the 'reg_vars'
            collection (for external weight-decay handling).
        keep_prob: float, dropout keep probability applied to the input.
        is_train: bool Tensor controlling dropout and batch-norm mode.
        dup_num: int, number of duplicated projections concatenated on the
            last axis; the output size is hn * dup_num.
        merge_var: bool; if True a single merged weight variable 'W' is
            created instead of dup_num separate 'W_i' variables.

    Returns:
        Activated Tensor with shape input_shape[:-1] + [hn * dup_num].
    """
    # NOTE(review): relies on `dropout`, `get_shape_list`, `reduce` and
    # `mul` being defined/imported elsewhere in this module (presumably
    # functools.reduce and operator.mul) — confirm at file top.
    act_fn = act_name2fn(activation)
    with tf.variable_scope(scope or 'bn_dense_layer'):
        input_tensor = dropout(input_tensor, keep_prob, is_train)
        # the comment use a 3d tensor [bs,sl,hn] as a example
        input_shape = get_shape_list(input_tensor)  # [3]
        assert len(input_shape) >= 2  # at least [bs,hn]
        # merge: flatten all leading dims into one so a 2-D matmul works
        dims_merge = input_shape[:-1]  # [all unrelated dims]
        new_dim = reduce(mul, dims_merge)  # get the merged dim
        new_shape = [new_dim, input_shape[-1]]  # new shape for matmul [2]
        input_tensor_rsp = tf.reshape(input_tensor, new_shape)  # [xx,dim]
        # dense layer
        input_dim = new_shape[-1]
        if merge_var:
            weight = tf.get_variable('W', shape=[input_dim, hn * dup_num], dtype=tf.float32)
        else:
            # dup_num separate projections, concatenated on the output axis
            weight_list = []
            for i in range(dup_num):
                weight_list.append(tf.get_variable('W_%d' % i, shape=[input_dim, hn]))
            weight = tf.concat(weight_list, -1)
        output_rsp = tf.matmul(input_tensor_rsp, weight)
        if bias:
            if merge_var or dup_num == 1:
                bias_val = tf.get_variable(
                    'bias', shape=[hn * dup_num], dtype=tf.float32,
                    initializer=tf.constant_initializer(bias_start)
                )
            else:
                bias_list = []
                for i in range(dup_num):
                    bias_list.append(
                        tf.get_variable(
                            'bias_%d' % i, shape=[hn], dtype=tf.float32,
                            initializer=tf.constant_initializer(bias_start))
                    )
                bias_val = tf.concat(bias_list, -1)
            output_rsp += bias_val
        # output reshape: restore the flattened leading dims
        output_shape = dims_merge + [hn * dup_num]  # [3] for [bs,sl,new_hn]
        output = tf.reshape(output_rsp, output_shape)  # [bs,sl,new_hn]
        if enable_bn:
            output = tf.contrib.layers.batch_norm(
                output, center=True, scale=True, is_training=is_train,
                updates_collections=None, decay=0.9,
                scope='bn')
        if wd:
            # Register for external L2/weight-decay regularization.
            tf.add_to_collection('reg_vars', weight)
        return act_fn(output)
caesar.py | milescarberry/simple_caesar_encryption_python | 0 | 12765504 | <filename>caesar.py
"""Simple Caesar-cipher demo: decrypt a message by shifting lowercase letters."""
import string

alphabets_lower = string.ascii_lowercase
alphabets_upper = string.ascii_uppercase


def caesar_encrypt(text, shift, alphabet=string.ascii_lowercase):
    """Return *text* with every character of *alphabet* rotated *shift* places.

    Characters outside *alphabet* (spaces, punctuation, uppercase when the
    default lowercase alphabet is used) are left untouched. A *shift*
    larger than the alphabet length wraps around (modulo its length), and
    a left shift of k is equivalent to a right shift of len(alphabet) - k.

    Args:
        text: the string to transform.
        shift: number of positions to rotate (any integer).
        alphabet: the ordered character set to rotate within.

    Returns:
        The transformed string.
    """
    shift %= len(alphabet)
    # str.maketrans lines the two strings up and maps each character of the
    # first onto the character at the same index of the second.
    shifted = alphabet[shift:] + alphabet[:shift]
    return text.translate(str.maketrans(alphabet, shifted))


plain_text = "hkpafh uhtiphy"
# A left shift of 7 is the same as a right shift of 26 - 7 = 19.
shift = len(alphabets_lower) - 7  # len(alphabets_lower) == 26
shift = shift % len(alphabets_lower)
# Kept for backward compatibility with the original script's module names.
alphabets_lower_shifted = alphabets_lower[shift::1] + alphabets_lower[:shift:1]
translation_table = str.maketrans(alphabets_lower, alphabets_lower_shifted)

encrypted_text = caesar_encrypt(plain_text, shift)
print()
print(encrypted_text)
xpxchain/models/mosaic/mosaic_id.py | Sharmelen/python-xpx-chain-sdk | 1 | 12765505 | """
mosaic_id
=========
Identifier for an asset.
License
-------
Copyright 2019 NEM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from .mosaic_nonce import MosaicNonce
from ..account.public_account import PublicAccount
from ... import util
__all__ = ['MosaicId']
@util.inherit_doc
@util.dataclass(frozen=True, id=0)
class MosaicId(util.IntMixin, util.Object):
    """
    Mosaic identifier.

    :param id: Raw identifier for mosaic.
    """

    # Raw 64-bit identifier for the mosaic (serialized as 8 bytes).
    id: int

    def __int__(self) -> int:
        """Return the raw integer identifier."""
        return self.id

    def get_id(self) -> str:
        """Return the identifier as a 16-character big-endian hex string."""
        return util.hexlify(self.id.to_bytes(8, 'big'))

    @classmethod
    def create_from_nonce(
        cls,
        nonce: MosaicNonce,
        owner: PublicAccount,
    ):
        """
        Create mosaic ID from nonce and owner.

        The identifier is derived deterministically from the nonce and the
        owner's public-key bytes.

        :param nonce: Mosaic nonce.
        :param owner: Account of mosaic owner.
        """
        key = util.unhexlify(owner.public_key)
        return cls(util.generate_mosaic_id(nonce.nonce, key))
| 2.1875 | 2 |
mmdet/models/anchor_heads/__init__.py | ziming-liu/ObjectDet | 0 | 12765506 | from .anchor_head import AnchorHead
from .fcos_head import FCOSHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .retina_head import RetinaHead
from .rpn_head import RPNHead
from .ssd_head import SSDHead
from .desh_head import DeshHead
# Public API of this package: every anchor-head class re-exported above.
__all__ = [
    'AnchorHead',
    'GuidedAnchorHead',
    'DeshHead',
    'FeatureAdaption',
    'RPNHead',
    'GARPNHead',
    'RetinaHead',
    'GARetinaHead',
    'SSDHead',
    'FCOSHead',
]
| 1.09375 | 1 |
tests/core/test_rtree.py | apatlpo/pangeo-pyinterp | 0 | 12765507 | <reponame>apatlpo/pangeo-pyinterp<gh_stars>0
# Copyright (c) 2019 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import os
import pickle
import unittest
import netCDF4
try:
import matplotlib.pyplot
HAVE_PLT = True
except ImportError:
HAVE_PLT = False
import numpy as np
import pyinterp.core as core
def plot(x, y, z, filename):
    """Render *z* over the (x, y) mesh and save the figure next to this file.

    Color limits are clamped to mean(z) +/- 3 standard deviations so a few
    outliers do not wash out the color map.
    """
    fig = matplotlib.pyplot.figure(figsize=(15, 15), dpi=150)
    mean = z.mean()
    sigma = z.std()
    norm = matplotlib.colors.Normalize(
        vmin=mean - 3 * sigma, vmax=mean + 3 * sigma)
    axis = fig.add_subplot(2, 1, 1)
    axis.pcolormesh(x, y, z, cmap='jet', norm=norm)
    out_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), filename)
    fig.savefig(out_path, bbox_inches='tight', pad_inches=0.4)
class TestRTree(unittest.TestCase):
    """Integration tests for the pyinterp RTree spatial index."""

    # NetCDF file holding the mean-sea-surface (MSS) test grid.
    GRID = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", "dataset", "mss.nc")

    @classmethod
    def load_data(cls):
        # Build an RTree over (lon, lat) -> mss samples read from the grid.
        with netCDF4.Dataset(cls.GRID) as ds:
            z = ds.variables['mss'][:].T
            # Replace masked (land) cells with NaN before packing.
            z[z.mask] = float("nan")
            x, y = np.meshgrid(
                ds.variables['lon'][:], ds.variables['lat'][:], indexing='ij')
        mesh = core.RTreeFloat32(core.geodetic.System())
        mesh.packing(
            np.vstack((x.flatten(), y.flatten())).T,
            z.data.flatten())
        return mesh

    def test_interpolate(self):
        # Query the tree on a regular 1/3-degree global grid and check that
        # single-threaded and multi-threaded IDW give identical results.
        mesh = self.load_data()
        lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0
        lat = np.arange(-90, 90, 1 / 3.0) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        z0, _ = mesh.inverse_distance_weighting(
            np.vstack((x.flatten(), y.flatten())).T,
            within=False,
            radius=35434,
            k=8,
            num_threads=0)
        z1, _ = mesh.inverse_distance_weighting(
            np.vstack((x.flatten(), y.flatten())).T,
            within=False,
            radius=35434,
            k=8,
            num_threads=1)
        z0 = np.ma.fix_invalid(z0)
        z1 = np.ma.fix_invalid(z1)
        self.assertTrue(np.all(z1 == z0))
        if HAVE_PLT:
            plot(x, y, z0.reshape((len(lon), len(lat))), "mss_rtree_idw.png")

    def test_pickle(self):
        # The tree must survive a pickle round-trip unchanged in type.
        interpolator = self.load_data()
        other = pickle.loads(pickle.dumps(interpolator))
        self.assertTrue(isinstance(other, core.RTreeFloat32))
if __name__ == "__main__":
unittest.main()
| 1.953125 | 2 |
src/scion/service/test/test_scion_cdipdata.py | scion-network/scion | 0 | 12765508 | <gh_stars>0
#!/usr/bin/env python
__author__ = '<NAME>'
from nose.plugins.attrib import attr
import gevent
import os
from pyon.util.int_test import IonIntegrationTestCase
from pyon.public import BadRequest, NotFound, IonObject, RT, PRED, OT, CFG, StreamSubscriber, log
from ion.agent.control import AgentControl, StreamingAgentClient
from ion.agent.streaming_agent import StreamingAgent
from ion.data.persist.hdf5_dataset import DS_BASE_PATH, DS_FILE_PREFIX
from ion.data.schema.schema import DataSchemaParser
from interface.services.scion.iscion_management import ScionManagementClient
from interface.services.core.iidentity_management_service import IdentityManagementServiceClient
from interface.objects import Instrument, Dataset, GeospatialLocation, DataPacket
@attr('INT', group='scion')
class TestScionCDIPAgentData(IonIntegrationTestCase):
    """Test for Scion with agents streaming data from CDIP source
    """

    def setUp(self):
        # Start a fresh container with preloading disabled so the test
        # controls exactly which resources exist.
        self._start_container()
        self.patch_alt_cfg('scion.process.preload.preloader.CFG',
                           {'scion': {'preload': {'enabled': False}}})
        self.container.start_rel_from_url('res/deploy/scion.yml')
        self.rr = self.container.resource_registry
        self.scion_client = ScionManagementClient()
        self.idm_client = IdentityManagementServiceClient()
        self.system_actor_id = None
        self.ui_server_proc = self.container.proc_manager.procs_by_name["ui_server"]
        self.scion_proc = self.container.proc_manager.procs_by_name["scion_management"]
        self.ui_base_url = self.ui_server_proc.base_url
        self.sg_base_url = self.ui_server_proc.gateway_base_url

    def tearDown(self):
        # Container teardown is handled by IonIntegrationTestCase.
        pass

    def test_scion_agent(self):
        """End-to-end: user + instrument + dataset, agent streams CDIP data."""
        # Create user
        actor_id = self.scion_client.define_user(
            first_name="John", last_name="Doe",
            username="<EMAIL>@<EMAIL>", password="<PASSWORD>", email="<EMAIL>")
        # Create instrument
        agent_info=[dict(agent_type="data_agent",
                         config=dict(plugin="scion.agent.model.cdip.cdip_plugin.CDIP_DataAgentPlugin",
                                     sampling_interval=10, stream_name="basic_streams",
                                     auto_streaming=False))]
        inst_obj = Instrument(name="TA_121A/MGENC/M40", description="CDIP buoy data",
                              location=GeospatialLocation(latitude=37.94831666666667, longitude=-123.4675),
                              agent_info=agent_info)
        inst_id, _ = self.rr.create(inst_obj, actor_id=actor_id)
        # Create dataset
        schema_def = DataSchemaParser.parse_schema_ref("ds_cdip01_main")
        ds_obj = Dataset(name="Dataset Sensor",
                         schema_definition=schema_def)
        ds_id, _ = self.rr.create(ds_obj, actor_id=actor_id)
        self.rr.create_association(inst_id, PRED.hasDataset, ds_id)

        # The backing HDF5 file must not exist until data arrives.
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))
        self.assertFalse(os.path.exists(ds_filename))
        inst_data_t0 = self.scion_client.get_asset_data(inst_id)
        self.assertEquals(inst_data_t0["dataset_id"], ds_id)
        self.assertEquals(inst_data_t0["num_rows"], 0)

        # Install a data packet catcher
        self.recv_packets, self.recv_rows = [], 0

        def process_packet_cb(packet, route, stream):
            # Record every packet so we can verify data actually flowed.
            if not isinstance(packet, DataPacket):
                log.warn("Received a non DataPacket message")
            self.recv_packets.append(packet)
            self.recv_rows += len(packet.data["data"])
            log.info("Received data packet #%s: rows=%s, cols=%s", len(self.recv_packets), len(packet.data["data"]),
                     packet.data["cols"])
            #log.info('Packet data: ' + str(packet.data))

        def cleanup_stream_sub():
            if self.stream_sub:
                self.stream_sub.stop()
                self.stream_sub = None

        self.stream_sub = StreamSubscriber(process=self.scion_proc, stream="basic_streams", callback=process_packet_cb)
        self.stream_sub.start()
        self.addCleanup(cleanup_stream_sub)

        # Start agent
        self.assertFalse(StreamingAgentClient.is_agent_active(inst_id))
        agent_pid = self.scion_client.start_agent(inst_id)
        self.assertTrue(StreamingAgentClient.is_agent_active(inst_id))
        sac = StreamingAgentClient(resource_id=inst_id, process=self.scion_proc)
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_INITIALIZED)
        sac.connect()
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_CONNECTED)

        # Coming in from the agent config.
        streaming_args = {
            'url' : 'http://cdip.ucsd.edu/data_access/justdar.cdip?029+pm',
            'sampling_interval' : 10
        }
        sac.start_streaming(streaming_args)
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_STREAMING)

        # Set to progressively high values for real data stream tests.
        gevent.sleep(20)

        # Retrieve data
        self.assertTrue(os.path.exists(ds_filename))
        inst_data = self.scion_client.get_asset_data(inst_id)
        # Example of the structure returned by get_asset_data:
        """
        {'data': {'Dp': [[1465682100000, 325]],
          'Hs': [[1465682100000, 3.03]],
          'Ta': [[1465682100000, 6.92]],
          'Temp': [[1465682100000, 12.2]],
          'Tp': [[1465682100000, 9.09]]},
         'dataset_id': '08bc829159e6401182462b713b180dbe',
         'num_rows': 1,
         'ts_generated': '1465685467675',
         'var_def': [{'base_type': 'ntp_time',
           'description': 'NTPv4 timestamp',
           'name': 'time',
           'storage_dtype': 'i8',
           'unit': ''},
          {'base_type': 'float',
           'description': 'Significant wave height',
           'name': 'Hs',
           'storage_dtype': 'f8',
           'unit': 'meters'},
          {'base_type': 'float',
           'description': 'Peak wave period',
           'name': 'Tp',
           'storage_dtype': 'f8',
           'unit': 'seconds'},
          {'base_type': 'int',
           'description': 'Peak wave direction',
           'name': 'Dp',
           'storage_dtype': 'i4',
           'unit': 'degrees'},
          {'base_type': 'float',
           'description': 'Average wave period',
           'name': 'Ta',
           'storage_dtype': 'f8',
           'unit': 'seconds'},
          {'base_type': 'float',
           'description': 'Surface temperature',
           'name': 'Temp',
           'storage_dtype': 'f8',
           'unit': 'celcius'}],
         'variables': ['time', 'Hs', 'Tp', 'Dp', 'Ta', 'Temp']}
        """
        num_rows = inst_data["num_rows"]
        log.info('CDIP test produced %i data rows.' % num_rows)

        # Take down agent
        sac.stop_streaming()  # Not required to stop agent, just to test here
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_CONNECTED)
        sac.disconnect()
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_INITIALIZED)
        self.scion_client.stop_agent(inst_id)
        self.assertFalse(StreamingAgentClient.is_agent_active(inst_id))
| 1.617188 | 2 |
test/nmea_queue_test.py | quiet-oceans/libais | 161 | 12765509 | <filename>test/nmea_queue_test.py<gh_stars>100-1000
"""Tests for ais.nmea_queue."""
import contextlib
import unittest
import pytest
import six
from six.moves import StringIO
import ais
from ais import nmea
from ais import nmea_queue
BARE_NMEA = """
# pylint: disable=line-too-long
$GPZDA,203003.00,12,07,2009,00,00,*47
!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E
!BSVDM,1,1,,A,15Mj23`PB`o=Of>KjvnJg8PT0L2R,0*7E
!SAVDM,1,1,,B,35Mj2p001qo@5tVKLBWmIDJT01:@,0*33
!AIVDM,1,1,,A,B5NWV1P0<vSE=I3QdK4bGwoUoP06,0*4F
!SAVDM,1,1,,A,403Owi1utn1W0qMtr2AKStg020S:,0*4B
!SAVDM,2,1,4,A,55Mub7P00001L@;SO7TI8DDltqB222222222220O0000067<0620@jhQDTVG,0*43
!SAVDM,2,2,4,A,30H88888880,2*49
"""
TAG_BLOCK = r"""
# pylint: disable=line-too-long
\n:440661,s:r3669963,c:1428537660*0F\$GPZDA,000253,09,04,2015,+00,00*6C
\g:1-2-4372,s:rORBCOMM109,c:1426032000,T:2015-03-11 00.00.00*32\!AIVDM,2,1,2,B,576u>F02>hOUI8AGR20tt<j104p4l62222222216H14@@Hoe0JPEDp1TQH88,0*16
\s:rORBCOMM999u,c:1426032000,T:2015-03-11 00.00.00*36\!AIVDM,1,1,,,;5Qu0v1utmGssvvkA`DRgm100000,0*46
\g:2-2-4372,s:rORBCOMM109,c:1426032000,T:2015-03-11 00.00.00*31\!AIVDM,2,2,2,B,88888888880,2*25
\g:1-2-27300,n:636994,s:b003669710,c:1428621738*5F\!SAVDM,2,1,2,B,55Mw@A7J1adAL@?;7WPl58F0U<h4pB222222220t1PN5553fN4g?`4iSp5Rc,0*26
\g:2-2-27300,n:636995*15\!SAVDM,2,2,2,B,iP`88888880,2*5E
\n:636996,s:b003669710,c:1428621738*19\!SAVDM,1,1,,B,35Mv4LPP@Go?FFtEbDDWQmlT20k@,0*04
\g:4-4-993623,n:577969*22\$ARVSI,r003669930,,233948.825272,1831,-97,0*24
\n:80677,s:b003669952,c:1428884269*2A\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17
"""
USCG = r"""
# pylint: disable=line-too-long
!SAVDM,1,1,,A,15N4OMPP01I<cGrA1v>Id?vF060l,0*22,b003669978,1429287189
!SAVDM,2,1,4,B,54h@7?02BAF=`L4wN21<eTH4hj2222222222220U4HG6553U06T0C3H0Q@@j,0*5D,d-86,S389,t161310.00,T10.377780,D07MN-MI-LAKBS1,1429287190
!SAVDM,2,2,4,B,88888888880,2*39,d-86,S389,t161310.00,T10.377780,D07MN-MI-LAKBS1,1429287190
!AIVDM,1,1,,B,3592u`iP03GWEflBRosm0Ov@0000,0*70,d-107,S0297,t161407.00,T07.92201452,r11CSDO1,1429287248
!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258
"""
MIXED = r"""
!SAVDM,1,1,,A,15N4OMPP01I<cGrA1v>Id?vF060l,0*22,b003669978,1429287189
!SAVDM,1,1,,A,403Owi1utn1W0qMtr2AKStg020S:,0*4B
\n:80677,s:b003669952,c:1428884269*2A\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17
random text
"""
class NmeaQueueTest(unittest.TestCase):
    """Unit tests for ais.nmea_queue.NmeaQueue message assembly/decoding."""

    def testTextData(self):
        # These lines should all pass straight through.
        src_lines = (
            '',
            'a',
            '123',
            # Not quite NMEA strings.
            '$GPZDA',
            '!AIVDM',
            '*FF',)
        queue = nmea_queue.NmeaQueue()
        for line in src_lines:
            queue.put(line)
        self.assertEqual(queue.qsize(), len(src_lines))
        # Each input line becomes one TEXT message with a 1-based line number.
        for i in range(1, queue.qsize() + 1):
            msg = queue.get()
            self.assertEqual(msg['line_nums'], [i])
            self.assertEqual(msg['line_type'], nmea.TEXT)
            self.assertEqual(msg['lines'], list(src_lines[i-1:i]))
        # `msg` still holds the last message from the loop.
        self.assertEqual(msg,
                         {'line_nums': [6], 'line_type': 'TEXT', 'lines': ['*FF']})

    def testBareSingleLineData(self):
        # Single-sentence bare NMEA: the AIS payloads must decode.
        queue = nmea_queue.NmeaQueue()
        lines = [line for line in BARE_NMEA.split('\n') if ',' in line]
        for line in lines:
            queue.put(line)
        self.assertEqual(queue.qsize(), 7)
        msgs = []
        while not queue.empty():
            msgs.append(queue.get())

        self.assertEqual(msgs[0],
                         {'line_nums': [1],
                          'line_type': 'BARE',
                          'lines': ['$GPZDA,203003.00,12,07,2009,00,00,*47']})
        self.assertEqual(
            msgs[1],
            {'decoded': {
                'cog': 52.099998474121094,
                'id': 2,
                'md5': '99c8c2804fde0481e6143051930b66c4',
                'mmsi': 218069000,
                'nav_status': 0,
                'position_accuracy': 0,
                'raim': False,
                'repeat_indicator': 0,
                'rot': 0.0,
                'rot_over_range': False,
                'slot_number': 683,
                'slot_timeout': 2,
                'sog': 11.100000381469727,
                'spare': 0,
                'special_manoeuvre': 0,
                'sync_state': 0,
                'timestamp': 16,
                'true_heading': 48,
                'x': -118.227775,
                'y': 31.24317},
             'line_nums': [2],
             'line_type': 'BARE',
             'lines': ['!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E'],
             'matches': [{
                 'body': '23?up2001gGRju>Ap:;R2APP08:c',
                 'chan': 'B',
                 'checksum': '0E',
                 'fill_bits': 0,
                 'sen_num': 1,
                 'sen_tot': 1,
                 'seq_id': None,
                 'talker': 'AI',
                 'vdm_type': 'VDM',
                 'vdm': '!AIVDM,1,1,,B,23?up2001gGRju>Ap:;R2APP08:c,0*0E'}]}
        )

    def testTagBlockLines(self):
        # TAG-block wrapped sentences, including multi-part groups.
        queue = nmea_queue.NmeaQueue()
        lines = [line for line in TAG_BLOCK.split('\n') if ',' in line]
        for line in lines:
            queue.put(line)
        self.assertEqual(queue.qsize(), 6)
        msgs = []
        while not queue.empty():
            msgs.append(queue.get())

        # self.assertNotIn('decoded', msgs[0])
        # TODO(schwehr): Check the ZDA message decoding.
        for msg_num in range(1, 5):
            self.assertIn('decoded', msgs[msg_num])
        ids = [msg['decoded']['id'] for msg in msgs[1:] if 'decoded' in msg]
        self.assertEqual(ids, [11, 5, 5, 3, 27])

        self.assertEqual(
            msgs[-1],
            {'decoded': {
                'cog': 131,
                'gnss': True,
                'id': 27,
                'md5': '50898a3435865cf76f1b502b2821672b',
                'mmsi': 577305000,
                'nav_status': 5,
                'position_accuracy': 1,
                'raim': False,
                'repeat_indicator': 0,
                'sog': 0,
                'spare': 0,
                'x': -90.20666666666666,
                'y': 29.145},
             'line_nums': [9],
             'line_type': 'TAGB',
             'lines': [
                 '\\n:80677,s:b003669952,c:1428884269*2A'
                 '\\!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17'],
             'matches': [{
                 'dest': None,
                 'group': None,
                 'group_id': None,
                 'line_num': 80677,
                 'metadata': 'n:80677,s:b003669952,c:1428884269*2A',
                 'payload': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
                 'quality': None,
                 'rcvr': 'b003669952',
                 'rel_time': None,
                 'sentence_num': None,
                 'sentence_tot': None,
                 'tag_checksum': '2A',
                 'text': None,
                 'text_date': None,
                 'time': 1428884269}],
             'times': [1428884269]})

    def testUscgLines(self):
        # USCG-format lines (trailing station/time metadata) must decode.
        queue = nmea_queue.NmeaQueue()
        lines = [line for line in USCG.split('\n') if ',' in line]
        for line in lines:
            queue.put(line)
        self.assertEqual(queue.qsize(), 4)
        msgs = []
        while not queue.empty():
            msgs.append(queue.get())
        for msg in msgs:
            self.assertIn('decoded', msg)
        ids = [msg['decoded']['id'] for msg in msgs]
        self.assertEqual(ids, [1, 5, 3, 27])

        self.assertEqual(
            msgs[3],
            {
                'decoded': {
                    'cog': 131,
                    'gnss': True,
                    'id': 27,
                    'md5': '50898a3435865cf76f1b502b2821672b',
                    'mmsi': 577305000,
                    'nav_status': 5,
                    'position_accuracy': 1,
                    'raim': False,
                    'repeat_indicator': 0,
                    'sog': 0,
                    'spare': 0,
                    'x': -90.20666666666666,
                    'y': 29.145},
                'line_nums': [5],
                'line_type': 'USCG',
                'lines': ['!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17,rMySat,1429287258'],
                'matches': [{
                    'body': 'K8VSqb9LdU28WP8<',
                    'chan': 'B',
                    'checksum': '17',
                    'counter': None,
                    'fill_bits': 0,
                    'hour': None,
                    'minute': None,
                    'payload': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
                    'receiver_time': None,
                    'rssi': None,
                    'second': None,
                    'sen_num': 1,
                    'sen_tot': 1,
                    'seq_id': None,
                    'signal_strength': None,
                    'slot': None,
                    'station': 'rMySat',
                    'station_type': 'r',
                    'talker': 'SA',
                    'time': 1429287258,
                    'time_of_arrival': None,
                    'uscg_metadata': ',rMySat,1429287258',
                    'vdm': '!SAVDM,1,1,,B,K8VSqb9LdU28WP8<,0*17',
                    'vdm_type': 'VDM'}]})

    def testMixedLines(self):
        # A mixture of USCG, bare, TAG-block and free text: each line must be
        # classified with the right line_type.
        queue = nmea_queue.NmeaQueue()
        lines = [line for line in MIXED.split('\n') if line.strip()]
        for line in lines:
            queue.put(line)
        self.assertEqual(queue.qsize(), 4)
        msgs = []
        while not queue.empty():
            msgs.append(queue.get())
        for msg in msgs[:-1]:
            self.assertIn('decoded', msg)
        ids = [msg['decoded']['id'] for msg in msgs[:-1]]
        self.assertEqual(ids, [1, 4, 27])
        line_types = [msg['line_type'] for msg in msgs]
        self.assertEqual(
            line_types,
            [nmea.USCG, nmea.BARE, nmea.TAGB, nmea.TEXT])
@pytest.mark.parametrize("nmea_text", [
    six.text_type(BARE_NMEA.strip()),
    six.text_type(TAG_BLOCK.strip()),
    six.text_type(USCG.strip()),
    six.text_type(MIXED.strip())
])
def test_NmeaFile_against_queue(nmea_text):
    """ais.open on a stream must yield the same messages as NmeaQueue.

    The parameter was renamed from ``nmea`` (to ``nmea_text``) because it
    shadowed the ``ais.nmea`` module imported at the top of this file.
    """
    # Feed every line through a raw NmeaQueue to build the expected output.
    queue = nmea_queue.NmeaQueue()
    for line in nmea_text.splitlines():
        queue.put(line)

    expected = []
    msg = queue.GetOrNone()
    while msg:
        expected.append(msg)
        msg = queue.GetOrNone()

    # Read the same text through the high-level ais.open file interface.
    with contextlib.closing(StringIO(nmea_text)) as f, ais.open(f) as src:
        actual = list(src)

    # zip() alone would silently pass if one side were truncated.
    assert len(expected) == len(actual)
    for e, a in zip(expected, actual):
        assert e == a
if __name__ == '__main__':
unittest.main()
| 1.820313 | 2 |
beers/migrations/0022_auto_20171117_1915.py | nvembar/onehundredbeers | 1 | 12765510 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-18 00:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``bonus_type`` and ``tx_type`` fields to ``contest_checkin``."""

    dependencies = [
        ('beers', '0021_auto_20171117_1846'),
    ]

    operations = [
        migrations.AddField(
            model_name='contest_checkin',
            name='bonus_type',
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
        migrations.AddField(
            model_name='contest_checkin',
            name='tx_type',
            # Two-letter transaction type code; defaults to a plain beer checkin.
            field=models.CharField(choices=[('BE', 'Beer'), ('BR', 'Brewery'), ('CB', 'Challenge Beer'), ('CL', 'Challenge Beer Loss'), ('BO', 'Bonus')], default='BE', max_length=2),
        ),
    ]
| 1.75 | 2 |
data_loader.py | hassancpu/Temporal-Action-Localization | 0 | 12765511 | <reponame>hassancpu/Temporal-Action-Localization
# coding: utf-8
import os
import json
import random
import numpy as np
import h5py
class Dataloader(object):
    '''
    Batch loader for temporal-action-localization features and boundary scores.

    Usage:
        dataloader = Dataloader(config, 'train')
        batch_data = dataloader.batch_data_iterator()
    '''
    def __init__(self, config, split):
        """Load split ground truth, open feature HDF5 files, precompute scores.

        :param config: configuration object (paths, feat_resolution, window
            size, batch size, feat_mode, balance_ratio, ...).
        :param split: one of 'train', 'val', 'test'.
        """
        self.config = config
        self.split = split
        assert self.split in {'train', 'val', 'test'}
        # NOTE(review): the file object passed to json.load is never closed;
        # also the h5py files below stay open for the loader's lifetime.
        self.split_gt_dict = json.load(open(os.path.join(self.config.split_gt_info_path,
                                                         self.split + '_gt.json'), 'r'))
        self.video_names = list(self.split_gt_dict.keys())
        self.split_size = len(self.video_names)
        self.feat_rgb = h5py.File(self.config.feat_path_rgb, 'r')
        self.feat_flow = h5py.File(self.config.feat_path_flow, 'r')
        self.gt_scores = self.generate_gt_scores()

    def calc_ioa(self, gt_starts, gt_ends, anchor_start, anchor_end):
        '''
        calc intersection over anchor length, frame_level
        gt_starts, gt_ends: multi values, shape: [gt_num]
        anchor_start, anchor_end: single value
        '''
        intersection_starts = np.maximum(gt_starts, anchor_start)
        intersection_ends = np.minimum(gt_ends, anchor_end)
        intersection_lens = np.maximum(intersection_ends - intersection_starts, 0)
        anchor_len = anchor_end - anchor_start
        ioa = intersection_lens / float(anchor_len)
        return ioa

    def generate_gt_scores(self):
        """Precompute per-feature-step start/end/action scores for each video.

        For every video, each feature step (feat_resolution frames) gets the
        best intersection-over-anchor score against the ground-truth start
        regions, end regions and full action spans.
        """
        gt_scores = {}
        for video_name in self.video_names:
            gt_frame_stamps = np.asarray(self.split_gt_dict[video_name]['gt_frame_stamps'])
            # NOTE(review): float division — truncated via int() in the loops
            # below; presumably valid_frame_num is a multiple of
            # feat_resolution, confirm against the feature extraction step.
            cur_video_feat_len = self.split_gt_dict[video_name]['valid_frame_num'] / self.config.feat_resolution
            # shape: [gt_num]
            gt_frame_starts = gt_frame_stamps[:, 0]
            gt_frame_ends = gt_frame_stamps[:, 1]
            gt_frame_lens = gt_frame_ends - gt_frame_starts
            # Boundary regions span 10% of the action length, but at least
            # one feature step.
            gt_boundary_region_lens = np.maximum(gt_frame_lens / 10, self.config.feat_resolution)
            # shape: [gt_num, 2]
            gt_start_regions = np.stack(
                (gt_frame_starts - gt_boundary_region_lens / 2, gt_frame_starts + gt_boundary_region_lens / 2), axis=1)
            gt_end_regions = np.stack(
                (gt_frame_ends - gt_boundary_region_lens / 2, gt_frame_ends + gt_boundary_region_lens / 2), axis=1)
            # shape: [cur_video_feat_len]
            start_scores = []
            for i in range(int(cur_video_feat_len)):
                feat_frame_start = i * self.config.feat_resolution
                feat_frame_end = (i + 1) * self.config.feat_resolution
                best_score = np.max(
                    self.calc_ioa(gt_start_regions[:, 0], gt_start_regions[:, 1], feat_frame_start, feat_frame_end))
                start_scores.append(best_score)
            end_scores = []
            for i in range(int(cur_video_feat_len)):
                feat_frame_start = i * self.config.feat_resolution
                feat_frame_end = (i + 1) * self.config.feat_resolution
                best_score = np.max(
                    self.calc_ioa(gt_end_regions[:, 0], gt_end_regions[:, 1], feat_frame_start, feat_frame_end))
                end_scores.append(best_score)
            action_scores = []
            for i in range(int(cur_video_feat_len)):
                feat_frame_start = i * self.config.feat_resolution
                feat_frame_end = (i + 1) * self.config.feat_resolution
                best_score = np.max(self.calc_ioa(gt_frame_starts, gt_frame_ends, feat_frame_start, feat_frame_end))
                action_scores.append(best_score)
            cur_gt_scores = {'start_scores': start_scores, 'end_scores': end_scores, 'action_scores': action_scores}
            gt_scores[video_name] = cur_gt_scores
        return gt_scores

    def mask_process(self, batch_feat, batch_score):
        '''
        input:
            batch_feat: [batch_size, window_size(vary), feat_dimension]
            batch_score: [batch_size, window_size(vary), recap_length*3]
        return:
            batch_feat_masked, batch_score_masked
            batch_mask: [batch_size, window_size(max)], np.int32
        '''
        window_sizes = []
        for video_feat in batch_feat:
            cur_window_size = video_feat.shape[0]
            window_sizes.append(cur_window_size)
        max_window_size = max(window_sizes)
        # print window_sizes
        # Zero-pad every sample up to the longest window in the batch and
        # mark valid positions with 1 in batch_mask.
        batch_feat_masked = np.zeros((len(batch_feat), max_window_size, batch_feat[0].shape[-1]), dtype=np.float32)
        batch_score_masked = np.zeros((len(batch_feat), max_window_size, batch_score[0].shape[-1]), dtype=np.float32)
        batch_mask = np.zeros((len(batch_feat), max_window_size), dtype=np.int32)
        for i in range(len(batch_feat)):
            batch_feat_masked[i, :window_sizes[i], :] = batch_feat[i]
            batch_score_masked[i, :window_sizes[i], :] = batch_score[i]
            batch_mask[i, :window_sizes[i]] = 1
        return batch_feat_masked, batch_score_masked, batch_mask

    def balance_mask_func(self, score, ratio=[1, 1, 1]):
        '''
        score: start + end + action masked_scores, shape: [batch_size, window_size(masked), recap_length]
        '''
        # NOTE(review): mutable default argument `ratio` — harmless here
        # because it is never mutated, but kept as-is for byte compatibility.
        start_scores = score[:, :, :self.config.recap_length]
        end_scores = score[:, :, self.config.recap_length: self.config.recap_length * 2]
        action_scores = score[:, :, self.config.recap_length * 2:]

        def balance_mask(score, cur_ratio):
            # Keep all positives (score >= 0.5) and subsample negatives at
            # cur_ratio negatives per positive.
            score_flat = np.reshape(score, [-1])
            thres_score = (score_flat >= 0.5).astype(np.float32)
            pos = np.where(thres_score == 1.)[0]
            neg = np.where(thres_score == 0.)[0]
            sample_idx = list(pos) + random.sample(list(neg), min(int(len(pos) * cur_ratio), len(neg)))
            mask = np.zeros_like(score_flat, dtype=np.float32)
            mask[sample_idx] = 1.
            mask = np.reshape(mask, score.shape)
            return mask

        balanced_start_mask = balance_mask(start_scores, ratio[0])
        balanced_end_mask = balance_mask(end_scores, ratio[1])
        balanced_action_mask = balance_mask(action_scores, ratio[2])
        return np.concatenate((balanced_start_mask, balanced_end_mask, balanced_action_mask), axis=-1)

    def batch_data_iterator(self):
        '''
        return:
            batch_data: dict.
        '''
        if self.split == 'train':
            random.shuffle(self.video_names)
        cur_ptr = 0
        while True:
            batch_feat = []  # shape: [batch_size, window_size(vary), feat_dimension]
            batch_score = []  # shape: [batch_size, window_size(vary), recap_length*3]
            batch_video_name = []  # shape: [batch_size]
            cur_batch_len = min(self.config.batch_size, self.split_size - cur_ptr)
            for video_idx in range(cur_batch_len):
                cur_video_name = self.video_names[video_idx + cur_ptr]
                batch_video_name.append(cur_video_name)
                cur_feat_rgb = self.feat_rgb[cur_video_name]['i3d_features']  # [whole_video_feat_len, 1024]
                cur_feat_flow = self.feat_flow[cur_video_name]['i3d_features']
                if self.config.feat_mode == 'rgb':
                    cur_feat = cur_feat_rgb
                elif self.config.feat_mode == 'flow':
                    cur_feat = cur_feat_flow
                else:
                    # Both streams: truncate to the shorter one and concat.
                    align_len = cur_feat_flow.shape[0]
                    cur_feat = np.concatenate((cur_feat_rgb[:align_len], cur_feat_flow[:align_len]), axis=-1)  # [whole_video_feat_len, feat_dimension]
                cur_feat_size = cur_feat.shape[0]
                assert cur_feat.shape[1] == self.config.feat_dimension, 'feat_dimension in config is {}, but the actual value read from the file is {}. They must match.'.format(
                    self.config.feat_dimension, cur_feat.shape[1])
                if self.split == 'train':
                    window_size = min(self.config.window_size, cur_feat_size)
                else:
                    window_size = cur_feat_size
                # Random temporal crop at train time, full video otherwise
                # (the randint range is 0 for val/test).
                feat_start_idx = random.randint(0, cur_feat_size - window_size)
                feat_end_idx = feat_start_idx + window_size
                # shape: [window_size, feat_dimension]
                feat_sequence = cur_feat[feat_start_idx: feat_end_idx]
                batch_feat.append(feat_sequence)
                # For each step, collect the scores of the last recap_length
                # steps (start, end, action) as supervision.
                cur_scores = np.zeros((window_size, self.config.recap_length * 3))
                for i in range(feat_start_idx, feat_end_idx):
                    effective_cap_length = min(self.config.recap_length, i + 1)
                    for j in range(effective_cap_length):
                        cur_scores[i - feat_start_idx, j] = self.gt_scores[cur_video_name]['start_scores'][i - j]
                        cur_scores[i - feat_start_idx, j + self.config.recap_length] = \
                            self.gt_scores[cur_video_name]['end_scores'][i - j]
                        cur_scores[i - feat_start_idx, j + self.config.recap_length * 2] = \
                            self.gt_scores[cur_video_name]['action_scores'][i - j]
                batch_score.append(cur_scores)
            batch_feat_masked, batch_score_masked, batch_mask = self.mask_process(batch_feat, batch_score)
            balanced_score_mask = self.balance_mask_func(batch_score_masked, ratio=self.config.balance_ratio)
            if self.split == 'train':
                yield [batch_feat_masked, batch_score_masked, balanced_score_mask], []
            if self.split == 'val':
                yield [batch_feat_masked, batch_score_masked, balanced_score_mask], []
            if self.split == 'test':
                balanced_score_mask_dummy = np.ones_like(balanced_score_mask)
                yield [batch_feat_masked, batch_score_masked, balanced_score_mask_dummy, cur_video_name]
            cur_ptr += cur_batch_len
            if cur_ptr == self.split_size:
                # Wrap around for the next epoch.
                cur_ptr = 0
                if self.split == 'train':
                    random.shuffle(self.video_names)

    @property
    def batch_num(self):
        # Number of batches per epoch (last batch may be smaller).
        return int(np.ceil(float(self.split_size) / self.config.batch_size))
| 2.21875 | 2 |
core/base/pydantic.py | cleiveliu/django-template | 0 | 12765512 | from pydantic import BaseModel, ValidationError, EmailStr, Field
| 1.328125 | 1 |
tools/handle_noise_wavs.py | BaiYuhaoSpiceeYJ/SEGAN_denoise | 0 | 12765513 | <reponame>BaiYuhaoSpiceeYJ/SEGAN_denoise
### Convert the MP3 audio files in the directory to WAV ###
import glob
import os
import librosa
import time

noise_path = r'C:\Users\SpiceeYJ\Desktop\nn'

mp3_files = glob.glob(noise_path+ r'\*.mp3')
for mp3_file in mp3_files:
    filename = (mp3_file.split('\\')[-1]).split('.')[0]
    print(filename)
    # ffmpeg: decode the MP3 at 8 kHz into a WAV, then delete the MP3.
    x='ffmpeg -i {} -ar 8000 {}'.format(noise_path+'\\'+filename+'.mp3',noise_path+'\\'+filename+'.wav')
    os.system(r'{}'.format(x))
    y='del {}'.format(noise_path+'\\'+filename+'.mp3')
    os.system(r'{}'.format(y))

### Resample the WAV audio files to 8 kHz ###
noise_8k_path = noise_path + '-8k'
if not os.path.exists(noise_8k_path):
    os.mkdir(noise_8k_path)

wav_files = glob.glob(noise_path + r'\*.wav')
for wav_file in wav_files:
    filename = (wav_file.split('\\')[-1]).split('.')[0]
    print(filename)
    # Strip everything but the text between the last '(' and first ')' so the
    # output names are clean, then resample into the -8k directory.
    os.rename(noise_path + '\\' + filename + '.wav',
              noise_path + '\\' + (filename.split('(')[-1]).split(')')[0] + '.wav', )
    x = 'ffmpeg -i {} -ar 8000 {}'.format(noise_path + '\\' + (filename.split('(')[-1]).split(')')[0] + '.wav',
                                          noise_8k_path + '\\' + (filename.split('(')[-1]).split(')')[0] + '-8k.wav')
    os.system(r'{}'.format(x))

### Convert the audio to 16-bit sample depth ###
noise_8k_16bit_path = noise_8k_path + '-16bit'
if not os.path.exists(noise_8k_16bit_path):
    os.mkdir(noise_8k_16bit_path)

wav_files = glob.glob(noise_8k_path+ r'\*.wav')
for wav_file in wav_files:
    filename = (wav_file.split('\\')[-1]).split('.')[0]
    print(filename)
    # sox: force 16-bit output into the -16bit directory.
    x='sox {} -b 16 {}'.format(noise_8k_path+'\\'+filename+'.wav',noise_8k_16bit_path+'\\'+filename+'-16bit.wav')
    os.system(r'{}'.format(x))
### Split the noise recordings into 4-second segments ###
noise_8k_16bit_frame_path = noise_8k_16bit_path + '-frame2'
if not os.path.exists(noise_8k_16bit_frame_path):
    os.mkdir(noise_8k_16bit_frame_path)

wav_files = glob.glob(noise_8k_16bit_path + r'\*.wav')
def calculate_time(time):
    """Split a duration in whole seconds into a (minutes, seconds) pair."""
    minutes, seconds = divmod(time, 60)
    return minutes, seconds
for wav_file in wav_files:
    duration = librosa.get_duration(filename=wav_file)
    filename = (wav_file.split('\\')[-1]).split('.')[0]
    print(filename)
    # Clips shorter than 2 seconds are skipped entirely.
    if duration < 2:
        pass
    else:
        i = 0
        count = 0
        # Cut 4-second segments starting at t = i while at least 4 s remain.
        # NOTE(review): i advances by only 1 s per cut, so consecutive segments
        # overlap by 3 s — confirm the overlap is intended (else use i += 4).
        while duration - i > 4:
            start_min, start_sec = calculate_time(i)
            x = 'ffmpeg -i {} -ss 00:{}:{} -t 00:00:04 {}'.format(noise_8k_16bit_path + '\\' + filename + '.wav', \
                                                                  start_min, start_sec,
                                                                  noise_8k_16bit_frame_path + '\\' + filename + '-' + str(
                                                                      count) + '.wav')
            os.system(r'{}'.format(x))
            i += 1
            count += 1
        # Emit the final partial segment only if at least 2 s are left.
        if duration - i < 2:
            pass
        else:
            left = round(duration - i)
            start_min, start_sec = calculate_time(i)
            x = 'ffmpeg -i {} -ss 00:{}:{} -t 00:00:{} {}'.format(noise_8k_16bit_path + '\\' + filename + '.wav', \
                                                                  start_min, start_sec, left,
                                                                  noise_8k_16bit_frame_path + '\\' + filename + '-' + str(
                                                                      count) + '.wav')
            os.system(r'{}'.format(x))
python/common/dict.py | yudongjin/public | 0 | 12765514 | <gh_stars>0
#!/usr/bin/env python
#coding=utf-8
'''
File Name: dict.py
Author: dongjin.ydj
mail: <EMAIL>
Created Time: Thu 23 Jun 2016 11:36:31 AM CST
'''
'''
python dict.py
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def dict_to_list(d, value_build_fun = lambda key, value : [key ,value]):
    """Flatten dict *d* into a list with one entry per key, each entry
    produced by value_build_fun(key, value)."""
    return [value_build_fun(key, d[key]) for key in d]
def load_dict(path, split_key = '\t', key_value_build_fun = lambda items : (items[0], items[1])):
    """Load a dict from a text file.

    Each non-empty line is split on *split_key*; *key_value_build_fun* turns
    the resulting list of fields into a (key, value) pair. Later lines with
    the same key overwrite earlier ones.
    """
    res = {}
    # Context manager guarantees the handle is closed even if parsing raises
    # (the original opened the file and relied on an explicit close()).
    with open(path) as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            items = line.split(split_key)
            key, value = key_value_build_fun(items)
            res[key] = value
    return res
def save_dict(path, dict_save, split_key = '\t', value_build_fun = lambda value : value):
    """Write *dict_save* to *path*, one "key<split_key>value" line per entry.

    *value_build_fun* serialises each value before writing.
    """
    # `with` closes (and flushes) the file even if value_build_fun raises.
    with open(path, 'w') as f:
        for key in dict_save:
            f.write('%s%s%s\n' % (str(key), split_key, value_build_fun(dict_save[key])))
if __name__ == '__main__':
    # Demo / round-trip check. NOTE: Python 2 print statements — this module
    # targets Python 2 (see reload(sys) above) and will not run under Python 3.
    d = {'key': ['v1', 'v2', 'v3'], 'key2': ['v2', 'v3']}
    l = dict_to_list(d)
    print l
    # Serialise list values as comma-joined strings, then parse them back.
    save_dict('dict_test', d, ':', lambda value : ','.join(value))
    res = load_dict('dict_test', ':', lambda items : (items[0], items[1].split(',')))
    print res
| 2.96875 | 3 |
run.py | helpsterTee/tes-mapper | 0 | 12765515 | import struct
import sys
import time
import json
import os
from PyQt5.QtCore import QDir, Qt
from PyQt5.QtGui import QBrush, QPen
from PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog,
QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget,
QGraphicsScene, QGraphicsView)
class Button(QToolButton):
    """Tool button that expands horizontally and reports a taller size hint."""
    def __init__(self, text, parent=None):
        super(Button, self).__init__(parent)
        self.setText(text)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
    def sizeHint(self):
        hint = super(Button, self).sizeHint()
        hint.setHeight(hint.height() + 20)
        hint.setWidth(max(hint.width(), hint.height()))
        return hint
class MainWin(QMainWindow):
    """Main window: converts TES / NMEA GPS track logs into Leaflet/Mapbox
    HTML maps rendered from template.html."""
    # Paths of the user-selected input files.
    # NOTE(review): class-level mutable attribute shared by all instances;
    # acceptable for a single-window app but fragile otherwise.
    files = []
    def __init__(self):
        super(MainWin, self).__init__()
        self.setupGUI()
        self.setWindowTitle("TES Mapper")
        self.resize(500, 400)
        # config
        # Loads the Mapbox access token etc. from config.json in the CWD.
        # NOTE(review): the file handle is never closed explicitly.
        self.config = json.loads(open('config.json').read())
    def setupGUI(self):
        """Build the two tabs: conversion controls and a (stub) GPS view."""
        tabmain = QTabWidget()
        self.setCentralWidget(tabmain)
        # GPS Converter
        widget = QWidget()
        mainLayout = QGridLayout()
        widget.setLayout(mainLayout)
        self.openButton = self.createButton("Open Files", self.openFileClicked)
        mainLayout.addWidget(self.openButton, 0, 0)
        self.listWidget = QListWidget()
        mainLayout.addWidget(self.listWidget, 2, 0, 1, 2)
        self.runConvButton = self.createButton("Run Conversion", self.runConversionClicked)
        mainLayout.addWidget(self.runConvButton, 0, 1)
        self.runConvButton.setEnabled(False)
        self.multiCheckbox = self.createCheckbox("Multiple Markers per Map")
        mainLayout.addWidget(self.multiCheckbox, 1,1)
        tabmain.addTab(widget, "GPS Data Conversion")
        # GPS View
        gpswidget = QWidget()
        gpsLayout = QGridLayout()
        gpswidget.setLayout(gpsLayout)
        gview = QGraphicsView()
        scene = QGraphicsScene()
        gview.setScene(scene)
        gpsLayout.addWidget(gview)
        blueBrush = QBrush(Qt.blue)
        mypen = QPen(Qt.black)
        # Placeholder rectangle; the real visualisation is not implemented yet.
        scene.addRect(100, 0, 80, 100, mypen, blueBrush)
        tabmain.addTab(gpswidget, "GPS Visualisation")
    def createButton(self, text, member):
        """Return a Button whose clicked signal is connected to *member*."""
        button = Button(text)
        button.clicked.connect(member)
        return button
    def createCheckbox(self, text):
        """Return a plain QCheckBox with the given label."""
        checkbox = QCheckBox(text)
        return checkbox
    def openFileClicked(self):
        """Let the user pick input files; enable conversion iff any were chosen."""
        fnames = QFileDialog.getOpenFileNames(self, 'Open files',
            './')
        self.listWidget.clear()
        self.files.clear()
        if len(fnames[0]) > 0:
            self.runConvButton.setEnabled(True)
            for f in fnames[0]:
                self.listWidget.addItem(f)
                self.files.append(f)
        else:
            self.runConvButton.setEnabled(False)
    def runConversionClicked(self):
        """Convert every selected file to an HTML map.

        Files with a .nmea extension are parsed as NMEA $GPRMC sentences;
        everything else is read as the binary TES record format. With
        "Multiple Markers per Map" checked, all tracks are merged into a
        single multimap.html instead of one HTML file per input.
        """
        multiPositions = []
        multiTimediffs = []
        multiNames = []
        for fi in self.files:
            positions = []
            timediffs = []
            lasttime = -1
            printedtime = False
            with open(fi, 'rb') as f:
                print("Processing ["+fi+"]")
                if os.path.splitext(fi)[1].lower() == ".nmea":
                    print("\tNMEA parsing mode")
                    for line in f:
                        parts = line.decode().split(',')
                        if parts[0] == "$GPRMC":
                            parttime = parts[1]
                            status = parts[2] #A okay, V Warnings
                            lat = parts[3]
                            latori = parts[4]
                            lon = parts[5]
                            lonori = parts[6] #1 is fix, 0 is no fix
                            speed = parts[7] #knots
                            course = parts[8] # to true north
                            date = parts[9]
                            signalValid = parts[12] # signal integrity Axx valid, Nxx invalid or no signal
                            mytime = time.strptime(date[0:2]+"."+date[2:4]+"."+"20"+date[4:6]+" - "+parttime[0:2]+':'+parttime[2:4]+':'+parttime[4:6], '%d.%m.%Y - %H:%M:%S')
                            if len(lat) > 0 and len(lon) > 0:
                                # convert to decimal degrees (ddmm.mmmm / dddmm.mmmm)
                                dlat = int(lat[0:2])
                                dlon = int(lon[0:3])
                                mlat = float(lat[2:])/60.0
                                mlon = float(lon[3:])/60.0
                                rlat = dlat + mlat
                                rlon = dlon + mlon
                                positions.append([rlat, rlon])
                                if printedtime == False:
                                    print("\t"+date[0:2]+"."+date[2:4]+"."+"20"+date[4:6]+" - "+parttime[0:2]+':'+parttime[2:4]+':'+parttime[4:6])
                                    print("\tInit at: "+str([rlat, rlon]))
                                    printedtime = True
                                # Millisecond deltas between consecutive fixes;
                                # the first fix gets delta 0.
                                ticks = int(time.mktime(mytime))
                                myticks = 0
                                if lasttime == -1:
                                    lasttime = ticks
                                    myticks = 0
                                else:
                                    myticks = ticks - lasttime
                                    lasttime = ticks
                                timediffs.append(myticks*1000)
                    if self.multiCheckbox.checkState() == Qt.Unchecked:
                        # Single-map mode: fill the HTML template for this file.
                        with open('template.html', 'r') as template:
                            tempstr = template.read()
                            tempstr = tempstr.replace('_ACCESS_TOKEN_', self.config["mapbox_access_token"])
                            tempstr = tempstr.replace('_REPLACE_POS_', str(positions))
                            tempstr = tempstr.replace('_REPLACE_TIME_', str(timediffs))
                            tempstr = tempstr.replace('_REPLACE_MULTINAMES_', str([os.path.split(fi)[1]]))
                            tempstr = tempstr.replace('_REPLACE_MULTIMAP_', "false")
                            out = fi.replace('.NMEA', '.nmea')
                            out = out.replace('.nmea', '.html')
                            out = open(out, 'w')
                            out.write(tempstr)
                            out.close()
                    else:
                        multiPositions.append(positions)
                        multiTimediffs.append(timediffs)
                        multiNames.append(os.path.split(fi)[1])
                else:
                    print("\tTES parsing mode")
                    # Fixed-size binary records: int16 type, uint32 packed date,
                    # int32 lat, int32 lon (both 1e-7 degrees), int16 altitude.
                    while True:
                        bytes = f.read(2)
                        if len(bytes) < 2:
                            break # exit if eof
                        # type of point
                        types = struct.unpack('=h', bytes)
                        #if types[0] & 1 == 1:
                        #    print('Split mark')
                        #elif types[0] & 2 == 1:
                        #    print('Interest point')
                        #elif types[0] & 4 == 1:
                        #    print('Track point')
                        # date of record: bit-packed s(6) m(6) h(5) d(5) mo(4) y(6)
                        bytes = f.read(4)
                        date = struct.unpack('=L', bytes)
                        s = int(0)
                        smask = 63
                        s = (date[0] & smask)
                        m = int(0)
                        mmask = smask << 6
                        m = (date[0] & mmask) >> 6
                        h = int(0)
                        hmask = 31 << 12
                        h = (date[0] & hmask) >> 12
                        d = int(0)
                        dmask = 31 << 17
                        d = (date[0] & dmask) >> 17
                        mo = int(0)
                        momask = 15 << 22
                        mo = (date[0] & momask) >> 22
                        y = int(0)
                        ymask = 63 << 26
                        y = ((date[0] & ymask) >> 26) + 2000
                        if printedtime == False:
                            print('\tDate: '+str(d)+'.'+str(mo)+'.'+str(y)+" - "+str(h)+':'+str(m)+':'+str(s))
                            printedtime = True
                        mytime = time.strptime(str(d)+'.'+str(mo)+'.'+str(y)+" - "+str(h)+':'+str(m)+':'+str(s), '%d.%m.%Y - %H:%M:%S')
                        ticks = int(time.mktime(mytime))
                        myticks = 0
                        if lasttime == -1:
                            lasttime = ticks
                            myticks = 0
                        else:
                            myticks = ticks - lasttime
                            lasttime = ticks
                        # lat
                        bytes = f.read(4)
                        lat = struct.unpack('=l', bytes)
                        #print('\tLat: '+str(lat[0]*1e-7))
                        # lon
                        bytes = f.read(4)
                        lon = struct.unpack('=l', bytes)
                        #print('\tLon: '+str(lon[0]*1e-7))
                        # alt (read but unused)
                        bytes = f.read(2)
                        alt = struct.unpack('=h', bytes)
                        #print('\tAlt:'+str(alt[0]))
                        #print('')
                        positions.append([lat[0]*1e-7,lon[0]*1e-7]);
                        timediffs.append(myticks*1000)
                    if self.multiCheckbox.checkState() == Qt.Unchecked:
                        with open('template.html', 'r') as template:
                            tempstr = template.read()
                            tempstr = tempstr.replace('_ACCESS_TOKEN_', self.config["mapbox_access_token"])
                            tempstr = tempstr.replace('_REPLACE_POS_', str(positions))
                            tempstr = tempstr.replace('_REPLACE_TIME_', str(timediffs))
                            tempstr = tempstr.replace('_REPLACE_MULTINAMES_', str([os.path.split(fi)[1]]))
                            tempstr = tempstr.replace('_REPLACE_MULTIMAP_', "false")
                            fi = fi.replace('.TES', '.html')
                            out = open(fi, 'w')
                            out.write(tempstr)
                            out.close()
                    else:
                        multiPositions.append(positions)
                        multiTimediffs.append(timediffs)
                        multiNames.append(os.path.split(fi)[1])
        # processing of individual files finishes
        if self.multiCheckbox.checkState() == Qt.Checked:
            print("in Multimode")
            # Multi-map mode: one HTML file containing all tracks.
            with open('template.html', 'r') as template:
                tempstr = template.read()
                tempstr = tempstr.replace('_ACCESS_TOKEN_', self.config["mapbox_access_token"])
                tempstr = tempstr.replace('_REPLACE_POS_', str(multiPositions))
                tempstr = tempstr.replace('_REPLACE_TIME_', str(multiTimediffs))
                tempstr = tempstr.replace('_REPLACE_MULTINAMES_', str(multiNames))
                tempstr = tempstr.replace('_REPLACE_MULTIMAP_', "true")
                out = open("multimap.html", 'w')
                out.write(tempstr)
                out.close()
        QMessageBox.information(self, "Information",
                "Processing has finished")
if __name__ == '__main__':
    app = QApplication(sys.argv)
    mainWin = MainWin()
    mainWin.show()
    # Enter the Qt event loop; its exit status is propagated to the shell.
    sys.exit(app.exec_())
| 2.28125 | 2 |
tests/datasets/serializers/test_serializers.py | Faysalali534/wazimap-ng | 0 | 12765516 | <reponame>Faysalali534/wazimap-ng
import pytest
from wazimap_ng.datasets.serializers import MetaDataSerializer
from tests.datasets.factories import MetaDataFactory, LicenceFactory
@pytest.fixture
def licence():
    """A Licence with fixed name/url matched by the assertions below."""
    return LicenceFactory(name="licence name", url="abc url")
@pytest.fixture
def metadata(licence):
    """MetaData linked to the licence fixture, with fixed field values."""
    return MetaDataFactory(source="XYZ", url="http://example.com", description="ABC", licence=licence)
@pytest.mark.django_db
class TestMetaDataSerializer:
    """Serialisation contract of MetaDataSerializer."""
    def test_output(self, metadata):
        """Serialised form exposes description/source/url plus a nested licence."""
        serializer = MetaDataSerializer(metadata)
        # Removed leftover debug print(serializer.data) — pytest reports the
        # full diff on assertion failure anyway.
        assert serializer.data == {
            "description": "ABC", "source": "XYZ", "url": "http://example.com", "licence": {
                "name": "licence name", "url": "abc url"
            }
        }
| 2.625 | 3 |
backend/styleopt.py | mrzzy/Portfolio-I | 1 | 12765517 | #
# styleopt.py
# Artistic Style Transfer
# Optimisation method
# as defined in Gatys et. al
#
import os
import api
import numpy as np
import tensorflow as tf
import keras.backend as K
import matplotlib.pyplot as plt
import stylefn
from PIL import Image
from keras.models import Model, Sequential
from util import apply_settings
from tensorflow.contrib.opt import ScipyOptimizerInterface
from datetime import datetime
# Style transfer settings
# NOTE: the following are default settings and may be overriden
SETTINGS = {
"image_shape": (512, 512, 3),
# Optimisation settings
"learning_rate": 10,
"n_epochs": 100,
}
# Represents the computational graph that will perform style transfer using the
# optimisation method
class TransfuseGraph:
    """TF1/Keras graph that optimises a pastiche image (Gatys-style transfer)."""
    # Create a style transfer graph that caters the style and content images shapes
    # with the given style transfer settings overrides & pastiche init value
    def __init__(self, pastiche_init, settings):
        self.settings = settings
        # Define tensor shapes — all three share the configured image_shape.
        self.style_shape = self.settings["image_shape"]
        self.content_shape = self.settings["image_shape"]
        self.pastiche_shape = self.settings["image_shape"]
        self.build(pastiche_init)
    # Build style transfer graph for the given pastiche_init value
    def build(self, pastiche_init):
        # Drop any previous graph so repeated builds don't accumulate ops.
        K.clear_session()
        # Setup content and style tensors (fed at run time)
        self.content_op = K.placeholder(self.content_shape, name="content")
        self.style_op = K.placeholder(self.style_shape, name="style")
        # Setup pastiche tensor derieved from random noise — the only trainable var.
        self.pastiche_op = K.variable(pastiche_init, name="pastiche")
        # Build style transfer graph
        self.loss_op = stylefn.build_loss(self.pastiche_op, self.content_op,
            self.style_op, self.settings)
        # Setup optimisation: L-BFGS-B via SciPy, 20 inner iterations per
        # transfer() call, optimising only the pastiche variable.
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.loss_op, method='L-BFGS-B', options={'maxiter': 20},
            var_list=[self.pastiche_op])
        # Setup tensorboard
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter("./logs/{}-{}".format(
            self.settings, datetime.now().strftime("%H:%M:%S")))
        self.session = K.get_session()
    # Perform one iteration of style transfer using the inputs in feed dic
    def transfer(self, feed):
        # Runs one L-BFGS minimisation round (up to maxiter inner steps).
        self.optimizer.minimize(self.session, feed_dict=feed)
# Callback for writing tensorboard infomation given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_tensorboard(graph, feed, i_epoch):
    """Evaluate the merged TF summaries and append them to the event log."""
    tb_summary = graph.session.run(graph.summary_op, feed_dict=feed)
    graph.writer.add_summary(tb_summary, i_epoch)
# Callback for display progress infomation given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_progress(graph, feed, i_epoch):
    """Print the current loss together with the epoch counter."""
    current_loss = graph.session.run(graph.loss_op, feed_dict=feed)
    total_epochs = graph.settings["n_epochs"]
    print("[{}/{}] loss: {:e}".format(i_epoch, total_epochs, current_loss))
# Callback to display current pastiche given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_pastiche(graph, feed, i_epoch):
    """Render the current pastiche tensor as a live matplotlib preview."""
    raw = graph.session.run(graph.pastiche_op, feed_dict=feed)
    preview = stylefn.deprocess_image(raw, graph.pastiche_shape)
    # Draw non-blocking, then clear the figure for the next update.
    plt.imshow(np.asarray(preview))
    plt.draw()
    plt.pause(1e-6)
    plt.clf()
# Perform style transfer using the optimisation method on the given content imag
# using the style from the given style image, parameterised by settings
# Applys the given style transfer settings before performing style transfer
# Every callback_step number of epochs, will call the given callbacks
# Returns the pastiche, the results of performing style transfer
def transfer_style(content_image, style_image, settings={}, callbacks=[], callback_step=1):
    """Run optimisation-based style transfer and return the pastiche PIL image.

    content_image/style_image: PIL images; settings: overrides merged onto
    SETTINGS; callbacks: callables (graph, feed, i_epoch) invoked every
    callback_step epochs.
    NOTE(review): the mutable defaults are safe here only because neither
    argument is mutated.
    """
    # Apply setting overrides
    settings = apply_settings(settings, SETTINGS)
    print(settings)
    # Preprocess image data
    image_shape = settings["image_shape"]
    content = stylefn.preprocess_image(content_image, image_shape)
    style = stylefn.preprocess_image(style_image, image_shape)
    # Define limits for generated pastiche (valid BGR range after mean-centering)
    min_limits = - stylefn.IMG_BGR_MEAN
    max_limits = 255.0 - stylefn.IMG_BGR_MEAN
    # Build style transfer graph from random-noise init
    pastiche_init = np.random.uniform(size=image_shape) * 255.0 - 127.5
    graph = TransfuseGraph(pastiche_init=pastiche_init, settings=settings)
    session = graph.session
    session.run(tf.global_variables_initializer())
    # Build the clip/assign op ONCE outside the loop: the original rebuilt
    # tf.clip_by_value every iteration (growing the graph) and never ran the
    # assign — in TF1 Variable.assign() only *creates* an op, so the clipping
    # silently never happened.
    clip_assign_op = graph.pastiche_op.assign(
        tf.clip_by_value(graph.pastiche_op, min_limits, max_limits))
    # Optimise style transfer graph to perform style transfer
    feed = {graph.content_op: content, graph.style_op: style}
    n_epochs = settings["n_epochs"]
    for i_epoch in range(1, n_epochs + 1):
        # Keep the pastiche inside the valid pixel range.
        session.run(clip_assign_op)
        # Perform style transfer
        graph.transfer(feed)
        # Call callbacks
        if i_epoch % callback_step == 0:
            for callback in callbacks: callback(graph, feed, i_epoch)
    # Deprocess style transfered image
    pastiche = session.run(graph.pastiche_op, feed_dict=feed)
    pastiche_image = stylefn.deprocess_image(pastiche, image_shape)
    return pastiche_image
return pastiche_image
if __name__ == "__main__":
content_image = Image.open("data/Tuebingen_Neckarfront.jpg")
style_image = Image.open("data/stary_night.jpg")
settings = {
"image_shape": (32, 32, 3),
"n_epochs": 100
}
pastiche_image = transfer_style(content_image, style_image, settings=settings,
callbacks=[callback_pastiche, callback_progress,
callback_tensorboard],
callback_step=20)
pastiche_image.save("pastiche.jpg")
| 2.484375 | 2 |
Desafios/desafio019.py | sthe-eduarda/Curso-de-python | 0 | 12765518 | from random import randint
a = input('Nome do 1° aluno: ')
b = input('Nome do 2° aluno: ')
c = input('Nome do 3° aluno: ')
d = input('Nome do 4° aluno: ')
esc = randint(1, 4)
print('=' * 12)
print(f'Aluno 1: {a}')
print(f'Aluno 2: {b}')
print(f'Aludo 3: {c}')
print(f'Aludo 4: {d}')
print(f'Escolhido: aluno {esc}')
| 3.53125 | 4 |
FixedBGExamples/RealScalarBH/plotmom.py | KAClough/GRChombo-1 | 3 | 12765519 | # A simple python script to plot the GW
# signals over time, for a chosen mode
import numpy as np;
import matplotlib.pyplot as plt;
# output data for setup
M = 1.0
mu = 0.05
r = 300
symmetry = 4
# make the plot
fig = plt.figure()
# volume integral dataset out
data1 = np.loadtxt("VolumeIntegrals.dat")
timedata = data1[:,0]
dM = symmetry*data1[:,3] - symmetry*data1[0,3]
Source = symmetry*data1[:,4]
# flux dataset out
data1 = np.loadtxt("SurfaceIntegrals.dat")
labelstring = "integral(Flux * dt)"
timedata = data1[:,0]
dt = timedata[1] - timedata[0]
NetEiFlux = data1[:,3]
NetEoFlux = data1[:,6]
FEodt = np.zeros_like(timedata)
FEidt = np.zeros_like(timedata)
Source_dt = np.zeros_like(timedata)
for i, F in enumerate(timedata) :
if (i > 0) :
FEodt[i] += FEodt[i-1] + NetEoFlux[i] * dt
FEidt[i] += FEidt[i-1] + NetEiFlux[i] * dt
Source_dt[i] += Source_dt[i-1]+ Source[i] * dt
plt.plot(timedata, FEodt, '-', lw = 1.0, label="Mdot outer dt")
plt.plot(timedata, FEidt, '-', lw = 1.0, label="Mdot inner dt")
plt.plot(timedata, Source_dt, '-', lw = 1.0, label="Source dt")
plt.plot(timedata, dM, '-', lw = 1.0, label="M-M0")
plt.plot(timedata, FEidt - FEodt + Source_dt, '--', lw = 1.0, label="check M-M0")
# make the plot look nice
plt.xlabel("time")
plt.ylabel("Change in Cloud Mom")
#plt.xlim(0, 100)
#plt.ylim(-10, 10)
plt.legend(loc=0)
plt.grid()
# save as png image
filename = "MvsT.png"
plt.savefig(filename)
| 2.265625 | 2 |
FLASH4.2.1_save/tools/python/flmake/lsruns.py | mtsafarzadeh/FLASHverES | 1 | 12765520 | <reponame>mtsafarzadeh/FLASHverES<gh_stars>1-10
import os
import json
# Relative imports needed
from .. import FLASH_SRC_DIR
from .setup_globals import gvars
from .utils import hash_list_to_dict, hash_dict_to_str
USAGE = ("Searches through local sub-directories\n"
"and lists those which contain flash runs\n"
"(and the run hashes) in a tree.\n\n"
"usage: flmake ls-runs")
def main(ns, rc):
    """Lists flash run dirs as a tree.

    NOTE: Python 2 module (print statement below). ns/rc are the argparse
    namespace and run-control config passed by the flmake command dispatcher —
    presumably; neither is used here. TODO confirm against the dispatcher.
    """
    gvars.init(FLASH_SRC_DIR)
    # Immediate subdirectories of the CWD that contain a run description file.
    subdirs = [f for f in os.listdir('.') if os.path.isdir(f)]
    rundirs = [d for d in subdirs if os.path.exists(os.path.join(d, gvars.desc_filename))]
    # grab the appropriate data out of the description files
    labels = {}
    hash_lists = []
    for rundir in rundirs:
        with open(os.path.join(rundir, gvars.desc_filename)) as desc_file:
            desc = json.load(desc_file)
        if 'run' not in desc:
            continue
        labels[desc['run']['id']] = rundir + os.path.sep
        # Full lineage of the run: its ancestor hashes plus its own id.
        hl = desc['run']['history'] + [desc['run']['id']]
        hash_lists.append(hl)
    # Create graph stucture
    hash_dict = {}
    hash_lists.sort()
    for hash_list in hash_lists:
        hash_dict = hash_list_to_dict(hash_list, hash_dict)
    # Make into string, print, and return (drop the trailing newline)
    s = hash_dict_to_str(hash_dict, labels)[:-1]
    if 1 < len(s):
        print s
| 2.484375 | 2 |
board.py | ravitejavalluri/departuresns | 0 | 12765521 | import os
import csv
import datetime
import jinja2
import webapp2
from webapp2_extras import json
from google.appengine.api import urlfetch
from google.appengine.api import memcache
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def fetchData():
    """Return North Station departures as a list of CSV row dicts, or None.

    Results are memcached for 15 s. The upstream CSV is fetched with an
    If-None-Match header so a 304 response lets us reuse the last payload.
    Returns None on any other upstream error.
    """
    departures = memcache.get('departures')
    if departures is not None:
        return departures
    headers = {}
    etag = memcache.get('etag')
    if etag:
        headers['If-None-Match'] = etag
    result = urlfetch.fetch(
        url='http://developer.mbta.com/lib/gtrtfs/Departures.csv',
        headers=headers)
    #result = urlfetch.fetch('http://localhost:8080/static/example.csv')
    if result.status_code == 200:
        # NOTE(review): assumes urlfetch exposes the header under lower-case
        # 'etag' — confirm header-name normalisation.
        memcache.set('etag', result.headers['etag'])
        memcache.set('lastresult', result.content)
        result = result.content
    elif result.status_code == 304:
        # Not modified: reuse the body cached from the last 200 response.
        result = memcache.get('lastresult')
    else:
        return None
    # Keep only rows originating at North Station.
    response = []
    boardcsv = csv.DictReader(result.splitlines())
    for row in boardcsv:
        if row['Origin'] == 'North Station':
            response.append(row)
    memcache.set('departures', response, time=15)
    return response
class BoardHandler(webapp2.RequestHandler):
    """Serve the North Station departures as a JSON document."""
    def get(self):
        departures = fetchData()
        if departures is None:
            # Upstream failure: emit nothing, matching the original behaviour.
            return
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.encode(departures))
class NoJsHandler(webapp2.RequestHandler):
    """Render the departures board server-side (no-JavaScript fallback)."""
    def get(self):
        departures = fetchData()
        if departures is not None:
            nowtime = datetime.datetime.now()
            # Replace each epoch timestamp with a human-readable HH:MM string.
            for row in departures:
                timestamp = datetime.datetime.fromtimestamp(int(row['ScheduledTime']))
                row['ScheduledTime'] = timestamp.strftime("%H:%M")
            template_values = {
                'refreshtime': 'last updated',
                'dayofweek': nowtime.strftime("%A"),
                'curdate': nowtime.strftime("%Y-%m-%d"),
                'curtime': nowtime.strftime("%H:%M") + ' utc',
                'tracks': departures,
            }
            template = JINJA_ENVIRONMENT.get_template('board.templ')
            self.response.write(template.render(template_values))
# WSGI route table: /board returns JSON, /nojs.html renders the HTML board.
application = webapp2.WSGIApplication([
    ('/board', BoardHandler),
    ('/nojs.html', NoJsHandler),
], debug=True)
| 2.71875 | 3 |
app/utils/environment.py | RandyDeng/InterviewScheduler | 0 | 12765522 | import datetime
import os
# Environmental variables are stored inside a .env file for
# testing locally and on Heroku for staging and production.
# These variables contain extremely sensitive information regarding
# the project and should not be disclosed to anyone except
# the developers working on this project.
# Snapshot of the required environment variables; values are None when unset.
ENV_VARIABLES = {
    'BASE_URL': os.environ.get('BASE_URL'),
    'CSRF_SESSION_KEY': os.environ.get('CSRF_SESSION_KEY'),
    # Restored: this getter was mangled by a credential scrubber
    # ("<PASSWORD>(...)"), which made the module a SyntaxError.
    'GMAIL_PASSWORD': os.environ.get('GMAIL_PASSWORD'),
    'GMAIL_USERNAME': os.environ.get('GMAIL_USERNAME'),
    'MONGODB_URI': os.environ.get('MONGODB_URI'),
    'SECRET_KEY': os.environ.get('SECRET_KEY'),
}
# The following is a dictionary of all the available
# positions in The Hive. Each position is matched with its
# abbreviated version. Not all positions will be available
# at all times and some may not go through the Interview Scheduler
# process.
# Currently, only PI and the core officer positions are supported.
POSITIONS = {
'President': 'President',
'VP': 'Vice President',
'DoF': 'Director of Finances',
'DoO': 'Director of Operations',
'DoC': 'Director of Communications',
'DoN': 'Director of Networks',
'ADoF': 'Assistant Director of Finances',
'ADoO': 'Assistant Director of Operations',
'ADoC': 'Assistant Director of Communications',
'ADoN': 'Assistant Director of Networks',
'MPI': 'Master Peer Instructor',
'PI': 'Peer Instructor',
}
# These variables determine the start and end of each school semester.
# The starting dates are put slightly before the first day of class
# since most of The Hive hiring happens at the beginning of the semester
# rather than at the end.
FALL_START = {'month': 8, 'day': 15}
SPRING_START = {'month': 1, 'day': 1}
SUMMER_START = {'month': 5, 'day': 10}
# Determines the current semester.
# The semester start and end dates are hardcoded.
def current_semester():
    """Return the current semester as "<Season> <year>", e.g. "Fall 2024"."""
    now = datetime.date.today()
    fall_begin = datetime.date(year=now.year,
                               month=FALL_START['month'],
                               day=FALL_START['day'])
    spring_begin = datetime.date(year=now.year,
                                 month=SPRING_START['month'],
                                 day=SPRING_START['day'])
    spring_next = spring_begin.replace(year=now.year + 1)
    summer_begin = datetime.date(year=now.year,
                                 month=SUMMER_START['month'],
                                 day=SUMMER_START['day'])
    # Use >= so the first day of each semester is classified as that semester;
    # the previous strict > returned "NA" on exactly Aug 15, Jan 1 and May 10.
    if now >= fall_begin and now < spring_next:
        return ("Fall " + str(now.year))
    if now >= spring_begin and now < summer_begin:
        return ("Spring " + str(now.year))
    if now >= summer_begin and now < fall_begin:
        return ("Summer " + str(now.year))
    # Unreachable with inclusive starts; kept as a defensive fallback.
    return "NA"
# Determines the next 3 semesters.
# This is mainly used in the Applicant form to determine which
# semesters an applicant will be on campus.
def next_semesters():
    """Return the three semesters after the current one (oldest first),
    or None when the current semester cannot be determined."""
    season, *rest = current_semester().split()
    this_year = rest[0]
    coming_year = str(int(this_year) + 1)
    upcoming = {
        "Fall": ["Spring " + coming_year,
                 "Summer " + coming_year,
                 "Fall " + coming_year],
        "Spring": ["Summer " + this_year,
                   "Fall " + this_year,
                   "Spring " + coming_year],
        "Summer": ["Fall " + this_year,
                   "Spring " + coming_year,
                   "Summer " + coming_year],
    }
    return upcoming.get(season)
| 2.1875 | 2 |
demosys/effect_templates/cube_simple/dependencies.py | Contraz/demosys-py | 70 | 12765523 | <gh_stars>10-100
import os
from demosys.resources.meta import ProgramDescription
def local(path):
    """
    Prepend the effect package name to a path so resources
    can still be loaded when copied into a new effect package.
    """
    package_name = __name__.split('.')[-2]
    return os.path.join(package_name, path)
effect_packages = []
resources = [
ProgramDescription(label='cube_plain', path=local('cube_plain.glsl')),
]
| 1.703125 | 2 |
plot/plot_hyperopt.py | knowledgetechnologyuhh/goal_conditioned_RL_baselines | 15 | 12765524 | import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns;
from collections import deque
sns.set()
import glob2
import argparse
from cycler import cycler
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
from matplotlib.ticker import MaxNLocator
def load_results(file, dtype=None):
    """Parse a comma-separated results file into {column_name: column_array}.

    Returns None when the file is missing or has no data rows. Missing cells
    are filled with 0. *dtype*, when given, is forwarded to np.genfromtxt.
    """
    if not os.path.exists(file):
        return None
    # Context manager closes the handle (the original leaked it).
    with open(file, 'r') as f:
        lines = [line for line in f]
    if len(lines) < 2:
        return None
    keys = [name.strip() for name in lines[0].split(',')]
    if dtype is None:
        data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
    else:
        data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0., dtype=dtype)
    # A single data row comes back 1-D; normalise to (rows, columns).
    if data.ndim == 1:
        data = data.reshape(1, -1)
    assert data.ndim == 2
    assert data.shape[-1] == len(keys)
    return {key: data[:, idx] for idx, key in enumerate(keys)}
# def pad(xs, value=np.nan, maxlen=None):
# if maxlen is None:
# maxlen = np.max([len(x) for x in xs])
#
# padded_xs = []
# for x in xs:
# if x.shape[0] >= maxlen:
# padded_xs.append(x)
#
# padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
# x_padded = np.concatenate([x, padding], axis=0)
# assert x_padded.shape[1:] == x.shape[1:]
# assert x_padded.shape[0] == maxlen
# padded_xs.append(x_padded)
# return np.array(padded_xs)
#
# def smooth_curve(x, y):
# halfwidth = int(np.ceil(len(x) / 60)) # Halfwidth of our smoothing convolution
# k = halfwidth
# xsmoo = x
# ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
# mode='same')
# return xsmoo, ysmoo
def prepare_data(paths):
    """Scan run directories for params.json and return the set of parameter
    keys whose values vary across runs.

    NOTE(review): inter_dict and max_episodes are accumulated but never
    returned, and the results-loading code is commented out — only the
    var_param_keys set is produced.
    """
    inter_dict = {}
    var_param_keys = set()
    max_episodes = 0
    for curr_path in paths:
        if not os.path.isdir(curr_path):
            continue
        print('loading {}'.format(curr_path))
        # results = load_results(os.path.join(curr_path, 'mask_records.csv'))
        # if not results:
        #     print('skipping {}'.format(curr_path))
        #     continue
        with open(os.path.join(curr_path, 'params.json'), 'r') as f:
            params = json.load(f)
        # A key counts as "varying" once a second distinct value is seen.
        for k,v in params.items():
            if k not in inter_dict.keys():
                inter_dict[k] = [v]
            if v not in inter_dict[k]:
                inter_dict[k].append(v)
                var_param_keys.add(k)
        # max_episodes = max(max_episodes, len(results['episode']))
    return var_param_keys
def plot_epochs_success(data, percent_to_achieve, fig_dir):
    """For each kappa value, plot the median number of epochs needed to reach
    *percent_to_achieve* success rate as a function of the c_g value parsed
    from the config name, saving <fig_dir>/penalty_hyperopt_.png.

    *data* maps a config-name string to a list of runs where element [1] of
    each run is an array of per-epoch success rates — presumably produced by
    a loader elsewhere; TODO confirm against the caller. Dashed lines are the
    25th/75th percentiles across runs.
    """
    plt.clf()
    # fig = plt.figure(figsize=(20, 8))
    plt.figure(figsize=(9, 4.5))
    new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                  '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                  '#bcbd22', '#17becf']
    # plt.rc('axes', prop_cycle=(cycler('linestyle', ['-', '--', ':']) * cycler('color', new_colors)))
    surf_plot_data = {}
    uniform_sampling_epochs = []
    none_sampling_epochs = []
    kappa_s = set()
    rg_s = set()
    for config in sorted(data.keys()):
        epochs = []
        for d in data[config]:
            try:
                # First epoch whose success rate exceeds the threshold.
                epoch = min(np.argwhere(d[1] > percent_to_achieve))[0]
            except:
                print("Not enough data for {}".format(config))
                continue
            epochs.append(epoch)
        # epochs = [len(d[0]) for d in data[config]]
        if 'curriculum_sampling: none' in config:
            # Baseline without curriculum goal masking; tagged kappa = -1.
            none_sampling_epochs += epochs
            kappa_s.add(-1)
            continue
        median_epochs = np.median(epochs)
        min_perc = np.nanpercentile(epochs, 25, axis=0)
        max_perc = np.nanpercentile(epochs, 75, axis=0)
        avg_epochs = np.mean(epochs)
        n_runs = len(epochs)
        std_epochs = np.std(epochs)
        if 'stochastic3_' not in config:
            continue
        # Config names encode "stochastic3_<rg>_<?>_<kappa>...".
        rg = float(config.split("stochastic3_")[1].split("_")[0])
        rg_s.add(rg)
        kappa = float(config.split("stochastic3_")[1].split("_")[2])
        kappa_s.add(kappa)
        if rg not in surf_plot_data.keys():
            surf_plot_data[rg] = {}
        if kappa == 0.0:
            uniform_sampling_epochs += epochs
        surf_plot_data[rg][kappa] = (avg_epochs, std_epochs, n_runs, median_epochs, min_perc, max_perc)
    uniform_avg_epochs = np.mean(uniform_sampling_epochs)
    none_avg_epochs = np.mean(none_sampling_epochs)
    uniform_std_epochs = np.std(uniform_sampling_epochs)
    none_std_epochs = np.std(none_sampling_epochs)
    uniform_median_epochs = np.median(uniform_sampling_epochs)
    none_median_epochs = np.median(none_sampling_epochs)
    uniform_min_perc = np.nanpercentile(uniform_sampling_epochs, 25, axis=0)
    none_min_perc = np.nanpercentile(none_sampling_epochs, 25, axis=0)
    uniform_max_perc = np.nanpercentile(uniform_sampling_epochs, 75, axis=0)
    none_max_perc = np.nanpercentile(none_sampling_epochs, 75, axis=0)
    # Propagate the pooled uniform (kappa=0) and no-CGM (kappa=-1) baselines
    # into every rg bucket so they can be drawn alongside each curve.
    for rg in surf_plot_data.keys():
        surf_plot_data[rg][0.0] = (
            uniform_avg_epochs, uniform_std_epochs, len(uniform_sampling_epochs), uniform_median_epochs, uniform_min_perc,
            uniform_max_perc)
        surf_plot_data[rg][-1] = (
            none_avg_epochs, none_std_epochs, len(none_sampling_epochs), none_median_epochs, none_min_perc,
            none_max_perc)
    kappa_s = sorted(list(kappa_s))
    # kappa_s.insert(1,0)
    rg_s = sorted(list(rg_s))
    # surf_plot_data_arr = np.array(list(surf_plot_data.items()))
    for idx, kappa in enumerate(kappa_s):
        # label = "c={} -n: {}".format(c, len(surf_plot_data[0][kappa]))
        # n_runs = ''
        # n_runs = np.mean(0)
        c_label = "$\kappa$={}".format(kappa)
        if kappa== -1:
            c_label = "no CGM"
            continue
        if kappa== 0:
            c_label = "uniform GM"
            continue
        label = "{}".format(c_label)
        xs = sorted(list(surf_plot_data.keys()))
        xs = np.array([k for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        # ys = np.array([surf_plot_data[k][kappa][0] for k in sorted(surf_plot_data.keys()) if kappain surf_plot_data[k].keys()])
        # std_ys = np.array([surf_plot_data[k][kappa][1] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        # min_vals = ys + std_ys
        # max_vals = ys - std_ys
        ys = np.array([surf_plot_data[k][kappa][3] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        n_runs = np.array([surf_plot_data[k][kappa][2] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        min_vals = np.array([surf_plot_data[k][kappa][4] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        max_vals = np.array([surf_plot_data[k][kappa][5] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        if np.array(xs).shape != ys.shape:
            print("This data probably has not all kappas")
            continue
        color = new_colors[idx]
        print("C {} has color {}".format(kappa,color))
        # Add median points
        plt.scatter(xs, ys, color=color)
        # Add number of runs
        # for d_idx, n in enumerate(n_runs):
        #     plt.gca().annotate(str(n), (xs[d_idx], ys[d_idx]))
        # Add lines
        plt.plot(xs, ys, label=label, color=color)
        # Add quartiles
        plt.plot(xs, min_vals, linestyle='dashed', color=color, alpha=0.25)
        plt.plot(xs, max_vals, linestyle='dashed', color=color, alpha=0.25)
        # break
        # plt.fill_between(xs, min_vals, max_vals, alpha=0.25)
        # plt.fill_between(xs, min_vals, max_vals, alpha=0.1)
    # plt.legend(loc='upper left', bbox_to_anchor=(5.05,1.83))
    ax = plt.gca()
    # ax.set_xlim([0, 70])
    ax.set_ylim([20, 80])
    plt.xlabel('$c_g$')
    plt.ylabel('epochs to achieve {}% success rate'.format(int(percent_to_achieve*100)))
    plt.legend(loc='upper left')
    # plt.title("Number of epochs to achieve {}% success rate".format(int(percent_to_achieve*100)), loc='center', pad=-20)
    plt.savefig(os.path.join(fig_dir, 'penalty_hyperopt_.png'))
if __name__ == '__main__':
    matplotlib.rcParams['font.family'] = "serif"
    matplotlib.rcParams['font.weight'] = 'normal'
    new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                  '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                  '#bcbd22', '#17becf']
    parallel_rollouts=4
    training_rollout_cycles_per_epoch=64
    eval_rollout_cycles_per_epoch = 10
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str)
    parser.add_argument('--smooth', type=int, default=1)
    args = parser.parse_args()
    # Fixed: parse_args() returns the namespace; the parser object has no
    # `.args` attribute, so the original `parser.args.dir` raised AttributeError.
    # FIXME(review): `data` is never constructed in this script — the call
    # below raises NameError until the per-config run data (cf. prepare_data)
    # is actually loaded here.
    plot_epochs_success(data, 50, args.dir)
| 2.40625 | 2 |
names.py | ruTESTER/pminer | 6 | 12765525 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import config
# English species names, keyed by National Pokédex number (Generation I, 1-151).
EN_NAMES = {
    1: 'Bulbasaur', 2: 'Ivysaur', 3: 'Venusaur', 4: 'Charmander',
    5: 'Charmeleon', 6: 'Charizard', 7: 'Squirtle', 8: 'Wartortle',
    9: 'Blastoise', 10: 'Caterpie', 11: 'Metapod', 12: 'Butterfree',
    13: 'Weedle', 14: 'Kakuna', 15: 'Beedrill', 16: 'Pidgey', 17: 'Pidgeotto',
    18: 'Pidgeot', 19: 'Rattata', 20: 'Raticate', 21: 'Spearow', 22: 'Fearow',
    23: 'Ekans', 24: 'Arbok', 25: 'Pikachu', 26: 'Raichu', 27: 'Sandshrew',
    28: 'Sandslash', 29: 'Nidoran♀', 30: 'Nidorina', 31: 'Nidoqueen',
    32: 'Nidoran♂', 33: 'Nidorino', 34: 'Nidoking', 35: 'Clefairy',
    36: 'Clefable', 37: 'Vulpix', 38: 'Ninetales', 39: 'Jigglypuff',
    40: 'Wigglytuff', 41: 'Zubat', 42: 'Golbat', 43: 'Oddish', 44: 'Gloom',
    45: 'Vileplume', 46: 'Paras', 47: 'Parasect', 48: 'Venonat',
    49: 'Venomoth', 50: 'Diglett', 51: 'Dugtrio', 52: 'Meowth', 53: 'Persian',
    54: 'Psyduck', 55: 'Golduck', 56: 'Mankey', 57: 'Primeape',
    58: 'Growlithe', 59: 'Arcanine', 60: 'Poliwag', 61: 'Poliwhirl',
    62: 'Poliwrath', 63: 'Abra', 64: 'Kadabra', 65: 'Alakazam', 66: 'Machop',
    67: 'Machoke', 68: 'Machamp', 69: 'Bellsprout', 70: 'Weepinbell',
    71: 'Victreebel', 72: 'Tentacool', 73: 'Tentacruel', 74: 'Geodude',
    75: 'Graveler', 76: 'Golem', 77: 'Ponyta', 78: 'Rapidash', 79: 'Slowpoke',
    80: 'Slowbro', 81: 'Magnemite', 82: 'Magneton', 83: "Farfetch'd",
    84: 'Doduo', 85: 'Dodrio', 86: 'Seel', 87: 'Dewgong', 88: 'Grimer',
    89: 'Muk', 90: 'Shellder', 91: 'Cloyster', 92: 'Gastly', 93: 'Haunter',
    94: 'Gengar', 95: 'Onix', 96: 'Drowzee', 97: 'Hypno', 98: 'Krabby',
    99: 'Kingler', 100: 'Voltorb', 101: 'Electrode', 102: 'Exeggcute',
    103: 'Exeggutor', 104: 'Cubone', 105: 'Marowak', 106: 'Hitmonlee',
    107: 'Hitmonchan', 108: 'Lickitung', 109: 'Koffing', 110: 'Weezing',
    111: 'Rhyhorn', 112: 'Rhydon', 113: 'Chansey', 114: 'Tangela',
    115: 'Kangaskhan', 116: 'Horsea', 117: 'Seadra', 118: 'Goldeen',
    119: 'Seaking', 120: 'Staryu', 121: 'Starmie', 122: 'Mr. Mime',
    123: 'Scyther', 124: 'Jynx', 125: 'Electabuzz', 126: 'Magmar',
    127: 'Pinsir', 128: 'Tauros', 129: 'Magikarp', 130: 'Gyarados',
    131: 'Lapras', 132: 'Ditto', 133: 'Eevee', 134: 'Vaporeon', 135: 'Jolteon',
    136: 'Flareon', 137: 'Porygon', 138: 'Omanyte', 139: 'Omastar',
    140: 'Kabuto', 141: 'Kabutops', 142: 'Aerodactyl', 143: 'Snorlax',
    144: 'Articuno', 145: 'Zapdos', 146: 'Moltres', 147: 'Dratini',
    148: 'Dragonair', 149: 'Dragonite', 150: 'Mewtwo', 151: 'Mew'
}
# German species names, keyed by National Pokédex number (Generation I, 1-151).
DE_NAMES = {
    1: 'Bisasam', 2: 'Bisaknosp', 3: 'Bisaflor', 4: 'Glumanda', 5: 'Glutexo',
    6: 'Glurak', 7: 'Schiggy', 8: 'Schillok', 9: 'Turtok', 10: 'Raupy',
    11: 'Safcon', 12: 'Smettbo', 13: 'Hornliu', 14: 'Kokuna', 15: 'Bibor',
    16: 'Taubsi', 17: 'Tauboga', 18: 'Tauboss', 19: 'Rattfratz',
    20: 'Rattikarl', 21: 'Habitak', 22: 'Ibitak', 23: 'Rettan', 24: 'Arbok',
    25: 'Pikachu', 26: 'Raichu', 27: 'Sandan', 28: 'Sandamer', 29: 'Nidoran♀',
    30: 'Nidorina', 31: 'Nidoqueen', 32: 'Nidoran♂', 33: 'Nidorino',
    34: 'Nidoking', 35: 'Piepi', 36: 'Pixi', 37: 'Vulpix', 38: 'Vulnona',
    39: 'Pummeluff', 40: 'Knuddeluff', 41: 'Zubat', 42: 'Golbat', 43: 'Myrapla',
    44: 'Duflor', 45: 'Giflor', 46: 'Paras', 47: 'Parasek', 48: 'Bluzuk',
    49: 'Omot', 50: 'Digda', 51: 'Digdri', 52: 'Mauzi', 53: 'Snobilikat',
    54: 'Enton', 55: 'Entoron', 56: 'Menki', 57: 'Rasaff', 58: 'Fukano',
    59: 'Arkani', 60: 'Quapsel', 61: 'Quaputzi', 62: 'Quappo', 63: 'Abra',
    64: 'Kadabra', 65: 'Simsala', 66: 'Machollo', 67: 'Maschock',
    68: 'Machomei', 69: 'Knofensa', 70: 'Ultrigaria', 71: 'Sarzenia',
    72: 'Tentacha', 73: 'Tentoxa', 74: 'Kleinstein', 75: 'Georok',
    76: 'Geowaz', 77: 'Ponita', 78: 'Gallopa', 79: 'Flegmon', 80: 'Lahmus',
    81: 'Magnetilo', 82: 'Magneton', 83: 'Porenta', 84: 'Dodu', 85: 'Dodri',
    86: 'Jurob', 87: 'Jugong', 88: 'Sleima', 89: 'Sleimok', 90: 'Muschas',
    91: 'Austos', 92: 'Nebulak', 93: 'Alpollo', 94: 'Gengar', 95: 'Onix',
    96: 'Traumato', 97: 'Hypno', 98: 'Krabby', 99: 'Kingler', 100: 'Voltobal',
    101: 'Lektrobal', 102: 'Owei', 103: 'Kokowei', 104: 'Tragosso',
    105: 'Knogga', 106: 'Kicklee', 107: 'Nockchan', 108: 'Schlurp',
    109: 'Smogon', 110: 'Smogmog', 111: 'Rihorn', 112: 'Rizeros',
    113: 'Chaneira', 114: 'Tangela', 115: 'Kangama', 116: 'Seeper',
    117: 'Seemon', 118: 'Goldini', 119: 'Golking', 120: 'Sterndu',
    121: 'Starmie', 122: 'Pantimos', 123: 'Sichlor', 124: 'Rossana',
    125: 'Elektek', 126: 'Magmar', 127: 'Pinsir', 128: 'Tauros',
    129: 'Karpador', 130: 'Garados', 131: 'Lapras', 132: 'Ditto', 133: 'Evoli',
    134: 'Aquana', 135: 'Blitza', 136: 'Flamara', 137: 'Porygon',
    138: 'Amonitas', 139: 'Amoroso', 140: 'Kabuto', 141: 'Kabutops',
    142: 'Aerodactyl', 143: 'Relaxo', 144: 'Arktos', 145: 'Zapdos',
    146: 'Lavados', 147: 'Dratini', 148: 'Dragonir', 149: 'Dragoran',
    150: 'Mewtu', 151: 'Mew'
}
# French species names, keyed by National Pokédex number (Generation I, 1-151).
FR_NAMES = {
    1: 'Bulbizarre', 2: 'Herbizarre', 3: 'Florizarre', 4: 'Salamèche',
    5: 'Reptincel', 6: 'Dracaufeu', 7: 'Carapuce', 8: 'Carabaffe',
    9: 'Tortank', 10: 'Chenipan', 11: 'Chrysacier', 12: 'Papilusion',
    13: 'Aspicot', 14: 'Coconfort', 15: 'Dardargnan', 16: 'Roucool',
    17: 'Roucoups', 18: 'Roucarnage', 19: 'Rattata', 20: 'Rattatac',
    21: 'Piafabec', 22: 'Rapasdepic', 23: 'Abo', 24: 'Arbok', 25: 'Pikachu',
    26: 'Raichu', 27: 'Sabelette', 28: 'Sablaireau', 29: 'Nidoran♀',
    30: 'Nidorina', 31: 'Nidoqueen', 32: 'Nidoran♂', 33: 'Nidorino',
    34: 'Nidoking', 35: 'Mélofée', 36: 'Mélodelfe', 37: 'Goupix',
    38: 'Feunard', 39: 'Rondoudou', 40: 'Grodoudou', 41: 'Nosferapti',
    42: 'Nosferalto', 43: 'Mystherbe', 44: 'Ortide', 45: 'Rafflesia',
    46: 'Paras', 47: 'Parasect', 48: 'Mimitoss', 49: 'Aéromite',
    50: 'Taupiqueur', 51: 'Triopikeur', 52: 'Miaouss', 53: 'Persian',
    54: 'Psykokwak', 55: 'Akwakwak', 56: 'Férosinge', 57: 'Colossinge',
    58: 'Caninos', 59: 'Arcanin', 60: 'Ptitard', 61: 'Têtarte', 62: 'Tartard',
    63: 'Abra', 64: 'Kadabra', 65: 'Alakazam', 66: 'Machoc', 67: 'Machopeur',
    68: 'Mackogneur', 69: 'Chétiflor', 70: 'Boustiflor', 71: 'Empiflor',
    72: 'Tentacool', 73: 'Tentacruel', 74: 'Racaillou', 75: 'Gravalanch',
    76: 'Grolem', 77: 'Ponyta', 78: 'Galopa', 79: 'Ramoloss', 80: 'Flagadoss',
    81: 'Magnéti', 82: 'Magnéton', 83: 'Canarticho', 84: 'Doduo',
    85: 'Dodrio', 86: 'Otaria', 87: 'Lamantine', 88: 'Tadmorv',
    89: 'Grotadmorv', 90: 'Kokiyas', 91: 'Crustabri', 92: 'Fantominus',
    93: 'Spectrum', 94: 'Ectoplasma', 95: 'Onix', 96: 'Soporifik',
    97: 'Hypnomade', 98: 'Krabby', 99: 'Krabboss', 100: 'Voltorbe',
    101: 'Électrode', 102: 'Nœunœuf', 103: 'Noadkoko', 104: 'Osselait',
    105: 'Ossatueur', 106: 'Kicklee', 107: 'Tygnon', 108: 'Excelangue',
    109: 'Smogo', 110: 'Smogogo', 111: 'Rhinocorne', 112: 'Rhinoféros',
    113: 'Leveinard', 114: 'Saquedeneu', 115: 'Kangourex', 116: 'Hypotrempe',
    117: 'Hypocéan', 118: 'Poissirène', 119: 'Poissoroy', 120: 'Stari',
    # Entry 122 was the placeholder '<NAME>'; restored to the actual
    # French name of Mr. Mime.
    121: 'Staross', 122: 'M. Mime', 123: 'Insécateur', 124: 'Lippoutou',
    125: 'Élektek', 126: 'Magmar', 127: 'Scarabrute', 128: 'Tauros',
    129: 'Magicarpe', 130: 'Léviator', 131: 'Lokhlass', 132: 'Métamorph',
    133: 'Évoli', 134: 'Aquali', 135: 'Voltali', 136: 'Pyroli',
    137: 'Porygon', 138: 'Amonita', 139: 'Amonistar', 140: 'Kabuto',
    141: 'Kabutops', 142: 'Ptéra', 143: 'Ronflex', 144: 'Artikodin',
    145: 'Électhor', 146: 'Sulfura', 147: 'Minidraco', 148: 'Draco',
    149: 'Dracolosse', 150: 'Mewtwo', 151: 'Mew'
}
# Chinese species names, keyed by National Pokédex number (Generation I, 1-151).
ZH_NAMES = {
    1: '妙蛙种子', 2: '妙蛙草', 3: '妙蛙花', 4: '小火龙', 5: '火恐龙',
    6: '喷火龙', 7: '杰尼龟', 8: '卡咪龟', 9: '水箭龟', 10: '绿毛虫',
    11: '铁甲蛹', 12: '巴大蝶', 13: '独角虫', 14: '铁壳蛹', 15: '大针蜂',
    16: '波波', 17: '比比鸟', 18: '大比鸟', 19: '小拉达', 20: '拉达',
    21: '烈雀', 22: '大嘴雀', 23: '阿柏蛇', 24: '阿柏怪', 25: '皮卡丘',
    26: '雷丘', 27: '穿山鼠', 28: '穿山王', 29: '尼多兰', 30: '尼多娜',
    31: '尼多后', 32: '尼多朗', 33: '尼多力诺', 34: '尼多王', 35: '皮皮',
    36: '皮可西', 37: '六尾', 38: '九尾', 39: '胖丁', 40: '胖可丁',
    41: '超音蝠', 42: '大嘴蝠', 43: '走路草', 44: '臭臭花', 45: '霸王花',
    46: '派拉斯', 47: '派拉斯特', 48: '毛球', 49: '摩鲁蛾', 50: '地鼠',
    51: '三地鼠', 52: '喵喵', 53: '猫老大', 54: '可达鸭', 55: '哥达鸭',
    56: '猴怪', 57: '火暴猴', 58: '卡蒂狗', 59: '风速狗', 60: '蚊香蝌蚪',
    61: '蚊香君', 62: '蚊香泳士', 63: '凯西', 64: '勇基拉', 65: '胡地',
    66: '腕力', 67: '豪力', 68: '怪力', 69: '喇叭芽', 70: '口呆花',
    71: '大食花', 72: '玛瑙水母', 73: '毒刺水母', 74: '小拳石', 75: '隆隆石',
    76: '隆隆岩', 77: '小火马', 78: '烈焰马', 79: '呆呆兽', 80: '呆壳兽',
    81: '小磁怪', 82: '三合一磁怪', 83: '大葱鸭', 84: '嘟嘟', 85: '嘟嘟利',
    86: '小海狮', 87: '白海狮', 88: '臭泥', 89: '臭臭泥', 90: '大舌贝',
    91: '刺甲贝', 92: '鬼斯', 93: '鬼斯通', 94: '耿鬼', 95: '大岩蛇',
    96: '催眠貘', 97: '引梦貘人', 98: '大钳蟹', 99: '巨钳蟹', 100: '霹雳电球',
    101: '顽皮雷弹', 102: '蛋蛋', 103: '椰蛋树', 104: '卡拉卡拉',
    105: '嘎啦嘎啦', 106: '飞腿郎', 107: '快拳郎', 108: '大舌头',
    109: '瓦斯弹', 110: '双弹瓦斯', 111: '独角犀牛', 112: '钻角犀兽',
    113: '吉利蛋', 114: '蔓藤怪', 115: '袋兽', 116: '墨海马', 117: '海刺龙',
    118: '角金鱼', 119: '金鱼王', 120: '海星星', 121: '宝石海星',
    122: '魔墙人偶', 123: '飞天螳螂', 124: '迷唇姐', 125: '电击兽',
    126: '鸭嘴火兽', 127: '凯罗斯', 128: '肯泰罗', 129: '鲤鱼王',
    130: '暴鲤龙', 131: '拉普拉斯', 132: '百变怪', 133: '伊布', 134: '水伊布',
    135: '雷伊布', 136: '火伊布', 137: '多边兽', 138: '菊石兽',
    139: '多刺菊石兽', 140: '化石盔', 141: '镰刀盔', 142: '化石翼龙',
    143: '卡比兽', 144: '急冻鸟', 145: '闪电鸟', 146: '火焰鸟', 147: '迷你龙',
    148: '哈克龙', 149: '快龙', 150: '超梦', 151: '梦幻'
}
# Resolve the name table for the configured language; English is the fallback
# for any language code not explicitly translated.
_LOCALIZED = {
    'DE': DE_NAMES,
    'FR': FR_NAMES,
    'ZH': ZH_NAMES,
}
POKEMON_NAMES = _LOCALIZED.get(config.LANGUAGE.upper(), EN_NAMES)
| 1.375 | 1 |
InvenTree/users/test_migrations.py | onurtatli/InvenTree | 0 | 12765526 | <reponame>onurtatli/InvenTree
"""
Unit tests for the user model database migrations
"""
from django_test_migrations.contrib.unittest_case import MigratorTestCase
from InvenTree import helpers
class TestForwardMigrations(MigratorTestCase):
    """Run the full users-app migration chain and verify user rows survive it."""

    migrate_from = ('users', helpers.getOldestMigrationFile('users'))
    migrate_to = ('users', helpers.getNewestMigrationFile('users'))

    def prepare(self):
        # Seed two users against the pre-migration schema.
        user_model = self.old_state.apps.get_model('auth', 'user')
        for username in ('fred', 'brad'):
            user_model.objects.create(
                username=username,
                email='<EMAIL>',
                password='password'
            )

    def test_users_exist(self):
        # Both seeded users must still be present after migrating forward.
        user_model = self.new_state.apps.get_model('auth', 'user')
        self.assertEqual(user_model.objects.count(), 2)
| 2.34375 | 2 |
app/items.py | roddehugo/patents-scraper | 2 | 12765527 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class GooglePatentsItem(Item):
    """Scrapy item holding the fields scraped from one Google Patents record."""

    publication_number = Field()
    title = Field()
    filing_date = Field()
    publication_date = Field()
    priority_date = Field()
    grant_date = Field()
    inventors = Field()
    assignees = Field()
    pdf = Field()
    external_links = Field()
    images = Field()
    classifications = Field()
    citations = Field()
    cited_by = Field()
    legal_events = Field()
    # Correctly spelled field; prefer this in new code.
    legal_status = Field()
    # Historical typo of 'legal_status', kept so existing spiders/pipelines
    # referencing item['leget_status'] keep working.
    leget_status = Field()
    abstract = Field()
    description = Field()
    claims = Field()
    depth = Field()
| 2.28125 | 2 |
neo/test/iotest/test_elphyio.py | Mario-Kart-Felix/python-neo | 199 | 12765528 | """
Tests of neo.io.elphyo
"""
import unittest
from neo.io import ElphyIO
from neo.test.iotest.common_io_test import BaseTestIO
class TestElphyIO(BaseTestIO, unittest.TestCase):
    """I/O round-trip tests for the Elphy file reader."""

    ioclass = ElphyIO

    entities_to_download = [
        'elphy'
    ]

    entities_to_test = ['elphy/DATA1.DAT',
                        'elphy/ElphyExample.DAT',
                        'elphy/ElphyExample_Mode1.dat',
                        'elphy/ElphyExample_Mode2.dat',
                        'elphy/ElphyExample_Mode3.dat']

    def test_read_data(self):
        """Every test file must yield a block with at least one segment holding data."""
        for entity in self.entities_to_test:
            reader = ElphyIO(self.get_local_path(entity))
            block = reader.read_block()
            self.assertTrue(len(block.segments) > 0)
            # at least one data object must be generated for each file
            self.assertTrue(any(list(block.segments[0].size.values())))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 2.671875 | 3 |
lstm/src/data/collect_paradigms.py | brightp-py/rnng-and-rts | 0 | 12765529 | <reponame>brightp-py/rnng-and-rts
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from collections import defaultdict
from data import data_utils
parser = argparse.ArgumentParser(description='Reading and processing a large gzip file')
parser.add_argument('--input', type=str, required=True,
                    help='Input path (in a column CONLL UD format)')
parser.add_argument('--output', type=str, required=True, help="Output file name")
# Integer literals instead of string defaults: argparse would have converted the
# strings via `type=int` anyway, but this makes the defaults self-evident.
parser.add_argument('--nwords', type=int, default=100000000, required=False,
                    help='How many words to process')
parser.add_argument('--min_freq', type=int, default=5, required=False,
                    help='Minimal frequency of paradigm to be included in the dictionary')
args = parser.parse_args()

# Count (form, lemma, upos, feats) paradigm occurrences over the first
# `--nwords` token lines of the CONLL-U input.
nwords = 0
paradigms = defaultdict(int)
for line in data_utils.read(args.input):
    fields = line.split("\t")
    # skip blank separator lines and anything that is not a token row
    if line.strip() == "" or len(fields) < 2:
        continue
    if fields[1].isalpha():
        paradigms[(fields[1], fields[2], fields[3], fields[5])] += 1
    nwords += 1
    if nwords > args.nwords:
        break

# NOTE(review): the strict '>' means paradigms seen exactly `min_freq` times
# are excluded, while the --min_freq help text says "minimal frequency ... to
# be included"; kept as-is to preserve behaviour -- confirm intent.
with open(args.output, 'w') as f:
    for p, count in paradigms.items():
        if count > args.min_freq:
            f.write("\t".join(p) + "\t" + str(count) + "\n")
    # the `with` block closes the file; no explicit close() needed
| 2.03125 | 2 |
tests/settings/settings.py | caputomarcos/mongorest | 16 | 12765530 | # -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
from os import environ
from mongorest.settings import settings
from mongorest.testcase import TestCase
class TestSettings(TestCase):
    """Checks default values and override behaviour of the mongorest settings object."""

    def test_settings_default_values(self):
        # With no settings module configured, every setting falls back to its default.
        environ.pop('MONGOREST_SETTINGS_MODULE', None)

        self.assertEqual(settings.AUTH_COLLECTION, '')

        self.assertIsNotNone(settings.CORS)
        expected_cors = {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Methods': 'GET,POST,PUT,PATCH,DELETE,OPTIONS',
            'Access-Control-Allow-Headers': (
                'Accept,Accept-Encoding,Authorization,Content-Length,Content-Type,'
                'Origin,User-Agent,X-CSRFToken,X-Requested-With'
            ),
            'Access-Control-Allow-Credentials': 'true',
        }
        for header, value in expected_cors.items():
            self.assertEqual(settings.CORS[header], value)

        self.assertEqual(settings.MIDDLEWARES, [])

        self.assertIsNotNone(settings.MONGODB)
        expected_mongodb = {
            'URI': '',
            'USERNAME': '',
            'PASSWORD': '',
            'HOST': 'localhost',
            'HOSTS': [],
            'PORT': 27017,
            'PORTS': [],
            'DATABASE': 'mongorest',
            'OPTIONS': [],
        }
        for key, value in expected_mongodb.items():
            self.assertEqual(settings.MONGODB[key], value)

        self.assertEqual(settings.RETRY_LIMIT, 5)
        self.assertEqual(settings.BASE_RETRY_TIME, 2)
        self.assertEqual(settings.LINEAR_RETRIES, False)
        self.assertEqual(settings.SESSION_STORE, '')

    def test_a_default_setting_can_be_overwritten(self):
        # Default first, then the fixture module overrides it.
        environ.pop('MONGOREST_SETTINGS_MODULE', None)
        self.assertEqual(settings.MONGODB['URI'], '')

        environ['MONGOREST_SETTINGS_MODULE'] = 'tests.fixtures.settings_test_settings'
        self.assertEqual(settings.MONGODB['URI'], 'test')

    def test_a_new_setting_value_can_be_added(self):
        # Settings defined only in the fixture module become available too.
        environ.pop('MONGOREST_SETTINGS_MODULE', None)
        environ['MONGOREST_SETTINGS_MODULE'] = 'tests.fixtures.settings_test_settings'

        self.assertEqual(settings.TEST, 'test')

    def test_an_invalid_setting_will_raise_error(self):
        environ.pop('MONGOREST_SETTINGS_MODULE', None)

        with self.assertRaises(AttributeError):
            settings.i_am_an_invalid_setting
| 1.890625 | 2 |
RecSearch/DataInterfaces/Recommenders/hits.py | matthew-kimm/RecSearch | 0 | 12765531 | from RecSearch.DataInterfaces.Recommenders.Abstract import IMatrixRecommender
import pandas as pd
class IXHITSRecommend(IMatrixRecommender):
    """Recommender that ranks items by HITS authority scores computed over a
    user-item bipartite graph built from ratings."""

    def iget_recommendation(self, who: dict, possible: pd.DataFrame, n_column: str, ir_column: str,
                            req_rating: float, xi: float, tol: float, max_iter: int, filter_name: str = None) -> list:
        """Return candidate items for `who`, ranked by HITS authority score.

        `ir_column` is expected to hold per-row {item: rating} dicts; ratings
        below `req_rating` are discarded, and `filter_name` selects a filter of
        items to exclude. `xi` is the teleportation weight, `tol`/`max_iter`
        control the power-iteration stopping criteria. Returns [] when nothing
        survives filtering.
        """
        f = self.get_filter(who, filter_name)
        df = self.get_reduced_df(who, possible, n_column)
        # Flatten each {item: rating} dict into "item;rating" strings, dropping
        # filtered items. NOTE(review): assumes item keys contain no ';' --
        # confirm upstream, otherwise the split below misparses.
        df[ir_column] = df[ir_column].apply(lambda x: [';'.join((k, str(v))) for k, v in x.items() if k not in f])
        # Exploding on [] results in nan
        df = df.explode(column=ir_column).dropna()
        if df.empty:
            return []
        else:
            # Recover item/rating columns and keep only sufficiently high ratings.
            df[['Item', 'Rating']] = df[ir_column].str.split(pat=';', expand=True)
            df['Rating'] = df['Rating'].astype(float)
            df = df[df['Rating'] >= req_rating]
            # Binarize: every surviving edge contributes equally to the graph.
            df['Rating'] = 1
            anames = list(df['Item'].sort_values().unique())
            # hub-side labels (users); currently unused below
            hnames = list(df.index.unique())
            matrix = self.createAdjacencyMatrix(df, ['Item'], ['Rating'])
            matrix = self.createBipartiteHITSMatrix(matrix)
            # teleportation (weight xi) guarantees convergence of the iteration
            matrix = self.addTeleportation(matrix, xi)
            auth = self.powerIteration(matrix, tol, max_iter, anames)
            # add hub?
            return self.sortedResult(auth)
| 2.3125 | 2 |
codewof/programming/content/en/string-concatenation/solution.py | taskmaker1/codewof | 3 | 12765532 | <gh_stars>1-10
# Read three strings from the user and print them joined with no separator.
first = input("String 1? ")
second = input("String 2? ")
third = input("String 3? ")
print("".join((first, second, third)))
| 3.453125 | 3 |
lang/py/cookbook/v2/source/cb2_15_5_sol_1.py | ch1huizong/learning | 0 | 12765533 | # give the base class a short, readable nickname
from SimpleXMLRPCServer import SimpleXMLRPCServer as BaseServer
class Server(BaseServer):
    """XML-RPC server bound to (host, port) that only accepts whitelisted clients."""

    # Only these client IPs are allowed to talk to the server.
    allowedClientHosts = '127.0.0.1', '192.168.0.15',

    def __init__(self, host, port):
        # group hostname and port into the address tuple the base class expects
        BaseServer.__init__(self, (host, port))

    def server_bind(self):
        # SO_REUSEADDR allows a fast restart of the server after it's killed
        import socket
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        BaseServer.server_bind(self)

    def verify_request(self, request, client_address):
        # reject any request whose source host is not on the whitelist
        host, _port = client_address
        return host in self.allowedClientHosts
| 3.015625 | 3 |
setup.py | nimnull/talkbot | 6 | 12765534 | <reponame>nimnull/talkbot<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from talkbot import __version__
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Communications',
    'Topic :: Communications :: Chat',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
]

# requirements.txt lives next to this setup.py
fname = os.path.join(os.path.abspath(os.path.dirname(
    __file__)), 'requirements.txt')

# Read the pinned requirements once, closing the file handle promptly
# (the original left this handle and the README handle open).
with open(fname, 'r') as requirements_file:
    install_requires = requirements_file.readlines()

extra = {
    'entry_points': {
        'console_scripts': ['talk_bot = talkbot.cli:main']
    },
    'install_requires': install_requires,
    'extras_require': {
        'test': [
            'pytest',
            'pytest-aiohttp',
            'docker',
        ]
    }
}

with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='talkbot',
    version=__version__,
    description='',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/nimnull/talkabit/',
    packages=find_packages(exclude=('tests',)),
    license='LICENSE.txt',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    long_description=long_description,
    include_package_data=True,
    zip_safe=False,
    **extra
)
| 1.484375 | 1 |
metaflow/event_logger.py | RobBlumberg/metaflow | 2 | 12765535 | from .sidecar import SidecarSubProcess
from .sidecar_messages import Message, MessageTypes
class NullEventLogger(object):
    """No-op logger satisfying the event-logger interface.

    Lets callers invoke start()/log()/terminate() unconditionally when
    event logging is disabled.
    """

    def __init__(self, *args, **kwargs):
        pass

    def start(self):
        # nothing to start
        pass

    def log(self, payload):
        # discard the payload
        pass

    def terminate(self):
        # nothing to tear down
        pass
class EventLogger(NullEventLogger):
    """Event logger that forwards log payloads to a sidecar subprocess.

    Behaves like NullEventLogger until start() has spawned the sidecar:
    log() and terminate() are safe no-ops before start() instead of
    raising AttributeError on the unset subprocess handle.
    """

    def __init__(self, logger_type):
        # type: (str) -> None
        self.sidecar_process = None
        self.logger_type = logger_type

    def start(self):
        """Spawn the sidecar subprocess that receives log events."""
        self.sidecar_process = SidecarSubProcess(self.logger_type)

    def log(self, payload):
        """Forward `payload` to the sidecar; no-op if start() was never called."""
        if self.sidecar_process is None:
            return
        msg = Message(MessageTypes.LOG_EVENT, payload)
        self.sidecar_process.msg_handler(msg)

    def terminate(self):
        """Kill the sidecar subprocess; no-op if start() was never called."""
        if self.sidecar_process is None:
            return
        self.sidecar_process.kill()
| 2.296875 | 2 |
src/sagemaker_sklearn_extension/preprocessing/encoders.py | perunicic/sagemaker-scikit-learn-extension | 0 | 12765536 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import warnings
from math import ceil
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing.label import _encode, _encode_check_unknown
from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples
from sagemaker_sklearn_extension.impute import RobustImputer
class ThresholdOneHotEncoder(OneHotEncoder):
    """Encode categorical integer features as a one-hot numeric array, with optional restrictions on feature encoding.
    This adds functionality to encode only if a feature appears more than ``threshold`` number of times. It also adds
    functionality to bound the number of categories per feature to ``max_categories``.
    This transformer is an extension of ``OneHotEncoder`` from the ``sklearn.preprocessing`` module.
    Parameters
    ----------
    categories : 'auto' or a list of lists/arrays of values (default = 'auto')
        Categories (unique values) per feature:
        - 'auto' : Determine categories automatically from the training data.
        - list : ``categories[i]`` holds the categories expected in the ith column. The passed categories should not
            mix strings and numeric values within a single feature, and should be sorted in case of numeric values.
        The used categories can be found in the ``categories_`` attribute.
    drop : 'first' or a list/array of shape (n_features,) (default = None)
        Specifies a methodology to use to drop one of the categories per feature. This is useful in situations where
        perfectly collinear features cause problems, such as when feeding the resulting data into a neural network or
        an unregularized regression.
        - None : retain all features (the default).
        - 'first' : drop the first category in each feature. If only one category is present, the feature will be
            dropped entirely.
        - array : ``drop[i]`` is the category in feature ``X[:, i]`` that should be dropped.
    sparse : boolean (default = True)
        Will return sparse matrix if set True else will return an array.
    dtype : number type (default = np.float64)
        Desired dtype of output.
    threshold : float (default = max(10, n_features / 1000))
        The threshold for including a value in the encoding of the result. Default value is the maximum of `10` or
        `n_features / 1000` where `n_features` is the number of columns of input X. How this parameter is interpreted
        depends on whether it is more than or equal to or less than 1.
        - If `threshold` is more than or equal to one, it represents the number of times a value must appear to be
        one hot encoded in the result.
        - If `threshold` is less than one, it represents the fraction of rows which must contain the value for it to be
        one hot encoded in the result. The values is rounded up, so if `threshold` is 0.255 and there are 100 rows, a
        value must appear at least 26 times to be included.
    max_categories : int (default = 100)
        Maximum number of categories to encode per feature. If the number of observed categories is greater than
        ``max_categories``, the encoder will take the top ``max_categories`` observed categories, sorted by count.
    Attributes
    ----------
    categories_ : list of arrays
        The categories of each feature determined during fitting (in order of the features in X and corresponding with
        the output of ``transform``). This includes the category specified in ``drop`` (if any).
    drop_idx_ : array of shape (n_features,)
        ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category to be dropped for each feature. None if all
        the transformed features will be retained.
    """

    def __init__(self, categories=None, drop=None, sparse=True, dtype=np.float64, threshold=None, max_categories=100):
        # NOTE(review): the two leading None positional args map onto the legacy
        # OneHotEncoder signature and handle_unknown is forced to "ignore";
        # this is version-sensitive -- confirm against the pinned scikit-learn.
        super().__init__(None, None, categories, drop, sparse, dtype, "ignore")
        self.threshold = threshold
        self.max_categories = max_categories

    def fit(self, X, y=None):
        """Fit ThresholdOneHotEncoder to X.
        Overrides self.categories_ under the following conditions:
        - include values that appear at least ``threshold`` number of times
        - include the top ``self.max_categories`` number of categories to encode
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to determine the categories of each feature.
        Returns
        -------
        self : ThresholdOneHotEncoder
        """
        # Let the base class establish its fitted state first; the per-feature
        # categories are then recomputed below under the threshold rules.
        super().fit(X, y)
        assert self.max_categories >= 1
        _, n_samples, n_features = self._check_X(X)
        # Resolve the effective absolute count threshold (see class docstring
        # for how fractional thresholds are interpreted).
        if not self.threshold:
            threshold = max(10, n_samples / 1000)
        elif self.threshold >= 1:
            threshold = self.threshold
        else:
            threshold = ceil(self.threshold * n_samples)
        n_features_completely_under_threshold = 0
        for j in range(n_features):
            # get unique values and their counts
            items, counts = np.unique([row[j] for row in X], return_counts=True)
            # add items that appear more than threshold times
            self.categories_[j] = items[counts >= threshold].astype("O")
            if self.categories_[j].size == 0:
                n_features_completely_under_threshold += 1
                # If no category is above the threshold, then create an unknown category to prevent
                # self.transform() from raising an IndexError.
                items.sort()
                unknown_category = "{}___".format(items[-1])
                # It's important to keep the dtype of `self.categories_[j]` as 'U' here because our `unknown_category`
                # might end up being longer than any of the seen categories, and that changes the behavior of
                # the `self._transform` method.
                self.categories_[j] = np.asarray([unknown_category], dtype="U")
            elif len(self.categories_[j]) > self.max_categories:
                # Too many surviving categories: keep the most frequent ones.
                items_and_counts = dict(zip(items, counts))
                self.categories_[j] = np.asarray(
                    sorted(items_and_counts, key=items_and_counts.get, reverse=True)[: self.max_categories], dtype="O"
                )
        if n_features_completely_under_threshold > 0:
            # Warn (don't fail) when entire features fell below the threshold.
            times = "time" if self.threshold == 1 else "times"
            warnings.warn(
                "{} out of {} features do not have any categories appearing more than threshold={} {}.".format(
                    n_features_completely_under_threshold, n_features, self.threshold, times
                )
            )
        return self

    def _more_tags(self):
        # scikit-learn estimator tag: input is categorical data.
        return {"X_types": ["categorical"]}
class RobustLabelEncoder(LabelEncoder):
    """Encode labels for seen and unseen labels.
    Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
    ``self.fill_encoded_label_value`` with a default value of n_classes.
    Similar to ``sklearn.preprocessing.LabelEncoder`` with additional features.
    - ``RobustLabelEncoder`` encodes unseen values with ``fill_encoded_label_value`` or ``fill_label_value``
      if ``fill_unseen_labels=True`` for ``transform`` or ``inverse_transform`` respectively
    - ``RobustLabelEncoder`` can use predetermined labels with the parameter``labels``.
    Examples
    --------
    >>> from sagemaker_sklearn_extension.preprocessing import RobustLabelEncoder
    >>> rle = RobustLabelEncoder()
    >>> rle.fit([1, 2, 2, 6])
    RobustLabelEncoder(fill_encoded_label_value=None,
                       fill_label_value='<unseen_label>', fill_unseen_labels=True,
                       labels=None)
    >>> rle.classes_
    array([1, 2, 6])
    >>> rle.transform([1, 1, 2, 6])
    array([0, 0, 1, 2])
    >>> rle.transform([1, 1, 2, 6, 1738])
    array([ 0,  0,  1,  2, 3])
    >>> rle.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])
    >>> rle.inverse_transform([-1738, 0, 0, 1, 2])
    ['<unseen_label>', 1, 1, 2, 6]
    It can also be used to transform non-numerical labels (as long as they are
    hashable and comparable) to numerical labels.
    >>> rle = RobustLabelEncoder()
    >>> rle.fit(["hot dog", "hot dog", "banana"])
    RobustLabelEncoder(fill_encoded_label_value=None,
                       fill_label_value='<unseen_label>', fill_unseen_labels=True,
                       labels=None)
    >>> list(rle.classes_)
    ['banana', 'hot dog']
    >>> rle.transform(["hot dog", "hot dog"])
    array([1, 1])
    >>> rle.transform(["banana", "llama"])
    array([0, 2])
    >>> list(rle.inverse_transform([2, 2, 1]))
    ['<unseen_label>', '<unseen_label>', 'hot dog']
    Parameters
    ----------
    labels : list of values (default = None)
        List of unique values for label encoding. Overrides ``self.classes_``.
        If ``labels`` is None, RobustLabelEncoder will automatically determine the labels.
    fill_unseen_labels : boolean (default = True)
        Whether or not to fill unseen values during transform or inverse_transform.
    fill_encoded_label_value : int (default = n_classes)
        Replacement value for unseen labels during ``transform``.
        Default value is n_classes.
    fill_label_value : str (default = '<unseen_label>')
        Replacement value for unseen encoded labels during ``inverse_transform``.
    Attributes
    ----------
    classes_ : array of shape (n_classes,)
        Holds the label for each class.
    """

    def __init__(
        self, labels=None, fill_unseen_labels=True, fill_encoded_label_value=None, fill_label_value="<unseen_label>"
    ):
        super().__init__()
        self.labels = labels
        self.fill_unseen_labels = fill_unseen_labels
        self.fill_encoded_label_value = fill_encoded_label_value
        self.fill_label_value = fill_label_value

    def fit(self, y):
        """Fit label encoder.
        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Label values.
        Returns
        -------
        self : RobustLabelEncoder.
        """
        y = column_or_1d(y, warn=True)
        self.classes_ = self._check_labels_and_sort() or _encode(y)
        return self

    def _check_labels_and_sort(self):
        # Returns the user-supplied labels (sorted) or None when labels were
        # not provided, in which case fit() derives them from the data.
        if not self.labels:
            return None
        if self._is_sorted(self.labels):
            return self.labels
        warnings.warn("`labels` parameter is expected to be sorted. Sorting `labels`.")
        return sorted(self.labels)

    def _is_sorted(self, iterable):
        # True when the iterable is in non-decreasing order.
        return all(iterable[i] <= iterable[i + 1] for i in range(len(iterable) - 1))

    def fit_transform(self, y):
        """Fit label encoder and return encoded labels.
        ``fill_unseen_labels=True`` does nothing in ``fit_transform`` because there will be no unseen labels.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Label values.
        Returns
        -------
        y_encoded : array-like of shape [n_samples]
                    Encoded label values.
        """
        y = column_or_1d(y, warn=True)
        sorted_labels = self._check_labels_and_sort()
        self.classes_, y_encoded = (
            _encode(y, uniques=sorted_labels, encode=True) if sorted_labels else _encode(y, encode=True)
        )
        return y_encoded

    def transform(self, y):
        """Transform labels to normalized encoding.
        If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_encoded_label_value`` for unseen values.
        Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
        ``self.fill_encoded_label_value`` with a default value of n_classes.
        Parameters
        ----------
        y : array-like of shape [n_samples]
            Label values.
        Returns
        -------
        y_encoded : array-like of shape [n_samples]
                    Encoded label values.
        """
        check_is_fitted(self, "classes_")
        y = column_or_1d(y, warn=True)

        # transform of empty array is empty array
        if _num_samples(y) == 0:
            return np.array([])

        if self.fill_unseen_labels:
            _, mask = _encode_check_unknown(y, self.classes_, return_mask=True)
            y_encoded = np.searchsorted(self.classes_, y)
            # Explicit None check so 0 is usable as a fill value
            # (the previous `or` expression treated 0 as "unset").
            if self.fill_encoded_label_value is None:
                fill_encoded_label_value = len(self.classes_)
            else:
                fill_encoded_label_value = self.fill_encoded_label_value
            y_encoded[~mask] = fill_encoded_label_value
        else:
            _, y_encoded = _encode(y, uniques=self.classes_, encode=True)

        return y_encoded

    def inverse_transform(self, y):
        """Transform labels back to original encoding.
        If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_label_value`` for unseen values.
        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Encoded label values.
        Returns
        -------
        y_decoded : numpy array of shape [n_samples]
                    Label values.
        """
        check_is_fitted(self, "classes_")
        y = column_or_1d(y, warn=True)

        if y.dtype.kind not in ("i", "u"):
            try:
                # The builtins float/int replace np.float/np.int, which were
                # deprecated in NumPy 1.20 and removed in NumPy 1.24.
                y = y.astype(float).astype(int)
            except ValueError:
                raise ValueError("`y` contains values not convertible to integer.")

        # inverse transform of empty array is empty array
        if _num_samples(y) == 0:
            return np.array([])

        labels = np.arange(len(self.classes_))
        diff = np.setdiff1d(y, labels)
        # `diff` is an ndarray: its truth value is ambiguous for length > 1,
        # so test its size explicitly instead of `if diff`.
        if diff.size > 0 and not self.fill_unseen_labels:
            raise ValueError("y contains previously unseen labels: %s" % str(diff))
        y_decoded = [self.classes_[idx] if idx in labels else self.fill_label_value for idx in y]
        return y_decoded
class NALabelEncoder(BaseEstimator, TransformerMixin):
    """Encoder that maps unparseable labels to NA values.

    Wraps a 1-D ``RobustImputer`` configured with the ``constant`` strategy
    and ``np.nan`` fill values (mask: ``is_finite_numeric`` by default), so
    every non-float and non-finite entry of a regression target column comes
    back as NaN.

    Parameters
    ----------
    mask_function : callable -> np.array, dtype('bool') (default=None)
        Vectorized predicate over the input array; entries where it returns
        ``False`` are imputed.  Use ``np.vectorize`` to vectorize singular
        python functions.
    """

    def __init__(self, mask_function=None):
        self.mask_function = mask_function

    def fit(self, y):
        """Fit the underlying imputer on column *y*.

        Parameters
        ----------
        y : {array-like}, shape (n_samples,)
            Input column, where `n_samples` is the number of samples.

        Returns
        -------
        self : NALabelEncoder
        """
        self.model_ = RobustImputer(
            strategy="constant",
            fill_values=np.nan,
            mask_function=self.mask_function,
        )
        self.model_.fit(X=y.reshape(-1, 1))
        return self

    def transform(self, y):
        """Return *y* with non-float and non-finite entries replaced by NaN.

        Parameters
        ----------
        y : {array-like}, shape (n_samples)
            The input column to encode.

        Returns
        -------
        yt : {ndarray}, shape (n_samples,)
            The encoded input column.
        """
        check_is_fitted(self, "model_")
        reshaped = y.reshape(-1, 1)
        return self.model_.transform(reshaped).flatten()

    def inverse_transform(self, y):
        """Identity: encoded labels are returned unchanged."""
        return y

    def _more_tags(self):
        return {"X_types": ["1dlabels"]}
| 1.859375 | 2 |
proj03/proj03.py | dstingley22/project | 0 | 12765537 | # Name:
# Date:
"""
proj 03: Guessing Game
Generate a random number between 1 and 9 (including 1 and 9).
Ask the user to guess the number, then tell them whether they guessed too low, too high,
or exactly right. Keep the game going until the user types exit.
Keep track of how many guesses the user has taken, and when the game ends, print this out.
"""
var3 = 'yes'
while var3 == 'yes':
import random
var = random.randint(1,1000)
#print var
y = int(raw_input("How many guesses would you like to have?")) - 1
x = int(raw_input("Guess a number between 1 and 1000 or type 0 to quit: "))
while y > 0:
if x != 0:
x = int(x)
if x > var:
print ""
x = raw_input("Too high. Guess again")
elif x < var:
print ""
x = raw_input("Too low. Guess again")
elif x == 'exit':
x = var
print "Game over. The answer is", var
break
if x == var:
print "Correct. The answer is", var
break
else:
if x == var:
print 'Correct. The answer is', var
break
else:
print "Game over. The answer is", var
break
y = y - 1
var3 = raw_input("Would you like to play again?") | 4.25 | 4 |
rpi/distance.py | inafi/wearovision | 5 | 12765538 | <gh_stars>1-10
import serial
from redis import Redis

# NOTE(review): this Redis client is created but never used below --
# confirm whether sensor readings were meant to be published to Redis.
cli = Redis("localhost")
# UART on the Pi's primary serial port at 9600 baud (presumably a distance
# sensor, per the filename -- verify against the attached hardware).
ser = serial.Serial('/dev/serial0', 9600)

# Busy-poll the serial port forever, echoing each complete line.
# NOTE(review): the loop spins at full CPU when no data is pending;
# a blocking readline() or a small sleep would be gentler.
while (True):
    if(ser.in_waiting >0):
        line = ser.readline()
        print(line)
| 2.5 | 2 |
tests/backend/shellvars.py | edyan/python-anyconfig | 0 | 12765539 | <reponame>edyan/python-anyconfig
#
# Copyright (C) 2016 <NAME> <<EMAIL>>
# Copyright (C) 2017 Red Hat, Inc.
# License: MIT
#
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
# pylint: disable=ungrouped-imports
from __future__ import absolute_import
import anyconfig.backend.shellvars as TT
import tests.backend.common as TBC
from anyconfig.compat import OrderedDict
# Sample shell-variable file content exercised by the tests below; covers
# both quoting styles, ``export`` prefixes and trailing comments.
CNF_S = """\
a=0
b='bbb' # a comment
c="ccc" # an another comment
export d='ddd' ## double comment
export e="eee" ### tripple comment
"""

# Expected parse result for CNF_S, insertion order preserved.
CNF = OrderedDict((("a", "0"), ("b", "bbb"), ("c", "ccc"), ("d", "ddd"),
                   ("e", "eee")))
class HasParserTrait(TBC.HasParserTrait):
    """Bind the shellvars Parser and its fixture data into the shared trait."""
    psr = TT.Parser()
    cnf = CNF
    cnf_s = CNF_S
class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait):
    """String round-trip (dumps/loads) tests inherited from the common base."""
    pass
class Test_20(TBC.Test_20_dump_and_load, HasParserTrait):
    """File-object round-trip (dump/load) tests inherited from the common base."""
    pass
# vim:sw=4:ts=4:et:
| 2.015625 | 2 |
DynamicProgramming/MaximumSumContiguousSubarray.py | tejasnikumbh/Algorithms | 8 | 12765540 | <filename>DynamicProgramming/MaximumSumContiguousSubarray.py
# Importing standard libraries
import sys
'''
Reads in an integer from stream passed in as the parameter. Simple
parsing function that can read from files as well as standard input
'''
def parseInt(stream):
    """Read one line from *stream* and return it as an int."""
    line = stream.readline()
    return int(line.rstrip())
'''
Reads in an array of integers from stream passed in as parameter.This
is a simple parsing function that can read from files as well as
standard input
'''
def parseIntArr(stream):
    """Read one line of whitespace-separated integers and return them as a list."""
    tokens = stream.readline().rstrip().split()
    return list(map(int, tokens))
'''
Function to print the array with numbers in a space seperated format
'''
def printArray(a,delimiter):
arrayStr = ""
for i in a:
arrayStr += str(i) + delimiter
arrayStr.rstrip()
print arrayStr
'''
Applying K-dane's algorithm to get the maximum contiguous subarray
Sum in the array passed in as parameter. If all elements non positive
then return the max of the numbers in the array
'''
def getMaxContiguous(a):
    """Return the maximum contiguous-subarray sum of non-empty list *a*.

    Uses Kadane's algorithm in the ``max(x, cur + x)`` form: seeding with
    the first element makes the all-non-positive case fall out naturally,
    removing the original's extra allNonPos() pre-scan and helper
    dependency (one pass instead of two).
    """
    best = cur = a[0]
    for x in a[1:]:
        # Either extend the running window or restart it at x.
        cur = max(x, cur + x)
        best = max(best, cur)
    return best
'''
Simply the sum of all positive numbers in array. If all are non
Positive return the max of the numbers in the array
'''
def getMaxNonContiguous(a):
    """Return the best non-contiguous (subset) sum of non-empty list *a*.

    That is the sum of all positive elements; when there are none, the best
    single choice is ``max(a)``.  Single pass -- the original's separate
    allNonPos() pre-scan and helper dependency are gone.
    """
    total = sum(x for x in a if x > 0)
    return total if total > 0 else max(a)
'''
Returns True if all elements in array are 0 or negative.(non postive)
'''
def allNonPos(a):
    """True when every element of *a* is zero or negative (vacuously True for [])."""
    return not any(i > 0 for i in a)
'''
Main function for the program
'''
if __name__ == "__main__":
    # Read t test cases from stdin; for each, print the max contiguous and
    # max non-contiguous subarray sums separated by a space.
    stream = sys.stdin
    t = parseInt(stream)
    for i in range(t):
        # Declared array length; the parsed line is used as-is, so lenA is
        # only consumed to advance the stream.
        lenA = parseInt(stream)
        a = parseIntArr(stream)
        ans = [-1,-1]
        ans[0] = getMaxContiguous(a)
        ans[1] = getMaxNonContiguous(a)
        printArray(ans," ")
| 4.03125 | 4 |
misc/audio/save_block.py | Wenhao-Yang/TwoWayRadio | 1 | 12765541 | <reponame>Wenhao-Yang/TwoWayRadio
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Mon Jul 13 23:11:42 2020
##################################################
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import filter
from gnuradio import gr
from gnuradio.filter import firdes
import osmosdr
import time
import argparse
parser = argparse.ArgumentParser(description='Speaker Recognition System for record wav')
# Model options
parser.add_argument('--wavpath', type=str, default='data\\wav\\tmp\\tmp.wav', help='path to dataset')
parser.add_argument('--freq', type=float, default=401.195e6, help='path to voxceleb1 test dataset')
args = parser.parse_args()
class top_block(gr.top_block):
    """NBFM receive flowgraph.

    Signal path: HackRF (4 MS/s) -> low-pass FIR (decimate by 20) ->
    rational resampler (x12/5 -> 480 kS/s) -> squelch -> NBFM demod
    (48 kHz audio) -> gain -> wav file sink.
    """

    def __init__(self, freq, wavpath):
        gr.top_block.__init__(self, "Top Block")

        ##################################################
        # Variables
        ##################################################
        self.samp_rate = samp_rate = 4e6
        self.freq = freq

        ##################################################
        # Blocks
        ##################################################
        # 200 kS/s (post-LPF) * 12/5 = 480 kS/s, the NBFM quad rate below.
        self.rational_resampler_xxx_0 = filter.rational_resampler_ccc(
                interpolation=12,
                decimation=5,
                taps=None,
                fractional_bw=0.01,
        )
        # HackRF source tuned to the requested centre frequency.
        self.osmosdr_source_0 = osmosdr.source( args="numchan=" + str(1) + " " + 'hackrf=0' )
        self.osmosdr_source_0.set_time_now(osmosdr.time_spec_t(time.time()), osmosdr.ALL_MBOARDS)
        self.osmosdr_source_0.set_sample_rate(samp_rate)
        self.osmosdr_source_0.set_center_freq(freq, 0)
        self.osmosdr_source_0.set_freq_corr(0, 0)
        self.osmosdr_source_0.set_dc_offset_mode(0, 0)
        self.osmosdr_source_0.set_iq_balance_mode(0, 0)
        self.osmosdr_source_0.set_gain_mode(False, 0)
        self.osmosdr_source_0.set_gain(0, 0)
        self.osmosdr_source_0.set_if_gain(35, 0)
        self.osmosdr_source_0.set_bb_gain(62, 0)
        self.osmosdr_source_0.set_antenna('', 0)
        self.osmosdr_source_0.set_bandwidth(200000, 0)
        # 75 kHz passband / 25 kHz transition, decimating 4 MS/s by 20.
        self.low_pass_filter_0 = filter.fir_filter_ccf(20, firdes.low_pass(
            1, samp_rate, 75e3, 25e3, firdes.WIN_HAMMING, 6.76))
        # Mono 48 kHz 16-bit wav output.
        self.blocks_wavfile_sink_0 = blocks.wavfile_sink(wavpath, 1, 48000, 16)
        # Audio gain applied after demodulation.
        self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((1.4, ))
        # self.audio_sink_0 = audio.sink(48000, '', False)
        # Mute below -20.8 dB to suppress inter-transmission noise.
        self.analog_simple_squelch_cc_0 = analog.simple_squelch_cc(-20.8, 1e-3)
        self.analog_nbfm_rx_1 = analog.nbfm_rx(
        	audio_rate=48000,
        	quad_rate=480000,
        	tau=75e-6,
        	max_dev=10e3,
        )

        ##################################################
        # Connections
        ##################################################
        # NOTE: connection order below mirrors the GRC-generated file; the
        # actual signal path is source -> LPF -> resampler -> squelch ->
        # NBFM rx -> gain -> wav sink.
        self.connect((self.analog_nbfm_rx_1, 0), (self.blocks_multiply_const_vxx_0, 0))
        self.connect((self.analog_simple_squelch_cc_0, 0), (self.analog_nbfm_rx_1, 0))
        # self.connect((self.blocks_multiply_const_vxx_0, 0), (self.audio_sink_0, 0))
        self.connect((self.blocks_multiply_const_vxx_0, 0), (self.blocks_wavfile_sink_0, 0))
        self.connect((self.low_pass_filter_0, 0), (self.rational_resampler_xxx_0, 0))
        self.connect((self.osmosdr_source_0, 0), (self.low_pass_filter_0, 0))
        self.connect((self.rational_resampler_xxx_0, 0), (self.analog_simple_squelch_cc_0, 0))

    def get_samp_rate(self):
        """Return the current hardware sample rate (S/s)."""
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        """Change the sample rate and retune the source and LPF taps to match."""
        self.samp_rate = samp_rate
        self.osmosdr_source_0.set_sample_rate(self.samp_rate)
        self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, 75e3, 25e3, firdes.WIN_HAMMING, 6.76))

    def get_freq(self):
        """Return the current centre frequency (Hz)."""
        return self.freq

    def set_freq(self, freq):
        """Retune the source to a new centre frequency (Hz)."""
        self.freq = freq
        self.osmosdr_source_0.set_center_freq(self.freq, 0)
def main(top_block_cls=top_block, options=None):
    """Build the flowgraph, run it until Enter (or EOF), then shut it down.

    BUG FIX: in the original, ``tb.stop()``/``tb.wait()`` sat inside the
    ``except EOFError`` handler, so a normal Enter press never tore the
    flowgraph down.  A ``finally`` block guarantees shutdown on every path.
    """
    tb = top_block_cls(wavpath=args.wavpath, freq=args.freq)
    tb.start()
    try:
        raw_input('Press Enter to quit: ')
    except EOFError:
        pass
    finally:
        tb.stop()
        tb.wait()
if __name__ == '__main__':
    # Script entry point.  The commented-out duplicate of main() that used
    # to follow here was dead code and has been removed.
    main()
pyFileMove/mvfiles.py | nmschorr/pyFileMove9 | 0 | 12765542 | '''
Created on Nov 22, 2015
@author: <NAME>
'''
## You need to create a file called "fileslist.py" in the the same directory as this file
## inside fileslist.py you need a line that contains the file types to move
## Put one line in the file the defines the list dirTypes like this:
## dirTypes = ["pdf", "cat", "car", "aaa", "ring", "avery", "avalon", "jpg", "doc" ]
## if paths get mixed up, go into: Preferences/PyDev/Interpreter-Python and delete the
## python.exe interpreter, then re-add it back
## Also, make sure on Windows that system variable PYTHONPATH looks like below, and is in
## the regular system PATH
## PYTHONPATH=C:\Python27;C:\Python27\libs;C:\Python27\Lib;C:\Python27\DLLs;C:\Python27\Scripts;C:\Python27\Lib\site-packages;
## C:\Python27\lib\site-packages;C:\Python27\lib
# NOTE(review): assigning PYTHONDONTWRITEBYTECODE here creates a plain
# module variable, not the environment variable -- it does NOT stop .pyc
# creation.  ``sys.dont_write_bytecode = True`` would; confirm the intent.
PYTHONDONTWRITEBYTECODE = True #prevents bytecode .pyc files from being created
import os
import sys
import re
import shutil
import traceback
import time

# Make the private fileslist module (which defines dirNames) importable.
sys.path.append(os.path.abspath("E:\\Workspace\\PrivateFiles\\")) ## this is where the list
### of file types is kept

# NOTE(review): this guard does nothing (pass) -- presumably a leftover.
if __name__ == '__main__': pass

from fileslist import dirNames

inDir = "C:\\Users\\user\\Desktop\\MYDOCS" ## where your files to be sorted are
outDir = "C:\\Users\\user\\Desktop\\movedFinance" ## where you are sorting to

# Accumulators for the end-of-run report printed at the bottom of the file.
changed_list = []
unchanged_list = []

# Sanity check: print the loaded module entry so a missing fileslist.py is
# visible immediately.
impfiles=sys.modules.keys() ## print out the python modules we're using
for i in impfiles :
    if re.search("fileslist", str(i)): # make sure fileslist.py can be found
        print str(i);
#print "sys.path is: " + str(sys.path) + "\n" ## just for fun
def mywalk(inputDir):
    """Move files from inputDir into outDir/<type> subfolders by name match.

    For every file whose name matches an entry of dirNames
    (case-insensitive regex search), attempt to move it into the matching
    subdirectory of the module-level outDir.  Successes are recorded in
    changed_list, failures in unchanged_list (both module-level globals).

    NOTE(review): a file matching several dirNames entries is processed
    once per match (the ``break`` at the bottom is commented out), so it
    can be reported as unmoved for later matches after a successful move.
    """
    print " "
    for eachFile in os.listdir(inputDir):
        #print "here now"
        origFilenameWPath = os.path.join(inputDir, eachFile)
        for dirTypeItem in dirNames:
            #print "inside for loop"
            #print "dirTypeItem : " + str(dirTypeItem) + " dirNames: " + str(dirNames)
            if re.search(dirTypeItem, eachFile, re.IGNORECASE ):
                print ""
                print "New loop. Filename is: " + eachFile + " dirTypeItem: " + str(dirTypeItem)
                newOutDir = outDir + "\\" + dirTypeItem
                newFilenameWPath = os.path.join(newOutDir, eachFile)
                newDirExistsBoolean = os.path.isdir(newOutDir)
                sameNewFnamAlreadyExistsBoo = os.path.exists(newFilenameWPath)
                print "Does newOutDir exist? : " + str( newDirExistsBoolean )
                # Refuse to move when the destination directory is missing.
                if not newDirExistsBoolean :
                    print " Can't move file: " + str(eachFile)
                    print " ERROR!!! Destination dir " + '\"' +str(newOutDir)+ '\"' + " not there! Can't move to file-name-location : \"" + str(newFilenameWPath)+"\""
                    unchanged_list.append(str(origFilenameWPath))
                # Refuse to overwrite an existing file of the same name.
                if sameNewFnamAlreadyExistsBoo :
                    print " ERROR!! same filename Already exists : " + str(newFilenameWPath) + " - skipping"
                    unchanged_list.append(str(origFilenameWPath))
                if (newDirExistsBoolean and not sameNewFnamAlreadyExistsBoo) :
                    try :
                        print "-----> GOING TO MOVE \"" + str(origFilenameWPath) + "\" to \"" + str(newOutDir)+"\""
                        shutil.move(origFilenameWPath, newOutDir)
                        # NOTE(review): the sleep + existence re-check is a
                        # best-effort verification that the move landed;
                        # shutil.move raising would already signal failure.
                        time.sleep(.3)
                        newChangeExistsBoo = os.path.exists(newFilenameWPath)
                        print "newChangeExistsBoo " + str(newChangeExistsBoo)
                        print "------------->>check for filename: " + str(newFilenameWPath)
                        if newChangeExistsBoo :
                            print "write successful!"
                            changed_list.append(str(newFilenameWPath))
                        else :
                            print "write failed!"
                            unchanged_list.append(str(origFilenameWPath))
                    except Exception, e:
                        print "Exception caught: " + str(e)
                ## end of newDirexists
                #print "-------done with a loop" + "\n"
                ##break ## breaking because there is no sense in continuing with this item
# Run the sort, then print the moved/unmoved report.
mywalk(inDir)

print "\nHere's what got changed:"
for s in changed_list :
    print s
print "\nHere's what didn't change:"
for s in unchanged_list :
    print s
print "\nEnd of Program"
| 3.296875 | 3 |
openslides_backend/action/tag/__init__.py | reiterl/openslides-backend | 5 | 12765543 | from . import create_update_delete # noqa
| 0.957031 | 1 |
formly/views/design.py | coloradocarlos/formly | 34 | 12765544 | <gh_stars>10-100
from django.core.exceptions import PermissionDenied
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from formly.forms.design import (
FieldChoiceForm,
FieldForm,
OrdinalScaleForm,
PageUpdateForm,
SurveyCreateForm,
)
from formly.models import Field, FieldChoice, OrdinalScale, Page, Survey
from formly.utils.views import BaseDeleteView
try:
from account.decorators import login_required
except ImportError:
from django.contrib.auth.decorators import login_required
@login_required
def survey_list(request):
    """List every survey plus an unbound create form."""
    if not request.user.has_perm("formly.view_survey_list"):
        raise PermissionDenied() # pragma: no cover -> never invoked because @login_required
    return render(
        request,
        "formly/design/survey_list.html",
        context={
            "surveys": Survey.objects.all().order_by("-created"),
            "survey_form": SurveyCreateForm(user=request.user)
        })


@login_required
def survey_detail(request, pk):
    """Show one survey (with its pages) inside the survey-list template."""
    survey = get_object_or_404(Survey, pk=pk)
    if not request.user.has_perm("formly.view_survey_detail", obj=survey):
        raise PermissionDenied()
    response = render(
        request,
        "formly/design/survey_list.html",
        context={
            "surveys": Survey.objects.all().order_by("-created"),
            "survey_form": SurveyCreateForm(user=request.user),
            "pages": survey.pages.all(),
            "selected_survey": survey
        })
    return response


@login_required
def page_detail(request, pk):
    """Show one page of a survey, including its fields in ordinal order."""
    page = get_object_or_404(Page, pk=pk)
    if not request.user.has_perm("formly.view_survey_detail", obj=page.survey):
        raise PermissionDenied()
    return render(
        request,
        "formly/design/survey_list.html",
        context={
            "surveys": Survey.objects.all().order_by("-created"),
            "survey_form": SurveyCreateForm(user=request.user),
            "pages": page.survey.pages.all(),
            "selected_page": page,
            "selected_survey": page.survey,
            "fields": page.fields.all().order_by("ordinal")
        })
@login_required
def survey_create(request):
    """Create a survey owned by the current user and jump to its first page."""
    if not request.user.has_perm("formly.create_survey"):
        raise PermissionDenied() # pragma: no cover -> never invoked because @login_required
    if request.method == "POST":
        form = SurveyCreateForm(request.POST, user=request.user)
        if form.is_valid():
            survey = form.save()
            return redirect(survey.first_page())
    else:
        form = SurveyCreateForm(user=request.user)
    return render(
        request,
        "formly/design/survey_form.html",
        context={
            "form": form,
        })


@require_POST
@login_required
def survey_change_name(request, pk):
    """
    Rename a survey from an inline-edit POST and echo the new name as JSON.

    Works well with:
    http://www.appelsiini.net/projects/jeditable
    """
    survey = get_object_or_404(Survey, pk=pk)
    if not request.user.has_perm("formly.change_survey_name", obj=survey):
        raise PermissionDenied()
    survey.name = request.POST.get("name")
    survey.save()
    return JsonResponse({
        "status": "OK",
        "name": survey.name
    })


@require_POST
@login_required
def survey_publish(request, pk):
    """Publish a survey and return to the survey list."""
    survey = get_object_or_404(Survey, pk=pk)
    if not request.user.has_perm("formly.publish_survey", obj=survey):
        raise PermissionDenied()
    survey.publish()
    return redirect("formly:survey_list")


@require_POST
@login_required
def survey_duplicate(request, pk):
    """Clone a survey and open the copy's detail page."""
    survey = get_object_or_404(Survey, pk=pk)
    if not request.user.has_perm("formly.duplicate_survey", obj=survey):
        raise PermissionDenied()
    duped = survey.duplicate()
    return redirect("formly:survey_detail", pk=duped.pk)
@require_POST
@login_required
def page_create(request, pk):
    """Append a blank page to survey *pk* and open it."""
    survey = get_object_or_404(Survey, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=survey):
        raise PermissionDenied()
    page = survey.pages.create()
    return redirect(page)


@require_POST
@login_required
def field_create(request, pk):
    """Append a default text field to page *pk* and open it for editing."""
    page = get_object_or_404(Page, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=page.survey):
        raise PermissionDenied()
    # NOTE(review): every new field is created with ordinal=1 -- presumably
    # the model reorders on save; confirm duplicates don't break ordering.
    field = page.fields.create(
        label="New Field",
        survey=page.survey,
        field_type=Field.TEXT_FIELD,
        ordinal=1
    )
    return redirect(field)
@login_required
def page_update(request, pk):
    """Edit a page's attributes or append a field to it.

    POST ``action`` selects the operation: ``page_update`` saves the page
    form, ``field_add`` appends a new field.  On GET -- or an unknown
    action -- both forms render unbound.
    """
    # @@@ break this apart into separate views
    page = get_object_or_404(Page, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=page.survey):
        raise PermissionDenied()

    # BUG FIX: the original only assigned ``form``/``field_form`` inside the
    # two recognised POST actions, so a POST with any other ``action`` value
    # crashed with UnboundLocalError at render time.  Bind defaults first;
    # the recognised actions override the relevant form below.
    form = PageUpdateForm(instance=page)
    field_form = FieldForm(prefix="fields")

    if request.method == "POST":
        if request.POST.get("action") == "page_update":
            form = PageUpdateForm(data=request.POST, instance=page)
            if form.is_valid():
                page = form.save()
                return redirect(page)
        elif request.POST.get("action") == "field_add":
            field_form = FieldForm(data=request.POST, prefix="fields")
            if field_form.is_valid():
                field = field_form.save(commit=False)
                field.page = page
                field.survey = page.survey
                field.save()
                return redirect(page)

    return render(
        request,
        "formly/design/page_form.html",
        context={
            "form": form,
            "page": page,
            "field_form": field_form
        })
def _render_fields_fragment(request, field):
    """JSON fragment with the re-ordered field list for *field*'s page.

    Shared by field_move_up/field_move_down, which previously duplicated
    this render block verbatim.
    """
    return JsonResponse({
        "html": render_to_string(
            "formly/design/_fields.html",
            context={
                "selected_page": field.page,
                "fields": field.page.fields.all().order_by("ordinal"),
                "selected_field": field
            },
            request=request
        )
    })


@require_POST
@login_required
def field_move_up(request, pk):
    """Move a field one position up on its page and return the new list."""
    field = get_object_or_404(Field, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=field.survey):
        raise PermissionDenied()
    field.move_up()
    return _render_fields_fragment(request, field)


@require_POST
@login_required
def field_move_down(request, pk):
    """Move a field one position down on its page and return the new list."""
    field = get_object_or_404(Field, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=field.survey):
        raise PermissionDenied()
    field.move_down()
    return _render_fields_fragment(request, field)
@require_POST
@login_required
def field_add_choice(request, pk):
    """Add a choice to a field; re-render the designer with errors on failure."""
    field = get_object_or_404(Field, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=field.survey):
        raise PermissionDenied()
    field_choice_form = FieldChoiceForm(
        data=request.POST,
        prefix="choices"
    )
    if field_choice_form.is_valid():
        choice = field_choice_form.save(commit=False)
        choice.field = field
        choice.save()
        return redirect(field)
    # Invalid form: rebuild the full designer page so the bound form's
    # errors are visible.
    form = FieldForm(instance=field)
    return render(
        request,
        "formly/design/survey_list.html",
        context={
            "surveys": Survey.objects.all().order_by("-created"),
            "survey_form": SurveyCreateForm(user=request.user),
            "pages": field.page.survey.pages.all(),
            "selected_page": field.page,
            "selected_survey": field.survey,
            "fields": field.page.fields.all().order_by("ordinal"),
            "selected_field": field,
            "field_form": form,
            "field_choice_form": field_choice_form
        })
@require_POST
@login_required
def likert_scale_set(request, field_pk, scale_pk):
    """Attach an existing Likert scale to a Likert field; return the scale list.

    NOTE(review): unlike the other mutating views in this module, this one
    performs no ``formly.edit_survey`` permission check -- confirm whether
    that is intentional.
    """
    field = get_object_or_404(Field, pk=field_pk, field_type=Field.LIKERT_FIELD)
    scale = get_object_or_404(OrdinalScale, pk=scale_pk)
    field.scale = scale
    field.save()
    return JsonResponse({
        "html": render_to_string(
            "formly/design/_likert_scales.html",
            context={
                "selected_field": field,
                "likert_scales": OrdinalScale.objects.filter(kind=OrdinalScale.ORDINAL_KIND_LIKERT)
            },
            request=request
        )
    })


@require_POST
@login_required
def likert_scale_create(request, field_pk):
    """Create a balanced Likert scale, attach it, and return HTML fragments.

    The posted labels are scored symmetrically around zero (five labels map
    to -2..2).  On an invalid form the bound form (with errors) is returned
    instead of a fresh one.
    """
    field = get_object_or_404(Field, pk=field_pk, field_type=Field.LIKERT_FIELD)
    likert_scale_form = OrdinalScaleForm(request.POST, balanced=True)
    if likert_scale_form.is_valid():
        scale = likert_scale_form.save(commit=False)
        scale.kind = OrdinalScale.ORDINAL_KIND_LIKERT
        scale.save()
        choices = likert_scale_form.cleaned_data["scale"]
        # Balanced scale: scores run -max_score..max_score centred on 0.
        max_score = int((len(choices) - 1) / 2)
        min_score = -max_score
        for index, score in enumerate(range(min_score, max_score + 1)):
            scale.choices.create(
                label=choices[index],
                score=score
            )
        field.scale = scale
        field.save()
        likert_scale_form = OrdinalScaleForm()
    return JsonResponse({
        "html": render_to_string(
            "formly/design/_likert_scale_form.html",
            context={
                "selected_field": field,
                "likert_scale_form": likert_scale_form,
            },
            request=request
        ),
        "fragments": {
            ".likert-scales": render_to_string(
                "formly/design/_likert_scales.html",
                context={
                    "selected_field": field,
                    "likert_scales": OrdinalScale.objects.filter(kind=OrdinalScale.ORDINAL_KIND_LIKERT),
                },
                request=request
            )
        }
    })
@require_POST
@login_required
def rating_scale_set(request, field_pk, scale_pk):
    """Attach an existing rating scale to a rating field; return the scale list.

    NOTE(review): like likert_scale_set, this mutating view has no
    ``formly.edit_survey`` permission check -- confirm intent.
    """
    field = get_object_or_404(Field, pk=field_pk, field_type=Field.RATING_FIELD)
    scale = get_object_or_404(OrdinalScale, pk=scale_pk)
    field.scale = scale
    field.save()
    return JsonResponse({
        "html": render_to_string(
            "formly/design/_rating_scales.html",
            context={
                "selected_field": field,
                "rating_scales": OrdinalScale.objects.filter(kind=OrdinalScale.ORDINAL_KIND_RATING)
            },
            request=request
        )
    })


@require_POST
@login_required
def rating_scale_create(request, field_pk):
    """Create a rating scale (scores 0..n-1), attach it, return fragments."""
    field = get_object_or_404(Field, pk=field_pk, field_type=Field.RATING_FIELD)
    rating_scale_form = OrdinalScaleForm(request.POST)
    if rating_scale_form.is_valid():
        scale = rating_scale_form.save(commit=False)
        scale.kind = OrdinalScale.ORDINAL_KIND_RATING
        scale.save()
        choices = rating_scale_form.cleaned_data["scale"]
        # Scores are simply the 0-based position of each label.
        for index, choice in enumerate(choices):
            scale.choices.create(
                label=choice,
                score=index
            )
        field.scale = scale
        field.save()
        rating_scale_form = OrdinalScaleForm()
    return JsonResponse({
        "html": render_to_string(
            "formly/design/_rating_scale_form.html",
            context={
                "selected_field": field,
                "rating_scale_form": rating_scale_form
            },
            request=request
        ),
        "fragments": {
            ".rating-scales": render_to_string(
                "formly/design/_rating_scales.html",
                context={
                    "selected_field": field,
                    "rating_scales": OrdinalScale.objects.filter(kind=OrdinalScale.ORDINAL_KIND_RATING)
                },
                request=request
            )
        }
    })
@login_required
def field_update(request, pk):
    """Edit a field's definition and render the full designer page."""
    field = get_object_or_404(Field, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=field.survey):
        raise PermissionDenied()

    # Bind the default (unbound) forms up front.  BUG FIX: the original left
    # ``likert_scale_form``/``rating_scale_form`` unassigned when a POSTed
    # field form failed validation, crashing with UnboundLocalError at
    # render time; it also duplicated this default block three times.
    form = FieldForm(instance=field)
    field_choice_form = FieldChoiceForm(prefix="choices")
    likert_scale_form = OrdinalScaleForm()
    rating_scale_form = OrdinalScaleForm()

    if request.method == "POST" and request.POST.get("action") == "field_update":
        form = FieldForm(data=request.POST, instance=field)
        if form.is_valid():
            form.save()
            return redirect(field)

    return render(
        request,
        "formly/design/survey_list.html",
        context={
            "surveys": Survey.objects.all().order_by("-created"),
            "survey_form": SurveyCreateForm(user=request.user),
            "pages": field.page.survey.pages.all(),
            "selected_page": field.page,
            "selected_survey": field.survey,
            "fields": field.page.fields.all().order_by("ordinal"),
            "selected_field": field,
            "field_form": form,
            "field_choice_form": field_choice_form,
            "likert_scales": OrdinalScale.objects.filter(kind=OrdinalScale.ORDINAL_KIND_LIKERT),
            "likert_scale_form": likert_scale_form,
            "rating_scales": OrdinalScale.objects.filter(kind=OrdinalScale.ORDINAL_KIND_RATING),
            "rating_scale_form": rating_scale_form
        })
@require_POST
@login_required
def choice_delete(request, pk):
    """Delete a field choice and return an empty HTML fragment."""
    choice = get_object_or_404(FieldChoice, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=choice.field.survey):
        raise PermissionDenied()
    choice.delete()
    return JsonResponse({"html": ""})


@login_required
def choice_update(request, pk):
    """Edit a single field choice; redirect back to its page on success."""
    choice = get_object_or_404(FieldChoice, pk=pk)
    if not request.user.has_perm("formly.edit_survey", obj=choice.field.survey):
        raise PermissionDenied()
    if request.method == "POST":
        form = FieldChoiceForm(
            data=request.POST,
            instance=choice
        )
        if form.is_valid():
            form.save()
            return redirect(choice.field.page)
    else:
        form = FieldChoiceForm(instance=choice)
    return render(
        request,
        "formly/design/choice_form.html",
        context={
            "form": form,
            "choice": choice,
            "page": choice.field.page
        })
class SurveyDelete(BaseDeleteView):
    """Delete a Survey, then return to the survey list."""
    model = Survey
    success_url_name = "formly:survey_list"


class PageDelete(BaseDeleteView):
    """Delete a Page, then return to its parent survey's detail view."""
    model = Page
    success_url_name = "formly:survey_detail"
    pk_obj_name = "survey"


class FieldDelete(BaseDeleteView):
    """Delete a Field, then return to its parent page's edit view."""
    model = Field
    success_url_name = "formly:page_update"
    pk_obj_name = "page"


class ChoiceDelete(BaseDeleteView):
    """Delete a FieldChoice, then return to its parent field's edit view."""
    model = FieldChoice
    success_url_name = "formly:field_update"
    pk_obj_name = "field"
| 2.09375 | 2 |
experimental/registry-dtl/test_compo_service.py | nunet-io/ai-dsl | 7 | 12765545 | <gh_stars>1-10
import sys
import grpc
# import the generated classes
import service.proto_spec.compo_service_pb2_grpc as grpc_compo_grpc
import service.proto_spec.compo_service_pb2 as grpc_compo_pb2
from service import registry
if __name__ == "__main__":
    # Call Compo Service
    try:
        # Ask endpoint and argument, falling back to the registry defaults.
        dflt_ep = "localhost:{}".format(registry["compo_service"]["grpc"])
        endpoint = input("Endpoint [default={}]: ".format(dflt_ep)) or dflt_ep
        argument = int(input("Argument [default=41]: ") or 41)

        # Open a gRPC channel
        channel = grpc.insecure_channel(endpoint)
        stub = grpc_compo_grpc.CompoStub(channel)
        arguments = grpc_compo_pb2.Arguments(argument=argument)

        # Carry out service
        response = stub.compo(arguments)
        print(response.value)
    except Exception as e:
        # NOTE(review): the broad except hides which step failed; consider
        # catching grpc.RpcError and ValueError separately.
        print(e)
        exit(1)
| 2.4375 | 2 |
Python/self-dividing-numbers.py | sm2774us/leetcode_interview_prep_2021 | 0 | 12765546 | <filename>Python/self-dividing-numbers.py
# Time: O(nlogr) = O(n)
# Space: O(logr) = O(1)
class Solution(object):
    def selfDividingNumbers(self, left, right):
        """
        :type left: int
        :type right: int
        :rtype: List[int]

        A number is self-dividing when every decimal digit is non-zero and
        divides the number evenly.
        """
        def is_self_dividing(num):
            digits = str(num)
            return all(d != '0' and num % int(d) == 0 for d in digits)

        return [num for num in range(left, right + 1) if is_self_dividing(num)]
# Time: O(nlogr) = O(n)
# Space: O(logr) = O(1)
import itertools
class Solution2(object):
    def selfDividingNumbers(self, left, right):
        """
        :type left: int
        :type right: int
        :rtype: List[int]

        BUG FIX: the original called ``itertools.map``, which does not exist
        in any Python version (it is ``imap`` in Python 2 and the ``map``
        builtin in Python 3), so this method always raised AttributeError.
        A plain generator expression does the same digit test directly.
        """
        return [num for num in range(left, right + 1)
                if not any(int(x) == 0 or num % int(x) != 0 for x in str(num))]
| 3.65625 | 4 |
star.py | eli173/star | 0 | 12765547 | <reponame>eli173/star
#80#############################################################################
import sqlite3
import json
from flask import Flask, request, session, g, redirect, \
url_for, abort, render_template, flash
from contextlib import closing
import bcrypt
import game
#cfg
DATABASE = 'db.db'
#
# NOTE(review): DEBUG must be disabled for any non-local deployment.
DEBUG = True

app = Flask(__name__)
app.config.from_object(__name__)
# SECURITY: the session-signing secret is hard-coded in source.  Load it
# from the environment or a config file kept out of version control.
app.secret_key = b'\<KEY>'
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
@app.before_request
def before_request():
    """Open a per-request database connection on flask.g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the request's database connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
def get_username(uid):
    """Return the username for *uid*, or None when no such user exists."""
    rows = g.db.execute('select username from users where id=?', (uid,)).fetchall()
    if not rows:
        return None
    return rows[0][0]
@app.route('/games')
def games():
    """List the current user's games and whether they are on the waitlist."""
    if 'logged_in' not in session:
        return redirect(url_for("index"))
    uname = session['username']
    uid = g.db.execute('select id from users where username=?',(uname,))
    uid = uid.fetchall()[0][0]
    # games rows: (id, board, whose_turn, player1, player2).
    games = g.db.execute('select * from games where player1=? or player2=?',
                         (uid,uid)).fetchall()
    # Build (opponent_name, turn_flag, game_id) triples for the template.
    # NOTE(review): ``game[3]`` holds player1's *id* but uname is a
    # username, so this comparison looks like it can never be true and the
    # else branch always runs; the ``game[2]!=uid`` flag there is also the
    # inverse of the first branch.  Confirm against the schema/template.
    glist = []
    for game in games:
        if game[3] == uname:# 3 is p1
            glist.append((get_username(game[4]),game[2]==uid,game[0]))
        else:
            glist.append((get_username(game[3]),game[2]!=uid,game[0]))
    app.logger.debug(glist)
    games_waiting = g.db.execute('select * from waiting where player=?',
                                 (uid,)).fetchall()
    waiting = False
    if len(games_waiting)!=0:
        waiting = True
    app.logger.debug(waiting)
    return render_template('games.html',waiting=waiting,glist=glist)
@app.route('/newgame')
def newgame():
    """Match the user with a waiting opponent or put them on the waitlist."""
    if 'logged_in' not in session:
        return redirect(url_for("index"))
    app.logger.debug(session['username'])
    uid = g.db.execute('select id from users where username=?',
                       (session['username'],)).fetchall()[0][0]
    waiting = g.db.execute('select * from waiting').fetchall()
    # Already queued? Just show the games page.
    for game in waiting:
        if uid==game[1]:
            return redirect(url_for("games"))
    # Pair with the first waiting player who is not us; mover starts.
    # NOTE(review): this check-then-act sequence is racy under concurrent
    # requests (two users could claim the same waiting row) -- confirm
    # whether that matters for this deployment.
    for game in waiting:
        opp_id = game[1]
        if opp_id!=uid:
            g.db.execute('delete from waiting where id=?',
                         (game[0],))
            g.db.execute('insert into games (player1, player2, whose_turn) values (?,?,?)',(uid,opp_id,uid))
            g.db.commit()
            game_id = g.db.execute('select id from games where player1=? and player2=?',(uid,opp_id)).fetchall()
            return redirect(url_for("play",game_id=game_id[0][0]))
    # Nobody available: join the waitlist.
    g.db.execute('insert into waiting (player) values (?)',(uid,))
    g.db.commit()
    return redirect(url_for("games")) # how to tell if on waitlist?
@app.route('/play/<int:game_id>')
def play(game_id):
    """Render the board for one game, flagging whether it is our turn.

    CONSISTENCY FIX: like games()/newgame(), require a logged-in session --
    the function reads session['uid'] below and previously 500'd with a
    KeyError for anonymous visitors.
    """
    if 'logged_in' not in session:
        return redirect(url_for("index"))
    g.game_id = game_id
    # games rows: (id, board, whose_turn, player1, player2).
    db_gm = g.db.execute('select * from games where id=?',(game_id,)).fetchall()
    if db_gm == []:
        return redirect(url_for("games"))
    the_gm = game.Game(db_gm[0][3],db_gm[0][4],db_gm[0][0])
    the_gm.import_string(db_gm[0][1])
    app.logger.debug(db_gm[0][1])
    waiting = False
    whose_turn = db_gm[0][2]
    uid = session['uid']
    if whose_turn!=uid:
        waiting = True
    # Colour every cell for the template: red = player 1, blue = player 2,
    # yellow = open.
    g.color_table = {}
    cell_list = []
    for cg in game.cell_groups:
        cell_list += cg
    for cell in cell_list:
        curr_color = "ffff00"
        if cell in the_gm.p1_cells:
            curr_color = "ff0000"
        elif cell in the_gm.p2_cells:
            curr_color = "0000ff"
        g.color_table[cell] = curr_color
    return render_template('play.html',waiting=waiting)
@app.route('/forefeit/<int:game_id>')
def forefeit(game_id):
    """Concede a game: the current user takes a loss, the opponent a win.

    BUG FIX: the original ran ``select (player1,player2) from games`` -- a
    row-value expression SQLite rejects with "row value misused" -- and then
    indexed columns 2 and 3 of what would have been a two-column result.
    Select the two columns normally and index 0/1.  Also guard against an
    anonymous session and a missing game row.
    """
    if 'logged_in' not in session:
        return redirect(url_for("index"))
    g_q = g.db.execute('select player1, player2 from games where id=?',
                       (game_id,)).fetchall()
    if g_q == []:
        return redirect(url_for("games"))
    p1, p2 = g_q[0][0], g_q[0][1]
    if p1 == session['uid']:
        winner, loser = p2, p1
    else:
        winner, loser = p1, p2
    g.db.execute("delete from games where id=?", (game_id,))
    g.db.execute("update users set wins=wins+1 where id=?", (winner,))
    g.db.execute("update users set losses=losses+1 where id=?", (loser,))
    g.db.commit()
    return redirect(url_for("games"))
@app.route('/submit/<int:game_id>/<move>')
def submit(game_id, move):
    """Apply *move* for the current user in *game_id*, then redirect.

    Validates in order: the game exists, the requester exists and it is
    their turn, and the target cell is open.  A finished game is deleted
    and both players' win/loss counters are updated; otherwise the new
    board is saved and the turn passes to the opponent.
    """
    # get game from db
    # games rows: (id, board, whose_turn, player1, player2).
    db_gm = g.db.execute('select * from games where id=?',(game_id,))
    gdata = db_gm.fetchall()
    if gdata == []:
        return redirect(url_for("index"))
    the_gm = game.Game(gdata[0][3],gdata[0][4],game_id)
    the_gm.import_string(gdata[0][1])
    # check right user
    curr_user = session['username']
    user_id_q = g.db.execute('select id from users where username=?',
                             (curr_user,)).fetchall()
    if user_id_q == []:
        return redirect(url_for("play",game_id=game_id))
    app.logger.debug(move)
    uid = user_id_q[0][0]
    app.logger.debug(uid)
    app.logger.debug(gdata)
    # Reject moves made out of turn.
    if uid!=gdata[0][2]: # don't need to do more than this?
        return redirect(url_for("play",game_id=game_id))
    # checks move valid
    app.logger.debug(the_gm.open_cells)
    if move not in the_gm.open_cells:
        return redirect(url_for("play",game_id=game_id))
    # do it?
    the_gm.move(uid,move)
    app.logger.debug(the_gm.is_over())
    if the_gm.is_over():
        # Game over: record the result and drop the game row.
        winner = the_gm.get_winner()
        loser = the_gm.p1 if winner==the_gm.p2 else the_gm.p2
        g.db.execute('delete from games where id=?',
                     (the_gm.g_id,))
        g.db.execute('update users set wins=wins+1 where id=?',
                     (winner,))
        g.db.execute('update users set losses=losses+1 where id=?',
                     (loser,))
        g.db.commit()
        return redirect(url_for("games"))
    # Persist the new board state and hand the turn to the opponent.
    estr = the_gm.export_string()
    app.logger.debug(game_id)
    opp_id = the_gm.p1 if the_gm.p2==uid else the_gm.p2
    g.db.execute('update games set board=? where id=?',(estr,game_id))
    g.db.execute('update games set whose_turn=? where id=?',
                 (opp_id,game_id))
    g.db.commit()
    return redirect(url_for("play",game_id=game_id))
@app.route('/logout')
def logout():
    """Drop the session's login flag (if present) and return to the index."""
    session.pop('logged_in', None)
    destination = url_for("index")
    return redirect(destination)
@app.route('/login', methods=['POST'])
def login():
    """Handle both the login and the registration submit buttons.

    The form carries either a 'login' or a 'register' field. Login verifies
    the bcrypt hash and populates the session; register creates the user.
    Failures flash a 'login error' message and return to the index page.
    """
    if request.method == 'POST':
        user = g.db.execute('select * from users where username=?',
            (request.form.get('username'),)).fetchall()
        user_exists = len(user) != 0
        if request.form.get('login') is not None:
            if request.form.get('username') is None:
                return redirect(url_for("index"))
            if not user_exists:
                flash(u'No account with this username exists','login error')
                return redirect(url_for("index"))
            pw_sql = g.db.execute('select pw_hash from users where username=?',
                (request.form.get('username'),))
            pw_hash = pw_sql.fetchall()[0][0]
            pw_plain = request.form.get('password').encode('UTF-8')
            # bcrypt convention: hashing the candidate with the stored hash
            # as salt reproduces the hash only if the password matches.
            if bcrypt.hashpw(pw_plain, pw_hash) == pw_hash:
                session['logged_in'] = True
                session['username'] = request.form.get('username')
                session['uid'] = user[0][0]
                return redirect(url_for("games"))
            else:
                flash(u'Wrong Password','login error')
                return redirect(url_for("index"))
        elif request.form.get('register') is not None:
            if user_exists:
                flash(u'Username already taken','login error')
                return redirect(url_for("index"))
            pw_plain = request.form.get('password').encode('UTF-8')
            pw_hash = bcrypt.hashpw(pw_plain, bcrypt.gensalt())
            g.db.execute('insert into users (username, pw_hash) values (?, ?)',
                (request.form.get('username'),pw_hash))
            g.db.commit()
            return redirect(url_for("games"))
    # BUG FIX: the original fell off the end (implicitly returning None,
    # which Flask turns into a 500) when neither submit button was present.
    return redirect(url_for("index"))
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
if __name__ == '__main__':
    # BUG FIX: "app.run" only referenced the bound method without calling
    # it, so the development server never actually started.
    app.run()
| 2.953125 | 3 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import xija
import sys
from os.path import expanduser
# Make the (not pip-installed) xijafit package importable from the user's
# home-directory AXAFLIB checkout before importing it.
home = expanduser("~")
addthispath = home + '/AXAFLIB/xijafit/'
sys.path.insert(0, addthispath)
import xijafit
stars = '*' * 80
n = 0


def _fit_stage(model, step, actions):
    """Run one staged fit: banner, freeze everything, re-thaw, fit.

    ``actions`` is a sequence of (kind, name) pairs applied in order:
    ('method', m) calls ``model.m()`` (e.g. thaw_solarheat_p) and
    ('param', p) calls ``model.thaw_param(p)``.
    """
    print('{}\nStep {}\n{}'.format(stars, step, stars))
    model.freeze_all()
    for kind, name in actions:
        if kind == 'method':
            getattr(model, name)()
        else:
            model.thaw_param(name)
    model.fit(method='moncar')


# Re-usable thaw recipes for the staged fit below (same parameter sets the
# original script repeated inline ten times).
HEATSINK = (('param', u'heatsink__aca0__tau'),
            ('param', u'heatsink__aca0__T'),
            ('param', u'coupling__aacccdpt__aca0'))
SOLARHEAT_P = (('method', 'thaw_solarheat_p'),
               ('param', u'coupling__aacccdpt__aca0__tau'))
AMPL = (('param', u'solarheat__aca0__ampl'),)
SOLARHEAT_DP = (('method', 'thaw_solarheat_dp'),
                ('param', u'solarheat__aca0__ampl'))

# Long-baseline fit of the full model over 2014-2018.
newmodel = xijafit.XijaFit('aca_model_spec.json', start='2014:001', stop='2018:300',
                           set_data_exprs=(u'aca0=12.0',), quiet=False, name='aacccdpt')

# Alternate between the recipes in exactly the sequence the original
# script executed as ten copy-pasted stages.
for actions in (HEATSINK, SOLARHEAT_P, AMPL,
                HEATSINK, SOLARHEAT_P, SOLARHEAT_DP,
                SOLARHEAT_P, AMPL, HEATSINK, SOLARHEAT_DP):
    n = n + 1
    _fit_stage(newmodel, n, actions)

newmodel.write_spec_file()
newmodel.write_snapshots_file()

# Short-baseline refit of only the step-power term over recent data,
# starting from the spec file written above.
newmodel = xijafit.XijaFit('aacccdpt_model_spec.json', start='2018:270', stop='2018:305',
                           set_data_exprs=(u'aca0=-11.0',), quiet=False, name='aacccdpt')
n = n + 1
_fit_stage(newmodel, n, (('param', u'step_power__aca0__P'),))
newmodel.write_spec_file()
newmodel.write_snapshots_file()
| 1.992188 | 2 |
import torch
from typing import Callable, TypeVar
from functools import wraps
from . import debug
class ScopedDebugTensorList:
    """Read-only holder for the inspector records captured in one scope.

    Instances are returned by ``ScopedTensorInspectorContext.__enter__`` and
    are populated by the context manager on scope exit.
    """

    def __init__(self) -> None:
        # Filled in later via _set_hidden_states by the owning context.
        self._hidden_states = []

    @property
    def hidden_states(self):
        """List of records captured while the scope was active."""
        return self._hidden_states

    def _set_hidden_states(self, hidden_states):
        # Internal hook: the context manager hands over the collected list.
        self._hidden_states = hidden_states
class ScopedTensorInspectorContext:
    """Context manager collecting tensor-inspector records for one scope.

    On entry the global "_inspect_hidden_states" debug list is stashed and
    replaced with a fresh one; on exit everything recorded inside the scope
    is moved into the yielded ScopedDebugTensorList and the previous global
    list is restored.
    """

    def __enter__(self):
        self.prev_hidden = debug.get("_inspect_hidden_states", [])
        debug.set("_inspect_hidden_states", [])
        self._local_list = ScopedDebugTensorList()
        return self._local_list

    def __exit__(self, *args):
        scoped = debug.get("_inspect_hidden_states", [])
        self._local_list._set_hidden_states(scoped)
        # Restore the outer scope's list and drop our reference to it.
        debug.set("_inspect_hidden_states", self.prev_hidden)
        self.prev_hidden = None
self.prev_hidden = None
class CheckpointFunction(torch.autograd.Function):
    """Autograd function implementing activation checkpointing.

    ``forward`` runs ``func`` under ``no_grad`` so intermediate activations
    are not stored (only the inputs are saved); ``backward`` re-runs
    ``func`` -- optionally under the saved CUDA RNG state so stochastic ops
    replay identically -- to rebuild the local graph, then backpropagates
    through it.
    """

    @staticmethod
    def forward(ctx, placeholder, func, preserve_rng_state, *args):
        # `placeholder` is an empty tensor created by the `checkpoint`
        # wrapper with requires_grad mirroring torch.is_grad_enabled();
        # presumably it keeps this op in the autograd graph even when no
        # real input requires grad -- confirm against the wrapper below.
        ctx.func = func
        ctx.preserve_rng_state = preserve_rng_state
        # Snapshot the CUDA RNG so the backward recomputation can replay it.
        ctx.cuda_rng_state = torch.cuda.get_rng_state() if preserve_rng_state else None
        # Split args: tensors go through save_for_backward (so autograd can
        # track versions); non-tensors are stashed on ctx. The two parallel
        # lists keep positions aligned (None marks the "other" kind).
        tensors = []
        others = []
        for arg in args:
            if torch.is_tensor(arg):
                tensors.append(arg)
                others.append(None)
            else:
                tensors.append(None)
                others.append(arg)
        ctx.nontensor_inputs = others
        ctx.save_for_backward(*tensors)
        # Run the wrapped function without building a graph, while capturing
        # any inspector records it emits.
        with torch.no_grad(), ScopedTensorInspectorContext() as inspector:
            outputs = func(*args)
        # append scoped hidden states to global list as a placeholder; the
        # real tensors are substituted during backward recomputation.
        for it in inspector.hidden_states:
            debug.append("_inspect_hidden_states", it)
        ctx.inspect_list = inspector.hidden_states
        return outputs

    @staticmethod
    def backward(ctx, *grad_outputs):
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError(
                "Checkpointing is not compatible with .grad() or when an `inputs` parameter"
                " is passed to .backward(). Please use .backward() and do not pass its `inputs`"
                " argument.")
        # Reassemble the original argument list, replacing saved tensors with
        # detached leaves that keep the original requires_grad flag so the
        # recomputation produces .grad on them.
        all_inputs = []
        input_reqires_grad = []
        for tensor, other in zip(ctx.saved_tensors, ctx.nontensor_inputs):
            if tensor is None:
                all_inputs.append(other)
                input_reqires_grad.append(False)
            else:
                input_reqires_grad.append( tensor.requires_grad )
                nw_tensor = tensor.detach()
                nw_tensor.requires_grad = tensor.requires_grad
                all_inputs.append(nw_tensor)
        # Replay under the forward pass's CUDA RNG state (if preserved) so
        # stochastic ops (e.g. dropout) recompute identically.
        with torch.random.fork_rng(devices=[torch.cuda.current_device()], enabled=ctx.preserve_rng_state):
            if ctx.preserve_rng_state:
                torch.cuda.set_rng_state(ctx.cuda_rng_state)
            # Re-run with grad enabled to rebuild the local graph.
            with torch.enable_grad(), ScopedTensorInspectorContext() as inspector:
                outputs = ctx.func(*all_inputs)
            # The recomputation must emit the same inspector records as the
            # forward pass; otherwise the function is non-deterministic.
            assert len(ctx.inspect_list) == len(inspector.hidden_states), "Backward step changed"
            for i, it in enumerate(inspector.hidden_states):
                assert it["name"] == ctx.inspect_list[i]["name"], "Backward step changed"
                assert it["shape"] == ctx.inspect_list[i]["shape"], "Backward step changed"
                assert it["group"] == ctx.inspect_list[i]["group"], "Backward step changed"
                # change the tensor in placeholder: the records appended to
                # the global list in forward now get the recomputed tensors.
                ctx.inspect_list[i]["tensor"] = it["tensor"]
        if not isinstance(outputs, tuple):
            outputs = (outputs,)
        assert len(outputs) == len(grad_outputs)
        # Backprop only through outputs that actually require grad, pairing
        # each with its incoming gradient.
        outputs_with_grad = []
        grad_of_output = []
        for i, output in enumerate(outputs):
            if torch.is_tensor(output) and output.requires_grad:
                outputs_with_grad.append(output)
                grad_of_output.append(grad_outputs[i])
        torch.autograd.backward(
            outputs_with_grad,
            grad_of_output,
        )
        # Collect input gradients in original argument order; the three
        # leading Nones cover (placeholder, func, preserve_rng_state).
        grads = []
        for inp, requires_grad in zip(all_inputs, input_reqires_grad):
            if requires_grad:
                grads.append(inp.grad)
            else:
                grads.append(None)
        return (None, None, None) + tuple(grads)
R = TypeVar("R")


def checkpoint(func : Callable[..., R]) -> Callable[..., R]:
    """Decorator applying activation checkpointing to ``func``.

    The wrapped call runs through CheckpointFunction, so activations are
    recomputed during backward instead of being stored.
    """
    @wraps(func)
    def inner(*args):
        # The empty marker tensor carries the current grad-enabled flag
        # into the autograd Function as its first argument.
        marker = torch.tensor([], requires_grad=torch.is_grad_enabled())
        return CheckpointFunction.apply(marker, func, True, *args)
    return inner
| 2.171875 | 2 |
# install-nagios-server/fix_nagios_cfg.py
import re, os, sys
# Matches a "temp_path=<value>" line in nagios.cfg, capturing the value.
__re__ = re.compile("temp_path=(?P<temp_path>.*)", re.DOTALL | re.MULTILINE)
def find_nagios_cfg(top,target):
print 'DEBUG: top=%s, target=%s' % (top,target)
for folder,dirs,files in os.walk(top):
if (any([f == target for f in files])):
return os.sep.join([folder,target])
print 'DEBUG: None found !!!'
return None
# Locate the installed nagios.cfg under /usr and rewrite its temp_path
# setting to point at a "tmp" directory created beside the config directory.
__top__ = '/usr'
fpath = find_nagios_cfg(__top__, 'nagios.cfg')
# NOTE(review): find_nagios_cfg() may return None; os.path.exists(None)
# raises TypeError on Python 2 -- presumably the config always exists when
# this script runs. Confirm before relying on the else branch.
if (os.path.exists(fpath)):
    # Slurp the whole config into memory.
    fIn = open(fpath, 'r')
    lines = fIn.readlines()
    fIn.close()
    # Derive the temp directory: swap the last component of the config's
    # directory for "tmp" (e.g. .../nagios/etc -> .../nagios/tmp).
    __temp_path__ = os.path.dirname(fpath)
    toks = __temp_path__.split(os.sep)
    if (len(toks) > 1):
        del toks[-1]
        toks.append('tmp')
    __temp_path__ = os.sep.join(toks)
    if (not os.path.exists(__temp_path__)):
        os.mkdir(__temp_path__)
    # Write an updated copy to "<config>.new", replacing the captured
    # temp_path value on matching lines; other lines pass through.
    fOut = open(fpath+'.new', mode='w')
    for l in lines:
        matches = __re__.search(l)
        if (matches):
            temp_path = matches.groupdict().get('temp_path',None)
            if (temp_path):
                l = l.replace(temp_path,__temp_path__)
        print >> fOut, str(l).rstrip()
    fOut.flush()
    fOut.close()
    # Swap the rewritten file into place of the original.
    os.remove(fpath)
    os.rename(fOut.name,fpath)
else:
    print >> sys.stderr, 'WARNING: Cannot find "%s".' % (fpath)