max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
rumor/upstreams/aws.py | SudoQ/rumor | 1 | 12764351 | import json
import math
from datetime import datetime, timedelta
from decimal import Decimal
from typing import Any, Dict, List
import boto3
import boto3.dynamodb.types
from boto3.dynamodb.conditions import Attr
from logzero import logger
def send_messages(messages: List[Dict[str, str]], queue_name: str,
                  batch_size: int = 10) -> None:
    """Send *messages* to the SQS queue named *queue_name* in batches.

    Each message is JSON-encoded and given a batch-entry Id equal to its
    index in *messages* (unique within a batch as required by SQS).
    SQS caps SendMessageBatch at 10 entries, so *batch_size* should not
    exceed 10.

    :param messages: payloads to enqueue; each is JSON-serialised.
    :param queue_name: name of the target SQS queue.
    :param batch_size: entries per SendMessageBatch call (max 10).
    """
    sqs = boto3.resource('sqs')
    client = boto3.client('sqs')
    queue = sqs.get_queue_by_name(QueueName=queue_name)
    entries = [{
        'Id': f'{i}',
        'MessageBody': json.dumps(msg)
    } for i, msg in enumerate(messages)]
    # Slice the entry list directly instead of computing a segment count
    # with math.ceil; range() handles the final partial batch naturally.
    for start in range(0, len(entries), batch_size):
        client.send_message_batch(
            QueueUrl=queue.url,
            Entries=entries[start:start + batch_size]
        )
def get_messages(queue_name: str, batch_size: int = 10) -> List[Dict[str, Any]]:
    """Receive up to *batch_size* messages from the named SQS queue.

    Uses short polling (WaitTimeSeconds=0); returns an empty list when
    no messages are available.
    """
    queue = boto3.resource('sqs').get_queue_by_name(QueueName=queue_name)
    response = boto3.client('sqs').receive_message(
        QueueUrl=queue.url,
        MaxNumberOfMessages=batch_size,
        WaitTimeSeconds=0,
    )
    return response.get('Messages', [])
def delete_messages(messages: List[Dict[str, Any]], queue_name: str):
    """Delete previously received messages from the named SQS queue.

    Each message dict must carry the 'ReceiptHandle' returned by
    receive_message.
    """
    sqs = boto3.resource('sqs')
    client = boto3.client('sqs')
    queue_url = sqs.get_queue_by_name(QueueName=queue_name).url
    for received in messages:
        client.delete_message(QueueUrl=queue_url,
                              ReceiptHandle=received['ReceiptHandle'])
def store_item(item: Dict[str, Any], table_name: str) -> None:
    """Put *item* into the DynamoDB table *table_name* (overwrites an existing item with the same key)."""
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table(table_name)
    table.put_item(Item=item)
def get_news_items(news_item_table_name: str, created_at_from: datetime,
                   created_at_to: datetime) -> List[Dict[str, Any]]:
    """Query news items whose created_at falls in [created_at_from, created_at_to].

    The table is partitioned by creation date, so one LSI query is issued
    per day covered by the range; low-level DynamoDB items are
    deserialized into plain Python values before being returned.
    """
    client = boto3.client('dynamodb')
    paginator = client.get_paginator('query')
    # One query per day spanned by the range, +1 to absorb partial days.
    number_of_queries = math.ceil((created_at_to - created_at_from).total_seconds() / 86400.0) + 1
    _now = datetime.now()
    # NOTE(review): partition keys are counted back from *now*, not from
    # created_at_to -- this assumes the range always ends near the present.
    # Verify behaviour for purely historical ranges.
    partition_keys = [str((_now - timedelta(days=d)).date()) for d in range(number_of_queries)]
    operation_parameters_list = [{
        'TableName': news_item_table_name,
        'IndexName': 'LSI',
        'KeyConditionExpression': ('created_at_date = :created_at_date AND '
                                   'created_at BETWEEN :ca_from AND :ca_to'),
        'ExpressionAttributeValues': {
            ':created_at_date': {'S': pk},
            ':ca_from': {'N': str(created_at_from.timestamp())},
            ':ca_to': {'N': str(created_at_to.timestamp())},
        }
    } for pk in partition_keys]
    items = []
    deserializer = boto3.dynamodb.types.TypeDeserializer()
    for operation_parameters in operation_parameters_list:
        page_iterator = paginator.paginate(**operation_parameters)
        for page in page_iterator:
            for item in page['Items']:
                # Wrap in {'M': ...} so the whole item deserializes as a map.
                items.append(deserializer.deserialize({'M': item}))
    logger.info('Found {} news items to evaluate'.format(len(items)))
    return items
def get_preferences(preference_table_name: str):
    """Return every KEYWORD preference item from the preference table."""
    client = boto3.client('dynamodb')
    paginator = client.get_paginator('query')
    query_args = {
        'TableName': preference_table_name,
        'KeyConditionExpression': 'preference_type = :preference_type',
        'ExpressionAttributeValues': {
            ':preference_type': {'S': 'KEYWORD'}
        }
    }
    deserializer = boto3.dynamodb.types.TypeDeserializer()
    # Flatten all result pages, deserializing each low-level item as a map.
    items = [
        deserializer.deserialize({'M': raw_item})
        for page in paginator.paginate(**query_args)
        for raw_item in page['Items']
    ]
    logger.info('Found {} keywords'.format(len(items)))
    return items
def store_preference(keyword: str, weight: float, preference_table_name: str):
    """Persist a KEYWORD preference with the given *weight*.

    The weight is converted via ``Decimal(str(weight))``: building a
    Decimal directly from a float keeps the float's full binary expansion
    (e.g. ``Decimal(0.1)`` has 55 significant digits), which the DynamoDB
    serializer rejects with an Inexact error.
    """
    preference_item = {
        'preference_type': 'KEYWORD',
        'preference_key': keyword,
        'preference_weight': Decimal(str(weight))
    }
    return store_item(preference_item, preference_table_name)
def send_notification(msg: str, topic_arn_hint: str, subject: str) -> None:
    """Publish *msg* to the first SNS topic whose ARN contains *topic_arn_hint*."""
    sns = boto3.client('sns')
    all_topics = sns.list_topics()['Topics']
    arn = get_topic_arn(all_topics, topic_arn_hint)
    sns.publish(
        Subject=subject,
        Message=msg,
        TopicArn=arn
    )
def get_topic_arn(topics: List[Dict[str, Any]], topic_arn_hint: str) -> str:
    """Return the ARN of the first topic whose ARN contains *topic_arn_hint*.

    :param topics: SNS ``list_topics()`` entries, each with a 'TopicArn' key.
    :param topic_arn_hint: substring to look for in the ARN.
    :raises ValueError: if no topic matches -- previously this fell
        through and returned None, which surfaced later as an opaque
        SNS publish error.
    """
    for topic in topics:
        if topic_arn_hint in topic['TopicArn']:
            return topic['TopicArn']
    raise ValueError(f'No SNS topic matching hint: {topic_arn_hint!r}')
def get_reports(evaluation_report_table_name: str, created_at_from: datetime,
                created_at_to: datetime) -> List[Dict[str, Any]]:
    """Return evaluation reports created in [created_at_from, created_at_to).

    Follows ``LastEvaluatedKey`` so that results spanning more than one
    scan page are all returned -- a single ``table.scan()`` call only
    yields up to 1 MB of data, which the original silently truncated.
    """
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table(evaluation_report_table_name)
    filter_expression = (
        Attr('created_at').gte(Decimal(created_at_from.timestamp())) &
        Attr('created_at').lt(Decimal(created_at_to.timestamp()))
    )
    items: List[Dict[str, Any]] = []
    scan_kwargs = {'FilterExpression': filter_expression}
    while True:
        response = table.scan(**scan_kwargs)
        items.extend(response['Items'])
        last_key = response.get('LastEvaluatedKey')
        if last_key is None:
            return items
        scan_kwargs['ExclusiveStartKey'] = last_key
| 1.921875 | 2 |
awwardapp/views.py | mornicamwende/ranker | 0 | 12764352 | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
# from geelweb.django.ratings.views import RateView
from .models import post
from django.urls import reverse
from django.shortcuts import get_object_or_404
import random
def home(request):
    """Render the landing page with all posts (newest first) plus one
    randomly featured post.

    Fixes two defects in the original:
    - ``random.randint(0, len(posts)-1)`` raised ValueError when there
      were no posts (the except clause only caught post.DoesNotExist);
    - reversing via ``queryset[::-1]`` is unsupported on Django
      querysets, so the queryset is materialised into a list first.
    """
    posts = list(post.objects.all())[::-1]
    # NOTE(review): template context was previously locals(); the
    # template is assumed to use only `posts` and `random_post` -- verify.
    random_post = random.choice(posts) if posts else None
    return render(request, 'awwardapp/home.html',
                  {'posts': posts, 'random_post': random_post})
class PostListView(ListView):
    """List all posts on the home page, newest first."""
    model = post
    template_name = 'awwardapp/home.html' #<app>/<model> <viewtype>.html
    context_object_name = 'posts'
    ordering = ['-date_posted']
class PostDetailView(DetailView):
    """Display a single post (Django's default template name: awwardapp/post_detail.html)."""
    model = post
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a post; requires login and stamps the current user as owner."""
    model = post
    fields = ['title', 'caption', 'image', 'owner', 'url']
    def form_valid(self, form):
        # Force ownership to the logged-in user regardless of form input.
        form.instance.owner = self.request.user
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a post; only its owner may update it."""
    model = post
    fields = ['title', 'caption']
    def form_valid(self, form):
        # Re-stamp ownership so it cannot be altered via the form.
        form.instance.owner = self.request.user
        return super().form_valid(form)
    def test_func(self):
        # Local renamed from `post`, which shadowed the imported model;
        # the if/else returning True/False is collapsed to the comparison.
        obj = self.get_object()
        return self.request.user == obj.owner
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; only its owner may delete it. Redirects to '/' on success."""
    model = post
    success_url = '/'
    def test_func(self):
        # Local renamed to avoid shadowing the imported `post` model.
        obj = self.get_object()
        return self.request.user == obj.owner
def about(request):
    """Render the static About page."""
    return render(request, 'awwardapp/about.html', {'title': 'About'})
# class PostRateView(LoginRequiredMixin, RateView):
# model = Rating
# fields = ['usability', 'design', 'content']
# def form_valid(self, form):
# form.instance.owner = self.request.user
# return super().form_valid(form)
| 2.03125 | 2 |
Python/bankbalance.py | BlackTimber-Labs/DemoPr | 10 | 12764353 | class Bank:
def __init__(self,owner,balance):
self.owner=owner
self.balance=balance
def deposit(self,d):
self.balance=d+self.balance
print("amount : {}".format(d))
print("deposit accepted!!")
return self.balance
def withdraw(self,w):
if w>self.balance:
print("amount has exceeded the limit!!")
print('balance : {}'.format(self.balance))
else:
self.balance= self.balance-w
print("amount withdrawn : {}".format(w))
print("withdrawal completed!!")
return self.balance
def __str__(self):
return (f"Account owner : {self.owner} \nAccount balance : {self.balance}")
| 3.703125 | 4 |
2-EstruturaDeDecisao/ex--14/main.py | dev-everaldo-cyrino/PythonBrasil | 0 | 12764354 | <filename>2-EstruturaDeDecisao/ex--14/main.py
# Read a date as dd/mm/aaaa and report whether it looks valid.
dt = input('coloque uma data no formato dd/mm/aaaa : ')
try:
    if dt[2] == '/' and dt[5] == '/':
        dia = int(dt[0]+dt[1])
        mes = int(dt[3]+dt[4])
        # Fix: also reject day/month below 1 (the original accepted 00).
        if dia > 31 or mes > 12 or dia < 1 or mes < 1:
            print('invalido, dia ou mes errados')
        else:
            print(' a data {} é valida !'.format(dt))
    else:
        # Fix: the original printed nothing when the separators were wrong.
        print(' a data é invalida !')
except (ValueError, IndexError):
    # Narrowed from a bare except: only parsing/short-input errors expected.
    print(' a data é invalida !')
constants.bzl | maxwellE/bazel-diff | 0 | 12764355 | <reponame>maxwellE/bazel-diff
"""
Various constants used to build bazel-diff
"""
DEFAULT_JVM_EXTERNAL_TAG = "3.3"
RULES_JVM_EXTERNAL_SHA = "d85951a92c0908c80bd8551002d66cb23c3434409c814179c0ff026b53544dab"
BUILD_PROTO_MESSAGE_SHA = "50b79faec3c4154bed274371de5678b221165e38ab59c6167cc94b922d9d9152"
BAZEL_DIFF_MAVEN_ARTIFACTS = [
"junit:junit:4.12",
"org.mockito:mockito-core:3.3.3",
"info.picocli:picocli:jar:4.3.2",
"com.google.code.gson:gson:jar:2.8.6",
"com.google.guava:guava:29.0-jre"
]
| 0.890625 | 1 |
py/0206.reverse-linked-list.py | ck2w/leetcode | 0 | 12764356 | #
# @lc app=leetcode id=206 lang=python3
#
# [206] Reverse Linked List
#
# https://leetcode.com/problems/reverse-linked-list/description/
#
# algorithms
# Easy (65.21%)
# Likes: 6441
# Dislikes: 123
# Total Accepted: 1.3M
# Total Submissions: 2M
# Testcase Example: '[1,2,3,4,5]'
#
# Given the head of a singly linked list, reverse the list, and return the
# reversed list.
#
#
# Example 1:
#
#
# Input: head = [1,2,3,4,5]
# Output: [5,4,3,2,1]
#
#
# Example 2:
#
#
# Input: head = [1,2]
# Output: [2,1]
#
#
# Example 3:
#
#
# Input: head = []
# Output: []
#
#
#
# Constraints:
#
#
# The number of nodes in the list is the range [0, 5000].
# -5000 <= Node.val <= 5000
#
#
#
# Follow up: A linked list can be reversed either iteratively or recursively.
# Could you implement both?
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# class Solution:
# def reverseList(self, head: ListNode) -> ListNode:
# rever_tail = ListNode()
# curr = rever_tail
# while head != None:
# curr.val = head.val
# new_node = ListNode()
# new_node.next = curr
# curr = new_node
# head = head.next
# return curr.next
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse a singly linked list iteratively.

        O(n) time, O(1) extra space: walk the list once, repointing each
        node's `next` at its predecessor.
        """
        prev = None
        curr = head
        while curr:
            # Renamed from `next` to avoid shadowing the builtin.
            nxt = curr.next
            curr.next = prev
            prev = curr
            curr = nxt
        return prev
# @lc code=end
| 4.09375 | 4 |
telebot/plugins/sticklol.py | IloveOrbiter/TinyBot | 0 | 12764357 | # (c)2020 TeleBot
# You may not use this file without proper authorship and consent from @TeleBotSupport
#
"""
Available command(s)
.sticklol
Generates a. random laughing sticker.
"""
import random
from telethon import functions, types, utils
from telebot.utils import admin_cmd
def choser(cmd, pack, blacklist=None):
    """Register a command handler that replies with a random sticker.

    :param cmd: command pattern (e.g. "sticklol") matched via admin_cmd.
    :param pack: short name of the Telegram sticker set to draw from.
    :param blacklist: set of sticker document ids to exclude.

    NOTE(review): the handler is registered on the global ``telebot``
    client but the sticker set is fetched via a global ``borg`` client --
    confirm both names are provided by the plugin loader.
    """
    if blacklist is None:
        blacklist = {}
    docs = None
    @telebot.on(admin_cmd(pattern=rf"{cmd}", outgoing=True))
    async def handler(event):
        await event.delete()
        nonlocal docs
        # Lazily fetch and cache the pack's documents on first invocation.
        if docs is None:
            docs = [
                utils.get_input_document(x)
                for x in (
                    await borg(
                        functions.messages.GetStickerSetRequest(
                            types.InputStickerSetShortName(pack)
                        )
                    )
                ).documents
                if x.id not in blacklist
            ]
        await event.respond(file=random.choice(docs))
# Register .sticklol backed by the TeleBot_LOLPack sticker set.
# (The repeated id below collapses in the set literal -- harmless.)
choser(
    "sticklol",
    "TeleBot_LOLPack",
    {
        3088919966519394666,
        3088919966519394334,
        3088919966519394334,
        3088919966519394334,
    },
)
| 2.296875 | 2 |
pages/index.py | Jordan-Ireland/Unit2-Sprint4 | 0 | 12764358 | import dash
import os
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import json
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from selenium import webdriver
chrome_exec_shim = "/app/.apt/opt/google/chrome/chrome"
opts = webdriver.ChromeOptions()
opts.binary_location = chrome_exec_shim
opts.add_argument("--no-sandbox");
opts.add_argument("--disable-gpu");
driver = webdriver.Chrome(executable_path=chrome_exec_shim, chrome_options=opts)
import pickle
with open('notebooks/pipeline.pkl', 'rb') as f:
pipeline = pickle.load(f)
from app import app
class Player:
    """Aggregated Overwatch profile data for one player."""
    def __init__(self, level, rating, prestige, games_won, qps, medals):
        # Plain data holder: copy every constructor argument onto the instance.
        self.level, self.rating, self.prestige = level, rating, prestige
        self.games_won = games_won
        self.qps, self.medals = qps, medals
class Stats:
    """Quick-play combat statistics; every field defaults to 0 when absent."""
    def __init__(self, elims=0, dmg_done=0, deaths=0, solo_kills=0):
        self.elims, self.dmg_done = elims, dmg_done
        self.deaths, self.solo_kills = deaths, solo_kills
class Medals:
    """Quick-play medal counts per tier; each defaults to 0."""
    def __init__(self, bronze=0, silver=0, gold=0):
        self.bronze, self.silver, self.gold = bronze, silver, gold
def create_player(js):
    """Build a Player from an ow-api JSON response, tolerating partial data.

    Guard chain: an error payload yields an all-zero Player; profiles
    missing quick-play career stats yield a Player with profile fields
    only; otherwise combat stats and medal counts are extracted.
    """
    # API error payload: return an empty Player.
    if 'error' in js:
        return Player(0,0,0, 0, Stats(), Medals())
    if 'quickPlayStats' not in js:
        return Player(js['level'],js['rating'],js['prestige'], 0, Stats(), Medals())
    if 'careerStats' not in js['quickPlayStats']:
        return Player(js['level'],js['rating'],js['prestige'], 0, Stats(), Medals())
    if js.get('quickPlayStats',{}).get('careerStats',{}) == None or 'allHeroes' not in js.get('quickPlayStats',{}).get('careerStats',{}):
        return Player(js['level'],js['rating'],js['prestige'], 0, Stats(), Medals())
    # Combat fields default to 0 when the key is absent.
    elims = 0
    damageDone = 0
    deaths = 0
    soloKills = 0
    if js['quickPlayStats']['careerStats']['allHeroes']['combat'] != None:
        if 'eliminations' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
            elims = js['quickPlayStats']['careerStats']['allHeroes']['combat']['eliminations']
        if 'damageDone' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
            damageDone = js['quickPlayStats']['careerStats']['allHeroes']['combat']['damageDone']
        if 'deaths' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
            deaths = js['quickPlayStats']['careerStats']['allHeroes']['combat']['deaths']
        if 'soloKills' in js['quickPlayStats']['careerStats']['allHeroes']['combat']:
            soloKills = js['quickPlayStats']['careerStats']['allHeroes']['combat']['soloKills']
    qps = Stats(elims,damageDone,deaths,soloKills)
    # NOTE(review): .get() with no default returns None for missing medal
    # tiers (not 0) -- downstream arithmetic should be checked.
    medals = Medals(js['quickPlayStats']['awards'].get('medalsBronze'),
                    js['quickPlayStats']['awards'].get('medalsSilver'),
                    js['quickPlayStats']['awards'].get('medalsGold'))
    return Player(js['level'],js['rating'],js['prestige'], js['quickPlayStats']['games']['won'], qps, medals)
def df_object(p):
    """Flatten a Player into a list matching the module-level `columns` order."""
    return [
        p.level, p.rating, p.prestige, p.games_won,
        p.qps.elims, p.qps.dmg_done, p.qps.deaths, p.qps.solo_kills,
        p.medals.bronze, p.medals.silver, p.medals.gold,
    ]
def select_player(username):
    """Fetch a player's complete PC/US stats from ow-api and build a Player."""
    url = f"https://ow-api.com/v1/stats/pc/us/{username}/complete"
    print(url)
    payload = json.loads(requests.get(url).text)
    return create_player(payload)
##dataframe setup
columns = ['level','rating','prestige','games_won','qps_elims','qps_dmg_done',
'qps_deaths','qps_solo_kills','medals_bronze','medals_silver','medals_gold']
def predict(data):
    """Score two six-player teams from a stats DataFrame and run the model.

    *data* is expected to hold 12 rows (rows 0-5 = team 1, rows 6-11 =
    team 2) in the module-level `columns` layout.

    NOTE(review): `data.qps_elims` / `data.qps_deaths` are whole columns,
    so `kd` is a pair of Series normalised by the summed columns --
    confirm this ratio is intended to be per-column rather than per-row.
    """
    kd = [i/(1+sum([data.qps_elims,data.qps_deaths])) for i in [data.qps_elims,data.qps_deaths]]
    data['kill_ratio'] = kd[0]
    data['death_ratio'] = kd[1]
    # Suffix each feature name with the team index (0 / 1).
    column0 = []
    column1 = []
    for col in data.columns:
        column0.append(col+str(0))
        column1.append(col+str(1))
    # Per-team feature means.
    team1 = data.iloc[0:6].mean(axis=0)
    team2 = data.iloc[6:12].mean(axis=0)
    # Heuristic score: +1 to the team with the better mean per feature
    # (lower is better for death-related features).
    t1 = 0
    t2 = 0
    for col in data.columns:
        if 'deaths' in col:
            if team1[col] > team2[col]:
                t1 = t1 - 1
                t2 = t2 + 1
            else:
                t1 = t1 + 1
                t2 = t2 - 1
        else:
            if team1[col] > team2[col]:
                t1 = t1 + 1
                t2 = t2 - 1
            else:
                t1 = t1 - 1
                t2 = t2 + 1
    data1 = dict(zip(column0,team1))
    data2 = dict(zip(column1,team2))
    data3 = pd.DataFrame([data1,data2])
    data4 = pd.DataFrame(data3.max()).T
    # NOTE(review): a deliberate random "upset" bonus -- output is
    # non-deterministic by design here; confirm this is intended.
    if np.random.randint(0,100) >= 90:
        t1 = t1 + 10
    elif np.random.randint(0,100) <= 10:
        t2 = t2 + 10
    if t1 > t2:
        data4['won'] = 0
    elif t2 > t1:
        data4['won'] = 1
    else:
        data4['won'] = 0
    data4 = data4.fillna(0)
    target = 'won'
    X_test = data4.drop(columns=target)
    # Run the pre-trained pipeline loaded at module import time.
    return pipeline.predict(X_test)
amount = 12;
list_col1_inputs = []
list_col1_inputs.append(
html.H2("Enter Teammate Usernames")
)
for i in range(amount):
if(i == 6):
list_col1_inputs.append(html.H2("Enter Enemy Usernames"))
temp = html.Div(className="container",children=[
dcc.Input(
id='username-'+str(i),
className='userinput',
placeholder='Enter Username',
type='text',
value=''
)
]
)
list_col1_inputs.append(temp)
list_col1_inputs.extend([html.Button('Submit' ,id='submit'),html.P(id='username_out')])
column1 = dbc.Col(
list_col1_inputs,
md=5,
)
list_col2_inputs = [html.H2('Select Teammates')]
for i in range(amount):
if(i == 6):
list_col2_inputs.append(html.H2("Select Enemies"))
list_col2_inputs.append(html.Div(id='listofusernames'+str(i)))
list_col2_inputs.append(html.Button("Complete",id='complete'))
column2 = dbc.Col(
list_col2_inputs
)
column3 = dbc.Col(
[
html.Div(id='prediction')
]
)
layout = [dbc.Row([column1, column2]), dbc.Row([column3])]
list_of_username_outputs = []
list_of_username_inputs = []
list_of_username_variables= []
list_of_users_input = []
for i in range(amount):
list_of_username_outputs.append(Output('listofusernames'+str(i),'children'))
list_of_username_inputs.append(State('username-'+str(i), 'value'))
list_of_users_input.append(State('user'+str(i), 'value'))
@app.callback(list_of_username_outputs,
              [Input('submit', 'n_clicks')],
              state=list_of_username_inputs
              )
def search_players(n_clicks,*args):
    """Dash callback: look up each typed username on Overbuff and return
    one player-selection dropdown per input field."""
    if n_clicks != None:
        dropdowns = []
        for i in range(amount):
            # Scrape Overbuff's search results via the shared Selenium driver.
            driver.get(f"https://www.overbuff.com/search?q={args[i]}")
            page_source = driver.page_source
            soup = BeautifulSoup(page_source)
            players = soup.find_all('a', class_="SearchResult", href=True)
            userlist = []
            for element in players:
                # Skip results without a Windows/PC platform icon.
                if element.find(class_='player-platform').find(class_="fa fa-windows") == None:
                    continue
                # NOTE(review): mutating `players` while iterating it skips
                # elements; the intent of this remove() is unclear -- verify.
                players.remove(element)
                # Strip the '/players/pc/' URL prefix to get the battletag.
                user = element['href'][12:]
                userlist.append({'label':user,'value':user})
            # NOTE(review): userlist[0] raises IndexError when no PC
            # results were found for this username -- needs a guard.
            dropdowns.append(dcc.Dropdown(
                id='user'+str(i),
                options=userlist,
                placeholder='Select Player',
                value=userlist[0]['value']
            ))
        return dropdowns
@app.callback(Output('prediction','children'),
              [Input('complete', 'n_clicks')],
              state=list_of_users_input
              )
def create_teams(n_clicks,*args):
    """Dash callback: fetch stats for all selected players and display a
    win-chance message."""
    if n_clicks != None:
        team1 = []
        team2 = []
        # Collect one stats row per selected player (first 6 = allies,
        # last 6 = enemies, per the input layout).
        teams_dataframe = pd.DataFrame(columns=columns)
        for i in range(len(args)):
            player = select_player(args[i])
            teams_dataframe.loc[len(teams_dataframe), :] = df_object(player)
        # NOTE(review): the assembled dataframe and the `predict` pipeline
        # are never used -- the reported chance is purely random. Verify
        # whether predict(teams_dataframe) was meant to be called here.
        chance = np.random.random()*100
        return f'Chances of you winning this game is {chance}%'
| 2.421875 | 2 |
tests/test_board.py | Renaud11232/pyduinocli | 6 | 12764359 | <reponame>Renaud11232/pyduinocli
from . import *
import warnings
class TestBoardCommand(CoreNeedingTest):
    """Integration tests for `arduino-cli board` subcommands.

    Tests that need physical hardware attached only emit a warning.
    """
    def test_attach(self):
        warnings.warn("Cannot test attach, it needs special hardware to be connected.")
    def test_details(self):
        details = self._arduino.board.details("arduino:avr:mega")["result"]
        self.assertIsInstance(details, dict)
        self.assertIn("config_options", details)
    def test_list(self):
        warnings.warn("Cannot test list, it needs special hardware to be connected.")
    def test_listall(self):
        # Renamed local from `list`, which shadowed the builtin type.
        listing = self._arduino.board.listall()["result"]
        self.assertIsInstance(listing, dict)
        self.assertIn("boards", listing)
    def test_search(self):
        boards = self._arduino.board.search()["result"]
        self.assertIsInstance(boards, list)
        self.assertTrue(all(["name" in board for board in boards]))
if __name__ == '__main__':
unittest.main()
| 2.78125 | 3 |
actions/macro/train/ultralisk_creation.py | Matuiss2/Greedy_bot | 9 | 12764360 | <gh_stars>1-10
"""Everything related to training ultralisks goes here"""
from sc2.constants import UnitTypeId, UpgradeId
class UltraliskCreation:
    """Good for now but it might need to be changed vs particular enemy units compositions"""
    def __init__(self, main):
        # `main` is the bot object exposing larvae, upgrade state and
        # training helpers used below.
        self.main = main
    async def should_handle(self):
        """Requirement for training ultralisks"""
        # Must be able to afford/train an ultralisk from a settled cavern.
        if not self.main.can_train(UnitTypeId.ULTRALISK, self.main.settled_cavern):
            return False
        # Hold production (and flag the lock) until tier-3 ground armor
        # is at least pending once tier-2 armor exists.
        if self.main.second_tier_armor and not self.main.already_pending_upgrade(UpgradeId.ZERGGROUNDARMORSLEVEL3):
            self.main.armor_three_lock = True
            return False
        self.main.armor_three_lock = False
        return True
    async def handle(self):
        """Execute the action of training ultralisks"""
        self.main.add_action(self.main.larvae.random.train(UnitTypeId.ULTRALISK))
| 2.03125 | 2 |
tests/generate_examples.py | hsolbrig/avidreader | 3 | 12764361 | <reponame>hsolbrig/avidreader<gh_stars>1-10
import os
from hbreader import FileInfo, hbopen, hbread
# This removes any absolute paths from the output -- not generally used
FileInfo.rel_offset = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
# Open a vanilla file
metadata = FileInfo()
with hbopen('../tests/data/test data 1.txt', metadata) as f:
print(f.read())
print(metadata)
# I'm some friendly test data
#
# FileInfo(source_file='hbreader/tests/data/test data 1.txt', source_file_date='Wed Feb 17 17:01:09 2021', source_file_size=28, base_path='hbreader/tests/data')
# Open a file using a base address
data_file_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tests/data'))
with hbopen('test data 1.txt', base_path=data_file_dir) as f:
print(f.read())
# I'm some friendly test data
# Open an absolute URL
FileInfo.rel_offset = None
url = "https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test data 1.txt"
with hbopen("https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test data 1.txt", metadata.clear()) as f:
print(f.read())
print(metadata)
# I'm some friendly test data
#
# FileInfo(source_file='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test%20data%201.txt', source_file_date='Thu, 18 Feb 2021 16:02:50 GMT', source_file_size='28', base_path='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data')
# Open a relative URL
base_address = metadata.base_path
print(f"Base: {base_address}")
# Base: https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data
with hbopen('test data 1.txt', base_path=base_address) as f:
print(f.read())
# I'm some friendly test data
# Open a file handle
with open('../tests/data/test data 1.txt') as fhandle:
with hbopen(fhandle, metadata.clear()) as f:
print(f.read())
print(metadata)
# I'm some friendly test data
# FileInfo(source_file='../tests/data/test data 1.txt', source_file_date='Wed Feb 17 17:01:09 2021', source_file_size=28, base_path='../tests/data')
# Open an 'latin-1' encoded file
with hbopen('test_8859.txt', base_path=data_file_dir, read_codec='latin-1') as f:
print(f.read())
# Some Text With weird ÒtextÓ And single ÔquotesÕ
# Open a bytes file handle -- still reads as text
with open('data/test data 1.txt', 'rb') as fhandle:
with hbopen(fhandle) as f:
print(f.read())
# I'm some friendly test data
# Open a block of text as a file
some_text = """
This is the honey badger. Watch it run in slow motion.
It's pretty badass. Look. It runs all over the place. "Whoa! Watch out!" says that bird.
Eew, it's got a snake! Oh! It's chasing a jackal! Oh my gosh!
Oh, the honey badger is just crazy!
The honey badger has been referred to by the Guiness Book of World Records as the most fearless animal in the animal kingdom. It really doesn't give a shit. If it's hungry, it's hungry.
"""
with hbopen(some_text, metadata.clear()) as f:
print(f.read())
print(metadata)
#
# This is the honey badger. Watch it run in slow motion.
#
# It's pretty badass. Look. It runs all over the place. "Whoa! Watch out!" says that bird.
#
# Eew, it's got a snake! Oh! It's chasing a jackal! Oh my gosh!
#
# Oh, the honey badger is just crazy!
#
# The honey badger has been referred to by the Guiness Book of World Records as the most fearless animal in the animal kingdom. It really doesn't give a shit. If it's hungry, it's hungry.
# hbopen doesn't require 'with'
f = hbopen('l1\nl2\nl3\n')
for l in f:
print(l, end='')
f.close()
# l1
# l2
# l3
# hpread returns the content rather than a file handle
print(hbread('test_8859.txt', base_path=data_file_dir, read_codec='latin-1'))
# Some Text With weird ÒtextÓ And single ÔquotesÕ
print(hbread("https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test data 1.txt", metadata.clear()))
# I'm some friendly test data
print(metadata)
# FileInfo(source_file='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data/test%20data%201.txt', source_file_date='Thu, 18 Feb 2021 16:28:37 GMT', source_file_size='28', base_path='https://raw.githubusercontent.com/hsolbrig/hbreader/master/tests/data')
| 2.703125 | 3 |
rest_auth/serializers.py | recamshak/django-rest-auth | 0 | 12764362 | <reponame>recamshak/django-rest-auth
from django.contrib.auth import get_user_model
from django.conf import settings
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
try:
from django.utils.http import urlsafe_base64_decode as uid_decoder
except:
# make compatible with django 1.5
from django.utils.http import base36_to_int as uid_decoder
from django.contrib.auth.tokens import default_token_generator
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
class LoginSerializer(AuthTokenSerializer):
    """AuthTokenSerializer that additionally rejects logins from users
    whose e-mail address is unverified, when allauth registration is
    installed with mandatory e-mail verification."""
    def validate(self, attrs):
        attrs = super(LoginSerializer, self).validate(attrs)
        if 'rest_auth.registration' in settings.INSTALLED_APPS:
            # Imported lazily: allauth is only required when the
            # registration app is enabled.
            from allauth.account import app_settings
            if app_settings.EMAIL_VERIFICATION == app_settings.EmailVerificationMethod.MANDATORY:
                user = attrs['user']
                email_address = user.emailaddress_set.get(email=user.email)
                if not email_address.verified:
                    raise serializers.ValidationError('E-mail is not verified.')
        return attrs
class TokenSerializer(serializers.ModelSerializer):
    """
    Serializer for Token model.
    """
    class Meta:
        model = Token
        # Only the token key is exposed; the user relation is omitted.
        fields = ('key',)
class UserDetailsSerializer(serializers.ModelSerializer):
    """
    User model w/o password
    """
    class Meta:
        model = get_user_model()
        fields = ('username', 'email', 'first_name', 'last_name')
class PasswordResetSerializer(serializers.Serializer):
    """
    Serializer for requesting a password reset e-mail.
    """
    email = serializers.EmailField()
    password_reset_form_class = PasswordResetForm
    def validate_email(self, attrs, source):
        # Create PasswordResetForm with the serializer
        self.reset_form = self.password_reset_form_class(data=attrs)
        if not self.reset_form.is_valid():
            raise serializers.ValidationError('Error')
        return attrs
    def save(self):
        """Delegate e-mail sending to Django's PasswordResetForm."""
        request = self.context.get('request')
        # Set some values to trigger the send_email method.
        opts = {
            'use_https': request.is_secure(),
            'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
            'request': request,
        }
        self.reset_form.save(**opts)
class PasswordResetConfirmSerializer(serializers.Serializer):
    """
    Serializer for requesting a password reset e-mail.
    """
    new_password1 = serializers.CharField(max_length=128)
    new_password2 = serializers.CharField(max_length=128)
    uid = serializers.CharField(required=True)
    token = serializers.CharField(required=True)
    set_password_form_class = SetPasswordForm
    def custom_validation(self, attrs):
        # Hook for subclasses to add extra checks; no-op by default.
        pass
    def validate(self, attrs):
        # NOTE(review): this populates self._errors directly and returns
        # None instead of raising/returning attrs -- this relies on
        # legacy DRF validation semantics; verify against the installed
        # DRF version.
        self._errors = {}
        # Get the UserModel
        UserModel = get_user_model()
        # Decode the uidb64 to uid to get User object
        try:
            uid = uid_decoder(attrs['uid'])
            self.user = UserModel._default_manager.get(pk=uid)
        except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
            self._errors['uid'] = ['Invalid value']
        self.custom_validation(attrs)
        # Construct SetPasswordForm instance
        self.set_password_form = self.set_password_form_class(user=self.user,
                                                              data=attrs)
        if not self.set_password_form.is_valid():
            self._errors['token'] = ['Invalid value']
        if not default_token_generator.check_token(self.user, attrs['token']):
            self._errors['token'] = ['Invalid value']
    def save(self):
        """Apply the validated new password to the resolved user."""
        self.set_password_form.save()
class PasswordChangeSerializer(serializers.Serializer):
    """Serializer for changing the current user's password via Django's
    SetPasswordForm."""
    new_password1 = serializers.CharField(max_length=128)
    new_password2 = serializers.CharField(max_length=128)
    set_password_form_class = SetPasswordForm
    def validate(self, attrs):
        request = self.context.get('request')
        self.set_password_form = self.set_password_form_class(user=request.user,
                                                              data=attrs)
        # Delegate password rules (match, strength) to the Django form.
        if not self.set_password_form.is_valid():
            self._errors = self.set_password_form.errors
            return None
        return attrs
    def save(self):
        """Persist the new password for the requesting user."""
        self.set_password_form.save()
| 2.25 | 2 |
src/validx/py/pipelines.py | Cottonwood-Technology/ValidX | 19 | 12764363 | from .. import contracts
from .. import exc
from . import abstract
class AllOf(abstract.Validator):
    """
    AND-style Pipeline Validator
    All steps must be succeeded.
    The last step returns result.
    :param Validator \\*steps:
        nested validators.
    :raises ValidationError:
        raised by the first failed step.
    :note:
        it uses :class:`validx.exc.Step` marker to indicate,
        which step is failed.
    """
    __slots__ = ("steps",)
    def __init__(self, *steps_, steps=None, alias=None, replace=False):
        # Steps may be given positionally or via the ``steps`` keyword.
        steps = contracts.expect_sequence(
            self, "steps", steps or steps_, item_type=abstract.Validator
        )
        # Bypass the validator's own (restricted) __setattr__.
        setattr = object.__setattr__
        setattr(self, "steps", steps)
        self._register(alias, replace)
    def __call__(self, value, __context=None):
        if __context is None:
            __context = {}  # Setup context, if it's top level call
        # Thread the value through each step in order; the first failure
        # aborts, tagged with the index of the failed step.
        for num, step in enumerate(self.steps):
            try:
                value = step(value, __context)
            except exc.ValidationError as e:
                raise e.add_context(exc.Step(num))
        return value
class OneOf(abstract.Validator):
    """
    OR-style Pipeline Validator
    The first succeeded step returns result.
    :param Validator \\*steps:
        nested validators.
    :raises SchemaError:
        if all steps are failed,
        so it contains all errors,
        raised by each step.
    :note:
        it uses :class:`validx.exc.Step` marker to indicate,
        which step is failed.
    """
    __slots__ = ("steps",)
    def __init__(self, *steps_, steps=None, alias=None, replace=False):
        # Steps may be given positionally or via the ``steps`` keyword.
        steps = contracts.expect_sequence(
            self, "steps", steps or steps_, item_type=abstract.Validator
        )
        # Bypass the validator's own (restricted) __setattr__.
        setattr = object.__setattr__
        setattr(self, "steps", steps)
        self._register(alias, replace)
    def __call__(self, value, __context=None):
        if __context is None:
            __context = {}  # Setup context, if it's top level call
        # Try each step in turn; the first success wins. Failures are
        # accumulated (tagged with their step index) for the final error.
        errors = []
        for num, step in enumerate(self.steps):
            try:
                return step(value, __context)
            except exc.ValidationError as e:
                errors.extend(ne.add_context(exc.Step(num)) for ne in e)
        if errors:
            raise exc.SchemaError(errors)
| 2.546875 | 3 |
src/chainmock/__init__.py | ollipa/chainmock | 8 | 12764364 | """Chainmock.
Spy, stub, and mock library for Python and Pytest.
"""
from . import _doctest # imported for side-effects
from ._api import Assert, Mock, mocker
__all__ = [
"Assert",
"Mock",
"mocker",
]
| 1.367188 | 1 |
func_tests/tests/editorganizationtests/edit_organization_data.py | ICT4H/dcs-web | 1 | 12764365 | from framework.utils.common_utils import by_css
from testdata.test_data import url
USERNAME = 'username'
PASSWORD = 'password'
VALID_CREDENTIALS = {USERNAME: "<EMAIL>",
PASSWORD: "<PASSWORD>"}
DATA_WINNERS_ACCOUNT_PAGE = url("/account/")
ORGANIZATION_SECTOR_DROP_DOWN_LIST = by_css("select#id_sector")
| 1.960938 | 2 |
gan_train.py | Aitical/ADspeech2face | 1 | 12764366 | <reponame>Aitical/ADspeech2face
import os
import time
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from configs import DATASET_PARAMETERS, NETWORKS_PARAMETERS
from parse_dataset import get_dataset
from models import get_network, SimCLRLoss, SupContrastiveLoss
from utils import Meter, cycle_voice, cycle_face, save_model
from edsr.model import Model
import cv2
from einops import rearrange, repeat
import math
# dataset and dataloader
print('Parsing your dataset...')
voice_list, face_list, id_class_num = get_dataset(DATASET_PARAMETERS)
NETWORKS_PARAMETERS['c']['output_channel'] = id_class_num
print('Preparing the datasets...')
voice_dataset = DATASET_PARAMETERS['voice_dataset'](voice_list,
DATASET_PARAMETERS['nframe_range'])
face_dataset = DATASET_PARAMETERS['face_dataset'](face_list)
print('Preparing the dataloaders...')
collate_fn = DATASET_PARAMETERS['collate_fn'](DATASET_PARAMETERS['nframe_range'])
voice_loader = DataLoader(voice_dataset, shuffle=True, drop_last=True,
batch_size=DATASET_PARAMETERS['batch_size'],
num_workers=DATASET_PARAMETERS['workers_num'],
collate_fn=collate_fn)
face_loader = DataLoader(face_dataset, shuffle=True, drop_last=True,
batch_size=DATASET_PARAMETERS['batch_size'],
num_workers=DATASET_PARAMETERS['workers_num'])
voice_iterator = iter(cycle_voice(voice_loader))
face_iterator = iter(cycle_face(face_loader))
# networks, Fe, Fg, Fd (f+d), Fc (f+c)
print('Initializing networks...')
e_net, e_optimizer = get_network('e', NETWORKS_PARAMETERS, train=False)
g_net, g_optimizer = get_network('g', NETWORKS_PARAMETERS, train=True)
f_net, f_optimizer = get_network('f', NETWORKS_PARAMETERS, train=True)
d_net, d_optimizer = get_network('d', NETWORKS_PARAMETERS, train=True)
c_net, c_optimizer = get_network('c', NETWORKS_PARAMETERS, train=True)
# label for real/fake faces
real_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 1)
fake_label = torch.full((DATASET_PARAMETERS['batch_size'], 1), 0)
# Meters for recording the training status
iteration = Meter('Iter', 'sum', ':5d')
data_time = Meter('Data', 'sum', ':4.2f')
batch_time = Meter('Time', 'sum', ':4.2f')
D_real = Meter('D_real', 'avg', ':3.2f')
D_fake = Meter('D_fake', 'avg', ':3.2f')
C_real = Meter('C_real', 'avg', ':3.2f')
GD_fake = Meter('G_D_fake', 'avg', ':3.2f')
GC_fake = Meter('G_C_fake', 'avg', ':3.2f')
current_epoch = 1
def adjust_learning_rate(optimizer, epoch, lr=0.1, total_epochs=400):
    """Set the optimizer's learning rate following a half-cosine decay.

    The rate equals ``lr`` at ``epoch == 0`` and decays smoothly to 0 as
    ``epoch`` approaches ``total_epochs`` (cosine annealing).

    Args:
        optimizer: any object exposing a ``param_groups`` list of dicts
            (e.g. a ``torch.optim.Optimizer``); mutated in place.
        epoch: current epoch number.
        lr: base (initial) learning rate.
        total_epochs: span of the half cosine period; defaults to 400,
            matching the previously hard-coded schedule.
    """
    # cosine lr schedule: lr * 0.5 * (1 + cos(pi * epoch / total_epochs))
    lr *= 0.5 * (1. + math.cos(math.pi * epoch / total_epochs))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    # wandb.log({'lr': lr, 'epoch': epoch})
# Pretrained EDSR super-resolution model, frozen: used only to derive
# multi-scale targets (lr_16 / lr_32 / lr_64) from low-res faces.
sr_model = Model('/home/aitical/Documents/paper_with_code/speech2face/speech2face_dense/edsr/pretrained/model_best.pt')
sr_model.model.eval()
for param in sr_model.model.parameters():
    param.requires_grad = False
sr_model.cuda()
print('SR model loaded')
# Loss functions (several are experiments that are currently unused below).
l1_loss = torch.nn.L1Loss().cuda()
l2_loss = torch.nn.MSELoss().cuda()
affine_loss = torch.nn.KLDivLoss().cuda()
contrastive_loss = SimCLRLoss(temperature=0.2).cuda()
sup_contratsive_loss = SupContrastiveLoss().cuda()
print('Training models...')
for it in range(50000):
    # data
    adjust_learning_rate(optimizer=g_optimizer, epoch=current_epoch, lr=3e-4)
    start_time = time.time()
    voice, voice_label = next(voice_iterator)
    face, face_label, face_lr = next(face_iterator)
    # Small Gaussian perturbation added to the voice embedding below.
    noise = 0.05*torch.randn(DATASET_PARAMETERS['batch_size'], NETWORKS_PARAMETERS['e']['output_channel'], 1, 1)
    # use GPU or not
    if NETWORKS_PARAMETERS['GPU']:
        voice, voice_label = voice.cuda(), voice_label.cuda()
        face, face_label, face_lr = face.cuda(), face_label.cuda(), face_lr.cuda()
        # Rebinding the globals each iteration is redundant but harmless
        # (.cuda() on an already-cuda tensor is effectively a no-op).
        real_label, fake_label = real_label.cuda(), fake_label.cuda()
        noise = noise.cuda()
    data_time.update(time.time() - start_time)
    # SR targets are fixed supervision: no gradients through sr_model.
    with torch.no_grad():
        latent, lr_16, lr_32, lr_64 = sr_model(face_lr)
        # print(latent.shape, lr_16.shape, lr_64.shape)
        # BXCXHxH
        face_vector = torch.mean(lr_16, dim=[2, 3])
        face_vector = torch.nn.functional.normalize(face_vector, dim=1)
    # get embeddings and generated faces
    embeddings = e_net(voice)
    embeddings = F.normalize(embeddings)
    # introduce some permutations
    embeddings = embeddings + noise
    embeddings = F.normalize(embeddings)
    # print(embeddings.shape)
    # loss1 = 0.1*(contrastive_loss(embeddings.squeeze(), face_vector) + contrastive_loss(face_vector, embeddings.squeeze()))
    fake, fake_16, fake_32, fake_64 = g_net(embeddings)
    # print(fake.shape, fake_16.shape, fake_64.shape)
    # print(fake.shape)
    # Discriminator
    # e_optimizer.zero_grad()
    f_optimizer.zero_grad()
    d_optimizer.zero_grad()
    c_optimizer.zero_grad()
    # detach() keeps the generator out of the discriminator update.
    real_score_out = d_net(f_net(face))
    fake_score_out = d_net(f_net(fake.detach()))
    real_label_out = c_net(f_net(face))
    clip_feature = F.normalize(f_net(face).squeeze())
    # print(clip_feature.shape, embeddings.shape)
    #
    # F_clip_loss = 0.1 * 0.5*(contrastive_loss(clip_feature, embeddings.squeeze().detach()) + contrastive_loss(embeddings.squeeze().detach(), clip_feature))
    # clip_fake_feature = F.normalize(f_net(fake.detach()).squeeze())
    # F_clip_contrastive = 0.3 * contrastive_loss(clip_fake_feature, clip_feature)
    D_real_loss = F.binary_cross_entropy(torch.sigmoid(real_score_out), real_label.float())
    D_fake_loss = F.binary_cross_entropy(torch.sigmoid(fake_score_out), fake_label.float())
    C_real_loss = F.nll_loss(F.log_softmax(real_label_out, 1), face_label)
    D_real.update(D_real_loss.item())
    D_fake.update(D_fake_loss.item())
    C_real.update(C_real_loss.item())
    (D_real_loss + D_fake_loss + C_real_loss).backward()
    f_optimizer.step()
    d_optimizer.step()
    c_optimizer.step()
    # Generator
    g_optimizer.zero_grad()
    fake_score_out = d_net(f_net(fake))
    fake_label_out = c_net(f_net(fake))
    # with torch.no_grad():
    # NOTE(review): these two features are computed but unused below
    # (only referenced by the commented-out contrastive losses).
    fake_feature_out = F.normalize(f_net(fake).squeeze())
    real_feature_out = F.normalize(f_net(face).squeeze())
    # print(f_net(fake).shape)
    # Generator tries to make D output "real" and C predict the speaker id.
    GD_fake_loss = F.binary_cross_entropy(torch.sigmoid(fake_score_out), real_label.float())
    GC_fake_loss = 0.5 * F.nll_loss(F.log_softmax(fake_label_out, 1), voice_label)
    # Embedded_contrastive_loss = 0.5 * sup_contratsive_loss(fake_feature_out, real_feature_out, voice_label)
    # Embedded_contrastive_loss = 0.1 * l2_loss(fake_feature_out, real_feature_out)
    # out_space_loss = 0.1*(0.5*l1_loss(fake_16, lr_16) + 0.5*l1_loss(fake_32, lr_32))
    # Multi-scale L1 reconstruction against the frozen SR targets.
    loss2 = 0.1*(l1_loss(fake_16, lr_16))
    loss3 = 0.1*(l1_loss(fake_64, lr_64))
    # # BxCx16x16
    # b, c, h, w = lr_16.shape
    # non_local_lr = lr_16.reshape(b, c, h*w)
    # non_local_sim = torch.bmm(non_local_lr.permute(0, 2, 1), non_local_lr).reshape(b*h*w, h*w)
    # non_local_prob = torch.nn.functional.softmax(non_local_sim, dim=1)
    #
    #
    # non_local_fake = fake_16.reshape(b, c, h*w)
    # non_local_sim_fake = torch.bmm(non_local_fake.permute(0, 2, 1), non_local_fake).reshape(b*h*w, h*w)
    # non_local_fake_prob = torch.nn.functional.log_softmax(non_local_sim_fake, dim=1)
    #
    # loss2 = 0.05 * affine_loss(non_local_fake_prob, non_local_prob)
    (GD_fake_loss + GC_fake_loss + loss2 + loss3).backward()
    GD_fake.update(GD_fake_loss.item())
    GC_fake.update(GC_fake_loss.item())
    g_optimizer.step()
    # e_optimizer.step()
    batch_time.update(time.time() - start_time)
    # print status
    if it % 200 == 0:
        current_epoch += 1
        print(iteration, data_time, batch_time,
              D_real, D_fake, C_real, GD_fake, GC_fake)
        data_time.reset()
        batch_time.reset()
        D_real.reset()
        D_fake.reset()
        C_real.reset()
        GD_fake.reset()
        GC_fake.reset()
        # snapshot
        save_model(g_net, NETWORKS_PARAMETERS['g']['model_path'])
        # save_model(e_net, NETWORKS_PARAMETERS['e']['model_path'])
        save_model(f_net, NETWORKS_PARAMETERS['f']['model_path'])
        save_model(d_net, NETWORKS_PARAMETERS['d']['model_path'])
        save_model(c_net, NETWORKS_PARAMETERS['c']['model_path'])
    iteration.update(1)
| 2.15625 | 2 |
custom_rst2s5.py | kumar303/unicode-in-python | 97 | 12764367 | <filename>custom_rst2s5.py<gh_stars>10-100
#!/usr/bin/env python
# kumar: sniped from: http://matt-good.net/files/software-dev-with-trac/rst2s5
# Author: <NAME>
# Contact: <EMAIL>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
# Enable locale-aware formatting when the environment provides a usable
# locale; a missing/misconfigured locale must not abort the script.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Narrowed from a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. locale.Error (a ValueError subclass) is the
    # expected failure here.
    pass
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import get_formatter_by_name
from docutils.parsers import rst
from docutils import nodes
def code_formatter(language, content):
    """Render ``content`` as syntax-highlighted HTML in a raw docutils node.

    ``language`` names the pygments lexer to use; the HTML formatter is
    configured with inline styles (``noclasses=True``) so the output needs
    no external stylesheet.
    """
    highlighted = pygments.highlight(
        content,
        get_lexer_by_name(language),
        get_formatter_by_name('html', noclasses=True),
    )
    return nodes.raw('', highlighted, format='html')
def code_role(name, rawtext, text, lineno, inliner, options=None,
              content=None):
    """Docutils inline role: highlight ``text`` as source code.

    If no ``language`` option is supplied, the text may carry it inline as
    ``language:code`` (a bare ``language`` yields an empty snippet).
    Returns the standard role result: ``([nodes], [system_messages])``.
    """
    # Mutable defaults ({} / []) replaced with None sentinels to avoid the
    # shared-mutable-default pitfall; behavior is otherwise unchanged.
    options = {} if options is None else options
    content = [] if content is None else content
    language = options.get('language')
    if not language:
        args = text.split(':', 1)
        language = args[0]
        if len(args) == 2:
            text = args[1]
        else:
            text = ''
    reference = code_formatter(language, text)
    return [reference], []
def code_block(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    """Docutils ``code-block`` directive: highlight a literal block.

    Usage::

        .. code-block:: language

    The single directive argument selects the pygments lexer; the
    directive body is joined into one string, highlighted, and returned
    as a raw-HTML node.
    """
    source = '\n'.join(content)
    return [code_formatter(arguments[0], source)]
# These are documented
# at http://docutils.sourceforge.net/spec/howto/rst-directives.html.
# Old-style (pre-Directive-class) directive metadata attached as function
# attributes, as required by the legacy docutils directive API.
code_block.arguments = (
    1, # Number of required arguments.
    0, # Number of optional arguments.
    0) # True if final argument may contain whitespace.
# A mapping from option name to conversion function.
code_role.options = code_block.options = {
    'language' :
    rst.directives.unchanged # Return the text argument, unchanged
    }
code_block.content = 1 # True if content is allowed.
# Register the directive with docutils.
rst.directives.register_directive('code-block', code_block)
rst.roles.register_local_role('code-block', code_role)
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)
# Command-line entry point: parse argv and emit S5 slides.
publish_cmdline(writer_name='s5', description=description)
| 2.3125 | 2 |
tfmplugins/utils/eventbased.py | extremq/tfmplugins | 4 | 12764368 | # Copied from https://github.com/Athesdrake/aiotfm/blob/master/aiotfm/client.py
import sys
import asyncio
import traceback
class InvalidEvent(Exception):
    """Raised when an invalid event handler is registered on the client.

    A handler is valid only if its name begins with ``on_`` and it is a
    coroutine function.
    """
class EventBased:
    """A class that implements asynchronous events.

    Intended as a mixin: ``wait_for`` and ``dispatch`` use ``self.loop``,
    which is never assigned here -- NOTE(review): the concrete subclass is
    presumably expected to set ``loop``; confirm.
    """

    def __init__(self):
        # event name -> list of (predicate, future, stop_propagation)
        self._waiters = {}

    def event(self, coro):
        """A decorator that registers an event handler.

        The coroutine is attached to the instance under its own name
        (which must start with ``on_``) and returned unchanged.

        :raises InvalidEvent: if the name is wrong or ``coro`` is not a
            coroutine function.
        """
        name = coro.__name__
        if not name.startswith('on_'):
            raise InvalidEvent("'{}' isn't a correct event naming.".format(name))
        if not asyncio.iscoroutinefunction(coro):
            message = "Couldn't register a non-coroutine function for the event {}.".format(name)
            raise InvalidEvent(message)

        setattr(self, name, coro)
        return coro

    def wait_for(self, event, condition=None, timeout=None, stopPropagation=False):
        """Wait for an event.

        :param event: :class:`str` the event name.
        :param condition: Optional[`function`] A predicate to check what to wait for.
            The arguments must meet the parameters of the event being waited for.
        :param timeout: Optional[:class:`int`] the number of seconds before
            throwing asyncio.TimeoutError
        :param stopPropagation: if True, a matching dispatch resolves this
            waiter and is not forwarded to the ``on_<event>`` handler.
        :return: [`asyncio.Future`](https://docs.python.org/3/library/asyncio-future.html#asyncio.Future)
            a future that you must await.
        """
        event = event.lower()
        future = self.loop.create_future()

        if condition is None:
            # Default predicate: accept any dispatch of this event.
            def everything(*a):
                return True

            condition = everything

        if event not in self._waiters:
            self._waiters[event] = []

        self._waiters[event].append((condition, future, stopPropagation))

        return asyncio.wait_for(future, timeout)

    async def _run_event(self, coro, event_name, *args, **kwargs):
        """|coro|
        Runs an event and handle the error if any.

        :param coro: a coroutine function.
        :param event_name: :class:`str` the event's name.
        :param args: arguments to pass to the coro.
        :param kwargs: keyword arguments to pass to the coro.
        :return: :class:`bool` whether the event ran successfully or not
        """
        try:
            await coro(*args, **kwargs)
            return True
        # except asyncio.CancelledError:
        #   raise
        except Exception as e:
            # Errors are routed to on_error (if defined) instead of
            # propagating out of the dispatch task.
            if hasattr(self, 'on_error'):
                await self.on_error(event_name, e, *args, **kwargs)
            return False

    def dispatch(self, event, *args, **kwargs):
        """Dispatches events

        :param event: :class:`str` event's name. (without 'on_')
        :param args: arguments to pass to the coro.
        :param kwargs: keyword arguments to pass to the coro.
        :return: [`Task`](https://docs.python.org/3/library/asyncio-task.html#asyncio.Task)
            the _run_event wrapper task
        """
        method = 'on_' + event

        if method in self._waiters:
            to_remove = []
            waiters = self._waiters[method]
            for i, (cond, fut, stop) in enumerate(waiters):
                if fut.cancelled():
                    to_remove.append(i)
                    continue

                try:
                    result = bool(cond(*args))
                except Exception as e:
                    # A failing predicate poisons that waiter's future.
                    fut.set_exception(e)
                else:
                    if result:
                        # Resolve with a convenient shape: single arg bare,
                        # several args as a tuple, none as None.
                        fut.set_result(args[0] if len(args) == 1 else args if len(args) > 0 else None)
                        if stop:
                            # Consume the event: remove this waiter and skip
                            # the on_<event> handler entirely.
                            del waiters[i]
                            return None
                        to_remove.append(i)

            if len(to_remove) == len(waiters):
                del self._waiters[method]
            else:
                # Delete in reverse so earlier indices stay valid.
                for i in to_remove[::-1]:
                    del waiters[i]

        coro = getattr(self, method, None)
        if coro is not None:
            dispatch = self._run_event(coro, method, *args, **kwargs)
            return self.loop.call_soon_threadsafe(
                self.loop.create_task,
                dispatch
            )

    async def on_error(self, event, err, *a, **kw):
        """Default on_error event handler. Prints the traceback of the error."""
        # NOTE: the template uses only {0} and {2}; err is passed to format
        # but not rendered.
        message = '\nAn error occurred while dispatching the event "{0}":\n\n{2}'
        tb = traceback.format_exc(limit=-3)
        print(message.format(event, err, tb), file=sys.stderr)
        return message.format(event, err, tb)
randomForestTest.py | vlstyxz/Brain-Computer-Interface-with-Neurosky | 9 | 12764369 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 21:24:37 2019
@author: anilosmantur
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 20:43:41 2019
@author: anilosmantur
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from pylab import rcParams
rcParams['figure.figsize'] = 10, 5
# Trials recorded per label (the full set of 91 is commented out).
n_samples = 30#91
# All signals saved per trial; featureList drops 'poorSignal' (quality flag).
dataNameList = ['attention','meditation','rawValue','delta','theta','lowAlpha','highAlpha',
               'lowBeta','highBeta','lowGamma','midGamma','poorSignal']
featureList = ['attention','meditation','rawValue','delta','theta','lowAlpha','highAlpha',
               'lowBeta','highBeta','lowGamma','midGamma']
labels = ['focus','relax', 'upWord', 'downWord',
          'upColor', 'downColor',
          'CyanUP','greenDOWN', 'yellowRIGHT', 'BlackLEFT']#,'blink']
# Immediately narrowed to a 3-class subset for this experiment.
labels = ['relax','upColor','CyanUP']
n_label = len(labels)
#label = labels[2]
#count = 0
# signal name -> list of per-trial arrays, filled by load_data() below.
trainDataDict = dict()
for data in dataNameList:
    trainDataDict[data] = []
testDataDict = dict()
for data in dataNameList:
    testDataDict[data] = []
def load_data(dataDict, label, count):
    """Append one trial's signals to ``dataDict`` (mutated in place).

    For every signal name in the module-level ``dataNameList``, loads
    ``dataset/<label>/<count>/<signal>.npy`` and appends the first 100
    samples to ``dataDict[signal]``.
    """
    for signal in dataNameList:
        samples = np.load('dataset/{}/{}/{}.npy'.format(label, count, signal))
        dataDict[signal].append(samples[:100])
#n_samples = 10
# 50/50 train/test split of the trials for each label.
test_n_samples = int(n_samples/2)
test_size = n_label * int(n_samples/2)
train_n_samples = round(n_samples/2)
train_size = n_label * round(n_samples/2)
#nums = np.arange(n_samples)*2
nums = np.arange(n_samples)
# First 5 trials of each block of 10 go to training, the rest to test.
trainNums = np.concatenate([nums[:5],nums[10:15],nums[20:25]])#,nums[31:41], nums[51:61],nums[71:81]])
#trainNums = nums[:5]
np.random.shuffle(trainNums)
testNums = np.concatenate([nums[5:10],nums[15:20],nums[25:30]])#,nums[41:51], nums[61:71],nums[81:91]])
#testNums = nums[5:10]
np.random.shuffle(testNums)
for label in labels:
    for i in trainNums:
        load_data(trainDataDict,label, i)
for label in labels:
    for i in testNums:
        load_data(testDataDict,label, i)
# Stack each signal's per-trial lists into arrays: (trials, 100).
for data in dataNameList:
    trainDataDict[data] = np.array(trainDataDict[data])
for data in dataNameList:
    testDataDict[data] = np.array(testDataDict[data])
#connect features
# Build (trials, features, 100) tensors from the 11 feature signals.
trainData = []
for data in featureList:
    trainData.append(trainDataDict[data])
trainData = np.array(trainData).transpose(1,0,2)
testData = []
for data in featureList:
    testData.append(testDataDict[data])
testData = np.array(testData).transpose(1,0,2)
trainData = trainData.astype('float32')
testData = testData.astype('float32')
## normalization needed
# Min-max scale flattened 1100-dim vectors (11 features x 100 samples);
# the scaler is fit on training data only.
scaler = MinMaxScaler()
print(scaler.fit(trainData.reshape(-1, 1100)))
trainData = scaler.transform(trainData.reshape(-1, 1100))
testData = scaler.transform(testData.reshape(-1, 1100))
# Integer class labels: blocks of train_n_samples per label, in order.
trainLabels = []
for i in range(n_label):
    trainLabels.append(np.ones(train_n_samples)*i )#,np.ones(15)*2])
trainLabels = np.concatenate(trainLabels)
testLabels = []
for i in range(n_label):
    testLabels.append(np.ones(test_n_samples)*i )#,np.ones(15)*2])
testLabels = np.concatenate(testLabels)
from sklearn.model_selection import GridSearchCV
# Hyper-parameter sweep for the random forest, 5-fold CV on training data.
param_grid = {
        'n_estimators':[20, 50, 100, 150, 200],
        'max_features':['auto', 'sqrt', 'log2'],
        'max_depth':[2,3,4],
        'criterion':['gini','entropy'],
        }
rfc = RandomForestClassifier(random_state=42)
rfc_cv = GridSearchCV(estimator=rfc,param_grid=param_grid,cv=5)
rfc_cv.fit(trainData, trainLabels)
#print('feature : ', dataNameList[i])
print(rfc_cv.best_score_)
print(rfc_cv.best_params_)
# Evaluate the best estimator on the held-out trials.
preds = np.array(rfc_cv.predict(testData))
scores = metrics.accuracy_score(testLabels, preds)
print('test %: {:6.2f}%'.format(scores*100))
dependencies/FontTools/Lib/fontTools/ttLib/tables/_h_d_m_x.py | charlesmchen/typefacet | 21 | 12764370 | import DefaultTable
import sstruct
import string
hdmxHeaderFormat = """
> # big endian!
version: H
numRecords: H
recordSize: l
"""
class table__h_d_m_x(DefaultTable.DefaultTable):
	# Horizontal device metrics ('hdmx') table.
	# NOTE: this is Python 2 code (ord() on byte strings, list-returning
	# dict.items()/.keys(), backquote repr, the '<>' operator and a tuple
	# parameter in fromXML); it will not run under Python 3.

	def decompile(self, data, ttFont):
		# Record layout: 1 byte ppem, 1 byte maxSize, then one advance-width
		# byte per glyph, padded out to self.recordSize.
		numGlyphs = ttFont['maxp'].numGlyphs
		glyphOrder = ttFont.getGlyphOrder()
		dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
		self.hdmx = {}
		for i in range(self.numRecords):
			ppem = ord(data[0])
			maxSize = ord(data[1])
			widths = {}
			for glyphID in range(numGlyphs):
				widths[glyphOrder[glyphID]] = ord(data[glyphID+2])
			self.hdmx[ppem] = widths
			data = data[self.recordSize:]
		assert len(data) == 0, "too much hdmx data"

	def compile(self, ttFont):
		self.version = 0
		numGlyphs = ttFont['maxp'].numGlyphs
		glyphOrder = ttFont.getGlyphOrder()
		# Pad each record to a 4-byte boundary (py2 '/' is integer division here).
		self.recordSize = 4 * ((2 + numGlyphs + 3) / 4)
		pad = (self.recordSize - 2 - numGlyphs) * "\0"
		self.numRecords = len(self.hdmx)
		data = sstruct.pack(hdmxHeaderFormat, self)
		# Records must be emitted sorted by ppem.
		items = self.hdmx.items()
		items.sort()
		for ppem, widths in items:
			data = data + chr(ppem) + chr(max(widths.values()))
			for glyphID in range(len(glyphOrder)):
				width = widths[glyphOrder[glyphID]]
				data = data + chr(width)
			data = data + pad
		return data

	def toXML(self, writer, ttFont):
		# Emit a single text matrix: one header row of ppem sizes, then one
		# ';'-terminated row of widths per glyph (sorted by glyph name).
		writer.begintag("hdmxData")
		writer.newline()
		ppems = self.hdmx.keys()
		ppems.sort()
		records = []
		format = ""
		for ppem in ppems:
			widths = self.hdmx[ppem]
			records.append(widths)
			format = format + "%4d"
		glyphNames = ttFont.getGlyphOrder()[:]
		glyphNames.sort()
		maxNameLen = max(map(len, glyphNames))
		format = "%" + `maxNameLen` + 's:' + format + ' ;'
		writer.write(format % (("ppem",) + tuple(ppems)))
		writer.newline()
		writer.newline()
		for glyphName in glyphNames:
			row = []
			for ppem in ppems:
				widths = self.hdmx[ppem]
				row.append(widths[glyphName])
			if ";" in glyphName:
				# ';' is the row terminator, so escape it inside names.
				glyphName = "\\x3b".join(glyphName.split(";"))
			writer.write(format % ((glyphName,) + tuple(row)))
			writer.newline()
		writer.endtag("hdmxData")
		writer.newline()

	def fromXML(self, (name, attrs, content), ttFont):
		# Inverse of toXML: parse the ';'-separated text matrix back into
		# self.hdmx = {ppem: {glyphName: width}}.
		if name <> "hdmxData":
			return
		content = string.join(content, "")
		lines = string.split(content, ";")
		topRow = string.split(lines[0])
		assert topRow[0] == "ppem:", "illegal hdmx format"
		ppems = map(int, topRow[1:])
		self.hdmx = hdmx = {}
		for ppem in ppems:
			hdmx[ppem] = {}
		lines = map(string.split, lines[1:])
		for line in lines:
			if not line:
				continue
			assert line[0][-1] == ":", "illegal hdmx format"
			glyphName = line[0][:-1]
			if "\\" in glyphName:
				# Undo the escaping applied in toXML.
				from fontTools.misc.textTools import safeEval
				glyphName = safeEval('"""' + glyphName + '"""')
			line = map(int, line[1:])
			assert len(line) == len(ppems), "illegal hdmx format"
			for i in range(len(ppems)):
				hdmx[ppems[i]][glyphName] = line[i]
| 2.640625 | 3 |
capreolus/tests/common_fixtures.py | AlexWang000/capreolus | 1 | 12764371 | <reponame>AlexWang000/capreolus
import pytest
from pathlib import Path
from capreolus.collection import DummyCollection
from capreolus.index import AnseriniIndex
from capreolus import registry
@pytest.fixture(scope="function")
def tmpdir_as_cache(tmpdir, monkeypatch):
    # Redirect capreolus' on-disk cache root into the per-test temporary
    # directory so tests never read from or write to the real cache.
    monkeypatch.setattr(registry, "CACHE_BASE_PATH", Path(tmpdir))
@pytest.fixture(scope="function")
def dummy_index(tmpdir_as_cache):
    # Build a small Anserini index over the dummy collection; all output
    # lands in the temporary cache supplied by tmpdir_as_cache.
    index = AnseriniIndex({"_name": "anserini", "indexstops": False, "stemmer": "porter"})
    index.modules["collection"] = DummyCollection({"_name": "dummy"})
    index.create_index()
    return index
| 1.976563 | 2 |
scipy/weave/tests/test_inline_tools.py | ogrisel/scipy | 1 | 12764372 | from __future__ import absolute_import, print_function
from numpy.testing import TestCase, dec, assert_, run_module_suite
from scipy.weave import inline_tools
class TestInline(TestCase):
    """These are long running tests...

    Would be useful to benchmark these things somehow.
    """
    @dec.slow
    def test_exceptions(self):
        # Compiles a C snippet via weave.inline; with a=3 the snippet takes
        # the else-branch and returns a+1.
        a = 3
        code = """
               if (a < 2)
                   throw_error(PyExc_ValueError,
                               "the variable 'a' should not be less than 2");
               else
                   return_val = PyInt_FromLong(a+1);
               """
        result = inline_tools.inline(code,['a'])
        assert_(result == 4)

## Unfortunately, it is not always possible to catch distutils compiler
## errors, since SystemExit is used.  Until that is fixed, these tests
## cannot be run in the same process as the test suite.

##        try:
##            a = 1
##            result = inline_tools.inline(code,['a'])
##            assert_(1) # should've thrown a ValueError
##        except ValueError:
##            pass

##        from distutils.errors import DistutilsError, CompileError
##        try:
##            a = 'string'
##            result = inline_tools.inline(code,['a'])
##            assert_(1) # should've gotten an error
##        except:
##            # ?CompileError is the error reported, but catching it doesn't work
##            pass
# Allow running this test module directly, outside a test collector.
if __name__ == "__main__":
    run_module_suite()
| 2.296875 | 2 |
convertmusic/tools/ffmpeg_bin/ffmpeg.py | groboclown/music-uploader | 0 | 12764373 |
import os
import subprocess
import re
# Name of the ffmpeg executable; resolved through PATH by subprocess.
BIN_FFMPEG = 'ffmpeg'
def convert(srcfile, outfile, bit_rate, channels, sample_rate, codec, tags, volume=None, verbose=False):
    """Transcode ``srcfile`` into ``outfile`` (audio only) via ffmpeg.

    Applies the requested codec, sample rate, channel count and bit rate,
    forces 16 bits per raw sample, optionally applies a volume filter, and
    writes each entry of ``tags`` as output metadata.  Any pre-existing
    ``outfile`` is deleted first.

    Raises:
        Exception: if ``srcfile`` and ``outfile`` are the same path.
        subprocess.CalledProcessError: if ffmpeg exits non-zero.
    """
    if srcfile == outfile:
        raise Exception('Does not support overwriting file')
    # Remove any stale output so ffmpeg never has to overwrite in place.
    if os.path.isfile(outfile):
        os.unlink(outfile)
    command = [
        BIN_FFMPEG, '-i', srcfile,
        # -vn/-sn/-dn: drop video, subtitle and data streams.
        '-vn', '-sn', '-dn',
        '-acodec', codec, '-ar', str(sample_rate),
        '-ac', str(channels), '-b:a', str(bit_rate),
        '-bits_per_raw_sample', '16'
    ]
    if volume:
        # Optional gain adjustment through the "volume" audio filter.
        command += ['-filter:a', "volume={0}".format(volume)]
    for key, value in tags.items():
        command += ['-metadata', '{0}={1}'.format(key, value)]
    command.append(outfile)
    if verbose:
        print(' '.join(command))
    # force bits per sample = 16.
    subprocess.run(command,
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.PIPE)
def trim_audio(srcfile, outfile, start_time, end_time):
    """Copy a time span of ``srcfile`` into ``outfile`` without re-encoding.

    Streams are copied verbatim (``-c copy``) and both global and
    audio-stream metadata are carried over.  ``start_time``/``end_time``
    use ffmpeg's "hh:mm:ss.nn" timestamp format (e.g. 00:01:22.00); either
    may be ``None`` to leave that side of the span unbounded.

    Raises:
        Exception: if ``srcfile`` and ``outfile`` are the same path.
        subprocess.CalledProcessError: if ffmpeg exits non-zero.
    """
    if srcfile == outfile:
        raise Exception('Does not support overwriting file')
    # Remove any stale output from a previous run.
    if os.path.isfile(outfile):
        os.unlink(outfile)
    command = [BIN_FFMPEG, '-i', srcfile]
    # ('-movflags', 'use_metadata_tags' was tried and left disabled.)
    command += ['-map_metadata', '0:g']
    command += ['-map_metadata:s:a', '0:g']
    command += ['-c', 'copy']
    if start_time is not None:
        command += ['-ss', start_time]
    if end_time is not None:
        command += ['-to', end_time]
    command.append(outfile)
    print('Running "{0}"'.format(' '.join(command)))
    subprocess.run(command,
        check=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.PIPE)
# Patterns matching ffmpeg volumedetect output lines; group 1 captures the
# filter instance address, the last group the numeric value.
LINE_MEAN_VOLUME = \
    re.compile(r'\[Parsed_volumedetect_\d+ @ ([^\]]+)\] mean_volume: (-?\d+\.?\d*) dB')
LINE_MAX_VOLUME = \
    re.compile(r'\[Parsed_volumedetect_\d+ @ ([^\]]+)\] max_volume: (-?\d+\.?\d*) dB')
LINE_HISTOGRAM_VOLUME = \
    re.compile(r'\[Parsed_volumedetect_\d+ @ ([^\]]+)\] histogram_(\d+)db: (-?\d+\.?\d*)')
class VolumeLevel(object):
    """Volume statistics parsed from ffmpeg's ``volumedetect`` filter.

    Attributes:
        mean: mean volume in dB.
        max: maximum volume in dB.
        histogram: mapping of dB level (str) -> reported count.
    """

    def __init__(self, mean_v, max_v, hist):
        self.mean = mean_v
        self.max = max_v
        self.histogram = hist

    def __repr__(self):
        # Debug-friendly representation; not parsed anywhere.
        return 'VolumeLevel(mean={0!r}, max={1!r}, histogram={2!r})'.format(
            self.mean, self.max, self.histogram)
def find_volume_levels(srcfile):
    """Run ffmpeg's volumedetect filter on ``srcfile`` and parse its output.

    Returns a VolumeLevel with the mean/max volume and the per-dB
    histogram, or None if ffmpeg reported neither a mean nor a max.
    """
    # '-f null' discards the transcoded output; only stderr text matters.
    cmd = [
        BIN_FFMPEG, '-i', srcfile,
        '-af', "volumedetect",
        '-vn', '-sn', '-dn',
        '-f', 'null', os.path.devnull
    ]
    # print('DEBUG running [{0}]'.format(' '.join(cmd)))
    # volumedetect reports on stderr, so it is merged into stdout here.
    proc = subprocess.run(cmd, check=True,
        #capture_output=True,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        encoding='utf-8', errors='ignore')
    max_volume = None
    mean_volume = None
    histogram = {}
    for line in proc.stdout.splitlines():
        line = line.strip()
        # print('DEBUG output [{0}]'.format(line))
        m = LINE_MEAN_VOLUME.match(line)
        if m:
            mean_volume = float(m.group(2))
            # print('DEBUG mean_volume = {0} / {1}'.format(m.group(2), mean_volume))
        m = LINE_MAX_VOLUME.match(line)
        if m:
            max_volume = float(m.group(2))
            # print('DEBUG max_volume = {0} / {1}'.format(m.group(2), max_volume))
        m = LINE_HISTOGRAM_VOLUME.match(line)
        if m:
            histogram[m.group(2)] = float(m.group(3))
            # print('DEBUG histogram {0}db = {1}'.format(m.group(2), m.group(3)))
    # NOTE: histogram starts as {} and is never rebound to None, so the
    # "histogram is None" clause below can never be true.
    if mean_volume is None or max_volume is None or histogram is None:
        return None
    return VolumeLevel(mean_volume, max_volume, histogram)
| 2.875 | 3 |
leetCodeSolutionsPython/bubbleSort.py | hpnog/computingProblems | 0 | 12764374 | <reponame>hpnog/computingProblems<gh_stars>0
# Bubble Sort
#
# Time Complexity: O(n^2) worst/average case; O(n) best case (early exit on sorted input)
# Space Complexity: O(1)
class Solution:
    """In-place ascending bubble sort with an early-exit optimization."""

    def bubbleSort(self, array: [int]) -> [int]:
        """Sort ``array`` in place and return it.

        After each full pass the largest remaining element has bubbled to
        its final position; if a pass performs no swaps the array is
        already sorted and the loop stops early.
        """
        n = len(array)
        for done in range(n):
            swapped = False
            for k in range(n - 1 - done):
                if array[k] > array[k + 1]:
                    array[k], array[k + 1] = array[k + 1], array[k]
                    swapped = True
            if not swapped:
                break
        return array
instance/env_to_config_shim.py | uk-gov-mirror/alphagov.govuk-shielded-vulnerable-people-service | 3 | 12764375 | import os
# Flask-style configuration shim: every setting is read from the process
# environment at import time; unset variables become None unless noted.
ENVIRONMENT = os.environ.get("ENVIRONMENT")
SECRET_KEY = os.environ.get("SECRET_KEY")
ORDNANCE_SURVEY_PLACES_API_KEY = os.environ.get("ORDNANCE_SURVEY_PLACES_API_KEY")
# NOTE(review): int(None) raises TypeError if this variable is unset --
# presumably fail-fast by design; confirm before adding a default.
PERMANENT_SESSION_LIFETIME = int(os.environ.get("PERMANENT_SESSION_LIFETIME"))
GA_TRACKING_ID = os.environ.get("GA_TRACKING_ID")
GA_CROSS_DOMAIN_TRACKING_ID = os.environ.get("GA_CROSS_DOMAIN_TRACKING_ID")
# NHS OIDC config
NHS_OIDC_AUTHORITY_URL = os.environ.get("NHS_OIDC_AUTHORITY_URL")
NHS_OIDC_CLIENT_ID = os.environ.get("NHS_OIDC_CLIENT_ID")
NHS_OIDC_REGISTRATION_CALLBACK_URL = os.environ.get("NHS_OIDC_REGISTRATION_CALLBACK_URL")
NHS_OIDC_LOGIN_CALLBACK_URL = os.environ.get("NHS_OIDC_LOGIN_CALLBACK_URL")
NHS_OIDC_LOGIN_PRIVATE_KEY = os.environ.get("NHS_OIDC_LOGIN_PRIVATE_KEY")
# AWS CONFIG
LOCAL_AWS_ENDPOINT_URL = os.environ.get("LOCAL_AWS_ENDPOINT_URL")
LOCAL_SQS_ENDPOINT_URL = os.environ.get("LOCAL_SQS_ENDPOINT_URL")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
# Comma-separated list; with the variable unset this is [''] (split of "").
DATABASE_SECRET_TAGS = [s.strip() for s in os.environ.get("DATABASE_SECRET_TAGS", "").split(",")]
DATABASE_CLUSTER_PREFIX = os.environ.get("DATABASE_CLUSTER_PREFIX")
AWS_RDS_DATABASE_ARN_OVERRIDE = os.environ.get("AWS_RDS_DATABASE_ARN_OVERRIDE")
AWS_RDS_SECRET_ARN_OVERRIDE = os.environ.get("AWS_RDS_SECRET_ARN_OVERRIDE")
AWS_SQS_QUEUE_URL = os.environ.get("AWS_SQS_QUEUE_URL")
SENTRY_DSN = os.environ.get("SENTRY_DSN")
POSTCODE_TIER_OVERRIDE = os.environ.get("POSTCODE_TIER_OVERRIDE")
SUBMISSION_TRACING_PEPPER = os.environ.get("SUBMISSION_TRACING_PEPPER")
| 2 | 2 |
brainfuck.py | MineRobber9000/pyfiles | 0 | 12764376 | import sys
# Running interpreter's major version (2 or 3); drives py2/py3 branches below.
major = sys.version_info.major
def six(two, three):
    """Return a thunk that yields ``two`` on Python 2 and ``three`` on Python 3.

    Reads ``sys.version_info.major`` directly rather than the module-level
    ``major`` global (which is assigned from the same value at import
    time), so the function is self-contained; behavior is identical.
    """
    return lambda: two if sys.version_info.major == 2 else three
def newmodule():
    # Returns a class-factory callable: (name, bases, namespace) -> class.
    if major==2:
        # Python 2: new.classobj builds an (old-style) class directly.
        return __import__("new").classobj
    elif major==3:
        # Python 3: delegate to types.new_class, injecting the namespace
        # through the exec_body callback.
        return lambda name,base,ns: __import__("types").new_class(name,base,dict(),lambda d: d.update(ns))
def wh(cond, func):
    """Call ``func()`` repeatedly for as long as ``cond()`` is truthy.

    Expression-friendly stand-in for a ``while`` statement (used by the
    lambda-only interpreter below).
    """
    while True:
        if not cond():
            break
        func()
class BrainfuckManual:
    # Minimal Brainfuck interpreter (+ - < > . only) built almost entirely
    # from eval() calls.  Every eval receives self.__dict__ as its locals
    # mapping, so inside the evaluated strings `mem`/`dp`/`i` read and
    # write the instance attributes directly, and locals().__setitem__
    # mutates self.__dict__.
    def __init__(self):
        self.mem = {}  # tape: cell index -> int value
        self.dp = 0    # data pointer
        self.globals = globals()
    def add(self):
        # '+': increment current cell (missing cells default to 0).
        return eval("mem.__setitem__(dp,(mem.get(dp,0)+1))",self.globals,self.__dict__)
    def sub(self):
        # '-': decrement current cell.
        return eval("mem.__setitem__(dp,(mem.get(dp,0)-1))",self.globals,self.__dict__)
    def left(self):
        # '<': move the data pointer left (locals() here is self.__dict__).
        return eval("locals().__setitem__('dp',dp-1)",self.globals,self.__dict__)
    def right(self):
        # '>': move the data pointer right.
        return eval("locals().__setitem__('dp',dp+1)",self.globals,self.__dict__)
    def out(self):
        # '.': print the current cell as a character; six() picks the py2
        # print-statement or py3 print-call spelling of the same output.
        return eval(six("print chr(mem[dp]),","print(chr(mem[dp]),end='')")(),self.globals,self.__dict__)
    def run(self,p):
        # Execute program string p one instruction at a time via wh().
        self.i = 0
        self.p = p
        wh(lambda: self.i<len(p),self.tick)
    def tick(self):
        # Dispatch the current instruction, then advance the counter.
        self.c = self.p[self.i]
        {"+":self.add,"-":self.sub,">":self.right,"<":self.left,".":self.out}[self.c]()
        self.i = self.i+1
# Single-expression re-creation of BrainfuckManual: builds the class at
# runtime with newmodule(), supplying every method as a lambda that uses
# the same eval()-on-self.__dict__ tricks as the class above.
Brainfuck = (lambda new,globals: new("Brainfuck",(),dict(__init__=lambda self: self.__dict__.update(dict(mem={},dp=0,globals=globals)),add=lambda self: eval("mem.__setitem__(dp,(mem.get(dp,0)+1))",self.globals,self.__dict__),sub=lambda self: eval("mem.__setitem__(dp,(mem.get(dp,0)-1))",self.globals,self.__dict__),left=lambda self: eval("locals().__setitem__('dp',dp-1)",self.globals,self.__dict__),right=lambda self: eval("locals().__setitem__('dp',dp+1)",self.globals,self.__dict__),out=lambda self: eval(six("print chr(mem[dp]),","print(chr(mem[dp]),end='')")(),self.globals,self.__dict__),run=lambda self,p: (self.__dict__.update(dict(i=0,p=p)),wh(lambda: self.i<len(p),self.tick))[0],tick=lambda self: ({"+":self.add,"-":self.sub,"<":self.left,">":self.right}[self.p[self.i]](),eval("locals().__setitem__('i',i+1)",self.globals,self.__dict__))[0])))(newmodule(),globals())
| 2.984375 | 3 |
lph/envs/Baking.py | PhilippeMorere/learning-to-plan-hierarchically | 6 | 12764377 | from lph.envs.AbsEnv import GraphEnv
class BakingEnv(GraphEnv):
    """
    Baking environment, making chocolate or sultana cookies
    Actions are:
    0. Get egg
    1. Break egg
    2. Whisk egg
    3. Pour egg
    4. Get flour
    5. Add flour
    6. Get sugar
    7. Pour sugar
    8. Get salt
    9. Add salt
    10. Get oil
    11. Pour oil
    12. Get milk
    13. Pour milk
    14. Mix liquids
    15. Mix solids
    16. Mix batter
    17. Get butter
    18. Get tray
    19. Oil tray
    20. Get chocolate
    21. Make chocolate chips
    22. Mix in chocolate chips
    23. Pour chocolate batter
    24. Turn oven on
    25. Cook chocolate cookies
    26. Get sultana
    27. Mix in sultana
    28. Poor sultana batter
    29. Cook sultana cookies
    State dims are the same as actions.
    """
    # conditions[i] lists the state dimensions that must already be set
    # before action i can succeed (empty = no prerequisite).
    conditions = [[], [0], [1], [2],  # ............. 0, 1, 2, 3
                  [], [4], [], [6],  # .............. 4, 5, 6, 7
                  [], [8], [], [10],  # ............. 8, 9, 10, 11
                  [], [12],  # ...................... 12, 13
                  (3, 11, 13), (5, 7, 9),  # ........ 14, 15
                  (14, 15), [], [], (17, 18),  # .... 16, 17, 18, 19
                  [], [20], (16, 21),  # ............ 20, 21, 22
                  (19, 22), [], (23, 24),  # ........ 23, 24, 25
                  [], (16, 26), (19, 27), [28]]  # .. 26, 27, 28, 29
    # A few useful goals
    # Each goal is a pair ([state dims], [target values]).
    goal_break_egg = ([1], [1])
    goal_pour_egg = ([3], [1])
    goal_pour_four = ([5], [1])
    goal_pour_sugar = ([7], [1])
    goal_add_salt = ([9], [1])
    goal_pour_oil = ([11], [1])
    goal_pour_milk = ([13], [1])
    goal_mix_liquids = ([14], [1])
    goal_mix_solids = ([15], [1])
    goal_mix_batter = ([16], [1])
    goal_oil_tray = ([19], [1])
    goal_make_choc_chips = ([21], [1])
    goal_mix_choc_chips = ([22], [1])
    goal_pour_choc_batter = ([23], [1])
    goal_cook_choc_cookie = ([25], [1])
    goal_pour_sultana_batter = ([28], [1])
    goal_cook_sultana_cookie = ([29], [1])
    # Suggested curriculum (goals ordered from simple to compound).
    curriculum = [
        goal_break_egg,
        goal_pour_egg,
        goal_pour_four,
        goal_pour_sugar,
        goal_add_salt,
        goal_pour_oil,
        goal_pour_milk,
        goal_mix_liquids,
        goal_mix_solids,
        goal_mix_batter,
        goal_oil_tray,
        goal_make_choc_chips,
        goal_mix_choc_chips,
        goal_pour_choc_batter,
        goal_cook_choc_cookie,
        goal_pour_sultana_batter,
        goal_cook_sultana_cookie
    ]
    def __init__(self, stochastic_reset=False, goal=None):
        # Delegate to GraphEnv with this task's dependency graph and the
        # suggested training curriculum.
        super().__init__(BakingEnv.conditions, BakingEnv.curriculum,
                         stochastic_reset, goal=goal)
| 2.1875 | 2 |
e2xgrader/exporters/__init__.py | mhwasil/e2xgrader | 0 | 12764378 | <gh_stars>0
from .gradeexporter import GradeTaskExporter, GradeNotebookExporter, GradeAssignmentExporter
from .formexporter import FormExporter | 1.078125 | 1 |
0109.Convert Sorted List to Binary Search Tree/solution.py | zhlinh/leetcode | 0 | 12764379 | <reponame>zhlinh/leetcode<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-03-07
Last_modify: 2016-03-07
******************************************
'''
'''
Given a **singly linked list** where elements are
sorted in ascending order, convert it to a height balanced BST.
'''
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly linked list: a value plus a ``next`` pointer."""

    def __init__(self, x):
        # A freshly created node starts detached (next is None).
        self.next = None
        self.val = x
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node: a value plus ``left``/``right`` child pointers."""

    def __init__(self, x):
        # Children start empty; callers attach subtrees afterwards.
        self.left = None
        self.right = None
        self.val = x
class Solution(object):
    """Convert a sorted singly linked list into a height-balanced BST.

    Uses the O(n) inorder-simulation technique: count the list length
    once, then build the tree recursively, advancing a shared cursor
    through the list exactly in inorder sequence.
    """

    def sortedListToBST(self, head):
        """
        :type head: ListNode
        :rtype: TreeNode
        """
        # First pass: only the length is needed to shape the recursion.
        length = 0
        node = head
        while node:
            length += 1
            node = node.next
        # Shared cursor advanced by helper() during the inorder build.
        self.cur = head
        return self.helper(length)

    def helper(self, n):
        """Build and return a balanced subtree over the next ``n`` list nodes."""
        if n <= 0:
            return None
        left_size = n // 2
        root = TreeNode(0)
        root.left = self.helper(left_size)
        # After building the left subtree, the cursor sits on this
        # subtree's inorder root.
        root.val = self.cur.val
        self.cur = self.cur.next
        root.right = self.helper(n - left_size - 1)
        return root
| 3.625 | 4 |
speechdb/views.py | bmverhel/dices | 0 | 12764380 | from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.core.paginator import Paginator
from django.db.models import Q, Count, Max
from django.views.generic import ListView, DetailView, TemplateView
from django_filters.views import FilterView
from rest_framework.generics import ListAPIView, RetrieveAPIView
from django_filters import rest_framework as filters
from .models import Author, Work, Character, CharacterInstance, Speech, SpeechCluster
from .serializers import AuthorSerializer, WorkSerializer, CharacterSerializer, CharacterInstanceSerializer, SpeechSerializer, SpeechClusterSerializer
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Base URL of the Scaife CTS reader used for passage links.
CTS_READER = 'https://scaife.perseus.org/reader/'
# Default page size for paginated list views.
PAGE_SIZE = 25
# parameter validation
# parameter validation
def ValidateParams(request, valid_params):
    """Extract and type-convert whitelisted query-string parameters.

    ``valid_params`` is an iterable of ``(name, type)`` pairs.  Each
    present, non-empty parameter (truncated to 256 chars, stripped) is
    converted with its type callable; values that raise ValueError are
    silently dropped.  Returns a dict of the successful conversions.
    """
    params = {}
    for param, vtype in valid_params:
        if param not in request.GET:
            continue
        raw = request.GET[param][:256].strip()
        if not raw:
            continue
        try:
            params[param] = vtype(raw)
        except ValueError:
            # Malformed values are ignored rather than reported.
            pass
    return params
#
# API filters
#
class AuthorFilter(filters.FilterSet):
    """API filter for Author: exact match on id, name, or Wikidata id."""
    class Meta:
        model = Author
        fields = ['id', 'name', 'wd']
class WorkFilter(filters.FilterSet):
    """API filter for Work, including lookups through the related Author."""
    # cross-relation filters on the work's author
    author_id = filters.NumberFilter('author__id')
    author_name = filters.CharFilter('author__name')
    author_wd = filters.CharFilter('author__wd')
    author_urn = filters.CharFilter('author__urn')
    class Meta:
        model = Work
        fields = ['id', 'title', 'wd', 'urn',
                  'author_name', 'author_id', 'author_wd']
class CharacterFilter(filters.FilterSet):
    """API filter for Character: exact match on ids and category fields."""
    class Meta:
        model = Character
        fields = ['id', 'name', 'wd', 'manto', 'gender', 'number', 'being']
class CharacterInstanceFilter(filters.FilterSet):
    """API filter for CharacterInstance.

    Filters both on the instance's own attributes and, via the char_*
    aliases, on the underlying Character record.
    """
    name = filters.CharFilter('name')
    gender = filters.ChoiceFilter('gender',
                                  choices=Character.CharacterGender.choices)
    number = filters.ChoiceFilter('number',
                                  choices=Character.CharacterNumber.choices)
    being = filters.ChoiceFilter('being',
                                 choices=Character.CharacterBeing.choices)
    anon = filters.BooleanFilter('anon')
    # lookups through the related Character
    char_id = filters.NumberFilter('char__id')
    char_name = filters.CharFilter('char__name')
    char_wd = filters.CharFilter('char__wd')
    char_manto = filters.CharFilter('char__manto')
    char_gender = filters.ChoiceFilter('char__gender',
                                       choices=Character.CharacterGender.choices)
    char_number = filters.ChoiceFilter('char__number',
                                       choices=Character.CharacterNumber.choices)
    char_being = filters.ChoiceFilter('char__being',
                                      choices=Character.CharacterBeing.choices)
    class Meta:
        model = CharacterInstance
        fields = ['id', 'name', 'gender', 'number', 'being', 'anon',
                  'char_id', 'char_name', 'char_wd', 'char_manto',
                  'char_gender', 'char_number', 'char_being']
class SpeechFilter(filters.FilterSet):
    """API filter for Speech.

    Exposes lookups on the speaker (spkr_*) and addressee (addr_*)
    character instances, on the containing cluster and work, and on the
    work's author.
    """
    # speaker lookups (through the speaker's CharacterInstance and Character)
    spkr_id = filters.NumberFilter('spkr__char__id')
    spkr_name = filters.CharFilter('spkr__name')
    spkr_manto = filters.CharFilter('spkr__char__manto')
    spkr_wd = filters.CharFilter('spkr__char__wd')
    spkr_gender = filters.ChoiceFilter('spkr__gender',
                                       choices=Character.CharacterGender.choices)
    spkr_number = filters.ChoiceFilter('spkr__number',
                                       choices=Character.CharacterNumber.choices)
    spkr_being = filters.ChoiceFilter('spkr__being',
                                      choices=Character.CharacterBeing.choices)
    spkr_anon = filters.BooleanFilter('spkr__anon')
    # addressee lookups, mirroring the speaker lookups above
    addr_id = filters.NumberFilter('addr__char__id')
    addr_name = filters.CharFilter('addr__name')
    addr_manto = filters.CharFilter('addr__char__manto')
    addr_wd = filters.CharFilter('addr__char__wd')
    addr_gender = filters.ChoiceFilter('addr__gender',
                                       choices=Character.CharacterGender.choices)
    addr_number = filters.ChoiceFilter('addr__number',
                                       choices=Character.CharacterNumber.choices)
    addr_being = filters.ChoiceFilter('addr__being',
                                      choices=Character.CharacterBeing.choices)
    addr_anon = filters.BooleanFilter('addr__anon')
    # direct filters on the CharacterInstance records themselves
    spkr_inst = filters.NumberFilter('spkr__id')
    addr_inst = filters.NumberFilter('addr__id')
    type = filters.ChoiceFilter('type', choices=Speech.SpeechType.choices)
    cluster_id = filters.NumberFilter('cluster__id')
    # containing work and its author
    work_id = filters.NumberFilter('work__id')
    work_title = filters.CharFilter('work__title')
    work_urn = filters.CharFilter('work__urn')
    work_wd = filters.CharFilter('work__wd')
    author_id = filters.NumberFilter('work__author__id')
    author_name = filters.CharFilter('work__author__name')
    author_wd = filters.CharFilter('work__author__wd')
    author_urn = filters.CharFilter('work__author__urn')
    class Meta:
        model = Speech
        fields = ['id',
                  'spkr_id', 'spkr_name', 'spkr_manto', 'spkr_wd', 'spkr_gender',
                  'spkr_number', 'spkr_being', 'spkr_anon',
                  'addr_id', 'addr_name', 'addr_manto', 'addr_wd', 'addr_gender',
                  'addr_number', 'addr_being', 'addr_anon',
                  'spkr_inst', 'addr_inst',
                  'type',
                  'cluster_id',
                  'work_id', 'work_title', 'work_urn', 'work_wd',
                  'author_id', 'author_name', 'author_urn', 'author_wd',
                  'part']
class SpeechClusterFilter(filters.FilterSet):
    """API filter for SpeechCluster: exact match on id only."""
    class Meta:
        model = SpeechCluster
        fields = ['id']
#
# API class-based views
#
class AuthorList(ListAPIView):
    """API endpoint: filterable list of authors."""
    queryset = Author.objects.all()
    serializer_class = AuthorSerializer
    filterset_class = AuthorFilter
class AuthorDetail(RetrieveAPIView):
    """API endpoint: single author by primary key."""
    queryset = Author.objects.all()
    serializer_class = AuthorSerializer
class WorkList(ListAPIView):
    """API endpoint: filterable list of works."""
    queryset = Work.objects.all()
    serializer_class = WorkSerializer
    filterset_class = WorkFilter
class WorkDetail(RetrieveAPIView):
    """API endpoint: single work by primary key."""
    queryset = Work.objects.all()
    serializer_class = WorkSerializer
class CharacterList(ListAPIView):
    """API endpoint: filterable list of characters."""
    queryset = Character.objects.all()
    serializer_class = CharacterSerializer
    filterset_class = CharacterFilter
class CharacterDetail(RetrieveAPIView):
    """API endpoint: single character by primary key."""
    queryset = Character.objects.all()
    serializer_class = CharacterSerializer
class CharacterInstanceList(ListAPIView):
    """API endpoint: filterable list of character instances."""
    queryset = CharacterInstance.objects.all()
    serializer_class = CharacterInstanceSerializer
    filterset_class = CharacterInstanceFilter
class CharacterInstanceDetail(RetrieveAPIView):
    """API endpoint: single character instance by primary key."""
    queryset = CharacterInstance.objects.all()
    serializer_class = CharacterInstanceSerializer
class SpeechList(ListAPIView):
    """API endpoint: filterable list of speeches."""
    queryset = Speech.objects.all()
    serializer_class = SpeechSerializer
    filterset_class = SpeechFilter
class SpeechDetail(RetrieveAPIView):
    """API endpoint: single speech by primary key."""
    queryset = Speech.objects.all()
    serializer_class = SpeechSerializer
class SpeechClusterList(ListAPIView):
    """API endpoint: filterable list of speech clusters."""
    queryset = SpeechCluster.objects.all()
    serializer_class = SpeechClusterSerializer
    filterset_class = SpeechClusterFilter
class SpeechClusterDetail(RetrieveAPIView):
    """API endpoint: single speech cluster by primary key."""
    queryset = SpeechCluster.objects.all()
    serializer_class = SpeechClusterSerializer
#
# Web frontend class-based views
#
class AppAuthorList(ListView):
    """Web page: paginated list of all authors."""
    model = Author
    template_name = 'speechdb/author_list.html'
    queryset = Author.objects.all()
    paginate_by = PAGE_SIZE
class AppWorkList(ListView):
    """Web page: paginated list of all works."""
    model = Work
    template_name = 'speechdb/work_list.html'
    queryset = Work.objects.all()
    paginate_by = PAGE_SIZE
class AppCharacterList(ListView):
    """Web page: paginated character list, optionally filtered by name."""
    model = Character
    template_name = 'speechdb/character_list.html'
    queryset = Character.objects.all()
    paginate_by = PAGE_SIZE
    # (param name, type) pairs accepted from the query string
    _valid_params = [
        ('name', str),
    ]
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add useful info; self.params is set in get_queryset(), which
        # Django's ListView runs before get_context_data()
        context['search_params'] = self.params.items()
        return context
    def get_queryset(self):
        # collect user search params
        self.params = ValidateParams(self.request, self._valid_params)
        # construct query
        query = []
        # character by exact name
        if 'name' in self.params:
            query.append(Q(name=self.params['name']))
        qs = Character.objects.filter(*query).order_by('name')
        # annotate with speech/address counts for display in the template
        qs = qs.annotate(
            Count('instances__speeches', distinct=True),
            Count('instances__addresses', distinct=True),
        )
        return qs
class AppCharacterInstanceList(ListView):
    """Web page: paginated character-instance list, filterable by character name."""
    model = CharacterInstance
    template_name = 'speechdb/characterinstance_list.html'
    queryset = CharacterInstance.objects.all()
    paginate_by = PAGE_SIZE
    # (param name, type) pairs accepted from the query string
    _valid_params = [
        ('name', str),
    ]
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add useful info; self.params is set in get_queryset(), which
        # Django's ListView runs before get_context_data()
        context['search_params'] = self.params.items()
        return context
    def get_queryset(self):
        # collect user search params
        self.params = ValidateParams(self.request, self._valid_params)
        # construct query
        query = []
        # filter by the underlying character's exact name
        if 'name' in self.params:
            query.append(Q(char__name=self.params['name']))
        qs = CharacterInstance.objects.filter(*query).order_by('char__name')
        # annotate with speech/address counts for display in the template
        qs = qs.annotate(
            Count('speeches', distinct=True),
            Count('addresses', distinct=True),
        )
        return qs
class AppCharacterInstanceDetail(DetailView):
    """Web page: details for one character instance."""
    model = CharacterInstance
    template_name = 'speechdb/characterinstance_detail.html'
    context_object_name = 'inst'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add the CTS reader base URL so the template can link passages
        context['reader'] = CTS_READER
        return context
class AppCharacterDetail(DetailView):
    """Web page: details for one character."""
    model = Character
    template_name = 'speechdb/character_detail.html'
    context_object_name = 'char'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add the CTS reader base URL so the template can link passages
        context['reader'] = CTS_READER
        return context
class AppSpeechList(ListView):
    """Web page: paginated, filterable list of speeches.

    Query-string filters are declared in _valid_params, validated by
    ValidateParams(), and translated into Q objects in get_queryset().
    """
    model = Speech
    template_name = 'speechdb/speech_list.html'
    paginate_by = PAGE_SIZE
    ordering = ['work', 'seq']
    # (param name, type) pairs accepted from the query string
    _valid_params = [
        ('spkr_id', int),
        ('addr_id', int),
        ('char_id', int),
        ('char_inst', int),
        ('spkr_inst', int),
        ('addr_inst', int),
        ('cluster_id', int),
        ('type', str),
        ('part', int),
        ('n_parts', int),
        ('work_id', int),
    ]
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add data for the search form and CTS links; self.params is set in
        # get_queryset(), which Django runs before get_context_data()
        context['reader'] = CTS_READER
        context['works'] = Work.objects.all()
        context['characters'] = Character.objects.all()
        context['speech_types'] = Speech.SpeechType.choices
        context['search_params'] = self.params.items()
        return context
    def get_queryset(self):
        # collect user search params
        self.params = ValidateParams(self.request, self._valid_params)
        # initial set of objects plus annotations (speech count per cluster,
        # used by the n_parts filter below)
        qs = Speech.objects.annotate(Count('cluster__speech'))
        # construct query
        query = []
        # any participant, in person or disguised
        if 'char_id' in self.params:
            query.append(
                Q(spkr__char=self.params['char_id']) |
                Q(spkr__disg=self.params['char_id']) |
                Q(addr__char=self.params['char_id']) |
                Q(addr__disg=self.params['char_id'])
            )
        # any participant by character instance (previously declared in
        # _valid_params but never applied)
        if 'char_inst' in self.params:
            query.append(
                Q(spkr=self.params['char_inst']) |
                Q(addr=self.params['char_inst'])
            )
        # speaker by character id
        if 'spkr_id' in self.params:
            query.append(Q(spkr__char=self.params['spkr_id']) | Q(spkr__disg=self.params['spkr_id']))
        # speaker by instance
        if 'spkr_inst' in self.params:
            query.append(Q(spkr=self.params['spkr_inst']))
        # addressee by character id
        if 'addr_id' in self.params:
            query.append(Q(addr__char=self.params['addr_id']) | Q(addr__disg=self.params['addr_id']))
        # addressee by instance
        if 'addr_inst' in self.params:
            query.append(Q(addr=self.params['addr_inst']))
        if 'cluster_id' in self.params:
            query.append(Q(cluster__pk=self.params['cluster_id']))
        if 'type' in self.params:
            query.append(Q(type=self.params['type']))
        if 'part' in self.params:
            query.append(Q(part=self.params['part']))
        if 'n_parts' in self.params:
            query.append(Q(cluster__speech__count=self.params['n_parts']))
        if 'work_id' in self.params:
            # filter on the speech's own work FK (as SpeechFilter does);
            # going through cluster__work would drop speeches with no cluster
            query.append(Q(work__pk=self.params['work_id']))
        qs = qs.filter(*query)
        # order_by() replaces any previous ordering, so both keys must be
        # passed in a single call (two chained calls kept only 'work')
        qs = qs.order_by('work', 'seq')
        return qs
class AppSpeechClusterList(ListView):
    """Web page: paginated list of speech clusters (no filters yet)."""
    model = SpeechCluster
    template_name = 'speechdb/speechcluster_list.html'
    queryset = SpeechCluster.objects.all()
    paginate_by = PAGE_SIZE
    # no query-string filters implemented yet
    _valid_params = []
    def get_queryset(self):
        # collect user search params (always empty for now)
        self.params = ValidateParams(self.request, self._valid_params)
        # construct query (empty filter == all clusters)
        query = []
        return SpeechCluster.objects.filter(*query)
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add the CTS reader base URL and the active search params
        context['reader'] = CTS_READER
        context['search_params'] = self.params.items()
        return context
class AppSpeechClusterDetail(DetailView):
    """Web page: details for one speech cluster."""
    model = SpeechCluster
    template_name = 'speechdb/speechcluster_detail.html'
    context_object_name = 'cluster'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add the CTS reader base URL so the template can link passages
        context['reader'] = CTS_READER
        return context
class AppIndex(TemplateView):
    """Web page: site landing page with summary querysets."""
    template_name = 'speechdb/index.html'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add full querysets for the template
        # NOTE(review): Speech.objects.all() may be large; if the template
        # only needs a count, consider passing counts instead
        context['works'] = Work.objects.all()
        context['characters'] = Character.objects.all()
        context['speeches'] = Speech.objects.all()
        return context
class AppSpeechSearch(TemplateView):
    """Web page: search form for speeches."""
    template_name = 'speechdb/speech_search.html'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add option lists for the search form widgets
        context['works'] = Work.objects.all()
        context['characters'] = Character.objects.all()
        # largest 'part' value, used as the upper bound of the n_parts widget
        context['max_parts'] = Speech.objects.aggregate(Max('part'))['part__max']
        context['speech_types'] = Speech.SpeechType.choices
        return context
class AppSpeechClusterSearch(TemplateView):
    """Web page: search form for speech clusters."""
    template_name = 'speechdb/speechcluster_search.html'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add option lists for the search form widgets
        context['works'] = Work.objects.all()
        context['characters'] = Character.objects.all()
        context['speech_types'] = Speech.SpeechType.choices
        return context
class AppCharacterSearch(TemplateView):
    """Web page: search form for characters."""
    template_name = 'speechdb/character_search.html'
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)
        # add option list for the search form widget
        context['characters'] = Character.objects.all()
        return context
| 2.0625 | 2 |
layers/custom_layers.py | PentaHiggs/fantastic-pancakes | 3 | 12764381 | import tensorflow as tf
from tensorflow.python.framework import ops
import os
# Directory containing the compiled custom-op shared libraries (*.so)
dot_slash = os.path.dirname(__file__)
# Making roi_pooling_layer available for import as a library
roi_location = os.path.join(dot_slash, "rpl.so")
op_module = tf.load_op_library(roi_location)
roi_pooling_layer = op_module.roi_pooler
# Making nms (non-maximum suppression) available for import as a library
nms_location = os.path.join(dot_slash, "nms.so")
nms_module = tf.load_op_library(nms_location)
nms = nms_module.nms
# Making roi_pooling_layer's gradient available for import
roi_grad_location = os.path.join(dot_slash, "rpl_grad.so")
roi_grad_module = tf.load_op_library(roi_grad_location)
roi_pooling_layer_grad = roi_grad_module.roi_pooler_grad
@ops.RegisterGradient("RoiPooler")
def _roi_pool_grad_cc(op, grad):
    # Gradient for the custom RoiPooler op: the rpl_grad kernel produces the
    # gradient w.r.t. the first input only; the other two inputs receive no
    # gradient (None). Pooling attrs are forwarded so the backward kernel
    # matches the forward configuration.
    return [roi_pooling_layer_grad(op.inputs[0], op.inputs[1], op.inputs[2], grad,
            op.get_attr("pooled_height"), op.get_attr("pooled_width"),
            op.get_attr("feature_stride")), None, None]
# Making iou_labeler available for import as a library
iou_labeler_location = os.path.join(dot_slash, "iou_labeler.so")
iou_labeler_module = tf.load_op_library(iou_labeler_location)
iou_labeler = iou_labeler_module.iou_labeler
| 2.484375 | 2 |
eventex/core/views.py | Golker/wttd | 0 | 12764382 | from django.views.generic import ListView, DetailView
from eventex.core.models import Speaker, Talk
# Class-based views bound at module level so the URLconf can reference them.
home = ListView.as_view(template_name='index.html', model=Speaker)
speaker_detail = DetailView.as_view(model=Speaker)
talk_list = ListView.as_view(model=Talk)
| 1.921875 | 2 |
repoclone/exceptions.py | pikamachu/pika-repo-batch-clone | 1 | 12764383 | <reponame>pikamachu/pika-repo-batch-clone<filename>repoclone/exceptions.py
# -*- coding: utf-8 -*-
"""
repoclone.exceptions
-----------------------
All exceptions used in the repoclone code base are defined here.
"""
class RepocloneException(Exception):
    """
    Base exception class. All repoclone-specific exceptions should subclass
    this class.
    """
| 1.648438 | 2 |
src/internal_api/v1/__init__.py | hvuhsg/OpenAPIGateway | 1 | 12764384 | <reponame>hvuhsg/OpenAPIGateway
from .v1 import v1
# Public API of this package: re-export only the v1 object.
__all__ = ["v1"]
| 0.996094 | 1 |
wpt/websockets/websock_handlers/webkit/unknown-frame-type_wsh.py | gsnedders/presto-testo | 0 | 12764385 | <gh_stars>0
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
    # No extra handshake checks needed for this test handler.
    pass
def web_socket_transfer_data(request):
    # Python 2 handler: deliberately writes frames with every possible
    # leading frame-type byte to exercise client handling of unknown types.
    # NOTE(review): framing appears to follow the pre-RFC6455 (Hixie-style)
    # protocol — types 0x01-0x7f as 0xff-terminated frames, types 0x80-0xff
    # as length-prefixed frames; confirm against the test's target protocol.
    for i in range(1, 128):
        request.connection.write(chr(i) + str(i) + '\xff')
    for i in range(128, 256):
        msg = str(i)
        request.connection.write(chr(i) + chr(len(msg)) + msg)
    # Signal completion via a normal text message.
    msgutil.send_message(request, 'done')
| 2.453125 | 2 |
tests/test_basis_evaluation.py | jiduque/scikit-fda | 147 | 12764386 |
from skfda.representation.basis import (
FDataBasis, Monomial, BSpline, Fourier, Constant, VectorValued, Tensor)
import unittest
import numpy as np
class TestBasisEvaluationFourier(unittest.TestCase):
    """Evaluation tests for FDataBasis objects built on a Fourier basis."""
    def test_evaluation_simple_fourier(self):
        """Test the evaluation of FDataBasis"""
        fourier = Fourier(domain_range=(0, 2), n_basis=5)
        coefficients = np.array([[1, 2, 3, 4, 5],
                                 [6, 7, 8, 9, 10]])
        f = FDataBasis(fourier, coefficients)
        t = np.linspace(0, 2, 11)
        # Results in R package fda
        res = np.array([[8.71, 9.66, 1.84, -4.71, -2.80, 2.71,
                         2.45, -3.82, -6.66, -0.30, 8.71],
                        [22.24, 26.48, 10.57, -4.95, -3.58, 6.24,
                         5.31, -7.69, -13.32, 1.13, 22.24]])[..., np.newaxis]
        np.testing.assert_array_almost_equal(f(t).round(2), res)
        np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
    def test_evaluation_point_fourier(self):
        """Test the evaluation of a single point FDataBasis"""
        fourier = Fourier(domain_range=(0, 1), n_basis=3)
        coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
                                 [0.01778079, 0.73440271, 0.20148638]])
        f = FDataBasis(fourier, coefficients)
        # Test different ways of call f with a point
        res = np.array([-0.903918107989282, -0.267163981229459]
                       ).reshape((2, 1, 1)).round(4)
        np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
        np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
        np.testing.assert_array_almost_equal(f(0.5).round(4), res)
        np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
        # Problematic case, should be accepted or no?
        #np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
    def test_evaluation_derivative_fourier(self):
        """Test the evaluation of the derivative of a FDataBasis"""
        fourier = Fourier(domain_range=(0, 1), n_basis=3)
        coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
                                 [0.01778079, 0.73440271, 0.20148638]])
        f = FDataBasis(fourier, coefficients)
        t = np.linspace(0, 1, 4)
        res = np.array([4.34138447771721, -7.09352774867064, 2.75214327095343,
                        4.34138447771721, 6.52573053999253,
                        -4.81336320468984, -1.7123673353027, 6.52573053999253]
                       ).reshape((2, 4, 1)).round(3)
        f_deriv = f.derivative()
        np.testing.assert_array_almost_equal(
            f_deriv(t).round(3), res
        )
    def test_evaluation_grid_fourier(self):
        """Test the evaluation of FDataBasis with the grid option set to
        true. Nothing should be change due to the domain dimension is 1,
        but can accept the """
        fourier = Fourier(domain_range=(0, 1), n_basis=3)
        coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
                                 [0.01778079, 0.73440271, 0.20148638]])
        f = FDataBasis(fourier, coefficients)
        t = np.linspace(0, 1, 4)
        res_test = f(t)
        # Different ways to pass the axes
        np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
        np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
        np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
        np.testing.assert_array_almost_equal(f(np.atleast_2d(t), grid=True),
                                             res_test)
        # Number of axis different than the domain dimension (1)
        with np.testing.assert_raises(ValueError):
            f((t, t), grid=True)
    def test_evaluation_composed_fourier(self):
        """Test the evaluation of FDataBasis the a matrix of times instead of
        a list of times """
        fourier = Fourier(domain_range=(0, 1), n_basis=3)
        coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
                                 [0.01778079, 0.73440271, 0.20148638]])
        f = FDataBasis(fourier, coefficients)
        t = np.linspace(0, 1, 4)
        # Test same result than evaluation standart
        np.testing.assert_array_almost_equal(f([1]),
                                             f([[1], [1]],
                                               aligned=False))
        np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
                                                     aligned=False))
        # Different evaluation times
        t_multiple = [[0, 0.5], [0.2, 0.7]]
        np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
                                             f(t_multiple,
                                               aligned=False)[0])
        np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
                                             f(t_multiple,
                                               aligned=False)[1])
    def test_domain_in_list_fourier(self):
        """Test the evaluation of FDataBasis"""
        # All of these domain_range spellings should be equivalent
        for fourier in (Fourier(domain_range=[(0, 1)], n_basis=3),
                        Fourier(domain_range=((0, 1),), n_basis=3),
                        Fourier(domain_range=np.array((0, 1)), n_basis=3),
                        Fourier(domain_range=np.array([(0, 1)]), n_basis=3)):
            coefficients = np.array([[0.00078238, 0.48857741, 0.63971985],
                                     [0.01778079, 0.73440271, 0.20148638]])
            f = FDataBasis(fourier, coefficients)
            t = np.linspace(0, 1, 4)
            res = np.array([0.905, 0.147, -1.05, 0.905, 0.303,
                            0.775, -1.024, 0.303]).reshape((2, 4, 1))
            np.testing.assert_array_almost_equal(f(t).round(3), res)
            np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
class TestBasisEvaluationBSpline(unittest.TestCase):
    """Evaluation tests for FDataBasis objects built on a BSpline basis."""
    def test_evaluation_simple_bspline(self):
        """Test the evaluation of FDataBasis"""
        bspline = BSpline(domain_range=(0, 2), n_basis=5)
        coefficients = np.array([[1, 2, 3, 4, 5],
                                 [6, 7, 8, 9, 10]])
        f = FDataBasis(bspline, coefficients)
        t = np.linspace(0, 2, 11)
        # Results in R package fda
        res = np.array([[1, 1.54, 1.99, 2.37, 2.7, 3,
                         3.3, 3.63, 4.01, 4.46, 5],
                        [6, 6.54, 6.99, 7.37, 7.7, 8,
                         8.3, 8.63, 9.01, 9.46, 10]])[..., np.newaxis]
        np.testing.assert_array_almost_equal(f(t).round(2), res)
        np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
    def test_evaluation_point_bspline(self):
        """Test the evaluation of a single point FDataBasis"""
        bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
        coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
                        [0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
        f = FDataBasis(bspline, coefficients)
        # Test different ways of call f with a point
        res = np.array([[0.5696], [0.3104]])[..., np.newaxis]
        np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
        np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
        np.testing.assert_array_almost_equal(f(0.5).round(4), res)
        np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
        # Problematic case, should be accepted or no?
        #np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
    def test_evaluation_derivative_bspline(self):
        """Test the evaluation of the derivative of a FDataBasis"""
        bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
        coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
                        [0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
        f = FDataBasis(bspline, coefficients)
        t = np.linspace(0, 1, 4)
        f_deriv = f.derivative()
        np.testing.assert_array_almost_equal(
            f_deriv(t).round(3),
            np.array([[2.927, 0.453, -1.229, 0.6],
                      [4.3, -1.599, 1.016, -2.52]])[..., np.newaxis]
        )
    def test_evaluation_grid_bspline(self):
        """Test the evaluation of FDataBasis with the grid option set to
        true. Nothing should be change due to the domain dimension is 1,
        but can accept the """
        bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
        coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
                        [0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
        f = FDataBasis(bspline, coefficients)
        t = np.linspace(0, 1, 4)
        res_test = f(t)
        # Different ways to pass the axes
        np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
        np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
        np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
        np.testing.assert_array_almost_equal(
            f(np.atleast_2d(t), grid=True), res_test)
        # Number of axis different than the domain dimension (1)
        with np.testing.assert_raises(ValueError):
            f((t, t), grid=True)
    def test_evaluation_composed_bspline(self):
        """Test the evaluation of FDataBasis the a matrix of times instead of
        a list of times """
        bspline = BSpline(domain_range=(0, 1), n_basis=5, order=3)
        coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
                        [0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
        f = FDataBasis(bspline, coefficients)
        t = np.linspace(0, 1, 4)
        # Test same result than evaluation standart
        np.testing.assert_array_almost_equal(f([1]),
                                             f([[1], [1]],
                                               aligned=False))
        np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
                                                     aligned=False))
        # Different evaluation times
        t_multiple = [[0, 0.5], [0.2, 0.7]]
        np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
                                             f(t_multiple,
                                               aligned=False)[0])
        np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
                                             f(t_multiple,
                                               aligned=False)[1])
    def test_domain_in_list_bspline(self):
        """Test the evaluation of FDataBasis"""
        # All of these domain_range spellings should be equivalent
        for bspline in (BSpline(domain_range=[(0, 1)], n_basis=5, order=3),
                        BSpline(domain_range=((0, 1),), n_basis=5, order=3),
                        BSpline(domain_range=np.array((0, 1)), n_basis=5,
                                order=3),
                        BSpline(domain_range=np.array([(0, 1)]), n_basis=5,
                                order=3)
                        ):
            coefficients = [[0.00078238, 0.48857741, 0.63971985, 0.23, 0.33],
                            [0.01778079, 0.73440271, 0.20148638, 0.54, 0.12]]
            f = FDataBasis(bspline, coefficients)
            t = np.linspace(0, 1, 4)
            res = np.array([[0.001, 0.564, 0.435, 0.33],
                            [0.018, 0.468, 0.371, 0.12]])[..., np.newaxis]
            np.testing.assert_array_almost_equal(f(t).round(3), res)
            np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
        # Check error: a 2-d domain is invalid for a BSpline basis
        with np.testing.assert_raises(ValueError):
            BSpline(domain_range=[(0, 1), (0, 1)])
class TestBasisEvaluationMonomial(unittest.TestCase):
    """Evaluation tests for FDataBasis objects built on a Monomial basis."""
    def test_evaluation_simple_monomial(self):
        """Test the evaluation of FDataBasis"""
        monomial = Monomial(domain_range=(0, 2), n_basis=5)
        coefficients = np.array([[1, 2, 3, 4, 5],
                                 [6, 7, 8, 9, 10]])
        f = FDataBasis(monomial, coefficients)
        t = np.linspace(0, 2, 11)
        # Results in R package fda
        res = np.array(
            [[1.00, 1.56, 2.66, 4.79, 8.62, 15.00,
              25.00, 39.86, 61.03, 90.14, 129.00],
             [6.00, 7.81, 10.91, 16.32, 25.42, 40.00,
              62.21, 94.59, 140.08, 201.98, 284.00]])[..., np.newaxis]
        np.testing.assert_array_almost_equal(f(t).round(2), res)
        np.testing.assert_array_almost_equal(f.evaluate(t).round(2), res)
    def test_evaluation_point_monomial(self):
        """Test the evaluation of a single point FDataBasis"""
        monomial = Monomial(domain_range=(0, 1), n_basis=3)
        coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
        f = FDataBasis(monomial, coefficients)
        # Test different ways of call f with a point
        res = np.array([[2.75], [1.525]])[..., np.newaxis]
        np.testing.assert_array_almost_equal(f([0.5]).round(4), res)
        np.testing.assert_array_almost_equal(f((0.5,)).round(4), res)
        np.testing.assert_array_almost_equal(f(0.5).round(4), res)
        np.testing.assert_array_almost_equal(f(np.array([0.5])).round(4), res)
        # Problematic case, should be accepted or no?
        #np.testing.assert_array_almost_equal(f(np.array(0.5)).round(4), res)
    def test_evaluation_derivative_monomial(self):
        """Test the evaluation of the derivative of a FDataBasis"""
        monomial = Monomial(domain_range=(0, 1), n_basis=3)
        coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
        f = FDataBasis(monomial, coefficients)
        t = np.linspace(0, 1, 4)
        f_deriv = f.derivative()
        np.testing.assert_array_almost_equal(
            f_deriv(t).round(3),
            np.array([[2., 4., 6., 8.],
                      [1.4, 2.267, 3.133, 4.]])[..., np.newaxis]
        )
    def test_evaluation_grid_monomial(self):
        """Test the evaluation of FDataBasis with the grid option set to
        true. Nothing should be change due to the domain dimension is 1,
        but can accept the """
        monomial = Monomial(domain_range=(0, 1), n_basis=3)
        coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
        f = FDataBasis(monomial, coefficients)
        t = np.linspace(0, 1, 4)
        res_test = f(t)
        # Different ways to pass the axes
        np.testing.assert_array_almost_equal(f(t, grid=True), res_test)
        np.testing.assert_array_almost_equal(f((t,), grid=True), res_test)
        np.testing.assert_array_almost_equal(f([t], grid=True), res_test)
        np.testing.assert_array_almost_equal(
            f(np.atleast_2d(t), grid=True), res_test)
        # Number of axis different than the domain dimension (1)
        with np.testing.assert_raises(ValueError):
            f((t, t), grid=True)
    def test_evaluation_composed_monomial(self):
        """Test the evaluation of FDataBasis the a matrix of times instead of
        a list of times """
        monomial = Monomial(domain_range=(0, 1), n_basis=3)
        coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
        f = FDataBasis(monomial, coefficients)
        t = np.linspace(0, 1, 4)
        # Test same result than evaluation standart
        np.testing.assert_array_almost_equal(f([1]),
                                             f([[1], [1]],
                                               aligned=False))
        np.testing.assert_array_almost_equal(f(t), f(np.vstack((t, t)),
                                                     aligned=False))
        # Different evaluation times
        t_multiple = [[0, 0.5], [0.2, 0.7]]
        np.testing.assert_array_almost_equal(f(t_multiple[0])[0],
                                             f(t_multiple,
                                               aligned=False)[0])
        np.testing.assert_array_almost_equal(f(t_multiple[1])[1],
                                             f(t_multiple,
                                               aligned=False)[1])
    def test_domain_in_list_monomial(self):
        """Test the evaluation of FDataBasis"""
        # All of these domain_range spellings should be equivalent
        for monomial in (Monomial(domain_range=[(0, 1)], n_basis=3),
                         Monomial(domain_range=((0, 1),), n_basis=3),
                         Monomial(domain_range=np.array((0, 1)), n_basis=3),
                         Monomial(domain_range=np.array([(0, 1)]), n_basis=3)):
            coefficients = [[1, 2, 3], [0.5, 1.4, 1.3]]
            f = FDataBasis(monomial, coefficients)
            t = np.linspace(0, 1, 4)
            res = np.array([[1., 2., 3.667, 6.],
                            [0.5, 1.111, 2.011, 3.2]])[..., np.newaxis]
            np.testing.assert_array_almost_equal(f(t).round(3), res)
            np.testing.assert_array_almost_equal(f.evaluate(t).round(3), res)
class TestBasisEvaluationVectorValued(unittest.TestCase):
    """Evaluation tests for vector-valued (multi-codomain) bases."""
    def test_vector_valued_constant(self):
        # Two constant components -> codomain dimension 2
        basis_first = Constant()
        basis_second = Constant()
        basis = VectorValued([basis_first, basis_second])
        fd = FDataBasis(basis=basis, coefficients=[[1, 2], [3, 4]])
        self.assertEqual(fd.dim_codomain, 2)
        res = np.array([[[1, 2]], [[3, 4]]])
        np.testing.assert_allclose(fd(0), res)
    def test_vector_valued_constant_monomial(self):
        # Mixed components: constant first coordinate, monomial second
        basis_first = Constant(domain_range=(0, 5))
        basis_second = Monomial(n_basis=3, domain_range=(0, 5))
        basis = VectorValued([basis_first, basis_second])
        fd = FDataBasis(basis=basis, coefficients=[
            [1, 2, 3, 4], [3, 4, 5, 6]])
        self.assertEqual(fd.dim_codomain, 2)
        np.testing.assert_allclose(fd.domain_range[0], (0, 5))
        res = np.array([[[1, 2], [1, 9], [1, 24]],
                        [[3, 4], [3, 15], [3, 38]]])
        np.testing.assert_allclose(fd([0, 1, 2]), res)
class TestBasisEvaluationTensor(unittest.TestCase):
    """Evaluation tests for tensor-product (multi-domain) bases."""
    def test_tensor_monomial_constant(self):
        # 2-d domain: monomial in the first variable, constant in the second
        basis = Tensor([Monomial(n_basis=2), Constant()])
        fd = FDataBasis(basis=basis, coefficients=[1, 1])
        self.assertEqual(fd.dim_domain, 2)
        self.assertEqual(fd.dim_codomain, 1)
        np.testing.assert_allclose(fd([0., 0.]), [[[1.]]])
        np.testing.assert_allclose(fd([0.5, 0.5]), [[[1.5]]])
        np.testing.assert_allclose(
            fd([(0., 0.), (0.5, 0.5)]), [[[1.0], [1.5]]])
        # Round-trip through a grid representation preserves coefficients
        fd_grid = fd.to_grid()
        fd2 = fd_grid.to_basis(basis)
        np.testing.assert_allclose(fd.coefficients, fd2.coefficients)
if __name__ == '__main__':
    # blank line before the unittest output
    print()
    unittest.main()
| 2.4375 | 2 |
utils/models/train_step.py | sooooner/faster-RCNN | 1 | 12764387 | import tensorflow as tf
import time
from IPython.display import display
from utils.label_generator import classifier_label_generator
def frcnn_train_step(model, train_dataset, train_stage, epochs=1, valid_dataset=None, change_lr=False, rpn_lr=None, cls_lr=None):
    """Run one stage of Faster R-CNN 4-step alternating training.

    train_stage selects which sub-networks are trainable:
      1: train the RPN (classifier frozen)
      2: train the classifier on RPN proposals (RPN head frozen, shared
         convolutional base trainable)
      3: fine-tune the RPN with the shared base frozen
      4: fine-tune the classifier only
    Optionally resets the optimizers' learning rates first, and reports
    validation loss after each epoch when valid_dataset is given.
    Returns the (mutated) model.
    """
    # Optionally override the current learning rates in place.
    if change_lr:
        if rpn_lr:
            tf.keras.backend.set_value(model.rpn.optimizer.learning_rate, rpn_lr)
        if cls_lr:
            tf.keras.backend.set_value(model.classifier.optimizer.learning_rate, cls_lr)
    # Freeze/unfreeze sub-networks according to the requested stage.
    if train_stage == 1:
        print('Train RPNs \n')
        model.rpn.trainable = True
        model.classifier.trainable = False
    elif train_stage == 2:
        print('Train Fast R-CNN using the proposals from RPNs \n')
        model.rpn.trainable = False
        model.rpn.base_model.trainable = True
        model.classifier.trainable = True
    elif train_stage == 3:
        print('Fix the shared convolutional layers and fine-tune unique layers to RPN \n')
        model.rpn.trainable = True
        model.rpn.base_model.trainable = False
        model.classifier.trainable = False
    elif train_stage == 4:
        print('Fine-tune unique layers to Fast R-CNN \n')
        model.rpn.trainable = False
        model.classifier.trainable = True
    # Number of batches per epoch; learned after the first epoch's loop.
    max_step = 'Unknown'
    for epoch in range(epochs):
        epoch_start = time.time()
        print(f"epoch {epoch+1}/{epochs}")
        display_loss = display("Training loss at step 0 : 0", display_id=True)
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            start = time.time()
            # y: RPN objectness targets, RPN box-regression targets, ground truths
            y_cls_rpn, y_reg_rpn, gts = y_batch_train
            if train_stage == 1 or train_stage == 3:
                # RPN-only stages: a single RPN train step per batch.
                result = model.rpn.train_step((x_batch_train, (y_cls_rpn, y_reg_rpn)))
                losses = round(float(result['rpn_loss'].numpy()), 5)
            else:
                # Classifier stages: run RPN inference to get proposals...
                scores, rps, feature_map = model.rpn(x_batch_train, training=False)
                if train_stage == 2:
                    # ...stage 2 additionally updates the shared base via the RPN.
                    model.rpn.train_step((x_batch_train, (y_cls_rpn, y_reg_rpn)))
                # Decode proposals, keep top candidates, apply NMS, then train
                # the classifier head on the pooled ROIs.
                rps = model.rpn.inverse_bbox_regression(rps)
                candidate_area, scores = model.get_candidate((scores, rps, model.n_train_pre_nms))
                nms = model.get_nms((candidate_area, scores, model.n_train_post_nms))
                box_labels, cls_labels, nms = classifier_label_generator(nms, gts)
                rois = model.roipool((feature_map, nms))
                result = model.classifier.train_step(((rois, nms), (cls_labels, box_labels)))
                losses = round(float(result['classifier_loss'].numpy()), 5)
            display_loss.update(f"Training loss at step {step}/{max_step} : {losses} - {round(time.time() - start, 4)}sec/step - {time.strftime('%Hh%Mm%Ss', time.gmtime(time.time()-epoch_start))}/epoch")
            max_step = step
        display_loss.update(f"Training loss at step {step}/{max_step} : {losses} - {round(time.time()-start, 4)}sec/step - {time.strftime('%Hh%Mm%Ss', time.gmtime(time.time()-epoch_start))}/epoch")
        if valid_dataset is not None:
            # Mirror the training path in inference mode to report a
            # validation loss for the active stage.
            display_loss_valid = display("validation loss : 0", display_id=True)
            for x_batch_test, y_batch_test in valid_dataset:
                y_cls_rpn, y_reg_rpn, gts = y_batch_test
                if train_stage == 1 or train_stage == 3:
                    result = model.rpn.test_step((x_batch_test, (y_cls_rpn, y_reg_rpn)))
                    losses = round(float(result['rpn_loss_val'].numpy()), 5)
                else:
                    scores, rps, feature_map = model.rpn.predict(x_batch_test)
                    rps = model.rpn.inverse_bbox_regression(rps)
                    candidate_area, scores = model.get_candidate((scores, rps, model.n_test_pre_nms))
                    nms = model.get_nms((candidate_area, scores, model.n_test_post_nms))
                    box_labels, cls_labels, nms = classifier_label_generator(nms, gts, valid=True)
                    rois = model.roipool((feature_map, nms))
                    result = model.classifier.test_step(((rois, nms), (cls_labels, box_labels)))
                    losses = round(float(result['classifier_loss_val'].numpy()), 5)
            display_loss_valid.update(f"validation loss : {losses}")
    return model
setup.py | dew-uff/versioned-prov | 1 | 12764388 | <gh_stars>1-10
from setuptools import setup
setup(
name='versioned-prov',
version='1.0.0',
description='Versioned-PROV website dependencies',
url='https://github.com/dew-uff/versioned-prov',
author=('<NAME>, <NAME>, '
'<NAME>, <NAME>'),
author_email='<EMAIL>',
license='MIT',
install_requires = [
'extensible_provn==0.2.1',
'pandas==0.20.1',
'numpy==1.12.1',
'matplotlib==2.2.2',
'pypandoc==1.4',
],
zip_safe=False
)
| 1.242188 | 1 |
bots/td_crosspost.py | elnuno/reddit_bots | 0 | 12764389 | from time import time, strftime, sleep
import praw
source = 'the_donald'
dest = 'td_uncensored'
log_file = 'td_bot_log.txt'
reddit = praw.Reddit(
client_id='client_id',
client_secret='client_secret',
password='password',
username='username',
user_agent='linux:td_uncensored:0.1 (by /u/username)'
)
def log_event(string):
"""Log events and errors"""
with open(log_file, 'a') as log:
log.write('{}\t\t'.format(strftime("%Y-%m-%d\t%H:%M:%S")) + string + '\n')
def cross_post(sub):
"""Create the cross-post"""
if sub.selftext:
reddit.subreddit(dest).submit(title=sub.title, selftext=sub.selftext, send_replies=False)
return
reddit.subreddit(dest).submit(title=sub.title, url=sub.url, send_replies=False)
def main():
"""Stream and cross-post submissions. Exit if more than 5 restarts"""
resets = 0
while resets < 5:
start = time()
try:
for submission in reddit.subreddit(source).stream.submissions():
if not submission.created_utc < start:
cross_post(submission)
except Exception as e:
log_event('Reset\t\t{}: {}'.format(type(e).__name__, e))
resets += 1
sleep(60)
continue
log_event('Stopped\t\tExcessive Restarts\n')
if __name__ == '__main__':
log_event('Start')
main()
| 2.6875 | 3 |
clustering/mysql_helper.py | marc1404/msc-thesis | 1 | 12764390 | <filename>clustering/mysql_helper.py
import mysql.connector
from dotenv import load_dotenv
import os
load_dotenv(dotenv_path='.env')
def connect():
print('Connecting to MySQL database...')
config = {
'host': os.getenv('MYSQL_HOST'),
'user': os.getenv('MYSQL_USER'),
'password': os.getenv('<PASSWORD>'),
'database': os.getenv('MYSQL_DATABASE')
}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
print('Done.')
return cnx, cursor
def disconnect(cnx, cursor):
print('Closing MySQL connection...')
cnx.commit()
cursor.close()
cnx.close()
print('Done.')
| 2.546875 | 3 |
task6.py | SimeonHristov99/SQLEveryDay | 0 | 12764391 | import pandas as pd
averages_df = employee.groupby(by='department').mean()[['salary']]
merged_df = pd.merge(employee, averages_df, how='inner',
left_on='department', right_on=averages_df.index)
merged_df[['department', 'first_name', 'salary_x', 'salary_y']]
| 3.21875 | 3 |
python/iceberg/api/expressions/expression_parser.py | moulimukherjee/incubator-iceberg | 2,161 | 12764392 | <reponame>moulimukherjee/incubator-iceberg
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Derived from the SimpleSQL Parser example in pyparsing, retrofitted to just handle the
# where clause predicates
# https://github.com/pyparsing/pyparsing/blob/master/examples/simpleSQL.py
import logging
from pyparsing import (
alphanums,
alphas,
CaselessKeyword,
delimitedList,
Group,
infixNotation,
oneOf,
opAssoc,
pyparsing_common as ppc,
quotedString,
Word
)
_logger = logging.getLogger(__name__)
AND, OR, IN, IS, NOT, NULL, BETWEEN = map(
CaselessKeyword, "and or in is not null between".split()
)
NOT_NULL = NOT + NULL
ident = Word(alphas, alphanums + "_$").setName("identifier")
columnName = delimitedList(ident, ".", combine=True).setName("column name")
binop = oneOf("= == != < > >= <= eq ne lt le gt ge <>", caseless=False)
realNum = ppc.real()
intNum = ppc.signed_integer()
columnRval = (realNum
| intNum
| quotedString
| columnName) # need to add support for alg expressions
whereCondition = Group(
(columnName + binop + columnRval)
| (columnName + IN + Group("(" + delimitedList(columnRval) + ")"))
| (columnName + IS + (NULL | NOT_NULL))
| (columnName + BETWEEN + columnRval + AND + columnRval)
)
whereExpression = infixNotation(
Group(whereCondition
| NOT + whereCondition
| NOT + Group('(' + whereCondition + ')')
| NOT + columnName),
[(NOT, 1, opAssoc.LEFT), (AND, 2, opAssoc.LEFT), (OR, 2, opAssoc.LEFT), (IS, 2, opAssoc.LEFT)],
)
op_map = {"=": "eq",
"==": "eq",
"eq": "eq",
">": "gt",
"gt": "gt",
">=": "gte",
"gte": "gte",
"<": "lt",
"lt": "lt",
"<=": "lte",
"lte": "lte",
"!": "not",
"not": "not",
"!=": "neq",
"<>": "neq",
"neq": "neq",
"||": "or",
"or": "or",
"&&": "and",
"and": "and",
"in": "in",
"between": "between",
"is": "is"}
def get_expr_tree(tokens):
if isinstance(tokens, (str, int)):
return tokens
if len(tokens) > 1:
if (tokens[0] == "not"):
return {"not": get_expr_tree(tokens[1])}
if (tokens[0] == "(" and tokens[-1] == ")"):
return get_expr_tree(tokens[1:-1])
else:
return get_expr_tree(tokens[0])
op = op_map[tokens[1]]
if op == "in":
return {'in': [get_expr_tree(tokens[0]), [token for token in tokens[2][1:-1]]]}
elif op == "between":
return {'and': [{"gte": [get_expr_tree(tokens[0]), tokens[2]]},
{"lte": [get_expr_tree(tokens[0]), tokens[4]]}]}
elif op == "is":
if tokens[2] == 'null':
return {"missing": tokens[0]}
else:
return {"exists": tokens[0]}
if len(tokens) > 3:
binary_tuples = get_expr_tree(tokens[2:])
else:
binary_tuples = get_expr_tree(tokens[2])
return {op: [get_expr_tree(tokens[0]),
binary_tuples]}
def get_expr(node, expr_map):
if isinstance(node, dict):
for i in node.keys():
op = i
if op == "literal":
return node["literal"]
mapped_op = expr_map.get(op, expr_map)
if len(mapped_op) == 1:
mapped_op = mapped_op[0]
if mapped_op is None:
raise RuntimeError("no mapping for op: %s" % op)
if op in ("not", "exists", "missing"):
return mapped_op(get_expr(node[op], expr_map))
return mapped_op(*get_expr(node[op], expr_map))
elif isinstance(node, (list, tuple)):
return (get_expr(item, expr_map) for item in node)
elif isinstance(node, (str, int, float)):
return node
else:
raise RuntimeError("unknown node type" % node)
def parse_expr_string(predicate_string, expr_map):
from pyparsing import ParseException
try:
expr = whereExpression.parseString(predicate_string, parseAll=True)
expr = get_expr_tree(expr)
return get_expr(expr, expr_map)
except ParseException as pe:
_logger.error("Error parsing string expression into iceberg expression: %s" % str(pe))
raise
| 1.75 | 2 |
DAY_1/SOLUTION_2.py | Malod219/AdventOfCode2019 | 0 | 12764393 | <reponame>Malod219/AdventOfCode2019
inputFile = str(input("Input file"))
f = open(inputFile,"r")
data = f.readlines()
f.close()
runningFuelSum = 0
for line in data:
line = line.replace("\n","")
mass = int(line)
fuelNeeded = (mass//3)-2
runningFuelSum += fuelNeeded
while(fuelNeeded > 0):
fuelNeeded = (fuelNeeded//3)-2
if(fuelNeeded>=0):
runningFuelSum += fuelNeeded
print(runningFuelSum) | 3.375 | 3 |
blazar/api/v2/controllers/extensions/host.py | takanattie/blazar | 24 | 12764394 | <reponame>takanattie/blazar
# Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import pecan
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from blazar.api.v2.controllers import base
from blazar.api.v2.controllers import extensions
from blazar.api.v2.controllers import types
from blazar import exceptions
from blazar.i18n import _
from blazar import policy
from blazar.utils import trusts
LOG = logging.getLogger(__name__)
class Host(base._Base):
id = types.IntegerType()
"The ID of the host"
hypervisor_hostname = wtypes.text
"The hostname of the host"
# FIXME(sbauza): API V1 provides 'name', so mapping is necessary until we
# patch the client
name = hypervisor_hostname
hypervisor_type = wtypes.text
"The type of the hypervisor"
vcpus = types.IntegerType()
"The number of VCPUs of the host"
hypervisor_version = types.IntegerType()
"The version of the hypervisor"
memory_mb = types.IntegerType()
"The memory size (in Mb) of the host"
local_gb = types.IntegerType()
"The disk size (in Gb) of the host"
cpu_info = types.CPUInfo()
"The CPU info JSON data given by the hypervisor"
trust_id = types.UuidType()
"The ID of the trust created for delegating the rights of the user"
extra_capas = wtypes.DictType(wtypes.text, types.TextOrInteger())
"Extra capabilities for the host"
@classmethod
def convert(cls, rpc_obj):
extra_keys = [key for key in rpc_obj
if key not in
[i.key for i in wtypes.list_attributes(Host)]]
extra_capas = dict((capa, rpc_obj[capa])
for capa in extra_keys if capa not in ['status'])
rpc_obj['extra_capas'] = extra_capas
obj = cls(**rpc_obj)
return obj
def as_dict(self):
dct = super(Host, self).as_dict()
extra_capas = dct.pop('extra_capas', None)
if extra_capas is not None:
dct.update(extra_capas)
return dct
@classmethod
def sample(cls):
return cls(id='1',
hypervisor_hostname='host01',
hypervisor_type='QEMU',
vcpus=1,
hypervisor_version=1000000,
memory_mb=8192,
local_gb=50,
cpu_info="{\"vendor\": \"Intel\", \"model\": \"qemu32\", "
"\"arch\": \"x86_64\", \"features\": [],"
" \"topology\": {\"cores\": 1}}",
extra_capas={'vgpus': 2, 'fruits': 'bananas'},
)
class HostsController(extensions.BaseController):
"""Manages operations on hosts."""
name = 'oshosts'
extra_routes = {'os-hosts': 'oshosts',
'oshosts': None}
@policy.authorize('oshosts', 'get')
@wsme_pecan.wsexpose(Host, types.IntegerType())
def get_one(self, id):
"""Returns the host having this specific uuid
:param id: ID of host
"""
host_dct = pecan.request.hosts_rpcapi.get_computehost(id)
if host_dct is None:
raise exceptions.NotFound(object={'host_id': id})
return Host.convert(host_dct)
@policy.authorize('oshosts', 'get')
@wsme_pecan.wsexpose([Host], q=[])
def get_all(self):
"""Returns all hosts."""
return [Host.convert(host)
for host in
pecan.request.hosts_rpcapi.list_computehosts()]
@policy.authorize('oshosts', 'post')
@wsme_pecan.wsexpose(Host, body=Host, status_code=201)
@trusts.use_trust_auth()
def post(self, host):
"""Creates a new host.
:param host: a host within the request body.
"""
# here API should go to Keystone API v3 and create trust
host_dct = host.as_dict()
# FIXME(sbauza): DB exceptions are currently catched and return a lease
# equal to None instead of being sent to the API
host = pecan.request.hosts_rpcapi.create_computehost(host_dct)
if host is not None:
return Host.convert(host)
else:
raise exceptions.BlazarException(_("Host can't be created"))
@policy.authorize('oshosts', 'put')
@wsme_pecan.wsexpose(Host, types.IntegerType(), body=Host)
def put(self, id, host):
"""Update an existing host.
:param id: ID of a host.
:param host: a subset of a Host containing values to update.
"""
host_dct = host.as_dict()
host = pecan.request.hosts_rpcapi.update_computehost(id, host_dct)
if host is None:
raise exceptions.NotFound(object={'host_id': id})
return Host.convert(host)
@policy.authorize('oshosts', 'delete')
# NOTE(sbauza): We need to expose text for parameter type as Manager is
# expecting it and int raises an AttributeError
@wsme_pecan.wsexpose(None, wtypes.text,
status_code=204)
def delete(self, id):
"""Delete an existing host.
:param id: UUID of a host.
"""
try:
pecan.request.hosts_rpcapi.delete_computehost(id)
except TypeError:
# The host was not existing when asking to delete it
raise exceptions.NotFound(object={'host_id': id})
| 1.945313 | 2 |
murano/tests/unit/db/migration/test_migrations_base.py | chenyujie/hybrid-murano | 0 | 12764395 | <filename>murano/tests/unit/db/migration/test_migrations_base.py<gh_stars>0
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Ripped off from Nova's test_migrations.py
# The only difference between Nova and this code is usage of alembic instead
# of sqlalchemy migrations.
#
# There is an ongoing work to extact similar code to oslo incubator. Once it is
# extracted we'll be able to remove this file and use oslo.
import io
import os
from alembic import command
from alembic import config as alembic_config
from alembic import migration
from alembic import script as alembic_script
from oslo_config import cfg
from murano.common.i18n import _LE
import murano.db.migration
from murano.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BaseWalkMigrationTestCase(object):
ALEMBIC_CONFIG = alembic_config.Config(
os.path.join(os.path.dirname(murano.db.migration.__file__),
'alembic.ini')
)
ALEMBIC_CONFIG.murano_config = CONF
def _configure(self, engine):
"""For each type of repository we should do some of configure steps.
For migrate_repo we should set under version control our database.
For alembic we should configure database settings. For this goal we
should use oslo.config and openstack.commom.db.sqlalchemy.session with
database functionality (reset default settings and session cleanup).
"""
CONF.set_override('connection', str(engine.url), group='database')
def _alembic_command(self, alembic_command, engine, *args, **kwargs):
"""Most of alembic command return data into output.
We should redefine this setting for getting info.
"""
self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
CONF.set_override('connection', str(engine.url), group='database')
getattr(command, alembic_command)(*args, **kwargs)
res = buf.getvalue().strip()
LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res))
return res
def _up_and_down_versions(self):
"""Since alembic version has a random algorithm of generation
(SA-migrate has an ordered autoincrement naming) we should store
a tuple of versions (version for upgrade and version for downgrade)
for successful testing of migrations in up>down>up mode.
"""
env = alembic_script.ScriptDirectory.from_config(self.ALEMBIC_CONFIG)
versions = []
for rev in env.walk_revisions():
versions.append((rev.revision, rev.down_revision or '-1'))
versions.reverse()
return versions
def walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
self._configure(engine)
up_and_down_versions = self._up_and_down_versions()
for ver_up, ver_down in up_and_down_versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, ver_up, with_data=True)
if snake_walk:
downgraded = self._migrate_down(engine,
ver_down,
with_data=True,
next_version=ver_up)
if downgraded:
self._migrate_up(engine, ver_up)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
up_and_down_versions.reverse()
for ver_up, ver_down in up_and_down_versions:
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine,
ver_down, next_version=ver_up)
if snake_walk and downgraded:
self._migrate_up(engine, ver_up)
self._migrate_down(engine, ver_down, next_version=ver_up)
def _get_version_from_db(self, engine):
"""For each type of migrate repo latest version from db
will be returned.
"""
conn = engine.connect()
try:
context = migration.MigrationContext.configure(conn)
version = context.get_current_revision() or '-1'
finally:
conn.close()
return version
def _migrate(self, engine, version, cmd):
"""Base method for manipulation with migrate repo.
It will upgrade or downgrade the actual database.
"""
self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)
def _migrate_down(self, engine, version, with_data=False,
next_version=None):
try:
self._migrate(engine, version, 'downgrade')
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self._get_version_from_db(engine))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%s" % next_version, None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
check_version = version
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % check_version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self._migrate(engine, version, 'upgrade')
self.assertEqual(version, self._get_version_from_db(engine))
if with_data:
check = getattr(self, "_check_%s" % check_version, None)
if check:
check(engine, data)
except Exception:
LOG.error(_LE(
"Failed to migrate to version %(ver)s on engine %(eng)s") %
{'ver': version, 'eng': engine})
raise
| 1.773438 | 2 |
algo/main.py | xuedong/time-series | 4 | 12764396 | # Fichier permettant de moduler les differentes methodes de clustering
try:
# Import generaux
import numpy as np
import pylab
import sys
import platform
import matplotlib.pyplot as plt
import re
# Import locaux
import kmeans
import rkde
except:
exit(1)
""" Clustering """
# Clusterise les donnees avec la methode desiree
# Entree :
# - M : la matrice des distances entre les objets
# - methode : une chaine de caractere donnant le nom de la methode (nom de module)
# - params : une liste des parametres requis pour la methode demandee
# - kmeans : params = [k, n_iter]
# - rkde : params = [bandwidth, prob]
# Sortie :
# - assign : un tableau donnant pour chaque entier (objet) son numero de cluster
# - nb_cluster : le nombre de clusters formes
def make_clusters(M, methode, params):
function = methode + ".do"
assign, nb_clusters = eval(function)(M, params[0], params[1])
return assign, nb_clusters
""" Lecture et affichage de donnees """
# Fonction de lecture dans un fichier
# Entree :
# - file_name : une chaine de caracteres donnant le nom du fichier a ouvrir
# - nb_item : nombre de lignes a lire (-1 pour tout lire, defaut a -1)
# Sortie :
# - data : une liste de liste de flottants
def read_data(file_name, nb_item = -1):
f = open(file_name,'r')
data = []
cpt = 0
for line in f:
if (0 <= nb_item and nb_item <= cpt):
break
line = re.split('\s+', line) # '\s' matches whitespace characters
line = [float(x) for x in line if x != '']
data.append(line)
cpt += 1
f.close()
return data
# Fonction d'affichage d'un nuage de points
# Entree :
# - data : un ensemble de points sous la forme d'une matrice de taille n*2
# - assign : un tableau de taille n representant une assignation de [data]
def show(data, assign):
colors = "bgrcmyk"
symbols = ".ov18sp*h+xD_"
nb_clusters = max(assign) + 1
pylab.figure()
mini = min( min(data[:][0]), min(data[:][1]) )
maxi = max( max(data[i][0]), max(data[i][1]) )
pylab.xlim([mini, maxi])
pylab.ylim([mini, maxi])
if (nb_clusters < 8):
for i_k in range(nb_clusters):
pylab.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k] + ".")
else:
for i_k in range(nb_clusters):
pylab.plot( [data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k % 7]) + symbols[int(i_k / 7)]
pylab.show()
""" Lecture et ecriture d'une assignation """
# Lis un fichier ou est inscrit une assignation.
# Entree :
# - file : adresse et nom du fichier
# Sortie :
# - assign : un vecteur numpy d'entiers
def read_assign(file_name):
f = open(file_name,'r')
assign_tmp = []
i = 0
for line in f:
try:
assign_tmp.append(int(line))
i = i + 1
except ValueError:
continue
f.close()
return np.array(assign_tmp)
# Ecris une assignation dans un fichier
# Entree :
# - file_name : adresse et nom d'un fichier
# - assign : l'assignation a ecrire
# - nb_iter : le nombre d'iterations faites par l'algorithme (-1) s'il n'est pas
# base sur ce principe
# - s : la seed utilisee pour le clustering
def write_cluster(file_name, assign, nb_iter, s):
nb_data = len(assign)
nb_cluster = max(assign) + 1
f = open(file_name, 'w')
f.write('nb_cluster = ' + str(nb_cluster) + '\n')
f.write('nb_iter = ' + str(nb_iter) + '\n')
f.write('nb_data = ' + str(nb_data) + '\n')
f.write('seed = ' + str(s) + '\n')
for i in assign:
f.write(str(i) + '\n')
f.close()
""" Fonctions non encore retravaillees """
# Fonction pour enregistrer des images :
# data_file = fichier contenant les donnees
# assign_file = fichier cree a partir du clustering et contenant la table d'assignation
# file_figure = nom du fichier dans lequel sera enregistre l'image
# format = nom de l'extention du fichier cree (pdf,svg,png...)
# exemple : save('cercles/cercles.txt', 'cercles_kmeans', 'figure_cercles_kmeans', 'pdf')
def save(data_file, assign_file,file_figure,format):
data = read_data(data_file)
assign = read_assign(data,assign_file)
nombre_clusters = numpy.amax(assign) +1
plt.ioff()
fig = plt.figure()
colors = "bgrcmyk"
symbols = ".ov18sp*h+xD_"
mini = min( min([data[i][0] for i in range(len(data))]), min([data[i][1] for i in range(len(data))]) )
maxi = max( max([data[i][0] for i in range(len(data))]), max([data[i][1] for i in range(len(data))]) )
plt.xlim([mini, maxi])
plt.ylim([mini, maxi])
if (nombre_clusters < 8):
for i_k in range(nombre_clusters):
plt.plot([data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k] + ".")
else:
if (nombre_clusters < 85):
for i_k in range(nombre_clusters):
plt.plot( [data[i][0] for i in range(len(data)) if assign[i] == i_k],
[data[i][1] for i in range(len(data)) if assign[i] == i_k],
colors[i_k % 7] + symbols[int(i_k / 7)] )
else:
print("too many clusters")
if (platform.system() == "Windows"):
plt.savefig('C:/users/alex/documents/Alex/Cours/ENS/M1_Cours/Projet/data/Results/'+file_figure+'.'+format)
else:
plt.savefig('../data/Results/'+file_figure+'.'+format)
plt.close(fig)
| 3.171875 | 3 |
main/app.py | anngle/myapp | 0 | 12764397 | <reponame>anngle/myapp
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from main import commands, public, user, admin
from main.extensions import bcrypt, cache, csrf_protect, db, \
debug_toolbar, login_manager, migrate, redis_store, principal
from main.settings import ProdConfig
from main import models
from flask_sse import sse
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0],static_folder='../static',template_folder='../tpl')
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
redis_store.init_app(app)
# rbac.init_app(app)
principal.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.bp)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(admin.bp)
app.register_blueprint(sse, url_prefix='/stream')
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User,
'sysconfig': admin.models.SysConfig,
}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
| 2.3125 | 2 |
hive_attention_tokens/chain/consensus/peers.py | imwatsi/hive-attention-tokens | 0 | 12764398 | <filename>hive_attention_tokens/chain/consensus/peers.py
"""Consensus on the state of peer nodes.""" | 1.054688 | 1 |
models/SCADC_networks.py | choyingw/SCADC-DepthCompletion | 9 | 12764399 | <filename>models/SCADC_networks.py
import torch
import torch.nn as nn
from torch.nn import init
import torchvision
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
from copy import deepcopy
import numpy as np
import cv2
import collections
import matplotlib.pyplot as plt
from .submodule import *
def get_norm_layer(norm_type='instance'):
    """Return a constructor for the requested 2D normalization layer.

    Args:
        norm_type: one of 'batch', 'instance', or 'none'.

    Returns:
        A ``functools.partial`` wrapping the norm-layer class with its
        configuration baked in, or ``None`` when ``norm_type`` is 'none'.

    Raises:
        NotImplementedError: for any unrecognized ``norm_type``.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        # Instance norm here is non-affine but tracks running statistics.
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
    if norm_type == 'none':
        return None
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for ``optimizer`` from run options.

    Args:
        optimizer: the torch optimizer to schedule.
        opt: options object; ``opt.lr_policy`` selects the scheduler.
            'lambda' uses ``opt.lr_gamma`` / ``opt.lr_decay_epochs``;
            'step' uses ``opt.lr_decay_iters``.

    Returns:
        A ``torch.optim.lr_scheduler`` instance.

    Raises:
        NotImplementedError: if ``opt.lr_policy`` is not supported.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # Multiply the base LR by lr_gamma once every lr_decay_epochs.
            return opt.lr_gamma ** ((epoch + 1) // opt.lr_decay_epochs)
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    else:
        # BUGFIX: the original *returned* the exception object instead of
        # raising it, and passed the policy name as a stray second
        # constructor argument instead of %-formatting the message.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', gain=0.02):
    """Initialize the parameters of ``net`` in place.

    Conv/Linear weights are initialized according to ``init_type`` and
    their biases zeroed; BatchNorm2d weights are drawn from N(1, gain)
    with zeroed biases. ``init_type='pretrained'`` leaves Conv/Linear
    parameters untouched.

    Args:
        net: module whose sub-modules are initialized via ``net.apply``.
        init_type: 'normal' | 'xavier' | 'kaiming' | 'orthogonal' | 'pretrained'.
        gain: scale used by the 'normal', 'xavier' and 'orthogonal' schemes.

    Raises:
        NotImplementedError: for an unrecognized ``init_type``.
    """
    # (removed a no-op ``net = net`` self-assignment from the original)
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            elif init_type == 'pretrained':
                pass  # keep pretrained weights as-is
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            # Bias is zeroed for every scheme except 'pretrained'.
            if hasattr(m, 'bias') and m.bias is not None and init_type != 'pretrained':
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Place ``net`` on the requested GPU(s) and initialize its weights.

    When ``gpu_ids`` is non-empty the network is moved to the first GPU
    and wrapped in ``DataParallel``.  Only the sub-modules each top-level
    child lists in its ``need_initialization`` attribute are initialized
    (this lets pretrained parts keep their weights).

    Returns the (possibly wrapped) network.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    for child in net.children():
        # Each top-level child declares which of its own sub-modules
        # still need fresh weights — presumably the non-pretrained parts.
        for grandchild in child.children():
            if grandchild in child.need_initialization:
                init_weights(grandchild, init_type, gain=init_gain)
    return net
def define_SCADCNet(init_type='xavier', init_gain=0.02, gpu_ids=[]):
    """Construct the SCADC generator and delegate device placement and
    weight initialization to :func:`init_net`."""
    return init_net(SCADCNetGenerator(), init_type, init_gain, gpu_ids)
##############################################################################
# Classes
##############################################################################
class SAConv(nn.Module):
    """Sparsity-aware convolution for sparse (e.g. depth) inputs.

    Takes a ``(features, mask)`` pair: features are gated by the validity
    mask before the convolution, and the mask is propagated through a
    max-pool with the same geometry so downstream layers stay aligned
    with the convolved features.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, bias=True):
        super(SAConv, self).__init__()
        # bias=False on the conv: the (optional) bias is added manually in
        # forward() after the masked convolution.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False)
        self.if_bias = bias
        if self.if_bias:
            self.bias = nn.Parameter(torch.zeros(out_channels).float(), requires_grad=True)
        # Max-pool with identical geometry keeps the mask spatially aligned
        # with the conv output.
        self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding, dilation=dilation)
        nn.init.kaiming_normal_(self.conv.weight, mode='fan_out', nonlinearity='relu')
        # (removed ``self.pool.require_grad = False`` from the original: a
        # misspelling of ``requires_grad`` and a no-op — MaxPool2d has no
        # parameters.)

    def forward(self, input):
        """Apply the masked convolution; returns ``(features, mask)``."""
        x, m = input
        x = self.conv(x * m)
        # NOTE(review): the original also computed a valid-count
        # normalization factor ``mc = 9 / conv(m, ones)`` on the GPU but
        # never applied it to ``x``. The dead computation (and its hard
        # unconditional ``.cuda()`` dependency) has been removed; if true
        # sparsity normalization is intended, multiply ``x`` by ``mc`` here.
        if self.if_bias:
            x = x + self.bias.view(1, self.bias.size(0), 1, 1).expand_as(x)
        m = self.pool(m)
        return x, m
class SAConvBlock(nn.Module):
    """A sparsity-aware conv followed by ReLU, operating on a
    ``(features, mask)`` pair and returning the same pair structure."""

    def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=1, dilation=1, bias=True):
        super(SAConvBlock, self).__init__()
        self.sparse_conv = SAConv(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input):
        features, mask = self.sparse_conv(input)
        # The propagated mask must stay single-channel.
        assert (mask.size(1) == 1)
        return self.relu(features), mask
def make_blocks_from_names(names, in_dim, out_dim):
    """Build a VGG-style stage of stacked SAConvBlocks.

    Stages named "block1"/"block2" get two blocks; later stages get
    three.  The first block maps ``in_dim -> out_dim``; the rest keep
    ``out_dim``.
    """
    depth = 2 if names[0] in ("block1", "block2") else 3
    layers = [SAConvBlock(in_dim, out_dim, 3, stride=1)]
    layers.extend(SAConvBlock(out_dim, out_dim, 3, stride=1) for _ in range(depth - 1))
    return nn.Sequential(*layers)
#######
# Network building blocks: sparsity-aware convolutions and stacked
# hourglass (encoder-decoder) refinement modules.
#######
class hourglass(nn.Module):
    """Hourglass (encoder-decoder) refinement module.

    Downsamples the input feature map twice with stride-2 convolutions
    and restores resolution with two transposed convolutions, exposing
    intermediate tensors (``pre``/``post``) so several hourglasses can be
    stacked with cross-module skip connections via the ``presqu`` /
    ``postsqu`` arguments.  Channel count doubles at the first
    downsampling and returns to ``inplanes`` at the output.
    """
    def __init__(self, inplanes):
        # NOTE: ``convbn`` comes from .submodule (conv + batch-norm
        # helper); its exact semantics are defined there.
        super(hourglass, self).__init__()
        # Encoder step 1: halve resolution, double channels.
        self.conv1 = nn.Sequential(convbn(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1, dilation=1),
                                   nn.ReLU(inplace=True))
        # No ReLU here: the activation is applied in forward() after the
        # optional skip addition with ``postsqu``.
        self.conv2 = convbn(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1, dilation=1)
        # Encoder step 2: halve resolution again (channels unchanged).
        self.conv3 = nn.Sequential(convbn(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1, dilation=1),
                                   nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(convbn(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1, dilation=1),
                                   nn.ReLU(inplace=True))
        # Decoder: transposed convs upsample back; BatchNorm only — ReLU
        # is applied in forward() after the skip addition.
        self.conv5 = nn.Sequential(nn.ConvTranspose2d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
                                   nn.BatchNorm2d(inplanes*2))
        self.conv6 = nn.Sequential(nn.ConvTranspose2d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
                                   nn.BatchNorm2d(inplanes))
    def forward(self, x ,presqu, postsqu):
        """Run the hourglass.

        Args:
            x: input feature map (nominally at 1/4 of full resolution —
               the inline scale comments assume that; confirm at call site).
            presqu: optional skip tensor added before the final ReLU of
                the first upsampling stage (from a previous hourglass).
            postsqu: optional skip tensor added to the encoder's ``pre``
                features (from a previous hourglass).

        Returns:
            (out, pre, post): the refined features plus the two
            intermediate tensors for use as skips by the next hourglass.
        """
        out = self.conv1(x) #in:1/4 out:1/8
        pre = self.conv2(out) #in:1/8 out:1/8
        # Fuse the skip from the previous hourglass when present; ReLU is
        # applied in-place on the (new) sum tensor.
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        out = self.conv3(pre) #in:1/8 out:1/16
        out = self.conv4(out) #in:1/16 out:1/16
        # First upsampling stage: add ``presqu`` when stacked, otherwise
        # use this hourglass's own ``pre`` as the skip.
        if presqu is not None:
            post = F.relu(self.conv5(out)+presqu, inplace=True) #in:1/16 out:1/8
        else:
            post = F.relu(self.conv5(out)+pre, inplace=True)
        out = self.conv6(post) #in:1/8 out:1/4
        return out, pre, post
class hourglass_l(nn.Module):
    """Wider variant of ``hourglass``: the bottleneck expands to inplanes*4.

    Structure mirrors ``hourglass`` exactly (two stride-2 encoder stages,
    two transposed-conv decoder stages, ``pre``/``post`` skip outputs);
    only the channel widths of the second encoder stage differ.
    """

    def __init__(self, inplanes):
        super(hourglass_l, self).__init__()
        # encoder stage 1: halve resolution, inplanes -> inplanes*2
        self.conv1 = nn.Sequential(convbn(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1, dilation=1),
                                   nn.ReLU(inplace=True))
        # same scale; ReLU deferred to forward() (after optional skip add)
        self.conv2 = convbn(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1, dilation=1)
        # encoder stage 2: halve resolution again, widen to inplanes*4
        self.conv3 = nn.Sequential(convbn(inplanes*2, inplanes*4, kernel_size=3, stride=2, pad=1, dilation=1),
                                   nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(convbn(inplanes*4, inplanes*4, kernel_size=3, stride=1, pad=1, dilation=1),
                                   nn.ReLU(inplace=True))
        # decoder: inplanes*4 -> inplanes*2 (added to conv2 output / presqu)
        self.conv5 = nn.Sequential(nn.ConvTranspose2d(inplanes*4, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
                                   nn.BatchNorm2d(inplanes*2))  # +conv2
        # decoder: inplanes*2 -> inplanes (back at the input scale)
        self.conv6 = nn.Sequential(nn.ConvTranspose2d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
                                   nn.BatchNorm2d(inplanes))  # +x

    def forward(self, x, presqu, postsqu):
        """Same contract as ``hourglass.forward``: returns (out, pre, post)."""
        out = self.conv1(x)    # in:1/4 out:1/8
        pre = self.conv2(out)  # in:1/8 out:1/8
        if postsqu is not None:
            # fuse skip from the previous hourglass before activating
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)
        out = self.conv3(pre)  # in:1/8 out:1/16
        out = self.conv4(out)  # in:1/16 out:1/16
        # Each hourglass has its own structure. This is fine.
        if presqu is not None:
            post = F.relu(self.conv5(out) + presqu, inplace=True)  # in:1/16 out:1/8
        else:
            post = F.relu(self.conv5(out) + pre, inplace=True)
        out = self.conv6(post)  # in:1/8 out:1/4
        return out, pre, post
class SCADCNetGenerator(nn.Module):
    """Depth-completion generator.

    Predicts an attention map from the sparse sample map via sparsity-aware
    convolutions, fuses two depth hypotheses (disparity-derived and raw
    depth) with that attention, and refines the fused map through three
    stacked hourglasses with deep supervision (one logit map each).
    """

    def __init__(self):
        super(SCADCNetGenerator, self).__init__()
        batchNorm_momentum = 0.1  # NOTE(review): unused local -- candidate for removal
        # presumably filled by external weight-init code -- verify against callers
        self.need_initialization = []
        # sparsity-aware stem: (sparse map, mask) -> 2-channel attention logits
        self.sa1 = make_blocks_from_names(["block1"], 1, 64)
        self.sa2 = make_blocks_from_names(["block2"], 64, 2)
        # feature extraction on the fused single-channel depth map
        self.conv1 = nn.Sequential(convbn(1, 32, kernel_size=3, stride=1, pad=1, dilation=1), nn.ReLU(inplace=True))
        self.conv2 = nn.Sequential(convbn(32, 32, kernel_size=3, stride=1, pad=1, dilation=1), nn.ReLU(inplace=True))
        # three stacked hourglasses for iterative refinement
        self.hg_0 = hourglass(32)
        self.hg_1 = hourglass(32)
        self.hg_2 = hourglass(32)
        # one prediction head per hourglass (deep supervision); outputs are
        # accumulated residually in forward()
        self.classif1 = nn.Sequential(convbn(32, 32, 3, 1, 1, 1), nn.ReLU(inplace=True),
                                      nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
                                      nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
                                      nn.Conv2d(32, 1, kernel_size=1, padding=0, stride=1))
        self.classif2 = nn.Sequential(convbn(32, 32, 3, 1, 1, 1), nn.ReLU(inplace=True),
                                      nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
                                      nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
                                      nn.Conv2d(32, 1, kernel_size=1, padding=0, stride=1))
        self.classif3 = nn.Sequential(convbn(32, 32, 3, 1, 1, 1), nn.ReLU(inplace=True),
                                      nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
                                      nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
                                      nn.Conv2d(32, 1, kernel_size=1, padding=0, stride=1))
        self.count = 0  # NOTE(review): never used inside this class -- confirm external use

    def forward(self, in_disp, in_depth, sp_map, mask):
        """Returns (logits_1, logits_2, logits_3, AMP_dep).

        logits_* are the (residually accumulated) per-hourglass depth maps;
        AMP_dep is the attention weight assigned to the depth branch.
        """
        # Attention Map Prediction from the sparse samples + validity mask.
        fmap_1, m_1 = self.sa1((sp_map, mask))
        fmap_2, m_2 = self.sa2((fmap_1, m_1))
        ## Using softmax for AMP
        AMP = F.softmax(fmap_2, dim=1)
        AMP_dep = AMP[:, 1, :, :].unsqueeze(1)  # channel 1 = depth-branch weight
        # Attention-weighted fusion of the two depth hypotheses -> 1 channel.
        in_1 = torch.sum((torch.cat((in_disp, in_depth), dim=1) * AMP), dim=1).unsqueeze(1)
        f_1 = self.conv1(in_1)
        f_2 = self.conv2(f_1) + f_1  # residual stem feature
        # Stacked hourglasses; each output is residual w.r.t. the stem f_2.
        o_1, prev_1, post_1 = self.hg_0(f_2, None, None)
        o_1 = o_1 + f_2
        o_2, prev_2, post_2 = self.hg_1(o_1, prev_1, post_1)
        o_2 = o_2 + f_2
        # NOTE(review): hg_2 receives prev_1 (not prev_2); this matches the
        # PSMNet stacked-hourglass wiring, so it appears intentional.
        o_3, prev_3, post_3 = self.hg_2(o_2, prev_1, post_2)
        o_3 = o_3 + f_2
        # Deep supervision heads, accumulated so each later output refines
        # the previous prediction.
        logits_1 = self.classif1(o_1)
        logits_2 = self.classif2(o_2) + logits_1
        logits_3 = self.classif3(o_3) + logits_2
        return logits_1, logits_2, logits_3, AMP_dep
#####
# Loss term definition
#####
class MaskedMSELoss(nn.Module):
    """Mean-squared error restricted to valid pixels (target > 0).

    Pixels without ground truth (target <= 0) are excluded from the mean.
    The last computed value is also kept on ``self.loss``.
    """

    def __init__(self):
        super(MaskedMSELoss, self).__init__()

    def forward(self, pred, target):
        assert pred.dim() == target.dim(), "inconsistent dimensions"
        # Only supervise where ground-truth depth is actually available.
        valid = (target > 0).detach()
        residual = (target - pred)[valid]
        self.loss = residual.pow(2).mean()
        return self.loss
if __name__ == '__main__':
    # Smoke test.  Bug fix: the previous code called net(a, b), but
    # SCADCNetGenerator.forward takes four tensors
    # (in_disp, in_depth, sp_map, mask), so it raised a TypeError.
    disp = torch.ones((2, 1, 100, 100))
    depth = torch.ones((2, 1, 100, 100))
    sp_map = torch.ones((2, 1, 100, 100))
    valid_mask = torch.ones((2, 1, 100, 100))
    net = SCADCNetGenerator()
    out = net(disp, depth, sp_map, valid_mask)
worker/imapsync_dummy.py | dolfim/imap2gsuite-migrate | 0 | 12764400 | import os
import time
import subprocess
# Source IMAP connection settings, read from the environment
# (host defaults to empty, port to None if unset).
SOURCE_IMAP_HOST=os.getenv('SOURCE_IMAP_HOST', '')
SOURCE_IMAP_PORT=os.getenv('SOURCE_IMAP_PORT', None)
# Path to the OAuth credentials file for the target (G Suite) account.
TARGET_AUTH_FILE=os.getenv('TARGET_AUTH_FILE')
_example_out = r"""
Here is imapsync 1.983 on host 39eb9c59f7a5, a linux system with 0.6/1.9 free GiB of RAM
with Perl 5.28.1 and Mail::IMAPClient 3.42
Transfer started at Sun May 3 12:08:04 2020
PID is 1 my PPID is 0
Load is 0.40 0.09 0.03 4/418 on 2 cores
Current directory is /var/tmp
Real user id is nobody (uid 65534)
Effective user id is nobody (euid 65534)
$RCSfile: imapsync,v $ $Revision: 1.983 $ $Date: 2020/03/19 02:08:12 $
Command line used, run by /usr/bin/perl:
/usr/bin/imapsync --host1 imap.mail.hostpoint.ch --ssl1 --user1 <EMAIL> --password1 <PASSWORD> --gmail2 --user2 <EMAIL> --password2 <PASSWORD> --authmech2 XOAUTH2
Temp directory is /tmp ( to change it use --tmpdir dirpath )
Under docker context so installing only signals to exit
kill -INT 1 # special behavior: call to sub catch_exit
kill -QUIT 1 # special behavior: call to sub catch_exit
kill -TERM 1 # special behavior: call to sub catch_exit
No variable pid_filename
PID file is unset ( to set it, use --pidfile filepath ; to avoid it use --pidfile "" )
Modules version list:
Authen::NTLM 1.09
CGI 4.40
Compress::Zlib 2.074
Crypt::OpenSSL::RSA 0.31
Data::Uniqid 0.12
Digest::HMAC_MD5 1.01
Digest::HMAC_SHA1 1.03
Digest::MD5 2.55
Encode 2.97
Encode::IMAPUTF7 1.05
File::Copy::Recursive 0.44
File::Spec 3.74
Getopt::Long 2.5
HTML::Entities 3.69
IO::Socket 1.39
IO::Socket::INET 1.39
IO::Socket::INET6 2.72
IO::Socket::IP 0.39
IO::Socket::SSL 2.060
IO::Tee 0.65
JSON 4.02
JSON::WebToken 0.10
LWP 6.36
MIME::Base64 3.15
Mail::IMAPClient 3.42
Net::Ping 2.62
Net::SSLeay 1.85
Term::ReadKey 2.38
Test::MockObject 1.20180705
Time::HiRes 1.9759
URI::Escape 3.31
Unicode::String 2.10
( use --no-modulesversion to turn off printing this Perl modules list )
Info: will resync flags for already transferred messages. Use --noresyncflags to not resync flags.
SSL debug mode level is --debugssl 1 (can be set from 0 meaning no debug to 4 meaning max debug)
Host1: SSL default mode is like --sslargs1 "SSL_verify_mode=0", meaning for host1 SSL_VERIFY_NONE, ie, do not check the certificate server.
Host1: Use --sslargs1 SSL_verify_mode=1 to have SSL_VERIFY_PEER, ie, check the certificate server of host1
Host2: SSL default mode is like --sslargs2 "SSL_verify_mode=0", meaning for host2 SSL_VERIFY_NONE, ie, do not check the certificate server.
Host2: Use --sslargs2 SSL_verify_mode=1 to have SSL_VERIFY_PEER, ie, check the certificate server of host2
Info: turned ON syncinternaldates, will set the internal dates (arrival dates) on host2 same as host1.
Host1: will try to use LOGIN authentication on host1
Host2: will try to use XOAUTH2 authentication on host2
Host1: imap connection timeout is 120 seconds
Host2: imap connection timeout is 120 seconds
Host1: IMAP server [imap.mail.hostpoint.ch] port [993] user [<EMAIL>]
Host2: IMAP server [imap.gmail.com] port [993] user [<EMAIL>]
Host1: connecting and login on host1 [imap.mail.hostpoint.ch] port [993] with user [<EMAIL>]
Host1 IP address: 172.16.17.32
Host1 banner: * OK [CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN] Dovecot ready.
Host1 capability before authentication: IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN AUTH
Host1: success login on [imap.mail.hostpoint.ch] with user [<EMAIL>] auth [LOGIN]
Host2: connecting and login on host2 [imap.gmail.com] port [993] with user [<EMAIL>]
Host2 IP address: 192.168.3.11
Host2 banner: * OK Gimap ready for requests from 172.16.31.10 u21mb183888216edq
Host2 capability before authentication: IMAP4rev1 UNSELECT IDLE NAMESPACE QUOTA ID XLIST CHILDREN X-GM-EXT-1 XYZZY SASL-IR AUTH=XOAUTH2 AUTH=PLAIN AUTH=PLAIN-CLIENTTOKEN AUTH=OAUTHBEARER AUTH=XOAUTH AUTH
Host2: imap.gmail.com says it has CAPABILITY for AUTHENTICATE XOAUTH2
Host2: success login on [imap.gmail.com] with user [<EMAIL>] auth [XOAUTH2]
Host1: state Authenticated
Host2: state Authenticated
Host1 capability once authenticated: IMAP4rev1 SASL-IR LOGIN-REFERRALS ID ENABLE IDLE SORT SORT=DISPLAY THREAD=REFERENCES THREAD=REFS THREAD=ORDEREDSUBJECT MULTIAPPEND URL-PARTIAL CATENATE UNSELECT CHILDREN NAMESPACE UIDPLUS LIST-EXTENDED I18NLEVEL=1 CONDSTORE QRESYNC ESEARCH ESORT SEARCHRES WITHIN CONTEXT=SEARCH LIST-STATUS BINARY MOVE SNIPPET=FUZZY PREVIEW=FUZZY LITERAL+ NOTIFY SPECIAL-USE QUOTA THREAD I18NLEVEL CONTEXT SNIPPET PREVIEW
Host2 capability once authenticated: IMAP4rev1 UNSELECT IDLE NAMESPACE QUOTA ID XLIST CHILDREN X-GM-EXT-1 UIDPLUS COMPRESS=DEFLATE ENABLE MOVE CONDSTORE ESEARCH UTF8=ACCEPT LIST-EXTENDED LIST-STATUS LITERAL- SPECIAL-USE APPENDLIMIT=35651584 COMPRESS UTF8 APPENDLIMIT
Host1: found ID capability. Sending/receiving ID, presented in raw IMAP for now.
In order to avoid sending/receiving ID, use option --noid
Sending: 4 ID ("name" "imapsync" "version" "1.983" "os" "linux" "vendor" "Gilles LAMIRAL" "support-url" "https://imapsync.lamiral.info/" "date" "19-Mar-2020 02:08:12 +0000" "side" "host1")
Sent 181 bytes
Read: * ID ("name" "Dovecot")
4 OK ID completed (0.001 + 0.000 secs).
Sending: 4 ID ("name" "imapsync" "version" "1.983" "os" "linux" "vendor" "Gilles LAMIRAL" "support-url" "https://imapsync.lamiral.info/" "date" "19-Mar-2020 02:08:12 +0000" "side" "host2")
Host2: found ID capability. Sending/receiving ID, presented in raw IMAP for now.
In order to avoid sending/receiving ID, use option --noid
Sent 181 bytes
Read: * ID ("name" "GImap" "vendor" "Google, Inc." "support-url" "https://support.google.com/mail" "version" "gmail_imap_200428.12_p0" "remote-host" "172.16.31.10")
4 OK Success
Host2: found quota, presented in raw IMAP
Sending: 5 GETQUOTAROOT INBOX
Sent 22 bytes
Host2: Quota current storage is 460800 bytes. Limit is 32212254720 bytes. So 0.00 % full
Host2: found APPENDLIMIT=35651584 in CAPABILITY (use --appendlimit xxxx to override this automatic setting)
Read: * QUOTAROOT "INBOX" ""
* QUOTA "" (STORAGE 450 31457280)
5 OK Success
Host2: Setting maxsize to 35651584 (min of --maxsize 35651584 and appendlimit 35651584
Host1: found 5 folders.
Host2: found 9 folders.
Host1: guessing separator from folder listing: [/]
Host1: separator given by NAMESPACE: [/]
Host2: guessing separator from folder listing: [/]
Host2: separator given by NAMESPACE: [/]
Host1: guessing prefix from folder listing: []
Host1: prefix given by NAMESPACE: []
Host2: guessing prefix from folder listing: []
Host2: prefix given by NAMESPACE: []
Host1: separator and prefix: [/][]
Host2: separator and prefix: [/][]
Including all folders found by default. Use --subscribed or --folder or --folderrec or --include to select specific folders. Use --exclude to unselect specific folders.
Excluding folders matching pattern \[Gmail\]$
Host1: Checking wanted folders exist. Use --nocheckfoldersexist to avoid this check (shared of public namespace targeted).
Host1: Checking wanted folders are selectable. Use --nocheckselectable to avoid this check.
Turned on automapping folders ( use --noautomap to turn off automapping )
Host1: special Drafts = \Drafts
Host1: special Sent = \Sent
Host1: special Trash = \Trash
Host1: special spam = \Junk
Host2: special [Gmail]/All Mail = \All
Host2: special [Gmail]/Bin = \Trash
Host2: special [Gmail]/Drafts = \Drafts
Host2: special [Gmail]/Sent Mail = \Sent
Host2: special [Gmail]/Spam = \Junk
Host2: special [Gmail]/Starred = \Flagged
++++ Listing folders
All foldernames are presented between brackets like [X] where X is the foldername.
When a foldername contains non-ASCII characters it is presented in the form
[X] = [Y] where
X is the imap foldername you have to use in command line options and
Y is the utf8 output just printed for convenience, to recognize it.
Host1: folders list (first the raw imap format then the [X] = [Y]):
* LIST (\HasNoChildren \UnMarked \Drafts) "/" Drafts
* LIST (\HasNoChildren \UnMarked \Sent) "/" Sent
* LIST (\HasNoChildren \UnMarked \Trash) "/" Trash
* LIST (\HasNoChildren \UnMarked \Junk) "/" spam
* LIST (\HasNoChildren) "/" INBOX
15 OK List completed (0.001 + 0.000 secs).
[Drafts]
[INBOX]
[Sent]
[Trash]
[spam]
Host2: folders list (first the raw imap format then the [X] = [Y]):
* LIST (\HasNoChildren) "/" "INBOX"
* LIST (\HasNoChildren) "/" "Originals"
* LIST (\HasChildren \Noselect) "/" "[Gmail]"
* LIST (\All \HasNoChildren) "/" "[Gmail]/All Mail"
* LIST (\HasNoChildren \Trash) "/" "[Gmail]/Bin"
* LIST (\Drafts \HasNoChildren) "/" "[Gmail]/Drafts"
* LIST (\HasNoChildren \Important) "/" "[Gmail]/Important"
* LIST (\HasNoChildren \Sent) "/" "[Gmail]/Sent Mail"
* LIST (\HasNoChildren \Junk) "/" "[Gmail]/Spam"
* LIST (\Flagged \HasNoChildren) "/" "[Gmail]/Starred"
11 OK Success
[INBOX]
[Originals]
[[Gmail]/All Mail]
[[Gmail]/Bin]
[[Gmail]/Drafts]
[[Gmail]/Important]
[[Gmail]/Sent Mail]
[[Gmail]/Spam]
[[Gmail]/Starred]
Folders in host2 not in host1:
[[Gmail]/Starred]
[[Gmail]/Important]
[[Gmail]/All Mail]
[Originals]
Folders mapping from --automap feature (use --f1f2 to override any mapping):
[spam] -> [[Gmail]/Spam]
[Sent] -> [[Gmail]/Sent Mail]
[Drafts] -> [[Gmail]/Drafts]
[Trash] -> [[Gmail]/Bin]
Checking SEARCH ALL works on both accounts. To avoid that check, use --nochecknoabletosearch
Host1: checking if SEARCH ALL works on INBOX
Host1: folder [INBOX] has 25 messages mentioned by SELECT
Host1: folder [INBOX] has 25 messages found by SEARCH ALL
Host1: folder [INBOX] has the same messages count (25) by SELECT and SEARCH ALL
Host2: checking if SEARCH ALL works on INBOX
Host2: folder [INBOX] has 32 messages mentioned by SELECT
Host2: folder [INBOX] has 32 messages found by SEARCH ALL
Host2: folder [INBOX] has the same messages count (32) by SELECT and SEARCH ALL
Good! SEARCH ALL works on both accounts.
Folders sizes before the synchronization.
You can remove foldersizes listings by using "--nofoldersizes" and "--nofoldersizesatend"
but then you will also lose the ETA (Estimation Time of Arrival) given after each message copy.
Host1 folder 1/5 [Drafts] Size: 0 Messages: 0 Biggest: 0
Host2 folder 1/5 [[Gmail]/Drafts] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 2/5 [INBOX] Size: 393251 Messages: 25 Biggest: 59437
Host2 folder 2/5 [INBOX] Size: 461389 Messages: 32 Biggest: 59437
Host2-Host1 68138 7 0
Host1 folder 3/5 [Sent] Size: 0 Messages: 0 Biggest: 0
Host2 folder 3/5 [[Gmail]/Sent Mail] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 4/5 [Trash] Size: 0 Messages: 0 Biggest: 0
Host2 folder 4/5 [[Gmail]/Bin] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 5/5 [spam] Size: 0 Messages: 0 Biggest: 0
Host2 folder 5/5 [[Gmail]/Spam] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 Nb folders: 5 folders
Host2 Nb folders: 5 folders
Host1 Nb messages: 25 messages
Host2 Nb messages: 32 messages
Host1 Total size: 393251 bytes (384.034 KiB)
Host2 Total size: 461389 bytes (450.575 KiB)
Host1 Biggest message: 59437 bytes (58.044 KiB)
Host2 Biggest message: 59437 bytes (58.044 KiB)
Time spent on sizing: 2.4 seconds
++++ Looping on each one of 5 folders to sync
ETA: Sun May 3 12:08:06 2020 0 s 25/25 msgs left
Folder 1/5 [Drafts] -> [[Gmail]/Drafts]
Host1: folder [Drafts] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Drafts] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Drafts] permanentflags:
Host1: Expunging Drafts
Host1: folder [Drafts] considering 0 messages
Host2: folder [[Gmail]/Drafts] considering 0 messages
Host1: folder [Drafts] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Drafts] selected 0 messages, duplicates 0
Host1: Expunging folder Drafts
ETA: Sun May 3 12:08:09 2020 2 s 25/25 msgs left
Folder 2/5 [INBOX] -> [INBOX]
Host1: folder [INBOX] has 25 messages in total (mentioned by SELECT)
Host2: folder [INBOX] has 32 messages in total (mentioned by SELECT)
Host2: folder [INBOX] permanentflags:
Host1: Expunging INBOX
Host1: folder [INBOX] considering 25 messages
Host2: folder [INBOX] considering 32 messages
Host1: folder [INBOX] selected 25 messages, duplicates 0
Host2: folder [INBOX] selected 32 messages, duplicates 0
Host1: Expunging folder INBOX
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
Folder 3/5 [Sent] -> [[Gmail]/Sent Mail]
Host1: folder [Sent] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Sent Mail] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Sent Mail] permanentflags:
Host1: Expunging Sent
Host1: folder [Sent] considering 0 messages
Host2: folder [[Gmail]/Sent Mail] considering 0 messages
Host1: folder [Sent] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Sent Mail] selected 0 messages, duplicates 0
Host1: Expunging folder Sent
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
Folder 4/5 [Trash] -> [[Gmail]/Bin]
Host1: folder [Trash] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Bin] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Bin] permanentflags:
Host1: Expunging Trash
Host1: folder [Trash] considering 0 messages
Host2: folder [[Gmail]/Bin] considering 0 messages
Host1: folder [Trash] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Bin] selected 0 messages, duplicates 0
Host1: Expunging folder Trash
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
Folder 5/5 [spam] -> [[Gmail]/Spam]
Host1: folder [spam] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Spam] has 0 messages in total (mentioned by SELECT)
Host2: folder [[Gmail]/Spam] permanentflags:
Host1: Expunging spam
Host1: folder [spam] considering 0 messages
Host2: folder [[Gmail]/Spam] considering 0 messages
Host1: folder [spam] selected 0 messages, duplicates 0
Host2: folder [[Gmail]/Spam] selected 0 messages, duplicates 0
Host1: Expunging folder spam
ETA: Sun May 3 12:08:07 2020 0 s 0/25 msgs left
++++ End looping on each folder
Folders sizes after the synchronization.
You can remove this foldersizes listing by using "--nofoldersizesatend"
Host1 folder 1/5 [Drafts] Size: 0 Messages: 0 Biggest: 0
Host2 folder 1/5 [[Gmail]/Drafts] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 2/5 [INBOX] Size: 393251 Messages: 25 Biggest: 59437
Host2 folder 2/5 [INBOX] Size: 461389 Messages: 32 Biggest: 59437
Host2-Host1 68138 7 0
Host1 folder 3/5 [Sent] Size: 0 Messages: 0 Biggest: 0
Host2 folder 3/5 [[Gmail]/Sent Mail] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 4/5 [Trash] Size: 0 Messages: 0 Biggest: 0
Host2 folder 4/5 [[Gmail]/Bin] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 folder 5/5 [spam] Size: 0 Messages: 0 Biggest: 0
Host2 folder 5/5 [[Gmail]/Spam] Size: 0 Messages: 0 Biggest: 0
Host2-Host1 0 0 0
Host1 Nb folders: 5 folders
Host2 Nb folders: 5 folders
Host1 Nb messages: 25 messages
Host2 Nb messages: 32 messages
Host1 Total size: 393251 bytes (384.034 KiB)
Host2 Total size: 461389 bytes (450.575 KiB)
Host1 Biggest message: 59437 bytes (58.044 KiB)
Host2 Biggest message: 59437 bytes (58.044 KiB)
Time spent on sizing: 0.4 seconds
++++ Statistics
Transfer started on : Sun May 3 12:08:04 2020
Transfer ended on : Sun May 3 12:08:07 2020
Transfer time : 3.4 sec
Folders synced : 5/5 synced
Messages transferred : 0
Messages skipped : 25
Messages found duplicate on host1 : 0
Messages found duplicate on host2 : 0
Messages found crossduplicate on host2 : 0
Messages void (noheader) on host1 : 0
Messages void (noheader) on host2 : 0
Messages found in host1 not in host2 : 0 messages
Messages found in host2 not in host1 : 7 messages
Messages deleted on host1 : 0
Messages deleted on host2 : 0
Total bytes transferred : 0 (0.000 KiB)
Total bytes skipped : 393251 (384.034 KiB)
Message rate : 0.0 messages/s
Average bandwidth rate : 0.0 KiB/s
Reconnections to host1 : 0
Reconnections to host2 : 0
Memory consumption at the end : 183.0 MiB (started with 159.8 MiB)
Load end is : 0.40 0.09 0.03 1/418 on 2 cores
Biggest message : 0 bytes (0.000 KiB)
Memory/biggest message ratio : NA
Start difference host2 - host1 : 7 messages, 68138 bytes (66.541 KiB)
Final difference host2 - host1 : 7 messages, 68138 bytes (66.541 KiB)
The sync looks good, all 25 identified messages in host1 are on host2.
There is no unidentified message
The sync is not strict, there are 7 messages in host2 that are not on host1. Use --delete2 to delete them and have a strict sync. (32 identified messages in host2)
Detected 0 errors
This imapsync is up to date. ( local 1.983 >= official 1.977 )( Use --noreleasecheck to avoid this release check. )
Homepage: https://imapsync.lamiral.info/
Exiting with return value 0 (EX_OK: successful termination) 0/50 nb_errors/max_errors
"""
def imapsync(source_username, source_password, target_email):
    """Dummy stand-in for a real imapsync run.

    Logs the task, sleeps to simulate a long-running sync, and returns the
    canned imapsync transcript ``_example_out``.  ``source_password`` is
    accepted only for signature compatibility with the real worker.
    """
    print(f'Received task with {source_username} {target_email}')
    time.sleep(10)  # simulate transfer time
    return _example_out
| 1.976563 | 2 |
Game/data/tiles/Tile-Trunk.py | joaompinto/2DExplorer | 9 | 12764401 | from Tile import tile
class trunk (tile):
    """Tree-trunk tile: non-solid, harvested with an axe."""
    def __init__(self,parentWorld,colRow):
        # (6, 1) selects this tile's sprite in the sheet -- presumably
        # (column, row); confirm against the tile base class.
        super().__init__(parentWorld,colRow,6,1)
        self.durability = 20    # damage the tile absorbs before breaking -- TODO confirm units
        self.drop = 6           # item id dropped on destruction -- verify against item table
        self.dropAmount = 4     # how many of that item are dropped
        self.physical = False   # not solid: entities pass through it
        self.tool = "axe"       # tool type required/effective against this tile
def setter(parentWorld, colRow):
    """Factory hook: create a trunk tile at the given world position."""
    return trunk(parentWorld, colRow)
| 2.859375 | 3 |
broker/eblocbroker_scripts/process_payment.py | ebloc/ebloc-broker | 3 | 12764402 | <reponame>ebloc/ebloc-broker
#!/usr/bin/env python3
import sys
from typing import Any, Union
from broker import cfg
from broker._utils.tools import log, print_tb
from broker.config import env
from broker.lib import state
from broker.utils import StorageID, ipfs_to_bytes32
def process_payment(
    self,
    job_key,
    index,
    job_id,
    elapsed_time,
    result_ipfs_hash,
    cloud_storage_ids,
    end_time,
    data_transfer_in,
    data_transfer_out,
    core,
    run_time,
    received_block_number=0,
):
    """Process payment of the paid job.

    Validates the result hash for IPFS-backed storage, refuses jobs that
    are already COMPLETED (exits), then submits the payment transaction.

    Raises:
        ValueError: if an IPFS result hash has an invalid length.
        Exception: re-raises anything that fails while building or sending
            the transaction (after printing the traceback).
    """
    log(
        f"~/ebloc-broker/broker/eblocbroker_scripts/process_payment.py {job_key} {index} {job_id} {elapsed_time}"
        f" {result_ipfs_hash} '{cloud_storage_ids}' {end_time} {data_transfer_in} {data_transfer_out} '{core}'"
        f" '{run_time}'",
        "bold blue",
    )
    for cloud_storage_id in cloud_storage_ids:
        if len(result_ipfs_hash) != 46 and cloud_storage_id in (StorageID.IPFS, StorageID.IPFS_GPG):
            log("E: Result ipfs's length does not match with its original length. Please check your job_key")
            # Bug fix: a bare `raise` with no active exception only produced
            # "RuntimeError: No active exception to re-raise"; raise an
            # explicit, meaningful exception instead.
            raise ValueError("result_ipfs_hash has an invalid length")

    self.get_job_info(env.PROVIDER_ID, job_key, index, job_id, received_block_number, is_print=False)
    if self.job_info["stateCode"] == state.code["COMPLETED"]:
        log("Warning: Job is completed and already get paid")
        sys.exit(1)

    try:
        # b"" is falsy, so a single truthiness test covers both the empty
        # bytes and empty/None cases the old code checked separately.
        if not result_ipfs_hash:
            result_ipfs_hash = ""
        else:
            result_ipfs_hash = ipfs_to_bytes32(result_ipfs_hash)

        final_job = True  # true only for the final job
        args = [
            int(index),
            int(job_id),
            int(end_time),
            int(data_transfer_in),
            int(data_transfer_out),
            core,
            run_time,
            final_job,
        ]
        tx = self._process_payment(job_key, args, int(elapsed_time), result_ipfs_hash)
    except Exception as e:
        print_tb(e)
        # bare `raise` preserves the original traceback (`raise e` reset it)
        raise

    return self.tx_id(tx)
if __name__ == "__main__":
    Ebb = cfg.Ebb
    if len(sys.argv) == 12:
        # Real invocation: parse positional CLI arguments; "[a,b]"-style
        # arguments become lists of strings.
        args = sys.argv[1:]
        my_args: list = []
        for arg in args:
            if arg.startswith("[") and arg.endswith("]"):
                arg = arg.replace("[", "").replace("]", "")
                my_args.append(arg.split(","))
            else:
                my_args.append(arg)

        job_key = str(my_args[0])
        index = int(my_args[1])
        job_id = int(my_args[2])
        elapsed_time = int(my_args[3])
        result_ipfs_hash = str(my_args[4])
        cloud_storage_id = my_args[5]
        end_time = int(my_args[6])
        data_transfer_in = float(my_args[7])
        data_transfer_out = float(my_args[8])
        core = my_args[9]
        run_time = my_args[10]
        # convert all strings in a list to int of the following arguments
        cloud_storage_id = list(map(int, cloud_storage_id))
        core = list(map(int, core))
        run_time = list(map(int, run_time))
    else:  # dummy call with hard-coded sample values
        job_key = "<KEY>"
        index = 0
        job_id = 0
        elapsed_time = 1
        result_ipfs_hash = ""
        cloud_storage_id = 1
        end_time = 1584375940
        data_transfer_in = 0.029152870178222656
        data_transfer_out = 0.0
        core = [1]
        run_time = [5]

    try:
        tx_hash = Ebb.process_payment(
            job_key,
            index,
            job_id,
            elapsed_time,
            result_ipfs_hash,
            cloud_storage_id,
            end_time,
            data_transfer_in,
            data_transfer_out,
            core,
            run_time,
        )
        log(f"tx_hash={tx_hash}")
    except Exception as e:
        # Previously a bare `except:` exited silently, hiding the actual
        # failure; print the traceback before signalling failure.
        print_tb(e)
        sys.exit(1)
| 1.78125 | 2 |
run.py | xucz153/robotautomation | 0 | 12764403 | import os
import fileinput
# Remove stale output from a previous run.
for stale in ("log.txt", "log.xml"):
    if os.path.exists(stale):
        os.remove(stale)

# Build the uiautomator test jar and push it onto the device.
os.system("ant build")
os.system("adb push bin/Checker.jar /data/local/tmp/")

# Run every (non-blank) test suite listed in testsuites.txt, appending
# device output to log.txt.
for line in fileinput.input("testsuites.txt"):
    suite = line.strip()
    if suite:
        os.system("adb shell uiautomator runtest Checker.jar -c " + suite + " >>log.txt")

# Convert the raw device log and wait for a keypress before closing.
os.system("java -jar libs/log-converter.jar log.txt")
os.system("pause")
rnnmorph/test_predictor.py | AliceCyber/rnnmorph | 124 | 12764404 | <reponame>AliceCyber/rnnmorph<filename>rnnmorph/test_predictor.py
import unittest
import logging
import sys
import numpy as np
import nltk
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
from rnnmorph.predictor import RNNMorphPredictor
from rnnmorph.tag_genres import tag_ru_files, tag_en_files
class TestLSTMMorph(unittest.TestCase):
    """End-to-end tests for RNNMorphPredictor (Russian and English models).

    setUpClass loads both language models once; the per-test methods check
    single-sentence tagging, batch tagging, probability output, and corpus
    accuracy thresholds.
    """

    @classmethod
    def setUpClass(cls):
        """Load NLTK resources and both predictors once for the whole class."""
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
        nltk.download("wordnet")
        nltk.download('averaged_perceptron_tagger')
        nltk.download('universal_tagset')
        cls.en_predictor = RNNMorphPredictor(language="en")
        cls.ru_predictor = RNNMorphPredictor(language="ru")

    def __assert_parse(self, parse, pos, normal_form, tag):
        """Assert one parse's POS, lemma and full grammeme tag at once."""
        self.assertEqual(parse.pos, pos)
        self.assertEqual(parse.normal_form, normal_form)
        self.assertEqual(parse.tag, tag)

    def test_ru_sentence_analysis1(self):
        """Homonym-heavy sentence: first token must come out as a past-tense verb."""
        forms = self.ru_predictor.predict(["косил", "косой", "косой", "косой"])
        self.__assert_parse(forms[0], 'VERB', 'косить',
                            'Gender=Masc|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
        self.assertIn(1, forms[0].vector)

    def test_empty_sentence(self):
        """An empty token list yields an empty result, not an error."""
        forms = self.ru_predictor.predict([])
        self.assertEqual(forms, [])

    def test_ru_sentence_analysis2(self):
        """Simple SVO sentence: noun / verb / noun with correct cases."""
        forms = self.ru_predictor.predict(["мама", "мыла", "раму"])
        self.__assert_parse(forms[0], 'NOUN', 'мама', 'Case=Nom|Gender=Fem|Number=Sing')
        self.__assert_parse(forms[1], 'VERB', 'мыть',
                            'Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
        self.__assert_parse(forms[2], 'NOUN', 'рама', 'Case=Acc|Gender=Fem|Number=Sing')

    def test_ru_sentences_analysis1(self):
        """Batch API gives the same parses as per-sentence calls."""
        forms = self.ru_predictor.predict_sentences([["косил", "косой", "косой", "косой"], ["мама", "мыла", "раму"]])
        self.__assert_parse(forms[0][0], 'VERB', 'косить',
                            'Gender=Masc|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
        self.__assert_parse(forms[1][0], 'NOUN', 'мама', 'Case=Nom|Gender=Fem|Number=Sing')
        self.__assert_parse(forms[1][1], 'VERB', 'мыть',
                            'Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act')
        self.__assert_parse(forms[1][2], 'NOUN', 'рама', 'Case=Acc|Gender=Fem|Number=Sing')

    def test_empty_sentences(self):
        """A batch consisting of one empty sentence round-trips as [[]]."""
        forms = self.ru_predictor.predict_sentences([[]])
        self.assertEqual(forms, [[]])

    def test_ru_one_empty_sentence_in_sentences(self):
        """An empty sentence inside a batch must not affect its neighbours."""
        forms = self.ru_predictor.predict_sentences([["косил", "косой", "косой", "косой"], []])
        self.assertEqual(forms[1], [])
        self.assertNotEqual(forms[0], [])

    def test_ru_proba(self):
        """include_all_forms exposes the full scored tag distribution."""
        forms = self.ru_predictor.predict(["косил", "косой", "косой", "косой"], include_all_forms=True)
        self.assertEqual(len(forms[0].possible_forms), 252)
        # top-5 highest-scoring tags for the third token
        indices = np.array([form.score for form in forms[2].possible_forms]).argsort()[-5:][::-1]
        variants = [forms[2].possible_forms[i].tag for i in indices]
        self.assertIn('Case=Nom|Degree=Pos|Gender=Masc|Number=Sing', variants)

    def test_ru_genres_accuracy(self):
        """Per-genre accuracy thresholds on the Russian evaluation corpora."""
        quality = tag_ru_files(self.ru_predictor)
        # Per-genre thresholds are on a 0-100 (percent) scale.
        self.assertGreater(quality['Lenta'].tag_accuracy, 95)
        self.assertGreater(quality['Lenta'].sentence_accuracy, 70)
        self.assertGreater(quality['VK'].tag_accuracy, 93)
        self.assertGreater(quality['VK'].sentence_accuracy, 65)
        self.assertGreater(quality['JZ'].tag_accuracy, 94)
        self.assertGreater(quality['JZ'].sentence_accuracy, 70)
        print("Точность по тегам по всем разделам: %.2f%%" % (quality['All']['tag_accuracy']*100))
        print("Точность по PoS тегам по всем разделам: %.2f%%" % (quality['All']['pos_accuracy'] * 100))
        print("Точность по предложениям по всем разделам: %.2f%%" % (quality['All']['sentence_accuracy'] * 100))
        # NOTE(review): the 'All' entry is compared on a 0-1 (fraction)
        # scale, unlike the per-genre checks above -- presumably
        # tag_ru_files returns different scales for the two; confirm.
        self.assertGreater(quality['All']['tag_accuracy'], 0.95)

    def test_en_accuracy(self):
        """English model accuracy threshold (percent scale)."""
        self.assertGreater(tag_en_files(self.en_predictor).tag_accuracy, 85)
| 2.4375 | 2 |
cncore/laser.py | matthewSorensen/cnc-core | 0 | 12764405 | <gh_stars>0
import svgwrite
import tempfile
import os
import subprocess
# operations:
# * manage file handles
# * manage file conversions transparently
# * colors / laser semantics
# * access to the underlying svg
# * higher-level drawing operations
class LaserOutput:
    """Context manager producing laser-cutter vector output.

    Draws into an SVG via ``svgwrite`` and, on exit, converts it to DXF
    through an inkscape (SVG->PS) + pstoedit (PS->DXF) pipeline.  Stroke
    colors encode the operation the laser driver should perform.
    """

    # Color-to-operation mapping (Universal Laser Systems convention).
    CUT = svgwrite.rgb(255, 0, 0, '%')
    SCORE = svgwrite.rgb(0, 0, 255, '%')
    RASTER = svgwrite.rgb(0, 0, 0, '%')
    ALT = svgwrite.rgb(0, 255, 0, '%')

    def __init__(self, filename):
        # Derive sibling .svg / .dxf output paths from the requested name.
        rest, _ = os.path.splitext(filename)
        self._svg_filename_ = rest + '.svg'
        self._dxf_filename_ = rest + '.dxf'
        self._drawing_ = None

    def __enter__(self):
        self._drawing_ = svgwrite.Drawing(filename=self._svg_filename_, size=("1000mm", "1000mm"), profile="full")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Persist the SVG, then convert SVG -> PS -> DXF via external tools.
        self._drawing_.save()
        with tempfile.NamedTemporaryFile() as f:
            subprocess.call(["inkscape", self._svg_filename_, '--export-ps=' + f.name])
            subprocess.call(["pstoedit", "-dt", "-f", "dxf:-polyaslines -mm", f.name, self._dxf_filename_])
        return False  # never suppress exceptions

    def mm(self, obj):
        """Scale millimetre value(s) into SVG user units (1 mm = 3.543307 px at 90 dpi).

        Accepts a bare number or arbitrarily nested sequences of numbers.
        """
        # Bug fix: the old try/except-Exception probe recursed infinitely
        # on strings and silently swallowed unrelated errors; test the type
        # explicitly instead.  (Exotic numeric types such as Decimal are no
        # longer auto-scaled -- pass int/float.)
        if isinstance(obj, (int, float)):
            return 3.543307 * obj
        return [self.mm(x) for x in obj]

    def polyline(self, points, mode, closed=False, fill=False):
        """Add a polyline whose coordinates are given in mm.

        Args:
            points: sequence of (x, y) pairs in millimetres.
            mode: stroke color, e.g. ``LaserOutput.CUT``.
            closed: if True, append the first point to close the path.
            fill: fill color string, or False for no fill.
        """
        points = self.mm(points)
        if closed:
            points.append(points[0])
        if fill is False:  # the sentinel default means "no fill"
            fill = 'none'
        self._drawing_.add(svgwrite.shapes.Polyline(points, stroke=mode, fill=fill))
| 2.59375 | 3 |
source/tools/utils_maintenance/c_sort_blocks.py | simileV/blenderStereo29 | 3 | 12764406 | <reponame>simileV/blenderStereo29
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import os
import sys
PWD = os.path.dirname(__file__)
sys.path.append(os.path.join(PWD, "modules"))
from batch_edit_text import run
# Absolute path of the repository root (three directory levels above this script).
SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(PWD, "..", "..", ".."))))
# TODO, move to config file
# Sub-directories (relative to SOURCE_DIR) whose files are rewritten in place.
SOURCE_DIRS = (
    "source",
    "intern/ghost",
)
# File extensions treated as C-family sources.
SOURCE_EXT = (
    # C/C++
    ".c", ".h", ".cpp", ".hpp", ".cc", ".hh", ".cxx", ".hxx", ".inl",
    # Objective C
    ".m", ".mm",
)
def sort_struct_lists(fn, data_src):
    """Alphabetically sort consecutive runs of matching declarations in *data_src*.

    Each run of adjacent lines belonging to the same declaration category
    (plain ``struct Foo;`` forward declarations, ``struct Foo Bar;``
    variables, ``typedef struct Foo Bar;`` typedefs, ``enum Foo;``) is
    sorted in place; lines of different categories are never mixed.

    :arg fn: file name (unused; required by batch_edit_text.run's interface).
    :arg data_src: full text of the file.
    :return: the rewritten text, or None when nothing changed
        (batch_edit_text.run skips the file in that case).
    """
    import re
    # eg:
    #   struct Foo;
    re_match_struct = re.compile(r"struct\s+[A-Za-z_][A-Za-z_0-9]*\s*;")
    # eg:
    #   struct Foo Bar;
    re_match_struct_type = re.compile(r"struct\s+[A-Za-z_][A-Za-z_0-9]*\s+[A-Za-z_][A-Za-z_0-9]*\s*;")
    # eg:
    #   typedef struct Foo Bar;
    re_match_typedef_struct_type = re.compile(r"typedef\s+struct\s+[A-Za-z_][A-Za-z_0-9]*\s+[A-Za-z_][A-Za-z_0-9]*\s*;")
    # eg:
    #   enum Foo;
    re_match_enum = re.compile(r"enum\s+[A-Za-z_][A-Za-z_0-9]*\s*;")
    # eg:
    #   extern char datatoc_splash_png[];
    re_match_datatoc = re.compile(r"extern\s+(char)\s+datatoc_[A-Za-z_].*;")

    lines = data_src.splitlines(keepends=True)

    def sort_category(line):
        """Return a category id when *line* is sortable, else None."""
        if re_match_struct.match(line):
            return 1
        if re_match_struct_type.match(line):
            return 2
        if re_match_typedef_struct_type.match(line):
            return 3
        if re_match_enum.match(line):
            return 4
        # Disable for now.
        # if re_match_datatoc.match(line):
        #     return 5
        return None

    i = 0
    while i < len(lines):
        i_type = sort_category(lines[i])
        if i_type is not None:
            # Extend j to cover the whole run of same-category lines.
            j = i
            while j + 1 < len(lines) and sort_category(lines[j + 1]) == i_type:
                j += 1
            if i != j:
                lines[i:j + 1] = sorted(lines[i:j + 1])
            i = j
        i += 1

    data_dst = "".join(lines)
    if data_src != data_dst:
        return data_dst
    # Explicitly signal "no change" (was an implicit fall-through before).
    return None
# Apply the sort in-place to every C-family source file under SOURCE_DIRS.
run(
    directories=[os.path.join(SOURCE_DIR, d) for d in SOURCE_DIRS],
    is_text=lambda fn: fn.endswith(SOURCE_EXT),
    text_operation=sort_struct_lists,
    use_multiprocess=True,
)
| 1.664063 | 2 |
lib/test/vot20/stark_st50_lt.py | tzhhhh123/Stark | 376 | 12764407 | <filename>lib/test/vot20/stark_st50_lt.py
from lib.test.vot20.stark_vot20lt import run_vot_exp
import os
# Pin the run to GPU 6; must be set before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '6'
# Evaluate the STARK-ST baseline on the VOT2020 long-term protocol, no visualisation.
run_vot_exp('stark_st', 'baseline', vis=False)
| 1.53125 | 2 |
digproj/core/migrations/0005_persona.py | mans-18/digest | 0 | 12764408 | # Generated by Django 2.2.4 on 2020-04-18 20:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.4: creates the `Persona` model — a contact
    # record owned by a user, with many-to-many links to Event and Kollege.
    # Do not edit by hand beyond comments; Django tracks this file's state.
    dependencies = [
        ('core', '0004_event'),
    ]
    operations = [
        migrations.CreateModel(
            name='Persona',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('mobile', models.CharField(max_length=20)),
                ('whatsapp', models.CharField(blank=True, max_length=20)),
                ('telephone', models.CharField(blank=True, max_length=20)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('street', models.CharField(blank=True, max_length=255)),
                ('complement', models.CharField(blank=True, max_length=100)),
                ('postalcode', models.CharField(blank=True, max_length=20)),
                ('dob', models.DateField(blank=True)),
                ('comment', models.CharField(blank=True, max_length=255)),
                ('events', models.ManyToManyField(to='core.Event')),
                ('kollegen', models.ManyToManyField(to='core.Kollege')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
    ]
| 1.726563 | 2 |
walltask.py | paecko/walltask | 1 | 12764409 | #!/usr/bin/python3
import ctypes
import os
import sys
import argparse
import json
from PIL import Image, ImageFont, ImageDraw
# Base image the tasks are drawn onto, and the generated output image.
org_wall = 'wall.jpg'
new_wall = 'task_wall.jpg'
try:
    rf = open('data.json', 'r')
# Initial usage of the script will create a json file with all the default settings and a list to store tasks
except FileNotFoundError:
    temp_wf = open('data.json', 'w')
    init_data = {'tasks': [], 'xpos': 600, 'colour': 'white', 'fontsize': 30}
    json.dump(init_data, temp_wf, indent=4)
    temp_wf.close()
    rf = open('data.json', 'r')
# Is used throughout this program to access the tasks, settings in json file and update them
data = json.load(rf)
def create_wallpaper():
    """Render every stored task onto a copy of the base wallpaper.

    Reads the task list/settings from the module-level ``data`` dict, draws
    them on the contents of ``org_wall`` and saves the result as
    ``new_wall``; the original image file is never modified.
    """
    try:
        base_image = Image.open(org_wall)
    except FileNotFoundError:
        print("Name your wallpaper 'wall' and store in script directory")
        exit()
    canvas = ImageDraw.Draw(base_image)
    size = int(data['fontsize'])
    font = ImageFont.truetype('arial.ttf', size)
    y = 50
    x = data['xpos']
    for task in data['tasks']:
        try:
            canvas.text((x, y), '{}. {}'.format(task['id'], task['t']), data['colour'], font=font)
        except ValueError:
            print('The colour or the font you set did not exist')
        # Advance one line so consecutive tasks never overlap.
        y += size
    with open('data.json', 'w') as wf:
        json.dump(data, wf, indent=4)
    # Write the modified copy; org_wall stays untouched.
    base_image.save(new_wall)
def update_wallpaper(file):
    """Ask Windows to use *file* as the desktop background."""
    SPI_SETDESKWALLPAPER = 20
    SPIF_UPDATEINIFILE = 1
    if not os.path.isfile(file):
        print("File does not exist")
        return
    ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, file, SPIF_UPDATEINIFILE)
def add(added_tasks):
    """Append each task string in *added_tasks* to the global ``data`` dict.

    New tasks are numbered continuing after the existing ones, and the
    (mutated) data dict is returned so main() can persist it.
    """
    next_id = len(data['tasks'])
    for offset, text in enumerate(added_tasks):
        data['tasks'].append({'t': '{}'.format(text), 'id': next_id + offset})
    return data
def remove(ids):
    """Delete the tasks at the indexes in *ids*, then renumber the rest."""
    try:
        # Delete from the highest index down so earlier deletions cannot
        # shift the positions of entries still waiting to be removed
        # (ex: for [2, 5], removing 2 first would make index 5 invalid).
        for index in sorted(ids, reverse=True):
            del data['tasks'][index]
        # Close the id gaps left behind by the deletions.
        for new_id, task in enumerate(data['tasks']):
            task['id'] = new_id
    except IndexError:
        # The user supplied an index that was never assigned to a task.
        print("There does not exist a task with atleast one of the indexes you provided")
def clear_tasks():
    """Delete every task from data.json and restore the original wallpaper."""
    data['tasks'] = []
    # Persist the emptied task list back to the same file it came from.
    with open('data.json', 'w') as wf:
        json.dump(data, wf, indent=4)
    try:
        os.remove(new_wall)
        update_wallpaper(os.path.join(os.getcwd(), org_wall))
    except FileNotFoundError:
        # new_wall was already gone, so the tasks were cleared previously.
        print('You have already cleared all your tasks.')
def main():
    """Parse command-line options, update the stored tasks/settings, and
    regenerate + apply the wallpaper."""
    parser = argparse.ArgumentParser(description='Add or remove tasks to your wallpaper')
    parser.add_argument('-a', '--add', nargs='*', help='Add tasks to wallpaper')
    parser.add_argument('-c', '--clear', help='Clear all of your tasks', action='store_true')
    parser.add_argument('-r', '--remove', nargs='*', type=int, help='Remove a task by id')
    parser.add_argument('-m', '--margin', nargs=1, type=int, help='Change left margin')
    parser.add_argument('-cl', '--colour', nargs=1, help='Change colour of the tasks')
    parser.add_argument('-fs', '--fontsize', nargs=1, help='Change font of the tasks')
    args = parser.parse_args()
    # Following arguments will always require 3 or more arguments to make a change
    if len(sys.argv) > 2:
        # add() and remove() mutate the module-level `data` dict in place.
        if args.add:
            add(args.add)
        if args.remove:
            remove(args.remove)
        # nargs=1 yields a one-element list; store the scalar value
        # (BUG FIX: lists were stored before, breaking the drawing code).
        if args.margin:
            data['xpos'] = args.margin[0]
        if args.colour:
            data['colour'] = args.colour[0]
        if args.fontsize:
            # BUG FIX: was stored under 'font', but create_wallpaper() reads 'fontsize'.
            data['fontsize'] = args.fontsize[0]
        # BUG FIX: always persist the mutated global `data`. The old code
        # dumped a local `updated_data`, which was None after remove() and
        # an empty dict for the settings flags — wiping data.json.
        with open('data.json', 'w') as wf:
            json.dump(data, wf, indent=4)
        create_wallpaper()
        update_wallpaper(os.path.join(os.getcwd(), new_wall))
    if args.clear:
        clear_tasks()
if __name__ == "__main__":
main()
| 3.390625 | 3 |
rust/core/paginator.py | AsiaLi/rust | 5 | 12764410 | <reponame>AsiaLi/rust<filename>rust/core/paginator.py
# -*- coding: utf-8 -*-
import peewee
DEFAULT_COUNT_PER_PAGE = 20
class TargetPage(object):
    """
    Paginator for api-layer queries (legacy Python 2 — `/` below relies on
    integer division).
    """
    __slots__ = (
        '__cur_page',
        '__count_per_page',
        '__total_object_count',
    )
    def __new__(cls, args):
        """
        :param args: the params dict of an api-layer handler.
            When it does not contain 'cur_page', return None instead of an instance.
        """
        if not args.get('cur_page'):
            return None
        return object.__new__(cls)
    def __init__(self, args):
        """
        Once initialised, the attribute set and values are treated as fixed
        (paginate() is the only method that updates the object count).
        """
        self.__total_object_count = 0
        self.__cur_page = int(args['cur_page'])
        self.__count_per_page = int(args.get('count_per_page', DEFAULT_COUNT_PER_PAGE))
    @property
    def has_prev(self):
        # True unless we are on the first page.
        return False if self.__cur_page == 1 else True
    @property
    def has_next(self):
        # NOTE(review): compares the page number against the *object* count,
        # not max_page — looks like a bug; confirm intended behaviour.
        return False if self.__cur_page >= self.__total_object_count else True
    @property
    def has_head(self):
        # Whether a "jump to first page" link should be shown.
        return False if self.__cur_page == 1 else True
    @property
    def has_tail(self):
        # NOTE(review): same object-count-vs-page comparison as has_next.
        return False if self.__cur_page == self.__total_object_count else True
    @property
    def next(self):
        # Next page number, clamped to the last page.
        return self.__cur_page + 1 if self.has_next else self.max_page
    @property
    def prev(self):
        # Previous page number, clamped to 1.
        return self.__cur_page - 1 if self.has_prev else 1
    @property
    def max_page(self):
        # Total number of pages; an empty result set still counts as 1 page.
        if self.__total_object_count % self.__count_per_page == 0:
            total_page = self.__total_object_count / self.__count_per_page
            if total_page == 0:
                total_page = 1
        else:
            total_page = self.__total_object_count / self.__count_per_page + 1
        return total_page
    @property
    def display_pages(self):
        # Window of up to ~5-6 page numbers centred on the current page,
        # for rendering a pagination widget.
        display_pages = []
        cur_page = self.__cur_page
        max_page = self.max_page
        if max_page <= 5:
            display_pages = range(1, max_page + 1)
        elif cur_page + 2 <= max_page:
            if cur_page >= 3:
                display_pages = range(cur_page - 2, cur_page + 3)
            else:
                display_pages = range(1, 6)
        else:
            if cur_page >= 5:
                display_pages = range(max_page - 5, max_page + 1)
        return display_pages
    def __get_page_range(self):
        """
        Get the [start, end) slice of items shown on the current page.
        """
        start = (self.__cur_page - 1) * self.__count_per_page
        end = start + self.__count_per_page
        return start, end
    def paginate(self, objects):
        # Compute the total object / page counts.
        try:
            item_count = objects.count()
        except:
            item_count = len(objects)
        self.__total_object_count = item_count
        total_page_count = self.max_page
        # If the requested page exceeds the max page, show the last page.
        if self.__cur_page > total_page_count:
            self.__cur_page = total_page_count
        if isinstance(objects, peewee.SelectQuery):
            # Let the database do the LIMIT/OFFSET work.
            paged_objects = objects.paginate(self.__cur_page, self.__count_per_page)
        else:
            start, end = self.__get_page_range()
            paged_objects = objects[start:end]
        return paged_objects
    def to_dict(self):
        # JSON-serialisable snapshot of the pagination state for api responses.
        return {
            'cur_page': self.__cur_page,
            'count_per_page': self.__count_per_page,
            'total_object_count': self.__total_object_count,
            'display_pages': self.display_pages,
            'has_head': self.has_head,
            'has_tail': self.has_tail,
            'has_prev': self.has_prev,
            'has_next': self.has_next,
            'prev': self.prev,
            'next': self.next,
            'max_page': self.max_page,
        }
sortingalgorithms/allSortingAlgsTimed.py | eduardoquerido/pythonstuffs | 0 | 12764411 | from bubblesort import bubblesort
from heapsort import heapsort
from insertionsort import insertionsort
from mergesort import mergesort
from quicksort import quicksort
from radixsort import radixsort
from selectionsort import selectionsort
from timsort import timsort
from timeit import default_timer as timer
# Time each sorting implementation once and report the elapsed wall-clock time.
# The (name, callable) pairs replace eight copy-pasted timing blocks, keeping
# the timing and reporting logic in one place.
algorithms = [
    ("bubblesort", bubblesort),
    ("heapsort", heapsort),
    ("insertionsort", insertionsort),
    ("mergesort", mergesort),
    ("quicksort", quicksort),
    ("radixsort", radixsort),
    ("selectionsort", selectionsort),
    ("timsort", timsort),
]

for name, sort_fn in algorithms:
    start = timer()
    sort_fn()
    elapsed = timer() - start
    # BUG FIX: the original measured mergesort's time but never printed it.
    print(f"Elapsed time for {name}: {elapsed}")
| 3.640625 | 4 |
charbon/main.py | GninninwokyOuattara/open-pharma | 1 | 12764412 | import json
import os
from classes.firebase import InitFirebaseConnection
from firebase_admin import db
from classes.google_maps import GoogleMaps
from classes.pharma_consults import PharmaConsults
from datetime import datetime
from sys import argv
print(argv)
if not argv[1] in ["-fo", "-fa"]:
raise BaseException("Invalid command option")
if argv[1] == "-fo":
InitFirebaseConnection()
print("Fetching currently open pharmacies...")
currentlyOpenPharmacies = PharmaConsults.getCurrentlyOpenPharmacies()
if argv[2] and argv[2] == "-p":
if len(currentlyOpenPharmacies)!= 0:
print("Updating database...")
try:
now = datetime.now().timestamp()
ref = db.reference("/currently_open")
ref.set(currentlyOpenPharmacies)
ref = db.reference("/last_update")
ref.set(now)
except Exception as e:
raise e
elif argv[1] == "-fa":
# Fetch all pharmacies coordinates
from classes.base_pharmacy import BasePharmacy
with open("./pharmacies.txt", "r") as pharmcies_file:
pharmacies = pharmcies_file.read().strip().split("\n")
links = GoogleMaps.get_meta_links(pharmacies)
if argv[2] and argv[2] == "-p":
if os.path.exists("./cached.json"):
with open("./cached.json", "r") as f:
data = json.loads(f.read())
ref = db.reference("/all")
all = ref.get()
if all:
ans = input("Overwrite ? Y/N > ").lower()
if ans == "y":
ref = db.reference()
ref.child("all").set(data)
else:
ref = db.reference()
ref.child("all_pharmacies").set(data)
| 2.578125 | 3 |
jax_cfd/base/subgrid_models_test.py | ngam/jax-cfd | 244 | 12764413 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_cfd.subgrid_models."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
from jax_cfd.base import advection
from jax_cfd.base import boundaries
from jax_cfd.base import finite_differences as fd
from jax_cfd.base import funcutils
from jax_cfd.base import grids
from jax_cfd.base import pressure
from jax_cfd.base import subgrid_models
from jax_cfd.base import test_util
import numpy as np
def periodic_grid_variable(data, offset, grid):
  """Wraps `data` at `offset` on `grid` as a periodic GridVariable."""
  array = grids.GridArray(data, offset, grid)
  bc = boundaries.periodic_boundary_conditions(grid.ndim)
  return grids.GridVariable(array=array, bc=bc)
def zero_velocity_field(grid: grids.Grid) -> grids.GridVariableVector:
  """Returns an all-zero periodic velocity field."""
  components = []
  for offset in grid.cell_faces:
    components.append(
        periodic_grid_variable(jnp.zeros(grid.shape), offset, grid))
  return tuple(components)
def sinusoidal_velocity_field(grid: grids.Grid) -> grids.GridVariableVector:
  """Returns a divergence-free sinusoidal velocity field on `grid`."""
  mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
  waves = [jnp.sin(2. * np.pi * g / s)
           for g, s in zip(grid.mesh(), mesh_size)]
  # Rotate component order by one axis so the field is divergence-free.
  rotated = waves[1:] + waves[:1]
  return tuple(periodic_grid_variable(v, o, grid)
               for v, o in zip(rotated, grid.cell_faces))
def gaussian_force_field(grid: grids.Grid) -> grids.GridArrayVector:
  """Returns a 'Gaussian-shaped' force field in the 'x' direction."""
  mesh = grid.mesh()
  mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
  offsets = grid.cell_faces
  # Gaussian bump centred in the (normalized) domain.
  bump = jnp.exp(-sum(jnp.square(x / s - .5)
                      for x, s in zip(mesh, mesh_size)) * 100.)
  components = [grids.GridArray(bump, offsets[0], grid)]
  # All remaining components carry no force.
  for axis in range(1, grid.ndim):
    components.append(
        grids.GridArray(jnp.zeros(grid.shape), offsets[axis], grid))
  return tuple(components)
def gaussian_forcing(v: grids.GridVariableVector) -> grids.GridArrayVector:
  """Returns Gaussian field forcing."""
  return gaussian_force_field(grids.consistent_grid(*v))
def momentum(v: grids.GridVariableVector, density: float):
  """Returns the total momentum of velocity field `v`."""
  grid = grids.consistent_grid(*v)
  total_velocity = jnp.array([u.data for u in v]).sum()
  cell_volume = jnp.array(grid.step).prod()
  return total_velocity * density * cell_volume
def _convect_upwind(v: grids.GridVariableVector) -> grids.GridArrayVector:
  """Upwind-advects every component of `v` by the full velocity field."""
  advected = [advection.advect_upwind(component, v) for component in v]
  return tuple(advected)
class SubgridModelsTest(test_util.TestCase):
  """Unit and integration tests for the Smagorinsky subgrid models."""

  def test_smagorinsky_viscosity(self):
    # With an all-zero strain-rate tensor the eddy viscosity must be zero
    # everywhere, preserving each entry's offset.
    grid = grids.Grid((3, 3))
    v = (periodic_grid_variable(jnp.zeros(grid.shape), (1, 0.5), grid),
         periodic_grid_variable(jnp.zeros(grid.shape), (0.5, 1), grid))
    c00 = grids.GridArray(jnp.zeros(grid.shape), offset=(0, 0), grid=grid)
    c01 = grids.GridArray(jnp.zeros(grid.shape), offset=(0, 1), grid=grid)
    c10 = grids.GridArray(jnp.zeros(grid.shape), offset=(1, 0), grid=grid)
    c11 = grids.GridArray(jnp.zeros(grid.shape), offset=(1, 1), grid=grid)
    s_ij = grids.GridArrayTensor(np.array([[c00, c01], [c10, c11]]))
    viscosity = subgrid_models.smagorinsky_viscosity(
        s_ij=s_ij, v=v, dt=0.1, cs=0.2)
    self.assertIsInstance(viscosity, grids.GridArrayTensor)
    self.assertEqual(viscosity.shape, (2, 2))
    self.assertAllClose(viscosity[0, 0], c00)
    self.assertAllClose(viscosity[0, 1], c01)
    self.assertAllClose(viscosity[1, 0], c10)
    self.assertAllClose(viscosity[1, 1], c11)

  def test_evm_model(self):
    # A zero velocity field should pass through the eddy-viscosity model
    # unchanged (zero acceleration equals the zero input arrays).
    grid = grids.Grid((3, 3))
    v = (
        periodic_grid_variable(jnp.zeros(grid.shape), (1, 0.5), grid),
        periodic_grid_variable(jnp.zeros(grid.shape), (0.5, 1), grid))
    viscosity_fn = functools.partial(
        subgrid_models.smagorinsky_viscosity, dt=1.0, cs=0.2)
    acceleration = subgrid_models.evm_model(v, viscosity_fn)
    self.assertIsInstance(acceleration, tuple)
    self.assertLen(acceleration, 2)
    self.assertAllClose(acceleration[0], v[0].array)
    self.assertAllClose(acceleration[1], v[1].array)

  @parameterized.named_parameters(
      dict(
          testcase_name='sinusoidal_velocity_base',
          cs=0.0,
          velocity=sinusoidal_velocity_field,
          forcing=None,
          shape=(100, 100),
          step=(1., 1.),
          density=1.,
          viscosity=1e-4,
          convect=advection.convect_linear,
          pressure_solve=pressure.solve_cg,
          dt=1e-3,
          time_steps=1000,
          divergence_atol=1e-3,
          momentum_atol=1e-3),
      dict(
          testcase_name='gaussian_force_upwind_with_subgrid_model',
          cs=0.12,
          velocity=zero_velocity_field,
          forcing=gaussian_forcing,
          shape=(40, 40, 40),
          step=(1., 1., 1.),
          density=1.,
          viscosity=0,
          convect=_convect_upwind,
          pressure_solve=pressure.solve_cg,
          dt=1e-3,
          time_steps=100,
          divergence_atol=1e-4,
          momentum_atol=1e-4),
      dict(
          testcase_name='sinusoidal_velocity_with_subgrid_model',
          cs=0.12,
          velocity=sinusoidal_velocity_field,
          forcing=None,
          shape=(100, 100),
          step=(1., 1.),
          density=1.,
          viscosity=1e-4,
          convect=advection.convect_linear,
          pressure_solve=pressure.solve_fast_diag,
          dt=1e-3,
          time_steps=1000,
          divergence_atol=1e-3,
          momentum_atol=1e-3),
  )
  def test_divergence_and_momentum(
      self,
      cs,
      velocity,
      forcing,
      shape,
      step,
      density,
      viscosity,
      convect,
      pressure_solve,
      dt,
      time_steps,
      divergence_atol,
      momentum_atol,
  ):
    # Integration test: evolve the Smagorinsky Navier-Stokes equations and
    # check physical invariants (incompressibility + momentum conservation).
    grid = grids.Grid(shape, step)
    kwargs = dict(
        density=density,
        viscosity=viscosity,
        cs=cs,
        dt=dt,
        grid=grid,
        convect=convect,
        pressure_solve=pressure_solve,
        forcing=forcing)
    # Explicit and implicit navier-stokes solvers:
    explicit_eq = subgrid_models.explicit_smagorinsky_navier_stokes(**kwargs)
    implicit_eq = subgrid_models.implicit_smagorinsky_navier_stokes(**kwargs)
    v_initial = velocity(grid)
    v_final = funcutils.repeated(explicit_eq, time_steps)(v_initial)
    # TODO(dkochkov) consider adding more thorough tests for these models.
    with self.subTest('divergence free'):
      divergence = fd.divergence(v_final)
      self.assertLess(jnp.max(divergence.data), divergence_atol)
    with self.subTest('conservation of momentum'):
      initial_momentum = momentum(v_initial, density)
      final_momentum = momentum(v_final, density)
      if forcing is not None:
        # A constant force changes momentum by force * volume * total time.
        expected_change = (
            jnp.array([f.data for f in forcing(v_initial)]).sum() *
            jnp.array(grid.step).prod() * dt * time_steps)
      else:
        expected_change = 0
      expected_momentum = initial_momentum + expected_change
      self.assertAllClose(expected_momentum, final_momentum, atol=momentum_atol)
    with self.subTest('explicit-implicit consistency'):
      # Both time-integration schemes must converge to the same field.
      v_final_2 = funcutils.repeated(implicit_eq, time_steps)(v_initial)
      for axis in range(grid.ndim):
        self.assertAllClose(v_final[axis], v_final_2[axis], atol=1e-4,
                            err_msg=f'axis={axis}')
if __name__ == '__main__':
absltest.main()
| 2.21875 | 2 |
ehmatthes-pcc_2e-078318e/beyond_pcc/chess_game/settings.py | charliechocho/py-crash-course | 12 | 12764414 | class Settings:
def __init__(self):
self.screen_width, self.screen_height = 800, 300
self.bg_color = (225, 225, 225) | 1.8125 | 2 |
lemonshort/views.py | RobbiNespu/lemoncurry | 0 | 12764415 | <reponame>RobbiNespu/lemoncurry
from django.apps import apps
from django.shortcuts import get_object_or_404, redirect
from .convert import abc_to_id
def unshort(request, model, tiny):
    """Resolve a short code to its entity and permanently redirect to it."""
    pk = abc_to_id(tiny)
    entity = get_object_or_404(apps.get_model(model), pk=pk)
    return redirect(entity, permanent=True)
| 1.984375 | 2 |
venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/__init__.py | realxwx/leetcode-solve | 0 | 12764416 | <filename>venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
# Copyright (c) 2020
# Author: xiaoweixiang
from .file_cache import FileCache # noqa
from .redis_cache import RedisCache # noqa
| 1.304688 | 1 |
transcriptome_clustering/pitchfork_paramvary.py | mattsmart/biomodels | 0 | 12764417 | <reponame>mattsmart/biomodels
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from inference import solve_true_covariance_from_true_J
from pitchfork_langevin import jacobian_pitchfork, steadystate_pitchfork, langevin_dynamics
from settings import DEFAULT_PARAMS, PARAMS_ID, FOLDER_OUTPUT, TIMESTEP, INIT_COND, NUM_TRAJ, NUM_STEPS, NOISE
from spectrums import get_spectrums, plot_spectrum_hists, get_spectrum_from_arr, plot_rank_order_spectrum, \
scan_J_truncations, plot_spectrum_extremes, plot_sliding_tau_scores, gene_control_scores
from statistical_formulae import collect_multitraj_info, build_diffusion_from_langevin
from visualize_matrix import plot_matrix
# TODO store output and params in OUTPUT dir
def many_traj_varying_params(params_list, num_steps=NUM_STEPS, dt=TIMESTEP, num_traj=NUM_TRAJ, noise=NOISE):
    """
    Computes num_traj langevin trajectories, for num_steps, for each params in params_list
    (legacy Python 2 module: `print` statements and `xrange` below).
    Returns:
        (1) multitraj_varying: NUM_STEPS x NUM_STATES x NUM_TRAJ x PARAM_IDX
    """
    # TODO decide if dict would work better
    # All params in the list are assumed to share the same state dimension.
    base_params = params_list[0]
    print "Generating: num_steps x base_params.dim x num_traj x len(params_list) --", \
        num_steps, base_params.dim, num_traj, len(params_list)
    multitraj_varying = np.zeros((num_steps, base_params.dim, num_traj, len(params_list)))
    t0 = time.time()
    for idx, p in enumerate(params_list):
        print "on param_list %d of %d" % (idx, len(params_list))
        for traj in xrange(num_traj):
            # Every trajectory starts from the middle fixed point of the pitchfork.
            steadystates = steadystate_pitchfork(p)
            fp_mid = steadystates[:, 0]
            langevin_states, _ = langevin_dynamics(init_cond=fp_mid, dt=dt, num_steps=num_steps, params=p, noise=noise)
            multitraj_varying[:, :, traj, idx] = langevin_states
    print "done, timer:", time.time() - t0
    return multitraj_varying
def gen_params_list(pv_name, pv_low, pv_high, pv_num=10, params=DEFAULT_PARAMS):
    """
    Builds a list of copies of `params` in which the parameter `pv_name`
    sweeps linearly from pv_low to pv_high over pv_num points.
    Default use: vary tau across the bifurcation (expected at tau=2.0).
    Returns (params_list, pv_range).
    """
    assert pv_name in PARAMS_ID.values()
    pv_range = np.linspace(pv_low, pv_high, pv_num)
    params_list = [params.mod_copy({pv_name: pv}) for pv in pv_range]
    return params_list, pv_range
if __name__ == '__main__':
    # ---- run flags ------------------------------------------------------
    avoid_traj = False
    skip_inference = False
    plot_hists_all = False
    plot_rank_order_selection = True
    verbosity = False
    spectrum_extremes = False
    sliding_tau_cg_plot = True
    plot_matrices = True
    # ---- sweep setup: vary tau across the pitchfork bifurcation ---------
    noise = 0.1
    pv_name = 'tau'
    params_list, pv_range = gen_params_list(pv_name, 1.2, 2.2, pv_num=20)
    num_genes = params_list[0].dim
    # prepare main scoring object TODO consider convert to class and data import/export method maybe pickle
    score_labels = ['J_true']
    int_U_to_use = [0, 1, 9]
    int_infer_to_use = [0, 1, 2, 3, 4, 5, 6]
    tau_step_anchor_score = 0
    # prep remainder of score labels depending on run flags
    C_list = ['_lyap']
    if not avoid_traj:
        C_list.append('_data')
    for mod in C_list:
        score_labels.append('C%s' % mod)
        for elem in int_U_to_use:
            score_labels.append('J_U%d%s' % (elem, mod))
        if not skip_inference:
            for elem in int_infer_to_use:
                score_labels.append('J_infer%d%s' % (elem, mod))
    # now fill in rest, dependent on whether inference and C_data are being used
    print "List of score labels that will be analyzed:\n", score_labels
    score_dict = {label: {'skip': False,
                          'method_list': [0]*len(pv_range),
                          'matrix_list': [0]*len(pv_range),
                          'spectrums_unperturbed': np.zeros((num_genes, len(pv_range))),
                          'spectrums_perturbed': np.zeros((num_genes, num_genes - 1, len(pv_range))),
                          'cg_min': np.zeros((num_genes, len(pv_range))),
                          'cg_max': np.zeros((num_genes, len(pv_range))),
                          'cg_denom_anchor_min': np.zeros(num_genes),
                          'cg_denom_anchor_max': np.zeros(num_genes),
                          'cg_min_anchor': np.zeros((num_genes, len(pv_range))),
                          'cg_max_anchor': np.zeros((num_genes, len(pv_range))),
                          'outdir': FOLDER_OUTPUT + os.sep + label} for label in score_labels}
    for label in score_labels:
        if not os.path.exists(score_dict[label]['outdir']):
            os.makedirs(score_dict[label]['outdir'])
    # optionally skip generating trajectories and use theoretical covariance
    if not avoid_traj:
        multitraj_varying = many_traj_varying_params(params_list, noise=noise)
    for idx, pv in enumerate(pv_range):
        title_mod = '(%s_%.3f)' % (pv_name, pv)
        print "idx, pv:", idx, title_mod
        params = params_list[idx]
        fp_mid = steadystate_pitchfork(params)[:, 0]
        J_true = jacobian_pitchfork(params, fp_mid, print_eig=False)
        D_true = build_diffusion_from_langevin(params, noise)
        # build data covariance or solve asymptotic covariance
        C_lyap = solve_true_covariance_from_true_J(J_true, D_true)
        if not avoid_traj:
            _, C_data, _ = collect_multitraj_info(multitraj_varying[:, :, :, idx], params, noise, skip_infer=True)
        # score spectrum fill in begin
        score_dict['J_true']['method_list'][idx] = 'J_true'
        score_dict['J_true']['matrix_list'][idx] = J_true
        score_dict['J_true']['spectrums_unperturbed'][:, idx] = get_spectrum_from_arr(J_true, real=True)
        # now fill in rest, dependent on whether inference and C_data are being used
        C_list = [(C_lyap, '_lyap')]
        if not avoid_traj:
            C_list.append((C_data, '_data'))
        for C, mod in C_list:
            # fill in C info
            label = 'C%s' % mod
            score_dict[label]['method_list'][idx] = 'covariance%s' % mod
            score_dict[label]['matrix_list'][idx] = C
            score_dict[label]['spectrums_unperturbed'][:, idx] = get_spectrum_from_arr(C, real=True)
            # do J(U) method
            # TODO this means for one sliding tau run, each tau point will have different random U -- I think this is OK?
            list_of_J_u, specs_u, labels_u = get_spectrums(C, D_true, method='U%s' % mod)
            if plot_hists_all:
                # NOTE(review): `label` here is still 'C%s' from above; confirm
                # the histogram plots are meant to land in the C outdir.
                plot_spectrum_hists(specs_u, labels_u, method='U%s' % mod, hist='violin', title_mod=title_mod,
                                    plotdir=score_dict[label]['outdir'])
            # fill in J(U) info
            for elem in int_U_to_use:
                label = 'J_U%d%s' % (elem, mod)
                score_dict[label]['method_list'][idx] = labels_u[elem]
                score_dict[label]['matrix_list'][idx] = list_of_J_u[elem]
                score_dict[label]['spectrums_unperturbed'][:, idx] = specs_u[elem, :]
            # do inference method
            if not skip_inference:
                list_of_J_infer, specs_infer, labels_infer = get_spectrums(C, D_true, method='infer%s' % mod)
                if plot_hists_all:
                    # NOTE(review): `label` is stale (last J_U label) at this
                    # point — verify intended plot directory.
                    plot_spectrum_hists(specs_infer, labels_infer, method='infer%s' % mod, hist='violin',
                                        title_mod=title_mod, plotdir=score_dict[label]['outdir'])
                # fill in inference info
                for elem in int_infer_to_use:
                    label = 'J_infer%d%s' % (elem, mod)
                    score_dict[label]['method_list'][idx] = labels_infer[elem]
                    score_dict[label]['matrix_list'][idx] = list_of_J_infer[elem]
                    score_dict[label]['spectrums_unperturbed'][:, idx] = specs_infer[elem, :]
        # plot sorted rank order distributions of each spectrum (for each tau)
        if plot_rank_order_selection:
            for label in score_dict.keys():
                if not score_dict[label]['skip']:
                    spec = score_dict[label]['spectrums_unperturbed'][:, idx]
                    method = label + '_' + score_dict[label]['method_list'][idx]
                    plot_rank_order_spectrum(spec, method=method, title_mod=title_mod, plotdir=score_dict[label]['outdir'])
            plt.close('all')
        if plot_matrices:
            for label in score_dict.keys():
                if score_dict[label]['skip']:
                    print 'Skipping label %s because skip flag is true' % label
                else:
                    arr = score_dict[label]['matrix_list'][idx]
                    label_detailed = label + '_' + score_dict[label]['method_list'][idx]
                    plot_matrix(arr, method=label_detailed, title_mod=title_mod, plotdir=score_dict[label]['outdir'])
        # perform spectrum perturbation scanning (slow step)
        for label in score_dict.keys():
            if not score_dict[label]['skip']:
                print "Scanning truncations for matrix %s" % label
                spec, spec_perturb = scan_J_truncations(score_dict[label]['matrix_list'][idx],
                                                       spectrum_unperturbed=score_dict[label]['spectrums_unperturbed'][:, idx])
                score_dict[label]['spectrums_perturbed'][:, :, idx] = spec_perturb
                # The anchor tau step fixes the score denominators used for
                # the *_anchor variants at every other tau step.
                if idx == tau_step_anchor_score:
                    perturbed_maxes = np.max(spec_perturb, axis=1)
                    perturbed_mins = np.min(spec_perturb, axis=1)
                    score_dict[label]['cg_denom_anchor_max'][:] = np.max(spec) - perturbed_maxes
                    score_dict[label]['cg_denom_anchor_min'][:] = np.min(spec) - perturbed_mins
                score_dict[label]['cg_min'][:, idx] = gene_control_scores(spec, spec_perturb, use_min=True)
                score_dict[label]['cg_max'][:, idx] = gene_control_scores(spec, spec_perturb, use_min=False)
                score_dict[label]['cg_min_anchor'][:, idx] = gene_control_scores(spec, spec_perturb, fixed_denom=score_dict[label]['cg_denom_anchor_min'], use_min=True)
                score_dict[label]['cg_max_anchor'][:, idx] = gene_control_scores(spec, spec_perturb, fixed_denom=score_dict[label]['cg_denom_anchor_max'], use_min=False)
                if spectrum_extremes:
                    method = label + '_' + score_dict[label]['method_list'][idx]
                    plot_spectrum_extremes(spec, spec_perturb, method=method, title_mod=title_mod, max=True,
                                           plotdir=score_dict[label]['outdir'])
                    plot_spectrum_extremes(spec, spec_perturb, method=method, title_mod=title_mod, max=False,
                                           plotdir=score_dict[label]['outdir'])
    # ---- final sliding-tau summary plots + data export ------------------
    if sliding_tau_cg_plot:
        for label in score_dict.keys():
            if score_dict[label]['skip']:
                print 'Skipping label %s because skip flag is true' % label
            else:
                np.savetxt(score_dict[label]['outdir'] + os.sep + "%s_range.txt" % pv_name, pv_range, delimiter=',')
                print "Generating sliding tau plot for label %s" % label
                # save plot and data for cg scores
                plot_sliding_tau_scores(pv_range, score_dict[label]['cg_min'].T, label, 'cg_min', score_dict[label]['outdir'])
                plot_sliding_tau_scores(pv_range, score_dict[label]['cg_max'].T, label, 'cg_max', score_dict[label]['outdir'])
                np.savetxt(score_dict[label]['outdir'] + os.sep + "%s_cg_min.txt" % label,
                           score_dict[label]['cg_min'].T, delimiter=',')
                np.savetxt(score_dict[label]['outdir'] + os.sep + "%s_cg_max.txt" % label,
                           score_dict[label]['cg_max'].T, delimiter=',')
                # repeat for anchor cg scores
                plot_sliding_tau_scores(pv_range, score_dict[label]['cg_min_anchor'].T, label, 'cg_min_anchor',
                                        score_dict[label]['outdir'])
                plot_sliding_tau_scores(pv_range, score_dict[label]['cg_max_anchor'].T, label, 'cg_max_anchor',
                                        score_dict[label]['outdir'])
                np.savetxt(score_dict[label]['outdir'] + os.sep + "%s_cg_min_anchor.txt" % label,
                           score_dict[label]['cg_min_anchor'].T, delimiter=',')
                np.savetxt(score_dict[label]['outdir'] + os.sep + "%s_cg_max_anchor.txt" % label,
                           score_dict[label]['cg_max_anchor'].T, delimiter=',')
e2e_tests/tests/deploy/test_local.py | trentwatt/determined | 0 | 12764418 | import json
import os
import random
import subprocess
import time
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List, Optional
import docker
import pytest
from determined.common.api import bindings
from tests import config as conf
from tests import experiment as exp
from ..cluster.test_users import ADMIN_CREDENTIALS, logged_in_user
def det_deploy(subcommand: List) -> None:
    """Run ``det deploy local`` with the given extra CLI arguments."""
    subprocess.run(["det", "deploy", "local", *subcommand])
def cluster_up(arguments: List) -> None:
    """Bring up a CPU-only local cluster, pinning the version when configured."""
    subcommand = ["cluster-up", "--no-gpu"]
    if conf.DET_VERSION is not None:
        subcommand.extend(["--det-version", conf.DET_VERSION])
    det_deploy(subcommand + arguments)
def cluster_down(arguments: List) -> None:
    """Tear down a local cluster."""
    det_deploy(["cluster-down", *arguments])
def master_up(arguments: List) -> None:
    """Start just the master container, pinning the version when configured."""
    subcommand = ["master-up"]
    if conf.DET_VERSION is not None:
        subcommand.extend(["--det-version", conf.DET_VERSION])
    det_deploy(subcommand + arguments)
def master_down(arguments: List) -> None:
    """Stop the master container."""
    det_deploy(["master-down", *arguments])
def agent_up(arguments: List, fluent_offset: Optional[int] = None) -> None:
    """Start a local agent pointed at conf.MASTER_IP.

    When fluent_offset is given, a temporary agent config is written that
    gives the fluent sidecar a unique port and container name, so several
    agents can run side by side on one host without port/name collisions.
    """
    command = ["agent-up", conf.MASTER_IP, "--no-gpu"]
    det_version = conf.DET_VERSION
    if det_version is not None:
        command += ["--det-version", det_version]
    command += arguments
    if fluent_offset is not None:
        # NOTE(review): writing to tf.name through a second handle works on
        # POSIX but would fail on Windows while NamedTemporaryFile holds it open.
        with NamedTemporaryFile() as tf:
            with open(tf.name, "w") as f:
                f.write(
                    f"""
fluent:
    port: {24224 + fluent_offset}
    container_name: fluent-{fluent_offset}"""
                )
            det_deploy(command + ["--agent-config-path", tf.name])
    else:
        det_deploy(command)
def agent_down(arguments: List) -> None:
    """Stop an agent container."""
    det_deploy(["agent-down", *arguments])
def agent_enable(arguments: List) -> None:
    """Enable an agent through the CLI, authenticated as the admin user."""
    with logged_in_user(ADMIN_CREDENTIALS):
        base = ["det", "-m", conf.make_master_url(), "agent", "enable"]
        subprocess.run(base + arguments)
def agent_disable(arguments: List) -> None:
    """Disable an agent through the CLI, authenticated as the admin user."""
    with logged_in_user(ADMIN_CREDENTIALS):
        base = ["det", "-m", conf.make_master_url(), "agent", "disable"]
        subprocess.run(base + arguments)
@pytest.mark.det_deploy_local
def test_cluster_down() -> None:
    """Bring up a named cluster, then verify cluster-down removes its master."""
    master_host = "localhost"
    master_port = "8080"
    name = "fixture_down_test"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    cluster_up(["--cluster-name", name])
    # det deploy local names the master container "<cluster>_determined-master_1".
    container_name = name + "_determined-master_1"
    client = docker.from_env()
    containers = client.containers.list(filters={"name": container_name})
    assert len(containers) > 0
    cluster_down(["--cluster-name", name])
    # After teardown the master container must no longer be running.
    containers = client.containers.list(filters={"name": container_name})
    assert len(containers) == 0
@pytest.mark.det_deploy_local
def test_custom_etc() -> None:
    """Start a cluster with a custom master config and run a no_op experiment.

    A successful run plus the presence of the checkpoint directory proves the
    custom master.yaml was actually picked up.
    """
    master_host = "localhost"
    master_port = "8080"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    etc_path = str(Path(__file__).parent.joinpath("etc/master.yaml").resolve())
    cluster_up(["--master-config-path", etc_path])
    exp.run_basic_test(
        conf.fixtures_path("no_op/single-default-ckpt.yaml"),
        conf.fixtures_path("no_op"),
        1,
    )
    # assumes etc/master.yaml sets checkpoint storage to /tmp/ckpt-test — TODO confirm
    assert os.path.exists("/tmp/ckpt-test/")
    cluster_down([])
@pytest.mark.det_deploy_local
def test_agent_config_path() -> None:
    """Verify --agent-config-path is honored, and that CLI flags override it."""
    master_host = "localhost"
    master_port = "8080"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    master_up([])
    # Config makes it unmodified.
    etc_path = str(Path(__file__).parent.joinpath("etc/agent.yaml").resolve())
    agent_name = "test-path-agent"
    agent_up(["--agent-config-path", etc_path])
    client = docker.from_env()
    agent_container = client.containers.get(agent_name)
    # The config file inside the container must match the one we passed, byte for byte.
    exit_code, out = agent_container.exec_run(["cat", "/etc/determined/agent.yaml"])
    assert exit_code == 0
    with open(etc_path) as f:
        assert f.read() == out.decode("utf-8")
    # Poll up to ~100s for the fluent sidecar; its name "test-fluent" is
    # presumably set in etc/agent.yaml — verify against that file.
    for _ in range(10):
        try:
            client.containers.get("test-fluent")
            break
        except docker.errors.NotFound:
            print("Waiting for 'test-fluent' container to be created")
            time.sleep(10)
    else:
        pytest.fail("uh-oh, fluent didn't come online")
    agent_down(["--agent-name", agent_name])
    # Validate CLI flags overwrite config file options.
    agent_name += "-2"
    agent_up(
        ["--agent-name", agent_name, "--agent-config-path", etc_path, "--agent-label", "cli-flag"]
    )
    agent_list = json.loads(subprocess.check_output(["det", "a", "list", "--json"]).decode())
    agent_list = [el for el in agent_list if el["id"] == agent_name]
    assert len(agent_list) == 1
    assert agent_list[0]["label"] == "cli-flag"
    agent_down(["--agent-name", agent_name])
    master_down([])
@pytest.mark.det_deploy_local
def test_custom_port() -> None:
    """A cluster on a non-default master port must still run experiments."""
    name = "port_test"
    master_host = "localhost"
    master_port = "12321"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    arguments = [
        "--cluster-name",
        name,
        "--master-port",
        f"{master_port}",
    ]
    cluster_up(arguments)
    exp.run_basic_test(
        conf.fixtures_path("no_op/single-one-short-step.yaml"),
        conf.fixtures_path("no_op"),
        1,
    )
    cluster_down(["--cluster-name", name])
@pytest.mark.det_deploy_local
def test_agents_made() -> None:
    """cluster-up with --agents N must create N running agent containers."""
    master_host = "localhost"
    master_port = "8080"
    name = "agents_test"
    num_agents = 2
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    arguments = [
        "--cluster-name",
        name,
        "--agents",
        f"{num_agents}",
    ]
    cluster_up(arguments)
    # Agent containers are named "<cluster>-agent-<i>", zero-indexed.
    container_names = [name + f"-agent-{i}" for i in range(0, num_agents)]
    client = docker.from_env()
    for container_name in container_names:
        containers = client.containers.list(filters={"name": container_name})
        assert len(containers) > 0
    cluster_down(["--cluster-name", name])
@pytest.mark.det_deploy_local
def test_master_up_down() -> None:
    """master-up starts the master container and master-down removes it."""
    master_host = "localhost"
    master_port = "8080"
    name = "determined"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    master_up(["--master-name", name])
    container_name = name + "_determined-master_1"
    client = docker.from_env()
    containers = client.containers.list(filters={"name": container_name})
    assert len(containers) > 0
    master_down([])
    containers = client.containers.list(filters={"name": container_name})
    assert len(containers) == 0
@pytest.mark.det_deploy_local
def test_agent_up_down() -> None:
    """agent-up starts an agent container and agent-down removes it."""
    master_host = "localhost"
    master_port = "8080"
    agent_name = "determined-agent"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    master_up([])
    agent_up(["--agent-name", agent_name])
    client = docker.from_env()
    containers = client.containers.list(filters={"name": agent_name})
    assert len(containers) > 0
    agent_down(["--agent-name", agent_name])
    containers = client.containers.list(filters={"name": agent_name})
    assert len(containers) == 0
    master_down([])
@pytest.mark.parametrize("steps", [10])
@pytest.mark.parametrize("num_agents", [3, 5])
@pytest.mark.parametrize("should_disconnect", [False, True])
@pytest.mark.det_deploy_local
def test_stress_agents_reconnect(steps: int, num_agents: int, should_disconnect: bool) -> None:
    """Randomly bounce or disable agents and check the master tracks their state.

    With should_disconnect=True the agent containers are torn down and
    recreated; with False only the enabled flag is toggled via the CLI.
    After the churn the master's agent list must agree with our local
    bookkeeping, and the cluster must still be able to schedule work.
    """
    random.seed(42)  # deterministic churn so reruns behave identically
    master_host = "localhost"
    master_port = "8080"
    conf.MASTER_IP = master_host
    conf.MASTER_PORT = master_port
    master_up([])
    # Start all agents.
    agents_are_up = [True] * num_agents
    for i in range(num_agents):
        agent_up(["--agent-name", f"agent-{i}"], fluent_offset=i)
    time.sleep(3)
    for _ in range(steps):
        for agent_id, agent_is_up in enumerate(agents_are_up):
            if random.choice([True, False]):  # Flip agents status randomly.
                continue
            if should_disconnect:
                # Can't just randomly deploy up/down due to just getting a Docker name conflict.
                if agent_is_up:
                    agent_down(["--agent-name", f"agent-{agent_id}"])
                else:
                    agent_up(["--agent-name", f"agent-{agent_id}"], fluent_offset=agent_id)
                agents_are_up[agent_id] = not agents_are_up[agent_id]
            else:
                if random.choice([True, False]):
                    agent_disable([f"agent-{agent_id}"])
                    agents_are_up[agent_id] = False
                else:
                    agent_enable([f"agent-{agent_id}"])
                    agents_are_up[agent_id] = True
        time.sleep(10)
    # Validate that our master kept track of the agent reconnect spam.
    agent_list = json.loads(
        subprocess.check_output(
            [
                "det",
                "agent",
                "list",
                "--json",
            ]
        ).decode()
    )
    assert sum(agents_are_up) <= len(agent_list)
    for agent in agent_list:
        agent_id = int(agent["id"].replace("agent-", ""))
        assert agents_are_up[agent_id] == agent["enabled"]
    # Can we still schedule something?
    if any(agents_are_up):
        experiment_id = exp.create_experiment(
            conf.fixtures_path("no_op/single-one-short-step.yaml"),
            conf.fixtures_path("no_op"),
            None,
        )
        exp.wait_for_experiment_state(
            experiment_id, bindings.determinedexperimentv1State.STATE_COMPLETED
        )
    for agent_id in range(num_agents):
        agent_down(["--agent-name", f"agent-{agent_id}"])
    master_down([])
| 1.882813 | 2 |
src/MapMyNotesApplication/tests/test_module_code.py | Ryan-Gouldsmith/MajorProject-MapMyNotes | 0 | 12764419 | <gh_stars>0
from MapMyNotesApplication import database
from MapMyNotesApplication.models.module_code import ModuleCode
from flask import Flask
from flask.ext.testing import TestCase
class TestNote(TestCase):
    """Integration tests for the ModuleCode model against a SQLite test DB.

    NOTE(review): the class is named TestNote but exercises ModuleCode —
    confirm whether it should be renamed TestModuleCode.
    """
    def create_app(self):
        # Flask-Testing hook: build the app the test database binds to.
        app = Flask(__name__)
        app.config['TESTING'] = True
        # http://blog.toast38coza.me/adding-a-database-to-a-flask-app/ Used to help with the test database, maybe could move this to a config file..
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.sqlite'
        return app
    def setUp(self):
        # Reset the schema before every test so each one starts clean.
        database.session.close()
        database.drop_all()
        database.create_all()
    def test_saving_a_module_code(self):
        module_code = ModuleCode("CS31310")
        database.session.add(module_code)
        database.session.commit()
        # First row in a fresh table gets id 1.
        assert module_code.id == 1
    def test_saving_module_code_getting_module_back(self):
        module_code = ModuleCode("CS31310")
        database.session.add(module_code)
        database.session.commit()
        assert module_code.module_code == "CS31310"
    def test_the_save_function(self):
        # save() should wrap the add+commit done manually above.
        module_code = ModuleCode("CS31310")
        module_code.save()
        assert module_code.module_code == "CS31310"
        assert module_code.id == 1
    def test_static_function_returning_same_module_code(self):
        module_code = ModuleCode("CS31310")
        module_code.save()
        module_code_obj = ModuleCode.find_id_by_module_code("CS31310")
        assert module_code.id == module_code_obj.id
    def test_static_function_returns_none_if_not_found(self):
        module_code = ModuleCode("CS31310")
        module_code.save()
        module_code_obj = ModuleCode.find_id_by_module_code("SE31520")
        assert None is module_code_obj
| 2.59375 | 3 |
Notes/23-Demo/seed2.py | pedwards95/Springboard_Class | 0 | 12764420 | from models2 import Department, Employee, db
from app2 import app
db.drop_all()
db.create_all()
d1 = Department(dept_code="mktg", dept_name="Marketing",phone="897-9999")
d2 = Department(dept_code="acct", dept_name="Accounting",phone="111-5429")
river = Employee(name="<NAME>", state="NY", dept_code="mktg")
summer = Employee(name="<NAME>", state="OR", dept_code="mktg")
joaquin = Employee(name="<NAME>", dept_code="acct")
db.session.add(d1)
db.session.add(d2)
db.session.commit()
db.session.add(river)
db.session.add(joaquin)
db.session.add(summer)
db.session.commit() | 2.796875 | 3 |
points2mesh/train.py | Hyde46/pc2mesh | 0 | 12764421 | <filename>points2mesh/train.py
import tensorflow as tf
import numpy as np
import cv2
import argparse
import os
from tensorpack import *
from tensorpack.input_source import QueueInput
from tensorpack.dataflow import (PrintData, BatchData)
from PointCloudDataFlow import get_modelnet_dataflow
from models import *
from fetcher import *
from Idiss_df import *
enable_argscope_for_module(tf.layers)
# TOTAL_BATCH_SIZE = 16
TOTAL_BATCH_SIZE = 1
BATCH_SIZE = 1
NUM_EPOCH = 125
# Point-cloud config: number of input points, point dims, ModelNet version,
# and ground-truth sample count.
PC = {'num': 1024, 'dp': 3, 'ver': "40", 'gt': 10000}
# Fix RNG seeds for reproducible runs.
seed = 1024
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings for shapes with number of vertices per unpooling step
# basic Ellipsoid 156 - 618 - 2466
# basic torus 160 - 640 - 2560
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('coord_dim', 3, 'Number of units in output layer')
flags.DEFINE_integer(
    'feat_dim', 227, 'Number of units in FlexConv Feature layer')
flags.DEFINE_integer('hidden', 192, 'Number of units in hidden layer')
flags.DEFINE_float('weight_decay', 5e-6, 'Weight decay for L2 loss.')
flags.DEFINE_float('collapse_epsilon', 0.008, 'Collapse loss epsilon')
# NOTE(review): "rage" in the help string below is presumably a typo for "rate".
flags.DEFINE_float('learning_rate', 3e-5, 'Initial learning rage.')
flags.DEFINE_integer('pc_num', PC['num'],
                     'Number of points per pointcloud object')
flags.DEFINE_integer('dp', 3, 'Dimension of points in pointcloud')
flags.DEFINE_integer('feature_depth', 32,
                     'Dimension of first flexconv feature layer')
flags.DEFINE_integer(
    'num_neighbors', 6, 'Number of neighbors considered during Graph projection layer')
flags.DEFINE_integer('batch_size', 1, 'Batchsize')
flags.DEFINE_string('base_model_path', 'utils/ellipsoid/info_ellipsoid.dat',
                    'Path to base model for mesh deformation')
#
# Ellipsoid allowing 4 unpooling steps
# flags.DEFINE_string('base_model_path', 'utils/ellipsoid/ellipsoid.dat',
#                     'Path to base model for mesh deformation')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--fusion', help='run sampling', default='')
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # NOTE(review): this unconditionally overrides the --gpu flag above and
    # pins the run to GPU 3 — confirm whether it is a leftover debug hack.
    os.environ['CUDA_VISIBLE_DEVICES'] = "3"
    logger.set_logger_dir(
        '/path/to/train_log/true_c1_1024_small_%s' % (args.fusion))
    # Loading Data
    df_train = get_modelnet_dataflow('train', batch_size=FLAGS.batch_size,
                                     num_points=PC["num"], model_ver=PC["ver"], shuffle=True, normals=True, prefetch_data=True, noise_level=0.0)
    df_test = get_modelnet_dataflow('test', batch_size=2 * FLAGS.batch_size,
                                    num_points=PC["num"], model_ver=PC["ver"], shuffle=True, normals=True, prefetch_data=True, noise_level=0.0)
    steps_per_epoch = len(df_train)
    # Setup Model
    # Setup training step
    config = TrainConfig(
        model=FlexmeshModel(PC, name="Flexmesh"),
        data=QueueInput(df_train),
        callbacks=[
            ModelSaver(),
            MinSaver('total_loss'),
        ],
        extra_callbacks=[
            MovingAverageSummary(),
            ProgressBar([]),
            MergeAllSummaries(),
            RunUpdateOps()
        ],
        steps_per_epoch=steps_per_epoch,
        starting_epoch=0,
        max_epoch=NUM_EPOCH
    )
    launch_train_with_config(config, SimpleTrainer())
| 2.234375 | 2 |
src/engine/SCons/Sig/MD5Tests.py | datalogics-staylor/scons | 3 | 12764422 | <filename>src/engine/SCons/Sig/MD5Tests.py
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import unittest
import string
from SCons.Sig.MD5 import current, collect, signature, to_string, from_string
class my_obj:
    """Minimal stand-in object exposing the interface the MD5
    signature code expects (get_contents / get_signature).
    """
    def __init__(self, value=""):
        self.value = value
    def get_signature(self):
        # Memoize: compute the signature once, on first request.
        try:
            return self.sig
        except AttributeError:
            self.sig = signature(self)
            return self.sig
    def get_contents(self):
        # Raw contents that the signature is computed over.
        return self.value
class MD5TestCase(unittest.TestCase):
    """Unit tests for the MD5 signature helpers (Python 2 syntax).

    The hex digests asserted below are the MD5 sums of the literal
    contents passed to my_obj (e.g. md5('111')).
    """
    def test_current(self):
        """Test deciding if an object is up-to-date
        Simple comparison of different "signature" values.
        """
        obj = my_obj('111')
        assert not current(obj.get_signature(), signature(my_obj('110')))
        assert current(obj.get_signature(), signature(my_obj('111')))
        assert not current(obj.get_signature(), signature(my_obj('112')))
    def test_collect(self):
        """Test collecting a list of signatures into a new signature value
        """
        # Python 2: map() returns a list, so s can be sliced below.
        s = map(signature, map(my_obj, ('111', '222', '333')))
        assert '698d51a19d8a121ce581499d7b701668' == collect(s[0:1])
        assert '8980c988edc2c78cc43ccb718c06efd5' == collect(s[0:2])
        assert '53fd88c84ff8a285eb6e0a687e55b8c7' == collect(s)
    def test_signature(self):
        """Test generating a signature"""
        o1 = my_obj(value = '111')
        s = signature(o1)
        assert '698d51a19d8a121ce581499d7b701668' == s, s
        o2 = my_obj(value = 222)
        s = signature(o2)
        assert 'bcbe3365e6ac95ea2c0343a2395834dd' == s, s
        # Objects without get_contents() must raise a descriptive error.
        try:
            signature('string')
        except AttributeError, e:
            assert string.find(str(e), "unable to fetch contents") == 0, str(e)
        else:
            raise AttributeError, "unexpected get_contents() attribute"
        # Make sure we don't eat AttributeErrors raised internally
        # by the get_contents() method (or anything it calls).
        caught = None
        try:
            class xxx:
                def get_contents(self):
                    raise AttributeError, "internal AttributeError"
            signature(xxx())
        except AttributeError, e:
            assert str(e) == "internal AttributeError", e
            caught = 1
        assert caught, "did not catch expected AttributeError"
    def test_to_string(self):
        # MD5 signatures are already strings, so conversion is the identity.
        assert '698d51a19d8a121ce581499d7b701668' == to_string('698d51a19d8a121ce581499d7b701668')
    def test_from_string(self):
        assert '698d51a19d8a121ce581499d7b701668' == from_string('698d51a19d8a121ce581499d7b701668')
if __name__ == "__main__":
    suite = unittest.makeSuite(MD5TestCase, 'test_')
    # Exit non-zero on failure so callers/CI can detect broken tests.
    if not unittest.TextTestRunner().run(suite).wasSuccessful():
        sys.exit(1)
| 2.421875 | 2 |
examples/misc_examples/flatten_mesh.py | SAFedorov/bfieldtools | 17 | 12764423 | # -*- coding: utf-8 -*-
"""
Flatten mesh using conformal mapping
=============================================
Map 3D mesh to a 2D (complex) plane with angle-preserving (conformal) mapping
Based on these course notes
https://www.cs.cmu.edu/~kmcrane/Projects/DDG/
section 7.4.
"""
import numpy as np
from bfieldtools.flatten_mesh import flatten_mesh
from bfieldtools.flatten_mesh import mesh2plane
from bfieldtools.flatten_mesh import plane2mesh
from bfieldtools.utils import load_example_mesh
#%% Determine 2D parameterization and plot coordinate function on the 3D mesh
from mayavi import mlab
from bfieldtools.viz import plot_data_on_vertices, plot_mesh, plot_data_on_faces
mesh = load_example_mesh("meg_helmet", process=False)
u, v, mesh2d = flatten_mesh(mesh, _lambda=0.80)
plot_data_on_vertices(mesh, u, ncolors=15)
plot_data_on_vertices(mesh, v, ncolors=15)
#%% Determine lambda with smallest area distortion
# lls = np.linspace(0.01,1.0, 100)
# mm = []
# for ll in lls:
# u, v, mesh2d = flatten_mesh(mesh, _lambda=ll)
# d = mesh2d.area_faces / mesh.area_faces
# mm.append(np.std(d)/np.mean(d))
# print(np.std(d)/np.mean(d))
# plt.plot(lls, mm)
#%% Plot flattened mesh and area distortion on faces
plot_data_on_faces(mesh2d, mesh2d.area_faces / mesh.area_faces)
#%% Plot gradient of the two coordinate functions and the cosine of the angle between the gradients
from bfieldtools.mesh_calculus import gradient
gx = gradient(u, mesh)
gy = gradient(v, mesh)
cos = np.sum(gx * gy, axis=0) / (
np.linalg.norm(gx, axis=0) * np.linalg.norm(gy, axis=0)
)
plot_data_on_faces(mesh, cos, vmin=-1, vmax=1)
mlab.quiver3d(*mesh.triangles_center.T, *gx, color=(1, 0, 0), mode="arrow")
q = mlab.quiver3d(*mesh.triangles_center.T, *gy, color=(0, 0, 1), mode="arrow")
q.scene.isometric_view()
#%% Map hexagonal grid from 2d to the 3D mesh
d = np.sqrt(3 / 4)
m = np.min((u.min(), v.min()))
mm = np.min((u.max(), v.max()))
xx = np.linspace(m * 1.05, mm * 1.05, 12)
yy = np.linspace(m * 1.05, mm * 1.05, 12) * d
p = np.array(np.meshgrid(xx, yy, 0, indexing="ij"))
p[0, :, ::2] += (xx[1] - xx[0]) * 0.5
p = p.reshape(3, -1).T
pp = plane2mesh(p, mesh, u, v)
plot_data_on_vertices(mesh, u, ncolors=15)
mlab.points3d(*pp.T, scale_factor=0.01)
| 3.34375 | 3 |
tests/integration/platform/test_core_platforms_async.py | m1009d/scrapli_cfg | 15 | 12764424 | import pytest
@pytest.mark.asyncio
@pytest.mark.scrapli_replay
async def test_get_config(async_cfg_conn):
    """Fetched running config should match the expected config loaded from disk."""
    await async_cfg_conn.prepare()
    config = await async_cfg_conn.get_config()
    assert config.failed is False
    # expected config is loaded from disk and set as an attribute in the fixture to make life easy
    assert async_cfg_conn._config_cleaner(config.result) == async_cfg_conn._config_cleaner(
        async_cfg_conn._expected_config
    )
@pytest.mark.asyncio
@pytest.mark.scrapli_replay
async def test_load_config_merge_diff_and_abort(async_cfg_conn):
    """Load a merge candidate config, diff it, then abort without committing."""
    await async_cfg_conn.prepare()
    load_config = await async_cfg_conn.load_config(
        config=async_cfg_conn._load_config, replace=False
    )
    assert load_config.failed is False
    diff_config = await async_cfg_conn.diff_config()
    assert diff_config.failed is False
    abort_config = await async_cfg_conn.abort_config()
    assert abort_config.failed is False
    # dont bother with checking the diff itself, we'll do that in unit tests much more thoroughly
@pytest.mark.asyncio
@pytest.mark.scrapli_replay
async def test_load_config_merge_diff_and_commit(async_cfg_conn):
    """Load the expected config, diff it, and commit.

    NOTE(review): despite "merge" in the test name this loads with
    replace=True (a full replace) — confirm the name or the flag.
    """
    await async_cfg_conn.prepare()
    load_config = await async_cfg_conn.load_config(
        config=async_cfg_conn._expected_config, replace=True
    )
    assert load_config.failed is False
    diff_config = await async_cfg_conn.diff_config()
    assert diff_config.failed is False
    commit_config = await async_cfg_conn.commit_config()
    assert commit_config.failed is False
    # dont bother with checking the diff itself, we'll do that in unit tests much more thoroughly
| 2.046875 | 2 |
smartsearch/nlp.py | brandonwatts/Capstone | 0 | 12764425 | import re
from smartsearch.matcher import field_matcher, phrase_matcher, zip_matcher
from smartsearch.model import extractions, nlp
from smartsearch.referencer import extract_references
def static_args(**kwargs):
    """Decorator that attaches the given keyword arguments to the decorated
    function as attributes.

    The point is to evaluate expensive arguments (compiled regular
    expressions) once at import time and hang them off the function,
    instead of recomputing them on every call.
    """
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
def remove_commas(match):
    """Drop every comma from a regex match's text.

    Intended as the replacement callable for ``re.sub``.

    Args:
        match (match): This is a regular expression match object.

    Returns:
        (str): The matched text with commas removed.
    """
    return match.group().replace(",", "")
@static_args(squarefoot=re.compile(r"(?<!\w)(square|sq(\.)?)(\s)?(feet|foot|ft(\.)?)(?!\w)"),
             numberRE=re.compile(r"\$?\d+,?\d+", re.I))
def preprocess(text):
    """Normalize text before any NLP runs on it.

    Collapses every variant of "square feet" ("sq ft", "sq. ft.", ...)
    into the single token "squarefoot", then strips commas out of numbers
    so they can be piped into the API as integers.

    NOTE(review): the squarefoot pattern is case-sensitive, so
    "Square Feet" is not normalized — confirm whether that is intended.

    Note:
        This method is always called before parse() is called.

    Args:
        text (str): This is the block of text that we wish to preprocess.

    Returns:
        (str): The text that has been preprocessed.
    """
    text = preprocess.squarefoot.sub("squarefoot", text)
    return preprocess.numberRE.sub(remove_commas, text)
def parse(text):
    """This method is what is called by the models.

    Runs the spaCy pipeline over the (preprocessed) text, merges matched
    phrase and field spans into single tokens, extracts zip codes and
    field references, and negates the extracted fields when the sentence
    is negated.

    NOTE(review): results accumulate in the module-level ``extractions``
    dict, which is cleared on every call — not safe for concurrent callers.

    Args:
        text (str): Raw query text; preprocessing is applied internally.

    Returns:
        (dict): Dictionary of fields mapped to their respective values.
    """
    extractions.clear()
    doc = nlp(preprocess(text))
    # Merge multi-token phrase matches so later matchers see single tokens.
    for span in [doc[head:tail] for (match_id, head, tail) in phrase_matcher(doc)]:
        try:
            span.merge()
        except IndexError:
            pass  # a span invalidated by an earlier merge is skipped
    for span in [doc[head:tail] for (match_id, head, tail) in field_matcher(doc)]:
        try:
            span.merge()
        except IndexError:
            pass
    zip_matcher(doc)
    extract_references(doc, extractions)
    if is_negated(doc):
        negate(extractions)
    return extractions
def is_negated(doc):
    """Decide whether the sentence negates the user's request.

    Ex. "Show me all the apartments that are NOT dog friendly".  Walks
    from the first token up to the root of the dependency parse, then
    looks for a negation word either directly under the root or under a
    root child naming the housing topic.

    Args:
        doc (doc): This is a spacy doc object.

    Returns:
        (bool): True if text contains a negation, False otherwise.
    """
    negations = {"no", "not", "n't", "nothing", "nowhere"}
    # NOTE(review): overlaps with the reference words in referencer — could share one list.
    topics = {
        "home", "homes",
        "house", "houses",
        "apartment", "apartments",
        "building", "buildings",
        "place", "places",
        "residence", "residences",
        "anywhere",
        "anyplace",
    }
    # Climb the dependency tree to the sentence root.
    root = doc[0]
    while root != root.head:
        root = root.head
    # Negation attached directly to the root.
    if any(child.text.lower() in negations for child in root.children):
        return True
    # Negation attached to a topic word under the root.
    for child in root.children:
        if child.text.lower() in topics:
            if any(grandchild.text.lower() in negations for grandchild in child.children):
                return True
    return False
def negate(extractions):
    """Flip the extracted constraints when the query is negated.

    Each one-sided min/max pair is moved to the opposite side (e.g.
    "not over $1000" turns a max_price into a min_price), and the
    pet-friendly flags are forced to False.

    Args:
        extractions (dict): This is a dictionary object which contains all
            of the extracted fields.  It is mutated in place.

    Returns:
        None: the dictionary is modified in place.
    """
    def _swap_one_sided(min_key, max_key):
        # Move a one-sided bound to the opposite side; a range with both
        # bounds already set is left untouched (negation is ambiguous there).
        if extractions.get(max_key) and not extractions.get(min_key):
            extractions[min_key], extractions[max_key] = extractions[max_key], None
        elif extractions.get(min_key) and not extractions.get(max_key):
            extractions[max_key], extractions[min_key] = extractions[min_key], None

    _swap_one_sided("min_price", "max_price")
    _swap_one_sided("min_sqft", "max_sqft")
    _swap_one_sided("min_bed", "max_bed")

    # Bug fix: the original subscript-assigned through the bound method
    # (extractions.get["dog_friendly"] = False), which raises TypeError.
    if extractions.get("dog_friendly"):
        extractions["dog_friendly"] = False
    if extractions.get("cat_friendly"):
        extractions["cat_friendly"] = False
| 3.203125 | 3 |
testcases/csvbarfeed_test.py | tibkiss/pyalgotrade | 2 | 12764426 | <gh_stars>1-10
# PyAlgoTrade
#
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import pytest
import unittest
import datetime
import pytz
from pyalgotrade.barfeed import Frequency
from pyalgotrade.barfeed import csvfeed
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.barfeed import ninjatraderfeed
from pyalgotrade.providers.interactivebrokers import ibfeed
from pyalgotrade.utils import dt
from pyalgotrade import marketsession
import common
class BarFeedEventHandler_TestLoadOrder:
    """Bar-event subscriber asserting bars arrive in strictly increasing
    datetime order and that the feed's dataseries stays in sync with
    each event."""
    def __init__(self, testcase, barFeed, instrument):
        self.__testcase = testcase
        self.__count = 0
        self.__prevDateTime = None
        self.__barFeed = barFeed
        self.__instrument = instrument
    def onBars(self, bars):
        self.__count += 1
        dateTime = bars.getBar(self.__instrument).getDateTime()
        # Fix (PEP 8 E711): compare against None with identity, not !=.
        if self.__prevDateTime is not None:
            # Check that bars are loaded in order
            self.__testcase.assertTrue(self.__prevDateTime < dateTime)
            # Check that the last value in the dataseries match the current datetime.
            self.__testcase.assertTrue(self.__barFeed.getDataSeries()[-1].getDateTime() == dateTime)
            # Check that the datetime for the last value matches that last datetime in the dataseries.
            self.__testcase.assertEqual(self.__barFeed.getDataSeries()[-1].getDateTime(), self.__barFeed.getDataSeries().getDateTimes()[-1])
        self.__prevDateTime = dateTime
    def getEventCount(self):
        """Return how many bar events have been observed."""
        return self.__count
class BarFeedEventHandler_TestFilterRange:
    """Bar-event subscriber asserting every bar's datetime falls inside
    the [fromDate, toDate] filter range; either bound may be None to
    leave that side unbounded."""
    def __init__(self, testcase, instrument, fromDate, toDate):
        self.__testcase = testcase
        self.__count = 0
        self.__instrument = instrument
        self.__fromDate = fromDate
        self.__toDate = toDate
    def onBars(self, bars):
        self.__count += 1
        # Fix (PEP 8 E711): compare against None with identity, not !=.
        if self.__fromDate is not None:
            self.__testcase.assertTrue(bars.getBar(self.__instrument).getDateTime() >= self.__fromDate)
        if self.__toDate is not None:
            self.__testcase.assertTrue(bars.getBar(self.__instrument).getDateTime() <= self.__toDate)
    def getEventCount(self):
        """Return how many bar events have been observed."""
        return self.__count
class YahooTestCase(unittest.TestCase):
TestInstrument = "orcl"
def __parseDate(self, date):
parser = csvfeed.YahooRowParser(datetime.time(23, 59))
row = {"Date":date, "Close":0, "Open":0 , "High":0 , "Low":0 , "Volume":0 , "Adj Close":0}
return parser.parseBar(row).getDateTime()
def testParseDate_1(self):
date = self.__parseDate("1950-01-01")
assert date.day == 1
assert date.month == 1
assert date.year == 1950
def testParseDate_2(self):
date = self.__parseDate("2000-01-01")
assert date.day == 1
assert date.month == 1
assert date.year == 2000
def testDateCompare(self):
assert self.__parseDate("2000-01-01") == self.__parseDate("2000-01-01")
assert self.__parseDate("2000-01-01") != self.__parseDate("2001-01-01")
assert self.__parseDate("1999-01-01") < self.__parseDate("2001-01-01")
assert self.__parseDate("2011-01-01") > self.__parseDate("2001-02-02")
def testCSVFeedLoadOrder(self):
barFeed = csvfeed.YahooFeed()
barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"))
barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2001-yahoofinance.csv"))
# Dispatch and handle events.
handler = BarFeedEventHandler_TestLoadOrder(self, barFeed, YahooTestCase.TestInstrument)
barFeed.getNewBarsEvent().subscribe(handler.onBars)
while not barFeed.stopDispatching():
barFeed.dispatch()
assert handler.getEventCount() > 0
def __testFilteredRangeImpl(self, fromDate, toDate):
barFeed = csvfeed.YahooFeed()
barFeed.setBarFilter(csvfeed.DateRangeFilter(fromDate, toDate))
barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"))
barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2001-yahoofinance.csv"))
# Dispatch and handle events.
handler = BarFeedEventHandler_TestFilterRange(self, YahooTestCase.TestInstrument, fromDate, toDate)
barFeed.getNewBarsEvent().subscribe(handler.onBars)
while not barFeed.stopDispatching():
barFeed.dispatch()
assert handler.getEventCount() > 0
def testFilteredRangeFrom(self):
# Only load bars from year 2001.
self.__testFilteredRangeImpl(datetime.datetime(2001, 1, 1, 00, 00, tzinfo=pytz.utc), None)
def testFilteredRangeTo(self):
# Only load bars up to year 2000.
self.__testFilteredRangeImpl(None, datetime.datetime(2000, 12, 31, 23, 55, tzinfo=pytz.utc))
def testFilteredRangeFromTo(self):
# Only load bars in year 2000.
self.__testFilteredRangeImpl(datetime.datetime(2000, 1, 1, 00, 00, tzinfo=pytz.utc),
datetime.datetime(2000, 12, 31, 23, 55, tzinfo=pytz.utc))
def testWithoutTimezone(self):
# At the moment we need to set both the Feed's constructor and addBarsFromCSV to None in order to get a naive
# datetime
barFeed = yahoofeed.Feed(timezone=None)
barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"),
timezone=None)
barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2001-yahoofinance.csv"),
timezone=None)
barFeed.start()
for bars in barFeed:
bar = bars.getBar(YahooTestCase.TestInstrument)
assert dt.datetime_is_naive(bar.getDateTime())
barFeed.stop()
barFeed.join()
def testDefaultTimezoneIsUTC(self):
    # When the Feed constructor gets no timezone, bar datetimes default to UTC
    # even if addBarsFromCSV is called with timezone=None.
    barFeed = yahoofeed.Feed()
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"),
                           timezone=None)
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2001-yahoofinance.csv"),
                           timezone=None)
    barFeed.start()
    for bars in barFeed:
        bar = bars.getBar(YahooTestCase.TestInstrument)
        assert bar.getDateTime().tzinfo == pytz.utc
    barFeed.stop()
    barFeed.join()
def testWithDefaultTimezone(self):
    # A timezone passed to the Feed constructor applies to every CSV loaded,
    # so all bar datetimes come back tz-aware.
    barFeed = yahoofeed.Feed(marketsession.USEquities.getTimezone())
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"))
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2001-yahoofinance.csv"))
    barFeed.start()
    for bars in barFeed:
        bar = bars.getBar(YahooTestCase.TestInstrument)
        assert not dt.datetime_is_naive(bar.getDateTime())
    barFeed.stop()
    barFeed.join()
def testWithPerFileTimezone(self):
    # A timezone can also be supplied per CSV file instead of on the Feed;
    # the result must still be tz-aware datetimes.
    barFeed = yahoofeed.Feed()
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"), marketsession.USEquities.getTimezone())
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2001-yahoofinance.csv"), marketsession.USEquities.getTimezone())
    barFeed.start()
    for bars in barFeed:
        bar = bars.getBar(YahooTestCase.TestInstrument)
        assert not dt.datetime_is_naive(bar.getDateTime())
    barFeed.stop()
    barFeed.join()
def testWithIntegerTimezone(self):
    # Integer (UTC-offset) timezones were dropped; both the Feed constructor
    # and addBarsFromCSV must reject them with an explanatory message.
    try:
        barFeed = yahoofeed.Feed(-5)
        assert False, "Exception expected"
    except Exception, e:
        assert str(e).find("timezone as an int parameter is not supported anymore") == 0

    try:
        barFeed = yahoofeed.Feed()
        barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"), -3)
        assert False, "Exception expected"
    except Exception, e:
        assert str(e).find("timezone as an int parameter is not supported anymore") == 0
def testMapTypeOperations(self):
    # Bars objects should behave like a mapping keyed by instrument name.
    barFeed = yahoofeed.Feed()
    barFeed.addBarsFromCSV(YahooTestCase.TestInstrument, common.get_data_file_path("orcl-2000-yahoofinance.csv"), marketsession.USEquities.getTimezone())
    barFeed.start()
    for bars in barFeed:
        assert YahooTestCase.TestInstrument in bars
        assert not YahooTestCase.TestInstrument not in bars
        bars[YahooTestCase.TestInstrument]
        # Unknown instruments must raise KeyError, just like a dict.
        with self.assertRaises(KeyError):
            bars["pirulo"]
    barFeed.stop()
    barFeed.join()
class NinjaTraderTestCase(unittest.TestCase):
    # Tests for the NinjaTrader intraday (minute-frequency) CSV bar feed.

    def __loadIntradayBarFeed(self, timeZone = None):
        # Build a minute feed from the 2011 SPY data and drain every event so
        # the data series is fully populated before the test inspects it.
        ret = ninjatraderfeed.Feed(ninjatraderfeed.Frequency.MINUTE, timeZone)
        ret.addBarsFromCSV("spy", common.get_data_file_path("nt-spy-minute-2011.csv"))
        # This is need to get session close attributes set. Strategy class is responsible for calling this.
        ret.start()
        # Process all events to get the dataseries fully loaded.
        while not ret.stopDispatching():
            ret.dispatch()
        ret.stop()
        ret.join()
        return ret

    def testWithTimezone(self):
        # With an explicit timezone every bar datetime must be tz-aware, and the
        # per-index datetimes must match getDateTimes().
        timeZone = marketsession.USEquities.getTimezone()
        barFeed = self.__loadIntradayBarFeed(timeZone)
        ds = barFeed.getDataSeries()
        for i in xrange(ds.getLength()):
            currentBar = ds[i]
            assert not dt.datetime_is_naive(currentBar.getDateTime())
            assert ds[i].getDateTime() == ds.getDateTimes()[i]

    # Disabling this testcase as naive datetimes in bars makes it error prone to
    # deal with mixed data (real and daily). As of 2014 all the datetimes are tz aware.
    @pytest.mark.skip(reason="Naive datetimes are not supported anymore.")
    def testWithoutTimezone(self):
        barFeed = self.__loadIntradayBarFeed(None)
        ds = barFeed.getDataSeries()
        for i in xrange(ds.getLength()):
            currentBar = ds[i]
            # Datetime must be set to UTC.
            currentBarDT = currentBar.getDateTime()
            assert not dt.datetime_is_naive(currentBarDT)
            assert ds[i].getDateTime() == ds.getDateTimes()[i]

    def testWithIntegerTimezone(self):
        # Integer UTC-offset timezones are rejected by both the constructor
        # and addBarsFromCSV with an explanatory message.
        try:
            barFeed = ninjatraderfeed.Feed(ninjatraderfeed.Frequency.MINUTE, -3)
            assert False, "Exception expected"
        except Exception, e:
            assert str(e).find("timezone as an int parameter is not supported anymore") == 0

        try:
            barFeed = ninjatraderfeed.Feed(ninjatraderfeed.Frequency.MINUTE)
            barFeed.addBarsFromCSV("spy", common.get_data_file_path("nt-spy-minute-2011.csv"), -5)
            assert False, "Exception expected"
        except Exception, e:
            assert str(e).find("timezone as an int parameter is not supported anymore") == 0

    def testLocalizeAndFilter(self):
        timezone = marketsession.USEquities.getTimezone()
        # The prices come from NinjaTrader interface when set to use 'US Equities RTH' session template.
        prices = {
            datetime.datetime(2011, 3, 9, 9, 31, tzinfo=timezone) : 132.35,
            datetime.datetime(2011, 3, 9, 16, tzinfo=timezone) : 131.82,
            datetime.datetime(2011, 3, 10, 9, 31, tzinfo=timezone) : 130.81,
            datetime.datetime(2011, 3, 10, 16, tzinfo=timezone) : 130.47,
            datetime.datetime(2011, 3, 11, 9, 31, tzinfo=timezone) : 129.72,
            datetime.datetime(2011, 3, 11, 16, tzinfo=timezone) : 130.07,
        }
        barFeed = ninjatraderfeed.Feed(ninjatraderfeed.Frequency.MINUTE, timezone)
        barFeed.addBarsFromCSV("spy", common.get_data_file_path("nt-spy-minute-2011-03.csv"))
        # Closing prices at the known timestamps must match the reference values.
        for bars in barFeed:
            price = prices.get(bars.getDateTime(), None)
            if price != None:
                bar = bars.getBar("spy")
                closingPrice = bar.getClose()
                assert price == closingPrice
| 2.125 | 2 |
batchq/contrib/virtualbox/run_testscript.py | troelsfr/BatchQ | 1 | 12764427 | ## Copyright 2012 (c) <NAME>
from batchq.core.errors import CommunicationTimeout
from batchq.pipelines.shell import BashTerminal as LocalMachine
from batchq.pipelines.shell import SSHTerminal as RemoteMachine
from batchq.pipelines.shell.ssh import BaseSecureTerminalHostVerification
from batchq.pipelines.shell import SFTPTerminal as SFTP
import logging
import copy
from colorlogging import ColorizingStreamHandler
import time
import re
import sys
class TestModule(object):
def __init__(self, filename):
self._logger = logging.getLogger('Virtual Machine Test: %s'%filename) #logging
self._logger.setLevel(logging.INFO)
ch = ColorizingStreamHandler()
formatter = logging.Formatter('[%(levelname)s] - %(asctime)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.INFO)
self._logger.addHandler(ch)
self._lmachine = LocalMachine()
self._sshconnection = None
self._sftpconnection = None
self._ssh_time_wait = 3
self._port = 3022
## Parsing the script
file = open(filename)
self._filename =filename
script = file.read()
file.close()
lines = script.split("\n")
lines = [(i, lines[i-1].strip()) for i in range(1,len(lines)+1)]
self._lines = [y for y in filter(lambda x: x[1]!="" and ( x[1].startswith("#:") or not x[1].startswith("#")), lines)]
## Defining the running environment
def timeout(to):
self._start_ssh_if_none()
self._logger.info("Setting expect timeout to %d."%int(to))
self._sshconnection.set_timeout(int(to))
def respond(token, answer):
self._start_ssh_if_none()
self._expect_token += "|"+token
self._answers.append( (token, answer, re.compile(token)) )
def allow_fail():
self._start_ssh_if_none()
self._nofail = False
def has_vm(name):
return self.has_vm(name)
def clone(name):
return self.clone(name)
def isdir(name):
self._start_ssh_if_none()
return self._sshconnection.isdir(name)
def send_command(cmd):
self._start_ssh_if_none()
return self._sshconnection.send_command(cmd)
def isfile(name):
self._start_ssh_if_none()
return self._sshconnection.isfile(name)
def exists(name):
self._start_ssh_if_none()
return self._sshconnection.exists(name)
def runif(condition):
self._run_next = bool(condition)
def sendfile(f,t):
self._start_ssh_if_none()
self._logger.info("Transfering '%s' -> '%s'" %(f,t))
self._sftpconnection.sendfile(f,t)
def getfile(f,t):
self._start_ssh_if_none()
self._logger.info("Transfering '%s' <- '%s'" %(f,t))
self._sftpconnection.getfile(f,t)
def export(name, *values):
newarr = []
for oldconf in self._exports:
for a in values:
n = copy.deepcopy(oldconf)
n[name] = a
newarr.append(n)
self._exports = newarr
self._locals = {'send_command':send_command, 'export': export, 'sendfile':sendfile, 'getfile':getfile, 'runif':runif,'isdir':isdir, 'isfile':isfile, 'exists':exists, 'clone': clone, 'has_vm':has_vm, 'allow_fail':allow_fail, 'respond':respond, 'timeout': timeout, '__file__': filename}
self._globals = {}
self._reset()
def _reset(self):
self._bash_token= "#-->"
self._expect_token=self._bash_token
self._answers = []
self._nofail = True
self._run_next = True
self._exports = [{'dummy':'dummy'}]
def start_virtual_machine(self):
self._image =self._get_variable("image")
self.network_interface()
self.port_forwarding()
self._logger.info( "Booting up virtual machine.")
cmd = "VBoxManage startvm %s #--type=headless" % self._image
out = self._lmachine.send_command(cmd)
job = self._lmachine.last_exitcode()
if job != 0:
self._logger.info( "Failed to start virtual machine. Machine already running?")
user = self._get_variable("user")
password = self._get_variable("password")
cont = True
self._sshconnection = None
while cont:
self._logger.info( "Waiting for SSH to open on port %d ..." %self._port )
try:
self._sshconnection = RemoteMachine("localhost", user,password, self._port, additional_arguments = "-o ConnectTimeout=2", accept_fingerprint=True, debug_level=0)
cont = False
except BaseSecureTerminalHostVerification as e:
self._logger.error("SSH host verification failure:\n\n%s\n" % e.ssh_message)
self._logger.info("Deleting line %d in %s." % (e.ssh_line, e.ssh_filename))
f = open(e.ssh_filename, "r")
cont = f.read().split("\n")
f.close()
cont = cont[0:(e.ssh_line-1)] + cont[e.ssh_line:]
f = open(e.ssh_filename, "w")
f.write("\n".join(cont))
f.close()
# raise
except:
time.sleep( self._ssh_time_wait )
self._sftpconnection = SFTP("localhost", user,password, self._port, accept_fingerprint=True, debug_level=0)
self._logger.info("SSH to virtual machine successfully running.")
def shutdown(self):
self._logger.info("Shutting down.")
self._image = self._get_variable("image")
cmd = "VBoxManage acpipowerbutton %s" % self._image
self._logger.debug(cmd + " (via send_command)")
self._lmachine.send_command(cmd)
job = self._lmachine.last_exitcode()
if job != 0:
self._logger.error("Failed to shut down virtual machine " + image_name)
def network_interface(self):
self._logger.info( "Setting up network interface to NAT for the virtual machine.")
cmd = "VBoxManage modifyvm %s --nic1 nat" % self._image
self._lmachine.send_command(cmd)
job = self._lmachine.last_exitcode()
if job != 0:
self._logger.error("Failed to network interface to NAT. Maybe the machine is already running?")
def port_forwarding(self):
self._logger.info( "Setting port fowarding up.")
cmd = "VBoxManage modifyvm %s --natpf1 \"ssh,tcp,,3022,,22\"" % self._image
out = self._lmachine.send_command(cmd)
job = self._lmachine.last_exitcode()
if job != 0:
self._logger.error("Failed to setup port forwarding: \n\n%s\n" % out )
def _start_ssh_if_none(self):
if self._sshconnection is None:
self.start_virtual_machine()
if self._sshconnection is None:
raise BaseException("No SSH connection available.")
def delete(self, name):
# VBoxManage unregistervm <uuid>|<name> [--delete]
pass
def has_vm(self, name):
self._logger.info( "Checking whether '%s' exists." % name)
cmd = "VBoxManage list vms"
vms = [x.rsplit(" ",1)[0][1:-1] for x in self._lmachine.send_command(cmd).split("\n")]
return name in vms
def _get_variable(self, name):
if name in self._locals:
return self._locals[name]
if name in self._globals:
return self._globals[name]
raise BaseException("Variable '%s' not set." % name)
def _set_variable(self, name, val):
if name in self._locals:
self._locals[name] = val
return
if name in self._globals:
self._globals[name] = val
return
def clone(self, new_name):
self._image = self._get_variable("image")
if self.has_vm(new_name):
self._set_variable("image", new_name)
self._image = new_name
self._logger.info( "Using existing clone.")
return
self._logger.info( "Cloning VM: %s -> %s." %(self._image, new_name))
cmd = "VBoxManage clonevm %s --mode machine --name \"%s\" --register" % (self._image, new_name)
out = self._lmachine.send_command(cmd)
job = self._lmachine.last_exitcode()
if job != 0:
# print self._lmachine.buffer
self._logger.critical("Failed to clone VM:\n\n%s\n" %out )
sys.exit(-1)
self._set_variable("image", new_name)
self._image = new_name
return True
def __call__(self):
for n, l in self._lines:
if l.startswith("#:"):
exec compile("\n"*(n-1) + l[2:].strip(),self._filename,'single') in self._locals, self._globals
else:
self._start_ssh_if_none()
if not self._run_next :
self._logger.info("Skipping $ %s (line %d)" % (l,n))
self._reset()
continue
for environ in self._exports:
if len(environ) > 1:
self._logger.info("Setting environment.")
for name, val in environ.items():
out = self._sshconnection.send_command("export %s=\"%s\""%(name, val))
if self._expect_token==self._bash_token:
self._logger.info("$ "+l+" (via send_command)")
out = self._sshconnection.send_command(l)
code = self._sshconnection.last_exitcode()
if code !=0 and self._nofail:
self._logger.critical("Non-zero exitcode not allowed. Type # allow_fail() before command to allow failure. Output reads:\n\n%s\n"%out.strip())
## TODO: enable break
if code!=0:
self._logger.info("Returned: "+str(code))
else:
self._logger.info("$ "+l+" (via write, expects: '%s')"%self._expect_token)
self._sshconnection.write(l+"\n")
self._sshconnection.consume_output(consume_until="\n")
cont = True
while cont:
if not self._sshconnection.isalive():
cont = False
break
try:
exp = self._sshconnection.expect(re.compile(self._expect_token))
if exp.endswith(self._bash_token):
cont = False
break
self._logger.info("Program asks:\n\n%s\n" % exp.split("\n")[-1])
except CommunicationTimeout:
self._logger.error("Communication timeout recieved.")
cont = False
break
self._logger.info("Looking for answer.")
cont = False
for tok,ans,pat in self._answers:
self._logger.debug("Testing: '%s'" % tok)
if pat.search(exp):
cont = True
self._logger.info("Answer: '%s'" % ans)
self._sshconnection.write(ans+"\n")
self._sshconnection.consume_output(consume_until="\n")
break
if not cont:
self._logger.critical("No answer found.")
self._reset()
x._sshconnection.flush_pipe()
self._logger.info("SSH session: \n\n%s\n" % self._sshconnection.buffer.replace(self._bash_token,"$ "))
x._sftpconnection.flush_pipe()
self._logger.info("SFTP session: \n\n%s\n" % self._sftpconnection.buffer)
x._lmachine.flush_pipe()
self._logger.info("Local bash session: \n\n%s\n" % self._lmachine.buffer.replace(self._bash_token,"$ "))
# Module entry point: run the "ubuntu32" test script; on any failure dump the
# SSH session buffer before re-raising so the cause is visible in the log.
x = TestModule("ubuntu32")
try:
    x()
except:
    if not x._sshconnection is None:
        x._sshconnection.flush_pipe()
        print x._sshconnection.buffer
    raise
#print
#print
| 2.15625 | 2 |
Leetcode/0075. Sort Colors/0075.py | Next-Gen-UI/Code-Dynamics | 0 | 12764428 | <gh_stars>0
class Solution:
    def sortColors(self, nums: List[int]) -> None:
        """Sort *nums* (values 0, 1 and 2) in place in a single pass.

        One-pass overwrite variant of the Dutch national flag problem:
        three write cursors track where the next 0, 1 and 2 belong.
        """
        end0 = end1 = end2 = -1   # last written index for each color
        for value in nums:
            if value == 0:
                # Shift the 1- and 2-regions right by one, insert the 0.
                end2 += 1
                end1 += 1
                end0 += 1
                nums[end2] = 2
                nums[end1] = 1
                nums[end0] = 0
            elif value == 1:
                # Shift the 2-region right by one, insert the 1.
                end2 += 1
                end1 += 1
                nums[end2] = 2
                nums[end1] = 1
            else:
                # Append a 2 at the end of the 2-region.
                end2 += 1
                nums[end2] = 2
| 3.3125 | 3 |
bcferries/crossing.py | duncmacdonald/bcferries | 11 | 12764429 | <filename>bcferries/crossing.py
from abstract import BCFerriesAbstractObject
from capacity import BCFerriesCapacity
import dateutil.parser
class BCFerriesCrossing(BCFerriesAbstractObject):
    """A single sailing (crossing) on a BC Ferries route, parsed from one HTML table row."""

    def __init__(self, name, row, api):
        # NOTE(review): the parent __init__ is handed `self` as an argument in
        # addition to the implicit instance -- confirm BCFerriesAbstractObject
        # really expects the instance as its first parameter.
        super(BCFerriesCrossing, self).__init__(self)
        self.route_name = name
        self._api = api

        # The row carries two cells: departure time and a percent-full cell.
        time, percent_full = row.find_by_tag('td')
        self.time = dateutil.parser.parse(time.text)
        self.name = "{} at {}".format(name, time.text)
        self.capacity = BCFerriesCapacity(percent_full.find_one('a'))

        # Expose these attributes through the abstract object's property machinery.
        self._register_properties(['route_name', 'time', 'capacity'])
| 2.640625 | 3 |
esd_process/ncei_backend.py | noaa-ocs-hydrography/esd_process | 0 | 12764430 | import os
import sqlite3
import logging
from esd_process import scrape_variables
class BaseBackend:
    """Abstract base for survey-metadata backends.

    Holds the per-survey bookkeeping attributes that a concrete backend
    (e.g. a database-backed one) persists. Every storage hook raises
    NotImplementedError here and must be overridden by subclasses.
    """

    def __init__(self):
        self.output_folder = None
        # these attributes are populated during scrape and saved to the backend (database)
        for counter in ('downloaded_success_count', 'downloaded_error_count', 'ignored_count'):
            setattr(self, counter, 0)
        for text_field in ('ship_name', 'survey_name', 'survey_url',
                           'raw_data_path', 'processed_data_path', 'grid_path'):
            setattr(self, text_field, '')
        # Dedicated child logger so backend messages are distinguishable.
        self._backend_logger = logging.getLogger(scrape_variables.logger_name + '_backend')
        self._backend_logger.setLevel(scrape_variables.logger_level)

    def _configure_backend(self):
        """Open/prepare the storage; subclasses must override."""
        raise NotImplementedError('_configure_backend must be implemented for this backend to operate')

    def _create_backend(self):
        """Create the storage schema from scratch; subclasses must override."""
        raise NotImplementedError('_create_backend must be implemented for this backend to operate')

    def _add_survey(self):
        """Persist the currently-held survey attributes; subclasses must override."""
        raise NotImplementedError('_add_survey must be implemented for this backend to operate')

    def _check_for_survey(self, shipname: str, surveyname: str):
        """Return whether the given survey is already stored; subclasses must override."""
        raise NotImplementedError('_check_for_survey must be implemented for this backend to operate')

    def _check_for_grid(self, shipname: str, surveyname: str):
        """Return whether the given survey has a grid recorded; subclasses must override."""
        raise NotImplementedError('_check_for_grid must be implemented for this backend to operate')

    def _remove_survey(self, shipname: str, surveyname: str):
        """Delete the given survey's record; subclasses must override."""
        raise NotImplementedError('_remove_survey must be implemented for this backend to operate')

    def _close_backend(self):
        """Release the storage resources; subclasses must override."""
        raise NotImplementedError('_close_backend must be implemented for this backend to operate')
class SqlBackend(BaseBackend):
    """
    python sqlite3 backend, will store metadata about surveys in the 'surveys' table in the
    self.database_file sqlite3 file.
    """

    def __init__(self):
        super().__init__()
        self.database_file = None  # full path to the sqlite3 file, set in _configure_backend
        self._cur = None           # sqlite3 cursor
        self._conn = None          # sqlite3 connection

    def _configure_backend(self):
        """
        Creates the database_file if it does not exist. Will also run _create_backend to generate a blank table
        if that table does not exist.
        """
        self.database_file = os.path.join(self.output_folder, 'survey_database.sqlite3')
        # sqlite3.connect creates the file on first use, so remember beforehand
        # whether the schema still needs to be built.
        needs_create = not os.path.exists(self.database_file)
        self._conn = sqlite3.connect(self.database_file)
        self._cur = self._conn.cursor()
        if needs_create:
            self._create_backend()

    def _create_backend(self):
        """
        Generate a new sqlite3 database for the project
        """
        self._backend_logger.log(logging.INFO, 'Generating new table "surveys" for scrape data...')
        # create the single table that we need to store survey metadata
        self._cur.execute('''CREATE TABLE surveys
                     (ship_name text, survey text, downloaded_success int, downloaded_error int, 
                      ignored int, raw_data_path text, processed_data_path text, grid_path text)''')
        self._conn.commit()

    def _add_survey(self):
        """
        Add a new entry for this survey to the database and reset the
        per-survey attributes to their defaults.
        """
        if self.ship_name and self.survey_name:
            if not self._check_for_survey(self.ship_name, self.survey_name):
                self._backend_logger.log(logging.INFO, f'Adding new data for {self.ship_name}/{self.survey_name} to sqlite database')
                # Parameterized insert: names/paths may contain quotes, which
                # would break (or inject into) string-built SQL.
                self._cur.execute('INSERT INTO surveys VALUES (?,?,?,?,?,?,?,?)',
                                  (self.ship_name.lower(), self.survey_name.lower(),
                                   self.downloaded_success_count, self.downloaded_error_count,
                                   self.ignored_count, self.raw_data_path,
                                   self.processed_data_path, self.grid_path))
                self._conn.commit()
            # reset data to defaults to get ready for next survey
            self.ship_name = ''
            self.survey_name = ''
            self.downloaded_success_count = 0
            self.downloaded_error_count = 0
            self.ignored_count = 0
            self.raw_data_path = ''
            self.processed_data_path = ''
            self.grid_path = ''

    def _check_for_survey(self, shipname: str, surveyname: str):
        """
        Check to see if this survey exists in the database
        """
        data = self._cur.execute('SELECT 1 FROM surveys WHERE ship_name=? AND survey=?',
                                 (shipname.lower(), surveyname.lower()))
        return data.fetchone() is not None

    def _check_for_grid(self, shipname: str, surveyname: str):
        """
        Check to see if this survey has a grid path in the database (lets you know if you have successfully created a
        grid with this survey)
        """
        data = self._cur.execute('SELECT 1 FROM surveys WHERE ship_name=? AND survey=? AND grid_path != ""',
                                 (shipname.lower(), surveyname.lower()))
        return data.fetchone() is not None

    def _remove_survey(self, shipname: str, surveyname: str):
        """
        Remove the entry for this survey from the database
        """
        # BUGFIX: the original filtered on a non-existent column 'shipname'
        # (the schema column is 'ship_name'), so every delete raised
        # sqlite3.OperationalError instead of removing the row.
        self._cur.execute('DELETE FROM surveys WHERE ship_name=? AND survey=?',
                          (shipname.lower(), surveyname.lower()))
        self._conn.commit()

    def _close_backend(self):
        """
        Close the database connection
        """
        self._conn.close()
| 2.578125 | 3 |
two_sigma_problems/problem_7.py | loftwah/Daily-Coding-Problem | 129 | 12764431 | <reponame>loftwah/Daily-Coding-Problem
"""This problem was asked by Two Sigma.
You’re tracking stock price at a given instance of time.
Implement an API with the following functions: add(), update(), remove(),
which adds/updates/removes a datapoint for the stock price you are tracking.
The data is given as (timestamp, price), where timestamp is specified in unix
epoch time.
Also, provide max(), min(), and average() functions that give the max/min/average
of all values seen thus far.
""" | 2.890625 | 3 |
generators/argument_unpacking.py | aTechGuide/python | 0 | 12764432 | <filename>generators/argument_unpacking.py
"""
dictionary unpacking => It unpacks a dictionary as named arguments to a function.
"""
class User:
    """A user record holding a username and password."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    @classmethod
    def from_dict(cls, data):
        """Alternate constructor: build a User from a mapping with
        'username' and 'password' keys."""
        return cls(data['username'], data['password'])

    def __repr__(self):
        return "<User {} with password {}>".format(self.username, self.password)
# Sample records shaped the way they might arrive from e.g. a JSON payload.
users = [
    {'username': 'kamran', 'password': '<PASSWORD>'},
    {'username': 'ali', 'password': '<PASSWORD>'}
]

# Explicit construction: pull each value out of the dict via the classmethod.
user_objects = [ User.from_dict(u) for u in users ]
## With Dictionary Unpacking
class User2:
    """Same shape as User, but intended to be built via **-unpacking
    rather than through an explicit from_dict classmethod."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __repr__(self):
        return "<User2 {} with password {}>".format(self.username, self.password)
users = [
    {'username': 'kamran', 'password': '<PASSWORD>'},
    {'username': 'ali', 'password': '<PASSWORD>'}
]

## It unpacks a dictionary as named arguments to a function. In this case it's username and password.
# So username is data['username']
# Basically it is equivalent to "user_objects_with_unpacking = [ User2(username=u['username'], password=u['password']) for u in users ] "
# It's important because dictionary may not be in order. And remember named arguments can be jumbled up and that's fine.
user_objects_with_unpacking = [ User2(**u) for u in users ]

print(user_objects_with_unpacking)

# if data is in form of tuple
users_tuple = [
    ('kamran', '123'),
    ('ali', '123')
]

# Positional unpacking: *u expands each tuple into the (username, password) arguments.
user_objects_from_tuples = [User2(*u) for u in users_tuple]
ipware/__init__.py | wking/django-ipware | 0 | 12764433 | <reponame>wking/django-ipware<filename>ipware/__init__.py
from .ip2 import get_client_ip
default_app_config = 'ipware.apps.IPwareConfig'
__author__ = '<NAME> @ Neekware Inc. [@vneekman]'
__description__ = "A Django application to retrieve user's IP address"
__version__ = '2.0.1'
| 1.789063 | 2 |
Utils/RandamData.py | yasserfaraazkhan/Selenium-Python-Pytest | 0 | 12764434 | import random
class Utils:
    """Helpers that generate short random strings for test data."""

    @classmethod
    def _get_random_alphanumeric_string(cls):
        """Return 5 random characters drawn from a fixed alphanumeric alphabet."""
        picked = [random.choice('ABCDSFGEHIJK123456') for _ in range(5)]
        return ''.join(picked)

    @classmethod
    def _get_random_numeric_string(cls):
        """Return 10 random digit characters (may include '0')."""
        picked = [random.choice('1234567890') for _ in range(10)]
        return ''.join(picked)

    @classmethod
    def _get_random_five_number_string(cls):
        """Return 5 random digit characters from 1-9 (never '0')."""
        picked = [random.choice('123456789') for _ in range(5)]
        return ''.join(picked)

    @classmethod
    def _get_random_alphabetic_string(cls):
        """Return 5 random letters drawn from a fixed alphabet."""
        picked = [random.choice('ABCDSFGEHIJK') for _ in range(5)]
        return ''.join(picked)
| 3.375 | 3 |
lambda_deployment/wikia/wikia.py | Pizzaface/SpongeBot | 0 | 12764435 | from __future__ import unicode_literals
import requests
import time
import mimetypes
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from decimal import Decimal
from .exceptions import (
PageError, DisambiguationError, RedirectError, HTTPTimeoutError,
WikiaException, ODD_ERROR_MESSAGE)
from .util import cache, stdout_encode, debug
# Generate all extensions from the OS
mimetypes.init()
API_URL = 'http://{lang}{sub_wikia}.wikia.com/api/v1/{action}'
# URL used when browsing the wikia proper
STANDARD_URL = 'http://{lang}{sub_wikia}.wikia.com/wiki/{page}'
LANG = ""
RATE_LIMIT = False
RATE_LIMIT_MIN_WAIT = None
RATE_LIMIT_LAST_CALL = None
USER_AGENT = 'wikia (https://github.com/Timidger/Wikia/)'
def set_lang(language):
    '''
    Sets the global language variable, which is sent in the params
    '''
    global LANG
    # The language becomes a subdomain prefix, e.g. "en." -> en.<wikia>.wikia.com;
    # a falsy language clears the prefix.
    LANG = language.lower() + '.' if language else ''

    # Cached results were fetched under the previous language, so drop them.
    for cached_func in (search, summary):
        cached_func.clear_cache()
def set_user_agent(user_agent_string):
    """Replace the module-wide User-Agent header sent with every request.

    Arguments:

    * user_agent_string - (string) the value to use for the User-Agent header
    """
    global USER_AGENT
    USER_AGENT = user_agent_string
def set_rate_limiting(rate_limit, min_wait=timedelta(milliseconds=50)):
    """Turn request rate limiting on or off.

    Without rate limiting, bursts of requests can (depending on load on
    Wikia and other factors) make the server answer with an HTTP timeout;
    enabling it generally prevents that, though HTTPTimeoutError may still
    be raised.

    Arguments:

    * rate_limit - (Boolean) whether to enable rate limiting or not

    Keyword arguments:

    * min_wait - minimum timedelta to wait between requests when rate
      limiting is enabled. Defaults to timedelta(milliseconds=50)
    """
    global RATE_LIMIT
    global RATE_LIMIT_MIN_WAIT
    global RATE_LIMIT_LAST_CALL

    RATE_LIMIT = rate_limit
    RATE_LIMIT_MIN_WAIT = min_wait if rate_limit else None
    # Forget the previous call time so the next request is never delayed
    # by state from before this reconfiguration.
    RATE_LIMIT_LAST_CALL = None
@cache
def search(sub_wikia, query, results=10):
    '''
    Do a Wikia search for `query`.

    Keyword arguments:

    * sub_wikia - the sub wikia to search in (i.e: "runescape", "elderscrolls")
    * results - the maximum number of results returned

    Raises WikiaException when the wikia returns no 'items' for the query.
    '''
    global LANG

    search_params = {
        'action': 'Search/List?/',
        'sub_wikia': sub_wikia,
        'lang': LANG,
        'limit': results,
        'query': query
    }

    raw_results = _wiki_request(search_params)

    try:
        search_results = (d['title'] for d in raw_results['items'])
    except KeyError:
        # BUGFIX: the original raised the undefined name `WikiaError`, which
        # produced a NameError instead of the intended exception;
        # WikiaException is the class actually imported from .exceptions.
        raise WikiaException("Could not locate page \"{}\" in subwikia \"{}\"".format(query, sub_wikia))

    return list(search_results)
def random(pages=1):
    '''
    Get a list of random Wikia article titles.

    .. note:: Random only gets articles from namespace 0, meaning no Category, U

    Keyword arguments:

    * pages - the number of random pages returned (max of 10)
    '''
    #http://en.wikia.org/w/api.php?action=query&list=random&rnlimit=5000&format=
    query_params = {
        'lang': LANG
    }
    # NOTE(review): no 'action' key is set here, and the response is parsed
    # like a MediaWiki core API reply ('query'/'random') rather than the
    # Wikia v1 format used by the other functions in this module -- confirm
    # this endpoint actually works before relying on it.
    request = _wiki_request(query_params)
    titles = [page['title'] for page in request['query']['random']]

    # A single title is returned bare rather than wrapped in a list.
    if len(titles) == 1:
        return titles[0]

    return titles
@cache
def summary(sub_wikia, title, chars=500, redirect=True):
    '''
    Plain text summary of the page from the sub-wikia.

    .. note:: This is a convenience wrapper - auto_suggest and redirect are enab

    Keyword arguments:

    * chars - if set, return only the first `chars` characters (limit is 500)
    * auto_suggest - let Wikia find a valid page title for the query
    * redirect - allow redirection without raising RedirectError
    '''

    # use auto_suggest and redirect to get the correct article
    # also, use page's error checking to raise DisambiguationError if necessary
    page_info = page(sub_wikia, title, redirect=redirect)
    # Re-read title/pageid from the resolved page (may differ after redirect).
    title = page_info.title
    pageid = page_info.pageid

    query_params = {
        'action': 'Articles/Details?/',
        'sub_wikia': sub_wikia,
        'titles': title,
        'ids': pageid,
        'abstract': chars,
        'lang': LANG
    }

    request = _wiki_request(query_params)
    # The v1 Details response keys its items by the stringified page id.
    summary = request['items'][str(pageid)]['abstract']

    return summary
def page(sub_wikia, title=None, pageid=None, redirect=True, preload=False):
    """Build a WikiaPage for `sub_wikia`, looked up by `title` or by `pageid`.

    One of `title`/`pageid` must be given; when both are supplied, `title`
    takes precedence.

    Keyword arguments:

    * title - the title of the page to load
    * pageid - the numeric pageid of the page to load
    * redirect - allow redirection without raising RedirectError
    * preload - fetch content, summary, images, references and links up front
    """
    if title is None and pageid is None:
        raise ValueError("Either a title or a pageid must be specified")
    if title is not None:
        return WikiaPage(sub_wikia, title, redirect=redirect, preload=preload)
    return WikiaPage(sub_wikia, pageid=pageid, preload=preload)
class WikiaPage(object):
'''
Contains data from a Wikia page.
Uses property methods to filter data from the raw HTML.
'''
def __init__(self, sub_wikia, title=None, pageid=None, redirect=True, preload=False, original_title=''):
if title is not None:
self.title = title
self.original_title = original_title or title
elif pageid is not None:
self.pageid = pageid
else:
raise ValueError("Either a title or a pageid must be specified")
self.sub_wikia = sub_wikia
try:
self.__load(redirect=redirect, preload=preload)
except AttributeError as e:
raise WikiaError("Could not locate page \"{}\" in subwikia \"{}\"".format(title or pageid,
sub_wikia))
if preload:
for prop in ('content', 'summary', 'images', 'references', 'links', 'sections'):
getattr(self, prop)
def __repr__(self):
return stdout_encode(u'<WikiaPage \'{}\'>'.format(self.title))
def __eq__(self, other):
try:
return (
self.pageid == other.pageid
and self.title == other.title
and self.url == other.url
)
except:
return False
def __load(self, redirect=True, preload=False):
'''
Load basic information from Wikia.
Confirm that page exists and is not a disambiguation/redirect.
Does not need to be called manually, should be called automatically during __init__.
'''
query_params = {
'action': 'Articles/Details?/',
'sub_wikia': self.sub_wikia,
'lang': LANG,
}
if not getattr(self, 'pageid', None):
query_params['titles'] = self.title
else:
query_params['ids'] = self.pageid
try:
request = _wiki_request(query_params)
query = list(request['items'].values())[0]
except IndexError:
raise WikiaError("Could not find page \"{}\""
"of the sub-wikia {}".format(self.title or self.pageid,
self.sub_wikia))
self.pageid = query['id']
self.title = query['title']
lang = query_params['lang']
self.url = STANDARD_URL.format(lang=lang, sub_wikia=self.sub_wikia,
page=self.title)
def __continued_query(self, query_params):
'''
Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries
'''
query_params.update(self.__title_query_param)
last_continue = {}
prop = query_params.get('prop', None)
while True:
params = query_params.copy()
params.update(last_continue)
request = _wiki_request(params)
if 'query' not in request:
break
pages = request['query']['pages']
if 'generator' in query_params:
for datum in pages.values(): # in python 3.3+: "yield from pages.values()"
yield datum
else:
for datum in pages[self.pageid][prop]:
yield datum
if 'continue' not in request:
break
last_continue = request['continue']
@property
def __title_query_param(self):
if getattr(self, 'title', None) is not None:
return {'titles': self.title}
else:
return {'pageids': self.pageid}
def html(self):
'''
Get full page HTML.
.. warning:: This can get pretty slow on long pages.
'''
if not getattr(self, '_html', False):
request = requests.get(self.url)
self._html = request.text
return self._html
@property
def content(self):
'''
Plain text content of each section of the page, excluding images, tables,
and other data.
'''
if not getattr(self, '_content', False):
query_params = {
'action': "Articles/AsSimpleJson?/",
'id': self.pageid,
'sub_wikia': self.sub_wikia,
'lang': LANG
}
request = _wiki_request(query_params)
self._content = "\n".join(segment['text'] for section in request['sections']
for segment in section['content']
if segment['type'] == "paragraph")
return self._content
@property
def revision_id(self):
    '''
    Revision ID of the page.

    The revision ID is a number that uniquely identifies the current
    version of the page. It can be used to create the permalink or for
    other direct API calls. See `Help:Page history
    <http://en.wikia.org/wiki/Wikia:Revision>`_ for more
    information.
    '''
    # BUG FIX: the guard previously checked '_revid' while the value was
    # stored in '_revision_id', so the result was never cached and every
    # access re-hit the API. Use a single attribute for guard and store.
    if not getattr(self, '_revision_id', False):
        query_params = {
            'action': "Articles/Details?/",
            'ids': self.pageid,
            'sub_wikia': self.sub_wikia,
            'lang': LANG
        }
        request = _wiki_request(query_params)
        self._revision_id = request['items'][str(self.pageid)]['revision']['id']
    return self._revision_id
@property
def summary(self):
    '''
    Plain text summary of the page, fetched once and cached.
    '''
    if getattr(self, '_summary', False):
        return self._summary
    # `summary` here resolves to the module-level helper function.
    self._summary = summary(self.sub_wikia, self.title)
    return self._summary
@property
def images(self):
    '''
    List of URLs of images on the page.

    Fetched lazily and cached. Combines the first image of every section
    (Articles/AsSimpleJson) with the article thumbnail (Articles/Details),
    then rewrites each URL to its full-size form.
    '''
    if not getattr(self, '_images', False):
        # Get the first round of images
        query_params = {
            'action': "Articles/AsSimpleJson?/",
            'id': str(self.pageid),
            'sub_wikia': self.sub_wikia,
            'lang': LANG,
        }
        request = _wiki_request(query_params)
        # Keep only the first image of each section that has any.
        images = [section['images'][0]['src'] for section in request["sections"]
                  if section['images']]
        # Get the second round of images
        # This time, have to use a different API call
        query_params['action'] = "Articles/Details?/"
        query_params['titles'] = self.title  # This stops redirects
        request = _wiki_request(query_params)
        image_thumbnail = request["items"][str(self.pageid)]["thumbnail"]
        # Only if there are more pictures to grab
        if image_thumbnail:
            images.append(image_thumbnail)
        # A little URL manipulation is required to get the full sized version
        for index, image in enumerate(images):
            # Remove the /revision/ fluff after the image url
            image = image.partition("/revision/")[0]
            # Derive the file extension from the guessed MIME type, e.g. ".jpeg".
            # NOTE(review): guess_type can return (None, None) for unknown
            # extensions, which would raise AttributeError here — confirm inputs.
            image_type = mimetypes.guess_type(image)[0]
            image_type = "." + image_type.split("/")[-1]
            # JPEG has a special case, where sometimes it is written as "jpg".
            # NOTE(review): `image_type not in image` is always True right after
            # image_type was set to ".jpeg"; confirm the intent was to check the
            # URL's own extension.
            if image_type == ".jpeg" and image_type not in image:
                image_type = ".jpg"
            # Remove the filler around the image url that reduces the size
            image = "".join(image.partition(image_type)[:2])
            images[index] = image.replace("/thumb/", "/")
        self._images = images
    return self._images
@property
def related_pages(self):
    '''
    Lists up to 10 of the wikia URLs of pages related to this page.
    Fetched once and cached.
    '''
    if getattr(self, "_related_pages", False):
        return self._related_pages
    payload = {
        'action': "RelatedPages/List?/",
        'ids': self.pageid,
        'limit': 10,
        'sub_wikia': self.sub_wikia,
        'lang': LANG,
    }
    response = _wiki_request(payload)
    base_path = response['basepath']
    self._related_pages = [base_path + entry['url']
                           for entry in response['items'][str(self.pageid)]]
    return self._related_pages
@property
def sections(self):
    '''
    List of section titles from the table of contents on the page.
    Fetched once and cached.
    '''
    if getattr(self, '_sections', False):
        return self._sections
    payload = {
        'action': 'Articles/AsSimpleJson?/',
        'id': self.pageid,
        'sub_wikia': self.sub_wikia,
        'lang': LANG,
    }
    response = _wiki_request(payload)
    self._sections = [entry['title'] for entry in response['sections']]
    return self._sections
def section(self, section_title):
    '''
    Get the plain text content of a section from `self.sections`.
    Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
    This is a convenience method that wraps self.content.

    .. warning:: Calling `section` on a section that has subheadings will NOT return
                 the full text of all of the subsections. It only gets the text between
                 `section_title` and the next subheading, which is often empty.
    '''
    if section_title not in self.sections:
        return None

    payload = {
        'action': "Articles/AsSimpleJson?/",
        'id': self.pageid,
        'sub_wikia': self.sub_wikia,
        'lang': LANG
    }
    response = _wiki_request(payload)

    # Collect the paragraph text of the matching section only.
    paragraphs = [
        segment['text']
        for sect in response['sections']
        if sect['title'] == section_title
        for segment in sect['content']
        if segment['type'] == "paragraph"
    ]
    return "\n".join(paragraphs)
@cache
def languages():
    '''
    List all the currently supported language prefixes (usually ISO language code).
    Can be inputted to `set_lang` to change the Wikia that `wikia` requests
    results from.

    Returns: dict of <prefix>: <local_lang_name> pairs. To get just a list of prefixes,
    use `wikia.languages().keys()`.
    '''
    query_params = {
        'action': "WAM/WAMLanguages?/",
        'timestamp': time.time(),  # Uses the UNIX timestamp to determine available LANGs
        'sub_wikia': '',
        'lang': LANG
    }
    request = _wiki_request(query_params)
    # BUG FIX: previously returned `response['languages']`, but `response`
    # was never defined (NameError) — the parsed reply is held in `request`.
    return request['languages']
def _wiki_request(params):
    '''
    Make a request to the Wikia API using the given search parameters.
    Returns a parsed dict of the JSON response.

    Raises:
        WikiaError: if the response is not JSON or carries an API exception.
        HTTPTimeoutError: if the API reports a 408 timeout.
    '''
    global RATE_LIMIT_LAST_CALL
    global USER_AGENT

    api_url = API_URL.format(**params)
    params['format'] = 'json'
    headers = {
        'User-Agent': USER_AGENT
    }

    if RATE_LIMIT and RATE_LIMIT_LAST_CALL and \
            RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now():
        # It hasn't been long enough since the last API call,
        # so wait until we're in the clear to make the request.
        wait_time = (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT) - datetime.now()
        # BUG FIX: int() truncated sub-second waits to 0 seconds, which
        # defeated the rate limit; sleep the full fractional duration.
        time.sleep(wait_time.total_seconds())

    r = requests.get(api_url, params=params, headers=headers)

    if RATE_LIMIT:
        RATE_LIMIT_LAST_CALL = datetime.now()

    # If getting the json representation did not work, our data is mangled
    try:
        r = r.json()
    except ValueError:
        raise WikiaError("Your request to the url \"{url}\" with the paramaters"
                         "\"{params}\" returned data in a format other than JSON."
                         "Please check your input data.".format(url=api_url,
                                                                params=params))

    # If we got a json response, then we know the format of the input was correct
    if "exception" in r:
        # NOTE(review): unpacking .values() relies on the key order of the
        # API's JSON object being (details, message, code) — confirm.
        details, message, error_code = r['exception'].values()
        if error_code == 408:
            # BUG FIX: previously raised with the undefined name `query`;
            # report the request parameters that timed out instead.
            raise HTTPTimeoutError(params)
        raise WikiaError("{}. {} ({})".format(message, details, error_code))
    return r
class WikiaError(Exception):
    """Base exception for errors raised by or while calling the Wikia API."""
    pass
| 2.640625 | 3 |
tests/context.py | itsdaveba/rubik-solver | 0 | 12764436 | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import rubik_solver
from rubik_solver.defs import available_moves | 1.492188 | 1 |
gateway/builders/verify_3d_enrollment_builder.py | TransactPRO/gw3-python-client | 1 | 12764437 | # The MIT License
#
# Copyright (c) 2017 Transact Pro.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from gateway.data_sets.request_parameters import (
RequestParameters,
RequestParametersTypes
)
from gateway.utils.data_structures import DataStructuresUtils
class Verify3dEnrollmentBuilder(object):
    """
    Payment data - information about credit card.

    Each ``add_*`` method registers its field as mandatory and merges the
    value into the shared client transaction data set (both updated in place).
    """

    def __init__(self, __client_transaction_data_set, __client_mandatory_fields):
        """
        Args:
            __client_transaction_data_set (dict): shared request data set, updated in place.
            __client_mandatory_fields (dict): shared mandatory-field registry, updated in place.
        """
        self.__input_data_structure = {}
        self.__data_structure_util = DataStructuresUtils
        self.__data_sets = RequestParameters
        self.__data_types = RequestParametersTypes
        self.__input_data_set = __client_transaction_data_set
        self.__input_mandatory_fields = __client_mandatory_fields

    def __set_field(self, field_name, field_type, value):
        # Shared implementation for all add_* methods (was triplicated):
        # mark the field as mandatory, stage it, then merge the staged
        # structure into the client data set.
        self.__input_mandatory_fields[field_name] = field_type
        self.__input_data_structure.update({field_name: value})
        self.__input_data_set.update(self.__input_data_structure)

    def add_pan_number(self, pan_number=None):
        """
        Add credit card number

        Args:
            pan_number (str): Credit card number
        """
        self.__set_field(self.__data_sets.PAYMENT_METHOD_DATA_PAN,
                         self.__data_types.PAYMENT_METHOD_DATA_PAN,
                         pan_number)

    def add_terminal_mid(self, terminal_id=None):
        """
        Add terminal MID when selecting terminal manually

        Args:
            terminal_id (str): Terminal MID when selecting terminal manually
        """
        self.__set_field(self.__data_sets.COMMAND_DATA_TERMINAL_MID,
                         self.__data_types.COMMAND_DATA_TERMINAL_MID,
                         terminal_id)

    def add_currency(self, iso_4217_ccy=None):
        """
        Add payment currency, ISO-4217 format

        Args:
            iso_4217_ccy (str): Currency, ISO-4217 format
        """
        self.__set_field(self.__data_sets.MONEY_DATA_CURRENCY,
                         self.__data_types.MONEY_DATA_CURRENCY,
                         iso_4217_ccy)
| 1.820313 | 2 |
python/fibonacci-recursion.py | kirigaine/Notes-and-Programs | 0 | 12764438 | """
*********************************************************
* *
* Project Name: Recursive Fibonacci Sequence *
* Author: github.com/kirigaine *
* Description: A simple program to put in how many *
* numbers of the Fibonacci Sequence you would like. *
* Requirements: Python Standard Library (re) *
* *
*********************************************************
"""
import re
def main():
    """Repeatedly prompt for a term index and print that Fibonacci term; -1 quits."""
    while True:
        term = userPrompt()
        if term == -1:
            break
        print("------------------------------------------------------------")
        print(str(fibonacci(term)))
        print("------------------------------------------------------------\n")
def userPrompt():
    """
    Prompt for the Fibonacci term to compute.

    Re-prompts until the user enters a non-negative integer or -1 (quit),
    then returns the value as an int.
    """
    while True:
        raw = input("How many digits of the Fibonacci Sequence would you like (-1 to quit): ")
        # BUG FIX: the pattern previously used [0-9]*, which accepted an empty
        # string and made int("") raise ValueError; require at least one digit.
        if re.search("^(([0-9]+)|(-1))$", raw):
            return int(raw)
        print("Invalid integer. Please try again entering a positive integer (or -1 to quit).\n")
def fibonacci(x):
    """
    Return the x-th Fibonacci number by naive recursion.

    1-indexed: fibonacci(1) == 0, fibonacci(2) == 1, and
    fibonacci(x) == fibonacci(x-1) + fibonacci(x-2) beyond that.
    Returns None when x is None or 0 ("if you request none, you get none").
    """
    if x is None or x == 0:
        return None
    if x == 1:
        return 0
    if x == 2:
        return 1
    return fibonacci(x - 1) + fibonacci(x - 2)
# Run the interactive loop only when executed as a script, not on import.
if __name__ == "__main__":
    main()
temboo/core/Library/Twilio/OutgoingCallerIDs/ListCallerIDs.py | jordanemedlock/psychtruths | 7 | 12764439 | <reponame>jordanemedlock/psychtruths
# -*- coding: utf-8 -*-
###############################################################################
#
# ListCallerIDs
# Returns a list of Outgoing Caller IDs for a Twilio account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListCallerIDs(Choreography):
    """Choreo: returns a list of Outgoing Caller IDs for a Twilio account."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the ListCallerIDs Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        choreo_path = '/Library/Twilio/OutgoingCallerIDs/ListCallerIDs'
        super(ListCallerIDs, self).__init__(temboo_session, choreo_path)

    def new_input_set(self):
        return ListCallerIDsInputSet()

    def _make_result_set(self, result, path):
        return ListCallerIDsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return ListCallerIDsChoreographyExecution(session, exec_id, path)
class ListCallerIDsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ListCallerIDs
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccountSID(self, value):
        """(required, string) The AccountSID provided when you signed up for a Twilio account."""
        self._set_input('AccountSID', value)

    def set_AuthToken(self, value):
        """(required, string) The authorization token provided when you signed up for a Twilio account."""
        self._set_input('AuthToken', value)

    def set_FriendlyName(self, value):
        """(optional, string) Only show the caller id resource that exactly matches this name."""
        self._set_input('FriendlyName', value)

    def set_PageSize(self, value):
        """(optional, integer) The number of results per page."""
        self._set_input('PageSize', value)

    def set_Page(self, value):
        """(optional, integer) The page of results to retrieve. Defaults to 0."""
        self._set_input('Page', value)

    def set_PhoneNumber(self, value):
        """(optional, string) Only return the caller id resource that exactly matches this phone number."""
        self._set_input('PhoneNumber', value)

    def set_ResponseFormat(self, value):
        """(optional, string) The format that the response should be in. Valid values are: json (the default) and xml."""
        self._set_input('ResponseFormat', value)

    def set_SubAccountSID(self, value):
        """(optional, string) The SID of the subaccount associated with the outgoing caller id. If not specified, the main AccountSID used to authenticate is used in the request."""
        self._set_input('SubAccountSID', value)
class ListCallerIDsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ListCallerIDs Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, json_string):
        """Parse *json_string* and return the resulting Python object."""
        # Parameter renamed from `str`, which shadowed the builtin type
        # (callers invoke this positionally).
        return json.loads(json_string)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
        """
        return self._output.get('Response', None)
class ListCallerIDsChoreographyExecution(ChoreographyExecution):
    """Tracks an in-flight ListCallerIDs execution and builds its result set."""

    def _make_result_set(self, response, path):
        result_set = ListCallerIDsResultSet(response, path)
        return result_set
| 1.96875 | 2 |
bejmy/transactions/migrations/0005_auto_20170606_0722.py | bejmy/backend | 0 | 12764440 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-06 07:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding balance-related fields to Transaction."""

    dependencies = [
        ('transactions', '0004_auto_20170606_0638'),
    ]

    operations = [
        # Flag presumably set once a transaction is balanced/reconciled —
        # confirm against the app's workflow.
        migrations.AddField(
            model_name='transaction',
            name='balanced',
            field=models.BooleanField(default=False, verbose_name='balanced'),
        ),
        # Optional timestamp recording when the transaction was balanced.
        migrations.AddField(
            model_name='transaction',
            name='balanced_at',
            field=models.DateTimeField(blank=True, null=True, verbose_name='balanced at'),
        ),
        # Optional timestamp of the transaction itself.
        migrations.AddField(
            model_name='transaction',
            name='datetime',
            field=models.DateTimeField(blank=True, null=True, verbose_name='datetime'),
        ),
    ]
| 1.570313 | 2 |
src/basic/web/flask/02using_templates/main.py | hbulpf/pydemo | 6 | 12764441 | <reponame>hbulpf/pydemo
import os
from flask import Flask, request, render_template, Markup
# WSGI application object; the route handlers below are registered on it.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def demo():
    """
    Render the demo form (GET) or reverse the submitted text (POST).

    On POST, `res_text` carries the HTML-formatted result for the template.
    """
    if request.method == 'GET':
        return render_template('index.html', input_text='', res_text='')
    inputText = request.form.get("input_text")
    # SECURITY FIX: escape the user-supplied text before the result is
    # wrapped in Markup; previously raw input was marked HTML-safe, allowing
    # script injection (XSS) in the rendered page.
    resText = Markup(formatRes(reverseText(Markup.escape(inputText))))
    return render_template('index.html', input_text=inputText, res_text=resText)
def formatRes(textList):
    """Wrap each entry of *textList* in a <p> element and return the joined HTML."""
    return '<p>{}</p>'.format('</p><p>'.join(textList))
# Sample transformation used by the demo view.
def reverseText(text):
    """Return the demo's two output lines: the input text and its reversal."""
    flipped = text[::-1]
    lines = ['Original text: %s' % (text)]
    lines.append('Converted text: %s' % (flipped))
    return lines
if __name__ == '__main__':
    # Listen on all interfaces so the host/container network can reach it.
    # NOTE(review): 0.0.0.0 exposes the Flask dev server externally — confirm
    # this is only used in development.
    app.run(host='0.0.0.0')
apps/dot_ext/apps.py | dtisza1/bluebutton-web-server | 25 | 12764442 | <gh_stars>10-100
from django.apps import AppConfig
class dot_extConfig(AppConfig):
    """Django app configuration for the OAuth Toolkit extension app."""
    # NOTE(review): class name breaks PascalCase; kept as-is because Django
    # settings may reference it by dotted path.
    name = 'apps.dot_ext'
    label = 'dot_ext'
    verbose_name = 'Django OAuth Toolkit Extension'

    def ready(self):
        # Imported for its side effect: registers the app's signal handlers.
        from . import signals  # noqa
| 1.429688 | 1 |
eastern/yaml_formatter/formatter.py | tuterdust/eastern | 0 | 12764443 | <reponame>tuterdust/eastern
import logging
import os
import re
from stevedore.driver import DriverManager
from stevedore.exception import NoMatches
from . import utils
from ..plugin import get_plugin_manager
from ..formatter import BaseFormatter
DRIVER_NS = 'eastern.command'
class Formatter(BaseFormatter):
    """
    Template formatter applying plugin hooks, ``${VAR}`` environment
    interpolation, and ``# command`` line directives.

    :param str raw: Template string
    :param str path: Path to template
    :param dict[str,str] env: List of variables
    """
    logger = logging.getLogger(__name__)

    def __init__(self, raw, path='', env=None):
        super().__init__(raw, path, env)
        self.plugin = get_plugin_manager()
        # Let every registered plugin contribute extra variables.
        envs = self.plugin.map_method('env_hook', formatter=self)
        for item in envs:
            self.env.update(**item)

    def format(self):
        """
        Format the template: pre-hooks, env interpolation, line directives,
        then post-hooks.

        :return: Formatted template
        """
        self.body = self.raw
        self.body = self.plugin.chain(
            'format_pre_hook', self.body, formatter=self)
        self.body = self.interpolate_env(self.body)
        self.body = self.parse_lines(self.body)
        self.body = self.plugin.chain(
            'format_post_hook', self.body, formatter=self)
        return self.body

    def interpolate_env(self, text):
        # Replace every ${NAME} occurrence using replace_env.
        return re.sub(r'\${([^}]+)}', self.replace_env, text)

    def replace_env(self, match):
        # re.sub callback: unknown variables are left verbatim with a warning.
        key = match.group(1)
        if key in self.env:
            return self.env[key]
        else:
            self.logger.warning('Interpolated variable not found: %s', key)
            return match.group()

    def parse_lines(self, body):
        # A directive may expand to several lines, hence the flatten.
        # NOTE(review): splitting on os.linesep is platform-dependent; '\n'
        # templates on Windows would not be split — confirm intent.
        body_lines = body.split(os.linesep)
        return os.linesep.join(
            utils.flatten([self.parse_line(line) for line in body_lines]))

    def parse_line(self, line):
        """
        Expand a single line. A comment-only line (``  # command args``) is
        treated as a directive: ``command`` is resolved as an
        ``eastern.command`` stevedore driver and its output replaces the
        line, re-indented. Any other line is returned unchanged.
        """
        if '#' not in line:
            return line
        line = self.plugin.chain('line_pre_hook', line, formatter=self)
        before, after = line.split('#', 1)
        # line must only have preceding spaces
        if not re.match(r'^\s*$', before):
            return line
        splitted = after.strip().split(' ', 1)
        command = splitted[0]
        # NOTE(review): args defaults to a list but becomes a str when
        # arguments are present — plugins must accept both; confirm.
        args = []
        if len(splitted) > 1:
            args = splitted[1]
        try:
            func = DriverManager(DRIVER_NS, command)
            func.propagate_map_exceptions = True
        except NoMatches:
            self.logger.debug('Command not found %s', command, exc_info=True)
            return line
        output = func(lambda ext: ext.plugin(args, line=line, formatter=self))
        if output is None:
            output = []
        elif isinstance(output, str):
            output = output.split(os.linesep)
        # Re-apply the directive's leading whitespace to every produced line.
        output = os.linesep.join([before + item for item in output])
        output = self.plugin.chain('line_post_hook', output, formatter=self)
        return output
| 2.203125 | 2 |
cla.py | ksIsCute/Rocket | 0 | 12764444 | <gh_stars>0
# API/bot token placeholder.
# NOTE(review): never commit a real token here; load it from an environment
# variable or an untracked config file instead.
cla = "TOKEN HERE"
| 0.933594 | 1 |
ion_functions/qc/perf/test_qc_performance.py | steinermg/ion-functions | 10 | 12764445 | <gh_stars>1-10
from ion_functions.data.perf.test_performance import PerformanceTestCase, a_year, a_day
from ion_functions.qc.qc_functions import dataqc_globalrangetest_minmax as grt
from ion_functions.qc.qc_functions import dataqc_spiketest as spiketest
from ion_functions.qc.qc_functions import dataqc_stuckvaluetest as stuckvalue
from ion_functions.qc.qc_functions import dataqc_polytrendtest as trend
from ion_functions.qc.qc_functions import dataqc_gradienttest as grad
from ion_functions.qc.qc_functions import dataqc_localrangetest as local
from ion_functions.qc.qc_functions import ntp_to_month
import numpy as np
import unittest
class TestQCPerformance(PerformanceTestCase):
    """
    Performance profiling of the QC functions over large synthetic inputs
    (a year's, or a couple of days', worth of samples).

    Note: uses ``xrange`` and ``np.float``, i.e. Python 2 era APIs.
    """

    def test_globalrangetest(self):
        stats = []
        # Constant 17.0 signal with a 40.0 spike at every 20th index.
        sample_set = np.empty(a_year, dtype=np.float)
        sample_set.fill(17.)
        indexes = [i for i in xrange(a_year) if not i%20]
        sample_set[indexes] = 40
        # NOTE(review): min and max limits are both 10, so every sample fails
        # the range check; presumably fine for timing, but confirm intent.
        mins = np.empty(a_year, dtype=np.float)
        maxs = np.empty(a_year, dtype=np.float)
        mins.fill(10)
        maxs.fill(10)
        self.profile(stats, grt, sample_set, mins, maxs)

    def test_spiketest(self):
        stats = []
        # Flat signal of 3.0 with spikes injected over the first two days.
        sample_set = np.empty(a_year, dtype=np.float)
        sample_set.fill(3)
        indexes = [i for i in xrange(a_day * 2) if not i%20]
        sample_set[indexes] = 40
        self.profile(stats, spiketest, sample_set, 0.1)

    def test_stuckvalue(self):
        stats = []
        # Ramp with a short run of repeated 3.33 values at the start.
        sample_set = np.arange(a_year, dtype=np.float)
        v = [4.83, 1.40, 3.33, 3.33, 3.33, 3.33, 4.09, 2.97, 2.85, 3.67]
        sample_set[0:len(v)] = v
        self.profile(stats, stuckvalue, sample_set, 0.001, 4)

    def test_trend(self):
        stats = []
        # Sinusoid with a 60-sample period, amplitude 6, offset 3.
        x = np.arange(a_year, dtype=np.float)
        sample_set = np.sin(np.pi * 2 * x/60.) * 6 + 3.
        self.profile(stats, trend, sample_set, np.arange(a_year, dtype=np.float))

    def test_gradient(self):
        stats = []
        # Linear ramp used both as data and as its own coordinate axis.
        sample_set = np.arange(a_year, dtype=np.float)
        self.profile(stats, grad, sample_set, sample_set, [-50,50], .1, [], 5)

    def test_local_range(self):
        stats = []
        # Sinusoidal data with identical [0, 5] limits at every depth.
        dat = np.sin(np.arange(a_year) / 60.) * 4 + 2
        z = np.arange(a_year)
        datlim = np.array([0,5] * a_year).reshape((a_year,2))
        datlimz = np.arange(a_year)
        self.profile(stats, local, dat, z, datlim, datlimz)

    def test_ntp_to_month(self):
        stats = []
        t0 = 1356998400 + 2208988800  # 2013-01-01 + NTP Offset
        dat = np.arange(a_year) + t0
        dat = np.asanyarray(dat, dtype=np.float)
        self.profile(stats, ntp_to_month, dat)
| 1.78125 | 2 |
simulation/task/visualization/AverageStress.py | SergioML9/tfm | 0 | 12764446 | from mesa.visualization.ModularVisualization import VisualizationElement
import numpy as np
class AverageStress(VisualizationElement):
    """Mesa chart element plotting the average user stress, one point per day."""

    package_includes = ["Chart.min.js"]
    local_includes = ["visualization/AverageStress.js"]

    def __init__(self, canvas_height, canvas_width):
        self.canvas_height = canvas_height
        self.canvas_width = canvas_width
        new_element = "new AverageStress({}, {})".format(canvas_width, canvas_height)
        self.js_code = "elements.push(" + new_element + ");"

    def render(self, model):
        # Emit a data point only once per simulated day; the JS side treats
        # -1 as "no new point".
        if not model.time.new_day:
            return -1
        total_stress = sum(user.stress for user in model.users)
        average = total_stress / len(model.users)
        return [average, "Day " + str(model.time.days)]
| 2.53125 | 3 |
backend/instruments/apps.py | codepanda64/logs-and-metas-for-stations | 0 | 12764447 | from django.apps import AppConfig
class InstrumentsConfig(AppConfig):
    """Django app configuration for the instruments app."""
    name = 'instruments'
| 1.101563 | 1 |
evaluation/models.py | JorgeScp/performance | 1 | 12764448 | <filename>evaluation/models.py
from django.db import models
from django.utils import timezone
from django.conf import settings
class Test_Assign(models.Model):
    """Assignment pairing an evaluated employee with their evaluator."""

    # The employee being evaluated; optional, removed with the user (CASCADE).
    evaluated = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name="employee_evaluated"
    )
    # The employee performing the evaluation; optional, removed with the user.
    evaluator = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name="employee_evaluator"
    )
    # Free-form relationship label between the two employees.
    # NOTE(review): confirm the expected values (e.g. peer / manager).
    relation = models.CharField(max_length=100, blank=True, null=True)
    # Workflow status; defaults to "Asignado" (Spanish for "assigned").
    done = models.CharField(max_length=100, blank=True, null=True, default="Asignado")

    def __str__(self):
        # NOTE(review): raises AttributeError when `evaluated` is NULL — confirm.
        return self.evaluated.first_name + ' ' + self.evaluated.last_name
| 2.40625 | 2 |
main/re-sub-regex-substitution/re-sub-regex-substitution.py | EliahKagan/old-practice-snapshot | 0 | 12764449 | <reponame>EliahKagan/old-practice-snapshot
#!/usr/bin/env python3
import re
# Lookarounds require a space on each side, so only free-standing '&&'/'||'
# tokens are rewritten and the surrounding spacing is preserved.
AND = re.compile(r'(?<= )&&(?= )')
OR = re.compile(r'(?<= )\|\|(?= )')

for _ in range(int(input())):
    line = input()
    line = AND.sub('and', line)
    print(OR.sub('or', line))
| 3.6875 | 4 |
lomapy/helpers/__init__.py | AlanTaranti/Lomapy | 2 | 12764450 | from .padronizar_resposta import padronizar_resposta_categoria
| 1.25 | 1 |