max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
assignment1/q2_neural.py | gyubokLee/CS224 | 125 | 12770851 | <reponame>gyubokLee/CS224
import numpy as np
import random
from q1_softmax import softmax
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
def affine_forward(x, w, b):
    """
    Computes the forward pass for an affine (fully-connected) layer.

    The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
    examples, where each example x[i] has shape (d_1, ..., d_k). We will
    reshape each input into a vector of dimension D = d_1 * ... * d_k, and
    then transform it to an output vector of dimension M.

    Inputs:
    - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
    - w: A numpy array of weights, of shape (D, M)
    - b: A numpy array of biases, of shape (M,) or (1, M)

    Returns a tuple of:
    - out: output, of shape (N, M)
    - cache: (x, w, b), saved for the backward pass
    """
    N = x.shape[0]
    D = np.prod(x.shape[1:])
    # Derive M from the weights instead of b.shape[1]: the documented 1-D
    # bias (M,) has no second axis, so the old code raised IndexError.
    M = w.reshape(D, -1).shape[1]
    out = np.dot(x.reshape(N, D), w.reshape(D, M)) + np.reshape(b, (1, M))
    return out, (x, w, b)
def affine_backward(dout, cache):
    """
    Computes the backward pass for an affine layer.

    Inputs:
    - dout: Upstream derivative, of shape (N, M)
    - cache: Tuple of:
      - x: Input data, of shape (N, d_1, ... d_k)
      - w: Weights, of shape (D, M)
      - b: Biases, of shape (M,) or (1, M)

    Returns a tuple of:
    - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
    - dw: Gradient with respect to w, of shape (D, M)
    - db: Gradient with respect to b, of shape (M,)
    """
    x, w, b = cache
    N = x.shape[0]
    D = np.prod(x.shape[1:])
    # View the weights as 2-D using their own size; the old M = b.shape[1]
    # raised IndexError for the documented 1-D bias (M,).
    w2d = w.reshape(D, -1)
    dx = np.dot(dout, w2d.T).reshape(x.shape)
    dw = np.dot(x.reshape(N, D).T, dout).reshape(w.shape)
    db = np.sum(dout, axis=0)
    return dx, dw, db
def sigmoid_forward(x):
    """
    Computes the forward pass for a sigmoid activation.

    Inputs:
    - x: Input data, numpy array of arbitrary shape;

    Returns a tuple (out, cache)
    - out: output of the same shape as x
    - cache: identical to out; required for backpropagation
    """
    # Evaluate the sigmoid once instead of twice; the activation value
    # itself doubles as the backward-pass cache.
    out = sigmoid(x)
    return out, out
def sigmoid_backward(dout, cache):
    """
    Computes the backward pass for a sigmoid layer.

    Inputs:
    - dout: Upstream derivative, same shape as the input
      to the sigmoid layer (x)
    - cache: sigmoid(x), as stored by the forward pass

    Returns:
    - dx: back propagated gradient with respect to x
    """
    sigmoid_of_x = cache
    dx = dout * sigmoid_grad(sigmoid_of_x)
    return dx
def forward_backward_prop(data, labels, params, dimensions):
    """
    Forward and backward propagation for a two-layer sigmoidal network

    Compute the forward propagation and for the cross entropy cost,
    and backward propagation for the gradients for all parameters.

    Inputs:
    - data: (N, Dx) batch of input row vectors
    - labels: (N, Dy) one-hot target labels
    - params: flat vector holding W1, b1, W2 and b2 back to back
    - dimensions: (Dx, H, Dy) input, hidden and output layer sizes

    Returns (cost, grad): the summed cross-entropy cost and the flattened
    parameter gradient, packed in the same order as `params`.
    """
    ### Unpack network parameters (do not modify)
    ofs = 0
    Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
    N = data.shape[0]
    W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
    ofs += Dx * H
    b1 = np.reshape(params[ofs:ofs + H], (1, H))
    ofs += H
    W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
    ofs += H * Dy
    b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
    ### YOUR CODE HERE: forward propagation
    hidden = np.dot(data,W1) + b1
    layer1_a = sigmoid(hidden)
    layer2 = np.dot(layer1_a, W2) + b2
    # need to calculate the softmax loss
    probs = softmax(layer2)
    # Summed (not averaged) cross entropy over the true-label probabilities.
    # The gradients below are likewise unnormalized, so the gradient check
    # stays consistent; the commented alternative below divides both by N.
    cost = - np.sum(np.log(probs[np.arange(N), np.argmax(labels, axis=1)]))
    ### END YOUR CODE
    ### YOUR CODE HERE: backward propagation
    #There is no regularization :/
    # dx -> sigmoid -> W2 * layer1_a + b -> sigmoid -> W1 * data + b1 -> ..
    # Gradient of softmax + cross entropy w.r.t. layer2 is (probs - labels).
    dx = probs.copy()
    dx -= labels
    dlayer2 = np.zeros_like(dx)
    gradW2 = np.zeros_like(W2)
    gradW1 = np.zeros_like(W1)
    gradb2 = np.zeros_like(b2)
    gradb1 = np.zeros_like(b1)
    gradW2 = np.dot(layer1_a.T, dx)
    gradb2 = np.sum(dx, axis=0)
    dlayer2 = np.dot(dx, W2.T)
    dlayer1 = sigmoid_grad(layer1_a) * dlayer2
    gradW1 = np.dot(data.T, dlayer1)
    gradb1 = np.sum(dlayer1, axis=0)
    # Decided to implement affine (forward and backward function)
    # sigmoid (forward and backward function)
    # These should work properly;
    # scores, cache_1 = affine_forward(data, W1, b1)
    # scores, cache_s1 = sigmoid_forward(scores)
    # scores, cache_2 = affine_forward(scores, W2, b2)
    # # need to calculate the softmax loss
    # probs = softmax(scores)
    # cost = -np.sum(np.log(probs[np.arange(N), np.argmax(labels)] + 1e-12)) / N
    # softmax_dx = probs.copy()
    # softmax_dx[np.arange(N), np.argmax(labels,axis=1)] -= 1
    # softmax_dx /= N
    # grads = {}
    # dlayer2, grads['W2'], grads['b2'] = affine_backward(softmax_dx, cache_2)
    # dlayer1s = sigmoid_backward(dlayer2, cache_s1)
    # dlayer1, grads['W1'], grads['b1'] = affine_backward(dlayer1s, cache_1)
    #softmax_dx is the gradient of the loss w.r.t. y_{est}
    ### END YOUR CODE
    ### Stack gradients (do not modify)
    grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
                           gradW2.flatten(), gradb2.flatten()))
    return cost, grad
def sanity_check():
    """
    Set up fake data and parameters for the neural network, and test using
    gradcheck.
    """
    print("Running sanity check...")
    N = 300
    dimensions = [10, 5, 10]
    data = np.random.randn(N, dimensions[0])    # each row will be a datum
    labels = np.zeros((N, dimensions[2]))
    for i in range(N):
        # one-hot label with a random hot index per datum
        labels[i,random.randint(0,dimensions[2]-1)] = 1
    # one flat vector sized to hold W1, b1, W2 and b2
    params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
        dimensions[1] + 1) * dimensions[2], )
    #cost, _ = forward_backward_prop(data, labels, params, dimensions)
    # # expect to get 1 in 10 correct
    #print(np.exp(-cost))
    # #cost is roughly correct
    gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,
        dimensions), params)
def your_sanity_checks():
    """
    Use this space add any additional sanity checks by running:
        python q2_neural.py
    This function will not be called by the autograder, nor will
    your additional tests be graded.
    """
    print("Running your sanity checks...")
    ### YOUR CODE HERE
    # No additional checks implemented yet.
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the gradient check plus any user-defined checks when executed
    # directly as a script.
    sanity_check()
    your_sanity_checks()
| 3.40625 | 3 |
functions/loginAoxiang.py | Steve-Xyh/AutoAoxiang | 7 | 12770852 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import getpass
from bs4 import BeautifulSoup
from . import formatString
from .getInfo import remove_cache
def login(user='', passwd='', url_login='https://uis.nwpu.edu.cn/cas/login', keyword='Log In Successful'):
'''
使用POST方法登录
#### Parameters::
user - 用户名
passwd - 密码
url_login - 登录链接
keyword - 登录成功显示的标志关键字
#### Returns::
返回session, 登录状态status
status = 1: 登录成功
status = 0: 密码正确, 登录失败
status = -1: 密码错误
'''
try:
session = requests.Session()
session.get(url_login)
# 登录页请求头
header = {
'Origin': 'https://uis.nwpu.edu.cn',
'Referer': url_login,
'Content-Type': 'application/x-www-form-urlencoded',
}
# 登录信息
loginData = {
'username': user,
'password': <PASSWORD>,
'currentMenu': 1,
'execution': 'e1s1',
'_eventId': 'submit',
}
res = session.post(url=url_login, data=loginData, headers=header).text
if res.find(keyword) != -1:
print(f'用户:{user}'+formatString.setColor(string='登录成功√', color='greenFore'))
status = 1
else:
if res.find('Invalid credentials.') != -1:
print(f'用户:{user}'+formatString.setColor(string='密码错误, 请重试', color='redBack'))
status = -1
else:
print(
f'用户:{user}' +
formatString.setColor(
string='密码正确, 登录失败, 准备重新登录...', color='redBack'
))
status = 0
except Exception as exc:
print(exc)
return session, status
def login_check(user='', passwd='', url_login='https://uis.nwpu.edu.cn/cas/login'):
'''
检查登录状态, 若登录失败则反复尝试
'''
session, status = login(user=user, passwd=<PASSWORD>, url_login=url_login)
while True:
if status == 1:
return session
else:
if status == -1:
remove_cache()
exit(-1)
else:
print('正在重新登录...')
session, status = login(user=user, passwd=<PASSWORD>, url_login=url_login)
return session
# if __name__ == "__main__":
# username = str(input('学号:'))
# password = str(<PASSWORD>('密码:'))
# login(user = username, passwd = password)
| 2.75 | 3 |
settings.py | khaledboka/point_to_line | 0 | 12770853 | <reponame>khaledboka/point_to_line
# This file is executed from cartoview.app_manager.settings using exec_file.
import os
import point_to_line

# Absolute directory on disk that contains the point_to_line package.
app_folder = os.path.dirname(point_to_line.__file__)
| 1.445313 | 1 |
LeetCode/Problems/31_next_permutation.py | hooyao/LeetCode-Py3 | 0 | 12770854 | <filename>LeetCode/Problems/31_next_permutation.py
import sys
class Solution:
    def nextPermutation(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Rearranges nums into the lexicographically next permutation; if
        nums is already the largest permutation, wraps to the smallest.
        """
        prev = nums[-1]
        for pivot in range(len(nums) - 2, -1, -1):
            if nums[pivot] < prev:
                # Suffix right of `pivot` is non-increasing; find the
                # rightmost suffix element strictly greater than the pivot.
                _, offset = self.bsearch(nums[pivot + 1:], nums[pivot])
                swap_at = pivot + 1 + offset
                nums[pivot], nums[swap_at] = nums[swap_at], nums[pivot]
                # Reverse the suffix so it becomes the smallest ordering.
                nums[pivot + 1:] = nums[:pivot:-1]
                return
            prev = nums[pivot]
        # Entire sequence was non-increasing: wrap around to sorted order.
        nums.reverse()

    def bsearch(self, arr, target):
        """Return (value, index) of the rightmost element of the
        non-increasing list `arr` that is strictly greater than `target`."""
        if target < arr[-1]:
            # Every element beats the target; the last one is the smallest.
            return arr[-1], len(arr) - 1
        if len(arr) <= 2:
            # Tiny suffix: a right-to-left scan is simplest.
            for idx in range(len(arr) - 1, -1, -1):
                if arr[idx] > target:
                    return arr[idx], idx
        else:
            # Invariant: arr[lo] > target >= arr[hi].
            lo, hi = 0, len(arr) - 1
            while hi - lo > 1:
                mid = (lo + hi) // 2
                if arr[mid] > target:
                    lo = mid
                else:
                    hi = mid
            return arr[lo], lo
def main(*args):
    """Demo driver: advance one sample list to its next permutation."""
    solver = Solution()
    sample = [2, 2, 0, 4, 3, 1]
    solver.nextPermutation(sample)
    print(sample)
if __name__ == '__main__':
    # Forward any command-line arguments to the demo driver.
    main(*sys.argv[1:])
| 3.828125 | 4 |
ElevatorBot_old/functions/dataLoading.py | LukasSchmid97/destinyBloodoakStats | 3 | 12770855 | <reponame>LukasSchmid97/destinyBloodoakStats
import asyncio
from datetime import datetime
from ElevatorBot.database.database import (
get_connection_pool,
lookupDiscordID,
insertPgcrActivities,
getPgcrActivity,
insertPgcrActivitiesUsersStats,
insertPgcrActivitiesUsersStatsWeapons,
getFailToGetPgcrInstanceId,
deleteFailToGetPgcrInstanceId,
updateDestinyDefinition,
getVersion,
updateVersion,
deleteEntries,
)
from ElevatorBot.backendNetworking.formating import embed_message
from ElevatorBot.networking.models import WebResponse
from ElevatorBot.networking.network import get_json_from_url
from ElevatorBot.static.config import CLANID
from ElevatorBot.static.dict import weaponTypeKinetic, weaponTypeEnergy, weaponTypePower
# gets the weapon (name, [hash1, hash2, ...]) for the search term for all weapons found
# more than one weapon can be found if it got reissued
async def searchForItem(ctx, search_term):
    """
    Resolve `search_term` against the Bungie armory search endpoint and
    return (name, [hash1, hash2, ...]) for the chosen weapon.

    A single name can map to several hashes when a weapon was reissued.
    If multiple differently named items match, the Discord user is asked
    to pick one interactively. Returns (None, None) on any failure
    (no match, timeout, or invalid reply).
    """
    # search for the weapon in the api
    info = await get_json_from_url(
        f"http://www.bungie.net/Platform/Destiny2/Armory/Search/DestinyInventoryItemDefinition/{search_term}/"
    )
    data = {}
    try:
        for weapon in info.content["Response"]["results"]["results"]:
            # only add weapon if its not a catalyst
            if "catalyst" not in weapon["displayProperties"]["name"].lower():
                n = weapon["displayProperties"]["name"]
                h = weapon["hash"]
                if n not in data:
                    data[n] = [h]
                else:
                    data[n].append(h)
        if not data:
            raise KeyError
    # if no weapon was found
    except KeyError:
        await ctx.send(
            hidden=True,
            embed=embed_message(
                f"Error",
                f"I do not know the weapon `{search_term}`. \nPlease try again",
            ),
        )
        return None, None
    # defer now that we know the weapon exists
    if not ctx.deferred:
        await ctx.defer()
    # check if we found multiple items with different names. Ask user to specify which one is correct
    index = 0
    if len(data) > 1:
        # asking user for the correct item
        text = "Multiple items can be found with that search term, please specify which item you meant by sending the corresponding number:\n\u200B"
        i = 1
        for name in data.keys():
            text += f"\n**{i}** - {name}"
            i += 1
        msg = await ctx.channel.send(embed=embed_message(f"{ctx.author.display_name}, I need one more thing", text))

        # to check whether or not the one that send the msg is the original author for the function after this
        def check(answer_msg):
            return answer_msg.author == ctx.author and answer_msg.channel == ctx.channel

        # wait for reply from original user to set the time parameters
        try:
            answer_msg = await ctx.bot.wait_for("message", timeout=60.0, check=check)
        # if user is too slow, let him know
        except asyncio.TimeoutError:
            await ctx.send(
                embed=embed_message(
                    f"Sorry {ctx.author.display_name}",
                    f"You took to long to answer my question, please start over",
                )
            )
            await msg._delete()
            return None, None
        # try to convert the answer to int, else throw error
        else:
            try:
                # check if int
                index = int(answer_msg.content) - 1
                # check if within length
                if (index + 1) > len(data):
                    raise ValueError
                await msg._delete()
                await answer_msg._delete()
            except ValueError:
                await msg._delete()
                await answer_msg._delete()
                await ctx.send(
                    embed=embed_message(
                        f"Sorry {ctx.author.display_name}",
                        f"{answer_msg.content} is not a valid number. Please start over",
                    )
                )
                return None, None
    # index 0 is either the only match or the user's 1-based choice shifted.
    name = list(data.keys())[index]
    return name, data[name]
async def getNameToHashMapByClanid(clanid):
    """Map each clan member's last-seen display name to their Destiny
    membership id; returns {} when the member list cannot be fetched."""
    url = "https://www.bungie.net/Platform/GroupV2/{}/members/".format(clanid)  # memberlist
    response = await get_json_from_url(url)
    if not response:
        return {}
    return {
        entry["destinyUserInfo"]["LastSeenDisplayName"]: entry["destinyUserInfo"]["membershipId"]
        for entry in response.content["Response"]["results"]
    }
async def getNameAndCrossaveNameToHashMapByClanid(clanid):
    """Map each clan member's Destiny membership id to the tuple
    (last-seen display name, bungie.net display name or "none");
    returns {} when the member list cannot be fetched."""
    url = "https://www.bungie.net/Platform/GroupV2/{}/members/".format(clanid)  # memberlist
    response = await get_json_from_url(url)
    if not response:
        return {}
    mapping = {}
    for entry in response.content["Response"]["results"]:
        destiny_info = entry["destinyUserInfo"]
        # cross-save accounts may lack a bungie.net profile entry
        bungie_name = (
            entry["bungieNetUserInfo"]["displayName"]
            if "bungieNetUserInfo" in entry
            else "none"
        )
        mapping[destiny_info["membershipId"]] = (destiny_info["LastSeenDisplayName"], bungie_name)
    return mapping
# async def get_pgcr(instance_id: int) -> WebResponse:
# return await get_json_from_url(
# f"https://www.bungie.net/Platform/Destiny2/Stats/PostGameCarnageReport/{instance_id}/"
# )
# async def updateManifest():
# # get the manifest
# manifest_url = "http://www.bungie.net/Platform/Destiny2/Manifest/"
# manifest = await get(manifest_url)
# if not manifest:
# print("Couldnt get manifest, aborting")
# return
#
# # check if the downloaded version is different to ours, if so drop entries and redownload info
# name = "Manifest"
# version = manifest.content["Response"]["version"]
# if version == await getVersion(name):
# return
#
# print("Starting manifest _update...")
#
# # version is different, so re-download:
# # For that we are using a transaction to not disrupt normal bot behaviour
# pool = await get_connection_pool()
# async with pool.acquire() as connection:
# async with connection.transaction():
# # Now Drop all the table entries and then loop through the relevant manifest locations and save them in the DB
# for definition, url in manifest.content["Response"]["jsonWorldComponentContentPaths"]["en"].items():
# if definition == "DestinyActivityDefinition":
# print("Starting DestinyActivityDefinition _update...")
# await deleteEntries(connection, "DestinyActivityDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if values["displayProperties"]["description"]
# else None,
# name=values["displayProperties"]["name"] if values["displayProperties"]["name"] else None,
# activityLevel=values["activityLevel"] if "activityLevel" in values else 0,
# activityLightLevel=values["activityLightLevel"],
# destinationHash=values["destinationHash"],
# placeHash=values["placeHash"],
# activityTypeHash=values["activityTypeHash"],
# isPvP=values["isPvP"],
# directActivityModeHash=values["directActivityModeHash"]
# if "directActivityModeHash" in values
# else None,
# directActivityModeType=values["directActivityModeType"]
# if "directActivityModeType" in values
# else None,
# activityModeHashes=values["activityModeHashes"] if "activityModeHashes" in values else None,
# activityModeTypes=values["activityModeTypes"] if "activityModeTypes" in values else None,
# )
#
# elif definition == "DestinyActivityTypeDefinition":
# print("Starting DestinyActivityTypeDefinition _update...")
# await deleteEntries(connection, "DestinyActivityTypeDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if "displayProperties" in values
# and "description" in values["displayProperties"]
# and values["displayProperties"]["description"]
# else None,
# name=values["displayProperties"]["name"]
# if "displayProperties" in values
# and "name" in values["displayProperties"]
# and values["displayProperties"]["name"]
# else None,
# )
#
# elif definition == "DestinyActivityModeDefinition":
# print("Starting DestinyActivityModeDefinition _update...")
# await deleteEntries(connection, "DestinyActivityModeDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# values["modeType"],
# description=values["displayProperties"]["description"]
# if values["displayProperties"]["description"]
# else None,
# name=values["displayProperties"]["name"] if values["displayProperties"]["name"] else None,
# hash=int(referenceId),
# activityModeCategory=values["activityModeCategory"],
# isTeamBased=values["isTeamBased"],
# friendlyName=values["friendlyName"],
# )
#
# elif definition == "DestinyCollectibleDefinition":
# print("Starting DestinyCollectibleDefinition _update...")
# await deleteEntries(connection, "DestinyCollectibleDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if values["displayProperties"]["description"]
# else None,
# name=values["displayProperties"]["name"] if values["displayProperties"]["name"] else None,
# sourceHash=values["sourceHash"] if "sourceHash" in values else None,
# itemHash=values["itemHash"] if "itemHash" in values else None,
# parentNodeHashes=values["parentNodeHashes"] if "parentNodeHashes" in values else None,
# )
#
# elif definition == "DestinyInventoryItemDefinition":
# print("Starting DestinyInventoryItemDefinition _update...")
# await deleteEntries(connection, "DestinyInventoryItemDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if values["displayProperties"]["description"]
# else None,
# name=values["displayProperties"]["name"] if values["displayProperties"]["name"] else None,
# classType=values["classType"] if "classType" in values else None,
# bucketTypeHash=values["inventory"]["bucketTypeHash"],
# tierTypeHash=values["inventory"]["tierTypeHash"],
# tierTypeName=values["inventory"]["tierTypeName"]
# if "tierTypeName" in values["inventory"]
# else None,
# equippable=values["equippable"],
# )
#
# elif definition == "DestinyRecordDefinition":
# print("Starting DestinyRecordDefinition _update...")
# await deleteEntries(connection, "DestinyRecordDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if values["displayProperties"]["description"]
# else None,
# name=values["displayProperties"]["name"] if values["displayProperties"]["name"] else None,
# hasTitle=values["titleInfo"]["hasTitle"],
# titleName=values["titleInfo"]["titlesByGender"]["Male"]
# if "titlesByGender" in values["titleInfo"]
# else None,
# objectiveHashes=values["objectiveHashes"] if "objectiveHashes" in values else None,
# ScoreValue=values["completionInfo"]["ScoreValue"] if "completionInfo" in values else None,
# parentNodeHashes=values["parentNodeHashes"] if "parentNodeHashes" in values else None,
# )
#
# elif definition == "DestinyInventoryBucketDefinition":
# print("Starting DestinyInventoryBucketDefinition _update...")
# await deleteEntries(connection, "DestinyInventoryBucketDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if "description" in values["displayProperties"]
# else None,
# name=values["displayProperties"]["name"] if "name" in values["displayProperties"] else None,
# category=values["category"],
# itemCount=values["itemCount"],
# location=values["location"],
# )
#
# elif definition == "DestinyPresentationNodeDefinition":
# print("Starting DestinyPresentationNodeDefinition _update...")
# await deleteEntries(connection, "DestinyPresentationNodeDefinition")
# result = await get(f"http://www.bungie.net{url}")
# # _update table
# for referenceId, values in result.content.items():
# await updateDestinyDefinition(
# connection,
# definition,
# int(referenceId),
# description=values["displayProperties"]["description"]
# if "description" in values["displayProperties"]
# else None,
# name=values["displayProperties"]["name"] if "name" in values["displayProperties"] else None,
# objectiveHash=values["objectiveHash"] if "objectiveHash" in values else None,
# presentationNodeType=values["presentationNodeType"],
# childrenPresentationNodeHash=[
# list(x.values())[0] for x in values["children"]["presentationNodes"]
# ]
# if "children" in values and values["children"]["presentationNodes"]
# else None,
# childrenCollectibleHash=[list(x.values())[0] for x in values["children"]["collectibles"]]
# if "children" in values and values["children"]["collectibles"]
# else None,
# childrenRecordHash=[list(x.values())[0] for x in values["children"]["records"]]
# if "children" in values and values["children"]["records"]
# else None,
# childrenMetricHash=[list(x.values())[0] for x in values["children"]["metrics"]]
# if "children" in values and values["children"]["metrics"]
# else None,
# parentNodeHashes=values["parentNodeHashes"] if "parentNodeHashes" in values else None,
# index=values["index"],
# redacted=values["redacted"],
# )
#
# # _update version entry
# await updateVersion(name, version)
#
# print("Done with manifest _update!")
# async def insertPgcrToDB(instanceID: int, activity_time: datetime, pcgr: dict):
# """Saves the specified PGCR data in the DB"""
# await insertPgcrActivities(
# instanceID,
# pcgr["activityDetails"]["referenceId"],
# pcgr["activityDetails"]["directorActivityHash"],
# activity_time,
# pcgr["startingPhaseIndex"],
# pcgr["activityDetails"]["mode"],
# pcgr["activityDetails"]["modes"],
# pcgr["activityDetails"]["isPrivate"],
# pcgr["activityDetails"]["membershipType"],
# )
#
# # loop though user and save info to db
# for user_pcgr in pcgr["entries"]:
# characterID = user_pcgr["characterId"]
# membershipID = user_pcgr["player"]["destinyUserInfo"]["membershipId"]
#
# await insertPgcrActivitiesUsersStats(
# instanceID,
# membershipID,
# characterID,
# user_pcgr["player"]["characterClass"] if "characterClass" in user_pcgr["player"] else "",
# user_pcgr["player"]["characterLevel"],
# user_pcgr["player"]["destinyUserInfo"]["membershipType"],
# user_pcgr["player"]["lightLevel"],
# user_pcgr["player"]["emblemHash"],
# user_pcgr["standing"],
# int(user_pcgr["values"]["assists"]["basic"]["value"]),
# int(user_pcgr["values"]["completed"]["basic"]["value"]),
# int(user_pcgr["values"]["deaths"]["basic"]["value"]),
# int(user_pcgr["values"]["kills"]["basic"]["value"]),
# int(user_pcgr["values"]["opponentsDefeated"]["basic"]["value"]),
# user_pcgr["values"]["efficiency"]["basic"]["value"],
# user_pcgr["values"]["killsDeathsRatio"]["basic"]["value"],
# user_pcgr["values"]["killsDeathsAssists"]["basic"]["value"],
# int(user_pcgr["values"]["score"]["basic"]["value"]),
# int(user_pcgr["values"]["activityDurationSeconds"]["basic"]["value"]),
# int(user_pcgr["values"]["completionReason"]["basic"]["value"]),
# int(user_pcgr["values"]["startSeconds"]["basic"]["value"]),
# int(user_pcgr["values"]["timePlayedSeconds"]["basic"]["value"]),
# int(user_pcgr["values"]["playerCount"]["basic"]["value"]),
# int(user_pcgr["values"]["teamScore"]["basic"]["value"]),
# int(user_pcgr["extended"]["values"]["precisionKills"]["basic"]["value"]),
# int(user_pcgr["extended"]["values"]["weaponKillsGrenade"]["basic"]["value"]),
# int(user_pcgr["extended"]["values"]["weaponKillsMelee"]["basic"]["value"]),
# int(user_pcgr["extended"]["values"]["weaponKillsSuper"]["basic"]["value"]),
# int(user_pcgr["extended"]["values"]["weaponKillsAbility"]["basic"]["value"]),
# )
#
# # loop though each weapon and save that info in the DB
# if "weapons" in user_pcgr["extended"]:
# for weapon_user_pcgr in user_pcgr["extended"]["weapons"]:
# await insertPgcrActivitiesUsersStatsWeapons(
# instanceID,
# characterID,
# membershipID,
# weapon_user_pcgr["referenceId"],
# int(weapon_user_pcgr["values"]["uniqueWeaponKills"]["basic"]["value"]),
# int(weapon_user_pcgr["values"]["uniqueWeaponPrecisionKills"]["basic"]["value"]),
# )
# async def updateMissingPcgr():
# # this gets called after a lot of requests, relaxing bungie first
# await asyncio.sleep(30)
#
# for (instanceID, activity_time) in await getFailToGetPgcrInstanceId():
# # instanceID = missing[0]
# # activity_time = missing[1]
#
# # check if info is already in DB, _delete and skip if so
# if await getPgcrActivity(instanceID):
# await deleteFailToGetPgcrInstanceId(instanceID)
# continue
#
# # get PGCR
# pcgr = await get_pgcr(instanceID)
#
# # only continue if we get a response this time
# if not pcgr:
# continue
#
# # add info to DB
# pcgr = pcgr.content["Response"]
# await insertPgcrToDB(instanceID, activity_time, pcgr)
#
# # _delete from to-do DB
# await deleteFailToGetPgcrInstanceId(instanceID)
# async def getClanMembers(client):
# # get all clan members {destinyID: discordID}
# memberlist = {}
# for member in (await get_json_from_url(f"https://www.bungie.net/Platform/GroupV2/{CLANID}/Members/")).content[
# "Response"
# ]["results"]:
# destinyID = int(member["destinyUserInfo"]["membershipId"])
# discordID = await lookupDiscordID(destinyID)
# if discordID is not None:
# memberlist.update({destinyID: discordID})
#
# return memberlist
def translateWeaponSlot(weapon_slot: int) -> str:
    """Returns weapon_slot as a human readable string.

    Raises KeyError for an unknown slot constant, matching a dict lookup.
    """
    if weapon_slot == weaponTypeKinetic:
        return "Kinetic"
    if weapon_slot == weaponTypeEnergy:
        return "Energy"
    if weapon_slot == weaponTypePower:
        return "Power"
    raise KeyError(weapon_slot)
| 2.75 | 3 |
Leader.py | Jubas/index-cost-sim | 1 | 12770856 | #imports
import Cluster
import math
class Leader():
    """A node in the leader hierarchy.

    A leader either heads a group of sub-leaders (inner tier) or directly
    represents a single Cluster (leaf tier, ``represents_cluster=True``).
    """

    def __init__(self, i, height, leader=None):
        # represents local level id
        self.identity = i
        # Leader of this leader (if any)
        self.leader = leader
        # height of this leader in the tiers
        self.height = height
        # contains cluster for leader;
        # either a specific cluster or multiple other subleaders
        self.cluster = []
        # T/F value to signify if this leader has a Cluster or not
        self.represents_cluster = False
        # Max capacity for leader - relevant when creating subleads;
        # '1' for leaders representing a cluster
        self.max_capacity = 1
        # Current size for leader;
        # '1' for leaders representing a cluster
        # > 1 for leaders representing other sub-leaders
        self.size = 1

    def sub_clustering(self, identity, per_lead, target_leader_size, hi_pct, height, l):
        """Recursively build `l` tiers of sub-leaders below this node.

        `identity` is threaded through the recursion so every created
        leader gets a unique id. Returns (leaders, clusters, next_id):
        all leaders and leaf clusters created below this node, plus the
        next unused identity value.
        """
        tmp_leads = []
        tmp_clusters = []
        if l > 1:
            # set max capacity to target_size + the added extra 'Hi' percent
            self.max_capacity = math.ceil((target_leader_size/100)*(100+hi_pct))
            # set size to target
            self.size = per_lead
            for i in range(per_lead):
                leader = Leader(identity, height, leader=self)
                identity += 1
                self.cluster.append(leader)
                tmp_leads.append(leader)
                # recurse one tier down; pick up the advanced identity counter
                leads, clusters, c_id = leader.sub_clustering(identity, per_lead, target_leader_size, hi_pct, height-1, (l-1))
                identity = c_id
                tmp_leads.extend(leads)
                tmp_clusters.extend(clusters)
            return tmp_leads, tmp_clusters, identity
        else:
            # leaf tier: this leader represents exactly one Cluster
            self.represents_cluster = True
            cluster = Cluster.Cluster(leader=self)
            self.cluster.append(cluster)
            return tmp_leads, self.cluster, identity

    # Used on cluster creation time to eliminate extra leaders.
    # Can fail badly if used incorrectly.
    def elim_leader(self, i):
        """Remove the direct sub-leader whose identity equals `i`."""
        #find index of said leader
        idx = [ix for ix, x in enumerate(self.cluster) if x.identity == i]
        #remove him
        # NOTE(review): raises IndexError when no sub-leader matches `i`
        del self.cluster[idx[0]]
        self.size = self.size -1

    # Used to insert into the Cluster from this leader
    def insert_into_cluster(self):
        """Insert into the leaf Cluster; only valid on leaf-tier leaders."""
        if self.represents_cluster == True:
            self.cluster[0].insert()
        else:
            raise RuntimeError("Tried calling 'insert_into_cluster' on a leader which does not represent a Cluster")

    def __str__(self):
        return "Leader: " + \
            "Cluster: " + str(self.cluster)
| 3.15625 | 3 |
examples/cli.py | b3cch4/hug | 1 | 12770857 | <gh_stars>1-10
"""A basic cli client written with hug"""
import hug
# NOTE(review): the annotation for `name` was redacted to "<NAME>" in the
# source dump; with hug, parameter annotations serve as CLI help strings.
@hug.cli(version="1.0.0")
def cli(name: '<NAME>', age: hug.types.number):
    """Says happy birthday to a user"""
    return "Happy {age} Birthday {name}!\n".format(**locals())


if __name__ == '__main__':
    # Run the hug-generated command line interface when executed directly.
    cli.interface.cli()
| 2.375 | 2 |
setup.py | tclarke/ricecomp-cfitsio-python | 2 | 12770858 | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
name="ricecomp-cfitsio",
version="1.0",
description="Rice compression and decompression for Python.",
long_description="Rice comression and decompression using the routines in the cfitsio library.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/tclarke/ricecomp-cfitsio-python.git",
license="BSD License",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving :: Compression',
],
keywords = ('compression','rice','lossless','module'),
requires = ["Cython (>=0.2)","numpy (>=1.7.0)"],
cmdclass = {'build_ext':build_ext},
ext_modules = [Extension("ricecomp", ["ricecomp.pyx"],
libraries=["cfitsio"])]
)
| 1.523438 | 2 |
discern/mmd/mmd.py | imsb-uke/discern | 0 | 12770859 | <filename>discern/mmd/mmd.py
"""Module to select the mmd loss function."""
import logging
from typing import Tuple
import numpy as np
_LOGGER = logging.getLogger(__name__)

# Prefer the compiled Cython kernel when available; otherwise fall back to
# the pure-NumPy implementation defined below in this module.
try:  # pragma: no cover
    from discern.mmd._mmd import _mmd_loop as _mmd_loop_c

    USE_C_IMPLEMENTATION = True
except (ImportError, ModuleNotFoundError):  # pragma: no cover
    _LOGGER.warning("Fallback to Python version, MMD computation may be slow")
    USE_C_IMPLEMENTATION = False
else:  # pragma: no cover
    _LOGGER.debug("Using cython version of MMD")
def _mmd_loop_py(dist_xy, dist_xx, dist_yy, scales, sigma):
# pylint: disable=too-many-locals
stat = np.zeros_like(scales)
n_x = np.float(dist_xx.shape[0])
n_y = np.float(dist_yy.shape[0])
for i, k in enumerate(scales):
val = k * sigma
k_xx = np.exp(-dist_xx / (2 * val))
np.fill_diagonal(k_xx, 0.0)
k_xxnd = np.sum(k_xx) / (n_x * n_x - n_x)
k_yy = np.exp(-dist_yy / (2 * val))
np.fill_diagonal(k_yy, 0.0)
k_yynd = np.sum(k_yy) / (n_y * n_y - n_y)
res1 = k_xxnd + k_yynd
res2 = np.exp(-dist_xy / (2 * val))
res2 = np.sum(res2) * 2. / (n_x * n_y)
stat[i] = res1 - res2
return np.max(stat)
# Bind the dispatch name `_mmd_loop` to whichever implementation loaded.
if USE_C_IMPLEMENTATION:  # pragma: no cover
    _mmd_loop = _mmd_loop_c  # pylint: disable=invalid-name
else:  # pragma: no cover
    _mmd_loop = _mmd_loop_py  # pylint: disable=invalid-name
def _calculate_distances(
        x: np.ndarray,
        y: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Calculate pairwise *squared* euclidean distances.
    Faster implementation than calling
    sklearn.metrics.pairwise.euclidean_distance three times, but
    without multiprocessing. Note that no square root is taken, so the
    returned matrices hold squared distances (the MMD kernels consume
    them directly).
    Args:
        x (np.ndarray): First array
        y (np.ndarray): Second array
    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]:
            Squared euclidian distance between x-y, x-x and y-y.
    """
    # pylint: disable=invalid-name
    # Row-wise squared norms: dot_x[i] = <x_i, x_i>, shaped so they broadcast
    # across the columns/rows of the Gram matrices below.
    dot_x = np.einsum('ij,ij->i', x, x)[:, np.newaxis]
    dot_y = np.einsum('ij,ij->i', y, y)[np.newaxis, :]
    dist_xy = np.matmul(x, y.T)
    dist_xx = np.matmul(x, x.T)
    dist_yy = np.matmul(y, y.T)
    # Expand ||a-b||^2 = <a,a> - 2<a,b> + <b,b> entirely with in-place ops to
    # avoid allocating extra (n x n) temporaries.
    np.multiply(dist_xy, -2., out=dist_xy)
    np.multiply(dist_xx, -2., out=dist_xx)
    np.multiply(dist_yy, -2., out=dist_yy)
    np.add(dist_xy, dot_x, out=dist_xy)
    np.add(dist_xy, dot_y, out=dist_xy)
    np.add(dist_xx, dot_x, out=dist_xx)
    np.add(dist_xx, dot_x.T, out=dist_xx)
    np.add(dist_yy, dot_y.T, out=dist_yy)
    np.add(dist_yy, dot_y, out=dist_yy)
    # Self-distances are exactly zero; overwrite floating-point round-off on
    # the diagonals.
    np.fill_diagonal(dist_xx, 0.)
    np.fill_diagonal(dist_yy, 0.)
    return dist_xy, dist_xx, dist_yy
def mmd_loss(random_cells: np.ndarray, valid_cells: np.ndarray,
             sigma: float) -> float:
    """Maximum mean discrepancy between generated and decoded cells.

    Both inputs are cast to float32, pairwise squared distances are computed
    once, and the multi-scale kernel statistic is evaluated by whichever
    ``_mmd_loop`` implementation (Cython or Python) was selected at import
    time.

    Args:
        random_cells (np.ndarray): Random generated cells.
        valid_cells (np.ndarray): Valid (decoded) cells.
        sigma (float): Precalculated Sigma value.

    Returns:
        float: MMD loss between random and valid cells.
    """
    generated = random_cells.astype(np.float32)
    decoded = valid_cells.astype(np.float32)
    dist_xy, dist_xx, dist_yy = _calculate_distances(generated, decoded)
    kernel_scales = np.linspace(0.8, 1.5, num=23, dtype=np.float32)
    return _mmd_loop(dist_xy, dist_xx, dist_yy, kernel_scales, np.float32(sigma))
| 2.421875 | 2 |
solver.py | ishaanshah/Eight_solver | 0 | 12770860 | <reponame>ishaanshah/Eight_solver
# This is a program that solves the 8 puzzle using the a* algorithm
# Game logic and design by arnisritins. https://github.com/arnisritins/15-Puzzle.
import copy
import time
from collections import deque
from operator import attrgetter
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException
# Initialize web driver to communicate with web page
driver = webdriver.Chrome(r"C:\Users\ishaa\chromedriver.exe")
driver.get("file:///E:/PyCharm%20Projects/8-Puzzle/index.html")
# grid for puzzle
grid = [[i for i in range(3)] for j in range(3)]
# solved state of puzzle
solved_grid = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
# structure for nodes
class Node:
    """Search-tree node for A*: a board configuration, its parent node,
    f/g/h scores, and the tile number moved to reach this configuration."""
    def __init__(self, config, parent, f, g, h, num):
        (self.config, self.parent, self.f,
         self.g, self.h, self.num) = config, parent, f, g, h, num
class Continue(Exception):
    # Control-flow marker: raised to escape the inner closed-set scan and
    # continue the enclosing neighbour loop (Python has no labelled continue).
    pass
# check if alert is present
def alert_present():
    """Dismiss a browser alert if one is open; return True if one was present."""
    try:
        driver.switch_to.alert.dismiss()
    except NoAlertPresentException:
        return False
    return True
# algorithm for solving the puzzle
def a_star():
    """Run A* from the current on-page board to the solved grid.

    Reads the board from the browser, searches with the Manhattan-distance
    heuristic, and on success replays the move sequence in the browser.
    Returns True when the goal is reached, False if the open set empties.
    """
    get_current_state()
    start = Node(grid, None, heuristic(grid), 0, heuristic(grid), None)
    end = Node(solved_grid, None, None, None, 0, None)
    closed_set = deque()
    open_set = deque()
    open_set.appendleft(start)
    while len(open_set) != 0:
        # NOTE(review): min() over a deque is an O(n) scan per iteration; a
        # heapq-based open set would be the conventional choice.
        current = min(open_set, key=attrgetter("f"))
        open_set.remove(current)
        for neighbour in find_neighbours(current):
            if same_state(neighbour.config, end.config):
                reconstruct_moves(current)
                return True
            # check if node is already evaluated (Continue escapes the scan)
            try:
                for node in closed_set:
                    if same_state(node.config, neighbour.config):
                        raise Continue
            except Continue:
                continue
            # if not evaluated yet, score the node and add it to the open set
            else:
                neighbour.g = current.g + 1
                neighbour.h = heuristic(neighbour.config)
                neighbour.f = neighbour.g + neighbour.h
                open_set.appendleft(neighbour)
        closed_set.appendleft(current)
    return False
# finds the neighbours of current config
def find_neighbours(node):
    """Return a Node for every board reachable by sliding one tile into the blank.

    The original unrolled the four directions into near-identical copies; a
    data-driven loop removes the duplication. Neighbour order (down, up,
    right, left) and the ``num`` field (the tile value that moved into the
    blank square) match the original behaviour exactly.
    """
    blank_row = [row for row in node.config if 0 in row][0]
    zero_x = node.config.index(blank_row)
    zero_y = blank_row.index(0)
    neighbours = []
    # Offsets in the original's order: down, up, right, left.
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        new_x, new_y = zero_x + dx, zero_y + dy
        if 0 <= new_x <= 2 and 0 <= new_y <= 2:
            config = copy.deepcopy(node.config)
            config[new_x][new_y], config[zero_x][zero_y] = \
                config[zero_x][zero_y], config[new_x][new_y]
            # config[zero_x][zero_y] now holds the tile that slid into the blank.
            neighbours.append(Node(config, node, None, None, None,
                                   config[zero_x][zero_y]))
    return neighbours
# find heuristics
def heuristic(config):
    """Manhattan-distance heuristic: total displacement of tiles 1-8 from
    their positions in the module-level ``solved_grid``."""
    total = 0
    for tile in range(1, 9):
        goal_row = next(row for row in solved_grid if tile in row)
        cur_row = next(row for row in config if tile in row)
        total += abs(solved_grid.index(goal_row) - config.index(cur_row))
        total += abs(goal_row.index(tile) - cur_row.index(tile))
    return total
# get current state of board
def get_current_state():
    """Read the 3x3 board from the web page into the module-level ``grid``.

    The blank tile is rendered as an empty cell and stored as 0.
    """
    # empty the list in place so other references to ``grid`` stay valid
    grid[:] = []
    for i in range(3):
        row = []
        for j in range(3):
            tile = driver.find_element_by_id("cell-{}-{}".format(i, j)).text
            # check for blank tile (rendered with no text)
            if tile == '':
                tile = "0"
            row.insert(j, int(tile))
        # insert row in grid
        grid.insert(i, row)
# scramble the puzzle
def scramble():
    """Click the page's scramble button and wait for the tile animation."""
    driver.find_element_by_id("scramble").click() # trigger a new random board
    time.sleep(0.6) # wait for animation
# check if the given grids have the same configuration
def same_state(grid1, grid2):
    """Return True when the two 3x3 grids hold identical tiles everywhere."""
    return all(grid1[i][j] == grid2[i][j]
               for i in range(3) for j in range(3))
# submit a
def submit(tile):
    """Type ``tile`` into the page's number box and click submit to move it."""
    input_box = driver.find_element_by_id("number")
    input_box.send_keys(str(tile))
    submit_btn = driver.find_element_by_id("submit")
    submit_btn.click()
    time.sleep(0.5) # wait for animation
def reconstruct_moves(node):
    """Replay the solution path ending at ``node`` by submitting each move.

    Walks parent links back to the root (whose ``num`` is None), reverses the
    collected tile numbers, and submits them to the page in order.
    """
    moves = []
    # traverse up the decision tree collecting the moved-tile numbers
    while node.num is not None:
        moves.append(node.num)
        node = node.parent
    moves.reverse()
    for move in moves:
        submit(move)
    # make the last move and close the alert
    # NOTE(review): the final move is hard-coded as tile 6, with tile 8 as a
    # fallback when no "solved" alert appears -- presumably tied to this
    # page's specific end state; confirm against the game markup.
    submit(6)
    if alert_present():
        pass
    else:
        submit(8)
        driver.switch_to.alert.dismiss()
def main():
    """Scramble the on-page board, then solve it with A*."""
    scramble()
    a_star()
if __name__ == "__main__":
main()
| 3.453125 | 3 |
scratch_2.py | ddsanchezc/PythonSourceCode | 0 | 12770861 | <filename>scratch_2.py<gh_stars>0
# Print a Fahrenheit-to-Celsius conversion table from 0 up to the entered
# temperature, in steps of 2 degrees.
temp = int(input("Ingrese la temperatura"))
print("f° c°")
# Bug fix: the original reused 'temp' as the loop variable, shadowing the
# user's input; a distinct name keeps the upper bound readable.
for fahrenheit in range(0, temp, 2):
    print(fahrenheit, " ", int((fahrenheit - 32) * 5 / 9))
| 3.953125 | 4 |
baselines/a2c/a2c.py | andrewgough94/baselines | 0 | 12770862 | import os
import os.path as osp
import gym
import time
import datetime
import joblib
import logging
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.atari_wrappers import wrap_deepmind
from baselines.common import tf_util
from baselines.a2c.utils import discount_with_dones
from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables
from baselines.a2c.utils import cat_entropy, mse
class Model(object):
    """A2C model: builds the TensorFlow (v1) graph holding a step policy for
    acting, a train policy for learning, and the combined policy-gradient /
    value / entropy loss optimized with RMSProp. Exposes ``train``, ``step``,
    ``value``, ``save`` and ``load`` as bound attributes."""
    def __init__(self, policy, ob_space, ac_space, nenvs, nsteps,
            ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
            alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
        sess = tf_util.make_session()
        nact = ac_space.n
        nbatch = nenvs*nsteps
        # Graph inputs: taken actions, advantages, discounted returns, and
        # the current (scheduled) learning rate.
        A = tf.placeholder(tf.int32, [nbatch])
        ADV = tf.placeholder(tf.float32, [nbatch])
        R = tf.placeholder(tf.float32, [nbatch])
        LR = tf.placeholder(tf.float32, [])
        # Defines step_model function and train_model functions
        # Pass each model a copy of 'sess'
        print("Constructing model... STEP_MODEL & TRAIN_MODEL: constructing step_model policy | " + str(policy))
        step_model = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)
        # train_model takes in the mini-batch produced by 5 step_models, NOTE: reuse = true
        train_model = policy(sess, ob_space, ac_space, nenvs*nsteps, nsteps, reuse=True)
        # neglogpac = -log pi(a|s) of the taken actions: cross-entropy of the
        # training model's policy logits against the action labels.
        neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=A)
        print("MAIN: neglocpac = sparse_softmax_cross_entropy_with_logits() inputs: ")
        print("MAIN: train_model_pi: " + str(train_model.pi))
        print("MAIN: labels: " + str(A))
        # policy gradient loss: batch mean of advantage * -log pi(a|s)
        pg_loss = tf.reduce_mean(ADV * neglogpac)
        # value function loss is mse(tf.squeeze(train_model.vf), R)
        # ^ in english, mse(model value prediction, actual Reward)
        # mse == means squared error, defined in a2c/utils.py
        vf_loss = tf.reduce_mean(mse(tf.squeeze(train_model.vf), R))
        # entropy of policy
        entropy = tf.reduce_mean(cat_entropy(train_model.pi))
        # total loss: policy gradient term, entropy bonus (weighted by
        # ent_coef), and value loss (weighted by vf_coef), as in the A3C paper
        loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
        # params gets trainable variables from model (weights of network?)
        params = find_trainable_variables("model")
        # computes gradients (change of weights, or direction of weights) using 'loss' and 'params' above
        # computes 'symbolic derivatives of sum 'loss' w.r.t 'params'
        # from tflow docs: 'gradients() adds ops to the graph to output the derivs of 'params'
        grads = tf.gradients(loss, params)
        if max_grad_norm is not None:
            # global-norm gradient clipping for stability
            grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
        # pair each gradient with its variable for apply_gradients
        grads = list(zip(grads, params))
        # RMSProp optimizes learning rate , check thesis notes
        trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
        # RMSProp pushes back new gradients over trainable variables to change weights
        _train = trainer.apply_gradients(grads)
        lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
        writer = tf.summary.FileWriter("/tmp/helloTensorBoard.txt")
        writer.add_graph(sess.graph)
        # Trains the model,
        # TODO: What is 'masks' input param
        # TODO: How often does train_model (steps thru train_model) get run vs. step_model
        # A: I think it does a 'train_model' for each mini-batch, which is currently 5 steps
        # Does a sess.run with train_model
        def train(obs, states, rewards, masks, actions, values):
            """Run one optimization step on a rollout mini-batch; returns
            (policy_loss, value_loss, policy_entropy)."""
            advs = rewards - values
            # advance the learning-rate schedule once per sample in the batch
            for step in range(len(obs)):
                cur_lr = lr.value()
            # td_map hooks up all inputs for train model?
            td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
            if states is not None:
                # recurrent policies also need their states and done-masks
                td_map[train_model.S] = states
                td_map[train_model.M] = masks
            # Policy Loss, Value Loss, and Policy Entropy calculations
            # Propagates losses backwards through the neural network?
            policy_loss, value_loss, policy_entropy, _ = sess.run(
                [pg_loss, vf_loss, entropy, _train],
                td_map
            )
            return policy_loss, value_loss, policy_entropy
        def save(save_path):
            """Dump the current parameter values to <logger dir>/model.pkl.

            NOTE(review): the ``save_path`` argument is ignored -- the file
            always goes to the logger directory; confirm this is intended.
            """
            path = logger.get_dir() + "/model.pkl"
            print("Logger dir: " + logger.get_dir())
            print("MODEL SAVED TO : " + str(path))
            ps = sess.run(params)
            #make_path(osp.dirname(save_path))
            joblib.dump(ps, path)
        def load(load_path):
            """Restore parameter values from a joblib pickle into the graph."""
            loaded_params = joblib.load(load_path)
            restores = []
            for p, loaded_p in zip(params, loaded_params):
                restores.append(p.assign(loaded_p))
            ps = sess.run(restores)
        self.train = train
        self.train_model = train_model
        self.step_model = step_model
        self.step = step_model.step
        self.value = step_model.value
        self.initial_state = step_model.initial_state
        self.save = save
        self.load = load
        tf.global_variables_initializer().run(session=sess)
class Runner(object):
    """Collects fixed-length rollouts from a vectorized environment.

    Each call to :meth:`run` steps the model's acting policy ``nsteps`` times
    across all ``env.num_envs`` environments and returns flattened
    mini-batches (observations, recurrent states, discounted rewards, masks,
    actions, value estimates) ready for ``Model.train``.
    """
    # Run is passed a model and nsteps default to 5, runs both models?
    def __init__(self, env, model, nsteps=5, gamma=0.99):
        """Store rollout configuration and allocate the observation buffer."""
        self.env = env
        self.model = model
        nh, nw, nc = env.observation_space.shape
        nenv = env.num_envs
        self.batch_ob_shape = (nenv*nsteps, nh, nw, nc)
        self.obs = np.zeros((nenv, nh, nw, nc), dtype=np.uint8)
        self.nc = nc
        # NOTE(review): the reset observations are discarded here, so the
        # first rollout acts on all-zero observations; upstream baselines
        # copies them into self.obs -- confirm this is intended.
        obs = env.reset()
        self.gamma = gamma
        self.nsteps = nsteps
        self.states = model.initial_state
        self.dones = [False for _ in range(nenv)]
    # run() steps through 'nsteps' of each 'nenvs' environment, adds actions values
    # 'nsteps' is 5 actions set above
    def run(self):
        """Roll the policy forward ``nsteps`` and return flattened batches."""
        # initializes mini-batch arrays
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
        mb_states = self.states
        # For each step n (5), the model steps through each environment without 'learning' anything, adds rewards
        for n in range(self.nsteps):
            actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)
            print("#######************###### a2c::::: run() iter: " + str(n))
            print("action(s): " + str(actions))
            print("values(s): " + str(values))
            # Records actions and values predicted from the model.step() call above
            mb_obs.append(np.copy(self.obs))
            mb_actions.append(actions)
            mb_values.append(values)
            mb_dones.append(self.dones)
            # Executes the actions predicted above
            obs, rewards, dones, _ = self.env.step(actions)
            print("a2c::::: run(): rewards: " + str(rewards))
            self.states = states
            self.dones = dones
            # zero out observations of environments that just finished
            # (loop variable renamed from 'n' to avoid shadowing the step index)
            for i, done in enumerate(dones):
                if done:
                    self.obs[i] = self.obs[i]*0
            self.obs = obs
            mb_rewards.append(rewards)
        mb_dones.append(self.dones)
        # batch of steps to batch of rollouts: stack and put the env axis first
        mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
        mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
        mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
        # Bug fix: np.bool was removed in NumPy 1.24; builtin bool is the
        # documented replacement.
        mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(1, 0)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]
        last_values = self.model.value(self.obs, self.states, self.dones).tolist()
        # discount/bootstrap off value fn
        # For each environment's (rewards, dones) row, compute discounted
        # returns, bootstrapping from the value estimate when not terminal.
        for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
            rewards = rewards.tolist()
            dones = dones.tolist()
            if dones[-1] == 0:
                rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
            else:
                rewards = discount_with_dones(rewards, dones, self.gamma)
            mb_rewards[n] = rewards
        # flatten (nenv, nsteps) into one batch axis for training
        mb_rewards = mb_rewards.flatten()
        mb_actions = mb_actions.flatten()
        mb_values = mb_values.flatten()
        mb_masks = mb_masks.flatten()
        return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values
def learn(policy, env, seed, nsteps=5, total_timesteps=int(80e6), vf_coef=0.5, ent_coef=0.01, max_grad_norm=0.5, lr=7e-4, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100):
    """Main A2C training loop.

    Builds a Model and a Runner over the vectorized ``env``, then alternates
    collecting ``nsteps``-long rollouts and training on them until
    ``total_timesteps`` environment steps are consumed. Logs statistics every
    ``log_interval`` updates and saves the model whenever the logged mean
    reward improves.
    """
    tf.reset_default_graph()
    set_global_seeds(seed)
    nenvs = env.num_envs
    print('rockin ' + str(nenvs))
    ob_space = env.observation_space
    ac_space = env.action_space
    print('observation space: ' + str(ob_space))
    print('action space: ' + str(ac_space))
    # Initializes model with all arguments obtained from run_atari
    # Model DOES NOT GET the env stack object
    model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
        max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
    # Intializes a runner using the above model, an environment, and nsteps to run '5'
    # env is the VectorFrameStack object created in run_atari, holds 16 environments
    # Runner DOES GET the env stack object
    # Runner DOES get the model, which lacks the env stack object
    runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
    file = open("testOutput.txt", "w")
    file.write(str(datetime.datetime.now()))
    nbatch = nenvs*nsteps
    tstart = time.time()
    maxAvgReward = 0
    # Todo: Figure out how frequently this is: loop 1 to 137,501
    for update in range(1, total_timesteps//nbatch+1):
        # print("__________ LEARN control loop: " + str(update) + " ------> " + str(total_timesteps//nbatch+1))
        # runner.run(), steps model, returns observations, states, rewards, masks, actions, values for all agents?
        obs, states, rewards, masks, actions, values = runner.run()
        # 80 observations, 16 envs * 5 steps
        # print("LEARNING FROM: len(obs): " + str(len(obs)))
        # Printing states: TypeError: object of type 'NoneType' has no len()
        #print("len(states): " + str(len(states)))
        # print("LEARNING FROM: len(rewards): " + str(len(rewards)))
        # model.train(), trains model, takes all that above data, processes it through train_model
        policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
        nseconds = time.time()-tstart
        fps = int((update*nbatch)/nseconds)
        if update % log_interval == 0 or update == 1:
            avgReward = 0
            rewardCount = 0
            for reward in rewards:
                # Prints 80 reward values? (5 training steps * 16 nenvs) = 80 reward values
                print("a2c::::: learn() reward(s): " + str(reward))
                avgReward += reward
                rewardCount += 1
            avgReward = avgReward / rewardCount
            ev = explained_variance(values, rewards)
            logger.record_tabular("nupdates", update)
            logger.record_tabular("total_timesteps", update*nbatch)
            logger.record_tabular("fps", fps)
            logger.record_tabular("policy_entropy", float(policy_entropy))
            logger.record_tabular("value_loss", float(value_loss))
            logger.record_tabular("avgReward", float(avgReward))
            logger.record_tabular("explained_variance", float(ev))
            logger.dump_tabular()
            # If avg reward of this batch is greater than previous avg reward, save model
            if avgReward > maxAvgReward:
                logger.log("Saving model due to mean reward increase: {} -> {}".format(
                    maxAvgReward, avgReward))
                # Save model
                model.save("modelName")
                # Set prevAvgReward = avgReward
                maxAvgReward = avgReward
    file.close()
    env.close()
| 1.960938 | 2 |
backend/modules/event_manager.py | miversen33/ProjectPsittacosaurus | 9 | 12770863 | <gh_stars>1-10
import time
CALLBACK_EXPIRED_EVENT = 'Listener Expired'
CALLBACK_REGISTERED_EVENT = 'New Listener Registered'
ALL_EVENTS = '*'
GLOBAL_EVENTS = [
ALL_EVENTS,
CALLBACK_EXPIRED_EVENT,
CALLBACK_REGISTERED_EVENT,
]
__callbacks = {}
def emit(event: str, *args, **kwargs) -> None:
    '''
    Fires off the string event and notifies all registered (non expired)
    listeners. If a callback returns True, the event is considered consumed
    and remaining listeners are not notified.

    :param event (string):
        This is the event string that is fired
    :param args (iterable, Optional):
        Any additional args to pass to the callable on the event fire
    :param kwargs (dict, Optional):
        Any additional kwargs to pass.
        Note: kwargs['event'] will be the string event that was fired, and
        this will override any item associated with kwargs['event'].
        Consider 'event' a reserved keyword
    :return None:
    '''
    # Hoisted out of the loop: the event name is the same for every callback
    # (the original reassigned kwargs['event'] on each iteration).
    kwargs['event'] = event
    for callback in _get_callbacks(event):
        consumed = callback(*args, **kwargs)
        # Only a literal True consumes the event, per the documented contract.
        if consumed is True:
            break
def register(event: str, callback: callable, ttl: float=float('inf')) -> None:
    '''
    Registers a callback for the given event string.

    :param event (string):
        The string event to listen for. May be one of the globals in
        event_manager.GLOBAL_EVENTS, any custom string, or '*' to be called
        for every fired event.
    :param callback (callable):
        The callable invoked when a matching event fires. It may return True
        to mark the event as consumed.
        @see event_manager.emit
    :param ttl (float, Optional):
        Lifetime of the registration in seconds; defaults to infinity.
    :return None:
    '''
    listeners = __callbacks.setdefault(event, {})
    # Store the absolute expiry timestamp (infinity means "never expires").
    listeners[callback] = ttl if ttl == float('inf') else time.time() + ttl
    emit(CALLBACK_REGISTERED_EVENT, callback, ttl)
def unregister(event: str, callback: callable) -> None:
    '''
    Removes the given callback's registration for the given event.

    :param event (string):
        The string event to unregister from
    :param callback (callable):
        The callback to unregister
    :return None:
    '''
    __callbacks[event].pop(callback)
def _get_callbacks(event: str) -> list:
    '''
    Gets a list of all non-expired callbacks for the provided event
    (including wildcard '*' listeners).
    Note, auto expires any callbacks that have met/passed their ttl
    :param event (string):
        The string event to get callbacks for
    :return list:
    '''
    callbacks = []
    for _event in [event, ALL_EVENTS]:
        # Bug fix: snapshot the items before iterating. unregister() deletes
        # from the same dict, and mutating a dict while iterating its view
        # raises RuntimeError in Python 3.
        for callback, ttl in list(__callbacks.get(_event, {}).items()):
            if time.time() >= ttl:
                unregister(_event, callback)
                emit(CALLBACK_EXPIRED_EVENT, callback)
                continue
            callbacks.append(callback)
    return callbacks
| 2.875 | 3 |
app/app.py | tlsrio/ml | 0 | 12770864 | <gh_stars>0
from flask import Flask, request, json
import pipelines
from scrapers.scrapeContent import getContent, htmlToText
from scrapers.fetchArticles import fetchArticles
app = Flask(__name__)
@app.route("/api/")
def connected():
    """Health-check endpoint confirming the API is reachable."""
    return "Connected"
@app.route("/api/summarize", methods=["POST"])
def summarize():
    """Return a summary of the posted JSON 'text' field."""
    payload = request.get_json()
    return pipelines.getSummary(payload["text"])
@app.route("/api/QA", methods=["POST"])
def questions():
    """Answer the posted 'question' against the posted 'text'."""
    payload = request.get_json()
    return pipelines.getAnswer(payload["question"], payload["text"])
@app.route("/api/sentiment", methods=["POST"])
def sentiment():
    """Return sentiment analysis of the posted JSON 'text' field."""
    payload = request.get_json()
    return pipelines.getSentiment(payload["text"])
@app.route("/api/NER", methods=["POST"])
def NER():
    """Return named entities found in the posted JSON 'text' field."""
    payload = request.get_json()
    return pipelines.getNER(payload["text"])
@app.route("/api/classification", methods=["POST"])
def classification():
    """Return the category predicted for the posted JSON 'text' field."""
    payload = request.get_json()
    return pipelines.getCategory(payload["text"])
####################################################
# Scraping Routes
@app.route("/api/urlToText", methods=["POST"])
def getUrlText():
    """Download the page at the posted 'link' and return its plain text."""
    payload = request.get_json()
    raw_html = getContent(payload["link"])
    return json.dumps({"text": htmlToText(raw_html)})
@app.route("/api/fetchArticles", methods=["POST"])
def articles():
    """Fetch news articles matching the posted JSON parameters."""
    return fetchArticles(request.get_json())
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
# for x-www-form-urlencoded POST requests
# TODO: should we be able to take both types of requests and differentiate them?
# @app.route('/api/summarize', methods=['POST'])
# def summarize():
# result = getSummary(request.form['text'])
# return result
# @app.route('/api/QA', methods=['POST'])
# def questions():
# result = getAnswer(request.form['question'], request.form['text'])
# return result
# @app.route('/api/sentiment', methods=['POST'])
# def sentiment():
# result = getSentiment(request.form['text'])
# return result
# @app.route('/api/NER', methods=['POST'])
# def NER():
# result = getNER(request.form['text'])
# return result
# @app.route('/api/classification', methods=['POST'])
# def classification():
# result = getCategory(request.form['text'])
# return result
| 2.671875 | 3 |
biosteam/units/_multi_effect_evaporator.py | tylerhuntington222/biosteam | 0 | 12770865 | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import numpy as np
import biosteam as bst
from .. import Unit
from ._mixer import Mixer
from ._hx import HXutility
from ._flash import Evaporator_PV, Evaporator_PQ
from .design_tools import (
compute_vacuum_system_power_and_cost,
compute_heat_transfer_area
)
from thermosteam import MultiStream, Stream, settings
import flexsolve as flx
from warnings import warn
from .design_tools import heat_transfer as ht
__all__ = ('MultiEffectEvaporator',)
log = np.log
exp = np.exp
# Table 22.32 Product process and design (pg 592)
# Name: ('Area range (m2)', 'Cost(A) (USD)', 'U (kJ/(hr*m2*K)))', 'Material')
evaporators = {'Horizontal tube':
((9.29, 743.224),
lambda A, CE: CE*2.304*A**0.53,
4906.02,
'Carbon steel'),
'Long-tube vertical':
((9.29, 743.224),
lambda A, CE: CE*3.086*A**0.55,
8176.699,
'Carbon steel'),
'Forced circulation':
((13.935, 8000),
lambda A, CE: CE/500*exp(8.2986 + 0.5329*log(A*0.0929)-0.000196*log(A*0.0929)**2),
10731.918,
'Carbon steel'),
'Falling film':
((13.935, 371.612),
lambda A, CE: CE*7.416*A**0.55,
10220.874,
'Stainless steel tubes/Carbon steel shell')}
class MultiEffectEvaporator(Unit):
    """
    Creates evaporators with pressures given by P (a list of pressures).
    Adjusts first evaporator vapor fraction to satisfy an overall fraction
    evaporated. All evaporators after the first have zero duty. Condenses
    the vapor coming out of the last evaporator. Pumps all liquid streams
    to prevent back flow in later parts. All liquid evaporated is ultimately
    recondensed. Cost is based on required heat transfer area. Vacuum system
    is based on air leakage. Air leakage is based on volume, as given by
    residence time `tau` and flow rate to each evaporator.
    Parameters
    ----------
    ins : stream
        Inlet.
    outs : stream sequence
        * [0] Solid-rich stream.
        * [1] Condensate stream.
    component : str
        Component being evaporated.
    P : tuple[float]
        Pressures describing each evaporator (Pa).
    V : float
        Overall molar fraction of component evaporated.
    P_liq : tuple
        Liquid pressure after pumping (Pa).
    """
    line = 'Multi-Effect Evaporator'
    _units = {'Area': 'm^2',
              'Volume': 'm^3'}
    # Bare-module (installation) factors per cost item.
    _BM = {'Evaporators': 2.45,
           'Liquid-ring pump': 1.0,
           'Condenser': 3.17}
    _N_outs = 2
    _N_heat_utilities = 2
    #: Residence time (hr)
    tau = 0.30
    # Evaporator type
    _Type = 'Forced circulation'
    # Data for simmulation and costing (from the module-level `evaporators`
    # table: area range, cost correlation, U, material)
    _evap_data = evaporators[_Type]
    @property
    def Type(self):
        """Evaporation type."""
        return self._Type
    @Type.setter
    def Type(self, evap_type):
        try:
            self._evap_data = evaporators[evap_type]
        except KeyError:
            # slice strips "dict_keys([" ... "])" from the repr for the message
            dummy = str(evaporators.keys())[11:-2]
            raise ValueError(f"Type must be one of the following: {dummy}")
        self._Type = evap_type
    def __init__(self, ID='', ins=None, outs=(), thermo=None, *, P, V):
        Unit.__init__(self, ID, ins, outs, thermo)
        # Unpack
        out_wt_solids, liq = self.outs
        self.V = V #: [float] Overall molar fraction of component evaporated.
        self._V1 = V/2.
        # Create components
        self._N_evap = n = len(P) # Number of evaporators
        first_evaporator = Evaporator_PV(None, outs=(None, None), P=P[0])
        # Put liquid first, then vapor side stream
        # (local name shadows the module-level `evaporators` cost table)
        evaporators = [first_evaporator]
        for i in range(1, n):
            evap = Evaporator_PQ(None, outs=(None, None, None), P=P[i], Q=0)
            evaporators.append(evap)
        condenser = HXutility(None, outs=Stream(None), V=0)
        self.heat_utilities = (first_evaporator.heat_utilities[0],
                               condenser.heat_utilities[0])
        mixer = Mixer(None, outs=Stream(None))
        self.components = {'evaporators': evaporators,
                           'condenser': condenser,
                           'mixer': mixer}
    def _run(self):
        """Simulate the evaporator train, solving the first effect's vapor
        fraction so the overall evaporated fraction equals ``self.V``."""
        out_wt_solids, liq = self.outs
        ins = self.ins
        n = self._N_evap # Number of evaporators
        # Set-up components
        components = self.components
        evaporators = components['evaporators']
        first_evaporator, *other_evaporators = evaporators
        first_evaporator.ins[:] = [i.copy() for i in ins]
        condenser = components['condenser']
        mixer = components['mixer']
        # Put liquid first, then vapor side stream
        ins = [first_evaporator.outs[1], first_evaporator.outs[0]]
        for evap in other_evaporators:
            evap.ins[:] = ins
            ins = [evap.outs[1], evap.outs[0]]
        def compute_overall_vapor_fraction(v1):
            # Residual: overall evaporated fraction (given first-effect V=v1)
            # minus the specification self.V; root-solved below.
            v_overall = v1
            first_evaporator.V = v1
            first_evaporator._run()
            for evap in other_evaporators:
                evap._run()
                v_overall += (1-v_overall) * evap.V
            return v_overall - self.V
        # Bracketed root solve for the first effect's vapor fraction.
        x0 = 0.0001
        x1 = 0.9990
        y0 = compute_overall_vapor_fraction(x0)
        y1 = compute_overall_vapor_fraction(x1)
        self._V1 = flx.IQ_interpolation(compute_overall_vapor_fraction,
                                        x0, x1, y0, y1, self._V1,
                                        xtol=0.0001, ytol=0.001,
                                        checkiter=False)
        # Condensing vapor from last effector
        outs_vap = evaporators[-1].outs[0]
        condenser.ins[:] = [outs_vap]
        condenser._run()
        outs_liq = [condenser.outs[0]] # list containing all output liquids
        # Unpack other output streams
        out_wt_solids.copy_like(evaporators[-1].outs[1])
        for i in range(1, n):
            evap = evaporators[i]
            outs_liq.append(evap.outs[2])
        # Mix liquid streams
        mixer.ins[:] = outs_liq
        mixer._run()
        liq.copy_like(mixer.outs[0])
        # Final outputs come from a single VLE flash of the feed at the last
        # effect's pressure and the overall vapor fraction.
        mixed_stream = MultiStream(thermo=self.thermo)
        mixed_stream.copy_flow(self.ins[0])
        mixed_stream.vle(P=evaporators[-1].P, V=self.V)
        out_wt_solids.mol = mixed_stream.imol['l']
        liq.mol = mixed_stream.imol['g']
    def _design(self):
        """Size heat-transfer areas and the vacuum system; also fills in
        purchase costs (this unit costs within _design)."""
        # This functions also finds the cost
        A_range, C_func, U, _ = self._evap_data
        components = self.components
        evaporators = components['evaporators']
        Design = self.design_results
        Cost = self.purchase_costs
        CE = bst.CE
        # First effect: area from utility-side LMTD.
        first_evaporator = evaporators[0]
        hu = first_evaporator.heat_utilities[0]
        duty = first_evaporator.H_out - first_evaporator.H_in
        Q = abs(duty)
        Tci = first_evaporator.ins[0].T
        Tco = first_evaporator.outs[0].T
        hu(duty, Tci, Tco)
        Th = hu.inlet_utility_stream.T
        LMTD = ht.compute_LMTD(Th, Th, Tci, Tco)
        ft = 1
        A = abs(compute_heat_transfer_area(LMTD, U, Q, ft))
        self._evap_costs = evap_costs = [C_func(A, CE)]
        # Find condenser requirements
        condenser = components['condenser']
        condenser._design()
        condenser._cost()
        Cost['Condenser'] = condenser.purchase_cost
        # Find area and cost of evaporators
        As = [A]
        A_min, A_max = A_range
        for evap in evaporators[1:]:
            Q = evap.design_results['Heat transfer']
            Tc = evap.outs[0].T
            Th = evap.outs[2].T
            LMTD = Th - Tc
            A = compute_heat_transfer_area(LMTD, U, Q, ft)
            As.append(A)
            if settings.debug and not A_min < A < A_max:
                warn(f'area requirement ({A}) is out of range, {A_range}')
            evap_costs.append(C_func(A, CE))
        self._As = As
        Design['Area'] = A = sum(As)
        Design['Volume'] = total_volume = self._N_evap * self.tau * self.ins[0].F_vol
        Cost['Evaporators'] = sum(evap_costs)
        # Calculate power
        # NOTE(review): `evap` here is the loop variable leaking out of the
        # loop above -- with a single-effect system (n == 1) the loop never
        # runs and this raises NameError; confirm n >= 2 is guaranteed.
        power, cost = compute_vacuum_system_power_and_cost(
            F_mass=0, F_vol=0, P_suction=evap.outs[0].P,
            vessel_volume=total_volume,
            vacuum_system_preference='Liquid-ring pump')
        Cost['Liquid-ring pump'] = cost
        self.power_utility(power)
| 1.804688 | 2 |
bank/src/deposit_app/views/__init__.py | yuramorozov01/bank_system | 0 | 12770866 | from deposit_app.views.deposit_contract import DepositContractViewSet
from deposit_app.views.deposit_type import DepositTypeViewSet
| 1.101563 | 1 |
Pandas-dataframe/code.py | Sufi737/ga-learner-dsmp-repo | 2 | 12770867 | <filename>Pandas-dataframe/code.py
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# Load the loan dataset ('path' is provided by the grading environment).
bank = pd.read_csv(path, sep=',')

# code starts here
# Split features by dtype: object columns are categorical, the rest numeric.
categorical_var = bank.select_dtypes(include='object')
print(categorical_var)
numerical_var = bank.select_dtypes(include='number')
print(numerical_var)
# code ends here


# --------------
# code starts here
# Drop the identifier column, then impute missing values with each column's
# mode. Bug fix: the original called banks.fillna(mode), passing the imported
# scipy.stats.mode *function* object, which fills every NaN with that
# function instead of the data's modal values. bank_mode.iloc[0] is a Series
# of one modal value per column, which fillna aligns column-wise.
del bank['Loan_ID']
banks = bank
bank_mode = banks.mode()
banks = banks.fillna(bank_mode.iloc[0])
print(banks)
# code ends here


# --------------
# code starts here
# check the avg_loan_amount
avg_loan_amount = pd.pivot_table(banks, values="LoanAmount", index=["Gender", "Married", "Self_Employed"], aggfunc=np.mean)
print(avg_loan_amount)
# code ends here


# --------------
# code starts here
# Approval percentages for self-employed vs. non-self-employed applicants.
loan_approved_se = banks[(banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == 'Y')]
loan_approved_nse = banks[(banks['Self_Employed'] == "No") & (banks['Loan_Status'] == 'Y')]
se_count = len(loan_approved_se.index)
nse_count = len(loan_approved_nse.index)
# 614 is the total number of rows in this fixed exercise dataset.
percentage_se = (se_count / 614) * 100
percentage_nse = (nse_count / 614) * 100
print(percentage_se)
print(percentage_nse)
# code ends here


# --------------
# code starts here
# Loan term in years; count applicants with terms of 25 years or more.
loan_term = banks['Loan_Amount_Term'].apply(lambda x: x / 12)
big_loan_term_list = loan_term[loan_term >= 25]
big_loan_term = len(big_loan_term_list)
print(big_loan_term)
# code ends here


# --------------
# code starts here
# Mean applicant income and credit history grouped by loan status.
# Bug fix: selecting columns with a bare tuple ['A', 'B'] after groupby was
# deprecated and removed in modern pandas -- pass a list of column names.
loan_groupby = banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.agg(np.mean)
print(mean_values)
# code ends here
| 3.3125 | 3 |
tests/lemmatizer/test_lemmatizer.py | zavliju/nlp-id | 46 | 12770868 | <reponame>zavliju/nlp-id
from nlp_id.lemmatizer import Lemmatizer
def test_lemmatizer():
    """Lemmatizer should reduce inflected Indonesian words to their roots."""
    lemmatizer = Lemmatizer()
    source_text = 'Saya sedang mencoba'
    lemmatized = lemmatizer.lemmatize(source_text)
    assert lemmatized == 'saya sedang coba'
| 2.703125 | 3 |
school_management_app/StudentViews.py | iamtomc/SchoolManagementSystem-Mandakh | 6 | 12770869 | import datetime
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import FileSystemStorage
from school_management_app.models import Students, Courses, Subjects, CustomUser, Attendance, AttendanceReport, \
LeaveReportStudent, FeedBackStudent, NotificationStudent, StudentResult, SessionYearModel, News, SComment
def student_home(request):
    """Render the student dashboard: overall attendance totals, per-subject
    present/absent counts (chart series) and the student's notifications."""
    student_obj=Students.objects.get(admin=request.user.id)
    attendance_total=AttendanceReport.objects.filter(student_id=student_obj).count()
    attendance_present=AttendanceReport.objects.filter(student_id=student_obj,status=True).count()
    attendance_absent=AttendanceReport.objects.filter(student_id=student_obj,status=False).count()
    course=Courses.objects.get(id=student_obj.course_id.id)
    subjects=Subjects.objects.filter(course_id=course).count()
    # NOTE(review): subjects_data is computed but never passed to the template.
    subjects_data=Subjects.objects.filter(course_id=course)
    # NOTE(review): every other model here is queried via ``.objects``;
    # ``SessionYearModel.object`` looks like a typo unless the model declares
    # a custom manager named ``object`` -- verify against the models module.
    session_obj=SessionYearModel.object.get(id=student_obj.session_year_id.id)
    user=CustomUser.objects.get(id=request.user.id)
    student=Students.objects.get(admin=user)
    student_notifcation=Students.objects.get(admin=request.user.id)
    notifications=NotificationStudent.objects.filter(student_id=student_notifcation.id)
    subject_name=[]
    data_present=[]
    data_absent=[]
    subject_data=Subjects.objects.filter(course_id=student_obj.course_id)
    # Per-subject present/absent tallies feed the dashboard chart.
    for subject in subject_data:
        attendance=Attendance.objects.filter(subject_id=subject.id)
        attendance_present_count=AttendanceReport.objects.filter(attendance_id__in=attendance,status=True,student_id=student_obj.id).count()
        attendance_absent_count=AttendanceReport.objects.filter(attendance_id__in=attendance,status=False,student_id=student_obj.id).count()
        subject_name.append(subject.subject_name)
        data_present.append(attendance_present_count)
        data_absent.append(attendance_absent_count)
    return render(request,"student_template/student_home_template.html",{"notifications":notifications,"total_attendance":attendance_total,"attendance_absent":attendance_absent,"attendance_present":attendance_present,"subjects":subjects,"data_name":subject_name,"data1":data_present,"data2":data_absent,"student":student})
def student_view_attendance(request):
    """Render the attendance-filter form listing the student's subjects."""
    enrolled = Students.objects.get(admin=request.user.id)
    course_of_student = enrolled.course_id
    subjects = Subjects.objects.filter(course_id=course_of_student)
    account = CustomUser.objects.get(id=request.user.id)
    student = Students.objects.get(admin=account)
    notify_target = Students.objects.get(admin=request.user.id)
    notifications = NotificationStudent.objects.filter(student_id=notify_target.id)
    context = {
        "subjects": subjects,
        "student": student,
        "notifications": notifications,
    }
    return render(request, "student_template/student_view_attendance.html", context)
def student_view_attendance_post(request):
    """Render the student's attendance records for one subject within a
    date range.

    Expects POST fields ``subject``, ``start_date`` and ``end_date``
    (``YYYY-MM-DD``).
    """
    subject_id=request.POST.get("subject")
    start_date=request.POST.get("start_date")
    end_date=request.POST.get("end_date")
    # Parse the ISO-formatted date strings for the range filter below.
    start_data_parse=datetime.datetime.strptime(start_date,"%Y-%m-%d").date()
    end_data_parse=datetime.datetime.strptime(end_date,"%Y-%m-%d").date()
    subject_obj=Subjects.objects.get(id=subject_id)
    user_object=CustomUser.objects.get(id=request.user.id)
    stud_obj=Students.objects.get(admin=user_object)
    user=CustomUser.objects.get(id=request.user.id)
    student=Students.objects.get(admin=user)
    student_notifcation=Students.objects.get(admin=request.user.id)
    notifications=NotificationStudent.objects.filter(student_id=student_notifcation.id)
    # All attendance sessions for the subject inside the range, then this
    # student's per-session reports for those sessions.
    attendance=Attendance.objects.filter(attendance_date__range=(start_data_parse,end_data_parse),subject_id=subject_obj)
    attendance_reports=AttendanceReport.objects.filter(attendance_id__in=attendance,student_id=stud_obj)
    return render(request,"student_template/student_attendance_data.html",{"attendance_reports":attendance_reports,"student":student,"notifications":notifications})
def student_apply_leave(request):
    """List the student's existing leave applications and show the form."""
    applicant = Students.objects.get(admin=request.user.id)
    leave_data = LeaveReportStudent.objects.filter(student_id=applicant)
    account = CustomUser.objects.get(id=request.user.id)
    student = Students.objects.get(admin=account)
    notify_target = Students.objects.get(admin=request.user.id)
    notifications = NotificationStudent.objects.filter(student_id=notify_target.id)
    context = {
        "leave_data": leave_data,
        "student": student,
        "notifications": notifications,
    }
    return render(request, "student_template/student_apply_leave.html", context)
def student_apply_leave_save(request):
    """Persist a new leave application (leave_status=0 = pending) from POST."""
    if request.method!="POST":
        return HttpResponseRedirect(reverse("student_apply_leave"))
    else:
        leave_start_date=request.POST.get("leave_start_date")
        leave_end_date=request.POST.get("leave_end_date")
        # NOTE(review): ``leave_date`` is read but never used below -- confirm
        # whether the form still posts it or whether it can be dropped.
        leave_date=request.POST.get("leave_date")
        leave_msg=request.POST.get("leave_msg")
        student_obj=Students.objects.get(admin=request.user.id)
        try:
            leave_report=LeaveReportStudent(student_id=student_obj,leave_start_date=leave_start_date,leave_end_date=leave_end_date,leave_message=leave_msg,leave_status=0)
            leave_report.save()
            # Flash messages are in Mongolian (project UI language).
            messages.success(request, "Чөлөөний хүсэлт амжилттай илгээлээ")
            return HttpResponseRedirect(reverse("student_apply_leave"))
        except:
            messages.error(request, "Хүсэлт илгээхэд алдаа гарлаа")
            return HttpResponseRedirect(reverse("student_apply_leave"))
def student_feedback(request):
    """Show the student's previous feedback entries and the feedback form."""
    # NOTE(review): the name ``staff_id`` is misleading -- it holds the
    # logged-in Students instance, not a staff id.
    staff_id=Students.objects.get(admin=request.user.id)
    feedback_data=FeedBackStudent.objects.filter(student_id=staff_id)
    user=CustomUser.objects.get(id=request.user.id)
    student=Students.objects.get(admin=user)
    student_notifcation=Students.objects.get(admin=request.user.id)
    notifications=NotificationStudent.objects.filter(student_id=student_notifcation.id)
    return render(request,"student_template/student_feedback.html",{"feedback_data":feedback_data,"student":student,"notifications":notifications})
def student_feedback_save(request):
    """Save a feedback message for the logged-in student (reply left empty)."""
    if request.method!="POST":
        return HttpResponseRedirect(reverse("student_feedback"))
    else:
        feedback_msg=request.POST.get("feedback_msg")
        student_obj=Students.objects.get(admin=request.user.id)
        try:
            feedback=FeedBackStudent(student_id=student_obj,feedback=feedback_msg,feedback_reply="")
            feedback.save()
            messages.success(request, "Санал хүсэлт амжилттай илгээлээ")
            return HttpResponseRedirect(reverse("student_feedback"))
        except:
            messages.error(request, "Санал хүсэлт илгээхэд алдаа гарлаа")
            return HttpResponseRedirect(reverse("student_feedback"))
def student_profile(request):
    """Show the logged-in student's profile page."""
    account = CustomUser.objects.get(id=request.user.id)
    student = Students.objects.get(admin=account)
    notify_target = Students.objects.get(admin=request.user.id)
    notifications = NotificationStudent.objects.filter(student_id=notify_target.id)
    context = {"user": account, "student": student, "notifications": notifications}
    return render(request, "student_template/student_profile.html", context)
def student_profile_save(request):
    """Update the student's name, optional password, address and optional
    profile picture from the POSTed profile form."""
    if request.method!="POST":
        return HttpResponseRedirect(reverse("student_profile"))
    else:
        first_name=request.POST.get("first_name")
        last_name=request.POST.get("last_name")
        password=request.POST.get("password")
        address=request.POST.get("address")
        # Store the uploaded picture (if any) and keep its public URL.
        if request.FILES.get('profile_pic',False):
            profile_pic=request.FILES['profile_pic']
            fs=FileSystemStorage()
            filename=fs.save(profile_pic.name,profile_pic)
            profile_pic_url=fs.url(filename)
        else:
            profile_pic_url=None
        try:
            customuser=CustomUser.objects.get(id=request.user.id)
            customuser.first_name=first_name
            customuser.last_name=last_name
            # Only replace the password when a non-empty one was submitted.
            if password!=None and password!="":
                customuser.set_password(password)
            customuser.save()
            student=Students.objects.get(admin=customuser)
            student.address=address
            # Keep the old picture when no new file was uploaded.
            if profile_pic_url!=None:
                student.profile_pic=profile_pic_url
            student.save()
            messages.success(request, "Мэдээлэл шинэчлэгдлээ")
            return HttpResponseRedirect(reverse("student_profile"))
        except:
            messages.error(request, "Мэдээлэл шинэчлэхэд алдаа гарлаа")
            return HttpResponseRedirect(reverse("student_profile"))
def student_news(request):
    """List all news posts, newest first."""
    news = News.objects.all().order_by('-ndate')
    account = CustomUser.objects.get(id=request.user.id)
    student = Students.objects.get(admin=account)
    notify_target = Students.objects.get(admin=request.user.id)
    notifications = NotificationStudent.objects.filter(student_id=notify_target.id)
    context = {"news": news, "student": student, "notifications": notifications}
    return render(request, "student_template/student_news.html", context)
def view_student_news(request, news_id):
    """Show one news post with its top-level comments and a comment count."""
    news=News.objects.get(id=news_id)
    user=CustomUser.objects.get(id=request.user.id)
    student=Students.objects.get(admin=user)
    # Top-level comments only (reply=None); replies are rendered nested.
    comment=SComment.objects.filter(News=news_id, reply=None).order_by('-id')
    staff=CustomUser.objects.get(id=request.user.id)
    student_notifcation=Students.objects.get(admin=request.user.id)
    notifications=NotificationStudent.objects.filter(student_id=student_notifcation.id)
    comments_count = 0
    # Each comment stores ``count`` (set to 1 on save), so this sums to the
    # number of top-level comments.
    for b in comment:
        comments_count += b.count
    return render(request, "student_template/view_student_news.html",{"news":news,"notifications":notifications,"student":student,"comment":comment,"comment_count":comments_count,"staff":staff})
# Comment
def view_student_news_comment_save(request):
    """Create a comment on a news item, or a reply when ``comment_id`` is posted."""
    # ``count`` is stored as 1 per comment so totals can be summed in the view.
    a = 1
    staff=CustomUser.objects.get(id=request.user.id)
    if request.method!="POST":
        return HttpResponseRedirect(reverse("student_news"))
    else:
        # NOTE(review): ``News`` shadows the imported News model inside this
        # function -- here it holds the posted news id string.
        News = request.POST.get("News_id")
        body = request.POST.get("body")
        reply_id = request.POST.get('comment_id')
        comment_qs = None
        if reply_id:
            # Replying: attach the new comment to its parent comment.
            comment_qs = SComment.objects.get(id=reply_id)
        try:
            comment=SComment(News_id=News, staff_id=staff, body=body, count=a, reply=comment_qs)
            comment.save()
            messages.success(request, "Сэтгэгдэл нэмэгдлээ!")
            return HttpResponseRedirect(reverse("view_student_news",kwargs={"news_id":News}))
        except:
            messages.error(request, "Сэтгэгдэл нэмэхэд алдаа гарлаа")
            return HttpResponseRedirect(reverse("view_student_news",kwargs={"news_id":News}))
# EDIT
def view_student_news_comment_edit_save(request):
    """Update an existing comment's news, author, body and count from POST."""
    a = 1
    staff=CustomUser.objects.get(id=request.user.id)
    if request.method!="POST":
        messages.error(request, "Method not allowed!")
        return HttpResponseRedirect(reverse("student_news"))
    else:
        comment_id = request.POST.get("comment_id")
        # NOTE(review): ``News`` shadows the imported News model -- it is the
        # posted news id here.
        News = request.POST.get("News_id")
        body = request.POST.get("body")
        try:
            comment = SComment.objects.get(id=comment_id)
            comment.News_id=News
            comment.staff_id=staff
            comment.body=body
            comment.count=a
            comment.save()
            messages.success(request, "Сэтгэгдэл засагдлаа!")
            return HttpResponseRedirect(reverse("view_student_news",kwargs={"news_id":News}))
        except:
            messages.error(request, "Сэтгэгдэл засахад алдаа гарлаа")
            return HttpResponseRedirect(reverse("view_student_news",kwargs={"news_id":News}))
def delete_scomment(request,comment_id,news_id):
    """Delete comment ``comment_id`` and return to the news detail page."""
    # NOTE(review): performing a destructive action on GET is unsafe for
    # crawlers/prefetchers; consider requiring POST.
    if request.method!="GET":
        return HttpResponse("<h2>Method Not Allowed</h2>")
    else:
        try:
            a=SComment.objects.get(id=comment_id)
            a.delete()
            messages.success(request,"Сэтгэгдэл амжилттай устгалаа")
            return HttpResponseRedirect(reverse("view_student_news",kwargs={"news_id":news_id}))
        except:
            messages.error(request,"Сэтгэгдэл устгахад алдаа гарлаа")
            return HttpResponseRedirect(reverse("view_student_news",kwargs={"news_id":news_id}))
@csrf_exempt
def student_fcmtoken_save(request):
    """Store the student's FCM push token; responds "True" or "False".

    CSRF-exempt -- presumably posted from client-side JS without a CSRF
    token; confirm against the front-end code.
    """
    token=request.POST.get("token")
    try:
        student=Students.objects.get(admin=request.user.id)
        student.fcm_token=token
        student.save()
        return HttpResponse("True")
    except:
        return HttpResponse("False")
def student_all_notification(request):
    """Show every notification addressed to the logged-in student."""
    recipient = Students.objects.get(admin=request.user.id)
    notifications = NotificationStudent.objects.filter(student_id=recipient.id)
    account = CustomUser.objects.get(id=request.user.id)
    student = Students.objects.get(admin=account)
    context = {"notifications": notifications, "student": student}
    return render(request, "student_template/all_notification.html", context)
def student_view_result(request):
    """Show all exam results recorded for the logged-in student."""
    student=Students.objects.get(admin=request.user.id)
    studentresult=StudentResult.objects.filter(student_id=student.id)
    user=CustomUser.objects.get(id=request.user.id)
    student=Students.objects.get(admin=user)
    student_notifcation=Students.objects.get(admin=request.user.id)
    notifications=NotificationStudent.objects.filter(student_id=student_notifcation.id)
    return render(request,"student_template/student_result.html",{"studentresult":studentresult,"student":student,"notifications":notifications})
def scovid19(request):
    """Render the COVID-19 information page inside the student layout."""
    user=CustomUser.objects.get(id=request.user.id)
    student=Students.objects.get(admin=user)
    student_notifcation=Students.objects.get(admin=request.user.id)
    notifications=NotificationStudent.objects.filter(student_id=student_notifcation.id)
    # FIX: removed the stray " | 1.875 | 2 |" residue that had been appended
    # to this return line (it made the line a syntax error).
    return render(request,"student_template/covid19.html",{"student":student,"notifications":notifications})
linedrive/__init__.py | PoorBillionaire/linedrive | 0 | 12770870 | <filename>linedrive/__init__.py
from . import utils
from . import constants
from .websocket import GamecastWebsocket
| 1.101563 | 1 |
homecontrol/modules/esphome/entities.py | lennart-k/HomeControl | 7 | 12770871 | """HomeControl representation of ESPHome entities"""
from typing import TYPE_CHECKING, Any, Dict, Tuple
import voluptuous as vol
from homecontrol.dependencies.entity_types import Item
from homecontrol.dependencies.state_proxy import StateDef, StateProxy
from homecontrol.modules.switch.module import Switch
if TYPE_CHECKING:
from homecontrol.core import Core
from .module import ESPHomeDevice
from aioesphomeapi.model import (
BinarySensorInfo, BinarySensorState,
EntityInfo, EntityState,
FanInfo, FanState,
LightInfo, LightState,
SensorInfo, SensorState,
SwitchInfo, SwitchState)
class ESPHomeItem(Item):
    """HomeControl representation for esphome entities"""
    # Backing device connection and the esphome entity metadata.
    device: "ESPHomeDevice"
    entity: "EntityInfo"
    type: str = "esphome.ESPHomeItem"

    # pylint: disable=arguments-differ
    @classmethod
    async def constructor(
            cls, identifier: str, name: str,
            core: "Core", unique_identifier: str, device: "ESPHomeDevice",
            entity: "EntityInfo"
    ) -> "ESPHomeItem":
        """Build and wire up an item instance.

        Registers every method carrying an ``action_name`` attribute in the
        item's ``actions`` mapping and attaches a StateProxy.
        """
        item = cls()
        item.device = device
        item.entity = entity
        item.core = core
        item.identifier = identifier
        item.unique_identifier = unique_identifier
        item.name = name
        item.module = core.modules.esphome
        item.actions = {}
        # Collect methods marked with an ``action_name`` attribute.
        for attribute in dir(item):
            func = getattr(item, attribute)
            if hasattr(func, "action_name"):
                item.actions[getattr(func, "action_name")] = func
        item.states = StateProxy(item, core)
        return item

    def update_state(self, state: "EntityState") -> None:
        """Callback for state updates from esphome"""
class SwitchItem(Switch, ESPHomeItem):
    """An esphome switch"""
    entity: "SwitchInfo"
    type: str = "esphome.SwitchItem"

    async def set_on(self, value: bool) -> Dict[str, Any]:
        """Send the on/off command to the device; returns no state changes
        (the device reports the new state back via update_state)."""
        await self.device.api.switch_command(self.entity.key, value)
        return {}

    def update_state(self, state: "SwitchState") -> None:
        """Mirror the device-reported switch state into the state proxy."""
        self.states.update("on", state.state)
class BinarySensorItem(ESPHomeItem):
    """An esphome binary_sensor"""
    entity: "BinarySensorInfo"
    type: str = "esphome.BinarySensorItem"

    # Read-only boolean state mirrored from the device.
    on = StateDef()

    def update_state(self, state: "BinarySensorState") -> None:
        """Mirror the device-reported sensor state into the state proxy."""
        self.states.update("on", state.state)
class SensorItem(ESPHomeItem):
    """An esphome sensor"""
    entity: "SensorInfo"
    type: str = "esphome.SensorItem"

    # Read-only numeric reading mirrored from the device.
    value = StateDef()

    def update_state(self, state: "SensorState") -> None:
        """Mirror the device-reported reading into the state proxy."""
        self.states.update("value", state.state)
class FanItem(ESPHomeItem):
    """An esphome fan"""
    entity: "FanInfo"
    type: str = "esphome.FanItem"

    # Declarative states mirrored from the device.
    on = StateDef()
    oscillating = StateDef()
    speed = StateDef()

    def update_state(self, state: "FanState") -> None:
        """Mirror the device-reported fan state into the state proxy."""
        self.states.bulk_update(
            on=state.state,
            oscillating=state.oscillating,
            speed=state.speed
        )
class LightItem(ESPHomeItem):
    """An esphome light"""
    entity: "LightInfo"
    type: str = "esphome.LightItem"

    # Declarative states; each setter below is registered via StateDef.setter.
    on = StateDef()
    brightness = StateDef()
    color_temperature = StateDef()
    rgb = StateDef()
    white = StateDef()

    @on.setter(vol.Schema(bool))
    async def set_on(self, value: bool) -> Dict[str, Any]:
        """Sets the on state"""
        await self.device.api.light_command(self.entity.key, value)
        return {}

    @brightness.setter(vol.Schema(vol.Coerce(float)))
    async def set_brightness(self, brightness: float) -> Dict[str, Any]:
        """Sets the brightness state"""
        # Zero brightness also turns the light off (state=False).
        await self.device.api.light_command(
            self.entity.key, bool(brightness), brightness=brightness)
        return {}

    @rgb.setter(vol.Schema(
        [vol.Coerce(float), vol.Coerce(float), vol.Coerce(float)]))
    async def set_rgb(self, rgb: Tuple[float, float, float]) -> Dict[str, Any]:
        """Sets the rgb state"""
        # state is True only when every channel is non-zero, otherwise None --
        # presumably None leaves the power state untouched; confirm against
        # aioesphomeapi.light_command semantics.
        await self.device.api.light_command(
            self.entity.key, all(rgb) or None, rgb=rgb)
        return {}

    @white.setter(vol.Schema(vol.Coerce(float)))
    async def set_white(self, white: float) -> Dict[str, Any]:
        """Sets the white state"""
        await self.device.api.light_command(
            self.entity.key, bool(white) or None, white=white)
        return {}

    def update_state(self, state: "LightState") -> None:
        """Mirror the device-reported light state into the state proxy."""
        self.states.bulk_update(
            on=state.state,
            brightness=state.brightness,
            rgb=(state.red, state.green, state.blue),
            white=state.white
        )
# Maps aioesphomeapi EntityInfo class names to the HomeControl item classes
# instantiated for them.
ENTITY_TYPES = {
    "SwitchInfo": SwitchItem,
    "BinarySensorInfo": BinarySensorItem,
    "FanInfo": FanItem,
    "LightInfo": LightItem,
    "SensorInfo": SensorItem,
}
| 2.5 | 2 |
ScottiePippen/grids.py | iancze/ScottiePippen | 1 | 12770872 | <reponame>iancze/ScottiePippen<filename>ScottiePippen/grids.py<gh_stars>1-10
import numpy as np
from scipy.interpolate import interp1d
from astropy.io import ascii
import ScottiePippen as SP
class IndexInterpolator:
    '''
    Object to return fractional distance between grid points of a single grid variable.

    :param parameter_list: list of parameter values
    :type parameter_list: 1-D list
    '''

    def __init__(self, parameter_list):
        self.parameter_list = np.unique(parameter_list)
        self.index_interpolator = interp1d(self.parameter_list, np.arange(len(self.parameter_list)), kind='linear')

    def __call__(self, value):
        '''
        Evaluate the interpolator at a parameter.

        :param value: parameter value at which to evaluate
        :type value: float

        :raises ValueError: if *value* is out of bounds (re-raised from interp1d).

        :returns: ((low_val, high_val), (frac_low, frac_high)), the lower and higher bounding points in the grid
            and the fractional distance (0 - 1) between them and the value.
        '''
        try:
            index = self.index_interpolator(value)
        except ValueError as e:
            print("Requested value {} is out of bounds. {}".format(value, e))
            raise

        # BUG FIX: np.ceil/np.floor return floats, and indexing a NumPy array
        # with a float raises IndexError in modern NumPy -- cast to int.
        high = int(np.ceil(index))
        low = int(np.floor(index))
        frac_index = index - low
        return ((self.parameter_list[low], self.parameter_list[high]), ((1 - frac_index), frac_index))
class Base:
    """Common machinery for stellar evolutionary-model grids.

    Subclasses implement :meth:`load` to populate flat parallel arrays
    ``masses``, ``ages`` [Myr], ``temps`` [K], ``radii`` [R_sun] and ``lums``
    [log10 L/L_sun]; this base class then builds the two-stage (mass, then
    age) interpolators used by :meth:`interp_T` / :meth:`interp_lL`.
    """

    def __init__(self, name, basefmt, age_range, mass_range):
        self.name = name
        self.basefmt = basefmt
        self.age_range = age_range
        self.mass_range = mass_range
        self.load()
        self.setup_interpolator()

    def load(self):
        """Populate the grid arrays; must be overridden by each subclass."""
        print("Load function must be defined by a subclass.")
        raise NotImplementedError
        # Will load the various grids from file.
        # Then, form arrays of age, mass, radius, and temperature
        # Put these arrays into the interpolators. Interpolators always take an (Age, Mass) pair,
        # in that order.

    def setup_interpolator(self):
        '''
        Once all of the data files have been loaded into memory using grid.load() (implemented by the subclass), then call this function to actually setup the two stage interpolation necessary for smooth contours.

        This works this way because all grids provide stars at set mass locations, and then as the age evolves, predict different stellar properties, such as Teff and (log) Luminosity.

        This means if we want to query (T,L) for a given (tau, M), we shouldn't just do a bi-linear interpolation between (T,L) points like we were doing before.

        Instead, we should find the two mass points in the grid that bracket M, call them M_low and M_high. Then, interpolate T and L for (tau, M_low) and (tau, M_high).

        Finally, then interpolate T and L from the (T,L)'s corresponding to (tau, M_low) and (tau, M_high) for M.
        '''
        # Identify the unique masses
        umasses = np.unique(self.masses)
        # Create an index interpolator for umasses.
        self.mass_interp = IndexInterpolator(umasses)

        self.T_interpolators = {}
        self.lL_interpolators = {}
        for mass in umasses:
            ind = np.isclose(self.masses, mass)
            # Find all the ages that correspond to this mass
            ages = self.ages[ind]
            # Sort them in increasing order
            ind2 = np.argsort(ages)
            # BUG FIX: apply the sorting permutation to the ages as well.
            # Previously the *unsorted* ages were paired with age-sorted
            # temps/lums, scrambling every track whose file was not already
            # monotonically increasing in age.
            ages = ages[ind2]
            # Temps and log-luminosities for this mass, in increasing-age order.
            temps = self.temps[ind][ind2]
            lLs = self.lums[ind][ind2]
            # Fit a linear interpolator for t(age) and ll(age), store these in an array
            self.T_interpolators[mass] = interp1d(ages, temps)
            self.lL_interpolators[mass] = interp1d(ages, lLs)

    def interp_T(self, p):
        '''p is [age, mass]; returns the interpolated Teff [K], or NaN when
        the requested point falls outside the grid.'''
        age, mass = p
        try:
            # First identify the upper and lower masses
            (low_val, high_val), (frac_low, frac_high) = self.mass_interp(mass)
            T_high = self.T_interpolators[high_val](age)
            T_low = self.T_interpolators[low_val](age)
        except ValueError:
            # This means we must be out of range of the grid.
            return np.nan

        # Weighted average estimates for age based on how close.
        T = frac_low * T_low + frac_high * T_high
        return T

    def interp_lL(self, p):
        '''p is [age, mass]; returns the interpolated log10(L/L_sun), or NaN
        when the requested point falls outside the grid.'''
        age, mass = p
        try:
            # First identify the upper and lower masses
            (low_val, high_val), (frac_low, frac_high) = self.mass_interp(mass)
            lL_high = self.lL_interpolators[high_val](age)
            lL_low = self.lL_interpolators[low_val](age)
        except ValueError:
            # This means we must be out of range of the grid.
            return np.nan

        # Weighted average estimates for age based on how close.
        lL = frac_low * lL_low + frac_high * lL_high
        return lL
class DartmouthPMS(Base):
    """Dartmouth pre-main-sequence tracks (solar composition files)."""
    def __init__(self, age_range, mass_range):
        super().__init__(name="DartmouthPMS", basefmt=SP.data_dir + "Dartmouth/PMS/fehp00afep0/m{:0>3.0f}fehp00afep0.jc2mass", age_range=age_range, mass_range=mass_range)

    def load(self):
        """Read the per-mass track files and populate the flat parallel
        arrays (masses, ages [Myr], temps [K], radii [R_sun], lums [log L_sun])."""
        # Dartmouth masses
        masses = np.concatenate((np.arange(0.1, 1.8, 0.05), np.arange(1.8, 3., 0.1), np.arange(3., 5., 0.2)))
        ind = (masses >= self.mass_range[0]) & (masses <= self.mass_range[1])
        masses = masses[ind]

        # Go through all of the files, read the relevant properties, and then concatenate these
        # into 4, 1-D arrays
        mass_list = []
        age_list = []
        temp_list = []
        radius_list = []
        lum_list = []

        for mass in masses:
            # Filenames encode the mass in hundredths of a solar mass.
            fname = self.basefmt.format(100 * mass)
            data = ascii.read(fname, names=["age", "LTeff", "logg", "LL", "U", "B", "V", "R", "I", "J", "H", "Ks"])
            age = 1e-6 * data["age"] # file ages are in yr -> [Myr]
            ind = (age >= self.age_range[0]) & (age <= self.age_range[1])
            age = age[ind]
            temp = 10**data["LTeff"][ind] # [K]
            # NOTE(review): G, M_sun and R_sun are not defined in this chunk --
            # presumably package-level constants (cgs); confirm.
            radius = np.sqrt(G * mass * M_sun / (10**data["logg"][ind])) / R_sun # [R_sun]
            LL = data["LL"][ind] # [L_sun]
            mass_list.append(mass * np.ones(np.sum(ind)))
            age_list.append(age)
            temp_list.append(temp)
            radius_list.append(radius)
            lum_list.append(LL)

        self.masses = np.concatenate(mass_list)
        self.ages = np.concatenate(age_list)
        self.temps = np.concatenate(temp_list)
        self.radii = np.concatenate(radius_list)
        self.lums = np.concatenate(lum_list)
        # (age, mass) sample points, one row per track sample.
        self.points = np.array([self.ages, self.masses]).T
class PISA(Base):
    """PISA pre-main-sequence tracks (Z=0.02, Y=0.288)."""
    def __init__(self, age_range, mass_range):
        super().__init__(name="PISA", basefmt=SP.data_dir + "PISA/Z0.02000_Y0.2880_XD2E5_ML1.68_AS05/TRK_M{:.2f}_Z0.02000_Y0.2880_XD2E5_ML1.68_AS05.DAT", age_range=age_range, mass_range=mass_range)

    def load(self):
        """Read the per-mass track files and populate the flat parallel
        arrays (masses, ages [Myr], temps [K], radii [R_sun], lums [log L_sun])."""
        masses = np.concatenate((np.arange(0.2, 1., 0.05), np.arange(1.0, 2., 0.1), np.arange(2., 4., 0.2), np.arange(4.0, 7.1, 0.5)))
        ind = (masses >= self.mass_range[0]) & (masses <= self.mass_range[1])
        masses = masses[ind]

        # Go through all of the files, read the relevant properties, and then concatenate these
        # into 4, 1-D arrays
        mass_list = []
        age_list = []
        temp_list = []
        radius_list = []
        lum_list = []

        for mass in masses:
            fname = self.basefmt.format(mass)
            data = ascii.read(fname, names=["NMD", "L_age", "Xc", "LL", "LTeff", "LTc", "LOG RHOc", "M-CC", "L-PP", "L-CNO", "L-GRA"])
            age = 1e-6 * 10**data["L_age"] # file stores log10(age/yr) -> [Myr]
            ind = (age >= self.age_range[0]) & (age <= self.age_range[1])
            age = age[ind]
            temp = 10**data["LTeff"][ind] # [K]
            # NOTE(review): L_sun, sigma_k and R_sun are not defined in this
            # chunk -- presumably package-level constants (cgs); confirm.
            L = 10**data["LL"][ind] * L_sun # [ergs/s]
            # Radius from the Stefan-Boltzmann law: L = 4*pi*R^2*sigma*T^4.
            radius = np.sqrt(L / (4 * np.pi * sigma_k * temp**4)) / R_sun # [R_sun]
            mass_list.append(mass * np.ones(np.sum(ind)))
            age_list.append(age)
            temp_list.append(temp)
            radius_list.append(radius)
            lum_list.append(data["LL"][ind]) # log10(L_sun)

        self.masses = np.concatenate(mass_list)
        self.ages = np.concatenate(age_list)
        self.temps = np.concatenate(temp_list)
        self.radii = np.concatenate(radius_list)
        self.lums = np.concatenate(lum_list)
        # (age, mass) sample points, one row per track sample.
        self.points = np.array([self.ages, self.masses]).T
class Baraffe15(Base):
    """Baraffe et al. (2015) isochrone files, one file per age."""
    def __init__(self, age_range, mass_range):
        super().__init__(name="Baraffe15", basefmt=SP.data_dir + "Baraffe15/{:.4f}.dat", age_range=age_range, mass_range=mass_range)

    def load(self):
        """Read the per-age isochrone files and populate the flat parallel
        arrays (masses, ages [Myr], temps [K], radii [R_sun], lums [log L_sun])."""
        # In Myr
        ages = np.array([0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 8.0, 10.0, 15.0, 20.0, 25.0, 30.0, 40.0, 50.0, 80.0, 100.0, 120.0, 200.0])
        # Grid filenames are in Gyr
        ind = (ages >= self.age_range[0]) & (ages <= self.age_range[1])
        ages = ages[ind]

        # Go through all of the files, read the relevant properties, and then concatenate these
        # into 4, 1-D arrays
        mass_list = []
        age_list = []
        temp_list = []
        radius_list = []
        lum_list = []

        for age in ages:
            # Convert Myr -> Gyr to build the filename.
            fname = self.basefmt.format(1e-3 * age)
            data = ascii.read(fname, names=["mass", "Teff", "L", "g", "radius", "Li/Li0", "U*", "G", "R", "I", "12k.I", "ZPR", "Z", "Y", "J", "H", "Ks", "CH4_ON", "CH4_OFF"], comment="\!")
            mass = data["mass"] # [M_sun]
            ind = (mass >= self.mass_range[0]) & (mass <= self.mass_range[1])
            mass = mass[ind] # [M_sun]
            temp = data["Teff"][ind] # [K]
            radius = data["radius"][ind] # R_sun
            LL = data["L"][ind] # L_sun
            mass_list.append(mass)
            age_list.append(age * np.ones(np.sum(ind)))
            temp_list.append(temp)
            radius_list.append(radius)
            lum_list.append(LL)

        self.masses = np.concatenate(mass_list)
        self.ages = np.concatenate(age_list)
        self.temps = np.concatenate(temp_list)
        self.radii = np.concatenate(radius_list)
        self.lums = np.concatenate(lum_list)
        # (age, mass) sample points, one row per isochrone sample.
        self.points = np.array([self.ages, self.masses]).T
class Seiss(Base):
    """Siess et al. pre-main-sequence tracks at Z=0.02, one file per mass."""
    def __init__(self, age_range, mass_range):
        super().__init__(name="Seiss", basefmt=SP.data_dir + "Seiss/m{:.1f}z02.hrd", age_range=age_range, mass_range=mass_range)

    def load(self):
        """Read the per-mass track files and populate the flat parallel
        arrays (masses, ages [Myr], temps [K], radii [R_sun], lums [log L_sun])."""
        # Grid masses in M_sun.
        masses = np.concatenate((np.arange(0.3, 2.0, 0.1), np.array([2.0, 2.2, 2.5, 2.7, 3.0, 3.5, 4.0, 5.0, 6.0])))
        ind = (masses >= self.mass_range[0]) & (masses <= self.mass_range[1])
        masses = masses[ind]

        # Go through all of the files, read the relevant properties, and then
        # concatenate these into flat 1-D arrays.
        mass_list = []
        age_list = []
        temp_list = []
        radius_list = []
        lum_list = []

        for mass in masses:
            # BUG FIX: the format() call was missing its closing parenthesis,
            # which made this whole module a SyntaxError.
            fname = self.basefmt.format(mass)
            # model, L (Lo), Reff (Ro), Teff, log g, age (yr), phase, Mbol, R* (Ro), rho_eff, M (Mo)
            data = ascii.read(fname, names=["model", "phase", "L", "Mbol", "Reff", "radius", "Teff", "rho_eff", "log g", "M (Mo)", "age"])
            age = 1e-6 * data["age"] # file ages are in yr -> [Myr]
            ind = (age >= self.age_range[0]) & (age <= self.age_range[1])
            age = age[ind]
            # NOTE(review): L_sun, sigma_k and R_sun are not defined in this
            # chunk -- presumably package-level constants (cgs); confirm.
            L = data["L"][ind] * L_sun # [ergs/s]
            radius = data["radius"][ind] # [R_sun]
            # Because T_eff is computed at tau=2/3, we'll try recomputing at
            # the surface, using Rstar.
            temp = (L / (4 * np.pi * (radius * R_sun)**2 * sigma_k))**0.25 # [K]
            mass_list.append(mass * np.ones(np.sum(ind)))
            age_list.append(age)
            temp_list.append(temp)
            radius_list.append(radius)
            lum_list.append(np.log10(data["L"][ind]))

        self.masses = np.concatenate(mass_list)
        self.ages = np.concatenate(age_list)
        self.temps = np.concatenate(temp_list)
        self.radii = np.concatenate(radius_list)
        self.lums = np.concatenate(lum_list)
        # (age, mass) sample points, one row per track sample.
        self.points = np.array([self.ages, self.masses]).T
# Registry of the available model grids, keyed by their names.
model_dict = {"DartmouthPMS":DartmouthPMS, "PISA":PISA, "Baraffe15": Baraffe15, "Seiss":Seiss}
def cartesian(arrays, out=None):
    """
    Generate a cartesian product of input arrays.
    http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype

    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)

    # BUG FIX: '/' is true division in Python 3 and produced a float, which
    # breaks the index/slice arithmetic below; use floor division.
    m = n // arrays[0].size
    # Repeat each first-axis value over the size of the remaining product.
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the first stripe recursively, then tile it for the other values.
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
def main():
    # Placeholder entry point; the grids above are meant to be used as a library.
    pass

if __name__=="__main__":
    main()
| 2.90625 | 3 |
original-paas/copy_to_container/www/spdpaas/src/app/features/commonUse/inMemoryDBHandle.py | yishan1331/docker-practice | 0 | 12770873 | # -*- coding: utf-8 -*-
#module description
"""
==============================================================================
created :
Last update: 03/31/2021
Developer: <NAME>
Lite Version 2 @Yishan08212019
API Version 1.0
Filename: inMemoryDBHandle.py
Description: 連接redis api
Total = 8 APIs
==============================================================================
"""
#=======================================================
# System level modules
#=======================================================
#{{{
from sqlalchemy import *
#}}}
#=======================================================
# User-defined modules
#=======================================================
# {{{
from app import *
#Yishan@05212020 added for common modules
from app.modules import *
# }}}
# Flask blueprint grouping the in-memory-DB (Redis) API endpoints.
INMEMORYDB_API = Blueprint('INMEMORYDB_API', __name__)
class _RedisQueryAction(object):
    """Fetch the full value of a Redis key using the accessor that matches
    its declared data type ("string", "list", "set", "zset" or "hash")."""

    # Maps a Redis data-type name to the reader method for it; an unknown
    # type raises KeyError, matching the original dispatch behavior.
    _READERS = {
        "string": "_string",
        "list": "_list",      # lrange(0,-1)
        "set": "_set",
        "zset": "_zset",      # zrange(0,-1)
        "hash": "_hash",
    }

    def __init__(self, dbRedis, datatype, key):
        self.dbRedis = dbRedis
        self.datatype = datatype
        self.key = key

    def get_value(self):
        """Dispatch to the reader for ``self.datatype`` and return its result."""
        reader_name = self._READERS[self.datatype]
        return getattr(self, reader_name)()

    def _string(self):
        return self.dbRedis.get(self.key)

    def _list(self):
        # Entire list contents.
        return self.dbRedis.lrange(self.key, 0, -1)

    def _set(self):
        return self.dbRedis.smembers(self.key)

    def _zset(self):
        # All members, in score order.
        return self.dbRedis.zrange(self.key, 0, -1)

    def _hash(self):
        return self.dbRedis.hgetall(self.key)
class _RedisInsertUpdateAction(object):
def __init__(self, dbRedis, datatype, data, whichAction, setExpireTime):
self.dbRedis = dbRedis
self.datatype = datatype
self.data = data
self.whichAction = whichAction
self.doAction = "update"
if whichAction == "Insert":
self.doAction = "add"
self.setExpireTime = setExpireTime
self.err_msg = []
def set_value(self):
dataDict = {
"string":self._string,
"list":self._list_set,
"set":self._list_set,
"zset":self._zset_hash,
"hash":self._zset_hash,
}
return dataDict[self.datatype]()
def _string(self):
try:
if self.setExpireTime:
for key,value in self.data["data"].items():
if self.dbRedis.exists(key) and self.whichAction == "Insert":
self.err_msg.append("Failed to {} Key:'{}' because already exists".format(self.doAction,key))
continue
if not self.dbRedis.exists(key) and not self.whichAction == "Insert":
self.err_msg.append("Failed to {} Key:'{}' because does not exist".format(self.doAction,key))
continue
if isinstance(value,dict) or isinstance(value,list):
self.err_msg.append("Failed to {} Key:'{}' -> Value:'{}' because Value cannot be an Array or Object".format(self.doAction,key,value))
continue
if self.data["expire_time"].get(key) is None:
self.dbRedis.set(key,value)
self.err_msg.append("Key:'{}' doesn't have expire_time,but {} successfully with no expire_time".format(key,self.doAction))
continue
if not (isinstance(self.data["expire_time"][key],int) and self.data["expire_time"][key] > 0):
self.err_msg.append("Failed to add Key:'{}' beacuse expire_time:{} must be positive integer and greater than zero".format(key,self.data["expire_time"][key]))
continue
self.dbRedis.setex(key, self.data["expire_time"][key], value)
else:
self.dbRedis.mset(self.data["data"])
except Exception as e:
self.err_msg.append(str(e))
return self.err_msg
def _list_set(self):
try:
for key,value in self.data["data"].items():
if self.dbRedis.exists(key) and self.whichAction == "Insert":
self.err_msg.append("Failed to {} Key:'{}' because already exists".format(self.doAction,key))
continue
if not self.dbRedis.exists(key) and not self.whichAction == "Insert":
self.err_msg.append("Failed to {} Key:'{}' because does not exist".format(self.doAction,key))
continue
if not isinstance(value,list):
self.err_msg.append("Failed to add Key:'{}' -> Value:'{}' because value must be an Array".format(key,value))
continue
if not value:
self.err_msg.append("Failed to add Key:'{}' -> Value:'{}' because value cannot be empty.".format(key,value))
continue
#若是更新key則先把key刪了再建一個新的
if self.whichAction != "Insert": self.dbRedis.delete(key)
if self.setExpireTime:
if self.data["expire_time"].get(key) is None:
self.dbRedis.rpush(key, *value) if self.datatype == "list" else self.dbRedis.sadd(key, *value)
self.err_msg.append("Key:'{}' doesn't have expire_time,but added successfully with no expire_time".format(key))
continue
if not (isinstance(self.data["expire_time"][key],int) and self.data["expire_time"][key] > 0):
self.err_msg.append("Failed to add Key:'{}' beacuse expire_time:{} must be positive integer and greater than zero".format(key,self.data["expire_time"][key]))
continue
self.dbRedis.rpush(key, *value) if self.datatype == "list" else self.dbRedis.sadd(key, *value)
if self.setExpireTime: self.dbRedis.expire(key,self.data["expire_time"][key])
except Exception as e:
self.err_msg.append(str(e))
return self.err_msg
def _zset_hash(self):
try:
for key,value in self.data["data"].items():
if self.dbRedis.exists(key) and self.whichAction == "Insert":
self.err_msg.append("Failed to {} Key:'{}' because already exists".format(self.doAction,key))
continue
if not self.dbRedis.exists(key) and not self.whichAction == "Insert":
self.err_msg.append("Failed to {} Key:'{}' because does not exist".format(self.doAction,key))
continue
if not isinstance(value,dict):
self.err_msg.append("Failed to add Key:'{}' -> Value:'{}' because value must be an Object".format(key,value))
continue
if not value:
self.err_msg.append("Failed to add Key:'{}' -> Value:'{}' because value cannot be empty.".format(key,value))
continue
if self.datatype == "zset":
legalvalue = True
for i in value.values():
if not (isinstance(i,int) or isinstance(i,float)):
self.err_msg.append("Failed to add Key:'{}' -> Value:'{}' beacuse value's value must be an integer".format(key,value))
legalvalue = False
break
if not legalvalue: continue
#若是更新key則先把key刪了再建一個新的
if self.whichAction == "Put": self.dbRedis.delete(key)
if self.setExpireTime:
if self.data["expire_time"].get(key) is None:
self.dbRedis.zadd(key, value) if self.datatype == "zset" else self.dbRedis.hmset(key, value)
self.err_msg.append("Key:'{}' doesn't have expire_time,but added successfully with no expire_time".format(key))
continue
if not (isinstance(self.data["expire_time"][key],int) and self.data["expire_time"][key] > 0):
self.err_msg.append("Failed to add Key:'{}' beacuse expire_time:{} must be positive integer and greater than zero".format(key,self.data["expire_time"][key]))
continue
self.dbRedis.zadd(key, value) if self.datatype == "zset" else self.dbRedis.hmset(key, value)
if self.setExpireTime: self.dbRedis.expire(key,self.data["expire_time"][key])
except Exception as e:
self.err_msg.append(str(e))
return self.err_msg
def operate_CU_integration(reqdataDict, dbRedis, whichAction):
    """Run the insert/update action for every supported redis datatype in the payload.

    Returns (True, per-datatype status dict) on success, or
    (False, error string) when the payload contained no usable data.
    """
    # partial updates ("Patch") are only meaningful for zset/hash
    if whichAction in ["Insert","Put"]:
        supported_types = ["string","list","set","zset","hash"]
    else:
        supported_types = ["zset","hash"]
    status = {}
    anydata = False
    for key_type in supported_types:
        payload = reqdataDict.get(key_type)
        if payload is None:
            continue
        anydata = True
        if not check_post_parameter_exist(payload,["data"]):
            status[key_type] = ["Data : {} ,Missing post parameters : '{}'".format(payload,post_parameter)]
            continue
        # a per-key expiry forces one-by-one writes (slower); otherwise a
        # single bulk write is used (faster)
        setExpireTime = "expire_time" in payload.keys()
        status[key_type] = _RedisInsertUpdateAction(dbRedis,key_type,payload,whichAction,setExpireTime).set_value()
    if not anydata:
        return False, "Faild to {},because no correct data".format(whichAction)
    return True, status
#=======================================================
# API to CommonUse query redis key
# Date: 12/17/2020 by Yishan
# FOR REDIS
#=======================================================
#{{{ INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Interval/<key>', methods = ['GET']),
@INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/SpecificKey/<key>', methods = ['GET'])
def redis_commonuse_get_specific_key(SYSTEM, key):
    # Query the value of one redis key, or of every key matching a glob
    # pattern when ?pattern=yes, dispatching on the key's redis datatype.
    # The APIINFO docstring below is consumed at runtime by the doc system
    # and is therefore left exactly as-is.
    #{{{APIINFO
    '''
    {
        "API_application":"提供查詢redis資料庫指定key的值,若pattern為yes表示欲使用*、[]進行條件匹配查詢,例如:h*llo匹配hllo和heeeeello;h[ae]llo匹配hello和hallo,但不匹配hillo",
        "API_path_parameters":{"SYSTEM":"合法的系統名稱","key":"Key值"},
        "API_parameters":{"uid":"使用者帳號","pattern":"是否使用條件查詢(yes/no)"},
        "API_message_parameters":{"QueryValueData":"JSON","DB":"string"},
        "API_example":{
            "APIS": "GET /api/IOT/1.0/rd/CommonUse/Specific/test_zset",
            "OperationTime": "0.001",
            "DB": "REDIS",
            "System": "IOT",
            "BytesTransferred": 140,
            "QueryValueData": [
                "member",
                "dadsd"
            ],
            "Response": "ok"
        }
    }
    '''
    #}}}
    err_msg = "error"
    # alias: the "test" system is served from the IOT database
    if SYSTEM == "test": SYSTEM = "IOT"
    dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
    if SYSTEM not in globalvar.SYSTEMLIST[globalvar.SERVERIP]:
        dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        return jsonify( **dicRet)
    uri_parameter = ["uid","pattern"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    # NOTE(review): .encode('utf-8') yields bytes on Python 3, which would
    # never equal the str values below — this code appears to target
    # Python 2; confirm the runtime before porting.
    pattern = request.args.get("pattern").encode('utf-8')
    if pattern not in ("yes","no"):
        dicRet["Response"] = "parameter: pattern -> {} must be yes or no".format(pattern)
        return jsonify( **dicRet)
    pattern = True if pattern == "yes" else False
    try:
        # each system maps to its own redis logical database index
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,result= appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            # database connection failed
            dicRet["Response"] = result
            return jsonify( **dicRet)
        _keys = [key]
        # pattern mode: expand the glob into the matching key names
        if pattern: _keys = dbRedis.keys(key)
        contents = {}
        for i in _keys:
            if not dbRedis.exists(i):
                dicRet["Response"] = "Key : {} doesn't existed".format(i)
                return jsonify( **dicRet)
            redis_key_type = ["string","list","set","zset","hash"]
            this_key_type = dbRedis.type(i)
            if this_key_type not in redis_key_type:
                dicRet["Response"] = "Key : {} doesn't existed".format(i)
                return jsonify( **dicRet)
            contents[i] = _RedisQueryAction(dbRedis,this_key_type,i).get_value()
            # sets are not JSON-serializable; convert to a list
            if isinstance(contents[i],set): contents[i] = list(contents[i])
        if not pattern:
            # single-key query returns the bare value, not a key->value map
            dicRet["QueryValueData"] = contents[_keys[0]]
        else:
            dicRet["QueryValueData"] = contents
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    dicRet["Response"] = err_msg
    dicRet["DB"] = "REDIS"
    return jsonify( **dicRet)
# }}}
#=======================================================
# API to CommonUse register redis key
# Date: 12172020@Yishan
# FOR REDIS
#=======================================================
# {{{ INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['POST'])
@INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['POST'])
def redis_commonuse_register(SYSTEM):
    # Create multiple redis keys of any supported datatype in one request.
    # The APIINFO docstring below is consumed at runtime by the doc system
    # and is therefore left exactly as-is.
    #{{{APIINFO
    '''
    {
        "API_application":"提供新增redis多個key&value資料",
        "API_parameters":{"uid":"使用者帳號"},
        "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
        "API_postData":{
            "bodytype":"Object",
            "bodyschema":"{}",
            "parameters":{
                "string":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "新增字符串形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲新增的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value只接受字串、數字、json字串類型;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲新增的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_string":123,"test_string2":"test"},"expire_time":{"test_string":60}}
                },
                "list":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "新增列表形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲新增的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為陣列且陣列的值只接受字串、數字、json字串類型;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲新增的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data": {"test_list1": [34123,"dwqad"]},"expire_time":{"test_list1":60,"test_list2":30}}
                },
                "set":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "新增無序集合形態的value,集合成員是唯一的,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲新增的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為陣列,陣列的值只接受字串、數字、json字串類型且不得重複;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲新增的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data": {"test_set1": [34123,"dwqad"]},"expire_time":{"test_set1":60}}
                },
                "zset":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "新增有序集合形態的value,集合成員是唯一的且都會關聯一個double 類型的分數,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲新增的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為物件,value物件的value必需為數字或浮點數;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲新增的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_zset": {"data1":22,"data2":10,"data3":20}},"expire_time":{"test_zset":60}}
                },
                "hash":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "新增JSON物件形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲新增的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為物件;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲新增的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_hash1":{"data1":"qqq","data2":10,"data3":20}},"expire_time":{"test_hash1":60}}
                }
            },
            "precautions":{
                "注意事項1":"data若有設定期限,則會data-key一一比對並逐一新增(速度較慢);若無則一次新增(速度較快)"
            },
            "example":[
                {
                    "string": {
                        "data": {
                            "test_string1": 1.2,
                            "test_string2": ["sas",1321]
                        },
                        "expire_time":{
                            "test_string1":30
                        }
                    },
                    "list": {
                        "data": {
                            "test_list1": [34123,"dwqad"],
                            "test_list2": [{"data1":22,"data2":10,"data3":20},"afasfa"]
                        },
                        "expire_time":{
                            "test_list1":60,
                            "test_list2":30
                        }
                    },
                    "set": {
                        "data": {
                            "test_set1": [34123,"dwqad"],
                            "test_set2": ["tete","tete"]
                        },
                        "expire_time":{
                            "test_set1":15,
                            "test_set2":30
                        }
                    },
                    "zset": {
                        "data": {
                            "test_zset1": {"data1":"qqq","data2":10,"data3":20},
                            "test_zset2": {"data1":22,"data2":10,"data3":20}
                        },
                        "expire_time":{
                            "test_zset1":15
                        }
                    },
                    "hash": {
                        "data": {
                            "test_hash1": {"data1":"qqq","data2":10,"data3":20},
                            "test_hash2": ["tete","tete"]
                        },
                        "expire_time":{
                            "test_hash1":15,
                            "test_hash2":30
                        }
                    }
                }
            ]
        },
        "API_message_parameters":{"DB":"string","InsertStatus":"object+各類型資料新增狀態,若全部新增成功則無此Response"},
        "API_example":{
            "APIS": "POST /api/IOT/1.0/rd/CommonUse/Keys",
            "InsertStatus": {
                "hash": [
                    "Failed to add Key:'test_hash2' -> Value:'['tete', 'tete']' because value must be an Object"
                ],
                "string": [
                    "Invalid input of type: 'list'. Convert to a byte, string or number first."
                ],
                "zset": [
                    "Failed to add Key:'test_zset1' -> Value:'{'data1': 'qqq', 'data3': 20, 'data2': 10}' beacuse value's value must be an integer",
                    "Key:'test_zset2' doesn't have expire_time,but added successfully with no expire_time"
                ]
            },
            "BytesTransferred": 521,
            "OperationTime": "0.004",
            "DB": "REDIS",
            "System": "IOT",
            "Response": "ok"
        }
    }
    '''
    #}}}
    err_msg = "error"
    dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
    if SYSTEM not in globalvar.SYSTEMLIST[globalvar.SERVERIP]:
        dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        return jsonify( **dicRet)
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    if not VerifyDataStrLawyer(request.data).verify_json():
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify( **dicRet)
    #collect data items from a request
    reqdataDict = ConvertData().convert(json.loads(request.data))
    # per-datatype status messages for the bulk insert
    insertstatus = {}
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,result= appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            # database connection failed
            dicRet["Response"] = result
            return jsonify( **dicRet)
        result, insertstatus = operate_CU_integration(reqdataDict, dbRedis, "Insert")
        if not result:
            dicRet["Response"] = insertstatus
            return jsonify( **dicRet)
        # Drop datatypes that produced no messages. BUGFIX: iterate over a
        # snapshot of the keys — deleting from the dict while iterating
        # .items() raises RuntimeError on Python 3.
        for key in list(insertstatus.keys()):
            if not insertstatus[key]: del insertstatus[key]
        if insertstatus: dicRet["InsertStatus"] = insertstatus
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    dicRet["Response"] = err_msg
    dicRet["DB"] = "REDIS"
    return jsonify( **dicRet)
# }}}
#=======================================================
# API to CommonUse update (full replace) redis keys
# Date: 12172020@Yishan
# FOR REDIS
#=======================================================
# {{{ INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['PUT'])
@INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['PUT'])
def redis_commonuse_update_all(SYSTEM):
    # Fully replace the values of multiple existing redis keys in one request.
    # The APIINFO docstring below is consumed at runtime by the doc system
    # and is therefore left exactly as-is.
    #{{{APIINFO
    '''
    {
        "API_application":"提供修改redis多個key&value資料,為全部更新",
        "API_parameters":{"uid":"使用者帳號"},
        "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
        "API_postData":{
            "bodytype":"Object",
            "bodyschema":"{}",
            "parameters":{
                "string":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改字符串形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value只接受字串、數字、json字串類型;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_string":123,"test_string2":"test"},"expire_time":{"test_string":60}}
                },
                "list":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改列表形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為陣列且陣列的值只接受字串、數字、json字串類型;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data": {"test_list1": [34123,"dwqad"]},"expire_time":{"test_list1":60,"test_list2":30}}
                },
                "set":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改無序集合形態的value,集合成員是唯一的,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為陣列,陣列的值只接受字串、數字、json字串類型且不得重複;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data": {"test_set1": [34123,"dwqad"]},"expire_time":{"test_set1":60}}
                },
                "zset":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改有序集合形態的value,集合成員是唯一的且都會關聯一個double 類型的分數,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為物件,value物件的value必需為數字或浮點數;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_zset": {"data1":22,"data2":10,"data3":20}},"expire_time":{"test_zset":60}}
                },
                "hash":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改JSON物件形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object)",
                                "是否必需":"必要",
                                "注意事項":"value必須為物件;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_hash1":{"data1":"qqq","data2":10,"data3":20}},"expire_time":{"test_hash1":60}}
                }
            },
            "precautions":{
                "注意事項1":"data若有設定期限,則會data-key一一比對並逐一修改(速度較慢);若無則一次修改(速度較快)"
            },
            "example":[
                {
                    "string": {
                        "data": {
                            "test_string1": 1.2,
                            "test_string2": ["sas",1321]
                        },
                        "expire_time":{
                            "test_string1":30
                        }
                    },
                    "list": {
                        "data": {
                            "test_list1": [34123,"dwqad"],
                            "test_list2": [{"data1":22,"data2":10,"data3":20},"afasfa"]
                        },
                        "expire_time":{
                            "test_list1":60,
                            "test_list2":30
                        }
                    },
                    "set": {
                        "data": {
                            "test_set1": [34123,"dwqad"],
                            "test_set2": ["tete","tete"]
                        },
                        "expire_time":{
                            "test_set1":15,
                            "test_set2":30
                        }
                    },
                    "zset": {
                        "data": {
                            "test_zset1": {"data1":"qqq","data2":10,"data3":20},
                            "test_zset2": {"data1":22,"data2":10,"data3":20}
                        },
                        "expire_time":{
                            "test_zset1":15
                        }
                    },
                    "hash": {
                        "data": {
                            "test_hash1": {"data1":"qqq","data2":10,"data3":20},
                            "test_hash2": ["tete","tete"]
                        },
                        "expire_time":{
                            "test_hash1":15,
                            "test_hash2":30
                        }
                    }
                }
            ]
        },
        "API_message_parameters":{"DB":"string","UpdateStatus":"object+各類型資料修改狀態,若全部修改成功則無此Response"},
        "API_example":{
            "APIS": "PUT /api/IOT/1.0/rd/CommonUse/Keys",
            "UpdateStatus": {
                "hash": [
                    "Failed to add Key:'test_hash2' -> Value:'['tete', 'tete']' because value must be an Object"
                ],
                "string": [
                    "Invalid input of type: 'list'. Convert to a byte, string or number first."
                ],
                "zset": [
                    "Failed to add Key:'test_zset1' -> Value:'{'data1': 'qqq', 'data3': 20, 'data2': 10}' beacuse value's value must be an integer",
                    "Key:'test_zset2' doesn't have expire_time,but added successfully with no expire_time"
                ]
            },
            "BytesTransferred": 521,
            "OperationTime": "0.004",
            "DB": "REDIS",
            "System": "IOT",
            "Response": "ok"
        }
    }
    '''
    #}}}
    err_msg = "error"
    dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
    if SYSTEM not in globalvar.SYSTEMLIST[globalvar.SERVERIP]:
        dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        return jsonify( **dicRet)
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    if not VerifyDataStrLawyer(request.data).verify_json():
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify( **dicRet)
    #collect data items from a request
    reqdataDict = ConvertData().convert(json.loads(request.data))
    # per-datatype status messages for the bulk update
    updatestatus = {}
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,result= appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            # database connection failed
            dicRet["Response"] = result
            return jsonify( **dicRet)
        result, updatestatus = operate_CU_integration(reqdataDict, dbRedis, "Put")
        if not result:
            dicRet["Response"] = updatestatus
            return jsonify( **dicRet)
        # Drop datatypes that produced no messages. BUGFIX: iterate over a
        # snapshot of the keys — deleting from the dict while iterating
        # .items() raises RuntimeError on Python 3.
        for key in list(updatestatus.keys()):
            if not updatestatus[key]: del updatestatus[key]
        if updatestatus: dicRet["UpdateStatus"] = updatestatus
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    dicRet["Response"] = err_msg
    dicRet["DB"] = "REDIS"
    return jsonify( **dicRet)
# }}}
#=======================================================
# API to CommonUse partially update redis zset/hash keys
# Date: 12172020@Yishan
# FOR REDIS
#=======================================================
# {{{ INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['PATCH'])
@INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['PATCH'])
def redis_commonuse_partial_update(SYSTEM):
    # Merge new members/fields into existing zset/hash keys (partial update).
    # The APIINFO docstring below is consumed at runtime by the doc system
    # and is therefore left exactly as-is.
    #{{{APIINFO
    '''
    {
        "API_application":"提供修改redis型態為zset、hash的key資料,為部分更新",
        "API_parameters":{"uid":"使用者帳號"},
        "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
        "API_postData":{
            "bodytype":"Object",
            "bodyschema":"{}",
            "parameters":{
                "zset":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改有序集合形態的value,集合成員是唯一的且都會關聯一個double 類型的分數,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object),若key不存在會直接建立",
                                "是否必需":"必要",
                                "注意事項":"value必須為物件,value物件的value必需為數字或浮點數;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_zset": {"data1":22,"data2":10,"data3":20}},"expire_time":{"test_zset":60}}
                },
                "hash":{
                    "type":"Object",
                    "requirement":"optional",
                    "directions":[
                        "修改JSON物件形態的value,詳細內容格式請看'Show Details'",
                        {
                            "data":{
                                "說明":"欲修改的key&value(object),若key不存在會直接建立",
                                "是否必需":"必要",
                                "注意事項":"value必須為物件;資料以字串型態儲存"
                            },
                            "expire_time":{
                                "說明":"欲修改的key之有效期限(object)",
                                "是否必需":"選填",
                                "注意事項":"若需要設定key的有效期限(秒),key需對應data的key且value必須為正整數;不需要全部data的Key都同時設定期限,可選擇指定key"
                            }
                        }
                    ],
                    "example":{"data":{"test_hash1":{"data1":"qqq","data2":10,"data3":20}},"expire_time":{"test_hash1":60}}
                }
            },
            "precautions":{
                "注意事項1":"data若有設定期限,則會data-key一一比對並逐一修改(速度較慢);若無則一次修改(速度較快)"
            },
            "example":[
                {
                    "zset": {
                        "data": {
                            "test_zset1": {"data1":50,"data2":10,"data3":20},
                            "test_zset2": {"data1":22,"data2":10,"data3":20}
                        },
                        "expire_time":{
                            "test_zset1":15
                        }
                    },
                    "hash": {
                        "data": {
                            "test_hash1": {"data1":"qqq","data2":10,"data3":20},
                            "test_hash2": {"data11":"qqq","data22":10,"data33":20}
                        },
                        "expire_time":{
                            "test_hash1":15,
                            "test_hash2":30
                        }
                    }
                }
            ]
        },
        "API_message_parameters":{"DB":"string","UpdateStatus":"object+各類型資料修改狀態,若全部修改成功則無此Response"},
        "API_example":{
            "APIS": "PATCH /api/IOT/1.0/rd/CommonUse/Keys",
            "UpdateStatus": {
                "hash": [
                    "Failed to add Key:'test_hash2' -> Value:'['tete', 'tete']' because value must be an Object"
                ],
                "zset": [
                    "Failed to add Key:'test_zset1' -> Value:'{'data1': 'qqq', 'data3': 20, 'data2': 10}' beacuse value's value must be an integer",
                    "Key:'test_zset2' doesn't have expire_time,but added successfully with no expire_time"
                ]
            },
            "BytesTransferred": 521,
            "OperationTime": "0.004",
            "DB": "REDIS",
            "System": "IOT",
            "Response": "ok"
        }
    }
    '''
    #}}}
    err_msg = "error"
    dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
    if SYSTEM not in globalvar.SYSTEMLIST[globalvar.SERVERIP]:
        dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        return jsonify( **dicRet)
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    if not VerifyDataStrLawyer(request.data).verify_json():
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify( **dicRet)
    #collect data items from a request
    reqdataDict = ConvertData().convert(json.loads(request.data))
    # per-datatype status messages for the partial update
    updatestatus = {}
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,result= appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            # database connection failed
            dicRet["Response"] = result
            return jsonify( **dicRet)
        # "Patch" restricts the action to zset/hash and merges instead of
        # replacing (see operate_CU_integration / _RedisInsertUpdateAction)
        result, updatestatus = operate_CU_integration(reqdataDict, dbRedis, "Patch")
        if not result:
            dicRet["Response"] = updatestatus
            return jsonify( **dicRet)
        # Drop datatypes that produced no messages. BUGFIX: iterate over a
        # snapshot of the keys — deleting from the dict while iterating
        # .items() raises RuntimeError on Python 3.
        for key in list(updatestatus.keys()):
            if not updatestatus[key]: del updatestatus[key]
        if updatestatus: dicRet["UpdateStatus"] = updatestatus
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    dicRet["Response"] = err_msg
    dicRet["DB"] = "REDIS"
    return jsonify( **dicRet)
# }}}
#=======================================================
# API to CommonUse update specific fields of redis hash keys
# Date: 12172020@Yishan
# FOR REDIS
#=======================================================
# {{{ INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Hash/Keys/SpecificField', methods = ['PATCH'])
@INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Hash/Keys/SpecificField', methods = ['PATCH'])
def redis_commonuse_hash_update_specific_field(SYSTEM):
    # Update specific fields of existing hash keys; both the key and every
    # named field must already exist. The APIINFO docstring below is consumed
    # at runtime by the doc system and is therefore left exactly as-is.
    #{{{APIINFO
    '''
    {
        "API_application":"提供修改redis Hash型態key的指定field值,為部分更新",
        "API_parameters":{"uid":"使用者帳號"},
        "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
        "API_postData":{
            "bodytype":"Object",
            "bodyschema":"{}",
            "key": {"type":"Object","requirement":"required","directions":"欲更新的key,value需為物件且value->key值必須存在才能更新","example":{"test":{"qq":111,"cc":"daa"}}},
            "precautions":{
                "注意事項1":"第一層與第二層的key必須存在"
            },
            "example":[
                {
                    "test":{
                        "qq":"sdfsdf",
                        "ww":"asdasd",
                        "cc":222,
                        "ss":"dsadas",
                        "rr":"dasd"
                    }
                }
            ]
        },
        "API_message_parameters":{"UpdateStatus":"object+各類型資料更新狀態,若全部更新成功則無此Response"},
        "API_example":{
            "Response": "ok",
            "APIS": "PATCH /api/IOT/1.0/rd/CommonUse/Hash/Keys/SpecificField",
            "OperationTime": "0.002",
            "BytesTransferred": 187,
            "DB": "REDIS",
            "System": "IOT",
            "UpdateStatus": {
                "test": [
                    "key:ss doesn't existed",
                    "key:cc doesn't existed"
                ]
            }
        }
    }
    '''
    #}}}
    err_msg = "error"
    # alias: the "test" system is served from the IOT database
    if SYSTEM == "test": SYSTEM = "IOT"
    dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
    if SYSTEM not in globalvar.SYSTEMLIST[globalvar.SERVERIP]:
        dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        return jsonify( **dicRet)
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    is_illegal,is_dict = VerifyDataStrLawyer(request.data).verify_json(check_dict=True)
    if not (is_illegal and is_dict):
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify( **dicRet)
    #collect data items from a request
    reqdataDict = ConvertData().convert(json.loads(request.data))
    # per-key status messages for the field updates
    updatestatus = {}
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,result= appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            # database connection failed
            dicRet["Response"] = result
            return jsonify( **dicRet)
        for key,value in reqdataDict.items():
            this_obj = {}
            if not dbRedis.exists(key):
                dicRet["Response"] = "Key : {} doesn't existed".format(key)
                return jsonify( **dicRet)
            if not isinstance(value,dict):
                dicRet["Response"] = "Error type of '{}',it must be an Object".format(value)
                return jsonify( **dicRet)
            updatestatus[key] = []
            for field,field_value in value.items():
                if field_value is not None:
                    if not dbRedis.hexists(key, field):
                        updatestatus[key].append("key:{} doesn't existed".format(field))
                    else:
                        this_value = field_value
                        # nested objects/arrays are stored as JSON strings
                        if isinstance(field_value,dict) or isinstance(field_value,list): this_value = json.dumps(field_value)
                        this_obj[field] = this_value
            # BUGFIX: HMSET with an empty mapping raises an error in redis;
            # only write when at least one field passed validation
            if this_obj:
                dbRedis.hmset(key, this_obj)
        # Drop keys that produced no messages. BUGFIX: iterate over a snapshot
        # of the keys — deleting while iterating .items() raises RuntimeError
        # on Python 3.
        for key in list(updatestatus.keys()):
            if not updatestatus[key]: del updatestatus[key]
        if updatestatus: dicRet["UpdateStatus"] = updatestatus
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    dicRet["Response"] = err_msg
    dicRet["DB"] = "REDIS"
    return jsonify( **dicRet)
# }}}
#=======================================================
# API to CommonUse delete redis keys
# Date: 12172020@Yishan
# FOR REDIS
#=======================================================
# {{{ INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['DELETE'])
@INMEMORYDB_API.route('/api/<SYSTEM>/1.0/rd/CommonUse/Keys', methods = ['DELETE'])
def redis_commonuse_delete(SYSTEM):
    # Delete multiple redis keys named in the request body. The APIINFO
    # docstring below is consumed at runtime by the doc system and is
    # therefore left exactly as-is.
    #{{{APIINFO
    '''
    {
        "API_application":"提供刪除redis多個key",
        "API_parameters":{"uid":"使用者帳號"},
        "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
        "API_postData":{
            "bodytype":"Object",
            "bodyschema":"{}",
            "parameters":{
                "keys":{"type":"Array","requirement":"required","directions":"欲刪除的key列表","example":"{'keys':[123,'test',....]}"}
            },
            "example":[
                {
                    "keys":[123,"1234"]
                }
            ]
        },
        "API_message_parameters":{"DB":"string"},
        "API_example":{
            "APIS": "DELETE /api/IOT/1.0/rd/CommonUse/Keys",
            "BytesTransferred": 521,
            "OperationTime": "0.004",
            "DB": "MSSQL",
            "System": "IOT",
            "Response": "ok"
        }
    }
    '''
    #}}}
    err_msg = "error"
    dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
    if SYSTEM not in globalvar.SYSTEMLIST[globalvar.SERVERIP]:
        dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        return jsonify( **dicRet)
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    if not VerifyDataStrLawyer(request.data).verify_json():
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify( **dicRet)
    #collect data items from a request
    reqdataDict = ConvertData().convert(json.loads(request.data))
    if not check_post_parameter_exist(reqdataDict,["keys"]):
        dicRet["Response"] = "Missing post parameters : '{}'".format(post_parameter)
        return jsonify( **dicRet)
    if not isinstance(reqdataDict.get("keys"),list):
        dicRet["Response"] = "Error type of '{}',it must be an Array".format(reqdataDict.get("keys"))
        return jsonify( **dicRet)
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,result= appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            # database connection failed
            dicRet["Response"] = result
            return jsonify( **dicRet)
        # BUGFIX: redis DEL requires at least one key; an empty "keys" array
        # previously raised an exception and returned an error response.
        # An empty list is now a no-op success.
        if reqdataDict.get("keys"):
            dbRedis.delete(*reqdataDict.get("keys"))
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    dicRet["Response"] = err_msg
    dicRet["DB"] = "REDIS"
    return jsonify( **dicRet)
# }}} | 1.382813 | 1 |
example/development.py | cclauss/django-authority | 2 | 12770874 |
# Development settings: inherit everything from the base settings module,
# then switch debug modes on.
from example.settings import *

DEBUG = True
TEMPLATE_DEBUG = DEBUG
| 1.195313 | 1 |
main/class/attributes/attributes.py | catalinprescure/python-pages | 0 | 12770875 | <filename>main/class/attributes/attributes.py
# Class attributes ...
#
# Python allows attaching attributes to an instance dynamically with dot
# notation; no __init__ or class-level declaration is required.
class Point:
    """ a 2D point """

p = Point()
p.x = 1
p.y = 2

assert p.x == 1
assert p.y == 2
mbio/server/endpoint.py | iluxonchik/mercedes-benz-io-challenge-2018-sinfo | 2 | 12770876 | <gh_stars>1-10
"""API endpoints."""
class Endpoint(object):
    """Route table for the REST API.

    Query parameters accepted by each route are noted alongside it.
    """

    API_PREFIX = '/api/'

    # GET ?dealer=AAA&model=XXX&fuel=YYY&transmission=ZZZ
    VEHICLES = API_PREFIX + 'vehicles/'
    # GET ?dealer=AAA&model=XXX&fuel=YYY&transmission=ZZZ&latitude=LLL&longitude=OOO
    DEALERS_CLOSEST_LIST = API_PREFIX + 'dealers/'
    # GET ?dealer=AAA&model=XXX&fuel=YYY&transmission=ZZZ&latitude=LLL&longitude=OOO
    DEALER_CLOSEST = API_PREFIX + 'dealers/closest/'
    DEALERS_IN_POLYGON = API_PREFIX + 'dealers/polygon/'
    # POST body: {first_name, last_name, vehicle_id, pickup_date}
    BOOKINGS_CREATE = API_PREFIX + 'bookings/create/'
    # POST body: {booking_id, reason}
    BOOKINGS_CANCEL = API_PREFIX + 'bookings/cancel/'
| 1.78125 | 2 |
tests/_test_google_student.py | ianchen-tw/invisible-hand | 2 | 12770877 | <gh_stars>1-10
from unittest.mock import patch
import pygsheets
import pytest
from hand.errors import ERR_REQUIRE_NO_SPACE, ERR_UNIQUE_STUDENT_ID
from hand.utils.google_student import Gstudents, pygsheetInteractor
# TODO:
# test if pygsheet work as supposed
class TestInteractor:
    """Test Interactor work properly if pygsheet does"""
    def test_interactor_need_to_call_open_url_before_use(self):
        # get_all_record() must fail fast until open_by_url() has been called.
        actor = pygsheetInteractor(pygsheets)
        with pytest.raises(RuntimeError):
            actor.get_all_record("not-important")
    @patch("pygsheets.authorize")
    def test_interactor_auth_on_create(self, pyg_auth):
        # Constructing the interactor authorizes against Google exactly once.
        pygsheetInteractor()
        pyg_auth.assert_called_once()
    @patch("pygsheets.authorize")
    def test_get_all_record(self, pyg_auth):
        """get_all_record maps a 2-D value matrix to [{col_name: val}, ...]."""
        actor = pygsheetInteractor()
        actor.open_by_url("test_sheet")
        # First row is the header; remaining rows become dicts keyed by it.
        mock_matrix = [["c1", "c2"], ["a2", "a3"], ["b4", "b3"]]
        ans = [{"c1": "a2", "c2": "a3"}, {"c1": "b4", "c2": "b3"}]
        # Walk the same mock call chain the interactor uses internally so the
        # worksheet mock it ends up holding is the one configured here.
        pyg_wks = pyg_auth().open_by_url().worksheet_by_title()
        pyg_wks.get_values.return_value = mock_matrix.copy()
        pyg_wks.cols, pyg_wks.rows = 2, 2
        ret = actor.get_all_record("not-important")
        assert ret == ans
class mockInteractor(pygsheetInteractor):
    """In-memory stand-in for pygsheetInteractor: no Google API traffic."""
    # Rows served when the "StudentInfo" worksheet is requested.
    student_info_table = [
        {
            "student_id": "A1",
            "github_handle": "aaa",
            "name": "Andy",
            "email": "<EMAIL>",
        },
        {
            "student_id": "A2",
            "github_handle": "bbb",
            "name": "Ben",
            "email": "<EMAIL>",
        },
        {
            "student_id": "A3",
            "github_handle": "ccc",
            "name": "Cindy",
            "email": "<EMAIL>",
        },
    ]
    # Rows served for any other worksheet title (e.g. an assignment sheet).
    default_records = [
        {"student_id": "A1", "score": "99"},
        {"student_id": "A2", "score": "102"},
    ]
    def _test_set_records(self, records, title=None):
        """Test hook: replace the rows that get_all_record() will serve.

        Note: assignment through ``self`` shadows the class attribute on
        this instance only; other instances keep the class-level defaults.
        """
        if title == "StudentInfo":
            self.student_info_table = records.copy()
        else:
            self.default_records = records.copy()
    def __init__(self):
        # Intentionally skip the parent __init__ (it would call
        # pygsheets.authorize and hit the network).
        pass
    def open_by_url(self, *args, **kwargs):
        # Record the call so tests can assert it happened.
        self.open_by_url_called = True
    def get_all_record(self, title, head=1):
        # Serve the canned rows for the requested worksheet title.
        if title == "StudentInfo":
            return self.student_info_table
        return self.default_records
@pytest.fixture(scope="function")
def gstudent():
    """Fresh Gstudents instance wired to the in-memory mock interactor."""
    return Gstudents("test_url", mockInteractor())
class TestGstudents:
    """Test Gstudents work properly with Interactor"""
    def test_init_must_call_open_by_url(self, gstudent):
        # Construction must immediately open the configured spreadsheet URL.
        assert gstudent.actor.open_by_url_called == True
    def test_gstudent_get_students(self, gstudent):
        # get_students() returns the StudentInfo worksheet rows verbatim.
        assert mockInteractor.student_info_table == gstudent.get_students()
    def test_left_join(self, gstudent):
        """Joining an assignment sheet keeps only students who have a score row
        (A3 has no "hw1" record and is therefore dropped)."""
        ret = gstudent.left_join("hw1")
        ans = [
            {
                "student_id": "A1",
                "github_handle": "aaa",
                "name": "Andy",
                "email": "<EMAIL>",
                "score": "99",
            },
            {
                "student_id": "A2",
                "github_handle": "bbb",
                "name": "Ben",
                "email": "<EMAIL>",
                "score": "102",
            },
        ]
        assert ans == ret
    def test_find_student(self, gstudent):
        # Lookup by student_id returns that student's full info row.
        stu = gstudent.get_student("A1")
        assert {
            "student_id": "A1",
            "github_handle": "aaa",
            "name": "Andy",
            "email": "<EMAIL>",
        } == stu
    def test_error_on_duplicate_id(self):
        # Two rows sharing student_id "A1" must be rejected at construction.
        actor = mockInteractor()
        actor._test_set_records(
            [
                {
                    "student_id": "A1",
                    "github_handle": "aaa",
                    "name": "Andy",
                    "email": "<EMAIL>",
                },
                {
                    "student_id": "A1",
                    "github_handle": "bbb",
                    "name": "Ben",
                    "email": "<EMAIL>",
                },
                {
                    "student_id": "A3",
                    "github_handle": "ccc",
                    "name": "Cindy",
                    "email": "<EMAIL>",
                },
            ],
            title="StudentInfo",
        )
        with pytest.raises(ERR_UNIQUE_STUDENT_ID):
            Gstudents("test_url", actor)
    def test_error_on_spacy_config_fields(self):
        # Fields containing whitespace (" A1", "aa a") must be rejected.
        actor = mockInteractor()
        actor._test_set_records(
            [
                {
                    "student_id": " A1",
                    "github_handle": "aa a",
                    "name": "Andy",
                    "email": "<EMAIL>",
                },
            ],
            title="StudentInfo",
        )
        with pytest.raises(ERR_REQUIRE_NO_SPACE):
            Gstudents("test_url", actor)
| 2.3125 | 2 |
visualize_vae.py | RohanCK/StyleTransfer | 0 | 12770878 | <reponame>RohanCK/StyleTransfer
import argparse
import matplotlib.pyplot as plt
import os
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from utils.hyperparameters import DIM_LATENT, NORMALIZE_MEAN, NORMALIZE_STDEV
from utils.img_transforms import transform, transform_back
from utils.plots import grid_add_img
from models.vae import VAE
if __name__ == '__main__':
    # Visualize VAE reconstructions: row 0 shows the original images, each
    # following row shows the reconstructions produced by one checkpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vae', type=str, nargs='+', required=True,\
        help="path to vae")
    parser.add_argument('--imgdir', type=str, required=True,\
        help="path to image folder")
    args = parser.parse_args()
    img_names = os.listdir(args.imgdir)
    n_imgs = len(img_names)
    # One grid row per checkpoint plus one for the ground-truth images.
    rows = len(args.vae) + 1
    fig = plt.figure()
    with torch.no_grad():  # inference only; no gradients needed
        for k in range(len(args.vae)):
            vae = VAE(DIM_LATENT)
            vae.load_state_dict(torch.load(args.vae[k]))
            for i in range(n_imgs):
                path = os.path.join(args.imgdir, img_names[i])
                img = Image.open(path)
                # Normalize and shape to a (1, 3, 64, 64) batch.
                # NOTE(review): assumes all input images are 64x64 RGB after
                # `transform` — confirm against utils.img_transforms.
                x_true = transform(img)
                x_true = x_true.unsqueeze(0)
                x_true = x_true.view(1, 3, 64, 64)
                x_rec, mu, logvar = vae(x_true)
                x_true = x_true.squeeze(0)
                x_rec = x_rec.squeeze(0)
                img_true = transform_back(x_true)
                img_rec = transform_back(x_rec)
                # Originals only need to be drawn once (first checkpoint pass).
                if k == 0: grid_add_img(img_true, fig, rows, n_imgs, i+1)
                grid_add_img(img_rec, fig, rows, n_imgs, (k+1)*n_imgs+i+1)
    fig.subplots_adjust(wspace=0, hspace=0)
    plt.show()
src/scripts/themis/job_runner/read_request_queues.py | anku94/themis_tritonsort | 11 | 12770879 | <gh_stars>10-100
#!/usr/bin/env python
import os, sys, argparse, redis, json
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir)))
import utils
def readable_read_request(read_request):
    """Render a JSON-encoded read request as a one-line human-readable string.

    Type 0 is a file-read request, type 1 is a halt request; any other type
    is returned verbatim.
    """
    req = json.loads(read_request)
    kind = req["type"]
    if kind == 0:
        req["job_ids"] = ','.join(map(str, req["job_ids"]))
        return ("Read %(path)s for job(s) %(job_ids)s "
                "(%(length)dB @ offset %(offset)d)") % req
    if kind == 1:
        if "job_ids" in req:
            req["job_ids"] = ','.join(map(str, req["job_ids"]))
        else:
            req["job_ids"] = "???"
        return ("Halt job(s) %(job_ids)s") % (req)
    # Unknown request type: fall back to the raw JSON string.
    return read_request
def list_read_queues(redis_client, host, read_queues):
    """Print every read-request queue for *host* and its pending entries.

    NOTE: uses Python 2 print statements, like the rest of this script.
    """
    print "%s:" % (host)
    for read_queue in read_queues:
        # Queue keys are colon-delimited; chunks 2 and 3 appear to be the
        # worker name and worker id (key prefix is "read_requests:<ip>").
        read_queue_chunks = read_queue.split(':')
        worker_name = read_queue_chunks[2]
        worker_id = read_queue_chunks[3]
        # Trailing comma keeps the count on the same output line (py2 idiom).
        print "%s %s " % (worker_name, worker_id),
        read_requests = redis_client.lrange(read_queue, 0, -1)
        print "(%d element(s) in queue):" % (len(read_requests))
        for i, read_request in enumerate(read_requests):
            print "% 5d. %s" % (i + 1, readable_read_request(read_request))
        print ""
def flush_read_queues(redis_client, read_queues):
    """Delete every listed read-request queue key from redis."""
    for queue_key in read_queues:
        redis_client.delete(queue_key)
def read_request_queues(redis_host, redis_port, redis_db, directive):
    """Connect to the coordinator's redis and list or flush every node's
    read-request queues, depending on *directive* ("list" or "flush")."""
    client = redis.StrictRedis(
        host=redis_host, port=redis_port, db=redis_db)
    for host in client.smembers("nodes"):
        ip_address = client.hget("ipv4_address", host)
        # Queue keys for this node, in deterministic (sorted) order.
        queues = sorted(client.smembers("read_requests:%s" % (ip_address)))
        if directive == "list":
            list_read_queues(client, host, queues)
        elif directive == "flush":
            flush_read_queues(client, queues)
def main():
    """Parse command-line arguments and run the requested queue action."""
    arg_parser = argparse.ArgumentParser(
        description="dump the contents of the coordinator's read request "
        "queues")
    # Shared --redis_host/--redis_port/--redis_db options.
    utils.add_redis_params(arg_parser)
    arg_parser.add_argument(
        "directive", choices=["list", "flush"], help="specify "
        "which action to perform on read request queues")
    parsed = arg_parser.parse_args()
    return read_request_queues(**vars(parsed))
if __name__ == "__main__":
sys.exit(main())
| 2.578125 | 3 |
setup.py | wfscheper/epdb | 63 | 12770880 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) SAS Institute, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import absolute_import
from __future__ import print_function
from os.path import dirname
from os.path import join
import io
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this script.

    ``names`` are path components joined onto the directory containing
    setup.py; ``encoding`` may be passed as a keyword (default ``utf-8``).
    """
    # Use a context manager so the file handle is always closed; the
    # original left the handle open until garbage collection.
    encoding = kwargs.get('encoding', 'utf-8')
    with io.open(join(dirname(__file__), *names), encoding=encoding) as fobj:
        return fobj.read()
# Package version; bump here for releases.
Version = "0.16.0"
# Runtime dependency: six provides the py2/py3 compatibility shims.
install_requires = ["six"]
setup(name="epdb",
      version=Version,
      description="Enhanced Python Debugger",
      long_description=read('README.rst'),
      author="SAS Institute, Inc.",
      author_email="<EMAIL>",
      url="https://github.com/sassoftware/epdb",
      packages=['epdb'],
      license='MIT',
      platforms='Posix; MacOS X; Windows',
      classifiers=['Operating System :: OS Independent',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: MIT License',
                   'Topic :: Software Development :: Debuggers',
                   'Programming Language :: Python :: 2',
                   'Programming Language :: Python :: 3',
                   ],
      keywords='debugger pdb remote',
      entry_points={
          # Nose plugin: drop into epdb on test failure.
          'nose.plugins.0.10': [
              'epdb-debug = epdb.epdb_nose:Epdb',
          ],
          # CLI entry point for the remote debugging client.
          'console_scripts': [
              'epdb = epdb.epdb_client:main',
          ],
      },
      install_requires=install_requires,
      )
| 1.273438 | 1 |
pybtooth/media_player.py | elParaguayo/pybtooth | 2 | 12770881 | import dbus
from .base import BluetoothBase
from .constants import *
class BluetoothMediaPlayer(BluetoothBase):
    """Python-friendly wrapper for a bluez ``org.bluez.MediaPlayer1`` D-Bus object.

    Playback is driven through the method wrappers (``Play``, ``Pause``, ...)
    and state is read/written through the properties below.
    """

    # Coercion table: metadata key in the Track property -> native Python type.
    TRACK_TYPES = {"Title": str,
                   "Artist": str,
                   "Album": str,
                   "Genre": str,
                   "NumberOfTracks": int,
                   "TrackNumber": int,
                   "Duration": int,}
    # Accepted values for the writable Equalizer/Repeat/Shuffle/Scan properties.
    EQUALIZER_OFF = "off"
    EQUALIZER_ON = "on"
    REPEAT_OFF = "off"
    REPEAT_SINGLE_TRACK = "singletrack"
    REPEAT_ALL_TRACKS = "alltracks"
    REPEAT_GROUP = "group"
    SHUFFLE_OFF = "off"
    SHUFFLE_ALL_TRACKS = "alltracks"
    SHUFFLE_GROUP = "group"
    SCAN_OFF = "off"
    SCAN_ALL_TRACKS = "alltracks"
    SCAN_GROUP = "group"
    # Possible values of the read-only Status property.
    STATUS_PLAYING = "playing"
    STATUS_STOPPED = "stopped"
    STATUS_PAUSED = "paused"
    STATUS_FORWARD_SEEK = "forward-seek"
    STATUS_REVERSE_SEEK = "reverse-seek"
    STATUS_ERROR = "error"
    # Possible values of the Type / Subtype properties.
    TYPE_AUDIO = "Audio"
    TYPE_VIDEO = "Video"
    TYPE_AUDIO_BROADCASTING = "Audio Broadcasting"
    TYPE_VIDEO_BROADCASTING = "Video Broadcasting"
    SUBTYPE_AUDIOBOOK = "Audio Book"
    SUBTYPE_PODCAST = "Podcast"
    def __init__(self, path):
        """Bind to the MediaPlayer1 object at D-Bus object path *path*."""
        super(BluetoothMediaPlayer, self).__init__()
        self.device = dbus.SystemBus().get_object(SERVICE_NAME, path)
        self.interface = dbus.Interface(self.device, PLAYER_IFACE)
        self.props = dbus.Interface(self.device, PROPERTIES_IFACE)
        # Shorthand accessors for the org.freedesktop.DBus.Properties API.
        self.get = lambda prop: self.props.Get(PLAYER_IFACE, prop)
        self.set = lambda prop, value: self.props.Set(PLAYER_IFACE, prop, value)
        # Fallback metadata returned when no track information is available.
        self.meta_template = {'Album': '',
                              'NumberOfTracks': 0,
                              'Title': '',
                              'Artist': '',
                              'Duration': 0,
                              'Genre': '',
                              'TrackNumber': 0}
    def __repr__(self):
        try:
            n = self.Name
        # FIX: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            n = "no name"
        return ("<pybtooth.media_player.BluetoothMediaPlayer "
                "(name='{n}')>").format(n=n)
    # Methods (thin pass-throughs to the MediaPlayer1 interface)
    def Play(self):
        self.interface.Play()
    def Stop(self):
        self.interface.Stop()
    def Pause(self):
        self.interface.Pause()
    def Next(self):
        self.interface.Next()
    def Previous(self):
        self.interface.Previous()
    def FastForward(self):
        self.interface.FastForward()
    def Rewind(self):
        self.interface.Rewind()
    # Properties (each forwards to the D-Bus property of the same name)
    @property
    def Equalizer(self):
        return str(self.get("Equalizer"))
    @Equalizer.setter
    def Equalizer(self, value):
        self.set("Equalizer", value)
    @property
    def Repeat(self):
        return str(self.get("Repeat"))
    @Repeat.setter
    def Repeat(self, value):
        self.set("Repeat", value)
    @property
    def Shuffle(self):
        return str(self.get("Shuffle"))
    @Shuffle.setter
    def Shuffle(self, value):
        self.set("Shuffle", value)
    @property
    def Scan(self):
        return str(self.get("Scan"))
    @Scan.setter
    def Scan(self, value):
        self.set("Scan", value)
    @property
    def Status(self):
        return str(self.get("Status"))
    @property
    def Position(self):
        return int(self.get("Position"))
    @property
    def Track(self):
        """Current track metadata as a plain dict (template copy on failure)."""
        try:
            meta = self.get("Track")
            # FIX: use items() instead of the Python-2-only iteritems() so
            # this also runs on Python 3 (dbus dictionaries subclass dict).
            raw = {str(k): self.TRACK_TYPES[str(k)](v)
                   for k, v in meta.items()}
            return raw
        # FIX: narrowed from a bare "except:"; still covers missing property
        # and unknown metadata keys (KeyError from TRACK_TYPES).
        except Exception:
            return self.meta_template.copy()
    @property
    def Device(self):
        return self.get("Device")
    @property
    def Type(self):
        return str(self.get("Type"))
    @property
    def Subtype(self):
        return str(self.get("Subtype"))
    @property
    def Browsable(self):
        return bool(self.get("Browsable"))
    @property
    def Searchable(self):
        return bool(self.get("Searchable"))
    @property
    def Playlist(self):
        return self.get("Playlist")
    @property
    def Name(self):
        return str(self.get("Name"))
    @property
    def Metadata(self):
        # Backwards-compatible alias for Track.
        return self.Track
| 2.546875 | 3 |
rl_agent/pomm_network.py | AntonPotapchuk/playground | 0 | 12770882 | import tensorflow as tf
from keras import Model
from keras.layers import Convolution2D, BatchNormalization, Activation, Add, Dense
from keras.models import load_model
from tensorforce.core.networks import Network
from keras.engine import Input
class PommNetwork(Network):
    """Tensorforce Network backed by a small Keras dense softmax head."""
    def tf_apply(self, x, internals, update, return_internals=False):
        # Delegate to the Keras graph; this network keeps no internal state.
        fc = self.create_network(x)
        # TODO maybe {} is not ok
        if return_internals:
            return fc, {}
        return fc
    @staticmethod
    def create_network(board):
        """Build a 2x(Dense 8, ReLU) -> Dense 6 softmax head over the board input
        and load pre-trained weights; returns the output tensor."""
        inp = Input(tensor = board['board'])
        x = Dense(8)(inp)
        x = Activation('relu')(x)
        x = Dense(8)(x)
        x = Activation('relu')(x)
        out = Dense(6)(x)  # 6 outputs — presumably one per Pommerman action; confirm
        out = Activation('softmax')(out)
        # The Model object exists only so load_weights can attach weights to
        # the layers; callers use the raw `out` tensor.
        model = Model(inputs=inp, outputs=out)
        model.load_weights('./dqn/model/ddgp_dense_8_2/model.h4')
        return out
tests/unit/test_ext/test_pendulum.py | monoflo/bloop | 63 | 12770883 | <reponame>monoflo/bloop
from datetime import datetime
import pendulum
import pytest
import pytz
from bloop.ext.pendulum import DateTime, Timestamp
from bloop.types import FIXED_ISO8601_FORMAT
# Shared fixtures: a second-precision "now" in UTC plus its serialized forms.
now = datetime.now(pytz.utc).replace(microsecond=0)
# NOTE(review): now_eastern appears unused in this module — confirm before removing.
now_eastern = datetime.now(pytz.timezone("US/Eastern"))
now_iso8601 = now.strftime(FIXED_ISO8601_FORMAT)
now_timestamp = str(int(now.timestamp()))
@pytest.mark.parametrize("timezone", ["utc", "US/Eastern"])
def test_datetime(timezone):
    """DateTime round-trips through its fixed ISO-8601 wire format,
    regardless of the typedef's configured timezone."""
    # Variable name is historical; this wraps `now` as a pendulum instance.
    delorean_now = pendulum.instance(now)
    typedef = DateTime(timezone)
    assert typedef.dynamo_dump(delorean_now, context={}) == now_iso8601
    assert typedef.dynamo_load(now_iso8601, context={}).in_timezone("utc") == now
@pytest.mark.parametrize("timezone", ["utc", "US/Eastern"])
def test_timestamp(timezone):
    """Timestamp round-trips through its unix-seconds string wire format,
    regardless of the typedef's configured timezone."""
    delorean_now = pendulum.instance(now)
    typedef = Timestamp(timezone)
    assert typedef.dynamo_dump(delorean_now, context={}) == now_timestamp
    assert typedef.dynamo_load(now_timestamp, context={}).in_timezone("utc") == now
@pytest.mark.parametrize("typedef_cls", (DateTime, Timestamp))
def test_none(typedef_cls):
    """Both typedefs pass None through unchanged in each direction."""
    typedef = typedef_cls()
    assert typedef.dynamo_dump(None, context={}) is None
    assert typedef.dynamo_load(None, context={}) is None
| 2.1875 | 2 |
overhead/clicks.py | crossflag/trackActivity | 1 | 12770884 | import cv2
import sys
import numpy as np
import pyperclip as ppc
from tkinter import filedialog
from tkinter import *
def record_click(event,x,y,flags,param):
    """OpenCV mouse callback: on double left-click, remember the point and mark it."""
    global mouseX,mouseY
    if event == cv2.EVENT_LBUTTONDBLCLK:
        mouseX,mouseY = x,y
        point = "[" + str(mouseX) + ", " + str(mouseY) + "]"
        # Draw a marker on the main image at the clicked location.
        cv2.drawMarker(img, (x, y), (0, 0, 255), markerSize=10, thickness=1)
        # Show the coordinates in the small auxiliary "Point" window.
        blank = np.zeros((64,172,3), np.uint8)
        cv2.putText(blank, point, (2, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.75, (0, 0, 255))
        cv2.imshow("Point", blank)
        k=cv2.waitKey(10) & 0XFF
# --- load the image: from argv if given, otherwise via a file dialog --------
failed = False
if len(sys.argv) == 2:
    file = str(sys.argv[1])
    img = cv2.imread(file)
    # BUGFIX: cv2.imread returns None when the file cannot be read; the old
    # check called img.any(), which raised AttributeError on None and also
    # wrongly rejected valid all-black images.
    if img is None:
        failed = True
if len(sys.argv) != 2 or failed:
    root = Tk()
    root.filename = filedialog.askopenfilename(initialdir = ".",title = "Select file",filetypes = (("png files","*.png"),("jpeg files","*.jpg"),("all files","*.*")))
    file = root.filename
    img = cv2.imread(file)
    root.destroy()
height, width, layers = img.shape
cv2.namedWindow("Select Points")
cv2.namedWindow("Point")
# Place the coordinate readout window just right of the main window.
cv2.moveWindow("Point", width+132, 38)
cv2.setMouseCallback("Select Points",record_click)
# Accumulate saved points as a bracketed list string, e.g. "[[1, 2], [3, 4]]".
points = "["
coord = ""
while(1):
    cv2.imshow("Select Points",img)
    k = cv2.waitKey(20) & 0xFF
    if k == ord('\r'):  # Enter: finish
        break
    elif k == ord('s'):  # 's': save the last double-clicked point
        # NOTE(review): pressing 's' before any double-click raises NameError
        # since mouseX/mouseY are only set inside record_click — confirm.
        coord = "[" + str(mouseX) + ", " + str(mouseY) + "], "
        points += coord
        print(coord[:-2], " - saved")
    elif k == ord('\b'):  # Backspace: drop the most recently saved point
        points = points[:-len(coord)]
        print(coord[:-2], " - removed")
if len(points) > 3:
    points = points[:-2]  # trim the trailing ", "
points += "]"
print(points)
# Copy the final point list to the system clipboard.
ppc.copy(points)
| 2.859375 | 3 |
frontend/upload.py | davezen1/calc | 0 | 12770885 | from django import forms
class UploadWidget(forms.widgets.FileInput):
    '''
    File-input widget the user can drag and drop files into.

    It renders a wrapper <div class="upload"> around the native input so
    upload.js can progressively enhance it; it is tightly coupled to that
    script.
    '''
    def __init__(self, attrs=None, degraded=False,
                 accept=(".xlsx", ".xls", ".csv"),
                 extra_instructions='XLS, XLSX, or CSV format, please.'):
        super().__init__(attrs=attrs)
        self.degraded = degraded                    # force the no-JS fallback rendering
        self.accept = accept                        # allowed file extensions
        self.extra_instructions = extra_instructions

    def render(self, name, value, attrs=None):
        """Render the wrapped file input plus chooser markup."""
        degradation_flag = ' data-force-degradation' if self.degraded else ''
        merged_attrs = dict(attrs) if attrs else {}
        merged_attrs['accept'] = ",".join(self.accept)
        label_target = merged_attrs.get('id', '')
        markup = [
            '<div class="upload"%s>' % degradation_flag,
            ' %s' % super().render(name, value, merged_attrs),
            ' <div class="upload-chooser">',
            ' <label for="%s">Choose file</label>' % label_target,
            ' <span class="js-only" aria-hidden="true">',
            ' or drag and drop here.',
            ' </span>',
            ' <span>%s</span>' % self.extra_instructions,
            ' </div>',
            '</div>'
        ]
        return "\n".join(markup)
| 2.390625 | 2 |
tests/memory/test_data.py | cjpit/selpi | 5 | 12770886 | <gh_stars>1-10
from binascii import Error
from exception import NotFoundException, OutOfBoundsException, ValidationException
from unittest import TestCase
from unittest.mock import Mock
from memory import Range, Data
from unittest_data_provider import data_provider
from exception import NotFoundException, ValidationException
class DataTest(TestCase):
    """Unit tests for memory.Data: byte storage validated against a Range."""
    def test_bytes_raises_before_set(self):
        # Reading .bytes before anything was stored must raise.
        data = Data(Mock())
        with self.assertRaises(NotFoundException) as context:
            data.bytes
        self.assertEqual(
            'Unable to read bytes before property is set',
            context.exception.args[0]
        )
    def test_bytes_raises_during_init(self):
        # Constructor rejects payloads whose size does not match the range.
        with self.assertRaises(ValidationException) as context:
            Data(Range(0x0000, 1), b'1234')
        self.assertEqual(
            'Unable to store 4 bytes to range requiring 2',
            context.exception.args[0]
        )
    @data_provider(lambda: (
        (Range(0xcafe, 1), b'ab'),
        (Range(0xfeed, 5), b'1234567890'),
    ))
    def test_bytes_getter(self, range: Range, bytes: bytes):
        # Bytes passed to the constructor come back unchanged.
        data = Data(range, bytes)
        self.assertEqual(bytes, data.bytes)
    @data_provider(lambda: (
        (Range(0xcafe, 1), b'ab'),
        (Range(0xfeed, 5), b'1234567890'),
    ))
    def test_bytes_setter(self, range: Range, bytes: bytes):
        # Bytes assigned after construction come back unchanged.
        data = Data(range)
        data.bytes = bytes
        self.assertEqual(bytes, data.bytes)
    @data_provider(lambda: (
        (Range(0xcafe, 2), b'ab', 'Unable to store 2 bytes to range requiring 4'),
        (Range(0xfeed, 1), b'1234567890', 'Unable to store 10 bytes to range requiring 2'),
    ))
    def test_bytes_raises_setter(self, range, bytes, expected):
        # Setter rejects payloads whose size does not match the range.
        data = Data(range)
        with self.assertRaises(ValidationException) as context:
            data.bytes = bytes
        self.assertEqual(expected, context.exception.args[0])
    def test_sorted(self):
        # Data objects order by their range's start address.
        datas = [
            Data(Range(0x1234, 1), b'zz'),
            Data(Range(0x9999, 2), b'aaaa'),
            Data(Range(0x0000, 1), b'mm'),
            Data(Range(0x4567, 1), b'aa'),
        ]
        self.maxDiff = None
        self.assertEqual([
            Data(Range(0x0000, 1), b'mm'),
            Data(Range(0x1234, 1), b'zz'),
            Data(Range(0x4567, 1), b'aa'),
            Data(Range(0x9999, 2), b'aaaa'),
        ], sorted(datas))
| 3 | 3 |
hur_tracker.py | Unidata/HurricaneTracker | 11 | 12770887 | <reponame>Unidata/HurricaneTracker
# -*- coding: utf-8 -*-
from StringIO import StringIO
import urllib2
import gzip
from pandas import Series, DataFrame
import numpy as np
from mpl_toolkits.basemap import Basemap
from collections import OrderedDict
from IPython.display import clear_output
from IPython.html import widgets
from IPython.display import display
import matplotlib.pyplot as plt
import matplotlib.cm as cm
get_ipython().magic(u'matplotlib inline')
# Function to read text files
def readTextFile(url):
    """Fetch *url* and return its body as a list of lines (Python 2, urllib2)."""
    request = urllib2.Request(url)
    # Adding user to header for NHC user logs
    request.add_header("User-agent", "Unidata Python Client Test")
    response = urllib2.urlopen(request)
    # Store data response in a string buffer.
    sio_buffer = StringIO(response.read())
    f = sio_buffer.getvalue()
    return f.splitlines()
# Function to open and read gzip-compressed files
def readGZFile(url):
    """Fetch a gzipped *url*, decompress it, and return a list of lines."""
    request = urllib2.Request(url)
    # Adding user to header for NHC user logs
    request.add_header("User-agent", "Unidata Python Client Test")
    response = urllib2.urlopen(request)
    # Store data response in a string buffer.
    sio_buffer = StringIO(response.read())
    # Read from the string buffer as if it were a physical file
    gzf = gzip.GzipFile(fileobj = sio_buffer)
    data = gzf.read()
    return data.splitlines()
# Class to sort ensemble members into ensembles
class SortModels:
    """Groups NHC forecast-model IDs into named ensembles by matching the
    description text of the official technique list."""
    def __init__(self):
        # Reading in data from text file
        self.fileLines = readTextFile("ftp://ftp.nhc.noaa.gov/atcf/docs/nhc_techlist.dat")
        # Pulling necessary data from lines in file (fixed-width columns:
        # model ID at 4:8, free-text description from column 68 on)
        model, info = [],[]
        for line in self.fileLines[1:]:
            model.append(line[4:8].strip())
            info.append(line[68:].strip())
        #Combining data from file into a Pandas Dataframe dictionary.
        self.models = DataFrame({"Model":model, "Info":info})
    # Method to sort ensemble members into corresponding ensemble list
    def shapeModelInfo(self):
        """Return {"modelGroups": ensemble -> [members],
                   "modelToGroup": member -> ensemble}.

        A model whose description matches several groups is appended to each
        matching list; in modelToGroup the last matching group wins.
        NOTE(review): the conditions use bitwise & on booleans — works here,
        but `and` would be the idiomatic operator.
        """
        modelStuff = self.models
        # Ensemble lists
        gfsE,ecmwfE,gfdlE,hwrfE,nhcE,ukmetE,bamsE,shipsE,psuE,\
        cmcE,consenE,namE,coampsE,nogapsE,navgemE,jgsmE,esrlE,ncepE \
        = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
        # Classify each technique by its description, skipping ensemble means
        for line in range(len(modelStuff["Info"])):
            if (modelStuff["Info"][line].startswith("GFS")) & ("Mean" not in modelStuff["Info"][line]):
                gfsE.append(modelStuff["Model"][line])
            if ("SHIPS" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                shipsE.append(modelStuff["Model"][line])
            if ("PSU" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                psuE.append(modelStuff["Model"][line])
            if ("ESRL" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                esrlE.append(modelStuff["Model"][line])
            if ("NCEP" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                ncepE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("Beta")) & ("Mean" not in modelStuff["Info"][line]):
                bamsE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("Japanese")) & ("Mean" not in modelStuff["Info"][line]):
                jgsmE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("NAM")) & ("Mean" not in modelStuff["Info"][line]):
                namE.append(modelStuff["Model"][line])
            if ("COAMPS" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                coampsE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("NOGAPS")) & ("Mean" not in modelStuff["Info"][line]):
                nogapsE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("NAVGEM")) & ("Mean" not in modelStuff["Info"][line]):
                navgemE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("Canadian")) & ("Mean" not in modelStuff["Info"][line]):
                cmcE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("UKMET")) & ("Mean" not in modelStuff["Info"][line]):
                ukmetE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("ECMWF")) & ("Mean" not in modelStuff["Info"][line]):
                ecmwfE.append(modelStuff["Model"][line])
            if ("GFDL" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                gfdlE.append(modelStuff["Model"][line])
            if ("Consensus" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                consenE.append(modelStuff["Model"][line])
            if ("HWRF" in modelStuff["Info"][line]) & ("Mean" not in modelStuff["Info"][line]):
                hwrfE.append(modelStuff["Model"][line])
            if (modelStuff["Info"][line].startswith("NHC")) & ("Mean" not in modelStuff["Info"][line]):
                nhcE.append(modelStuff["Model"][line])
        # Dictionary to store ensembles
        self.modelGroups = {"GFS":gfsE,"ECMWF":ecmwfE,"GFDL":gfdlE,"NHC":nhcE,"HWRF":hwrfE,
                            "UKMET":ukmetE,"CMC":cmcE,"Consensus":consenE,"NAM":namE,"COAMPS":coampsE,
                            "NOGAPS":nogapsE,"NAVGEM":navgemE,"BAMS":bamsE,"JGSM":jgsmE,"NCEP":ncepE,
                            "SHIPS":shipsE,"PSU":psuE,"ESRL":esrlE}
        # Reversing the order of the previous dictionary to have the
        # ensemble members be the dictionary keys
        self.modelToGroup = {}
        for group,models in self.modelGroups.items():
            for model in models:
                self.modelToGroup[model] = group
        return {"modelGroups":self.modelGroups, "modelToGroup":self.modelToGroup}
# Class to get storm names and file name
class StormNameData:
    """Downloads and parses the NHC master storm list."""
    def __init__(self):
        # One storm per line, comma separated; first line is a header.
        self.fileLines = readTextFile("http://ftp.nhc.noaa.gov/atcf/index/storm_list.txt")
    # Method to pull necessary data from lines in file
    def splitStormInfo(self):
        """Return a DataFrame with one row per storm (name, basin, year, ...)."""
        name, cycloneNum, year, stormType, basin, filename = [],[],[],[],[],[]
        for line in self.fileLines[1:]:
            fields = line.split(",")
            name.append(fields[0].strip())
            basin.append(fields[1].strip())
            cycloneNum.append(fields[7].strip())
            year.append(fields[8].strip())
            stormType.append(fields[9].strip())
            # Last field is the ATCF file stem; lower-cased for URL building.
            filename.append(fields[-1].strip().lower())
        # Combining data from file into a Pandas Dataframe dictionary.
        self.storms = DataFrame({"Name":name, "Basin":basin, "CycloneNum":np.array(cycloneNum), "Year":np.array(year), "StormType":stormType, "Filename":filename})
        return self.storms
# Class to select desired storm
class StormSelector:
    """IPython-widget UI for picking a storm year/name and building the
    corresponding ATCF forecast ("a") and best-track ("b") file URLs."""
    def __init__(self):
        self.stormStuff = StormNameData().splitStormInfo()
        years = sorted(self.stormStuff.groupby(["Year"]).groups.keys())
        # Slider to select year for file
        self.menuYears = widgets.IntSliderWidget(min=1851, max=2014, step=1, value=2014, description = "Year")
        self.menuNames = widgets.DropdownWidget()
        self.menuNames.on_trait_change(self._createUrl, 'value')
        # Button to create dropdown menu of storms for the selected year
        self.buttonName = widgets.ButtonWidget(description="Get Storm Names")#, value = menuYears.value)
        self.buttonName.on_click(self._updateNames)
        # Button to call the plotting class and other necessary classes;
        # disabled until a storm with available data is chosen.
        self.plottingButton = widgets.ButtonWidget(description = "Plot Selected Storm", disabled = True)
        # Container widget to hold storm selection widgets
        self.stormSelector = widgets.ContainerWidget(children = [self.menuYears, self.buttonName,
                                                                 self.menuNames, self.plottingButton], visible = True)
        # Container widget to hold both storm selecting widgets and plotting button
        self.form = widgets.ContainerWidget()
        self.form.children = [self.stormSelector]
        display(self.form)
    # Method to display storms for the specified year in the dropdown menu widget
    def _updateNames(self, *args):
        p1 = self.stormStuff[self.stormStuff.Year == str(self.menuYears.value)]
        self.p2 = p1.groupby(["Name"])
        names = sorted(self.p2.groups.keys())
        if names:
            self.menuNames.values = OrderedDict(zip(names, names))
        else:
            self.menuNames.values.clear()
    # Method to test if files exist for the selected storm (HEAD request)
    def test_url(self,url):
        request = urllib2.Request(url)
        request.get_method = lambda : 'HEAD'
        try:
            response = urllib2.urlopen(request)
            return True
        except:
            # NOTE(review): bare except treats any failure as "missing file".
            return False
    # Method to create urls for selected storm
    def _createUrl(self, *args):
        pullName = self.stormStuff.loc[self.p2.groups[self.menuNames.value]]
        full = pullName.Filename.values[0]
        # Creates different path for 2014 storms (current season lives
        # outside the archive directory)
        if self.menuYears.value == 2014:
            url = "http://ftp.nhc.noaa.gov/atcf/aid_public/a%8s.dat.gz" % (full)
            urlb = "http://ftp.nhc.noaa.gov/atcf/btk/b%8s.dat" % (full)
        else:
            url = "http://ftp.nhc.noaa.gov/atcf/archive/%4s/a%8s.dat.gz" % (self.menuYears.value,full)
            urlb = "http://ftp.nhc.noaa.gov/atcf/archive/%4s/b%8s.dat.gz" % (self.menuYears.value,full)
        self.urls = {"Forecast":url, "Best":urlb}
        self.aExists = self.test_url(url)
        self.bExists = self.test_url(urlb)
        clear_output()
        # Creating message for instances in which the url test passes or fails
        if self.aExists and self.bExists:
            message = 'Selected data for: {}'.format(self.menuNames.value)
            self.plottingButton.disabled = False
            self.plottingButton.on_click(toPlottingFunction)
        else:
            self.stormSelector.visible = True
            self.plottingButton.disabled = True
            message = 'No data found for: '
            if (not self.aExists) and (not self.bExists):
                message = message + "forecast tracks and best track"
            elif not self.aExists:
                message = message + "forecast tracks"
            else:
                message = message + "best track"
        print message
# Class to gather data from storm files
class HandleStormData:
    """Downloads the selected storm's forecast and best-track ATCF files and
    parses them into DataFrames keyed "Forecast" and "Best"."""
    def __init__(self, ss):
        # Creating dictionary that will be filled with data from forecast files and best track files
        self.dataDict = {}
        for key in ss.urls.keys():
            # Current-season best-track files are plain .dat; archives are gzipped.
            if (ss.urls[key].endswith(".dat")):
                self.lines = readTextFile(ss.urls[key])
            else:
                self.lines = readGZFile(ss.urls[key])
            lat = []
            lon = []
            basin, cycloneNum, warnDT, model = [],[],[],[]
            for line in self.lines:
                fields = line.split(",")
                #Joins together lattitude and longitude strings without directional letters.
                #Includes type conversion in order to divide by 10 to get the correct coordinate.
                # NOTE(review): assumes N latitude and W longitude (hence the
                # unconditional negation) — valid for Atlantic/E. Pacific basins only.
                latSingle = int(fields[6][:-1])/10.0
                lonSingle = -(int(fields[7][:-1])/10.0)
                lat.append(latSingle)
                lon.append(lonSingle)
                basin.append(fields[0])
                cycloneNum.append(fields[1].strip())
                warnDT.append(fields[2].strip())
                model.append(fields[4].strip())
            #Combining data from file into a Pandas Dataframe dictionary.
            self.dataDict[key] = DataFrame({"Basin":basin, "CycloneNum":np.array(cycloneNum),
                                            "WarnDT":np.array(warnDT), "Model":model,
                                            "Lat":np.array(lat), "Lon":np.array(lon), "Group":''})
    # Method to fill in Group column with corresponding ensemble name
    def sortGroup(self):
        """Label each forecast row with its ensemble group and return dataDict."""
        modelInfo = SortModels().shapeModelInfo()
        ensembleMembers = modelInfo["modelToGroup"]
        data = self.dataDict["Forecast"]
        for k in range(len(data["Model"])):
            # Models with no known group keep their own model ID as the group.
            if data["Model"][k] in ensembleMembers:
                data["Group"][k] = ensembleMembers[data["Model"][k]]
            else:
                data["Group"][k] = data["Model"][k]
        self.dataDict["Forecast"] = data
        return self.dataDict
# Class to create and mange storm plots
class Plotting:
    """Interactive storm-track plotting UI built on matplotlib/Basemap and
    the legacy IPython notebook widget API.

    :param hsd: object exposing ``sortGroup()`` which returns a dict holding
        "Best" and "Forecast" pandas DataFrames (e.g. HandleStormData).
        NOTE(review): assumes "Forecast" has columns Basin, CycloneNum,
        WarnDT, Model, Lat, Lon, Group -- confirm against HandleStormData.
    """
    def __init__(self, hsd):
        # Close any figures left over from a previous run, then create the
        # per-instance plotting state.
        plt.close('all')
        self.modelList = []
        self.artistDict = {}
        self.multiArtistDict = {}
        self.multiTrackDict = {}
        self.container = None
        # Assigning data from position keys to appropriate tracks
        sortedModels = SortModels()
        self.membersToEnsemble = sortedModels.shapeModelInfo()["modelGroups"]
        self.best = hsd.sortGroup()["Best"]
        self.forecast = hsd.sortGroup()["Forecast"]
        # Command to set up widgets and actions
        self.userControls()
        # Creates figure and sets the axes for animation.
        self.fig = plt.figure(figsize=(8,8))
        self.ax = self.fig.add_subplot(111, autoscale_on=False)
        self.ax.grid()
        # Creating map for appropriate basin
        self.map = self.createMap(self.forecast["Basin"][0], ax=self.ax)
        # Commands to set up plot and to plot initial tracks for NHC model
        self.plotDetails()
        self.setModel("NHC")
    def userControls(self):
        """Create all notebook widgets and wire up their event handlers."""
        # Button to select new storm from StormSelector
        self.selectStorm = widgets.ButtonWidget(description = "Select New Storm", visible = True)
        self.selectStorm.on_click(selectNewStorm)
        # Model selection widgets and controls to change model
        self.modelSelection = widgets.DropdownWidget(values = self.forecast.groupby(["Group"]).groups.keys(), value = "NHC")
        self.modelSelection.on_trait_change(self.onChangeModel, 'value')
        # Slider widget to move plots through time (max is adjusted later in updateUI)
        self.frameNumber = widgets.IntSliderWidget(min=0, max=1, value = 0, step=1, description = "Time")
        # Button widgets to advance and retract time frames
        add = widgets.ButtonWidget(description = "+")
        subtract = widgets.ButtonWidget(description = "-")
        add.on_click(self.advanceFrame)
        subtract.on_click(self.subtractFrame)
        # Checkbox to add multiple tracks to plot
        clearAll = widgets.ButtonWidget(description = "Clear Plot")
        clearAll.on_click(self.clearPlot)
        self.check = widgets.CheckboxWidget(description = "Add multiple tracks:", value = False)
        # Widgets to add extra tracks to the plot
        self.newTrackMenu = widgets.DropdownWidget(values = self.forecast.groupby(["Group"]).groups.keys())
        self.addTrackButton = widgets.ButtonWidget(description = "Add New Track")
        self.plotMultiTrackButton = widgets.ButtonWidget(description = "Plot New Tracks")
        # Container that holds multiple track widgets (hidden until the checkbox is ticked)
        self.addNewTrack = widgets.ContainerWidget(visible=False, children = [self.newTrackMenu, self.addTrackButton,
                                                                              self.plotMultiTrackButton, clearAll])
        # Adding actions to control frameNumber slider widget
        self.addTrackButton.on_click(self.addingNewTrack)
        self.plotMultiTrackButton.on_click(self.plottingTracks)
        self.check.on_trait_change(self.addTracks, 'value')
        if self.container is None:
            # Container that holds all widgets
            self.container = widgets.ContainerWidget(children = [self.selectStorm, self.frameNumber, add, subtract,
                                                                 self.modelSelection, self.check, self.addNewTrack])
            display(self.container)
        self.container.visible = True
    def clearPlot(self,b):
        """Remove every multi-track line from the plot and reset multi-track state."""
        for line in self.multiArtistDict.values():
            line.remove()
        self.multiArtistDict.clear()
        self.multiTrackDict.clear()
        self.modelList = []
    def addTracks(self, name, value):
        """Checkbox handler: toggle between single-model and multi-track modes."""
        self.clearPlot(0)
        if value:
            self.modelSelection.disabled = True
            self.addNewTrack.visible = True
        else:
            self.modelSelection.disabled = False
            self.addNewTrack.visible = False
    def advanceFrame(self, b):
        """Advance the time slider by one frame, clamped to the last frame.

        Multi-track mode uses the combined time list (allTimes); single-model
        mode uses the current model's time list.
        """
        if self.addNewTrack.visible is True:
            if self.frameNumber.value < len(self.allTimes)-1:
                self.frameNumber.value += 1
        else:
            if self.frameNumber.value < len(self.time)-1:
                self.frameNumber.value += 1
    def subtractFrame(self, b):
        """Move the time slider back one frame, clamped at zero."""
        if self.addNewTrack.visible is True:
            if self.frameNumber.value > 0:
                self.frameNumber.value -= 1
        else:
            if self.frameNumber.value > 0:
                self.frameNumber.value -= 1
    def updateUI(self):
        """Resize the time slider for the active mode and (re)bind its handler."""
        # Adjust frameNumber length to correspond to the forecast times for the selected model
        if self.addNewTrack.visible is True:
            self.frameNumber.max = len(self.allTimes) - 1
            self.init_multi()
        else:
            self.frameNumber.max = len(self.time) - 1
            self.init_trackPlot()
        # Exactly one of the two handlers is attached, the other removed,
        # depending on which mode is active (remove= toggles registration).
        self.frameNumber.on_trait_change(self.update_trackPlot, 'value', remove=self.addNewTrack.visible)
        self.frameNumber.on_trait_change(self.update_multi, 'value', remove=not self.addNewTrack.visible)
    def refreshDisplay(self):
        """Redraw the figure and replace the notebook cell output with it."""
        self.fig.canvas.draw()
        clear_output(wait=True)
        display(self.fig)
    def setModel(self, newModel):
        """Re-assimilate data and the time slider for a newly selected model."""
        self.manageData(newModel)
        self.updateUI()
    def onChangeModel(self, traitName, newModel):
        """Dropdown handler: rewind to frame 0 and rebuild the plot for newModel."""
        self.frameNumber.value = 0
        self.setModel(newModel)
        self.refreshDisplay()
    def addingNewTrack(self, b):
        """Queue the currently selected model for the multi-track plot."""
        self.modelList.append(self.newTrackMenu.value)
    def plottingTracks(self, b):
        """Build per-model data/colors for every queued track and redraw."""
        self.updateModelTitle()
        self.frameNumber.value = 0
        self.refreshDisplay()
        # Setting up color cycle so each track gets a distinct color
        colormap = plt.cm.gist_rainbow
        color_cycle = ([colormap(i) for i in np.linspace(0, 0.9, len(self.modelList))])
        self.alltimes = []
        # Create data for each queued track and set its color
        for i in range(len(self.modelList)):
            self.trackColor = color_cycle[i]
            self.manageData(self.modelList[i])
            self.alltimes += self.time
        # Creating a sorted, de-duplicated array of times from all selected tracks
        self.allTimes = sorted(list(set(self.alltimes)))
        self.updateUI()
    def updateModelTitle(self):
        """Show the selected model name(s) in the plot's side title."""
        if self.addNewTrack.visible is True:
            self.modelTitle.set_text("Plotting Models: %s" % '\n'.join(self.modelList))
        else:
            self.modelTitle.set_text("Plotting Model: %s" % self.modelChosen)
    def manageData(self, model = None):
        """Build the per-warning-time track data dictionaries for one model.

        Populates self.masterDict as {"WarnDT": {"model": (x, y)}} where
        (x, y) are map-projected coordinate arrays, and creates the matplotlib
        line artists for either single-model or multi-track mode.
        """
        self.modelChosen = model
        self.updateModelTitle()
        # Reset dictionary of lines -- also removes each artist from the plot
        for line in self.artistDict.values():
            line.remove()
        self.artistDict.clear()
        # Selecting data for a specific model group; grouping by warning date and time
        forecast2 = self.forecast[self.forecast.Group == self.modelChosen]
        self.timesInGroup = forecast2.groupby(["WarnDT"])
        modelsInGroup = forecast2.groupby(["Model"])
        # masterDict order: {"WarnDT":{"model":[lonlats]} }
        self.masterDict = {}
        for x in sorted(self.timesInGroup.groups):
            self.masterDict[x] = {}
        self.time = []
        for times in sorted(self.timesInGroup.groups):
            self.time.append(times)
            warningTimes = forecast2.loc[self.timesInGroup.groups[times]]
            warndtModels = warningTimes.groupby(["Model"])
            for model in sorted(warndtModels.groups):
                # Eliminating erroneous latitude and longitude data (Lon == 0)
                modelPlot = forecast2.loc[warndtModels.groups[model]]
                modelPlot = modelPlot[modelPlot.Lon != 0]
                self.masterDict[times][model] = self.map(modelPlot["Lon"].values, modelPlot["Lat"].values)
        # Filling in dictionaries for multi track plot use
        for line in modelsInGroup.groups:
            if self.addNewTrack.visible is True:
                self.multiTrackDict[line] = self.masterDict
                self.multiArtistDict[line], = self.map.plot([], [], linewidth=2.0, color = self.trackColor, zorder=5)
            else:
                self.artistDict[line], = self.map.plot([], [], linewidth=2.0, zorder=5)
    def plotDetails(self):
        """Create the static plot elements: titles, text labels, best-track line."""
        stormStuff = StormNameData().splitStormInfo()
        self.stormName = stormStuff[(stormStuff.Year == self.forecast["WarnDT"][0][:4]) &
                                    (stormStuff.Basin == self.forecast["Basin"][0]) &
                                    (stormStuff.CycloneNum == self.forecast["CycloneNum"][0])]
        titlestring ="Storm Name: %s    Storm Number: %-4s Year: %-6s" % \
                     (self.stormName.Name.values[0], self.forecast["CycloneNum"].values[0],
                      self.forecast["WarnDT"][0][:4])
        self.ax.set_title(titlestring, fontsize=13)
        self.modelTitle = self.ax.text(1.35, 1.05, "", horizontalalignment='right', verticalalignment = "top",
                                       transform=self.ax.transAxes, fontsize = 13)
        self.lineb, = self.map.plot([],[], linewidth=2.5, linestyle = '--', color='b', zorder=4)
        self.lonlatsbest = self.map(self.best["Lon"].values, self.best["Lat"].values)
        self.time_template = 'Date/Time of Warning: %s'
        self.time_text = self.ax.text(0.03, 0.05, '', backgroundcolor='white', transform=self.ax.transAxes,
                                      color='red', fontsize=15.0)
    def createMap(self, basinName, ax=None):
        """Create a Basemap for the storm's basin and draw its base layers.

        :param basinName: two-letter basin code keying into mapParam
            (lat_min, lat_max, lon_min, lon_max corner values).
        :param ax: axes to draw into; defaults to the current axes.
        :returns: the configured Basemap instance.
        """
        if ax is None:
            ax = plt.gca()
        mapParam = {"EP":[-10.0,60.0,-150.0,-80.0], "CP":[-10.0,60.0,-180.0,-130.0],
                    "AL":[-5.0,60.0,-110.0,-5.0], "SL":[10.0,-40.0,-100.0,-10.0],
                    "WP":[-10.0,60.0,110.0,-170], "IO":[-10.0,30.0,30.0,110.0],
                    "SH":[-50.0,10.0,30.0,150.0]}
        # Create Miller-projection Basemap instance for the basin's corners.
        self.bm = Basemap(projection='mill',
                          llcrnrlat= mapParam[basinName][0],urcrnrlat= mapParam[basinName][1],
                          llcrnrlon= mapParam[basinName][2],urcrnrlon= mapParam[basinName][3],
                          rsphere=6371200.,resolution='c', ax=ax)
        # Draw coastlines and fills in colors.
        self.bm.drawcoastlines()
        self.bm.fillcontinents(color = 'tan',lake_color='aqua')
        self.bm.drawmapboundary(fill_color='aqua')
        # Draw parallels.
        parallels = np.arange(0.,90,10.)
        self.bm.drawparallels(parallels,labels=[1,0,0,0],fontsize=8)
        # Draw meridians
        meridians = np.arange(180.,360.,10.)
        self.bm.drawmeridians(meridians,labels=[0,0,0,1],fontsize=8)
        return self.bm
    def init_trackPlot(self):
        """Reset the single-model plot: draw the best track, blank all model lines."""
        self.lineb.set_data(self.lonlatsbest)
        self.time_text.set_text('')
        for line in self.artistDict:
            self.artistDict[line].set_data([], [])
    def update_trackPlot(self, value, Time):
        """Slider handler (single-model mode): show the frame at index Time."""
        self.time_text.set_text(self.time_template % self.time[Time])
        for line in self.artistDict.keys():
            framedata = self.masterDict[sorted(self.timesInGroup.groups.keys())[Time]]
            if line in framedata:
                self.artistDict[line].set_data(framedata[line])
            else:
                self.artistDict[line].set_data([],[])
        self.refreshDisplay()
    def init_multi(self):
        """Reset the multi-track plot: draw the best track, blank all track lines."""
        self.lineb.set_data(self.lonlatsbest)
        self.time_text.set_text('')
        for line in self.multiArtistDict:
            self.multiArtistDict[line].set_data([], [])
    def update_multi(self, value, time):
        """Slider handler (multi-track mode): show every track's frame at index time."""
        self.time_text.set_text(self.time_template % self.allTimes[time])
        for model in self.multiTrackDict.keys():
            if self.allTimes[time] in self.multiTrackDict[model].keys():
                for ensembleMember in self.multiTrackDict[model][self.allTimes[time]].keys():
                    framedata = self.multiTrackDict[model][self.allTimes[time]]
                    if ensembleMember in framedata:
                        self.multiArtistDict[ensembleMember].set_data(framedata[ensembleMember])
                    else:
                        self.multiArtistDict[ensembleMember].set_data([],[])
            else:
                self.multiArtistDict[model].set_data([],[])
        self.refreshDisplay()
# Module-level StormSelector instance: constructing it displays the
# storm-selection widgets in the notebook.
ss = StormSelector()
# Callback: hide the plotting controls and bring the storm selector back
# so the user can pick a different storm.
def selectNewStorm(b):
    ss.stormSelector.visible = True
    tt.container.visible = False
# Callback: dismiss the storm selector and launch the Plotting UI for the
# storm the user picked (stored in the module-level `tt`).
def toPlottingFunction(b):
    global tt
    ss.stormSelector.visible = False
    tt = Plotting(HandleStormData(ss))
| 2.484375 | 2 |
URY/pipelines.py | oDallas/URY | 0 | 12770888 | # -*- coding: utf-8 -*-
import os
from textwrap import fill
def wrap(text):
    """Render a (key, value) pair as a '#'-prefixed comment paragraph.

    The value is stringified and word-wrapped at 120 columns; the first
    line is prefixed with '# <key>: ' and continuation lines with '# '.
    A leading and trailing newline frame the paragraph.
    """
    key, value = text[0], str(text[1])
    body = fill(value, width=120, initial_indent='# ' + key + ': ', subsequent_indent='# ')
    return '\n' + body + '\n'
class ToPython(object):
def process_item(self, item, spider):
if spider.path:
path = os.path.expanduser(spider.path)
if os.path.exists(path):
new_path = path + '/%i-%s.py' % (item.pop('number'), item.pop('title'))
with open(new_path, 'w+') as f:
f.write('# -*- coding: utf-8 -*-\n')
for formatted in [wrap(x) for x in item.items()]:
f.write(formatted)
print("\033[31mThis file doesn't exists: %s\033[m" % spider.path)
return item
| 2.875 | 3 |
ch22-直方图/hist-normalized-numpy-2.py | makelove/OpenCV-Python-Tutorial | 2,875 | 12770889 | <filename>ch22-直方图/hist-normalized-numpy-2.py
# -*-coding:utf8-*-#
__author__ = 'play4fun'
"""
create time:15-10-24 下午5:26
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Load the image as grayscale (flag 0).
img = cv2.imread('../data/contrast75.png', 0)
# flatten() collapses the image array to 1-D before histogramming.
hist, bins = np.histogram(img.flatten(), 256, [0, 256])
# Compute the cumulative distribution function (CDF) of the histogram.
cdf = hist.cumsum()

##
# Build a NumPy masked array from the CDF: entries equal to 0 are masked
# out so they are ignored in the min/max rescaling below.
cdf_m = np.ma.masked_equal(cdf, 0)
# Histogram equalization: linearly stretch the CDF to the full 0-255 range.
cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
# Fill the masked (previously zero) entries back with 0 and cast to uint8.
cdf = np.ma.filled(cdf_m, 0).astype('uint8')
# Apply the equalization lookup table to every pixel.
img2 = cdf[img]
# cv2.imshow("img2",img2)
# cv2.waitKey(0)

##
# Recompute histogram and CDF for the equalized image to visualize the result.
hist, bins = np.histogram(img2.flatten(), 256, [0, 256])
cdf = hist.cumsum()
# Scale the CDF so it can be overlaid on the histogram plot.
cdf_normalized = cdf * hist.max() / cdf.max()
plt.plot(cdf_normalized, color='b')
plt.hist(img.flatten(), 256, [0, 256], color='r')
plt.xlim([0, 256])
plt.legend(('cdf', 'histogram'), loc='upper left')
plt.show()
'''
直方图均 化经常用来使所有的图片具有相同的亮度条件的参考 工具。 在很多情况下 很有用。例如 脸 别 在 练分类器前 练 的所有图片 先 直方图均 化从而使它们 到相同的亮度条件。
''' | 2.71875 | 3 |
credentials-test.py | VictorKMaina/password-vault | 0 | 12770890 | from credentials import Credential
import unittest
class TestCredentials(unittest.TestCase):
    """Unit tests for the Credential model's behaviour."""

    def setUp(self):
        """Build a fresh Credential fixture before each test."""
        self.new_credential = Credential("Instagram", "victormainak", "password")

    def test_init_(self):
        """The constructor should store every field on the instance."""
        expected = {
            "account_name": "Instagram",
            "user_name": "victormainak",
            "password": "password",
        }
        for attribute, value in expected.items():
            self.assertEqual(getattr(self.new_credential, attribute), value)
if __name__ == "__main__":
unittest.main() | 3.546875 | 4 |
asgard/http/client.py | ThalesSathler/asgard-api | 1 | 12770891 | from typing import Optional, Union
from aiohttp import ClientSession, ClientTimeout # type: ignore
from asgard import conf
# Shared default timeout applied to every ClientSession this module creates;
# the total and connect limits come from service configuration.
default_http_client_timeout = ClientTimeout(
    total=conf.ASGARD_HTTP_CLIENT_TOTAL_TIMEOUT,
    connect=conf.ASGARD_HTTP_CLIENT_CONNECT_TIMEOUT,
)
class _HttpClient:
_session: Optional[ClientSession]
def __init__(
self,
session_class,
url: str,
method: str,
session_class_args=[],
session_class_kwargs={},
*args,
**kwargs,
) -> None:
self._session = None
self._session_class = session_class
self._url = url
self._args = args
self._kwargs = kwargs
self._method = method
self._session_class_args = session_class_args
self._session_class_kwargs = session_class_kwargs
async def __aenter__(self):
if not self._session:
self._session = self._session_class(
*self._session_class_args, **self._session_class_kwargs
)
return await self._return_session_method(self._session, self._method)(
self._url, *self._args, **self._kwargs
)
def _return_session_method(self, session, method_name):
return getattr(session, method_name.lower())
async def __aexit__(self, exc_type, exc_value, exc_tb):
await self._session.close()
class _HttpClientMaker:
    """Factory producing per-request _HttpClient context managers.

    Can also be used directly as an async context manager, in which case it
    lazily creates (and closes) a single shared session with the module's
    default timeout.
    """

    def __init__(self, session_class, *args, **kwargs):
        self._session_class = session_class
        self.session = None
        self._session_class_args = args
        self._session_class_kwargs = kwargs

    def _request(self, method: str, url: str, *args, **kwargs):
        # Single construction point shared by every HTTP verb below;
        # previously each verb method duplicated this call verbatim.
        return _HttpClient(
            self._session_class,
            url,
            method,
            self._session_class_args,
            self._session_class_kwargs,
            *args,
            **kwargs,
        )

    def get(self, url: str, *args, **kwargs):
        return self._request("GET", url, *args, **kwargs)

    def post(self, url: str, *args, **kwargs):
        return self._request("POST", url, *args, **kwargs)

    def put(self, url: str, *args, **kwargs):
        return self._request("PUT", url, *args, **kwargs)

    def delete(self, url: str, *args, **kwargs):
        return self._request("DELETE", url, *args, **kwargs)

    async def __aenter__(self):
        if not self.session:
            self.session = self._session_class(
                timeout=default_http_client_timeout
            )
        return self.session

    async def __aexit__(self, exc_type, exc_value, exc_tb):
        await self.session.close()
# Module-wide singleton factory used by application code, e.g.:
#   async with http_client.get(url) as resp: ...
http_client = _HttpClientMaker(
    ClientSession, timeout=default_http_client_timeout
)
| 2.21875 | 2 |
lib/aquilon/worker/commands/publish.py | ned21/aquilon | 7 | 12770892 | <filename>lib/aquilon/worker/commands/publish.py
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010-2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq publish`."""
from tempfile import NamedTemporaryFile
from base64 import b64decode
from aquilon.worker.broker import BrokerCommand
from aquilon.exceptions_ import ProcessException, ArgumentError
from aquilon.aqdb.model import (
Domain,
Review,
Sandbox
)
from aquilon.worker.dbwrappers.branch import (
force_my_sandbox,
sync_all_trackers,
trigger_review_pipeline
)
from aquilon.worker.processes import GitRepo
from aquilon.worker.logger import CLIENT_INFO
class CommandPublish(BrokerCommand):

    required_parameters = ["bundle"]
    requires_format = True

    def render(self, session, logger, dbuser, user, branch, sandbox, bundle,
               sync, rebase, review, **_):
        """Publish a client-side git bundle into template-king.

        The base64-encoded *bundle* is verified and pulled into a temporary
        clone of the sandbox branch, then pushed back to template-king.
        Optionally syncs tracking domains and creates/refreshes a code
        review against the default target branch.

        :param branch/sandbox: name of the branch to publish (one of the two)
        :param bundle: base64-encoded git bundle produced by the client
        :param sync: propagate the new commits to tracking domains
        :param rebase: allow history rewriting (force push); refused when
            the sandbox has trackers
        :param review: create a review record if none exists yet
        :returns: the follow-up git command for the client to run
        :raises ArgumentError: on invalid bundle, disallowed rebase, or any
            underlying git failure
        """
        if sandbox:
            sandbox, _ = force_my_sandbox(session, dbuser, sandbox)
            dbsandbox = Sandbox.get_unique(session, sandbox, compel=True)
        elif branch:
            dbsandbox = Sandbox.get_unique(session, branch, compel=True)

        if sync and not dbsandbox.is_sync_valid and dbsandbox.trackers:
            # FIXME: Maybe raise an ArgumentError and request that the
            # command run with --nosync?  Maybe provide a --validate flag?
            # For now, we just auto-flip anyway (below) making the point moot.
            pass
        if not dbsandbox.is_sync_valid:
            dbsandbox.is_sync_valid = True

        if rebase and dbsandbox.trackers:
            raise ArgumentError("{0} has trackers, rebasing is not allowed."
                                .format(dbsandbox))

        # Most of the logic here is duplicated in deploy
        kingdir = self.config.get("broker", "kingdir")

        # Decode the client's bundle into a temp file git can read.
        tmpfile = NamedTemporaryFile()
        tmpfile.write(b64decode(bundle))
        tmpfile.flush()

        kingrepo = GitRepo(kingdir, logger)
        try:
            with kingrepo.temp_clone(dbsandbox.name) as temprepo:
                temprepo.run(["bundle", "verify", tmpfile.name])
                ref = "HEAD:%s" % (dbsandbox.name)
                command = ["pull", tmpfile.name, ref]
                if rebase:
                    command.append("--force")
                temprepo.run(command, stream_level=CLIENT_INFO)
                # Using --force above allows rewriting any history, even before
                # the start of the sandbox. We don't want to allow that, so
                # verify that the starting point of the sandbox is still part
                # of its history.
                if rebase:
                    found = temprepo.ref_contains_commit(dbsandbox.base_commit,
                                                         dbsandbox.name)
                    if not found:
                        raise ArgumentError("The published branch no longer "
                                            "contains commit {} it was "
                                            "branched from."
                                            .format(dbsandbox.base_commit))

                # FIXME: Run tests before pushing back to template-king
                temprepo.push_origin(dbsandbox.name, force=rebase)
        except ProcessException as e:
            raise ArgumentError("\n%s%s" % (e.out, e.err))
        finally:
            tmpfile.close()

        if sync and dbsandbox.autosync:
            sync_all_trackers(dbsandbox, logger)

        new_head = kingrepo.ref_commit(dbsandbox.name)

        dbtarget = Domain.get_unique(
            session,
            self.config.get("panc", "default_target_branch"),
            compel=True)
        # try and find existing review
        dbreview = Review.get_unique(session,
                                     source=dbsandbox,
                                     target=dbtarget)
        if dbreview:
            # Invalidate previous review
            dbreview.commit_id = new_head
            dbreview.tested = None
            dbreview.testing_url = None
            # Explicit denials should be kept
            if dbreview.approved is True:
                dbreview.approved = None

            # Trigger CI Pipeline
            trigger_review_pipeline(dbreview=dbreview,
                                    user=user,
                                    logger=logger)
        elif review:
            # Create new review
            dbreview = Review(source=dbsandbox,
                              target=dbtarget,
                              commit_id=new_head)
            session.add(dbreview)

            # Trigger CI Pipeline
            trigger_review_pipeline(dbreview=dbreview,
                                    user=user,
                                    logger=logger)

        session.flush()

        client_command = "git fetch"
        return client_command
| 1.984375 | 2 |
mp_server/_socket/server_socket.py | hopelife/mp_server | 0 | 12770893 | ## https://nowonbun.tistory.com/668
# 소켓을 사용하기 위해서는 socket을 import해야 한다.
import socket, threading
# binder함수는 서버에서 accept가 되면 생성되는 socket 인스턴스를 통해 client로 부터 데이터를 받으면 echo형태로 재송신하는 메소드이다.
def _recv_exact(sock, size):
    """Receive exactly `size` bytes from `sock`.

    Returns b"" if the peer closes the connection before `size` bytes arrive.
    (socket.recv may legally return fewer bytes than requested, so we loop.)
    """
    chunks = b""
    while len(chunks) < size:
        part = sock.recv(size - len(chunks))
        if not part:
            # Empty recv means the peer closed the connection.
            return b""
        chunks += part
    return chunks


def binder(client_socket, addr):
    """Echo loop for one connected client.

    Protocol: a 4-byte little-endian length header followed by a UTF-8
    payload; the server replies with "echo : <payload>" framed the same way.
    Runs until the client disconnects or an error occurs, then closes the
    socket.
    """
    print('Connected by', addr)
    try:
        while True:
            # Bug fix: the original used bare recv(4)/recv(length), which can
            # return short reads, and spun forever sending "echo : " after the
            # client disconnected. Read exact frames and stop on EOF instead.
            header = _recv_exact(client_socket, 4)
            if not header:
                break
            length = int.from_bytes(header, "little")
            data = _recv_exact(client_socket, length)
            msg = data.decode()
            print('Received from', addr, msg)
            msg = "echo : " + msg
            data = msg.encode()
            length = len(data)
            # Send the 4-byte length prefix, then the payload.
            client_socket.sendall(length.to_bytes(4, byteorder="little"))
            client_socket.sendall(data)
    except Exception:
        # Connection reset, decode failure, etc. -- log and fall through.
        print("except : ", addr)
    finally:
        client_socket.close()
# Create the listening TCP socket.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow quick rebinding of the address after a restart (SO_REUSEADDR).
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind every local interface ('') on port 9999; pick a free port on this
# machine (check with: netstat -an | find "LISTEN").
server_socket.bind(('', 9999))
# Start listening for incoming connections.
server_socket.listen()
try:
    # The server handles multiple clients, so loop forever.
    while True:
        # accept() blocks until a client connects and returns a
        # (client_socket, address) tuple for that connection.
        client_socket, addr = server_socket.accept()
        # Hand the connection to a worker thread running binder(), then go
        # back to accept() and wait for the next client.
        th = threading.Thread(target=binder, args = (client_socket,addr))
        th.start()
except:
    print("server")
finally:
    # Close the listening socket on shutdown or error.
    server_socket.close()
| 2.71875 | 3 |
misago/misago/users/models/__init__.py | vascoalramos/misago-deployment | 2 | 12770894 | <gh_stars>1-10
from .rank import Rank
from .online import Online
from .user import AnonymousUser, User, UsernameChange
from .activityranking import ActivityRanking
from .avatar import Avatar
from .audittrail import AuditTrail
from .avatargallery import AvatarGallery
from .ban import Ban, BanCache
from .datadownload import DataDownload
from .deleteduser import DeletedUser
| 1.109375 | 1 |
tests/test_protein.py | leoisl/hypothesis-bio | 0 | 12770895 | <reponame>leoisl/hypothesis-bio
from hypothesis import given
from .minimal import minimal
from hypothesis_bio import protein
@given(protein())
def test_protein_type(seq):
    # Every generated protein sequence must be a plain Python string.
    assert type(seq) is str
def test_smallest_example():
    # Shrinking an unconstrained protein strategy bottoms out at "".
    shrunk = minimal(protein())
    assert shrunk == ""
def test_smallest_example_3_letter_abbrv():
    # Three-letter notation also shrinks to the empty string.
    shrunk = minimal(protein(single_letter_protein=False))
    assert shrunk == ""
def test_smallest_non_empty_example():
    # With min_size=1 the minimal sequence is a single alanine code.
    shrunk = minimal(protein(min_size=1))
    assert shrunk == "A"
def test_smallest_non_empty_example_3_letter_abbrv():
    # One residue in three-letter notation: alanine ("Ala", length 3).
    shrunk = minimal(protein(single_letter_protein=False, min_size=1))
    assert shrunk == "Ala"
    assert len(shrunk) == 3
def test_2_mer():
    # Two-residue minimum shrinks to a pair of alanines.
    shrunk = minimal(protein(min_size=2))
    assert shrunk == "AA"
def test_2_mer_3_letter_abbrv():
    # Two residues in three-letter notation: "AlaAla" (6 characters).
    shrunk = minimal(protein(single_letter_protein=False, min_size=2))
    assert shrunk == "AlaAla"
    assert len(shrunk) == 6
@given(protein(max_size=10))
def test_max_size(seq):
    # The strategy must honour its upper length bound.
    assert len(seq) <= 10
def test_max_size_3_letter_abbrv():
    # Three characters per residue: length is a multiple of 3, at most 30.
    shrunk = minimal(protein(single_letter_protein=False, max_size=10))
    assert len(shrunk) % 3 == 0
    assert len(shrunk) <= 30
| 2.96875 | 3 |
rnn_net.py | AuCson/SEDST | 23 | 12770896 | <reponame>AuCson/SEDST
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import math
from config import global_config as cfg
def cuda_(var, aux=None):
    """Move `var` to the device implied by `aux` and the global config.

    With no `aux`, uses the default CUDA device when cfg.cuda is set;
    otherwise a non-negative device index (and cfg.cuda) selects that GPU,
    and anything else (including 'cpu') falls back to the CPU.
    """
    if not aux:
        return var.cuda() if cfg.cuda else var
    if aux != 'cpu' and aux >= 0 and cfg.cuda:
        return var.cuda(aux)
    return var.cpu()
def orth_gru(gru):
    """Re-initialize `gru` in place with orthogonal hidden-to-hidden weights.

    After a standard reset, each gate's square hidden-to-hidden block is
    replaced with an orthogonal matrix (gain 1). Returns the same module.
    """
    gru.reset_parameters()
    step = gru.hidden_size
    for layer_weights in gru.all_weights:
        # Per-layer weight order: w_ih, w_hh, b_ih, b_hh.
        weight_hh = layer_weights[1]
        for start in range(0, weight_hh.size(0), step):
            torch.nn.init.orthogonal_(weight_hh[start:start + step], gain=1)
    return gru
class LayerNormalization(nn.Module):
    """Layer normalization over the last dimension with learnable scale/shift.

    :param d_hid: size of the normalized (last) dimension
    :param eps: added to the standard deviation for numerical stability
    """

    def __init__(self, d_hid, eps=1e-3):
        super(LayerNormalization, self).__init__()
        self.eps = eps
        self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
        self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)

    def forward(self, z):
        # Degenerate case: with a single element along dim 1 the statistics
        # are not meaningful, so the input passes through unchanged.
        if z.size(1) == 1:
            return z
        mean = torch.mean(z, keepdim=True, dim=-1)
        std = torch.std(z, keepdim=True, dim=-1)
        normalized = (z - mean.expand_as(z)) / (std.expand_as(z) + self.eps)
        return normalized * self.a_2.expand_as(normalized) + self.b_2.expand_as(normalized)
class DynamicEncoder(nn.Module):
    """Bidirectional GRU encoder that accepts unsorted, padded batches.

    Inputs are sorted by length internally (required by pack_padded_sequence),
    encoded, and restored to the caller's original batch order, so callers do
    not need to pre-sort.
    """
    def __init__(self, input_size, embed_size, hidden_size, n_layers, dropout):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.n_layers = n_layers
        self.dropout = dropout
        self.embedding = nn.Embedding(input_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)

    def forward(self, input_seqs, input_lens, hidden=None):
        """
        forward procedure. No need for inputs to be sorted
        :param input_seqs: Variable of [T,B]
        :param hidden: optional initial hidden state for the GRU
        :param input_lens: *numpy array* of len for each input sequence
        :return: (outputs [T,B,H] with forward/backward halves summed,
                  hidden state restored to the original batch order)
        """
        batch_size = input_seqs.size(1)
        embedded = self.embedding(input_seqs)
        embedded = embedded.transpose(0, 1)  # [B,T,E]
        # Sort the batch by descending length (pack_padded_sequence requires
        # it) and remember the permutation to undo afterwards.
        sort_idx = np.argsort(-input_lens)
        unsort_idx = cuda_(torch.LongTensor(np.argsort(sort_idx)))
        input_lens = input_lens[sort_idx]
        sort_idx = cuda_(torch.LongTensor(sort_idx))
        embedded = embedded[sort_idx].transpose(0, 1)  # [T,B,E]
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lens)
        outputs, hidden = self.gru(packed, hidden)
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs)
        # Sum the forward and backward direction outputs into [T,B,H].
        outputs = outputs[:,:,:self.hidden_size] + outputs[:,:,self.hidden_size:]
        # Undo the length sort so results line up with the caller's batch order.
        outputs = outputs.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        hidden = hidden.transpose(0, 1)[unsort_idx].transpose(0, 1).contiguous()
        return outputs, hidden
class Attn(nn.Module):
    """Additive (concat-style) attention over encoder outputs.

    :param hidden_size: dimensionality H of decoder and encoder states
    """

    def __init__(self, hidden_size):
        super(Attn, self).__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        self.v = nn.Linear(self.hidden_size, 1)

    def forward(self, hidden, encoder_outputs, normalize=True):
        """Compute the attention context vector.

        :param hidden: decoder state [1,B,H]
        :param encoder_outputs: encoder states [T,B,H]
        :param normalize: accepted for interface compatibility; energies are
            always softmax-normalized regardless of its value
        :returns: context vector [1,B,H]
        """
        encoder_outputs = encoder_outputs.transpose(0, 1)  # [B,T,H]
        attn_energies = self.score(hidden, encoder_outputs)
        normalized_energy = F.softmax(attn_energies, dim=2)  # [B,1,T]
        context = torch.bmm(normalized_energy, encoder_outputs)  # [B,1,H]
        return context.transpose(0, 1)  # [1,B,H]

    def score(self, hidden, encoder_outputs):
        """Return unnormalized attention energies [B,1,T]."""
        max_len = encoder_outputs.size(1)
        H = hidden.repeat(max_len, 1, 1).transpose(0, 1)  # [B,T,H]
        energy = self.attn(torch.cat([H, encoder_outputs], 2))  # [B,T,2H]->[B,T,H]
        # torch.tanh replaces the deprecated F.tanh alias; numerically identical.
        energy = self.v(torch.tanh(energy)).transpose(1, 2)  # [B,1,T]
        return energy
flask_test/app/flaskr_tests.py | nwiizo/joke | 1 | 12770897 | import os
import flaskr
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
    """Integration tests for the flaskr app using a throwaway database."""

    def setUp(self):
        # Point flaskr at a fresh temporary database file so tests never
        # touch real data, and grab a test client for requests.
        self.db_fd, flaskr.DATABASE = tempfile.mkstemp()
        self.app = flaskr.app.test_client()
        flaskr.init_db()

    def tearDown(self):
        # Release the OS-level file handle and delete the temp database.
        os.close(self.db_fd)
        os.unlink(flaskr.DATABASE)
# Allow running this test module directly: python flaskr_tests.py
if __name__ == '__main__':
    unittest.main()
| 2.296875 | 2 |
src/software/thunderscope/thunderscope.py | jonl112/Software | 0 | 12770898 | import os
import signal
import argparse
import platform
# PyQt5 doesn't play nicely with i3 and Ubuntu 18, PyQt6 is much more stable
# Unfortunately, PyQt6 doesn't install on Ubuntu 18. Thankfully both
# libraries are interchangeable, and we just need to swap them in this
# one spot, and pyqtgraph will pick up on it and store the library under
# pyqtgraph.Qt. So from PyQt5 import x becomes from pyqtgraph.Qt import x
if "18.04" in platform.version():
import PyQt5
else:
import PyQt6
import pyqtgraph
from pyqtgraph.dockarea import *
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph.Qt.QtWidgets import QVBoxLayout, QWidget
from proto.import_all_protos import *
from software.networking import threaded_unix_sender
from software.thunderscope.arbitrary_plot.named_value_plotter import NamedValuePlotter
from software.thunderscope.field import (
obstacle_layer,
path_layer,
validation_layer,
world_layer,
)
from software.thunderscope.field.field import Field
from software.thunderscope.log.g3log_widget import g3logWidget
from software.thunderscope.robot_diagnostics.drive_and_dribbler_widget import (
DriveAndDribblerWidget,
)
from software.thunderscope.proto_receiver import ProtoReceiver
from software.thunderscope.play.playinfo_widget import playInfoWidget
from software.thunderscope.chicker.chicker import ChickerWidget
class Thunderscope(object):
""" Thunderscope is our main visualizer that can visualize our field,
obstacles, paths, performance metrics, logs, plots. Thunderscope also
provides tools to interact with the robots.
Thunderscope uses pyqtgraph, which is highly configurable during runtime.
Users can move docks (purple bar) around, double click to pop them out into
another window, etc.
The setup_* functions return docks. See configure_default_layout for an
example. The returned docks can be arranged differently based on the
use case (robot diagnostics, simulation, robocup, demo, etc..)
"""
def __init__(self, refresh_interval_ms=5):
# Setup MainApp and initialize DockArea
self.app = pyqtgraph.mkQApp("Thunderscope")
self.app.setStyleSheet(
"QMainWindow{background-color: black;border: 1px solid black;}"
)
signal.signal(signal.SIGINT, signal.SIG_DFL)
self.dock_area = DockArea()
self.window = QtGui.QMainWindow()
self.window.setCentralWidget(self.dock_area)
self.window.setWindowTitle("Thunderscope")
# Setup unix socket directory
try:
os.mkdir("/tmp/tbots")
except:
pass
self.proto_receiver = ProtoReceiver()
self.refresh_functions = []
def __refresh():
for refresh_func in self.refresh_functions:
refresh_func()
# Setup refresh Timer
self.refresh_timer = QtCore.QTimer()
self.refresh_timer.setTimerType(QtCore.Qt.TimerType.PreciseTimer)
self.refresh_timer.timeout.connect(__refresh)
self.refresh_timer.start(refresh_interval_ms) # Refresh at 200hz
def register_refresh_function(self, refresh_func):
"""Register the refresh functions to run at the refresh_interval_ms
passed into thunderscope.
:param refresh_func: The function to call at refresh_interval_ms
"""
self.refresh_functions.append(refresh_func)
def configure_default_layout(self):
"""Configure the default layout for thunderscope
"""
# Configure Docks
field_dock = self.setup_field_widget()
log_dock = self.setup_log_widget()
performance_dock = self.setup_performance_plot()
play_info_dock = self.setup_play_info()
self.dock_area.addDock(field_dock, "left")
self.dock_area.addDock(log_dock, "bottom", field_dock)
self.dock_area.addDock(performance_dock, "right", log_dock)
self.dock_area.addDock(play_info_dock, "right", performance_dock)
def setup_field_widget(self):
"""Setup the field widget with the constituent layers
:returns: The dock containing the field widget
"""
self.field = Field()
# Create layers
world = world_layer.WorldLayer()
obstacles = obstacle_layer.ObstacleLayer()
paths = path_layer.PathLayer()
validation = validation_layer.ValidationLayer()
# Add field layers to field
self.field.add_layer("Vision", world)
self.field.add_layer("Obstacles", obstacles)
self.field.add_layer("Paths", paths)
self.field.add_layer("Validation", validation)
# Register observers
self.proto_receiver.register_observer(World, world.world_buffer)
self.proto_receiver.register_observer(Obstacles, obstacles.obstacle_buffer)
self.proto_receiver.register_observer(
PathVisualization, paths.path_visualization_buffer
)
# Register refresh functions
self.register_refresh_function(self.field.refresh)
# Create and return dock
field_dock = Dock("Field", size=(500, 2000))
field_dock.addWidget(self.field)
return field_dock
def setup_log_widget(self):
"""Setup the wiget that receives logs from full system
:returns: The dock containing the log widget
"""
# Create layout
layout = QVBoxLayout()
widget = QWidget()
# Create widget
self.logs = g3logWidget()
# Register observer
self.proto_receiver.register_observer(RobotLog, self.logs.log_buffer)
# Register refresh function
self.register_refresh_function(self.logs.refresh)
# Setup Checkbox Widget
layout.addWidget(self.logs)
layout.addWidget(self.logs.checkbox_widget)
widget.setLayout(layout)
# Create and return dock
log_dock = Dock("Logs", size=(500, 100))
log_dock.addWidget(widget)
return log_dock
def setup_performance_plot(self):
"""Setup the performance plot
:returns: The performance plot setup in a dock
"""
# Create widget
self.named_value_plotter = NamedValuePlotter()
# Register observer
self.proto_receiver.register_observer(
NamedValue, self.named_value_plotter.named_value_buffer
)
# Register refresh function
self.register_refresh_function(self.named_value_plotter.refresh)
# Create and return dock
named_value_plotter_dock = Dock("Performance", size=(500, 100))
named_value_plotter_dock.addWidget(self.named_value_plotter.plot)
return named_value_plotter_dock
def setup_play_info(self):
"""Setup the play info widget
:returns: The play info widget setup in a dock
"""
play_info = playInfoWidget()
play_info_dock = Dock("playInfo", size=(500, 100))
play_info_dock.addWidget(play_info)
self.proto_receiver.register_observer(PlayInfo, play_info.log_buffer)
self.register_refresh_function(play_info.refresh)
return play_info_dock
def setup_chicker_widget(self):
"""Setup the chicker widget for robot diagnostics
:returns: The dock containing the chicker widget
"""
# Create widget
self.chicker_widget = ChickerWidget()
# Register refresh function
self.register_refresh_function(self.chicker_widget.refresh)
# Create and return dock
chicker_dock = Dock("Chicker", size=(100, 100))
chicker_dock.addWidget(self.chicker_widget)
return chicker_dock
def setup_drive_and_dribbler_widget(self):
drive_and_dribbler = DriveAndDribblerWidget()
drive_and_dribbler_dock = Dock("robot diagnostics", size=(50, 100))
drive_and_dribbler_dock.addWidget(drive_and_dribbler)
return drive_and_dribbler_dock
    def show(self):
        """Show the main window and enter the Qt event loop (blocks)."""
        self.window.show()
        pyqtgraph.exec()
    def close(self):
        """Schedule the main window to close on the next event-loop pass."""
        QtCore.QTimer.singleShot(0, self.window.close)
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(description="Thunderscope")
    arg_parser.add_argument(
        "--robot_diagnostics",
        action="store_true",
        help="Run thunderscope in the robot diagnostics configuration",
    )
    arg_parser.add_argument(
        "--run_simulator", action="store_true", help="Run the standalone simulator"
    )
    cli_args = arg_parser.parse_args()

    if cli_args.robot_diagnostics:
        # Diagnostics mode: only log and drive/dribbler docks are shown.
        thunderscope = Thunderscope()
        thunderscope.dock_area.addDock(thunderscope.setup_log_widget())
        thunderscope.dock_area.addDock(
            thunderscope.setup_drive_and_dribbler_widget()
        )
        thunderscope.show()
    elif cli_args.run_simulator:
        print(
            "TODO #2050, this isn't implemented, just run the current standalone simulator"
        )
    else:
        # Default: the full field/log/performance/play-info layout.
        thunderscope = Thunderscope()
        thunderscope.configure_default_layout()
        thunderscope.show()
| 1.882813 | 2 |
main.py | balast/MartingaleBetSytemExpectedOutcomes | 2 | 12770899 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
pW = 0.48  # probability of winning a single roulette bet
pL = 1-pW  # probability of losing a single bet
b_max = 500 #max bet ($) — table maximum that ends a Martingale run
def total_losses(b0, f, num_losses):
    """Total amount lost after ``num_losses`` consecutive losing bets.

    The first bet is ``b0`` and each subsequent bet is multiplied by ``f``,
    so the total is the geometric series b0 * (1 + f + ... + f**(n-1)).

    :param b0: initial bet size ($)
    :param f: factor the bet is multiplied by after each loss
    :param num_losses: number of consecutive losses
    :returns: cumulative amount wagered and lost
    """
    # Sum the series with the builtin; the original loop shadowed `sum`.
    return b0 * sum(f ** i for i in range(num_losses))
def net_winnings(b0, f, num_games):
    """Net profit when the first ``num_games - 1`` games are lost and the last won.

    The winning bet is b0 * f**(num_games - 1); everything lost before it is
    subtracted.
    """
    final_bet = b0 * f ** (num_games - 1)
    return final_bet - total_losses(b0, f, num_games - 1)
def expected_outcome(b0, f, printN=False):
    """Expected value of one Martingale run with initial bet ``b0``, factor ``f``.

    N is the number of losses after which the next bet would exceed the table
    maximum ``b_max``.  The expectation sums the probability-weighted winnings
    of winning on game 1..N, minus the probability-weighted total loss of
    busting out after N straight losses.

    :param b0: initial bet size ($)
    :param f: bet increase factor after each loss
    :param printN: if True, print the computed N
    :returns: expected monetary outcome of the run
    """
    # np.int was removed in NumPy 1.24; plain int() truncates identically.
    N = int(np.log(b_max / b0) / np.log(f)) + 1
    if printN:
        print("N is {}".format(N))
    weighted_winnings = 0
    for i in range(1, N + 1):
        weighted_winnings += pL ** (i - 1) * net_winnings(b0, f, i)
    expectation = pW * weighted_winnings - pL ** N * total_losses(b0, f, N)
    return expectation
# b0 = 481.00
# f = 1.01
# print(expected_outcome(b0, f, 0))

# Sweep the (initial bet, bet factor) grid and evaluate the expectation at
# every combination.
b0 = np.arange(1, 500, 1)  # initial bets
f = np.arange(1.01, 5, 0.1)  # bet increase factor (=2 in typical Martingale System)
b0, f = np.meshgrid(b0, f)
results = []
for b0i, fi in zip(b0.flatten(), f.flatten()):
    results.append(expected_outcome(b0i, fi))
results = np.asarray(results)

# Locate the grid point with the best (least bad) expected outcome.
i_opt = np.argmax(results)
b_opt = b0.flatten()[i_opt]
f_opt = f.flatten()[i_opt]
result_opt = results[i_opt]
results = np.reshape(results, b0.shape)

# Report the optimum explicitly instead of via eval() on variable names.
for name, value in (("b_opt", b_opt), ("f_opt", f_opt), ("result_opt", result_opt)):
    print("{0} = {1:.04f}".format(name, value))

# Plot the expectation surface.
fig = plt.figure()
# Figure.gca(projection=...) was removed in Matplotlib 3.6; use add_subplot.
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(b0, f, results, cmap=cm.coolwarm,
                       linewidth=0, antialiased=False)
ax.set_xlabel('Initial Bet ($)')
ax.set_ylabel('Bet Increase Factor')
ax.set_zlabel('Expected Outcome ($)')
# ax.set_zlim(0, ax.get_zlim()[1])
fig.suptitle('Expected Outcome Surface using Martingale System on Roulette')
plt.title('Min/Max Bet: \$1/\$500', fontsize=10)
plt.show()
print('bye')
| 3.046875 | 3 |
pyaud/exceptions.py | jshwi/pyaud | 2 | 12770900 | """
pyaud.exceptions
================
Exceptions for use within the module.
All exceptions made public for if they need to be reraised or excepted.
Exceptions are already built into the architecture but can be used in
new plugins as well.
"""
from typing import Optional as _Optional
class AuditError(Exception):
    """Raise for audit failures that aren't failed subprocesses.

    :param cmd: Command that failed. If no argument provided the value
        will be None.
    """

    def __init__(self, cmd: _Optional[str]) -> None:
        message = f"{cmd} did not pass all checks"
        super().__init__(message)
class NameConflictError(Exception):
    """Raise if adding plugin who's name is not unique.

    :param plugin: Plugin which could not be registered.
    :param name: Name which clashes with another.
    """

    def __init__(self, plugin: str, name: str) -> None:
        message = f"plugin name conflict at {plugin}: '{name}'"
        super().__init__(message)
class NotARepositoryError(OSError):
    """Raise if there is an error related to a Git repository."""

    def __init__(self) -> None:
        message = "not a git repository"
        super().__init__(message)
class PythonPackageNotFoundError(OSError):
    """Raise if Python package not found in project."""
    # No custom message: callers supply context when raising.
class CommandNotFoundError(OSError):
    """Raise when subprocess called is not on system.

    :param cmd: The command that could not be found.
    """

    def __init__(self, cmd: str) -> None:
        message = f"{cmd}: command not found..."
        super().__init__(message)
| 2.859375 | 3 |
fingerExercises/fingerExercises-01/01.2-finger.while-exercise-02.py | sodaPhix/MITx-6.00.1x | 1 | 12770901 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 6 20:34:16 2019
@author: sodatab
MITx: 6.00.1x
"""
"""
01.2-Finger While Exercise 02
----------------------------
2. Convert the following into code that uses a while loop.
prints Hello!
prints 10
prints 8
prints 6
prints 4
prints 2
"""
"""Answer Script:"""
# Print "Hello!" then count down from 10 to 2 in steps of 2, using a while
# loop as the exercise requires.
count = 12
print('Hello!')
while count > 2:
    count -= 2
    print(count)
| 4.25 | 4 |
manim.py | sailist/manim | 2 | 12770902 | <filename>manim.py<gh_stars>1-10
#!/usr/bin/env python
import manimlib
# Run the normal manim CLI when executed directly; importing the module
# instead starts a livestream session.
if __name__ == "__main__":
    manimlib.main()
else:
    manimlib.stream_starter.start_livestream()
| 1.359375 | 1 |
Huawei/ceshi/T2.py | zephyrus9/NewCoder | 0 | 12770903 | <reponame>zephyrus9/NewCoder
# -*-coding: utf-8 -*-
# Author:
"""题目描述
给出一组正整数,你从第一个数向最后一个数方向跳跃,每次至少跳跃1格,每个数的值表示你从这个位置可以跳跃的最大长度。计算如何以最少的跳跃次数跳到最后一个数。
输入描述:
第一行表示有多少个数n
第二行开始依次是1到n个数,一个数一行
输出描述:
输出一行,表示最少跳跃的次数。"""
def min_jumps(nums):
    """Return the minimum number of jumps needed to reach the last index.

    ``nums[i]`` is the maximum jump length from position ``i``.  Uses the
    standard greedy sweep: track the farthest index reachable within the
    current number of jumps and take another jump whenever the sweep passes
    the current jump's boundary.

    :param nums: list of positive jump lengths, one per position
    :returns: minimum number of jumps from index 0 to index len(nums) - 1
    """
    jumps = 0
    current_end = 0  # farthest index reachable with `jumps` jumps
    farthest = 0  # farthest index reachable with `jumps + 1` jumps
    for i in range(len(nums) - 1):
        farthest = max(farthest, i + nums[i])
        if i == current_end:
            jumps += 1
            current_end = farthest
    return jumps


if __name__ == "__main__":
    # Input format: first line is n, then one integer per line.
    # (The original read the numbers as strings, which raised a TypeError
    # on nums[i] + 1, and never computed the jump count.)
    n = int(input().strip())
    nums = [int(input()) for _ in range(n)]
    print(min_jumps(nums))
python-watcher-2.0.0/watcher/tests/common/test_ironic_helper.py | scottwedge/OpenStack-Stein | 0 | 12770904 | # -*- encoding: utf-8 -*-
# Copyright (c) 2017 ZTE Corporation
#
# Authors:<NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from watcher.common import clients
from watcher.common import exception
from watcher.common import ironic_helper
from watcher.common import utils as w_utils
from watcher.tests import base
class TestIronicHelper(base.TestCase):
    """Unit tests for ``ironic_helper.IronicHelper``."""

    def setUp(self):
        super(TestIronicHelper, self).setUp()
        osc = clients.OpenStackClients()
        # Patch the ironic client attribute so no real client is created;
        # addCleanup undoes the patch when the test finishes.
        p_ironic = mock.patch.object(osc, 'ironic')
        p_ironic.start()
        self.addCleanup(p_ironic.stop)
        self.ironic_util = ironic_helper.IronicHelper(osc=osc)

    @staticmethod
    def fake_ironic_node():
        """Build a mock node carrying only the ``uuid`` attribute."""
        node = mock.MagicMock()
        node.uuid = w_utils.generate_uuid()
        return node

    def test_get_ironic_node_list(self):
        """get_ironic_node_list returns whatever the client's list gives."""
        node1 = self.fake_ironic_node()
        self.ironic_util.ironic.node.list.return_value = [node1]
        rt_nodes = self.ironic_util.get_ironic_node_list()
        self.assertEqual(rt_nodes, [node1])

    def test_get_ironic_node_by_uuid_success(self):
        """A node found by the client is passed through unchanged."""
        node1 = self.fake_ironic_node()
        self.ironic_util.ironic.node.get.return_value = node1
        node = self.ironic_util.get_ironic_node_by_uuid(node1.uuid)
        self.assertEqual(node, node1)

    def test_get_ironic_node_by_uuid_failure(self):
        """A missing node raises IronicNodeNotFound naming the node."""
        self.ironic_util.ironic.node.get.return_value = None
        self.assertRaisesRegex(
            exception.IronicNodeNotFound,
            "The ironic node node1 could not be found",
            self.ironic_util.get_ironic_node_by_uuid, 'node1')
| 1.914063 | 2 |
mud/events/changeprop.py | erwanaubry/alamud_IUT_Escape | 0 | 12770905 | <reponame>erwanaubry/alamud_IUT_Escape<filename>mud/events/changeprop.py
# -*- coding: utf-8 -*-
# Copyright (C) 2014 <NAME>, IUT d'Orléans
#==============================================================================
from .event import Event2
class ChangePropEvent(Event2):
    """Event that applies one or more property changes to an object."""

    NAME = "change-prop"

    def __init__(self, actor, object, modifs):
        super().__init__(actor, object)
        self.modifs = modifs

    def get_event_templates(self):
        return self.object.get_event_templates()

    def perform(self):
        # Accept either a single property string or a list of properties.
        changes = [self.modifs] if isinstance(self.modifs, str) else self.modifs
        self.object.change_props(changes, self.world_context())
| 2.0625 | 2 |
Abstract/GameEngine.py | BoogyWinterfell/friday-fun | 0 | 12770906 | import abc
from typing import List, Dict
from Abstract.GameAction import GameAction
from Abstract.EngineGameInfo import EngineGameInfo
class GameEngine(metaclass=abc.ABCMeta):
    """Abstract interface a concrete game engine must implement."""

    @abc.abstractmethod
    def resolve_actions(self, actions: Dict[str, List[GameAction]], game_state: EngineGameInfo):
        """Resolve the queued actions of every player against the game state.

        :param actions: mapping of player identifier to that player's actions
        :param game_state: current engine-side view of the game
        """
        pass
| 2.859375 | 3 |
setup.py | movermeyer/circonus | 3 | 12770907 | <filename>setup.py
#!/usr/bin/env python
from setuptools import find_packages, setup
import codecs
# The long description shown on PyPI is the README plus the changelog,
# both read as UTF-8.
with codecs.open("README.rst", "r", "utf-8") as f:
    readme = f.read()
with codecs.open("HISTORY.rst", "r", "utf-8") as f:
    history = f.read()
setup(
    name="circonus",
    version="0.0.22",
    description="Interact with the Circonus REST API.",
    long_description=readme + "\n\n" + history,
    author="Monetate Inc.",
    author_email="<EMAIL>",
    url="https://github.com/monetate/circonus",
    license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7"
    ],
    keywords="circonus monitoring analytics",
    packages=find_packages(),
    install_requires=["colour", "requests"]
)
| 1.421875 | 1 |
platform/core/polyaxon/db/models/searches.py | hackerwins/polyaxon | 0 | 12770908 | from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import models
from constants import content_types
from db.models.abstract.diff import DiffModel
from db.models.abstract.nameable import NameableModel
class Search(DiffModel, NameableModel):
    """A saved search query."""

    # (value, label) choices restricting which entity type a search targets.
    search_content_types = (
        (content_types.PROJECT, content_types.PROJECT),
        (content_types.EXPERIMENT_GROUP, content_types.EXPERIMENT_GROUP),
        (content_types.EXPERIMENT, content_types.EXPERIMENT),
        (content_types.JOB, content_types.JOB),
        (content_types.BUILD_JOB, content_types.BUILD_JOB),
    )
    # Owning project; deleting the project cascades to its saved searches.
    project = models.ForeignKey(
        'db.Project',
        on_delete=models.CASCADE,
        related_name='searches')
    # Optional entity type the search is scoped to (null = unscoped).
    content_type = models.CharField(
        choices=search_content_types,
        max_length=24,
        blank=True,
        null=True)
    # Author of the saved search; '+' disables the reverse relation.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='+')
    # The serialized query definition itself.
    query = JSONField()
    # Free-form metadata for the search; contents not constrained here.
    meta = JSONField(
        null=True,
        blank=True,
        default=dict
    )

    class Meta:
        app_label = 'db'
        # A user may not save two searches with the same name in one project.
        unique_together = (('user', 'project', 'name'), )
| 2.140625 | 2 |
covid/serializers.py | YueZhao2019/covid-information-system-backend | 0 | 12770909 | from rest_framework import serializers
from covid.models import Covid
class CovidSerializer(serializers.ModelSerializer):
    """Serialize every field of the Covid model for the REST API."""

    class Meta:
        model = Covid
        fields = '__all__'
| 1.8125 | 2 |
def draw_stars(list):
    """Print one line of asterisks per entry; the value is the line length.

    :param list: iterable of non-negative integers
    """
    # Parenthesized print is valid under both Python 2 and Python 3; the
    # original Python-2-only print statement is a SyntaxError on Python 3.
    for value in list:
        print("*" * value)
| 2.6875 | 3 |
game/puzzle.py | tcdude/pyweek28 | 0 | 12770911 | <reponame>tcdude/pyweek28
"""
The 3 ring-puzzle to input the final code.
"""
__copyright__ = """
MIT License
Copyright (c) 2019 tcdude
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from panda3d import core
from direct.interval.IntervalGlobal import *
from direct.interval.LerpInterval import *
from . import collision
from . import common
from . import gamedata
from . import modelgen
class Puzzle(gamedata.GameData):
    """The three-ring combination puzzle used to enter the final code.

    The player activates one of three levers and rotates the matching ring
    in 60 degree steps until all three ring offsets equal the combination
    derived from the app's chosen symbols.
    """
    def __init__(self, symbols, pos, collision_handler, app):
        """
        :param symbols: the texture arrays for the symbol cards
        :param pos: world position of the center of the 3 rings
        :param collision_handler: handler the puzzle's collision circles join
        :param app: main application (provides char, chosen_symbols, ...)
        """
        super().__init__()
        self.__symbols = symbols  # the texture arrays
        self.__pos = pos  # center of the 3 rings
        self.__collision_handler = collision_handler
        self.__app = app
        self.__root = self.render.attach_new_node('Puzzle Root')
        # nodes
        self.__rings = None
        self.__symbol_cards = None
        self.__levers = None
        # states
        self.__found = False
        self.__lever_hint = False
        self.__lever_first_move = False
        self.__setup_rings()
        self.__inner_bounds = False
        self.__active_lever = -1
        # Current rotation step (0..5) of each ring.
        self.__ring_offsets = [0, 0, 0]
        self.__time_spent = 0
        # events: E/R raw keys rotate the active ring counter/clockwise
        self.accept('raw-e-up', self.__toggle_lever, [-1])
        self.accept('raw-r-up', self.__toggle_lever, [1])
        self.task_mgr.add(self.__update_puzzle)
        # other: substitute the user's actual key bindings into the hint text
        self.__lever_text = common.TXT_LEVER_ACTIVATE.replace(
            '--L--',
            str(self.keyboard_map.get_mapped_button('e')).upper()
        ).replace(
            '--R--',
            str(self.keyboard_map.get_mapped_button('r')).upper()
        )
        # Winning combination: one offset (mod 6) per ring.
        self.__combination = []
        for i in self.__app.chosen_symbols:
            self.__combination.append(i % 6)
        # print(self.__combination)
        # for i in range(3):
        #     for _ in range(self.__combination[i]):
        #         self.__toggle_lever(-1, i)

    @property
    def get_puzzle_instance(self):
        """Return this puzzle instance (convenience accessor)."""
        return self

    def __update_puzzle(self, task):
        """Per-frame task: show a nudge hint after 40s inside the bounds."""
        if self.__inner_bounds:
            self.__time_spent += self.global_clock.get_dt()
        else:
            self.__time_spent = 0
        if self.__time_spent > 40:
            self.display_hint(common.TXT_LEVER_LONG_TIME)
            self.__time_spent = 0
        return task.cont

    def __toggle_lever(self, direction, lever=None):
        """Rotate a ring one 60-degree step.

        :param direction: -1 or 1, rotation direction
        :param lever: if given, rotate that ring instantly (no animation or
            proximity check); otherwise animate the currently active lever
        """
        if lever is not None:
            self.__ring_offsets[lever] -= direction
            self.__ring_offsets[lever] = self.__ring_offsets[lever] % 6
            h = self.__rings[lever].get_h()
            self.__rings[lever].set_h(h + 60 * direction)
            return
        if self.__active_lever == -1:
            return
        if self.__inner_bounds:
            # Only operate the lever when the character is close enough.
            dist = self.__levers[self.__active_lever].get_distance(
                self.__app.char.node_path
            )
            if dist < 6:
                if not self.__lever_first_move:
                    self.display_hint(common.TXT_LEVER_FIRST_MOVE)
                    self.__lever_first_move = True
                # Animate the ring rotation and the lever push/return.
                h = self.__rings[self.__active_lever].get_h()
                LerpHprInterval(
                    self.__rings[self.__active_lever],
                    1.2,
                    (h + 60 * direction, 0, 0),
                    blendType='easeInOut'
                ).start()
                Sequence(
                    LerpHprInterval(
                        self.__levers[self.__active_lever],
                        0.6,
                        (-90, 45 * direction, 0),
                        (-90, 0, 0),
                        blendType='easeInOut'
                    ),
                    LerpHprInterval(
                        self.__levers[self.__active_lever],
                        0.6,
                        (-90, 0, 0),
                        (-90, 45 * direction, 0),
                        blendType='easeInOut'
                    ),
                ).start()
                v = self.__ring_offsets[self.__active_lever] - direction
                self.__ring_offsets[self.__active_lever] = v % 6
                # Win as soon as all three offsets match the combination.
                if self.__ring_offsets == self.__combination:
                    self.__app.winning_screen()

    def __lever_hint_event(self):
        """Collision callback: player entered lever range; hint once."""
        self.__inner_bounds = True
        if self.__lever_hint:
            return
        self.display_hint(common.TXT_LEVER_HINT)
        self.__lever_hint = True

    def __found_event(self):
        """Collision callback: player approached the rings; hint once."""
        self.__inner_bounds = False
        if self.__found:
            return
        self.display_hint(common.TXT_FOUND_RINGS)
        self.__found = True

    def __lever_act_range(self, i):
        """Collision callback: lever ``i`` is in range and becomes active."""
        self.display_hint(self.__lever_text, 0.5, True)
        self.__active_lever = i

    def __setup_rings(self):
        """Build the ring model, symbol textures, collision circles, levers."""
        node_path, self.__rings, self.__symbol_cards = modelgen.three_rings()
        node_path.reparent_to(self.__root)
        node_path.set_pos(self.__pos)
        # Solid inner circle plus two ghost trigger circles around it.
        self.__collision_handler.add(
            collision.CollisionCircle(self.__pos, common.TR_RADII[0])
        )
        self.__collision_handler.add(
            collision.CollisionCircle(
                self.__pos,
                common.TR_RADII[0] * 4,
                (self.__found_event, ()),
                ghost=True
            )
        )
        self.__collision_handler.add(
            collision.CollisionCircle(
                self.__pos,
                common.TR_RADII[0] * 2.2,
                (self.__lever_hint_event, ()),
                ghost=True
            )
        )
        # Paint each symbol onto its card: white background, symbol pixels
        # tinted with the ring's color.
        for i, s in enumerate(self.__symbols):
            n = self.__symbol_cards[i // 6][i % 6]
            tex = core.Texture('symbol')
            tex.setup_2d_texture(
                *common.NG_SYM_TEX_SIZE,
                core.Texture.T_unsigned_byte,
                core.Texture.F_rgba
            )
            ta = np.ones(common.NG_SYM_TEX_SIZE + (4,), dtype=np.uint8)
            ta *= 255
            tf = np.array(s) < 255
            # NOTE(review): all three channels read TR_COLORS[...].x — the
            # .y/.z components may have been intended; confirm.
            ta[tf, 0] = int(common.TR_COLORS[i // 6].x * 255)
            ta[tf, 1] = int(common.TR_COLORS[i // 6].x * 255)
            ta[tf, 2] = int(common.TR_COLORS[i // 6].x * 255)
            ta[:, :, 3] = 255
            # ta = np.flip(ta, 1)
            tex.set_ram_image_as(ta, 'RGBA')
            tex.reload()
            n.set_texture(tex, 1)
        # setup levers: place one every 90 degrees using a temporary pivot,
        # then reparent to the root keeping the computed transform.
        rot = self.__root.attach_new_node('rot')
        rot.set_pos(node_path.get_pos(self.__root))
        self.__levers = []
        for i in range(3):
            node_path, lever = modelgen.lever(i)
            self.__levers.append(lever)
            node_path.reparent_to(rot)
            node_path.set_y(common.TR_LEVER_Y)
            rot.set_h(i * 90 + 90)
            pos = node_path.get_pos(self.__root)
            hpr = node_path.get_hpr(self.__root)
            node_path.reparent_to(self.__root)
            node_path.set_pos_hpr(pos, hpr)
            node_path.set_z(node_path, 2.5)
            # Ghost circle activates the lever; solid circle blocks movement.
            self.__collision_handler.add(
                collision.CollisionCircle(
                    pos,
                    4,
                    (self.__lever_act_range, (i, )),
                    ghost=True
                )
            )
            self.__collision_handler.add(
                collision.CollisionCircle(
                    pos,
                    1
                )
            )
| 2.046875 | 2 |
app/tests/conftest.py | ravewillow6383/model_relationships | 0 | 12770912 | <filename>app/tests/conftest.py
import pytest
from app import create_app, db
from config import Config
from app.models import Creature, Winged_Creature
class TestConfig(Config):
    """Test configuration: in-memory SQLite so tests never touch real data."""
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
@pytest.fixture()
def client():
    """Yield a Flask test client backed by a fresh in-memory database."""
    app = create_app(TestConfig)
    app_context = app.app_context()
    app_context.push()
    db.create_all()
    with app.test_client() as client:
        yield client
    # Teardown: drop the schema and pop the app context after the test.
    db.session.remove()
    db.drop_all()
    app_context.pop()
@pytest.fixture()
def sample_creature(client):
    """Create and persist a Creature, returning the saved instance."""
    creature = Creature(name='Birds')
    # Bug fix: add the instance, not the Creature class itself.
    db.session.add(creature)
    db.session.commit()
    return creature
@pytest.fixture()
def sample_winged_creature(client, sample_creature):
    """Create a Winged_Creature linked to the sample creature."""
    # Bug fixes: request `sample_creature` (and the app context via `client`)
    # as fixture arguments — referencing the fixture function directly would
    # pass the function object, not a Creature — and commit via the session
    # (`db.commit()` does not exist on SQLAlchemy).
    winged_creature = Winged_Creature(
        name='Flightless cormorant', creature=sample_creature
    )
    db.session.add(winged_creature)
    db.session.commit()
    return winged_creature
@pytest.fixture()
def lone_winged_creature(client):
    """Create a Winged_Creature with no associated creature."""
    winged_creature = Winged_Creature(name='Zburator')
    db.session.add(winged_creature)
    # Bug fix: commit via the session; `db.commit()` does not exist.
    db.session.commit()
    return winged_creature
| 2.359375 | 2 |
explorerscript/source_map.py | End45/ExplorerScript | 0 | 12770913 | # MIT License
#
# Copyright (c) 2020-2021 Parakoopa and the SkyTemple Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import logging
from typing import Dict, Tuple, List, Union, Optional, Iterable
logger = logging.getLogger(__name__)
class SourceMapPositionMark:
    """A position mark encoded in the source code of SSBScript / ExplorerScript."""
    def __init__(self, line_number: int, column_number: int, end_line_number: int, end_column_number: int,
                 name: str, x_offset: int, y_offset: int, x_relative: int, y_relative: int):
        self.line_number = line_number
        self.column_number = column_number
        self.end_line_number = end_line_number
        self.end_column_number = end_column_number
        self.name = name
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.x_relative = x_relative
        self.y_relative = y_relative

    @staticmethod
    def _tile_offset(offset_code: int) -> Union[int, float]:
        """Decode an offset code into the tile offset it encodes.

        Shared by the x and y properties (previously duplicated).
        See also skytemple_files.script.ssa.position.
        """
        if offset_code == 2 or offset_code == 3:
            return 0.5
        if offset_code >= 4:
            return 2
        return 0

    @property
    def x_with_offset(self) -> Union[int, float]:
        """The x position with offset, in tiles, as float or int."""
        return self.x_relative + self._tile_offset(self.x_offset)

    @property
    def y_with_offset(self) -> Union[int, float]:
        """The y position with offset, in tiles, as float or int.

        (Docstring fixed: previously copy-pasted from the x property.)
        """
        return self.y_relative + self._tile_offset(self.y_offset)

    def __str__(self):
        return f'SourceMapPositionMark<' \
               f'"{self.name}" @{self.line_number}:{self.column_number}->{self.end_line_number}:{self.end_column_number} - ' \
               f'{self.x_relative}:{self.x_offset}, {self.y_relative}:{self.y_offset}>)'

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if not isinstance(other, SourceMapPositionMark):
            return False
        return self.line_number == other.line_number and \
            self.column_number == other.column_number and \
            self.end_line_number == other.end_line_number and \
            self.end_column_number == other.end_column_number and \
            self.name == other.name and \
            self.x_offset == other.x_offset and \
            self.y_offset == other.y_offset and \
            self.x_relative == other.x_relative and \
            self.y_relative == other.y_relative

    def serialize(self) -> list:
        """Return the JSON-compatible list representation of this mark."""
        return [
            self.line_number, self.column_number, self.end_line_number, self.end_column_number,
            self.name, self.x_offset, self.y_offset, self.x_relative, self.y_relative
        ]

    @classmethod
    def deserialize(cls, data_list) -> 'SourceMapPositionMark':
        """Rebuild a mark from the list produced by :meth:`serialize`."""
        return SourceMapPositionMark(
            line_number=data_list[0], column_number=data_list[1],
            end_line_number=data_list[2], end_column_number=data_list[3],
            name=data_list[4], x_offset=data_list[5], y_offset=data_list[6],
            x_relative=data_list[7], y_relative=data_list[8]
        )
class SourceMapping:
    """A line/column position in the source text for one opcode."""
    def __init__(self, line_number: int, column: int):
        self.line = line_number
        self.column = column

    def serialize(self) -> list:
        """Return the JSON-compatible representation ``[line, column]``."""
        return [self.line, self.column]

    @classmethod
    def deserialize(cls, data_list) -> 'SourceMapping':
        """Rebuild a mapping from the list produced by :meth:`serialize`."""
        line, column = data_list
        return SourceMapping(line, column)
class MacroSourceMapping(SourceMapping):
    """Source mapping for an opcode whose source lives inside a macro."""
    def __init__(self, relpath_included_file: str, macro_name: str,
                 line_number: int, column: int,
                 called_in: Optional[Tuple[str, int, int]],
                 return_addr: Optional[int], parameter_mapping: Dict[str, Union[int, str]]):
        super().__init__(line_number, column)
        self.relpath_included_file = relpath_included_file
        self.macro_name = macro_name
        # If this is the first operation in a Macro, this field contains the line number and column
        # of the Macro call.
        # Tuple contains relative_included_file, line number, column number of the call file.
        self.called_in = called_in
        # The opcode address to jump to when stepping out of this macro.
        # (A redundant duplicate assignment of this attribute was removed.)
        self.return_addr = return_addr
        # The mapping of parameter values for the current macro context, only for informational
        # purposes. Contains the string representation or integer value.
        self.parameter_mapping = parameter_mapping

    def serialize(self) -> list:
        """Return the JSON-compatible list representation of this mapping."""
        return [
            self.relpath_included_file, self.macro_name, self.line, self.column,
            self.called_in, self.return_addr, self.parameter_mapping
        ]

    @classmethod
    def deserialize(cls, data_list) -> 'MacroSourceMapping':
        """Rebuild a mapping from the list produced by :meth:`serialize`."""
        return MacroSourceMapping(
            data_list[0], data_list[1], data_list[2], data_list[3], data_list[4], data_list[5], data_list[6]
        )
class SourceMap:
    """
    A source map for ExplorerScript and SSBScript back to the SSB binary opcodes.
    Takes a routine id and opcode index and returns the line in the source code, that this
    operation is at.
    The mapped addresses are the addresses relative to the first routine opcode address.
    Can be created in the following ways:
    - When loading SSB:
      - For SSBScript:
        - During decompilation.
      - For ExplorerScript:
        - Either during the decompilation, if no ExplorerScript exists yet.
        - From an existing source map file.
    - When compiling SSBScript or ExplorerScript the source map is also generated.
    This also provides information about position marks used in the source file.
    """
    def __init__(
            self,
            mappings: Dict[int, SourceMapping],
            position_marks: List[SourceMapPositionMark],
            mappings_macros: Dict[int, MacroSourceMapping],
            position_marks_macro: List[Tuple[Optional[str], str, SourceMapPositionMark]]
    ):
        """
        mappings: Actual main source mappings:
                  Keys are opcode offsets, values are the source mapping
        position_marks: Encoded position marks
        mappings_macro: Source mappings in macros.
                        Keys are opcode offsets, values are the macro source mapping
        position_marks_macro: Position marks encoded in macros. Values are tuple of:
                              - relative file path, macro name, position mark
        """
        self._mappings = mappings
        self._position_marks = position_marks
        self._mappings_macros = mappings_macros
        self._position_marks_macro = position_marks_macro

    @property
    def is_empty(self):
        """Whether the map has no direct mappings (macro mappings ignored)."""
        return len(self._mappings) == 0

    def get_op_line_and_col(self, op_offset: int) -> Optional[SourceMapping]:
        """Mapping for an opcode: direct mappings first, then macros; else None."""
        if op_offset in self._mappings:
            return self._mappings[op_offset]
        if op_offset in self._mappings_macros:
            return self._mappings_macros[op_offset]

    def get_op_line_and_col__direct(self, op_offset: int) -> Optional[SourceMapping]:
        """Direct (non-macro) mapping for an opcode, or None."""
        if op_offset in self._mappings:
            return self._mappings[op_offset]

    def get_op_line_and_col__macros(self, op_offset: int) -> Optional[MacroSourceMapping]:
        """Macro mapping for an opcode, or None."""
        if op_offset in self._mappings_macros:
            return self._mappings_macros[op_offset]

    def get_position_marks__direct(self) -> List[SourceMapPositionMark]:
        """Position marks encoded directly in the main source file."""
        return self._position_marks

    def get_position_marks__macros(self) -> List[Tuple[Optional[str], str, SourceMapPositionMark]]:
        """Position marks encoded in macros (relpath, macro name, mark)."""
        return self._position_marks_macro

    def __iter__(self) -> Iterable[Tuple[int, MacroSourceMapping]]:
        """
        Iterates over all source map entries, including the macro entries.
        If it's a macro entry, macro_name is a string.
        """
        for opcode_offset, entry in self._mappings.items():
            yield opcode_offset, entry
        for opcode_offset, entry in self._mappings_macros.items():
            yield opcode_offset, entry

    def collect_mappings__macros(self) -> Iterable[Tuple[int, MacroSourceMapping]]:
        """Iterate over only the macro source map entries."""
        for opcode_offset, entry in self._mappings_macros.items():
            yield opcode_offset, entry

    def __eq__(self, other):
        # NOTE(review): equality compares only the direct mappings and
        # position marks, not the macro data — confirm this is intended.
        if not isinstance(other, SourceMap):
            return False
        return self._mappings == other._mappings and self._position_marks == other._position_marks

    def __str__(self):
        return self.serialize()

    def serialize(self, pretty=False) -> str:
        """Serialize the whole map to a JSON string (indented if *pretty*)."""
        return json.dumps({
            'map': {int(x): m.serialize() for x, m in self._mappings.items()},
            'pos_marks': [m.serialize() for m in self._position_marks],
            'macros': {
                'map': {int(x): m.serialize() for x, m in self._mappings_macros.items()},
                'pos_marks': [[y[0], y[1], y[2].serialize()] for y in self._position_marks_macro]
            }
        }, indent=2 if pretty else None)

    @classmethod
    def deserialize(cls, json_str: str) -> 'SourceMap':
        """Rebuild a SourceMap from the JSON produced by :meth:`serialize`."""
        json_d = json.loads(json_str)
        return SourceMap(
            {int(x): SourceMapping.deserialize(y) for x, y in json_d['map'].items()},
            [SourceMapPositionMark.deserialize(m) for m in json_d['pos_marks']],
            {int(x): MacroSourceMapping.deserialize(y) for x, y in json_d['macros']['map'].items()},
            [(y[0], y[1], SourceMapPositionMark.deserialize(y[2])) for y in json_d['macros']['pos_marks']]
        )

    @classmethod
    def create_empty(cls):
        """Return a SourceMap with no mappings and no position marks."""
        return cls({}, [], {}, [])

    def rewrite_offsets(self, new_mapping: Dict[int, int]):
        """
        Replace all opcode offsets (in mappings, macrco mappings, macro return addresses) with new
        offsets. The parameter is a dict mapping old offsets to new offsets.
        """
        # It may happen, that the new mapping contains fewer opcodes than originally added (eg. if they were optimized)
        # but that's ok.
        self._mappings = {new_mapping[key]: val for key, val in self._mappings.items() if key in new_mapping}
        self._mappings_macros = {new_mapping[key]: val for key, val in self._mappings_macros.items() if key in new_mapping}
        max_old_offset = max(new_mapping.keys())
        for m in self._mappings_macros.values():
            if m.return_addr:
                addr = m.return_addr
                while addr not in new_mapping:
                    # if the return addr opcode was optimized away, we take the next index. TODO: Good idea?
                    addr += 1
                    if addr > max_old_offset:
                        addr = None
                        break
                if addr is not None:
                    m.return_addr = new_mapping[addr]
class SourceMapBuilder:
    """Incrementally builds a :class:`SourceMap` during compilation."""
    def __init__(self):
        self._mappings = {}
        self._pos_marks = []
        self._mappings_macros = {}
        self._pos_marks_macros = []
        # Set by next_macro_opcode_called_in() and consumed by the next
        # add_macro_opcode() call (one-shot handoff).
        self._next_macro_called_in: Optional[SourceMapping] = None
        # Stack of (return address, parameter mapping) per nested macro call.
        self._macro_context__stack: List[Tuple[int, Dict[str, Union[int, str]]]] = []
        #logger.debug("<%d>: Init.", id(self))

    def add_opcode(self, op_offset, line_number, column):
        """Record a direct (non-macro) mapping for an opcode."""
        self._mappings[op_offset] = SourceMapping(line_number, column)
        #logger.debug("<%d>: Adding opcode: %d -> %d, %d", id(self), op_offset, line_number, column)

    def add_position_mark(self, position_mark: SourceMapPositionMark):
        """Record a position mark found in the main source file."""
        self._pos_marks.append(position_mark)
        #logger.debug("<%d>: Adding PositionMark: %s", id(self), position_mark)

    def macro_context__push(self, opcode_to_jump_to: int, parameter_mapping: Dict[str, Union[int, str]]):
        """
        Push a new macro return address and parameter mapping to the stack, all added macro ops will
        use what's on the top of the stack.
        """
        self._macro_context__stack.append((opcode_to_jump_to, parameter_mapping))
        #logger.debug("<%d>: -- PUSH MACRO CTX --> [%d, %s]", id(self), opcode_to_jump_to, parameter_mapping)

    def macro_context__pop(self):
        """
        Pop a macro context from the stack.
        """
        self._macro_context__stack.pop()
        #logger.debug("<%d>: <-- POP MACRO CTX", id(self))

    def next_macro_opcode_called_in(self, if_incl_rel_path: Optional[str], line_number, column):
        """Mark the next added macro opcode as being called in this line/column. This marks a macro call."""
        self._next_macro_called_in = (if_incl_rel_path, line_number, column)
        #logger.debug("<%d>: Marked next macro opcode as called in %s:%d, %d", id(self), str(if_incl_rel_path), line_number, column)

    def add_macro_opcode(self, op_offset, if_incl_rel_path: Optional[str], macro_name: str,
                         line_number, column):
        """
        Add an operation that has it's source code in a macro.
        If the macro is in a different file, if_incl_rel_path should contain the relative path to this file
        from the original source file that this source map is generated for.
        At least one macro return address entry has to be on the call stack!
        """
        if len(self._macro_context__stack) < 1:
            raise ValueError("There are no return addresses on the macro return address stack, "
                             "can not add macro opcode.")
        # Consume the pending one-shot "called in" marker, if any.
        called_in = None
        if self._next_macro_called_in is not None:
            called_in = self._next_macro_called_in
            self._next_macro_called_in = None
        return_addr, parameter_mapping = self._macro_context__stack[-1]
        #logger.debug("<%d>: Adding macro opcode: %s:%s:%d -> %d, %d", id(self), if_incl_rel_path, macro_name, op_offset, line_number, column)
        self._mappings_macros[op_offset] = MacroSourceMapping(if_incl_rel_path, macro_name,
                                                              line_number,
                                                              column, called_in, return_addr, parameter_mapping)

    def add_macro_position_mark(self, if_incl_rel_path: Optional[str], macro_name: str, position_mark: SourceMapPositionMark):
        """Add a position mark, that has it's source code in a macro. See notes for add_macro_opcode"""
        self._pos_marks_macros.append((if_incl_rel_path, macro_name, position_mark))
        #logger.debug("<%d>: Adding Macro PositionMark: %s:%s - %s", id(self), if_incl_rel_path, macro_name, position_mark)

    def build(self):
        """Return the finished :class:`SourceMap` from the collected data."""
        return SourceMap(self._mappings, self._pos_marks,
                         self._mappings_macros, self._pos_marks_macros)
| 1.648438 | 2 |
vmware_nsxlib/tests/unit/v3/test_cluster_management.py | salv-orlando/vmware-nsxlib | 0 | 12770914 | # Copyright 2019 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
class TestNsxLibClusterManagement(nsxlib_testcase.NsxClientTestCase):
    """Tests for the cluster-management endpoint wrappers."""

    def test_get_restore_status(self):
        """get_restore_status() must issue a GET to the restore-status URI."""
        cluster_mgmt = self.nsxlib.cluster_management
        with mock.patch.object(self.nsxlib.client, 'get') as mocked_get:
            cluster_mgmt.get_restore_status()
            mocked_get.assert_called_with('cluster/restore/status')
| 1.867188 | 2 |
dedicated_Resnet50.py | SuperbTUM/Glaucoma-Screening-with-DENet | 0 | 12770915 | import torch
import torch.nn as nn
import torchvision.models as models
class ResNet50_Mod(nn.Module):
    """ResNet-50 feature extractor with a fresh 2-way classification head.

    The torchvision ResNet-50 is truncated before its own average-pool and
    fully-connected layers; a global adaptive pool plus a 2-class linear
    head are appended.

    BUG FIX: the original used ``nn.AvgPool2d(kernel_size=7)`` followed by
    ``nn.Linear(input_size // 112, 2)`` applied directly to the 4-D pooled
    tensor with no flatten.  For the default ``input_size=640`` the shapes
    never matched and ``forward`` raised at runtime.  The head now pools to
    1x1, flattens, and maps the 2048 backbone channels to 2 logits.
    """

    def __init__(self, input_size=640, pretrained=True):
        """
        :param input_size: expected square input resolution.  Kept for
            backward compatibility; adaptive pooling works for any size.
        :param pretrained: load ImageNet weights for the backbone.
        """
        super().__init__()
        backbone = models.resnet50(pretrained=pretrained)
        # Everything except ResNet-50's own avgpool + fc.
        # Output shape: (N, 2048, H/32, W/32).
        self.resnet = nn.Sequential(*list(backbone.children())[:-2])
        self.avepool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(2048, 2)

    def forward(self, x):
        """Return (N, 2) classification logits for an image batch."""
        x = self.resnet(x)
        x = self.avepool(x)
        x = torch.flatten(x, 1)  # (N, C) before the linear head
        return self.fc(x)
| 2.984375 | 3 |
app/views/alekseyl/active_record/__init__.py | iu5team/rms | 0 | 12770916 | from task import Task
from employee import Employee
| 1 | 1 |
python/lesson_1/binary_gap.py | fdietzsc/codility-tests | 0 | 12770917 | def solution(N):
b_rep = '{:b}'.format(N)
counter = 0
res = 0
prev_val = 0
count = False
for val in b_rep:
val = int(val)
if val - prev_val == -1:
count = True
counter += 1
if val - prev_val == 0 and count:
counter += 1
if val - prev_val == 1:
count = False
res = max(res, counter)
counter = 0
prev_val = val
return res
| 2.671875 | 3 |
5.Operators/0.arithetic_operator.py | Tazri/Python | 0 | 12770918 | x = 30;
y = 3;
## addition
print("x + y : ",x + y);
## we can use + for concatenate string
print("Hello, " + "World!");
## Subtraction
print("x - y : ",x - y);
## Division
print("x / y : ",x / y);
## here division operator always return float type number
## Multiplication
print("x * y : ",x*y);
# we can use * operator to concatenate same string multiple time.
print("'word '*5 : ",'word'*5);
## Remainder
print("x%6 : ",x%6);
## Floor Division
print("x//7 : ",x//7);
## return division result in floor int type
## exponentition
print("2**4 : ",2**4);
## here 2 is base and 4 is power of base | 4.46875 | 4 |
apps/core/app_forms.py | AlcindoSchleder/icity-hotspot | 1 | 12770919 | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from .models import CoreUser
class UserLoginForm(forms.ModelForm):
    """Login form combining CoreUser model fields with credential fields.

    BUG FIX: the original assigned bare *widget* instances
    (``forms.HiddenInput`` / ``forms.EmailInput``) to ``password`` and
    ``username``.  Django's form metaclass only collects ``Field`` instances,
    so both attributes were silently ignored as form fields and ``clean()``
    always saw the credentials as ``None``.  They are now proper ``CharField``
    declarations wrapping the original widgets.
    """
    # NOTE(review): a hidden password widget with a preset value looks
    # suspicious for a login form -- confirm against the template; the widget
    # configuration is preserved from the original.
    password = forms.CharField(widget=forms.HiddenInput(attrs={'value': '<PASSWORD>'}))
    username = forms.CharField(widget=forms.EmailInput(
        attrs={
            'class': 'form-control line-input',
            'placeholder': '<EMAIL>'
        }
    ))

    class Meta:
        model = CoreUser
        fields = (
            'type_login', 'pk_core_user', 'user_uf', 'user_city'
        )
        widgets = {
            'type_login': forms.HiddenInput(attrs={'value': 'MN'}),
            'pk_core_user': forms.TextInput(
                attrs={
                    'class': 'form-control line-input',
                    'placeholder': 'Doc. Identif.'
                }
            ),
            'user_uf': forms.Select(
                attrs={
                    'class': 'form-control line-input',
                    'choices': CoreUser.UF_CHOICES,
                }
            ),
            'user_city': forms.TextInput(
                attrs={
                    'class': 'form-control line-input',
                    'placeholder': 'Cidade'
                }
            )
        }
        labels = {
            'pk_core_user': 'C.P.F.',
            'user_uf': 'UF: ',
            'user_city': 'Cidade: ',
        }

    def clean(self):
        """Validate the submitted credentials against the auth backend."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        # Delegate to authenticate_user so both entry points share one check
        # (the original duplicated the authenticate/is_active logic).
        self.authenticate_user(username, password)
        return self.cleaned_data

    def authenticate_user(self, username, password):
        """Return the authenticated, active user or raise ValidationError."""
        user = authenticate(username=username, password=password)
        if not user or not user.is_active:
            raise forms.ValidationError("Usuário ou senha inválidos.")
        return user
class UserRegistrationForm(forms.ModelForm):
    """Registration form backed by Django's built-in ``User`` model.

    Declares the credential fields explicitly so custom widgets/placeholders
    are used; ``confirm`` is an extra field not present on the model.
    """
    # NOTE(review): the ``label`` values ('lock', 'person', 'email') look like
    # icon names rather than display labels -- confirm against the template.
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={'class': 'form-control line-input', 'placeholder': 'Senha'}), min_length=6, label='lock')
    confirm = forms.CharField(widget=forms.PasswordInput(
        attrs={'class': 'form-control line-input', 'placeholder': 'Confirme a senha'}), min_length=6, label='lock')
    username = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control line-input', 'placeholder': 'Nome de usuário'}), label='person')
    email = forms.CharField(widget=forms.EmailInput(attrs={
        'class': 'form-control line-input', 'placeholder': 'Email'}), label='email', required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'password',)
class PasswordResetForm(forms.Form):
    """Request a password reset by email or username.

    BUG FIX: this was declared as a ``forms.ModelForm`` without any ``Meta``
    class; Django raises "ModelForm has no model class specified" as soon as
    the form is instantiated.  A plain ``forms.Form`` is what this
    single-field form actually needs.
    """
    email_or_username = forms.CharField(widget=forms.TextInput(
        attrs={'class': 'form-control line-input', 'placeholder': 'Email/Usuário'}))
class SetPasswordForm(forms.Form):
    """Set a new password (entered twice for confirmation).

    Matching of the two values is presumably done by the view --
    no ``clean`` method is defined here; confirm with the caller.
    """
    new_password = forms.CharField(widget=forms.PasswordInput(
        attrs={'class': 'form-control line-input', 'placeholder': 'Nova senha'}), min_length=6)
    new_password_confirm = forms.CharField(widget=forms.PasswordInput(
        attrs={'class': 'form-control line-input', 'placeholder': 'Confirmar a nova senha'}), min_length=6)
| 2.5625 | 3 |
wasm-js-bridge/gen-bridge.py | nokotan/WasmTools | 0 | 12770920 | #!/usr/bin/python3
import extractWasmExport
import extractComments
from io import StringIO
import json as JsonUtil
import sys
class FunctionDefinition:
    """Renders one documented, exported function as a TypeScript declaration."""

    def __init__(self, item):
        self.parameters = item["params"]
        return_types = item["returnTypes"]
        self.returnType = return_types[0] if return_types else "void"
        self.name = item["name"]
        self.comment = item["comment"]

    def _param_list(self):
        """Join parameters as 'name: type' pairs, comma separated."""
        return ", ".join("{}: {}".format(p["name"], p["type"]) for p in self.parameters)

    def toModuleDef(self):
        """Render as a module-level 'export function' declaration."""
        return "{}\nexport function {}({}): {};".format(
            self.comment, self.name, self._param_list(), self.returnType)

    def toClassFunctionDef(self):
        """Render as a class-member method declaration."""
        return "{}\n{}({}): {};".format(
            self.comment, self.name, self._param_list(), self.returnType)
class FunctionDefinitionFilter:
    """Merges comment-derived definitions with wasm export metadata."""

    def __init__(self, comments):
        self.comments = comments

    def filterByExports(self, exports):
        """Keep only comments whose name matches an exported symbol."""
        exported_names = {export["name"] for export in exports}
        self.comments = [c for c in self.comments if c["name"] in exported_names]

    def addFunctionTypeDefinition(self, exports):
        """Attach each export's wasm parameter/result types to its comment."""
        for comment in self.comments:
            matches = [e for e in exports if e["name"] == comment["name"]]
            assert len(matches) == 1
            export = matches[0]
            comment["params"] = [
                {"name": native["name"], "type": native["type"], "exportType": wasm_type}
                for native, wasm_type in zip(comment["params"], export["params"])
            ]
            comment["returnTypes"] = export["results"]

    def transformType(self):
        """Collapse every parameter and return type to the JS 'number' type."""
        for comment in self.comments:
            comment["params"] = [
                {"name": p["name"], "type": "number"} for p in comment["params"]
            ]
            comment["returnTypes"] = ["number"] * len(comment["returnTypes"])
def main(argv):
    """Generate TypeScript bridge declarations for a wasm module.

    argv layout: [prog, wasm_file, source_file, *compiler_args].
    """
    wasm_path = argv[1]
    source_path = argv[2]
    compiler_args = argv[3:]

    # Extract documented declarations from the source file.
    comments_executor = extractComments.Executor(extractComments.SimpleTreeWalker(), extractComments.DictionaryGeneratingVisitor())
    comments_executor.path = source_path
    comments_executor.args = compiler_args
    comments_executor.run()

    # Extract the export table from the compiled wasm binary.
    exports_executor = extractWasmExport.Executor()
    exports_executor.targetFilePath = wasm_path
    exports_json = exports_executor.run()

    comment_definitions = comments_executor.generator.getDefinitions()
    export_definitions = JsonUtil.loads(exports_json)

    # Keep only documented functions that are actually exported, attach wasm
    # type information, then collapse all types to JS 'number'.
    merger = FunctionDefinitionFilter(comment_definitions)
    merger.filterByExports(export_definitions)
    merger.addFunctionTypeDefinition(export_definitions)
    merger.transformType()

    declarations = "\n".join(FunctionDefinition(comment).toModuleDef()
                             for comment in merger.comments)

    # NOTE(review): "Auto-gererated" typo is preserved byte-for-byte -- this
    # text is emitted into the generated output file.
    print(f"""
/* Auto-gererated type definition. */
{declarations}
export const memory: WebAssembly.Memory;
""")


if __name__ == "__main__":
    main(sys.argv)
| 2.46875 | 2 |
bert2tf/excepts.py | xiongma/bert2tf | 7 | 12770921 | class YAMLEmptyError(Exception):
"""The yaml configs file is empty, nothing to read from there."""
class BadInputs(Exception):
"""Bad inputs"""
class PeaFailToStart(SystemError):
"""When pea is failed to started"""
class GRPCServerError(Exception):
"""Can not connect to the grpc gateway"""
class BadClient(Exception):
"""A wrongly defined grpc client, can not communicate with bert2tf server correctly """
class NoExplicitMessage(Exception):
"""Waiting until all partial messages are received"""
class UnknownRequestError(Exception):
"""Unknown request type"""
class DriverError(Exception):
"""Base driver error"""
class UnattachedDriver(DriverError):
"""Driver is not attached to any BasePea or executor"""
class NoDriverForRequest(DriverError):
"""No matched driver for this request """
class UnknownControlCommand(RuntimeError):
"""The control command received can not be recognized"""
class RequestLoopEnd(KeyboardInterrupt):
"""The event loop of BasePea ends"""
class FlowTopologyError(Exception):
"""Flow exception when the topology is ambiguous."""
class FlowMissingPodError(Exception):
"""Flow exception when a pod can not be found in the flow."""
class FlowEmptyError(Exception):
"""Flow exception when flow was not built and to call flow external function, such as `predict()` """
| 2.21875 | 2 |
test/test_loss_functions_keras.py | maxsch3/pk-triplet | 1 | 12770922 | import tensorflow as tf
import importlib
import pytest
from triplet_tools import triplet_batch_semihard_loss, triplet_batch_priming_loss, triplet_batch_hard_loss
try:
import keras
except ImportError:
pass
@pytest.mark.skipif(importlib.util.find_spec("keras") is None,
                    reason='Keras is not installed in this environment (not needed when testing tensorflow 2 )')
class TestLossFunctionsKeras:
    """Train small MNIST models with standalone Keras and check the triplet losses converge."""

    def setup_method(self):
        self.train_data, self.test_data = self.load_mnist()

    def load_mnist(self):
        """Return the (train, test) MNIST tuples."""
        return tf.keras.datasets.mnist.load_data()

    def make_model(self):
        """Build a small embedding model: flatten -> dense(128, relu) -> dense(10, linear)."""
        return keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)),
            keras.layers.Dense(128, activation='relu'),
            keras.layers.Dense(10, activation='linear')
        ])

    def test_batch_priming_loss(self):
        model = self.make_model()
        model.compile('adam', triplet_batch_priming_loss())
        history = model.fit(self.train_data[0], self.train_data[1], epochs=2)
        losses = history.history['loss']
        assert losses[-1] < 0.1
        assert losses[0] > losses[1]

    def test_batch_hard_loss(self):
        # Prime the embeddings for one epoch, then switch to batch-hard loss.
        model = self.make_model()
        model.compile('adam', triplet_batch_priming_loss())
        model.fit(self.train_data[0], self.train_data[1], epochs=1)
        model.compile('adam', triplet_batch_hard_loss())
        history = model.fit(self.train_data[0], self.train_data[1], epochs=10,
                            batch_size=100, shuffle=True)
        losses = history.history['loss']
        assert losses[-1] < 0.5
        assert losses[0] > losses[1]
| 2.25 | 2 |
apps/node_man/serializers/iam.py | ZhuoZhuoCrayon/bk-nodeman | 31 | 12770923 | <gh_stars>10-100
# _*_ coding: utf-8 _*_
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from apps.node_man.constants import IAM_ACTION_DICT
class PermissionSerializer(serializers.Serializer):
    """Serializes a single IAM permission item: an action plus an optional instance."""
    # Must be one of the IAM actions known to node manager.
    action = serializers.ChoiceField(
        label=_("操作类型"),
        required=True,
        choices=list(IAM_ACTION_DICT.keys()),
    )
    # Instance id/name are optional -- presumably only instance-scoped
    # actions supply them; confirm against callers.
    instance_id = serializers.IntegerField(label=_("实例ID"), required=False)
    instance_name = serializers.CharField(label=_("实例名称"), required=False)
class ApplyPermissionSerializer(serializers.Serializer):
    """Serializes a batch permission-application request (a list of permission items)."""
    apply_info = PermissionSerializer(label=_("申请权限信息"), many=True, required=True)
| 1.804688 | 2 |
PREP_CAOM/gui/config_generator.py | spacetelescope/MAST_HLSP | 1 | 12770924 | """
..module:: crawl_dictionary
:synopsis: This module is designed to add a given parameter to a provided
dictionary under a designated parent. It searches for the parent
recursively in order to examine all possible levels of nested dictionaries.
If the parent is found, the parameter is added to the dictionary. The
entire dictionary is returned, along with a boolean flag to indicate
whether or not the insertion was successful.
..class:: ConfigGenerator
:synopsis: This class defines a PyQt widget that uses multiple methods to
collect user input in order to generate a .yaml config file needed by
../hlsp_to_xml.py. This will help to ensure that these config files are
properly formatted and include the necessary information. This form
includes functionality to add extra rows for unique parameter definitions,
load an existing .yaml file into the form, reset all changes made to the
form, save all inputs to a .yaml config file, or save a .yaml file and
immediately launch ../hlsp_to_xml.py with said file.
"""
import csv
import os
import sys
import yaml
from hlsp_to_xml import hlsp_to_xml
import lib.GUIbuttons as gb
import lib.HeaderKeyword as hk
from lib.MyError import MyError
from util.read_yaml import read_yaml
try:
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
except ImportError:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
HEADER_KEYWORDS = "resources/hlsp_keywords.csv"
#--------------------
def crawl_dictionary(dictionary, parent, parameter, inserted=False):
    """ Recursively look for a given parent within a potential dictionary of
    dictionaries.  If the parent is found, insert the new parameter and update
    the 'inserted' flag.  Return both the updated dictionary and the flag.

    :param dictionary: A dict object containing CAOM parameters.  Nested
        dictionaries are possible in this object.
    :type dictionary: dict

    :param parent: The parent to search dictionary keys for.  May not be
        currently present.
    :type parent: str

    :param parameter: A single-key single-value dictionary, to be inserted
        into the existing dictionary under the parent key.
    :type parameter: dict

    :param inserted: A flag to keep track of whether or not parent has been
        found and the parameter inserted.
    :type inserted: bool
    """
    # If the requested parent already exists, either assign it the parameter
    # value if it is empty or update the current value.  Set the inserted flag.
    if parent in dictionary:
        if dictionary[parent] == "":
            dictionary[parent] = parameter
        else:
            dictionary[parent].update(parameter)
        inserted = True
    else:
        # Recursively call crawl_dictionary on any nested dictionaries.
        # BUG FIX: the original located each sub-dictionary's key with
        # tuple.index(value), which returns the key of the *first* equal
        # value and could reassign a result to the wrong key whenever two
        # values compared equal.  Iterating items() keys each result
        # correctly (rebinding an existing key during iteration is safe).
        for key, value in dictionary.items():
            if isinstance(value, dict):
                value, inserted = crawl_dictionary(value, parent, parameter,
                                                   inserted)
                dictionary[key] = value
    # Return both the dictionary and the inserted flag.
    return (dictionary, inserted)
#--------------------
class HeaderTypeBox(QComboBox):
    """Combo box listing the supported FITS header types."""

    def __init__(self, parent):
        super().__init__(parent)
        self.header_types = ["STANDARD", "HST", "KEPLER"]
        for header_type in self.header_types:
            self.addItem(header_type)

    def setTo(self, target):
        """Select *target* (case-insensitive) if valid, else the first entry."""
        wanted = target.upper()
        if wanted in self.header_types:
            self.setCurrentIndex(self.findText(wanted))
        else:
            self.setCurrentIndex(0)
#--------------------
class DataTypeBox(QComboBox):
    """Combo box listing the valid CAOM dataProductType values."""

    def __init__(self, parent):
        super().__init__(parent)
        self.data_types = ["", "IMAGE", "SPECTRUM", "TIMESERIES", "VISIBILITY",
                           "EVENTLIST", "CUBE", "CATALOG", "MEASUREMENTS"
                           ]
        for product_type in self.data_types:
            self.addItem(product_type)

    def setTo(self, target):
        """Select *target* (case-insensitive) if valid, else the blank entry."""
        wanted = target.upper()
        if wanted in self.data_types:
            self.setCurrentIndex(self.findText(wanted))
        else:
            self.setCurrentIndex(0)
#--------------------
class CAOMKeywordBox(QComboBox):
    """ Create a QComboBox populated with valid CAOM parameter choices.
    Distinguish between keywords already modified by code and those not
    currently in use.  Assign each to a default XML parent.
    """
    def __init__(self):
        super().__init__()
        self.setEditable(True)
        # Set up the dictionaries: CAOM keyword -> default XML parent element.
        self.inuse = {"algorithm": "metadataList",
                      "aperture_radius": "metadataList",
                      "collection": "metadataList",
                      "instrument_keywords": "metadataList",
                      "instrument_name": "metadataList",
                      "intent": "metadataList",
                      "name": "provenance",
                      "observationID": "metadataList",
                      "project": "provenance",
                      "targetPosition_coordinates_cval1": "metadataList",
                      "targetPosition_coordinates_cval2": "metadataList",
                      "targetPosition_coordsys": "metadataList",
                      "targetPosition_equinox": "metadataList",
                      "target_name": "metadataList",
                      "telescope_name": "metadataList",
                      "type": "metadataList",
                      "version": "provenance"
                      }
        self.unused = {"dataRelease": "provenance",
                       "lastExecuted": "provenance",
                       "metaRelease": "metadataList",
                       "producer": "provenance",
                       "proposal_id": "provenance",
                       "proposal_pi": "provenance",
                       "proposal_title": "provenance",
                       "reference": "provenance",
                       "runID": "metadataList",
                       "sequenceNumber": "metadataList",
                       "target_keywords": "metadataList",
                       "target_moving": "metadataList",
                       "target_type": "metadataList"
                       }
        # Create a merged dictionary (lookups in getXMLParent use this).
        self.allvalues = dict(self.inuse)
        self.allvalues.update(self.unused)
        # Use a QFont object to distinguish category seperators
        font = QFont()
        font.setBold(True)
        # Put unused parameters at the top of the list.
        # NOTE: the addItem order below fixes the model-item indices used for
        # the non-selectable separator rows -- do not reorder these calls.
        self.addItem("")
        self.addItem("Unused Keywords")
        unused_parent = self.model().item(1)
        unused_parent.setSelectable(False)
        unused_parent.setFont(font)
        for c in sorted(self.unused.keys()):
            self.addItem(c)
        # Add a separator, followed by parameters already in use
        self.addItem("---------------")
        self.addItem("Keywords In Use")
        divider = self.model().item(self.count() - 2)
        divider.setSelectable(False)
        inuse_parent = self.model().item(self.count() - 1)
        inuse_parent.setSelectable(False)
        inuse_parent.setFont(font)
        for d in sorted(self.inuse.keys()):
            self.addItem(d)

    def setTo(self, target):
        """ Set the combo box to a certain index given a CAOM keyword.
        Falls back to the blank first entry for unknown keywords.
        """
        if target in self.allvalues:
            n = self.findText(target)
            self.setCurrentIndex(n)
        else:
            self.setCurrentIndex(0)

    def getXMLParent(self, keyword):
        """ Retrieve the XML parent value for a given CAOM keyword from the
        dictionary.  Returns None if the keyword is unknown.
        """
        if keyword in self.allvalues.keys():
            return self.allvalues[keyword]
        else:
            return None
#--------------------
class ConfigGenerator(QWidget):
""" This class builds a pyqt GUI for generating a properly-formatted YAML
config file to feed into the template XML generator.
"""
    def __init__(self):
        """Construct the widget and immediately build the form UI."""
        super().__init__()
        # Placeholder; not assigned anywhere in this view -- confirm its
        # writer (presumably populated when loading an existing config).
        self.file_types = None
        self.initUI()
    def initUI(self):
        """ Create a GUI with input fields for multiple parameters, which will
        be aggregated into a .yaml config file.
        """
        # Create some formatting items for use throughout.
        firstcol = 100
        space = QSpacerItem(50, 1)
        self.keywords = hk.read_header_keywords_table(HEADER_KEYWORDS)

        # Create a section for input of filepath variables.  Includes lineedit
        # objects and buttons to launch file dialogs if the desired paths are
        # local.
        filepath_label = QLabel("Filepaths:", self)
        data_dir_label = QLabel("HLSP Data: ", filepath_label)
        data_dir_label.setAlignment(Qt.AlignRight)
        data_dir_label.setToolTip(("Enter the location of the HLSP data files "
                                   "to scan."))
        self.data_dir_edit = QLineEdit(data_dir_label)
        output_dir_label = QLabel("Output XML File: ", filepath_label)
        output_dir_label.setAlignment(Qt.AlignRight)
        output_dir_label.setToolTip(("Provide a file path and name for the XML"
                                     " result file."))
        self.output_dir_edit = QLineEdit(output_dir_label)
        browse_data_dir = QPushButton()
        browse_style = browse_data_dir.style()
        icon = browse_style.standardIcon(QStyle.SP_DirIcon)
        browse_data_dir.setIcon(icon)
        browse_data_dir.setIconSize(QSize(14,14))
        browse_data_dir.setMaximumWidth(26)
        browse_data_dir.setMaximumHeight(22)
        browse_output_dir = QPushButton()
        browse_output_dir.setIcon(icon)
        browse_output_dir.setIconSize(QSize(14,14))
        browse_output_dir.setMaximumWidth(26)
        browse_output_dir.setMaximumHeight(22)
        self.filepaths_grid = QGridLayout()
        self.filepaths_grid.addWidget(data_dir_label, 0, 0)
        self.filepaths_grid.addWidget(self.data_dir_edit, 0, 1)
        self.filepaths_grid.addWidget(browse_data_dir, 0, 2)
        self.filepaths_grid.addWidget(output_dir_label, 1, 0)
        self.filepaths_grid.addWidget(self.output_dir_edit, 1, 1)
        self.filepaths_grid.addWidget(browse_output_dir, 1, 2)

        # Set the boolean overwrite parameter with on/off radio button objects.
        overwrite_label = QLabel("Overwrite: ", self)
        overwrite_label.setMinimumWidth(firstcol)
        overwrite_label.setToolTip(("Allow hlsp_to_xml.py to overwrite an "
                                    "existing XML file."))
        self.overwrite_on = QRadioButton("On", overwrite_label)
        self.overwrite_on.setChecked(True)
        self.overwrite_off = QRadioButton("Off", overwrite_label)
        self.overwrite_grid = QGridLayout()
        self.overwrite_grid.addItem(space, 0, 0)
        self.overwrite_grid.addWidget(overwrite_label, 0, 1)
        self.overwrite_grid.addWidget(self.overwrite_on, 0, 2)
        self.overwrite_grid.addWidget(self.overwrite_off, 0, 3)

        # Set the type of .fits headers with a modified QComboBox object.
        headertype_label = QLabel("Header Type: ", self)
        headertype_label.setMinimumWidth(firstcol)
        headertype_label.setToolTip(("Select the FITS header type this HLSP "
                                     "uses."))
        self.headertype_box = HeaderTypeBox(headertype_label)
        self.headertype_box.setMinimumWidth(175)
        self.headertype_grid = QGridLayout()
        self.headertype_grid.addItem(space, 0, 0)
        self.headertype_grid.addWidget(headertype_label, 0, 1)
        self.headertype_grid.addWidget(self.headertype_box, 0, 2)

        # Select the most appropriate data type to apply to the observation
        # using a modified QComboBox.
        datatype_label = QLabel("Data Type: ", self)
        datatype_label.setMinimumWidth(firstcol)
        datatype_label.setToolTip(("Add special CAOM parameters for various "
                                   "data types."))
        self.datatype_box = DataTypeBox(datatype_label)
        self.datatype_box.setMinimumWidth(175)
        self.datatype_grid = QGridLayout()
        self.datatype_grid.addItem(space, 0, 0, -1, 1)
        self.datatype_grid.addWidget(datatype_label, 0, 1)
        self.datatype_grid.addWidget(self.datatype_box, 0, 2)

        # Create a layout for the HLSP-Unique Parameters title and new entry
        # button.
        uniques_label = QLabel("HLSP-Unique Parameters: ", self)
        uniques_label.setToolTip(("Define additional CAOM parameters to "
                                  "insert that are not defined in the FITS "
                                  "headers."))
        uniques_space = QSpacerItem(100, 1)
        add_parameter = gb.GreyButton("+ add a new parameter", 20)
        add_parameter.setMinimumWidth(125)
        add_parameter.setMaximumWidth(200)
        self.uniques_title_grid = QGridLayout()
        self.uniques_title_grid.addWidget(uniques_label, 0, 0)
        self.uniques_title_grid.addItem(uniques_space, 0, 1)
        self.uniques_title_grid.addWidget(add_parameter, 0, 2)

        # Create custom unique parameters to write into the yaml file.  This
        # list is expandable.  Custom parents can be defined in addition to
        # metadataList and provenance.
        parent_label = QLabel("XML Parent:", uniques_label)
        parent_label.setAlignment(Qt.AlignHCenter)
        caom_label = QLabel("CAOM Keyword:", uniques_label)
        caom_label.setAlignment(Qt.AlignHCenter)
        value_label = QLabel("Value:", uniques_label)
        value_label.setAlignment(Qt.AlignHCenter)
        parent_box = QComboBox(parent_label, editable=True)
        self.xml_parents = ["", "metadataList", "provenance"]
        for p in self.xml_parents:
            parent_box.addItem(p)
        caom_box = CAOMKeywordBox()
        value_edit = QLineEdit(value_label)
        self.uniques_grid = QGridLayout()
        self.uniques_grid.addWidget(caom_label, 0, 0)
        self.uniques_grid.addWidget(parent_label, 0, 1)
        self.uniques_grid.addWidget(value_label, 0, 2)
        self.uniques_grid.addWidget(caom_box, 1, 0)
        self.uniques_grid.addWidget(parent_box, 1, 1)
        self.uniques_grid.addWidget(value_edit, 1, 2)
        # Row bookkeeping: firstrow_* is the first editable row, nextrow_* the
        # row where the next added entry will be placed.
        self.firstrow_uniques = 1
        self.nextrow_uniques = 2
        self.uniques_grid.setRowStretch(self.nextrow_uniques, 1)
        self.uniques_grid.setColumnStretch(0, 0)
        self.uniques_grid.setColumnStretch(1, 1)
        self.uniques_grid.setColumnStretch(2, 1)

        # Create a layout for the Update Header Defaults title and new entry
        # button.
        headerdefault_label = QLabel("Update Header Defaults: ", self)
        headerdefault_label.setToolTip(("Entries here will update default "
                                        "values for .fits headers if they "
                                        "exist or create new ones if they "
                                        "don't."))
        headerdefault_space = QSpacerItem(300, 1)
        add_headerdefault = gb.GreyButton("+ add a new keyword", 20)
        add_headerdefault.setMaximumWidth(200)
        self.headerdefault_title_grid = QGridLayout()
        self.headerdefault_title_grid.addWidget(headerdefault_label, 0, 0)
        self.headerdefault_title_grid.addItem(headerdefault_space, 0, 1)
        self.headerdefault_title_grid.addWidget(add_headerdefault, 0, 2)

        # Adjust .fits header keyword default values or add new header keywords
        # along with the necessary parameters to add to the template file.
        # This is an expandable list with fields that will automatically
        # populate based on a user's keyword selection.
        keyword_label = QLabel("FITS Keyword:", headerdefault_label)
        keyword_label.setAlignment(Qt.AlignHCenter)
        headcaom_label = QLabel("CAOM Keyword:", headerdefault_label)
        headcaom_label.setAlignment(Qt.AlignHCenter)
        xmlparent_label = QLabel("XML Parent:", headerdefault_label)
        xmlparent_label.setAlignment(Qt.AlignHCenter)
        extension_label = QLabel("Extension:", headerdefault_label)
        extension_label.setAlignment(Qt.AlignHCenter)
        default_label = QLabel("Default Value:", headerdefault_label)
        default_label.setAlignment(Qt.AlignHCenter)
        self.keyword_box = QComboBox(keyword_label, editable=True)
        self.keyword_box.addItem("")
        initial_header_type = self.headertype_box.header_types[0].lower()
        self.header_keywords = self.keywords[initial_header_type]
        for k in self.header_keywords:
            self.keyword_box.addItem(k.keyword)
        headercaom_box = CAOMKeywordBox()
        xmlparent_box = QComboBox(xmlparent_label, editable=True)
        for p in self.xml_parents:
            xmlparent_box.addItem(p)
        extension_edit = QLineEdit(extension_label)
        default_edit = QLineEdit(default_label)
        self.headerdefault_grid = QGridLayout()
        self.headerdefault_grid.addWidget(keyword_label, 0, 0)
        self.headerdefault_grid.addWidget(headcaom_label, 0, 1)
        self.headerdefault_grid.addWidget(xmlparent_label, 0, 2)
        self.headerdefault_grid.addWidget(extension_label, 0, 3)
        self.headerdefault_grid.addWidget(default_label, 0, 4)
        self.headerdefault_grid.addWidget(self.keyword_box, 1, 0)
        self.headerdefault_grid.addWidget(headercaom_box, 1, 1)
        self.headerdefault_grid.addWidget(xmlparent_box, 1, 2)
        self.headerdefault_grid.addWidget(extension_edit, 1, 3)
        self.headerdefault_grid.addWidget(default_edit, 1, 4)
        self.firstrow_headers = 1
        self.nextrow_headers = 2
        self.headerdefault_grid.setRowStretch(self.nextrow_headers, 1)
        self.headerdefault_grid.setColumnStretch(0, 0)
        self.headerdefault_grid.setColumnStretch(1, 0)
        self.headerdefault_grid.setColumnStretch(2, 0)
        self.headerdefault_grid.setColumnStretch(3, 1)
        self.headerdefault_grid.setColumnStretch(4, 1)

        # Create a grid layout and add all the layouts and remaining widgets.
        self.meta_grid = QGridLayout()
        self.meta_grid.setColumnStretch(1, 1)
        self.meta_grid.setColumnStretch(2, 1)
        self.meta_grid.setColumnStretch(3, 0)
        self.meta_grid.setColumnStretch(4, 0)
        self.meta_grid.setColumnStretch(5, 0)
        self.meta_grid.setRowStretch(9, 0)
        self.meta_grid.setRowStretch(10, 1)
        self.meta_grid.addWidget(filepath_label, 0, 0)
        self.meta_grid.addLayout(self.overwrite_grid, 0, 4, 1, 2)
        self.meta_grid.addLayout(self.filepaths_grid, 1, 0, 2, 4)
        self.meta_grid.addLayout(self.headertype_grid, 1, 4)
        self.meta_grid.addLayout(self.datatype_grid, 2, 4, 1, 1)
        self.meta_grid.addLayout(self.uniques_title_grid, 3, 0, 1, -1)
        self.meta_grid.addLayout(self.uniques_grid, 4, 0, 4, -1)
        self.meta_grid.addLayout(self.headerdefault_title_grid, 8, 0, 1, -1)
        self.meta_grid.addLayout(self.headerdefault_grid, 9, 0, 4, -1)

        # Set the window layout and show it.
        self.setLayout(self.meta_grid)
        self.show()

        # Add button actions.
        browse_data_dir.clicked.connect(self.hlspClicked)
        browse_output_dir.clicked.connect(self.outputClicked)
        add_parameter.clicked.connect(self.addParameterClicked)
        add_headerdefault.clicked.connect(self.addKeywordClicked)
        caom_box.currentIndexChanged.connect(self.caomKeywordSelected)
        headercaom_box.currentIndexChanged.connect(self.caomKeywordSelected)
        self.headertype_box.currentIndexChanged.connect(self.headerTypeChanged)
        self.keyword_box.currentIndexChanged.connect(self.fitsKeywordSelected)
def hlspClicked(self):
""" Launch a file dialog to select a directory containing HLSP data.
"""
navigate = QFileDialog.getExistingDirectory(self,
"Select HLSP Directory",
".")
self.data_dir_edit.clear()
self.data_dir_edit.insert(navigate)
def outputClicked(self):
""" Launch a file dialog to define the XML output file name & path.
"""
navigate = QFileDialog.getSaveFileName(self,
"Save Output XML File",
".")
path = navigate[0]
self.output_dir_edit.clear()
self.output_dir_edit.insert(path)
def headerTypeChanged(self):
    """ When the header_type is changed, set the header_keywords to the
    new list. Re-populate any existing empty keyword menus. Skip any
    rows that have already been populated.
    """
    # Get the new header type and reset the header_keywords list
    # accordingly.  self.keywords maps a lower-cased header type name to
    # a list of keyword objects, each exposing a .keyword attribute.
    new_type = self.headertype_box.currentText().lower()
    self.header_keywords = self.keywords[new_type]
    # Iterate through all rows in the headerdefault_grid. Only update the
    # list choices for any rows that are totally empty.
    # Column layout (established in addKeywordClicked): 0=keyword combo,
    # 1=CAOM box, 2=XML parent combo, 3=extension edit, 4=default edit.
    for row in range(self.firstrow_headers, self.nextrow_headers):
        key_widg = self.headerdefault_grid.itemAtPosition(row, 0).widget()
        caom_widg = self.headerdefault_grid.itemAtPosition(row, 1).widget()
        xml_widg = self.headerdefault_grid.itemAtPosition(row, 2).widget()
        ext_widg = self.headerdefault_grid.itemAtPosition(row, 3).widget()
        def_widg = self.headerdefault_grid.itemAtPosition(row, 4).widget()
        caom_text = str(caom_widg.currentText())
        xml_text = str(xml_widg.currentText())
        ext_text = str(ext_widg.text())
        def_text = str(def_widg.text())
        # A row counts as empty only when every user-editable field is "".
        if (caom_text == ""
                and xml_text == ""
                and ext_text == ""
                and def_text == ""):
            # Rebuild the keyword combo with the new keyword list, keeping
            # a leading blank item as the "no selection" state.
            key_widg.clear()
            key_widg.addItem("")
            for key in self.header_keywords:
                key_widg.addItem(key.keyword)
def caomKeywordSelected(self):
    """ In the HLSP-Unique Parameters section, we want to update the XML
    Parent value when a CAOM Keyword is selected from the CAOMKeywordBox.

    Connected to CAOMKeywordBox widgets in both the uniques_grid and the
    headerdefault_grid, so the sender's grid is determined first.
    """
    # Determine which section is sending the signal and get the position
    # of the signal sender.  indexOf() returns -1 when the widget does
    # not belong to that layout.
    sender = self.sender()
    uniques_index = self.uniques_grid.indexOf(sender)
    headers_index = self.headerdefault_grid.indexOf(sender)
    if uniques_index >= 0:
        section = self.uniques_grid
        pos = section.getItemPosition(uniques_index)
    elif headers_index >= 0:
        section = self.headerdefault_grid
        pos = section.getItemPosition(headers_index)
    else:
        # Sender belongs to neither grid; nothing to update.
        return
    # getItemPosition returns (row, column, rowSpan, colSpan).
    row = pos[0]
    col = pos[1]
    # Get the widgets at this position.  The XML parent box always sits
    # one column to the right of the CAOM keyword box.
    caom_key_box = section.itemAtPosition(row, col).widget()
    xml_parent_box = section.itemAtPosition(row, col+1).widget()
    # Get the new CAOM keyword and the associated XML Parent value.
    new_caom_selected = caom_key_box.currentText()
    new_xml_parent = caom_key_box.getXMLParent(new_caom_selected)
    # If getXMLParent finds a match, look for this value in the
    # contents of xml_parent_box.
    if new_xml_parent:
        n = xml_parent_box.findText(new_xml_parent)
        # If the chosen XML parent already exists, set the QComboBox to
        # that index. Otherwise, insert it as new text.
        if n >= 0:
            xml_parent_box.setCurrentIndex(n)
        else:
            xml_parent_box.setCurrentText(new_xml_parent)
    # If no corresponding XML parent is found, fall back to index 0
    # (presumably the blank entry -- confirm against the box contents).
    else:
        xml_parent_box.setCurrentIndex(0)
def addParameterClicked(self):
    """Append a fresh unique-parameter row at self.nextrow_uniques, then
    advance the row counter.

    A row consists of a CAOM keyword box (col 0), an editable XML parent
    combo box (col 1), and a value line edit (col 2).
    """
    # Build the three widgets that make up a parameter row.
    caom_selector = CAOMKeywordBox()
    parent_selector = QComboBox(editable=True)
    for parent_name in self.xml_parents:
        parent_selector.addItem(parent_name)
    value_edit = QLineEdit()
    # Insert them into the uniques grid on the next free row, and move
    # the stretch row down so entries stay packed at the top.
    target_row = self.nextrow_uniques
    self.uniques_grid.addWidget(caom_selector, target_row, 0)
    self.uniques_grid.addWidget(parent_selector, target_row, 1)
    self.uniques_grid.addWidget(value_edit, target_row, 2)
    self.uniques_grid.setRowStretch(target_row, 0)
    self.uniques_grid.setRowStretch(target_row + 1, 1)
    self.nextrow_uniques += 1
    # Keep the XML parent in sync with future CAOM keyword selections.
    caom_selector.currentIndexChanged.connect(self.caomKeywordSelected)
def fitsKeywordSelected(self):
    """ When a user chooses a header keyword in a headerdefault_grid row,
    populate the CAOM Property, XML Parent, Extension, and Default Value
    (if applicable) fields based on the chosen keyword.
    """
    # Get the position of the signal sender.
    sender = self.sender()
    ind = self.headerdefault_grid.indexOf(sender)
    pos = self.headerdefault_grid.getItemPosition(ind)
    row = pos[0]
    # Get the sender widget and the new keyword chosen (column 0 holds
    # the fits keyword combo box).
    this_keyword = self.headerdefault_grid.itemAtPosition(row, 0).widget()
    new_keyword = this_keyword.currentText()
    # The user may have entered a new header keyword, in which case we
    # simply return without populating anything.
    # NOTE(review): this assumes self.header_keywords.find() raises
    # KeyError for unknown keywords -- confirm against the keyword
    # container's implementation.
    try:
        new_obj = self.header_keywords.find(new_keyword)
    except KeyError:
        return
    # Ignore any empty string entries (find() returns None for those).
    if new_obj is None:
        return
    # If the header already exists, populate the remaining row fields with
    # data from the HeaderKeyword object. The CAOMKeywordBox change will
    # update the XML parent selection automatically (it is connected to
    # caomKeywordSelected), so skip setting column 2 here.
    this_caom = self.headerdefault_grid.itemAtPosition(row, 1).widget()
    this_caom.setTo(new_obj.caom)
    this_ext = self.headerdefault_grid.itemAtPosition(row, 3).widget()
    this_ext.setText(new_obj.headerName)
def addKeywordClicked(self):
    """ Create a new row in the headerdefault_grid table for modifying
    .fits header keyword properties.

    Column layout: 0=keyword combo, 1=CAOM property box, 2=XML parent
    combo, 3=extension line edit, 4=default-value line edit.
    """
    # Make a new keyword combo box and populate it with the current
    # header_keywords list.  The leading blank item is the
    # "no selection" state.
    new_keyword_box = QComboBox(editable=True)
    new_keyword_box.addItem("")
    for header_key in self.header_keywords:
        new_keyword_box.addItem(header_key.keyword)
    # Connect the new keyword combo box to the fitsKeywordSelected action.
    new_keyword_box.currentIndexChanged.connect(self.fitsKeywordSelected)
    # Make a new 'Parent:' combo box and populate it with self.xml_parents.
    new_xmlparent = QComboBox(editable=True)
    for p in self.xml_parents:
        new_xmlparent.addItem(p)
    # Make new line edits for 'CAOM Property:', 'Extension:', and "Default
    # value".  The CAOM box updates the XML parent automatically.
    new_headcaom = CAOMKeywordBox()
    new_headcaom.currentIndexChanged.connect(self.caomKeywordSelected)
    new_extension = QLineEdit()
    new_default = QLineEdit()
    # Add the new widgets to the headerdefault_grid layout.
    self.headerdefault_grid.addWidget(new_keyword_box,
                                      self.nextrow_headers, 0)
    self.headerdefault_grid.addWidget(new_headcaom,
                                      self.nextrow_headers, 1)
    self.headerdefault_grid.addWidget(new_xmlparent,
                                      self.nextrow_headers, 2)
    self.headerdefault_grid.addWidget(new_extension,
                                      self.nextrow_headers, 3)
    self.headerdefault_grid.addWidget(new_default,
                                      self.nextrow_headers, 4)
    # Keep the stretch on the row after the last data row so entries
    # stay packed at the top of the grid.
    self.headerdefault_grid.setRowStretch(self.nextrow_headers, 0)
    self.headerdefault_grid.setRowStretch(self.nextrow_headers+1, 1)
    # Update self.nextrow_headers.
    self.nextrow_headers += 1
def clearConfigPaths(self):
    """Empty both the data directory and output path line edits."""
    for path_edit in (self.data_dir_edit, self.output_dir_edit):
        path_edit.clear()
def loadConfigPaths(self, paths_dict):
    """Populate the path line edits from a dictionary.

    Expects 'InputDir' and 'Output' keys; existing contents are cleared
    first.
    """
    self.clearConfigPaths()
    new_input = paths_dict["InputDir"]
    self.data_dir_edit.insert(new_input)
    new_output = paths_dict["Output"]
    self.output_dir_edit.insert(new_output)
def setProductType(self, data_product_type):
    """Select *data_product_type* in the data type combo box."""
    self.datatype_box.setTo(data_product_type)
def setHeaderStandard(self, header_type):
    """Select *header_type* in the header standard combo box."""
    self.headertype_box.setTo(header_type)
def loadDictionaries(self, uniques):
    """ Recursively handles loading multi-level dictionaries to the unique
    parameters table.

    :param uniques: A dictionary containing CAOM parameters. May contain
    nested dictionaries.

    :type uniques: dict
    """
    if uniques is None:
        return
    parents = uniques.keys()
    for p in parents:
        sub_dictionary = uniques[p]
        # Work on a copy so entries can be removed as they are consumed
        # without mutating the dictionary being iterated.
        copy_dictionary = dict(sub_dictionary)
        # Look at the first row to see if you're loading into FIRST_ENTRY
        # or NEXT_ENTRY.
        first_parent = self.uniques_grid.itemAtPosition(
            self.firstrow_uniques, 0)
        first_widget = first_parent.widget()
        for parameter in sub_dictionary.keys():
            value = sub_dictionary[parameter]
            # If the first widget text is empty, start loading there.
            # Otherwise, load to the self.nextrow_uniques position and
            # create a new set of widgets using addParameterClicked().
            if first_widget.currentText() == "":
                row = self.firstrow_uniques
            else:
                row = self.nextrow_uniques
                self.addParameterClicked()
            # Get the widgets for the current row (0=CAOM box,
            # 1=parent combo box, 2=value line edit).
            caom_box = self.uniques_grid.itemAtPosition(row, 0).widget()
            parent_box = self.uniques_grid.itemAtPosition(row, 1).widget()
            value_box = self.uniques_grid.itemAtPosition(row, 2).widget()
            # If the desired parent is already an option, set to that.
            # Otherwise add it as a new option in the combo box.
            if p in self.xml_parents:
                parent_index = self.xml_parents.index(p)
                parent_box.setCurrentIndex(parent_index)
            else:
                parent_box.addItem(p)
                parent_box.setCurrentIndex(parent_box.findText(p))
                self.xml_parents.append(p)
            # Fill in the CAOM line edit box.
            caom_box.setTo(parameter)
            # If the next level is still a dictionary, repeat this process.
            # Otherwise, fill in the Value line edit box.
            # NOTE(review): the recursion receives copy_dictionary (the
            # parameters under this parent), so nested parameters are
            # treated as parents one level down -- confirm this matches
            # the expected config file structure.
            if isinstance(sub_dictionary[parameter], dict):
                self.loadDictionaries(copy_dictionary)
            else:
                value_box.insert(sub_dictionary[parameter])
            del copy_dictionary[parameter]
def loadFromYAML(self, filename):
    """ Load configuration parameters to our ConfigGenerator form using a
    YAML-formatted file.

    :param filename: The location of the YAML-formatted config file.

    :type filename: str

    :raises MyError: If 'filepaths', 'overwrite', 'header_type', or
    'data_type' is missing from the config file.
    """
    # Read the YAML entries into a dictionary. select_files will also be
    # opening the config file, so kill the redundant output.
    yamlfile = read_yaml(filename, output=False)
    # Clear any existing form values before loading the new data.
    self.resetClicked()
    # Get the 'filepaths' data out of the dictionary and write it into
    # the appropriate lineedits
    try:
        filepaths = yamlfile["filepaths"]
        self.data_dir_edit.insert(filepaths["hlsppath"])
        self.output_dir_edit.insert(filepaths["output"])
    except KeyError:
        msg = "'filepaths' either missing or not formatted in config file"
        raise MyError(msg)
    # Get the 'overwrite' information out of the dictionary and set the
    # radio button
    try:
        if filepaths["overwrite"]:
            self.overwrite_on.setChecked(True)
        else:
            self.overwrite_off.setChecked(True)
    except KeyError:
        msg = "'overwrite' not provided in config file"
        raise MyError(msg)
    # Get the 'header_type' data out of the dictionary and set the
    # QComboBox.
    try:
        header_type = yamlfile["header_type"].capitalize()
        header_index = self.headertype_box.header_types.index(header_type)
        self.headertype_box.setCurrentIndex(header_index)
    except KeyError:
        msg = "'header_type' not provided in config file"
        raise MyError(msg)
    # Get the 'data_type' data out of the dictionary and set the
    # QComboBox.
    try:
        data_type = yamlfile["data_type"].upper()
        dataType_index = self.datatype_box.data_types.index(data_type)
        self.datatype_box.setCurrentIndex(dataType_index)
    except KeyError:
        msg = "'data_type' not provided in config file"
        raise MyError(msg)
    # Get the 'unique_parameters' data out of the dictionary using the
    # loadDictionaries module and create new rows as needed. Error
    # handling just does a pass since not all configs will have extra
    # parameters.
    try:
        uniques = yamlfile["unique_parameters"]
        self.loadDictionaries(uniques)
    except KeyError:
        pass
    # Get the 'keyword_updates' data out of the dictionary. Error handling
    # just returns since this is the last function and not all configs
    # will set keyword values.
    try:
        keyword_updates = yamlfile["keyword_updates"]
    except KeyError:
        return
    # Load the 'keyword_updates' data into the form and create new rows
    # if necessary.
    for key in sorted(keyword_updates.keys()):
        values = keyword_updates[key]
        # BUG FIX: the previous test (nextrow_headers == firstrow_headers
        # + 1) stayed true for every keyword, because loading into the
        # first row never advances nextrow_headers -- so with two or more
        # keyword_updates each one clobbered row one. Instead, check
        # whether the first row's keyword box is still empty, mirroring
        # the approach used by loadDictionaries().
        first_key_box = self.headerdefault_grid.itemAtPosition(
            self.firstrow_headers, 0).widget()
        if first_key_box.currentText() == "":
            row = self.firstrow_headers
        else:
            row = self.nextrow_headers
            self.addKeywordClicked()
        load_key = self.headerdefault_grid.itemAtPosition(row, 0).widget()
        load_caom = self.headerdefault_grid.itemAtPosition(row, 1).widget()
        load_xml = self.headerdefault_grid.itemAtPosition(row, 2).widget()
        load_ext = self.headerdefault_grid.itemAtPosition(row, 3).widget()
        load_def = self.headerdefault_grid.itemAtPosition(row, 4).widget()
        # Get the lists of available keyword and XML parent values that
        # currently populate the two QComboBox items.
        available_keys = [load_key.itemText(x)
                          for x in range(load_key.count())]
        available_xml = [load_xml.itemText(y)
                         for y in range(load_xml.count())]
        # If the keyword and XML parent values are already available,
        # select them in the appropriate box. Otherwise, enter them as
        # new values.
        if key in available_keys:
            load_key.setCurrentIndex(available_keys.index(key))
        else:
            load_key.setCurrentText(key)
        load_caom.setTo(values["caom"])
        if values["section"] in available_xml:
            load_xml.setCurrentIndex(
                available_xml.index(values["section"]))
        else:
            load_xml.setCurrentText(values["section"])
        # Add the headerName and headerDefaultValue text to the lineedit
        # objects.
        load_ext.setText(values["headerName"])
        load_def.setText(values["headerDefaultValue"])
def loadParamFile(self, filename):
    """ Load the available information from a .param file created by the
    previous metadata-checking steps of HLSP ingestion. This will not
    completely fill out the .config file form.

    :param filename: The filename for the YAML-formatted file to read
    information from.

    :type filename: str

    :raises MyError: If required .param entries are missing or more than
    one .fits product is listed.
    """
    # Read the YAML entries into a dictionary. select_files will also be
    # opening the config file, so kill the redundant output.
    yamlfile = read_yaml(filename, output=False)
    # Clear any existing form values before loading the new data.
    self.resetClicked()
    # Get the 'filepaths' data out of the dictionary and write it into
    # the appropriate lineedits
    try:
        datadir = yamlfile["InputDir"]
    except KeyError:
        msg = "'InputDir' either missing or not formatted in .param file"
        raise MyError(msg)
    else:
        self.data_dir_edit.insert(datadir)
    # Identify the single .fits entry defined in the .param file, if
    # present.
    try:
        fits = yamlfile["fits"]
    except KeyError:
        msg = "No fits parameters found in .param file"
        raise MyError(msg)
    else:
        if len(fits) > 1:
            msg = "More than one .fits product found in .param file"
            raise MyError(msg)
        else:
            fits = fits[0]
    # If the single .fits entry is present, get the standard that was
    # used for metadata checking.
    try:
        new_head_type = fits["FileParams"]["Standard"].title()
    except KeyError:
        msg = "Could not find a .fits standard in .param file"
        raise MyError(msg)
    else:
        # Only select known header types; an unknown standard leaves the
        # current selection unchanged.
        if new_head_type in self.headertype_box.header_types:
            n = self.headertype_box.header_types.index(new_head_type)
            self.headertype_box.setCurrentIndex(n)
    # If the single .fits entry is present, get the ProductType information
    # defined in the .param file.
    try:
        new_data_type = fits["FileParams"]["ProductType"].upper()
    except KeyError:
        msg = "Could not find 'ProductType' parameter in .param file"
        raise MyError(msg)
    else:
        if new_data_type in self.datatype_box.data_types:
            n = self.datatype_box.data_types.index(new_data_type)
            self.datatype_box.setCurrentIndex(n)
        else:
            # Unknown product types fall back to the first entry.
            self.datatype_box.setCurrentIndex(0)
def resetClicked(self):
    """ Clear any changes to the form.

    Restores the paths, radio buttons, and both tables to their initial
    single-row state, removing any rows the user added.
    """
    # Empty the immediately-available elements.
    self.clearConfigPaths()
    self.overwrite_on.setChecked(True)
    self.headertype_box.setCurrentIndex(0)
    self.datatype_box.setCurrentIndex(0)
    # Reset the first (permanent) row of the unique parameters table:
    # column 0 = CAOM box, 1 = parent combo, 2 = value edit.
    p_one = self.uniques_grid.itemAtPosition(self.firstrow_uniques, 0)
    p_one.widget().setCurrentIndex(0)
    c_one = self.uniques_grid.itemAtPosition(self.firstrow_uniques, 1)
    c_one.widget().setCurrentIndex(0)
    v_one = self.uniques_grid.itemAtPosition(self.firstrow_uniques, 2)
    v_one.widget().clear()
    # Reset the first (permanent) row of the header keywords table.
    k_one = self.headerdefault_grid.itemAtPosition(self.firstrow_headers, 0)
    k_one.widget().setCurrentIndex(0)
    h_one = self.headerdefault_grid.itemAtPosition(self.firstrow_headers, 1)
    h_one.widget().setCurrentIndex(0)
    x_one = self.headerdefault_grid.itemAtPosition(self.firstrow_headers, 2)
    x_one.widget().setCurrentIndex(0)
    e_one = self.headerdefault_grid.itemAtPosition(self.firstrow_headers, 3)
    e_one.widget().clear()
    d_one = self.headerdefault_grid.itemAtPosition(self.firstrow_headers, 4)
    d_one.widget().clear()
    # Delete any unique parameter entries beyond the first table row,
    # iterating bottom-up so positions remain valid during removal.
    delete_these = range(self.nextrow_uniques - 1,
                         self.firstrow_uniques,
                         -1)
    for a in delete_these:
        test = self.uniques_grid.itemAtPosition(a, 0)
        if test is None:
            continue
        widgets_per_row = 3
        for b in range(widgets_per_row):
            c = self.uniques_grid.itemAtPosition(a, b).widget()
            # Re-parenting to None detaches the widget from the layout.
            c.setParent(None)
    # Reset the nextrow_uniques variable.
    self.nextrow_uniques = self.firstrow_uniques + 1
    # Delete any header keyword entries beyond the first row.
    delete_these = range(self.nextrow_headers - 1,
                         self.firstrow_headers,
                         -1)
    for x in delete_these:
        test = self.headerdefault_grid.itemAtPosition(x, 0)
        if test is None:
            continue
        widgets_per_row = 5
        for y in range(widgets_per_row):
            z = self.headerdefault_grid.itemAtPosition(x, y).widget()
            z.setParent(None)
    # Reset the nextrow_headers variable.
    self.nextrow_headers = self.firstrow_headers + 1
def collectInputs(self):
    """ Assemble everything the user has input to the form into a
    dictionary.

    :returns: dict with 'filepaths', 'header_type', 'data_type',
    'unique_parameters', and 'keyword_updates' entries.

    :raises MyError: If the HLSP path, output path, or data type is
    missing.
    """
    # Initialize dictionaries to populate.
    config = {}
    filepaths = {}
    # Get the HLSP data filepath. Throw an error if it does not exist.
    hlsppath = self.data_dir_edit.text()
    if hlsppath == "":
        raise MyError("HLSP Data file path is missing!")
    else:
        filepaths["hlsppath"] = hlsppath
    # Get the output filepath from the line edit. Throw an error if it is
    # empty. Append with a '.xml' if not already there. Get the overwrite
    # flag from the checkbox.
    out = self.output_dir_edit.text()
    if out == "":
        raise MyError("Output file path is missing!")
    if not out.endswith(".xml"):
        out = ".".join([out, "xml"])
    filepaths["output"] = out
    filepaths["overwrite"] = self.overwrite_on.isChecked()
    config["filepaths"] = filepaths
    # Grab the selected fits header type.
    config["header_type"] = self.headertype_box.currentText().lower()
    # Get the data type. Throw an error if none selected.
    dt = self.datatype_box.currentText().lower()
    if dt == "":
        raise MyError("No data type selected!")
    else:
        config["data_type"] = dt
    # Collect all the unique parameters the user has entered. Start at row
    # self.firstrow_uniques and search through all rows the user may have
    # added.
    uniques = {}
    for row in range(self.firstrow_uniques, self.nextrow_uniques):
        add_caom = self.uniques_grid.itemAtPosition(row, 0)
        add_parent = self.uniques_grid.itemAtPosition(row, 1)
        add_value = self.uniques_grid.itemAtPosition(row, 2)
        unique_parent = unique_caom = unique_value = None
        # Skip totally empty rows, empty values are okay for defining a new
        # parent.
        if add_parent is None and add_caom is None and add_value is None:
            continue
        if add_parent is not None:
            parent_widget = add_parent.widget()
            unique_parent = str(parent_widget.currentText())
        if add_caom is not None:
            caom_widget = add_caom.widget()
            unique_caom = str(caom_widget.currentText())
        if add_value is not None:
            value_widget = add_value.widget()
            unique_value = str(value_widget.text())
        if (unique_parent == ""
                and unique_caom == ""
                and unique_value == ""):
            continue
        elif unique_parent == "":
            # Parameters with no declared parent default to the
            # top-level CompositeObservation parent.
            unique_parent = "CompositeObservation"
        parameter = {}
        parameter[unique_caom] = unique_value
        insert = crawl_dictionary(uniques, unique_parent, parameter)
        # crawl_dictionary returns a tuple:
        # (updated dictionary, inserted boolean flag)
        new_uniques, inserted = insert
        # If crawl_dictionary did not insert the new parameter, the defined
        # parent is not currently present in the dictionary, so create a
        # new entry.
        if inserted:
            uniques = new_uniques
        else:
            uniques[unique_parent] = parameter
    config["unique_parameters"] = uniques
    # Collect all header keyword entries the user may have provided.
    keywords = {}
    for row in range(self.firstrow_headers, self.nextrow_headers):
        add_key = self.headerdefault_grid.itemAtPosition(row, 0)
        add_caom = self.headerdefault_grid.itemAtPosition(row, 1)
        add_xml = self.headerdefault_grid.itemAtPosition(row, 2)
        add_ext = self.headerdefault_grid.itemAtPosition(row, 3)
        add_def = self.headerdefault_grid.itemAtPosition(row, 4)
        unique_keyword = None
        unique_caom = None
        unique_xmlparent = None
        unique_extension = None
        unique_default = None
        # Skip rows with any missing properties, otherwise load the info
        # into variables.
        if (add_key is None
                or add_caom is None
                or add_xml is None
                or add_ext is None
                or add_def is None):
            continue
        else:
            unique_keyword = str(add_key.widget().currentText())
            unique_caom = str(add_caom.widget().currentText())
            unique_xmlparent = str(add_xml.widget().currentText())
            unique_extension = str(add_ext.widget().text())
            unique_default = str(add_def.widget().text())
        # Skip the row if any of those variables are empty strings.
        # Otherwise, add the information to a dictionary of properties
        # stored under the given header keyword.
        if (unique_keyword == ""
                or unique_caom == ""
                or unique_xmlparent == ""
                or unique_extension == ""
                or unique_default == ""):
            continue
        else:
            new_entries = {}
            new_entries["caom"] = unique_caom
            new_entries["section"] = unique_xmlparent
            new_entries["headerName"] = unique_extension
            new_entries["headerDefaultValue"] = unique_default
            keywords[unique_keyword] = new_entries
    config["keyword_updates"] = keywords
    # Return the config dictionary
    return config
#--------------------
if __name__=="__main__":
    # Launch the ConfigGenerator window as a standalone Qt application.
    app = QApplication(sys.argv)
    w = ConfigGenerator()
    sys.exit(app.exec_())
| 3.078125 | 3 |
modules/commands/rmActivity.py | devspacemx/centinela | 0 | 12770925 | from common import activities, prefix
from discord.ext import commands
class RemoveActivity(commands.Cog):
    """Owner-only cog providing the rm-activity command, which deletes
    an entry from the bot's shared activity list."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="rm-activity")
    @commands.is_owner()
    async def remove_activity(self, ctx):
        # BUG FIX: command callbacks defined inside a Cog must accept
        # `self`; without it the cog instance was bound to `ctx`.
        # Everything after the prefix and command name is the activity
        # text to remove.
        activity = ctx.message.content[(
            len(prefix) + len("rm-activity")):].strip()
        if not activity:
            await ctx.send(
                "Please paste the activity you would like to"
                f" remove.\n```\n{prefix}rm-activity your activity text here\n```"
            )
            return
        # BUG FIX: list.remove() returns None, so the previous
        # `removed = activities.remove(activity)` check always reported
        # "does not exist" (and raised ValueError for unknown entries).
        # Test membership explicitly instead.
        if activity in activities:
            activities.remove(activity)
            await ctx.send(f"The activity `{activity}` was removed.")
        else:
            await ctx.send("The activity you mentioned does not exist.")
| 2.6875 | 3 |
train.py | bolajixi/Flower_Classifier | 1 | 12770926 | <filename>train.py
import argparse
import time
from collections import OrderedDict
import torch
import torch.nn.functional as F
from torch import nn, optim
from torchvision import datasets, models, transforms
from utils import parse_gpu_arg, save_checkpoint
def parse_args():
    """Parse and return the training command-line options.

    Defaults: densenet121 architecture, lr 0.001, 512 hidden units,
    9 epochs, GPU enabled, checkpoint saved under ImageClassifier/.
    """
    cli = argparse.ArgumentParser(description='Train model')
    cli.add_argument('--data_dir', type=str,
                     help='File path to dataset')
    cli.add_argument('--save_dir', type=str,
                     default="ImageClassifier/checkpoint.pth",
                     help='Save trained model checkpoint')
    cli.add_argument('--arch', type=str, default='densenet121',
                     choices=['densenet121', 'vgg13'],
                     help='Model Architecture')
    cli.add_argument('--learning_rate', type=float, default=0.001,
                     help='Learning rate')
    cli.add_argument('--hidden_units', type=int, default=512,
                     help='Number of hidden units')
    cli.add_argument('--epochs', type=int, default=9,
                     help='Number of epochs')
    cli.add_argument('--gpu', type=str, default='true',
                     help='Make use of GPU if available (true|false), (yes|no), (y|n), (1|0)')
    return cli.parse_args()
def build_network(model, h_layers, model_choice, dropout=0.3):
    """Freeze a pretrained model's weights and attach a new classifier head.

    Parameters
    ----------
    model : torch.nn.Module
        Pretrained backbone exposing a ``classifier`` attribute.
    h_layers : int
        Hidden-layer width for the densenet121 head.
    model_choice : str
        Either 'densenet121' or 'vgg13'.
    dropout : float
        Dropout probability for the densenet121 head (default 0.3).

    Returns the same model with ``model.classifier`` replaced by the new
    102-class LogSoftmax head.
    """
    # Freeze every pretrained parameter so only the new head trains.
    for weight in model.parameters():
        weight.requires_grad = False

    if model_choice == 'densenet121':
        in_features = model.classifier.in_features
        head_layers = [
            ('fc1', nn.Linear(in_features, h_layers)),
            ('relu1', nn.ReLU()),
            ('dropout2', nn.Dropout(p=dropout)),
            ('fc2', nn.Linear(h_layers, 102)),
            ('output', nn.LogSoftmax(dim=1)),
        ]
    elif model_choice == 'vgg13':
        # The vgg13 head uses fixed sizes (1024 units, p=0.5) and does not
        # consume h_layers/dropout, matching the original design.
        in_features = model.classifier[0].in_features
        head_layers = [
            ('fc1', nn.Linear(in_features, 1024)),
            ('drop', nn.Dropout(p=0.5)),
            ('relu', nn.ReLU()),
            ('fc2', nn.Linear(1024, 102)),
            ('output', nn.LogSoftmax(dim=1)),
        ]
    model.classifier = nn.Sequential(OrderedDict(head_layers))
    return model
# def get_model(model=models, model_type=args.arch):
# '''
# This function returns a default pretrained model (DenseNet121)
# Paramaters
# model: Gets models from TorchVision modesl module
# model_type: Specifies the type of model to be used
# '''
# model = getattr(model, model_type)(pretrained=True)
# # return model
def train_network(model, criterion, optimizer, epochs, trainloader, validloader, gpu):
    """Train *model* with backpropagation, periodically reporting
    validation loss and accuracy.

    Parameters:
        model: network with a trainable classifier head
        criterion: loss function (main() uses NLLLoss)
        optimizer: optimizer over the classifier parameters
        epochs: number of full passes over the training set
        trainloader: DataLoader of training batches
        validloader: DataLoader of validation batches
        gpu: bool; use CUDA when True and available
    """
    device = torch.device("cuda" if gpu and torch.cuda.is_available() else "cpu")
    model.to(device)  # Move model to GPU via cuda
    # Train Network using back-propagation and transferred learning from
    # the pre-trained network.
    steps = 0
    running_loss = 0
    print_every = 40  # report training/validation stats every 40 batches
    start_time = time.time()
    for e in range(epochs):
        for inputs, labels in trainloader:
            steps += 1
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            # Perform Forward-Pass
            logps = model.forward(inputs)
            loss = criterion(logps, labels)
            # Perform Backward-Pass (the original comment mislabeled this
            # as a second forward pass)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
                valid_loss = 0
                accuracy = 0
                # Switch to eval mode and disable gradients for the
                # validation pass.
                model.eval()
                with torch.no_grad():
                    for inputs, labels in validloader:
                        inputs, labels = inputs.to(device), labels.to(device)  # Use GPU if requested and available
                        logps = model.forward(inputs)
                        v_loss = criterion(logps, labels)
                        valid_loss += v_loss.item()
                        # Calculate accuracy: the top-1 class vs. labels.
                        ps = torch.exp(logps)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                print('Epoch {}/{}.. '.format(e + 1, epochs),
                      'Loss: {:.3f}.. '.format(running_loss / print_every),
                      'Validation Loss: {:.3f}.. '.format(valid_loss / len(validloader)),
                      'Accuracy: {:.3f}'.format(accuracy / len(validloader)))
                running_loss = 0
                # Restore training mode after validation.
                model.train()
    total_train_time = time.time() - start_time
    print(
        '\n\nTotal time taken to train network: {:.0f}m {:.2f}s'.format(total_train_time // 60, total_train_time % 60))
def main():
    """Parse CLI arguments, build the datasets and loaders, construct the
    network, train it, and save a checkpoint.
    """
    print('Welcome')
    args = parse_args()
    # BUG FIX: a stray duplicate `rgs = parse_args()` call was removed.
    if args.data_dir:
        data_dir = args.data_dir
    else:
        # BUG FIX: raising argparse.ArgumentError with a plain string
        # first argument would itself crash (it expects an argparse
        # action object); exit cleanly with the message instead.
        raise SystemExit('Path to training dataset required (--data_dir)')
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    # Training data gets augmentation (random crop/flip/rotation);
    # validation and test share the same deterministic resize + crop.
    train_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.RandomRotation(30),
                                           transforms.ToTensor(),
                                           transforms.Normalize([0.485, 0.456, 0.406],
                                                                [0.229, 0.224, 0.225])])
    eval_transforms = transforms.Compose([transforms.Resize(256),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    # Load the datasets with ImageFolder.
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=eval_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=eval_transforms)
    # Using the image datasets, load images into dataloaders.
    trainloader = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=64)
    validloader = torch.utils.data.DataLoader(valid_data, shuffle=True, batch_size=64)
    testloader = torch.utils.data.DataLoader(test_data, shuffle=True, batch_size=64)
    # Fetch the requested pretrained backbone.  args.arch is restricted by
    # the argparse `choices` list to 'densenet121' or 'vgg13', so it can
    # be passed straight through as the model_choice (the previous
    # if/else simply copied it).
    model = getattr(models, args.arch)(pretrained=True)
    model = build_network(model, args.hidden_units, args.arch)
    criterion = nn.NLLLoss()
    # argparse already delivers learning_rate as float and epochs as int,
    # so the redundant casts were dropped.
    optimizer = optim.Adam(model.classifier.parameters(), lr=args.learning_rate)
    gpu = parse_gpu_arg(args.gpu)  # Make use of GPU if available
    train_network(model, criterion, optimizer, args.epochs, trainloader, validloader, gpu)
    # Attach the class-to-index mapping so inference can map outputs back
    # to labels, then persist the checkpoint.
    model.class_to_idx = train_data.class_to_idx
    save_checkpoint(args.save_dir, model, optimizer, model.classifier, args)
if __name__ == "__main__":  # Run training only when executed as a script, not on import.
    main()
| 2.640625 | 3 |
generate_script.py | mwestera/iterated-char-rnn | 0 | 12770927 | import sys
"""
To run, e.g.:
python generate_script.py 20 config.ini
Prints script for 20 runs with random hyperparameters according to config.ini
"""
if __name__ == '__main__':
    # First positional argument (required): how many runs to generate.
    if len(sys.argv) == 1:
        print('Argument missing: requested number of runs.')
        quit()
    num_runs = int(sys.argv[1])
    # Optional arguments two and three may appear in either order: an
    # integer is taken as the number of parallel runs, anything else as
    # the config file path.
    num_parallel_runs = 1
    config = 'config.ini'
    for optional_arg in sys.argv[2:4]:
        try:
            num_parallel_runs = int(optional_arg)
        except ValueError:
            config = optional_arg
    # Emit the shell commands that create the model directory and copy
    # the config file into it.
    print("subdir=$(python create_dirs.py {0})".format(config))
    print("cp {0} models/$subdir/.".format(config))
    # Emit one training command per run.  Runs whose number is not a
    # multiple of num_parallel_runs are backgrounded with '&' so they
    # execute in parallel with the next run.
    for run in range(1, num_runs + 1):
        arg_str = "-c {0} -r -d $subdir -n {1} -v 1".format(config, run)
        parallel = "" if run % num_parallel_runs == 0 else "&"
        arg_str += ' ' + parallel
        print("python main.py", arg_str)
SQLLite/sqllite_example.py | mariobot/python_sql | 0 | 12770928 | <reponame>mariobot/python_sql
import sqlite3
from sqlite3 import Error
def create_connection(path):
    """Open a connection to the SQLite database at *path*.

    Returns the sqlite3.Connection on success, or None when the
    connection attempt fails (the error is printed, not raised).
    """
    connection = None
    try:
        connection = sqlite3.connect(path)
        # Typos fixed in both messages ("connecto" / "coonect").
        print("You are connected to SQL Lite database")
    except Error as e:
        print(f"Error {e} when try to connect SQL Lite database")
    return connection
def execute_query(connection, query):
    """Execute a write statement (DDL/DML) and commit it.

    Errors are caught and printed rather than raised, matching the other
    helpers in this module.
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        # BUG FIX: without an explicit commit, INSERTs were never
        # persisted and were lost when the connection closed.
        connection.commit()
        print("Query executed successfully")
    except Error as e:
        print(f"Error {e}")
def execute_read_query(connection, query):
    """Run a SELECT on *connection* and return all rows, or None on error."""
    cur = connection.cursor()
    try:
        cur.execute(query)
        return cur.fetchall()
    except Error as e:
        print(f"Error {e}")
        return None
# NOTE(review): hard-coded absolute Windows path (and ".sqllite" spelling);
# consider making this configurable.
connection = create_connection("C:\\Desarrollo\\Python_SQL\\SQLLite\\sm_app.sqllite")

# script to create users table
create_users_table = """
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
age INTEGER,
gender TEXT,
nationality TEXT
);
"""

# execute the query for create users table
execute_query(connection, create_users_table)

# script to create posts table (user_id references users.id)
create_posts_table = """
CREATE TABLE IF NOT EXISTS posts(
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT NOT NULL,
description TEXT NOT NULL,
user_id INTEGER NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id)
);
"""

# execute the query for create the posts table
execute_query(connection, create_posts_table)
# script that creates the comments table
# NOTE(review): the variable name is missing its leading "c"
# ("reate_..."); kept as-is because the execute call below references it.
# FIX: the original DDL had no comma between the two FOREIGN KEY clauses,
# which is a SQLite syntax error ("near FOREIGN").
reate_comments_table = """
CREATE TABLE IF NOT EXISTS comments (
id INTEGER PRIMARY KEY AUTOINCREMENT,
text TEXT NOT NULL,
user_id INTEGER NOT NULL,
post_id INTEGER NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (post_id) REFERENCES posts (id)
);
"""
# execute the query that creates the comments table
# NOTE(review): "reate_comments_table" is a typo for "create_comments_table";
# it works because the definition above uses the same misspelling.
execute_query(connection, reate_comments_table)
# script that creates the likes table
# FIX: the original DDL had no comma between the two FOREIGN KEY clauses,
# which is a SQLite syntax error ("near FOREIGN").
create_likes_table = """
CREATE TABLE IF NOT EXISTS likes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id INTEGER NOT NULL,
post_id integer NOT NULL,
FOREIGN KEY (user_id) REFERENCES users (id),
FOREIGN KEY (post_id) REFERENCES posts (id)
);
"""
# execute the query that creates the likes table
execute_query(connection, create_likes_table)
# script to insert users in the database
# NOTE(review): sqlite3 opens an implicit transaction for INSERTs and no
# commit() is ever called on `connection` — confirm persistence is intended.
create_users = """
INSERT INTO
users (name, age, gender, nationality)
VALUES
('James', 25, 'male', 'USA'),
('Leila', 32, 'female', 'France'),
('Brigitte', 35, 'female', 'England'),
('Mike', 40, 'male', 'Denmark'),
('Elizabeth', 21, 'female', 'Canada');
"""

execute_query(connection, create_users)
# sample posts; user_id references the users inserted above.
# NOTE(review): the double-quoted SQL values rely on SQLite's lenient
# fallback of treating unknown double-quoted identifiers as string
# literals — single quotes would be the portable form.
create_posts = """
INSERT INTO
posts (title, description, user_id)
VALUES
("Happy", "I am feeling very happy today", 1),
("Hot Weather", "The weather is very hot today", 2),
("Help", "I need some help with my work", 2),
("Great News", "I am getting married", 1),
("Interesting Game", "It was a fantastic game of tennis", 5),
("Party", "Anyone up for a late-night party today?", 3);
"""

execute_query(connection, create_posts)
# sample comments; (user_id, post_id) reference the rows created above
create_comments = """
INSERT INTO
comments (text, user_id, post_id)
VALUES
('Count me in', 1, 6),
('What sort of help?', 5, 3),
('Congrats buddy', 2, 4),
('I was rooting for Nadal though', 4, 5),
('Help with your thesis?', 2, 3),
('Many congratulations', 5, 4);
"""

# sample likes; (user_id, post_id) pairs
create_likes = """
INSERT INTO
likes (user_id, post_id)
VALUES
(1, 6),
(2, 3),
(1, 5),
(5, 4),
(2, 4),
(4, 2),
(3, 6);
"""

execute_query(connection, create_comments)
execute_query(connection, create_likes)
# read back and print all users
select_users = "SELECT * FROM users"
result = execute_read_query(connection, select_users)
for u in result:
    print(u)

# read back and print all posts
select_posts = "SELECT * FROM posts"
result_posts = execute_read_query(connection, select_posts)
for p in result_posts:
    print(p)

# executing a JOIN QUERY: each post's description with its author's id/name
select_users_post = """
SELECT
users.id,
users.name,
posts.description
FROM
posts
INNER JOIN users on users.id = posts.user_id
"""

result_up = execute_read_query(connection, select_users_post)
for up in result_up:
    print(up)
# executing a MULTIPLE JOIN QUERY
# FIX: the original joined `users` first but its ON clause referenced
# `comments`, which was only joined afterwards. Such a forward reference
# is invalid in most SQL engines; join `comments` before `users` so each
# ON clause only names tables already in scope. Result rows are the same:
# (post description, comment text, commenting user's name).
select_post_comments_users = """
SELECT
posts.description as post,
text as comment,
name
FROM
posts
INNER JOIN comments on posts.id = comments.post_id
INNER JOIN users on users.id = comments.user_id
"""
posts_comments_user = execute_read_query(connection, select_post_comments_users)
# each row: (post description, comment text, commenting user's name)
for pcu in posts_comments_user:
    print(pcu)
# aggregate: number of likes per post (implicit comma join + WHERE filter)
select_post_likes = """
SELECT
description as Post,
COUNT(likes.id) as Likes
FROM
likes,
posts
WHERE
posts.id = likes.post_id
GROUP BY
likes.post_id
"""

post_likes = execute_read_query(connection, select_post_likes)
for pl in post_likes:
    print(pl)

# updating records: show the description of post 2 before and after
select_post_description = "SELECT description FROM posts where id = 2"

post_description = execute_read_query(connection, select_post_description)
print(post_description)

# NOTE(review): "so could" in the replacement text looks like a typo for
# "so cold" — the string is data, so it is left untouched here.
update_sentence = """
UPDATE
posts
SET
description = "The weather has become so could in this quarantine"
WHERE
id = 2
"""

# Execute the update command
execute_query(connection, update_sentence)

post_description = execute_read_query(connection, select_post_description)
print(post_description)

# delete statement (the execute call on the following line is commented
# out; also note no commit()/close() is issued anywhere on `connection`)
delete_sentence = "DELETE FROM comments where id = 5"
#execute_query(connection,delete_sentence) | 3.53125 | 4 |
sdk/python/pulumi_azure/redis/cache.py | suresh198526/pulumi-azure | 0 | 12770929 | <reponame>suresh198526/pulumi-azure
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Cache']
class Cache(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
capacity: Optional[pulumi.Input[int]] = None,
enable_non_ssl_port: Optional[pulumi.Input[bool]] = None,
family: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
minimum_tls_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
patch_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CachePatchScheduleArgs']]]]] = None,
private_static_ip_address: Optional[pulumi.Input[str]] = None,
redis_configuration: Optional[pulumi.Input[pulumi.InputType['CacheRedisConfigurationArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Redis Cache.
## Example Usage
This example provisions a Standard Redis Cache.
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
# NOTE: the Name used for Redis needs to be globally unique
example_cache = azure.redis.Cache("exampleCache",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
capacity=2,
family="C",
sku_name="Standard",
enable_non_ssl_port=False,
minimum_tls_version="1.2",
redis_configuration=azure.redis.CacheRedisConfigurationArgs())
```
## Default Redis Configuration Values
| Redis Value | Basic | Standard | Premium |
| ------------------------------- | ------------ | ------------ | ------------ |
| enable_authentication | true | true | true |
| maxmemory_reserved | 2 | 50 | 200 |
| maxfragmentationmemory_reserved | 2 | 50 | 200 |
| maxmemory_delta | 2 | 50 | 200 |
| maxmemory_policy | volatile-lru | volatile-lru | volatile-lru |
> **NOTE:** The `maxmemory_reserved`, `maxmemory_delta` and `maxfragmentationmemory-reserved` settings are only available for Standard and Premium caches. More details are available in the Relevant Links section below._
***
A `patch_schedule` block supports the following:
* `day_of_week` (Required) the Weekday name - possible values include `Monday`, `Tuesday`, `Wednesday` etc.
* `start_hour_utc` - (Optional) the Start Hour for maintenance in UTC - possible values range from `0 - 23`.
> **Note:** The Patch Window lasts for `5` hours from the `start_hour_utc`.
## Relevant Links
- [Azure Redis Cache: SKU specific configuration limitations](https://azure.microsoft.com/en-us/documentation/articles/cache-configure/#advanced-settings)
- [Redis: Available Configuration Settings](http://redis.io/topics/config)
## Import
Redis Cache's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:redis/cache:Cache cache1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Cache/Redis/cache1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] capacity: The size of the Redis cache to deploy. Valid values for a SKU `family` of C (Basic/Standard) are `0, 1, 2, 3, 4, 5, 6`, and for P (Premium) `family` are `1, 2, 3, 4`.
:param pulumi.Input[bool] enable_non_ssl_port: Enable the non-SSL port (6379) - disabled by default.
:param pulumi.Input[str] family: The SKU family/pricing group to use. Valid values are `C` (for Basic/Standard SKU family) and `P` (for `Premium`)
:param pulumi.Input[str] location: The location of the resource group.
:param pulumi.Input[str] minimum_tls_version: The minimum TLS version. Defaults to `1.0`.
:param pulumi.Input[str] name: The name of the Redis instance. Changing this forces a
new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CachePatchScheduleArgs']]]] patch_schedules: A list of `patch_schedule` blocks as defined below - only available for Premium SKU's.
:param pulumi.Input[str] private_static_ip_address: The Static IP Address to assign to the Redis Cache when hosted inside the Virtual Network. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['CacheRedisConfigurationArgs']] redis_configuration: A `redis_configuration` as defined below - with some limitations by SKU - defaults/details are shown below.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Redis instance.
:param pulumi.Input[int] shard_count: *Only available when using the Premium SKU* The number of Shards to create on the Redis Cluster.
:param pulumi.Input[str] sku_name: The SKU of Redis to use. Possible values are `Basic`, `Standard` and `Premium`.
:param pulumi.Input[str] subnet_id: *Only available when using the Premium SKU* The ID of the Subnet within which the Redis Cache should be deployed. This Subnet must only contain Azure Cache for Redis instances without any other type of resources. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] zones: A list of a single item of the Availability Zone which the Redis Cache should be allocated in.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if capacity is None:
raise TypeError("Missing required property 'capacity'")
__props__['capacity'] = capacity
__props__['enable_non_ssl_port'] = enable_non_ssl_port
if family is None:
raise TypeError("Missing required property 'family'")
__props__['family'] = family
__props__['location'] = location
__props__['minimum_tls_version'] = minimum_tls_version
__props__['name'] = name
__props__['patch_schedules'] = patch_schedules
__props__['private_static_ip_address'] = private_static_ip_address
__props__['redis_configuration'] = redis_configuration
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['shard_count'] = shard_count
if sku_name is None:
raise TypeError("Missing required property 'sku_name'")
__props__['sku_name'] = sku_name
__props__['subnet_id'] = subnet_id
__props__['tags'] = tags
__props__['zones'] = zones
__props__['hostname'] = None
__props__['port'] = None
__props__['primary_access_key'] = None
__props__['primary_connection_string'] = None
__props__['secondary_access_key'] = None
__props__['secondary_connection_string'] = None
__props__['ssl_port'] = None
super(Cache, __self__).__init__(
'azure:redis/cache:Cache',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
capacity: Optional[pulumi.Input[int]] = None,
enable_non_ssl_port: Optional[pulumi.Input[bool]] = None,
family: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
minimum_tls_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
patch_schedules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CachePatchScheduleArgs']]]]] = None,
port: Optional[pulumi.Input[int]] = None,
primary_access_key: Optional[pulumi.Input[str]] = None,
primary_connection_string: Optional[pulumi.Input[str]] = None,
private_static_ip_address: Optional[pulumi.Input[str]] = None,
redis_configuration: Optional[pulumi.Input[pulumi.InputType['CacheRedisConfigurationArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
secondary_access_key: Optional[pulumi.Input[str]] = None,
secondary_connection_string: Optional[pulumi.Input[str]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
ssl_port: Optional[pulumi.Input[int]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[str]] = None) -> 'Cache':
"""
Get an existing Cache resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] capacity: The size of the Redis cache to deploy. Valid values for a SKU `family` of C (Basic/Standard) are `0, 1, 2, 3, 4, 5, 6`, and for P (Premium) `family` are `1, 2, 3, 4`.
:param pulumi.Input[bool] enable_non_ssl_port: Enable the non-SSL port (6379) - disabled by default.
:param pulumi.Input[str] family: The SKU family/pricing group to use. Valid values are `C` (for Basic/Standard SKU family) and `P` (for `Premium`)
:param pulumi.Input[str] hostname: The Hostname of the Redis Instance
:param pulumi.Input[str] location: The location of the resource group.
:param pulumi.Input[str] minimum_tls_version: The minimum TLS version. Defaults to `1.0`.
:param pulumi.Input[str] name: The name of the Redis instance. Changing this forces a
new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['CachePatchScheduleArgs']]]] patch_schedules: A list of `patch_schedule` blocks as defined below - only available for Premium SKU's.
:param pulumi.Input[int] port: The non-SSL Port of the Redis Instance
:param pulumi.Input[str] primary_access_key: The Primary Access Key for the Redis Instance
:param pulumi.Input[str] primary_connection_string: The primary connection string of the Redis Instance.
:param pulumi.Input[str] private_static_ip_address: The Static IP Address to assign to the Redis Cache when hosted inside the Virtual Network. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['CacheRedisConfigurationArgs']] redis_configuration: A `redis_configuration` as defined below - with some limitations by SKU - defaults/details are shown below.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the Redis instance.
:param pulumi.Input[str] secondary_access_key: The Secondary Access Key for the Redis Instance
:param pulumi.Input[str] secondary_connection_string: The secondary connection string of the Redis Instance.
:param pulumi.Input[int] shard_count: *Only available when using the Premium SKU* The number of Shards to create on the Redis Cluster.
:param pulumi.Input[str] sku_name: The SKU of Redis to use. Possible values are `Basic`, `Standard` and `Premium`.
:param pulumi.Input[int] ssl_port: The SSL Port of the Redis Instance
:param pulumi.Input[str] subnet_id: *Only available when using the Premium SKU* The ID of the Subnet within which the Redis Cache should be deployed. This Subnet must only contain Azure Cache for Redis instances without any other type of resources. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] zones: A list of a single item of the Availability Zone which the Redis Cache should be allocated in.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["capacity"] = capacity
__props__["enable_non_ssl_port"] = enable_non_ssl_port
__props__["family"] = family
__props__["hostname"] = hostname
__props__["location"] = location
__props__["minimum_tls_version"] = minimum_tls_version
__props__["name"] = name
__props__["patch_schedules"] = patch_schedules
__props__["port"] = port
__props__["primary_access_key"] = primary_access_key
__props__["primary_connection_string"] = primary_connection_string
__props__["private_static_ip_address"] = private_static_ip_address
__props__["redis_configuration"] = redis_configuration
__props__["resource_group_name"] = resource_group_name
__props__["secondary_access_key"] = secondary_access_key
__props__["secondary_connection_string"] = secondary_connection_string
__props__["shard_count"] = shard_count
__props__["sku_name"] = sku_name
__props__["ssl_port"] = ssl_port
__props__["subnet_id"] = subnet_id
__props__["tags"] = tags
__props__["zones"] = zones
return Cache(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def capacity(self) -> pulumi.Output[int]:
"""
The size of the Redis cache to deploy. Valid values for a SKU `family` of C (Basic/Standard) are `0, 1, 2, 3, 4, 5, 6`, and for P (Premium) `family` are `1, 2, 3, 4`.
"""
return pulumi.get(self, "capacity")
@property
@pulumi.getter(name="enableNonSslPort")
def enable_non_ssl_port(self) -> pulumi.Output[Optional[bool]]:
"""
Enable the non-SSL port (6379) - disabled by default.
"""
return pulumi.get(self, "enable_non_ssl_port")
@property
@pulumi.getter
def family(self) -> pulumi.Output[str]:
"""
The SKU family/pricing group to use. Valid values are `C` (for Basic/Standard SKU family) and `P` (for `Premium`)
"""
return pulumi.get(self, "family")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[str]:
"""
The Hostname of the Redis Instance
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The location of the resource group.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="minimumTlsVersion")
def minimum_tls_version(self) -> pulumi.Output[Optional[str]]:
"""
The minimum TLS version. Defaults to `1.0`.
"""
return pulumi.get(self, "minimum_tls_version")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Redis instance. Changing this forces a
new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="patchSchedules")
def patch_schedules(self) -> pulumi.Output[Optional[Sequence['outputs.CachePatchSchedule']]]:
"""
A list of `patch_schedule` blocks as defined below - only available for Premium SKU's.
"""
return pulumi.get(self, "patch_schedules")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
"""
The non-SSL Port of the Redis Instance
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="primaryAccessKey")
def primary_access_key(self) -> pulumi.Output[str]:
"""
The Primary Access Key for the Redis Instance
"""
return pulumi.get(self, "primary_access_key")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> pulumi.Output[str]:
"""
The primary connection string of the Redis Instance.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="privateStaticIpAddress")
def private_static_ip_address(self) -> pulumi.Output[str]:
"""
The Static IP Address to assign to the Redis Cache when hosted inside the Virtual Network. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "private_static_ip_address")
@property
@pulumi.getter(name="redisConfiguration")
def redis_configuration(self) -> pulumi.Output['outputs.CacheRedisConfiguration']:
"""
A `redis_configuration` as defined below - with some limitations by SKU - defaults/details are shown below.
"""
return pulumi.get(self, "redis_configuration")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to
create the Redis instance.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="secondaryAccessKey")
def secondary_access_key(self) -> pulumi.Output[str]:
"""
The Secondary Access Key for the Redis Instance
"""
return pulumi.get(self, "secondary_access_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> pulumi.Output[str]:
"""
The secondary connection string of the Redis Instance.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="shardCount")
def shard_count(self) -> pulumi.Output[Optional[int]]:
"""
*Only available when using the Premium SKU* The number of Shards to create on the Redis Cluster.
"""
return pulumi.get(self, "shard_count")
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> pulumi.Output[str]:
"""
The SKU of Redis to use. Possible values are `Basic`, `Standard` and `Premium`.
"""
return pulumi.get(self, "sku_name")
@property
@pulumi.getter(name="sslPort")
def ssl_port(self) -> pulumi.Output[int]:
"""
The SSL Port of the Redis Instance
"""
return pulumi.get(self, "ssl_port")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[Optional[str]]:
"""
*Only available when using the Premium SKU* The ID of the Subnet within which the Redis Cache should be deployed. This Subnet must only contain Azure Cache for Redis instances without any other type of resources. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def zones(self) -> pulumi.Output[Optional[str]]:
"""
A list of a single item of the Availability Zone which the Redis Cache should be allocated in.
"""
return pulumi.get(self, "zones")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 1.257813 | 1 |
src/passari/museumplus/fields.py | finnish-heritage-agency/passari | 1 | 12770930 | import datetime
import json
from lxml.etree import Element, fromstring, tostring
from passari.config import CONFIG, MUSEUMPLUS_URL
from passari.museumplus.settings import ZETCOM_NS
from passari.utils import retrieve_xml
async def get_object_field(session, object_id: int, name: str):
    """
    Get the value of a single Object field

    :param session: aiohttp.Session instance
    :param object_id: ID of the Object to retrieve
    :param name: Field to retrieve
    :returns: Value of the field as string if it exists, None otherwise
    """
    # Retrieving the entire document seems to be the only option
    xml = await retrieve_xml(
        session, f"{MUSEUMPLUS_URL}/module/Object/{object_id}"
    )
    try:
        # FIX: the original concatenated these segments without '/'
        # separators after the predicates
        # ("...moduleItem[@id='...']{ns}*[@name='...']{ns}value"),
        # which is not a valid ElementPath expression. Separators added:
        # the field element is searched anywhere under the moduleItem and
        # its <value> is taken as a direct child.
        return xml.find(
            f"{{{ZETCOM_NS}}}modules//"
            f"{{{ZETCOM_NS}}}moduleItem[@id='{object_id}']//"
            f"{{{ZETCOM_NS}}}*[@name='{name}']/"
            f"{{{ZETCOM_NS}}}value"
        ).text
    except AttributeError:
        # find() returned None: the field does not exist on this object.
        return None
# Skeleton for a single-field update request; `set_object_field` fills in
# the moduleItem id and appends the field element. The trailing `[1:]`
# strips the leading newline so the XML declaration is the first byte of
# the payload.
UPDATE_FIELD_TEMPLATE = """
<?xml version="1.0" encoding="UTF-8"?>
<application xmlns="http://www.zetcom.com/ria/ws/module">
<modules>
<module name="Object">
<moduleItem id="{object_id}">
</moduleItem>
</module>
</modules>
</application>
"""[1:]
async def set_object_field(
        session, object_id: int, name: str, field_type: str,
        value: str):
    """
    Set the value of a single Object field. Value will be created if the
    field does not exist already.

    :param session: aiohttp.Session instance
    :param object_id: ID of the Object to retrieve
    :param name: Field name to retrieve
    :param field_type: Field type (eg. "dataField")
    :param value: Value to set
    """
    # Fill the skeleton request document with the target moduleItem id.
    root = fromstring(
        UPDATE_FIELD_TEMPLATE.format(object_id=object_id).encode("utf-8")
    )
    module_elem = root.find(
        f"{{{ZETCOM_NS}}}modules//{{{ZETCOM_NS}}}moduleItem"
    )
    # Build <{field_type} name="..."><value>...</value></{field_type}>
    # and attach it under the moduleItem element.
    field_elem = Element(field_type)
    field_elem.attrib["name"] = name
    value_elem = Element("value")
    value_elem.text = value
    field_elem.append(value_elem)
    module_elem.append(field_elem)
    data = tostring(root, encoding="utf-8", xml_declaration=True)
    # PUT the single-field update to the MuseumPlus REST endpoint.
    response = await session.put(
        f"{MUSEUMPLUS_URL}/module/Object/{object_id}/{name}",
        headers={"Content-Type": "application/xml"},
        data=data
    )
    # Raises on 4xx/5xx responses.
    response.raise_for_status()
    return True
async def add_preservation_event(museum_package, status):
    """
    Add a preservation event to the MuseumPlus service

    The event log is stored as a JSON list inside a single MuseumPlus
    object field (name/type taken from the `museumplus` config section);
    this appends one entry and writes the whole list back.
    """
    event = {
        "filename": museum_package.sip_filename,
        "status": status,
        "object_modify_date": museum_package.museum_object.modified_date.isoformat(),
        # Timestamps are recorded in UTC, ISO-8601.
        "date": datetime.datetime.now(datetime.timezone.utc).isoformat()
    }
    field_name = CONFIG["museumplus"]["object_preservation_field_name"]
    field_type = CONFIG["museumplus"]["object_preservation_field_type"]
    # Get the current events
    events = await get_object_field(
        session=museum_package.session,
        object_id=museum_package.museum_object.object_id,
        name=field_name
    )
    if not events:
        # Missing or empty field: start a fresh log.
        events = "[]"
    try:
        events = json.loads(events)
    except json.decoder.JSONDecodeError as exc:
        raise ValueError(
            "Could not decode MuseumPlus preservation log entries. The "
            "preservation field's content might be corrupted."
        ) from exc
    events.append(event)
    # Update the preservation events
    # NOTE(review): read-modify-write without locking; concurrent writers
    # could lose events — confirm the single-writer assumption holds.
    await set_object_field(
        session=museum_package.session,
        object_id=museum_package.museum_object.object_id,
        name=field_name,
        field_type=field_type,
        value=json.dumps(events)
    )
| 2.21875 | 2 |
Extract&Translate.py | CNLarrylai/MB2-ML-trasnaltion | 0 | 12770931 | import re
from bs4 import *
import requests
import random
import json
from hashlib import md5
# Baidu translation API credentials — fill in the APPID and Appkey
# acquired from the Baidu API console before running.
appid = ''
appkey = ''

# Translate from language A to language B; for more language codes refer
# to `https://api.fanyi.baidu.com/doc/21`
from_lang = 'en'
to_lang = 'zh'

# Endpoint the translation requests are sent to.
endpoint = 'http://api.fanyi.baidu.com'
path = '/api/trans/vip/translate'
url = endpoint + path

# example query (unused below; `translate` builds its own query)
query = 'Hello World! This is 1st paragraph.\nThis is 2nd paragraph.'

# Generate salt and sign (the request checksum) — see make_md5 below.
def make_md5(s, encoding='utf-8'):
    """Return the hexadecimal MD5 digest of *s* encoded with *encoding*."""
    digest = md5(s.encode(encoding))
    return digest.hexdigest()
# Everything above deals with the machine-translation API;
# everything below extracts the translatable texts from the XML files.

# Extract the keys that need translating from the source XML: every
# double-quoted value containing '=' (i.e. "{id}=text" style entries).
with open("totsk_troops.xml", "r", encoding='utf-8') as f:
    origin_content = f.read()
strings_src = re.findall(r'"(.*?)"', origin_content)
strings = list()
for string in strings_src:
    if '=' in string:
        strings.append(string)

# Open the template XML that the translated strings are appended to.
with open("template.xml", "r", encoding='utf-8') as f:
    template = BeautifulSoup(f.read(), 'xml')
common = template.find('strings')

# Extract the original string id (the part before the text) — see get_id.
def get_id(_string):
    """Return the substring between the first '=' and the first '}',
    with spaces and apostrophes replaced by underscores."""
    start = _string.index('=') + 1
    end = _string.index('}')
    return _string[start:end].replace(" ", "_").replace("'", "_")
# Generate a new id from the text that follows the closing '}'.
def replace_id(_string):
    """Return the text after the first '}' with ' ', "'", ':', ')' and '('
    each replaced by an underscore."""
    tail = _string[_string.index('}') + 1:]
    for ch in (" ", "'", ":", ")", "("):
        tail = tail.replace(ch, "_")
    return tail
def get_string(_string):
    """Return the text that follows the first '}' in *_string*."""
    brace = _string.index('}')
    return _string[brace + 1:]
def translate(_string):
    """Send *_string* to the Baidu translation API and return the result.

    Relies on the module-level settings `appid`, `appkey`, `from_lang`,
    `to_lang` and `url`. Prints both the source and the translated text.
    """
    print(_string)
    query = _string
    # Baidu signs each request with md5(appid + query + salt + appkey).
    salt = random.randint(32768, 65536)
    sign = make_md5(appid + query + str(salt) + appkey)
    # Build request
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    payload = {'appid': appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
    # Send request
    r = requests.post(url, params=payload, headers=headers)
    result = r.json()
    # NOTE(review): assumes the response always contains "trans_result";
    # an API error (e.g. bad credentials) would raise KeyError here.
    translate_result = result["trans_result"][0]["dst"]
    print(translate_result)
    return translate_result
    # Show response
    #print(json.dumps(result, indent=4, ensure_ascii=False))
# Translate every extracted string and append it to the template.
for string in strings:
    # enable this line to make new id from name
    #tag = template.new_tag(name='string', attrs={'id': replace_id(string).lower(), 'text': get_string(string)})
    original = get_string(string)
    translation = translate(original)
    # enable this line to read original String IDs
    tag = template.new_tag(name='string', attrs={'id': get_id(string), 'text': translation})
    common.append(tag)

# Write the translated strings out as a new XML file.
with open("totsk_troops_CNs.xml", "w+", encoding='utf-8') as f:
    f.write(template.prettify())
| 2.6875 | 3 |
threading/2_Manual_threading.py | BenedictusAryo/python_threading | 1 | 12770932 | # 2 Using Manual threading in python
# Import Threading & Time
import threading
import time

# Start the wall-clock timer for the whole script.
start = time.perf_counter()

# Simple worker that sleeps for 1 second (stands in for I/O-bound work).
def do_something():
    print('Sleeping 1 second..')
    time.sleep(1)
    print('Done Sleeping..')

# Create two threads, start them, then join (wait for both to finish).
# Because the sleeps overlap, total runtime is ~1 s instead of ~2 s.
t1 = threading.Thread(target=do_something)
t2 = threading.Thread(target=do_something)
t1.start()
t2.start()
t1.join()
t2.join()

# Finish counting and show script runtime
finish = time.perf_counter()
print(f"Finished in {round(finish-start,2)} second(s)")
| 3.59375 | 4 |
youwol_utils/utils.py | youwol/py-youwol | 0 | 12770933 | import asyncio
import base64
import itertools
import json
import os
from enum import Enum
from pydantic import BaseModel
from pathlib import Path, PosixPath
from typing import Union, List, cast, Mapping, Callable, Iterable, Any
import aiohttp
from fastapi import HTTPException
from starlette.requests import Request
from youwol_utils import JSON
from youwol_utils.clients.types import DocDb
from youwol_utils.clients.utils import raise_exception_from_response, to_group_id, to_group_scope
# Flatten one level of nesting (alias for itertools.chain.from_iterable).
flatten = itertools.chain.from_iterable
def find_platform_path():
    """Return the platform root: this file's path truncated at '/services'."""
    root = __file__.split('/services')[0]
    return Path(root)
def user_info(request: Request):
    # User claims attached to the request state by the authentication
    # middleware earlier in the pipeline.
    return request.state.user_info
def get_user_id(request: Request):
    # 'sub' is the OpenID Connect subject identifier of the caller.
    return user_info(request)['sub']
def private_group_id(user):
    """Return the id of the user's personal ("private") group."""
    return "private_{}".format(user['sub'])
def is_authorized_write(request: Request, group_id):
    """Return True if the requesting user may write into `group_id`.

    Group membership is required; on top of that, some scopes are
    restricted to an explicit allow-list of usernames.
    """
    user = user_info(request)
    group_ids = get_user_group_ids(user)
    if group_id not in group_ids:
        return False

    # Per-scope allow-list of usernames permitted to write.
    # NOTE(review): '<EMAIL>' looks like a redacted placeholder — confirm
    # the intended username before relying on this rule.
    permissions = {
        '/youwol-users': ['<EMAIL>']
    }
    scope = to_group_scope(group_id)
    if scope in permissions:
        return user['preferred_username'] in permissions[scope]
    return True
def get_all_individual_groups(groups: List[str]) -> List[Union[str, None]]:
    """Expand a list of group paths with all their ancestor paths.

    E.g. ["/a/b/c"] yields "/a" and "/a/b" in addition to the input
    groups themselves. `None` (the "no group" marker) is always included.
    Order is unspecified (a set is used for de-duplication).
    """

    def get_ancestors(elements: List[str]) -> List[str]:
        # '/'-joined prefixes of `elements`, excluding the full path itself.
        return ['/'.join(elements[0:i]) for i in range(1, len(elements))]

    parts = [group.split('/') for group in groups if group]
    ancestors = itertools.chain.from_iterable(get_ancestors(part) for part in parts)
    # Drop empty prefixes (paths start with '/', so parts[0] == '') and
    # add the None marker.
    # FIX: the original wrote `cast(any, [None])`, using the builtin `any`
    # where `typing.Any` was intended (it only worked because `cast`
    # ignores its first argument at runtime).
    candidates = [e for e in ancestors if e] + cast(Any, [None])
    return list(set(groups + candidates))
def get_user_group_ids(user) -> List[Union[str, None]]:
    """All group ids the user belongs to: every 'memberof' group plus all
    of their ancestor groups, plus the user's private group."""
    group_ids = [to_group_id(g) for g in get_all_individual_groups(user["memberof"]) if g is not None]
    return [private_group_id(user)] + group_ids
def get_leaf_group_ids(user) -> List[Union[str, None]]:
    """Ids of the user's direct ('leaf') groups plus the private group; ancestors are NOT expanded."""
    leaf_ids = [to_group_id(g) for g in user["memberof"] if g is not None]
    return [private_group_id(user)] + leaf_ids
def ensure_group_permission(request: Request, group_id: str):
    """Raise HTTP 401 unless the requesting user belongs to *group_id*.

    NOTE(review): 403 would be the more conventional status for an
    authorization failure, but callers may rely on 401 — left unchanged.
    """
    user = user_info(request)
    allowed = get_user_group_ids(user)
    if group_id in allowed:
        return
    raise HTTPException(status_code=401, detail="User can not get/post resource")
def full_local_fake_user(request):
    """Build a fake OpenID user-info payload for local development.

    The identity is selected via the 'user-name' request header; 'public' and
    'test' map to fixed accounts, anything else gets a generic dev identity
    with a hard-coded 'sub'.
    """
    user_name = request.headers.get('user-name', "<EMAIL>")
    if user_name == "public":
        return {
            "sub": to_group_id(user_name), "email_verified": True, "name": "public account",
            "preferred_username": "public account", "email": "<EMAIL>",
            "memberof": [
                "/youwol-users"
            ],
        }
    if user_name == "test":
        return {
            "sub": to_group_id(user_name), "email_verified": True, "name": "test account",
            "preferred_username": "test account", "email": "<EMAIL>",
            "memberof": ["/youwol-users/postman-tester/subchildtest1",
                "/youwol-users/postman-tester/subchildtest2",
                "/youwol-users/youwol-devs",
                ],
        }
    # Default: generic developer identity with a fixed subject id.
    return {
        "sub": "82bcba26-65d7-4072-afc4-a28bb58611c4",
        "email_verified": True,
        "name": "test account",
        "preferred_username": user_name,
        "memberof": [
            "/youwol-users/postman-tester/subchildtest1",
            "/youwol-users/postman-tester/subchildtest2",
            "/youwol-users/youwol-devs",
            "/youwol-users/arche"
        ],
        "email": user_name,
    }
async def get_access_token(client_id: str, client_secret: str, client_scope: str, openid_host: str):
    """Fetch an OAuth2 token via the client-credentials grant from the Keycloak 'youwol' realm.

    Returns the parsed JSON token response on HTTP 200; otherwise delegates to
    raise_exception_from_response.
    """
    body = {
        "client_id": client_id,
        "grant_type": "client_credentials",
        "client_secret": client_secret,
        "scope": client_scope
    }
    url = f"https://{openid_host}/auth/realms/youwol/protocol/openid-connect/token"
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # NOTE(review): verify_ssl=False disables certificate validation — only
    # acceptable for internal/dev endpoints; confirm before production use.
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
        async with await session.post(url, data=body, headers=headers) as resp:
            if resp.status == 200:
                return await resp.json()
            await raise_exception_from_response(resp)
async def get_headers_auth_admin_from_env():
    """Build an admin Authorization header using AUTH_* environment variables.

    Reads AUTH_CLIENT_ID, AUTH_CLIENT_SECRET, AUTH_CLIENT_SCOPE and AUTH_HOST;
    unset variables are passed through as None and will fail in get_access_token.
    """
    client_id = os.getenv("AUTH_CLIENT_ID")
    client_secret = os.getenv("AUTH_CLIENT_SECRET")
    client_scope = os.getenv("AUTH_CLIENT_SCOPE")
    openid_host = os.getenv("AUTH_HOST")
    resp = await get_access_token(client_id=client_id, client_secret=client_secret, client_scope=client_scope,
                                  openid_host=openid_host)
    access_token = resp['access_token']
    return {"Authorization": f"Bearer {access_token}"}
async def get_headers_auth_admin_from_secrets_file(file_path: Path, url_cluster: str, openid_host: str):
    """Build an admin Authorization header from a JSON secrets file keyed by cluster URL."""
    secret = json.loads(file_path.read_text())[url_cluster]
    resp = await get_access_token(secret["clientId"], secret["clientSecret"], secret["scope"], openid_host=openid_host)
    access_token = resp['access_token']
    return {"Authorization": f"Bearer {access_token}"}
def generate_headers_downstream(incoming_headers):
    """Forward only auth-related headers ('Authorization', 'user-name') to downstream services."""
    forwarded = {}
    for header_name in ("Authorization", "user-name"):
        if header_name in incoming_headers:
            forwarded[header_name] = incoming_headers.get(header_name)
    return forwarded
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
async def get_group(primary_key: str, primary_value: Union[str, float, int, bool], groups: List[str], doc_db: DocDb,
                    headers: Mapping[str, str]):
    """Return the first group (by *groups* order) owning a document matching primary_key == primary_value.

    Queries every group concurrently and returns None when no group has a match.
    """
    requests = [doc_db.query(query_body=f"{primary_key}={primary_value}#1", owner=group, headers=headers)
                for group in groups]
    responses = await asyncio.gather(*requests)
    group = next((g for i, g in enumerate(groups) if responses[i]["documents"]), None)
    return group
def check_permission_or_raise(target_group: Union[str, None], allowed_groups: List[Union[None, str]]):
    """Raise HTTP 401 unless *target_group* matches one of *allowed_groups*; None means unrestricted.

    NOTE(review): the check is substring containment (`target_group in g`),
    not equality — e.g. '/a' matches '/ab'. Confirm this is intentional.
    """
    if not target_group:
        return
    compatible_groups = [g for g in allowed_groups if target_group in g]
    if len(compatible_groups) == 0:
        raise HTTPException(status_code=401,
                            detail=f"scope '{target_group}' not included in user groups")
def get_content_type(file_name: str):
    """Guess a MIME Content-Type from the extension parts of *file_name*.

    All dot-separated suffixes after the base name are considered, so
    'bundle.js.br' still resolves as JavaScript. Unknown extensions fall
    back to 'application/octet-stream'.
    """
    mime_by_ext = [
        ("json", "application/json"),
        ("yaml", "application/yaml"),
        ("js", "application/javascript;charset=UTF-8"),
        ("css", "text/css"),
        ("woff2", "font/woff2"),
        ("svg", "image/svg+xml"),
        ("png", "image/png"),
        ("txt", "text/plain"),
    ]
    extensions = set(file_name.split('.')[1:])
    for ext, mime in mime_by_ext:
        if ext in extensions:
            return mime
    return "application/octet-stream"
def get_content_encoding(file_name: str):
    """Return the HTTP Content-Encoding implied by the final file extension ('' if none)."""
    suffix = file_name.rsplit('.', 1)[-1]
    return suffix if suffix in ("br", "gzip") else ""
async def retrieve_user_info(auth_token: str, openid_host: str):
    """Resolve a bearer token into the user-info payload from the Keycloak 'youwol' realm.

    Raises HTTPException mirroring the upstream status on any non-200 response.
    """
    headers = {"authorization": f"Bearer {auth_token}"}
    url = f"https://{openid_host}/auth/realms/youwol/protocol/openid-connect/userinfo"
    # NOTE(review): certificate validation is disabled (verify_ssl=False) —
    # confirm this is limited to dev/internal hosts.
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
        async with await session.post(url=url, headers=headers) as resp:
            if resp.status != 200:
                raise HTTPException(status_code=resp.status, detail=await resp.read())
            resp = await resp.json()
            return resp
async def get_myself_auth_token(secret_path: Path, openid_host):
    """Obtain an OpenID access token for the configured user via the password grant.

    Parameters
    ----------
    secret_path : Path to the JSON secrets file holding credentials.
    openid_host : host name of the Keycloak/OpenID server.
    """
    # BUG FIX: the original used open(...) without ever closing the handle;
    # read_text() closes it and matches get_headers_auth_admin_from_secrets_file.
    secret = json.loads(Path(secret_path).read_text())
    form = aiohttp.FormData()
    form.add_field("username", secret["myself"]["username"])
    # NOTE(review): "<PASSWORD>" looks like a redaction artifact — most likely
    # this should be secret["myself"]["password"]; confirm against the real repo.
    form.add_field("password", secret["<PASSWORD>"]["password"])
    form.add_field("client_id", secret["dev.platform.youwol.com"]["clientId"])
    form.add_field("grant_type", "password")
    form.add_field("client_secret", secret["dev.platform.youwol.com"]["clientSecret"])
    form.add_field("scope", "email profile youwol_dev")
    url = f"https://{openid_host}/auth/realms/youwol/protocol/openid-connect/token"
    async with aiohttp.ClientSession() as session:
        async with await session.post(url=url, data=form) as resp:
            resp = await resp.json()
            return resp['access_token']
def exception_message(error: Exception):
    """Best human-readable message for *error* (HTTPException carries it in .detail)."""
    return error.detail if isinstance(error, HTTPException) else str(error)
def decode_id(asset_id) -> str:
    """Decode a urlsafe-base64 asset id back to the raw string id."""
    return base64.urlsafe_b64decode(asset_id.encode()).decode()
def encode_id(raw_id) -> str:
    """Encode a raw string id as urlsafe base64."""
    return base64.urlsafe_b64encode(raw_id.encode()).decode()
def to_json(obj: BaseModel) -> JSON:
    """Convert a pydantic model into a JSON-serializable structure.

    Paths become strings, callables become the literal "function", enums
    become their name, and non-list/str iterables are materialized as lists.
    The converted dict from obj.dict() is mutated in place by the recursion.
    """
    def to_serializable(v):
        # Scalar/leaf conversion only; dicts and lists are handled by the
        # recursion below.
        if isinstance(v, Path):
            return str(v)
        if isinstance(v, PosixPath):
            return str(v)
        if isinstance(v, Callable):
            return "function"
        if isinstance(v, Enum):
            return v.name
        if isinstance(v, Iterable) and not isinstance(v, list) and not isinstance(v, str):
            v = list(v)
        return v
    base = obj.dict()
    def to_json_rec(_obj: Any):
        # Mutates _obj in place. Only existing keys are reassigned, so
        # mutating during items() iteration is safe (dict size is unchanged).
        if isinstance(_obj, dict):
            for k, v in _obj.items():
                if not isinstance(v, dict) and not isinstance(v, list):
                    _obj[k] = to_serializable(v)
                if isinstance(v, dict):
                    to_json_rec(v)
                if isinstance(v, list):
                    for i, e in enumerate(v):
                        if not isinstance(e, dict) and not isinstance(e, list):
                            _obj[k][i] = to_serializable(e)
                        else:
                            to_json_rec(e)
    # NOTE(review): an iterable converted to a list by to_serializable is not
    # itself recursed into — nested non-serializable elements inside it would
    # survive. Confirm whether such values can occur in practice.
    to_json_rec(base)
    return base
| 2.078125 | 2 |
gpw_opcje_db.py | memfagor/gpw_opcje_db | 0 | 12770934 | #!/usr/bin/env python3
import logging
import sqlite3
import os
from json import load
from urllib.request import urlopen
from bs4 import BeautifulSoup
from re import compile
from datetime import date, datetime
def GetScriptPath():
    """Absolute directory containing this script (POSIX-style '/' handling)."""
    parts = os.path.abspath(__file__).split('/')
    return '/'.join(parts[:-1])
def GetConfig(path, fname):
    """Load and return the JSON configuration file *fname* located in directory *path*."""
    config_path = os.path.join(path, fname)
    with open(config_path, 'r') as config_file:
        return load(config_file)
def GetOptions(web_page, config):
    """Scrape GPW option quotes from *web_page* (an open HTTP response) into a dict.

    Returns {instrument_name: {type, wigp, exchange, change, pchange, open,
    max, min, date, term, tstamp}} where dates are ISO-8601 strings.
    """
    def InputToFloat(inpt):
        # Strip thousands separators (incl. non-breaking spaces) and '%',
        # convert the Polish decimal comma, then parse.
        inpt = inpt.replace(' ', '')
        inpt = inpt.replace(u'\xa0', u'')
        inpt = inpt.replace('%', '')
        inpt = inpt.replace(',', '.')
        return float(inpt)
    def GetOptionType(opt_name, config):
        # The 5th character of the instrument name encodes call/put per config.
        if opt_name[4] in config['opcje_call']:
            opt_type = 'call'
        elif opt_name[4] in config['opcje_put']:
            opt_type = 'put'
        else:
            opt_type = 'undefined'
        return opt_type
    def GetTD(obj, pattern):
        # First <td> whose CSS class matches *pattern*.
        return obj.find('td', {'class': compile(pattern)})
    def GetTDContent(obj, pattern):
        return GetTD(obj, pattern).contents[0].strip()
    def ISODate(date_str, format_str):
        return datetime.strptime(date_str, format_str).isoformat()
    opcje = {}
    for walor in BeautifulSoup(web_page.read(), 'lxml').findAll('tr'):
        pole_nazwa = walor.find('td', {'class': compile('colWalor*')})
        if pole_nazwa is None:
            continue
        nazwa = pole_nazwa.contents[0].strip().upper()
        opcje[nazwa] = {}
        opcje[nazwa]['type'] = GetOptionType(nazwa, config)
        # Last four characters encode the WIG20 strike level.
        opcje[nazwa]['wigp'] = float(nazwa[-4:])
        opcje[nazwa]['exchange'] = InputToFloat(GetTDContent(walor, 'colKurs*'))
        opcje[nazwa]['change'] = InputToFloat(GetTDContent(walor, 'colZmiana*'))
        opcje[nazwa]['pchange'] = InputToFloat(GetTDContent(walor, 'colZmianaProcentowa*'))
        opcje[nazwa]['open'] = InputToFloat(GetTDContent(walor, 'colOtwarcie*'))
        # NOTE(review): 'calMaxi*'/'calMini*' break the 'col*' naming pattern
        # used by every other column — confirm these are the site's real CSS
        # classes and not typos for 'colMaxi*'/'colMini*'.
        opcje[nazwa]['max'] = InputToFloat(GetTDContent(walor, 'calMaxi*'))
        opcje[nazwa]['min'] = InputToFloat(GetTDContent(walor, 'calMini*'))
        # Update column carries 'DD.MM HH:MM'; prepend the current year.
        date_str = str(date.today().year) + '.' + GetTDContent(walor, 'colAktualizacja*')
        opcje[nazwa]['date'] = ISODate(date_str, "%Y.%d.%m %H:%M")
        date_str = GetTDContent(walor, 'colTermin*')
        opcje[nazwa]['term'] = ISODate(date_str, "%Y-%m-%d")
        opcje[nazwa]['tstamp'] = datetime.now().isoformat()
    return opcje
def main():
    """Entry point: scrape GPW option quotes and append them to a per-instrument SQLite table."""
    path = GetScriptPath()
    cfg = GetConfig(path, 'gpw_opcje_db.conf')
    logging.basicConfig(
        format='%(asctime)s %(levelname)s:%(message)s',
        filename=os.path.join(path, cfg['logfile']),
        level=logging.DEBUG)
    if not os.path.isdir(os.path.join(path, 'database')):
        logging.warning('Folder "database" does not exist - creating one.')
        os.makedirs(os.path.join(path, 'database'), mode=0o755)
    try:
        strona = urlopen(cfg['adres_opcje'])
    except Exception as e:
        logging.error(e)
        raise SystemExit()
    try:
        db = sqlite3.connect(os.path.join(path, 'database', cfg['database']))
    except Exception as e:
        logging.error(e)
        raise SystemExit()
    cur = db.cursor()
    for key, value in GetOptions(strona, cfg).items():
        cols = value.keys()
        vals = value.values()
        # NOTE(review): the table name is interpolated with str.format — this
        # is only safe because `key` is a scraped instrument name; row values
        # are correctly bound via '?' placeholders.
        query = "CREATE TABLE IF NOT EXISTS {0} (type TEXT, wigp REAL," \
            " exchange REAL, change REAL, pchange REAL, open REAL," \
            "max REAL, min REAL, date TEXT, term TEXT," \
            "tstamp TEXT PRIMARY KEY)".format(key)
        cur.execute(query)
        query = "INSERT INTO {0} ({1}) VALUES ({2})".format(
            key,', '.join(cols),', '.join(['?'] * len(value)))
        cur.execute(query, list(vals))
    db.commit()
    db.close()
if __name__ == "__main__":
main()
| 2.546875 | 3 |
sync_my_tasks/command.py | wilrnh/sync-my-tasks | 2 | 12770935 | """sync-my-tasks.
Usage:
sync-my-tasks (--from-asana --asana-workspace=<name> [--asana-token-file PATH]) (--to-mstodo)
sync-my-tasks (-h | --help)
sync-my-tasks --version
Options:
-h --help Show this screen.
--version Show version.
--from-asana Pull tasks from Asana.
--asana-workspace=<name> Name of workspace
--asana-token-file PATH Path to file containing the Asana Personal token. [default: ./asana-token]
--to-mstodo Push tasks to Microsoft To-Do.
"""
from docopt import docopt
from sync_my_tasks.provider_asana import AsanaProvider
from sync_my_tasks.provider_mstodo import MsTodoProvider
def main():
    """CLI entry point: copy task lists from the export provider into the import provider."""
    arguments = docopt(__doc__, version='sync-my-tasks 0.1.0')
    # Set up Asana as an export provider
    if arguments['--from-asana']:
        with open(arguments['--asana-token-file']) as asana_token_file:
            asana_token = asana_token_file.read()
        export_provider = AsanaProvider(asana_token, arguments['--asana-workspace'])
    # Set up Microsoft To-Do as an import provider
    if arguments['--to-mstodo']:
        import_provider = MsTodoProvider()
    # NOTE(review): if either flag were absent, export_provider/import_provider
    # would be unbound (NameError) below; the docopt usage pattern currently
    # requires both, so this only matters if the usage string changes.
    # Export tasks to memory
    task_lists = export_provider.export_tasks()
    # Import tasks from memory
    import_provider.import_tasks(task_lists)
if __name__ == '__main__':
main() | 2.359375 | 2 |
projects/anosql/src/python/anosql/adapters/sqlite3.py | arrdem/source | 4 | 12770936 | <gh_stars>1-10
"""
A driver object implementing support for SQLite3
"""
from contextlib import contextmanager
import logging
import re
import sqlite3
log = logging.getLogger(__name__)
class SQLite3DriverAdapter(object):
    """anosql driver adapter implementing query execution against sqlite3 connections.

    All methods are static; a live sqlite3.Connection is passed in by the caller.
    """
    @staticmethod
    def process_sql(_query_name, _op_type, sql):
        """Munge queries.

        Args:
            _query_name (str): The name of the sql query.
            _op_type (anosql.SQLOperationType): The type of SQL operation performed by the sql.
            sql (str): The sql as written before processing.

        Returns:
            str: A normalized form of the query suitable to logging or copy/paste.
        """
        # Normalize out comments
        sql = re.sub(r"-{2,}.*?\n", "", sql)
        # Normalize out a variety of syntactically irrelevant whitespace
        #
        # FIXME: this is not strictly safe — a quoted identifier containing
        # significant whitespace (e.g. `foo `) would be mangled by these passes.
        sql = re.sub(r"\s+", " ", sql)
        sql = re.sub(r"\(\s+", "(", sql)
        sql = re.sub(r"\s+\)", ")", sql)
        sql = re.sub(r"\s+,", ",", sql)
        sql = re.sub(r"\s+;", ";", sql)
        return sql
    @staticmethod
    def select(conn, _query_name, sql, parameters):
        """Run a SELECT and return all rows as a list (cursor is closed before returning)."""
        cur = conn.cursor()
        log.debug({"sql": sql, "parameters": parameters})
        cur.execute(sql, parameters)
        results = cur.fetchall()
        cur.close()
        return results
    @staticmethod
    @contextmanager
    def select_cursor(conn: sqlite3.Connection, _query_name, sql, parameters):
        """Context manager yielding a live cursor for a SELECT; closes it on exit."""
        cur = conn.cursor()
        log.debug({"sql": sql, "parameters": parameters})
        cur.execute(sql, parameters)
        try:
            yield cur
        finally:
            cur.close()
    @staticmethod
    def insert_update_delete(conn: sqlite3.Connection, _query_name, sql, parameters):
        """Execute a single INSERT/UPDATE/DELETE statement (no result returned)."""
        log.debug({"sql": sql, "parameters": parameters})
        conn.execute(sql, parameters)
    @staticmethod
    def insert_update_delete_many(
        conn: sqlite3.Connection, _query_name, sql, parameters
    ):
        """Execute an INSERT/UPDATE/DELETE once per parameter set via executemany."""
        log.debug({"sql": sql, "parameters": parameters})
        conn.executemany(sql, parameters)
    @staticmethod
    def insert_returning(conn: sqlite3.Connection, _query_name, sql, parameters):
        """Execute an INSERT; return fetched rows for RETURNING queries, else the last row id."""
        cur = conn.cursor()
        log.debug({"sql": sql, "parameters": parameters})
        cur.execute(sql, parameters)
        if "returning" not in sql.lower():
            # Original behavior - return the last row ID
            results = cur.lastrowid
        else:
            # New behavior - honor a `RETURNING` clause
            results = cur.fetchall()
            log.debug({"results": results})
        cur.close()
        return results
    @staticmethod
    def execute_script(conn: sqlite3.Connection, sql):
        """Execute a multi-statement SQL script via executescript."""
        log.debug({"sql": sql, "parameters": None})
        conn.executescript(sql)
| 2.9375 | 3 |
semseg/models/decoder.py | hzhupku/SemiSeg-AEL | 45 | 12770937 | import torch
import torch.nn as nn
from torch.nn import functional as F
from .base import get_syncbn
from .base import ASPP
class dec_deeplabv3(nn.Module):
    """DeepLabV3 decoder head: ASPP followed by a 3x3 conv + 1x1 classifier.

    Emits per-class logits at the resolution of the incoming feature map
    (upsampling to input size is the caller's responsibility).
    """
    def __init__(self, in_planes, num_classes=19, inner_planes=256, sync_bn=False, dilations=(12, 24, 36)):
        super(dec_deeplabv3, self).__init__()
        # SyncBatchNorm for multi-GPU training, plain BatchNorm2d otherwise.
        norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
        self.aspp = ASPP(in_planes, inner_planes=inner_planes, sync_bn=sync_bn, dilations=dilations)
        self.head = nn.Sequential(
            nn.Conv2d(self.aspp.get_outplanes(), 256, kernel_size=3, padding=1, dilation=1, bias=False),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
    def forward(self, x):
        """Return per-class logits for feature map *x*."""
        #print('debug1',x.shape)
        aspp_out = self.aspp(x)
        res = self.head(aspp_out)
        return res
class dec_deeplabv3_plus(nn.Module):
    """DeepLabV3+ decoder: ASPP on the deepest features fused with low-level features.

    forward() expects a 4-tuple of backbone feature maps (x1..x4); ASPP output
    is upsampled to x1's resolution, concatenated with the projected low-level
    features, refined, and classified.
    """
    def __init__(self, in_planes, num_classes=19, inner_planes=256, sync_bn=False, dilations=(12, 24, 36)):
        super(dec_deeplabv3_plus, self).__init__()
        norm_layer = get_syncbn() if sync_bn else nn.BatchNorm2d
        self.aspp = ASPP(in_planes, inner_planes=inner_planes, sync_bn=sync_bn, dilations=dilations)
        self.head = nn.Sequential(
            nn.Conv2d(self.aspp.get_outplanes(), 256, kernel_size=3, padding=1, dilation=1, bias=False),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1))
        self.final = nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        # Refines the concatenated (low-level 256 + ASPP 256) = 512 channels.
        self.tail = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias=True),
            norm_layer(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),
            norm_layer(256),nn.ReLU(inplace=True),nn.Dropout2d(0.1))
        # NOTE(review): assumes the low-level feature map x1 has 256 channels
        # — confirm against the backbone used.
        self.low_conv = nn.Sequential(nn.Conv2d(256, 256, kernel_size=1),
                                      norm_layer(256),
                                      nn.ReLU(inplace=True))
    def forward(self, x):
        """x is a tuple (x1, x2, x3, x4) of backbone feature maps; returns logits at x1's resolution."""
        x1, x2, x3, x4 = x
        aspp_out = self.aspp(x4)
        low_feat = self.low_conv(x1)
        aspp_out = self.head(aspp_out)
        h,w = low_feat.size()[-2:]
        aspp_out = F.interpolate(aspp_out,size=(h,w),mode='bilinear',align_corners=True)
        aspp_out = torch.cat((low_feat,aspp_out),dim=1)
        aspp_out = self.tail(aspp_out)
        res = self.final(aspp_out)
        return res
class Aux_Module(nn.Module):
    """Auxiliary segmentation head: 3x3 conv -> norm -> ReLU -> dropout -> 1x1 classifier."""
    def __init__(self, in_planes, num_classes=19, sync_bn=False):
        super(Aux_Module, self).__init__()
        # SyncBatchNorm for multi-GPU training, plain BatchNorm2d otherwise.
        norm = get_syncbn() if sync_bn else nn.BatchNorm2d
        layers = [
            nn.Conv2d(in_planes, 256, kernel_size=3, stride=1, padding=1),
            norm(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True),
        ]
        self.aux = nn.Sequential(*layers)
    def forward(self, x):
        """Return per-class logits at the spatial resolution of *x*."""
        return self.aux(x)
| 2.296875 | 2 |
daisy/model/point/MFRecommender.py | paulagd/GCE | 1 | 12770938 | <gh_stars>1-10
import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from IPython import embed
from daisy.model.GCE.gce import GCE
class PointMF(nn.Module):
    def __init__(self,
                 user_num,
                 max_dim,
                 factors=100,
                 optimizer='adam',
                 epochs=20,
                 lr=0.01,
                 reg_1=0.001,
                 reg_2=0.001,
                 loss_type='CL',
                 gpuid='0',
                 X = None,
                 A = None,
                 reindex=False,
                 GCE_flag=False,
                 dropout=0,
                 early_stop=True):
        """
        Point-wise MF Recommender Class

        Parameters
        ----------
        user_num : int, the number of users
        max_dim : int, the number of items or context max dimension
        factors : int, the number of latent factor
        optimizer : str, optimizer name (stored for the external training loop)
        epochs : int, number of training epochs
        lr : float, learning rate
        reg_1 : float, first-order regularization term
        reg_2 : float, second-order regularization term
        loss_type : str, loss function type
        gpuid : str, GPU ID
        X, A : graph features / adjacency forwarded to GCE when GCE_flag is set
        reindex : bool, if True users, items and (optionally) context share one
            embedding table over a single id space of size max_dim
        GCE_flag : bool, use graph convolutional embeddings (requires reindex=True)
        dropout : float, dropout probability applied to embeddings in forward()
        early_stop : bool, whether to activate early stop mechanism (currently
            not stored on the module; consumed by the caller)
        """
        super(PointMF, self).__init__()
        os.environ['CUDA_VISIBLE_DEVICES'] = gpuid
        cudnn.benchmark = True
        self.lr = lr
        self.reg_1 = reg_1
        self.reg_2 = reg_2
        self.epochs = epochs
        self.optimizer = optimizer
        self.dropout = dropout
        self.reindex = reindex
        self.GCE_flag = GCE_flag
        if GCE_flag:
            print('GCE EMBEDDINGS DEFINED')
            # BUG FIX: the original assigned a ValueError *instance* to
            # self.embeddings instead of raising it (`... if reindex else
            # ValueError(...)`), silently producing a broken model. Fail fast.
            if not reindex:
                raise ValueError('Can not use GCE with reindex=False')
            self.embeddings = GCE(max_dim, factors, X, A)
        else:
            if reindex:
                # One shared table indexing users + items (+ context).
                self.embeddings = nn.Embedding(max_dim, factors)
                nn.init.normal_(self.embeddings.weight, std=0.01)
            else:
                self.embed_user = nn.Embedding(user_num, factors)
                self.embed_item = nn.Embedding(max_dim, factors)
                nn.init.normal_(self.embed_user.weight, std=0.01)
                nn.init.normal_(self.embed_item.weight, std=0.01)
        self.loss_type = loss_type

    def forward(self, user, item, context):
        """Score aligned batches of user/item(/context) ids; returns a 1-D tensor of predictions."""
        if self.reindex:
            if context is None:
                ids = torch.stack((user, item), dim=1)
            else:
                ids = torch.stack((user, item, context), dim=1)
            embeddings = self.embeddings(ids)
            nn.functional.dropout(embeddings, p=self.dropout, training=self.training, inplace=True)
            # Element-wise product across the stacked entities, summed over
            # the latent dimension (a generalized dot product).
            pred = embeddings.prod(dim=1).sum(dim=1)
            return pred
        else:
            embed_user = self.embed_user(user)
            embed_item = self.embed_item(item)
            pred = (embed_user * embed_item).sum(dim=-1)
            return pred

    def predict(self, u, i, c):
        """Score (u, i, c) batches and return the predictions moved to CPU."""
        pred = self.forward(u, i, c).cpu()
        return pred
bot/utils/gif_maker.py | christianbernasconi96/clothing_recommendation | 0 | 12770939 | import cv2
import numpy as np
from skimage.segmentation import slic
from skimage import color
from skimage.measure import regionprops
from PIL import Image, ImageDraw
import moviepy.editor as mp
import random
import os
class GifMaker():
    """Builds a 'mosaic reveal' animation of an image from SLIC superpixels.

    Produces a GIF and an MP4 next to the source image, revealing
    average-colored superpixels a few segments per frame.
    """
    def to_mosaic_gif(self, img_path, n_segments = 150, segments_per_frame = 3):
        """Render the mosaic animation for *img_path* and write <name>.gif and <name>.mp4."""
        img = cv2.imread(img_path)
        # generate superpixels
        segments = slic(img, n_segments = n_segments, sigma = 5)
        # generate image with superpixels avg color
        superpixels_image = color.label2rgb(segments, img, kind='avg')
        superpixels_image = cv2.normalize(superpixels_image, None, alpha = 0,
            beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_8U)
        mask = np.zeros(img.shape[:2], dtype = "uint8")
        n_segments = len(np.unique(segments))
        frames = []
        for (i, segVal) in enumerate(np.unique(segments)):
            # construct a mask for the segment (mask accumulates across iterations)
            mask[segments == segVal] = 255
            a = cv2.bitwise_and(superpixels_image, superpixels_image, mask = mask)
            a = np.uint8(a)
            a = cv2.cvtColor(a, cv2.COLOR_BGR2RGB)
            # NOTE(review): on every segments_per_frame-th iteration a frame is
            # appended here AND again below, duplicating it — confirm whether
            # that pacing is intentional.
            if i % segments_per_frame == 0:
                n_segments -= segments_per_frame
                frames.append(Image.fromarray(a))
            if n_segments > 0:
                frames.append(Image.fromarray(a))
        path_splitted = os.path.split(img_path)
        filename_with_extension = path_splitted[1]
        path = path_splitted[0]
        filename = filename_with_extension.split('.')[0]
        self.__save_gif(path, filename, frames)
        self.__to_mp4(path, filename)
    def __save_gif(self, path, filename, frames):
        """Write *frames* as an animated, looping GIF named <filename>.gif in *path*."""
        filename = filename + '.gif'
        save_path = os.path.join(path, filename)
        frames[0].save(save_path,
            save_all=True, format='GIF', append_images=frames[1:],
            optimize=True, quality=20, duration=1, loop=0)
    def __to_mp4(self, path, filename):
        """Transcode the previously written <filename>.gif in *path* to <filename>.mp4."""
        read_path = os.path.join(path, filename + '.gif')
        save_path = os.path.join(path, filename + '.mp4')
        clip = mp.VideoFileClip(read_path)
        clip.write_videofile(save_path)
if __name__ == "__main__":
img_path = './data/prova_gif.jpg'
g = GifMaker()
g.to_mosaic_gif(img_path)
| 2.515625 | 3 |
home/admin.py | technicaldwiti/Project-Certificate | 0 | 12770940 | from django.contrib import admin
from .models import Student
# Register your models here.
class StudentModelAdmin(admin.ModelAdmin):
    """Django admin configuration for Student: the change list shows each object's __str__."""
    list_display = ["__str__"]

    # NOTE(review): ModelAdmin does not read an inner Meta class (that is a
    # Model/Form convention) — this block appears to have no effect; confirm.
    class Meta:
        model = Student
admin.site.register(Student,StudentModelAdmin) | 2 | 2 |
libs/core/cornflow_core/shared/__init__.py | ggsdc/corn | 2 | 12770941 | <reponame>ggsdc/corn
"""
Exposes the objects and functions of the module
"""
from .utils import db, bcrypt
from .validators import (
check_email_pattern,
check_password_pattern,
validate_and_continue,
)
| 1.46875 | 1 |
src/pyfma/__init__.py | Vutshi/pyfma | 0 | 12770942 | from _pyfma import fma
from .__about__ import __version__
__all__ = ["__version__", "fma"]
| 0.984375 | 1 |
music_genre_classifier/__main__.py | ryansingman/music-genre-classifier | 0 | 12770943 | from typing import List
from typing import Tuple
import numpy as np
import yaml
from music_genre_classifier import dataset
from music_genre_classifier import models
if __name__ == "__main__":
    import argparse

    # Parse CLI: a YAML config path plus an optional results-display flag.
    parser = argparse.ArgumentParser(prog="Music Genre Classifier")
    parser.add_argument("classifier_conf_path", help="path to yaml config for classifier")
    parser.add_argument(
        "--display_results",
        help="if should display results of training",
        action="store_true",
    )
    args = parser.parse_args()
    with open(args.classifier_conf_path) as classifier_conf_file:
        # NOTE(review): yaml.Loader can construct arbitrary Python objects —
        # fine for a trusted local config; prefer yaml.safe_load otherwise.
        classifier_conf = yaml.load(classifier_conf_file, Loader=yaml.Loader)
    # create dataset
    full_ds: np.ndarray = dataset.create_gtzan_dataset(**classifier_conf["dataset"])
    # create train, test, validation split
    train_ds, test_ds = dataset.split_dataset(full_ds)
    # create models from config
    model_trainables: List[models.ModelTrainable] = [
        models.build_from_config(model_conf, train_ds, test_ds)
        for model_conf in classifier_conf["models"]
    ]
    # find hyperparameters (TODO: parallelize this later)
    for model in model_trainables:
        model.tune()
    # train models (TODO: parallelize this later)
    for model in model_trainables:
        model.train()
    # evaluate models (TODO: parallelize this later)
    results: List[Tuple[float, float]] = []
    for model in model_trainables:
        results.append(model.test())
    # display results
    if args.display_results:
        for model, result in zip(model_trainables, results):
            print(str(model), model._best_hyperparams.values, result)  # type: ignore
| 2.65625 | 3 |
EstruturaDeRepeticao/exercicio33.py | Nicolas-Wursthorn/exercicios-python-brasil | 0 | 12770944 | <reponame>Nicolas-Wursthorn/exercicios-python-brasil<gh_stars>0
# Prime numbers have many applications in computing, e.g. in cryptography.
# A prime is divisible only by 1 and by itself. Read an integer and report
# whether or not it is prime. (Problem statement translated from Portuguese;
# user-facing messages below are intentionally kept in Portuguese.)
numero = int(input("Digite um número: "))
div = []
count = 0
# Collect every divisor of `numero`; a prime has exactly two (1 and itself).
for i in range(numero):
    if numero % (i+1) == 0:
        count += 1
        div.append(i+1)
# Render the divisor list without the surrounding brackets for display.
div = str(div).strip("[]")
if count == 2:
    print("Esse número é primo divisível por", div)
else:
    print("Esse número não é primo, divisível por", div)
gui.py | markry11/lyrics-finder | 0 | 12770945 | <filename>gui.py
import tkinter as tk
from readonly_text import ReadonlyText
_font_size = 12
_bg_color='#ffff64'
_fg_color='#000000'
_font = ("Segoe UI", _font_size)
class _Args:
    """Minimal stand-in for parsed CLI arguments (title/artist) passed to callbacks."""
    # Defaults; Gui.on_enter fills `title` with the user's typed query.
    title = ''
    artist = ''
class Gui:
    """Always-on-top tkinter window showing lyrics in a scrollable read-only text area."""
    def __init__(self, height, width, title):
        """Build the window (sized in text rows/columns) anchored to the bottom-right of the screen."""
        root = tk.Tk()
        # Keep the window above all others.
        root.wm_attributes("-topmost", 1)
        scrollbar = tk.Scrollbar(root)
        self.text = ReadonlyText(
            root,
            height=height,
            width=width,
            bg=_bg_color,
            fg=_fg_color,
            state=tk.DISABLED,
            font=_font,
            padx = _font_size,
            pady = _font_size)
        root.resizable(False, False)
        root.title(title)
        scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.text.pack(side=tk.LEFT, fill=tk.Y)
        # Wire the scrollbar and the text widget to each other.
        scrollbar.config(command=self.text.yview)
        self.text.config(yscrollcommand=scrollbar.set)
        root.update_idletasks()
        # Position the window pad_x/pad_y pixels from the bottom-right corner.
        pad_x = 50
        pad_y = 100
        w = root.winfo_width()
        h = root.winfo_height()
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        x = screen_width - w - pad_x
        y = screen_height - h - pad_y
        root.geometry('%dx%d+%d+%d' % (w, h, x, y))
        self.root = root
    def set_text(self, text):
        """Replace the widget contents with *text*, keeping the widget read-only afterwards."""
        self.text.config(state=tk.NORMAL)
        # NOTE(review): 'reset' is not a standard Text index — presumably a
        # mark defined by ReadonlyText; confirm (plain tk.Text would raise).
        self.text.delete(1.0, 'reset')
        self.text.insert(tk.END, text)
        self.text.config(state=tk.DISABLED)
    def set_message(self, message, query):
        """Show *message* as a read-only prompt followed by editable *query*; Enter submits it."""
        self.text.config(state=tk.NORMAL)
        # NOTE(review): the trailing comma makes this line a 1-tuple
        # expression — harmless, but presumably unintentional.
        self.text.delete(1.0, 'reset'),
        self.text.insert(tk.END, f'{message}\n', 'readonly')
        self.text.insert(tk.END, query)
        self.text.bind('<Return>', self.on_enter)
    def on_enter(self, event):
        """Handle Enter: lock the widget, extract the text after the 'readonly' tag, and fire the callback."""
        event.widget.unbind('<Return>')
        event.widget.config(state=tk.DISABLED)
        readonly_index = event.widget.tag_ranges('readonly')
        if readonly_index and len(readonly_index) > 0:
            query = event.widget.get(readonly_index[-1], tk.END).strip()
            args = _Args()
            args.title = query
            self._on_enter_callback(None, args)
    def set_on_enter_callback(self, func):
        """Register func(event, args) to be invoked when the user submits a query."""
        self._on_enter_callback = func
    def set_on_control_f_callback(self, func):
        """Bind *func* to Ctrl+F on the window."""
        self.root.bind('<Control-f>', func)
    def set_on_control_r_callback(self, func):
        """Bind *func* to Ctrl+R on the window."""
        self.root.bind('<Control-r>', func)
    def run(self):
        """Enter the tkinter main loop (blocks until the window closes)."""
        self.root.mainloop()
nlp/2020-10-07/ex06.py | kogepanh/class-numeric | 0 | 12770946 | import MeCab
# Read a sentence from stdin and print, for each morpheme found by MeCab,
# the surface form alongside one of its feature fields.
# Sample input (commented out): "昨日の天気は晴れでした。"
# text = "昨日の天気は晴れでした。"
text = input()
mecab = MeCab.Tagger()
parses = mecab.parse(text)
parse = parses.split('\n')
for par in parse:
    # Each line looks like "surface\tPOS,feat1,feat2,..."; splitting on ','
    # leaves "surface\tPOS" in p[0].
    p = par.split(',')
    if p[0] == "EOS":
        break
    # NOTE(review): p[-3] is presumably the reading field of the MeCab
    # feature list — field order depends on the dictionary in use; confirm.
    print(p[0], "\t", p[-3])
| 3.28125 | 3 |
src/game.py | shrick/shricktris | 0 | 12770947 | # game.py
import pygame
from field import GameField
from preview import Preview
from brick import Brick
from figure import generate_randomized_figures as FigureFactory
from control import Control
from score import Score
import colors
GAME_TITLE = "Shricktris"
START_FPS = 12
START_GAME_STEPOVER = 8
SCREEN_RESOLUTION = (600, 800)
GRID_COLUMNS = 16
GRID_ROWS = 30
class Game:
    """Top-level Tetris-like game: owns the pygame window, field, figures, score and main loop."""
    def __init__(self):
        """Create the window and all game components; the game starts paused."""
        # init pygame components
        pygame.init()
        pygame.display.set_caption(GAME_TITLE)
        self._screen = pygame.display.set_mode(SCREEN_RESOLUTION)
        self._background = pygame.Surface(self._screen.get_size()).convert()
        self._background.fill(colors.WHITE)
        self._font = pygame.font.SysFont(None, 24)
        self._set_message("Press PAUSE key to start!", colors.GREEN)
        # init game components
        rect_pixel_length = 20
        self._field = GameField(self._screen, GRID_COLUMNS, GRID_ROWS, rect_pixel_length)
        self._preview = Preview(self._screen, SCREEN_RESOLUTION[0] - 100, 20,
            Brick.DIMENSION, int(rect_pixel_length / 2))
        self._figure_factory = FigureFactory(self._field)
        self._figure = next(self._figure_factory)
        self._next_figure = next(self._figure_factory)
        self._score = Score()
        self._control = Control(START_FPS)
        # init speed and game state
        # _stepover: frames between automatic down-steps (lower == faster);
        # _nostep: frame counter used modulo _stepover.
        self._stepover = START_GAME_STEPOVER
        self._nostep = self._stepover
        self._looping = True
        self._was_started = False
        self._has_stopped = False
        self._is_paused = True
    def _set_message(self, text, color):
        """Render *text* into the cached status-message surface (GRAY when color is None)."""
        self._text_image = self._font.render(text, True, colors.GRAY if color is None else color)
    def _display_score(self):
        """Show the running score, or the final score + quit hint once the game is over."""
        if self._has_stopped:
            score_text = self._score.get_final_score()
            self._set_message(score_text + " Game finished. Press Q to quit!", colors.RED)
        else:
            score_text = self._score.get_current_score()
            self._set_message(score_text, colors.CYAN)
        print(score_text)
    def _adjust_speed(self, delta):
        """Change _stepover by *delta*, clamped to >= 1; logs when it actually changes."""
        old_stepover = self._stepover
        self._stepover = max(self._stepover + delta, 1)
        if self._stepover != old_stepover:
            print("[DEBUG] game_stepover = " + str(self._stepover))
    def _check_states(self):
        """Process speed, pause and quit controls for the current frame."""
        # game speed
        if self._control.speed_up():
            self._adjust_speed(-1)
        if self._control.speed_down():
            self._adjust_speed(+1)
        # game state
        if self._control.pause() and not self._has_stopped:
            self._is_paused = not self._is_paused
            if self._is_paused:
                self._set_message("Press PAUSE key to continue.", colors.BLUE)
            else:
                self._was_started = True
                self._set_message("Press PAUSE key to pause.", colors.BLUE)
        if self._control.quit():
            print("Quitting...")
            self._looping = False
    def _move_figure(self):
        """Apply the player's movement/rotation inputs to the falling figure."""
        if self._control.step_left():
            self._figure.step_left()
        if self._control.step_right():
            self._figure.step_right()
        if self._control.step_down():
            self._figure.step_down()
        if self._control.fall_down():
            self._figure.fall_down()
        if self._control.rotate():
            self._figure.rotate()
    def _resolve_lines(self):
        """Clear completed lines, add them to the score, and speed the game up."""
        lines = self._field.resolve_lines()
        if lines:
            self._score.add_lines(lines)
            self._display_score()
            # increase game speed
            self._stepover = max(self._stepover - 1, 1)
            print("[DEBUG] game_stepover = " + str(self._stepover))
    def _spawn_new_figure(self):
        """Once the current figure is frozen, promote the preview figure; end the game if it collides."""
        if self._figure.is_freezed():
            self._figure = self._next_figure
            if self._field.collides(self._figure):
                self._has_stopped = True
                self._display_score()
            else:
                self._next_figure = next(self._figure_factory)
                print("Next figure: " + self._next_figure.get_name())
    def _draw(self):
        """Redraw background, field, figures/preview and the status message, then flip the display."""
        self._screen.blit(self._background, (0, 0))
        self._field.draw_grid()
        if self._was_started:
            if not self._has_stopped:
                self._field.draw_figure(self._figure)
                self._preview.draw_figure(self._next_figure)
            else:
                # hack in some flickering
                self._nostep = (self._nostep + 1) % 3
                if not self._nostep:
                    self._field.draw_figure(self._figure, colors.GRAY)
        if self._text_image is not None:
            rect = self._text_image.get_rect()
            rect.topleft = (20, 20)
            self._screen.blit(self._text_image, rect)
        pygame.display.update()
    def loop(self):
        """Main game loop; returns (after pygame.quit()) when the player quits."""
        while self._looping:
            self._control.process_events()
            self._check_states()
            if not self._is_paused and not self._has_stopped:
                self._move_figure()
                # Advance the game only every _stepover-th frame.
                self._nostep = (self._nostep + 1) % self._stepover
                if not self._nostep:
                    # advance game
                    self._figure.step_down()
                    self._resolve_lines()
                    self._spawn_new_figure()
            self._draw()
        if not self._has_stopped:
            print(self._score.get_final_score())
        pygame.quit()
misc.py | MeIsNoob05/videoplayer | 0 | 12770948 | <gh_stars>0
from youtubesearchpython import VideosSearch
import pafy
#Pafy
def url_stream(url: str):
    """Resolve a YouTube URL to a direct best-quality media stream URL via pafy."""
    video = pafy.new(url)
    videos = video.getbest().url
    return videos
# Youtube
def youtube(query: str):
    """Search YouTube for *query* and return (thumbnail_url, stream_url, title) of the top hit."""
    search = VideosSearch(query, limit = 1).result()
    # Drop the query string from the thumbnail URL.
    thumb = search["result"][0]["thumbnails"][0]["url"].split("?")[0]
    link = search["result"][0]["link"]
    title = search["result"][0]["title"]
    video = url_stream(link)
    return thumb, video, title
#User Input
async def user_input(input):
    """Return the text following the command token, or '' when there is none.

    E.g. '/play some song' -> 'some song'. (The parameter name shadows the
    builtin `input`; kept for interface compatibility.)
    """
    has_payload = ' ' in input or '\n' in input
    if not has_payload:
        return ''
    return str(input.split(maxsplit=1)[1].strip())
# Help
HELP = """** Here is a list of commands for Video Streaming Bot**
/vplay - To Stream a Video in Group ( Youtube Search, Youtube Link)
/vstop - To Stop a Video Stream
/vpause - To Pause a Video Stream
/vresume - To Resume Video Stream
/vskip - To Skip The Current Playing Video
/repo - To Get The Repo
/help , /start - To Get Welcome Menu and Commands (works in private)
/alive - To Check If The Bot Is Alive"""
| 3.40625 | 3 |
tools/package-routine.py | Hivemapper/magma | 0 | 12770949 | #!/usr/bin/env python3
""" package-routine.py - package up a single routine into a distributable package
See 'package-routine.web' for how to use multiple files together
Example:
$ ./tools/package-routine.py dgesv
$ ./tools/package-routine.py dgesv
$ ./tools/package-routine.py -h
"""
## imports (all std)
import argparse
import io
import os
import glob
import re
import shutil
import time
import errno
import tarfile
# for 'blas' endings
import magmasubs
# construct & parse given arguments
parser = argparse.ArgumentParser(description='Package a single MAGMA routine into a folder')
parser.add_argument('routine', help='Routine to package up (i.e. sgemm, dgemm, etc)')
parser.add_argument('-o', '--output', default=None, help='Destination tar archive (leave empty for a default)')
parser.add_argument('--interface', default='cuda', choices=['hip', 'cuda'], help='Which interface/backend to use?')
args = parser.parse_args()

# normalize the routine name so it always carries a 'magma' prefix
# (replaces the original's no-op `args.routine = args.routine` branch)
if not args.routine.startswith('magma'):
    args.routine = 'magma_' + args.routine

# default archive name: magma_<interface>_<bare routine>.tar.gz
if not args.output:
    args.output = "magma_" + args.interface + '_' + args.routine.replace('magmablas_', '').replace('magma_', '') + '.tar.gz'
# regex for calling a MAGMA function (also matches CUDA kernel launches with <<<...>>>)
re_call = re.compile(r" *((?:magma(?:blas)?_|\w*_kernel)\w+)\s*(?:<<<.*>>>)?\s*\(")
# regex for a function definition, i.e. not just a declaration (must be multiline due to how many functions are declared)
#re_funcdef = re.compile(r"(?:extern \"C\"|static inline)? ?\n?(?:[\w\* ]+ *?)\n?(magma(?:blas)?_\w+)( \n)*\([\w\[\]\* ,\n]*\)\n? *\n?\{", re.MULTILINE)
re_funcdef = re.compile(r"(?:extern \"C\"|static inline)? ?\n?[\w\* ]+\s+(\w+)( \n)*\([\w\[\]\* ,\n]*\)\n? *\n?\{", re.MULTILINE)
# regex for a macro definition
re_macdef = re.compile(r"#define *(magma(?:blas)?_\w+)\(")
# regex for an include statement
# fix: the closing delimiter of an angle-bracket include is '>', not '<' —
# the previous pattern could never match `#include <magma_...>` forms
re_include = re.compile(r"\#include (?:\"|\<)(magma[\w\.]+)(?:\"|\>)")
# SAG==Source Analysis Graph: lazy per-file cache of (definitions, calls)
class SAG:
    """Lazily scans source files, caching the set of function definitions and
    the set of MAGMA-style calls found in each file."""

    def __init__(self, allfiles):
        # candidate files that may be scanned
        self.allfiles = {*allfiles}
        # map between filename and tuples containing:
        # (defs, calls) -- populated lazily on first access
        self.map = {}

    def __getitem__(self, k):
        # check if not computed yet
        if k not in self.map:
            # Read source from file
            with open(k, 'r') as fp:
                src = fp.read()
            # fix: the original returned without ever storing anything in
            # self.map, so every lookup raised KeyError; cache (defs, calls)
            defs = {m.group(1) for m in re.finditer(re_funcdef, src)}
            calls = {m.group(1) for m in re.finditer(re_call, src)}
            self.map[k] = (defs, calls)
        return self.map[k]
print (f"""Packaging routine: {args.routine} and storing in: {args.output}""")
# escape sequence, so it can be in an fstring
_n = '\n'
# Package for a given interface
if args.interface == 'cuda':
pass
elif args.interface == 'hip':
pass
else:
raise Exception(f"Unknown interface requested: {args.interface}")
# -*- Initialization -*-

# All candidate source files. The common core (src/, control/, include/,
# interface_*/) is always searched; the device-code directory depends on the
# chosen backend. (This span was syntactically broken in the original —
# a dangling `allfiles =` plus a stray conditional — and is reconstructed
# from the surviving hip/cuda glob lists.)
_common_files = [
    *glob.glob("src/*.cpp"),
    *glob.glob("control/*.cpp"),
    *glob.glob("include/*.h"),
    *glob.glob(f"interface_{args.interface}/*.cpp"),
]
if args.interface == "cuda":
    _backend_files = [
        *glob.glob("magmablas/*.cpp"),
        *glob.glob("magmablas/*.cu"),
        *glob.glob("magmablas/*.cuh"),
        *glob.glob("magmablas/*.h"),
    ]
else:
    _backend_files = [
        *glob.glob("magmablas_hip/*.cpp"),
        *glob.glob("magmablas_hip/*.hpp"),
        *glob.glob("magmablas_hip/*.h"),
    ]
allfiles = {*_common_files, *_backend_files}

# source-analysis helper over the full file set (lazy; see class SAG)
sag = SAG(allfiles)
# files selected for inclusion in the package, seeded with headers that every
# packaged routine needs regardless of what gets resolved below
set_c = { 'control/pthread_barrier.h', 'control/affinity.h', 'control/trace.h', 'control/batched_kernel_param.h', 'include/magma_v2.h', f"interface_{args.interface}/error.h" }
# what functions are requested as part of the package (this may grow to other functions called recursively)
funcs_requested = set()
# list of defined functions (i.e. start with an empty set)
funcs_defined = set()
# functions that will emit a warning, due to some special case
funcs_warn = set()
# if these special cases are encountered, add it to warns (currently none)
funcs_special_cases = {
}
# functions that could not be located anywhere
funcs_err = set()
# functions to ignore entirely (known non-packageable helpers)
funcs_ignore = {
    'magma_warn_leaks',
    #'magma_dgetf2_native_fused', 'magma_dgetf2trsm_2d_native',
    #'magma_zlaswp_rowparallel_native', 'magma_claswp_rowparallel_native', 'magma_dlaswp_rowparallel_native', 'magma_slaswp_rowparallel_native',
    #'magma_zlaswp_columnserial', 'magma_claswp_columnserial', 'magma_dlaswp_columnserial', 'magma_slaswp_columnserial',
    #'magma_zlaswp_rowserial_native', 'magma_claswp_rowserial_native', 'magma_dlaswp_rowserial_native', 'magma_slaswp_rowserial_native',
    # TODO handle these?
    #'magma_zgetmatrix_1D_col_bcyclic', 'magma_cgetmatrix_1D_col_bcyclic', 'magma_dgetmatrix_1D_col_bcyclic', 'magma_sgetmatrix_1D_col_bcyclic',
    #'magma_zgetmatrix_1D_row_bcyclic', 'magma_cgetmatrix_1D_row_bcyclic', 'magma_dgetmatrix_1D_row_bcyclic', 'magma_sgetmatrix_1D_row_bcyclic',
    #'magma_zsetmatrix_1D_col_bcyclic', 'magma_csetmatrix_1D_col_bcyclic', 'magma_dsetmatrix_1D_col_bcyclic', 'magma_ssetmatrix_1D_col_bcyclic',
    #'magma_zsetmatrix_1D_row_bcyclic', 'magma_csetmatrix_1D_row_bcyclic', 'magma_dsetmatrix_1D_row_bcyclic', 'magma_ssetmatrix_1D_row_bcyclic',
}
# set of BLAS routine names requested (assumed provided by an external BLAS)
blas_requested = set()
# -*- Utility Functions
# yield only those candidate paths that exist on disk and are not already packaged
def newfiles(*fls):
    yield from (f for f in fls if f not in set_c and os.path.exists(f))
# Read a file's full contents as a single string.
def readall(fl):
    with open(fl, 'r') as src_file:
        return src_file.read()
# Collect the set of capture-group values of 'regex' found in 'src'.
# NOTE: most patterns here capture the identifier in group 1, hence the default.
def matches(regex, src, group=1):
    return {m.group(group) for m in re.finditer(regex, src)}
# Functions still awaiting resolution: everything requested minus what is
# already defined, errored, warned about, or deliberately ignored.
def needed_funcs():
    return funcs_requested.difference(funcs_defined, funcs_err, funcs_warn, funcs_ignore)
# bookkeeping: which files were scanned, keyed by analysis pass ('defs'/'includes')
_p_files = {}

def p_file(fname, mode):
    """Record that *fname* was scanned during pass *mode* and print progress."""
    seen = _p_files.setdefault(mode, set())
    # duplicate scans are harmless; the original only noted them silently
    print ("[", mode, "] Checking file:", fname, " " * 80, end='\r')
    seen.add(fname)
# -*- Search through routines -*-
# Resolve the requested routine: find the file defining it, record everything
# that file defines, and queue every function it calls for later resolution.
for func in [args.routine]:
    funcs_requested.add(func)
    ct = 0
    fnm = func.replace('magmablas_', '').replace('magma_', '')
    # check the most likely files first, then fall back to every known file
    # (fix: the multi-GPU candidate was missing its '.cpp' extension, so it
    # could never pass newfiles()'s existence check)
    for fl in newfiles(f"src/{fnm}.cpp", f"src/{fnm}_gpu.cpp", f"src/{fnm}2.cpp", f"src/{fnm}2_mgpu.cpp", *allfiles):
        src = readall(fl)
        # add functions which were defined
        defs = matches(re_funcdef, src)
        if func in defs:
            ct += 1
            funcs_defined.update(defs)
            print (func, fl)
            # add references to subroutines & other functions called
            funcs_requested.update(matches(re_call, src))
            set_c.add(fl)
            break
    if ct < 1:
        raise Exception(f"Unknown routine '{func}'")
print ("Checking for functions:", funcs_requested)
# while there are needed functions to resolve
while needed_funcs():
# get first one
func = next(iter(needed_funcs()))
# ensure it is a magma function
if not ('magma' in func or '_kernel' in func):
raise Exception(f"Need function '{func}', which is not part of MAGMA!")
# turn it into just the MAGMA name (no prefix)
magma_name = func.replace('magma_', '').replace('magmablas_', '')
# iterate through files the routine probably needs
for fl in newfiles(f"src/{magma_name}.cpp", f"src/{''.join([i for i in magma_name if not i.isdigit()])}.cpp"):
p_file(fl, 'defs')
src = readall(fl)
# get matches and see if this file works
defs = matches(re_funcdef, src) | matches(re_macdef, src)
if func in defs:
# found it, so we need to include this file
set_c.add(fl)
funcs_defined.update(defs)
# we need to see what else is requested
funcs_requested.update(matches(re_call, src))
# we found the requested function, so stop looking for it
break
if func not in funcs_defined:
# we haven't found anything valid yet
isFound = False
if func in funcs_special_cases:
funcs_warn.add(func)
isFound = True
if not isFound:
if 'opencl' in func and 'opencl' not in args.interface:
# we don't care about OpenCL functions
funcs_ignore.add(func)
isFound = True
if not isFound:
# check if it is a BLAS routine (in which case, it should
# be provided by someone else)
for rout in magmasubs.blas:
for prout in rout:
if prout in func:
funcs_defined.add(func)
blas_requested.add(magma_name)
isFound = True
break
if not isFound:
#print ("not yet found:", func)
# not a BLAS routine, so now just search everywhere for it
for fl in newfiles(*allfiles):
p_file(fl, 'defs')
src = readall(fl)
# get matches and see if this file works
defs = matches(re_funcdef, src) | matches(re_macdef, src)
if func in defs:
# found it, so we need to include this file
funcs_defined.update(defs)
# we need to see what else is requested (if not a header)
if fl[fl.index('.'):] not in ('.h', '.hh', '.hpp', '.cuh',):
funcs_requested.update(matches(re_call, src))
set_c.add(fl)
isFound = True
break
if not isFound:
funcs_err.add(func)
#raise Exception(f"Could not find '{func}'")
if funcs_err:
raise Exception(f"Could not find functions: {funcs_err}")
print ("Checking for included files", set_c)
# new includse
keepGoing = True
while keepGoing:
new_includes = set()
for fl in set_c:
p_file(fl, 'includes')
src = readall(fl)
for incfl in matches(re_include, src):
possible = [
f"include/{incfl}",
f"control/{incfl}",
f"magmablas/{incfl}" if args.interface == "cuda" else f"magmablas_{args.interface}/{incfl}",
]
isFound = False
for pos in possible:
if pos in set_c:
isFound = True
break
if isFound:
continue
# we need to find
for pos in possible:
if os.path.exists(pos):
new_includes.add(pos)
isFound = True
break
if isFound:
continue
# not found
raise Exception(f"Could not find included file '{incfl}'")
set_c.update(new_includes)
keepGoing = bool(new_includes)
# -*- Output -*-
# Open the destination archive; the compression mode is taken from the final
# extension of the output name (e.g. '.gz' -> tarfile mode 'w:gz').
last_dot = args.output.rindex('.')
tarcomp = args.output[last_dot + 1:]
tf = tarfile.open(args.output, 'w:' + tarcomp)
def addfile(name, src):
    """Write the string *src* into the open tar archive `tf` as member *name*."""
    payload = src.encode()
    info = tarfile.TarInfo(name)
    info.size = len(payload)
    # a fresh BytesIO already sits at position 0, so no explicit seek is needed
    tf.addfile(info, io.BytesIO(payload))
# generated README: integration instructions for both backends
addfile('README.md', f"""# MAGMA Package Routine
This is a generic README; check the `.mf` files (manifest files) for specific information about packaged routines.
* FUNCS.mf
* Contains a list of defined functions
* BLAS.mf
* Contains a list of required BLAS
* WARNINGS.mf
* Contains a list of possibly problematic functions
## CUDA Interface
To build with CUDA, source files that end in `.cu` should be compiled with `nvcc`, i.e. the NVIDIA CUDA compiler. Given as makefile rules, you should have (approximately):
(keep in mind, throughout these examples, that some variables are just illustrative; you will have to define or supplement them with the relevant files/definitions in your build system)
```makefile
# rule to compile single object file
%.o: %.cu $(magma_H)
\t$(NVCC) -std=c++11 $< -Xcompiler "-fPIC" -o $@
```
And, to compile MAGMA into your own library (say `libmine.so`), you would modify your existing rule:
```makefile
# rule to compile your library (including MAGMA objects from this folder)
libmine.so: $(MAGMA_CU_O) $(MINE_C_O)
\t$(CC) $^ -lcublas -lcusparse -lcudart -lcudadevrt -shared -o $@
```
Assuming `MAGMA_CU_O` are the object files from MAGMA, and `MINE_C_O` are the object files from your library, this should link them together and create your shared library
## HIP Interface
To build with HIP, source files that end in `.hip.cpp` should be compiled with `hipcc`, i.e. the HIP device compiler.
(keep in mind, throughout these examples, that some variables are just illustrative; you will have to define or supplement them with the relevant files/definitions in your build system)
```makefile
# rule to compile single object file
%.o: %.cu $(magma_H)
\t$(HIPCC) -DHAVE_HIP -std=c++11 -fno-gpu-rdc $< -fPIC -o $@
```
And, to compile MAGMA into your own library (say `libmine.so`), you would modify your existing rule:
```makefile
# rule to compile your library (including MAGMA objects from this folder)
libmine.so: $(MAGMA_HIP_O) $(MINE_C_O)
\t$(CC) $^ -lhipsparse -lhipblas -shared -o $@
```
Assuming `MAGMA_HIP_O` are the object files from MAGMA, and `MINE_C_O` are the object files from your library, this should link them together and create your shared library
""")
# generated standalone Makefile for the packaged sources
addfile('Makefile', f"""# -*- Makefile - generated by `package-routine.py`
# variables
NVCC ?= nvcc
# recursive wildcard
rwildcard = $(foreach d,$(wildcard $(1:=/*)),$(call rwildcard,$d,$2) $(filter $(subst *,%,$2),$d))
# source files
MAGMA_C := $(call rwildcard,.,*.c)
MAGMA_CPP := $(call rwildcard,.,*.cpp)
MAGMA_CU := $(call rwildcard,.,*.cu)
# object files
MAGMA_O := $(patsubst %.cpp,%.o,$(MAGMA_C)) $(patsubst %.cpp,%.o,$(MAGMA_CPP)) $(patsubst %.cu,%.o,$(MAGMA_CU))
MAGMA_CFLAGS := -std=c++11 -DADD_ -DMAGMA_CUDA_ARCH_MIN=600 { {'hip': '-DHAVE_HIP', 'cuda': '-DHAVE_CUDA'}[args.interface] }
default: libmagma_pkg.so test
# single file
%.o: %.cpp
\t$(CXX) $(CFLAGS) -I./include -I./control $(MAGMA_CFLAGS) $< -fPIC -c -o $@
%.o: %.c
\t$(CC) $(CFLAGS) -I./include -I./control $(MAGMA_CFLAGS) $< -fPIC -c -o $@
%.o: %.cu
\t$(NVCC) $(CFLAGS) -I./include -I./control $(MAGMA_CFLAGS) $< -Xcompiler "-fPIC" -c -o $@
# compile magma embedded
# (i.e. `magmapkg`)
libmagma_pkg.so: $(MAGMA_O)
\t$(CC) $^ $(LDFLAGS) -lcublas -lcusparse -lcudart -lcudadevrt -shared -o $@
test: test.c libmagma_pkg.so
\t$(CC) $(CFLAGS) -I./include -I./control $(MAGMA_CFLAGS) $^ $(LDFLAGS) -L./ -lmagma_pkg -o $@
clean: FORCE
\trm -f $(wildcard libmagma_pkg.so control/*.o src/*.o interface_{args.interface}/*.o)
FORCE:
.PHONY: default clean FORCE
""")
# generated smoke test: verifies the packaged MAGMA can init & finalize
addfile('test.c', f"""/* test.c - GENERATED test file to ensure magma compiles & can execute
*
* Generated by `package-routine.py`
*
* @author: <NAME> <<EMAIL>>
*/
#include <magma_v2.h>
#include <stdio.h>
int main(int argc, char** argv) {{
// initialize
int st;
if ((st = magma_init()) != MAGMA_SUCCESS) {{
fprintf(stderr, "magma_init() failed! (code: %i)\\n", st);
return -1;
}}
if ((st = magma_finalize()) != MAGMA_SUCCESS) {{
fprintf(stderr, "magma_finalize() failed! (code: %i)\\n", st);
return -1;
}}
// success
return 0;
}}
""")
# manifest files: defined functions, problematic functions, required BLAS
addfile('FUNCS.mf', "\n".join(funcs_defined))
addfile('WARNINGS.mf', "\n".join(funcs_warn))
addfile('BLAS.mf', "\n".join(blas_requested))
# finally, copy every selected source file into the archive and finish
for fl in set_c:
    addfile(fl, readall(fl))
tf.close()
| 2.515625 | 3 |
renderer.py | aymenmir1/ndf-1 | 3 | 12770950 | # conda activate pymesh
import math
import numpy as np
import trimesh
import cv2
import os
import configs.config_loader as cfg_loader
import NDF_combine as NDF
def str2bool(inp):
    """Parse a truthy string: True only for 'true' (case-insensitive).

    Fixes the original substring test (`inp.lower() in 'true'`), which wrongly
    returned True for any substring of 'true' — e.g. 'r', 'ue', and even ''.
    """
    return inp.lower() == 'true'
class Renderer():
    """Sphere-tracing renderer for a Neural Distance Field (NDF).

    A virtual camera and a planar screen are built from the CLI configuration;
    one ray per screen pixel is marched against the NDF and the results are
    written out as shaded / normal / depth images.
    """

    def __init__(self):
        # parse the config, then build the screen plane, camera transform and rays
        self.get_args()
        self.create_plane_points_from_bounds()
        self.define_screen_points()
        self.define_unit_rays()

    def get_args(self):
        """Load the CLI configuration and create the output folder."""
        self.args = cfg_loader.get_config()
        # print(self.args.cam_position)
        # print(self.args.cam_orientation)
        os.makedirs(self.args.folder, exist_ok=True)

    def create_plane_points_from_bounds(self):
        """
        Creates a plane of points which acts as the screen for rendering.

        Result: self.points_list of shape (size*size, 3); every point has the
        fixed z coordinate self.args.screen_depth.
        """
        # create an xy plane
        x = np.linspace(-self.args.screen_bound, self.args.screen_bound, self.args.size)
        y = np.linspace(-self.args.screen_bound, self.args.screen_bound, self.args.size)
        X, Y = np.meshgrid(x, y, indexing='ij')
        X = X.reshape((np.prod(X.shape),))
        Y = Y.reshape((np.prod(Y.shape),))
        # append the third dimension coordinate to the xy plane
        points_list = np.column_stack((X, Y))
        points_list = np.insert(points_list, 2, self.args.screen_depth, axis=1)
        self.points_list = points_list

    def to_rotation_matrix(self):
        """
        Creates rotation matrix from the input euler angles.

        Angles are (x, y, z) in degrees; the combined matrix R = Rz @ Ry @ Rx
        is stored in self.rot_matrix.
        """
        euler_angles = np.array(self.args.cam_orientation)
        R_x = np.array([[1, 0, 0],
                        [0, math.cos(math.radians(euler_angles[0])), -math.sin(math.radians(euler_angles[0]))],
                        [0, math.sin(math.radians(euler_angles[0])), math.cos(math.radians(euler_angles[0]))]
                        ])
        R_y = np.array([[math.cos(math.radians(euler_angles[1])), 0, math.sin(math.radians(euler_angles[1]))],
                        [0, 1, 0],
                        [-math.sin(math.radians(euler_angles[1])), 0, math.cos(math.radians(euler_angles[1]))]
                        ])
        R_z = np.array([[math.cos(math.radians(euler_angles[2])), -math.sin(math.radians(euler_angles[2])), 0],
                        [math.sin(math.radians(euler_angles[2])), math.cos(math.radians(euler_angles[2])), 0],
                        [0, 0, 1]
                        ])
        R = np.dot(R_z, np.dot(R_y, R_x))
        self.rot_matrix = R

    def to_transf_matrix(self):
        """
        Creates a 4x4 homogeneous transformation matrix from the rotation
        matrix and the camera position vector; stored in self.trans_mat.
        """
        self.to_rotation_matrix()
        temp_trans = np.array([0, 0, 0])
        temp_trans = np.reshape(temp_trans, (1, 3))
        rot = np.concatenate((self.rot_matrix, temp_trans), axis=0)
        rot = np.concatenate((rot, np.reshape(np.array([0, 0, 0, 1]), (4, 1))), axis=1)
        inp_trans = np.reshape(self.args.cam_position, (3,))
        inp_trans = np.concatenate((inp_trans, [1]), axis=0)
        # overwrite the last column with the homogeneous camera translation
        rot[:, 3] = inp_trans
        self.trans_mat = rot

    def append_one(self, arr):
        """
        Append a column of ones to *arr* — (N, k) -> (N, k+1) — turning
        points into homogeneous coordinates.
        """
        append = np.ones(arr.shape[0])
        append = np.reshape(append, (append.shape[0], 1))
        new_arr = np.concatenate((arr, append), axis=1)
        return new_arr

    def define_screen_points(self):
        """
        Transforms the screen points and camera position using the camera translation and orientation information provided by the user.

        Result: self.screen_and_cam_transformed of shape (size*size + 1, 3);
        row 0 is the transformed camera position, the rest is the screen.
        """
        self.create_plane_points_from_bounds()
        self.to_transf_matrix()
        cam_loc = np.array([0, 0, 0])
        screen_and_cam = np.vstack((cam_loc, self.points_list))
        screen_and_cam_hom = self.append_one(screen_and_cam)
        # 4 X SIZE^2
        screen_and_cam_hom_T = np.transpose(screen_and_cam_hom, (1, 0))
        screen_and_cam_hom_T_transformed = np.matmul(self.trans_mat, screen_and_cam_hom_T)
        # SIZE^2 X 4
        screen_and_cam_hom_transformed = np.transpose(screen_and_cam_hom_T_transformed, (1, 0))
        # SIZE^2 X 3 (drop the homogeneous coordinate)
        self.screen_and_cam_transformed = screen_and_cam_hom_transformed[:, :3]
        if self.args.debug_mode:
            trimesh.Trimesh(vertices=self.screen_and_cam_transformed, faces=[]).export('setup_camera_rot.off')

    def define_unit_rays(self):
        """
        Defines unit-length rays from the camera through every screen point;
        stored in self.unit_rays (size*size, 3).
        """
        # Separate screen points and camera point
        points = self.screen_and_cam_transformed[1:, :]
        self.cam_trans = np.reshape(self.screen_and_cam_transformed[0, :], (1, 3))
        # Define ray paths from camera
        ray_vector = (points - self.cam_trans)
        # Normalize ray vectors
        norm_ray = np.linalg.norm(ray_vector, ord=2, axis=1)
        norm_ray = np.reshape(norm_ray, (self.args.size * self.args.size, 1))
        self.unit_rays = ray_vector / norm_ray

    def get_lgth_rays(self):
        """
        Computes unit rays from each final surface point toward the light
        source; stored in self.ray_to_src (size*size, 3).
        """
        src_batch = np.repeat([self.args.light_position], self.args.size * self.args.size, axis=0)
        rays = src_batch - self.final_points
        norm_ray = np.linalg.norm(rays, ord=2, axis=1)
        norm_ray = np.reshape(norm_ray, (self.args.size * self.args.size, 1))
        self.ray_to_src = rays / norm_ray

    def run(self):
        """
        Runs the ray marching (sphere tracing) algorithm.

        Each ray is stepped forward by alpha * predicted distance until the
        NDF distance drops below epsilon or the accumulated depth exceeds
        max_depth. Produces self.final_points, self.depth_np and self.normals.
        """
        print(self.args)
        NDF.loadNDF(
            mode = 'test', index = self.args.index,
            pointcloud_samples = self.args.pc_samples,
            exp_name = self.args.exp_name, data_dir = self.args.data_dir,
            split_file = self.args.split_file, sample_distribution = self.args.sample_ratio,
            sample_sigmas = self.args.sample_std_dev, res = self.args.input_res
        )
        depth = np.zeros((self.args.size * self.args.size, 1))
        cam_batch = np.repeat(self.cam_trans, self.args.size * self.args.size, axis=0)
        points = cam_batch.copy()
        iter = 1  # NOTE(review): shadows the builtin `iter`; used only as a counter
        ray = self.unit_rays.copy()
        indices_cont_all = list(range(self.args.size * self.args.size))
        while len(indices_cont_all) > 0:
            print('Iter:', iter)
            dists_points = NDF.predictRotNDF(points)
            dists_points = np.reshape(dists_points, (self.args.size * self.args.size, 1))
            # rays that converged onto the surface...
            indices_stop = np.where(dists_points < self.args.epsilon)[0]
            # ...or marched past the depth limit are frozen (zeroed direction)
            indices_stop2 = np.where(depth > self.args.max_depth)[0]
            indices_stop_all = list(set(indices_stop).union(set(indices_stop2)))
            # print(len(indices_stop_all))
            ray[indices_stop_all] = 0
            setA = set(range(self.args.size * self.args.size))
            setB = set(indices_stop_all)
            indices_cont_all = list(setA.difference(setB))
            # print(len(indices_cont_all))
            # under-relaxed step: advance by alpha * predicted distance
            depth[indices_cont_all] = depth[indices_cont_all] + self.args.alpha * dists_points[indices_cont_all]
            points = points + (ray * (self.args.alpha * dists_points))
            iter = iter + 1
        # back the hit points off the surface slightly for stable gradients
        points = points - (self.unit_rays * self.args.step_back)
        self.final_points = points.copy()
        ## NORMALS
        self.depth_np = depth.copy()
        self.depth_np[self.depth_np > self.args.max_depth] = self.args.max_depth
        dists, gradients = NDF.predictRotGradientNDF(points)
        self.final_gradients = gradients.copy()
        self.normals = np.reshape(gradients, (self.args.size * self.args.size, 3))

    def save(self, image, name, size, normalize):
        """
        :param image: Input image as np array (size*size rows, `size` channels)
        :param name: Name of file to be stored
        :param size: Number of channels of the image
        :param normalize: whether to map values from [-1, 1] into [0, 1]
        Saves individual images.
        """
        if normalize:
            image = (image + 1)/2
        image = np.reshape(image, (self.args.size, self.args.size, size))
        image = cv2.transpose(image)
        image = cv2.flip(image, 0)
        # NOTE(review): hard-coded crop of rows 90:610 — assumes
        # self.args.size is at least 610 (presumably ~700); confirm
        image = image[90:610, :]
        cv2.imwrite(os.path.join(self.args.folder, name), np.uint8(255 * image))

    def save_images(self):
        """
        Saves the shaded / normal / depth images after rendering completes.
        Pixels whose ray never hit the surface (depth == max_depth) are white.
        """
        # view-direction shading: dot(-ray, normal)
        shade = np.sum(np.multiply(-self.unit_rays, self.normals), axis=1)
        shade = np.reshape(shade, (shade.shape[0], 1))
        shade[self.depth_np == self.args.max_depth] = 1
        self.save(shade, 'shade.jpg', 1, True)
        # SHADE WITH LIGHT SOURCE
        if self.args.shade:
            self.get_lgth_rays()
            shd_lgth = np.sum(np.multiply(self.ray_to_src, self.normals), axis=1)
            shd_lgth = np.reshape(shd_lgth, (shd_lgth.shape[0], 1))
            shd_lgth[self.depth_np == self.args.max_depth ] = 1
            self.save(shd_lgth, 'shade_src.jpg', 1, True)
        if self.args.normal:
            # normals rendered as RGB; background forced to white
            RGB_normals = self.final_gradients.copy()
            inds = (self.depth_np == self.args.max_depth)
            for j in range(3):
                new_arr = np.reshape(RGB_normals[:, j], (self.args.size * self.args.size, 1))
                new_arr[inds] = 1
            black_pixels_mask = np.all(RGB_normals == [0, 0, 0], axis=-1)
            RGB_normals[black_pixels_mask] = np.array([1, 1, 1])
            self.save(RGB_normals, 'normals.jpg', 3, True)
        if self.args.depth:
            depth_normalized = np.copy(self.depth_np / self.args.max_depth)
            self.save(depth_normalized, 'depth_final.jpg', 1, False)
if __name__ == "__main__":
renderer = Renderer()
renderer.run()
renderer.save_images()
| 2.828125 | 3 |