| max_stars_repo_path (string, 3 to 269 chars) | max_stars_repo_name (string, 4 to 119 chars) | max_stars_count (int64, 0 to 191k) | id (string, 1 to 7 chars) | content (string, 6 to 1.05M chars) | score (float64, 0.23 to 5.13) | int_score (int64, 0 to 5) |
|---|---|---|---|---|---|---|
users/views/logout.py
|
thulasi-ram/logistika
| 0
|
12776451
|
<reponame>thulasi-ram/logistika
from django.contrib.auth import logout
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.generic import TemplateView
class Logout(TemplateView):
template_name = ''
def get(self, request, *args, **kwargs):
referrer = request.build_absolute_uri(reverse('landing'))
if request.user.is_authenticated:
logout(request)
return HttpResponseRedirect(referrer)
| 2.078125
| 2
|
polyaxon_client/tracking/__init__.py
|
yu-iskw/polyaxon-client
| 0
|
12776452
|
# -*- coding: utf-8 -*-
from polyaxon_client.tracking.experiment import Experiment
from polyaxon_client.tracking.group import Group
from polyaxon_client.tracking.job import Job
from polyaxon_client.tracking.paths import *
| 1.085938
| 1
|
lib/python2.7/site-packages/setools/nodeconquery.py
|
TinkerEdgeR-Android/prebuilts_python_linux-x86_2.7.5
| 0
|
12776453
|
<reponame>TinkerEdgeR-Android/prebuilts_python_linux-x86_2.7.5
# Copyright 2014-2015, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
try:
import ipaddress
except ImportError: # pragma: no cover
pass
import logging
from socket import AF_INET, AF_INET6
from .mixins import MatchContext
from .query import PolicyQuery
class NodeconQuery(MatchContext, PolicyQuery):
"""
Query nodecon statements.
Parameter:
policy The policy to query.
Keyword Parameters/Class attributes:
network The IPv4/IPv6 address or IPv4/IPv6 network address
with netmask, e.g. 192.168.1.0/255.255.255.0 or
"192.168.1.0/24".
network_overlap If true, the net will match if it overlaps with
the nodecon's network instead of equality.
ip_version The IP version of the nodecon to match. (socket.AF_INET
for IPv4 or socket.AF_INET6 for IPv6)
user The criteria to match the context's user.
user_regex If true, regular expression matching
will be used on the user.
role The criteria to match the context's role.
role_regex If true, regular expression matching
will be used on the role.
type_ The criteria to match the context's type.
type_regex If true, regular expression matching
will be used on the type.
range_ The criteria to match the context's range.
range_subset If true, the criteria will match if it is a subset
of the context's range.
range_overlap If true, the criteria will match if it overlaps
any of the context's range.
range_superset If true, the criteria will match if it is a superset
of the context's range.
range_proper If true, use proper superset/subset operations.
No effect if not using set operations.
"""
_network = None
network_overlap = False
_ip_version = None
@property
def ip_version(self):
return self._ip_version
@ip_version.setter
def ip_version(self, value):
if value:
if not (value == AF_INET or value == AF_INET6):
raise ValueError(
"The address family must be {0} for IPv4 or {1} for IPv6.".
format(AF_INET, AF_INET6))
self._ip_version = value
else:
self._ip_version = None
@property
def network(self):
return self._network
@network.setter
def network(self, value):
if value:
try:
self._network = ipaddress.ip_network(value)
except NameError: # pragma: no cover
raise RuntimeError("Nodecon IP address/network functions require Python 3.3+.")
else:
self._network = None
def __init__(self, policy, **kwargs):
super(NodeconQuery, self).__init__(policy, **kwargs)
self.log = logging.getLogger(__name__)
def results(self):
"""Generator which yields all matching nodecons."""
self.log.info("Generating nodecon results from {0.policy}".format(self))
self.log.debug("Network: {0.network!r}, overlap: {0.network_overlap}".format(self))
self.log.debug("IP Version: {0.ip_version}".format(self))
self._match_context_debug(self.log)
for nodecon in self.policy.nodecons():
if self.network:
try:
netmask = ipaddress.ip_address(nodecon.netmask)
except NameError: # pragma: no cover
# Should never actually hit this since the self.network
# setter raises the same exception.
raise RuntimeError("Nodecon IP address/network functions require Python 3.3+.")
# Python 3.3's IPv6Network constructor does not support
# expanded netmasks, only CIDR numbers. Convert netmask
# into CIDR.
# This is <NAME>'s method for counting set bits.
# If the netmask happens to be invalid, this will
# not detect it.
CIDR = 0
int_netmask = int(netmask)
while int_netmask:
int_netmask &= int_netmask - 1
CIDR += 1
net = ipaddress.ip_network('{0}/{1}'.format(nodecon.address, CIDR))
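# Illustrative check of the bit-counting conversion above (example values,
# not from the original file): 255.255.255.0 is 0xffffff00, which has 24
# set bits, so the nodecon address would be rebuilt as a /24 network.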
if self.network_overlap:
if not self.network.overlaps(net):
continue
else:
if not net == self.network:
continue
if self.ip_version and self.ip_version != nodecon.ip_version:
continue
if not self._match_context(nodecon.context):
continue
yield nodecon
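# Minimal usage sketch (an illustration based on the docstring above, not
# part of the original module):
#     q = NodeconQuery(policy, network="192.168.1.0/24", network_overlap=True)
#     for nodecon in q.results():
#         print(nodecon)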
| 1.882813
| 2
|
LeetCode/python-R1/0303-区域和检索 - 数组不可变D/V1.py
|
huuuuusy/Programming-Practice-Everyday
| 4
|
12776454
|
"""
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
System: Ubuntu 18.04
IDE: VS Code 1.36
Tools: python == 3.7.3
"""
"""
Approach:
Store, for each index, the running sum of nums from index 0 through that index in the target array.
A range query is then answered by taking the difference of two target entries (plus the first element of the range).
Reference:
https://leetcode-cn.com/problems/longest-increasing-subsequence/solution/dong-tai-gui-hua-er-fen-cha-zhao-tan-xin-suan-fa-p/
Result:
Runtime: 104 ms, faster than 82.05% of Python3 submissions
Memory usage: 17.5 MB, less than 5.31% of Python3 submissions
"""
class NumArray:
def __init__(self, nums):
self.nums = nums
sum_i = 0 # sum_i records the running sum from index 0 up to the current number
target = []
for i in range(len(nums)):
sum_i += nums[i] # keep adding the current number into sum_i
target.append(sum_i) # append the current prefix sum to the result
self.target = target
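# Worked example of the prefix sums built above (illustrative only): for
# nums = [-2, 0, 3, -5, 2, -1], target becomes [-2, -2, 1, -4, -2, -3],
# so sumRange(2, 5) = target[5] - target[2] + nums[2] = -3 - 1 + 3 = -1.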
def sumRange(self, i, j):
return self.target[j]-self.target[i]+self.nums[i]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
if __name__ == "__main__":
nums = [-2, 0, 3, -5, 2, -1]
obj = NumArray(nums)
answer = obj.sumRange(2, 5)
print(answer)
| 3.875
| 4
|
FFTAnalysis/FFT.py
|
JamieAGraham/FID-Analysis-g-2
| 1
|
12776455
|
from __future__ import division
import numpy as np
import scipy as sci
import sys
# Python implementation of the FFT quadratic peak interpolation method. This code can be run with the following format:
# <python FFT.py [filename.txt] [Zero Padding Multiple]>
# The format of the text file is identical to that of the ZC code, see
# ZC.py or readme.md for reference.
# Take command line data
filename = str(sys.argv[1])
Zero_Pad_Length = float(sys.argv[2])
# Take data from file
Data_Input = np.loadtxt(filename)
Data_Input = np.transpose(Data_Input)
# Calculate the length of the data, and the timestep
n = Data_Input[1].size
N = int(np.ceil(n * (Zero_Pad_Length + 1)))
timestep = Data_Input[0][1] - Data_Input[0][0]
# Create an array of the magnitudes of the FFT coefficients, and the frequency bins
FFT = [np.absolute(np.fft.rfft(Data_Input[1], n=N)),
np.fft.rfftfreq(N, d=timestep)]
# Find the maximum value of this FFT
maximum = max(FFT[0])
# Find the index (or indices) to which this value is associated.
# Reminder: This requires two passes through the array, and so could be optimized by looping by hand, rather than using enumerate in this fashion.
# Currently this seems fast enough for typical input sizes, so it is left as is.
max_indices = [i for i, j in enumerate(FFT[0]) if j == maximum]
# Output
print(np.absolute(FFT[1][max_indices[0]]))
# Calculating the interpolated peak frequency through quadratic peak
# interpolation, where alpha beta and gamma are the points around the
# theoretical maximum.
sample_frequency = 1. / timestep
alpha = FFT[0][max_indices[0] - 1]
beta = FFT[0][max_indices[0]]
gamma = FFT[0][max_indices[0] + 1]
# See associated paper for full explanation of equation, or go to
# http://www.dsprelated.com/freebooks/sasp/Quadratic_Interpolation_Spectral_Peaks.html
# for a fantastic treatment of the subject
fractional_peak_location = 0.5 * (alpha - gamma) / (alpha - 2 * beta + gamma)
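# Worked example of the formula above (illustrative numbers only): with
# alpha = 0.8, beta = 1.0, gamma = 0.9 the fractional offset is
# 0.5 * (0.8 - 0.9) / (0.8 - 2.0 + 0.9) = 0.1667 bins toward gamma.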
# Calculating the frequency by converting between bin number and frequency
Interpolated_peak_freq = FFT[1][max_indices[0]] + \
fractional_peak_location * sample_frequency / n
# Output:
print(Interpolated_peak_freq)
| 2.859375
| 3
|
fb_mining/friends_analyzis.py
|
JustCheckingHow/StalkingYourGf
| 1
|
12776456
|
def find_deletions(friends_file_path, new_friends_list):
deleted = ""
f1 = open(friends_file_path, "r")
data2 = new_friends_list
for line in f1:
if data2.find(line) == -1:
print ("--" +line),
deleted += line
f1.close()
return deleted
def find_additions(friends_file_path, new_friends_list):
added = ""
f2 = open(friends_file_path, "r")
data1 = new_friends_list
for line in f2:
if data1.find(line) == -1:
print ("++" +line),
added += line
f2.close()
return added
def find_mutual_friends(path1, path2):
f1 = open(path1, "r")
f2 = open(path2, "r")
data2 = f2.read()
for line in f1:
if data2.find(line) != -1:
print ("mutuals: " + line),
f1.close()
f2.close()
| 3.53125
| 4
|
raws/reffile.py
|
Charcoal-Apple/PyDwarf
| 49
|
12776457
|
<gh_stars>10-100
#!/usr/bin/env python
# coding: utf-8
import os
import shutil
import basefile
class reffile(basefile.basefile):
def __init__(self, path=None, dir=None, root=None, **kwargs):
self.dir = dir
self.setpath(path, root, **kwargs)
self.kind = 'ref'
def copy(self):
copy = reffile()
copy.path = self.path
copy.rootpath = self.rootpath
copy.name = self.name
copy.ext = self.ext
copy.loc = self.loc
return copy
def ref(self, **kwargs):
for key, value in kwargs.iteritems(): self.__dict__[key] = value
return self
def bin(self, **kwargs):
self.kind = 'bin'
self.__class__ = binfile.binfile
for key, value in kwargs.iteritems(): self.__dict__[key] = value
self.read()
return self
def raw(self, **kwargs):
self.kind = 'raw'
self.__class__ = rawfile.rawfile
for key, value in kwargs.iteritems(): self.__dict__[key] = value
self.read()
return self
def write(self, path):
dest = self.dest(path, makedir=True)
if self.path != dest:
if os.path.isfile(self.path):
shutil.copy2(self.path, dest)
elif os.path.isdir(self.path):
copytree.copytree(self.path, dest)
else:
raise ValueError('Failed to write file because its path %s refers to neither a file nor a directory.' % self.path)
import copytree
import binfile
import rawfile
| 2.8125
| 3
|
TransferOwnership.py
|
tylergusmyers/Pet_dApp
| 0
|
12776458
|
import streamlit as st
from dataclasses import dataclass, field
from typing import Any, List
import datetime as datetime
import pandas as pd
import hashlib
@dataclass
class Title:
sender: str
receiver: str
title: str
@dataclass
class Ownership:
record: Title
creator_id: int
prev_hash: str = "0"
timestamp: str = field(default_factory=lambda: datetime.datetime.utcnow().strftime("%H:%M:%S"))  # evaluated per block, not once at import
nonce: int = 0
def hash_block(self):
sha = hashlib.sha256()
record = str(self.record).encode()
sha.update(record)
creator_id = str(self.creator_id).encode()
sha.update(creator_id)
timestamp = str(self.timestamp).encode()
sha.update(timestamp)
prev_hash = str(self.prev_hash).encode()
sha.update(prev_hash)
nonce = str(self.nonce).encode()
sha.update(nonce)
return sha.hexdigest()
@dataclass
class TransferOwnership:
chain: List[Ownership]
difficulty: int = 10
def proof_of_work(self, ownership):
calculated_hash = ownership.hash_block()
num_of_zeros = "0" * self.difficulty
while not calculated_hash.startswith(num_of_zeros):
ownership.nonce += 1
calculated_hash = ownership.hash_block()
print("Title Transferred", calculated_hash)
return ownership
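# Note (added for clarity, not in the original file): with difficulty d the
# loop above keeps bumping the nonce until the SHA-256 hex digest starts
# with d zero characters, so each extra unit of difficulty makes a valid
# hash roughly 16 times rarer.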
def add_block(self, newOwner):
ownership = self.proof_of_work(newOwner)
self.chain += [ownership]
def is_valid(self):
block_hash = self.chain[0].hash_block()
for ownership in self.chain[1:]:
if block_hash != ownership.prev_hash:
print("Title cannot be Transferred!")
return False
block_hash = ownership.hash_block()
print("Title has been Transferred")
return True
@st.cache(allow_output_mutation=True)
def setup():
print("Initializing Title Information")
return TransferOwnership([Ownership("Title", 0)])
st.markdown("# Transfer Ownership Title")
st.markdown("## Input address of who you would like to Transfer the Ownership to ")
titleTransfer = setup()
input_sender_id = st.text_input("Current Owner ID")
input_receiver_id = st.text_input("New Owner ID")
input_title = st.text_input("<NAME>")
if st.button("Transfer Title"):
prev_block = titleTransfer.chain[-1]
prev_block_hash = prev_block.hash_block()
new_owner = Ownership(
record=Title(
sender=input_sender_id, receiver=input_receiver_id, title=input_title
),
creator_id=42,
prev_hash=prev_block_hash,
)
titleTransfer.add_block(new_owner)
st.balloons()
st.markdown("## Transfer Pet Ownership")
Transfer_df = pd.DataFrame(titleTransfer.chain).astype(str)
st.write(Transfer_df)
difficulty = st.sidebar.slider("Block Difficulty", 1, 5, 2)
titleTransfer.difficulty = difficulty
st.sidebar.write("# Owner History")
selected_block = st.sidebar.selectbox(
"Which Owner would you like to see?", titleTransfer.chain
)
st.sidebar.write(selected_block)
if st.button(" Transfer Complete"):
st.write(titleTransfer.is_valid())
# Next Steps:
"""Ganache interface,
Drop-down menus
Token creation in Solidity
Insert token into fields
Picture URLs for dog pictures
Add independent functions into the token, then merge later
Update."""
## Pet Token Address
# Drop Down menu for Pet Token Address
#
| 2.765625
| 3
|
endpoints/client_endpoint.py
|
iTecAI/XL3
| 0
|
12776459
|
from fastapi import APIRouter, status, Request, Response
from util import *
from classes import *
from _runtime import server
import logging, random, hashlib
from pydantic import BaseModel
from models import *
logger = logging.getLogger("uvicorn.error")
router = APIRouter()
@router.post('/settings/set/{setting}/',responses={
404: {'model':SimpleResult,'description':'Connection or Setting not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'Cannot edit client settings, as the user is not logged in.','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
200: {'model':SimpleResult,'description':'Successful. Setting is changed','content':{'application/json':{'example':{'result':'Success.'}}}}
})
async def edit_client_settings(fingerprint: str, setting: str, model: ClientSettingsModel, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
if setting in server.connections[fingerprint].user.settings.keys():
server.connections[fingerprint].user.settings[setting] = model.value
logger.info('User '+server.connections[fingerprint].user.username+' changed a setting: '+setting+' = '+model.value)
server.connections[fingerprint].user.update()
return {'result':'Success'}
elif setting == 'email':
server.connections[fingerprint].user.username = model.value
logger.info('User '+server.connections[fingerprint].user.username+' changed a setting: '+setting+' = '+model.value)
server.connections[fingerprint].user.update()
return {'result':'Success'}
else:
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Setting '+setting+' not found.'}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
@router.get('/settings/{setting}/',responses={
404: {'model':SimpleResult,'description':'Connection or Setting not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'Cannot get client settings, as the user is not logged in.','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
200: {'model':SettingResponseModel,'description':'Successful. Returns setting value.','content':{'application/json':{'example':{'result':'Success.','setting':'Setting Name','value':'Setting Value'}}}}
})
async def get_specific_client_setting(fingerprint: str, setting: str, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
if setting in server.connections[fingerprint].user.settings.keys():
return {'result':'Success','setting':setting,'value':server.connections[fingerprint].user.settings[setting]}
elif setting == 'email':
return {'result':'Success','setting':setting,'value':server.connections[fingerprint].user.email}
else:
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Setting '+setting+' not found.'}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
@router.get('/settings/',responses={
404: {'model':SimpleResult,'description':'Connection not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'Cannot get client settings, as the user is not logged in.','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
200: {'model':AllSettingsResponseModel,'description':'Successful. Returns setting value.','content':{'application/json':{'example':{'result':'Success.','settings':{'Setting Name':'Setting Value','Foo':'Bar'}}}}}
})
async def get_client_settings(fingerprint: str, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
_settings = server.connections[fingerprint].user.settings.copy()
_settings['email'] = server.connections[fingerprint].user.username
return {'result':'Success','settings':_settings}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
@router.post('/password/check/',responses={
404: {'model':SimpleResult,'description':'Connection not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'Cannot check password, as the user is not logged in.','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
200: {'model':PasswordCheckResponseModel,'description':'Successful. Returns whether the password matches.','content':{'application/json':{'example':{'result':'Success.','match':True}}}}
})
async def check_password(fingerprint: str, model: PasswordCheckModel, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
if server.connections[fingerprint].user.password_hash == model.hashword:
return {'result':'Success','match':True}
else:
return {'result':'Success','match':False}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
@router.post('/password/change/',responses={
404: {'model':SimpleResult,'description':'Connection not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'Cannot change password, as the user is not logged in.','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
403: {'model':SimpleResult,'description':'Cannot change password, as the previous password provided was incorrect.','content':{'application/json':{'example':{'result':'Previous password incorrect.'}}}},
200: {'model':SimpleResult,'description':'Successful. Returns whether the password matches.','content':{'application/json':{'example':{'result':'Success.'}}}}
})
async def change_password(fingerprint: str, model: PasswordChangeModel, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
if server.connections[fingerprint].user.password_hash == <PASSWORD>:
server.connections[fingerprint].user.password_hash = <PASSWORD>
return {'result':'Success.'}
else:
response.status_code = status.HTTP_403_FORBIDDEN
return {'result':'Previous password incorrect.'}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
@router.get('/characters/',responses={
404: {'model':SimpleResult,'description':'Connection not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'User must be logged in','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
200: {'model':OwnedListResponseModel,'description':'List of owned character IDs','content':{'application/json':{'example':{'result':'Success.','owned':[]}}}}
})
async def get_characters(fingerprint: str, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
return {'result':'Success.','owned':server.connections[fingerprint].user.owned_characters}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
@router.get('/campaigns/',responses={
404: {'model':SimpleResult,'description':'Connection not found','content':{'application/json':{'example':{'result':'Connection not found.'}}}},
405: {'model':SimpleResult,'description':'User must be logged in','content':{'application/json':{'example':{'result':'User is not logged in.'}}}},
200: {'model':OwnedListResponseModel,'description':'List of owned campaign IDs','content':{'application/json':{'example':{'result':'Success.','owned':[]}}}}
})
async def get_campaigns(fingerprint: str, response: Response):
if not fingerprint in server.connections.keys():
response.status_code = status.HTTP_404_NOT_FOUND
return {'result':'Connection not found.'}
if server.connections[fingerprint].logged_in:
return {'result':'Success.','owned':server.connections[fingerprint].user.owned_campaigns}
else:
response.status_code = status.HTTP_405_METHOD_NOT_ALLOWED
return {'result':'You must be logged in to do that.'}
| 2.390625
| 2
|
qa/L0_stability_steps/check_results.py
|
MarkMoTrin/model_analyzer
| 115
|
12776460
|
<filename>qa/L0_stability_steps/check_results.py<gh_stars>100-1000
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import yaml
class TestOutputValidator:
"""
Functions that validate the output
of the test
"""
def __init__(self, config, test_name, analyzer_log):
self._config = config
self._models = config['profile_models']
self._analyzer_log = analyzer_log
check_function = self.__getattribute__(f'check_{test_name}')
if check_function():
sys.exit(0)
else:
sys.exit(1)
def check_steps_stability(self):
"""
Makes sure that there were the same number of
configurations tried in each search iteration.
"""
with open(self._analyzer_log, 'r+') as f:
log_contents = f.read()
logs_for_iteration = log_contents.split(
'Profiling server only metrics...')[1:]
logs_for_model = logs_for_iteration[0].split(
"config search for model:")[1:]
expected_step_counts = []
for model_log in logs_for_model:
expected_step_counts.append(model_log.count('[Search Step]'))
for i in range(1, 4):
logs_for_model = logs_for_iteration[i].split(
"config search for model:")[1:]
for j, model_log in enumerate(logs_for_model):
actual_step_count = model_log.count('[Search Step]')
if abs(actual_step_count - expected_step_counts[j]) > 1:
print("\n***\n*** Expected number of search steps for "
f"{self._models[j]} : {expected_step_counts[j]}. "
f"Took {actual_step_count}. \n***")
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f',
'--config-file',
type=str,
required=True,
help='The path to the config yaml file.')
parser.add_argument('-l',
'--analyzer-log-file',
type=str,
required=True,
help='The full path to the analyzer log.')
parser.add_argument('-t',
'--test-name',
type=str,
required=True,
help='The name of the test to be run.')
args = parser.parse_args()
with open(args.config_file, 'r') as f:
config = yaml.safe_load(f)
TestOutputValidator(config, args.test_name, args.analyzer_log_file)
| 2.296875
| 2
|
src/ai/conditions.py
|
Kupoman/thor
| 1
|
12776461
|
<reponame>Kupoman/thor<filename>src/ai/conditions.py
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_condition(args):
return CONDITION_LUT[args[0]](*args[1:])
class AlwaysCondition:
__slots__ = []
def test(self, data):
return True
class RangeCondition:
__slots__ = ["property", "min", "max"]
def __init__(self, prop, _min, _max):
self.property = prop
if type(_min) in (str, unicode):
_min = float(_min)
if type(_max) in (str, unicode):
_max = float(_max)
self.min = _min
self.max = _max
def test(self, data):
return self.min < getattr(data, self.property) < self.max
CONDITION_LUT = {
"VALUE": RangeCondition,
"ALWAYS": AlwaysCondition,
}
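# Minimal usage sketch (assumed attribute name, not from the original file):
#     cond = get_condition(["VALUE", "health", "0", "100"])
#     cond.test(data)  # True when 0 < data.health < 100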
| 2.296875
| 2
|
alertserver/trello_client.py
|
KoMinkyu/bitbucket_alerts_trello
| 1
|
12776462
|
<filename>alertserver/trello_client.py<gh_stars>1-10
from html import unescape
from alertserver.config import Trello as config_trello
import trolly
client = trolly.client.Client(config_trello.api_key, config_trello.token)
assert client is not None
member = client.get_member()
assert member is not None
print('Connected by Member: %s' % member.get_member_information()['email'])
board = client.get_board(id=config_trello.board_id)
assert board is not None
print('Board Name: %s' % board.get_board_information()['name'])
def post_activity(card_id: int, format_string: str, **kwargs):
comment_body = format_string.format(**{
**kwargs,
'card_id': card_id
}).replace('\\n', '\n').replace('\\t', '\t')
card = board.get_card(str(card_id))
assert card is not None
card.add_comments(comment_body)
def post_branch_activity(card_id: int, branch_name: str, link: str):
post_activity(card_id, config_trello.fms_branch, branch_name=branch_name, link=link)
def post_commit_activity(card_id: int, branch_name: str, commit_log: str, link: str):
post_activity(card_id, config_trello.fms_commit, branch_name=branch_name, commit_log=commit_log, link=link)
def post_merge_activity(card_id: int, branch_name: str, link: str):
post_activity(card_id, config_trello.fms_merge, branch_name=branch_name, link=link)
# post_branch_activity(255, 'develop', 'http://www.naver.com')
| 2.390625
| 2
|
pypackage_scripts/__init__.py
|
marisalim/stonybrook_juypterworkflow
| 0
|
12776463
|
<reponame>marisalim/stonybrook_juypterworkflow
x = 5.9
y = 6
| 0.945313
| 1
|
Data_Structure/selection_sort.py
|
RafaFurla/Python-Exercises
| 1
|
12776464
|
<filename>Data_Structure/selection_sort.py
def sort(list_):
"""
This function is a selection sort algorithm. It will put a list in numerical order.
:param list_: a list
:return: a list ordered by numerial order.
"""
for minimum in range(0, len(list_)):
for c in range(minimum + 1, len(list_)):
if list_[c] < list_[minimum]:
temporary = list_[minimum]
list_[minimum] = list_[c]
list_[c] = temporary
return list_
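# Illustrative trace (not part of the original file): for [30, 18, 70] the
# first outer pass swaps 30 and 18, giving [18, 30, 70]; the remaining
# passes find nothing smaller, so the list is already in order.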
numbers = [30, 18, 70, 100, 20, 5, 10, 50, 8, 14, 47]
print(sort(numbers))
| 4.375
| 4
|
bltk/langtools/taggertools.py
|
saimoncse19/bltk
| 12
|
12776465
|
noun_suffix = ['াই', 'াটা', 'াটাই', 'াটাও', 'াটাকে', 'াটাকেও', 'াটি', 'ামি', 'িক', 'িকা', 'ের', 'েরই', 'েরও', 'েরা',
'েরাও', 'েরে', 'ও', 'আবলি', 'আলা', 'এরা', 'এরে', 'কারী', 'কুলের', 'কে', 'কেই', 'কেও', 'খানা', 'খানি',
'গণ', 'গণে', 'গন', 'গাছা', 'গাছি', 'গিরি', 'গুচ্ছ', 'গুলা', 'গুলি', 'গুলিকে', 'গুলিতে', 'গুলিন',
'গুলির', 'গুলো', 'গুলোও', 'গুলোকে', 'গুলোয়', 'গুলোতে', 'গুলোর', 'গোটা', 'চস', 'টা', 'টাও', 'টাই',
'টাকে', 'টার', 'টায়', 'টি', 'টিও', 'টিকে', 'টিকেই', 'টিতে', 'টিতেও', 'টির', 'টুকু', 'টুকুন', 'দল',
'দলের', 'দনি', 'দিগ', 'দিগকে', 'দিগে', 'দিগেতে', 'দিগের', 'দিগেরে', 'দিতে', 'দের', 'দেরই', 'দেরে',
'নিকর', 'নিচয়', 'নের', 'পানা', 'পানি', 'পাল', 'পুঞ্জ', 'পের', 'বৎ', 'বর্গ', 'বীনি', 'বৃন্দ',
'মণ্ডলী', 'মন্ডলি', 'মালা', 'মালার', 'মালায়', 'যূথ', 'য়', 'য়ই', 'য়ে', 'র', 'রও', 'রই', 'রা', 'রাই',
'রাও', 'রাওতো', 'রাজি', 'রাশি', 'সকল', 'সব', 'সমুদয়', 'সমুহ', 'সমুহের', 'সমূহ', 'সমূহের', 'সহ',
'সাৎ', 'হারা']
verb_suffix = ['াইও', 'াইছি', 'াইছে', 'াইত', 'াইতাছস', 'াইতাছে', 'াইতাম', 'াইতিস', 'াইতে', 'াইতেছ', 'াইতেছি',
'াইতেছিল', 'াইতেছিলাম', 'াইতেছিলি', 'াইতেছিলে', 'াইতেছিলেন', 'াইতেছিস', 'াইতেছে', 'াইতেছেন', 'াইতেন',
'াইনতে', 'াইনি', 'াইব', 'াইবার', 'াইবি', 'াইবে', 'াইবো', 'াইবেন', 'াইয়া', 'াইয়াছ', 'াইয়াছি',
'াইয়াছিল', 'াইয়াছিলাম', 'াইয়াছিলি', 'াইয়াছিলে', 'াইয়াছিলেন', 'াইয়াছিস', 'াইয়াছে', 'াইয়াছেন', 'াইয়ো',
'াইল', 'াইলা', 'াইলাম', 'াইলি', 'াইলে', 'াইলেন', 'াইস', 'াক', 'াচ্ছ', 'াচ্ছি', 'াচ্ছিল', 'াচ্ছিলাম',
'াচ্ছিলি', 'াচ্ছিলুম', 'াচ্ছিলে', 'াচ্ছিলেন', 'াচ্ছিলেম', 'াচ্ছিস', 'াচ্ছে', 'াচ্ছেন', 'াছিস', 'াছে',
'াছেন', 'াতাম', 'াতি', 'াতিস', 'াতুম', 'াতেই', 'াতেও', 'াতেছ', 'াতেন', 'াতেম', 'াননি', 'ানো',
'ানোর', 'ানো', 'ানোই', 'ানোও', 'ানোটা', 'ানোতে', 'ানোতেই', 'ানোয়', 'ানোর', 'ানোরই', 'ানোরও',
'ানোসহ', 'াব', 'াবানি', 'াবার', 'াবি', 'াবে', 'াবো', 'াবেও', 'াবেন', 'াবেনই', 'াবো', 'ায়ও', 'ায়নি',
'ায়ে', 'ায়েছিলাম', 'ায়েছে', 'ায়ো', 'ালাম', 'ালু', 'ালুম', 'ালেই', 'ালেও', 'ালেন', 'ালেনও', 'ালেম',
'িআছিল', 'িছিলে', 'িতাম', 'িতিস', 'িতে', 'িতেও', 'িতেছ', 'িতেছি', 'িতেছিল', 'িতেছিলাম', 'িতেছিলি',
'িতেছিলে', 'িতেছিলেন', 'িতেছিস', 'িতেছেন', 'িতেন', 'িন', 'িনী', 'িবা', 'িবে', 'িবেন', 'িলুম', 'িলেম',
'িয়াছ', 'িয়াছি', 'িয়াছিলাম', 'িয়াছিলি', 'িয়াছিলে', 'িয়াছিলেন', 'িয়াছিস', 'িয়াছে', 'িয়াছেন', 'িয়েছি',
'িয়েছিল', 'িয়েছিলি', 'িয়েছিলে', 'িয়েছিলেন', 'িয়েছিস', 'িয়েছে', 'িয়েছেন', 'িয়েছো', 'িল', 'িলাম',
'িলি', 'িলে', 'িলেন', 'েছ', 'েছি', 'েছিল', 'েছিলাম', 'েছিলি', 'েছিলে', 'েছিলেন', 'েছিস', 'েছে',
'েছেন', 'েতে', 'েন', 'েননি', 'ইছিলাম', 'ইছে', 'ইত', 'ইবে', 'ইয়', 'ইয়া', 'ইস', 'চ্ছিল', 'চ্ছিলাম',
'চ্ছিলি', 'চ্ছিলে', 'চ্ছিলেন', 'চ্ছিস', 'চ্ছে', 'চ্ছেন', 'ছ', 'ছিল', 'ছিলাম', 'ছিলি', 'ছিলুম',
'ছিলেম', 'ছিস', 'ছে', 'ছেই', 'ছেও', 'ছেন', 'ছেনই', 'ছেনও', 'ছেনটা', 'ছেননি', 'ছো', 'তাছি', 'তাছে',
'তাছেন', 'তাম', 'তিছি', 'তিছেন', 'তিস', 'তুত', 'তুম', 'তেছি', 'তেছিলাম', 'তেছেন', 'তেন', 'তেনা',
'তেম', 'নোর', 'নোয়', 'বে', 'বেই', 'বেন', 'বেনই', 'য়েছিলাম', 'লাম', 'লামই', 'লে', 'লেই', 'লেও', 'লেন',
'লেনই', 'লেম', 'লো']
adjective_suffix = ['তাত্ত্বিক', 'শ্রেষ্ঠ', 'ভিত্তিক', 'গ্রস্ত', 'মণ্ডিত', 'গ্রস্থ',
'াত্মক', 'পূর্ণ', 'বিহীন', 'যোগ্য',
'পন্থী', 'বন্দী', 'যুক্ত', 'চালিত', 'পীড়িত', 'বর্তী', 'মুক্ত', 'মূলক', 'দায়ক', 'কারক', 'জনিত',
'বদ্ধ', 'কারী', 'শালী', 'ঘটিত', 'বাদী', 'বদ্ধ', 'প্রদ', 'সূচক', 'বাচক', 'াঙ্গ',
'বহুল', 'হীন', 'কৃত', 'ময়ী', 'বতী', 'শীল', 'মান', 'িত', 'ীন',
'কর', 'িক', 'তর', 'ীয়', 'তম', 'গত', 'ী']
pronouns = {
"আমি": ["আমি", 'আমার', 'আমাদের', 'আমাদেরকে', 'আমাকে', 'আমাকেও', 'আমায়', 'আমাতে', 'আমরা'],
"তুমি": ["তুমি", 'তুই', 'তোমার', 'তোমাদের', 'তোমাকে', 'তোমাদেরকে', 'তোমাতে',
'তোমাকেও', 'তোমারে', 'তোর', 'তোমায়', 'তোমরা'],
"আপনি": ["আপনি", 'আপনার', 'আপনাদের', 'আপনাকে', 'আপনারে', 'আপনাকেও', 'আপনারা'],
"এরা": ["এরা", 'একে', 'এদেরকে', 'এদের', 'এদেরকেও'],
"এটি": ["এটি", 'এটা', 'এটির', 'এটার', 'এটাকে', 'এটিকে', 'এটাতে', 'এটিতে', 'এটিও', 'এটাও', 'এটিই'],
"ওরা": ["ওরা", 'ওদের', 'ওদেরকে', 'ওদেরকেও'],
"ওটি": ["ওটি", 'ওটা', 'ওটির', 'ওটার', 'ওটাকে', 'ওটাকেও', 'ওটিকে', 'ওটাতে', 'ওটিতে', 'ওটিও', 'ওটাও'],
"সে": ['তার', 'তাকে', 'সে', "তিনি"],
"তারা": ["তারা", 'তাদের', 'তাদেরকে', 'তাদেরকেও'],
"সেটি": ['সেটা', 'সেটির', 'সেটার', 'সেটাকে', 'সেটিকে', 'সেটাতে', 'সেটিতে', 'সেটি'],
"সেগুলো": ['সেগুলোতে', 'সেগুলাতে', 'সেগুলোর', 'সেগুলার', 'সেগুলো']
}
def features(sentence: list, index: int):
"""
:author: Saimon
:param sentence: takes a sentence in the form of a list
:param index: index of word in a sentence
:return: A dictionary of the following items:
word: str,
next word: str,
previous word: str,
prefix_1: str,
prefix_2: str,
prefix_3: str,
prefix_4: str
suffix_1: str,
suffix_2: str,
suffix_3: str,
suffix_4: str
has_hyphen: boolean,
is_numeric: boolean,
has_noun_suffix: boolean
has_verb_suffix: boolean
has_adjective_suffix: boolean
"""
return {
'word': sentence[index],
'prefix_1': sentence[index][0],
'prefix_2': sentence[index][:2],
'prefix_3': sentence[index][:3],
'prefix_4': sentence[index][:4],
'suffix_1': sentence[index][-1],
'suffix_2': sentence[index][-2:],
'suffix_3': sentence[index][-3:],
'suffix_4': sentence[index][-4:],
'prev_word': '' if index == 0 else sentence[index - 1],
'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],
'has_hyphen': '-' in sentence[index],
'is_numeric': sentence[index].isdigit(),
'has_noun_suffix': any(sentence[index].endswith(suf) for suf in noun_suffix),
'has_adjective_suffix': any(sentence[index].endswith(suf) for suf in adjective_suffix),
'has_verb_suffix': any(sentence[index].endswith(suf) for suf in verb_suffix),
}
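# Illustrative usage (the sentence below is an assumption for demonstration):
#     features(['আমি', 'ভাত', 'খাই'], 1)['prev_word']   -> 'আমি'
#     features(['আমি', 'ভাত', 'খাই'], 1)['is_numeric']  -> False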
| 1.585938
| 2
|
basic_structure.py
|
spaceghst007/astr-119-session-2
| 0
|
12776466
|
import library as alias #imports functions for
#us to use
def main(): #defines the main func
#do some stuff
#rest of the program continues from here
#if the main() function exists, run it
if __name__ == "__main__":
main()
#you can do other stuff down here
| 2.34375
| 2
|
Fluid/io/fluid-cloudnative/module/alluxio_runtime_spec.py
|
Rui-Tang/fluid-client-python
| 1
|
12776467
|
<filename>Fluid/io/fluid-cloudnative/module/alluxio_runtime_spec.py
# coding: utf-8
"""
fluid
client for fluid # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from Fluid.io.fluid-cloudnative.module.alluxio_comp_template_spec import AlluxioCompTemplateSpec # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.alluxio_fuse_spec import AlluxioFuseSpec # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.data import Data # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.init_users_spec import InitUsersSpec # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.tieredstore import Tieredstore # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.user import User # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.version_spec import VersionSpec # noqa: F401,E501
class AlluxioRuntimeSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alluxio_version': 'VersionSpec',
'api_gateway': 'AlluxioCompTemplateSpec',
'data': 'Data',
'disable_prometheus': 'bool',
'fuse': 'AlluxioFuseSpec',
'hadoop_config': 'str',
'init_users': 'InitUsersSpec',
'job_master': 'AlluxioCompTemplateSpec',
'job_worker': 'AlluxioCompTemplateSpec',
'jvm_options': 'list[str]',
'master': 'AlluxioCompTemplateSpec',
'properties': 'dict(str, str)',
'replicas': 'int',
'run_as': 'User',
'tieredstore': 'Tieredstore',
'worker': 'AlluxioCompTemplateSpec'
}
attribute_map = {
'alluxio_version': 'alluxioVersion',
'api_gateway': 'apiGateway',
'data': 'data',
'disable_prometheus': 'disablePrometheus',
'fuse': 'fuse',
'hadoop_config': 'hadoopConfig',
'init_users': 'initUsers',
'job_master': 'jobMaster',
'job_worker': 'jobWorker',
'jvm_options': 'jvmOptions',
'master': 'master',
'properties': 'properties',
'replicas': 'replicas',
'run_as': 'runAs',
'tieredstore': 'tieredstore',
'worker': 'worker'
}
def __init__(self, alluxio_version=None, api_gateway=None, data=None, disable_prometheus=None, fuse=None, hadoop_config=None, init_users=None, job_master=None, job_worker=None, jvm_options=None, master=None, properties=None, replicas=None, run_as=None, tieredstore=None, worker=None): # noqa: E501
"""AlluxioRuntimeSpec - a model defined in Swagger""" # noqa: E501
self._alluxio_version = None
self._api_gateway = None
self._data = None
self._disable_prometheus = None
self._fuse = None
self._hadoop_config = None
self._init_users = None
self._job_master = None
self._job_worker = None
self._jvm_options = None
self._master = None
self._properties = None
self._replicas = None
self._run_as = None
self._tieredstore = None
self._worker = None
self.discriminator = None
if alluxio_version is not None:
self.alluxio_version = alluxio_version
if api_gateway is not None:
self.api_gateway = api_gateway
if data is not None:
self.data = data
if disable_prometheus is not None:
self.disable_prometheus = disable_prometheus
if fuse is not None:
self.fuse = fuse
if hadoop_config is not None:
self.hadoop_config = hadoop_config
if init_users is not None:
self.init_users = init_users
if job_master is not None:
self.job_master = job_master
if job_worker is not None:
self.job_worker = job_worker
if jvm_options is not None:
self.jvm_options = jvm_options
if master is not None:
self.master = master
if properties is not None:
self.properties = properties
if replicas is not None:
self.replicas = replicas
if run_as is not None:
self.run_as = run_as
if tieredstore is not None:
self.tieredstore = tieredstore
if worker is not None:
self.worker = worker
@property
def alluxio_version(self):
"""Gets the alluxio_version of this AlluxioRuntimeSpec. # noqa: E501
The version information that instructs fluid to orchestrate a particular version of Alluxio. # noqa: E501
:return: The alluxio_version of this AlluxioRuntimeSpec. # noqa: E501
:rtype: VersionSpec
"""
return self._alluxio_version
@alluxio_version.setter
def alluxio_version(self, alluxio_version):
"""Sets the alluxio_version of this AlluxioRuntimeSpec.
The version information that instructs fluid to orchestrate a particular version of Alluxio. # noqa: E501
:param alluxio_version: The alluxio_version of this AlluxioRuntimeSpec. # noqa: E501
:type: VersionSpec
"""
self._alluxio_version = alluxio_version
@property
def api_gateway(self):
"""Gets the api_gateway of this AlluxioRuntimeSpec. # noqa: E501
Desired state for Alluxio API Gateway # noqa: E501
:return: The api_gateway of this AlluxioRuntimeSpec. # noqa: E501
:rtype: AlluxioCompTemplateSpec
"""
return self._api_gateway
@api_gateway.setter
def api_gateway(self, api_gateway):
"""Sets the api_gateway of this AlluxioRuntimeSpec.
Desired state for Alluxio API Gateway # noqa: E501
:param api_gateway: The api_gateway of this AlluxioRuntimeSpec. # noqa: E501
:type: AlluxioCompTemplateSpec
"""
self._api_gateway = api_gateway
@property
def data(self):
"""Gets the data of this AlluxioRuntimeSpec. # noqa: E501
Management strategies for the dataset to which the runtime is bound # noqa: E501
:return: The data of this AlluxioRuntimeSpec. # noqa: E501
:rtype: Data
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this AlluxioRuntimeSpec.
Management strategies for the dataset to which the runtime is bound # noqa: E501
:param data: The data of this AlluxioRuntimeSpec. # noqa: E501
:type: Data
"""
self._data = data
@property
def disable_prometheus(self):
"""Gets the disable_prometheus of this AlluxioRuntimeSpec. # noqa: E501
Disable monitoring for Alluxio Runtime. Prometheus is enabled by default # noqa: E501
:return: The disable_prometheus of this AlluxioRuntimeSpec. # noqa: E501
:rtype: bool
"""
return self._disable_prometheus
@disable_prometheus.setter
def disable_prometheus(self, disable_prometheus):
"""Sets the disable_prometheus of this AlluxioRuntimeSpec.
Disable monitoring for Alluxio Runtime. Prometheus is enabled by default # noqa: E501
:param disable_prometheus: The disable_prometheus of this AlluxioRuntimeSpec. # noqa: E501
:type: bool
"""
self._disable_prometheus = disable_prometheus
@property
def fuse(self):
"""Gets the fuse of this AlluxioRuntimeSpec. # noqa: E501
Desired state for Alluxio Fuse # noqa: E501
:return: The fuse of this AlluxioRuntimeSpec. # noqa: E501
:rtype: AlluxioFuseSpec
"""
return self._fuse
@fuse.setter
def fuse(self, fuse):
"""Sets the fuse of this AlluxioRuntimeSpec.
Desired state for Alluxio Fuse # noqa: E501
:param fuse: The fuse of this AlluxioRuntimeSpec. # noqa: E501
:type: AlluxioFuseSpec
"""
self._fuse = fuse
@property
def hadoop_config(self):
"""Gets the hadoop_config of this AlluxioRuntimeSpec. # noqa: E501
Name of the configMap used to support HDFS configurations when using HDFS as Alluxio's UFS. The configMap must be in the same namespace with the AlluxioRuntime. The configMap should contain user-specific HDFS conf files in it. For now, only \"hdfs-site.xml\" and \"core-site.xml\" are supported. It must take the filename of the conf file as the key and content of the file as the value. # noqa: E501
:return: The hadoop_config of this AlluxioRuntimeSpec. # noqa: E501
:rtype: str
"""
return self._hadoop_config
@hadoop_config.setter
def hadoop_config(self, hadoop_config):
"""Sets the hadoop_config of this AlluxioRuntimeSpec.
Name of the configMap used to support HDFS configurations when using HDFS as Alluxio's UFS. The configMap must be in the same namespace with the AlluxioRuntime. The configMap should contain user-specific HDFS conf files in it. For now, only \"hdfs-site.xml\" and \"core-site.xml\" are supported. It must take the filename of the conf file as the key and content of the file as the value. # noqa: E501
:param hadoop_config: The hadoop_config of this AlluxioRuntimeSpec. # noqa: E501
:type: str
"""
self._hadoop_config = hadoop_config
@property
def init_users(self):
"""Gets the init_users of this AlluxioRuntimeSpec. # noqa: E501
The spec of init users # noqa: E501
:return: The init_users of this AlluxioRuntimeSpec. # noqa: E501
:rtype: InitUsersSpec
"""
return self._init_users
@init_users.setter
def init_users(self, init_users):
"""Sets the init_users of this AlluxioRuntimeSpec.
The spec of init users # noqa: E501
:param init_users: The init_users of this AlluxioRuntimeSpec. # noqa: E501
:type: InitUsersSpec
"""
self._init_users = init_users
@property
def job_master(self):
"""Gets the job_master of this AlluxioRuntimeSpec. # noqa: E501
Desired state for Alluxio job master # noqa: E501
:return: The job_master of this AlluxioRuntimeSpec. # noqa: E501
:rtype: AlluxioCompTemplateSpec
"""
return self._job_master
@job_master.setter
def job_master(self, job_master):
"""Sets the job_master of this AlluxioRuntimeSpec.
Desired state for Alluxio job master # noqa: E501
:param job_master: The job_master of this AlluxioRuntimeSpec. # noqa: E501
:type: AlluxioCompTemplateSpec
"""
self._job_master = job_master
@property
def job_worker(self):
"""Gets the job_worker of this AlluxioRuntimeSpec. # noqa: E501
Desired state for Alluxio job Worker # noqa: E501
:return: The job_worker of this AlluxioRuntimeSpec. # noqa: E501
:rtype: AlluxioCompTemplateSpec
"""
return self._job_worker
@job_worker.setter
def job_worker(self, job_worker):
"""Sets the job_worker of this AlluxioRuntimeSpec.
Desired state for Alluxio job Worker # noqa: E501
:param job_worker: The job_worker of this AlluxioRuntimeSpec. # noqa: E501
:type: AlluxioCompTemplateSpec
"""
self._job_worker = job_worker
@property
def jvm_options(self):
"""Gets the jvm_options of this AlluxioRuntimeSpec. # noqa: E501
Options for JVM # noqa: E501
:return: The jvm_options of this AlluxioRuntimeSpec. # noqa: E501
:rtype: list[str]
"""
return self._jvm_options
@jvm_options.setter
def jvm_options(self, jvm_options):
"""Sets the jvm_options of this AlluxioRuntimeSpec.
Options for JVM # noqa: E501
:param jvm_options: The jvm_options of this AlluxioRuntimeSpec. # noqa: E501
:type: list[str]
"""
self._jvm_options = jvm_options
@property
def master(self):
"""Gets the master of this AlluxioRuntimeSpec. # noqa: E501
Desired state for Alluxio master # noqa: E501
:return: The master of this AlluxioRuntimeSpec. # noqa: E501
:rtype: AlluxioCompTemplateSpec
"""
return self._master
@master.setter
def master(self, master):
"""Sets the master of this AlluxioRuntimeSpec.
Desired state for Alluxio master # noqa: E501
:param master: The master of this AlluxioRuntimeSpec. # noqa: E501
:type: AlluxioCompTemplateSpec
"""
self._master = master
@property
def properties(self):
"""Gets the properties of this AlluxioRuntimeSpec. # noqa: E501
Configurable properties for Alluxio system. <br> Refer to <a href=\"https://docs.alluxio.io/os/user/stable/en/reference/Properties-List.html\">Alluxio Configuration Properties</a> for more info # noqa: E501
:return: The properties of this AlluxioRuntimeSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this AlluxioRuntimeSpec.
Configurable properties for Alluxio system. <br> Refer to <a href=\"https://docs.alluxio.io/os/user/stable/en/reference/Properties-List.html\">Alluxio Configuration Properties</a> for more info # noqa: E501
:param properties: The properties of this AlluxioRuntimeSpec. # noqa: E501
:type: dict(str, str)
"""
self._properties = properties
@property
def replicas(self):
"""Gets the replicas of this AlluxioRuntimeSpec. # noqa: E501
The replicas of the worker, need to be specified # noqa: E501
:return: The replicas of this AlluxioRuntimeSpec. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this AlluxioRuntimeSpec.
The replicas of the worker, need to be specified # noqa: E501
:param replicas: The replicas of this AlluxioRuntimeSpec. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def run_as(self):
"""Gets the run_as of this AlluxioRuntimeSpec. # noqa: E501
Manage the user to run Alluxio Runtime # noqa: E501
:return: The run_as of this AlluxioRuntimeSpec. # noqa: E501
:rtype: User
"""
return self._run_as
@run_as.setter
def run_as(self, run_as):
"""Sets the run_as of this AlluxioRuntimeSpec.
Manage the user to run Alluxio Runtime # noqa: E501
:param run_as: The run_as of this AlluxioRuntimeSpec. # noqa: E501
:type: User
"""
self._run_as = run_as
@property
def tieredstore(self):
"""Gets the tieredstore of this AlluxioRuntimeSpec. # noqa: E501
Tiered storage used by Alluxio # noqa: E501
:return: The tieredstore of this AlluxioRuntimeSpec. # noqa: E501
:rtype: Tieredstore
"""
return self._tieredstore
@tieredstore.setter
def tieredstore(self, tieredstore):
"""Sets the tieredstore of this AlluxioRuntimeSpec.
Tiered storage used by Alluxio # noqa: E501
:param tieredstore: The tieredstore of this AlluxioRuntimeSpec. # noqa: E501
:type: Tieredstore
"""
self._tieredstore = tieredstore
@property
def worker(self):
"""Gets the worker of this AlluxioRuntimeSpec. # noqa: E501
Desired state for Alluxio worker # noqa: E501
:return: The worker of this AlluxioRuntimeSpec. # noqa: E501
:rtype: AlluxioCompTemplateSpec
"""
return self._worker
@worker.setter
def worker(self, worker):
"""Sets the worker of this AlluxioRuntimeSpec.
Desired state for Alluxio worker # noqa: E501
:param worker: The worker of this AlluxioRuntimeSpec. # noqa: E501
:type: AlluxioCompTemplateSpec
"""
self._worker = worker
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AlluxioRuntimeSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AlluxioRuntimeSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.554688
| 2
|
custom_components/bhyve/util.py
|
jdreamerz/bhyve-home-assistant
| 122
|
12776468
|
from homeassistant.util import dt
def orbit_time_to_local_time(timestamp: str):
if timestamp is not None:
return dt.as_local(dt.parse_datetime(timestamp))
return None
def anonymize(device):
device["address"] = "REDACTED"
device["full_location"] = "REDACTED"
device["location"] = "REDACTED"
return device
| 2.796875
| 3
|
Tracker/oauth_appengine.py
|
stvnrhodes/calsol
| 0
|
12776469
|
<filename>Tracker/oauth_appengine.py
#!/usr/bin/env python
# Copyright 2009 <NAME>
# Copyright 2009 Google
"""
An appengine OAuthClient based on the oauth-python reference implementation.
"""
import oauth
from google.appengine.api import urlfetch
from google.appengine.ext import db
class OAuthClient(oauth.OAuthClient):
"""A worker to attempt to execute a request (on appengine)."""
def __init__(self, oauth_consumer, oauth_token, request_token_url='',
access_token_url='', authorization_url=''):
super(OAuthClient, self).__init__(oauth_consumer, oauth_token)
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorization_url = authorization_url
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
# Using headers or payload varies by service...
response = urlfetch.fetch(
url=self.request_token_url,
method=oauth_request.http_method,
#headers=oauth_request.to_header(),
payload=oauth_request.to_postdata())
return oauth.OAuthToken.from_string(response.content)
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
response = urlfetch.fetch(
url=self.access_token_url,
method=oauth_request.http_method,
headers=oauth_request.to_header())
return oauth.OAuthToken.from_string(response.content)
def access_resource(self, oauth_request, deadline=None):
"""-> Some protected resource."""
if oauth_request.http_method == 'GET':
url = oauth_request.to_url()
return urlfetch.fetch(
url=url,
method=oauth_request.http_method)
else:
payload = oauth_request.to_postdata()
return urlfetch.fetch(
url=oauth_request.get_normalized_http_url(),
method=oauth_request.http_method,
payload=payload)
class OAuthDanceHelper(object):
def __init__(self, oauth_client):
self.oauth_client = oauth_client
def GetRequestToken(self, callback, parameters=None):
"""Gets a request token from an OAuth provider."""
request_token_request = oauth.OAuthRequest.from_consumer_and_token(
self.oauth_client.get_consumer(),
token=None,
callback=callback,
http_method='POST',
http_url=self.oauth_client.request_token_url,
parameters=parameters)
# Request a token that we can use to redirect the user to an auth url.
request_token_request.sign_request(
self.oauth_client.signature_method,
self.oauth_client.get_consumer(),
None)
return self.oauth_client.fetch_request_token(request_token_request)
def GetAuthorizationRedirectUrl(self, request_token, parameters=None):
"""Gets the redirection URL for the OAuth authorization page."""
authorization_request = oauth.OAuthRequest.from_token_and_callback(
request_token,
http_method='GET',
http_url=self.oauth_client.authorization_url,
parameters=parameters)
return authorization_request.to_url()
def GetAccessToken(self, request_token, verifier):
"""Upgrades a request token to an access token."""
access_request = oauth.OAuthRequest.from_consumer_and_token(
self.oauth_client.get_consumer(),
token=request_token,
verifier=verifier,
http_url=self.oauth_client.access_token_url)
access_request.sign_request(
self.oauth_client.signature_method,
self.oauth_client.get_consumer(),
request_token)
return self.oauth_client.fetch_access_token(access_request)
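# Minimal sketch of the three-legged OAuth dance using the helper above
# (URLs and the verifier handling are placeholders, not from the original
# module):
#     client = OAuthClient(consumer, None,
#                          request_token_url='https://provider/request_token',
#                          access_token_url='https://provider/access_token',
#                          authorization_url='https://provider/authorize')
#     helper = OAuthDanceHelper(client)
#     request_token = helper.GetRequestToken('https://myapp/callback')
#     redirect_url = helper.GetAuthorizationRedirectUrl(request_token)
#     # ...redirect the user, then read oauth_verifier from the callback...
#     access_token = helper.GetAccessToken(request_token, verifier)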
| 2.9375
| 3
|
examples/push_dataset.py
|
Sage-Bionetworks/nlp-sandbox-client
| 3
|
12776470
|
"""
Example code to push a dataset into the data node. A complete
dataset includes "Dataset", "Fhir Store", "Annotation Store",
"Annotation", "Patient", "Note"
To run this code, here are the requirements:
- Install the nlpsandbox-client (`pip install nlpsandbox-client`)
- Start the Data Node locally - Follow instructions here:
https://github.com/nlpsandbox/data-node
- python push_dataset.py
"""
import json
import nlpsandbox
import nlpsandbox.apis
import nlpsandbox.models
from nlpsandbox.rest import ApiException
import nlpsandboxclient.utils
# Defining the host is optional and defaults to http://example.com/api/v1
# See configuration.py for a list of all supported configuration parameters.
host = "http://localhost:8080/api/v1"
configuration = nlpsandbox.Configuration(host=host)
dataset_id = 'test-dataset'
fhir_store_id = 'evaluation'
annotation_store_id = 'goldstandard'
json_filename = "example-patient-bundles.json"
with nlpsandbox.ApiClient(configuration) as api_client:
dataset_api = nlpsandbox.apis.DatasetApi(api_client)
fhir_store_api = nlpsandbox.apis.FhirStoreApi(api_client)
annotation_store_api = nlpsandbox.apis.AnnotationStoreApi(api_client)
patient_api = nlpsandbox.apis.PatientApi(api_client)
note_api = nlpsandbox.apis.NoteApi(api_client)
annotation_api = nlpsandbox.apis.AnnotationApi(api_client)
# The example is always deleted
try:
# get the dataset
dataset = dataset_api.get_dataset(dataset_id)
# delete the dataset
print(f"Deleting existing dataset: {dataset_id}")
dataset_api.delete_dataset(dataset_id)
except ApiException:
pass
# create dataset if not found
print(f"Creating dataset: {dataset_id}")
dataset = dataset_api.create_dataset(
dataset_id,
body={}
)
print(f"Creating Fhir Store: {fhir_store_id}")
fhir_store = fhir_store_api.create_fhir_store(
dataset_id, fhir_store_id,
body={}
)
print(f"Creating Annotation Store: {annotation_store_id}")
annotation_store = annotation_store_api.create_annotation_store(
dataset_id, annotation_store_id,
body={}
)
with open(json_filename) as f:
data = json.load(f)
patient_bundles = data['patient_bundles']
for patient_bundle in patient_bundles:
# Create or get a FHIR Patient
patient = nlpsandboxclient.utils.change_keys(
patient_bundle['patient'],
nlpsandboxclient.utils.camelcase_to_snakecase
)
patient_id = patient.pop("identifier")
print(f"Creating patient: {patient_id}")
patient_api.create_patient(
dataset_id, fhir_store_id, patient_id,
patient_create_request=patient
)
# Create the Note and Annotation objects linked to the patient
note_bundles = patient_bundle['note_bundles']
for note_bundle in note_bundles:
# Determine note Id since noteId isn't part of the 'note'
annotation = note_bundle['annotation']
note_ids = set()
# Loop through annotations to get noteId
for key, value in annotation.items():
if key.startswith("text"):
for annot in value:
note_ids.add(annot['noteId'])
assert len(note_ids) == 1, "Must only have one noteId"
note_id = list(note_ids)[0]
# Create Note
note = nlpsandboxclient.utils.change_keys(
note_bundle['note'],
nlpsandboxclient.utils.camelcase_to_snakecase
)
note['patient_id'] = patient_id
print(f"Creating note ({note_id}) for patient ({patient_id})")
note_api.create_note(
dataset_id, fhir_store_id, note_id,
note_create_request=note
)
# Create annotation
annotation['annotationSource']['resourceSource']['name'] = \
"{fhir_store_name}/fhir/Note/{note_id}".format(
fhir_store_name=fhir_store.name,
note_id=note_id
)
new_annotation = nlpsandboxclient.utils.change_keys(
annotation,
nlpsandboxclient.utils.camelcase_to_snakecase
)
print(f"Creating annotation for note: {note_id}")
annotation = annotation_api.create_annotation(
dataset_id, annotation_store_id,
annotation_id=note_id,
annotation_create_request=new_annotation
)
| 2.828125
| 3
|
alerter/test/channels_manager/channels/test_pagerduty.py
|
SimplyVC/panic
| 41
|
12776471
|
<filename>alerter/test/channels_manager/channels/test_pagerduty.py
import logging
import unittest
from unittest import mock
from src.alerter.alerts.system_alerts import (
OpenFileDescriptorsIncreasedAboveThresholdAlert)
from src.channels_manager.apis.pagerduty_api import PagerDutyApi
from src.channels_manager.channels import PagerDutyChannel
from src.utils.data import RequestStatus
from src.utils.types import PagerDutySeverities
class TestPagerDutyChannel(unittest.TestCase):
def setUp(self) -> None:
self.test_channel_name = 'test_pagerduty_channel'
self.test_channel_id = 'test_pagerduty_id12345'
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.test_integration_key = 'test_integration_key'
self.test_pagerduty_api = PagerDutyApi(self.test_integration_key)
self.test_pagerduty_channel = PagerDutyChannel(self.test_channel_name,
self.test_channel_id,
self.dummy_logger,
self.test_pagerduty_api)
self.test_system_name = 'test_system'
self.test_percentage_usage = 50
self.test_panic_severity = 'WARNING'
self.test_last_monitored = 45
self.test_parent_id = 'parent_1234'
self.test_system_id = 'system_id32423'
self.test_alert = OpenFileDescriptorsIncreasedAboveThresholdAlert(
self.test_system_name, self.test_percentage_usage,
self.test_panic_severity, self.test_last_monitored,
self.test_panic_severity, self.test_parent_id, self.test_system_id
)
def tearDown(self) -> None:
self.dummy_logger = None
self.test_pagerduty_api = None
self.test_pagerduty_channel = None
self.test_alert = None
def test__str__returns_channel_name(self) -> None:
self.assertEqual(self.test_channel_name,
str(self.test_pagerduty_channel))
def test_channel_name_returns_channel_name(self) -> None:
self.assertEqual(self.test_channel_name,
self.test_pagerduty_channel.channel_name)
def test_channel_id_returns_channel_id(self) -> None:
self.assertEqual(self.test_channel_id,
self.test_pagerduty_channel.channel_id)
def test_logger_returns_logger(self) -> None:
self.assertEqual(self.dummy_logger, self.test_pagerduty_channel.logger)
def test__init__initialised_pagerduty_api_correctly(self) -> None:
self.assertEqual(self.test_pagerduty_api.__dict__,
self.test_pagerduty_channel._pager_duty_api.__dict__)
@mock.patch.object(PagerDutyApi, "trigger")
def test_alert_triggers_an_alert_correctly(self, mock_trigger) -> None:
# In this test we will check that PagerDutyApi.trigger() is called
# with the correct parameters.
self.test_pagerduty_channel.alert(self.test_alert)
mock_trigger.assert_called_once_with(
self.test_alert.message, PagerDutySeverities.WARNING,
self.test_alert.origin_id, self.test_alert.timestamp)
@mock.patch.object(PagerDutyApi, "trigger")
def test_alert_returns_success_if_trigger_request_successful(
self, mock_trigger) -> None:
mock_trigger.return_value = None
actual_ret = self.test_pagerduty_channel.alert(self.test_alert)
self.assertEqual(RequestStatus.SUCCESS, actual_ret)
@mock.patch.object(PagerDutyApi, "trigger")
def test_alert_returns_failed_if_trigger_request_unsuccessful(
self, mock_trigger) -> None:
mock_trigger.side_effect = Exception('test')
actual_ret = self.test_pagerduty_channel.alert(self.test_alert)
self.assertEqual(RequestStatus.FAILED, actual_ret)
| 2.171875
| 2
|
utool/util_sqlite.py
|
Erotemic/utool
| 8
|
12776472
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from utool import util_inject
import six
import collections
print, rrr, profile = util_inject.inject2(__name__)
def get_tablenames(cur):
""" Conveinience: """
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
tablename_list_ = cur.fetchall()
tablename_list = [str(tablename[0]) for tablename in tablename_list_ ]
return tablename_list
SQLColumnRichInfo = collections.namedtuple('SQLColumnRichInfo', ('column_id', 'name', 'type_', 'notnull', 'dflt_value', 'pk'))
def get_table_columns(cur, tablename, exclude_columns=[]):
import utool as ut
colnames_ = ut.get_table_columnname_list(cur, tablename)
colnames = tuple([colname for colname in colnames_ if colname not in exclude_columns])
row_list = ut.get_table_rows(cur, tablename, colnames, unpack=False)
column_list = zip(*row_list)
return column_list
def get_table_csv(cur, tablename, exclude_columns=[]):
""" Conveinience: Converts a tablename to csv format
Args:
tablename (str):
exclude_columns (list):
Returns:
str: csv_table
CommandLine:
python -m ibeis.control.SQLDatabaseControl --test-get_table_csv
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis.control.SQLDatabaseControl import * # NOQA
>>> # build test data
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> db = ibs.db
>>> tablename = ibeis.const.NAME_TABLE
>>> exclude_columns = []
>>> # execute function
>>> csv_table = db.get_table_csv(tablename, exclude_columns)
>>> # verify results
>>> result = str(csv_table)
>>> print(result)
"""
import utool as ut
colnames_ = ut.get_table_columnname_list(cur, tablename)
colnames = tuple([colname for colname in colnames_ if colname not in exclude_columns])
row_list = ut.get_table_rows(cur, tablename, colnames, unpack=False)
column_list = zip(*row_list)
#=None, column_list=[], header='', column_type=None
#import utool as ut
#column_list, column_names = db.get_table_column_data(tablename, exclude_columns)
# remove column prefix for more compact csvs
column_lbls = [name.replace(tablename[:-1] + '_', '') for name in colnames]
#header = db.get_table_csv_header(tablename)
header = ''
csv_table = ut.make_csv_table(column_list, column_lbls, header)
return csv_table
def get_table_columnname_list(cur, tablename):
colinfo_list_ = get_table_columninfo_list(cur, tablename)
return [info[1] for info in colinfo_list_]
def get_table_columninfo_list(cur, tablename):
"""
Args:
tablename (str): table name
Returns:
column_list : list of tuples with format:
(
[0] column_id : id of the column
[1] name : the name of the column
[2] type_ : the type of the column (TEXT, INT, etc...)
[3] notnull : 1 if the column has a NOT NULL constraint, otherwise 0
[4] dflt_value : the default value
[5] pk : 1 if the column is part of the primary key, otherwise 0
)
References:
http://stackoverflow.com/questions/17717829/how-to-get-column-names-from-a-table-in-sqlite-via-pragma-net-c
CommandLine:
python -m utool.util_sqlite --test-get_table_columninfo_list
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sqlite import * # NOQA
"""
cur.execute('PRAGMA TABLE_INFO("{tablename}")'.format(tablename=tablename))
colinfo_list = cur.fetchall()
colrichinfo_list = [SQLColumnRichInfo(*colinfo) for colinfo in colinfo_list]
return colrichinfo_list
def get_primary_columninfo(cur, tablename):
colinfo_list_ = get_table_columninfo_list(cur, tablename)
colinfo_list = [colinfo for colinfo in colinfo_list_ if colinfo.pk]
return colinfo_list
def get_nonprimary_columninfo(cur, tablename):
colinfo_list_ = get_table_columninfo_list(cur, tablename)
colinfo_list = [colinfo for colinfo in colinfo_list_ if not colinfo.pk]
return colinfo_list
def get_table_num_rows(cur, tablename):
cur.execute('SELECT COUNT(*) FROM {tablename}'.format(tablename=tablename))
num_rows = cur.fetchall()[0][0]
return num_rows
def get_table_column(cur, tablename, colname):
""" Conveinience: """
return get_table_rows(cur, tablename, colname)
def get_table_rows(cur, tablename, colnames, where=None, params=None, unpack=True):
import utool as ut
want_single_column = isinstance(colnames, six.string_types)
want_single_param = params is not None and not ut.isiterable(params)
#isinstance(params, six.string_types)
if want_single_column:
colnames = (colnames,)
if colnames is not None and colnames != '*':
assert isinstance(colnames, tuple), 'colnames must be a tuple'
colnames_str = ', '.join(colnames)
else:
colnames_str = '*'
#if isinstance(colnames, six.string_types):
# colnames = (colnames,)
fmtdict = {
'tablename' : tablename,
'colnames' : colnames_str,
'orderby' : '',
}
#ORDER BY rowid ASC
if where is None:
operation_fmt = '''
SELECT {colnames}
FROM {tablename}
{orderby}
'''
else:
fmtdict['where_clause'] = where
operation_fmt = '''
SELECT {colnames}
FROM {tablename}
WHERE {where_clause}
{orderby}
'''
operation_str = operation_fmt.format(**fmtdict)
if params is None:
cur.execute(operation_str)
val_list = cur.fetchall()
elif want_single_param:
cur.execute(operation_str, (params,))
val_list = cur.fetchall()
else:
# Execute many
def executemany_scalar_generator(operation_str, params):
for param in params:
cur.execute(operation_str, param)
vals = cur.fetchall()
#assert len(vals) == 1, 'vals=%r, len(vals)=%r' % (vals, len(vals))
yield vals
val_list = list(executemany_scalar_generator(operation_str, params))
if unpack:
if want_single_column:
# want a single value per parameter
val_list = [val[0] for val in val_list]
if want_single_param:
# wants a single parameter
assert len(val_list) == 1
val_list = val_list[0]
return val_list
def print_database_structure(cur):
import utool as ut
tablename_list = ut.get_tablenames(cur)
colinfos_list = [ut.get_table_columninfo_list(cur, tablename) for tablename in tablename_list]
numrows_list = [ut.get_table_num_rows(cur, tablename) for tablename in tablename_list]
for tablename, colinfo_list, num_rows in ut.sortedby(list(zip(tablename_list, colinfos_list, numrows_list)), numrows_list):
print('+-------------')
print('tablename = %r' % (tablename,))
print('num_rows = %r' % (num_rows,))
#print(ut.repr4(colinfo_list))
print(ut.repr4(ut.get_primary_columninfo(cur, tablename)))
print(ut.repr4(ut.get_nonprimary_columninfo(cur, tablename)))
print('+-------------')
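# Hedged usage sketch (added for illustration, not part of the original module):
# exercise the helpers above against an in-memory SQLite database.
def _demo_usage():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute('CREATE TABLE names (name_rowid INTEGER PRIMARY KEY, name_text TEXT)')
    cur.executemany('INSERT INTO names (name_text) VALUES (?)', [('zebra',), ('aardvark',)])
    conn.commit()
    print(get_tablenames(cur))                      # ['names']
    print(get_table_num_rows(cur, 'names'))         # 2
    print(get_table_columnname_list(cur, 'names'))  # ['name_rowid', 'name_text']
    print(get_primary_columninfo(cur, 'names'))     # rich info for the primary key column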
| 2.90625
| 3
|
mowl/projection/catont/model.py
|
bio-ontology-research-group/OntoML
| 0
|
12776473
|
from org.mowl.CatParser import CatParser
import sys
from mowl.graph.graph import GraphGenModel
class CatOnt(GraphGenModel):
def __init__(self, dataset, subclass = True, relations = False):
super().__init__(dataset)
self.parser = CatParser(dataset.ontology)
def parseOWL(self):
edges = self.parser.parse()
return edges
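# Hypothetical usage sketch (added for illustration; assumes `my_dataset` is any
# object exposing an `ontology` attribute that CatParser understands):
#
#   edges = CatOnt(my_dataset).parseOWL()
#   for edge in edges:
#       print(edge)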
| 2.484375
| 2
|
cx_Freeze/initscripts/SharedLib.py
|
TechnicalPirate/cx_Freeze
| 358
|
12776474
|
"""
Initialization script for cx_Freeze which behaves similarly to the one for
console based applications but must handle the case where Python has already
been initialized and another DLL of this kind has been loaded. As such it
only truncates sys.path when sys.frozen is not already set.
"""
import sys
if not hasattr(sys, "frozen"):
sys.frozen = True
sys.path = sys.path[:4]
def run():
pass
| 2.453125
| 2
|
src/evaluating_rewards/scripts/pipeline/train_experts.py
|
HumanCompatibleAI/evaluating_rewards
| 42
|
12776475
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line script to train expert policies.
Picks best seed of train_rl for each (environment, reward) pair specified.
"""
import math
import os
from typing import Any, Mapping, Optional
from imitation.util import util
import numpy as np
import ray
import sacred
import tabulate
from evaluating_rewards import serialize
from evaluating_rewards.experiments import env_rewards
from evaluating_rewards.scripts import rl_common, script_utils
experts_ex = sacred.Experiment("train_experts")
rl_common.make_config(experts_ex)
@experts_ex.config
def default_config():
"""Default configuration."""
log_root = serialize.get_output_dir() # where results are written to
configs = {}
run_tag = "default"
_ = locals()
del _
@experts_ex.config
def default_env_rewards(configs):
"""Set default env-reward pair in `configs` entry if it is empty.
This is needed since if we were to define it in `default_config` it would be impossible
to delete it given how Sacred dictionary merging works.
"""
if not configs:
configs = { # noqa: F401
"evaluating_rewards/PointMassLine-v0": {
"evaluating_rewards/PointMassGroundTruth-v0": {"dummy": {}}
},
}
@experts_ex.config
def logging_config(log_root, run_tag):
"""Logging configuration: timestamp plus unique UUID."""
log_dir = os.path.join(log_root, "train_experts", run_tag, util.make_unique_timestamp())
_ = locals()
del _
def _make_ground_truth_configs():
"""Ground truth configs.
Separate function to avoid polluting Sacred ConfigScope with local variables."""
configs = {}
for env, gt_reward in env_rewards.GROUND_TRUTH_REWARDS_BY_ENV.items():
cfg = rl_common.CONFIG_BY_ENV.get(env, {})
configs.setdefault(env, {}).setdefault(str(gt_reward), {})["dummy"] = cfg
return configs
@experts_ex.named_config
def ground_truth():
"""Train RL expert on all configured environments with the ground-truth reward."""
configs = _make_ground_truth_configs()
run_tag = "ground_truth"
_ = locals()
del _
@experts_ex.named_config
def point_maze_wrong_target():
"""Train RL policies on a "wrong" reward in PointMaze to get a bad visitation distribution."""
configs = {
env: {
"evaluating_rewards/PointMazeWrongTargetWithCtrl-v0": {
"dummy": dict(rl_common.CONFIG_BY_ENV[env])
}
}
for env in ("imitation/PointMazeLeftVel-v0", "imitation/PointMazeRightVel-v0")
}
run_tag = "point_maze_wrong_target"
_ = locals()
del _
@experts_ex.named_config
def test():
"""Unit test config."""
locals().update(**rl_common.FAST_CONFIG)
configs = {
"evaluating_rewards/PointMassLine-v0": {
"evaluating_rewards/PointMassGroundTruth-v0": {"dummy": {}},
}
}
run_tag = "test"
_ = locals()
del _
def _filter_key(k: str) -> Optional[str]:
"""Returns None if key k should be omitted; otherwise returns the (possibly modified) key."""
if k.startswith("return_"):
return None
elif k.endswith("_max") or k.endswith("_min"):
return None
else:
k = k.replace("monitor_return", "mr")
k = k.replace("wrapped_return", "wr")
return k
def tabulate_stats(stats: rl_common.Stats) -> str:
"""Pretty-prints the statistics in `stats` in a table."""
res = []
for (env_name, (reward_type, reward_path)), vs in stats.items():
for seed, (x, _log_dir) in enumerate(vs):
row = {
"env_name": env_name,
"reward_type": reward_type,
"reward_path": reward_path,
"seed": seed,
}
row.update(x)
filtered_row = {}
for k, v in row.items():
if k.endswith("_std"):
k = k[:-4] + "_se"
v = v / math.sqrt(row["n_traj"])
new_k = _filter_key(k)
if new_k is not None:
filtered_row[new_k] = v
res.append(filtered_row)
return tabulate.tabulate(res, headers="keys")
def select_best(stats: rl_common.Stats, log_dir: str) -> None:
"""Pick the best seed for each environment-reward pair in `stats`.
Concretely, chooses the seed with highest mean return, and:
- Adds a symlink `best` in the same directory as the seeds;
- Adds a key "best" that is `True` for the winning seed and `False` otherwise.
Note this modifies `stats` in-place.
For experiments where `reward_type` is not `None` (i.e. we are using a wrapped reward),
uses `wrapped_return_mean` for selection. Otherwise, uses `monitor_return_mean` (the
environment ground-truth return).
Args:
stats: The statistics to select the best seed from. Note this is modified in-place.
log_dir: The log directory for this experiment.
"""
for key, single_stats in stats.items():
env_name, (reward_type, reward_path) = key
return_key = "wrapped_return_mean" if reward_type else "monitor_return_mean"
threshold = env_rewards.THRESHOLDS.get(key, -np.inf)
returns = [x[return_key] for x, _log in single_stats]
best_seed = np.argmax(returns)
base_dir = os.path.join(
log_dir,
script_utils.sanitize_path(env_name),
script_utils.sanitize_path(reward_type),
script_utils.sanitize_path(reward_path),
)
# make symlink relative so it'll work even if directory structure is copied/moved
os.symlink(str(best_seed), os.path.join(base_dir, "best"))
for v, _log in single_stats:
v["pass"] = v[return_key] > threshold
v["best"] = False
best_v, _best_log = single_stats[best_seed]
best_v["best"] = True
if not best_v["pass"]:
print(
f"WARNING: ({env_name}, {reward_type}, {reward_path}) did not meet threshold: "
f"{best_v[return_key]} < {threshold}"
)
@experts_ex.main
def train_experts(
ray_kwargs: Mapping[str, Any],
num_cpus_fudge_factor: float,
global_configs: Mapping[str, Any],
configs: Mapping[str, Mapping[str, Mapping[str, Any]]],
log_dir: str,
) -> rl_common.Stats:
"""Entry-point into script to train expert policies specified by config.
Args:
ray_kwargs: arguments passed to `ray.init`.
num_cpus_fudge_factor: factor by which to scale `num_vec` to compute CPU requirements.
global_configs: configuration to apply to all environment-reward pairs.
configs: configuration for each environment-reward pair.
log_dir: the root directory to log experiments to.
Returns:
Statistics `stats` for all policies, where
`stats[(env_name, (reward_type, reward_path))][i]`
are the statistics for seed `i` of the given environment and reward pair.
"""
ray.init(**ray_kwargs)
try:
stats = rl_common.parallel_training(global_configs, configs, num_cpus_fudge_factor, log_dir)
select_best(stats, log_dir)
finally:
ray.shutdown()
print(tabulate_stats(stats))
return stats
if __name__ == "__main__":
script_utils.experiment_main(experts_ex, "train_experts")
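# Hedged usage sketch (the module path below is an assumption based on the file
# location, not something stated in this file). Sacred experiments of this shape
# are normally launched from the command line with a named config, e.g.:
#   python -m evaluating_rewards.scripts.pipeline.train_experts with ground_truth
#   python -m evaluating_rewards.scripts.pipeline.train_experts with test log_root=/tmp/experts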
| 1.96875
| 2
|
applications/AE2/controllers/default.py
|
Mexarm/web2pytests
| 0
|
12776476
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - api is an example of Hypermedia API support and access control
#########################################################################
def index():
redirect(URL('jobs'))
return
@auth.requires_membership('administradores')
def jobs():
"""
List of jobs
"""
rows = db(db.job).select(orderby=db.job.created_on)
return locals()
@auth.requires_membership('administradores')
def job():
thisjob=db.job(request.args(0,cast=int))
tasks=db(db.scheduler_task.id.belongs( thisjob.tasks)).select() if thisjob.tasks else None
record_count=db(db.registro.job==thisjob.id).count()
return locals()
@auth.requires_membership('administradores')
def create_job():
form=SQLFORM(db.job).process()
if form.accepted:
session.flash='Job Guardado!'
redirect(URL('jobs'))
elif form.errors:
response.flash='Por favor revise la forma'
return locals()
@auth.requires_membership('administradores')
def upload_records():
table =request.args(0)
jobid=request.args(1,cast=int)
db[table].job.default=jobid
job = db.job(jobid)
form = FORM(INPUT(_type='file', _name='data'), INPUT(_type='submit'))
if form.process().accepted:
ufile = db.registro.job.store(form.vars.data.file,form.vars.data.filename)
ret = scheduler.queue_task(upload_file,pvars=dict(jobid=jobid,tablename=table,csvfile=ufile),timeout=3600)
tasks = job.tasks + [ret.id] if job.tasks else [ret.id]
db(db.job.id==jobid).update(tasks=tasks)
session.flash = 'Upload Task Created for file: ' + form.vars.data.filename
redirect(URL('job',args=jobid))
return locals()
@auth.requires_membership('administradores')
def comunicados():
jobid=request.args(0,cast=int)
__populate_comunicado_for_job(jobid)
rows = db(db.comunicacion.job==jobid).select()
rowcount = len(rows)
return locals()
@auth.requires_membership('administradores')
def mensajes():
jobid=request.args(0,cast=int)
rows = db(db.mensaje.job==jobid).select()
rowcount=len(rows)
return locals()
def asignar():
cid=request.args(0,cast=int)
#comunicacion = db(db.comunicacion.id==cid).select()[0]
comunicacion = db.comunicacion(cid)
jobid=comunicacion.job
thisjobcids=[ r.id for r in db(db.comunicacion.job==comunicacion.job).select() ]
cidprev=thisjobcids[0]
cidnext=thisjobcids[-1]
nombreprev=''
nombrenext=''
if cid - 1 in thisjobcids:
cidprev = cid-1
nombreprev = db.comunicacion[cidprev].nombre
if cid + 1 in thisjobcids:
cidnext = cid+1
nombrenext = db.comunicacion[cidnext].nombre
i = thisjobcids.index(comunicacion.id)+1
total = len (thisjobcids)
typecodes = [ '10-BIENVENIDA','20-CUERPO','30-ANIVERSARIO','40-DESPEDIDA' ]
msj = [ (q,q.split('-')[1],db((db.mensaje.job==jobid) & (db.mensaje.typecode == q)).select()) for q in typecodes ]
components=[]
defoption1 = [OPTION('(vacio)', _value='vacio')]
defoption1 += [OPTION('(condicion)', _value='condicion')]
for t in msj:
rows = t[2]
components.append(t[1])
components.append(LI(SELECT(_name=t[1],*(defoption1+[OPTION(j.descripcion, _value=str(j.id)) for j in rows]))))
components.append(INPUT(_name='expresion_' + t[1]))
components.append(XML("<br>"))
form = FORM (INPUT(_type='submit'),XML("<br><br>"),
*components,
_method='post',
_action='')
if form.accepts(request,session):
for t in msj:
ci=form.vars[t[1]]
if not (ci in ['vacio','condicion']):
db.comunicacion_y_mensaje.insert(comunicacion=comunicacion,mensaje=int(form.vars[t[1]]))
elif form.vars[t[1]] == 'condicion':
db.comunicacion_y_mensaje.insert(condicional=True,condicion=form.vars['expresion_'+t[1]],
comunicacion=comunicacion)
#db.comunicacion_y_mensaje.insert(comunicacion=comunicacion,mensaje=int(form.vars.BIENVENIDA))
#db.comunicacion_y_mensaje.insert(comunicacion=comunicacion,mensaje=int(form.vars.CUERPO))
#db.comunicacion_y_mensaje.insert(comunicacion=comunicacion,mensaje=int(form.vars.ANIVERSARIO))
#db.comunicacion_y_mensaje.insert(comunicacion=comunicacion,mensaje=int(form.vars.DESPEDIDA))
response.flash = 'guardado'
elif form.errors:
response.flash = 'Verifique los campos'
return locals()
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/manage_users (requires membership in
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
@auth.requires_login()
def api():
"""
this is example of API with access control
WEB2PY provides Hypermedia API (Collection+JSON) Experimental
"""
from gluon.contrib.hypermedia import Collection
rules = {
'<tablename>': {'GET':{},'POST':{},'PUT':{},'DELETE':{}},
}
return Collection(db).process(request,response,rules)
| 2.390625
| 2
|
solutions_2018/day8.py
|
EpicWink/advent-of-code-solutions
| 0
|
12776477
|
import logging as lg
lg.basicConfig(
level=lg.DEBUG,
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
datefmt="%H:%M:%S")
_logger = lg.getLogger(__name__)
class Node:
def __init__(self, children, metadata):
self.children = children
self.metadata = metadata
def sum_metadata(self):
return sum(self.metadata) + sum([c.sum_metadata() for c in self.children])
def get_value(self):
_logger.debug("Node ({} children) metadata: {}".format(len(self.children), self.metadata))
if self.children:
cs = self.children
return sum([cs[idx - 1].get_value() for idx in self.metadata if 0 <= idx - 1 < len(cs)])
else:
return sum(self.metadata)
class Parser:
_node_class = Node
def __init__(self, data):
self.data = data
self.root = None
@classmethod
def from_data_str(cls, data_str):
return cls(list(map(int, data_str.strip().split(" "))))
def _build_node(self, j):
n_children = self.data[j]
n_metadata = self.data[j + 1]
# _logger.debug("Node at {}: {} children, {} metadata".format(j, n_children, n_metadata))
children = []
k = 2
for _ in range(n_children):
child, size = self._build_node(j + k)
children.append(child)
k += size
metadata = self.data[j + k:j + k + n_metadata]
node = self._node_class(children, metadata)
return node, k + n_metadata
def parse(self):
node, size = self._build_node(0)
assert size == len(self.data)
self.root = node
with open("input_day8.txt", "r") as f:
data_str = f.read()
parser = Parser.from_data_str(data_str)
parser.parse()
print("Answer pt1:", parser.root.sum_metadata())
print("Answer pt2:", parser.root.get_value())
| 2.75
| 3
|
uasyncio_iostream/uasyncio/lock.py
|
petrkr/micropython-samples
| 1
|
12776478
|
<filename>uasyncio_iostream/uasyncio/lock.py
import uasyncio
################################################################################
# Lock (optional component)
# Lock class for primitive mutex capability
import uasyncio
class Lock(uasyncio.Primitive):
def __init__(self):
super().__init__()
self._locked = False
self._awt = None # task that is going to acquire the lock. Needed to prevent race
# condition between pushing the next waiting task and the task actually acquiring
# the lock because during that time another newly started task could acquire the
# lock out-of-order instead of being pushed to the waiting list.
# Also needed to not release another waiting Task if multiple Tasks are cancelled.
async def acquire(self):
if self._locked or self._awt:
# Lock set or just released but has tasks waiting on it,
# put the calling task on the Lock's waiting queue and yield
self.save_current()
try:
yield
except uasyncio.CancelledError:
if self._awt is uasyncio.cur_task:
# Task that was going to acquire got cancelled after being scheduled.
# Schedule next waiting task
self._locked = True
self.release()
raise
self._locked = True
return True
async def __aenter__(self):
await self.acquire()
return self
def locked(self):
return self._locked
def release(self):
if not self._locked:
raise RuntimeError("Lock is not acquired.")
self._locked = False
# Lock becomes available. If task(s) are waiting on it save task which will
self._awt = self.run_next() # get lock and schedule that task
async def __aexit__(self, *args):
return self.release()
uasyncio.Lock = Lock
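# Hedged usage sketch (added for illustration; relies only on the Lock class above):
async def _lock_demo():
    lock = uasyncio.Lock()
    async with lock:
        # critical section: at most one task can execute this block at a time
        pass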
| 2.984375
| 3
|
_notes/exportMdFileToHtml.py
|
ramellus/phd-notes
| 0
|
12776479
|
<gh_stars>0
import os
from shutil import copyfile, copyfileobj
import shutil
from pathlib import Path
import pathlib
import sys
import re
import html
from urllib.parse import unquote
import urllib.request
from random import seed,random,randint
if(len(sys.argv)!=2 and len(sys.argv)!=3 and len(sys.argv)!=4):
print("Wrong number of arguments!\nUsage: python3 exportMdFileToHtml.py <filename.md> <[y/n](optional) y=default => creates a html-export in export vault> <[y/n](optional) y=default => download extrernal images locally>")
quit()
mainFileToExport = ""
fileToFind = str(sys.argv[1])
for path in Path('.').rglob(fileToFind):
mainFileToExport=path
exportToHtml = True
downloadImages = True
if len(sys.argv) >= 3:
if str(sys.argv[2]).upper() == "N":
print("Exporting: " + str(mainFileToExport) + " to vault")
exportToHtml = False
if len(sys.argv) == 4 and str(sys.argv[3]).upper() == "N":
downloadImages = False
else:
print("Exporting: " + str(mainFileToExport) + " + creates a html-copy in vault")
if(mainFileToExport == ""):
print("File not found!\nRun this script from the root of obsidian vault\nUsage: python3 exportMdFileToHtml.py <filename.md> <[y/n](optional) y=default => creates a html-export in export vault>")
quit()
exportDir = os.path.expanduser('~/export_' + fileToFind.split('/')[-1].replace(".md",""))
print("Path to export vault: " + str(exportDir) + "\n")
if os.path.exists(exportDir) and os.path.isdir(exportDir):
shutil.rmtree(exportDir)
destFile = os.path.join(exportDir,mainFileToExport)
Path(os.path.dirname(destFile)).mkdir(parents=True, exist_ok=True)
assetPath = os.path.join(exportDir,"downloaded_images","test")
Path(os.path.dirname(assetPath)).mkdir(parents=True, exist_ok=True)
copyfile(mainFileToExport, destFile)
filesAllreadyCopied = [mainFileToExport]
def findRelPath(linkPath, currentFile):
# Find the path of linkPath relative to currentFile (an HTML-style relative link)
pRoot = Path(".") #root
pCurr = Path(currentFile)
pLink = Path(linkPath)
pCurrRelRoot = str(pCurr.relative_to(pRoot))
pLinkRelRoot = str(pLink.relative_to(pRoot))
pLinkRelRootList = pLinkRelRoot.replace("\\","/").split("/")
for parent in pCurrRelRoot.replace("\\","/").split("/"):
if(parent == pLinkRelRootList[0]):
del pLinkRelRootList[0]
else:
pLinkRelRootList.insert(0,"..")
if(len(pLinkRelRootList)>0):
del pLinkRelRootList[0]
return '/'.join(pLinkRelRootList)
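# Illustrative example (added for clarity): with currentFile="notes/sub/file.md" and
# linkPath="notes/img/a.png", both paths are compared from the vault root and the
# function returns "../img/a.png".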
def copyFileToExport(fileToFind, currentFile, traverse=False):
linkedFilePath=""
for path in Path('.').rglob(fileToFind):
linkedFilePath=path
if(linkedFilePath != ""):
destDir = os.path.join(exportDir,linkedFilePath)
Path(os.path.dirname(destDir)).mkdir(parents=True, exist_ok=True)
copyfile(linkedFilePath, destDir)
if(traverse and linkedFilePath not in filesAllreadyCopied): # prevent circular references
filesAllreadyCopied.append(linkedFilePath)
readFilesRecursive(linkedFilePath)
return findRelPath(linkedFilePath,currentFile)
def findMdFile(line, currentFile):
pattern = re.compile(r"(?<!!)\[\[([^\]]*)\]\]")
for (file) in re.findall(pattern, line):
fileOnly = file.split("#")[0]
ancor = ""
if(len(file.split("#"))>1):
ancor = "#" + file.split("#")[1].replace(" ","_").replace("(","").replace(")","")
newFile = copyFileToExport(fileOnly + '.md', currentFile, traverse=True)
if(exportToHtml):
if(newFile and len(newFile)>0):
line = line.replace('[[' + file + ']]','<a href="./' + newFile + ".html" + ancor + '">' + newFile.replace("\\","/").split("/")[-1].replace(".md","") + ancor + '</a>')
else: ##self ref
line = line.replace('[[' + file + ']]', '<a href="./' + fileOnly + ".md.html" + ancor + '">' + fileOnly.replace("\\","/").split("/")[-1].replace(".md","") + ancor + '</a>')
return line
seed(1)
def findImages(line, currentFile):
antalAssets = 0
pattern = re.compile(r"!\[\[([^\]]*)\]\]")
for (asset) in re.findall(pattern, line):
antalAssets += 1
img = str(copyFileToExport(asset.split("|")[0], currentFile))
if(exportToHtml):
style = 'border-radius: 4px;"'
if('|' in asset):
style = style + 'width:' + asset.split('|')[1] + 'px; border-radius: 3px;'
line = line.replace("![[" + asset + "]]", '<img src="./' + img + '" alt="' + img.replace("\\","/").split("/")[-1] + '" style="' + style + '" >')
pattern = re.compile(r"!\[(.*)\]\((.*)\)")
for size,imglink in re.findall(pattern,line):
antalAssets += 1
if(exportToHtml):
if("http" not in imglink):
originallink = imglink
imglink = str(copyFileToExport(unquote(imglink.replace("\\","/").split("/")[-1]), currentFile))
style = 'border-radius: 4px;"'
if('|' in imglink):
style = style + 'width:' + imglink.split('|')[1] + 'px; border-radius: 3px;'
line = line.replace("", '<img src="./' + imglink + '" alt="' + imglink.replace("\\","/").split("/")[-1] + '" style="' + style + '" >')
elif downloadImages:
imgname = 'utl_download_' + str(randint(0,10000)) + imglink.split("/")[-1]
destFile = os.path.join(exportDir,"downloaded_images",imgname)
with urllib.request.urlopen(imglink) as responese:
with open(destFile,'wb') as fdest:
copyfileobj(responese, fdest)
style = 'border-radius: 4px;"'
line = line.replace("", '<img src="../downloaded_images/' + imgname + '" style="' + style + '" >')
else:
style = 'border-radius: 4px;"'
line = line.replace("", '<img src="' + imglink + '" style="' + style + '" >')
return (line, antalAssets)
def findExternalLinks(line):
pattern = re.compile(r"\[([^\[]*)\]\(([^\[\s]*)\)")
for (text, link) in re.findall(pattern, line):
line = line.replace("[" + text + "](" + link + ")",'<a href="' + link + '" target="_blank">' + text + "</a>")
return line
def findLinkInText(line):
pattern = re.compile(r"((?<!(?:\"|\(|\[))https{0,1}.*?)[ \n]") #?=>un-greedy, (?<!...) = negative look behind
for (link) in re.findall(pattern, line):
line = line.replace(link,'<a href="' + link.strip() + '" target="_blank">' + link + "</a>")
return line
def findCheckboxes(line):
pattern = re.compile(r"- \[[\sx]]")
for (text) in re.findall(pattern, line):
checked = ""
if "x" in text:
checked = "checked"
line = line.replace(text,'<input type="checkbox"' + checked + "/>")
return line
def findCodeBlock(line, InCodeBlock):
if("```" in line):
if(not InCodeBlock):
line = '<pre class="codeblock"><code style="tab-size: 4;" class="' + line.split("```")[-1].replace("shell\n","sh").replace('\n',"") + ' codeblock">' + line.replace("```","")
else:
line = line.replace("```","") + '</pre></code>'
InCodeBlock = not InCodeBlock
return (line, InCodeBlock)
def findCommentBlock(line, InCommentBlock):
pattern = re.compile(r"(%%.*%%)")
for (inlineComment) in re.findall(pattern, line):
line = line.replace(inlineComment,'')
if "%%" in line:
InCommentBlock = not InCommentBlock
line = ''
return (line, InCommentBlock)
def leftMargin(line):
margin = 0
for c in line:
if(c.startswith('\t')):
margin = margin + 20
return margin
def findListItems(line):
pattern = re.compile(r"^([\t]*)[\- ](.*)")
for (tab, text) in re.findall(pattern, line):
line = '<ul style="margin-left:' + str(len(tab) * 20) + 'px;"><li>' + text.strip() + '</li></ul>\n'
return line
def findBolds(line):
pattern = re.compile(r"\*\*([^\*]*)\*\*")
for (text) in re.findall(pattern, line):
line = line.replace("**" + text + "**", '<b>' + text + '</b>')
return line
def findHeadings(line):
pattern = re.compile(r"^([\t]*)[\- ]*([#]{1,}) ([^<]{1,})")
linkHeading = re.compile(r"^([\t]*)[\- ]*([#]{1,}) (<a href.*>([^\/]*)<\/a>)(.*)")
for (tab, heading, link, text, aftertext) in re.findall(linkHeading, line):
line = '<h' + str(len(heading)) + ' style="margin-left:' + str(len(tab) * 20) + 'px;" id="' + (
(text + aftertext).strip().replace(" ","_").replace("(","").replace(")","").replace("#","_") + '">'
+ link + aftertext + '</h' + str(len(heading)) + '>\n')
for (tab, heading, text) in re.findall(pattern, line):
line = '<h' + str(len(heading)) + ' style="margin-left:' + str(len(tab) * 20) + 'px;" id="' + text.strip().replace(" ","_").replace("(","").replace(")","") + '">' + text + '</h' + str(len(heading)) + '>\n'
return line
def findInlineCodeBlocks(line):
pattern = re.compile(r"`([^`]*)`")
for (text) in re.findall(pattern, line):
line = line.replace('`' + text + '`', '<code class="inlineCoed">' + html.escape(text) + '</code>')
return line
def findInlineCodeBlockswrongly(line):
pattern = re.compile(r"```([^`]*)```")
for (text) in re.findall(pattern, line):
line = line.replace('```' + text + '```', '<code class="inlineCoed">' + html.escape(text) + '</code>')
return line
def insertParagraphs(line):
line = line.replace("\n","")
if('<h' not in line and '</pre></code>' not in line):
line = "<p>" + line + "</p>"
return line + "\n"
def findLines(line):
if '---' in line:
line = "<hr>"
return line + "\n"
def readFilesRecursive(path):
with open(path,"r",encoding='utf-8') as readfile:
data = readfile.readlines()
antalAssets = 0
if(exportToHtml):
with open(os.path.join(exportDir,str(path) + ".html"), 'w', encoding='utf-8') as outputfile:
outputfile.write("<!DOCTYPE html>\n")
outputfile.write("<html>\n")
outputfile.write("<head>\n")
outputfile.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>\n')
#code-highlighting with highlight.js:
outputfile.write('<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.3.1/styles/default.min.css">\n')
outputfile.write('<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.3.1/highlight.min.js"></script>\n')
outputfile.write('<script src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.3.1/languages/go.min.js"></script>\n')
outputfile.write('<script>hljs.initHighlightingOnLoad();</script>\n')
outputfile.write("<style>\n")
outputfile.write("\timg { max-width:900px; }\n")
outputfile.write("\t.codeblock { \n\tbackground: #B0B0B0; padding:1px 10px 0px 10px; border-radius: 5px; overflow-x:auto; \n\t}\n")
outputfile.write("\tcode {\n font-family: monospace; font-size: inherit; color: #202020; \n\t}\n")
outputfile.write("\t.inlineCoed {\n font-family: monospace; font-size: inherit; color: #202020; \n\t}\n")
outputfile.write("</style>\n")
outputfile.write("</head>\n")
outputfile.write('<body style="background: #F0F0F0;">\n')
outputfile.write('<div style="margin: 0 auto; width:1380px; position: relative;" >\n')
outputfile.write('<div style="width:1000px; padding:20px; margin:0px; z-index: 5; text-align:left; background-color: #DCDCDC; border-radius: 5px; position:absolute; top:0; left:340px;">\n')
InCodeBlock = False
InComment = False
for line in data:
if(not InCodeBlock):
line = findInlineCodeBlockswrongly(line)
(line, InCodeBlock) = findCodeBlock(line, InCodeBlock)
if(not InCodeBlock):
if InComment:
(line, InComment) = findCommentBlock(line, InComment)
line=''
else:
(line, InComment) = findCommentBlock(line, InComment)
if(InComment):
continue
line = findLines(line)
line = findMdFile(line, currentFile=path)
(line, a) = findImages(line, currentFile=path)
antalAssets += a
line = findInlineCodeBlocks(line)
line = findLinkInText(line)
line = findExternalLinks(line)
line = findCheckboxes(line)
line = findBolds(line)
line = findHeadings(line)
line = findListItems(line)
line = insertParagraphs(line)
elif("<code" not in line):
line = html.escape(line)
outputfile.write(line)
outputfile.write("</div>\n")
b = str(findRelPath(".",path))
outputfile.write('<div style="width:345px; padding-top: 20px;; position:absolute; top:0; left:0; overflow:auto;">\n')
outputfile.write('\t<iframe src="' + str(findRelPath(".",path))[:-1] + 'treeview.html" width="340px" frameBorder="0" height="900px"></iframe>\n')
outputfile.write("</div>\n")
outputfile.write("</div>\n")
outputfile.write("</body>\n")
outputfile.write("</html>\n")
else:
for line in data:
findMdFile(line, currentFile=path)
(line, a) = findImages(line, currentFile=path)
antalAssets += a
print("Exported: " + str(path) + (" (" + str(antalAssets) + " images)" if antalAssets>0 else ''))
readFilesRecursive(mainFileToExport)
if(exportToHtml):
with open(os.path.join(exportDir,"index.html"), 'w') as outputfile:
outputfile.write('<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta http-equiv="Refresh" content="0; url=\'./' + str(mainFileToExport) + '.html\'" />\n\t</head>\n</html>')
with open(os.path.join(exportDir,"treeview.html"), 'w') as outputfile:
outputfile.write("<!DOCTYPE html>\n")
outputfile.write("<html>\n")
outputfile.write("<head>\n")
outputfile.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>\n')
outputfile.write("<head>\n")
outputfile.write('<base target="_parent">\n')
outputfile.write("<style>\n")
outputfile.write('ul{ padding-left: 5px; margin-left: 15px; list-style-type: "- "; }\n')
outputfile.write(".folderClass {list-style-type: disc;}\n")
outputfile.write("</style>\n")
outputfile.write("</head>\n")
outputfile.write('<body style="background: #F0F0F0; ">\n')
filesAllreadyCopied.sort()
outputfile.write("<ul>")
for f in str(filesAllreadyCopied[0]).replace("\\","/").split("/"):
if('.md' in f):
outputfile.write('<li>' + '<a href="./' + str(filesAllreadyCopied[0]) + ".html" + '">' + str(f).replace(".md","") + '</a>' + '</li>\n')
else:
outputfile.write('<li class="folderClass">' + str(f) + "</li>")
outputfile.write("<ul>")
lastFilePath = str(filesAllreadyCopied[0]).replace("\\","/").split("/")
first = True
for currFile in filesAllreadyCopied:
if(not first):
for currFolder in str(currFile).replace("\\","/").split("/"):
if(len(lastFilePath) > 0 and currFolder == lastFilePath[0]):
del lastFilePath[0]
else:
for addedSublist in lastFilePath[:-1]: #close previous lists
outputfile.write("</ul>\n")
lastFilePath = ""
if('.md' in currFolder):
outputfile.write('<li>' + '<a href="./' + str(currFile) + ".html" + '">' + str(currFolder).replace(".md","") + '</a>' + '</li>\n')
else:
outputfile.write('<li class="folderClass">' + str(currFolder) + "</li>\n")
outputfile.write("<ul>\n")
lastFilePath = str(currFile).replace("\\","/").split("/")
first = False
for i in str(filesAllreadyCopied[-1]).replace("\\","/").split("/"):
outputfile.write("</ul>")
outputfile.write("</body>\n")
outputfile.write("</html>\n")
print("Done!\n\nPath to export: " + str(exportDir) + ("/index.html" if exportToHtml else '' ))
| 2.5625
| 3
|
api/endpoints/curso.py
|
lads-ecp/ufma-api
| 1
|
12776480
|
<gh_stars>1-10
from flask_restplus import Resource, reqparse, Api
from flask_jwt import jwt_required
from flask.json import jsonify
from restplus import api as api
from flask import request
from flask import make_response
from database.models import Curso
from database import db
from database.operations import save_to
from rdf.models import Curso as CursoRDF
from simpot import graph
ns = api.namespace('curso', description='Operations related to "curso"')
@api.representation('application/xml')
def xml(data, code, headers):
resp = make_response(data, code)
resp.headers.extend(headers)
return resp
@ns.route('/<string:codigo>')
@api.response(404, 'Curso not found.')
class CursoItem(Resource):
parser = reqparse.RequestParser()
for i in ["nome", "codigo", "municipio", "modalidade", "coordenador"]:
parser.add_argument(i, type=str, required=False, help='')
def get(self, codigo):
if Curso.query.filter(Curso.codigo == codigo).first():
curso = Curso.query.filter(Curso.codigo == codigo).one()
cursordf = CursoRDF(curso.codigo, curso.nome)
if request.headers['accept'] == 'application/xml':
return xml(cursordf.g.serialize().decode(), 201, {'Content-Type': 'application/xml'})
else:
return jsonify(curso.json())
return {'Message': 'Curso with the codigo {} is not found'.format(codigo)}
# @api.expect(subunidade_model)
@api.response(201, 'Curso successfully created.')
@jwt_required()
def post(self, codigo):
if Curso.query.filter(Curso.codigo == codigo).first():
return {'Message': 'Curso with the codigo {} already exists'.format(codigo)}
args = CursoItem.parser.parse_args()
item = Curso(args)
save_to(item, db)
return "ok", 201
get_arguments = reqparse.RequestParser()
get_arguments.add_argument('nome', type=str, required=False, help='Nome ou parte do nome de um curso')
get_arguments.add_argument('municipio', type=str, required=False, help='Nome ou parte do nome de um município')
@ns.route('/')
class CursoCollection (Resource):
@api.expect(get_arguments, validate=True)
def get(self):
nome = request.args.get("nome")
municipio = request.args.get("municipio")
query = Curso.query
if (nome):
query = query.filter(Curso.nome.like("%" + nome + "%"))
if (municipio):
query = query.filter(Curso.municipio.like("%" + municipio + "%"))
if request.headers['accept'] == 'application/xml':
dados_rdf = list(map(lambda curso: CursoRDF(curso.codigo, curso.nome), Curso.query.order_by(Curso.nome).all()))
grafo = graph(dados_rdf)
return xml(grafo.serialize().decode(), 201, {'Content-Type': 'application/xml'})
else:
data = list(map(lambda x: x.json(), query.order_by(Curso.nome).all()))
return jsonify({'data': data, 'length': len(data)})
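# Illustrative requests (host and port are assumptions for a local development server):
#   curl -H "Accept: application/json" "http://localhost:5000/curso/?nome=engenharia"
#   curl -H "Accept: application/xml"  "http://localhost:5000/curso/ABC123"
# The handlers above read request.headers['accept'] directly, so an Accept header
# must be sent with every request.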
| 2.515625
| 3
|
thoraxe/transcript_info/__init__.py
|
PhyloSofS-Team/exonhomology
| 6
|
12776481
|
"""
transcript_info: Module to read and manage transcript information.
It performs the first exon clustering of the pipeline.
"""
from thoraxe.transcript_info import clusters
from thoraxe.transcript_info import phases
from thoraxe.transcript_info.transcript_info import *
from thoraxe.transcript_info.exon_clustering import *
| 1.320313
| 1
|
third_party/libSBML-5.9.0-Source/src/bindings/python/test/sbml/TestL3Model.py
|
0u812/roadrunner
| 5
|
12776482
|
#
# @file TestL3Model.py
# @brief L3 Model unit tests
#
# @author <NAME> (Python conversion)
# @author <NAME>
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestL3Model.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestL3Model(unittest.TestCase):
global M
M = None
def setUp(self):
self.M = libsbml.Model(3,1)
if (self.M == None):
pass
pass
def tearDown(self):
_dummyList = [ self.M ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Model_NS(self):
self.assert_( self.M.getNamespaces() != None )
self.assert_( self.M.getNamespaces().getLength() == 1 )
self.assert_(( "http://www.sbml.org/sbml/level3/version1/core" == self.M.getNamespaces().getURI(0) ))
pass
def test_L3_Model_areaUnits(self):
units = "mole";
self.assertEqual( False, self.M.isSetAreaUnits() )
self.M.setAreaUnits(units)
self.assert_(( units == self.M.getAreaUnits() ))
self.assertEqual( True, self.M.isSetAreaUnits() )
if (self.M.getAreaUnits() == units):
pass
self.M.unsetAreaUnits()
self.assertEqual( False, self.M.isSetAreaUnits() )
if (self.M.getAreaUnits() != None):
pass
pass
def test_L3_Model_conversionFactor(self):
units = "mole";
self.assertEqual( False, self.M.isSetConversionFactor() )
self.M.setConversionFactor(units)
self.assert_(( units == self.M.getConversionFactor() ))
self.assertEqual( True, self.M.isSetConversionFactor() )
if (self.M.getConversionFactor() == units):
pass
self.M.unsetConversionFactor()
self.assertEqual( False, self.M.isSetConversionFactor() )
if (self.M.getConversionFactor() != None):
pass
pass
def test_L3_Model_create(self):
self.assert_( self.M.getTypeCode() == libsbml.SBML_MODEL )
self.assert_( self.M.getMetaId() == "" )
self.assert_( self.M.getNotes() == None )
self.assert_( self.M.getAnnotation() == None )
self.assert_( self.M.getId() == "" )
self.assert_( self.M.getName() == "" )
self.assert_( self.M.getSubstanceUnits() == "" )
self.assert_( self.M.getTimeUnits() == "" )
self.assert_( self.M.getVolumeUnits() == "" )
self.assert_( self.M.getAreaUnits() == "" )
self.assert_( self.M.getLengthUnits() == "" )
self.assert_( self.M.getConversionFactor() == "" )
self.assertEqual( False, self.M.isSetId() )
self.assertEqual( False, self.M.isSetName() )
self.assertEqual( False, self.M.isSetSubstanceUnits() )
self.assertEqual( False, self.M.isSetTimeUnits() )
self.assertEqual( False, self.M.isSetVolumeUnits() )
self.assertEqual( False, self.M.isSetAreaUnits() )
self.assertEqual( False, self.M.isSetLengthUnits() )
self.assertEqual( False, self.M.isSetConversionFactor() )
pass
def test_L3_Model_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(3,1)
sbmlns.addNamespaces(xmlns)
m = libsbml.Model(sbmlns)
self.assert_( m.getTypeCode() == libsbml.SBML_MODEL )
self.assert_( m.getMetaId() == "" )
self.assert_( m.getNotes() == None )
self.assert_( m.getAnnotation() == None )
self.assert_( m.getLevel() == 3 )
self.assert_( m.getVersion() == 1 )
self.assert_( m.getNamespaces() != None )
self.assert_( m.getNamespaces().getLength() == 2 )
self.assert_( m.getId() == "" )
self.assert_( m.getName() == "" )
self.assert_( m.getSubstanceUnits() == "" )
self.assert_( m.getTimeUnits() == "" )
self.assert_( m.getVolumeUnits() == "" )
self.assert_( m.getAreaUnits() == "" )
self.assert_( m.getLengthUnits() == "" )
self.assert_( m.getConversionFactor() == "" )
self.assertEqual( False, m.isSetId() )
self.assertEqual( False, m.isSetName() )
self.assertEqual( False, m.isSetSubstanceUnits() )
self.assertEqual( False, m.isSetTimeUnits() )
self.assertEqual( False, m.isSetVolumeUnits() )
self.assertEqual( False, m.isSetAreaUnits() )
self.assertEqual( False, m.isSetLengthUnits() )
self.assertEqual( False, m.isSetConversionFactor() )
_dummyList = [ m ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Model_extentUnits(self):
units = "mole";
self.assertEqual( False, self.M.isSetExtentUnits() )
self.M.setExtentUnits(units)
self.assert_(( units == self.M.getExtentUnits() ))
self.assertEqual( True, self.M.isSetExtentUnits() )
if (self.M.getExtentUnits() == units):
pass
self.M.unsetExtentUnits()
self.assertEqual( False, self.M.isSetExtentUnits() )
if (self.M.getExtentUnits() != None):
pass
pass
def test_L3_Model_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_Model_id(self):
id = "mitochondria";
self.assertEqual( False, self.M.isSetId() )
self.M.setId(id)
self.assert_(( id == self.M.getId() ))
self.assertEqual( True, self.M.isSetId() )
if (self.M.getId() == id):
pass
self.M.unsetId()
self.assertEqual( False, self.M.isSetId() )
if (self.M.getId() != None):
pass
pass
def test_L3_Model_lengthUnits(self):
units = "mole";
self.assertEqual( False, self.M.isSetLengthUnits() )
self.M.setLengthUnits(units)
self.assert_(( units == self.M.getLengthUnits() ))
self.assertEqual( True, self.M.isSetLengthUnits() )
if (self.M.getLengthUnits() == units):
pass
self.M.unsetLengthUnits()
self.assertEqual( False, self.M.isSetLengthUnits() )
if (self.M.getLengthUnits() != None):
pass
pass
def test_L3_Model_name(self):
name = "My_Favorite_Factory";
self.assertEqual( False, self.M.isSetName() )
self.M.setName(name)
self.assert_(( name == self.M.getName() ))
self.assertEqual( True, self.M.isSetName() )
if (self.M.getName() == name):
pass
self.M.unsetName()
self.assertEqual( False, self.M.isSetName() )
if (self.M.getName() != None):
pass
pass
def test_L3_Model_substanceUnits(self):
units = "mole";
self.assertEqual( False, self.M.isSetSubstanceUnits() )
self.M.setSubstanceUnits(units)
self.assert_(( units == self.M.getSubstanceUnits() ))
self.assertEqual( True, self.M.isSetSubstanceUnits() )
if (self.M.getSubstanceUnits() == units):
pass
self.M.unsetSubstanceUnits()
self.assertEqual( False, self.M.isSetSubstanceUnits() )
if (self.M.getSubstanceUnits() != None):
pass
pass
def test_L3_Model_timeUnits(self):
units = "mole";
self.assertEqual( False, self.M.isSetTimeUnits() )
self.M.setTimeUnits(units)
self.assert_(( units == self.M.getTimeUnits() ))
self.assertEqual( True, self.M.isSetTimeUnits() )
if (self.M.getTimeUnits() == units):
pass
self.M.unsetTimeUnits()
self.assertEqual( False, self.M.isSetTimeUnits() )
if (self.M.getTimeUnits() != None):
pass
pass
def test_L3_Model_volumeUnits(self):
units = "mole";
self.assertEqual( False, self.M.isSetVolumeUnits() )
self.M.setVolumeUnits(units)
self.assert_(( units == self.M.getVolumeUnits() ))
self.assertEqual( True, self.M.isSetVolumeUnits() )
if (self.M.getVolumeUnits() == units):
pass
self.M.unsetVolumeUnits()
self.assertEqual( False, self.M.isSetVolumeUnits() )
if (self.M.getVolumeUnits() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestL3Model))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| 1.96875
| 2
|
zaqar/storage/mongodb/__init__.py
|
vkmc/zaqar-websocket
| 1
|
12776483
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
MongoDB Storage Driver for Zaqar.
About the store
---------------
MongoDB is a nosql, eventually consistent, reliable database with support for
horizontal-scaling and capable of handling different levels of throughputs.
Supported Features
------------------
- FIFO
- Unlimited horizontal-scaling [1]_
- Reliability [2]_
.. [1] This is only possible with a sharding environment
.. [2] Write concern must be equal to or higher than 2
Supported Deployments
---------------------
MongoDB can be deployed in 3 different ways. The first and simplest one is
to deploy a standalone `mongod` node. The second one is to use a Replica Set,
which gives a master-slave deployment but cannot be scaled indefinitely. The
third and last one is a sharded cluster.
The second and third methods are the ones recommended for production
environments where durability and scalability are a must-have. The driver
itself forces operators to use such environments by checking whether it is
talking to a replica-set or sharded cluster. Such enforcement can be disabled
by running Zaqar in an unreliable mode.
Replica Sets
------------
When running on a replica-set, Zaqar won't try to be smart and it'll rely as
much as possible on the database and pymongo.
Sharded Cluster
---------------
TBD
"""
from zaqar.storage.mongodb import driver
# Hoist classes into package namespace
ControlDriver = driver.ControlDriver
DataDriver = driver.DataDriver
| 1.570313
| 2
|
backend/conf/__about__.py
|
alexhermida/pinecone
| 2
|
12776484
|
<gh_stars>1-10
__title__ = 'pinecone-backend'
__summary__ = 'Domain.'
__version__ = '0.0.1-dev'
__license__ = 'All rights reserved.'
__uri__ = 'http://vigotech.org/'
__author__ = 'VigoTech'
__email__ = '<EMAIL>'
| 0.902344
| 1
|
pythonProject/MUNDO 3/Desafio 92-Prof.py
|
lucasjlgc/Aulas-de-Python-
| 0
|
12776485
|
from datetime import datetime
dados=dict()
dados['Nome']= str(input('Nome: '))
nasc= int(input('Ano de nascimento: '))
dados['Idade']= datetime.now().year - nasc
dados['ctps'] = int(input('Digite o ctps(0 se nao tem): '))
if dados['ctps']!=0:
dados['contratação']=int(input('Ano de contratação: '))
dados['salario']=float(input('Salário:R$ '))
dados['aposentadoria']=dados['Idade']+((dados['contratação']+35)-datetime.now().year)
for k,v in dados.items():
print(f' - {k} tem o valor {v}')
| 3.5625
| 4
|
kusanagi/sdk/lib/payload/transport.py
|
kusanagi/kusanagi-sdk-python
| 1
|
12776486
|
<filename>kusanagi/sdk/lib/payload/transport.py
# Python 3 SDK for the KUSANAGI(tm) framework (http://kusanagi.io)
# Copyright (c) 2016-2021 <NAME>.L. All rights reserved.
#
# Distributed under the MIT license.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import annotations
import copy
from typing import TYPE_CHECKING
from . import Payload
from . import ns
from .utils import file_to_payload
from .utils import merge_dictionary
from .utils import param_to_payload
if TYPE_CHECKING:
from typing import Any
from typing import List
from typing import Union
from ...file import File
from ...param import Param
from .reply import ReplyPayload
class TransportPayload(Payload):
"""Handles operations on transport payloads."""
# The types are mapped to payload namespaces
TRANSACTION_COMMIT = ns.COMMIT
TRANSACTION_ROLLBACK = ns.ROLLBACK
TRANSACTION_COMPLETE = ns.COMPLETE
# Paths that can be merged from other transport payloads
MERGEABLE_PATHS = (
[ns.DATA],
[ns.RELATIONS],
[ns.LINKS],
[ns.CALLS],
[ns.TRANSACTIONS],
[ns.ERRORS],
[ns.BODY],
[ns.FILES],
[ns.META, ns.FALLBACKS],
[ns.META, ns.PROPERTIES],
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._reply = None
def set(self, path: list, value: Any, prefix: bool = True) -> bool:
ok = super().set(path, value, prefix=prefix)
if self._reply is not None:
self._reply.set([ns.TRANSPORT] + path, value, prefix=prefix)
return ok
def append(self, path: list, value: Any, prefix: bool = True) -> bool:
ok = super().append(path, value, prefix=prefix)
if self._reply is not None:
self._reply.append([ns.TRANSPORT] + path, value, prefix=prefix)
return ok
def extend(self, path: list, values: list, prefix: bool = True) -> bool:
ok = super().extend(path, values, prefix=prefix)
if self._reply is not None:
self._reply.extend([ns.TRANSPORT] + path, values, prefix=prefix)
return ok
def delete(self, path: list, prefix: bool = True) -> bool:
ok = super().delete(path, prefix=prefix)
if self._reply is not None:
self._reply.delete([ns.TRANSPORT] + path, prefix=prefix)
return ok
def merge_runtime_call_transport(self, transport: TransportPayload) -> bool:
"""
Merge a transport returned from a run-time call into the current transport.
:param transport: The transport payload to merge.
:raises: TypeError
"""
if not isinstance(transport, TransportPayload):
raise TypeError(f'Invalid type to merge into transport: {transport.__class__}')
for path in self.MERGEABLE_PATHS:
# Get the value from the other transport
src_value = transport.get(path)
if src_value is None:
continue
            # Get the value from the current transport and if it is not available initialize it as a dictionary
dest_value = self.get(path)
if dest_value is None:
dest_value = {}
            # NOTE: Skip the overridden set() to avoid changing the reply payload
super().set(path, dest_value)
merge_dictionary(src_value, dest_value)
# Update the transport in the reply payload with the runtime transport
if self._reply is not None:
# TODO: See if we need to keep the transport updated, or we just need to update the
# transport in the reply, and only keep track of the new files and params in
# the transport payload class. Merging and deepcopying is expensive.
self._reply.set([ns.TRANSPORT], copy.deepcopy(self))
return True
def get_public_gateway_address(self) -> str:
"""Get the public Gateway address."""
return self.get([ns.META, ns.GATEWAY], ['', ''])[1]
def set_reply(self, reply: ReplyPayload) -> TransportPayload:
"""
Set the reply payload.
:param reply: The reply payload.
"""
self._reply = reply
return self
def set_download(self, file: File) -> bool:
"""
Set a file for download.
:param file: The file to use as download contents.
"""
return self.set([ns.BODY], file_to_payload(file))
def set_return(self, value: Any = None) -> bool:
"""
Set the return value.
:param value: The value to use as return value in the payload.
"""
if self._reply is not None:
return self._reply.set([ns.RETURN], value)
return False
def add_data(self, name: str, version: str, action: str, data: Union[dict, list]) -> bool:
"""
Add transport payload data.
When there is existing data in the payload it is not removed. The new data
is appended to the existing data in that case.
:param name: The name of the Service.
:param version: The version of the Service.
:param action: The name of the action.
:param data: The data to add.
"""
gateway = self.get_public_gateway_address()
return self.append([ns.DATA, gateway, name, version, action], data)
def add_relate_one(self, service: str, pk: str, remote: str, fk: str) -> bool:
"""
Add a "one-to-one" relation.
:param service: The name of the local service.
:param pk: The primary key of the local entity.
:param remote: The name of the remote service.
:param fk: The primary key of the remote entity.
"""
gateway = self.get_public_gateway_address()
return self.set([ns.RELATIONS, gateway, service, pk, gateway, remote], fk)
def add_relate_many(self, service: str, pk: str, remote: str, fks: List[str]) -> bool:
"""
Add a "one-to-many" relation.
:param service: The name of the local service.
:param pk: The primary key of the local entity.
:param remote: The name of the remote service.
:param fks: The primary keys of the remote entity.
"""
gateway = self.get_public_gateway_address()
return self.set([ns.RELATIONS, gateway, service, pk, gateway, remote], fks)
def add_relate_one_remote(self, service: str, pk: str, address: str, remote: str, fk: str) -> bool:
"""
Add a remote "one-to-one" relation.
:param service: The name of the local service.
:param pk: The primary key of the local entity.
:param address: The address of the remote Gateway.
:param remote: The name of the remote service.
:param fk: The primary key of the remote entity.
"""
gateway = self.get_public_gateway_address()
return self.set([ns.RELATIONS, gateway, service, pk, address, remote], fk)
def add_relate_many_remote(self, service: str, pk: str, address: str, remote: str, fks: List[str]) -> bool:
"""
Add a remote "one-to-one" relation.
:param service: The name of the local service.
:param pk: The primary key of the local entity.
:param address: The address of the remote Gateway.
:param remote: The name of the remote service.
:param fks: The primary keys of the remote entity.
"""
gateway = self.get_public_gateway_address()
return self.set([ns.RELATIONS, gateway, service, pk, address, remote], fks)
def add_link(self, service: str, link: str, uri: str) -> bool:
"""
Add a link.
:param service: The name of the Service.
:param link: The link name.
:param uri: The URI for the link.
"""
gateway = self.get_public_gateway_address()
return self.set([ns.LINKS, gateway, service, link], uri)
def add_transaction(
self,
type_: str,
service: str,
version: str,
action: str,
target: str,
params: List[Param] = None,
) -> bool:
"""
Add a transaction to be called when the request succeeds.
:param type_: The type of transaction.
:param service: The name of the Service.
:param version: The version of the Service.
:param action: The name of the origin action.
:param target: The name of the target action.
:param params: Optional parameters for the transaction.
:raises: ValueError
"""
if type_ not in (self.TRANSACTION_COMMIT, self.TRANSACTION_ROLLBACK, self.TRANSACTION_COMPLETE):
raise ValueError(f'Invalid transaction type value: {type_}')
transaction = {
ns.NAME: service,
ns.VERSION: version,
ns.CALLER: action,
ns.ACTION: target,
}
if params:
transaction[ns.PARAMS] = [param_to_payload(p) for p in params]
return self.append([ns.TRANSACTIONS, type_], transaction)
def add_call(
self,
service: str,
version: str,
action: str,
callee_service: str,
callee_version: str,
callee_action: str,
duration: int,
params: List[Param] = None,
files: List[File] = None,
timeout: int = None,
transport: TransportPayload = None,
) -> bool:
"""
Add a run-time call.
Current transport payload is used when the optional transport is not given.
:param service: The name of the Service.
:param version: The version of the Service.
:param action: The name of the action making the call.
:param callee_service: The called service.
:param callee_version: The called version.
:param callee_action: The called action.
:param duration: The call duration.
:param params: Optional parameters to send.
:param files: Optional files to send.
:param timeout: Optional timeout for the call.
:param transport: Optional transport payload.
:raises: ValueError
"""
        # Validate duration to make sure the call is not treated as a deferred call by the framework
if duration is None:
raise ValueError('Duration is required when adding run-time calls to transport')
call = {
ns.NAME: callee_service,
ns.VERSION: callee_version,
ns.ACTION: callee_action,
ns.CALLER: action,
ns.DURATION: duration,
}
if params:
call[ns.PARAMS] = [param_to_payload(p) for p in params]
if files:
call[ns.FILES] = [file_to_payload(f) for f in files]
if timeout is not None:
call[ns.TIMEOUT] = timeout
# When a transport is present add the call to it and then merge it into the current transport
if transport is not None:
transport.append([ns.CALLS, service, version], call)
return self.merge_runtime_call_transport(transport)
# When there is no transport just add the call to current transport
return self.append([ns.CALLS, service, version], call)
def add_defer_call(
self,
service: str,
version: str,
action: str,
callee_service: str,
callee_version: str,
callee_action: str,
params: List[Param] = None,
files: List[File] = None,
) -> bool:
"""
Add a deferred call.
:param service: The name of the Service.
:param version: The version of the Service.
:param action: The name of the action making the call.
:param callee_service: The called service.
:param callee_version: The called version.
:param callee_action: The called action.
:param params: Optional parameters to send.
:param files: Optional files to send.
"""
call = {
ns.NAME: callee_service,
ns.VERSION: callee_version,
ns.ACTION: callee_action,
ns.CALLER: action,
}
if params:
call[ns.PARAMS] = [param_to_payload(p) for p in params]
        # TODO: Should the files be added to the call too? Not in the specs.
file_payloads = [file_to_payload(f) for f in files] if files else None
if file_payloads:
call[ns.FILES] = file_payloads
# Add the call to the transport payload
ok = self.append([ns.CALLS, service, version], call)
# When there are files included in the call add them to the transport payload
if ok and file_payloads:
gateway = self.get_public_gateway_address()
self.extend([ns.FILES, gateway, callee_service, callee_version, callee_action], file_payloads)
return ok
def add_remote_call(
self,
address: str,
service: str,
version: str,
action: str,
callee_service: str,
callee_version: str,
callee_action: str,
params: List[Param] = None,
files: List[File] = None,
timeout: int = None,
) -> bool:
"""
        Add a call to a Service on a remote Gateway.
:param address: The address of the remote Gateway.
:param service: The name of the Service.
:param version: The version of the Service.
:param action: The name of the action making the call.
:param callee_service: The called service.
:param callee_version: The called version.
:param callee_action: The called action.
:param params: Optional parameters to send.
:param files: Optional files to send.
:param timeout: Optional timeout for the call.
"""
call = {
ns.GATEWAY: address,
ns.NAME: callee_service,
ns.VERSION: callee_version,
ns.ACTION: callee_action,
ns.CALLER: action,
}
if timeout is not None:
call[ns.TIMEOUT] = timeout
if params:
call[ns.PARAMS] = [param_to_payload(p) for p in params]
        # TODO: Should the files be added to the call too? Not in the specs.
file_payloads = [file_to_payload(f) for f in files] if files else None
if file_payloads:
call[ns.FILES] = file_payloads
# Add the call to the transport payload
ok = self.append([ns.CALLS, service, version], call)
# When there are files included in the call add them to the transport payload
if ok and file_payloads:
gateway = self.get_public_gateway_address()
self.extend([ns.FILES, gateway, callee_service, callee_version, callee_action], file_payloads)
return ok
def add_error(self, service: str, version: str, message: str, code: int, status: str) -> bool:
"""
Add a Service error.
:param service: The name of the Service.
:param version: The version of the Service.
:param message: The error message.
:param code: The error code.
:param status: The status message for the protocol.
"""
gateway = self.get_public_gateway_address()
return self.append([ns.ERRORS, gateway, service, version], {
ns.MESSAGE: message,
ns.CODE: code,
ns.STATUS: status,
})
def has_calls(self, service: str, version: str) -> bool:
"""
Check if there are any type of calls registered for a Service.
:param service: The name of the Service.
:param version: The version of the Service.
"""
for call in self.get([ns.CALLS, service, version], []):
            # When duration is None or there is no duration it means the call was not
            # executed, so it is safe to assume that a call which still has to be executed was found.
if call.get(ns.DURATION) is None:
return True
return False
def has_files(self) -> bool:
"""Check if there are files registered in the transport."""
return self.exists([ns.FILES])
def has_transactions(self) -> bool:
"""Check if there are transactions registered in the transport."""
return self.exists([ns.TRANSACTIONS])
def has_download(self) -> bool:
"""Check if there is a file download registered in the transport."""
return self.exists([ns.BODY])
| 2.09375
| 2
|
simple_playgrounds/agents/sensors/__init__.py
|
Asjidkalam/simple-playgrounds
| 0
|
12776487
|
from .robotic_sensors import *
from .topdown_sensors import *
from .semantic_sensors import *
| 1.046875
| 1
|
evennia/utils/tests/test_evmenu.py
|
victomteng1997/evennia_default_lib
| 0
|
12776488
|
<reponame>victomteng1997/evennia_default_lib<filename>evennia/utils/tests/test_evmenu.py
"""
Unit tests for the EvMenu system
TODO: This needs expansion.
"""
from django.test import TestCase
from evennia.utils import evmenu
from mock import Mock
class TestEvMenu(TestCase):
"Run the EvMenu testing."
def setUp(self):
self.caller = Mock()
self.caller.msg = Mock()
self.menu = evmenu.EvMenu(self.caller, "evennia.utils.evmenu", startnode="test_start_node",
persistent=True, cmdset_mergetype="Replace", testval="val",
testval2="val2")
def test_kwargsave(self):
self.assertTrue(hasattr(self.menu, "testval"))
self.assertTrue(hasattr(self.menu, "testval2"))
| 2.203125
| 2
|
searching/binary_search.py
|
TrungLuong1194/retrieval_systems
| 1
|
12776489
|
from utils import generate_random_sequence
def search(seq, l, r, x):
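    """Recursively binary-search for x in the sorted sequence seq[l:r+1].

    Returns a tuple (index, num_comparisons), where index is -1 when x is not
    present and num_comparisons counts the element comparisons performed.
    """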
num_comparisons = 0
if r >= l:
mid = l + (r - l) // 2
num_comparisons += 1
if seq[mid] == x:
return mid, num_comparisons
elif seq[mid] > x:
res, num = search(seq, l, mid - 1, x)
num_comparisons += num
return res, num_comparisons
else:
res, num = search(seq, mid + 1, r, x)
num_comparisons += num
return res, num_comparisons
else:
return -1, num_comparisons
if __name__ == '__main__':
random_seq = generate_random_sequence(20)
print('- Random sequence: ' + str(random_seq))
sorted_seq = sorted(random_seq)
print('\n- Sorted sequence: ' + str(sorted_seq))
# Implement binary search
number_search = 534
print('\n- Search number: ' + str(number_search))
    result, comps = search(sorted_seq, 0, len(sorted_seq) - 1, number_search)
print('Number of comparisons: ' + str(comps))
if result == -1:
print('Not found!')
else:
print('Index: ' + str(result))
number_search = 150
print('\n- Search number: ' + str(number_search))
    result, comps = search(sorted_seq, 0, len(sorted_seq) - 1, number_search)
print('Number of comparisons: ' + str(comps))
if result == -1:
print('Not found!')
else:
print('Index: ' + str(result))
| 3.84375
| 4
|
DailyCodingProblem/25_Facebook_regular_expression.py
|
RafayAK/CodingPrep
| 5
|
12776490
|
'''
This problem was asked by Facebook.
Implement regular expression matching with the following special characters:
. (period) which matches any single character
* (asterisk) which matches zero or more of the preceding element
That is, implement a function that takes in a string and a valid regular
expression and returns whether or not the string matches the regular expression.
For example, given the regular expression "ra." and the string "ray", your function
should return true. The same regular expression on the string "raymond" should return false.
Given the regular expression ".*at" and the string "chat", your function should return true.
The same regular expression on the string "chats" should return false.
'''
memo_table = {}  # memoize the result (True or False) for each (reg_exp, string) pair
def check_regular(reg_exp, string):
if (reg_exp, string) in memo_table: # return if in memo_table
return memo_table[(reg_exp, string)]
if len(reg_exp)==0 and len(string)==0: # both stings empty
memo_table[(reg_exp, string)] = True
return memo_table[(reg_exp, string)]
# string is empty but a 'char*...' like reg expression is left, see if we can end it
if len(reg_exp) > 1 and reg_exp[1] == '*' and len(string)==0:
memo_table[(reg_exp, string)] = check_regular(reg_exp[2:], string)
return memo_table[(reg_exp, string)]
if len(reg_exp)==0 and len(string)!=0: # string is still remaining
memo_table[(reg_exp, string)] = False
return memo_table[(reg_exp, string)]
if len(reg_exp)!=0 and len(string)==0: # reg_exp is still remaining
memo_table[(reg_exp, string)] = False
return memo_table[(reg_exp, string)]
    # check whether the first characters match ('.' matches any single character)
    first_char_match = reg_exp[0] == string[0] or reg_exp[0] == '.'
if len(reg_exp) > 1 and reg_exp[1] == '*':
if first_char_match: # if True, check by matching or ignoring the 'char*'
memo_table[(reg_exp, string)] = check_regular(reg_exp[2:], string) or check_regular(reg_exp, string[1:])
else:
# ignore the 'char*', it didn't match
memo_table[(reg_exp, string)] = check_regular(reg_exp[2:], string)
elif first_char_match:
# got a match
memo_table[(reg_exp, string)] = check_regular(reg_exp[1:], string[1:])
else:
# got no match
memo_table[(reg_exp, string)] = False
return memo_table[(reg_exp, string)]
if __name__ == '__main__':
# "aab"
# "c*a*b"
#
reg_exp = ".*"
string = "aab"
print(check_regular(reg_exp,string))
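    # A few extra checks taken directly from the examples in the problem
    # statement above (added for illustration; not part of the original code):
    assert check_regular("ra.", "ray")
    assert not check_regular("ra.", "raymond")
    assert check_regular(".*at", "chat")
    assert not check_regular(".*at", "chats")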
| 4.4375
| 4
|
METABRIC_cBio/parse_metadata.py
|
btc36/WishBuilder
| 0
|
12776491
|
<reponame>btc36/WishBuilder<gh_stars>0
import sys, gzip
clinical_sample_filePath = sys.argv[1]
clinical_patient_filePath=sys.argv[2]
cna_filePath=sys.argv[3]
mutations_extended_filePath=sys.argv[4]
outFilePath = sys.argv[5]
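# Expected invocation (argument order inferred from the assignments above):
#   python parse_metadata.py <data_clinical_sample.txt> <data_clinical_patient.txt> \
#       <data_CNA.txt> <data_mutations_extended.txt> <output.tsv.gz>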
# Function used to interpret data_CNA.txt values
def geneAlterationTranslator(value):
    if value == '-2':
        return "Homozygous deletion"
    if value == '-1':
        return "Heterozygous deletion"
    if value == '1':
        return "Low-level amplification"
    if value == '2':
        return "High-level amplification"
    return 0
#Ignore first 4 lines (comments)
clinicalPatientFile = open(clinical_patient_filePath, "r")
clinicalPatientFile.readline()
clinicalPatientFile.readline()
clinicalPatientFile.readline()
clinicalPatientFile.readline()
print("Processing data_clinical_patient")
#Start with data_clinical_patient, extract ALL metadata for each patient
metaheaders = clinicalPatientFile.readline().rstrip("\n").split("\t")
clinicalDict = {} #this will store metadata for data_clinical_patient.txt and data_clinical_sample.txt at least
#each patient-key corresponds to a dictionary of metadata-value pairs
for line in clinicalPatientFile:
line=line.rstrip("\n").split("\t")
clinicalDict[line[0]]={}
for header in range(1, len(metaheaders)):
clinicalDict[line[0]][metaheaders[header]]=line[header]
clinicalPatientFile.close()
#print(clinicalDict["MB-0002"]["VITAL_STATUS"])
#Same process, but for data_clinical_sample, add them to the dictionary
print("Processing data_clinical_sample")
clinicalSampleFile=open(clinical_sample_filePath, "r")
clinicalSampleFile.readline()
clinicalSampleFile.readline()
clinicalSampleFile.readline()
clinicalSampleFile.readline()
metaheaders2 = clinicalSampleFile.readline().rstrip("\n").split("\t")
for line in clinicalSampleFile:
line=line.rstrip("\n").split("\t")
sample=line[0]
if sample not in clinicalDict:
clinicalDict[sample]={}
for header in range(2, len(metaheaders2)):
clinicalDict[sample][metaheaders2[header]]=line[header]
clinicalSampleFile.close()
#print(clinicalDict["MB-0002"]["SAMPLE_TYPE"])
#print(clinicalDict["MB-0002"]["VITAL_STATUS"])
#Process data_CNA.txt
print("Processing data_CNA")
cnaFile = open(cna_filePath,"r")
cnaHeaders = cnaFile.readline().rstrip("\n").split("\t")
del cnaHeaders[1]
cnaDict = {}
for sample in range(1, len(cnaHeaders)):
cnaDict[cnaHeaders[sample]] = {}
for line in cnaFile:
line=line.rstrip("\n").split("\t")
del line[1]
metaGene= "CNA__"+line[0]
for word in range(1, len(line)):
# if line[word]!='0' and line[word]!=0:
if geneAlterationTranslator(line[word]) != 0:
cnaDict[cnaHeaders[word]][metaGene]= geneAlterationTranslator(line[word])
#print(cnaDict["MB-0008"])
#print(len(cnaDict["MB-0000"]))
#print(cnaDict["MB-0045"]["CNA__A1BG"])
print("Processing data_mutations_extended")
mutationsFile = open(mutations_extended_filePath, "r")
mutationsFile.readline()
headers = mutationsFile.readline().rstrip("\n").split("\t")
headers[39]="Protein_Variant"
variantDict = {}
#Protein Dict: Key=sample id, value is dictionary with key=gene, value is list of protein variants
proteinDict={}
sample = ""
variant = ""
for line in mutationsFile:
line = line.rstrip("\n").split("\t")
gene = line[0]
#this code gets all Variant_Classification info
if not line[16] in variantDict:
variantDict[line[16]]={}
if not gene in variantDict[line[16]]:
variantDict[line[16]][gene]=set()
variantDict[line[16]][gene].add(line[9])
#this code gets all Protein_Variant info
if not line[16] in proteinDict:
proteinDict[line[16]]={}
if not gene in proteinDict[line[16]]:
proteinDict[line[16]][gene] = []
proteinDict[line[16]][gene].append(line[39])
#End Protein_Variant code
if line[16] != sample and line[9] !=variant: #avoid duplicate variant_classification
sample = line[16]
variant = line[9]
#print(len(proteinDict["MTS-T0058"]["TP53"]))
#print(proteinDict["MTS-T0058"]["TP53"])
#print(proteinDict["MTS-T0058"])
#print(variantDict["MTS-T0058"])
mutationsFile.close()
print("Sorting sample ids")
uniqueSampleIDs = set()
for key in clinicalDict:
uniqueSampleIDs.add(key)
for key in proteinDict:
uniqueSampleIDs.add(key)
for key in variantDict:
uniqueSampleIDs.add(key)
uniqueSampleIDs = sorted(list(uniqueSampleIDs))
#output all our dictionaries to a file
print("Printing all data to file")
#outFile = open(outFilePath, "w")
with gzip.open(outFilePath, 'wb') as outFile:
outText = "Sample\tVariable\tValue\n"
outFile.write(outText.encode())
for sample in uniqueSampleIDs:
#add clinicalDict data first
if sample in clinicalDict:
for meta in clinicalDict[sample]:
outText = sample+"\t"+meta+"\t"+clinicalDict[sample][meta]+"\n"
outFile.write(outText.encode())
#add CNA data
if sample in cnaDict:
for meta in cnaDict[sample]:
#print(cnaDict[sample][meta])
#if cnaDict[sample][meta] == 0:
# print("is 0 int")
outText = sample + "\t"+meta+"\t"+ cnaDict[sample][meta]+"\n"
outFile.write(outText.encode())
#add variant classification info
if sample in variantDict:
for meta in variantDict[sample]:
for value in variantDict[sample][meta]:
outText = sample+"\t"+"SomaticMutation__"+meta+"__Variant_Classification"+"\t"+value+"\n"
outFile.write(outText.encode())
#add protein variant info
if sample in proteinDict:
for meta in proteinDict[sample]:
for value in proteinDict[sample][meta]:
outText = sample+"\t"+"SomaticMutation__"+meta+"__Protein_Variant"+"\t" +value+"\n"
outFile.write(outText.encode())
#outFile.close()
| 2.671875
| 3
|
StreamPy/examples_timed_window_wrapper.py
|
AnomalyInc/StreamPy
| 2
|
12776492
|
<reponame>AnomalyInc/StreamPy<filename>StreamPy/examples_timed_window_wrapper.py
from Stream import Stream, _no_value, _multivalue, TimeAndValue
from Operators import stream_func, stream_agent
from examples_element_wrapper import print_stream
import numpy as np
import random
############################################################
############################################################
# SECTION 1. SINGLE INPUT, SINGLE OUTPUT, STATELESS
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 1'
print 'EXAMPLES OF SINGLE INPUT, SINGLE OUTPUT, STATELESS'
print '**************************************************'
#______________________________________________________
#
# EXAMPLE 1: SINGLE INPUT, SINGLE OUTPUT, STATELESS
#______________________________________________________
print
print '--------------------------------------------------'
print 'SECTION 1. EXAMPLE 1 '
print ' SINGLE INPUT, SINGLE OUTPUT, STATELESS'
print '--------------------------------------------------'
#
# SPECIFICATION:
# Write a function that sums the values in a time-window
# in a single input stream. The elements of the input stream
# are TimeAndValue objects with a time field, and a value
# field. If x and y are elements in the stream and y follows
# x then y's timestamp is greater than x's timestamp.
# A window of length T time units includes exactly those
# elements in the stream with time stamps in the interval:
# [window_start_time : window_start_time + T].
# The window_start_time moves forward at each step by step_size
# time units; so the sequence of windows are
# [0 : T], [step_size : step_size + T],
# [2*step_size : 2*step_size + T], [3*step_size : 3*step_size + T]
# If window_size=4.0 and step_size=2.0 then the output stream
# will consist of the sum of the values with timestamps in the
# intervals [0:4], [2:6], [4:8], ...
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP:
# Write a function on a timed list.
def sum_values_in_timed_list(timed_list):
return sum(v.value for v in timed_list)
# a is the input stream for this example
a = Stream('a timed stream')
print_stream(a)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# z is the output stream for this example.
z = stream_func(
inputs=a, # The input is a single stream
    f_type='timed', # Identifies 'timed' wrapper
f=sum_values_in_timed_list, # Function that is wrapped.
num_outputs=1, # Single output stream
window_size=4.0,
step_size=2.0)
z.set_name('sum of a')
print_stream(z)
# Drive the input streams.
t=0.0
for _ in range(20):
t += random.random()
v = random.randint(0,9)
a.append(TimeAndValue(t, v))
############################################################
############################################################
# SECTION 2. SINGLE INPUT, SINGLE OUTPUT, STATEFUL
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 2'
print 'EXAMPLES OF SINGLE INPUT, SINGLE OUTPUT, STATEFUL'
print '**************************************************'
#_____________________________________________________________
# EXAMPLE 1
#_____________________________________________________________
# SPECIFICATION:
# Write a function, exponential_smoothed_timed_windows,
# that computes func(window) for each
# timed window, where func is a parameter. The agent
# returns the exponentially smoothed value of func.
# The smoothing factor, alpha, is a parameter.
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP:
# This computation has state to which smoothing is applied
# Write a function, exponential_smoothed_list, with
# parameters: a timed list and state. This function reads
# the parameter alpha of the stream function; so encapsulate
# exponential_smoothed_list within
# exponential_smoothed_timed_windows
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
def exponential_smoothed_timed_windows(
input_stream, func, alpha,
window_size, step_size,
initial_state):
"""
Parameters
----------
input_stream: Stream
A previously defined stream
This is the only input stream of the agent.
func: function
func operates on a list of TimeAndValue objects
and returns an object that can be smoothed
exponentially.
alpha: number
The exponential smoothing parameter.
window_size, step_size, initial_state:
Already defined.
"""
def exponential_smoothed_list(timed_list, state):
next_state = ((1 - alpha)*func(timed_list) +
alpha*state)
message = next_state
return (message, next_state)
return stream_func(
inputs=input_stream, # single input timed stream
f_type='timed', # identifies 'timed' wrapper
f=exponential_smoothed_list, # function that is wrapped
num_outputs=1, # single output stream
state=initial_state,
window_size=window_size,
step_size=step_size)
print
print '--------------------------------------------------'
print 'SECTION 2. EXAMPLE 1 '
print ' SINGLE INPUT, SINGLE OUTPUT, STATEFUL'
print '--------------------------------------------------'
# b is the input stream for this example
b = Stream('b: timed stream')
print_stream(b)
# y is the output stream for this example.
y = exponential_smoothed_timed_windows(
input_stream=b,
func=sum_values_in_timed_list,
alpha=0.5,
window_size=4,
step_size=2,
initial_state=0)
y.set_name('y')
print_stream(y)
# Drive the input
t=0.0
for _ in range(12):
t += random.random()
v = random.randint(0,9)
b.append(TimeAndValue(t, v))
############################################################
############################################################
# SECTION 3. MULTIPLE INPUTS, SINGLE OUTPUT, STATELESS
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 3'
print 'EXAMPLES OF MULTIPLE INPUTS, SINGLE OUTPUT, STATELESS'
print '**************************************************'
#______________________________________________________
#
# EXAMPLE 1: TWO OR MORE INPUT STREAMS, ONE OUTPUT STREAM
# STATELESS
#______________________________________________________
# SPECIFICATION:
# Write a function that has a single parameter - a list of
# timed streams - and that returns the sum of the values of
# timed windows.
# For example, if the list consists of two timed streams, c
# and d, and:
# c = [(0.1, 100), (0.9, 200), (1.2, 500), (3.1, 800), (6.6, 300)]
# d = [(0.7, 5), (2.3, 25), (3.9, 12), (5.1, 18), (5.2, 12)]
# where for succinctness each pair is (time, value), then
# with a window size and step size of 1.0 the windows are:
# for c: [(0.1, 100), (0.9, 200)], [(1.2, 500)], [], [(3.1, 800)],
# [], []..
# for d: [(0.7, 5)], [], [(2.3, 25)], [(3.9, 12)], [], ...
# Note that we don't yet have the complete windows for the
# interval [5.0, 6.0] for d because we may get later values
# with timestamps less than 6 on stream d.
# The sums for the windows are:
# (100+200+5), (500), (25), (800+12), (),
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP:
# Write a function with a single parameter: a list of timed lists
def sum_values_in_all_timed_lists(list_of_timed_lists):
    return (sum(sum(v.value for v in timed_list)
                for timed_list in list_of_timed_lists))
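# For instance, with the windows from the specification above, the first
# window pair is [(0.1, 100), (0.9, 200)] for c and [(0.7, 5)] for d, and
# applying sum_values_in_all_timed_lists to that pair gives 100+200+5 = 305.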
print
print '--------------------------------------------------'
print 'SECTION 3. EXAMPLE 1 '
print ' MULTIPLE INPUTS, SINGLE OUTPUT, STATELESS'
print '--------------------------------------------------'
# Create input streams, c and d, for this example.
c = Stream('Input: c')
d = Stream('Input: d')
print_stream(c)
print_stream(d)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# x is the output stream for this example
x = stream_func(
inputs=[c,d], # list of two input timed streams
f_type='timed', # identifies the 'timed' wrapper
f=sum_values_in_all_timed_lists, #function that is wrapped
num_outputs=1, # Single output stream
window_size=2.0,
step_size=2.0)
x.set_name('Output: x')
print_stream(x)
# Drive the input streams
t_c=0.0
t_d=0.0
for _ in range(12):
t_c += random.random()
t_d += random.random()
v_c = random.randint(0,9)
v_d = 100+random.randint(0,9)
c.append(TimeAndValue(t_c, v_c))
d.append(TimeAndValue(t_d, v_d))
#______________________________________________________
#
# EXAMPLE 2: TWO OR MORE INPUT STREAMS, ONE OUTPUT STREAM
# STATELESS
#______________________________________________________
# SPECIFICATION:
# Write a function that has a two input streams and a
# single output stream. An element on the output stream is
# the difference in lengths of the two windows (one window
# per input stream).
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP
# Write a function on a list of two lists.
def diff_of_counts_in_lists(list_of_two_lists):
return len(list_of_two_lists[0]) - len(list_of_two_lists[1])
print
print '--------------------------------------------------'
print 'SECTION 3. EXAMPLE 2 '
print ' MULTIPLE INPUTS, SINGLE OUTPUT, STATELESS'
print '--------------------------------------------------'
# Create input streams, cc and dd, for this example.
cc = Stream('cc')
dd = Stream('dd')
print_stream(cc)
print_stream(dd)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# xx is the output stream for this example
xx = stream_func(
inputs = [cc, dd], # Inputs is a list of two streams
f_type = 'timed', # Identifies wrapper as the 'timed' wrapper
f = diff_of_counts_in_lists, # Function that is wrapped
num_outputs=1, # Single output stream.
window_size=2.0,
step_size=2.0)
xx.set_name('xx')
print_stream(xx)
# Drive the input streams
t_cc=0.0
t_dd=0.0
for _ in range(10):
t_cc += random.random()
t_dd += random.random()
v_cc = random.randint(0,9)
v_dd = random.randint(0,9)
cc.append(TimeAndValue(t_cc, v_cc))
dd.append(TimeAndValue(t_dd, v_dd))
############################################################
############################################################
# SECTION 4. MULTIPLE INPUTS, SINGLE OUTPUT, STATEFUL
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 4'
print 'EXAMPLES OF MULTIPLE INPUTS, SINGLE OUTPUT, STATEFUL'
print '**************************************************'
#______________________________________________________
#
# EXAMPLE 1. TWO OR MORE INPUT STREAMS, ONE OUTPUT STREAM
# STATEFUL
#______________________________________________________
#
# SPECIFICATION:
# Write a function with a list of input streams that
# returns a stream in which each element is a 2-tuple
# (max_so_far, max_of_current_window) where
# max_of_current_window is the max over all input
# streams of the sums of the values in each timed
# window, and
# max_so_far is the maximum value of max_of_current_window
# over all the windows seen thus far.
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP:
# Write a function, max_sums_timed_windows, with two
# parameters: a list of timed lists, and a state.
# The state is the maximum value seen thus far.
# The function returns a message which is the 2-tuple
# (max_so_far, max_of_current_window), the maximum
# seen so far, and the current maximum, i.e., the
# maximum over all current windows of the sum of the
# window.
def max_sums_timed_windows(list_of_timed_lists, state):
# The state is the max seen so far.
max_so_far = state
max_of_current_window = \
max(sum(v.value for v in timed_list)
for timed_list in list_of_timed_lists)
# Update the max seen so far.
max_so_far = max(max_so_far, max_of_current_window)
message = (max_so_far, max_of_current_window)
next_state = max_so_far
return (message, next_state)
print
print '--------------------------------------------------'
print 'SECTION 4. EXAMPLE 1 '
print ' MULTIPLE INPUTS, SINGLE OUTPUT, STATEFUL'
print '--------------------------------------------------'
# Create input streams, ee and ff, for this example.
ee = Stream('ee')
ff = Stream('ff')
print_stream(ee)
print_stream(ff)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# w is the output stream of the wrapped function.
w = stream_func(
inputs=[ee, ff], # list of two input timed streams
f_type='timed', # Identifies 'timed' wrapper
f=max_sums_timed_windows, # function being wrapped
num_outputs=1, # Single output stream
state = 0.0, # Initial state
window_size=1.0,
step_size=1.0)
w.set_name('w')
print_stream(w)
# Drive the input streams
t_ee=0.0
t_ff=0.0
for _ in range(8):
t_ee += random.random()
t_ff += random.random()
v_ee = random.randint(0,9)
v_ff = random.randint(0,9)
ee.append(TimeAndValue(t_ee, v_ee))
ff.append(TimeAndValue(t_ff, v_ff))
############################################################
############################################################
# SECTION 5. SINGLE INPUT, MULTIPLE OUTPUT, STATELESS
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 5'
print 'EXAMPLES OF SINGLE INPUT, MULTIPLE OUTPUTS, STATELESS'
print '**************************************************'
#_____________________________________________________________
# EXAMPLE 1: SINGLE INPUT, TWO OR MORE OUTPUTS, STATELESS
#_____________________________________________________________
# SPECIFICATION:
# Write a function that has a single input stream and
# that returns two output streams containing the max and
# the min values of windows of the input stream.
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP:
# Write a function, max_and_min_of_values_in_timed_list, with a
# single parameter: a timed list.
def max_and_min_of_values_in_timed_list(timed_list):
if timed_list:
return (max(v.value for v in timed_list),
min(v.value for v in timed_list)
)
else:
# timed_list is empty
return (None, None)
print
print '--------------------------------------------------'
print 'SECTION 5. EXAMPLE 1 '
print ' SINGLE INPUT, MULTIPLE OUTPUT, STATELESS'
print '--------------------------------------------------'
# Create input stream, g, for this example.
g = Stream('g')
print_stream(g)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# u, v are the two output streams of the wrapped function.
u, v= stream_func(
inputs=g, # Single input stream
f_type='timed', # Identifies wrapper as 'timed' wrapper.
f=max_and_min_of_values_in_timed_list, # function that is wrapped
num_outputs=2, # Two output streams
window_size=2.0,
step_size=2.0)
u.set_name('u')
v.set_name('v')
print_stream(u)
print_stream(v)
# Drive the input stream.
t_g=0.0
for _ in range(10):
t_g += random.random()
v_g = random.randint(0,9)
g.append(TimeAndValue(t_g, v_g))
############################################################
############################################################
# SECTION 6. SINGLE INPUT, MULTIPLE OUTPUT, STATEFUL
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 6'
print 'EXAMPLES OF SINGLE INPUT, MULTIPLE OUTPUTS, STATEFUL'
print '**************************************************'
#_____________________________________________________________
# SECTION 6 EXAMPLE 1: SINGLE INPUT, TWO OR MORE OUTPUTS, STATEFUL
#_____________________________________________________________
# SPECIFICATION:
# Write a function that has a single input stream and
# that returns two output streams. The elements of the
# output stream are the average of the maximum values
# of the timed windows, where the average is taken
# over all the windows seen so far, and similarly for
# the minimum.
# HOW TO DEVELOP THE STREAMING PROGRAM.
# FIRST STEP:
# Write a function, avg_of_max_and_min_in_timed_list, with two
# parameters: a timed list and a state. The function returns
# a message and a (new) state. The message is a 2-tuple
# (avg_of_max, avg_of_min), where each element of the tuple
# becomes a message in a different output stream. The state
# is (num_windows, sum_of_max, sum_of_min) where
# num_windows is the number of time steps so far for which timed_list
# is non-empty.
# sum_of_max is the sum over all time steps of the max for each step.
# sum_of_min is the sum over all time steps of the min for each step.
def avg_of_max_and_min_in_timed_list(timed_list, state):
num_windows, sum_of_max, sum_of_min = state
if timed_list:
# timed_list is nonempty
next_max = max(v.value for v in timed_list)
next_min = min(v.value for v in timed_list)
num_windows += 1
sum_of_max += next_max
sum_of_min += next_min
avg_of_max = sum_of_max/float(num_windows)
avg_of_min = sum_of_min/float(num_windows)
state = (num_windows, sum_of_max, sum_of_min)
message = (avg_of_max, avg_of_min)
return (message, state)
    else:
        # timed_list is empty
        # So, don't change the state.
        # In particular, don't increment num_windows
        if num_windows == 0:
            # No non-empty window has been seen yet; avoid dividing by zero.
            return ((None, None), state)
        avg_of_max = sum_of_max/float(num_windows)
        avg_of_min = sum_of_min/float(num_windows)
        message = (avg_of_max, avg_of_min)
        return (message, state)
print
print '--------------------------------------------------'
print 'SECTION 6. EXAMPLE 1 '
print ' SINGLE INPUT, MULTIPLE OUTPUTS, STATEFUL'
print '--------------------------------------------------'
# Create input stream, h, for this example.
h = Stream('h: Input stream')
print_stream(h)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# s_stream, t_stream are the two output streams of the wrapped function.
s_stream, t_stream = stream_func(
inputs = h, # Input is a single stream.
f_type = 'timed',
f = avg_of_max_and_min_in_timed_list, # Function that is wrapped.
num_outputs=2, # Two output streams
state = (0, 0.0, 0.0), # Initial num windows, sum max, sum min
window_size=2.0,
step_size=2.0)
s_stream.set_name('avg max')
t_stream.set_name('avg min')
print_stream(s_stream)
print_stream(t_stream)
# Drive the input stream.
t_h=0.0
for _ in range(20):
t_h += random.random()
v_h = random.randint(0,9)
h.append(TimeAndValue(t_h, v_h))
############################################################
############################################################
# SECTION 7. MULTIPLE INPUTS, MULTIPLE OUTPUT, STATELESS
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 7'
print 'EXAMPLES OF MULTIPLE INPUTS, MULTIPLE OUTPUTS, STATELESS'
print '**************************************************'
#_____________________________________________________________
# SECTION 7 EXAMPLE 1: MULTIPLE INPUTS, MULTIPLE OUTPUTS, STATELESS
#_____________________________________________________________
# SPECIFICATION:
# Write a function that has a single parameter, a list of timed
# streams. The function returns a list of two (untimed) streams.
# The k-th element of the first output stream is the maximum
# value across all input streams of the k-th timed window, and
# the corresponding element for the second output stream is the
# minimum value. If the k-th timed windows for all the input
# streams are empty, the k-th element of each output stream is
# None.
# FIRST STEP:
# Write a function that has a single parameter: a list of timed lists.
# The function returns a 2-tuple: the max and the min of the values
# across all the timed lists if at least one timed list is nonempty,
# and None otherwise.
def max_and_min_values_in_all_timed_lists(list_of_timed_lists):
if any(list_of_timed_lists):
return (max(max(v.value for v in timed_list)
for timed_list in list_of_timed_lists if timed_list),
min(min(v.value for v in timed_list)
for timed_list in list_of_timed_lists if timed_list)
)
else:
return (None, None)
print
print '--------------------------------------------------'
print 'SECTION 7. EXAMPLE 1 '
print ' MULTIPLE INPUTS, MULTIPLE OUTPUTS, STATELESS'
print '--------------------------------------------------'
# Create input streams, i_stream and j_stream, for this example.
i_stream = Stream('i_stream: Input stream')
j_stream = Stream('j_stream: Input stream')
# Print the streams so that you can visually check the results.
print_stream(i_stream)
print_stream(j_stream)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# q_stream, r_stream are the two output streams of the wrapped function.
q_stream, r_stream = stream_func(
inputs = [i_stream, j_stream], # list of input timed_streams
f_type = 'timed', # Identifies the 'timed' wrapper.
f = max_and_min_values_in_all_timed_lists,
num_outputs=2, # two output streams
window_size=3.0,
step_size=3.0)
q_stream.set_name('max of i_stream, j_stream timed windows')
r_stream.set_name('min of i_stream, j_stream timed windows')
print_stream(q_stream)
print_stream(r_stream)
# Drive the two input streams.
t_i=0.0
t_j=0.0
for _ in range(20):
t_i += random.random()
t_j += random.random()
v_i = random.randint(0,9)
v_j = random.randint(0,9)
i_stream.append(TimeAndValue(t_i, v_i))
j_stream.append(TimeAndValue(t_j, v_j))
############################################################
############################################################
# SECTION 8. MULTIPLE INPUTS, MULTIPLE OUTPUT, STATEFUL
############################################################
############################################################
print
print '**************************************************'
print 'SECTION 8'
print 'EXAMPLES OF MULTIPLE INPUTS, MULTIPLE OUTPUTS, STATEFUL'
print '**************************************************'
#_____________________________________________________________
# SECTION 8 EXAMPLE 1: MULTIPLE INPUTS, MULTIPLE OUTPUTS, STATEFUL
#_____________________________________________________________
# SPECIFICATION:
# Section 8, example 1 is to Section 7, example 1, what
# Section 6, example 1 is to Section 5, example 1. The
# outputs in this example are the AVERAGES of the max and min
# over timed windows of all input streams (whereas in the
# previous example, the outputs were the max and min values
# without averaging).
# FIRST STEP:
# Write a function that has two parameters: a list of timed lists and
# a state.
# The function returns a tuple consisting of
# (1) a 2-tuple: the max and the min of the values of the timed lists
# (2) the next state.
def avg_of_max_and_min_values_in_all_timed_lists(list_of_timed_lists, state):
num_windows, sum_of_max, sum_of_min = state
if all(list_of_timed_lists):
next_max = max(max(v.value for v in timed_list)
for timed_list in list_of_timed_lists)
next_min = min(min(v.value for v in timed_list)
for timed_list in list_of_timed_lists)
num_windows += 1
sum_of_max += next_max
sum_of_min += next_min
avg_of_max = sum_of_max/float(num_windows)
avg_of_min = sum_of_min/float(num_windows)
state = (num_windows, sum_of_max, sum_of_min)
return ([avg_of_max, avg_of_min], state)
    else:
        # Guard against dividing by zero before any step where every input
        # stream had a non-empty window; the state is left unchanged.
        if num_windows == 0:
            return ([None, None], state)
        avg_of_max = sum_of_max/float(num_windows)
        avg_of_min = sum_of_min/float(num_windows)
        return ([avg_of_max, avg_of_min], state)
print
print '--------------------------------------------------'
print 'SECTION 8. EXAMPLE 1 '
print ' MULTIPLE INPUTS, MULTIPLE OUTPUTS, STATEFUL'
print '--------------------------------------------------'
# Create input streams, k_stream and l_stream, for this example.
k_stream = Stream('k_stream: Input stream')
l_stream = Stream('l_stream: Input stream')
# Print the streams so that you can visually check the results.
print_stream(k_stream)
print_stream(l_stream)
# SECOND STEP.
# Wrap the function with the 'timed' wrapper.
# o_stream, p_stream are the two output streams of the wrapped function.
o_stream, p_stream = stream_func(
inputs = [k_stream, l_stream], # list of input timed_streams
f_type = 'timed', # Identifies the 'timed' wrapper
f = avg_of_max_and_min_values_in_all_timed_lists,
num_outputs=2, # two output streams
state= (0, 0.0, 0.0), # Initial num windows, sum_max, sum_min
window_size=3.0,
step_size=3.0)
o_stream.set_name('avg of max of k_stream, l_stream timed windows')
p_stream.set_name('avg of min of k_stream, l_stream timed windows')
print_stream(o_stream)
print_stream(p_stream)
# Drive the two input streams.
t_k=0.0
t_l=0.0
for _ in range(30):
t_k += random.random()
t_l += random.random()
v_k = random.randint(0,9)
v_l = random.randint(0,9)
k_stream.append(TimeAndValue(t_k, v_k))
l_stream.append(TimeAndValue(t_l, v_l))
| 1.976563
| 2
|
tests/onegov/election_day/views/test_views_manage.py
|
politbuero-kampagnen/onegov-cloud
| 0
|
12776493
|
<gh_stars>0
from datetime import date
from lxml.html import document_fromstring
from onegov.ballot import ProporzElection
from onegov.election_day.collections import ArchivedResultCollection
from onegov.election_day.layouts import ElectionLayout
from tests.onegov.election_day.common import login
from tests.onegov.election_day.common import upload_election_compound
from tests.onegov.election_day.common import upload_majorz_election
from tests.onegov.election_day.common import upload_party_results
from tests.onegov.election_day.common import upload_proporz_election
from tests.onegov.election_day.common import upload_vote
from webtest import TestApp as Client
from tests.onegov.election_day.common import DummyRequest
def test_view_login_logout(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login = client.get('/').click('Anmelden')
login.form['username'] = '<EMAIL>'
login.form['password'] = '<PASSWORD>'
assert "Unbekannter Benutzername oder falsches Passwort" \
in login.form.submit()
assert 'Anmelden' in client.get('/')
login.form['password'] = '<PASSWORD>'
homepage = login.form.submit().follow()
assert 'Sie sind angemeldet' in homepage
assert 'Abmelden' in homepage
assert 'Anmelden' not in homepage
assert 'Anmelden' in client.get('/').click('Abmelden').follow()
def test_view_manage_elections(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/elections',
expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/elections')
assert "Noch keine Wahlen erfasst" in manage
new = manage.click('Neue Wahl')
new.form['election_de'] = 'Elect a new president'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form['mandates'] = 1
manage = new.form.submit().follow()
assert "Elect a new president" in manage
edit = manage.click('Bearbeiten')
edit.form['election_de'] = 'Elect a new federal councillor'
edit.form['absolute_majority'] = None
manage = edit.form.submit().follow()
assert "Elect a new federal councillor" in manage
assert "Elect a new federal councillor" == archive.query().one().title
delete = manage.click("Löschen")
assert "Wahl löschen" in delete
assert "Elect a new federal councillor" in delete
assert "Bearbeiten" in delete.click("Abbrechen")
manage = delete.form.submit().follow()
assert "Noch keine Wahlen erfasst" in manage
assert archive.query().count() == 0
def test_view_manage_election_compounds(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/election-compounds',
expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/election-compounds')
assert "Noch keine Verbindungen" in manage
# Add two elections
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Elect a new parliament (Region A)'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'proporz'
new.form['domain'] = 'region'
new.form['mandates'] = 10
new.form.submit().follow()
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Elect a new parliament (Region B)'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'proporz'
new.form['domain'] = 'region'
new.form['mandates'] = 5
new.form.submit().follow()
# Add a compound
new = client.get('/manage/election-compounds').click('Neue Verbindung')
new.form['election_de'] = 'Elect a new parliament'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'canton'
new.form['elections'] = ['elect-a-new-parliament-region-a']
manage = new.form.submit().follow()
assert "Elect a new parliament" in manage
edit = manage.click('Bearbeiten')
edit.form['election_de'] = 'Elect a new cantonal parliament'
edit.form['elections'] = [
'elect-a-new-parliament-region-a',
'elect-a-new-parliament-region-b'
]
manage = edit.form.submit().follow()
assert "Elect a new cantonal parliament" in manage
assert "Elect a new cantonal parliament" in [
a.title for a in archive.query()
]
delete = manage.click("Löschen")
assert "Verbindung löschen" in delete
assert "Elect a new cantonal parliament" in delete
assert "Bearbeiten" in delete.click("Abbrechen")
manage = delete.form.submit().follow()
assert "Noch keine Verbindungen" in manage
assert archive.query().count() == 2
def test_view_manage_votes(election_day_app):
archive = ArchivedResultCollection(election_day_app.session())
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/votes', expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/votes')
assert "Noch keine Abstimmungen erfasst" in manage
new = manage.click('Neue Abstimmung')
new.form['vote_de'] = 'Vote for a better yesterday'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'federation'
manage = new.form.submit().follow()
assert "Vote for a better yesterday" in manage
edit = manage.click('Bearbeiten')
edit.form['vote_de'] = 'Vote for a better tomorrow'
manage = edit.form.submit().follow()
assert "Vote for a better tomorrow" in manage
assert "Vote for a better tomorrow" == archive.query().one().title
delete = manage.click("Löschen")
assert "Abstimmung löschen" in delete
assert "Vote for a better tomorrow" in delete
assert "Bearbeiten" in delete.click("Abbrechen")
manage = delete.form.submit().follow()
assert "Noch keine Abstimmungen erfasst" in manage
assert archive.query().count() == 0
def test_upload_proporz_election(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
upload_proporz_election(client, canton='zg')
session = election_day_app.session_manager.session()
election = session.query(ProporzElection).one()
assert election.type == 'proporz'
request = DummyRequest(session, election_day_app)
layout = ElectionLayout(election, request, 'lists-panachage')
assert layout.visible
def test_view_clear_results(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
upload_majorz_election(client, canton='zg')
upload_proporz_election(client, canton='zg')
upload_election_compound(client, canton='zg')
upload_party_results(client)
upload_party_results(client, slug='elections/elections')
upload_vote(client)
# Test currently fails for lists / panachage because
    # layout.visible is False because self.proporz is False!?!
marker = "<h2>Resultate</h2>"
i_marker = "<h2>Zwischenergebnisse</h2>"
urls = (
'/election/majorz-election/candidates',
'/election/majorz-election/statistics',
'/election/proporz-election/lists',
'/election/proporz-election/candidates',
'/election/proporz-election/connections',
'/election/proporz-election/party-strengths',
'/election/proporz-election/parties-panachage',
# '/election/proporz-election/lists-panachage',
'/election/proporz-election/statistics',
'/elections/elections/parties-panachage',
'/elections/elections/party-strengths',
'/vote/vote/entities'
)
for url in urls:
page = client.get(url)
if marker not in page and i_marker not in page:
print(url)
assert False
client.get('/election/majorz-election/clear').form.submit()
client.get('/election/proporz-election/clear').form.submit()
client.get('/elections/elections/clear').form.submit()
client.get('/vote/vote/clear').form.submit()
assert all((marker not in client.get(url) for url in urls))
def test_view_manage_upload_tokens(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
assert "Noch keine Token." in client.get('/manage/upload-tokens')
client.get('/manage/upload-tokens/create-token').form.submit()
assert "Noch keine Token." not in client.get('/manage/upload-tokens')
client.get('/manage/upload-tokens').click("Löschen").form.submit()
assert "Noch keine Token." in client.get('/manage/upload-tokens')
def test_view_manage_data_sources(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
login(client)
# Votes
# ... add data source
new = client.get('/manage/sources/new-source')
new.form['name'] = 'ds_vote'
new.form['upload_type'] = 'vote'
new.form.submit().follow()
assert 'ds_vote' in client.get('/manage/sources')
# ... regenerate token
manage = client.get('/manage/sources')
token = manage.pyquery('.data_sources td')[2].text
manage = manage.click('Token neu erzeugen').form.submit().follow()
assert token not in manage
# ... manage
manage = manage.click('Verwalten', href='data-source').follow()
assert 'Noch keine Abstimmungen erfasst' in manage.click('Neue Zuordnung')
new = client.get('/manage/votes/new-vote')
new.form['vote_de'] = "vote-1"
new.form['date'] = date(2013, 1, 1)
new.form['domain'] = 'federation'
new.form.submit()
new = client.get('/manage/votes/new-vote')
new.form['vote_de'] = "vote-2"
new.form['date'] = date(2014, 1, 1)
new.form['domain'] = 'federation'
new.form.submit()
new = manage.click('Neue Zuordnung')
assert all((x in new for x in ('vote-1', 'vote-2')))
new.form['district'] = '1111'
new.form['number'] = '2222'
new.form['item'] = 'vote-1'
manage = new.form.submit().follow()
assert all((x in manage for x in ('vote-1', '1111', '2222')))
edit = manage.click('Bearbeiten')
edit.form['district'] = '3333'
edit.form['number'] = '4444'
edit.form['item'] = 'vote-2'
manage = edit.form.submit().follow()
assert all((x not in manage for x in ('vote-1', '1111', '2222')))
assert all((x in manage for x in ('vote-2', '3333', '4444')))
manage = manage.click('Löschen').form.submit().follow()
assert 'Noch keine Zuordnungen' in manage
# ... delete data source
client.get('/manage/sources').click('Löschen').form.submit()
assert 'ds_vote' not in client.get('/manage/sources')
assert 'Noch keine Datenquellen' in client.get('/manage/sources')
# Majorz elections
new = client.get('/manage/elections/new-election')
new.form['election_de'] = "election-majorz"
new.form['date'] = date(2013, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'majorz'
new.form['domain'] = 'federation'
new.form.submit()
new = client.get('/manage/elections/new-election')
new.form['election_de'] = "election-proporz"
new.form['date'] = date(2013, 1, 1)
new.form['mandates'] = 1
new.form['election_type'] = 'proporz'
new.form['domain'] = 'federation'
new.form.submit()
# ... add data source
new = client.get('/manage/sources/new-source')
new.form['name'] = 'ds_majorz'
new.form['upload_type'] = 'majorz'
new.form.submit().follow()
assert 'ds_majorz' in client.get('/manage/sources')
# ... manage
manage = client.get('/manage/sources')
manage = manage.click('Verwalten', href='data-source').follow()
new = manage.click('Neue Zuordnung')
assert 'election-majorz' in new
assert 'election-proporz' not in new
new.form['district'] = '4444'
new.form['number'] = '5555'
new.form['item'] = 'election-majorz'
manage = new.form.submit().follow()
assert all((x in manage for x in ('election-majorz', '4444', '5555')))
# ... delete data source
client.get('/manage/sources').click('Löschen').form.submit()
assert 'ds_majorz' not in client.get('/manage/sources')
assert 'Noch keine Datenquellen' in client.get('/manage/sources')
# Proporz elections
# ... add data source
new = client.get('/manage/sources/new-source')
new.form['name'] = 'ds_proporz'
new.form['upload_type'] = 'proporz'
new.form.submit().follow()
assert 'ds_proporz' in client.get('/manage/sources')
# ... manage
manage = client.get('/manage/sources')
manage = manage.click('Verwalten', href='data-source').follow()
new = manage.click('Neue Zuordnung')
assert 'election-majorz' not in new
assert 'election-proporz' in new
new.form['district'] = '6666'
new.form['number'] = '7777'
new.form['item'] = 'election-proporz'
manage = new.form.submit().follow()
assert all((x in manage for x in ('election-proporz', '6666', '7777')))
# ... delete data source
client.get('/manage/sources').click('Löschen').form.submit()
assert 'ds_proporz' not in client.get('/manage/sources')
assert 'Noch keine Datenquellen' in client.get('/manage/sources')
def test_reset_password(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
request_page = client.get('/auth/login').click('Passwort zurücksetzen')
assert 'Passwort zurücksetzen' in request_page
request_page.form['email'] = '<EMAIL>'
assert '<EMAIL>' in request_page.form.submit()
assert len(election_day_app.smtp.outbox) == 0
request_page.form['email'] = '<EMAIL>'
assert '<EMAIL>' in request_page.form.submit()
assert len(election_day_app.smtp.outbox) == 1
message = election_day_app.smtp.outbox[0]
message = message.get_payload(1).get_payload(decode=True)
message = message.decode('iso-8859-1')
link = list(document_fromstring(message).iterlinks())[0][2]
token = link.split('token=')[1]
reset_page = client.get(link)
assert token in reset_page.text
reset_page.form['email'] = '<EMAIL>'
reset_page.form['password'] = '<PASSWORD>'
reset_page = reset_page.form.submit()
assert "Ungültige Adresse oder abgelaufener Link" in reset_page
assert token in reset_page.text
reset_page.form['email'] = '<EMAIL>'
reset_page.form['password'] = '<PASSWORD>'
reset_page = reset_page.form.submit()
assert "Feld muss mindestens 8 Zeichen beinhalten" in reset_page
assert token in reset_page.text
reset_page.form['email'] = '<EMAIL>'
reset_page.form['password'] = '<PASSWORD>'
assert "Passwort geändert" in reset_page.form.submit()
reset_page.form['email'] = '<EMAIL>'
reset_page.form['password'] = '<PASSWORD>'
reset_page = reset_page.form.submit()
assert "Ungültige Adresse oder abgelaufener Link" in reset_page
login_page = client.get('/auth/login')
login_page.form['username'] = '<EMAIL>'
login_page.form['password'] = '<PASSWORD>'
login_page = login_page.form.submit()
assert "Unbekannter Benutzername oder falsches Passwort" in login_page
login_page.form['username'] = '<EMAIL>'
login_page.form['password'] = '<PASSWORD>'
login_page = login_page.form.submit().follow()
assert "Sie sind angemeldet" in login_page
def test_view_manage_screens(election_day_app):
client = Client(election_day_app)
client.get('/locale/de_CH').follow()
assert client.get('/manage/screens', expect_errors=True).status_code == 403
login(client)
manage = client.get('/manage/screens')
assert 'Noch keine Screens' in manage
# Add two votes
new = client.get('/manage/votes').click('Neue Abstimmung')
new.form['vote_de'] = 'Einfache Vorlage'
new.form['vote_type'] = 'simple'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'federation'
new.form.submit().follow()
new = client.get('/manage/votes').click('Neue Abstimmung')
new.form['vote_de'] = 'Vorlage mit Gegenentwurf'
new.form['vote_type'] = 'complex'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'federation'
new.form.submit().follow()
# Add two elections
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Majorz Wahl'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'majorz'
new.form['domain'] = 'region'
new.form['mandates'] = 10
new.form.submit().follow()
new = client.get('/manage/elections').click('Neue Wahl')
new.form['election_de'] = 'Proporz Wahl'
new.form['date'] = date(2016, 1, 1)
new.form['election_type'] = 'proporz'
new.form['domain'] = 'region'
new.form['mandates'] = 5
new.form.submit().follow()
# Add a compound
new = client.get('/manage/election-compounds').click('Neue Verbindung')
new.form['election_de'] = 'Verbund von Wahlen'
new.form['date'] = date(2016, 1, 1)
new.form['domain'] = 'canton'
new.form['elections'] = ['proporz-wahl']
new.form.submit().follow()
# Add a screen
new = client.get('/manage/screens').click('Neuer Screen')
new.form['number'] = '5'
new.form['description'] = 'Mein Screen'
new.form['type'] = 'majorz_election'
new.form['majorz_election'] = 'majorz-wahl'
new.form['structure'] = '<title />'
new.form['css'] = '/* Custom CSS */'
manage = new.form.submit().follow()
assert 'Mein Screen' in manage
assert 'Majorz Wahl' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'proporz_election'
edit.form['proporz_election'] = 'proporz-wahl'
manage = edit.form.submit().follow()
assert 'Majorz Wahl' not in manage
assert 'Proporz Wahl' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'election_compound'
edit.form['election_compound'] = 'verbund-von-wahlen'
manage = edit.form.submit().follow()
assert 'Majorz Wahl' not in manage
assert '<NAME>' not in manage
assert 'Verbund von Wahlen' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'simple_vote'
edit.form['simple_vote'] = 'einfache-vorlage'
manage = edit.form.submit().follow()
assert '<NAME>' not in manage
assert '<NAME>' not in manage
assert 'Verbund von Wahlen' not in manage
assert 'Einfache Vorlage' in manage
edit = manage.click('Bearbeiten')
edit.form['type'] = 'complex_vote'
edit.form['complex_vote'] = 'vorlage-mit-gegenentwurf'
manage = edit.form.submit().follow()
assert '<NAME>' not in manage
assert 'Pro<NAME>' not in manage
assert 'Verbund von Wahlen' not in manage
assert 'Einfache Vorlage' not in manage
assert 'Vorlage mit Gegenentwurf' in manage
delete = manage.click('Löschen')
assert 'Screen löschen' in delete
assert 'Bearbeiten' in delete.click('Abbrechen')
manage = delete.form.submit().follow()
assert 'Noch keine Screens' in manage
| 2.171875
| 2
|
python_solutions/chapter_08_recursion_and_dynamic_programming/problem_08_08_permutations_with_dups.py
|
isayapin/cracking-the-coding-interview
| 560
|
12776494
|
def permutations_with_dups(string):
hash_table = {}
permutations = []
for character in string:
if character in hash_table:
hash_table[character] += 1
else:
hash_table[character] = 1
helper('', hash_table, permutations)
return permutations
def helper(string, hash_table, permutations):
if sum(hash_table.values()) <= 0:
permutations.append(string)
else:
for character in hash_table:
local_hash_table = hash_table.copy()
if local_hash_table[character] <= 1:
local_hash_table.pop(character, None)
else:
local_hash_table[character] -= 1
helper(string + character, local_hash_table, permutations)
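# Hypothetical usage sketch (not part of the original solution): "AAB" has
# 3!/2! = 3 distinct permutations, so exactly three strings are returned.
if __name__ == "__main__":
    print(permutations_with_dups("AAB"))  # ['AAB', 'ABA', 'BAA'] (order may vary)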
| 3.515625
| 4
|
src/sudoku.py
|
nahuel-ianni/sudoku-solver
| 0
|
12776495
|
"""
Sudoku solver script using a backtracking algorithm.
"""
def find_empty_location(grid):
"""
Looks for the coordinates of the next zero value on the grid,
starting on the upper left corner, from left to right and top to bottom.
Keyword Arguments:
grid {number matrix} -- The matrix to look for the coordinates on (default: {The instance's grid})
Returns:
tuple -- The (x, y) coordinates of the next zero value on the grid if one is found
"""
for index, row in enumerate(grid):
if 0 in row:
return (row.index(0), index)
def is_completed(grid):
"""
Checks if a grid is completed.
Grids are completed when all cells in them contain non-zero values.
Arguments:
grid {number matrix} -- The matrix to check for unique values on rows and columns
Returns:
bool -- True if all numbers are unique on their respective rows and columns, otherwise, False
"""
return not any(0 in row for row in grid)
def is_unique(digit, cell, grid):
"""
Checks if a given digit is unique across its row, column and subgrid.
Arguments:
digit {number} -- The digit to check for
cell {tuple} -- The (x, y) coordinates of the digit on the grid
grid {number matrix} -- The matrix to check the digit at
Returns:
bool -- True if the digit is unique on its respective row, column and subgrid, otherwise, False
"""
x, y = cell
x_axis = [row[x] for row in grid]
y_axis = grid[y]
col_level = (x // 3) * 3
row_level = (y // 3) * 3
subgrid = []
for index, row in enumerate(grid):
if row_level <= index < row_level + 3:
subgrid += row[col_level : col_level + 3]
return digit not in [*y_axis, *x_axis, *subgrid]
def solve(grid):
"""
Attempts to solve the grid following Sudoku rules, where on a 9x9 grid:
- Only numbers from 1 to 9 are valid
- No duplicates on either rows nor columns
- No duplicates within the special 3x3 subgrids
Arguments:
grid {number matrix} -- The matrix to solve
Returns:
solution -- Returns a list of lists filled with numbers if a solution was found, otherwise, False
"""
if is_completed(grid):
return grid
x, y = find_empty_location(grid)
for digit in range(1, 10):
if is_unique(digit, (x, y), grid):
grid[y][x] = digit
if solve(grid):
return grid
grid[y][x] = 0
return False
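# Hypothetical usage sketch (not part of the original script), using a
# well-known solvable puzzle; solve() fills the zeros in place and returns
# the completed grid, or False if no solution exists.
if __name__ == "__main__":
    puzzle = [
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ]
    solution = solve(puzzle)
    if solution:
        for row in solution:
            print(row)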
| 4.375
| 4
|
space_invaders.py
|
keijolinnamaa/Space_Invaders
| 0
|
12776496
|
<reponame>keijolinnamaa/Space_Invaders
import sys
import os.path
from datetime import datetime
import pygame
from settings import Settings
from ship import Ship
from bullet import Bullet
from alien import Alien
from time import sleep
from game_stats import GameStats
from button import Button
from explosions import Explosions
from scoreboard import ScoreBoard
import random
class SpaceInvaders:
def __init__(self):
pygame.init()
pygame.mixer.init()
self.settings = Settings()
self.screen = pygame.display.set_mode((self.settings.screen_width,self.settings.screen_height))
self.bg_image = pygame.image.load("images/starfield.png").convert_alpha()
#self.screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
#self.settings.screen_width = self.screen.get_rect().width
#self.settings.screen_height = self.screen.get_rect().height
self.bg_color = (self.settings.bg_color)
pygame.display.set_caption("Space Invaders")
self.play_button = Button(self, "Play")
self.ship = Ship(self)
self.bullets = pygame.sprite.Group()
self.aliens = pygame.sprite.Group()
self.explosions = pygame.sprite.Group()
self.stats = GameStats(self)
self.stats.highscore = self.get_highscore()
self.sb = ScoreBoard(self)
#self.sound_dir = os.path.join(os.path.dirname((__file__), "sounds")
self.create_fleet()
self.setup_explosions()
self.setup_sounds()
def run_game(self):
"""Main loop"""
pygame.mixer.music.play(loops= -1)
while True:
self.check_events()
if self.stats.game_active:
self.explosions.update()
self.ship.update()
self.update_bullets()
self.update_aliens()
self.update_screen()
def get_highscore(self):
try:
with open ("highscore.txt", "r") as f:
highscore = f.readline().strip()
if highscore == "":
highscore = 0
return int(highscore)
except:
with open ("highscore.txt", "w") as f:
f.write("0")
return 0
def check_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
self._check_keydown_events(event)
elif event.type == pygame.KEYUP:
self._check_keyup_events(event)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = pygame.mouse.get_pos()
self.check_play_button(mouse_pos)
def _check_keydown_events(self, event):
if event.key == pygame.K_RIGHT:
self.ship.moving_right = True
elif event.key == pygame.K_LEFT:
self.ship.moving_left = True
elif event.key == pygame.K_SPACE:
self.fire_bullet()
elif event.key == pygame.K_q:
sys.exit()
def _check_keyup_events(self, event):
if event.key == pygame.K_RIGHT:
self.ship.moving_right = False
elif event.key == pygame.K_LEFT:
self.ship.moving_left = False
def setup_sounds(self):
#background
pygame.mixer.music.load("sounds/alex.mp3")
pygame.mixer.music.set_volume(0.5)
#effects
self.shooting_sounds = []
sound_dir = os.path.join(os.path.dirname(__file__), "sounds")
for sound in ['laser.wav', 'laser2.wav']:
self.shooting_sounds.append(pygame.mixer.Sound(os.path.join(sound_dir, sound)))
for s in self.shooting_sounds:
s.set_volume(0.5)
self.explosion_sound = pygame.mixer.Sound(os.path.join(sound_dir, 'explosion.wav'))
self.explosion_sound.set_volume(0.5)
def check_play_button(self, mouse_pos):
button_clicked = self.play_button.rect.collidepoint(mouse_pos)
if button_clicked and not self.stats.game_active:
self.settings.setup_dynamic_settings()
self.sb.prepare_score()
self.sb.prepare_level()
self.sb.prepare_ships()
pygame.mouse.set_visible(False)
self.stats.reset_stats()
self.stats.game_active = True
self.aliens.empty()
self.bullets.empty()
self.create_fleet()
self.ship.center_ship()
def setup_explosions(self):
self.explosion_anim = {}
self.explosion_anim['alien'] = []
self.explosion_anim['ship'] = []
#and also we could make eg. self.explosion_anim['ship'] = []
for i in range(9): #0-8
filename = f'regularExplosion0{i}.png'
img = pygame.image.load("images/"+filename).convert_alpha()
#alien explosion
img_alien = pygame.transform.scale(img, (70,70))
self.explosion_anim['alien'].append(img_alien)
#ship explosion
img_ship = pygame.transform.scale(img, (140,140))
self.explosion_anim['ship'].append(img_ship)
def check_bullet_alien_collisions(self):
collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)
if collisions:
pygame.mixer.Sound.play(self.explosion_sound)
for aliens in collisions.values():
self.stats.score += self.settings.alien_points *len(aliens)
for alien in aliens:
explosion = Explosions(alien.rect.center, 'alien', self)
self.explosions.add(explosion)
self.sb.prepare_score()
self.sb.check_highscore()
if not self.aliens:
self.bullets.empty()
self.create_fleet()
self.settings.increase_speed()
self.stats.level += 1
self.sb.prepare_level()
def fire_bullet(self):
'''Creates new bullet instance and adds it to the bullets sprite group'''
if len(self.bullets) < self.settings.bullets_allowed:
pygame.mixer.Sound.play(self.shooting_sounds[0])
new_bullet = Bullet(self)
self.bullets.add(new_bullet)
def create_fleet(self):
alien = Alien(self)
alien_width, alien_height = alien.rect.size
ship_height = self.ship.rect.height
available_space_x = self.settings.screen_width - (2*alien_width)
number_of_aliens_x = available_space_x // (2*alien_width)
available_space_y = (self.settings.screen_height - (3*alien_height) - ship_height)
number_of_aliens_y = available_space_y // (2*alien_height)
for row in range(number_of_aliens_y):
for alien_number in range(number_of_aliens_x):
self.create_alien(alien_number, row)
def create_alien(self, alien_number, row):
alien = Alien(self)
alien_width, alien_height = alien.rect.size
alien.x = alien_width + (2*alien_width*alien_number)
alien.rect.x = alien.x
alien.rect.y = alien_height + (2*alien_height*row)
self.aliens.add(alien)
def check_fleet_edges(self):
for alien in self.aliens.sprites():
if alien.check_edges():
self.change_fleet_direction()
break
def change_fleet_direction(self):
for alien in self.aliens.sprites():
alien.rect.y += self.settings.fleet_drop_speed
self.settings.fleet_direction *= -1
def ship_hit(self):
explosion = Explosions(self.ship.rect.center, 'ship', self)
self.explosions.add(explosion)
pygame.mixer.Sound.play(self.explosion_sound)
for i in range(500):
self.explosions.update()
self.update_screen()
if self.stats.ships_left > 1:
self.stats.ships_left -= 1
self.sb.prepare_ships()
self.aliens.empty()
self.bullets.empty()
self.create_fleet()
self.ship.center_ship()
else:
self.stats.game_active = False
self.stats.reset_stats()
pygame.mouse.set_visible(True)
def check_aliens_bottom(self):
screen_rect = self.screen.get_rect()
for alien in self.aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
self.ship_hit()
break
def update_aliens(self):
self.check_fleet_edges()
self.aliens.update()
if pygame.sprite.spritecollideany(self.ship,self.aliens):
self.ship_hit()
self.check_aliens_bottom()
def update_bullets(self):
self.bullets.update()
# Get rid of bullets that have disappeared from the screen
for bullet in self.bullets.copy():
if bullet.rect.bottom <= 0:
self.bullets.remove(bullet)
self.check_bullet_alien_collisions()
def update_screen(self):
#self.screen.fill(self.bg_color)
self.screen.blit(self.bg_image, self.screen.get_rect()) #backgroundimage
self.ship.blitme()
for bullet in self.bullets.sprites():
bullet.draw_bullet()
#make the most recent drawn screen visible
self.aliens.draw(self.screen)
self.explosions.draw(self.screen)
self.sb.show_score()
if not self.stats.game_active:
self.screen.fill(self.bg_color)
self.play_button.draw_button()
pygame.display.flip()
if __name__ == "__main__":
si = SpaceInvaders()
si.run_game()
| 2.921875
| 3
|
debugger.py
|
AndyCyberSec/dextractor
| 3
|
12776497
|
import subprocess
def debug(pid):
cmd = ['adb', "forward", "tcp:1234", "jdwp:{}".format(pid)]
stream = subprocess.Popen(cmd)
stream.wait()
jdb = ["jdb", "-attach", "localhost:1234"]
stream = subprocess.Popen(jdb)
stream.wait()
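# Hypothetical usage sketch (not part of the original module). The PID is an
# assumption here; it is normally looked up first, e.g. with `adb shell pidof
# <package>` or `adb jdwp`, before forwarding the port and attaching jdb:
# debug(12345)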
| 2.0625
| 2
|
datalad/interface/tests/test_download_url.py
|
yarikoptic/datalad
| 0
|
12776498
|
<reponame>yarikoptic/datalad
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for install-dataset command
"""
__docformat__ = 'restructuredtext'
from os.path import join as opj
from ...api import download_url
from ...tests.utils import eq_, assert_cwd_unchanged, assert_raises, \
with_tempfile
from ...tests.utils import with_tree
from ...tests.utils import serve_path_via_http
from ...tests.utils import swallow_outputs
def test_download_url_exceptions():
assert_raises(ValueError, download_url, ['url1', 'url2'], path=__file__)
# is not in effect somehow :-/ TODO: investigate!
with swallow_outputs() as cmo:
# bogus urls can't be downloaded any ways
with assert_raises(RuntimeError) as cm:
download_url('http://example.com/bogus')
eq_(str(cm.exception), "1 url(s) failed to download")
@assert_cwd_unchanged
@with_tree(tree=[
('file1.txt', 'abc'),
('file2.txt', 'abc'),
])
@serve_path_via_http
@with_tempfile(mkdir=True)
def test_download_url_return(toppath, topurl, outdir):
files = ['file1.txt', 'file2.txt']
urls = [topurl + f for f in files]
outfiles = [opj(outdir, f) for f in files]
with swallow_outputs() as cmo:
out1 = download_url(urls[0], path=outdir)
eq_(out1, outfiles[:1])
# can't overwrite
with assert_raises(RuntimeError), \
swallow_outputs() as cmo:
out2 = download_url(urls, path=outdir)
eq_(out2, outfiles[1:]) # only 2nd one
with swallow_outputs() as cmo:
out3 = download_url(urls, path=outdir, overwrite=True)
eq_(out3, outfiles)
| 2.046875
| 2
|
tests/test_ne.py
|
hyw208/beval
| 5
|
12776499
|
<gh_stars>1-10
import operator
import unittest
from unittest import TestCase
from beval.criteria import Criteria, Const, NotEq, to_criteria, Ctx, In, universal
from test_helper import acura_small
class TestNe(TestCase):
def test_ne_simple(self):
with acura_small as acura:
c = NotEq("make", "Acura")
(ans, err) = c(Ctx(acura))
self.assertFalse(ans)
self.assertIsNone(err)
c = NotEq("make", "Mazda")
(ans, err) = c(Ctx(acura))
self.assertTrue(ans)
self.assertIsNone(err)
c = NotEq("cpu", "Intel")
(ans, err) = c(Ctx(acura))
self.assertEqual(ans, Const.ERROR)
self.assertIsInstance(err, KeyError)
c = NotEq("cpu", "Intel")
(ans, err) = c(Ctx(acura, True))
self.assertEqual(ans, Const.UNKNOWN)
self.assertIsInstance(err, KeyError)
def test_ser(self):
with acura_small as acura:
expected = "make != 'Acura'"
not_eq = to_criteria(expected)
text = str(not_eq)
self.assertEqual(expected, text)
self.assertIsInstance(not_eq, NotEq)
self.assertEqual(not_eq.key, 'make')
self.assertEqual(not_eq.right, 'Acura')
self.assertEqual(not_eq.op, operator.ne)
(ans, err) = not_eq(Ctx(acura))
self.assertFalse(ans)
self.assertIsNone(err)
def test_universal(self):
notEq = NotEq("make", universal)
expected = "make != '*'"
self.assertEqual(expected, str(notEq))
notEq = to_criteria(expected)
self.assertEqual(notEq.right, Const.universal)
self.assertEqual(expected, str(notEq))
for value in ("xyz", 1, 0, 10.3, False, True, object(), "*"):
car = {"make": value}
(ans, err) = notEq(car)
self.assertFalse(ans)
self.assertIsNone(err)
if __name__ == '__main__':
unittest.main()
| 3.0625
| 3
|
Validation/ground_truth_internet2.py
|
dioptra-io/icmp-rate-limiting-classifier
| 0
|
12776500
|
from Files.utils import ipv4_regex, ipv6_regex
import re
if __name__ == "__main__":
"""
This script parse the Internet2 interfaces files and generates router files
"""
router_id_regex = re.compile('<th id=".*?">(.*)</th>')
gt_interface_addresses = (
"resources/internet2/ground-truth/Internet2-interfaces.html"
)
ground_truth_routers_v4 = {}
ground_truth_routers_v6 = {}
with open(gt_interface_addresses) as f:
for line in f:
# Match the id between <>
m_router_id = re.search(router_id_regex, line)
if m_router_id is not None:
m_interface_v4 = re.search(ipv4_regex, line)
m_interface_v6 = re.search(ipv6_regex, line)
router_id = m_router_id.group(1)
if router_id not in ground_truth_routers_v4:
ground_truth_routers_v4[router_id] = set()
if router_id not in ground_truth_routers_v6:
ground_truth_routers_v6[router_id] = set()
if m_interface_v4 is not None:
ground_truth_routers_v4[router_id].add(m_interface_v4.group(0))
if m_interface_v6 is not None:
ground_truth_routers_v6[router_id].add(m_interface_v6.group(0))
gt_routers_dir = "resources/internet2/ground-truth/routers/"
n_ipv4_ips = 0
discard_interfaces = {
"172.16.31.10",
"172.31.254.2",
"172.16.31.10",
"192.168.127.12",
"192.168.127.12",
"fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
}
ipv4_candidates = set()
ipv6_candidates = set()
for router_id, ips in ground_truth_routers_v4.items():
with open(gt_routers_dir + "v4/" + router_id, "w") as f:
for ip in ips:
f.write(ip + "\n")
if ip in ipv4_candidates:
print("Duplicata", router_id, ip)
if ip not in discard_interfaces:
ipv4_candidates.add(ip)
n_ipv4_ips += 1
n_ipv6_ips = 0
for router_id, ips in ground_truth_routers_v6.items():
with open(gt_routers_dir + "v6/" + router_id, "w") as f:
for ip in ips:
f.write(ip + "\n")
if ip in ipv6_candidates:
print("Duplicata", router_id, ip)
if ip not in discard_interfaces:
ipv6_candidates.add(ip)
n_ipv6_ips += 1
gt_dir = "resources/internet2/ground-truth/"
with open(gt_dir + "ips4", "w") as f:
for ip in ipv4_candidates:
f.write(ip + "\n")
with open(gt_dir + "ips6", "w") as f:
for ip in ipv6_candidates:
f.write(ip + "\n")
print("IPv4 addresses: " + str(n_ipv4_ips))
print("IPv6 addresses: " + str(n_ipv6_ips))
| 3.015625
| 3
|
setup.py
|
barnardn/rx_weather
| 0
|
12776501
|
<filename>setup.py
import os
from setuptools import setup
import json
exec(open("./rxw/_version.py").read())
def requirements_from_pipfile(pipfile=None):
if pipfile is None:
pipfile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'Pipfile.lock')
lock_data = json.load(open(pipfile))
return [package_name for package_name in
lock_data.get('default', {}).keys()]
install_requires = requirements_from_pipfile()
setup(
name="rx-weather",
version=__version__,
packages=[
'rxw',
],
install_requires=install_requires,
entry_points={
'console_scripts':
'rx-weather=rxw.app:main'
},
# metadata for upload to PyPI
author="<NAME>",
author_email="<EMAIL>",
description="demo app to fetch weather using requests and rxpy",
license="MIT",
keywords="rx, python, weather, demo",
url="https://github.com/barnardn/rx_weather",
python_requires='~=3.6',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities'
],
)
| 1.914063
| 2
|
pytorch-examples/00-warm-up/3 nn module/3 two_layer_net_custom_module.py
|
shubhajitml/neurCodes
| 1
|
12776502
|
<gh_stars>1-10
# Whenever we want to specify models that are more complex than a simple
# sequence of existing Modules, we define our own Modules by
# subclassing nn.Module and defining a forward which receives input Tensors
# and produces output Tensors using other modules or other autograd operations
# on Tensors
# A fully-connected ReLU network with one hidden layer, trained to predict y from x by minimizing squared Euclidean distance
# coding : utf-8
import time
import torch
class TwoLayerNet(torch.nn.Module):
'''
A fully-connected ReLU network with one hidden layer, trained to predict y from x
by minimizing squared Euclidean distance
'''
def __init__(self, D_in, H, D_out):
"""
In this constructor we instantiate two nn.Linear modules and assign them as
member variables.
"""
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary operators on Tensors.
"""
relu_h = self.linear1(x).clamp(min=0)
y_pred = self.linear2(relu_h)
return y_pred
# N : batch size, D_in : input dimension,
# H : hidden dimension, D_out : output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Construct the model by instantiating the class defined above
model = TwoLayerNet(D_in, H, D_out)
# Construct our loss function and an Optimizer. The call to
# model.parameters() in the SGD constructor will contain the learnable parameters
# of the two nn.Linear modules which are members of the model
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
start_time = time.time()
for t in range(500):
# Forward Pass : compute predicted y by passing x to the model
y_pred = model(x)
# Compute and print loss
loss = criterion(y_pred, y)
print(t, loss.item())
# Zero gradients, perform a backward pass, and update weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
finish_time = time.time()
print(f'time of execution: {finish_time - start_time}')  # in my first run 1.7642054557800293 s
| 3.953125
| 4
|
macrotest/macrotest.py
|
ariegg/webiopi-examples
| 0
|
12776503
|
# Copyright 2016 <NAME> - t-h-i-n-x.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WebIOPi test script for all kinds of macros
#
# NOTE: Macros that are called via REST "POST .../macros/macroname..."
# are called by WebIOPi by using macro(*args) or macro() if REST path
# has no argument values at the end.
# Thus, using keyword arguments via macro(**kwargs) is not possible
# at the moment.
import webiopi
@webiopi.macro
def macroWithoutArguments():
print("n/a")
return "OK"
@webiopi.macro
def macroWithTwoArguments(a, b):
print(a)
print(b)
return "OK"
@webiopi.macro
def macroWithTwoSecondHasDefaultArguments(a, b=2):
print(a)
print(b)
return "OK"
@webiopi.macro
def macroWithTwoBothHaveDefaultArguments(a=1, b=2):
print(a)
print(b)
return "OK"
@webiopi.macro
def macroWithStarOnlyArguments(*a):
print(a)
for arg in a:
print(arg)
return "OK"
@webiopi.macro
def macroWithOnePositionalAndStarArguments(a, *b):
print(a)
print(b)
return "OK"
@webiopi.macro
def macroWithOnePositionalWithDefaultAndStarArguments(a=1, *b):
print(a)
print(b)
return "OK"
@webiopi.macro
def macroWithTwoPositionalSecondHasDefaultAndStarArguments(a, b=2, *c):
print(a)
print(b)
print(c)
return "OK"
| 2.296875
| 2
|
python/1004.max-consecutive-ones-iii.py
|
Zhenye-Na/leetcode
| 10
|
12776504
|
<reponame>Zhenye-Na/leetcode
#
# @lc app=leetcode id=1004 lang=python3
#
# [1004] Max Consecutive Ones III
#
# https://leetcode.com/problems/max-consecutive-ones-iii/description/
#
# algorithms
# Medium (61.32%)
# Likes: 2593
# Dislikes: 40
# Total Accepted: 123.4K
# Total Submissions: 202.1K
# Testcase Example: '[1,1,1,0,0,0,1,1,1,1,0]\n2'
#
# Given a binary array nums and an integer k, return the maximum number of
# consecutive 1's in the array if you can flip at most k 0's.
#
#
# Example 1:
#
#
# Input: nums = [1,1,1,0,0,0,1,1,1,1,0], k = 2
# Output: 6
# Explanation: [1,1,1,0,0,1,1,1,1,1,1]
# Bolded numbers were flipped from 0 to 1. The longest subarray is underlined.
#
# Example 2:
#
#
# Input: nums = [0,0,1,1,0,0,1,1,1,0,1,1,0,0,0,1,1,1,1], k = 3
# Output: 10
# Explanation: [0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1]
# Bolded numbers were flipped from 0 to 1. The longest subarray is
# underlined.
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 10^5
# nums[i] is either 0 or 1.
# 0 <= k <= nums.length
#
#
#
from typing import List
# @lc code=start
class Solution:
def longestOnes(self, nums: List[int], k: int) -> int:
if not nums or len(nums) == 0:
return 0
left, right = 0, 0
if nums[right] == 0:
k -= 1
n = len(nums)
max_length = 0
for left in range(n):
while right + 1 < n and ( (k > 0) or nums[right + 1] == 1 ):
if nums[right + 1] == 0:
k -= 1
right += 1
if k >= 0:
max_length = max(max_length, right - left + 1)
if nums[left] == 0:
k += 1
return max_length
# @lc code=end
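# Hypothetical sanity check (not part of the submitted solution), mirroring the
# two examples from the problem statement above.
if __name__ == "__main__":
    s = Solution()
    assert s.longestOnes([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 2) == 6
    assert s.longestOnes([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], 3) == 10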
| 3.34375
| 3
|
fetcher.py
|
SDAChess/epita_r_place
| 0
|
12776505
|
<gh_stars>0
import requests
def fetch_image(url):
filename = url.split("/")[-1]
r = requests.get(url, stream = True)
if r.status_code == 200:
print('Successfully downloaded image from ' + str(url))
return r.raw
else:
print('Failed to download image...')
return None
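# Hypothetical usage sketch (not part of the original module); the URL and
# output filename are made up:
# raw = fetch_image("https://example.com/logo.png")
# if raw is not None:
#     with open("logo.png", "wb") as f:
#         f.write(raw.read())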
| 3.09375
| 3
|
fabric_digitalocean/decorators.py
|
CompileInc/fabric-digitalocean
| 0
|
12776506
|
import digitalocean
import os
from fabric.decorators import wraps, _wrap_as_new
from retry.api import retry_call
class TokenError(Exception):
pass
def _list_annotating_decorator(attribute, *values):
"""
From fabric.decorators._list_annotating_decorator
https://github.com/fabric/fabric/blob/master/fabric/decorators.py#L49
"""
def attach_list(func):
@wraps(func)
def inner_decorator(*args, **kwargs):
return func(*args, **kwargs)
_values = values
# Allow for single iterable argument as well as *args
if len(_values) == 1 and not isinstance(_values[0], str):
_values = _values[0]
setattr(inner_decorator, attribute, list(_values))
# Don't replace @task new-style task objects with inner_decorator by
# itself -- wrap in a new Task object first.
inner_decorator = _wrap_as_new(func, inner_decorator)
return inner_decorator
return attach_list
def droplet_generator(region=None, tag=None, ids=[], status=[]):
"""
A generator that yields Droplet IP addresses.
:param region: A DigitalOcean region
:type region: str
:param tag: A DigitalOcean tag name
:type tag: str
:param id: A list of DigitalOcean Droplet IDs
:type id: list
"""
token = os.getenv('FABRIC_DIGITALOCEAN_TOKEN')
if not token:
raise TokenError('The environmental variable FABRIC_DIGITALOCEAN_TOKEN'
' is empty. It must contain a valid DigitalOcean API'
' token.')
client = digitalocean.Manager(token=token)
hosts = []
if not ids:
droplets = client.get_all_droplets(tag_name=tag)
for d in droplets:
if not region or d.region['slug'] == region:
hosts.append(d)
else:
if isinstance(ids, int):
droplet = client.get_droplet(droplet_id=ids)
hosts.append(droplet)
else:
for i in ids:
droplet = client.get_droplet(droplet_id=i)
hosts.append(droplet)
if status and isinstance(status, list):
hosts = [h for h in hosts if h.status in status]
for h in hosts:
yield h.ip_address
@wraps(droplet_generator)
def droplets(region=None, tag=None, ids=[], status=[], retry={}):
"""
Fabric decorator for running a task on DigitalOcean Droplets.
:param region: A DigitalOcean region
:type region: str
:param tag: A DigitalOcean tag name
:type tag: str
:param id: A list of DigitalOcean Droplet IDs
:type id: list
"""
retry_defaults = {'tries': 1, 'delay': 0, 'backoff':1, 'max_delay': None}
droplets = retry_call(_list_annotating_decorator,
fargs=['hosts', droplet_generator(region, tag, ids, status)],
exceptions=(digitalocean.baseapi.DataReadError, digitalocean.baseapi.JSONReadError),
tries=retry.get('tries', retry_defaults['tries']),
delay=retry.get('delay', retry_defaults['delay']),
backoff=retry.get('backoff', retry_defaults['backoff']),
max_delay=retry.get('max_delay', retry_defaults['max_delay']))
return droplets
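# Hypothetical usage sketch (not part of the original module), assuming a
# fabric 1.x fabfile and that FABRIC_DIGITALOCEAN_TOKEN is exported; the tag,
# region and retry values are made up:
# from fabric.api import run, task
# from fabric_digitalocean.decorators import droplets
# @task
# @droplets(tag='web', region='nyc3', retry={'tries': 3, 'delay': 1})
# def uptime():
#     run('uptime')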
| 2.28125
| 2
|
bob/pipelines/transformers/linearize.py
|
bioidiap/bob.pipelines
| 1
|
12776507
|
import numpy as np
from sklearn.preprocessing import FunctionTransformer
from ..wrappers import wrap
def linearize(X):
X = np.asarray(X)
return np.reshape(X, (X.shape[0], -1))
class Linearize(FunctionTransformer):
"""Extracts features by simply concatenating all elements of the data into
one long vector."""
def __init__(self, **kwargs):
super().__init__(func=linearize, **kwargs)
def SampleLinearize(**kwargs):
return wrap([Linearize, "sample"], **kwargs)
def CheckpointSampleLinearize(**kwargs):
return wrap([Linearize, "sample", "checkpoint"], **kwargs)
| 3.234375
| 3
|
{{cookiecutter.app_slug}}/databases/data_column/data.py
|
ELC/cookiecutter-python-fullstack
| 5
|
12776508
|
from pathlib import Path
from contextlib import contextmanager
from typing import Any, Iterator, List, Optional
import duckdb
from ..models.task import Task
@contextmanager
def database_connection() -> Iterator[duckdb.DuckDBPyConnection]:
connection: duckdb.DuckDBPyConnection = duckdb.connect(f"{Path(__file__).parent}/db.duckdb")
cursor: duckdb.DuckDBPyConnection = connection.cursor()
try:
yield cursor
connection.commit()
finally:
cursor.close()
connection.close()
def init_database():
query = """DROP TABLE IF EXISTS tasks;
DROP SEQUENCE IF EXISTS __id;
"""
with database_connection() as db:
db.execute(query)
query = "CREATE SEQUENCE IF NOT EXISTS __id START 1;"
with database_connection() as db:
db.execute(query)
query = """
CREATE TABLE IF NOT EXISTS tasks (
id INTEGER PRIMARY KEY,
text TEXT,
day TEXT,
reminder INTEGER
)"""
with database_connection() as db:
db.execute(query)
init_data = [
Task(id=1, text="Doctors Appointment", day="Feb 5th at 2:30pm", reminder=True),
Task(id=2, text="Meeting at School", day="Feb 6th at 1:30pm", reminder=True),
]
for task in init_data:
insert_taks(task)
def insert_taks(task: Task) -> Task:
task_dict = task._asdict()
query = """INSERT INTO tasks (id, text, day, reminder)
VALUES (nextval('__id'), ?, ?, ?)"""
parameters = [task.text, task.day, task.reminder]
with database_connection() as db:
db.execute(query, parameters)
id_ = db.lastrowid
task_dict["id"] = id_
return Task(**task_dict)
def get_all() -> List[Task]:
query = "SELECT id, text, day, reminder FROM tasks"
with database_connection() as db:
db.execute(query)
records = db.fetchall()
tasks = []
for record in records:
task = Task(
id=record[0], text=record[1], day=record[2], reminder=bool(record[3])
)
tasks.append(task)
return tasks
def get_by_id(task: Task) -> Optional[Task]:
query = "SELECT id, text, day, reminder FROM tasks WHERE id = ?"
parameters = [task.id]
with database_connection() as db:
db.execute(query, parameters)
record = db.fetchone()
if not record:
return None
return Task(id=record[0], text=record[1], day=record[2], reminder=bool(record[3]))
def delete_by_id(task: Task) -> None:
query = "DELETE FROM tasks WHERE id = ?"
parameters = [task.id]
with database_connection() as db:
db.execute(query, parameters)
def update_by_id(task: Task) -> Task:
query = """UPDATE tasks
SET text = ?, day = ?, reminder = ?
WHERE id = ?"""
parameters = [task.text, task.day, task.reminder, task.id]
with database_connection() as db:
db.execute(query, parameters)
return task
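# Hypothetical usage sketch (not part of the original module), run from package
# code where the relative imports resolve; Task comes from ..models.task:
# init_database()                  # recreate the table and seed two example tasks
# print(get_all())                 # -> [Task(id=1, ...), Task(id=2, ...)]
# insert_taks(Task(id=0, text="Buy milk", day="Monday", reminder=False))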
| 2.875
| 3
|
setup.py
|
fran6w/pandas-method-chaining
| 5
|
12776509
|
from setuptools import setup
requires = ["flake8 > 3.0.0", "attr"]
flake8_entry_point = "flake8.extension"
long_description = """
A flake8 style checker for pandas method chaining, forked from https://github.com/deppen8/pandas-vet
"""
setup(
name="pandas-method-chaining",
version="0.1.0",
author="<NAME>",
license="MIT",
description="A pandas method chaining checker",
install_requires=requires,
entry_points={
flake8_entry_point: [
"PMC=pandas_method_chaining:Plugin",
]
}
)
| 1.234375
| 1
|
sample/django_sample/app/celery_tasks/urls.py
|
knroy/celery-rmq
| 0
|
12776510
|
<filename>sample/django_sample/app/celery_tasks/urls.py
from django.urls import path
from .views import CeleryTestView
urlpatterns = [
path('', CeleryTestView.as_view(), name='Test')
]
| 1.648438
| 2
|
tests/test_shopitem.py
|
MrLeeh/shopy
| 0
|
12776511
|
"""
test_shopitem.py Copyright 2015 by stefanlehmann
"""
import pytest
from shopy.shop import Shop
from shopy.shopitem import ShopItem
def test_shopitem_repr():
shop = Shop.from_file('amazon.json')
item = ShopItem()
item.name = "testitem"
item.articlenr = "123"
item.price = 12.5
item.shop = shop
assert repr(item) == \
"<ShopItem object (name:'%s', articlenr:'%s'," \
" price:%s, shop:'%s')>" % (
'testitem',
'123',
'12.50',
'Amazon'
)
| 2.6875
| 3
|
land_planning_and_allocation/config/land_planning_and_allocation.py
|
the-bantoo/Land-Planning-And-Allocation
| 0
|
12776512
|
<gh_stars>0
from __future__ import unicode_literals
from frappe import _
def get_data():
config = [
{
"label": _("Sales"),
"items": [
{
"type": "doctype",
"name": "Customer",
"description": _("Customer Database."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Plot",
"label": _("Plot"),
"onboard": 1,
}
]
},
{
"label": _("Land Details"),
"items": [
{
"type": "doctype",
"name": "Item",
"label": _("Plot Item"),
},
{
"type": "doctype",
"name": "Project",
"onboard": 1,
},
{
"type": "doctype",
"name": "Subdivision",
"label": _("Subdivision"),
"onboard": 1,
}
]
},
{
"label": _("Reports"),
"items": [
{
"type": "report",
"name": "Item-wise Sales Register",
"is_query_report": True,
"doctype": "Sales Invoice"
},
{
"type": "report",
"name": "Accounts Receivable",
"doctype": "Sales Invoice",
"is_query_report": True
},
]
},
{
"label": _("Setup"),
"items": [
{
"type": "doctype",
"name": "Land Settings",
"label": _("Land Settings"),
"onboard": 1,
},
]
}
]
return config
| 1.773438
| 2
|
eris/setup.py
|
ahamilton/eris
| 0
|
12776513
|
<reponame>ahamilton/eris
#!/usr/bin/env python3.9
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
REPO_PATH = os.path.dirname(os.getcwd())
setup(name="eris",
version="v2022.01.05",
description=("Eris maintains an up-to-date set of reports for every file in a codebase."),
url="https://github.com/ahamilton/eris",
author="<NAME>",
author_email="<EMAIL>",
license="Artistic 2.0",
python_requires=">=3.9.0",
packages=["eris"],
py_modules=["lscolors", "sorted_collection"],
package_data={"eris": ["LS_COLORS.sh", "tools.toml"]},
entry_points={"console_scripts":
["eris=eris.__main__:entry_point", "eris-worker=eris.worker:main",
"eris-webserver=eris.webserver:main", "pydoc_color=eris.pydoc_color:main"]},
install_requires=[
"pyinotify==0.9.6", "pygments==2.10.0", "docopt==0.6.2", "pillow==8.4.0", "toml==0.10.2",
"decorator==5.1.0", "pexpect==4.8.0", f"fill3 @ file://{REPO_PATH}/fill3",
f"termstr @ file://{REPO_PATH}/termstr", f"lscolors @ file://{REPO_PATH}/lscolors"])
| 1.359375
| 1
|
tests/unit/test_scm.py
|
pavelito/opencoverage
| 0
|
12776514
|
import os
from datetime import datetime, timedelta, timezone
from unittest.mock import (
AsyncMock,
MagicMock,
Mock,
patch,
)
import pytest
from opencoverage.clients import scm
from tests import utils
pytestmark = pytest.mark.asyncio
@pytest.fixture(autouse=True)
def _clear():
scm.github._token_cache.clear()
scm.github._private_key_cache.clear()
def test_get_client_unsupported():
settings = Mock()
settings.scm = "missing"
with pytest.raises(TypeError):
scm.get_client(settings, None)
class TestGithub:
@pytest.fixture()
async def client(self, settings):
settings.github_app_pem_file = os.path.join(utils.DATA_DIR, "test.pem")
cli = scm.Github(settings, None)
yield cli
await cli.close()
@pytest.fixture()
def response(self):
res = AsyncMock()
res.status = 200
res.json.return_value = {"foo": "bar"}
res.text.return_value = '{"foo": "bar"}'
yield res
@pytest.fixture()
def req(self, response):
req = AsyncMock()
req.__aenter__.return_value = response
yield req
@pytest.fixture()
def session(self, client, req):
session = MagicMock()
session.post.return_value = req
session.patch.return_value = req
session.put.return_value = req
session.get.return_value = req
with patch("opencoverage.clients.scm.github.aiohttp_client", session):
yield session
@pytest.fixture()
def token(self, client):
with patch.object(client, "get_access_token", return_value="token"):
yield "token"
def test_get_client_github_missing_pem(self):
settings = Mock()
settings.scm = "github"
settings.github_app_pem_file = None
with pytest.raises(TypeError):
assert scm.get_client(settings, None)
async def test_get_client(self, client):
assert client
async def test_get_access_token(self, client, session, response):
response.status = 201
response.json.return_value = scm.github.GithubAccessData(
token="token",
expires_at=datetime.utcnow().replace(tzinfo=timezone.utc)
+ timedelta(hours=1),
permissions={},
repository_selection="repository_selection",
).dict()
token = await client.get_access_token()
assert token == "token"
async def test_get_access_token_failure(self, client, session, response):
response.status = 401
response.json.return_value = {"error": "error"}
with pytest.raises(scm.github.APIException):
await client.get_access_token()
async def test_get_access_token_cache(self, client, session, response):
response.status = 201
response.json.return_value = scm.github.GithubAccessData(
token="token",
expires_at=datetime.utcnow().replace(tzinfo=timezone.utc)
+ timedelta(hours=1),
permissions={},
repository_selection="repository_selection",
).dict()
token = await client.get_access_token()
assert token == "token"
calls = len(session.post.mock_calls)
assert await client.get_access_token() == "token"
assert len(session.post.mock_calls) == calls
async def test_get_pulls_missing(self, client, session, response, token):
response.status = 422
pulls = await client.get_pulls("org", "repo", "commit_hash")
assert len(pulls) == 0
async def test_get_pulls_auth_error(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.AuthorizationException):
await client.get_pulls("org", "repo", "commit_hash")
async def test_get_pull_diff_auth_error(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.AuthorizationException):
await client.get_pull_diff("org", "repo", "id")
async def test_create_check(self, client, session, response, token):
response.status = 201
check = scm.github.GithubCheck(
id=123,
status="created",
started_at=datetime.utcnow(),
name="name",
head_sha="head_sha",
)
response.json.return_value = check.dict()
assert await client.create_check("org", "repo", "commit") == "123"
async def test_create_check_auth_error(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.AuthorizationException):
await client.create_check("org", "repo", "commit")
async def test_update_check(self, client, session, response, token):
await client.update_check("org", "repo", "check_id")
assert session.patch.mock_calls[0].kwargs["json"] == {
"status": "completed",
"conclusion": "failure",
}
async def test_update_check_success(self, client, session, response, token):
await client.update_check("org", "repo", "check_id", running=True, success=True)
assert session.patch.mock_calls[0].kwargs["json"] == {
"status": "in_progress",
"conclusion": "success",
}
async def test_update_check_auth_error(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.APIException):
await client.update_check("org", "repo", "check_id")
async def test_create_comment(self, client, session, response, token):
response.status = 201
comment = scm.github.GithubComment(id=123, body="text")
response.json.return_value = comment.dict()
assert await client.create_comment("org", "repo", "pull_id", "text") == "123"
async def test_create_comment_auth_error(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.APIException):
await client.create_comment("org", "repo", "pull_id", "text")
async def test_update_comment(self, client, session, response, token):
response.status = 200
await client.update_comment("org", "repo", "123", "text")
async def test_update_comment_auth_error(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.APIException):
await client.update_comment("org", "repo", "123", "text")
async def test_download_file_401(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.AuthorizationException):
async for chunk in client.download_file("org", "repo", "commit", "filename"):
...
async def test_download_file_404(self, client, session, response, token):
response.status = 404
with pytest.raises(scm.github.NotFoundException):
async for chunk in client.download_file("org", "repo", "commit", "filename"):
...
async def test_file_exists(self, client, session, response, token):
response.status = 200
assert await client.file_exists("org", "repo", "commit", "filename")
async def test_not_file_exists(self, client, session, response, token):
response.status = 404
assert not await client.file_exists("org", "repo", "commit", "filename")
async def test_file_exists_authz(self, client, session, response, token):
response.status = 401
with pytest.raises(scm.github.AuthorizationException):
assert not await client.file_exists("org", "repo", "commit", "filename")
| 2.234375
| 2
|
code/statistics.py
|
Luca-Hackl/Discord-bot
| 0
|
12776515
|
import WebScraping
import DiscordBot
import mysql.connector
import discord
from time import time
from dotenv import load_dotenv
import os
import requests
import json
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
API_URL = "https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_Landkreisdaten/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=false&outSR=4326&f=json"
CSV_FILE_NAME = "RKIData.csv"
def SQLconnect():
load_dotenv()
user = os.getenv('user') #gets user from .env file
password = <PASSWORD>('password') #gets password from .env file
mydb = mysql.connector.connect(
host = "localhost",
user = user,
passwd = password,
database = "mydatabase"
)
return mydb
def SQLsetup():
mydb = SQLconnect() #connects to SQL server
cursor = mydb.cursor()
try:
cursor.execute("CREATE TABLE landkreis (Stadtname VARCHAR(50), Kreis VARCHAR(50), Bundesland VARCHAR(50), Faelle INTEGER, Tode INTEGER, Inzidenz FLOAT, Zuletzt_geupdatet DATE)")
except:
print("Database is set up") #if TABLE already exist it exit, if it doesnt exist it gets created
cursor.close()
mydb.close()
def top5():
mydb = SQLconnect() #connects to SQL server
cursor = mydb.cursor(buffered=True)
sql_select_query = """SELECT * FROM landkreis ORDER BY Zuletzt_geupdatet DESC, Inzidenz DESC""" #SQL query
cursor.execute(sql_select_query)
mynames = []
myvalues = []
for _ in range(0,5):
myresult = cursor.fetchone()
mynames.append(myresult[0])
myvalues.append(myresult[5])
embed = discord.Embed(
title=":red_circle: **Top 5 incidence counties**",
color=15859792
)
embed.add_field(name=mynames[0], value=f"👉 Inzidenz: {str(myvalues[0])}", inline=False)
embed.add_field(name="** ** ", value="** ** ", inline=False)
embed.add_field(name=mynames[1], value=f"👉 Inzidenz: {str(myvalues[1])}", inline=False)
embed.add_field(name="** ** ", value="** ** ", inline=False)
embed.add_field(name=mynames[2], value=f"👉 Inzidenz: {str(myvalues[2])}", inline=False)
embed.add_field(name="** ** ", value="** ** ", inline=False)
embed.add_field(name=mynames[3], value=f"👉 Inzidenz: {str(myvalues[3])}", inline=False)
embed.add_field(name="** ** ", value="** ** ", inline=False)
embed.add_field(name=mynames[4], value=f"👉 Inzidenz: {str(myvalues[4])}", inline=False)
mydb.close()
return embed
def SQLadding():
mydb = SQLconnect() #connects to SQL server
cursor = mydb.cursor()
datetime_1 = datetime.now()
currentdate = datetime_1.date()
#sql_select_query = #SQL query
sql_select_query = """SELECT * FROM landkreis ORDER BY Zuletzt_geupdatet DESC""" #SQL query
cursor.execute(sql_select_query) #takes input from DiscordBot and puts in in %s above
myresult = cursor.fetchall()
for x in myresult:
if currentdate == x[6]:
print("test")
return False, "Already updated data today..."
else:
print("test")
r = requests.get(API_URL)
res = r.json()
countydata = res["features"]
length = len(list(countydata))
for i in range(0, length):
for channel in countydata[i].values(): #takes JSON data and extracts values
Stadtname = channel['GEN']
Kreis = channel['BEZ']
Bundesland = channel['BL']
Faelle = channel['cases']
Tode= channel['deaths']
Inzidenz = channel['cases7_per_100k_txt'].replace(',','.')
Zuletzt_geupdatet = channel['last_update']
day = Zuletzt_geupdatet[0:2]
month = Zuletzt_geupdatet[3:5]
year = Zuletzt_geupdatet[6:10]
date = (year + "-" + month + "-" + day) #conversion to american date format (yyyy-mm-dd)
sql_command = """INSERT INTO landkreis (Stadtname, Kreis, Bundesland, Faelle, Tode, Inzidenz, Zuletzt_geupdatet)
VALUES (%s, %s, %s, %s, %s, %s, %s);"""
data= (Stadtname, Kreis, Bundesland, Faelle, Tode, Inzidenz, date)
cursor.execute(sql_command, data)
mydb.commit()
mydb.close()
return True, "Updated sucessfully..."
def statesearch(state):
mydb = SQLconnect() #connects to SQL server
cursor = mydb.cursor()
datetime_1 = datetime.now()
currentdate = datetime_1.date()
sql_select_query = """SELECT * FROM landkreis WHERE Bundesland = %s AND Zuletzt_geupdatet = %s""" #SQL query
cursor.execute(sql_select_query,(state, currentdate,)) #takes input from DiscordBot and puts in in %s above
myresult = cursor.fetchall() #actually commit query
cursor.close()
mydb.close()
cases = []
death = []
for x in myresult: #search trough results of query
cases.append(int(x[3]))
death.append(int(x[4]))
embed = discord.Embed(
title=f"**{state}**",
)
embed.add_field(name="👥 Fälle (Gesamt)", value=sum(cases), inline=True)
embed.add_field(name="☠️ Tode (Gesamt)", value=sum(death), inline=True)
return embed
#%%
| 2.65625
| 3
|
lib/comm_struct.py
|
yiding-zhou/UFT
| 0
|
12776516
|
<filename>lib/comm_struct.py
#! /usr/bin/python
# Copyright(c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from enum import Enum, unique
UINT32_MAX = 0xffffffff
@unique
class rte_flow_item_type(Enum):
RTE_FLOW_ITEM_TYPE_END = 0
RTE_FLOW_ITEM_TYPE_VOID = 1
RTE_FLOW_ITEM_TYPE_INVERT = 2
RTE_FLOW_ITEM_TYPE_ANY = 3
RTE_FLOW_ITEM_TYPE_PF = 4
RTE_FLOW_ITEM_TYPE_VF = 5
RTE_FLOW_ITEM_TYPE_PHY_PORT = 6
RTE_FLOW_ITEM_TYPE_PORT_ID = 7
RTE_FLOW_ITEM_TYPE_RAW = 8
RTE_FLOW_ITEM_TYPE_ETH = 9
RTE_FLOW_ITEM_TYPE_VLAN = 10
RTE_FLOW_ITEM_TYPE_IPV4 = 11
RTE_FLOW_ITEM_TYPE_IPV6 = 12
RTE_FLOW_ITEM_TYPE_ICMP = 13
RTE_FLOW_ITEM_TYPE_UDP = 14
RTE_FLOW_ITEM_TYPE_TCP = 15
RTE_FLOW_ITEM_TYPE_SCTP = 16
RTE_FLOW_ITEM_TYPE_VXLAN = 17
RTE_FLOW_ITEM_TYPE_E_TAG = 18
RTE_FLOW_ITEM_TYPE_NVGRE = 19
RTE_FLOW_ITEM_TYPE_MPLS = 20
RTE_FLOW_ITEM_TYPE_GRE = 21
RTE_FLOW_ITEM_TYPE_FUZZY = 22
RTE_FLOW_ITEM_TYPE_GTP = 23
RTE_FLOW_ITEM_TYPE_GTPC = 24
RTE_FLOW_ITEM_TYPE_GTPU = 25
RTE_FLOW_ITEM_TYPE_ESP = 26
RTE_FLOW_ITEM_TYPE_GENEVE = 27
RTE_FLOW_ITEM_TYPE_VXLAN_GPE= 28
RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4 = 29
RTE_FLOW_ITEM_TYPE_IPV6_EXT = 30
RTE_FLOW_ITEM_TYPE_ICMP6 = 31
RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS=32
RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA=33
RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT=34
RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH=35
RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH=36
RTE_FLOW_ITEM_TYPE_MARK = 37
RTE_FLOW_ITEM_TYPE_META = 38
RTE_FLOW_ITEM_TYPE_GRE_KEY = 39
RTE_FLOW_ITEM_TYPE_GTP_PSC = 40
RTE_FLOW_ITEM_TYPE_PPPOES = 41
RTE_FLOW_ITEM_TYPE_PPPOED = 42
RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID= 43
RTE_FLOW_ITEM_TYPE_NSH = 44
RTE_FLOW_ITEM_TYPE_IGMP = 45
RTE_FLOW_ITEM_TYPE_AH = 46
RTE_FLOW_ITEM_TYPE_HIGIG2 = 47
RTE_FLOW_ITEM_TYPE_TAG = 48
# RTE_FLOW_ITEM_TYPE_ANY
rte_flow_item_any = collections.namedtuple('rte_flow_item_any', ['num'])
rte_flow_item_any.__new__.__defaults__ = (0x00000000,)
# Default mask for RTE_FLOW_ITEM_TYPE_ANY.
rte_flow_item_any_mask = rte_flow_item_any(num=0x00000000,)
# RTE_FLOW_ITEM_TYPE_VF
rte_flow_item_vf = collections.namedtuple('rte_flow_item_vf', ['id'])
rte_flow_item_vf.__new__.__defaults__ = (0x00000000,)
# Default mask for RTE_FLOW_ITEM_TYPE_VF.
rte_flow_item_vf_mask = rte_flow_item_vf(id=0x00000000)
# RTE_FLOW_ITEM_TYPE_PHY_PORT
rte_flow_item_phy_port = collections.namedtuple('rte_flow_item_phy_port', ['index'])
rte_flow_item_phy_port.__new__.__defaults__ = (0x00000000,)
# Default mask for RTE_FLOW_ITEM_TYPE_PHY_PORT.
rte_flow_item_phy_port_mask = rte_flow_item_phy_port(index=0x00000000)
# RTE_FLOW_ITEM_TYPE_PORT_ID
rte_flow_item_port_id = collections.namedtuple('rte_flow_item_port_id', ['id'])
rte_flow_item_port_id.__new__.__defaults__ = (0,)
# Default mask for RTE_FLOW_ITEM_TYPE_PORT_ID.
rte_flow_item_port_id_mask = rte_flow_item_port_id(id=0xffffffff)
# RTE_FLOW_ITEM_TYPE_RAW
rte_flow_item_raw = collections.namedtuple('rte_flow_item_raw', ['relative', 'search', 'reserved', 'offset', 'limit', 'length', 'pattern'])
rte_flow_item_raw.__new__.__defaults__ = (0, 0, 0, 0, 0, 0, None)
# Default mask for RTE_FLOW_ITEM_TYPE_RAW.
rte_flow_item_raw_mask = rte_flow_item_raw(1, 1, 0x3fffffff, 0xffffffff, 0xffff, 0xffff, None)
# header ether addr
rte_ether_addr = collections.namedtuple('rte_ether_addr', ['addr_bytes'])
rte_ether_addr.__new__.__defaults__ = (b'',)
# RTE_FLOW_ITEM_TYPE_ETH
rte_flow_item_eth = collections.namedtuple('rte_flow_item_eth', ['dst', 'src', 'type_'])
rte_flow_item_eth.__new__.__defaults__ = (rte_ether_addr(), rte_ether_addr(), 0x0)
# Default mask for RTE_FLOW_ITEM_TYPE_ETH.
rte_flow_item_eth_mask = rte_flow_item_eth(rte_ether_addr(addr_bytes=b"\xff\xff\xff\xff\xff\xff"),
rte_ether_addr(addr_bytes=b"\xff\xff\xff\xff\xff\xff"), 0x0000)
# RTE_FLOW_ITEM_TYPE_VLAN
rte_flow_item_vlan = collections.namedtuple('rte_flow_item_vlan', ['tci', 'inner_type'])
rte_flow_item_vlan.__new__.__defaults__ = (0x0, 0x0)
# Default mask for RTE_FLOW_ITEM_TYPE_ETH.
rte_flow_item_vlan_mask = rte_flow_item_vlan(0x0fff, 0x0000)
# header ipv4
rte_ipv4_hdr = collections.namedtuple('rte_ipv4_hdr', ['version_ihl', 'type_of_service', 'total_length', 'packet_id',
'fragment_offset', 'time_to_live', 'next_proto_id', 'hdr_checksum',
'src_addr', 'dst_addr'])
rte_ipv4_hdr.__new__.__defaults__ = (0, 0, 0, 0, 0, 64, 0, 0, 0, 0)
# RTE_FLOW_ITEM_TYPE_IPV4
rte_flow_item_ipv4 = collections.namedtuple('rte_flow_item_ipv4', ['hdr'])
rte_flow_item_ipv4.__new__.__defaults__ = (rte_ipv4_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_IPV4.
rte_flow_item_ipv4_mask = rte_flow_item_ipv4(rte_ipv4_hdr(src_addr=0xffff, dst_addr=0xffff))
# header ipv6
rte_ipv6_hdr = collections.namedtuple('rte_ipv6_hdr', ['vtc_flow', 'payload_len', 'proto', 'hop_limits',
'src_addr', 'dst_addr'])
rte_ipv6_hdr.__new__.__defaults__ = (0, 0, 0, 0, b'',b'',)
# RTE_FLOW_ITEM_TYPE_IPV6
rte_flow_item_ipv6 = collections.namedtuple('rte_flow_item_ipv6', ['hdr'])
rte_flow_item_ipv6.__new__.__defaults__ = (rte_ipv6_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_IPV6.
rte_flow_item_ipv6_mask = rte_flow_item_ipv6(rte_ipv6_hdr(src_addr=b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
dst_addr=b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"))
# header icmp
rte_icmp_hdr = collections.namedtuple("rte_icmp_hdr", ['icmp_type', 'icmp_code', 'icmp_cksum', 'icmp_ident', 'icmp_seq_nb'])
rte_icmp_hdr.__new__.__defaults__ = (0, 0, 0, 0, 0)
# RTE_FLOW_ITEM_TYPE_ICMP
rte_flow_item_icmp = collections.namedtuple('rte_flow_item_icmp', ['hdr'])
rte_flow_item_icmp.__new__.__defaults__ = (rte_icmp_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP.
rte_flow_item_icmp_mask = rte_flow_item_icmp(rte_icmp_hdr(icmp_type=0xff, icmp_code=0xff))
# header udp
rte_udp_hdr = collections.namedtuple("rte_udp_hdr", ['src_port', 'dst_port', 'dgram_len', 'dgram_cksum'])
rte_udp_hdr.__new__.__defaults__ = (53, 53, 0, 0)
# RTE_FLOW_ITEM_TYPE_UDP
rte_flow_item_udp = collections.namedtuple('rte_flow_item_udp', ['hdr'])
rte_flow_item_udp.__new__.__defaults__ = (rte_udp_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_UDP.
rte_flow_item_udp_mask = rte_flow_item_udp(rte_udp_hdr(src_port=0xffff, dst_port=0xffff))
# header tcp
rte_tcp_hdr = collections.namedtuple('rte_tcp_hdr', ['src_port', 'dst_port', 'sent_seq', 'recv_ack', 'data_off',
'tcp_flags', 'rx_win', 'cksum', 'tcp_urp'])
rte_tcp_hdr.__new__.__defaults__ = (53, 53, 0, 0, 0, 0, 0, 0, 0)
# RTE_FLOW_ITEM_TYPE_TCP
rte_flow_item_tcp = collections.namedtuple("rte_flow_item_tcp", ['hdr'])
rte_flow_item_tcp.__new__.__defaults__ = (rte_tcp_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_TCP.
rte_flow_item_tcp_mask = rte_flow_item_tcp(rte_tcp_hdr(src_port=0xffff, dst_port=0xffff))
# header sctp
rte_sctp_hdr = collections.namedtuple('rte_sctp_hdr', ['src_port', 'dst_port', 'tag', 'cksum'])
rte_sctp_hdr.__new__.__defaults__ = (53, 53, 0, 0)
# RTE_FLOW_ITEM_TYPE_SCTP
rte_flow_item_sctp = collections.namedtuple('rte_flow_item_sctp', ['hdr'])
rte_flow_item_sctp.__new__.__defaults__ = (rte_sctp_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_SCTP.
rte_flow_item_sctp_mask = rte_flow_item_sctp(rte_sctp_hdr(src_port=0xffff, dst_port=0xffff))
# RTE_FLOW_ITEM_TYPE_VXLAN
rte_flow_item_vxlan = collections.namedtuple('rte_flow_item_vxlan', ['flags', 'rsvd0', 'vni', 'rsvd1'])
rte_flow_item_vxlan.__new__.__defaults__ = (0, b'', b'', 0)
# Default mask for RTE_FLOW_ITEM_TYPE_VXLAN.
rte_flow_item_vxlan_mask = rte_flow_item_vxlan(vni=b'\xff\xff\xff')
# RTE_FLOW_ITEM_TYPE_E_TAG
rte_flow_item_e_tag = collections.namedtuple('rte_flow_item_e_tag', ['epcp_edei_in_ecid_b', 'rsvd_grp_ecid_b',
'in_ecid_e', 'ecid_e', 'inner_type'])
rte_flow_item_e_tag.__new__.__defaults__ = (0, 0, 0, 0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_E_TAG.
rte_flow_item_e_tag_mask = rte_flow_item_e_tag(rsvd_grp_ecid_b=0x3fff)
# RTE_FLOW_ITEM_TYPE_NVGRE
rte_flow_item_nvgre = collections.namedtuple('rte_flow_item_nvgre', ['c_k_s_rsvd0_ver', 'protocol', 'tni', 'flow_id'])
rte_flow_item_nvgre.__new__.__defaults__ = (0, 0, b'', 0)
# Default mask for RTE_FLOW_ITEM_TYPE_NVGRE.
rte_flow_item_nvgre_mask = rte_flow_item_nvgre(tni=b'\xff\xff\xff')
# RTE_FLOW_ITEM_TYPE_MPLS
rte_flow_item_mpls = collections.namedtuple('rte_flow_item_mpls', ['label_tc_s', 'ttl'])
rte_flow_item_mpls.__new__.__defaults__ = (b'', 0)
# Default mask for RTE_FLOW_ITEM_TYPE_MPLS.
rte_flow_item_mpls_mask = rte_flow_item_mpls(label_tc_s=b'\xff\xff\xf0')
# RTE_FLOW_ITEM_TYPE_GRE
rte_flow_item_gre = collections.namedtuple('rte_flow_item_gre', ['c_rsvd0_ver', 'protocol'])
rte_flow_item_gre.__new__.__defaults__ = (0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_GRE.
rte_flow_item_gre_mask = rte_flow_item_gre(protocol=0xffff)
# RTE_FLOW_ITEM_TYPE_FUZZY
rte_flow_item_fuzzy = collections.namedtuple('rte_flow_item_fuzzy', ['thresh'])
rte_flow_item_fuzzy.__new__.__defaults__ = (0x0,)
# Default mask for RTE_FLOW_ITEM_TYPE_FUZZY.
rte_flow_item_fuzzy_mask = rte_flow_item_fuzzy(thresh=0xffffffff)
# RTE_FLOW_ITEM_TYPE_GTP
rte_flow_item_gtp = collections.namedtuple('rte_flow_item_gtp', ['v_pt_rsv_flags', 'msg_type', 'msg_len', 'teid'])
rte_flow_item_gtp.__new__.__defaults__ = (0, 0, 0, 0x0)
# Default mask for RTE_FLOW_ITEM_TYPE_GTP.
rte_flow_item_gtp_mask = rte_flow_item_gtp(teid=0xffffffff)
# header esp
rte_esp_hdr = collections.namedtuple('rte_esp_hdr', ['spi', 'seq'])
rte_esp_hdr.__new__.__defaults__ = (0, 0)
# RTE_FLOW_ITEM_TYPE_ESP
rte_flow_item_esp = collections.namedtuple('rte_flow_item_esp', ['hdr'])
rte_flow_item_esp.__new__.__defaults__ = (rte_esp_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_ESP.
rte_flow_item_esp_mask = rte_flow_item_esp(rte_esp_hdr(spi=0xffffffff))
# RTE_FLOW_ITEM_TYPE_GENEVE
rte_flow_item_geneve = collections.namedtuple('rte_flow_item_geneve', ['ver_opt_len_o_c_rsvd0', 'protocol', 'vni', 'rsvd1'])
rte_flow_item_geneve.__new__.__defaults__ = (0, 0, b'', 0)
# Default mask for RTE_FLOW_ITEM_TYPE_GENEVE.
rte_flow_item_geneve_mask = rte_flow_item_geneve(vni=b'\xff\xff\xff')
# RTE_FLOW_ITEM_TYPE_VXLAN_GPE
rte_flow_item_vxlan_gpe = collections.namedtuple('rte_flow_item_vxlan_gpe', ['flags', 'rsvd0', 'protocol', 'vni', 'rsvd1'])
rte_flow_item_vxlan_gpe.__new__.__defaults__ = (0, b'', 0, b'', 0)
# Default mask for RTE_FLOW_ITEM_TYPE_VXLAN_GPE.
rte_flow_item_vxlan_gpe_mask = rte_flow_item_vxlan_gpe(vni=b'\xff\xff\xff')
# RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4
rte_flow_item_arp_eth_ipv4 = collections.namedtuple('rte_flow_item_arp_eth_ipv4', ['hrd', 'pro', 'hln', 'pln', 'op',
'sha', 'spa', 'tha', 'tpa'])
rte_flow_item_arp_eth_ipv4.__new__.__defaults__ = (0, 0, 0, 0, 0, rte_ether_addr(), 0, rte_ether_addr(), 0)
# Default mask for RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4.
rte_flow_item_arp_eth_ipv4_mask = rte_flow_item_arp_eth_ipv4(sha=rte_ether_addr(b'\xff\xff\xff\xff\xff\xff'),
spa=0xffffffff,
tha=rte_ether_addr(b'\xff\xff\xff\xff\xff\xff'),
tpa=0xffffffff)
# RTE_FLOW_ITEM_TYPE_IPV6_EXT
rte_flow_item_ipv6_ext = collections.namedtuple('rte_flow_item_ipv6_ext', ['next_hdr'])
rte_flow_item_ipv6_ext.__new__.__defaults__ = (0,)
# Default mask for RTE_FLOW_ITEM_TYPE_IPV6_EXT.
rte_flow_item_ipv6_ext_mask = rte_flow_item_ipv6_ext(next_hdr=0xff)
# RTE_FLOW_ITEM_TYPE_ICMP6
rte_flow_item_icmp6 = collections.namedtuple('rte_flow_item_icmp6', ['type_', 'code', 'checksum'])
rte_flow_item_icmp6.__new__.__defaults__ = (0, 0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP6.
rte_flow_item_icmp6_mask = rte_flow_item_icmp6(type_=0xff, code=0xff)
# RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
rte_flow_item_icmp6_nd_ns = collections.namedtuple('rte_flow_item_icmp6_nd_ns', ['type_', 'code', 'checksum', 'reserved', 'target_addr'])
rte_flow_item_icmp6_nd_ns.__new__.__defaults__ = (0, 0, 0, 0, b'')
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS.
rte_flow_item_icmp6_nd_ns_mask = rte_flow_item_icmp6_nd_ns(target_addr=b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff')
# RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
rte_flow_item_icmp6_nd_na = collections.namedtuple('rte_flow_item_icmp6_nd_na', ['type_', 'code', 'checksum', 'rso_reserved', 'target_addr'])
rte_flow_item_icmp6_nd_na.__new__.__defaults__ = (0, 0, 0, 0, b'')
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA.
rte_flow_item_icmp6_nd_na_mask = rte_flow_item_icmp6_nd_na(target_addr=b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff')
# RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
rte_flow_item_icmp6_nd_opt = collections.namedtuple('rte_flow_item_icmp6_nd_opt', ['type_', 'length'])
rte_flow_item_icmp6_nd_opt.__new__.__defaults__ = (0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT.
rte_flow_item_icmp6_nd_opt_mask = rte_flow_item_icmp6_nd_opt(type_=0xff)
# RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH
rte_flow_item_icmp6_nd_opt_sla_eth = collections.namedtuple('rte_flow_item_icmp6_nd_opt_sla_eth', ['type_', 'length', 'sla'])
rte_flow_item_icmp6_nd_opt_sla_eth.__new__.__defaults__ = (0, 0, rte_ether_addr())
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH.
rte_flow_item_icmp6_nd_opt_sla_eth_mask = rte_flow_item_icmp6_nd_opt_sla_eth(sla=rte_ether_addr(addr_bytes=b'\xff\xff\xff\xff\xff\xff'))
# RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH
rte_flow_item_icmp6_nd_opt_tla_eth = collections.namedtuple('rte_flow_item_icmp6_nd_opt_tla_eth', ['type_', 'length', 'tla'])
rte_flow_item_icmp6_nd_opt_tla_eth.__new__.__defaults__ = (0, 0, rte_ether_addr())
# Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH.
rte_flow_item_icmp6_nd_opt_tla_eth_mask = rte_flow_item_icmp6_nd_opt_tla_eth(tla=rte_ether_addr(addr_bytes=b'\xff\xff\xff\xff\xff\xff'))
# RTE_FLOW_ITEM_TYPE_MARK
rte_flow_item_mark = collections.namedtuple('rte_flow_item_mark', ['id'])
rte_flow_item_mark.__new__.__defaults__ = (0,)
# Default mask for RTE_FLOW_ITEM_TYPE_MARK.
rte_flow_item_mark_mask = rte_flow_item_mark(id=0xffffffff)
# RTE_FLOW_ITEM_TYPE_META
rte_flow_item_meta = collections.namedtuple('rte_flow_item_meta', ['data'])
rte_flow_item_meta.__new__.__defaults__ = (0,)
# Default mask for RTE_FLOW_ITEM_TYPE_META.
rte_flow_item_meta_mask = rte_flow_item_meta(data=UINT32_MAX)
# RTE_FLOW_ITEM_TYPE_GTP_PSC.
rte_flow_item_gtp_psc = collections.namedtuple('rte_flow_item_gtp_psc', ['pdu_type', 'qfi'])
rte_flow_item_gtp_psc.__new__.__defaults__ = (0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_GTP_PSC.
rte_flow_item_gtp_psc_mask = rte_flow_item_gtp_psc(qfi=0x3f)
# RTE_FLOW_ITEM_TYPE_PPPOE
rte_flow_item_pppoe = collections.namedtuple('rte_flow_item_pppoe', ['version_type', 'code', 'session_id', 'length'])
rte_flow_item_pppoe.__new__.__defaults__ = (0, 0, 0, 0)
# RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID
rte_flow_item_pppoe_proto_id = collections.namedtuple('rte_flow_item_pppoe_proto_id', ['proto_id'])
rte_flow_item_pppoe_proto_id.__new__.__defaults__ = (0,)
# Default mask for RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID.
rte_flow_item_pppoe_proto_id_mask = rte_flow_item_pppoe_proto_id(proto_id=0xffff)
# RTE_FLOW_ITEM_TYPE_NSH
rte_flow_item_nsh = collections.namedtuple('rte_flow_item_nsh', ['version', 'oam_pkt', 'reserved', 'ttl', 'length',
'reserved1', 'mdtype', 'next_proto', 'spi', 'sindex'])
rte_flow_item_nsh.__new__.__defaults__ = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_NSH.
rte_flow_item_nsh_mask = rte_flow_item_nsh(mdtype=0xf, next_proto=0xff, spi=0xffffff, sindex=0xff)
# RTE_FLOW_ITEM_TYPE_IGMP
rte_flow_item_igmp = collections.namedtuple('rte_flow_item_igmp', ['type_', 'max_resp_time', 'checksum', 'group_addr'])
rte_flow_item_igmp.__new__.__defaults__ = (0, 0, 0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_IGMP.
rte_flow_item_igmp_mask = rte_flow_item_igmp(group_addr=0xffffffff)
# RTE_FLOW_ITEM_TYPE_AH
rte_flow_item_ah = collections.namedtuple('rte_flow_item_ah', ['next_hdr', 'payload_len', 'reserved', 'spi', 'seq_num'])
rte_flow_item_ah.__new__.__defaults__ = (0, 0, 0, 0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_AH.
rte_flow_item_ah_mask = rte_flow_item_ah(spi=0xffffffff)
# higig2 frc header.
rte_higig2_frc = collections.namedtuple('rte_higig2_frc', ['ksop', 'tc', 'mcst', 'resv', 'dst_modid', 'dst_pid',
'src_modid', 'src_pid', 'lbid', 'ppd_type', 'resv1', 'dp'])
rte_higig2_frc.__new__.__defaults__ = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# higig2 ppt type0 header
rte_higig2_ppt_type0 = collections.namedtuple('rte_higig2_ppt_type0', ['mirror', 'mirror_done', 'mirror_only', 'ingress_tagged',
'dst_tgid', 'dst_t', 'vc_label2', 'label_present', 'l3',
'res', 'vc_label1', 'vc_label0', 'vid_high', 'vid_low',
'opc', 'res1', 'srce_t', 'pf', 'res2', 'hdr_ext_length'])
rte_higig2_ppt_type0.__new__.__defaults__ = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# higig2 ppt type1 header
rte_higig2_ppt_type1 = collections.namedtuple('rte_higig2_ppt_type1', ['classification', 'resv', 'vid', 'opcode', 'resv1',
'src_t', 'pfm', 'resv2', 'hdr_ext_len'])
rte_higig2_ppt_type1.__new__.__defaults__ = (0, 0, 0, 0, 0, 0, 0, 0, 0)
# higig2 header
rte_higig2_hdr = collections.namedtuple('rte_higig2_hdr', ['fcr', 'ppt0', 'ppt1'])
rte_higig2_hdr.__new__.__defaults__ = (rte_higig2_frc(), rte_higig2_ppt_type0(), rte_higig2_ppt_type1())
# RTE_FLOW_ITEM_TYPE_HIGIG2
rte_flow_item_higig2_hdr = collections.namedtuple('rte_flow_item_higig2_hdr', ['hdr'])
rte_flow_item_higig2_hdr.__new__.__defaults__ = (rte_higig2_hdr(),)
# Default mask for RTE_FLOW_ITEM_TYPE_HIGIG2.
rte_flow_item_rte_higig2_hdr_mask = rte_flow_item_higig2_hdr(hdr=rte_higig2_hdr(ppt1=rte_higig2_ppt_type1(classification=0xffff, vid=0xfff)))
# RTE_FLOW_ITEM_TYPE_TAG
rte_flow_item_tag = collections.namedtuple('rte_flow_item_tag', ['data', 'index'])
rte_flow_item_tag.__new__.__defaults__ = (0, 0)
# Default mask for RTE_FLOW_ITEM_TYPE_TAG.
rte_flow_item_tag_mask = rte_flow_item_tag(data=0xffffffff, index=0xff)
# RTE_FLOW_ITEM_TYPE_L2TPV3OIP
rte_flow_item_l2tpv3oip = collections.namedtuple('rte_flow_item_l2tpv3oip', ['session_id'])
rte_flow_item_l2tpv3oip.__new__.__defaults__ = (0,)
# Default mask for RTE_FLOW_ITEM_TYPE_L2TPV3OIP.
rte_flow_item_l2tpv3oip_mask = rte_flow_item_l2tpv3oip(session_id=UINT32_MAX)
rte_flow_item = collections.namedtuple('rte_flow_item', ['type_', 'spec', 'last', 'mask'])
rte_flow_item.__new__.__defaults__ = (None, None, None)
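# Illustrative sketch (not part of the original bindings): a flow pattern is a list
# of rte_flow_item entries pairing a spec with a mask. The item-type enum member
# names below (RTE_FLOW_ITEM_TYPE_ETH / _IPV4 / _END) and the enum name
# rte_flow_item_type are assumed to match the item-type enum defined earlier in this module.
# example_pattern = [
# rte_flow_item(type_=rte_flow_item_type.RTE_FLOW_ITEM_TYPE_ETH,
# spec=rte_flow_item_eth(), mask=rte_flow_item_eth_mask),
# rte_flow_item(type_=rte_flow_item_type.RTE_FLOW_ITEM_TYPE_IPV4,
# spec=rte_flow_item_ipv4(rte_ipv4_hdr(dst_addr=0x0a000001)),
# mask=rte_flow_item_ipv4_mask),
# rte_flow_item(type_=rte_flow_item_type.RTE_FLOW_ITEM_TYPE_END),
# ]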
@unique
class rte_flow_action_type(Enum):
RTE_FLOW_ACTION_TYPE_END = 0
RTE_FLOW_ACTION_TYPE_VOID = 1
RTE_FLOW_ACTION_TYPE_PASSTHRU = 2
RTE_FLOW_ACTION_TYPE_JUMP = 3
RTE_FLOW_ACTION_TYPE_MARK = 4
RTE_FLOW_ACTION_TYPE_FLAG = 5
RTE_FLOW_ACTION_TYPE_QUEUE = 6
RTE_FLOW_ACTION_TYPE_DROP = 7
RTE_FLOW_ACTION_TYPE_COUNT = 8
RTE_FLOW_ACTION_TYPE_RSS = 9
RTE_FLOW_ACTION_TYPE_PF = 10
RTE_FLOW_ACTION_TYPE_VF = 11
RTE_FLOW_ACTION_TYPE_PHY_PORT = 12
RTE_FLOW_ACTION_TYPE_PORT_ID = 13
RTE_FLOW_ACTION_TYPE_METER = 14
RTE_FLOW_ACTION_TYPE_SECURITY = 15
RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL = 16
RTE_FLOW_ACTION_TYPE_OF_DEC_MPLS_TTL = 17
RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL = 18
RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL = 19
RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_OUT = 20
RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_IN = 21
RTE_FLOW_ACTION_TYPE_OF_POP_VLAN = 22
RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN = 23
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID = 24
RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP = 25
RTE_FLOW_ACTION_TYPE_OF_POP_MPLS = 26
RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS = 27
RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP = 28
RTE_FLOW_ACTION_TYPE_VXLAN_DECAP = 29
RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP = 30
RTE_FLOW_ACTION_TYPE_NVGRE_DECAP = 31
RTE_FLOW_ACTION_TYPE_RAW_ENCAP = 32
RTE_FLOW_ACTION_TYPE_RAW_DECAP = 33
RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC = 34
RTE_FLOW_ACTION_TYPE_SET_IPV4_DST = 35
RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC = 36
RTE_FLOW_ACTION_TYPE_SET_IPV6_DST = 37
RTE_FLOW_ACTION_TYPE_SET_TP_SRC = 38
RTE_FLOW_ACTION_TYPE_SET_TP_DST = 39
RTE_FLOW_ACTION_TYPE_MAC_SWAP = 40
RTE_FLOW_ACTION_TYPE_DEC_TTL = 41
RTE_FLOW_ACTION_TYPE_SET_TTL = 42
RTE_FLOW_ACTION_TYPE_SET_MAC_SRC = 43
RTE_FLOW_ACTION_TYPE_SET_MAC_DST = 44
RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ = 45
RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ = 46
RTE_FLOW_ACTION_TYPE_INC_TCP_ACK = 47
RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK = 48
RTE_FLOW_ACTION_TYPE_SET_TAG = 49
RTE_FLOW_ACTION_TYPE_SET_META = 50
# RTE_FLOW_ACTION_TYPE_JUMP
rte_flow_action_jump = collections.namedtuple('rte_flow_action_jump', ['group'])
rte_flow_action_jump.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_MARK
rte_flow_action_mark = collections.namedtuple('rte_flow_action_mark', ['id'])
rte_flow_action_mark.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_QUEUE
rte_flow_action_queue = collections.namedtuple('rte_flow_action_queue', ['index'])
rte_flow_action_queue.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_COUNT
rte_flow_action_count = collections.namedtuple('rte_flow_action_count', ['shared', 'reserved', 'id'])
rte_flow_action_count.__new__.__defaults__ = (0, 0, 0)
# RTE_FLOW_ACTION_TYPE_COUNT (query)
rte_flow_query_count = collections.namedtuple('rte_flow_query_count', ['reset', 'hits_set', 'bytes_set', 'reserved',
'hits', 'bytes'])
rte_flow_query_count.__new__.__defaults__ = (0, 0, 0, 0, 0, 0)
# Hash function types.
@unique
class rte_eth_hash_function(Enum):
RTE_ETH_HASH_FUNCTION_DEFAULT = 0
RTE_ETH_HASH_FUNCTION_TOEPLITZ = 2 # Toeplitz
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR = 3 # Simple XOR
RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ = 4
RTE_ETH_HASH_FUNCTION_MAX = 5
# RTE_FLOW_ACTION_TYPE_RSS
rte_flow_action_rss = collections.namedtuple('rte_flow_action_rss', ['func', 'level', 'types', 'key_len',
'queue_num', 'key', 'queue'])
rte_flow_action_rss.__new__.__defaults__ = (0, 0, 0, 0, 0, None, None)
# RTE_FLOW_ACTION_TYPE_VF
rte_flow_action_vf = collections.namedtuple('rte_flow_action_vf', ['reserved', 'original', 'id'])
rte_flow_action_vf.__new__.__defaults__ = (0, 0, 1)
# RTE_FLOW_ITEM_TYPE_PHY_PORT
rte_flow_action_phy_port = collections.namedtuple('rte_flow_action_phy_port', ['original', 'reserved', 'index'])
rte_flow_action_phy_port.__new__.__defaults__ = (0, 0, 1)
# RTE_FLOW_ACTION_TYPE_PORT_ID
rte_flow_action_port_id = collections.namedtuple('rte_flow_action_port_id', ['original', 'reserved', 'id'])
rte_flow_action_port_id.__new__.__defaults__ = (0, 0, 1)
# RTE_FLOW_ACTION_TYPE_METER
rte_flow_action_meter = collections.namedtuple('rte_flow_action_meter', ['mtr_id'])
rte_flow_action_meter.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_SECURITY
rte_flow_action_security = collections.namedtuple('rte_flow_action_security', ['security_session'])
rte_flow_action_security.__new__.__defaults__ = (None,)
# RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL
rte_flow_action_of_set_mpls_ttl = collections.namedtuple('rte_flow_action_of_set_mpls_ttl', ['mpls_ttl'])
rte_flow_action_of_set_mpls_ttl.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL
rte_flow_action_of_set_nw_ttl = collections.namedtuple('rte_flow_action_of_set_nw_ttl', ['nw_ttl'])
rte_flow_action_of_set_nw_ttl.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN
rte_flow_action_of_push_vlan = collections.namedtuple('rte_flow_action_of_push_vlan', ['ethertype'])
rte_flow_action_of_push_vlan.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID
rte_flow_action_of_set_vlan_vid = collections.namedtuple('rte_flow_action_of_set_vlan_vid', ['vlan_vid'])
rte_flow_action_of_set_vlan_vid.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP
rte_flow_action_of_set_vlan_pcp = collections.namedtuple('rte_flow_action_of_set_vlan_pcp', ['vlan_pcp'])
rte_flow_action_of_set_vlan_pcp.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_OF_POP_MPLS
rte_flow_action_of_pop_mpls = collections.namedtuple('rte_flow_action_of_pop_mpls', ['ethertype'])
rte_flow_action_of_pop_mpls.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS
rte_flow_action_of_push_mpls = collections.namedtuple('rte_flow_action_of_push_mpls', ['ethertype'])
rte_flow_action_of_push_mpls.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
rte_flow_action_vxlan_encap = collections.namedtuple('rte_flow_action_vxlan_encap', ['definition'])
# RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP
rte_flow_action_nvgre_encap = collections.namedtuple('rte_flow_action_nvgre_encap', ['definition'])
# RTE_FLOW_ACTION_TYPE_RAW_ENCAP
rte_flow_action_raw_encap = collections.namedtuple('rte_flow_action_raw_encap', ['data', 'preserve', 'size'])
rte_flow_action_raw_encap.__new__.__defaults__ = (None, None, 0)
# RTE_FLOW_ACTION_TYPE_RAW_DECAP
rte_flow_action_raw_decap = collections.namedtuple('rte_flow_action_raw_decap', ['data', 'size'])
rte_flow_action_raw_decap.__new__.__defaults__ = (None, 0)
# RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
# RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
rte_flow_action_set_ipv4 = collections.namedtuple('rte_flow_action_set_ipv4', ['ipv4_addr'])
rte_flow_action_set_ipv4.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
# RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
rte_flow_action_set_ipv6 = collections.namedtuple('rte_flow_action_set_ipv6', ['ipv6_addr'])
rte_flow_action_set_ipv6.__new__.__defaults__ = (b'',)
# RTE_FLOW_ACTION_TYPE_SET_TP_SRC
# RTE_FLOW_ACTION_TYPE_SET_TP_DST
rte_flow_action_set_tp = collections.namedtuple('rte_flow_action_set_tp', ['port'])
rte_flow_action_set_tp.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_SET_TTL
rte_flow_action_set_ttl = collections.namedtuple('rte_flow_action_set_ttl', ['ttl_value'])
rte_flow_action_set_ttl.__new__.__defaults__ = (0,)
# RTE_FLOW_ACTION_TYPE_SET_MAC
rte_flow_action_set_mac = collections.namedtuple('rte_flow_action_set_mac', ['mac_addr'])
rte_flow_action_set_mac.__new__.__defaults__ = (b'',)
# RTE_FLOW_ACTION_TYPE_SET_TAG
rte_flow_action_set_tag = collections.namedtuple('rte_flow_action_set_tag', ['data', 'mask', 'index'])
rte_flow_action_set_tag.__new__.__defaults__ = (0, 0, 0)
# RTE_FLOW_ACTION_TYPE_SET_META
rte_flow_action_set_meta = collections.namedtuple('rte_flow_action_set_meta', ['data', 'mask'])
rte_flow_action_set_meta.__new__.__defaults__ = (0, 0)
# RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
# RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
rte_flow_action_set_dscp = collections.namedtuple('rte_flow_action_set_dscp', ['dscp'])
rte_flow_action_set_dscp.__new__.__defaults__ = (0,)
rte_flow_action = collections.namedtuple('rte_flow_action', ['type_', 'conf'])
rte_flow_action.__new__.__defaults__ = (None,)
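# Illustrative sketch (not in the original source): an action pairs an action type
# with its configuration namedtuple, e.g. mark matching packets and steer them to queue 1.
# example_actions = [
# rte_flow_action(type_=rte_flow_action_type.RTE_FLOW_ACTION_TYPE_MARK,
# conf=rte_flow_action_mark(id=0x1234)),
# rte_flow_action(type_=rte_flow_action_type.RTE_FLOW_ACTION_TYPE_QUEUE,
# conf=rte_flow_action_queue(index=1)),
# rte_flow_action(type_=rte_flow_action_type.RTE_FLOW_ACTION_TYPE_END),
# ]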
@unique
class rte_flow_error_type(Enum):
RTE_FLOW_ERROR_TYPE_NONE = 0
RTE_FLOW_ERROR_TYPE_UNSPECIFIED = 1
RTE_FLOW_ERROR_TYPE_HANDLE = 2
RTE_FLOW_ERROR_TYPE_ATTR_GROUP = 3
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY = 4
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS = 5
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS = 6
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER = 7
RTE_FLOW_ERROR_TYPE_ATTR = 8
RTE_FLOW_ERROR_TYPE_ITEM_NUM = 9
RTE_FLOW_ERROR_TYPE_ITEM_SPEC = 10
RTE_FLOW_ERROR_TYPE_ITEM_LAST = 11
RTE_FLOW_ERROR_TYPE_ITEM_MASK = 12
RTE_FLOW_ERROR_TYPE_ITEM = 13
RTE_FLOW_ERROR_TYPE_ACTION_NUM = 14
RTE_FLOW_ERROR_TYPE_ACTION_CONF = 15
RTE_FLOW_ERROR_TYPE_ACTION = 16
# rte_flow_error
rte_flow_error = collections.namedtuple('rte_flow_error', ['type_', 'cause', 'message'])
rte_flow_error.__new__.__defaults__ = (None, None, None)
# RTE_FLOW_CONV_OP_RULE
rte_flow_conv_rule = collections.namedtuple('rte_flow_conv_rule', ['attr_ro', 'attr', 'pattern_ro', 'pattern',
'actions_ro', 'actions'])
rte_flow_conv_rule.__new__.__defaults__ = (None, None, None, None, None, None)
@unique
class rte_flow_conv_op(Enum):
RTE_FLOW_CONV_OP_NONE = 0
RTE_FLOW_CONV_OP_ATTR = 1
RTE_FLOW_CONV_OP_ITEM = 2
RTE_FLOW_CONV_OP_ACTION = 3
RTE_FLOW_CONV_OP_PATTERN = 4
RTE_FLOW_CONV_OP_ACTIONS = 5
RTE_FLOW_CONV_OP_RULE = 6
RTE_FLOW_CONV_OP_ITEM_NAME = 7
RTE_FLOW_CONV_OP_ACTION_NAME = 8
RTE_FLOW_CONV_OP_ITEM_NAME_PTR = 9
RTE_FLOW_CONV_OP_ACTION_NAME_PTR = 10
rte_flow_desc = collections.namedtuple('rte_flow_desc', ['size', 'attr', 'items', 'actions', 'data'])
rte_flow_desc.__new__.__defaults__ = (0, 0, None, None, b'')
rte_flow_attr = collections.namedtuple('rte_flow_attr', ['group', 'priority', 'ingress', 'egress', 'transfer', 'reserved'])
rte_flow_attr.__new__.__defaults__ = (0, 0, 1, 0, 0, 0)
rte_flow_list_result = collections.namedtuple('rte_flow_list_result', ['flow_id', 'description'])
rte_flow_list_result.__new__.__defaults__ = (0, '')
class QosError(ValueError):
def __init__(self, e_code, e_msg):
self.e_code = e_code
self.e_msg = e_msg
| 1.796875
| 2
|
PostDiffMixture/simulations_folder/simulation_analysis_scripts/scatter_plot_functions.py
|
SIGKDDanon/SIGKDD2021DeAnonV2
| 0
|
12776517
|
import matplotlib
matplotlib.use('Agg')
#matplotlib.use("gtk")
#matplotlib.use('Qt5Agg')
from rectify_vars_and_wald_functions import *
import pickle
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '../../le_experiments/')
# print(data)
import numpy as np
import os
from scipy import stats
from matplotlib.pyplot import figure
from pathlib import Path
import glob
import numpy as np
import read_config
from output_format import H_ALGO_ACTION_FAILURE, H_ALGO_ACTION_SUCCESS, H_ALGO_ACTION, H_ALGO_OBSERVED_REWARD
from output_format import H_ALGO_ESTIMATED_MU, H_ALGO_ESTIMATED_V, H_ALGO_ESTIMATED_ALPHA, H_ALGO_ESTIMATED_BETA
from output_format import H_ALGO_PROB_BEST_ACTION, H_ALGO_NUM_TRIALS
import beta_bernoulli
import scipy.stats
from scipy.stats import spearmanr
from scipy.stats import pearsonr
#import thompson_policy
import ipdb
EPSILON_PROB = .000001
DESIRED_POWER = 0.8
DESIRED_ALPHA = 0.05
SMALL_SIZE = 10
MEDIUM_SIZE = 13
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=8.5) # fontsize of the tick labels
plt.rc('ytick', labelsize=10) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
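# Helper sketch (not in the original script): the two-sided Wald p-value
# computation that is repeated inline in the functions below, shown once here.
def two_sided_wald_pvalue(wald_stat):
# Two-sided p-value for a Wald z-statistic; symmetric, so compared against 0.05.
return 2 * (1 - scipy.stats.norm.cdf(np.abs(wald_stat)))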
def plot_minssratio_vs_algs(ax, df_list, x_label, y_label):
# ipdb.set_trace()
idx = 0
ind = np.arange(4)
ax.set_xticks(ind)
labels = ('Uniform', 'EG0pt3', 'EG0pt1', 'TS')
ax.set_xticklabels(labels)
for df in df_list:
df.loc[df[y_label] > 1.0, y_label] = 1/df.loc[df[y_label] > 1.0, y_label] # ratio is smaller sample size / larger sample size
df_reject = df[df[x_label] == True]
x_idx = np.zeros(len(df_reject[x_label])) + idx
jitter = np.random.normal(0, 0.1, len(x_idx))/2
if idx == 0:
ax.scatter(x_idx + jitter,df_reject[y_label], color = 'red', label = "Rejected Null With Wald Test")
else:
ax.scatter(x_idx + jitter,df_reject[y_label], color = 'red')
df_accept = df[df[x_label] == False]
x_idx = np.zeros(len(df_accept[x_label])) + idx
jitter = np.random.normal(0, 0.1, len(x_idx))/2
if idx == 0:
ax.scatter(x_idx + jitter, df_accept[y_label], color = 'blue', label = "Failed to Reject Null With Wald Test")
else:
ax.scatter(x_idx + jitter, df_accept[y_label], color = 'blue')
idx +=1
def scatter_ratio(df = None, to_check_eg0pt1 = None, to_check_eg0pt3 = None, to_check_unif = None, to_check_ipw = None, n = None, num_sims = None, load_df = True, \
title = None,\
to_check_ts = None):
'''
Scatter the minimum sample-size ratio for each algorithm (Uniform, EG0.3, EG0.1, TS),
split by whether the Wald test rejected the null, at each number of participants.
'''
if load_df == True:
with open(to_check_eg0pt1, 'rb') as f:
df_eg0pt1 = pickle.load(f)
with open(to_check_eg0pt3, 'rb') as f:
df_eg0pt3 = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
if to_check_ipw != None:
ipw_t1_list = np.load(to_check_ipw)
if to_check_ts != None:
with open(to_check_ts, 'rb') as t:
df_ts = pickle.load(t)
# SE = np.sqrt(mean_1*(1 - mean_1)/sample_size_1 + mean_2*(1 - mean_2)/sample_size_2)
df_eg0pt1 = df_eg0pt1.dropna()
wald_pval_eg0pt1 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_eg0pt1["Wald Rejected"] = wald_pval_eg0pt1 < 0.05
df_eg0pt1.to_csv("overview_csvs/EG0pt1/eg0pt1_overview_noNa_n={}.csv".format(n))
df_eg0pt3 = df_eg0pt3.dropna()
wald_pval_eg0pt3 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_eg0pt3["Wald Rejected"] = wald_pval_eg0pt3 < 0.05
df_eg0pt3.to_csv("overview_csvs/EG0pt3/eg0pt3_overview_noNa_n={}.csv".format(n))
df_ts = df_ts.dropna()
wald_pval_ts = (1 - scipy.stats.norm.cdf(np.abs(df_ts["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_ts["Wald Rejected"] = wald_pval_ts < 0.05
df_ts.to_csv("overview_csvs/TS/ts_overview_noNa_n={}.csv".format(n))
df_unif = df_unif.dropna()
wald_pval_unif = (1 - scipy.stats.norm.cdf(np.abs(df_unif["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_unif["Wald Rejected"] = wald_pval_unif < 0.05 #print(data)
df_unif.to_csv("overview_csvs/unif/unif_overview_noNa_n={}.csv".format(n))
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps].dropna()
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]
df_list = [df_for_num_steps_unif, df_for_num_steps_eg0pt3, df_for_num_steps_eg0pt1, df_for_num_steps_ts]
# df_list = [df_for_num_steps_eg0pt1]
#df_list = [df_for_num_steps_ts]
# df_list = [df_for_num_steps_unif]
y_label = "ratio"
x_label = "Wald Rejected"
plot_minssratio_vs_algs(ax = ax[i], df_list = df_list, x_label = x_label, y_label = y_label)
num_replications = len(df_for_num_steps_eg0pt1)
#
ax[i].set_xlabel("Number of participants = {} = {}".format(size_vars[i], num_steps))
ax[i].legend()
ax[i].set_ylim(0,1.02)
ax[i].set_ylabel("Minimum Sample Size Ratio \n Min($\\frac{n_1}{n_2}$, $\\frac{n_2}{n_1}$)")
i +=1
fig.suptitle(title)
#fig.tight_layout(rect=[0, 0.03, 1, 0.90])
# if not os.path.isdir("plots"):
# os.path.mkdir("plots")
save_dir_ne = "../simulation_analysis_saves/scatter_ratio_waldreject/NoEffect/"
save_dir_e = "../simulation_analysis_saves/scatter_ratio_waldreject/Effect/"
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = save_dir_ne + "{}.png".format(title)
save_str_e = save_dir_e + "{}.png".format(title)
# save_str_ne = "../simulation_analysis_saves/scatter_ratio_waldreject/NoEffect/{}.png".format(title)
# save_str_e = "../simulation_analysis_saves/scatter_ratio_waldreject/Effect/{}.png".format(title)
if "No Effect" in title:
print("saving to ", save_str_ne)
fig.savefig(save_str_ne, bbox_inches = "tight")
elif "With Effect" in title:
print("saving to ", save_str_e, bbox_inches = "tight")
fig.savefig(save_str_e)
#plt.show()
plt.clf()
plt.close()
def plot_correlation(fig, ax, df_list, x_label, y_label, num_steps, ax_idx):
# ipdb.set_trace()
idx = 0
df = df_list[0]
# for df in df_list: #This loop not needed
df_reject = df[df["Wald Rejected"] == True]
xvals = np.abs(df_reject[x_label]/num_steps - 0.5) #Ratio is smaller sample size/ larger sample size
yvals = np.abs(df_reject[y_label.format(2)] - df_reject[y_label.format(1)]) #Ratio is smaller sample size/ larger sample size
if ax_idx == 0:
ax.scatter(xvals, yvals, color = 'red', label = "Rejected Null With Wald Test")
else:
ax.scatter(xvals,yvals, color = 'red')
df_accept = df[df["Wald Rejected"] == False]
xvals = np.abs(df_accept[x_label]/num_steps - 0.5) #Ratio is smaller sample size/ larger sample size
yvals = np.abs(df_accept[y_label.format(2)] - df_accept[y_label.format(1)]) #Ratio is smaller sample size/ larger sample size
if len(df) == 0:
ipdb.set_trace()
print()
proportion_reject = len(df_reject)/len(df)
yvals_all = np.abs(df[y_label.format(2)] - df[y_label.format(1)]) #Ratio is smaller sample size/ larger sample size
xvals_all = np.abs(df[x_label]/num_steps - 0.5) #Ratio is smaller sample size/ larger sample size
proportion_reject = np.round(proportion_reject, 3)
coeff, p = spearmanr(xvals_all, yvals_all)
coeff = np.round(coeff, 3)
p = np.round(p, 3)
coeff_pear, p_pear = pearsonr(xvals_all, yvals_all)
coeff_pear = np.round(coeff_pear, 3)
p_pear = np.round(p_pear, 3)
if ax_idx == 0:
ax.scatter(xvals, yvals, color = 'blue', label = "Failed to Reject Null With Wald Test")
ax.legend(loc = "upper center", bbox_to_anchor = (1.2, 1.276))
else:
ax.scatter(xvals,yvals , color = 'blue')
ax.text(0.02, 0.75,"Proprtion Rejected (Power/Type 1 Error) = {} \nSpearman's Correlation Coefficent = {} \nwith pvalue = {}\n Pearon's Correlation Coefficent = {} \nwith pvalue = {}".format(proportion_reject, coeff, p, coeff_pear, p_pear))
# if ax_idx == 0 and 0:
# leg1 = ax.legend((p_red[0], p_blue[0]), "Rejected Null Hypothesis With Wald Test", "Failed To Reject Null Hypothesis With Wald Test", bbox_to_anchor = (1.0, 1.076))
# ax.add_artist(leg1)
# handles, labels = ax.get_legend_handles_labels()
# fig.legend(handles, ["a","g"], loc='upper right', prop={'size': 50})
def scatter_correlation_helper_outer(df = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, n = None, num_sims = None, load_df = True, \
title = None,\
df_ts = None, effect_size = 0):
alg_key_list = ["TS", "EG0pt1", "EG0pt3", "Uniform"]
alg_key_list = ["TS"]
alg_name_list = ["Thompson Sampling (TS)","Epsilon Greedy 0.1 (EG0.1)","Epsilon Greedy 0.3 (EG0.3)", "Uniform"]
alg_name_list = ["Thompson Sampling (TS)"]
for alg_key, alg_name in zip(alg_key_list, alg_name_list):
if effect_size == 0:
title_scatter_corr = "{} ".format(alg_name) + "Difference in arm means (|$\hatp_1$ - $\hatp_2$|) vs. |Proportion of samples in Condtion 1 - 0.5|" + " For n = {} \n Across {} Simulations \n No Effect $p_1$ = $p_2$ = 0.5".format(n, num_sims)
else:
title_scatter_corr = "{} ".format(alg_name) + "Difference in arm means (|$\hatp_1$ - $\hatp_2$|) vs. |Proportion of samples in Condtion 1 - 0.5|" + " For n = {} \n Across {} Simulations \n With Effect Size {}".format(n, num_sims, effect_size)
scatter_correlation(df_eg0pt1 = df_eg0pt1 , df_eg0pt3 = df_eg0pt3,\
df_unif = df_unif, df_ts = df_ts,\
title = title_scatter_corr, \
n = n, num_sims = num_sims, alg_key = alg_key)
def scatter_correlation(df = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, n = None, num_sims = None, load_df = True, \
title = None,\
df_ts = None, alg_key = "TS"):
'''
Plots |proportion of samples in condition 1 - 0.5| against the absolute difference in arm mean estimates, which captures the allocation imbalance directly.
'''
df_eg0pt1 = df_eg0pt1
wald_pval_eg0pt1 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
#df_eg0pt1["Wald Rejected"] = wald_pval_eg0pt1 < 0.05
df_eg0pt1["Wald Rejected"] = df_eg0pt1["wald_pval"] < 0.05
#df_eg0pt3 = df_eg0pt3.dropna()
wald_pval_eg0pt3 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_eg0pt3["Wald Rejected"] = df_eg0pt3["wald_pval"] < 0.05
#df_ts = df_ts.dropna()
wald_pval_ts = (1 - scipy.stats.norm.cdf(np.abs(df_ts["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_ts["Wald Rejected"] = df_ts["wald_pval"] < 0.05
# df_unif = df_unif.dropna()
wald_pval_unif = (1 - scipy.stats.norm.cdf(np.abs(df_unif["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_unif["Wald Rejected"] = df_unif["wald_pval"] < 0.05
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
alg_key = "TS" #ALWAYS TS
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps]
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]
#df_list = [df_for_num_steps_unif, df_for_num_steps_eg0pt3, df_for_num_steps_eg0pt1, df_for_num_steps_ts]
# df_list = [df_for_num_steps_eg0pt3]
alg_dict = {"TS":df_for_num_steps_ts, "EG0pt1":df_for_num_steps_eg0pt1, "EG0pt3":df_for_num_steps_eg0pt3, "Uniform":df_for_num_steps_unif}
df_list = [alg_dict[alg_key]]
# df_list = [df_for_num_steps_ts]
#df_list = [df_for_num_steps_ts]
# df_list = [df_for_num_steps_unif]
# bins = np.arange(0, 1.01, .025)
x_label = "sample_size_1"
y_label = "mean_{}"
if len(df_list[0]) == 0:
ipdb.set_trace()
plot_correlation(fig, ax = ax[i], df_list = df_list, x_label = x_label, y_label = y_label, num_steps = num_steps, ax_idx = i)
num_replications = len(df_for_num_steps_eg0pt1)
#
#
ax[i].set_xlabel("|Proportion of samples in Condtion 1 - 0.5| For Number of participants = {} = {}".format(size_vars[i], num_steps))
# ax[i].legend()
ax[i].set_ylim(0,1.02)
ax[i].set_xlim(0, 0.501)
ax[i].set_ylabel("Difference in Arm Mean Estimates |$\hatp1$ - $\hatp2$|")
i +=1
fig.suptitle(title)
fig.subplots_adjust(top=0.80)
# fig.tight_layout(rect=[0, 0.03, 1, 0.90])
# if not os.path.isdir("plots"):
# os.path.mkdir("plots")
save_dir_ne = "../simulation_analysis_saves/scatter_correlation/NoEffect/"
save_dir_e = "../simulation_analysis_saves/scatter_correlation/Effect/"
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = save_dir_ne + "{}.png".format(title)
save_str_e = save_dir_e + "{}.png".format(title)
# save_str_ne = "../simulation_analysis_saves/scatter_correlation/NoEffect/{}/{}.png".format(alg_key, title)
# save_str_e = "../simulation_analysis_saves/scatter_correlation/Effect/{}/{}.png".format(alg_key, title)
if "No Effect" in title:
print("saving to ", save_str_ne)
fig.savefig(save_str_ne, bbox_inches = "tight")
elif "With Effect" in title:
print("saving to ", save_str_e)
fig.savefig(save_str_e, bbox_inches = "tight")
#plt.show()
plt.clf()
plt.close()
| 2.203125
| 2
|
src/main.py
|
dynamitejustice/twitch-cli
| 0
|
12776518
|
<reponame>dynamitejustice/twitch-cli<filename>src/main.py
#!/usr/bin/python3
import os
import sys
import requests
import subprocess
import json
import click
from termcolor import colored, COLORS
from urllib.parse import urlencode
import webbrowser
import numpy as np
from config import *
os.system('color')
TWITCH_CLIENT_ID = 'e0fm2z7ufk73k2jnkm21y0gp1h9q2o'
COLORS.update({
'light_grey': 90,
'light_red': 91,
'light_green': 92,
'light_yellow': 93,
'light_blue': 94,
'light_magenta': 95,
'light_cyan': 96,
'light_white': 97
})
@click.group(invoke_without_command=True)
@click.pass_context
@click.option('--config', help='Configuration file location')
def main(ctx, config):
"""List or play Twitch streams"""
if config is not None:
set_config_path(config)
load_config()
if ctx.invoked_subcommand is None:
cmd_live()
# The cmd_* functions get called when their respective subcommand is executed
# Example: "python3 twitch-cli live" calls "cmd_live"
@main.command('live')
@click.option('--flat', is_flag=True, help='Don\'t show detailed information or prompt')
@click.option('--game', help='Show live streams for a specific game')
@click.option('-q', '--quality', help='Comma-separated stream qualities')
def cmd_live(flat, game, quality):
"""List live channels"""
list_streams(game=game, flat=flat, playback_quality=quality)
@main.command('vods')
@click.option('--flat', is_flag=True, help='Don\'t show detailed information or prompt')
@click.argument('channel')
@click.option('-q', '--quality', help='Comma-separated stream qualities')
def cmd_vods(channel, flat, quality):
"""List past streams of a channel"""
list_vods(channel, flat, playback_quality=quality)
@main.command('play')
@click.option('-q', '--quality', help='Comma-separated stream qualities')
@click.argument('channel')
def cmd_play(channel, quality):
"""Play a livestream"""
play_stream(channel, quality=quality)
# @main.command('follow')
# @click.argument('channel')
# def cmd_follow(channel):
# """Follow a channel"""
# follow_channel(channel)
# @main.command('unfollow')
# @click.argument('channel')
# def cmd_unfollow(channel):
# """Unfollow a channel"""
# unfollow_channel(channel)
@main.command('auth')
@click.option('--force', '-f', is_flag=True, help='Overwrite existing OAuth token')
def cmd_auth(force):
"""Authenticate with Twitch"""
config = get_config()
if (config['oauth'] != '') and (not force):
print('You are already authenticated.')
return
token = authenticate()
if token != '':
config['oauth'] = token
save_config()
print('Authentication complete.')
else:
print('Authentication cancelled.')
def get_available_streams(url):
command = 'streamlink -j {}'.format(url)
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
j_out = json.loads(output.decode())
streams = []
for stream in j_out['streams']:
streams.append(stream)
return streams
def play_url(url, quality=None):
if quality is None:
quality = ''
command = 'streamlink {} {} {}'.format(url, quality, "--twitch-low-latency --twitch-disable-ads")
process = subprocess.Popen(command.split(), stdout=None, stderr=None)
output, error = process.communicate()
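# Example (hypothetical channel and quality): with quality="best" the command built
# above is roughly: streamlink twitch.tv/somechannel best --twitch-low-latency --twitch-disable-ads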
def play_stream(channel, quality=None):
"""Load a stream and open the player"""
channel_id = get_channel_id(channel)
if channel_id is None:
print('The channel "{}" does not exist'.format(channel))
return
play_url('twitch.tv/{}'.format(channel), quality=quality)
def list_streams(game=None, flat=False, playback_quality=None):
"""Load the list of streams and prompt the user to chose one."""
config = get_config()
if config['oauth'] == '':
print('You have to provide a Twitch OAuth token to list followed '
'streams.')
print('Run "{} auth" to authenticate.'.format(sys.argv[0]))
sys.exit(1)
if game is not None:
streams = helix_get_streams(game)
else:
streams = helix_get_streams()
if streams is None:
print('Something went wrong while trying to fetch data from the '
'Twitch API')
sys.exit(1)
elif len(streams) == 0:
print('No streams online now')
return
print_stream_list(streams, title='Streams online now', flat=flat)
if not flat:
selection = input('Stream ID: ')
try:
selection = int(selection)
except ValueError:
return
else:
return
if not (0 < selection <= len(streams)):
return
play_stream(streams[selection - 1]['user_name'], quality=playback_quality)
def list_vods(channel, flat, playback_quality=None):
vods = get_channel_vods(channel)
if vods is None:
return
elif len(vods) == 0:
print('No recent VODs for {}'.format(channel))
return
print_vod_list(vods, title='{}\'s recent VODs'.format(channel))
if not flat:
selection = input('VOD ID: ')
try:
selection = int(selection)
except ValueError:
return
if (0 < selection <= len(vods)):
play_url(vods[selection-1]['url'], quality=playback_quality)
def get_channel_vods(channel):
config = get_config()
user_id = get_channel_id(channel)
if user_id is None:
print('The channel "{}" does not exist'.format(channel))
return
query = { 'user_id' : user_id }
url = 'https://api.twitch.tv/helix/videos?{}'.format(urlencode(query))
headers = {
'client-id': TWITCH_CLIENT_ID,
'Authorization': 'Bearer {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
if 'data' not in response:
return None
return response['data']
def print_stream_list(streams, title=None, flat=False):
if title and not flat:
print(title)
print('')
if flat:
format = '{1[user_name]}'
else:
ind_len = len(str(len(streams)))
bullet = '{0: >' + str(ind_len + 2) + 's}'
display_name = '{1[user_name]}'
status = '{1[title]}'
game = '{1[game_name]}'
viewers = '[{1[viewer_count]} viewers]'
format = (colored(bullet + ' ', 'light_red')
+ colored(display_name + ': ', 'light_blue', attrs=['bold'])
+ colored(game + ' ', 'light_yellow')
+ colored(viewers + '\n', 'light_green')
+ (' ' * (ind_len + 3))
+ colored(status + '\n', 'light_grey'))
i = 1
for stream in streams:
print(format.format('[' + str(i) + ']', stream))
i += 1
def print_vod_list(vods, title=None, flat=False):
if title and not flat:
print(title)
print('')
if flat:
format = '{1[url]}'
else:
ind_len = len(str(len(vods)))
bullet = '{0: >' + str(ind_len + 2) + 's}'
title = '{1[title]}'
duration= 'Duration: {1[duration]}'
date = 'Recorded: {1[created_at]}'
format = (colored(bullet + ' ', 'light_red')
+ colored(title + '\n', 'light_blue', attrs=['bold'])
+ (' ' * (ind_len + 3))
+ colored(date + '\n', 'light_grey',)
+ (' ' * (ind_len + 3))
+ colored(duration + '\n', 'light_grey'))
i = 1
for vod in vods:
print(format.format('[' + str(i) + ']', vod))
i += 1
# def follow_channel(channel):
# own_id = get_own_channel_id()
# channel_id = get_channel_id(channel)
# if channel_id is None:
# print('The channel "{}" does not exist'.format(channel))
# return
# data = '{{"from_id": "{}","to_id": "{}"}}' .format(own_id, channel_id)
# url = 'users/follows'
# response = helixapi_request(url, method='post', data=data)
# print('You now follow {}'.format(channel))
# def unfollow_channel(channel):
# own_id = get_own_channel_id()
# channel_id = get_channel_id(channel)
# if channel_id is None:
# print('The channel "{}" does not exist'.format(channel))
# return
# query = {
# 'from_id' : own_id,
# 'to_id' : channel_id
# }
# url = 'users/follows?{}'.format(urlencode(query))
# response = helixapi_request(url, method='delete')
# print('You don\'t follow {} anymore'.format(channel))
def get_own_channel_id():
url = 'users'
response = helixapi_request(url)
return response['data'][0]['id']
def get_channel_id(name):
query = { 'login': name }
url = 'users?{}'.format(urlencode(query))
response = helixapi_request(url)
if not response['data']:
return None
return response['data'][0]['id']
def helix_user_follows():
config = get_config()
own = get_own_channel_id()
url = 'https://api.twitch.tv/helix/users/follows?from_id={}&first=100' .format(int(own))
headers = {
'client-id': TWITCH_CLIENT_ID,
'Authorization': 'Bearer {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
if response['total'] == 0:
return None
ids=''
for id_ in response['data']:
ids = ids + 'user_id=' + id_['to_id'] + '&'
return ids[:-1]
def helix_get_streams(game=''):
config = get_config()
games = helix_get_games(game)
user_follows = helix_user_follows()
url = 'https://api.twitch.tv/helix/streams?{}' .format(user_follows + games)
headers = {
'client-id': TWITCH_CLIENT_ID,
'Authorization': 'Bearer {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
flag = not np.any(response['data'])
if flag:
print("No followed streamers are live.")
sys.exit(1)
if 'user_name' not in response['data'][0]:
return None
return response['data']
def helix_get_games(game=''):
if game == '':
return ''
config = get_config()
query = { 'query': game }
url = 'https://api.twitch.tv/helix/search/categories?{}' .format(urlencode(query))
headers = {
'client-id': TWITCH_CLIENT_ID,
'Authorization': 'Bearer {}'.format(config['oauth'])
}
request = requests.get(url, headers=headers)
response = request.json()
flag = not np.any(response['data'])
if flag:
return None
if 'name' not in response['data'][0]:
return None
ids=''
for id_ in response['data']:
ids = ids + 'game_id=' + id_['id'] + '&'
return ids[:-1]
def authenticate():
query = {
'response_type': 'token',
'client_id': TWITCH_CLIENT_ID,
'redirect_uri': 'https://butt4cak3.github.io/twitch-cli/oauth.html',
'scope': 'user:edit:follows'
}
url = ('https://id.twitch.tv/oauth2/authorize?{}'
.format(urlencode(query, safe=':/-')))
try:
if not webbrowser.open_new_tab(url):
raise webbrowser.Error
except webbrowser.Error:
print('Couldn\'t open a browser. Open this URL in your browser to '
'continue: ')
print(url)
return
token = input('OAuth token: ')
return token.strip()
def helixapi_request(url, method='get', data=None):
config = get_config()
url = 'https://api.twitch.tv/helix/' + url
headers = {
'Authorization': 'Bearer {}'.format(config['oauth']),
'Client-ID': TWITCH_CLIENT_ID
}
if method == 'get':
request = requests.get(url, headers=headers)
elif method == 'post':
headers['Content-Type'] = 'application/json'
request = requests.post(url, headers=headers, data=data)
elif method == 'delete':
request = requests.delete(url, headers=headers)
try:
data = request.json()
except ValueError:
print(request.text)
return None
# A 'status' key in the response indicates an API error (e.g. an expired token).
if 'status' not in data:
return data
print("OAuth Token has expired. Please run 'auth --force' to generate a new one.")
sys.exit(1)
if __name__ == '__main__':
main()
| 2.515625
| 3
|
tests/settings.py
|
bodgerbarnett/django-rest-email-manager
| 0
|
12776519
|
<gh_stars>0
SECRET_KEY = "fake-key"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"rest_email_manager",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
},
]
ROOT_URLCONF = "tests.urls"
REST_EMAIL_MANAGER = {
"EMAIL_VERIFICATION_URL": "https://example.com/verify/{key}"
}
| 1.421875
| 1
|
bin/commands/corpus.py
|
davidmcclure/open-syllabus-project
| 220
|
12776520
|
import os
import click
import csv
import random
import sys
from osp.common import config
from osp.common.utils import query_bar
from osp.corpus.corpus import Corpus
from osp.corpus.models import Document
from osp.corpus.models import Document_Format
from osp.corpus.models import Document_Text
from osp.corpus.jobs import ext_format
from osp.corpus.jobs import ext_text
from peewee import create_model_tables
from prettytable import PrettyTable
@click.group()
def cli():
pass
@cli.command()
def init_db():
"""
Create the database tables.
"""
create_model_tables([
Document,
Document_Format,
Document_Text
], fail_silently=True)
@cli.command()
def insert_documents():
"""
Insert documents in the database.
"""
Document.insert_documents()
@cli.command()
def queue_format():
"""
Queue format extraction tasks in the worker.
"""
for doc in query_bar(Document.select()):
config.rq.enqueue(ext_format, doc.id)
@cli.command()
def queue_text():
"""
Queue text extraction tasks in the worker.
"""
for doc in query_bar(Document.select()):
config.rq.enqueue(ext_text, doc.id)
@cli.command()
def format_counts():
"""
Print a table of file format -> count.
"""
t = PrettyTable(['File Type', 'Doc Count'])
t.align = 'l'
for c in Document_Format.format_counts():
t.add_row(c)
click.echo(t)
@cli.command()
def file_count():
"""
Print the total number of files.
"""
corpus = Corpus.from_env()
click.echo(corpus.file_count)
| 2.1875
| 2
|
integration_tests/python_tests/test_monitoring.py
|
redata-team/dbt_re_data
| 50
|
12776521
|
import os
import copy
import yaml
from datetime import datetime, timedelta
from .utils.run import dbt_seed, dbt_run, dbt_test, dbt_command
RUN_TIME = datetime(2021, 5, 2, 0, 0, 0)
DBT_VARS = {
're_data:time_window_start': (RUN_TIME - timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S"),
're_data:time_window_end': RUN_TIME.strftime("%Y-%m-%d %H:%M:%S"),
}
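# Illustration (derived from RUN_TIME above, not in the original test): the vars
# passed to dbt below cover a one-day window, roughly
# re_data:time_window_start: '2021-05-01 00:00:00'
# re_data:time_window_end: '2021-05-02 00:00:00'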
def test_monitoring(db):
load_deps = 'dbt deps'
assert os.system(load_deps) == 0
dbt_vars = copy.deepcopy(DBT_VARS)
print (f"Running setup and tests for {db}")
dbt_seed('--vars "{}"'.format(yaml.dump(dbt_vars)), db)
dbt_run('--models transformed', db)
print (f"Computing re_data metrics for {db}")
dbt_run('--exclude transformed --vars "{}"'.format(yaml.dump(dbt_vars)), db)
# update dbt_vars to run dbt for the next day of data
dbt_vars['re_data:time_window_start'] = dbt_vars['re_data:time_window_end']
dbt_vars['re_data:time_window_end'] = (RUN_TIME + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S")
dbt_command(
'dbt run --exclude transformed --fail-fast --vars "{}"'.format(yaml.dump(dbt_vars)),
db
)
dbt_test('--vars "{}"'.format(yaml.dump(dbt_vars)), db)
op_vars = {
'start_date': RUN_TIME.strftime("%Y-%m-%d"),
'end_date': (RUN_TIME + timedelta(days=1)).strftime("%Y-%m-%d"),
'interval': 'days:1',
}
dbt_command(
'dbt run-operation generate_overview --args "{}"'.format(yaml.dump(op_vars)),
db, common_args=''
)
print (f"Running tests completed for {db}")
| 2.265625
| 2
|
pysph/tools/pysph_to_vtk.py
|
nauaneed/pysph
| 293
|
12776522
|
<reponame>nauaneed/pysph
''' convert pysph .npz output to vtk file format '''
from __future__ import print_function
import os
import re
from enthought.tvtk.api import tvtk, write_data
from numpy import array, c_, ravel, load, zeros_like
def write_vtk(data, filename, scalars=None, vectors={'V':('u','v','w')}, tensors={},
coords=('x','y','z'), dims=None, **kwargs):
''' write data in to vtk file
Parameters
----------
data : dict
mapping of variable names to their numpy arrays
filename : str
the file to write to (can be any recognized vtk extension)
if the extension is missing, a .vts extension is appended
scalars : list
list of arrays to write as scalars (defaults to data.keys())
vectors : dict
mapping of vector name to vector component names to take from data
tensors : dict
mapping of tensor name to tensor component names to take from data
coords : list
the name of coordinate data arrays (default=('x','y','z'))
dims : 3 tuple
the size along each dimension (None means x.shape)
**kwargs : extra arguments for the file writer
example file_type=binary/ascii
'''
x = data[coords[0]]
y = data.get(coords[1], zeros_like(x))
z = data.get(coords[2], zeros_like(x))
if dims is None:
dims = array([1,1,1])
dims[:x.ndim] = x.shape
else:
dims = array(dims)
sg = tvtk.StructuredGrid(points=c_[x.flat,y.flat,z.flat],dimensions=array(dims))
pd = tvtk.PointData()
if scalars is None:
scalars = [i for i in data.keys() if i not in coords]
for v in scalars:
pd.scalars = ravel(data[v])
pd.scalars.name = v
sg.point_data.add_array(pd.scalars)
for vec,vec_vars in vectors.items():
u,v,w = [data[i] for i in vec_vars]
pd.vectors = c_[ravel(u),ravel(v),ravel(w)]
pd.vectors.name = vec
sg.point_data.add_array(pd.vectors)
for ten,ten_vars in tensors.items():
vars = [data[i] for i in ten_vars]
tensors = c_[[ravel(i) for i in vars]].T
pd.tensors = tensors
pd.tensors.name = ten
sg.point_data.add_array(pd.tensors)
write_data(sg, filename, **kwargs)
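# Usage sketch (hypothetical arrays, not part of the original module):
# import numpy as np
# data = {'x': np.linspace(0, 1, 10), 'y': np.zeros(10), 'z': np.zeros(10),
# 'u': np.ones(10), 'v': np.zeros(10), 'w': np.zeros(10), 'rho': np.ones(10)}
# write_vtk(data, 'particles.vts', scalars=['rho'], data_mode='ascii')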
def detect_vectors_tensors(keys):
''' detect the vectors and tensors from given array names
Vectors are identified as the arrays with common prefix followed by
0,1 and 2 in their names
Tensors are identified as the arrays with common prefix followed by
two character codes representing ij indices
(00,01,02,11,12,22) for a symmetric tensor
(00,01,02,10,11,12,20,21,22) for a tensor
Arrays not belonging to vectors or tensors are returned as scalars
Returns scalars,vectors,tensors in a format suitable to be used as arguments
for :py:func:`write_vtk`
'''
d = {}
for k in keys:
d[len(k)] = d.get(len(k), [])
d[len(k)].append(k)
scalars = []
vectors = {}
tensors = {}
for n,l in d.items():
if n<2:
continue
l.sort()
idx = -1
while idx<len(l)-1:
idx += 1
k = l[idx]
# check if last char is 0
if k[-1] == '0':
# check for tensor
if k[-2] == '0':
# check for 9 tensor
ten = []
for i in range(3):
for j in range(3):
ten.append(k[:-2]+str(j)+str(i))
ten.sort()
if l[idx:idx+9] == ten:
tensors[k[:-2]] = ten
idx += 8
continue
# check for symm 6 tensor
ten2 = []
for i in range(3):
for j in range(i+1):
ten2.append(k[:-2]+str(j)+str(i))
ten2.sort()
if l[idx:idx+6] == ten2:
ten = []
for i in range(3):
for j in range(3):
ten.append(k[:-2]+str(min(i,j))+str(max(i,j)))
tensors[k[:-2]] = ten
idx += 5
continue
# check for vector
vec = []
for i in range(3):
vec.append(k[:-1] + str(i))
if l[idx:idx+3] == vec:
vectors[k[:-1]] = vec
idx += 2
continue
scalars.append(k)
return scalars, vectors, tensors
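# Illustration of the detection (mirrors test() at the bottom of this module):
# detect_vectors_tensors(['au0', 'au1', 'au2', 'rho']) returns
# (['rho'], {'au': ['au0', 'au1', 'au2']}, {}) -- 'au0'..'au2' form a vector and
# 'rho' remains a scalar.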
def get_output_details(path):
solvers = {}
if not os.path.isdir(path):
path = os.path.dirname(path)
files = os.listdir(path)
files.sort()
pat = re.compile(r'(?P<solver>.+)_(?P<rank>\d+)_(?P<entity>.+)_(?P<time>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?).npz')
matches = [(f,pat.match(f)) for f in files]
files = []
for filename,match in matches:
if match is None:
continue
files.append(filename)
groups = match.groupdict()
solvername = groups['solver']
solver = solvers.get(solvername)
if solver is None:
solver = [set([]),set([]),set([])]
solvers[solvername] = solver
solver[0].add(groups['rank'])
solver[1].add(groups['entity'])
solver[2].add(groups['time'])
# {solver:(entities,procs,times)}
return solvers
def pysph_to_vtk(path, merge_procs=False, skip_existing=True, binary=True):
''' convert pysph output .npz files into vtk format
Parameters
----------
path : str
directory where .npz files are located
merge_procs : bool
whether to merge the data from different procs into a single file
(not yet implemented)
skip_existing : bool
skip files where corresponding vtk already exist
this is useful if you've converted vtk files while a solver is running
and only want to convert the newly added files
binary : bool
whether to use binary format in vtk file
The output vtk files are stored in a directory `solver_name` _vtk within
the `path` directory
'''
if binary:
data_mode = 'binary'
else:
data_mode = 'ascii'
if merge_procs is True:
# FIXME: implement
raise NotImplementedError('merge_procs=True not implemented yet')
solvers = get_output_details(path)
for solver, (procs, entities, times) in solvers.items():
print('converting solver:', solver)
dir = os.path.join(path,solver+'_vtk')
if not os.path.exists(dir):
os.mkdir(dir)
procs = sorted(procs)
entities = sorted(entities)
times = sorted(times, key=float)
times_file = open(os.path.join(dir,'times'), 'w')
for entity in entities:
print(' entity:', entity)
for proc in procs:
print(' proc:', proc)
print(' timesteps:', len(times))
f = '%s_%s_%s_'%(solver,proc,entity)
of = os.path.join(dir,f)
for i, time in enumerate(times):
print('\r',i,)
if skip_existing and os.path.exists(f+str(i)):
continue
d = load(os.path.join(path, f+time+'.npz'))
arrs = {}
for nam,val in d.items():
if val.ndim > 0:
arrs[nam] = val
d.close()
scalars, vectors, tensors = detect_vectors_tensors(arrs)
vectors['V'] = ['u','v','w']
z = zeros_like(arrs['x'])
if 'v' not in arrs:
arrs['v'] = z
if 'w' not in arrs:
arrs['w'] = z
write_vtk(arrs, of+str(i),
scalars=scalars, vectors=vectors, tensors=tensors,
data_mode=data_mode)
times_file.write('%d\t%s\n'%(i,time))
times_file.close()
def extract_text(path, particle_idx, props=['x','y','u','v','p','rho','sigma00','sigma01','sigma11'], ent=None, solvers=None):
if solvers:
raise NotImplementedError
else:
solvers = get_output_details(path)
for solver, (procs, entities, times) in solvers.items():
print('converting solver:', solver)
dir = os.path.join(path,solver+'_vtk')
if not os.path.exists(dir):
os.mkdir(dir)
procs = sorted(procs)
entities = sorted(entities)
times = sorted(times, key=float)
times_file = open(os.path.join(dir,'times'), 'w')
e = ent
if ent is None:
e = entities
for entity in entities:
if entity not in e:
continue
print(' entity:', entity)
for proc in procs:
print(' proc:', proc)
print(' timesteps:', len(times))
f = '%s_%s_%s_'%(solver,proc,entity)
of = os.path.join(dir,f)
files = [open(os.path.join(path,f+'%d.dat'%particle_id), 'w') for particle_id in particle_idx]
print(files)
for file in files:
file.write('i\tt\t'+'\t'.join(props))
for i, time in enumerate(times):
print('\r',i,)
d = load(os.path.join(path, f+time+'.npz'))
s = '\n%d\t%s'%(i,time)
for j,file in enumerate(files):
file.write(s)
for prop in props:
file.write('\t')
file.write(str(d[prop][particle_idx[j]]))
d.close()
for file in files:
file.close()
def test():
l = ['x'+str(i) for i in range(3)]
l.append('a0')
l.append('a1')
for i in range(3):
for j in range(3):
if i == j:
l.append('XX%d'%i)
if i <= j:
l.append('S%d%d'%(i,j))
l.append('T%d%d'%(i,j))
scalars, vectors, tensors = detect_vectors_tensors(l)
assert set(scalars) == set(['a0','a1'])
assert set(vectors) == set(['x','XX'])
assert set(tensors) == set(['S','T'])
if __name__ == '__main__':
import sys
pysph_to_vtk(path=sys.argv[1])
| 2.625
| 3
|
london_air_quality/__init__.py
|
robmarkcole/London-Air-Quality
| 1
|
12776523
|
<reponame>robmarkcole/London-Air-Quality<gh_stars>1-10
from datetime import timedelta
import requests
from typing import List, Dict, Tuple
AUTHORITIES = [
"<NAME>",
"Barnet",
"Bexley",
"Brent",
"Bromley",
"Camden",
"City of London",
"Croydon",
"Ealing",
"Enfield",
"Greenwich",
"Hackney",
"Hammersmith and Fulham",
"Haringey",
"Harrow",
"Havering",
"Hillingdon",
"Hounslow",
"Islington",
"Kensington and Chelsea",
"Kingston",
"Lambeth",
"Lewisham",
"Merton",
"Newham",
"Redbridge",
"Richmond",
"Southwark",
"Sutton",
"Tower Hamlets",
"Waltham Forest",
"Wandsworth",
"Westminster",
]
LAQ_HOURLY_URL = (
"http://api.erg.kcl.ac.uk/AirQuality/Hourly/MonitoringIndex/GroupName=London/Json"
)
TIMEOUT = 10
class LondonAirQualityException(Exception):
pass
def request_data(url: str, timeout: int = TIMEOUT) -> Dict:
"""
Request data from a URL and return valid data as dictionary.
"""
try:
        response = requests.get(url, timeout=timeout)
if response.status_code == 200:
return response.json()
else:
raise LondonAirQualityException(
f"Status code {response.status_code} returned from {url}"
)
except requests.exceptions.Timeout:
raise LondonAirQualityException(
f"Request timeout, current timeout is {timeout} seconds"
)
except requests.exceptions.ConnectionError as exc:
raise LondonAirQualityException(f"Internet connection error: {exc}")
def parse_hourly_response(hourly_response: Dict) -> Dict:
"""
Return hourly response data to index by Borough.
Allows filtering authorities with no data, and cleans up some data structure.
"""
data = dict.fromkeys(AUTHORITIES)
for authority in AUTHORITIES:
try:
for entry in hourly_response["HourlyAirQualityIndex"]["LocalAuthority"]:
if entry["@LocalAuthorityName"] == authority:
if isinstance(entry["Site"], dict):
entry_sites_data = [entry["Site"]]
else:
entry_sites_data = entry["Site"]
data[authority] = parse_site(entry_sites_data)
except Exception:
data[authority] = {}
return data
def parse_species(species_data: List[Dict]) -> Tuple[List[Dict], List[str]]:
"""Iterate over list of species at each site."""
parsed_species_data = []
quality_list = []
for species in species_data:
if species["@AirQualityBand"] != "No data":
species_dict = {}
species_dict["description"] = species["@SpeciesDescription"]
species_dict["code"] = species["@SpeciesCode"]
species_dict["quality"] = species["@AirQualityBand"]
species_dict["index"] = species["@AirQualityIndex"]
species_dict["summary"] = (
species_dict["code"] + " is " + species_dict["quality"]
)
parsed_species_data.append(species_dict)
quality_list.append(species_dict["quality"])
return parsed_species_data, quality_list
def parse_site(entry_sites_data: List[Dict]) -> List[Dict]:
"""Iterate over all sites at an local authority and tidy the data."""
authority_data = []
for site in entry_sites_data:
site_data = {}
species_data = []
site_data["updated"] = site["@BulletinDate"]
site_data["latitude"] = site["@Latitude"]
site_data["longitude"] = site["@Longitude"]
site_data["site_code"] = site["@SiteCode"]
site_data["site_name"] = site["@SiteName"].split("-")[-1].lstrip()
site_data["site_type"] = site["@SiteType"]
if isinstance(site["Species"], dict):
species_data = [site["Species"]]
else:
species_data = site["Species"]
parsed_species_data, quality_list = parse_species(species_data)
if not parsed_species_data:
parsed_species_data.append("no_species_data")
site_data["pollutants"] = parsed_species_data
if quality_list:
site_data["pollutants_status"] = max(
set(quality_list), key=quality_list.count
)
site_data["number_of_pollutants"] = len(quality_list)
else:
site_data["pollutants_status"] = "no_species_data"
site_data["number_of_pollutants"] = 0
authority_data.append(site_data)
return authority_data
def get_hourly_data_flat(hourly_data: Dict) -> List[Dict]:
all_data = []
for authority in hourly_data.keys():
for site in hourly_data[authority]:
for pollutant in site["pollutants"]:
try:
pollutant["borough"] = authority
pollutant["site_code"] = site["site_code"]
pollutant["site_name"] = site["site_name"]
pollutant["latitude"] = site["latitude"]
pollutant["longitude"] = site["longitude"]
pollutant["updated"] = site["updated"]
all_data.append(pollutant)
                except (KeyError, TypeError):
                    # skip entries without usable data, e.g. the
                    # "no_species_data" placeholder string
                    pass
return all_data
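# --- Usage sketch (not part of the original module): fetching live data needs
# --- network access to the London Air Quality API used above.
if __name__ == "__main__":
    hourly = request_data(LAQ_HOURLY_URL)
    by_borough = parse_hourly_response(hourly)
    flat = get_hourly_data_flat(by_borough)
    print(f"{len(flat)} pollutant readings across {len(by_borough)} boroughs")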
| 3.3125
| 3
|
other/dingding/dingtalk/api/rest/OapiEduFaceSearchRequest.py
|
hth945/pytest
| 0
|
12776524
|
'''
Created by auto_sdk on 2020.01.09
'''
from dingtalk.api.base import RestApi
class OapiEduFaceSearchRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.class_id = None
self.height = None
self.synchronous = None
self.url = None
self.userid = None
self.width = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.edu.face.search'
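# --- Usage sketch (illustrative values only; actually sending the request needs
# --- valid DingTalk credentials and the SDK's request-execution machinery).
if __name__ == '__main__':
    req = OapiEduFaceSearchRequest()
    req.class_id = 123456
    req.userid = 'some_userid'
    req.url = 'https://example.com/face.jpg'
    req.width, req.height = 640, 480
    print(req.getHttpMethod(), req.getapiname())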
| 1.734375
| 2
|
evaluate_doe.py
|
mattshax/design_tools
| 1
|
12776525
|
<filename>evaluate_doe.py
import sys,json
import subprocess
sys.path.append("models")
from model_simple import f_xy
sys.path.append("utils")
from radar import createRadar
from sensitivity import createSensitivity
f = open('inputs_doe.csv')
inputs = f.readlines()
f.close()
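# inputs_doe.csv is expected to have a header row naming the model inputs,
# followed by one row per design point, e.g. (illustrative values only):
#   x,y
#   0.5,1.0
#   1.5,2.0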
input_labels=['x','y']
output_labels=['z']
allInputs=[]
allOutputs=[]
headers=[]
for i,input in enumerate(inputs):
if i==0:
headers=[xx.strip() for xx in input.split(',')]
continue
print(i,'/',len(inputs)-1)
idata = [xx.strip() for xx in input.split(',')]
for ii,id in enumerate(idata):
exec("{}={}".format(headers[ii],float(id)))
    # evaluate the model
z = f_xy(x,y)
inp=[x,y]
outp=[z]
print('inputs',inp)
print('outputs',outp)
allInputs.append(inp)
allOutputs.append(outp)
# postprocessing of the results
allData = []
row=[]
for i in input_labels:
row.append('in:'+i)
for o in output_labels:
row.append('out:'+o)
row.append('img:plot')
allData.append(",".join(row))
# get all the data organized for min/max values
allDataHash={}
for ir,r in enumerate(allInputs):
for ii,i in enumerate(allInputs[ir]):
try:
allDataHash[input_labels[ii]].append(i)
except:
allDataHash[input_labels[ii]] = []
allDataHash[input_labels[ii]].append(i)
for io,o in enumerate(allOutputs[ir]):
try:
allDataHash[output_labels[io]].append(o)
except:
allDataHash[output_labels[io]] = []
allDataHash[output_labels[io]].append(o)
# create the rows
print("")
for ir,r in enumerate(allInputs):
print("plotting",ir+1,'/',len(allInputs)-1)
row=[]
headers=[]
for ii,i in enumerate(allInputs[ir]):
row.append((i))
headers.append(input_labels[ii])
for io,o in enumerate(allOutputs[ir]):
row.append((o))
headers.append(output_labels[io])
# make the radar plots
plot = createRadar(str(ir),row,headers,allDataHash)
#plot = 'plots/case_'+str(ir)+'.png'
row.append(plot)
allData.append(",".join([str(x) for x in row]))
# generate the output csv data
f = open('outputs_doe.csv','w')
f.write("\n".join(allData))
f.close()
# generate the sensitivity plot
createSensitivity('outputs_doe.csv',input_labels,output_labels)
| 2.34375
| 2
|
osirisdata/urls.py
|
KolibriSolutions/BepMarketplace
| 1
|
12776526
|
# Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from django.conf.urls import url
from . import views
app_name = 'osirisdata'
urlpatterns = [
url('^list/$', views.listOsiris, name='list'),
url('^tometa/$', views.osirisToMeta, name='tometa'),
]
| 1.171875
| 1
|
todoapi/migrations/0004_auto_20190129_2103.py
|
Faysa1/Gestion-Tickets-Taches
| 0
|
12776527
|
# Generated by Django 2.1.4 on 2019-01-29 15:33
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('todoapi', '0003_todolist_taskid'),
]
operations = [
migrations.RemoveField(
model_name='todolist',
name='id',
),
migrations.AlterField(
model_name='todolist',
name='taskid',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
]
| 1.5625
| 2
|
argdispatch.py
|
nazavode/argdispatch
| 1
|
12776528
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Improved single dispatch decorator with custom dispatch argument.
"""
import functools
import inspect
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2015 <NAME>'
__license__ = 'Apache License Version 2.0'
__version__ = '0.1.0'
def argdispatch(argument=None):
""" Type dispatch decorator that allows dispatching on a custom argument.
Parameters
----------
argument : str
The symbolic name of the argument to be considered for type dispatching.
Defaults to ``None``. When ``None``, the decorator acts exactly like the
standard ``functools.singledispatch``.
Returns
-------
callable
The dispatch closure.
"""
# Define dispatch argument:
dispatch_arg_name = argument
def dispatch_decorator(func):
"""Dispatch closure decorator."""
# Apply std decorator:
dispatcher = functools.singledispatch(func)
# Cache wrapped signature:
wrapped_signature = inspect.signature(func)
# Check argument correctness
if dispatch_arg_name is not None and \
dispatch_arg_name not in wrapped_signature.parameters:
raise ValueError('unknown dispatch argument specified')
def wrapper(*args, **kwargs):
"""Dispatch function wrapper."""
if dispatch_arg_name is None:
discriminator = args[0].__class__ # mimic functools.singledispatch behaviour
else:
bound_args = wrapped_signature.bind(*args, **kwargs).arguments
if dispatch_arg_name not in bound_args:
# ...with the new register this should be dead code.
raise TypeError('registered method mismatch')
discriminator = bound_args[dispatch_arg_name].__class__
return dispatcher.dispatch(discriminator)(*args, **kwargs)
def register(cls, reg_func=None):
""" Registration method replacement.
Ensures that situations like the following never happen:
>>> @argdispatch('c')
... def test(a, obj, b=None, c=None):
... pass
...
>>> @test.register(int)
... def _(a, obj):
... pass
>>>
>>> test(1, 2) # ----> TypeError
"""
if reg_func is not None:
# Check signature match:
reg_sig = inspect.signature(reg_func)
if reg_sig != wrapped_signature:
raise TypeError('registered method signature mismatch')
return dispatcher.register(cls, reg_func)
wrapper.register = register
functools.update_wrapper(wrapper, func)
return wrapper
return dispatch_decorator
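# --- Minimal usage sketch (not part of the original module; the names below are
# --- illustrative). Registered implementations must keep the wrapped signature.
if __name__ == '__main__':
    @argdispatch('payload')
    def process(ctx, payload=None):
        return 'generic'
    def _process_int(ctx, payload=None):
        return 'int: {}'.format(payload)
    process.register(int, _process_int)
    assert process(None, payload=42) == 'int: 42'
    assert process(None, payload='text') == 'generic'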
| 2.359375
| 2
|
s/index_gen.py
|
andiac/compass
| 1
|
12776529
|
<reponame>andiac/compass
import os
f = os.popen("ls");
file_list = f.read().split("\n");
print '''
<!-- Here you can add your Google Analytics Tracking code. If you do so, do not
forget to set the include_analytics attribute to true on the _config.yml file -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-68639768-1', 'auto');
ga('send', 'pageview');
</script>
'''
for one_file in file_list:
print '<p><a href="http://andi.ac/s/'+one_file+'">'+one_file+'</a></p>'
| 2.375
| 2
|
neighbours/migrations/0004_auto_20190529_1722.py
|
lizKimita/Neighbourhoods
| 0
|
12776530
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-29 14:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighbours', '0003_auto_20190529_1717'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='Profile',
),
migrations.AlterField(
model_name='businesses',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='posts',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 1.523438
| 2
|
sequencing_analysis/genes_fpkm_tracking.py
|
dmccloskey/sequencing_analysis
| 0
|
12776531
|
<gh_stars>0
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
class genes_fpkm_tracking():
'''Helper class to parse the output from cufflinks
http://cole-trapnell-lab.github.io/cufflinks/cufflinks/index.html
'''
def __init__(self,genesFpkmTracking_I=None):
if genesFpkmTracking_I:
self.genesFpkmTracking = genesFpkmTracking_I;
else:
self.genesFpkmTracking = [];
def import_genesFpkmTracking(self,filename_I,experiment_id_I=None,sample_name_I=None):
"""import geneExpDiff
INPUT:
filename_I = input filename
OPTIONAL INPUT:
the following are optional for analyzing a single sample,
but required when analyzing multiple samples
experiment_id_I = string, name of the experiment that generated the sample
sample_name_I = string, name of the sample
"""
io = base_importData();
io.read_tab(filename_I);
genesFpkmTracking = self.format_genesFpkmTracking(io.data);
for d in genesFpkmTracking:
d['experiment_id'] = experiment_id_I;
d['sample_name'] = sample_name_I;
self.genesFpkmTracking = genesFpkmTracking;
def export_genesFpkmTracking(self,filename_O):
"""export genesFpkmTracking"""
io = base_exportData(self.genesFpkmTracking);
io.write_dict2csv(filename_O);
def format_genesFpkmTracking(self,fpkmTracking_I):
"""formats raw string input into their appropriate values"""
for fpkmTracking in fpkmTracking_I:
if 'FPKM' in fpkmTracking and type(fpkmTracking['FPKM'])==type('string'):
fpkmTracking['FPKM'] = eval(fpkmTracking['FPKM']);
if 'FPKM_conf_lo' in fpkmTracking and type(fpkmTracking['FPKM_conf_lo'])==type('string'):
fpkmTracking['FPKM_conf_lo'] = eval(fpkmTracking['FPKM_conf_lo']);
if 'FPKM_conf_hi' in fpkmTracking and type(fpkmTracking['FPKM_conf_hi'])==type('string'):
fpkmTracking['FPKM_conf_hi'] = eval(fpkmTracking['FPKM_conf_hi']);
if 'length' in fpkmTracking and type(fpkmTracking['length'])==type('string'):#length coverage
if fpkmTracking['length'] == '-':
fpkmTracking['length'] = None;
else:
fpkmTracking['length'] = eval(fpkmTracking['length']);
if 'coverage' in fpkmTracking and type(fpkmTracking['coverage'])==type('string'):#coverage coverage
if fpkmTracking['coverage'] == '-':
fpkmTracking['coverage'] = None;
else:
fpkmTracking['coverage'] = eval(fpkmTracking['coverage']);
return fpkmTracking_I;
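# --- Usage sketch (hypothetical file names; requires the io_utilities package and
# --- a cufflinks genes.fpkm_tracking output file):
#     tracker = genes_fpkm_tracking()
#     tracker.import_genesFpkmTracking('genes.fpkm_tracking',
#                                      experiment_id_I='exp01', sample_name_I='s01')
#     tracker.export_genesFpkmTracking('genes_fpkm_tracking.csv')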
| 2.625
| 3
|
pyEBOT.py
|
Ndnes/pyEBOT
| 0
|
12776532
|
<filename>pyEBOT.py
import discord # noqa
import os
import event
import configuration
import managedMessages
import logging
from logging import handlers
from pydantic import ValidationError
from utility import loadData, saveData, checkConfig, sendMessagePackets
from constants import Constants
from discord.ext import commands
# TODO: Decide if I want to use descriptors for arguments in funcs and meths.
# example def doSomething(arg1: str, arg2: int)
# TODO: Add exception to member only requirement for daymar rally if participants are
# in collaborators
# TODO: Check if await fetch_guild can be replaced by using self.client.get_guild
# TODO: Check if self.client.get_channel is used anywhere and replace with guild.get_channel
if __name__ == "__main__":
"""Instanciate the discord.py client/bot and load event data if it exists.
"""
# Generate intents (required as of v1.5)
intents = discord.Intents.default()
intents.members = True
    # Instantiate the client and set the command prefix.
client = commands.Bot(Constants.CMD_PREFIX, intents=intents) # TODO: Make it possible to change prefix with cmd.
# Remove the default help command.
# client.remove_command('help')
# Setup the logger
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG) # TODO: Set apropriate logging level. (INFO)
handler = handlers.RotatingFileHandler(
filename=Constants.LOG_FILENAME,
mode='a', # Append mode? #TODO: Verify
maxBytes=8*1024*1024, # Max size is 8MB
backupCount=1,
encoding='utf-8'
)
handler.setFormatter(
logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')
)
logger.addHandler(handler)
client.logger = logger
    # TODO: Clean up wet code. Deserialization should be done in a function.
# Deserialize configuration data.
configData = loadData(Constants.CONFIG_DATA_FILENAME)
if configData is None:
client.logger.info('Config data not found.')
client.config = configuration.Configuration()
configData = client.config.json(indent=2)
saveData(Constants.CONFIG_DATA_FILENAME, configData)
else:
try:
# Attempt to parse persistent config data to config.
client.config = configuration.Configuration.parse_obj(
configData
)
client.logger.info(
'Config data successfully parsed.'
)
except ValidationError as e:
client.logger.warning(
'Exception thrown, error message is as follows:\n'
f'{e}\n'
'Config data was found, but could not be loaded. '
'Starting clean'
)
client.config = configuration.Configuration()
configData = client.config.json(indent=2)
saveData(Constants.CONFIG_DATA_FILENAME, configData)
# Deserialize orgEvent data.
eventData = loadData(Constants.EVENT_DATA_FILENAME)
# If eventData does not exist client.orgEvents will be
# initialized cleanly.
if eventData is None:
client.logger.info('No event record found. Starting clean.')
print('No event record found. Starting clean.')
client.orgEvents = event.OrgEvents()
eventData = client.orgEvents.json(indent=2)
saveData(Constants.EVENT_DATA_FILENAME, eventData)
else:
try:
# Attempt to parse persistent data to orgEvents.
client.orgEvents = event.OrgEvents.parse_obj(
eventData
)
client.logger.info(
'Event record successfully parsed. '
f'found {len(client.orgEvents.events)} events.'
)
print(
'Event record successfully parsed.\n'
f'Found {len(client.orgEvents.events)} events.'
)
except ValidationError as e:
client.logger.warning(
'Exception thrown, error message is as follows:\n'
f'{e}\n'
'Record was found, but could not be loaded. '
'Starting clean'
)
# TODO: Clean up wet code.
client.orgEvents = event.OrgEvents()
eventData = client.orgEvents.json(indent=2)
saveData(Constants.EVENT_DATA_FILENAME, eventData)
# Deserialize guild member data.
guildMemberData = loadData(Constants.GUILD_MEMBER_DATA_FILENAME)
if guildMemberData is None:
client.logger.info('Guild member data not found.')
client.guildMembers = event.GuildMembers()
guildMemberData = client.guildMembers.json(indent=2)
saveData(Constants.GUILD_MEMBER_DATA_FILENAME, guildMemberData)
else:
try:
client.guildMembers = event.GuildMembers.parse_obj(
guildMemberData
)
client.logger.info(
'Guild member data successfully parsed.'
)
except ValidationError as e:
client.logger.warning(
'Exception thrown, error message is as follows:\n'
f'{e}\n'
'Guild member record was found, but could not be loaded. '
)
client.guildMembers = event.GuildMembers()
guildMemberData = client.guildMembers.json(indent=2)
saveData(Constants.GUILD_MEMBER_DATA_FILENAME, guildMemberData)
messageData = loadData(Constants.MESSAGE_DATA_FILENAME)
if messageData is None:
client.logger.info('No message data found.')
print('No message data found.')
client.managedMessages = managedMessages.ManagedMessages()
messageData = client.managedMessages.json(indent=2)
saveData(Constants.MESSAGE_DATA_FILENAME, messageData)
else:
try:
client.managedMessages = managedMessages.ManagedMessages.parse_obj(
messageData
)
client.logger.info('Message data successfully parsed.')
print('Message data successfully parsed.')
except ValidationError as e:
client.logger.warning(
'Exception was thrown. Error message reads as follows:\n'
f'{e}\n'
'Message record was found, but could not be loaded.'
)
print('Message record was found, but could not be loaded.')
print(e)
# Check functions
def isAdmin(ctx):
return ctx.author.id == client.config.adminId
# Events
@client.event
async def on_ready():
client.logger.info('on_ready event triggered.')
print('Ready.')
# Commands
@client.command() # TODO: Add proper authorization checks.
@commands.check(isAdmin)
async def load(ctx, extension):
"""Load a specific cog. Functionality limited until server is configured.
:param extension: Name of the cog to be loaded.
:type extension: string
"""
remainingFields = checkConfig(client.config)
if not remainingFields or extension == 'serverConfig':
client.load_extension(f'cogs.{extension}')
await ctx.send('Cog has been loaded.')
else:
msg = (
'Cannot load cog, Please make sure bot has been configured for '
'this guild. Remaining fields to be configured are as follows:\n'
+ '\n'.join(remainingFields)
)
await ctx.send(msg)
@client.command()
@commands.check(isAdmin)
async def unload(ctx, extension):
"""Unloads a cog.
:param extension: The name of the cog to be unloaded.
:type extension: string
"""
client.unload_extension(f'cogs.{extension}')
@client.command()
@commands.check(isAdmin)
async def loadAll(ctx):
"""Load all cogs. Server must be configured before command can be invoked.
"""
remainingFields = checkConfig(client.config)
if not remainingFields:
for filename in os.listdir('./cogs'):
exclusionList = [
'__init__.py',
'experimentalCog.py', # TODO: Clean up exclusion list.
'messageWithoutCommand.py',
'asyncCog.py',
'serverConfig.py', # TODO: Check what cogs are already loaded and exclude that way.
'devTools.py'
]
if filename not in exclusionList:
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
await ctx.send('Cogs have been loaded.')
else:
msg = (
'Cannot load cogs, Please make sure bot has been configured for '
'this guild. Remaining fields to be configured are as follows:\n'
+ '\n'.join(remainingFields)
)
await sendMessagePackets(ctx, msg)
@client.command()
@commands.check(isAdmin)
async def reload(ctx, extension):
"""Reloads a cog.
:param extension: The name of the cog to load.
:type extension: string
"""
client.reload_extension(f'cogs.{extension}')
# Load cogs
remainingFields = checkConfig(client.config)
if not remainingFields:
for filename in os.listdir('./cogs'):
# Files in exclusion list will not be loaded.
exclusionList = [
'__init__.py',
'experimentalCog.py',
'messageWithoutCommand.py',
'asyncCog.py'
]
if filename not in exclusionList:
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
else:
client.load_extension('cogs.devTools')
client.load_extension('cogs.serverConfig')
# TODO: Make sure every assignments are encapsulated somehow to conform to
# sphinx documentation.
# Get client token from file.
token = loadData('token.json')
# Run client by passing in token.
client.run(token)
| 2.34375
| 2
|
python/find_significant_sequences_example.py
|
google/expt-analysis
| 5
|
12776533
|
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# author: <NAME>
""" This is an example of using the code to sequence the data"""
import sys
import os
import numpy as np
import pandas as pd
import random
import time
import datetime
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import math as math
import re as re
import inspect
import scipy as scipy
import functools
import itertools
import operator
import warnings
import json
import IPython
import hashlib
import base64
def GetContent(fn):
with open(fn, 'r') as f:
content = f.read()
return content
## specify the path for the source code
path = ''
srcFns = [path + 'expt-analysis/python/data_analysis.py',
path + 'expt-analysis/python/sequential_data.py',
path + 'expt-analysis/python/sequences_statistics_v1.py',
path + 'expt-analysis/python/sequences_statistics_v2.py',
path + 'expt-analysis/python/unit_analysis.py']
for fn in srcFns: exec(GetContent(fn=fn))
# Define a location for SQL Tables Log File (SQL tables are optional)
# and a writePath for the seq data files
# make sure these paths do exist
sqlTablesLogFile = '~/data/seq_data/seq_data_info.csv'
writePath = '~/data/seq_data/'
## define a figs path
figsPath = '~/data/seq_data/figs/'
## define a tables path for writing results tables
tablesPath = '~/data/seq_data/tables/'
# Example with simulated data demo purpose:
## step 1: simulate usage data
df = Sim_depUsageData(userNum=200, subSeqLen=4, repeatPattern=None)
## step 2: sequence the data
dataNamesList = ['test']
dataDesc = 'seq'
fnSuff = '.csv'
# trim is the length of sequences we are considering for finding significance
trim = 3
condDict = None
out = WriteSeqTable_forSql(
df=df,
seqDimCols=['prod', 'form_factor'],
partitionCols0=['user_id'],
sliceCols=['date', 'country'],
seqPropCols=['form_factor'],
timeGapMin=5,
timeCol='time',
timeColEnd='end_time',
trim=trim,
countDistinctCols=['user_id', 'seq_id'],
condDict=None,
addSeqPropMixedCols=['form_factor'],
ordered=True,
writePath=writePath,
dataNamesList=dataNamesList,
dataDesc=dataDesc,
fnSuff=fnSuff,
defineSqlTab=False,
DefineSqlTable_fromFile=DefineSqlTable_fromFile,
ExecSqlQuery=ExecSqlQuery,
sqlTablePrefix="",
timeGapDict=None,
writeTableLogFn=sqlTablesLogFile)
# run this if you have implemented SQL query execution
# and like to use SQL tables rather than files
sqlStr = out['sqlStr']
Mark(sqlStr, color='purple', bold=True)
#ExecSqlQuery(sqlStr)
## look at the info table
for fn in srcFns: exec(GetContent(fn=fn))
seqTablesDf = ReadCsv(fn=sqlTablesLogFile)
Mark(
seqTablesDf,
text='set of available sql tables for finding sequences',
color='purple',
bold=True)
## step 3: get that particular table we need using the info table
rowNum = 0
row = seqTablesDf.iloc[rowNum]
trim = row['trim']
Mark(trim, 'trim')
seqPropCols = []
if str(row['seqPropCols']) != 'nan':
seqPropCols = row['seqPropCols'].split(';')
Mark(seqPropCols, 'seqPropCols are as follows:')
seqPropCols = (
[x + '_parallel' for x in seqPropCols] +
[x + '_mix' for x in seqPropCols])
countDistinctCols = []
if str(row['countDistinctCols']) != 'nan':
countDistinctCols = row['countDistinctCols'].split(';')
Mark(countDistinctCols, 'countDistinctCols are as follows:')
Mark(seqPropCols)
sqlTableName = row['sqlTableName']
fileName = row["writePath"] + row["fileName"] + ".csv"
Mark(sqlTableName, 'This is the sql table name you requested.')
## if want to load data through file
seqDf = ReadCsv(fileName)
Mark(seqDf.shape, 'data size (seqDf.shape):')
Mark(seqDf[:2], 'example seq data:')
## if want to load data via SQL (assuming the SQL functions are implemented)
# seqDf2 = ReadSqlTable(table=sqlTableName)
# Mark(seqDf2.shape, 'data size (seqDf.shape):')
# Mark(seqDf2[:2], 'example seq data:')
## step 4: finding the sig sequences which satisfy particular conditions
sliceCols = ['country']
auxSliceCols = ['trimmed_form_factor_parallel', 'trimmed_form_factor_parallel_mix']
## calculate significance
seqDfWithSignif = AddSeqProbCiMetrics(
seqDf=seqDf.copy(),
trim=int(trim),
addCounts=True,
sliceCols=sliceCols,
auxSliceCols=auxSliceCols,
seqCol='seq',
countDistinctCols=countDistinctCols,
seqCountMin=3)
## also calculate penetration:
# need to pass penetItemCols to do that
seqDfWithSignif2 = FindSigSeq_withPenet(
seqDf=seqDf.copy(),
trim=int(trim),
seqCol='seq',
sliceCols=sliceCols,
auxSliceCols=auxSliceCols,
countDistinctCols=countDistinctCols,
penetItemCols=['user_id', 'date'],
seqCountMin=3)
condDict = {
#'country':['JP', 'US', 'FR']
#'trimmed_form_factor_parallel_mix':['COMP']
}
## a set of values to be regex
regDict = {}
plt.figure()
Mark(text="SIG PLOTS + PENETRATION PLOT", color='blue', bold=True)
sigDict = Plt_sigSeq_compareSlices(
seqDfWithSignif=seqDfWithSignif2.copy(),
sliceCols=sliceCols,
condDict=condDict,
metricCol='relative_prob2',
metricColLower='relative_prob_lower2',
metricColUpper='relative_prob_upper2',
removeBlankSeqs=True,
relativeProbLowerLim = 1.05,
orderByCol='relative_prob2',
addPenetPlots=True,
seqNumLimit = None,
rotation=0,
logScale=True,
figSize=[8, 8],
saveFig=True,
figPath=figsPath,
figFnPrefix=sqlTableName.replace('.', '_'),
figFnExt='png',
Open=OpenFile)
sigDf = sigDict['df']
if (sigDf is not None):
Mark(x=sigDf.shape, text="sigDf.shape:", color="green", bold=True)
Mark(x=sigDf[:6], text="sigDf snapshot:", color="blue", bold=True)
Write_sigSeqDf(
sigDf=sigDf,
sqlTableName=sqlTableName,
path=tablesPath,
regDict=regDict,
condDict=condDict)
else:
Mark(text='no data was found', color='red')
| 2.296875
| 2
|
app/project/users/forms.py
|
ritchie46/base-flask-mvc
| 1
|
12776534
|
<filename>app/project/users/forms.py
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Length, EqualTo, Email
class RegisterForm(Form):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Repeat password', validators=[DataRequired(), EqualTo('password')])
class LoginForm(Form):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password', validators=[DataRequired()])
class EmailForm(Form):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=6, max=40)])
class PasswordForm(Form):
password = PasswordField('Password', validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Repeat password', validators=[DataRequired(), EqualTo('password')])
old_password = PasswordField('Old password', validators=[DataRequired()])
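# --- Usage sketch inside a Flask view (illustrative; `create_user` is a
# --- hypothetical helper, not part of this module):
#     form = RegisterForm()
#     if form.validate_on_submit():
#         create_user(email=form.email.data, password=form.password.data)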
| 2.90625
| 3
|
authors/apps/rating/serializers.py
|
andela/ah-codeofduty
| 0
|
12776535
|
<reponame>andela/ah-codeofduty
"""
Rating Serializers module
"""
from django.db.models import Sum, Avg
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from .models import Rating
from ..articles.serializers import ArticleSerializer
from ..articles.models import Article
from ..authentication.serializers import UserSerializer
from ..authentication.models import User
class RateSerializers(serializers.ModelSerializer):
"""
Rate Serializer
:params: serializers.ModelSerializer:
parent class parameter
"""
rating = serializers.IntegerField()
rater = UserSerializer(read_only=True)
article = ArticleSerializer(read_only=True)
class Meta():
"""
Meta class
:adds more setting to the RateSerializer class:
"""
model = Rating
fields = ('rating', 'article', 'rater')
def set_average_rating(self, obj, rating):
average = rating
try:
ratings = Rating.objects.filter(article=obj.id)
if ratings:
average = ratings.all().aggregate(
Avg('rating'))['rating__avg']
obj.average_rating = average
obj.save()
except Exception as e:
print(e)
def create(self, validated_data):
"""
:params: validated_data:
ensures data being passed is going to be valid
"""
# validate rating data
rate = validated_data['rating']
# get user and slug context from request
slug = self.context.get('slug')
user = self.context.get('user')
# validate user data
validated_data['rater'] = user
# check if article exists
article = check_article_exists(slug)
# validate article data
validated_data['article'] = article
# check for correct rating range
if rate not in range(1, 6):
raise serializers.ValidationError(
'Error, rating is between 1 to 5')
author = article.author
# check if rater is the author of the article
if author == user:
raise serializers.ValidationError(
'Error, you can\'t rate your own article')
# check if user has ever rated the article
# if not save the user rating
# else update the rating
rating_instance = None
try:
rating_instance = Rating.objects.get(rater=user, article=article)
        except Rating.DoesNotExist:
            # no existing rating by this user for this article yet
            pass
if rating_instance:
""" Update the rating """
rating_instance.rating = rate
rating_instance.save()
else:
Rating.objects.create(**validated_data)
self.set_average_rating(article, rate)
return validated_data
def check_article_exists(slug):
"""
Check if article exists
:param: slug: article slug that verifies
the article exits
:return: article
"""
try:
article = Article.objects.get(slug=slug)
except Article.DoesNotExist:
raise serializers.ValidationError(
'Error, Article does not exist')
return article
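# --- Usage sketch inside a DRF view (illustrative; `slug` and `request` come from
# --- the view's context):
#     serializer = RateSerializers(data=request.data,
#                                  context={'slug': slug, 'user': request.user})
#     serializer.is_valid(raise_exception=True)
#     serializer.save()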
| 2.5
| 2
|
ai/action/eyedisplay/eyedisplay.py
|
elephantrobotics-joey/marsai
| 32
|
12776536
|
<gh_stars>10-100
import cv2
import copy
import random
from PIL import Image, ImageDraw, ImageFont
import sys
sys.path.append(".")
import ai.actionplanner
import ai.action.eyedisplay.OLED_Driver as OLED
DEFAULT_SP = 0.01 # default speed / time seg
ENJOY_SP = 0.06
PIC_COUNT = 10
OPEN = 0
CLOSE = 9
NEARLY_OPEN = 3
NEARLY_CLOSE = 7
class EyeDisplay:
def __init__(self, color = 'blue'):
OLED.Device_Init()
self.path = 'ai/action/eyedisplay/'
self.color = color
self.eye_output = []
self.eye_raw = self.get_eye_image_raw()
self.last_num = 0
for i in range(PIC_COUNT):
self.eye_output.append(self.get_eye_data(self.eye_raw, i+1))
# initial
self.display_eye(OPEN)
def get_eye_image_raw(self):
self.layer_eye_background = Image.open(self.path + "eyeball/eye_background/" + self.color + ".png").convert("RGBA")
self.layer_eye_puil = Image.open(self.path + "eyeball/eye_pupil/" + self.color + ".png").convert("RGBA")
self.layer_eye_ball = Image.open(self.path + "eyeball/eye_ball.png").convert("RGBA")
eye_raw = Image.new("RGBA", self.layer_eye_background.size)
eye_raw.paste(self.layer_eye_background, (0, 0), self.layer_eye_background)
eye_raw.paste(self.layer_eye_puil, (0, 0), self.layer_eye_puil)
eye_raw.paste(self.layer_eye_ball, (0, 0), self.layer_eye_ball)
return eye_raw
def get_eye_data(self, eye_image_raw, lid_num):
# input eye lid
name_eye_lid = "eye_hor_" + str(lid_num) + ".png"
eyelid_image = Image.open(self.path + "eyeball/eyelid/" + name_eye_lid).convert("RGBA")
# add lid
eye_image_raw.paste(eyelid_image, (0,0), eyelid_image)
# add mask
layer_eye_mask = Image.open(self.path + "eyeball/mask.png").convert("RGBA")
eye_image_raw.paste(layer_eye_mask, (0,0), layer_eye_mask)
return OLED.get_display_data(eye_image_raw)
    def display_eye(self,num): # display_eye(OPEN~CLOSE): 0 is fully open, 9 is fully closed
num = min(num, CLOSE)
num = max(num, OPEN)
OLED.display_data(self.eye_output[num])
self.last_num = num
def display_ini(self):
OLED.Device_Init()
def blink(self,times = 1, timesleep = DEFAULT_SP):
for i in range(times):
self.eye_move_to(CLOSE, timesleep)
self.eye_move_to(NEARLY_OPEN, timesleep)
pass
self.eye_move_to(OPEN, timesleep)
def eye_move_to(self, dis_num, timesleep = DEFAULT_SP):
ls = []
if dis_num > self.last_num:
a = self.last_num
while a < dis_num:
a += 1
ls.append(a)
elif dis_num < self.last_num:
a = self.last_num
while a > dis_num:
a-=1
ls.append(a)
else:
pass
self.display_eye_list(ls, timesleep)
return ls
def display_eye_list(self, ls , timesleep=0):
if ls:
sz = len(ls)
i = 0
while i < sz:
self.display_eye(ls[i])
i+=1
ai.actionplanner.ActionPlanner.sleep(timesleep)
def enjoy(self):
a = random.random()
b = random.random()
self.eye_move_to(CLOSE, ENJOY_SP)
if a > 0.35:
self.eye_move_to(NEARLY_CLOSE, ENJOY_SP)
self.eye_move_to(CLOSE, ENJOY_SP)
ai.actionplanner.ActionPlanner.sleep(b)
def random_move(self):
a = random.random()
sp = random.random()
c = random.random()
        if a > 0.2: # 80% of the time: move to a small eyelid position (0 to NEARLY_OPEN-1)
b = int(random.random()*NEARLY_OPEN)
self.eye_move_to(b)
else:
b = int(random.random()*3)
self.blink(b)
ai.actionplanner.ActionPlanner.sleep(c)
'''
a = EyeDisplay('blue')
a.enjoy()
ai.actionplanner.ActionPlanner.sleep(2)
while 1:
a.random_move()
#a.enjoy()
ai.actionplanner.ActionPlanner.sleep(0.5)
pass
'''
| 2.28125
| 2
|
sdmetrics/base.py
|
ZhuofanXie/SDMetrics
| 1
|
12776537
|
<filename>sdmetrics/base.py
"""BaseMetric class."""
class BaseMetric:
"""Base class for all the metrics in SDMetrics.
Attributes:
name (str):
Name to use when reports about this metric are printed.
goal (sdmetrics.goal.Goal):
The goal of this metric.
min_value (Union[float, tuple[float]]):
Minimum value or values that this metric can take.
max_value (Union[float, tuple[float]]):
Maximum value or values that this metric can take.
"""
name = None
goal = None
min_value = None
max_value = None
@classmethod
def get_subclasses(cls, include_parents=False):
"""Recursively find subclasses of this metric.
If ``include_parents`` is passed as ``True``, intermediate child classes
that also have subclasses will be included. Otherwise, only classes
without subclasses will be included to ensure that they are final
implementations and are ready to be run on data.
Args:
include_parents (bool):
Whether to include subclasses which are parents to
other classes. Defaults to ``False``.
"""
subclasses = dict()
for child in cls.__subclasses__():
grandchildren = child.get_subclasses(include_parents)
subclasses.update(grandchildren)
if include_parents or not grandchildren:
subclasses[child.__name__] = child
return subclasses
@staticmethod
def compute(real_data, synthetic_data):
"""Compute this metric.
Args:
real_data:
The values from the real dataset.
synthetic_data:
The values from the synthetic dataset.
Returns:
Union[float, tuple[float]]:
Metric output or outputs.
"""
raise NotImplementedError()
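# --- Minimal usage sketch (illustrative subclass names; not part of SDMetrics).
if __name__ == '__main__':
    class DemoParent(BaseMetric):
        name = 'demo-parent'
    class DemoChild(DemoParent):
        name = 'demo-child'
    print(BaseMetric.get_subclasses())                      # leaf classes only
    print(BaseMetric.get_subclasses(include_parents=True))  # includes DemoParent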
| 2.890625
| 3
|
catalog/bindings/gmd/abstract_rs_reference_system_type.py
|
NIVANorge/s-enda-playground
| 0
|
12776538
|
<reponame>NIVANorge/s-enda-playground
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_object_type import AbstractObjectType
from bindings.gmd.ex_extent_property_type import ExExtentPropertyType
from bindings.gmd.rs_identifier_property_type import RsIdentifierPropertyType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class AbstractRsReferenceSystemType(AbstractObjectType):
"""
Description of the spatial and temporal reference systems used in the
dataset.
"""
class Meta:
name = "AbstractRS_ReferenceSystem_Type"
name: Optional[RsIdentifierPropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
"required": True,
},
)
domain_of_validity: List[ExExtentPropertyType] = field(
default_factory=list,
metadata={
"name": "domainOfValidity",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
| 1.953125
| 2
|
paper_rq/__init__.py
|
dldevinc/paper-rq
| 0
|
12776539
|
<reponame>dldevinc/paper-rq
__version__ = "0.3.3"
default_app_config = "paper_rq.apps.Config"
| 1.054688
| 1
|
python/python-algorithm-intervew/8-linked-list/19-reverse-linked-list2-1.py
|
bum12ark/algorithm
| 1
|
12776540
|
"""
인덱스 m에서 n까지를 역순으로 만들어라. 인덱스 m은 1부터 시작한다.
- 입력
1->2->3->4->5->None, m = 2, n = 4
- 출력
1->4->3->2->5->None
"""
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def print_list(self):
cur = self
while cur:
print(cur.val, end='->')
cur = cur.next
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        # Standard iterative reversal of the whole list.
        prev = None
        current = head
        while current:
            next = current.next
            current.next = prev
            prev = current
            current = next
        return prev
    def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:
        # Reverse positions m..n in place by repeatedly moving the node
        # after `end` to the front of the sublist.
        if not head or m == n:
            return head
        root = start = ListNode(None)
        root.next = head
        for _ in range(m - 1):
            start = start.next
        end = start.next
        for _ in range(n - m):
            tmp = start.next
            start.next = end.next
            end.next = end.next.next
            start.next.next = tmp
        return root.next
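# --- Quick self-check (illustrative; not part of the original exercise file).
if __name__ == '__main__':
    head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    Solution().reverseBetween(head, 2, 4).print_list()  # expected: 1->4->3->2->5->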
| 4.125
| 4
|
Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/UnitTests/Decorator_UnitTest.py
|
davidbrownell/Common_Environment
| 1
|
12776541
|
<reponame>davidbrownell/Common_Environment
# Placeholder unit test
import os
import sys
import unittest
from CommonEnvironment.CallOnExit import CallOnExit
# ---------------------------------------------------------------------------
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ---------------------------------------------------------------------------
sys.path.insert(0, os.path.join(_script_dir, ".."))
with CallOnExit(lambda: sys.path.pop(0)):
from Decorator import *
# ----------------------------------------------------------------------
class Test(unittest.TestCase):
@AsFunc("Foo")
class MyClass(object):
@staticmethod
def Foo():
return "Bar"
def __init__(self, value):
self.value = value
def Another(self):
return "Another: {}".format(self.value)
# ----------------------------------------------------------------------
def test_Standard(self):
        self.assertEqual(Test.MyClass(), "Bar")
self.assertEqual(Test.MyClass(Test.MyClass.StandardInit, 100).Another(), "Another: 100")
# ---------------------------------------------------------------------------
if __name__ == "__main__":
try: sys.exit(unittest.main(verbosity=2))
except KeyboardInterrupt: pass
| 2.40625
| 2
|
lwlPackage/BaiduPic/dataPro.py
|
2892211452/myPackage
| 0
|
12776542
|
# Pass in the image URL and the local save path, respectively
def request_download(imgUrl, Path):
import requests
r = requests.get(imgUrl)
with open(Path, 'wb') as f:
f.write(r.content)
if __name__ == "__main__":
request_download('https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/u=2018604370,3101817315&fm=26&gp=0.jpg', 'images/1.jpg')
| 3.546875
| 4
|
playground/image_captions.py
|
BarracudaPff/code-golf-data-pythpn
| 0
|
12776543
|
import base64
import requests
# NOTE: `Photo` is the project's Django model for photos; its import path depends
# on the app layout and is not shown in this playground snippet.
photos = Photo.objects.all()
captions = []
for idx, photo in enumerate(photos):
if idx > 2:
break
thumbnail_path = photo.thumbnail.url
with open("." + thumbnail_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
encoded_string = str(encoded_string)[2:-1]
resp_captions = requests.post("http://localhost:5000/", data=encoded_string)
captions.append(resp_captions.json())
| 2.796875
| 3
|
ManufacturingNet/models/logistic_regression.py
|
lalitjg/ManufacturingNet
| 9
|
12776544
|
"""LogRegression trains a logistic regression model implemented by
Scikit-Learn on the given dataset. Before training, the user is
prompted for parameter input. After training, model metrics are
displayed, and the user can make new predictions.
View the documentation at https://manufacturingnet.readthedocs.io/.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, confusion_matrix, make_scorer,
roc_auc_score, roc_curve)
from sklearn.model_selection import (GridSearchCV, cross_val_score,
train_test_split)
class LogRegression:
"""Class framework for logistic regression model."""
def __init__(self, attributes=None, labels=None):
"""Initializes a LogisticRegression object."""
self.attributes = attributes
self.labels = labels
self.test_size = None
self.cv = None
self.graph_results = None
self.fpr = None
self.tpr = None
self.bin = False
self.gridsearch = False
self.gs_params = None
self.gs_result = None
self.regression = None
self.classes = None
self.coefficients = None
self.intercept = None
self.n_iter = None
self.accuracy = None
self.precision = None
self.recall = None
self.roc_auc = None
self.confusion_matrix = None
self.cross_val_scores = None
# Accessor methods
def get_attributes(self):
"""Accessor method for attributes."""
return self.attributes
def get_labels(self):
"""Accessor method for labels."""
return self.labels
def get_classes(self):
"""Accessor method for classes."""
return self.classes
def get_regression(self):
"""Accessor method for regression."""
return self.regression
def get_coefficents(self):
"""Accessor method for coefficients."""
return self.coefficients
def get_n_iter(self):
"""Accessor method for n_iter."""
return self.n_iter
def get_accuracy(self):
"""Accessor method for accuracy."""
return self.accuracy
def get_roc_auc(self):
"""Accessor method for roc_auc."""
return self.roc_auc
def get_confusion_matrix(self):
"""Accessor method for confusion_matrix."""
return self.confusion_matrix
def get_cross_val_scores(self):
"""Accessor method for cross_val_scores."""
return self.cross_val_scores
# Modifier methods
def set_attributes(self, new_attributes=None):
"""Modifier method for attributes."""
self.attributes = new_attributes
def set_labels(self, new_labels=None):
"""Modifier method for labels."""
self.labels = new_labels
# Wrapper for logistic regression model
def run(self):
"""Performs logistic regression on dataset and updates relevant
instance data.
"""
if self._check_inputs():
# Instantiate LogisticRegression() object using helper method
self.regression = self._create_model()
# Split into training and testing set
dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test = \
train_test_split(self.attributes, self.labels,
test_size=self.test_size)
# Train the model and get resultant coefficients
# Handle exception if arguments are incorrect
try:
self.regression.fit(dataset_X_train, np.ravel(dataset_y_train))
except Exception as e:
print("An exception occurred while training the regression",
"model. Check your inputs and try again.")
print("Here is the exception message:")
print(e)
self.regression = None
return
# Get resultant model instance data
self.classes = self.regression.classes_
self.coefficients = self.regression.coef_
self.intercept = self.regression.intercept_
self.n_iter = self.regression.n_iter_
# Make predictions using testing set
y_prediction = self.regression.predict(dataset_X_test)
# Metrics
self.accuracy = accuracy_score(y_prediction, dataset_y_test)
probas = self.regression.predict_proba(dataset_X_test)
# If classification is binary, calculate roc_auc
if probas.shape[1] == 2:
self.bin = True
self.roc_auc = \
roc_auc_score(self.regression.predict(dataset_X_test),
probas[::, 1])
self.fpr, self.tpr, _ = roc_curve(
dataset_y_test, probas[::, 1])
# Else, calculate confusion matrix
else:
self.confusion_matrix = \
confusion_matrix(dataset_y_test, y_prediction)
self.cross_val_scores = cross_val_score(self.regression,
self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_results()
def predict(self, dataset_X=None):
"""Predicts the output of each datapoint in dataset_X using the
regression model. Returns the predictions.
"""
# Check that run() has already been called
if self.regression is None:
print("The regression model seems to be missing. Have you called",
"run() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.regression.predict(dataset_X)
except Exception as e:
print("The model failed to run. Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nLogRegression Predictions:\n", y_prediction, "\n")
return y_prediction
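    # Usage sketch (illustrative arrays; not part of the original class):
    #     model = LogRegression(attributes=X, labels=y)
    #     model.run()                   # interactive parameter prompts
    #     preds = model.predict(X_new)  # predictions for new datapoints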
# Helper methods
def _create_model(self):
"""Runs UI for getting parameters and creating model."""
print("\n==================================")
print("= LogRegression Parameter Inputs =")
print("==================================\n")
print("Default values:",
"test_size = 0.25",
"cv = 5",
"graph_results = False",
"penalty = 'l2'",
"dual = False",
"tol = 0.0001",
"C = 1.0",
"fit_intercept = True",
"intercept_scaling = 1",
"class_weight = None",
"random_state = None",
"solver = 'lbfgs'",
"max_iter = 100",
"multi_class = 'auto'",
"verbose = False",
"warm_start = False",
"n_jobs = None",
"l1_ratio = None", sep="\n")
# Set defaults
self.test_size = 0.25
self.cv = None
self.graph_results = False
while True:
user_input = input("\nUse default parameters (Y/n)? ").lower()
if user_input in {"y", ""}:
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LogisticRegression()
elif user_input == "n":
break
else:
print("Invalid input.")
print("\nIf you are unsure about a parameter, press enter to use its",
"default value.")
print("If you finish entering parameters early, enter 'q' to skip",
"ahead.\n")
# Set more defaults
penalty = "l2"
dual = False
tol = 0.0001
C = 1.0
fit_intercept = True
intercept_scaling = 1
class_weight = None
random_state = None
solver = "lbfgs"
max_iter = 100
multi_class = "auto"
verbose = 0
warm_start = False
n_jobs = None
l1_ratio = None
# Get user parameter input
while True:
break_early = False
while True:
user_input = input("\nWhat fraction of the dataset should be the "
+ "testing set (0,1)? ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input >= 1:
raise Exception
self.test_size = user_input
break
except Exception:
print("Invalid input.")
print("test_size =", self.test_size)
if break_early:
break
while True:
user_input = input("\nUse GridSearch to find the best "
+ "hyperparameters (y/N)? ").lower()
if user_input == "q":
break_early = True
break
elif user_input in {"n", "y", ""}:
break
else:
print("Invalid input.")
if break_early:
break
while user_input == "y":
print("\n= GridSearch Parameter Inputs =\n")
print("Enter 'q' to skip GridSearch.")
self.gridsearch = True
params = {}
print("\nWarnings:")
print("Solvers 'lbfgs', 'newton-cg', 'sag', and 'saga' support",
"only 'l2' or no penalty.")
print("Solver 'liblinear' requires a penalty.")
print("Penalty 'elasticnet' is only supported by the",
"'saga' solver.")
print("Failing to heed these warnings may crash GridSearch!")
while True:
print("\nEnter the classifier penalties to evaluate.")
print("Options: 1-'l1', 2-'l2', 3-'elasticnet'. Enter 'all'",
"for all options.")
print("Example input: 1,2,3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
elif user_input == "all":
pen_params = ["l1", "l2", "elasticnet"]
break
else:
pen_dict = {1: "l1", 2: "l2", 3: "elasticnet"}
try:
pen_params_int = \
list(map(int, list(user_input.split(","))))
if len(pen_params_int) > len(pen_dict):
raise Exception
pen_params = []
for each in pen_params_int:
if not pen_dict.get(each):
raise Exception
pen_params.append(pen_dict.get(each))
break
except Exception:
print("Invalid input.")
if break_early:
break
params["penalty"] = pen_params
print("penalties:", pen_params)
while True:
print("\nEnter the solvers to evaluate.")
print("Options: 1-'newton-cg', 2-'lbfgs', 3-'liblinear',",
"4-'sag', 5-'saga'. Enter 'all' for all options.")
print("Example input: 1,2,3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
elif user_input == "all":
sol_params = ["newton-cg", "lbfgs", "liblinear", "sag",
"saga"]
break
else:
sol_dict = {1: "newton-cg", 2: "lbfgs", 3: "liblinear",
4: "sag", 5: "saga"}
try:
sol_params_int = \
list(map(int, list(user_input.split(","))))
if len(sol_params_int) > len(sol_dict):
raise Exception
sol_params = []
for each in sol_params_int:
if not sol_dict.get(each):
raise Exception
sol_params.append(sol_dict.get(each))
break
except Exception:
print("Invalid input.")
if break_early:
break
params["solver"] = sol_params
print("solvers:", sol_params)
print("\n= End of GridSearch inputs. =\n")
self.gs_params = params
best_params = self._run_gridsearch()
solver = best_params["solver"]
penalty = best_params["penalty"]
break
break_early = False
while True:
user_input = input("\nEnter the number of folds for cross "
+ "validation [2,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 2:
raise Exception
self.cv = user_input
break
except Exception:
print("Invalid input.")
print("cv =", self.cv)
if break_early:
break
while True:
user_input = \
input("\nGraph the ROC curve? Only binary classification "
+ "is supported (y/N): ").lower()
if user_input == "y":
self.graph_results = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("graph_results =", self.graph_results)
if break_early:
break
while not self.gridsearch:
print("\nWhich algorithm should be used in the optimization",
"problem?")
user_input = input("Enter 1 for 'newton-cg', 2 for 'lbfgs', 3 "
+ "for 'liblinear', 4 for 'sag', or 5 for "
+ "'saga': ").lower()
if user_input == "1":
solver = "newton-cg"
break
elif user_input == "3":
solver = "liblinear"
break
elif user_input == "4":
solver = "sag"
break
elif user_input == "5":
solver = "saga"
break
elif user_input in {"2", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
if not self.gridsearch:
print("solver =", solver)
if break_early:
break
while not self.gridsearch:
print("\nWhich norm should be used in penalization?")
user_input = input("Enter 1 for 'l1', 2 for 'l2', 3 for "
+ "'elasticnet', or 4 for 'none': ").lower()
if solver in {"newton-cg", "lbfgs", "sag"} \
and user_input not in {"2", "4"}:
print("Invalid input.")
print("Solvers 'newton-cg', 'sag', and 'lbfgs' support",
"only 'l2' or no penalty.")
continue
if user_input == "3" and solver != "saga":
print("Invalid input.")
print("'elasticnet' is only supported by the 'saga' solver.")
continue
if user_input == "4" and solver == "liblinear":
print("Invalid input.")
print("Solver 'liblinear' requires a penalty.")
continue
if user_input == "1":
penalty = "l1"
break
elif user_input == "3":
penalty = "elasticnet"
break
elif user_input == "4":
penalty = "none"
break
elif user_input in {"2", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
if not self.gridsearch:
print("penalty =", penalty)
if break_early:
break
while True:
user_input = input("\nUse dual formulation (y/N)? ").lower()
if user_input == "y":
dual = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("dual =", dual)
if break_early:
break
while True:
user_input = input("\nEnter a positive number for the tolerance "
+ "for stopping criteria: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
tol = user_input
break
except Exception:
print("Invalid input.")
print("tol =", tol)
if break_early:
break
while True:
user_input = input("\nEnter a positive number for the inverse "
+ "of regularization strength C: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
C = user_input
break
except Exception:
print("Invalid input.")
print("C =", C)
if break_early:
break
while True:
user_input = \
input("\nInclude a y-intercept in the model (Y/n)? ").lower()
if user_input == "n":
fit_intercept = False
break
elif user_input in {"y", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("fit_intercept =", fit_intercept)
if break_early:
break
while fit_intercept:
user_input = input("\nEnter a number for the intercept "
+ "scaling factor: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
intercept_scaling = float(user_input)
break
except Exception:
print("Invalid input.")
if fit_intercept:
print("intercept_scaling =", intercept_scaling)
if break_early:
break
while True:
user_input = input("\nAutomatically balance the class weights "
+ "(y/N)? ").lower()
if user_input == "y":
class_weight = "balanced"
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("class_weight =", class_weight)
if break_early:
break
print("\nTo set manual weights, call",
"get_regression().set_params() to set the class_weight",
"parameter.")
while True:
user_input = \
input("\nEnter an integer for the random number seed: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
random_state = int(user_input)
break
except Exception:
print("Invalid input.")
print("random_state =", random_state)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive maximum number of iterations: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0:
raise Exception
max_iter = user_input
break
except Exception:
print("Invalid input.")
print("max_iter =", max_iter)
if break_early:
break
while True:
print("\nPlease choose a multiclass scheme.")
user_input = input("Enter 1 for one-vs-rest, 2 for multinomial, "
+ "or 3 to automatically choose: ").lower()
if user_input == "1":
multi_class = "ovr"
break
elif user_input == "2":
multi_class = "multinomial"
break
elif user_input in {"3", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("multi_class =", multi_class)
if break_early:
break
while True:
user_input = input("\nEnable verbose output during training "
+ "(y/N)? ").lower()
if user_input == "y":
verbose = 1
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("verbose =", bool(verbose))
if break_early:
break
while True:
user_input = \
input("\nEnable warm start? This will use the previous "
+ "solution for fitting (y/N): ").lower()
if user_input == "y":
warm_start = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("warm_start =", warm_start)
if break_early:
break
while multi_class == "ovr":
print("\nEnter a positive number of CPU cores to use.")
user_input = input("Enter -1 to use all cores: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0 and user_input != -1:
raise Exception
n_jobs = user_input
break
except Exception:
print("Invalid input.")
if multi_class == "ovr":
print("n_jobs =", n_jobs)
if break_early:
break
while penalty == "elasticnet":
user_input = input("\nEnter a decimal for the Elastic-Net "
+ "mixing parameter [0,1]: ")
try:
if user_input.lower() in {"q", ""}:
break
user_input = float(user_input)
if user_input < 0 or user_input > 1:
raise Exception
l1_ratio = user_input
break
except Exception:
print("Invalid input.")
if penalty == "elasticnet":
print("l1_ratio =", l1_ratio)
break
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LogisticRegression(penalty=penalty, dual=dual, tol=tol, C=C,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight,
random_state=random_state, solver=solver,
max_iter=max_iter, multi_class=multi_class,
verbose=verbose, warm_start=warm_start,
n_jobs=n_jobs, l1_ratio=l1_ratio)
def _output_results(self):
"""Outputs model metrics after run() finishes."""
print("\n=========================")
print("= LogRegression Results =")
print("=========================\n")
print("Classes:\n", self.classes)
print("\nNumber of Iterations:\n", self.n_iter)
print("\n{:<20} {:<20}".format("Accuracy:", self.accuracy))
if self.bin:
print("\n{:<20} {:<20}".format("ROC AUC:", self.roc_auc))
else:
print("\nConfusion Matrix:\n", self.confusion_matrix)
print("\nCross Validation Scores: ", self.cross_val_scores)
if self.gridsearch:
print("\n{:<20} {:<20}".format("GridSearch Score:",
self.gs_result))
if self.bin and self.graph_results:
            plt.plot(self.fpr, self.tpr, label="Logistic Regression")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc=4)
plt.show()
print("\n\nCall predict() to make predictions for new data.")
print("\n===================")
print("= End of results. =")
print("===================\n")
def _run_gridsearch(self):
"""Runs GridSearch with the parameters given in run(). Returns
the best parameters."""
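        # A hypothetical parameter grid supplied by the caller via self.gs_params:
        # {"C": [0.1, 1.0, 10.0], "penalty": ["l2"], "solver": ["lbfgs"]}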
acc_scorer = make_scorer(accuracy_score)
clf = LogisticRegression()
dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test = \
train_test_split(self.attributes, self.labels,
test_size=self.test_size)
# Run GridSearch
grid_obj = GridSearchCV(clf, self.gs_params, scoring=acc_scorer)
grid_obj = grid_obj.fit(dataset_X_train, dataset_y_train)
# Set the clf to the best combination of parameters
clf = grid_obj.best_estimator_
# Fit the best algorithm to the data
clf.fit(dataset_X_train, dataset_y_train)
predictions = clf.predict(dataset_X_test)
self.gs_result = accuracy_score(dataset_y_test, predictions)
# Return the best parameters
print("\nBest GridSearch Parameters:\n", grid_obj.best_params_, "\n")
return grid_obj.best_params_
def _check_inputs(self):
"""Verifies if the instance data is ready for use in logistic
regression model.
"""
# Check if attributes exists
if self.attributes is None:
print("attributes is missing; call set_attributes(new_attributes)",
"to fix this! new_attributes should be a populated numpy",
"array of your independent variables.")
return False
# Check if labels exists
if self.labels is None:
print("labels is missing; call set_labels(new_labels) to fix this!",
"new_labels should be a populated numpy array of your",
"dependent variables.")
return False
# Check if attributes and labels have same number of rows (samples)
if self.attributes.shape[0] != self.labels.shape[0]:
print("attributes and labels don't have the same number of rows.",
"Make sure the number of samples in each dataset matches!")
return False
return True
| 3.796875
| 4
|
api/routers/email.py
|
temanisparsh/mailing-system
| 2
|
12776545
|
<gh_stars>1-10
from flask import Blueprint, make_response, request, jsonify, current_app
from flask.views import MethodView
from controllers import email
controller = email.controller
router = Blueprint('email', __name__)
router.add_url_rule('/draft', view_func = controller['draft'])
router.add_url_rule('/<email_id>', view_func = controller['email'])
router.add_url_rule('/folder/<email_id>/<folder_id>', view_func = controller['folder'])
router.add_url_rule('/mark/<email_id>/<category>', view_func = controller['mark'])
router.add_url_rule('/send', view_func = controller['send'])
router.add_url_rule('/all', view_func = controller['find_all'])
| 2.1875
| 2
|
services/load-test/locustfile.py
|
DragonBanana/serverless-sock-shop
| 0
|
12776546
|
<gh_stars>0
import base64
from time import sleep
from locust import HttpUser, TaskSet, task
from random import randint, choice, getrandbits
class WebTasks(TaskSet):
@task
def load(self):
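        # Simulated user journey (implemented below): register and log in with a
        # random user, add an address and a card, browse the catalogue, put one
        # item in the cart, then place an order.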
user = f"u{getrandbits(64)}"
password = f"<PASSWORD>)}"
session = bytes(f"{user}:{password}", "utf-8")
base64string = base64.encodebytes(session)[:-1].decode('ascii')
catalogue = self.client.get("/catalogue").json()
category_item = choice(catalogue)
item_id = category_item["id"]
self.client.get("/")
self.client.post("/register", json={"username":user,"password":password,"email":"<EMAIL>","firstName":"<EMAIL>","lastName":"<EMAIL>"})
self.client.get("/login", headers={"Authorization":f"Basic {base64string}"})
self.client.post("/addresses", json={"number":"24","street":"Via gorizia","city":"Sesto san giovanni","postcode":"20099","country":"Germany"})
self.client.post("/cards", json={"longNum":"123123123123","expires":"1231231","ccv":"12312312"})
self.client.get("/category.html")
self.client.get("/detail.html?id={}".format(item_id))
self.client.delete("/cart")
self.client.post("/cart", json={"id": item_id, "quantity": 1})
self.client.get("/basket.html")
sleep(1)
self.client.post("/orders")
class Web(HttpUser):
tasks = [WebTasks]
min_wait = 0.1
max_wait = 0.6
| 2.375
| 2
|
ektelo/algorithm/dawa/partition_engines/l1partition.py
|
dpcomp-org/ektelo
| 32
|
12776547
|
from __future__ import division
from builtins import str
import numpy
import os
import sys
import logging
from ektelo.algorithm.dawa.cutils import cutil
from ektelo.algorithm.dawa.partition_engines import partition_engine
from ektelo import util
class l1partition_engine(partition_engine.partition_engine):
"""Use the L1 partition method."""
def __init__(self):
self.init_params = util.init_params_from_locals(locals())
@staticmethod
def Run(x, epsilon, ratio,seed):
return L1partition(x, epsilon, ratio, gethist=True,seed =seed)
class l1partition_approx_engine(partition_engine.partition_engine):
"""Use the approximate L1 partition method."""
def __init__(self):
self.init_params = util.init_params_from_locals(locals())
@staticmethod
def Run(x, epsilon, ratio,seed):
return L1partition_approx(x, epsilon, ratio, gethist=True,seed = seed)
def L1partition(x, epsilon, ratio=0.5, gethist=False,seed=None):
"""Compute the noisy L1 histogram using all interval buckets
Args:
x - list of numeric values. The input data vector
epsilon - double. Total private budget
ratio - double in (0, 1). use ratio*epsilon for partition computation and (1-ratio)*epsilon for querying
the count in each partition
        gethist - boolean. If set to True, return the partition directly (the privacy budget used is still ratio*epsilon)
Return:
if gethist == False, return an estimated data vector. Otherwise, return the partition
"""
assert seed is not None, "seed must be set"
prng = numpy.random.RandomState(seed)
assert (x.dtype == numpy.dtype(int) or x.dtype == numpy.dtype("int32")), "Input vector must be int! %s given" %x.dtype
    y = x.astype('int32')  # numpy type int32 is not JSON serializable
check = (x ==y)
assert check.sum() == len(check), "Casting error from int to int32"
x=y
n = len(x)
hist = cutil.L1partition(n+1, x, epsilon, ratio, prng.randint(500000))
hatx = numpy.zeros(n)
rb = n
if gethist:
bucks = []
for lb in hist[1:]:
bucks.insert(0, [lb, rb-1])
rb = lb
if lb == 0:
break
logging.debug(' L1-PART: number of buckets %s' % str(bucks[:5]) )
return bucks
else:
for lb in hist[1:]:
hatx[lb:rb] = util.old_div(max(0, sum(x[lb:rb]) + prng.laplace(0, util.old_div(1.0,(epsilon*(1-ratio))), 1)), float(rb - lb))
rb = lb
if lb == 0:
break
return hatx
def L1partition_approx(x, epsilon, ratio=0.5, gethist=False,seed =None):
"""Compute the noisy L1 histogram using interval buckets of size 2^k
Args:
x - list of numeric values. The input data vector
epsilon - double. Total private budget
        ratio - double in (0, 1). Use ratio*epsilon for partition computation and (1-ratio)*epsilon for querying
the count in each partition
        gethist - boolean. If set to True, return the partition directly (the privacy budget used is still ratio*epsilon)
Return:
if gethist == False, return an estimated data vector. Otherwise, return the partition
"""
assert seed is not None, "seed must be set"
prng = numpy.random.RandomState(seed)
n = len(x)
# check that the input vector x is of appropriate type
assert (x.dtype == numpy.dtype(int) or x.dtype == numpy.dtype("int32")), "Input vector must be int! %s given" %x.dtype
    y = x.astype('int32')  # numpy type int32 is not JSON serializable
check = (x ==y)
assert check.sum() == len(check), "Casting error from int to int32"
x=y
hist = cutil.L1partition_approx(n+1, x, epsilon, ratio, prng.randint(500000))
hatx = numpy.zeros(n)
rb = n
if gethist:
bucks = []
for lb in hist[1:]:
bucks.insert(0, [lb, rb-1])
rb = lb
if lb == 0:
break
return bucks
else:
for lb in hist[1:]:
hatx[lb:rb] = util.old_div(max(0, sum(x[lb:rb]) + prng.laplace(0, util.old_div(1.0,(epsilon*(1-ratio))), 1)), float(rb - lb))
rb = lb
if lb == 0:
break
return hatx
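if __name__ == "__main__":
    # Minimal usage sketch, assuming the compiled cutil extension is available;
    # the data, epsilon, ratio, and seed values below are illustrative only.
    demo_x = numpy.array([5, 3, 0, 8, 7, 2, 4, 1], dtype="int32")
    buckets = L1partition(demo_x, epsilon=1.0, ratio=0.5, gethist=True, seed=0)
    print("partition buckets:", buckets)
    hatx = L1partition_approx(demo_x, epsilon=1.0, ratio=0.5, gethist=False, seed=0)
    print("noisy estimate:", hatx)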
| 2.328125
| 2
|
agent/BHAgent.py
|
Theoprasus/Abides
| 0
|
12776548
|
<filename>agent/BHAgent.py
'''
from agent.examples.SubscriptionAgent import SubscriptionAgent
import pandas as pd
import random as rd
from math import floor
from copy import deepcopy
class BHAgent(SubscriptionAgent):
""" AN agent that simply wake at a random frequency and place a market order investing a percentage(weight) of his cash
"""
def __init__(self, id, name, type, symbol, starting_cash, levels = 1, subscription_freq = 10e9, log_orders=False, random_state=None, weight = 0.50):
""" Constructor for ExampleExperimentalAgentTemplate.
:param id: Agent's ID as set in config
:param name: Agent's human-readable name as set in config
:param type: Agent's human-readable type as set in config, useful for grouping agents semantically
:param symbol: Name of asset being traded
:param starting_cash: Dollar amount of cash agent starts with.
:param levels: Number of levels of orderbook to subscribe to
:param subscription_freq: Frequency of orderbook updates subscribed to (in nanoseconds)
:param log_orders: bool to decide if agent's individual actions logged to file.
:param random_state: numpy RandomState object from which agent derives randomness
"""
super().__init__(id, name, type, symbol, starting_cash, levels, subscription_freq, log_orders=log_orders, random_state=random_state)
self.traded = False
self.current_bids = None # subscription to market data populates this list
self.current_asks = None # subscription to market data populates this list
self.weight = weight
self.holdings[self.symbol]= 0
def wakeup(self, currentTime):
""" Action to be taken by agent at each wakeup.
:param currentTime: pd.Timestamp for current simulation time
"""
super().wakeup(currentTime)
self.getCurrentSpread(self.symbol)
self.setWakeup(currentTime + self.getWakeFrequency())
def getCurrentMidPrice(self):
""" Retrieve mid price from mid and ask.
nice addition should return the midprice
:return:
"""
try:
bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
if bid and ask:
                return int((ask + bid) / 2)
except (TypeError, IndexError):
return None
def receiveMessage(self, currentTime, msg):
""" Action taken when agent receives a message from the exchange
:param currentTime: pd.Timestamp for current simulation time
:param msg: message from exchange
:return:
"""
super().receiveMessage(currentTime, msg) # receives subscription market data
if msg.body['msg'] == 'QUERY_SPREAD' and (self.getCurrentMidPrice() is not None):
quantity = floor(self.starting_cash * self.weight / self.getCurrentMidPrice())
if quantity > self.holdings[self.symbol] and (quantity != self.holdings[self.symbol]):
self.placeMarketOrder(self.symbol, quantity - self.holdings[self.symbol], True)
#self.traded = True # not needed anymore
def getWakeFrequency(self):
""" Set next wakeup time for agent. """
return pd.Timedelta(str(rd.randint(1, 10))+"min")
def placeLimitOrder(self, quantity, is_buy_order, limit_price):
""" Place a limit order at the exchange.
:param quantity (int): order quantity
:param is_buy_order (bool): True if Buy else False
:param limit_price: price level at which to place a limit order
:return:
"""
super().placeLimitOrder(self.symbol, quantity, is_buy_order, limit_price)
def placeMarketOrder(self, quantity, is_buy_order):
""" Place a market order at the exchange.
:param quantity (int): order quantity
:param is_buy_order (bool): True if Buy else False
:return:
"""
super().placeMarketOrder(self.symbol, quantity, is_buy_order)
def cancelAllOrders(self):
""" Cancels all resting limit orders placed by the experimental agent.
"""
for _, order in self.orders.items():
self.cancelOrder(order)
'''
from agent.TradingAgent import TradingAgent
from util.util import log_print
from math import sqrt, floor
import numpy as np
import pandas as pd
class BHAgent(TradingAgent):
def __init__(self, id, name, type, symbol='IBM', starting_cash=100000, log_orders=False, random_state=None, wakeup_time = None, weigth = 0.3 ):
# Base class init.
super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state)
        self.wakeup_time = wakeup_time
self.symbol = symbol # symbol to trade
# The agent uses this to track whether it has begun its strategy or is still
# handling pre-market tasks.
self.trading = False
# The agent begins in its "complete" state, not waiting for
# any special event or condition.
self.state = 'AWAITING_WAKEUP'
# The agent must track its previous wake time, so it knows how many time
# units have passed.
self.prev_wake_time = None
self.weigth = weigth
self.hasTraded = False
self.size = np.random.randint(20, 50)
def kernelStarting(self, startTime):
# self.kernel is set in Agent.kernelInitializing()
# self.exchangeID is set in TradingAgent.kernelStarting()
super().kernelStarting(startTime)
self.oracle = self.kernel.oracle
def kernelStopping(self):
# Always call parent method to be safe.
super().kernelStopping()
# Print end of day valuation.
H = int(round(self.getHoldings(self.symbol), -2) / 100)
#noise trader surplus is marked to EOD
bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
if bid and ask:
rT = int(bid + ask)/2
else:
rT = self.last_trade[ self.symbol ]
# final (real) fundamental value times shares held.
surplus = rT * H
log_print("surplus after holdings: {}", surplus)
# Add ending cash value and subtract starting cash value.
surplus += self.holdings['CASH'] - self.starting_cash
surplus = float(surplus) / self.starting_cash
self.logEvent('FINAL_VALUATION', surplus, True)
log_print(
"{} final report. Holdings {}, end cash {}, start cash {}, final fundamental {}, surplus {}",
self.name, H, self.holdings['CASH'], self.starting_cash, rT, surplus)
print("Final relative surplus", self.name, surplus)
def wakeup(self, currentTime):
# Parent class handles discovery of exchange times and market_open wakeup call.
super().wakeup(currentTime)
self.state = 'INACTIVE'
if not self.mkt_open or not self.mkt_close:
# TradingAgent handles discovery of exchange times.
return
else:
if not self.trading:
self.trading = True
# Time to start trading!
log_print("{} is ready to start trading now.", self.name)
# Steady state wakeup behavior starts here.
# If we've been told the market has closed for the day, we will only request
# final price information, then stop.
if self.mkt_closed and (self.symbol in self.daily_close_price):
# Market is closed and we already got the daily close price.
return
        if self.wakeup_time > currentTime:
            self.setWakeup(self.wakeup_time)
if self.mkt_closed and (not self.symbol in self.daily_close_price):
self.getCurrentSpread(self.symbol)
self.state = 'AWAITING_SPREAD'
return
if type(self) == BHAgent:
self.getCurrentSpread(self.symbol)
self.state = 'AWAITING_SPREAD'
else:
self.state = 'ACTIVE'
def placeOrder(self):
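        # Buy-and-hold sizing: on the first trade only, spend roughly `weigth`
        # of starting cash at the current mid price via a market buy order.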
bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
if bid and ask and (self.hasTraded == False):
quantity = floor(self.starting_cash * self.weigth / (round((bid + ask) / 2)))
# if quantity > self.holdings[self.symbol] and (quantity != self.holdings[self.symbol]):
self.placeMarketOrder(self.symbol, quantity, True)
self.hasTraded = True
def receiveMessage(self, currentTime, msg):
# Parent class schedules market open wakeup call once market open/close times are known.
super().receiveMessage(currentTime, msg)
# We have been awakened by something other than our scheduled wakeup.
# If our internal state indicates we were waiting for a particular event,
# check if we can transition to a new state.
if msg.body['msg'] == "ORDER_EXECUTED":
self.hasTraded = True
if msg.body['msg'] == "ORDER_CANCELLED":
self.hasTraded = False
if self.state == 'AWAITING_SPREAD':
# We were waiting to receive the current spread/book. Since we don't currently
# track timestamps on retained information, we rely on actually seeing a
# QUERY_SPREAD response message.
if msg.body['msg'] == 'QUERY_SPREAD':
# This is what we were waiting for.
# But if the market is now closed, don't advance to placing orders.
if self.mkt_closed: return
                # We now have the information needed to place the buy-and-hold
                # market order.
self.placeOrder()
self.state = 'AWAITING_WAKEUP'
# Internal state and logic specific to this agent subclass.
# Cancel all open orders.
# Return value: did we issue any cancellation requests?
def cancelOrders(self):
if not self.orders: return False
for id, order in self.orders.items():
self.cancelOrder(order)
return True
def getWakeFrequency(self):
return pd.Timedelta(self.random_state.randint(low=0, high=100), unit='ns')
| 3.09375
| 3
|
python_Scripts/autoregression_final_models_MA.py
|
BanafshehKhaki/pHandDOprediction-models
| 1
|
12776549
|
<reponame>BanafshehKhaki/pHandDOprediction-models
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
import seaborn as sns
import time
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
import re
import datetime
import sklearn.metrics as skm
import functions as func
import os
import warnings
warnings.filterwarnings("ignore")
def temporal_horizon(df, pd_steps, target):
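    # Shift the target column forward so each row is paired with the value observed
    # pd_steps prediction steps later; the *6 factor presumably converts prediction
    # steps into rows (i.e. 6 samples per step).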
pd_steps = pd_steps * 6
target_values = df[[target]]
target_values = target_values.drop(
target_values.index[0: pd_steps], axis=0)
target_values.index = np.arange(0, len(target_values[target]))
df = df.drop(
df.index[len(df.index)-pd_steps: len(df.index)], axis=0)
df['Target_'+target] = target_values
print('Target_'+target)
return df
def ARIMAregression(train, test, p, d, q):
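    # Walk-forward one-step-ahead evaluation: refit ARIMA(p, d, q) on the expanding
    # history at every test step, forecast the next value, then append the observed
    # value to the history.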
history = [x for x in train]
predictions = list()
# walk-forward validation
for t in range(len(test)):
model = ARIMA(history, order=(p, d, q))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test[t]
history.append(obs)
# print('predicted=%f, expected=%f' % (yhat, obs))
# evaluate forecasts
r2_score = skm.r2_score(test, predictions)
print('Test r2_score: %.3f' % r2_score)
# plot forecasts against actual outcomes
# plt.plot(test)
# plt.plot(predictions, color='red')
# plt.show()
return predictions
def movingAverage(train_X, train_y):
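    # Error-correction ("MA") model: the residuals of a persistence forecast are
    # modeled with an AR process; returns the fitted AR coefficients and the last
    # `window` residuals used to seed later error predictions.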
    # calculate residuals of the persistence model on the training set
train_resid = [train_y[i]-train_X[i] for i in range(len(train_X))]
# model the training set residuals
model = AR(train_resid)
model_fit = model.fit()
window = model_fit.k_ar
coef = model_fit.params
lag = train_resid[len(train_resid)-window:]
return coef, lag
def AutoRegression(train):
model = AR(train)
model_fit = model.fit()
window = model_fit.k_ar
coef = model_fit.params
lag = train[-window:]
return coef, lag
def custom_cv_2folds(X, kfolds, th):
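    # Generate kfolds random train/test index splits: pick one random offset inside
    # every block of th*6 rows (presumably to respect the temporal horizon), then
    # split the selected indices 70/30 into train and test.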
n = X.shape[0]
print('******** creating custom CV:')
i = 1
while i <= kfolds:
np.random.seed(i)
idx = np.empty(0, dtype=int)
for index in np.arange(0, n-(th*6), step=(th*6), dtype=int):
randwindowpoint = np.random.randint(0, 6, size=1)
idx = np.append(idx, [randwindowpoint+index])
# print(idx)
print(idx[0: 10])
yield idx[: int(len(idx)*0.7)], idx[int(len(idx)*0.7):]
i = i+1
def custom_cv_kfolds_testdataonly(X, kfolds, th):
n = X.shape[0]
# print(n)
print('******** creating custom CV:')
i = 1
while i <= kfolds:
np.random.seed(i)
idx = np.empty(0, dtype=int)
for index in np.arange(0, n-(th*6), step=(th*6), dtype=int):
randwindowpoint = np.random.randint(0, 6, size=1)
idx = np.append(idx, [randwindowpoint+index])
# print(idx)
print(idx[0:10])
yield idx[:int(len(idx))]
i = i+1
def predict(coef, lag, window):
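    # AR forecast from fitted coefficients: yhat = coef[0] + sum_d coef[d] * lag[-d]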
yhat = coef[0]
for d in range(1, window):
yhat += coef[d] * lag[-d]
return yhat
def main():
models = ['MA']
targets = ['ph', 'dissolved_oxygen'] # 'pHcategory', 'DOcategory'
sondefilename = 'leavon'
for model_name in models:
print(model_name)
for target in targets:
if target.find('category') > 0:
cat = 1
directory = 'Results/bookThree/1sonde/output_Cat_' + \
model_name+'/final_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1', 'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1', 'F1_all': 'F1_all', 'fbeta': 'fbeta'}
else:
cat = 0
directory = 'Results/bookThree/1sonde/output_Reg_' + \
model_name+'/final_models/'
data = {'target_names': 'target_names', 'method_names': 'method_names', 'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons', 'CV': 'CV',
'file_names': 'file_names', 'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mpe': 'mpe', 'rmse': 'rmse', 'R2': 'R2'}
if not os.path.exists(directory):
os.makedirs(directory)
directoryresult = directory + 'Results/'
if not os.path.exists(directoryresult):
os.makedirs(directoryresult)
print(directoryresult)
testsondefilename = 'utlcp'
resultFileName = 'results_'+testsondefilename + '_' + \
target+str(time.time())+'.csv'
dfheader = pd.DataFrame(data=data, index=[0])
dfheader.to_csv(directoryresult+resultFileName,
index=False, header=False)
path = 'Sondes_data/train_Summer/'
testpath = 'Sondes_data/test_Summer/'
method = 'OrgData'
for n_steps in [1]:
for PrH_index in [1, 3, 6, 12, 24, 36, 48, 60, 72]: # 1, 3, 6, 12,
# files = [f for f in os.listdir(path) if f.endswith(
# '.csv') and f.startswith(sondefilename)]
# file = files[0]
# print('Window: '+str(n_steps) + ' TH: ' +
# str(PrH_index)+' '+method+' '+target)
# dataset = pd.read_csv(path+file)
# ######################
# # FOR MA
# ######################
# dataset = temporal_horizon(dataset, PrH_index, target)
# train = dataset[target]
# train_target = dataset['Target_'+target]
# print(train.head())
# print(train_target.head())
# custom_cv = func.custom_cv_kfolds_testdataonly(
# train, 1)
# for train_index in custom_cv:
# train = train[train_index].values
# train_target = train_target[train_index].values
# coef, lag = movingAverage(
# train, train_target)
                    # np.save(directory+'MA_model_'+target +
                    #         '_'+str(PrH_index)+'.npy', coef)
# np.save(directory+'MA_data_'+target +
# '_'+str(PrH_index)+'.npy', lag)
coef = np.load(directory+'MA_model_'+target +
'_'+str(PrH_index)+'.npy')
lag = np.load(directory+'MA_data_'+target +
'_'+str(PrH_index)+'.npy')
######################
# TEST sets
######################
# start_time = time.time()
# testsondefilename = re.sub('wo_', '', sondefilename)
files = [f for f in os.listdir(testpath) if f.endswith(
'.csv')and f.startswith(testsondefilename)]
file1 = files[0]
print('Window: ' + str(len(lag)) + ' TH: ' +
str(PrH_index)+' '+method+' '+target+file1)
testdataset = pd.read_csv(testpath+file1)
testdataset = temporal_horizon(
testdataset, PrH_index, target)
test = testdataset[target]
test_target = testdataset['Target_'+target]
# print(test.head())
# print(test_target.head())
i = 1
custom_cv = func.custom_cv_kfolds_testdataonly(
test, 100)
for test_index in custom_cv:
test_y = test[test_index].values
# for MA
test_y_targets = test_target[test_index].values
# walk forward over time steps in test
history = [lag[i] for i in range(len(lag))]
predictions = list()
for t in range(len(test_y)):
# persistence
yhat = test_y[t]
# predict error
length = len(history)
window = len(coef)
hl = [history[i]
for i in range(length-window, length)]
pred_error = predict(coef, hl, window)
yhat = yhat + pred_error
predictions.append(yhat)
error = test_y_targets[t] - yhat
history.append(error)
if cat == 1:
predictions = np.array(
predictions).astype(int)
fpath = 'predictions_' + method+target+'_Window' + \
str(n_steps) + '_TH' + \
str(PrH_index)+'_CV' + \
str(i) + testsondefilename
# '_vals_'+str(p)+'_'+str(d) + \
# '_'+str(q)+'_'+\
# print(len(predictions))
# print(len(test_y_targets))
cm0 = func.forecast_accuracy(
predictions, test_y_targets, cat)
if i % 10 == 0:
plt.scatter(np.arange(len(test_y_targets)),
test_y, s=1)
plt.scatter(np.arange(len(predictions)),
predictions, s=1)
plt.legend(['actual', 'predictions'],
loc='upper right')
plt.savefig(directoryresult+fpath+'.png')
plt.close()
data = {'Actual': test_y_targets,
'Predictions': predictions}
df = pd.DataFrame(data=data)
df.to_csv(directoryresult+fpath, index=False)
if cat == 1:
data = {'target_names': target, 'method_names': method, 'window_nuggets': n_steps, 'temporalhorizons': PrH_index, 'CV': i,
'file_names': testsondefilename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}
elif cat == 0:
data = {'target_names': target, 'method_names': method, 'window_nuggets': n_steps, 'temporalhorizons': PrH_index, 'CV': i,
'file_names': testsondefilename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}
df = pd.DataFrame(data=data, index=[0])
df.to_csv(directoryresult+resultFileName,
index=False, mode='a', header=False)
i = i+1
if __name__ == "__main__":
main()
| 2.84375
| 3
|
4. Algorithms - Sorting/4 - QuickSort.py
|
PacktPublishing/Data-Structures-and-Algorithms-The-Complete-Masterclass
| 25
|
12776550
|
def quickSort(my_array):
qshelper(my_array, 0, len(my_array) - 1)
return my_array
def qshelper(my_array, start, end):
if start >= end:
return
pivot = start
left = start + 1
right = end
while right >= left:
if my_array[left] > my_array[pivot] and my_array[right] < my_array[pivot]:
my_array[left], my_array[right] = my_array[right], my_array[left]
if my_array[left] <= my_array[pivot]:
left += 1
if my_array[right] >= my_array[pivot]:
right -= 1
my_array[pivot], my_array[right] = my_array[right], my_array[pivot]
qshelper(my_array, start, right - 1)
qshelper(my_array, right + 1, end)
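# Hypothetical quick check for the in-place quicksort above:
if __name__ == "__main__":
    sample = [8, 5, 2, 9, 5, 6, 3]
    print(quickSort(sample))  # expected: [2, 3, 5, 5, 6, 8, 9]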
| 3.796875
| 4
|