blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb8412cedc39fa28a1baad4fe8b90e486f742869 | ea6cf2360eead61c3534beeb1f394d0fe0905ee9 | /myapp/migrations/0001_initial.py | 6f78a0420ad000f89a3642691ef23eb52793f9e8 | [] | no_license | 123tian/mypro | 9f5d89cdf13945525ea943fbe38f47a8c5d52219 | e0c1d5691130139f16359fdf0099e8d12c310eb9 | refs/heads/master | 2020-07-13T15:22:49.251644 | 2019-08-30T00:50:11 | 2019-08-30T00:50:11 | 205,105,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-08-28 07:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the 'Publisher' model
    backed by table 'pub_db'. Do not edit by hand once applied; add a
    follow-up migration instead."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, unique=True, verbose_name='名称')),
                # NOTE(review): field name 'adddress' is misspelled; renaming it now
                # would require a schema migration, so it is left unchanged here.
                ('adddress', models.CharField(max_length=128, verbose_name='地址')),
            ],
            options={
                'db_table': 'pub_db',
                'verbose_name_plural': '出版社',
                'verbose_name': '出版社',
            },
        ),
    ]
| [
"3053366571@qq.com"
] | 3053366571@qq.com |
cf8df78c19fed7972b683782a743137388fcee12 | 6b518cf14ea3f59fd59136dbd2a7ac70234bb96e | /pspipe.py | 4523f7e32db887641957d2c80753873e9e831bcc | [] | no_license | simula67/advanced-python-course-material | 8064a1adddff45b0980d4bd1948fdeb2f88aec89 | 98870da337cbc001bcf4215ce44f82f0430fd3ce | refs/heads/master | 2016-09-06T12:29:37.397321 | 2015-06-29T05:10:19 | 2015-06-29T05:10:19 | 38,228,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | __author__ = 'antonjoj'
import subprocess
# Windows demo of `cat file | grep root`: `type` prints the file, `find`
# filters lines containing "root". Both run through the shell.
cat = subprocess.Popen('type datafiles\\passwd', shell=True, stdout=subprocess.PIPE)
find = subprocess.Popen('find \"root\"', stdout=subprocess.PIPE, shell=True, stdin=cat.stdout)
# communicate() returns a (stdout, stderr) tuple, so each "line" below is an
# entire stream, not a single line; stderr is None since it was not piped.
for line in find.communicate():
    if line:
        print line  # Python 2 print statement
"simula67@gmail.com"
] | simula67@gmail.com |
9cf98b7b4745bf18117c0e68108e370d4226cd25 | 24e21c68bc2c4f1c3f58b96ae13512968a919024 | /memoryAndMulti/threadDemo.py | 557b1eb374bb59d12ee08ff31de2c68f27abdcf2 | [] | no_license | maketubu7/spiderDemo | 0308e88815c2035fa33acd1c4ca85329d2435034 | 9c5e78fdafba37a08e51c2e988c54957feed5b0f | refs/heads/master | 2021-02-09T18:43:32.493539 | 2020-11-11T09:13:21 | 2020-11-11T09:13:21 | 244,314,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # -*- coding: utf-8 -*-
# @Time : 2020/2/27 0:10
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : threadDemo.py
# @Software: PyCharm
from threading import Thread
import threading,time
from typing import Optional
def loop():
    """Worker body: print the current thread's name, then count 0..4."""
    print(threading.current_thread().name)
    n = 0
    while n < 5:
        print(n)
        n += 1
def use_thread():
    """Demo: run loop() on a named Thread and wait for it to finish."""
    print(threading.current_thread().name)
    t = Thread(target=loop,name='loop_thread')
    ## start the worker thread
    t.start()
    ## block the caller until the worker finishes
    t.join()
class my_thread(Thread):
    """Thread-subclass demo: run() counts 0..4, printing the counter and
    the thread name, sleeping one second between steps."""
    def __init__(self):
        super(my_thread,self).__init__()
        self.n = 0  # loop counter, touched only by this thread's run()
    def run(self):
        while self.n < 5:
            print(self.n)
            print(threading.current_thread().name)
            time.sleep(1)
            self.n += 1
if __name__ == "__main__":
    # use_thread()
    # Demo the subclass-based API instead of the target-function one above.
    t = my_thread()
    t.start()
    t.join()
"601176930@qq.com"
] | 601176930@qq.com |
53dc368768d6cfff5f959b78f918c8c2190e3a95 | c8abe556d3f01071d2df5d784746e0ea8a590544 | /code/box_plots.py | 7d7e5c5bfb3355d5d036e8188730a156d2f1866c | [] | no_license | 02450-Intro-to-ML-ETHOTOS/automobiles | 0fc0b8816edd074f74b3e75bb56e9908149ecccb | 3a9f62e4ea2718188cd5067258be568550a0bed4 | refs/heads/master | 2023-04-09T19:53:20.335351 | 2021-04-20T12:30:14 | 2021-04-20T12:30:14 | 343,370,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | from pca import *
from scipy.stats import zscore
# Box plots of the automobiles dataset.
# NOTE(review): relies on names imported by `from pca import *` on the first
# line of this file (dropped_data, attributeNames, C, y, classNames, and the
# matplotlib.pyplot functions) — confirm against pca.py.
#Extract Attributes wheel-base, length, width, height
#dropped_data = dropped_data.iloc [:, [0,1,2,3]]
#attributeNames = np.array(dropped_data.columns)
X = np.array(dropped_data)
#The data matrix should be standardized to have zero mean and
#unit standard deviation
X = zscore(X, ddof=1)
#------------------------------------------------------------------------------
### Box plot of all (standardized) attributes
figure(figsize=(10,7))
boxplot(X)
r = np.arange(1,X.shape[1]+1)
xticks(r,attributeNames)
xticks(rotation = 45)
ylabel('m')
title('Automobiles - boxplot')
show()
#------------------------------------------------------------------------------
# Column 12 is presumably the price column — verify against the dataframe.
price_plot = dropped_data.iloc[:, [12]]
attributeName = np.array(price_plot.columns)
price_plot = np.array(price_plot)
### Boxplot of prices
boxplot(price_plot)
r = np.arange(1,price_plot.shape[1]+1)
xticks(r,attributeName)
xticks(rotation = 45)
ylabel('dollar')
title('Automobiles - boxplot')
show()
#------------------------------------------------------------------------------
# One subplot of boxplots per class, with a shared y-range padded by 10%.
figure(figsize=(25,7))
for c in range(C):
    subplot(1,C,c+1)
    class_mask = (y==c) # binary mask to extract elements of class c
    # or: class_mask = nonzero(y==c)[0].tolist()[0] # indices of class c
    boxplot(X[class_mask,:])
    #title('Class: {0}'.format(classNames[c]))
    title('Class: '+classNames[c])
    xticks(range(1,len(attributeNames)+1), [a[:7] for a in attributeNames], rotation=45)
    y_up = X.max()+(X.max()-X.min())*0.1; y_down = X.min()-(X.max()-X.min())*0.1
    ylim(y_down, y_up)
show()
"70723194+Erikinol@users.noreply.github.com"
] | 70723194+Erikinol@users.noreply.github.com |
36d479e9d62294ff7f9e197330fafe40334c2523 | b5e5182e0031b84af7eeccd8cdbf9dac457a4c03 | /plugins/InformaticaPlugin/operators/execute_profile.py | 96f5e257636bff5b44e96145725ba289b4b4872b | [
"MIT"
] | permissive | consag/informatica-airflow-plugin | 0c8f86dd4b466a74515f65b648489dd95b1e75ef | 50c2b713d71b80eb0a5202c84b9e1d3173f0512f | refs/heads/master | 2023-05-12T09:10:56.966475 | 2022-09-16T21:58:02 | 2022-09-16T21:58:02 | 219,062,936 | 3 | 0 | MIT | 2023-05-08T20:32:30 | 2019-11-01T21:08:28 | Python | UTF-8 | Python | false | false | 1,551 | py | from airflow.models import BaseOperator
from airflow import utils as airflow_utils, AirflowException
from execution import runProfile
from InformaticaPlugin.operators import available_arguments
import os
class ExecuteProfile(BaseOperator):
    """Airflow operator that runs an Informatica profile via runProfile.

    Keyword arguments whose names appear in ``available_arguments`` are
    translated into CLI flags; a ``target`` kwarg selects an environment
    script that is sourced before the profile is executed.
    """
    @airflow_utils.apply_defaults
    def __init__(self, profile_path, **kwargs):
        # NOTE(review): profile_path is accepted but never used here —
        # presumably consumed elsewhere; confirm.
        self.infa_arguments = []
        self.pre_command = None
        for key, value in kwargs.items():
            if key == 'target':
                # Source the per-target scheduler environment before running.
                self.pre_command = '. ' + os.environ.get('configDir', '.') + '/scheduler_env.' + value + '.sh'
            else:
                if key in available_arguments:
                    self.infa_arguments.append(available_arguments[key] + " " + value)
        # NOTE(review): all kwargs (including 'target' and flag names) are
        # forwarded to BaseOperator unchanged — verify BaseOperator accepts them.
        super(ExecuteProfile, self).__init__(
            **kwargs)
    def execute(self, context):
        """Run the Informatica profile and raise AirflowException on a
        non-zero return code."""
        print("dag: " + self.dag.full_filepath)
        print("dag_id: " + self.dag_id)
        print("task_type: " + self.task_type)
        print("task id: " + self.task_id)
        print("infa_arguments: " + ' '.join(self.infa_arguments))
        if self.pre_command is None:
            print("no pre_command provided.")
        else:
            print("pre_command: " + self.pre_command)
        infa = runProfile.ExecuteInformaticaProfile(self.infa_arguments, log_on_console=False,
                                                    pre_command=self.pre_command)
        result = infa.runit(infa.arguments)
        if result.rc != 0:
            raise AirflowException("RunProfile failed: " + result.message)
| [
"jac@jacbeekers.nl"
] | jac@jacbeekers.nl |
1ef6fa14c5eddb4800ee9cb3c0f1c9d522ce8cf8 | 826a4e7a2d80a802186480cd88201d4d547c9239 | /Assignment 5/TextProcessor.py | 526088cae4c65bbc6f25d7ebdb5975d006c28853 | [] | no_license | ThomasBakkenMoe/IINI4014 | 3b971d952ddad0ffbe6d78a4c44e8dfe2f868e36 | ed43382ef1fff34c951444ea051cb6a925badc15 | refs/heads/master | 2022-12-18T00:26:35.085578 | 2020-09-21T13:02:31 | 2020-09-21T13:02:31 | 297,340,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | py | import os
import re
class WordAndNumber:
    '''
    Object that contains a word and the frequency of that word

    Constructor takes:
        word: string: the saved word
        frequency: int: the frequency of the saved word
    '''
    def __init__(self, word, frequency):
        self.word = word
        self.frequency = frequency

    def __repr__(self):
        # The original bound a local variable named ``str``, shadowing the
        # builtin; an f-string produces the same "word: frequency" text
        # without that hazard.
        return f"{self.word}: {self.frequency}"
def findFile(filename):
    '''
    Function that finds a file in the subdirectory tree beneath the current
    working directory (bottom-up walk) and returns its relative path.

    takes:
        :param filename: string: the name of the file to be found
    :return: string: the path to the first matching file
    :raises Exception: if no file with that name exists under the cwd
    '''
    for root, dirs, files in os.walk(os.getcwd(), topdown=False):
        if filename in files:
            return os.path.join(root, filename)
    raise Exception("File not found")
def getWordFreqs(filename):
    '''
    Function that counts the frequency of words in a file.

    Each line is lower-cased and every run of non-alphanumeric characters is
    collapsed to a single space before splitting on spaces. (Note: this can
    produce empty-string "words" at line ends, which are counted too —
    behaviour preserved from the original implementation.)

    :param filename: string: the name of the file (located via findFile)
    :return: List: list of WordAndNumber objects, sorted by frequency,
             decreasing; equal frequencies keep first-seen order.
    '''
    filename = findFile(filename)
    # dict keeps first-insertion order (Python 3.7+), so with the stable
    # sort below, ties appear in the same order the original produced.
    # Using a dict also replaces the original O(words * distinct-words)
    # linear scan of the result list with O(1) lookups.
    counts = {}
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(filename, "r", encoding='utf-8') as reader:
        for currentLine in reader:
            words = re.sub('[^A-Za-z0-9]+', " ", currentLine).lower().replace("\n", "").split(' ')
            for word in words:
                counts[word] = counts.get(word, 0) + 1
    returnList = [WordAndNumber(word, freq) for word, freq in counts.items()]
    return sorted(returnList, key=lambda element: element.frequency, reverse=True)
def getWordsLine(filename, checkWord):
    '''
    Function that creates a list of 1-based line numbers where a check word
    is present. The function scans the file line by line.

    The keyword is padded with a space on each side, so a match requires a
    space before and after the word. NOTE(review): words at the very start
    or end of a line are therefore missed — preserved from the original.

    :param filename: string: name of the file to be checked (via findFile)
    :param checkWord: string: the keyword used when checking each line
    :return: list of int line numbers containing the padded keyword
    '''
    filename = findFile(filename)
    if not checkWord.startswith(" "):
        checkWord = " " + checkWord
    if not checkWord.endswith(" "):
        checkWord = checkWord + " "
    returnList = []
    # "with" closes the handle (the original leaked it); enumerate replaces
    # the manual readline/counter loop with identical numbering.
    with open(filename, "r", encoding='utf-8') as reader:
        for lineNumber, currentLine in enumerate(reader, start=1):
            if checkWord in currentLine:
                returnList.append(lineNumber)
    return returnList
if __name__ == "__main__":
    # Ad-hoc smoke test against local sample files (must exist under cwd).
    print(getWordsLine("test.txt", "hi"))
    print(getWordFreqs("11-0.txt"))
"thomasbakkenmoe@gmail.com"
] | thomasbakkenmoe@gmail.com |
3c3a5c6f1ca843658cc6f58129f94c4f986fe500 | 83fb1e03b3c83cd6b794f5ac7b3705ed439a25a0 | /temperature chart.py | 608f03e6f5017da81a58acdf16f8dffc3dc6e8af | [] | no_license | lenatester100/class_assignment | e15d57c3f6288d7a5d459e43ee45e0b1506dd8e3 | 859340822bbd43318486ebff84e00a047d507fa7 | refs/heads/master | 2021-01-11T14:24:41.709114 | 2017-02-17T00:36:53 | 2017-02-17T00:36:53 | 81,388,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | def main():
print("celsius", "fahrenheit")
a=0
while a<=100:
a = a + 5
celsiustemp = a
fahrenheittemp = (celsiustemp *9/5+32)
print(celsiustemp," ", fahrenheittemp)
main() | [
"noreply@github.com"
] | lenatester100.noreply@github.com |
fda1f90a4be88c7944f2879764d5c153faed9cb0 | c57439f0c98af370ace65f9d55ef5a457bedc531 | /ydk/models/ipv6/Cisco_IOS_XR_ipv6_ma_subscriber_cfg.py | a66d84f0a2924a7e9df63458243f00228eb1dd1d | [
"Apache-2.0"
] | permissive | myahmao/ydk-py | c932fbd8245e554227cce0fd723d9a22887b0c40 | 2f367d93f2088d4abdc2f2bb10ca4864952b458a | refs/heads/master | 2021-01-14T11:32:29.064494 | 2016-03-15T22:44:05 | 2016-03-15T22:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | """ Cisco_IOS_XR_ipv6_ma_subscriber_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv6\-ma\-subscriber package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-subscriber\-infra\-tmplmgr\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYDataValidationError
# Auto-generated by ydk from the Cisco YANG model; regenerated rather than
# hand-edited.
class Ipv6ReachableVia_Enum(Enum):
    """
    Ipv6ReachableVia_Enum

    Ipv6 reachable via

    """

    """
    Source is reachable via interface on which
    packet was received

    """
    RECEIVED = 1


    @staticmethod
    def _meta_info():
        from ydk.models.ipv6._meta import _Cisco_IOS_XR_ipv6_ma_subscriber_cfg as meta
        return meta._meta_table['Ipv6ReachableVia_Enum']
| [
"manradha@cisco.com"
] | manradha@cisco.com |
ace60bf3e38f65536ff99a19866beecf5e035254 | 9d51cd52883fc2dec636ad16552592cabd491696 | /sentence.py | 57c4638e7473c67bc8e8917350fdf80700eda150 | [] | no_license | nathan108642/stringtest | 6f84ee31458e533232f86334d1e4d75ba4b8dee0 | 856c6e47f5d3fe44b34fc9691ba9455024792884 | refs/heads/master | 2020-05-17T20:31:14.919602 | 2014-02-05T10:26:24 | 2014-02-05T10:26:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | start = "hello,"
question = input ("are you coming out tonight")
end = "no,i'm going out to dinner"
sentence = start + question = answer
| [
"nathanhayter@hotmail.com"
] | nathanhayter@hotmail.com |
a55f91c3b4e428b323ddb4834febff18bff53cb7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02818/s319321320.py | ec787c89f517dd3576a0c30e3d24e3bf48cf1b60 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | # ABC149
# B Greesy Takahashi
# takはA枚、aokiはB枚、TAKはK回
a, b, k = map(int, input().split())
if k > a:
if k - a > b:
print(0,0)
else:
print(0,b - (k - a))
else:
print(a-k,b)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d0323d73f123fb6fa6c9c340dcdb2c274040f8c8 | 42b2b574e400afa875888136bb2a8843c59f2700 | /07_Misc/Creative Cheating [crypto] (150)/cheating.py | b6a80a0cc0dd8eb457d0c31b1299b2d30809edba | [] | no_license | yizhimanpadewoniu/CTFLike | d7c572d3c4f39486ff01180c021fde3993a1f252 | d822891ede0456d77ea0b191d3726a35d5fec4f5 | refs/heads/master | 2020-05-22T21:35:22.995073 | 2018-11-07T12:41:14 | 2018-11-07T12:41:14 | 186,530,148 | 0 | 1 | null | 2019-05-14T02:31:41 | 2019-05-14T02:31:41 | null | UTF-8 | Python | false | false | 1,432 | py | import base64
import re
import gmpy
from Crypto.PublicKey import RSA
class RSAPerson(object):
    """Full RSA keypair built from (e, p, q), exposing sign/verify and
    encrypt/decrypt through PyCrypto's RSA implementation.

    NOTE(review): uses the legacy PyCrypto raw key.sign/verify/encrypt API
    (textbook RSA, no padding) — appropriate for this CTF, not for real use.
    """
    def __init__(self, e, p, q):
        self.n = p * q
        self.e = e
        self.p = p
        self.q = q
        # Private exponent d = e^{-1} mod (p-1)(q-1), via gmpy's modular inverse.
        self.d = long(gmpy.invert(e, (p-1)*(q-1)))
        self.key = RSA.construct((long(self.n), long(self.e), self.d))

    def sign(self, message):
        # Second argument is the unused 'K' parameter of PyCrypto's raw sign.
        return self.key.sign(message, '')

    def verify(self, message, signature):
        return self.key.publickey().verify(message, [signature])

    def encrypt(self, message):
        return self.key.publickey().encrypt(message)

    def decrypt(self, message):
        return self.key.decrypt(message)
# Reconstruct the covert message: each base64 packet carries a sequence
# number, a ciphertext byte (encrypted to Bob) and Alice's signature.
alice = RSAPerson(
    0x10001,
    38456719616722997,
    44106885765559411
)

bob = RSAPerson(
    0x10001,
    49662237675630289,
    62515288803124247
)

regex = re.compile(r'SEQ = (\d+); DATA = 0x(.*?)L; SIG = 0x(.*?)L;')

packets = []
with open('stream.txt') as lines:
    for line in lines:
        decoded = base64.b64decode(line)
        match = regex.match(decoded).groups()
        seq = int(match[0])
        signature = int(match[2], 16)
        data = int(match[1], 16)
        # Decrypt with Bob's private key; keep only packets whose signature
        # verifies under Alice's public key (filters decoy traffic).
        data = bob.decrypt(data)
        if alice.verify(data, signature):
            data = chr(data)
            packets.append((
                seq,
                data,
                signature
            ))

# Reassemble the plaintext in sequence order (Python 2 print statement).
print ''.join([packet[1] for packet in sorted(packets)])
| [
"dengnanyi@163.com"
] | dengnanyi@163.com |
1a1bd3525f993dcb297270edc0c3315f6f3a3478 | 0e7b1f88ad2d8d37e1f4071209a41a3c7e7c4172 | /pyexec/flask/pytestoutput.py | 7be7ea347f45c308459365a643e38d3093cdf420 | [] | no_license | veryfreebird/codebase | a74a524b526f2ac4418b0a08485c777d3a6235ff | 32f80b56e7b1d9aff4cfef27cd3b75063667b97e | refs/heads/master | 2023-08-31T11:40:31.981743 | 2023-08-28T08:11:36 | 2023-08-28T08:11:36 | 53,237,893 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import pytest
from datetime import datetime
def test_add():
    # Deliberately failing assertion — presumably here to demo a failure in
    # the generated HTML report; confirm before "fixing".
    assert 1 == 2
def gen_report_name():
    """Build a timestamped HTML report filename, e.g.
    '测试报告2019-08-30-00-50-11.html'."""
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    return f'测试报告{timestamp}.html'
if __name__ == '__main__':
    # Run this file's tests and write an HTML report named with a timestamp.
    report_name = gen_report_name()
    pytest.main([f'--html=output/{report_name}'])
"79344359@qq.com"
] | 79344359@qq.com |
5408d8eb40da578adce33eff9310ab27978446dd | 8b0fdeee0f998f9d347789741251b447d8640e7e | /tf_encrypted/protocol/aby3/fp.py | 633a7ae92f9472c0379b97e252111907d757e358 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | tf-encrypted/tf-encrypted | 639bd31190b7da7378de2c738e5e120309a4f027 | 2e7b569617697e49177e8b73c32957e1b939bbaf | refs/heads/master | 2023-08-17T14:33:23.975918 | 2023-02-08T02:25:50 | 2023-02-08T02:25:50 | 126,222,784 | 1,007 | 196 | Apache-2.0 | 2023-05-10T12:26:14 | 2018-03-21T18:22:13 | Python | UTF-8 | Python | false | false | 11,638 | py | # This file include the floating-point operations
from math import ceil
from math import log2
import numpy as np
import tensorflow as tf
from ..protocol import TFEPrivateTensor
from ..protocol import TFEPublicTensor
def _fp_div(
    prot,
    a: "TFEPrivateTensor",
    b: "TFEPrivateTensor",
    nonsigned: bool,
    precision: int = 1,
):
    """Approximate a / b as a * (1/b) using the private reciprocal routine;
    ``precision`` sets the number of refinement iterations."""
    with tf.name_scope("fp_div"):
        return a * _fp_recip_private(prot, b, nonsigned, precision=precision)
def _fp_div_private_private(
    prot,
    a: "TFEPrivateTensor",
    b: "TFEPrivateTensor",
    nonsigned: bool,
    precision: int = 1,
):
    # Private / private: must go through the approximate reciprocal.
    return _fp_div(prot, a, b, nonsigned, precision=precision)
def _fp_div_public_private(
    prot,
    a: "TFEPublicTensor",
    b: "TFEPrivateTensor",
    nonsigned: bool,
    precision: int = 1,
):
    # Divisor is private, so the approximate reciprocal is still required.
    return _fp_div(prot, a, b, nonsigned, precision=precision)
def _fp_div_private_public(
    prot,
    a: "TFEPrivateTensor",
    b: "TFEPublicTensor",
    nonsigned: bool,
    precision: int = 1,
):
    # Public divisor: division can be done directly (nonsigned/precision unused).
    return a / b
def _fp_div_public_public(
    prot,
    a: "TFEPublicTensor",
    b: "TFEPublicTensor",
    nonsigned: bool,
    precision: int = 1,
):
    # Both operands public: plain division (nonsigned/precision unused).
    return a / b
def _fp_sqrt2(prot, a: "TFEPrivateTensor", precision: int = 1):
    """Jointly compute g ~= sqrt(a) and h ~= 0.5 / sqrt(a), starting from an
    initial 1/sqrt(a) estimate and refining with Goldschmidt-style
    iterations (see inline note on per-iteration accuracy)."""
    c15 = prot.define_constant(1.5)
    c05 = prot.define_constant(0.5)
    y = approx_sqrt_inv(prot, a, precision=precision)  # y approximates 1 / sqrt(a)
    g = a * y
    h = y * c05
    for _ in range(precision):
        """
        Over iterations,
            g -> sqrt(b)
            h -> sqrt(1 / b) * 0.5
        1 iteration should give less than 10^{-4} relative error
        """
        r = c15 - g * h  # r = 1 + error
        g = g * r
        h = h * r
    return g, h
def _fp_recip_private(prot, x: "TFEPrivateTensor", nonsigned, precision: int = 1):
    """
    Approximately compute 1/x from x.
    Apply the quintic iteration from
    http://numbers.computation.free.fr/Constants/Algorithms/inverse.html

    ``nonsigned`` is forwarded to the normalization step (skips the sign
    extraction when the input is known non-negative).
    """
    with tf.name_scope("fp_reciprocal"):
        sgf, exp = __fp_normalize(
            prot, x, nonsigned
        )  # x = sgf / exp, then 1 / x = 1/sgf * exp
        one = prot.define_constant(1.0)
        two = prot.define_constant(2, apply_scaling=False)
        # 2.9281928 - 2 * s approximates 1/s with small relative errors
        # in the interval s \in [0.5, 1.)
        # By using integer factor '2', we can save one truncation.
        inv_sgf = 2.9281928 - two * sgf
        appr_recip = inv_sgf * exp  # ~ 1/x
        for _ in range(precision):
            """One iteration should give us very good approximation
            (10^{-5} relative error ratio).
            More iterations, more precision."""
            res = one - x * appr_recip
            res2 = res * res
            appr_recip = appr_recip + appr_recip * (one + res2) * (
                res + res2
            )  # quintic iteration
        return appr_recip
def _fp_inv_sqrt_private(prot, a: "TFEPrivateTensor", precision: int = 1):
    """Approximate 1/sqrt(a) for a private tensor (low-precision variant)."""
    # low precision
    return approx_sqrt_inv(prot, a, precision=precision)
    # high precision with extra 3 rounds of communication
    # two = prot.define_constant(2, apply_scaling=False)
    # _, h = _fp_sqrt2(prot, a)
    # return h * two
def _fp_sqrt_private(prot, a: "TFEPrivateTensor", precision: int = 1):
    """Approximate sqrt(a); the paired 0.5/sqrt(a) result is discarded."""
    g, _ = _fp_sqrt2(prot, a, precision=precision)
    return g
def prefix_ORs(b: "TFEPrivateTensor", k: int):
    r"""
    b := (..., b3, b2, b1, b0) where b0 is the least significant bit
    compute y := (y_{k-1}, y_{k-2}, ..., y0) where y_i \in {0, 1}
        y_i = bit_or_{i <= j < k} bj
    The first yi = 1 is the first significant bit that bi = 1.
    """
    # running ORs from MSB to LSB: doubling the shift each round gives a
    # log2(k)-depth prefix-OR over the low k bits.
    n, e = int(ceil(log2(k))), 1
    with tf.name_scope("prefix_ORs"):
        for i in range(0, n):
            b = b | (b >> e)
            e = e << 1
        return b
def _do_fp_log_private(prot, x: "TFEPrivateTensor", base: "float", precision: int = 1):
    """Approximate log(x, base) for a private fixed-point tensor: split x
    into fraction * 2^j via bit tricks, evaluate a quadratic log2
    approximation on the fraction, and add the (adjusted) exponent."""
    k = prot.fixedpoint_config.precision_fractional
    m = k + prot.fixedpoint_config.precision_integral
    n = prot.int_factory.nbits
    logn = (
        int(log2(n) + 1) * 2
    )  # enough bit length to represent the exponent \in [0, 128)
    assert x.is_scaled, "Er.. tricky here."
    assert (
        2 * k > m
    ), "We assume 2^{-j} can be represent with 2k-bit precisions for all j in [0, m)"
    assert base >= 2.0, "log(x, base) shoule with base >= 2."
    # Change-of-base factor: log_base(x) = log2(x) / log2(base).
    adjust = 1.0 / np.log2(base)
    with tf.name_scope("fp_log"):
        # bit-decomposition. Make sure the higher bits are all 0 via shifting.
        x_bits = (prot.a2b(x, m) << (n - m)) >> (n - m)
        y_bits = prefix_ORs(x_bits, m)
        # z has exactly one set bit: the most significant bit of x.
        z_bits = y_bits ^ (y_bits >> 1)
        rev_z_bits = prot.bit_reverse(z_bits)
        exponent = prot.b2a(rev_z_bits >> (n - 2 * k - 1), 2 * k)  # NOTE: shift 1-less.
        exponent.is_scaled = True
        log_exponent = (
            prot.b2a(prot.xor_indices(z_bits), logn) * 2**k
        )  # j + k with k-bit precision
        log_exponent.is_scaled = True
        log_exponent = log_exponent - k
        if base != 2.0:
            log_exponent = log_exponent * adjust
        frac = x * exponent  # frac is in the interval [1., 2.]
        # The approximation coefficients are for log2(x), we need to
        # adjust them via multiplying 1./log2(base)
        """ """
        log_frac = ((-0.4326728 * adjust) * frac + (2.276597 * adjust)) * frac + (
            -1.843924 * adjust
        )
        return log_frac + log_exponent
def _fp_log2_private(prot, x: "TFEPrivateTensor"):
    """Base-2 logarithm of a private fixed-point tensor."""
    return _do_fp_log_private(prot, x, 2.0)
def _fp_log10_private(prot, x: "TFEPrivateTensor"):
    """Base-10 logarithm of a private fixed-point tensor."""
    return _do_fp_log_private(prot, x, 10.0)
def _fp_ln_private(prot, x: "TFEPrivateTensor"):
    """Natural logarithm of a private fixed-point tensor."""
    return _do_fp_log_private(prot, x, np.e)
def __fp_normalize(prot, b: "TFEPrivateTensor", nonsigned=False):
    r"""
    Given [b], to compute [sgf], and [exp] such that b = sgf / exp
    where sgf \in [0.5, 1)

    When ``nonsigned`` is True the sign-extraction step is skipped
    (caller promises b >= 0).
    """
    k = prot.fixedpoint_config.precision_fractional
    m = k + prot.fixedpoint_config.precision_integral
    n = b.backing_dtype.nbits
    assert b.is_scaled, "Er.. tricky here."
    assert (
        2 * k > m
    ), "We assume 2^{-j} can be represent with 2k-bit precisions for all j in [0, m)"
    with tf.name_scope("fp_normalize"):
        # juhou: sign.is_scaled is False
        if not nonsigned:
            # sign = 1 - 2*msb, i.e. +1 for non-negative, -1 for negative.
            msb = prot.bit_extract(b, m)
            two = np.ones(shape=msb.shape, dtype=int) * 2
            sign = 1 - prot.mul_ab(prot.define_constant(two), msb)
            x = sign * b  # abs(b)
        else:
            sign, x = 1, b
        # bit-decomposition. Make sure the higher bits are all 0 via shifting.
        x_bits = (prot.a2b(x, m) << (n - m)) >> (n - m)
        """
        y_j = 1 <-> for all i <= j, y_i = 1.
        z_j = 1 <-> 2^j < x = 2^j + x0 < 2^{j+1}.
        And there is only one z_j = 1, and others are all 0.
        """
        y_bits = prefix_ORs(x_bits, m)
        z = y_bits ^ (y_bits >> 1)
        r"""
        There exists one z_j = 1 and other z_i = 0.
        As as result, \sum_i 2^i * z_i = 2^j (i.e., b2a(z))
        If we reverse the bits {z_i} to obtain {z'_i} for z'_i = z_{128 - i}.
        Then \sum_i 2^i * z'_i = 2^{128 - j}.
        Also, if we right shift {z'_i} by (128 - m)-steps.
        Then we obtain 2^{m - j}, which equals to the fixed-point
        representation of 2^{-j} within m-bits precision.
        """
        # NOTE: We couldn't obtain 2^{-j} with only k-bits precision
        # because j > k might be possible (i.e., b = 2^k * b' for
        # real value b' > 1).
        # Instead, we preserve 2k-bit precision, i.e., c = 2^{-j} with 2k bits precision
        _exp = prot.b2a(prot.bit_reverse(z) >> (n - 2 * k), 2 * k)
        _exp.is_scaled = True
        sgf = x * _exp  # significant should in [0.5, 1). Thus, sgf should be positive.
        exp = _exp if nonsigned else sign * _exp
        return sgf, exp
def approx_sqrt_inv(prot, x: "TFEPrivateTensor", precision: int = 1):
    """
    From x, to compute an approximation of 1/sqrt(x).

    Normalizes x into [0.25, 0.5), evaluates a quadratic initial guess,
    refines it with Newton iterations, then rescales by the square root of
    the extracted power-of-two exponent.
    """

    def select(x, y, bit):
        """
        return y if bit = 0 else x.
        """
        c = np.ones(shape=bit.shape) * (x - y)
        return prot.mul_ab(prot.define_constant(c), bit) + y

    k = prot.fixedpoint_config.precision_fractional
    n = x.backing_dtype.nbits
    # using top half bits as integer, bottom half bits as fraction.
    s = (n // 2) - k
    xs = x << s
    assert k >= s
    assert n % 2 == 0
    assert x.is_scaled, "Er.. tricky here."
    with tf.name_scope("inv_sqrt"):
        # bit-decomposition.
        x_bits = prot.a2b(xs, n)
        y_bits = prefix_ORs(x_bits, n)
        z_bits = (
            y_bits ^ (y_bits >> 1)
        ) << 1  # note: x = c * 2^m where c \in [0.25, 0.5)
        rev_z_bits = prot.bit_reverse(z_bits)
        frac = prot.b2a(rev_z_bits, n)
        # By default, bottom k bits used as fraction,
        # do truncate manually here.
        frac.is_scaled = False
        normalized = frac * x  # normalized \in [0.25, 0.5)
        normalized = prot.truncate(normalized, amount=k + s)
        normalized.is_scaled = True
        """
        f(b) = 4.7979 * b^2 - 5.9417 * b + 3.1855 approixmates 1/sqrt(b) in [0.25, 0.5)
        with less than 0.7% relative error
        """
        sqrt_inv = ((4.7979 * normalized) - 5.9417) * normalized + 3.1855
        # more iteration, more precision (Newton's method for 1/sqrt).
        for i in range(precision):
            sqrt_inv = sqrt_inv * (3 - normalized * sqrt_inv * sqrt_inv)
            sqrt_inv = sqrt_inv * 0.5
        """
        Indeed, the exponetent part is 2^{j+k+s}
        where k is the scaling factor, s = (n // 2) - k
        We want to compute sqrt(2^{-j}) with k-bit precision,
        i.e., sqrt(2^{-j}) * 2^k.
        In other words, we compute sqrt(2^{-j}) * 2^k from 2^{j+k+s}.
        1. We first obtain 2^{-(j+k+s)} from 2^{j+k+s}.
        2. Then we compute 2^{floor(-(j+k+s)/2)}. Rewrite it as
           2^{floor(-(j+k+s)/2)} = c * 2^{floor(-j/2)} * 2^{floor(-(k+s)/2)}
           where c depends on the parity of j, and k + s.
        3. We compute the parity of j+k+s, i.e., check the LSB of j+k+s.
        4. Suppose k+s is even, 2^{floor((s-k)/2)} = 2^{(s-k)/2}.
           Then we can cancel this term via 2^{s-k/2}.
           If lsb(j+k+s) = 0 <-> j is even. In this case,
           2^{floor(-j/2)} = 2^{-j/2} = sqrt(2^{-j}).
           If lsb(j+k+s) = 1 <-> j is odd. Then
           2^{floor(-j/2)} * 2^{-1/2} = sqrt(2^{-j}).
           Suppose k+s is odd,
           We need 2^{(s-k)//2} * 2 to cancel 2^{floor(-(k+s)/2)}.
           If lsb(j+k+s) = 0 <-> j is odd. In this case,
           2^{floor(-j/2)} * 2^{-1/2} = sqrt(2^{-j}).
           If lsb(j+k+s) = 1 <-> j is even. Then
           2^{floor(-j/2)} = 2^{-j/2} = sqrt(2^{-j}).
        """
        sum_jks = prot.xor_indices(z_bits)
        lsb = prot.bit_extract(sum_jks, 0)  # lsb = 0 <-> j+k+s is even
        exponet = prot.b2a(
            prot.bit_gather(rev_z_bits | rev_z_bits >> 1, 0, 2), k + s
        )  # 2^{floor(-(j+k+s)/2)}
        esk = 2 ** ((k - s) // 2)
        if (k + s) & 1 == 0:  # k+s is even which means lsb = 1 <=> j is odd
            exponet = exponet * select(esk, esk * np.sqrt(2.0), lsb)
        else:  # k+s is odd which means lsb = 1 <=> j is even
            exponet = exponet * select(esk * np.sqrt(2.0), esk * 2, lsb)
        return sqrt_inv * exponet
| [
"noreply@github.com"
] | tf-encrypted.noreply@github.com |
4312c5132af6818ca35ed0f704d81bfac2ddb825 | 5963c12367490ffc01c9905c028d1d5480078dec | /tests/components/wallbox/test_config_flow.py | 6b5a05a3486830b64b8d0d53f7b409dfb288bb79 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 5,028 | py | """Test the Wallbox config flow."""
import json
from unittest.mock import patch
import requests_mock
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.wallbox import InvalidAuth, config_flow
from homeassistant.components.wallbox.const import DOMAIN
from homeassistant.core import HomeAssistant
test_response = json.loads(
'{"charging_power": 0,"max_available_power": 25,"charging_speed": 0,"added_range": 372,"added_energy": 44.697}'
)
async def test_show_set_form(hass: HomeAssistant) -> None:
    """Test that the setup form is served."""
    flow = config_flow.ConfigFlow()
    flow.hass = hass
    # With no user_input the flow must re-render the initial "user" form.
    result = await flow.async_step_user(user_input=None)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Patch the hub so authentication raises, forcing the error branch.
    with patch(
        "homeassistant.components.wallbox.config_flow.WallboxHub.async_authenticate",
        side_effect=InvalidAuth,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_authenticate(hass):
    """Test we surface invalid_auth when the Wallbox API returns HTTP 403."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Both the token and the status endpoints answer 403 (forbidden).
    with requests_mock.Mocker() as mock_request:
        mock_request.get(
            "https://api.wall-box.com/auth/token/user",
            text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
            status_code=403,
        )
        mock_request.get(
            "https://api.wall-box.com/chargers/status/12345",
            text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
            status_code=403,
        )
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )
        # NOTE(review): this second, identical async_configure call looks
        # redundant (copy/paste) — confirm whether re-submitting is intended.
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Auth succeeds (200) but the charger status endpoint 404s,
    # which should map to the cannot_connect error.
    with requests_mock.Mocker() as mock_request:
        mock_request.get(
            "https://api.wall-box.com/auth/token/user",
            text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
            status_code=200,
        )
        mock_request.get(
            "https://api.wall-box.com/chargers/status/12345",
            text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
            status_code=404,
        )
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )

    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_validate_input(hass):
    """Test the happy path: valid credentials create the Wallbox config entry."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with requests_mock.Mocker() as mock_request:
        # Authentication endpoint returns a valid token.
        mock_request.get(
            "https://api.wall-box.com/auth/token/user",
            text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
            status_code=200,
        )
        # Charger status endpoint also responds successfully.
        mock_request.get(
            "https://api.wall-box.com/chargers/status/12345",
            text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
            status_code=200,
        )
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "station": "12345",
                "username": "test-username",
                "password": "test-password",
            },
        )
    assert result2["title"] == "Wallbox Portal"
    assert result2["data"]["station"] == "12345"
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
0d7d6b9040e1904e68f4c40434dae2fa937d9729 | 61a4a77fb2347172390a8528a068207d50804d61 | /python/function.py | 9f4ddc9d868a1a66f2efc2163aba671fecd73d89 | [] | no_license | king636/myLearning | 84180c05dbc418bd30919928ed95fb3788d191d8 | 7a3c3eee70bbe425c3575b2ac4837d9e4dabed1a | refs/heads/master | 2021-09-06T04:25:27.255477 | 2018-02-02T09:41:54 | 2018-02-02T09:41:54 | 113,003,979 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,305 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 函数用法
# python内置的函数
# 1. abs()取绝对值,js也有内置函数:Math.abs()
# 20
print(abs(-20))
# 参数不对,报错:TypeError: abs() takes exactly one argument (2 given)
# js不报错
# abs(-20,1)
# 参数类型不对,报错:TypeError: bad operand type for abs(): 'str'
# js的Math.abs()参数类型不对,返回NaN
# abs('a')
# 内置函数2:max,取最大值 js用Math.max()
# 8
print(max(1,8,3,5))
# 内置函数3:
# int()
# str()
# bool()
# 1
print(int('1'))
# 报错:ValueError: invalid literal for int() with base 10: 'a'
# print(int('a'))
# 1234
print(str(1234))
# None
print(str(None))
# True
print(bool(1))
# False
print(bool(None))
# False
print(bool(''))
# True
print(bool('a'))
# 内置函数:hex()
# 0xa
print(hex(10))
############################################
# 定义函数
# 函数不写return时,返回None.而js是undefined
def my_abs(x):
    """Return the absolute value of x (hand-rolled equivalent of abs())."""
    return x if x >= 0 else -x
# 99
print(my_abs(-99))
## 函数如何在别的文件中导入?参看function_my_abs.py,使用from function import my_abs来导入
# js中我只知道只能通过宿主(比如浏览器),在html文件中导入js文件,然后使用其函数.
# 单纯的js文件应该无法像python一样导入其他文件的函数,所以python更像c,可以导入头文件.js是脚本,寄托于宿主.
# 空函数与pass关键字,占位用,确保代码正常运行
def fun():
    """Placeholder function: `pass` keeps the empty body syntactically valid."""
    pass
# pass放到判断中,代表什么也不做.如果不用pass,会有语法错误
x = 20
if x > 10:
pass
# my_abs函数扩展,增加类型检查,以及抛出异常
# 检查参数是否是int或者float类型,如果不是,抛出TypeError异常 oprand:操作数
# js中检查x的类型:typeof x !== 'number'
# python使用raise Error对象来抛出异常,js使用throw('...')
def my_abs_extend(x):
    """Absolute value with an explicit argument-type check.

    Raises TypeError for anything that is not an int or a float,
    mirroring how the built-in abs() rejects unsupported operands.
    """
    if not isinstance(x, (int, float)):
        raise TypeError('bad operand type')
    return x if x >= 0 else -x
#TypeError: bad operand type
# my_abs_extend('a')
### 返回多个值,事实上与js中的解构赋值类似,分析如下:
import math
# move函数根据点移动时的坐标 位移 和角度,算出新的点的坐标
# 这里参数angle=0定义了默认值,默认参数的使用:与js类似,必须写在后面
def move(x, y, step, angle=0):
    """Translate point (x, y) by `step` along direction `angle` (radians).

    Returns the new coordinates as an (nx, ny) tuple.  The y axis is
    screen-style (inverted): ny decreases as sin(angle) grows.
    """
    dx = step * math.cos(angle)
    dy = step * math.sin(angle)
    return x + dx, y - dy
x, y = move(100, 100, 60, math.pi / 6)
# 151.96152422706632 70.0
print(x,y)
# 事实上相当于以下,move函数返回的是tuple
r = move(100, 100, 60, math.pi / 6)
# (151.96152422706632, 70.0)
print(r)
# 这里的作用就是解构赋值,js中数组对应的解构赋值:[a,b] = [151.96152422706632,70.0]
(a,b) = r
# 151.96152422706632 70.0
print(a,b)
# 与js一样(不过js是用[]),解构赋值的层级也需要对应
(a,b,(c,d)) = (1,2,(3,4))
# c = 3
print('c = ' + str(c))
# 当然也可以和js一样,python的list也可以解构赋值(tuple和list基本是一样的,除了不可变性质)
[a,b,[c,d]] = [10,20,[30,40]]
print('c = ' + str(c))
# js可以从对象中解构赋值,python是否可以从dict解构赋值呢?
# 报错:SyntaxError: can't assign to literal
# {a,b} = {
# 'name':'nick',
# 'age':30,
# }
# 根据上面报错的提示,不能赋值给literal, 修改为tuple
(a,b) = {
'name':'nick',
'age':30,
}
# 赋值的是key:name: name,age: age
print('name: ' + a + ',age: ' + b)
# 不像js,python的dict只能解构到key,要获取value,如下:
person = {
'name':'nick',
'age':30,
}
(a,b) = person
# name: nick
print('name: ' + person[a])
# 等价于
# name: nick
print('name: ' + person['name'])
# 加入层次和使用list
person = {
'name':'nick',
'age':30,
'address':{
'city':'Xiamen',
'country':'China'
}
}
# 报错:ValueError: too many values to unpack (expected 2)
# 所以dict的解构不能使用层次,那么就需要二次解构了
# (a,b,(c,d)) = person
(a,b,c) = person
# address
print(c)
(a,b) = person[c]
# city
# 这里经过二次解构
print(a)
### 上面的tuple换成list也是一样的,说明list和tuple在解构赋值时是一样用法
[a,b,c] = person
# address
print(c)
[a,b] = person[c]
# city
print(a)
### 默认参数,可变参数,关键字参数,命名关键字参数
## 注意默认参数可能会发生改变的情况
def abc(L=[]):
    """Append the sentinel 'END' to L and return L.

    The mutable default argument is intentional: the surrounding script
    uses this function to demonstrate that the default list is created
    once at definition time and is therefore shared between calls.
    """
    sentinel = 'END'
    L.append(sentinel)
    return L
# 第一次调用默认参数:['END']
print(abc())
# 第二次调用:['END', 'END']
# L每次调用后被改变,因为函数定义的时候python就算出默认参数变量L的值.
# js也有类似的问题,所以默认参数一定要指向不可变对象
print(abc())
# 默认参数的问题可以修改如下:
def xyz(L=None):
    """Append 'END' to L and return it, avoiding the mutable-default trap.

    Using None as the sentinel means a fresh list is created on every
    call that omits L, so repeated calls do not share state.
    """
    L = [] if L is None else L
    L.append('END')
    return L
# ['END']
print(xyz())
# ['END']
print(xyz())
## 可变参数
# 定义一个函数,参数可传入list或者tuple
# 通过导入collections模块,判断参数是否是Iterable(list或tuple), 如果是才能使用for...in
import collections
def calc(numbers):
    """Return the sum of squares of the elements of an iterable.

    Raises:
        TypeError: if `numbers` is not iterable.
    """
    # `collections.Iterable` was removed in Python 3.10; the ABC lives in
    # `collections.abc`.  Imported locally so this fix is self-contained.
    from collections.abc import Iterable
    if not isinstance(numbers, Iterable):
        raise TypeError('is not iterable')
    # Accumulate into `total` instead of shadowing the builtin `sum`.
    total = 0
    for n in numbers:
        total += n * n
    return total
# 用法:
# 14
print(calc([1,2,3]))
# 14
print(calc((1,2,3)))
# TypeError: is not iterable
# print(calc(1))
# 改为可变参数,加*
def calc(*numbers):
    """Return the sum of squares of the positional arguments.

    The original re-checked `numbers` for iterability, but *args is
    always packed into a tuple so the check could never fail; it also
    relied on `collections.Iterable`, which was removed in Python 3.10.
    """
    return sum(n * n for n in numbers)
# 14
print(calc(1,2,3))
# 不能再传入list了,报错:TypeError: can't multiply sequence by non-int of type 'list'
# print(calc([1,2,3]))
# 这时要改为:
li = [1,2,3]
# 14
print(calc(li[0],li[1],li[2]))
# 再改:*list 表示把list的所有元素作为可变参数传入
# 14
print(calc(*li))
# 0
print(calc())
## 关键字参数 **
# 关键字参数组装成dict,参数个数任意
def registe(name, age, **other):
    """Print the required fields plus any extra keyword arguments.

    Bug fix: the original print() left `age` out of its argument list,
    so the value never appeared in the output (the script's recorded
    outputs show ",age: ,other:" with no age value).
    """
    print('name:', name, ',age:', age, ',other:', other)
# name: nick ,age: ,other: {}
registe('nick',30)
# name: nick ,age: ,other: {'city': 'Xiamen', 'country': 'China'}
registe('nick',30,city='Xiamen',country='China')
# 关键字参数用处:比如注册时,输入必须信息后,还可以输入额外信息
# 与可变参数类型:
extra = {
'city':'Xiamen',
'country':'China'
}
# name: Bob ,age: ,other: {'city': 'Xiamen', 'country': 'China'}
registe('Bob',50,city=extra['city'],contry=extra['country'])
# 简化用法
# name: Bob ,age: ,other: {'city': 'Xiamen', 'country': 'China'}
registe('Bob',50,**extra)
## 命名关键字参数
# 对于关键字参数,如果要限制参数名字,使用命名关键字参数,用*隔开
def registe(name, age, *, city, country):
    """Print a registration record; city and country are keyword-only."""
    fields = (name, age, city, country)
    print(' '.join(str(field) for field in fields))
# TypeError: registe() missing 2 required keyword-only arguments: 'city' and 'country'
# 可见,命名关键字参数个数一致,且参数名要写,还要写对
# registe('Nick',30)
# nick 20 Shanghai China
registe('nick',20,city='Shanghai',country='China')
# 如果用可变参数,那么就不用*隔开了
def registe(name, age, *other, city, country):
    """Print a registration record; extra positional args land in `other`."""
    fields = (name, age, other, city, country)
    print(' '.join(str(field) for field in fields))
# cathy 18 () Xian China
registe('cathy',18,city='Xian',country='China')
# cathy 18 ((1, 2, 3),) Xian China
registe('cathy',18,(1,2,3),city='Xian',country='China')
# kate 20 () Xiamen China
registe('kate',20,**extra)
#### 参数组合,上面的参数可以组合使用,但是必须确保顺序:
# 必选参数 默认参数 可变参数 命名关键字参数 关键字参数
# 注意:命名关键字参数在关键字参数前面
def f1(a, b, c=0, *change, d, **extra):
    """Demonstrate every parameter kind in one signature.

    Order: positional, default, var-positional, keyword-only, var-keyword.
    """
    parts = ('a=', a, 'b=', b, 'c=', c, 'change=', change, 'd=', d, 'extra=', extra)
    print(' '.join(str(p) for p in parts))
def f2(a, b, *, d, **extra):
    """Like f1 but with no defaults or var-positional; d is keyword-only."""
    parts = ('a=', a, 'b=', b, 'd=', d, 'extra=', extra)
    print(' '.join(str(p) for p in parts))
# a= 1 b= 2 c= 3 change= ((3, 4),) d= abc extra= {'name': 'nick', 'age': 30}
f1(1,2,3,(3,4),d='abc',name='nick',age=30)
# 命名关键字d是不能省略的
# TypeError: f1() missing 1 required keyword-only argument: 'd'
# f1(1,2)
# a= 1 b= 2 c= 0 change= () d= 30 extra= {}
f1(1,2,d=30)
### 重点来了:任意函数参数都可以通过fun(*arg1,**arg2)来调用,不管实际的参数怎么定义
t = (1,2,3,4)
d = {'d':50,'city':'Beijing','country':'China'}
# a= 1 b= 2 c= 3 change= (4,) d= 50 extra= {'city': 'Beijing', 'country': 'China'}
f1(*t,**d)
# 因为有命名关键字参数d,所以dict d中必须有'd'
d = {'city':'Beijing','country':'China'}
# 报错:TypeError: f1() missing 1 required keyword-only argument: 'd'
# f1(*t,**d)
## 递归的使用
# 使用递归要注意防止栈溢出:函数调用是通过栈的数据结构实现的
# 每当进入一个函数调用,栈就会加一层栈帧,当函数退出,栈就减一层栈帧.
# 而栈的大小有限,如果递归层级太多(调用函数自身太多次),可能会栈溢出
# 比如使用递归计算阶乘:1*2*3*4*5...*n!
def fact(n):
    """Return n! computed by naive recursion (n >= 1).

    `fact(n - 1) * n` is not a tail call, so large n (e.g. 1000) still
    exhausts CPython's recursion limit — which is exactly what the
    surrounding demo illustrates.
    """
    return 1 if n == 1 else fact(n - 1) * n
# 120
print(fact(5))
# RecursionError: maximum recursion depth exceeded in comparison
# print(fact(1000))
## 防止栈溢出,可以通过尾递归优化
# 尾递归:函数返回的时候调用了函数自身(递归),return的不能是表达式,比如上面的:fact(n - 1) * n是表达式.
# 尾递归优化:当满足尾递归时,编译器会对其优化,使得不管递归调用多少次,都只会加一层栈帧
# 上面的递归函数进行改写:
def fact_iter(num, product=1):
    """Tail-recursive factorial accumulating the running product.

    CPython performs no tail-call optimisation, so this still hits the
    recursion limit for large `num`; the shape only illustrates the idiom.
    """
    return product if num == 1 else fact_iter(num - 1, num * product)
# 120
print(fact_iter(5))
# RecursionError: maximum recursion depth exceeded in comparison
# 这里仍然报错,是因为使用的python解释器并没有对尾递归进行优化
# print(fact_iter(1000))
#### 汉诺塔的移动:http://www.baike.com/wiki/%E6%B1%89%E8%AF%BA%E5%A1%94
# 三根柱子A,B,C A上从下往上按从大到小的顺序堆了N个盘子,计算将盘子堆到另外一根柱子上(这里从A到C)所需次数,一次只能移动一个盘子
# 并且大盘子不能堆在小盘子上.
# 按照数学归纳法:1个盘子->1次 2个盘子->3次 3个盘子->7次 4个盘子->15次...归纳出:2^n - 1次
# 找规律分析:
# 1个盘子:A-->C
# 2个盘子:A-->B A-->C B-->C
# 3个盘子:A-->C A-->B C-->B A-->C B-->A B-->C A-->C
# ...
# 过程理解:n个盘子,先从A移动n-1个到B(当前过程是复杂的);A还剩下一个时,从A移动到C;最后从B将n-1个移动到C
# 用递归实现:
def move(n, a, b, c):
    """Print the moves that shift n discs from peg a to peg c via peg b."""
    if n == 1:
        print('move', a, '-->', c)
        return
    # Park the top n-1 discs on b, move the largest disc to c, then
    # bring the parked discs from b to c.
    move(n - 1, a, c, b)
    move(1, a, b, c)
    move(n - 1, b, a, c)
# move A --> C
move(1,'A','B','C')
# move A --> B
# move A --> C
# move B --> C
move(2,'A','B','C')
# move A --> C
# move A --> B
# move C --> B
# move A --> C
# move B --> A
# move B --> C
# move A --> C
move(3,'A','B','C')
### 递归的理解不要陷入死胡同,总结出规律,如果确认是递归问题,那么理清过程去调用函数即可,别陷太深.... | [
"ChenBin@yaxon.com"
] | ChenBin@yaxon.com |
3f21ba69b37a994780c6046ac37ba58a78d26472 | 29e4005d970b66b1640e9570590addca66d8a53a | /czblog/settings.py | 35cdb6bd06d2d9225e8a6514798180faa3040b6c | [] | no_license | cz9025/czblog | ec07582fdd06d77349fd518b2a7d94daeffc535b | 50b712607bbe33c687e790dcc93a82e1f32f58bd | refs/heads/master | 2020-04-13T04:25:43.642086 | 2020-01-03T09:38:04 | 2020-01-03T09:38:04 | 162,960,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | # -*- coding: utf-8 -*-
"""
Django settings for czblog project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cfam^kgsdky$rs$is#s(5zklw1*7%b75#oslzjshlzz2$m@79z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'xadmin',
'crispy_forms',
'blog',
'center',
'news',
'shop',
'resume',
'myblog',
'interface',
'csdn',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'czblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media', ############wenjian
],
},
},
]
WSGI_APPLICATION = 'czblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'czblog',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
# LANGUAGE_CODE = 'zh-hans'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
AUTH_USER_MODEL = 'center.UserInfo'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# 放服务器上时,需打开 已作废
# STATIC_ROOT='/usr/local/src/webroot/blog/static/'
# MEDIA_ROOT = '/usr/local/src/webroot/media/'
# windows上需关闭
# STATIC_ROOT=os.path.join(BASE_DIR,'static')
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
| [
"897308902@qq.com"
] | 897308902@qq.com |
d3e98871213596ac415e24442f42d914dfd5ad66 | 28c06b1ebc6b361b96be62f5274c3f6affa991ab | /sat_modules/utils.py | daffe19f47096fd638002e1edfa21655ac17d88a | [] | no_license | caifti/sat | c5df52fa86c2de960f11ab267f38c4ebc38ca489 | 54380d66b75d46c37e48269b25682cce50157f5f | refs/heads/master | 2020-09-09T09:14:30.010315 | 2019-06-21T07:50:22 | 2019-06-21T07:50:22 | 221,409,359 | 0 | 0 | null | 2019-11-13T08:27:24 | 2019-11-13T08:27:24 | null | UTF-8 | Python | false | false | 3,492 | py | # -*- coding: utf-8 -*-
# Copyright 2018 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Satellite utils
Author: Daniel Garcia Diaz
Date: May 2018
"""
#Submodules
from sat_modules import config
#APIs
import zipfile, tarfile
import argparse
import numpy as np
import os, shutil
import json
import datetime
import utm
from netCDF4 import Dataset
from six import string_types
def valid_date(sd, ed):
    """
    Validate a (start, end) date pair and return it as datetime objects.

    Accepts either two datetime.date/datetime.datetime instances
    (returned unchanged) or two "%Y-%m-%d" strings, which are parsed.
    String dates must additionally satisfy start < end.

    Parameters
    ----------
    sd : str "%Y-%m-%d" or datetime.date
    ed : str "%Y-%m-%d" or datetime.date

    Returns
    -------
    (sd, ed) : tuple of datetime

    Raises
    ------
    argparse.ArgumentTypeError
        If the format is unsupported or start >= end.
    """
    if isinstance(sd, datetime.date) and isinstance(ed, datetime.date):
        # NOTE(review): date instances are passed through without an
        # ordering check, mirroring the original behaviour.
        return sd, ed
    # six.string_types is (str,) on Python 3, which this project targets.
    if isinstance(sd, str) and isinstance(ed, str):
        # Bug fix: the original wrapped the ordering check in the same
        # bare `except`, so the deliberately raised "Unsupported date
        # value" error was swallowed and re-reported as a *format* error.
        # Keep the try block limited to parsing.
        try:
            start = datetime.datetime.strptime(sd, "%Y-%m-%d")
            end = datetime.datetime.strptime(ed, "%Y-%m-%d")
        except ValueError:
            msg = "Unsupported format date: '{} or {}'.".format(sd, ed)
            raise argparse.ArgumentTypeError(msg)
        if start < end:
            return start, end
        msg = "Unsupported date value: '{} or {}'.".format(start, end)
        raise argparse.ArgumentTypeError(msg)
    msg = "Unsupported format date: '{} or {}'.".format(sd, ed)
    raise argparse.ArgumentTypeError(msg)
def valid_region(r):
    """
    Validate that `r` is one of the configured regions.

    Parameters
    ----------
    r : str, e.g. "CdP"

    Raises
    ------
    argparse.ArgumentTypeError
        If the region is not listed in config.regions.
    """
    if r not in config.regions:
        msg = "Not a valid region: '{0}'.".format(r)
        raise argparse.ArgumentTypeError(msg)
def path():
    """
    Initialise the local dataset tree and the downloaded-files registry.

    Ensures <config.local_path>/<region> directories exist and that
    'downloaded_files.json' is present and valid JSON; when it is
    missing or corrupt, an empty registry of the form
    {"Sentinel-2": {region: []}, "Landsat 8": {region: []}} is written.
    """
    file = 'downloaded_files.json'
    list_region = config.regions
    local_path = config.local_path
    try:
        # `with` closes the handle (the original leaked it), and the
        # narrowed except no longer hides unrelated bugs behind the
        # original bare `except`.
        with open(os.path.join(local_path, file)) as data_file:
            json.load(data_file)
        return
    except (OSError, ValueError):
        # Missing/unreadable file or corrupt JSON (json.JSONDecodeError
        # subclasses ValueError): rebuild the registry below.
        pass
    os.makedirs(local_path, exist_ok=True)
    dictionary = {"Sentinel-2": {}, "Landsat 8": {}}
    for region in list_region:
        # exist_ok avoids the original crash when a region directory
        # already existed but the registry file did not.
        os.makedirs(os.path.join(local_path, region), exist_ok=True)
        dictionary['Sentinel-2'][region] = []
        dictionary['Landsat 8'][region] = []
    with open(os.path.join(local_path, file), 'w') as outfile:
        json.dump(dictionary, outfile)
| [
"garciad@ifca.unican.es"
] | garciad@ifca.unican.es |
3a7b75886db5b91af55617c1f0a203df4479f839 | fcad3e866d3e3815ba5e7d2c9b5d1ab2bb3917b9 | /face.py | 258946bff9c751a23f86c487b4fc7b8e497febce | [] | no_license | ranjeetbidwe/Kaggle-Autism | 520b20cf55b79bcb29aee28f711bbb46ff9891c7 | bda23282cf004cbe48fad86890d66da306ede835 | refs/heads/master | 2022-04-26T04:30:00.698400 | 2020-04-27T20:46:21 | 2020-04-27T20:46:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,609 | py | import os
import glob
import time
import keras
from PIL import Image
from os import listdir
from shutil import copyfile
from os.path import isfile, join
from matplotlib import pyplot as plt
from keras_vggface.vggface import VGGFace
from keras.engine import Input
from keras import applications
from keras.models import Model
import tensorflow as tf
import numpy as np
from keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D, Flatten, Dense, Dropout
from keras_vggface.utils import preprocess_input
from sklearn.metrics import classification_report, confusion_matrix
from MLEXPS.MLEXPS import *
from keras import backend as K
import random
random.seed(42)
tf.random.set_seed(42)
# Providing more training examples within certain distributions of age, gender, and race will increase the model's accuracy.
Height = 224
Width = 224
BatchSize = 24
lr_rate=.0015
Version = 5
load_model = False
model_path = ''
accuracy = 0
accuracyCount = 0
trainableCount = 30
def SaveModelImage(Model, Title):
    # Render the model architecture to an image file at `Title`
    # (requires pydot/graphviz via keras.utils.vis_utils).
    keras.utils.vis_utils.plot_model(Model, to_file=Title, show_shapes=True, show_layer_names=True)
    return
def Summary(Model):
    """Print the model's layer summary.

    Model.summary() already prints to stdout and returns None, so the
    original `print(Model.summary())` emitted a spurious trailing "None".
    """
    Model.summary()
    return
def resnet():
    """Build a headless ResNet50 truncated at the 'activation_49' layer.

    Bug fix: the original returned the undefined name `model` (NameError
    at call time), left `last_layer` unused, and contained a leftover
    debug print.  Wrap the truncated graph in a keras Model instead.
    NOTE(review): the layer name 'activation_49' is specific to this
    keras version's ResNet50 layer naming — confirm when upgrading.
    """
    BaseModel = applications.resnet50.ResNet50(weights=None, include_top=False, input_shape=(224, 224, 3))
    last_layer = BaseModel.get_layer('activation_49').output
    return Model(BaseModel.input, last_layer)
def MakeModel(dlsize):
    # Build a SENet50 VGGFace backbone with a small dense classification
    # head (2 classes, softmax).  Relies on module globals: Height, Width,
    # trainableCount, lr_rate.
    # NOTE(review): `dlsize` is never used — the dense head is hard-coded
    # to 128 units; confirm whether it was meant to size that layer.
    BaseModel = VGGFace(model='senet50', include_top=False, input_shape=(Height, Width, 3), pooling='avg')
    last_layer = BaseModel.get_layer('avg_pool').output
    x = keras.layers.Flatten(name='flatten')(last_layer)
    # L2-regularised hidden layer plus dropout to limit overfitting.
    x = keras.layers.Dense(128, kernel_regularizer = keras.regularizers.l2(l = 0.015), activation='relu')(x)
    x = keras.layers.Dropout(rate=.4, seed=42)(x)
    out = keras.layers.Dense(2, activation='softmax', name='classifier')(x)
    DerivedModel = keras.Model(BaseModel.input, out)
    # # Everything is trainingable
    # # Weights are used at init
    # for layer in DerivedModel.layers:
    #     layer.trainable = True
    #
    #
    # # Everything in the base model is frozen
    # # Only top layers are trainable
    # for layer in BaseModel.layers:
    #     layer.trainable = False
    # Freeze everything, then unfreeze only the last `trainableCount`
    # layers (fine-tuning the top of the backbone plus the new head).
    for layer in DerivedModel.layers:
        layer.trainable = False
    for layer in DerivedModel.layers[-trainableCount:]:
        layer.trainable = True
    DerivedModel.compile(keras.optimizers.Adam(lr=lr_rate), loss='categorical_crossentropy', metrics=['accuracy'])
    return DerivedModel
def clearWeights(model):
    """Zero out all of the model's weights in place and return the model.

    Bug fix: the original rebound the loop variable
    (`weight = K.zeros(...)`) inside the for loop, which never modified
    the fetched list, so set_weights() restored the unchanged weights and
    the function was a no-op.
    """
    zeroed = [np.zeros_like(w) for w in model.get_weights()]
    model.set_weights(zeroed)
    return model
def preprocess_input_new(x):
    # Apply VGGFace v2 preprocessing to a PIL image and convert back to a
    # PIL image, so this can be used as an ImageDataGenerator
    # `preprocessing_function`.
    img = preprocess_input(keras.preprocessing.image.img_to_array(x), version = 2)
    return keras.preprocessing.image.array_to_img(img)
class EarlyStoppingAtMinLoss(tf.keras.callbacks.Callback):
    """Progressively unfreeze layers when val_accuracy plateaus.

    Despite its name, this callback never stops training: whenever
    val_accuracy fails to improve for 10 * (unfreeze_steps + 1) epochs,
    it unfreezes 10 more layers from the top of the model and recompiles
    with the module-global `lr_rate`.
    """
    def __init__(self, trainableCount=30):
        # Leftover debug print from development.
        print('working')
        super(EarlyStoppingAtMinLoss, self).__init__()
        # Epochs at which an unfreeze step happened.
        self.epochCount = []
        # Number of layers (from the top) currently trainable.
        self.trainableCount = trainableCount
        # Total layer count; filled in on_epoch_end once self.model exists.
        self.max = 0
    def on_train_begin(self, logs=None):
        # Reset the plateau counter and best-accuracy tracker per fit().
        self.accuracyCount = 0
        self.accuracy = 0
    def on_epoch_end(self, epoch, logs=None):
        self.max = len(self.model.layers)
        print("Ending Epoch")
        # Track the best val_accuracy seen; count epochs without improvement.
        if logs['val_accuracy'] > self.accuracy:
            self.accuracy = logs['val_accuracy']
            self.accuracyCount = 0
        else:
            self.accuracyCount+=1
        # Plateau threshold grows with each unfreeze step already taken.
        if self.accuracyCount >= 10 * (len(self.epochCount)+1):
            self.epochCount.append(epoch)
            print('Adding train layers')
            self.accuracyCount = 0
            self.trainableCount += 10
            if self.trainableCount >= self.max:
                self.trainableCount = self.max
            # Re-freeze everything, then unfreeze the top trainableCount
            # layers, and recompile so the change takes effect.
            for layer in self.model.layers:
                layer.trainable = False
            for layer in self.model.layers[-self.trainableCount:]:
                layer.trainable = True
            self.model.compile(keras.optimizers.Adam(lr=lr_rate), loss='categorical_crossentropy', metrics=['accuracy'])
        print(self.epochCount)
if __name__ == "__main__":
timestr = time.strftime("%Y%m%d-%H%M%S")
model = MakeModel(1024)
# model = resnet()
# model = clearWeights(model)
model.compile(keras.optimizers.Adam(lr=lr_rate), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
TrainPath = 'D:/Autism-Data/Kaggle/v' + str(Version) + '/train'
ValidPath = 'D:/Autism-Data/Kaggle/v' + str(Version) + '/valid'
TestPath = 'D:/Autism-Data/Kaggle/v' + str(Version) + '/test'
TrainGen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=preprocess_input_new,
horizontal_flip=True,
rotation_range=45,
width_shift_range=.01,
height_shift_range=.01).flow_from_directory(
TrainPath,
target_size=(Height, Width),
batch_size=BatchSize)
ValidGen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=preprocess_input_new).flow_from_directory(
ValidPath,
target_size=(Height, Width),
batch_size=BatchSize,
shuffle=False)
TestGen = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=preprocess_input_new).flow_from_directory(
TestPath,
target_size=(Height, Width),
batch_size=BatchSize,
shuffle=False)
os.makedirs("models/h5/" + str(timestr), exist_ok=True)
filepath = "models/h5/" + str(timestr) + "/" + "weights-improvement-{epoch:02d}-{val_accuracy:.4f}.hdf5"
SaveModelImage(model, "models/h5/" + str(timestr) + "/" + "Graph.png")
copyfile('face.py', "models/h5/" + str(timestr) + "/face.py")
checkpoint = keras.callbacks.callbacks.ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
reduce_lr = keras.callbacks.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.9, patience=5, min_lr=0.00001)
ModelCallbacks = keras.callbacks.callbacks.LambdaCallback(
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None)
first = 5
if not load_model:
# data = model.fit_generator(
# generator = TrainGen,
# validation_data= ValidGen,
# epochs=first,
# callbacks=[ModelCallbacks, reduce_lr, checkpoint],
# verbose=1)
models = [model]
args = [{'generator':TrainGen,
'validation_data':TestGen,
'epochs':first,
'callbacks':[ModelCallbacks, reduce_lr, EarlyStoppingAtMinLoss()],
'verbose':1}]
ml = MLEXPS()
ml.setTopic('Autism')
ml.setCopyFileList(['face.py'])
ml.setModels(models)
ml.setArgList(args)
ml.generator = True
ml.saveBestOnly = False
ml.startExprQ()
else:
model = load_model(model_path)
Y_pred = model.predict_generator(TestGen)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(TestGen.classes, y_pred))
print('Classification Report')
target_names = ['Autistic', 'Non_Autistic']
print(classification_report(TestGen.classes, y_pred, target_names=target_names))
| [
"Bobar312@gmail.com"
] | Bobar312@gmail.com |
27cd1801d257361237f2eacb2dbcb8e287f6685b | 3f7d5999bb7e5a75454c8df2c5a8adcd1a8341ff | /tests/unit/modules/network/fortios/test_fortios_log_eventfilter.py | 32a1e9c532163bad832b3009b0d154dc776ce8a7 | [] | no_license | ansible-collection-migration/ansible.fortios | f7b1a7a0d4b69c832403bee9eb00d99f3be65e74 | edad6448f7ff4da05a6c856b0e7e3becd0460f31 | refs/heads/master | 2020-12-18T13:08:46.739473 | 2020-02-03T22:10:49 | 2020-02-03T22:10:49 | 235,393,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,342 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.ansible.fortios.plugins.modules import fortios_log_eventfilter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the module's Connection class for every test in this file."""
    connection_class_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.modules.fortios_log_eventfilter.Connection')
    return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_eventfilter_creation(mocker):
    """A successful set() reports changed=True and snake_case keys are mapped to dashes."""
    schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_eventfilter': {
            'compliance_check': 'enable',
            'endpoint': 'enable',
            'event': 'enable',
            'ha': 'enable',
            'router': 'enable',
            'security_rating': 'enable',
            'system': 'enable',
            'user': 'enable',
            'vpn': 'enable',
            'wan_opt': 'enable',
            'wireless_activity': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
    # The module must translate underscore keys to FortiOS dash keys.
    expected_data = {
        'compliance-check': 'enable',
        'endpoint': 'enable',
        'event': 'enable',
        'ha': 'enable',
        'router': 'enable',
        'security-rating': 'enable',
        'system': 'enable',
        'user': 'enable',
        'vpn': 'enable',
        'wan-opt': 'enable',
        'wireless-activity': 'enable'
    }
    set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_log_eventfilter_creation_fails(mocker):
    """A failed set() (HTTP 500) reports is_error=True and changed=False."""
    schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_eventfilter': {
            'compliance_check': 'enable',
            'endpoint': 'enable',
            'event': 'enable',
            'ha': 'enable',
            'router': 'enable',
            'security_rating': 'enable',
            'system': 'enable',
            'user': 'enable',
            'vpn': 'enable',
            'wan_opt': 'enable',
            'wireless_activity': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
    expected_data = {
        'compliance-check': 'enable',
        'endpoint': 'enable',
        'event': 'enable',
        'ha': 'enable',
        'router': 'enable',
        'security-rating': 'enable',
        'system': 'enable',
        'user': 'enable',
        'vpn': 'enable',
        'wan-opt': 'enable',
        'wireless-activity': 'enable'
    }
    set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_log_eventfilter_idempotent(mocker):
    """A 404 'no change' response is treated as idempotent: no error, changed=False."""
    schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_eventfilter': {
            'compliance_check': 'enable',
            'endpoint': 'enable',
            'event': 'enable',
            'ha': 'enable',
            'router': 'enable',
            'security_rating': 'enable',
            'system': 'enable',
            'user': 'enable',
            'vpn': 'enable',
            'wan_opt': 'enable',
            'wireless_activity': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
    expected_data = {
        'compliance-check': 'enable',
        'endpoint': 'enable',
        'event': 'enable',
        'ha': 'enable',
        'router': 'enable',
        'security-rating': 'enable',
        'system': 'enable',
        'user': 'enable',
        'vpn': 'enable',
        'wan-opt': 'enable',
        'wireless-activity': 'enable'
    }
    set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_log_eventfilter_filter_foreign_attributes(mocker):
    """Unknown attributes in the input are filtered out of the payload sent to set()."""
    schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'log_eventfilter': {
            'random_attribute_not_valid': 'tag',
            'compliance_check': 'enable',
            'endpoint': 'enable',
            'event': 'enable',
            'ha': 'enable',
            'router': 'enable',
            'security_rating': 'enable',
            'system': 'enable',
            'user': 'enable',
            'vpn': 'enable',
            'wan_opt': 'enable',
            'wireless_activity': 'enable'
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
    # 'random_attribute_not_valid' must not appear in the expected payload.
    expected_data = {
        'compliance-check': 'enable',
        'endpoint': 'enable',
        'event': 'enable',
        'ha': 'enable',
        'router': 'enable',
        'security-rating': 'enable',
        'system': 'enable',
        'user': 'enable',
        'vpn': 'enable',
        'wan-opt': 'enable',
        'wireless-activity': 'enable'
    }
    set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
aef286b0f3fd7f3e265e4d48a993b148f77f3f96 | ac89c1feea035c5e04c36036f692f059b78f9ce2 | /src/device_abstract.py | b0b1cf5497509e0f6ad565cb31626cfc9c4cd21e | [
"Apache-2.0"
] | permissive | urpylka/filesync | 3b4a64ad73138d2fdb0b51f394e7310edefafa2b | 9167148f27fefdfe56a7b1e2d84479cec56885c5 | refs/heads/master | 2021-07-11T14:58:53.987749 | 2021-06-29T06:57:11 | 2021-06-29T06:57:11 | 156,345,341 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,319 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:set ts=4 sw=4 et:
# Copyright 2018-2019 Artem Smirnov
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from threading import Event
class device_abstract(object):
    """Abstract base class for a storage/transfer device.

    Concrete subclasses implement the transfer primitives below; every
    unimplemented method raises NotImplementedError.

    Design notes (translated from the original Russian):
    1. How do we describe a common contract for a method whose different
       implementations return different results?  Perhaps return key/value
       pairs and write them to the DB — or should the method just return
       True/False?
    2. Some locks probably need to be passed into downloader/uploader:
       while transferring a large file the transfer may need to be paused,
       e.g. while arming the copter.
    3. download/upload could be made thread-based, so that plugging in
       file.open() would write to a file, and pointing them at each other
       would stream without saving locally.
    4. Library for continuing interrupted downloads.
    5. Chunked uploads with requests:
       https://stackoverflow.com/questions/13909900/progress-of-python-requests-post
    6. The "Not implemented method" messages should include the name of
       the class they are raised from.
    """

    @staticmethod
    def to_string(dic): return "device_abstract://"

    @staticmethod
    def get_fields():
        # The abstract device exposes no configurable fields.
        return []

    def __init__(self, **kwargs):
        # Keep the raw options; subclasses read what they need in _connect().
        self.kwargs = kwargs
        # Set (by _connect implementations) once the remote side is reachable.
        self.is_remote_available = Event()
        # Connect in the background so construction never blocks the caller.
        t = Thread(target=self._connect, args=())
        t.daemon = True
        t.start()

    def _connect(self):
        # Subclasses must establish the connection and signal
        # self.is_remote_available when the device becomes reachable.
        raise NotImplementedError("Not implemented method '_connect()'")

    def download(self, device_path, target_stream, chunk_size=1024, offset=0):
        """Copy *device_path* from the device into *target_stream*.

        Contract notes (translated):
        1. Runs in the calling thread.
        2. Raises an exception if something goes wrong.
        3. Any result values must be passed back by reference through an
           argument.
        """
        raise NotImplementedError("Not implemented method 'download()'")

    def upload(self, source_stream, device_path, chunk_size=1024, offset=0):
        """Copy *source_stream* onto the device at *device_path*.

        Contract notes (translated):
        1. Runs in the calling thread.
        2. Raises an exception if something goes wrong.
        3. Any result values must be passed back by reference through an
           argument.
        """
        raise NotImplementedError("Not implemented method 'upload()'")

    def get_list(self):
        """Return the device's file list.

        Contract notes (translated):
        1. Runs in the calling thread.
        2. Must return the list of files (an empty list when there are
           none) or raise an exception on failure.
        3. Returns a list of dicts: {"path": "", "size": "", "hash": ""}.
        """
        raise NotImplementedError("Not implemented method 'get_list()'")

    def ls(self, path):
        """List entries under *path*.

        Contract notes (translated):
        1. Runs in the calling thread.
        2. Must return the list of files (an empty list when there are
           none) or raise an exception on failure.
        3. Returns a list of paths.
        """
        raise NotImplementedError("Not implemented method 'ls()'")

    def is_dir(self, path):
        """Return whether *path* is a directory on the device."""
        raise NotImplementedError("Not implemented method 'is_dir()'")

    def mkdir(self, path):
        """Create directory *path* on the device."""
        raise NotImplementedError("Not implemented method 'mkdir()'")

    def get_size(self, path):
        """Return the size of *path* on the device."""
        raise NotImplementedError("Not implemented method 'get_size()'")

    def rename(self, old_path, new_path):
        """Rename *old_path* to *new_path* (effectively the same as move).

        Contract notes (translated):
        1. Runs in the calling thread.
        2. Raises an exception if something goes wrong.
        3. Any result values must be passed back by reference through an
           argument.
        """
        raise NotImplementedError("Not implemented method 'rename()'")

    def delete(self, remote_path):
        """Delete *remote_path* from the device.

        Contract notes (translated):
        1. Runs in the calling thread.
        2. Raises an exception if something goes wrong.
        3. Any result values must be passed back by reference through an
           argument.
        """
        raise NotImplementedError("Not implemented method 'delete()'")
| [
"urpylka@gmail.com"
] | urpylka@gmail.com |
8946fdaa77811cae476e4d34aeade4fcaa751ef1 | e200ad8a6e1aec6a3d3a55ed6223587a38460e27 | /app.py | 01e89251b10057eab1aca954f56b213b0fac7163 | [] | no_license | user-cube/QRCode_Flask | 3d1c09a3ba999c95430681d811f8acc6d2cc9eb6 | 6d8761005b3f0cf478f2ddbb383f2dafea0a92e8 | refs/heads/master | 2020-08-28T07:10:26.408452 | 2019-11-18T11:01:23 | 2019-11-18T11:01:23 | 217,631,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from flask import Flask, render_template, request, send_file
from flask_qrcode import QRcode
import home
# Application singleton and the QR-code extension bound to it.
app = Flask(__name__)
qrcode = QRcode(app)
# Navigation entries for the home page, built once at import time
# (presumably a list of route descriptors — confirm against home.Home.home).
paths = home.Home.home(app)
@app.route("/")
@app.route('/home')
def index():
return render_template("home.html", paths=paths)
@app.route('/qrcode')
def qrcodeGenerator():
    # Form page for generating a plain QR code.
    return render_template("qrcode.html")
@app.route('/qrcodeLogo')
def qrcodeLogo():
    # Form page for generating a QR code with an embedded logo.
    return render_template("qrcodeLogo.html")
@app.route("/genqrcode", methods=["GET"])
def get_qrcode():
data = request.args.get("data", "")
return send_file(qrcode(data, mode="raw", box_size=100, error_correction='H'), mimetype="image/png")
@app.route("/genqrcodeLogo", methods=["GET"])
def get_qrcodeImage():
data = request.args.get("data", "")
image = request.args.get("image", "")
return send_file(qrcode(data, mode="raw", box_size=100, error_correction='H', icon_img=image), mimetype="image/png")
if __name__ == "__main__":
app.run() | [
"ruicoelho@ua.pt"
] | ruicoelho@ua.pt |
1c4a53ccb57b78886be553a6979ec66c482364a8 | 3805d27bbd55a40594d8d5aebe9aa92f299e5a7f | /data_builder/mhdparser.py | d18b7f1b546ef52f3d1271c5292d11f852136cd9 | [] | no_license | novageno/lung | 7398b564d266f9401a43daac86e2ddc8ea497170 | 4ac74f842f07e1a411644e3a8a6d57aee9e0af4e | refs/heads/master | 2021-01-16T19:23:43.769888 | 2017-08-13T06:04:30 | 2017-08-13T06:04:30 | 100,157,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 18:22:44 2017
@author: genonova
"""
import simpleITK as sitk
class MhdParser():
def parse_mhd(self,mhd_dir):
| [
"lilhope@163.com"
] | lilhope@163.com |
50988401345a82502f2e380001be76f2af3666d2 | 25642cf53a941d9abac3f9b492ab76165e7ad4e9 | /event-notification-server/sunBotApi/migrations/0006_event_closed.py | 2e6fe9d95735a50ff509645afc96c2ab79d1117d | [] | no_license | Amwap/event-notification-bot | 112f3be678d0e373a11b46b3b2342d92fa01bc75 | 8ea3f64cde606a161739f9bc0d1ece477f9a1cf5 | refs/heads/master | 2023-06-10T02:59:03.744669 | 2021-06-25T20:56:36 | 2021-06-25T20:56:36 | 380,349,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Generated by Django 3.2.2 on 2021-05-14 23:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the ``closed`` flag to Event.

    dependencies = [
        ('sunBotApi', '0005_auto_20210515_0101'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='closed',
            # verbose_name is Russian for "Close application submissions".
            field=models.BooleanField(default=False, verbose_name='Закрыть приём заявок'),
        ),
    ]
| [
"Amwap"
] | Amwap |
7acaed873ed4e401b1f0ffb7abe466b74f9b4a75 | be26a7a4b2f6097dc930a3aa696bbdd173512485 | /sliding-puzzle/calvins_solution/sliding_puzzle_a_star.py | c95f6a5bace623cdd413f0a323e40be01909cf2c | [] | no_license | jchen8787/inting | c68361e941e01e2fdff8a6a472553cafe7baae04 | 0ae5caff8e1d33ad22d35ee95c7604066a34acf7 | refs/heads/master | 2020-05-26T17:52:33.557631 | 2019-09-10T20:36:08 | 2019-09-10T20:36:08 | 188,326,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | from copy import copy
from pdb import set_trace
class GameState(object):
    """One node in the A* search over 3x3 sliding-puzzle configurations.

    *board* is a flat list of 9 ints (0 = the blank tile); *move_history*
    is the string of moves ('U'/'D'/'L'/'R') that produced this state
    from the root.
    """

    # Target configuration (blank in the bottom-right corner).
    GOAL = [1, 2, 3, 4, 5, 6, 7, 8, 0]

    def __init__(self, board, move_history):
        self.board = board
        self.move_history = move_history
        self._zero_tile_position = None  # lazily computed cache

    @property
    def zero_tile_position(self):
        """Index of the blank tile, cached after the first lookup.

        The original compared the cache truthily (``if self._zero_tile_position:``),
        which re-scanned the board on every access when the blank sat at
        index 0; comparing against None fixes that.
        """
        if self._zero_tile_position is None:
            self._zero_tile_position = self.board.index(0)
        return self._zero_tile_position

    @property
    def available_moves(self):
        """Map of direction -> board index the blank tile can move to."""
        moves = dict()
        pos = self.zero_tile_position
        # divmod uses floor division, so this is correct on both Python 2
        # and Python 3 (``pos / 3`` yielded a float row under Python 3).
        row, col = divmod(pos, 3)
        if row > 0:
            moves['U'] = 3 * (row - 1) + col
        if row < 2:
            moves['D'] = 3 * (row + 1) + col
        if col > 0:
            moves['L'] = 3 * row + (col - 1)
        if col < 2:
            moves['R'] = 3 * row + (col + 1)
        return moves

    @property
    def cost(self):
        """Total cost f = g + h."""
        return self.g_cost + self.h_cost

    @property
    def g_cost(self):
        """Distance from the root state: the number of moves taken."""
        return len(self.move_history)

    @property
    def h_cost(self):
        """True Manhattan-distance heuristic.

        The original summed ``abs(index difference)``, which can
        overestimate the real distance (index 0 -> 8 gave 8 though only 4
        moves are needed) and is therefore inadmissible; summing row and
        column offsets keeps A* optimal.
        """
        score = 0
        for idx, el in enumerate(self.board):
            goal_idx = self.GOAL.index(el)
            score += abs(idx // 3 - goal_idx // 3) + abs(idx % 3 - goal_idx % 3)
        return score

    @property
    def is_solution(self):
        """True when the board matches the goal configuration."""
        return self.board == self.GOAL

    def __str__(self):
        return "".join(map(lambda el: str(el), self.board))

    def __hash__(self):
        # States hash/compare by board configuration only, so the open and
        # closed collections deduplicate positions regardless of path.
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)
def find_min_in_queue(queue):
    """Linear scan for the open-list entry with the lowest f-cost.

    Stands in for a missing priority-queue-with-map; on ties the earliest
    entry in iteration order wins (matching the strict '>' comparison of
    a priority queue's stable ordering).  Returns None for an empty map.
    """
    best = None
    for state in queue.values():
        if best is None or state.cost < best.cost:
            best = state
    return best
def a_star(board):
    """A* search from *board* to the solved configuration.

    Returns the solved GameState (its move_history encodes the path), or
    None if the frontier is exhausted without reaching the goal.
    """
    expanded = set()      # configurations already taken off the frontier
    frontier = dict()     # configuration -> best known state for it
    start = GameState(board, "")
    frontier[start] = start

    while frontier:
        node = find_min_in_queue(frontier)
        del frontier[node]
        if node.is_solution:
            return node
        expanded.add(node)

        blank = node.zero_tile_position
        for direction, target in node.available_moves.items():
            # Swap the blank with the neighbouring tile on a copied board.
            next_board = copy(node.board)
            next_board[blank], next_board[target] = (
                next_board[target], next_board[blank])
            successor = GameState(next_board, node.move_history + direction)
            if successor in expanded:
                continue
            known = frontier.get(successor)
            # Skip if the frontier already holds this configuration at an
            # equal-or-better cost.
            if known is not None and known.cost <= successor.cost:
                continue
            frontier[successor] = successor
    return None
# Demo run: solve a sample board and report the move sequence.
# print() calls (instead of Python-2 print statements) work on both
# Python 2 and Python 3 for single arguments.
state = a_star([6, 2, 8, 7, 3, 5, 1, 4, 0])
print(state)
print(state.move_history)
print(len(state.move_history))
| [
"jusjsc@gmail.com"
] | jusjsc@gmail.com |
b7b8ce02d0aba506b2683b3c8862f61ba4fd4293 | 9095c1a0da8c6ffe914ee6dd9c4708062fd95c9a | /vtpl_api/models/source_type.py | 99b3143d277011d407f04a5955fab602b32550ca | [
"MIT"
] | permissive | vtpl1/vtpl_api_py | 2e5338bd08677f12fc7304fb6ac7a32f32af1c93 | d289c92254deb040de925205c583de69802a1c6b | refs/heads/master | 2020-09-10T23:34:21.828350 | 2019-11-15T07:26:53 | 2019-11-15T07:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class SourceType(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    allowed enum values
    """
    # String constants for every value the sourceType enum may take.
    NONE = "none"
    RTSP = "rtsp"
    HTTP = "http"
    FILE = "file"
    FTP = "ftp"
    VMS = "vms"
    MQTT = "mqtt"
    AMQP = "amqp"
    S3 = "S3"
    VS3 = "VS3"
    BASEURL = "BaseUrl"
    RELATIVEURL = "RelativeUrl"

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Empty for an enum model: there are no named attributes to map.
    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):  # noqa: E501
        """SourceType - a model defined in OpenAPI"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts; plain
        # values are copied through unchanged.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SourceType):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"monotosh.das@videonetics.com"
] | monotosh.das@videonetics.com |
29cd5aa3c4e1875cf4d2d691c2218d861a2d333c | 7e4460c85790fae2d470182732289bcd1b8777b2 | /Process/process_meshes.py | 1ea42ad249869c9afd8713ee9ab0cb63fbd9752a | [] | no_license | khamukkamu/swconquest-msys | 5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e | 71337a4ae9c507b9440e84cf49d31fc67a781978 | refs/heads/master | 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null | UTF-8 | Python | false | false | 1,015 | py | import string
from header_common import *
from module_info import *
from module_meshes import *
from process_common import *
from process__swyhelper import *
def save_meshes():
    """Write meshes.txt: a count line followed by one line per mesh record.

    Fields per line: name flags resource-path and nine numeric columns
    run through swytrailzro (spaces in the resource path are escaped).
    """
    # ``with`` guarantees the handle is closed even if a write raises
    # (the original leaked the handle on error).
    with open(export_dir + "meshes.txt", "w") as ofile:
        ofile.write("%d\n" % len(meshes))
        # Iterate the records directly instead of xrange(len(...)) indexing.
        for mesh in meshes:
            ofile.write("mesh_%s %d %s %s %s %s %s %s %s %s %s %s\n" % (
                mesh[0], mesh[1], replace_spaces(mesh[2]),
                swytrailzro(mesh[3]), swytrailzro(mesh[4]),
                swytrailzro(mesh[5]), swytrailzro(mesh[6]),
                swytrailzro(mesh[7]), swytrailzro(mesh[8]),
                swytrailzro(mesh[9]), swytrailzro(mesh[10]),
                swytrailzro(mesh[11])))
def save_python_header():
    """Write the ID_meshes constants module for the selected engine target."""
    # wb_compile_switch selects the Warband vs. original Mount&Blade output.
    if wb_compile_switch:
        path = "./IDs/ID_meshes_wb.py"
    else:
        path = "./IDs/ID_meshes_mb.py"
    # ``with`` closes the file even if a write raises; enumerate replaces
    # the xrange(len(...)) index loop.
    with open(path, "w") as ofile:
        for i_mesh, mesh in enumerate(meshes):
            ofile.write("mesh_%s = %d\n" % (mesh[0], i_mesh))
        ofile.write("\n\n")
print "Exporting meshes..."
save_python_header()
save_meshes()
| [
"swyterzone@gmail.com"
] | swyterzone@gmail.com |
88f88a537c87284e71ef254d24a05d22fc3a9233 | 6a928130337dafece1a6158badd00d1d46571003 | /reportForm/wsgi.py | 28a489cea41932132be6da890e260ca78c6ee72b | [] | no_license | Yanl05/reportForm | bb5a36cff3fac3aca76b5bc50c92fe54282250a8 | 45a915b29102c1f49035df93217782ea563cdb9f | refs/heads/master | 2023-04-18T00:40:19.355040 | 2021-04-29T14:37:59 | 2021-04-29T14:37:59 | 362,485,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for the reportForm project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Select the project's settings module before building the WSGI entry point.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportForm.settings')

application = get_wsgi_application()
| [
"756593069@qq.com"
] | 756593069@qq.com |
d846750416962cc14802ebec76b38e76d668611c | b3eef5d20818cbb2462acaedac3544a703987a60 | /maze_generator/render.py | 64e4f7d2262508990b92d4380b483585956fe865 | [] | no_license | JohnnyDeuss/maze-generator | 5a36cf69e7d51c8e09a40f77d36fa13054d13e95 | ae7107d787e80206d96d2a92776f606ac3ce5210 | refs/heads/main | 2023-02-15T01:38:54.508688 | 2021-01-02T22:06:49 | 2021-01-02T22:08:36 | 326,081,260 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | from PIL import Image, ImageDraw
def render_maze(g, path_width=20, wall_width=2, border_width=4, output_file="maze.png"):
    """
    Render a maze graph and output it to the given file.

    *g* appears to be a networkx-style graph whose nodes are (x, y) cell
    coordinates and whose edges are the open passages between adjacent
    cells (inferred from the .nodes/.edges usage — TODO confirm against
    the generator).  Returns the rendered 1-bit PIL Image.

    NOTE(review): *output_file* is accepted but never used — the image is
    returned, not saved; callers must save it themselves.
    """
    # Assumes the last node is the bottom-right cell; +1 converts from
    # 0-based indices to cell counts.
    w, h = list(g.nodes)[-1]
    w += 1
    h += 1
    # Canvas size: cells + interior walls + the outer border on both sides.
    img_w = w * path_width + (w - 1) * wall_width + 2 * border_width
    img_h = h * path_width + (h - 1) * wall_width + 2 * border_width
    img = Image.new("1", (img_w, img_h))
    draw = ImageDraw.Draw(img)
    # Draw entrance (gap in the top-left border).
    draw.rectangle(
        [0, border_width, border_width, border_width + path_width - 1], fill=1
    )
    # Draw exit (gap in the bottom-right border).
    draw.rectangle(
        [
            img_w - border_width,
            img_h - path_width - border_width,
            img_w,
            img_h - border_width - 1,
        ],
        fill=1,
    )

    def draw_line(a, b):
        # Paint the corridor covering both cells (and the wall between them).
        # Ensure `a` is the upper left point.
        if b[0] < a[0] or b[1] < a[1]:
            a, b = b, a
        x_a, y_a = a
        x_b, y_b = b
        # Convert cell coordinates to pixel coordinates.
        x_a = border_width + x_a * (path_width + wall_width)
        y_a = border_width + y_a * (path_width + wall_width)
        x_b = border_width + x_b * (path_width + wall_width) + path_width - 1
        y_b = border_width + y_b * (path_width + wall_width) + path_width - 1
        draw.rectangle([x_a, y_a, x_b, y_b], fill=1, outline=1)

    # Every edge is an open passage between two adjacent cells.
    for e in g.edges:
        draw_line(*e)
    return img
| [
"johnnydeuss@gmail.com"
] | johnnydeuss@gmail.com |
6a7bd840b05232033b4479a414b2dba8cac470bb | d2fae2d0ff36fde8d8402bdac1de5b6760f050b7 | /app/tests/Test_passwordchecker.py | 031f23e09f40532aa833df7d554126e8cd5b2beb | [] | no_license | DennisMufasa/mongodb-flask_app | 8701d817d757a5144b9a98ba4293a948c537b6c5 | 53c3447850d16d630428a020fe28949ff84c4a03 | refs/heads/master | 2022-12-09T11:31:59.085865 | 2020-08-31T02:32:57 | 2020-08-31T02:32:57 | 260,714,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # third-party import
import unittest
# local import
from ..api.v1.models.utils import password_checker
class Test_Password_checker(TestCase):
    """Length validation behaviour of the password_checker utility."""

    def test_password_len(self):
        # One password below the minimum length, one above the maximum.
        password_check1 = password_checker('boo')
        password_check2 = password_checker('lysergicaciddyethylammide')
        self.assertEqual(password_check1, 'password too short')
        self.assertEqual(password_check2, 'password too long')
if __name__ == "__main__":
unittest.main()
| [
"denny.muasa@gmail.com"
] | denny.muasa@gmail.com |
4c4b3601e4073098253b9a1d67c3a5e373a569bd | 546187161f8f0e234b4bf4e749bab477c32a352a | /arranging coins.py | fb508cafd1082fe4ee8b9ecf48efdc16b0ebb64d | [] | no_license | lzj322/leetcode | 0fd71389c784715ce8adf975873a7e40d348b837 | 30ccbf3ae12d895f34c78e63c149c309468ab61b | refs/heads/master | 2021-08-12T06:33:58.422303 | 2017-11-14T14:13:05 | 2017-11-14T14:13:05 | 110,697,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | 'LeetCode 441 arranging coins'
import math
class Solution(object):
    """LeetCode 441: count complete rows of a staircase built from n coins."""

    def arrangeCoins(self, n):
        """
        Largest k with k*(k+1)/2 <= n, via the quadratic formula.

        :type n: int
        :rtype: int

        math.isqrt keeps the square root exact for big n, where the
        floating-point ``(8*n+1)**0.5`` used before can round to the
        wrong integer (the module already imported math without using it).
        """
        return (math.isqrt(8 * n + 1) - 1) // 2
if __name__ == '__main__':
    # Smoke test: expected output is [0, 1, 1, 2, 2, 2, 3, 3, 3, 3].
    s=Solution()
    l=[0,1,2,3,4,5,6,7,8,9]
    ans=list(map(s.arrangeCoins,l))
    print (ans)
| [
"lzj910322@gmail.com"
] | lzj910322@gmail.com |
624821fbcf83feda5993b98a3444652ea2c64d7a | 30d5f8094a42696be671917e8d1918363ccba963 | /vd_design.py | 7b255d16da74bad6bebd9e30f392359978bef17c | [] | no_license | ortariot/VMC-desktop | e8ffa6ba75c19bc5a785ed77da2bab6372a0878c | 4df6b2a69f74cebf1c5b2cd722584fb1483a2b48 | refs/heads/master | 2023-05-31T15:56:56.304464 | 2021-06-11T12:04:20 | 2021-06-11T12:04:20 | 375,114,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,886 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'valve_design_desktop.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    # Auto-generated by pyuic5 from valve_design_desktop.ui; manual edits
    # are lost on regeneration. Four identical valve panels, each with an
    # ON button, an LCD readout and a (initially disabled) position slider.

    def setupUi(self, MainWindow):
        """Build the fixed-size four-valve control panel inside MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setEnabled(True)
        MainWindow.resize(573, 475)
        # Fixed size policy: the window is not user-resizable.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy(
        ).hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy(
        ).hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setObjectName("centralwidget")
        # Horizontal row holding the four valve frames.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        # --- Valve 1 panel ---
        self.vlv1_frame = QtWidgets.QFrame(self.centralwidget)
        self.vlv1_frame.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.vlv1_frame.sizePolicy(
        ).hasHeightForWidth())
        self.vlv1_frame.setSizePolicy(sizePolicy)
        self.vlv1_frame.setFrameShape(QtWidgets.QFrame.Panel)
        self.vlv1_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.vlv1_frame.setObjectName("vlv1_frame")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.vlv1_frame)
        self.verticalLayout.setObjectName("verticalLayout")
        self.vlv1_button = QtWidgets.QPushButton(self.vlv1_frame)
        self.vlv1_button.setObjectName("vlv1_button")
        self.verticalLayout.addWidget(self.vlv1_button)
        self.vlv1_lcdNumber = QtWidgets.QLCDNumber(self.vlv1_frame)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum,
                                           QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.vlv1_lcdNumber.sizePolicy(
        ).hasHeightForWidth())
        self.vlv1_lcdNumber.setSizePolicy(sizePolicy)
        self.vlv1_lcdNumber.setMinimumSize(QtCore.QSize(0, 30))
        font = QtGui.QFont()
        font.setPointSize(25)
        font.setBold(False)
        self.vlv1_lcdNumber.setFont(font)
        self.vlv1_lcdNumber.setObjectName("vlv1_lcdNumber")
        self.verticalLayout.addWidget(self.vlv1_lcdNumber)
        spacerItem = QtWidgets.QSpacerItem(72, 20,
                                           QtWidgets.QSizePolicy.Expanding,
                                           QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout.addItem(spacerItem)
        self.widget = QtWidgets.QWidget(self.vlv1_frame)
        self.widget.setObjectName("widget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.vlv1_verticalSlider = QtWidgets.QSlider(self.widget)
        self.vlv1_verticalSlider.setEnabled(True)
        self.vlv1_verticalSlider.setOrientation(QtCore.Qt.Vertical)
        self.vlv1_verticalSlider.setObjectName("vlv1_verticalSlider")
        # Sliders start disabled; application logic enables them later.
        self.vlv1_verticalSlider.setEnabled(False)
        self.horizontalLayout.addWidget(self.vlv1_verticalSlider)
        self.verticalLayout.addWidget(self.widget)
        spacerItem1 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout.addItem(spacerItem1)
        self.horizontalLayout_5.addWidget(self.vlv1_frame)
        # --- Valve 2 panel ---
        self.vlv2_frame = QtWidgets.QFrame(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.vlv2_frame.sizePolicy(
        ).hasHeightForWidth())
        self.vlv2_frame.setSizePolicy(sizePolicy)
        self.vlv2_frame.setFrameShape(QtWidgets.QFrame.Panel)
        self.vlv2_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.vlv2_frame.setObjectName("vlv2_frame")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.vlv2_frame)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.vlv2_button = QtWidgets.QPushButton(self.vlv2_frame)
        self.vlv2_button.setObjectName("vlv2_button")
        self.verticalLayout_2.addWidget(self.vlv2_button)
        self.vlv2_lcdNumber = QtWidgets.QLCDNumber(self.vlv2_frame)
        self.vlv2_lcdNumber.setMinimumSize(QtCore.QSize(0, 30))
        self.vlv2_lcdNumber.setObjectName("vlv2_lcdNumber")
        self.verticalLayout_2.addWidget(self.vlv2_lcdNumber)
        spacerItem2 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_2.addItem(spacerItem2)
        self.widget1 = QtWidgets.QWidget(self.vlv2_frame)
        self.widget1.setObjectName("widget1")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget1)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.vlv2_verticalSlider = QtWidgets.QSlider(self.widget1)
        self.vlv2_verticalSlider.setOrientation(QtCore.Qt.Vertical)
        self.vlv2_verticalSlider.setObjectName("vlv2_verticalSlider")
        self.vlv2_verticalSlider.setEnabled(False)
        self.horizontalLayout_2.addWidget(self.vlv2_verticalSlider)
        self.verticalLayout_2.addWidget(self.widget1)
        spacerItem3 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_2.addItem(spacerItem3)
        self.horizontalLayout_5.addWidget(self.vlv2_frame)
        # --- Valve 3 panel ---
        self.vlv3_frame = QtWidgets.QFrame(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.vlv3_frame.sizePolicy(
        ).hasHeightForWidth())
        self.vlv3_frame.setSizePolicy(sizePolicy)
        self.vlv3_frame.setFrameShape(QtWidgets.QFrame.Panel)
        self.vlv3_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.vlv3_frame.setObjectName("vlv3_frame")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.vlv3_frame)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.vlv3_button = QtWidgets.QPushButton(self.vlv3_frame)
        self.vlv3_button.setObjectName("vlv3_button")
        self.verticalLayout_3.addWidget(self.vlv3_button)
        self.vlv3_lcdNumber = QtWidgets.QLCDNumber(self.vlv3_frame)
        self.vlv3_lcdNumber.setMinimumSize(QtCore.QSize(0, 30))
        self.vlv3_lcdNumber.setObjectName("vlv3_lcdNumber")
        self.verticalLayout_3.addWidget(self.vlv3_lcdNumber)
        spacerItem4 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_3.addItem(spacerItem4)
        self.widget2 = QtWidgets.QWidget(self.vlv3_frame)
        self.widget2.setObjectName("widget2")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget2)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.vlv3_verticalSlider = QtWidgets.QSlider(self.widget2)
        self.vlv3_verticalSlider.setOrientation(QtCore.Qt.Vertical)
        self.vlv3_verticalSlider.setObjectName("vlv3_verticalSlider")
        self.vlv3_verticalSlider.setEnabled(False)
        self.horizontalLayout_3.addWidget(self.vlv3_verticalSlider)
        self.verticalLayout_3.addWidget(self.widget2)
        spacerItem5 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_3.addItem(spacerItem5)
        self.horizontalLayout_5.addWidget(self.vlv3_frame)
        # --- Valve 4 panel ---
        self.vlv4_frame = QtWidgets.QFrame(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.vlv4_frame.sizePolicy(
        ).hasHeightForWidth())
        self.vlv4_frame.setSizePolicy(sizePolicy)
        self.vlv4_frame.setFrameShape(QtWidgets.QFrame.Panel)
        self.vlv4_frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.vlv4_frame.setObjectName("vlv4_frame")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.vlv4_frame)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.vlv4_button = QtWidgets.QPushButton(self.vlv4_frame)
        self.vlv4_button.setObjectName("vlv4_button")
        self.verticalLayout_4.addWidget(self.vlv4_button)
        self.vlv4_lcdNumber = QtWidgets.QLCDNumber(self.vlv4_frame)
        self.vlv4_lcdNumber.setMinimumSize(QtCore.QSize(0, 30))
        self.vlv4_lcdNumber.setObjectName("vlv4_lcdNumber")
        self.verticalLayout_4.addWidget(self.vlv4_lcdNumber)
        spacerItem6 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_4.addItem(spacerItem6)
        self.widget3 = QtWidgets.QWidget(self.vlv4_frame)
        self.widget3.setObjectName("widget3")
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.widget3)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.vlv4_verticalSlider = QtWidgets.QSlider(self.widget3)
        self.vlv4_verticalSlider.setOrientation(QtCore.Qt.Vertical)
        self.vlv4_verticalSlider.setObjectName("vlv4_verticalSlider")
        self.vlv4_verticalSlider.setEnabled(False)
        self.horizontalLayout_4.addWidget(self.vlv4_verticalSlider)
        self.verticalLayout_4.addWidget(self.widget3)
        spacerItem7 = QtWidgets.QSpacerItem(72, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_4.addItem(spacerItem7)
        self.horizontalLayout_5.addWidget(self.vlv4_frame)
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (runs through Qt's translate hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow",
                                             "Valve monster comander desktop"))
        self.vlv1_button.setText(_translate("MainWindow", "VALVE1 ON"))
        self.vlv2_button.setText(_translate("MainWindow", "VALVE2 ON"))
        self.vlv3_button.setText(_translate("MainWindow", "VALVE3 ON"))
        self.vlv4_button.setText(_translate("MainWindow", "VALVE4 ON"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"v.n.homutov@gmail.com"
] | v.n.homutov@gmail.com |
397f63bed9a3a36120ebb0a445e6a6d487b5a736 | 670df3389c2dcad92e10f350dd40490eb6656f89 | /src/networks/__init__.py | b7310380cf242e537dc0b4340da8ee7096d8bef3 | [] | no_license | MoeinSorkhei/Thorax-Disease-Classification | 8ed5494ef5db1ee86bf4eabf4ffdcf24eb80cf27 | ef1f5f0289d62e132372974352bf974bf8b70e4c | refs/heads/master | 2022-07-05T07:48:06.877423 | 2020-05-16T12:48:57 | 2020-05-16T12:48:57 | 261,700,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from .pre_trained import *
from .trans_pool_pred import *
from .init_models import *
| [
"m.moein.sorkhei@gmail.com"
] | m.moein.sorkhei@gmail.com |
e52b144b5a63429930080239d02e6ed875812900 | c70683220a370751606b15876491cee31342b533 | /calculadora/test/test_calculadora.py | 8ec52d042a283d250d04bdc222014a0a6892d00a | [] | no_license | AbnerAbreu/TDD | 1c1a6345d200ed85c17d7a95db4023dfa7d78986 | fb705c78e446075917d1b7167c917becdc91f349 | refs/heads/master | 2020-08-08T18:56:55.685881 | 2019-10-01T20:46:56 | 2019-10-01T20:46:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from django.test import TestCase
from calculadora import views
class CalculadoraTests(TestCase):
def test_soma(self):
self.assertEqual(views.soma(2, 2), 4)
| [
"vferreira@mastertech.tech"
] | vferreira@mastertech.tech |
e51a73b1b803cf62864be83be78a2bba41b8c063 | 5101bfc97d22e8e4465ba9f86779a9b64a469d63 | /U-Net_for_Retinal Vessel Segmentation/train.py | 866900ce43cd92686fa0df6ec9c4d82354d15773 | [] | no_license | WangMeow1998/Medical-Image-Segmentation | 44214aedbefc42490a402e37491350531603a6a7 | 719220dc2471e1de42f8d1823295c3ef2cd35ce4 | refs/heads/master | 2023-02-02T10:29:21.520466 | 2020-12-24T03:51:06 | 2020-12-24T03:51:06 | 315,533,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | from unet_model import UNet
from utils.DALOADER import DealDataset
from torch import optim
import torch.nn as nn
import torch
from tqdm import tqdm
import sys
class Logger():
    """Tee for sys.stdout: mirrors everything written into a log file.

    Installed via ``sys.stdout = Logger()`` so training prints also land
    in *filename*.
    """

    def __init__(self, filename="log.txt"):
        self.terminal = sys.stdout
        self.log = open(filename, "w")

    def write(self, message):
        """Write *message* to both the real stdout and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # The original ``pass`` silently dropped flush requests, so
        # callers of sys.stdout.flush() could lose buffered log output;
        # flush both underlying streams instead.
        self.terminal.flush()
        self.log.flush()
class SoftDiceLoss(nn.Module):
    """1 - soft Dice coefficient, averaged over the batch."""

    def __init__(self, weight=None, size_average=True):
        # weight/size_average are accepted for API compatibility but unused.
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, targets):
        """Return the soft Dice loss between raw *logits* and *targets*."""
        batch = targets.size(0)
        smooth = 1
        # Flatten each sample so the Dice terms are per-sample sums.
        flat_probs = torch.sigmoid(logits).view(batch, -1)
        flat_targets = targets.view(batch, -1)
        overlap = (flat_probs * flat_targets).sum(1)
        per_sample = 2. * (overlap + smooth) / (
            flat_probs.sum(1) + flat_targets.sum(1) + smooth)
        # Mean Dice over the batch, turned into a loss.
        return 1 - per_sample.sum() / batch
def dice_coeff(pred, target):
    """Dice coefficient between two (batch, ...) tensors, with smoothing."""
    smooth = 1.
    batch = pred.size(0)
    # Flatten both tensors before accumulating the global sums.
    p = pred.view(batch, -1)
    t = target.view(batch, -1)
    overlap = (p * t).sum()
    return (2. * overlap + smooth) / (p.sum() + t.sum() + smooth)
def train_net(net, device, data_path, epochs=300, batch_size=2, lr=0.0001):
# 加载训练集
name_dataset = DealDataset(data_path)
train_loader = torch.utils.data.DataLoader(dataset=name_dataset, batch_size=batch_size, shuffle=True) #shuffle 填True 就会打乱
#定义算法
optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=1e-8)
# optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.3, last_epoch=-1)
#定义Loss
loss1 = nn.BCEWithLogitsLoss()
loss2 = SoftDiceLoss()
# best_loss统计,初始化为正无穷
# best_loss = float('inf')
# best_dice = -best_loss
sys.stdout = Logger()
for epoch in tqdm(range(epochs)):
# 训练模式
net.train()
for batch in train_loader:
optimizer.zero_grad()
image = batch['image']
label = batch['label']
image = image.to(device=device, dtype=torch.float32)
label = label.to(device=device, dtype=torch.float32)
pred = net(image)
# 计算loss
c1 = loss1(pred, label)
c2 = loss2(pred, label)
loss = c1+c2
# loss = c2
pred = torch.sigmoid(pred)
pred = (pred>0.5).float()
print(pred.sum())
dice = dice_coeff(pred, label)
print('Dice/train', dice.item(),'\t','Loss/train', loss.item())
# 保存loss值最小的网络参数
# if dice > best_dice:
# best_dice = dice
# torch.save(net.state_dict(), 'best_model2021.pth')
# 更新参数
loss.backward()
optimizer.step()
torch.save(net.state_dict(), f'CP_epoch{epoch + 1}.pth')
# scheduler.step()
if __name__ == "__main__":
# 选择设备,有cuda用cuda,没有就用cpu
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# 加载网络,图片单通道1,分类为1。
net = UNet(n_channels=3, n_classes=1)
# 将网络拷贝到deivce中
net.to(device=device)
# 指定训练集地址,开始训练
# data_path = "../input/my-eye/data/CHASE/train"
data_path = "./data/CHASE/train"
train_net(net, device, data_path)
| [
"wangmeow1998@163.com"
] | wangmeow1998@163.com |
374f86075d5c187fad6bfde503fbdb0362a57e76 | 4985143dce9379c939d562d277350f0d8224f06a | /venv/bin/django-admin.py | e788bb50f4c0599e5161f6209905f79392df6d1e | [] | no_license | jkinathan/Task_todo | a74ae010dc703ba0ed4654a569b57a5ce7634857 | e19da9ab9dede272b6c148b686e6e77e3da1687a | refs/heads/master | 2023-03-23T13:51:41.816050 | 2021-03-20T09:32:32 | 2021-03-20T09:32:32 | 274,080,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #!/home/jo-kinathany/Desktop/Task_todo/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"jkinobe@gmail.com"
] | jkinobe@gmail.com |
faa2e47e01b26f98eb24501a23c59d2dd2f3081a | 70bc77336e4544031ad7d7d29a2e964ef2626076 | /base/models.py | bf4ba34fec4fc78262b81397124b4041d26e64fd | [] | no_license | DronMDF/vanadis | 9af7a8c9281bf0eb17df593f5c9fc9345e474612 | de692207bbd127c5a9952e3144653492a0ba969f | refs/heads/master | 2020-04-17T08:11:18.411429 | 2016-12-21T20:50:05 | 2016-12-21T20:50:05 | 66,539,179 | 1 | 0 | null | 2016-12-21T20:50:06 | 2016-08-25T08:20:03 | Python | UTF-8 | Python | false | false | 654 | py | from django.db import models
class Project(models.Model):
name = models.CharField(max_length=100, db_index=True)
repo_url = models.CharField(max_length=256, null=True)
class Object(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE, db_index=True)
oid = models.BigIntegerField(db_index=True)
issues_count = models.IntegerField()
class Issue(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE, db_index=True)
object = models.ForeignKey(Object, on_delete=models.CASCADE, db_index=True)
line = models.IntegerField()
position = models.IntegerField()
text = models.CharField(max_length=256)
| [
"dron.valyaev@gmail.com"
] | dron.valyaev@gmail.com |
056dc9af3862ebbbd133e049bcacd156c6262d0c | 18d53974bd1eeaa22fc93715041b11903f73a434 | /SEIRMU/new/OutofSample/draw_picture/paint_line.py | 1ffef5d7f8fcad2403c7a6d5241c4b397f23155a | [
"MIT"
] | permissive | KL-ice/SEIR-AIM | b363451ea1f7d1c099ad2fe29fbb4657ce864b1a | c12a67186a6d8a59deb56b7a29ce86f170fc9d0c | refs/heads/main | 2023-07-18T23:22:53.107921 | 2021-09-30T04:55:54 | 2021-09-30T04:55:54 | 411,366,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,652 | py | import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from matplotlib.font_manager import *
from pylab import *
from matplotlib.pyplot import MultipleLocator
from matplotlib.ticker import AutoMinorLocator
from read_data import *
from config import *
import matplotlib.font_manager
def paint_confirm(pred_data, truth_data):
lenth_pred = len(pred_data)
lenth_truth = len(truth_data)
x_pred = [dt.datetime.strptime('2020-09-26', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
x_truth = [dt.datetime.strptime('2020-09-26', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_truth]
y_truth = truth_data
#调整图片大小
plt.figure(figsize=(8,6))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
tick_params(which='both',top='on',bottom='on',left='on',right='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(6000000,130000000)
ax.set_ylim(6000000,60000000)
# 设置网格线
ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(1)
plt.plot(x_pred,y_pred,label="Prediction Confirmed Cases",color="#D95319",linewidth=3)
plt.plot(x_truth,y_truth,label="Confirmed Cases by Database",color="#0072BD",linewidth=3)
plt.tick_params(labelsize=15)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=18)
plt.ylabel("Confirmed Cases",fontsize=18)
# plt.title("Confirmed Cases",fontsize=20)
# plt.gcf().autofmt_xdate()
# ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.legend(loc='center left', bbox_to_anchor=(0.03, 0.78), ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.2,
fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
plt.savefig('./result/confirm.jpg', dpi=1200, pad_inches=0.0)
def paint_unemployment(pred_data, truth_data):
lenth_pred = len(pred_data)
lenth_truth = len(truth_data)
x_pred = [dt.datetime.strptime('2020-09-26', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
x_truth = [dt.datetime.strptime('2020-09-26', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_truth]
y_truth = truth_data
#调整图片大小
plt.figure(figsize=(8,6))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
tick_params(which='both',top='on',bottom='on',left='on',right='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(0,13000000)
# 设置网格线
ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(1)
plt.plot(x_pred,y_pred,label="Prediction Unemployment Rate",color="#D95319",linewidth=3)
plt.plot(x_truth,y_truth,label="Unemployment Rate by Database",color="#0072BD",linewidth=3)
plt.tick_params(labelsize=15)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=18)
plt.ylabel("Unemployment Rate",fontsize=18)
plt.title("Unemployment Rate",fontsize=20)
plt.gcf().autofmt_xdate()
ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.legend(loc='center right',bbox_to_anchor=(1, 0.7), ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.1,
fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
plt.savefig('./result/unemployment.svg', dpi=1200, pad_inches=0.0)
def paint_inf(pred_data, truth_data):
lenth_pred = len(pred_data)
lenth_truth = len(truth_data)
x_pred = [dt.datetime.strptime('2020-09-26', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
x_truth = [dt.datetime.strptime('2020-09-26', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_truth]
y_truth = truth_data
#调整图片大小
plt.figure(figsize=(8,6))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
tick_params(which='both',top='on',bottom='on',left='on',right='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(0,13000000)
# 设置网格线
ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(1)
plt.plot(x_pred,y_pred,label="Prediction Infection Rate",color="#D95319",linewidth=3)
plt.plot(x_truth,y_truth,label="Infection Rate by Database",color="#0072BD",linewidth=3)
plt.tick_params(labelsize=15)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=18)
plt.ylabel("Infection Rate",fontsize=18)
plt.title("Infection Rate",fontsize=20)
plt.gcf().autofmt_xdate()
ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.legend(loc='center right',bbox_to_anchor=(1, 0.7), ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.1,
fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
plt.subplots_adjust(left=0.15, top=None, wspace=None, hspace=None)
plt.savefig('./result/infection_rate.svg', dpi=1200, pad_inches=0.0)
def paint_Rt(pred_data, truth_data):
lenth_pred = len(pred_data)
lenth_truth = len(truth_data)
x_pred = [dt.datetime.strptime('2020-03-20', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
x_truth = [dt.datetime.strptime('2020-03-20', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_truth]
y_truth = truth_data
#调整图片大小
plt.figure(figsize=(8,6))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
tick_params(which='both',top='on',bottom='on',left='on',right='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(0,13000000)
# 设置网格线
ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(1)
plt.plot(x_pred,y_pred,label="Prediction Rt",color="#D95319",linewidth=3)
plt.plot(x_truth,y_truth,label="Rt by Database",color="#0072BD",linewidth=3)
plt.tick_params(labelsize=15)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=18)
plt.ylabel("Rt",fontsize=18)
plt.title("Reproduction Number", fontsize=20)
plt.gcf().autofmt_xdate()
ax.get_yaxis().get_major_formatter().set_scientific(False)
plt.legend(loc='center right',bbox_to_anchor=(0.98, 0.73),ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.1,
fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
def paint_blm_confirm(pred_data):
lenth_pred = len(pred_data)
x_pred = [dt.datetime.strptime('2020-06-27', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
#调整图片大小
plt.figure(figsize=(12,5.5))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
tick_params(which='both',bottom='on',left='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(0,13000000)
# 设置网格线
# ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
# ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_color('none')
ax.spines['left'].set_linewidth(1)
# ax.spines['right'].set_linewidth(1)
ax.spines['right'].set_color('none')
plt.plot(x_pred,y_pred,label="prediction Rt",color="#D95319",linewidth=3)
plt.tick_params(labelsize=18)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=23, fontweight='bold')
plt.ylabel("Increased confirmed \ncases caused by BLM",fontsize=23, fontweight='bold')
# plt.title("Increased Cumulative Confirmed Cases Caused by BLM", fontsize=20)
plt.gcf().autofmt_xdate()
ax.get_yaxis().get_major_formatter().set_scientific(False)
# plt.legend(loc='upper right',ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.1,
# fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
plt.savefig('./result/blm confirm.svg', dpi=1200, pad_inches=0.05, bbox_inches = 'tight')
def paint_blm_unemployment(pred_data):
lenth_pred = len(pred_data)
x_pred = [dt.datetime.strptime('2020-06-27', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
#调整图片大小
plt.figure(figsize=(12,5.5))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
tick_params(which='both',bottom='on',left='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(0,13000000)
# 设置网格线
# ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
# ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_color('none')
ax.spines['left'].set_linewidth(1)
# ax.spines['right'].set_linewidth(1)
ax.spines['right'].set_color('none')
plt.plot(x_pred,y_pred,label="prediction Rt",color="#D95319",linewidth=3)
plt.tick_params(labelsize=18)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=23, fontweight='bold')
plt.ylabel("Increased unemployment \nrate caused by BLM",fontsize=23, fontweight='bold')
# plt.title("Increased Unemployment Rate Caused by BLM", fontsize=20)
plt.gcf().autofmt_xdate()
ax.get_yaxis().get_major_formatter().set_scientific(False)
# plt.legend(loc='upper right',ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.1,
# fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
plt.savefig('./result/blm unemployment.svg', dpi=1200, pad_inches=0.05, bbox_inches = 'tight')
def paint_blm_index(pred_data):
lenth_pred = len(pred_data)
x_pred = [dt.datetime.strptime('2020-05-20', '%Y-%m-%d').date() + dt.timedelta(days = x) for x in range(1000)][:lenth_pred]
y_pred = pred_data
#调整图片大小
plt.figure(figsize=(12,5.5))
# 设定坐标轴刻度朝内
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
tick_params(which='both',bottom='on',left='on')
#设定字体
plt.rc('font',family='Times New Roman')
#设定标题字体
plt.rcParams['font.sans-serif'] = ['Times New Roman']
ax = plt.gca()
# ax.set_ylim(0,13000000)
ax.set_ylim(0,1)
# 设置网格线
# ax.grid(axis='x',which='major',color= 'gray',alpha = 0.4)
# ax.grid(axis='y',which='major',color= 'gray',alpha = 0.4)
# 设置坐标轴边框宽度
# ax.spines['bottom'].set_color('#0072BD')
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_color('none')
ax.spines['left'].set_linewidth(1)
# ax.spines['right'].set_linewidth(1)
ax.spines['right'].set_color('none')
plt.plot(x_pred,y_pred,label="prediction Rt",color="#0072BD",linewidth=3, clip_on=False, zorder=3)
plt.tick_params(labelsize=18)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
plt.xlabel("Date",fontsize=23, fontweight='bold')
plt.ylabel("\nBLM Index",fontsize=23, fontweight='bold')
# plt.title("BLM Index", fontsize=20)
plt.gcf().autofmt_xdate()
ax.get_yaxis().get_major_formatter().set_scientific(False)
# plt.legend(loc='upper right',ncol=1,borderaxespad=0.3, edgecolor='gray',labelspacing=0.1,handletextpad=0.7,columnspacing=0.3,handlelength=1.7,borderpad=0.1,
# fancybox=True,frameon=True,fontsize=20,shadow=False, framealpha=0.5)
# plt.show()
plt.subplots_adjust(left=0.1, bottom=0.25, right=None, top=None)
plt.savefig('./result/blm index.svg', dpi=1200, pad_inches=0.05, bbox_inches = 'tight')
if __name__ == "__main__":
# paint confirm cases
# pred_confirm = read_pred_confirm(pred_confirm_path)
# truth_confirm = read_truth_confirm(truth_confirm_path)
# paint_confirm(pred_confirm, truth_confirm)
# paint unemployment rate
# pred_unemployment = read_pred_unemployment(pred_unemployment_path)
# truth_unemployment = read_truth_unemployment(truth_unemployment_path)
# paint_unemployment(pred_unemployment, truth_unemployment)
# paint unemployment rate
# pred_Rt = read_pred_Rt(pred_Rt_path)
# truth_Rt = read_truth_Rt(truth_Rt_path)
# paint_Rt(pred_Rt, truth_Rt)
# paint blm different confirm
pred_blm_confirm = read_blm_confirm(blm_confirm_path, noblm_pred_confirm_path)
print(pred_blm_confirm)
paint_blm_confirm(pred_blm_confirm)
# paint blm different unemployment
pred_blm_unemployment = read_blm_unemployment(blm_unemployment_path, noblm_pred_unemployment_path)
print(pred_blm_unemployment)
paint_blm_unemployment(pred_blm_unemployment)
blm_index = read_blm_inedx(blm_index_path)
paint_blm_index(blm_index)
# pred_inf = [0.00954041164368391, 0.010497291572391987, 0.011120161972939968, 0.011480739340186119, 0.011714714579284191, 0.011915579438209534, 0.01210733037441969, 0.012298648245632648, 0.012515188194811344, 0.012772245332598686, 0.013041410595178604, 0.013296606950461864, 0.013511604629456997, 0.013682755641639233, 0.013837314210832119, 0.013980587013065815, 0.01415819302201271, 0.014362465590238571, 0.014596445485949516, 0.014872822910547256, 0.015195727348327637, 0.015555099584162235, 0.015946270897984505, 0.016329947859048843, 0.016726627945899963, 0.017150476574897766, 0.017611948773264885, 0.01811842992901802, 0.0186744574457407, 0.019233373925089836, 0.019821060821413994, 0.02044534869492054, 0.021105162799358368, 0.0217897966504097, 0.022479863837361336, 0.023150412365794182, 0.02383909747004509, 0.024510089308023453, 0.025131208822131157, 0.025675132870674133, 0.026120556518435478, 0.026453059166669846, 0.026665957644581795, 0.026752926409244537, 0.026725631207227707, 0.026600774377584457, 0.02639753557741642, 0.02613612823188305, 0.025836963206529617, 0.02552018314599991, 0.025211604312062263, 0.024926593527197838, 0.0246753953397274, 0.024463104084134102, 0.02429005317389965, 0.02415289357304573, 0.024046216160058975, 0.02396765537559986, 0.023909378796815872, 0.02386578358709812, 0.023832842707633972, 0.02380770817399025, 0.023788345977663994, 0.023773277178406715, 0.023760177195072174, 0.02374868467450142, 0.023738589137792587, 0.023729639127850533, 0.02372164838016033, 0.023714477196335793, 0.023708004504442215, 0.023702135309576988, 0.023696795105934143, 0.023691847920417786, 0.023687273263931274, 0.023683026432991028, 0.023679085075855255, 0.02367541566491127, 0.023671990260481834, 0.023668792098760605, 0.023665815591812134, 0.02366304025053978, 0.0236604493111372, 0.02365802973508835, 0.023655762895941734, 0.023653637617826462, 0.023651644587516785, 0.023649776354432106, 0.02364801988005638, 0.023646365851163864, 0.02364480309188366, 
0.023643335327506065, 0.02364194579422474, 0.02364063635468483, 0.02363939955830574, 0.023638226091861725, 0.023637115955352783, 0.023636065423488617, 0.023635070770978928, 0.02363412454724312, 0.02363322302699089, 0.023632371798157692, 0.023631559684872627, 0.023630788549780846, 0.02363005466759205, 0.023629354313015938, 0.023628689348697662, 0.023628052324056625, 0.023627446964383125, 0.023626862093806267, 0.023626312613487244, 0.02362578734755516, 0.023625284433364868, 0.02362479828298092, 0.02362433634698391, 0.02362389862537384, 0.023623475804924965, 0.023623069748282433, 0.023622682318091393, 0.023622315376996994, 0.023621955886483192, 0.023621613159775734, 0.023621289059519768, 0.023620974272489548, 0.023620672523975372, 0.02362038940191269, 0.023620113730430603, 0.023619843646883965, 0.02361958660185337, 0.023619340732693672, 0.02361910417675972, 0.023618875071406364, 0.023618659004569054, 0.02361844852566719, 0.023618245497345924, 0.023618051782250404, 0.02361786924302578, 0.023617688566446304, 0.023617513477802277, 0.023617349565029144, 0.02361719124019146, 0.023617036640644073, 0.023616887629032135, 0.023616742342710495, 0.0236166063696146, 0.023616474121809006]
# truth_inf = [0.010669372654577562, 0.010743240777903455, 0.011088392290809719, 0.011265271501965275, 0.011462000790084845, 0.011562583441632683, 0.01156611761364354, 0.011653536320369123, 0.012039351765481387, 0.012346229607414416, 0.012462875007794968, 0.012730140084263297, 0.012777295531378526, 0.012847473469267073, 0.013122772616106913, 0.01342940903331596, 0.013825499055621235, 0.014240617405671027, 0.014371340671943785, 0.014751034383418807, 0.014813940578132718, 0.01513148986078562, 0.01564035684584562, 0.015997450537550747, 0.016503159356639333, 0.016873487000925773, 0.01725682576936934, 0.017514821092231986, 0.01788690190946341, 0.01866813324533322, 0.019538106392953718, 0.020196194105407496, 0.02105420048583124, 0.02160278622977868, 0.022103806515040448, 0.02257332422559926, 0.02320140776025004, 0.02374017975137745, 0.024515457371323938, 0.0247350483605454, 0.025252417470816486, 0.02557000287228027, 0.025888138607007626, 0.026453451187721327, 0.027004958855033203, 0.027219782122427445, 0.027054776460013908, 0.026819453020947334, 0.026500476140760496, 0.026123419498499372, 0.025989503849476524, 0.025796745229456727, 0.02558611066731834, 0.025133127836314745, 0.024320086338151945, 0.023748778904985637, 0.0231924095516537]
# paint_inf(pred_inf, truth_inf)
| [
"wbkingice@sina.com"
] | wbkingice@sina.com |
4eacd86775bb8afb4f12c25a8a0907c0cc30960e | 0a674e831b4616d8013511b88c1097c29474042d | /tutorials/tf_modularity.py | 294fc17e29699ea9f545f741077b3c652f180045 | [] | no_license | ncullen93/tensorflow_portfolio | eac119cba6ffd69146326828dccfc635daff60c0 | b14145dc7d8cd2fe86e036033d2890548ae09456 | refs/heads/master | 2021-03-19T12:22:17.617012 | 2017-10-14T16:42:14 | 2017-10-14T16:42:14 | 86,193,597 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | """
Code for reusing common tensorflow functionality
"""
def relu(X, n_out=1):
with tf.name_scope('relu'):
w_shape = (int(X.get_shape()[1]),n_out)
w = tf.Variable(tf.random_normal(w_shape), name='weights')
b = tf.Variable(0.0, name='bias')
z = tf.add(tf.matmul(X,w),b, name='z')
return tf.nn.relu(z, name='relu')
n_features = 3
X = tf.placeholder(tf.float32, shape=(None,n_features), name='X')
relus = [relu(X) for i in range(5)]
relu_addition = tf.add_n(relus, name='output') | [
"ncullen@Nicks-MacBook-Pro.local"
] | ncullen@Nicks-MacBook-Pro.local |
8b09a98c3ac1acf69e5c84f6bbeeb54671c20bc6 | 11ce41733d6f31153fe14f800c9dd0be18615862 | /news/admin.py | 50285420a545e93e7a3d322e73e11bb5a4d627f4 | [
"MIT"
] | permissive | techacademypython/django_image_crop_views | 6ff6731944f5d09721452a71b0745089d1b035ef | 2f9c51ae80705dc23607e157baa4f5767957a2f1 | refs/heads/master | 2023-05-05T13:12:23.642970 | 2019-09-03T16:38:24 | 2019-09-03T16:38:24 | 206,105,932 | 0 | 0 | MIT | 2022-11-22T04:13:41 | 2019-09-03T15:07:05 | Python | UTF-8 | Python | false | false | 375 | py | from django.contrib import admin
from image_cropping import ImageCroppingMixin
# Register your models here.
from news.models import NewsModel
class NewsModelAdmin(ImageCroppingMixin, admin.ModelAdmin):
readonly_fields = ["preview_count"]
fields = [
"image", "name", "text", "cropping", "preview_count"
]
admin.site.register(NewsModel, NewsModelAdmin)
| [
"munisisazade@gmail.com"
] | munisisazade@gmail.com |
de2b6b74989a2467127597423d029e5b5810eb06 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/compute/azure-mgmt-vmwarecloudsimple/generated_samples/list_customization_policies.py | 7deb911d492ae31425baf1a5011cba20636db4d2 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,604 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.vmwarecloudsimple import VMwareCloudSimple
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-vmwarecloudsimple
# USAGE
python list_customization_policies.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = VMwareCloudSimple(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.customization_policies.list(
region_id="myResourceGroup",
pc_name="myPrivateCloud",
)
for item in response:
print(item)
# x-ms-original-file: specification/vmwarecloudsimple/resource-manager/Microsoft.VMwareCloudSimple/stable/2019-04-01/examples/ListCustomizationPolicies.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
0eb1cfa23de5563d2ffe22de91ace92cb6fa8e27 | 9e98b6ce7b6bc8a521a07196a43ccf4b7f697f78 | /env/bin/easy_install | 2bba1c50de5452763acaaf86a978462ffde83587 | [] | no_license | gomeztagle-alan/lab2 | 53f25a20281b3a46cac164fbe5b16c217f5beb25 | 84e80e053160f6d9d818cc85f4274f848e62be6a | refs/heads/master | 2023-02-10T10:54:12.073327 | 2019-07-16T22:54:46 | 2019-07-16T22:54:46 | 194,698,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | #!/home/arcu5/Desktop/e14a/week1/lab2/env/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"gosanocubed@gmail.com"
] | gosanocubed@gmail.com | |
a2d97895a54972443c74c32f10d1ee5f07775db6 | 2de1aaa4d9bde03e21601bfeddf7e85863b0d54a | /django/jason_test/booktest/views.py | 1592057bc085ff5a7c9206432df946588d5f918e | [] | no_license | jasondzy/Python | d0a0ffad8aaac1f75d9e7c5ecd5cc9c33bd712ea | 9fe9cf23f7defa3581e7bcfe4cf8ec6a830b6cd7 | refs/heads/master | 2021-01-01T18:46:00.739387 | 2018-02-11T14:59:26 | 2018-02-11T14:59:26 | 98,430,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,706 | py | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.conf import settings
from .models import *
from django.core.paginator import Paginator
import json
# Create your views here.
def index(request):
return render(request,'booktest/index.html')
# return HttpResponse('ok')
def upload(request):
if request.method == 'POST':
picture = request.FILES['picture']
fname = '%s/cars/%s'%(settings.MEDIA_ROOT, picture.name)
# return HttpResponse(settings.MEDIA_ROOT)
with open(fname,'wb') as pic:
for c in picture.chunks():
pic.write(c)
return HttpResponse('ok')
else:
return HttpResponse('error')
def my_custom_page_not_found_view(request):
return HttpResponse('fail 404 fail')
def pages(request,id):
if id == '':
id = '1'
list = HeroInfo.objects.all()
paginator = Paginator(list,5)
page = paginator.page(int(id))
context = {'page':page}
return render(request,'booktest/pages.html',context)
def book_index(request):
return render(request,'booktest/book_index.html')
def ajax_get(request):
# print('hello')
book_list = BookInfo.objects.all()
# print(book_list)
# return JsonResponse({'data':book_list}) #这样返回数据会报错
l = []
for list in book_list:
l.append((list.id,list.btitle)) #这里必须要将获得的book_list进行遍历,取出元素放在一个数组中才行,否则会报错
return JsonResponse({'data':l}) #具体为什么这样的原因还有待解析
def get_bookinfo(request):
print('test')
id = request.GET['id'] #这里使用的是ajax的方式进行数据的传递
print(id)
list=HeroInfo.objects.filter(hBook_id=id)#这里的filter返回的是一个查询集,并不是一个对象,而是一个对象集合
print(list)
hero_list = []
for i in list:
hero_list.append((i.id,i.hname))
return JsonResponse({'data':hero_list})
#富文本编辑器
def editor(request):
data = BookInfo.objects.all()
list = []
for l in data:
list.append([l.id,l.btitle])
print(list)
context = {'data':list}
return render(request,'booktest/editor.html',context)
def editor_handle(request):
html = request.POST['hcontent'] #此处获取的是大文本提交的内容
id = request.POST['select'] #此处获取的是要提交的id号
print('id:',id)
print(html)
book = BookInfo.objects.get(pk=id) #注意filter方法返回的是一个查询集合,是一个集合,get返回的是一个对象
print(book.btitle)
book.bcontent = html
book.save()
return HttpResponse('ok') | [
"812724347@qq.com"
] | 812724347@qq.com |
2eebba53d96810f87e30cf377556f367d5ae17b1 | cc91403e4302d70127562591ab3fda7a212e6312 | /asqcenv/lib/python3.9/site-packages/asqc/asqc.py | ade74103aca22e8cf2e1c774024a023bed7f5a03 | [] | no_license | gklyne/asqc | fdfb59e243f92968e2147ef28ce6c85394d9cab0 | 1fcd4d80727b8385e8707be12d1e45fe26d2229a | refs/heads/master | 2022-07-28T09:05:07.923525 | 2022-07-14T19:44:52 | 2022-07-14T19:44:52 | 3,959,374 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,669 | py | #!/usr/bin/env python
"""
ASQC - A SPARQL query client
"""
import sys
import os
import os.path
import urllib.parse
import urllib
import urllib.request
# import io.StringIO
import io
import json
import re
import optparse
import logging
import traceback
from .SparqlHttpClient import SparqlHttpClient
from .SparqlXmlResults import writeResultsXML
from .StdoutContext import SwitchStdout
from .StdinContext import SwitchStdin
import rdflib
# Set up to use SPARQL
# rdflib.plugin.register(
# 'sparql', rdflib.query.Processor,
# 'rdfextras.sparql.processor', 'Processor')
# rdflib.plugin.register(
# 'sparql', rdflib.query.Result,
# 'rdfextras.sparql.query', 'SPARQLQueryResult')
# Register serializers (needed?)
#rdflib.plugin.register('n3', Serializer,
# 'rdflib.plugins.serializers.n3','N3Serializer')
#rdflib.plugin.register('turtle', Serializer,
# 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
#rdflib.plugin.register('nt', Serializer,
# 'rdflib.plugins.serializers.nt', 'NTSerializer')
#rdflib.plugin.register('xml', Serializer,
# 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
#rdflib.plugin.register('pretty-xml', Serializer,
# 'rdflib.plugins.serializers.rdfxml', 'PrettyXMLSerializer')
#rdflib.plugin.register('json-ld', Serializer,
# 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
#plugin.register('json-ld', Serializer,
# 'rdfextras.serializers.jsonld', 'JsonLDSerializer')
# Type codes and mapping for RDF and query variable p[arsing and serializing
# Recognized RDF serialization names (first entry, RDFXML, is the default)
RDFTYP = ["RDFXML","N3","TURTLE","NT","JSONLD","RDFA","HTML5"]
# Recognized query-variable-binding format names
VARTYP = ["JSON","CSV","XML"]
# Map from command-line RDF format names to rdflib parser format strings
# ("rdfa+html" is a special value handled explicitly in getRdfData)
RDFTYPPARSERMAP = (
    { "RDFXML": "xml"
    , "N3": "n3"
    , "TURTLE": "n3"
    , "NT": "nt"
    , "JSONLD": "jsonld"
    , "RDFA": "rdfa"
    , "HTML5": "rdfa+html"
    })
# Map from command-line RDF format names to rdflib serializer format strings
# (RDFA/HTML5 are input-only, so they have no entry here)
RDFTYPSERIALIZERMAP = (
    { "RDFXML": "pretty-xml"
    , "N3": "n3"
    , "TURTLE": "turtle"
    , "NT": "nt"
    , "JSONLD": "jsonld"
    })
# Logging object
log = logging.getLogger(__name__)
from . import __init__
class asqc_settings(object):
    """Static settings for the asqc command-line tool."""
    VERSION = "1.0.9" # __init__.__version__ @@@@
# Helper function for templated SPARQL results formatting and parsing
def formatBindings(template, bindings):
    """
    Return bindings formatted with the supplied template.

    template is a Python %-format string; backslash escape sequences in it
    (e.g. "\\n") are interpreted before formatting.  For each query variable
    'var' in bindings, both '%(var)s' (the raw value) and '%(var_repr)s'
    (a SPARQL-style rendering: <uri>, _:bnode, "literal" or
    "literal"^^<datatype>) may be used in the template.

    bindings maps variable names to SPARQL JSON result terms, i.e.
    dictionaries of the form {"type": ..., "value": ... [, "datatype": ...]}.
    """
    formatdict = {}
    # Py3 fix: dict.iteritems() no longer exists; use items()
    for (var, val) in bindings.items():
        formatdict[var] = val["value"]
        if val["type"] == "bnode":
            vf = "_:%(value)s"
        elif val["type"] == "uri":
            vf = "<%(value)s>"
        elif val["type"] == "literal":
            vf = '"%(value)s"'
        elif val["type"] == "typed-literal":
            vf = '"%(value)s"^^<%(datatype)s>'
        else:
            # Unknown term type: fall back to the raw value instead of
            # failing with UnboundLocalError as the previous code did
            vf = "%(value)s"
        formatdict[var+"_repr"] = vf%val
    # Py3 fix: str.decode(encoding='string_escape') was Python 2 only;
    # 'unicode_escape' performs the same backslash-escape interpretation
    template = template.encode('utf-8').decode('unicode_escape')
    return template%formatdict
# Helper function for CSV formatting query result from JSON
def char_escape(c):
    """Escape a single character for CSV output: double-quotes are doubled,
    non-ASCII characters become \\uXXXX escapes, others pass through."""
    code = ord(c)
    if c == '"':
        return '""'
    elif code >= 128:
        return "\\u%04x" % code
    else:
        return c
def termToCSV(result):
    """
    Render a single SPARQL JSON result term as a CSV field value.

    result is a dictionary of the form {"type": ..., "value": ...}
    (optionally with "xml:lang" or "datatype" members), or None.
    URIs render as <uri>, blank nodes as _:id, literals as quoted strings
    with an optional @lang or ^^datatype suffix.  Returns None for None.

    Raises rdflib.query.ResultException for unrecognized term types.
    """
    if result is None:
        return None
    resval = result['value']
    restyp = result['type']
    if restyp == "uri":
        return "<" + resval + ">"
    if restyp == "bnode":
        return "_:" + resval
    # Quote the literal value, doubling embedded quotes and escaping
    # non-ASCII characters one character at a time
    strval = '"' + "".join([char_escape(c) for c in resval]) + '"'
    strlang = result.get('xml:lang', None)
    if restyp == "literal":
        if strlang:
            return strval + '@' + strlang
        else:
            return strval
    if restyp == "typed-literal":
        return strval + '^^' + result['datatype']
    # Bug fix: this branch previously referenced the undefined name 'term',
    # raising NameError instead of the intended ResultException
    raise rdflib.query.ResultException(
        'Unknown term type: %s (%s)'%(restyp, result))
# Helper functions for JSON formatting and parsing
# Mostly copied from rdflib SPARQL code (rdfextras/sparql/results/jsonresults)
def termToJSON(term):
    """
    Convert an rdflib term (URIRef, Literal, BNode, or None) to its
    SPARQL JSON results representation; None maps to None.

    Raises rdflib.query.ResultException for unrecognized term types.
    """
    # Idiom fix: compare with None by identity, and test None first so the
    # result does not depend on any __eq__ defined by exotic term types
    if term is None:
        return None
    if isinstance(term, rdflib.URIRef):
        return { 'type': 'uri', 'value': str(term) }
    if isinstance(term, rdflib.Literal):
        if term.datatype is not None:
            return { 'type': 'typed-literal',
                     'value': str(term),
                     'datatype': str(term.datatype) }
        r = { 'type': 'literal',
              'value': str(term) }
        if term.language is not None:
            r['xml:lang'] = term.language
        return r
    if isinstance(term, rdflib.BNode):
        return { 'type': 'bnode', 'value': str(term) }
    raise rdflib.query.ResultException('Unknown term type: %s (%s)'%(term, type(term)))
def bindingToJSON(binding):
    """Convert one rdflib result binding (variable -> term) into a SPARQL
    JSON results row, omitting unbound (None) variables."""
    entries = ((str(var), termToJSON(binding[var])) for var in binding)
    return {name: term for (name, term) in entries if term is not None}
def parseJsonTerm(d):
    """rdflib object (Literal, URIRef, BNode) for the given json-format dict.

    input is like:
      { 'type': 'uri', 'value': 'http://famegame.com/2006/01/username' }
      { 'type': 'bnode', 'value': '123abc456' }
      { 'type': 'literal', 'value': 'drewp' }
      { 'type': 'literal', 'value': 'drewp', xml:lang="en" }
      { 'type': 'typed-literal', 'value': '123', datatype="http://(xsd)#int" }
    """
    termtype = d['type']
    value = d['value']
    if termtype == 'uri':
        return rdflib.URIRef(value)
    if termtype == 'bnode':
        return rdflib.BNode(value)
    if termtype == 'typed-literal':
        return rdflib.Literal(value, datatype=rdflib.URIRef(d['datatype']))
    if termtype == 'literal':
        lang = d.get('xml:lang')
        if lang is not None:
            return rdflib.Literal(value, lang=lang)
        return rdflib.Literal(value)
    raise NotImplementedError("json term type %r" % termtype)
def parseJsonBindings(bindings):
    """Convert a list of SPARQL JSON result rows into rows whose values
    are rdflib terms (see parseJsonTerm)."""
    return [
        {name: parseJsonTerm(term) for (name, term) in row.items()}
        for row in bindings
        ]
# Helper functions to form join of mutiple binding sets
def joinBinding(result_binding, constraint_binding):
    """Merge two variable-binding dictionaries.

    Returns a new dictionary with the union of both bindings, or None if
    the two disagree on the value of any shared variable.
    """
    for key, value in result_binding.items():
        if key in constraint_binding and constraint_binding[key] != value:
            return None
    merged = dict(result_binding)
    merged.update(constraint_binding)
    return merged
def joinBindings(result_bindings, constraint_bindings):
    """Join every pair of bindings drawn from the two lists, keeping only
    the compatible (non-empty) merged results."""
    joined = []
    for b1 in result_bindings:
        for b2 in constraint_bindings:
            merged = joinBinding(b1, b2)
            if merged:
                joined.append(merged)
    return joined
def joinBindingsToJSON(result_bindings, constraint_bindings):
    """As joinBindings, but additionally convert each surviving merged
    binding to its SPARQL JSON results representation."""
    joined = []
    for b1 in result_bindings:
        for b2 in constraint_bindings:
            merged = joinBinding(b1, b2)
            if merged:
                joined.append(bindingToJSON(merged))
    return joined
# Helper functions for accessing data at URI reference, which may be a path relative to current directory
def resolveUri(uriref, base, path=""):
    """
    Resolve a URI reference against a supplied base URI and path.
    (The path is a local file system path, and may need converting to use URI conventions)
    """
    pathref = urllib.request.pathname2url(path)
    # Directories must end in '/' so that urljoin treats them as containers
    if os.path.isdir(path) and not pathref.endswith('/'):
        pathref += '/'
    resolved_base = urllib.parse.urljoin(base, pathref)
    return urllib.parse.urljoin(resolved_base, uriref)
def retrieveUri(uriref):
    """
    Retrieve the content of the indicated resource as a string.

    uriref may be an absolute URI or a path relative to the current
    working directory.  Returns the UTF-8 decoded content, or None if the
    resource cannot be retrieved or decoded.
    """
    uri = resolveUri(uriref, "file://", os.getcwd())
    log.debug("retrieveUri: %s"%(uri))
    request = urllib.request.Request(uri)
    try:
        # Close the connection even if reading or decoding fails
        # (previously the response object was left open)
        with urllib.request.urlopen(request) as response:
            result = io.TextIOWrapper(response, encoding="utf-8").read()
    except Exception:
        # Narrowed from a bare 'except:', which also swallowed
        # SystemExit and KeyboardInterrupt; failures are reported as None
        result = None
    return result
# Helper function for determining type of query
def queryType(query):
    """
    Returns "ASK", "SELECT", "CONSTRUCT", "DESCRIBE" or None
    """
    # Skip an optional BASE declaration and any PREFIX declarations, then
    # capture the query-form keyword (group 3), case-insensitively.
    pattern = (
        "^(.*base.*<[^>]*>)?"
        "(.*prefix.*<[^>]*>)*"
        ".*(ask|select|construct|describe).*$"
        )
    match = re.match(pattern, query, flags=re.IGNORECASE|re.DOTALL)
    return match.group(3).upper() if match else None
# Main program functions
def getQuery(options, args):
    """
    Get query string from command line option or argument.

    A -q/--query option names a resource containing the query text;
    otherwise the second positional argument is the query itself.
    Returns None if neither is supplied.
    """
    query_source = getattr(options, "query", None)
    if query_source:
        return retrieveUri(query_source)
    return args[1] if len(args) >= 2 else None
def getPrefixes(options):
    """
    Get prefix string from command line option.

    Returns the content of the resource named by --prefix (default
    ~/.asqc-prefixes), or a built-in set of common prefixes if that
    resource cannot be retrieved.
    """
    defaultPrefixes = """
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX owl: <http://www.w3.org/2002/07/owl#>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX dcterms: <http://purl.org/dc/terms/>
        PREFIX foaf: <http://xmlns.com/foaf/0.1/>
        """
    # PREFIX xml: <http://www.w3.org/XML/1998/namespace>
    configbase = os.path.expanduser("~")
    prefixUri = options.prefix or resolveUri(
        ".asqc-prefixes", "file://", configbase)
    # Expand a leading '~' to the user's home directory
    if prefixUri.startswith("~"):
        prefixUri = configbase+prefixUri[1:]
    log.debug("Prefix URI %s"%(prefixUri))
    prefixes = retrieveUri(prefixUri)
    # Fall back to the built-in defaults if retrieval failed
    return prefixes or defaultPrefixes
def getBindings(options):
    """Obtain incoming query variable bindings as a SPARQL JSON results
    structure whose terms have been converted to rdflib values.

    Returns a single empty binding when no -b/--bindings option is given,
    or None when the bindings cannot be obtained or parsed.
    """
    # Default: one empty binding row, so the query runs exactly once
    default_bindings = (
        { "head": { "vars": [] }
        , "results": { "bindings": [{}] }
        })
    source = options.bindings
    if source == "-":
        # Bindings from stdin are only possible when the RDF input comes
        # from somewhere else (a file or a SPARQL endpoint)
        if not (options.rdf_data or options.endpoint):
            return None
        bndtext = sys.stdin.read()
    elif source:
        bndtext = retrieveUri(source)
    else:
        bndtext = None
    if not bndtext:
        return default_bindings
    try:
        parsed = json.loads(bndtext)
        parsed['results']['bindings'] = parseJsonBindings(parsed['results']['bindings'])
        return parsed
    except Exception:
        return None
def getRdfData(options):
    """
    Reads RDF data from files specified using -r or from stdin

    All sources are parsed into a single rdflib.Graph, using the parser
    selected by --format-rdf-in (default RDF/XML).  Returns the merged
    graph, or None if any source fails to parse.
    NOTE: mutates options.rdf_data (defaults it to ['-'], i.e. stdin).
    """
    if not options.rdf_data:
        options.rdf_data = ['-']
    rdfgraph = rdflib.Graph()
    for r in options.rdf_data:
        # base is used as the publicID (base URI) when parsing; stdin has none
        base = ""
        if r == "-":
            rdftext = sys.stdin.read()
        else:
            log.debug("Reading RDF from %s"%(r))
            rdftext = retrieveUri(r)
            base = r
        rdfformatdefault = RDFTYPPARSERMAP[RDFTYP[0]]
        rdfformatselect = RDFTYPPARSERMAP.get(options.format_rdf_in, rdfformatdefault)
        try:
            log.debug("Parsing RDF format %s"%(rdfformatselect))
            # "rdfa+html" is a pseudo-format: RDFa parser with HTML media type
            if rdfformatselect == "rdfa+html":
                rdfgraph.parse(data=rdftext, format="rdfa", media_type="text/html", publicID=base)
            else:
                rdfgraph.parse(data=rdftext, format=rdfformatselect, publicID=base)
        except Exception as e:
            # Any parse failure aborts the whole merge; details at debug level
            log.debug("RDF Parse failed: %s"%(repr(e)))
            log.debug("traceback: %s"%(traceback.format_exc()))
            return None
    return rdfgraph
def queryRdfData(progname, options, prefixes, query, bindings):
    """
    Submit query against RDF data.
    Result is tuple of status and dictionary/list structure suitable for JSON encoding,
    or an rdflib.graph value.

    Status is 0 for success with results, 1 for success with no results,
    2 for failure.  The query is run once per incoming variable binding,
    and the per-run results are combined.
    """
    rdfgraph = getRdfData(options)
    if not rdfgraph:
        print( "%s: Could not read RDF data, or syntax error in input"%progname )
        print( "  Use -r <file> or supply RDF on stdin; specify input format if not RDF/XML" )
        return (2, None)
    query = prefixes + query
    log.debug("queryRdfData query:\n%s\n"%(query))
    try:
        # One query execution per incoming binding row
        resps = [rdfgraph.query(query, initBindings=b) for b in bindings['results']['bindings']]
    except AssertionError:
        print( "Query failed (query syntax problem?)" )
        print( "Submitted query:" )
        print( query )
        return (2, None)
    res = { "head": {} }
    if resps[0].type == 'ASK':
        # True if any of the per-binding executions answered True
        res["boolean"] = any([ r.askAnswer for r in resps ])
        return (0 if res["boolean"] else 1, res)
    elif resps[0].type == 'SELECT':
        res["head"]["vars"] = resps[0].vars
        res["results"] = {}
        # Concatenate result rows from all executions
        res["results"]["bindings"] = [ bindingToJSON(b) for r in resps for b in r.bindings ]
        return (0 if len(res["results"]["bindings"]) > 0 else 1, res)
    elif resps[0].type == 'CONSTRUCT':
        # Merge the constructed graphs from all executions
        res = rdflib.graph.ReadOnlyGraphAggregate( [r.graph for r in resps] )
        return (0 if len(res) > 0 else 1, res)
    else:
        # Bug fix: this previously read '%resp.type' with 'resp' undefined,
        # raising NameError instead of the intended AssertionError
        assert False, "Unexpected query response type %s"%resps[0].type
        return (2, None)
def querySparqlEndpoint(progname, options, prefixes, query, bindings):
    """
    Issue SPARQL query to SPARQL HTTP endpoint.
    Requests either JSON or RDF/XML depending on query type.
    Returns JSON-like dictionary/list structure or RDF graph, depending on query type.
    These are used as basis for result formatting by outputResult function

    Returns (status, result): status 0 for success with results, 1 for
    success with empty results; request/parse failures raise AssertionError.
    """
    query = prefixes + query
    # Default to RDF/XML (CONSTRUCT/DESCRIBE); ASK/SELECT request JSON results
    resulttype = "application/RDF+XML"
    resultjson = False
    querytype = queryType(query)
    if querytype in ["ASK", "SELECT"]:
        # NOTE application/json doesn't work with Fuseki
        # See: http://gearon.blogspot.co.uk/2011/09/sparql-json-after-commenting-other-day.html
        resulttype = "application/sparql-results+json"
        resultjson = True
    if options.verbose:
        print( "== Query to endpoint ==" )
        print( query )
        print( "== resulttype: "+resulttype )
        print( "== resultjson: "+str(resultjson) )
    sc = SparqlHttpClient(endpointuri=options.endpoint)
    ((status, reason), result) = sc.doQueryPOST(query, accept=resulttype, JSON=False)
    if status != 200:
        assert False, "Error from SPARQL query request: %i %s"%(status, reason)
    if options.verbose:
        print( "== Query response ==" )
        print( result )
    if resultjson:
        result = json.loads(result)
    status = 1
    if querytype == "SELECT":
        # Convert incoming rows to rdflib terms, then join with the
        # supplied bindings, keeping only compatible combinations
        result['results']['bindings'] = parseJsonBindings(result['results']['bindings'])
        result['results']['bindings'] = joinBindingsToJSON(
            result['results']['bindings'],
            bindings['results']['bindings'])
        if result['results']['bindings']: status = 0
    # NOTE(review): getBindings() always returns a truthy default binding set,
    # so this branch appears to fire for every non-SELECT query, asserting
    # before the ASK/RDF handling below can run -- confirm intended.
    elif bindings:
        assert False, "Can't use supplied bindings with endpoint query other than SELECT"
    elif querytype == "ASK":
        # Just return JSON from Sparql query
        if result['boolean']: status = 0
    else:
        # return RDF
        rdfgraph = rdflib.Graph()
        try:
            # Note: declaring xml prefix in SPAQL query can result in invalid XML from Fuseki (v2.1)
            # See: https://issues.apache.org/jira/browse/JENA-24
            rdfgraph.parse(data=result)
            result = rdfgraph # Return parsed RDF graph
            if len(result) > 0: status = 0
        except Exception as e:
            assert False, "Error parsing RDF from SPARQL endpoint query: "+str(e)
    return (status, result)
def outputResult(progname, options, result):
    """
    Write the query result to standard output.

    result may be an rdflib.Graph (CONSTRUCT/DESCRIBE), a plain string,
    or a SPARQL JSON results structure (SELECT/ASK); the rendering is
    chosen from --format-rdf-out / --format-var-out as appropriate.
    """
    outstr = sys.stdout
    # NOTE(review): -o with a filename is accepted but not implemented;
    # after this warning, output still goes to stdout.
    if options.output and options.output != "-":
        print( "Output to other than stdout not implemented" )
    if isinstance(result, rdflib.Graph):
        rdfformatdefault = RDFTYPSERIALIZERMAP[RDFTYP[0]]
        rdfformatselect = RDFTYPSERIALIZERMAP.get(options.format_rdf_out, rdfformatdefault)
        result.serialize(destination=outstr, format=rdfformatselect, base=None)
    elif isinstance(result, str):
        # Raw string results are passed through unchanged
        outstr.write(result)
    else:
        # Variable-binding results: JSON (default), XML, CSV, or a
        # user-supplied %-format template applied per result row
        if options.format_var_out == "JSON" or options.format_var_out == None:
            outstr.write(json.dumps(result))
            outstr.write("\n")
        elif options.format_var_out == "XML":
            writeResultsXML(outstr, result)
        elif options.format_var_out == "CSV":
            qvars = result["head"]["vars"]
            outstr.write(", ".join(qvars))
            outstr.write("\n")
            for bindings in result["results"]["bindings"]:
                ### print("---- bindings: "+repr(bindings))
                # Unbound variables are rendered as empty literals
                vals = [ termToCSV(bindings.get(str(v),{'type': 'literal', 'value': ''})) for v in qvars ]
                outstr.write(", ".join(vals))
                outstr.write("\n")
        else:
            for bindings in result["results"]["bindings"]:
                #log.debug("options.format_var_out '%s'"%(repr(options.format_var_out)))
                formattedrow = formatBindings(options.format_var_out, bindings)
                #log.debug("formattedrow '%s'"%(repr(formattedrow)))
                outstr.write(formattedrow)
    return
def run(configbase, options, args):
    """
    Execute the query described by the parsed command-line options.

    Returns an exit status: 0 for success with results, 1 for success
    with empty results, 2 for any failure.
    """
    status = 0
    if options.examples:
        # Just display the examples directory path and exit
        print( "%s/examples"%(os.path.dirname(os.path.abspath(__file__))) )
        return 0
    progname = os.path.basename(args[0])
    query = getQuery(options, args)
    if not query:
        print( "%s: Could not determine query string (need query argument or -q option)"%progname )
        print( "Run '%s --help' for more information"%progname )
        return 2
    prefixes = getPrefixes(options)
    if not prefixes:
        print( "%s: Could not determine query prefixes"%progname )
        print( "Run '%s --help' for more information"%progname )
        return 2
    ## log.debug("Prefixes:\n%s\n"%(prefixes))
    bindings = getBindings(options)
    if not bindings:
        print( "%s: Could not determine incoming variable bindings"%progname )
        print( "Run '%s --help' for more information"%progname )
        return 2
    if options.verbose:
        print( "== Options ==" )
        print( repr(options) )
        print( "== Prefixes ==" )
        print( prefixes )
        print( "== Query ==" )
        print( query )
        print( "== Initial bindings ==" )
        print( bindings )
    # Query a remote SPARQL endpoint if -e was given, else local RDF data
    if options.endpoint:
        (status,result) = querySparqlEndpoint(progname, options, prefixes, query, bindings)
    else:
        (status,result) = queryRdfData(progname, options, prefixes, query, bindings)
    if result:
        outputResult(progname, options, result)
    return status
def parseCommandArgs(argv):
    """
    Parse command line arguments

    argv -- argument list from command line

    Returns a pair consisting of options specified as returned by
    OptionParser, and any remaining unparsed arguments.

    After parsing, the combined -f/--format value (if any) is split into
    the individual --format-rdf-in/out and --format-var-in/out options.
    """
    # create a parser for the command line options
    parser = optparse.OptionParser(
        usage=("\n"+
            "  %prog [options] [query]\n"+
            "  %prog --help for an options summary\n"+
            "  %prog --examples to display the path containing example queries"),
        description="A sparql query client, designed to be used as a filter in a command pipeline. "+
            "Pipelined data can be RDF or query variable binding sets, depending on the options used.",
        version="%prog "+asqc_settings.VERSION)
    parser.add_option("--examples",
        action="store_true",
        dest="examples",
        default=False,
        help="display path of examples directory and exit")
    parser.add_option("-b", "--bindings",
        dest="bindings",
        default=None,
        help="URI or filename of resource containing incoming query variable bindings "+
             "(default none). "+
             "Specify '-' to use stdin. "+
             "This option works for SELECT queries only when accessing a SPARQL endpoint.")
    parser.add_option("--debug",
        action="store_true",
        dest="debug",
        default=False,
        help="run with full debug output enabled")
    parser.add_option("-e", "--endpoint",
        dest="endpoint",
        default=None,
        help="URI of SPARQL endpoint to query.")
    parser.add_option("-f", "--format",
        dest="format",
        default=None,
        help="Format for input and/or output: "+
             "RDFXML, N3, NT, TURTLE, JSONLD, RDFA, HTML5, JSON, CSV or template. "+
             "XML, N3, NT, TURTLE, JSONLD, RDFA, HTML5 apply to RDF data, "+
             "others apply to query variable bindings. "+
             "Multiple comma-separated values may be specified; "+
             "they are applied to RDF or variable bindings as appropriate. "+
             "'template' is a python formatting template with '%(var)s' for query variable 'var'. "+
             "If two values are given for RDF or variable binding data, "+
             "they are applied to input and output respectively. "+
             "Thus: RDFXML,JSON = RDF/XML and JSON result bindings; "+
             "RDFXML,N3 = RDF/XML input and Turtle output; etc.")
    parser.add_option("-o", "--output",
        dest="output",
        default='-',
        help="URI or filename of RDF resource for output "+
             "(default stdout)."+
             "Specify '-'to use stdout.")
    parser.add_option("-p", "--prefix",
        dest="prefix",
        default="~/.asqc-prefixes",
        help="URI or filename of resource containing query prefixes "+
             "(default %default)")
    parser.add_option("-q", "--query",
        dest="query",
        help="URI or filename of resource containing query to execute. "+
             "If not present, query must be supplied as command line argument.")
    parser.add_option("-r", "--rdf-input",
        action="append",
        dest="rdf_data",
        default=None,
        help="URI or filename of RDF resource to query "+
             "(default stdin or none). "+
             "May be repeated to merge multiple input resources. "+
             "Specify '-' to use stdin.")
    parser.add_option("-v", "--verbose",
        action="store_true",
        dest="verbose",
        default=False,
        help="display verbose output")
    parser.add_option("--query-type",
        dest="query_type",
        default=None,
        help="Type of query output: SELECT (variable bindings, CONSTRUCT (RDF) or ASK (status). "+
             "May be used when system cannot tell the kind of result by analyzing the query itself. "+
             "(Currently not used)")
    parser.add_option("--format-rdf-in",
        dest="format_rdf_in",
        default=None,
        help="Format for RDF input data: RDFXML, N3, NT, TURTLE, JSONLD, RDFA or HTML5. "+
             "RDFA indicates RDFa embedded in XML (or XHTML); "+
             "HTML5 indicates RDFa embedded in HTML5.")
    parser.add_option("--format-rdf-out",
        dest="format_rdf_out",
        default=None,
        help="Format for RDF output data: RDFXML, N3, NT, TURTLE or JSONLD.")
    parser.add_option("--format-var-in",
        dest="format_var_in",
        default=None,
        help="Format for query variable binding input data: JSON or CSV.")
    parser.add_option("--format-var-out",
        dest="format_var_out",
        default=None,
        help="Format for query variable binding output data: JSON, CSV or template. "+
             "The template option is a Python format string applied to a dictionary of query result variables.")
    # parse command line now
    (options, args) = parser.parse_args(argv)
    if len(args) < 1: parser.error("No command present")
    if len(args) > 2: parser.error("Too many arguments present: "+repr(args))
    # Split off the next comma-separated format keyword from s; returns
    # (keyword, rest) if s starts with a known keyword from kws, else
    # (s, "") so the whole remaining value is treated as one token
    # (e.g. a formatting template).
    def pick_next_format_option(s,kws):
        t = s
        for k in kws:
            if s.upper().startswith(k):
                s = s[len(k):]
                if s == "": return (k, "")
                if s.startswith(','): return (k, s[1:])
                break
        return (t, "")
    if options.format:
        fs = options.format
        while fs:
            fn,fs = pick_next_format_option(fs, RDFTYP+VARTYP)
            if fn in RDFTYP:
                # First RDF format applies to input; serializable formats
                # also set (and later values override) the output format
                if not options.format_rdf_in:
                    options.format_rdf_in = fn
                if fn in RDFTYPSERIALIZERMAP:
                    options.format_rdf_out = fn
            else:
                # Variable binding format; unrecognized values are treated
                # as an output formatting template
                if not options.format_var_in and fn in VARTYP:
                    options.format_var_in = fn
                options.format_var_out = fn
    if options.verbose:
        print( "RDF graph input format: "+repr(options.format_rdf_in) )
        print( "RDF graph output format: "+repr(options.format_rdf_out) )
        print( "Var binding input format: "+repr(options.format_var_in) )
        print( "Var binding output format: "+repr(options.format_var_out) )
    return (options, args)
def runCommand(configbase, argv):
    """
    Run the asqc command with the supplied configuration base directory
    and command-line argument list.  Called by the main function (below),
    and also by test suite routines.  Returns the process exit status.
    """
    log.debug("runCommand: configbase %s, argv %s"%(configbase, repr(argv)))
    options, args = parseCommandArgs(argv)
    # Enable full debug logging when requested (or if parsing failed)
    if not options or options.debug:
        logging.basicConfig(level=logging.DEBUG)
    if not options:
        return 2
    return run(configbase, options, args)
def runMain():
    """
    Main program transfer function for setup.py console script
    """
    # Per-user configuration (e.g. ~/.asqc-prefixes) lives in the home dir
    homedir = os.path.expanduser("~")
    return runCommand(homedir, sys.argv)
if __name__ == "__main__":
    """
    Program invoked from the command line.
    """
    # main program
    # Exit with the status code produced by the command
    status = runMain()
    sys.exit(status)
#--------+---------+---------+---------+---------+---------+---------+---------+
| [
"gk-github@ninebynine.org"
] | gk-github@ninebynine.org |
610057694e00c3e4fac05320e103e3137f135d00 | 76192480d7469e3d7f6ac8d8bbc3334445e5fddc | /app.py | 07f63fbd87e728aa0ad6e9cd795f03b20816c8e7 | [] | no_license | forgeworks/splendor | b7d383a154bf72701a00d005f9aafbd3e90a6b30 | f99d66b76971f318637944a8ce5921367ee4aa21 | refs/heads/master | 2023-05-12T03:07:17.860147 | 2020-04-03T17:38:55 | 2020-04-03T17:38:55 | 155,748,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from flask import Flask
from api_example import GreetingV1
# Create the WSGI application object
app = Flask(__name__)
# Mount the version-1 greeting API blueprint on the application
app.register_blueprint(GreetingV1())
| [
"deadwisdom@gmail.com"
] | deadwisdom@gmail.com |
e884b3995dc86624872f534df6a05bcb1cc306c6 | e5e55e853dc834177f03cf8bb5161e847baa9830 | /consts.py | 807ce52fe670cf0645cb541a4bae87c9750e1bc0 | [
"MIT"
] | permissive | mmmdamin/mastermind | 2b62864443db1c9bc2dab47d5aed818f6cd0e991 | d775e0492165acba8951fe308330f38bf88947d6 | refs/heads/master | 2021-09-02T07:34:47.015269 | 2017-12-31T14:29:10 | 2017-12-31T14:29:10 | 115,865,993 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | GAME_LENGTH = 4
# Peg colors available in the game, keyed by the digit a player enters.
COLORS = {
    1: "red",
    2: "blue",
    3: "green",
    4: "yellow",
    5: "white",
}
# Valid color-code range, derived from COLORS so the two always stay in sync.
# (min()/max() iterate a dict's keys directly; no need for .keys())
MIN_COLOR = min(COLORS)
MAX_COLOR = max(COLORS)
# Number of guesses the player gets before losing.
MAX_TURNS = 1
WIN_MESSAGE = "You won!"
# Formatted with the secret combination when the player loses.
LOSE_MESSAGE = "You lose! initial state {}"
| [
"sabbaghian@arsh.co"
] | sabbaghian@arsh.co |
67dccdaf388e326388afec57b7acdf38c78908a9 | eba0e40667d6082b5eeefdbaf2862e3f02fd774c | /mr_utils/sim/ssfp/quantitative_field_mapping.py | 44a85af73a56bb265904c32bd1da3b6aaf216bbc | [] | no_license | zongjg/mr_utils | a0ec98ed2d03a6d52d81be8ef108993f92baeee1 | 08cb43dcf53fd6fddd3304e3514a608842310a34 | refs/heads/master | 2022-01-04T16:25:41.065177 | 2019-05-11T20:20:22 | 2019-05-11T20:20:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,355 | py | '''Quantitative field mapping for bSSFP.
Collect quantitative MR maps (T1, T2, flip angle), then, assuming that these
won't change during the duration of the scan, we can use these to take a single
bSSFP scan each time point and solve for the off-resonance. Thus we get a
field map at time point.
'''
import numpy as np
from mr_utils.utils import find_nearest
from mr_utils.sim.ssfp import ssfp
# from mr_utils import view
def get_df_responses(T1, T2, PD, TR, alpha, phase_cyc, dfs):
    '''Simulate the bSSFP response over a sweep of off-resonance values.

    Parameters
    ==========
    T1 : float
        Scalar T1 longitudinal recovery value in seconds.
    T2 : float
        Scalar T2 transverse decay value in seconds.
    PD : float
        Scalar proton density value scaled the same as acquisition.
    TR : float
        Repetition time in seconds.
    alpha : float
        Flip angle in radians.
    phase_cyc : float
        RF phase cycling in radians.
    dfs : float
        Off-resonance values to simulate over.

    Returns
    =======
    resp : array_like
        Simulated SSFP signal, index-aligned with dfs.
    '''
    # Broadcast the scalar tissue parameters over every requested
    # off-resonance value so one vectorized ssfp() call covers the sweep.
    ones = np.ones(np.shape(dfs))
    resp = ssfp(ones*T1, ones*T2, TR, alpha, dfs, phase_cyc=phase_cyc, M0=ones*PD)
    return resp
def quantitative_fm_scalar(Mxy, dfs, T1, T2, PD, TR, alpha, phase_cyc):
    '''Estimate off-resonance for a single voxel with known T1, T2, PD.

    Parameters
    ==========
    Mxy : float
        Complex transverse signal we measure.
    dfs : array_like
        Off-resonance values to simulate over.
    T1 : float
        Scalar T1 longitudinal recovery value in seconds.
    T2 : float
        Scalar T2 transverse decay value in seconds.
    PD : float
        Scalar proton density value scaled the same as acquisition.
    TR : float
        Repetition time in seconds.
    alpha : float
        Flip angle in radians.
    phase_cyc : float
        RF phase cycling in radians.

    Returns
    =======
    float
        Off-resonance value whose simulated signal most closely matches Mxy.
    '''
    # Simulate the whole off-resonance spectrum for this tissue, then pick
    # the frequency whose simulated response best matches the measurement.
    spectrum = get_df_responses(T1, T2, PD, TR, alpha, phase_cyc, dfs)
    idx, _ = find_nearest(spectrum, Mxy)
    return dfs[idx]
def quantitative_fm(Mxys, dfs, T1s, T2s, PDs, TR, alpha, phase_cyc, mask=None):
    '''Find field map given quantitative maps.

    Parameters
    ==========
    Mxys : array_like
        Complex transverse signal we measure.
    dfs : array_like
        Off-resonance values to simulate over.
    T1s : array_like
        scalar T1 longitudinal recovery value in seconds.
    T2s : array_like
        scalar T2 transverse decay value in seconds.
    PDs : array_like
        scalar proton density value scaled the same as acquisiton.
    TR : float
        Repetition time in seconds.
    alpha : float
        Flip angle in radians.
    phase_cyc : float
        RF phase cycling in radians.
    mask : array_like
        Boolean mask to tell which pixels we should compute df for.

    Returns
    =======
    fm : array_like
        Field map (same shape as T1s; masked-out pixels are 0).
    '''
    # Cache of simulated spectra keyed by (PD, T1, T2) tissue parameters
    resps = {}
    # Output is reshaped back to the input map shape at the end
    orig_size = np.asarray(T1s).shape
    if mask is None:
        mask = np.ones(Mxys.shape)
    # Work on flattened copies so a single index loop covers any map shape
    Mxys = np.asarray(Mxys).flatten()
    T1s = np.asarray(T1s).flatten()
    T2s = np.asarray(T2s).flatten()
    PDs = np.asarray(PDs).flatten()
    mask = np.asarray(mask).flatten()
    fm = np.zeros(Mxys.size)
    for ii in range(Mxys.size):
        if mask[ii]:
            # Cache results for later in case we come across the same T1,T2,PD
            if (PDs[ii], T1s[ii], T2s[ii]) not in resps:
                resps[(PDs[ii], T1s[ii], T2s[ii])] = get_df_responses(
                    T1s[ii], T2s[ii], PDs[ii], TR, alpha, phase_cyc, dfs)
            # Find the appropriate off-resonance value for this T1,T2,PD,Mxy
            idx, _val = find_nearest(
                resps[(PDs[ii], T1s[ii], T2s[ii])], Mxys[ii])
            fm[ii] = dfs[idx]
        else:
            # Pixels outside the mask get zero off-resonance
            fm[ii] = 0
    return fm.reshape(orig_size)
| [
"nicholas.bgp@gmail.com"
] | nicholas.bgp@gmail.com |
a4e0c192f3c8f4463eae05876b00114d00ab91c7 | 8ce23f191870868c86c7616882e6043b1102cb0d | /tools/text_processing/join_files_on_column_fuzzy/join_files_on_column_fuzzy.py | 1e19f1dcfe4a4d3ab0743078894f5c196b0b2559 | [] | no_license | StevenVerbruggen/galaxytools | 56f99d0d629cb6d9e3db290c64f30b920de04f26 | 7d7365197e2cba2eb048121c9f0ee5546f06c520 | refs/heads/master | 2021-01-16T17:51:39.721403 | 2020-12-01T08:35:51 | 2020-12-01T08:35:51 | 100,017,016 | 0 | 0 | null | 2017-08-11T09:42:20 | 2017-08-11T09:42:20 | null | UTF-8 | Python | false | false | 4,755 | py | #!/usr/bin/env python
import os
import argparse
import sys
def main(args):
    """
    Merge rows of args.f1 and args.f2 whose numeric join columns (args.c1,
    args.c2, 1-based) differ by at most args.distance (in 'absolute' or
    'ppm' units, per args.unit).  Both files must be sorted ascending on
    their join column; f2 is streamed through a small look-ahead cache so
    each f1 row only scans the nearby f2 rows.  Matching rows (optionally
    only the closest, with an optional distance column) are written,
    tab-separated, to args.outfile.
    """
    if args.header:
        h1 = True
        h2 = True
    else:
        h1 = False
        h2 = False
    # Look-ahead cache of f2 rows still compatible with the current f1 value
    cache = list()
    out = open(args.outfile, 'w+')
    write_buffer = list()
    def _readline(header = False):
        # Generator over f2: yields the raw header line first (if any),
        # then (columns, join-column-value) pairs for each data line
        with open(args.f2) as handle2:
            for line in handle2:
                line = line.strip()
                if header:
                    header = False
                    yield line
                    continue
                if not line:
                    continue
                columns = line.split(args.sep)
                value2 = columns[args.c2-1]
                yield columns, float(value2)
    def fill_cache():
        # Pull the next f2 row into the cache; silently stop at EOF
        try:
            cache.append(next(it))
        except StopIteration:
            pass
    it = _readline(header = h2)
    with open(args.f1) as handle1:
        for line in handle1:
            line = line.strip()
            if h1:
                # Combine the two header lines (f2's header is the first
                # item yielded by the generator); note 'seconda_header' is
                # a typo for 'second_header', kept as-is
                h1 = False
                seconda_header = next(it)
                # NOTE(review): the extra column is headed with args.unit
                # (e.g. 'absolute'/'ppm') rather than e.g. 'distance' --
                # confirm intended
                if args.add_distance:
                    out.write('%s\t%s\t%s\n' % (line, seconda_header, args.unit))
                else:
                    out.write('%s\t%s\n' % (line, seconda_header))
                continue
            if not line:
                continue
            columns = line.split(args.sep)
            value1 = float(columns[args.c1-1])
            # Rows to carry over into the cache for the next f1 value
            _cache = list()
            fill_cache()
            while cache:
                _c, value2 = cache.pop(0)
                upper_bound = value1 + args.distance
                if args.unit == 'absolute':
                    if value2 <= upper_bound and value2 >= (value1 - args.distance):
                        # Within tolerance: emit joined row, keep the f2 row
                        # for the next f1 value, and read one more f2 row
                        line_template = '%s\n'
                        abs_dist = abs(value1 - value2)
                        if args.add_distance:
                            line_template = '%s\t' + str(abs_dist) + '\n'
                        write_buffer.append([abs_dist, line_template % '\t'.join( columns + _c )])
                        _cache.append([_c, value2])
                        fill_cache()
                    elif value2 > upper_bound:
                        # if the value from list 2 is bigger then the current value, he will be taken into the next round
                        _cache.append([_c, value2])
                    elif value2 < upper_bound:
                        # if the value from list 2 is smaller then the currecnt value, check the next one of list 2
                        fill_cache()
                elif args.unit == 'ppm':
                    # Relative (parts-per-million) distance from value1
                    ppm_dist = abs((value1 - value2) / value1 * 1000000)
                    if ppm_dist <= args.distance:
                        line_template = '%s\n'
                        if args.add_distance:
                            line_template = '%s\t' + str(ppm_dist) + '\n'
                        write_buffer.append([ppm_dist, line_template % '\t'.join( columns + _c )])
                        _cache.append([_c, value2])
                        fill_cache()
                    elif ppm_dist > args.distance:
                        _cache.append([_c, value2])
                    elif ppm_dist < args.distance:
                        fill_cache()
            if args.closest and write_buffer:
                # Keep only the match with the smallest distance
                write_buffer.sort(key=lambda x: x[0])
                out.write(write_buffer[0][1])
            else:
                for _dist, line in write_buffer:
                    out.write(line)
            write_buffer = list()
            cache = _cache
    out.close()
if __name__ == '__main__':
    # Command-line entry point: define the interface and run the merge
    parser = argparse.ArgumentParser(description='Merge two files on a common column the fuzzy way.')
    parser.add_argument('--f1', required=True)
    parser.add_argument('--f2', required=True)
    parser.add_argument('--c1', type=int, required=True, help="Column in file 1 to be merged on.")
    parser.add_argument('--c2', type=int, required=True, help="Column in file 2 to be merged on.")
    parser.add_argument('--outfile', required=True)
    parser.add_argument('--header', action='store_true', help="The files have a header line at the beginning.")
    parser.add_argument('--closest', action='store_true', help="Only report the closest match.")
    parser.add_argument('--add_distance', action='store_true', help="Add addional column with the distance between the two values.")
    parser.add_argument('--sep', type=str, default="\t", help="Files are separated by this separator.")
    # Note: the string default "0.2" is converted via type=float by argparse
    parser.add_argument('--distance', type=float, default="0.2", help="Maximal allowed distance.")
    parser.add_argument('--unit', choices=['ppm', 'absolute'], default='absolute')
    args = parser.parse_args()
    main(args)
| [
"bjoern.gruening@gmail.com"
] | bjoern.gruening@gmail.com |
a0f7ba2af8f27035c0405bb59bf919df9026b98a | ffa667819d306d634b15d089f5cef9dbaf652abb | /longan_sqlite/util.py | f5bf9740129cb819b172f44add348691f7be5d2b | [
"MIT"
] | permissive | xiaobaiso/longan-sqlite3 | 084a08e5e5e70b165fcc60da224cb2bbe6c761b8 | 6a260c913f96bdf90b7e0f6bb3f9d8f1a57e9f57 | refs/heads/master | 2020-03-18T08:54:50.466931 | 2018-05-21T04:44:42 | 2018-05-21T04:44:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from .flesh import Flesh
def convert_dicts(fields, items):
    """
    Pair each row in items with the column names in fields, and wrap each
    resulting mapping in a Flesh object.

    :param fields: sequence of column names
    :param items: sequence of rows, indexable by column position
    :return: list(Flesh)
    :rtype: list
    """
    return [
        Flesh({name: row[pos] for pos, name in enumerate(fields)})
        for row in items
        ]
def add_quotes(value):
    """Render a value for embedding in SQL text: strings are wrapped in
    double quotes, everything else is converted with str()."""
    if not isinstance(value, str):
        return str(value)
    return '"{}"'.format(value)
| [
"yaoma@58ganji.com"
] | yaoma@58ganji.com |
7fc429a82b9d8ab58b2b4f3d058c32c644b665b4 | c43ab8680e571136899564070928bfa9d54612d3 | /addons/io_scene_gltf2/__init__.py | 2ea8edd3846ad4a2159f02ddd41824637c83729a | [
"Apache-2.0"
] | permissive | cuulee/glTF-Blender-IO | 447132a934a381b736dc4eaa1cc885d2d777c7b0 | fb1bc8dbaffb416502a2ac713b1fe3ad1d4c481e | refs/heads/master | 2020-03-26T01:53:11.525100 | 2018-08-11T07:33:26 | 2018-08-11T07:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,859 | py | # Copyright (c) 2018 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import bpy
import os
from bpy_extras.io_utils import ImportHelper
from bpy.types import Operator
from .blender.imp.io import *
from .blender.imp.scene import *
from .blender.imp.util import *
from bpy.props import (CollectionProperty,
StringProperty,
BoolProperty,
EnumProperty,
FloatProperty,
IntProperty)
from bpy_extras.io_utils import (ExportHelper)
#
# Globals
#
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure & Norbert Nopper',
'blender': (2, 79, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
'warning': '',
'wiki_url': ''
'',
'support': 'COMMUNITY',
'category': 'Import-Export'}
#
# Functions / Classes.
#
class GLTF2ExportSettings(bpy.types.Operator):
"""Save the export settings on export (saved in .blend).
Toggle off to clear settings"""
bl_label = "Save Settings"
bl_idname = "scene.gltf2_export_settings_set"
def execute(self, context):
operator = context.active_operator
operator.will_save_settings = not operator.will_save_settings
if not operator.will_save_settings and context.scene.get(operator.scene_key, False):
# clear settings
del context.scene[operator.scene_key]
return {"FINISHED"}
class ExportGLTF2_Base():
export_copyright = StringProperty(
name='Copyright',
description='',
default=''
)
export_embed_buffers = BoolProperty(
name='Embed buffers',
description='',
default=False
)
export_embed_images = BoolProperty(
name='Embed images',
description='',
default=False
)
export_strip = BoolProperty(
name='Strip delimiters',
description='',
default=False
)
export_indices = EnumProperty(
name='Maximum indices',
items=(('UNSIGNED_BYTE', 'Unsigned Byte', ''),
('UNSIGNED_SHORT', 'Unsigned Short', ''),
('UNSIGNED_INT', 'Unsigned Integer', '')),
default='UNSIGNED_INT'
)
export_force_indices = BoolProperty(
name='Force maximum indices',
description='',
default=False
)
export_texcoords = BoolProperty(
name='Export texture coordinates',
description='',
default=True
)
export_normals = BoolProperty(
name='Export normals',
description='',
default=True
)
export_tangents = BoolProperty(
name='Export tangents',
description='',
default=True
)
export_materials = BoolProperty(
name='Export materials',
description='',
default=True
)
export_colors = BoolProperty(
name='Export colors',
description='',
default=True
)
export_cameras = BoolProperty(
name='Export cameras',
description='',
default=False
)
export_camera_infinite = BoolProperty(
name='Infinite perspective Camera',
description='',
default=False
)
export_selected = BoolProperty(
name='Export selected only',
description='',
default=False
)
export_layers = BoolProperty(
name='Export all layers',
description='',
default=True
)
export_extras = BoolProperty(
name='Export extras',
description='',
default=False
)
export_yup = BoolProperty(
name='Convert Z up to Y up',
description='',
default=True
)
export_apply = BoolProperty(
name='Apply modifiers',
description='',
default=False
)
export_animations = BoolProperty(
name='Export animations',
description='',
default=True
)
export_frame_range = BoolProperty(
name='Export within playback range',
description='',
default=True
)
export_frame_step = IntProperty(
name='Frame step size',
description='Step size (in frames) for animation export.',
default=1,
min=1,
max=120
)
export_move_keyframes = BoolProperty(
name='Keyframes start with 0',
description='',
default=True
)
export_force_sampling = BoolProperty(
name='Force sample animations',
description='',
default=False
)
export_current_frame = BoolProperty(
name='Export current frame',
description='',
default=True
)
export_skins = BoolProperty(
name='Export skinning',
description='',
default=True
)
export_bake_skins = BoolProperty(
name='Bake skinning constraints',
description='',
default=False
)
export_morph = BoolProperty(
name='Export morphing',
description='',
default=True
)
export_morph_normal = BoolProperty(
name='Export morphing normals',
description='',
default=True
)
export_morph_tangent = BoolProperty(
name='Export morphing tangents',
description='',
default=True
)
export_lights = BoolProperty(
name='Export KHR_lights_punctual',
description='',
default=False
)
export_displacement = BoolProperty(
name='Export KHR_materials_displacement',
description='',
default=False
)
will_save_settings = BoolProperty(default=False)
# Custom scene property for saving settings
scene_key = "glTF2ExportSettings"
#
def invoke(self, context, event):
settings = context.scene.get(self.scene_key)
self.will_save_settings = False
if settings:
try:
for (k,v) in settings.items():
setattr(self, k, v)
self.will_save_settings = True
except AttributeError:
self.report({"ERROR"}, "Loading export settings failed. Removed corrupted settings")
del context.scene[self.scene_key]
return ExportHelper.invoke(self, context, event)
def save_settings(self, context):
# find all export_ props
all_props = self.properties
export_props = {x:all_props.get(x) for x in dir(all_props)
if x.startswith("export_") and all_props.get(x) is not None}
context.scene[self.scene_key] = export_props
def execute(self, context):
from .blender.exp import gltf2_blender_export
if self.will_save_settings:
self.save_settings(context)
# All custom export settings are stored in this container.
export_settings = {}
export_settings['gltf_filepath'] = bpy.path.ensure_ext(self.filepath, self.filename_ext)
export_settings['gltf_filedirectory'] = os.path.dirname(export_settings['gltf_filepath']) + '/'
export_settings['gltf_format'] = self.export_format
export_settings['gltf_copyright'] = self.export_copyright
export_settings['gltf_embed_buffers'] = self.export_embed_buffers
export_settings['gltf_embed_images'] = self.export_embed_images
export_settings['gltf_strip'] = self.export_strip
export_settings['gltf_indices'] = self.export_indices
export_settings['gltf_force_indices'] = self.export_force_indices
export_settings['gltf_texcoords'] = self.export_texcoords
export_settings['gltf_normals'] = self.export_normals
export_settings['gltf_tangents'] = self.export_tangents and self.export_normals
export_settings['gltf_materials'] = self.export_materials
export_settings['gltf_colors'] = self.export_colors
export_settings['gltf_cameras'] = self.export_cameras
if self.export_cameras:
export_settings['gltf_camera_infinite'] = self.export_camera_infinite
else:
export_settings['gltf_camera_infinite'] = False
export_settings['gltf_selected'] = self.export_selected
export_settings['gltf_layers'] = self.export_layers
export_settings['gltf_extras'] = self.export_extras
export_settings['gltf_yup'] = self.export_yup
export_settings['gltf_apply'] = self.export_apply
export_settings['gltf_animations'] = self.export_animations
if self.export_animations:
export_settings['gltf_current_frame'] = False
export_settings['gltf_frame_range'] = self.export_frame_range
export_settings['gltf_move_keyframes'] = self.export_move_keyframes
export_settings['gltf_force_sampling'] = self.export_force_sampling
else:
export_settings['gltf_current_frame'] = self.export_current_frame
export_settings['gltf_frame_range'] = False
export_settings['gltf_move_keyframes'] = False
export_settings['gltf_force_sampling'] = False
export_settings['gltf_skins'] = self.export_skins
if self.export_skins:
export_settings['gltf_bake_skins'] = self.export_bake_skins
else:
export_settings['gltf_bake_skins'] = False
export_settings['gltf_frame_step'] = self.export_frame_step
export_settings['gltf_morph'] = self.export_morph
if self.export_morph:
export_settings['gltf_morph_normal'] = self.export_morph_normal
else:
export_settings['gltf_morph_normal'] = False
if self.export_morph and self.export_morph_normal:
export_settings['gltf_morph_tangent'] = self.export_morph_tangent
else:
export_settings['gltf_morph_tangent'] = False
export_settings['gltf_lights'] = self.export_lights
export_settings['gltf_displacement'] = self.export_displacement
export_settings['gltf_binary'] = bytearray()
export_settings['gltf_binaryfilename'] = os.path.splitext(os.path.basename(self.filepath))[0] + '.bin'
return gltf2_blender_export.save(self, context, export_settings)
def draw(self, context):
layout = self.layout
#
col = layout.box().column()
col.label('Embedding:', icon='PACKAGE')
col.prop(self, 'export_copyright')
if self.export_format == 'ASCII':
col.prop(self, 'export_embed_buffers')
col.prop(self, 'export_embed_images')
col.prop(self, 'export_strip')
col = layout.box().column()
col.label('Nodes:', icon='OOPS')
col.prop(self, 'export_selected')
col.prop(self, 'export_layers')
col.prop(self, 'export_extras')
col.prop(self, 'export_yup')
col = layout.box().column()
col.label('Meshes:', icon='MESH_DATA')
col.prop(self, 'export_apply')
col.prop(self, 'export_indices')
col.prop(self, 'export_force_indices')
col = layout.box().column()
col.label('Attributes:', icon='SURFACE_DATA')
col.prop(self, 'export_texcoords')
col.prop(self, 'export_normals')
if self.export_normals:
col.prop(self, 'export_tangents')
col.prop(self, 'export_colors')
col = layout.box().column()
col.label('Objects:', icon='OBJECT_DATA')
col.prop(self, 'export_cameras')
if self.export_cameras:
col.prop(self, 'export_camera_infinite')
col = layout.box().column()
col.label('Materials:', icon='MATERIAL_DATA')
col.prop(self, 'export_materials')
col = layout.box().column()
col.label('Animation:', icon='OUTLINER_DATA_POSE')
col.prop(self, 'export_animations')
if self.export_animations:
col.prop(self, 'export_frame_range')
col.prop(self, 'export_frame_step')
col.prop(self, 'export_move_keyframes')
col.prop(self, 'export_force_sampling')
else:
col.prop(self, 'export_current_frame')
col.prop(self, 'export_skins')
if self.export_skins:
col.prop(self, 'export_bake_skins')
col.prop(self, 'export_morph')
if self.export_morph:
col.prop(self, 'export_morph_normal')
if self.export_morph_normal:
col.prop(self, 'export_morph_tangent')
addon_prefs = context.user_preferences.addons[__name__].preferences
if addon_prefs.experimental:
col = layout.box().column()
col.label('Experimental:', icon='RADIO')
col.prop(self, 'export_lights')
col.prop(self, 'export_displacement')
row = layout.row()
row.operator(
GLTF2ExportSettings.bl_idname,
GLTF2ExportSettings.bl_label,
icon="%s" % "PINNED" if self.will_save_settings else "UNPINNED")
class ExportGLTF2_GLTF(bpy.types.Operator, ExportGLTF2_Base, ExportHelper):
'''Export scene as glTF 2.0 file'''
bl_idname = 'export_scene.gltf'
bl_label = 'Export glTF 2.0'
filename_ext = '.gltf'
filter_glob = StringProperty(default='*.gltf', options={'HIDDEN'})
export_format = 'ASCII'
class ExportGLTF2_GLB(bpy.types.Operator, ExportGLTF2_Base, ExportHelper):
'''Export scene as glTF 2.0 file'''
bl_idname = 'export_scene.glb'
bl_label = 'Export glTF 2.0 binary'
filename_ext = '.glb'
filter_glob = StringProperty(default='*.glb', options={'HIDDEN'})
export_format = 'BINARY'
def menu_func_export_gltf(self, context):
self.layout.operator(ExportGLTF2_GLTF.bl_idname, text='glTF 2.0 (.gltf)')
def menu_func_export_glb(self, context):
self.layout.operator(ExportGLTF2_GLB.bl_idname, text='glTF 2.0 (.glb)')
from bpy.types import AddonPreferences
class ExportGLTF2_AddonPreferences(AddonPreferences):
bl_idname = __name__
experimental = BoolProperty(name='Enable experimental glTF export settings', default=False)
def draw(self, context):
layout = self.layout
layout.prop(self, "experimental")
class ImportglTF2(Operator, ImportHelper):
bl_idname = 'import_scene.gltf'
bl_label = "glTF 2.0 (.gltf/.glb)"
filename_ext = ".gltf"
filter_glob = StringProperty(default="*.gltf;*.glb", options={'HIDDEN'})
loglevel = bpy.props.EnumProperty(items=Log.getLevels(), description="Log Level", default=Log.default())
def execute(self, context):
return self.import_gltf2(context)
def import_gltf2(self, context):
bpy.context.scene.render.engine = 'CYCLES'
self.gltf = glTFImporter(self.filepath, self.loglevel)
self.gltf.log.critical("Starting loading glTF file")
success, txt = self.gltf.read()
if not success:
self.report({'ERROR'}, txt)
return {'CANCELLED'}
self.gltf.log.critical("Data are loaded, start creating Blender stuff")
self.gltf.blender_create()
self.gltf.debug_missing()
self.gltf.log.critical("glTF import is now finished")
self.gltf.log.removeHandler(self.gltf.log_handler)
# Switch to newly created main scene
bpy.context.screen.scene = bpy.data.scenes[self.gltf.blender.scene]
return {'FINISHED'}
def menu_func_import(self, context):
self.layout.operator(ImportglTF2.bl_idname, text=ImportglTF2.bl_label)
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export_gltf)
bpy.types.INFO_MT_file_export.append(menu_func_export_glb)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export_gltf)
bpy.types.INFO_MT_file_export.remove(menu_func_export_glb)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
| [
"nopper@ux3d.io"
] | nopper@ux3d.io |
b897b084b288350d1a287661007953393d395943 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/BprimeBprime/BprimeBprimeToBHBHinc_M_800_TuneZ2star_8TeV_madgraph_cff.py | 352c4947ebcf1ce31ccf35f0dd2e24c3165cb26a | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 4,231 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=7 ! User defined processes',
'MWID(7)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(7,1) = 800.0D0 ! bprime quarks mass',
'PMAS(7,2) = 8.000D0 ! bprime quark width',
'PMAS(7,3) = 80.00D0 ! Max value above which the BW shape is truncated',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(56,1)=0 ! g b4',
'MDME(57,1)=0 ! gamma b4',
'MDME(58,1)=0 ! Z0 b',
'MDME(59,1)=0 ! W u',
'MDME(60,1)=0 ! W c',
'MDME(61,1)=0 ! W t',
'MDME(62,1)=0 ! W t4',
'KFDP(63,2)=5 ! defines H0 b',
'MDME(63,1)=1 ! h0 b4',
'MDME(64,1)=-1 ! H- c',
'MDME(65,1)=-1 ! H- t',
'BRAT(56) = 0.0D0',
'BRAT(57) = 0.0D0',
'BRAT(58) = 0.0D0',
'BRAT(59) = 0.0D0',
'BRAT(60) = 0.0D0',
'BRAT(61) = 0.0D0',
'BRAT(62) = 0.0D0',
'BRAT(63) = 1.0D0',
'BRAT(64) = 0.0D0',
'BRAT(65) = 0.0D0',
'MDME(210,1)=1 !Higgs decay into dd',
'MDME(211,1)=1 !Higgs decay into uu',
'MDME(212,1)=1 !Higgs decay into ss',
'MDME(213,1)=1 !Higgs decay into cc',
'MDME(214,1)=1 !Higgs decay into bb',
'MDME(215,1)=1 !Higgs decay into tt',
'MDME(216,1)=1 !Higgs decay into',
'MDME(217,1)=1 !Higgs decay into Higgs decay',
'MDME(218,1)=1 !Higgs decay into e nu e',
'MDME(219,1)=1 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=1 !Higgs decay into Higgs decay',
'MDME(222,1)=1 !Higgs decay into g g',
'MDME(223,1)=1 !Higgs decay into gam gam',
'MDME(224,1)=1 !Higgs decay into gam Z',
'MDME(225,1)=1 !Higgs decay into Z Z',
'MDME(226,1)=1 !Higgs decay into W W',
),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] | sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch |
21c03646d3b3e6dae55e626e2eb651912903a68a | e0d46d7d1048648836af5f5243adc5e84e391382 | /forwardgram.py | b540d98db563934ac11526cad28fe18b551ecefb | [] | no_license | DanilenkoDanil/forward | a2ca0a96aa826754f1c23caa016fc70d3eaf5fe1 | 8a0b5d2f0cadf22990a13fdddb23ce5d0a95a338 | refs/heads/main | 2023-08-07T21:43:51.561629 | 2021-09-22T23:18:13 | 2021-09-22T23:18:13 | 329,336,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | from telethon import TelegramClient, events, sync
from telethon.tl.types import InputChannel
import yaml
import sys
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('telethon').setLevel(level=logging.WARNING)
logger = logging.getLogger(__name__)
def start(config):
client = TelegramClient(config["session_name"],
config["api_id"],
config["api_hash"])
client.start()
input_channels_entities = []
output_channel_entities = []
for d in client.iter_dialogs():
if d.name in config["input_channel_names"]:
input_channels_entities.append(InputChannel(d.entity.id, d.entity.access_hash))
if d.name in config["output_channel_names"]:
output_channel_entities.append(InputChannel(d.entity.id, d.entity.access_hash))
if not output_channel_entities:
logger.error(f"Could not find any output channels in the user's dialogs")
sys.exit(1)
if not input_channels_entities:
logger.error(f"Could not find any input channels in the user's dialogs")
sys.exit(1)
logging.info(
f"Listening on {len(input_channels_entities)} channels. Forwarding messages to {len(output_channel_entities)} channels.")
@client.on(events.NewMessage(chats=input_channels_entities))
async def handler(event):
for output_channel in output_channel_entities:
await client.forward_messages(output_channel, event.message)
client.run_until_disconnected()
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} {{CONFIG_PATH}}")
sys.exit(1)
with open(sys.argv[1], 'rb') as f:
config = yaml.safe_load(f)
start(config) | [
"noreply@github.com"
] | DanilenkoDanil.noreply@github.com |
fa0707a68b09bbfbbd1c0b2ee3e6266f543f5dc2 | f073c033419dffa5237d470c0206f97adaf9db99 | /database_learning/employee.py | b13488a97ac3565a636dbfba264664844f9e15ca | [] | no_license | schnippo/spanishconjugator | 618b858580ca46c1a2fee7aae167025e3e72de41 | ab8477ef94bf077b339d046b7e82a923be90ca10 | refs/heads/master | 2020-04-16T14:04:28.909452 | 2019-01-14T12:07:00 | 2019-01-14T12:07:00 | 165,653,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | class Employee:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
@property
def email(self):
return '{}.{}@company.com'.format(self.first, self.last)
@property
def fullname(self):
return '{} {}'.format(self.first, self.last)
def __repr__(self):
return "Employee('{}','{}','{}'".format(self.first,self.last, self.pay)
# john = Employee('John', 'Buttle', 50000)
# print(john.email)
# print(repr(john)) | [
"noreply@github.com"
] | schnippo.noreply@github.com |
d330066fb6ba0e836748a43a60059fe223936d8f | 7e53ed2d6074a025fe960f72c21672cc23dcab14 | /vt/tests/test_vt.py | c1d05f734a3b1993f80ddc0c57020fbbb90a49cb | [
"MIT"
] | permissive | kyokley/vittlify-cli | 154410638b3a33640c01ab915dbf24d4e6afe13f | e3be7f3c7b0c00d59defe73af9aed0ec792800cc | refs/heads/master | 2023-02-17T21:08:29.452548 | 2021-12-05T15:54:06 | 2021-12-05T15:54:06 | 58,974,128 | 0 | 0 | MIT | 2023-02-08T02:27:24 | 2016-05-16T23:09:51 | Python | UTF-8 | Python | false | false | 41,993 | py | import shlex
import unittest
import mock
import pytest
import requests
from vt.utils import VittlifyError
from vt.vt import (
Status,
add,
categories,
complete,
display_all_shopping_lists,
display_item,
display_shopping_list,
display_shopping_list_categories,
help,
modify,
move,
run,
show,
term,
)
class TestDisplayShoppingList(unittest.TestCase):
    """Exercise vt.vt.display_shopping_list for every Status mode.

    All collaborators (list/item fetchers, row formatter, table printer)
    are replaced with mocks; each test verifies that the correct fetcher
    is used for the requested mode and that every item row is formatted
    and printed with the expected arguments.
    """

    # Fixture data shared by every test; no test mutates these.
    ITEMS = [{'name': 'item1'}, {'name': 'item2'}, {'name': 'item3'}]
    ROWS = ['formatted_row_1', 'formatted_row_2', 'formatted_row_3']

    def _patch(self, target):
        """Start a mock.patch for *target* and register automatic stop.

        addCleanup guarantees the patch is undone even if setUp fails
        part-way through, replacing the old start/stop boilerplate in
        setUp/tearDown.
        """
        patcher = mock.patch(target)
        mocked = patcher.start()
        self.addCleanup(patcher.stop)
        return mocked

    def setUp(self):
        self.mock_get_shopping_list_info = self._patch('vt.vt.get_shopping_list_info')
        self.mock_get_shopping_list_items = self._patch(
            'vt.vt.get_shopping_list_items'
        )
        self.mock_get_completed = self._patch('vt.vt.get_completed')
        self.mock_get_all_shopping_list_items = self._patch(
            'vt.vt.get_all_shopping_list_items'
        )
        self.mock_format_row = self._patch('vt.vt.format_row')
        self.mock_print_table = self._patch('vt.vt.print_table')

        self.mock_get_shopping_list_info.return_value = {'name': 'test_list'}
        self.mock_get_shopping_list_items.return_value = list(self.ITEMS)
        self.mock_get_all_shopping_list_items.return_value = list(self.ITEMS)
        self.mock_get_completed.return_value = list(self.ITEMS)
        # One formatted row per item, consumed in order.
        self.mock_format_row.side_effect = list(self.ROWS)

    def _assert_rows_formatted(self, shopping_list, include_comments):
        """Verify format_row was invoked once per item with shared kwargs."""
        expected = [
            mock.call(
                item,
                shopping_list,
                include_category=False,
                include_comments=include_comments,
                no_wrap=False,
            )
            for item in self.ITEMS
        ]
        self.mock_format_row.assert_has_calls(expected)

    def _assert_table_printed(self, title):
        """Verify the formatted rows were printed exactly once under *title*."""
        self.mock_print_table.assert_called_once_with(
            self.ROWS, title=title, quiet=False
        )

    def test_not_completed(self):
        display_shopping_list(guid='test_guid', mode=Status.NOT_COMPLETED)

        self.mock_get_shopping_list_info.assert_called_once_with('test_guid')
        self.mock_get_shopping_list_items.assert_called_once_with('test_guid')
        self._assert_rows_formatted({'name': 'test_list'}, include_comments=False)
        self._assert_table_printed('test_list')

    def test_all(self):
        display_shopping_list(guid='test_guid', mode=Status.ALL)

        self.mock_get_shopping_list_info.assert_called_once_with('test_guid')
        self.mock_get_all_shopping_list_items.assert_called_once_with('test_guid')
        self._assert_rows_formatted({'name': 'test_list'}, include_comments=False)
        self._assert_table_printed('test_list')

    def test_completed(self):
        display_shopping_list(guid='test_guid', mode=Status.COMPLETED)

        # Completed mode ignores the list guid entirely: no list lookup,
        # and rows are formatted with no shopping list context.
        self.mock_get_shopping_list_info.assert_not_called()
        self.mock_get_completed.assert_called_once_with()
        self._assert_rows_formatted(None, include_comments=False)
        self._assert_table_printed('Recently Completed')

    def test_not_completed_extended(self):
        display_shopping_list(
            guid='test_guid', mode=Status.NOT_COMPLETED, extended=True
        )

        self.mock_get_shopping_list_info.assert_called_once_with('test_guid')
        self.mock_get_shopping_list_items.assert_called_once_with('test_guid')
        # Extended mode only flips include_comments on.
        self._assert_rows_formatted({'name': 'test_list'}, include_comments=True)
        self._assert_table_printed('test_list')

    def test_all_extended(self):
        display_shopping_list(guid='test_guid', mode=Status.ALL, extended=True)

        self.mock_get_shopping_list_info.assert_called_once_with('test_guid')
        self.mock_get_all_shopping_list_items.assert_called_once_with('test_guid')
        self._assert_rows_formatted({'name': 'test_list'}, include_comments=True)
        self._assert_table_printed('test_list')

    def test_completed_extended(self):
        display_shopping_list(guid='test_guid', mode=Status.COMPLETED, extended=True)

        self.mock_get_shopping_list_info.assert_not_called()
        self.mock_get_completed.assert_called_once_with()
        self._assert_rows_formatted(None, include_comments=True)
        self._assert_table_printed('Recently Completed')
class TestDisplayItem(unittest.TestCase):
    """display_item should fetch the item, format it, and print a one-row table."""

    def setUp(self):
        # addCleanup ensures each patch is stopped even if setUp fails
        # part-way through, replacing the old tearDown boilerplate.
        patcher = mock.patch('vt.vt.get_item')
        self.mock_get_item = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch('vt.vt.format_row')
        self.mock_format_row = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch('vt.vt.print_table')
        self.mock_print_table = patcher.start()
        self.addCleanup(patcher.stop)

        self.test_guid = 'test_guid'

    def test_(self):
        display_item(self.test_guid)

        self.mock_get_item.assert_called_once_with(self.test_guid)
        # The fetched item is formatted with comments and no list context.
        self.mock_format_row.assert_called_once_with(
            self.mock_get_item.return_value, None, include_comments=True, no_wrap=False
        )
        self.mock_print_table.assert_called_once_with(
            [self.mock_format_row.return_value]
        )
class TestDisplayAllShoppingLists(unittest.TestCase):
    """display_all_shopping_lists should format each list and print one table."""

    LISTS = [{'name': 'list1'}, {'name': 'list2'}, {'name': 'list3'}]
    ROWS = ['formatted_row_1', 'formatted_row_2', 'formatted_row_3']

    def setUp(self):
        # BUG FIX: the previous tearDown never stopped the print_table
        # patcher, leaking the patch into later tests. addCleanup stops
        # every patcher unconditionally.
        patcher = mock.patch('vt.vt.get_all_shopping_lists')
        self.mock_get_all_shopping_lists = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch('vt.vt.format_row')
        self.mock_format_row = patcher.start()
        self.addCleanup(patcher.stop)

        patcher = mock.patch('vt.vt.print_table')
        self.mock_print_table = patcher.start()
        self.addCleanup(patcher.stop)

        self.mock_get_all_shopping_lists.return_value = list(self.LISTS)
        self.mock_format_row.side_effect = list(self.ROWS)

    def test_(self):
        display_all_shopping_lists()

        self.mock_get_all_shopping_lists.assert_called_once_with()
        # Each list is formatted without a parent list and without wrapping.
        self.mock_format_row.assert_has_calls(
            [mock.call(lst, None, no_wrap=False) for lst in self.LISTS]
        )
        self.mock_print_table.assert_called_once_with(self.ROWS, title='All Lists')
class TestShowNoDefaultList(unittest.TestCase):
    """Behaviour of show() when no DEFAULT_LIST is configured.

    Without a default list, 'list'/'item' commands that omit a guid must
    raise IndexError instead of silently falling back to anything.
    """

    def _patch(self, target, *args):
        """Start a patcher (optionally with a replacement value) and
        register automatic stop via addCleanup."""
        patcher = mock.patch(target, *args)
        mocked = patcher.start()
        self.addCleanup(patcher.stop)
        return mocked

    def setUp(self):
        # Empty string means "no default list configured".
        self._patch('vt.vt.DEFAULT_LIST', '')
        self.mock_display_shopping_list = self._patch('vt.vt.display_shopping_list')
        self.mock_display_all_shopping_lists = self._patch(
            'vt.vt.display_all_shopping_lists'
        )
        self.mock_display_item = self._patch('vt.vt.display_item')

    def test_list_empty_guid(self):
        with self.assertRaises(IndexError):
            show(shlex.split("list ''"))

    def test_list_no_guid(self):
        with self.assertRaises(IndexError):
            show(shlex.split("list"))

    def test_list_empty_guid_extended(self):
        with self.assertRaises(IndexError):
            show(shlex.split("list '' -e"))

    def test_list_no_guid_extended(self):
        with self.assertRaises(IndexError):
            show(shlex.split("list -e"))

    def test_list_no_extended(self):
        show(shlex.split("list test_guid"))
        self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')

    def test_list_extended(self):
        show(shlex.split("list test_guid -e"))
        self.mock_display_shopping_list.assert_called_once_with(
            guid='test_guid',
            extended=True,
        )

    def test_lists(self):
        show(shlex.split("lists"))
        self.mock_display_all_shopping_lists.assert_called_once_with()

    def test_item_no_guid(self):
        with self.assertRaises(IndexError):
            show(shlex.split("item"))

    def test_item_empty_guid(self):
        with self.assertRaises(IndexError):
            show(shlex.split("item ''"))

    def test_item(self):
        show(shlex.split("item test_guid"))
        self.mock_display_item.assert_called_once_with('test_guid')
class TestShowDefaultList:
    @pytest.fixture(autouse=True)
    def setUp(self, mocker):
        """Patch every vt.vt collaborator show()/categories() touch.

        Runs automatically around each test (autouse fixture); the code
        after ``yield`` undoes each patch at teardown.
        """
        # DEFAULT_LIST is set so guid-less commands fall back to 'default_list'.
        self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', 'default_list')
        self.DEFAULT_LIST_patcher.start()
        self.parse_options_patcher = mock.patch('vt.vt.parse_options')
        self.mock_parse_options = self.parse_options_patcher.start()
        self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
        self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
        self.display_all_shopping_lists_patcher = mock.patch(
            'vt.vt.display_all_shopping_lists'
        )
        self.mock_display_all_shopping_lists = (
            self.display_all_shopping_lists_patcher.start()
        )
        self.display_shopping_list_categories_patcher = mock.patch(
            'vt.vt.display_shopping_list_categories'
        )
        self.mock_display_shopping_list_categories = (
            self.display_shopping_list_categories_patcher.start()
        )
        # Silence terminal colouring; pytest-mock undoes this automatically.
        mocker.patch.object(term, 'red', autospec=True)
        self.display_item_patcher = mock.patch('vt.vt.display_item')
        self.mock_display_item = self.display_item_patcher.start()
        # Default: no command-line options parsed unless a test overrides it.
        self.mock_parse_options.return_value = {}
        yield
        self.DEFAULT_LIST_patcher.stop()
        self.parse_options_patcher.stop()
        self.display_shopping_list_patcher.stop()
        self.display_all_shopping_lists_patcher.stop()
        self.display_item_patcher.stop()
        self.display_shopping_list_categories_patcher.stop()
def test_list_empty_guid(self):
args = shlex.split("list ''")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='default_list')
def test_list_no_guid(self):
args = shlex.split("list")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='default_list')
def test_list_empty_guid_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list '' -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='default_list', extended=True
)
def test_list_no_guid_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='default_list', extended=True
)
def test_list_no_extended(self):
args = shlex.split("list test_guid")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_list_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list test_guid -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='test_guid',
extended=True,
)
def test_lists(self):
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
def test_item_no_guid(self):
args = shlex.split("item")
with pytest.raises(IndexError):
show(args)
def test_item_empty_guid(self):
args = shlex.split("item ''")
with pytest.raises(IndexError):
show(args)
def test_item(self):
args = shlex.split("item test_guid")
show(args)
self.mock_display_item.assert_called_once_with('test_guid')
def test_display_list_categories(self):
self.mock_parse_options.return_value = {
'categories': [{'name': 'type A'}, {'name': 'type B'}]
}
args = shlex.split("test_guid")
categories(args)
self.mock_display_shopping_list_categories.assert_called_once_with('test_guid')
def test_display_list_categories_raises(self):
self.mock_parse_options.return_value = {
'categories': [{'name': 'type A'}, {'name': 'type B'}]
}
self.mock_display_shopping_list_categories.side_effect = VittlifyError(
'Got an error'
)
args = shlex.split("test_guid")
categories(args)
term.red.assert_called_once_with('Got an error')
self.mock_display_shopping_list_categories.assert_called_once_with('test_guid')
def test_display_shopping_list_raises(self):
self.mock_display_shopping_list.side_effect = VittlifyError('Got an error')
args = shlex.split("list test_guid")
show(args)
term.red.assert_called_once_with('Got an error')
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_display_item_raises(self):
self.mock_display_item.side_effect = VittlifyError('Got an error')
args = shlex.split("show test_guid")
show(args)
term.red.assert_called_once_with('Got an error')
def test_display_all_shopping_lists_raises(self):
self.mock_display_all_shopping_lists.side_effect = VittlifyError('Got an error')
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
term.red.assert_called_once_with('Got an error')
class TestComplete:
    """Tests for the ``complete`` command handler: marking items done or
    undone, and displaying the completed portion of the list."""
    @pytest.fixture(autouse=True)
    def setUp(self, mocker):
        self.complete_item_patcher = mock.patch('vt.vt.complete_item')
        self.mock_complete_item = self.complete_item_patcher.start()
        self.mock_print = mocker.patch('builtins.print')
        self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
        self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
        self.apply_strikethrough_patcher = mock.patch('vt.vt.apply_strikethrough')
        self.mock_apply_strikethrough = self.apply_strikethrough_patcher.start()
        self.mock_complete_item.return_value = {'name': 'test_name'}
        self.mock_apply_strikethrough.return_value = 'struck_through'
        yield
        self.complete_item_patcher.stop()
        self.apply_strikethrough_patcher.stop()
        # BUG FIX: this patcher was started in setup but never stopped,
        # leaking the display_shopping_list mock into subsequent tests.
        self.display_shopping_list_patcher.stop()
    def test_complete(self):
        args = shlex.split("test_guid")
        complete(args)
        self.mock_complete_item.assert_called_once_with('test_guid', uncomplete=False)
        self.mock_apply_strikethrough.assert_called_once_with('test_name')
        self.mock_print.assert_called_once_with(
            f'Marked {term.magenta}struck_through{term.normal} as done.'
        )
    def test_uncomplete(self):
        args = shlex.split("test_guid")
        complete(args, uncomplete=True)
        self.mock_complete_item.assert_called_once_with('test_guid', uncomplete=True)
        self.mock_print.assert_called_once_with(
            f'Marked {term.magenta}test_name{term.normal} undone.'
        )
    # With no guid, "complete" shows the completed items instead.
    def test_done_extended(self):
        args = shlex.split("-e")
        complete(args)
        self.mock_display_shopping_list.assert_called_once_with(
            extended=True, mode=Status.COMPLETED
        )
    def test_completed_no_extended(self):
        args = shlex.split("")
        complete(args)
        self.mock_display_shopping_list.assert_called_once_with(mode=Status.COMPLETED)
    def test_completed_extended(self):
        args = shlex.split("--extended")
        complete(args)
        self.mock_display_shopping_list.assert_called_once_with(
            extended=True, mode=Status.COMPLETED
        )
class TestModify(unittest.TestCase):
    """``modify`` forwards the guid and comment text to modify_item and then
    re-displays the item; -a/--append are passed through as append=True."""
    def setUp(self):
        self._patch_modify_item = mock.patch('vt.vt.modify_item')
        self._patch_display_item = mock.patch('vt.vt.display_item')
        self.mock_modify_item = self._patch_modify_item.start()
        self.mock_display_item = self._patch_display_item.start()
    def tearDown(self):
        self._patch_display_item.stop()
        self._patch_modify_item.stop()
    def test_no_options(self):
        modify(shlex.split("test_guid this is a comment"))
        self.mock_modify_item.assert_called_once_with('test_guid', 'this is a comment')
        self.mock_display_item.assert_called_once_with('test_guid')
    def test_with_short_options(self):
        modify(shlex.split("test_guid -a this is a comment"))
        self.mock_modify_item.assert_called_once_with(
            'test_guid', 'this is a comment', append=True
        )
        self.mock_display_item.assert_called_once_with('test_guid')
    def test_with_options(self):
        modify(shlex.split("test_guid --append this is a comment"))
        self.mock_modify_item.assert_called_once_with(
            'test_guid', 'this is a comment', append=True
        )
        self.mock_display_item.assert_called_once_with('test_guid')
class TestAddDefaultList(unittest.TestCase):
    """``add`` with a configured DEFAULT_LIST: a bare description goes to the
    default list, while an explicit guid wins when supplied."""
    def setUp(self):
        self._patchers = [
            mock.patch('vt.vt.DEFAULT_LIST', 'default_list'),
            mock.patch('vt.vt.add_item'),
            mock.patch('vt.vt.format_row'),
            mock.patch('vt.vt.print_table'),
        ]
        started = [patcher.start() for patcher in self._patchers]
        _, self.mock_add_item, self.mock_format_row, self.mock_print_table = started
    def tearDown(self):
        for patcher in self._patchers:
            patcher.stop()
    def _assert_added(self, guid, description):
        # Shared postcondition: item added, its row formatted and printed.
        self.mock_add_item.assert_called_once_with(guid, description)
        self.mock_format_row.assert_called_once_with(
            self.mock_add_item.return_value, no_wrap=False
        )
        self.mock_print_table.assert_called_once_with(
            [self.mock_format_row.return_value]
        )
    def test_no_guid(self):
        add(shlex.split("'this is a new item'"))
        self._assert_added('default_list', 'this is a new item')
    def test_with_guid(self):
        add(shlex.split("test_guid 'this is a new item'"))
        self._assert_added('test_guid', 'this is a new item')
class TestAddNoDefaultList(unittest.TestCase):
    """``add`` without a DEFAULT_LIST configured: a list guid is mandatory."""
    def setUp(self):
        self._patchers = [
            mock.patch('vt.vt.DEFAULT_LIST', None),
            mock.patch('vt.vt.add_item'),
            mock.patch('vt.vt.format_row'),
            mock.patch('vt.vt.print_table'),
        ]
        started = [patcher.start() for patcher in self._patchers]
        _, self.mock_add_item, self.mock_format_row, self.mock_print_table = started
    def tearDown(self):
        for patcher in self._patchers:
            patcher.stop()
    def test_no_guid(self):
        # With no default list to fall back on, a lone description fails.
        with self.assertRaises(IndexError):
            add(shlex.split("'this is a new item'"))
    def test_with_guid(self):
        add(shlex.split("test_guid 'this is a new item'"))
        self.mock_add_item.assert_called_once_with('test_guid', 'this is a new item')
        self.mock_format_row.assert_called_once_with(
            self.mock_add_item.return_value, no_wrap=False
        )
        self.mock_print_table.assert_called_once_with(
            [self.mock_format_row.return_value]
        )
class TestMove:
    """``move`` relays the item guid and destination list guid to move_item
    and prints a colored confirmation."""
    @pytest.fixture(autouse=True)
    def setUp(self, mocker):
        self._move_item_patcher = mock.patch('vt.vt.move_item')
        self.mock_move_item = self._move_item_patcher.start()
        self.mock_print = mocker.patch('builtins.print')
        yield
        self._move_item_patcher.stop()
    def test_(self):
        move(shlex.split('test_guid to_list_guid'))
        self.mock_move_item.assert_called_once_with('test_guid', 'to_list_guid')
        self.mock_print.assert_called_once_with(
            f'Moved item {term.blue}test_guid{term.normal} to list {term.blue}to_list_guid{term.normal}'
        )
class TestRun:
    """run() dispatches a parsed command line to exactly one command handler
    (show/complete/modify/add/move/help) and exits with a red error message
    on argument, connection, or HTTP errors."""
    @pytest.fixture(autouse=True)
    def setUp(self, mocker):
        # Patch every command handler plus the module configuration the
        # error paths read (SHOW_TRACEBACK, PROXY, VITTLIFY_URL).
        self.show_patcher = mock.patch('vt.vt.show')
        self.mock_show = self.show_patcher.start()
        self.complete_patcher = mock.patch('vt.vt.complete')
        self.mock_complete = self.complete_patcher.start()
        self.modify_patcher = mock.patch('vt.vt.modify')
        self.mock_modify = self.modify_patcher.start()
        self.add_patcher = mock.patch('vt.vt.add')
        self.mock_add = self.add_patcher.start()
        self.move_patcher = mock.patch('vt.vt.move')
        self.mock_move = self.move_patcher.start()
        mocker.patch.object(term, 'red', autospec=True)
        self.SHOW_TRACEBACK_patcher = mock.patch('vt.vt.SHOW_TRACEBACK', False)
        self.SHOW_TRACEBACK_patcher.start()
        self.PROXY_patcher = mock.patch('vt.vt.PROXY', False)
        self.PROXY_patcher.start()
        self.VITTLIFY_URL_patcher = mock.patch('vt.vt.VITTLIFY_URL', 'vittlify_url')
        self.VITTLIFY_URL_patcher.start()
        self.help_patcher = mock.patch('vt.vt.help')
        self.mock_help = self.help_patcher.start()
        yield
        self.show_patcher.stop()
        self.complete_patcher.stop()
        self.modify_patcher.stop()
        self.add_patcher.stop()
        self.move_patcher.stop()
        self.SHOW_TRACEBACK_patcher.stop()
        self.PROXY_patcher.stop()
        self.VITTLIFY_URL_patcher.stop()
        self.help_patcher.stop()
    def _assert_only_dispatch(self, name, *args, **kwargs):
        """Assert the *name* handler was invoked exactly once with the given
        arguments and that no other handler was touched."""
        handlers = {
            'show': self.mock_show,
            'complete': self.mock_complete,
            'modify': self.mock_modify,
            'add': self.mock_add,
            'move': self.mock_move,
            'help': self.mock_help,
        }
        handlers.pop(name).assert_called_once_with(*args, **kwargs)
        for handler in handlers.values():
            assert not handler.called
    # "list"/"lists"/"item"/"show" all route to the show handler with the
    # raw argument list.
    def test_list(self):
        test_args = shlex.split('list test_guid')
        run(test_args)
        self._assert_only_dispatch('show', test_args)
    def test_lists(self):
        test_args = shlex.split('lists')
        run(test_args)
        self._assert_only_dispatch('show', test_args)
    def test_item(self):
        test_args = shlex.split('item test_guid')
        run(test_args)
        self._assert_only_dispatch('show', test_args)
    def test_show(self):
        test_args = shlex.split('show test_guid')
        run(test_args)
        self._assert_only_dispatch('show', test_args)
    # "done"/"complete" and their "un" variants route to complete, with the
    # command word stripped from the arguments.
    def test_done(self):
        run(shlex.split('done test_guid'))
        self._assert_only_dispatch('complete', ['test_guid'])
    def test_complete(self):
        run(shlex.split('complete test_guid'))
        self._assert_only_dispatch('complete', ['test_guid'])
    def test_undone(self):
        run(shlex.split('undone test_guid'))
        self._assert_only_dispatch('complete', ['test_guid'], uncomplete=True)
    def test_uncomplete(self):
        run(shlex.split('uncomplete test_guid'))
        self._assert_only_dispatch('complete', ['test_guid'], uncomplete=True)
    # "modify"/"edit"/"comment"/"comments" are aliases for modify.
    def test_modify(self):
        run(shlex.split("modify test_guid 'these are comments'"))
        self._assert_only_dispatch('modify', ['test_guid', 'these are comments'])
    def test_edit(self):
        run(shlex.split("edit test_guid 'these are comments'"))
        self._assert_only_dispatch('modify', ['test_guid', 'these are comments'])
    def test_comment(self):
        run(shlex.split("comment test_guid 'these are comments'"))
        self._assert_only_dispatch('modify', ['test_guid', 'these are comments'])
    def test_comments(self):
        run(shlex.split("comments test_guid 'these are comments'"))
        self._assert_only_dispatch('modify', ['test_guid', 'these are comments'])
    def test_add(self):
        run(shlex.split("add 'this is a new item'"))
        self._assert_only_dispatch('add', ['this is a new item'])
    def test_move(self):
        run(shlex.split("move old_guid new_guid"))
        self._assert_only_dispatch('move', ['old_guid', 'new_guid'])
    def test_mv(self):
        run(shlex.split("mv old_guid new_guid"))
        self._assert_only_dispatch('move', ['old_guid', 'new_guid'])
    # Error paths: run() exits and reports the problem via term.red.
    def test_index_error(self):
        self.mock_add.side_effect = IndexError()
        test_args = shlex.split("add 'this is a new item'")
        with pytest.raises(SystemExit):
            run(test_args)
        term.red.assert_called_once_with('Incorrect number of arguments provided')
    def test_connection_error(self):
        self.mock_add.side_effect = requests.exceptions.ConnectionError()
        test_args = shlex.split("add 'this is a new item'")
        with pytest.raises(SystemExit):
            run(test_args)
        term.red.assert_called_once_with(
            'Unable to connect to Vittlify instance at vittlify_url'
        )
    def test_http_error(self):
        self.mock_add.side_effect = requests.exceptions.HTTPError('500 Message')
        test_args = shlex.split("add 'this is a new item'")
        with pytest.raises(SystemExit):
            run(test_args)
        term.red.assert_called_once_with('Server responded with 500 Message')
    def test_help(self):
        run(shlex.split("help command"))
        self._assert_only_dispatch('help', ['command'])
class TestDisplayShoppingListCategories:
    """display_shopping_list_categories prints the list's categories as a
    table titled with the list name, or a red notice when there are none."""
    @pytest.fixture(autouse=True)
    def setUp(self, mocker):
        self._info_patcher = mock.patch('vt.vt.get_shopping_list_info')
        self.mock_get_shopping_list_info = self._info_patcher.start()
        self._table_patcher = mock.patch('vt.vt.print_table')
        self.mock_print_table = self._table_patcher.start()
        mocker.patch.object(term, 'red', autospec=True)
        # Default: a list with no categories key at all.
        self.mock_get_shopping_list_info.return_value = {'name': 'test_list'}
        yield
        self._info_patcher.stop()
        self._table_patcher.stop()
    def test_no_categories(self):
        display_shopping_list_categories('test_guid')
        self.mock_get_shopping_list_info.assert_called_once_with('test_guid')
        term.red.assert_called_once_with("No categories found for test_list.")
    def test_has_categories(self):
        self.mock_get_shopping_list_info.return_value = {
            'name': 'test_list',
            'categories': [{'name': 'type A'}, {'name': 'type B'}],
        }
        display_shopping_list_categories('test_guid')
        self.mock_print_table.assert_called_once_with(
            [['type A'], ['type B']], title='test_list'
        )
class TestHelp(unittest.TestCase):
    """help() returns the help text for each known command (including its
    aliases) and falls back to the general help otherwise."""
    # (mock attribute name, patch target) for every help-text constant.
    _HELP_PATCHES = [
        ('mock_general_help', 'vt.vt.GENERAL_HELP'),
        ('mock_lists_help', 'vt.vt.LISTS_HELP'),
        ('mock_list_help', 'vt.vt.LIST_HELP'),
        ('mock_done_help', 'vt.vt.DONE_HELP'),
        ('mock_undone_help', 'vt.vt.UNDONE_HELP'),
        ('mock_comment_help', 'vt.vt.COMMENT_HELP'),
        ('mock_move_help', 'vt.vt.MOVE_HELP'),
        ('mock_categories_help', 'vt.vt.CATEGORIES_HELP'),
        ('mock_categorize_help', 'vt.vt.CATEGORIZE_HELP'),
    ]
    def setUp(self):
        # Patch every help constant in one loop instead of nine copies of
        # the same patch/start boilerplate.
        self._patchers = []
        for attr, target in self._HELP_PATCHES:
            patcher = mock.patch(target)
            setattr(self, attr, patcher.start())
            self._patchers.append(patcher)
    def tearDown(self):
        for patcher in reversed(self._patchers):
            patcher.stop()
    def _assert_help(self, args, expected):
        """Assert that help(args) returns *expected*."""
        self.assertEqual(expected, help(args))
    def test_no_args(self):
        self._assert_help([], self.mock_general_help)
    def test_unknown_command(self):
        self._assert_help(['unknown command'], self.mock_general_help)
    def test_lists(self):
        self._assert_help(['lists'], self.mock_lists_help)
    def test_list(self):
        self._assert_help(['list'], self.mock_list_help)
    def test_done(self):
        self._assert_help(['done'], self.mock_done_help)
    def test_complete(self):
        self._assert_help(['complete'], self.mock_done_help)
    def test_undone(self):
        self._assert_help(['undone'], self.mock_undone_help)
    def test_uncomplete(self):
        self._assert_help(['uncomplete'], self.mock_undone_help)
    def test_comment(self):
        self._assert_help(['comment'], self.mock_comment_help)
    def test_modify(self):
        self._assert_help(['modify'], self.mock_comment_help)
    def test_comments(self):
        self._assert_help(['comments'], self.mock_comment_help)
    def test_edit(self):
        self._assert_help(['edit'], self.mock_comment_help)
    def test_move(self):
        self._assert_help(['move'], self.mock_move_help)
    def test_mv(self):
        self._assert_help(['mv'], self.mock_move_help)
    def test_categories(self):
        self._assert_help(['categories'], self.mock_categories_help)
    def test_categorize(self):
        self._assert_help(['categorize'], self.mock_categorize_help)
    def test_label(self):
        self._assert_help(['label'], self.mock_categorize_help)
| [
"kyokley2@gmail.com"
] | kyokley2@gmail.com |
41ef33c1c1af378a664ea82f485c5a12ebeedd1c | a0fb29f99a852089193e4cc9a11e7263dc3f8b5f | /mayan/apps/metadata/literals.py | aba1309e370f89d0f6259a24ca393df9dc3e1f1c | [
"Apache-2.0"
] | permissive | ikang9712/Mayan-EDMS | 0e22a944d63657cea59c78023b604a01a622b52a | d6e57e27a89805329fe0c5582caa8e17882d94e6 | refs/heads/master | 2023-07-28T19:41:55.269513 | 2021-09-07T14:16:14 | 2021-09-07T14:16:14 | 402,884,683 | 1 | 0 | NOASSERTION | 2021-09-03T20:00:09 | 2021-09-03T20:00:09 | null | UTF-8 | Python | false | false | 227 | py | from .parsers import MetadataParser
from .validators import MetadataValidator
DEFAULT_METADATA_AVAILABLE_VALIDATORS = MetadataValidator.get_import_paths()
DEFAULT_METADATA_AVAILABLE_PARSERS = MetadataParser.get_import_paths()
| [
"roberto.rosario@mayan-edms.com"
] | roberto.rosario@mayan-edms.com |
c93ba3313bf6c3ee32e36cad9c787f55c5d4548b | 8395ffb48750359d1bd51a201a41c7fe124998bc | /apc2015/perception/single_utils/src/generate_naive_cloud.py | 4195bb9783faaf79d4485ed09ada91429266c3d6 | [] | no_license | duke-iml/ece490-s2016 | ab6c3d3fb159a28a9c38487cdb1ad3993008b854 | f9cc992fbaadedc8a69678ba39f0c9d108e6910d | refs/heads/master | 2020-04-12T09:03:56.601000 | 2016-11-29T21:36:48 | 2016-11-29T21:36:48 | 49,226,568 | 2 | 6 | null | 2016-11-29T21:36:49 | 2016-01-07T19:42:34 | Python | UTF-8 | Python | false | false | 2,659 | py | #!/usr/bin/env python
from __future__ import division
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import subprocess
import time
import psutil
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
from common_utils import *
from math import pi, sin, cos, tan, atan, sqrt
# Set by main(): PID of the RealSense emitter subprocess and output file stem.
pid = None
file_name = None
# Latest frames cached by the ROS subscriber callbacks.
rgb_mat = None
depth_mat = None
bridge = CvBridge()
# Depth image dimensions and camera geometry: a 74-degree diagonal field of
# view, from which the pinhole focal length ("lift", in pixels) is derived.
w = 320
h = 240
diag_ang = 74/180*pi
diag = sqrt(w**2+h**2)
lift = diag/2 / tan(diag_ang/2)
def receive_rgb(data):
    """ROS callback: cache the latest RGB frame; build the point cloud once
    both an RGB and a depth frame have been received."""
    global rgb_mat
    rgb_mat = bridge.imgmsg_to_cv2(data, "bgr8")
    if depth_mat is not None:
        process()
def receive_depth(data):
    """ROS callback: cache the latest depth frame; build the point cloud once
    both an RGB and a depth frame have been received."""
    global depth_mat
    depth_mat = bridge.imgmsg_to_cv2(data, "mono16")
    # Keep only the first channel of the decoded depth image.
    depth_mat = depth_mat[:,:,0]
    if rgb_mat is not None:
        process()
def process():
    """Build a naive colored point cloud from the cached RGB + depth frames.

    Kills the emitter process, saves both frames as bitmaps, back-projects
    every valid depth pixel through the pinhole model (units: meters), writes
    the cloud via write_pcd_file, and shuts the ROS node down.
    """
    # Both frames have arrived; the camera emitter is no longer needed.
    psutil.Process(pid).kill()
    cv2.imwrite(file_name+".bmp", rgb_mat)
    cv2.imwrite(file_name+".depth.bmp", depth_mat)
    assert depth_mat.shape == (h, w)
    point_cloud = []
    for i in range(h):
        for j in range(w):
            depth = depth_mat[i, j]
            # 32001 marks an invalid depth reading; skip it early.
            if depth==32001:
                continue
            assert depth<20000
            # The RGB image is twice the depth resolution; the top-left pixel
            # of each 2x2 RGB block supplies the color.
            # NOTE(review): the original also computed the 2x2 block average
            # but never used it; that dead code was removed. If an averaged
            # color was intended, feed the block mean to rgb_to_pcl_float.
            b1, g1, r1 = list(rgb_mat[i*2, j*2, :].flatten())
            # Convert to plain ints before packing to avoid uint8 arithmetic.
            b1 = int(b1)
            g1 = int(g1)
            r1 = int(r1)
            rgb = rgb_to_pcl_float(r1, g1, b1)
            # Back-project pixel center through the focal length "lift".
            coord = (j+0.5-w/2, i+0.5-h/2)
            real_x = coord[0]/lift*depth
            real_y = coord[1]/lift*depth
            point_cloud.append([real_x/1000, real_y/1000, depth/1000, rgb])
    write_pcd_file(point_cloud, file_name)
    rospy.signal_shutdown("Point cloud made, shutting down...\n")
def main():
    """Entry point: launch the RealSense emitter, subscribe to its RGB and
    depth topics, and spin until process() writes the cloud and shuts down.

    The output file stem defaults to 'point_cloud.pcd' and may be overridden
    by the first command-line argument.
    """
    global file_name
    if len(sys.argv)>=2:
        file_name = sys.argv[1]
    else:
        file_name = 'point_cloud.pcd'
    global pid
    # Renamed from ``process`` so the local no longer shadows the module-level
    # process() function.
    emitter = subprocess.Popen('hardware_layer/RealSense_ROS_Emitter', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    pid = emitter.pid
    # Give the emitter time to start publishing before subscribing.
    time.sleep(3)
    rospy.init_node('naive_point_cloud', disable_signals=True)
    rgb_sub = rospy.Subscriber("/realsense/rgb", Image, receive_rgb, queue_size=1)
    depth_sub = rospy.Subscriber("/realsense/depth", Image, receive_depth, queue_size=1)
    rospy.spin()
if __name__ == '__main__':
main()
| [
"hauser.kris@gmail.com"
] | hauser.kris@gmail.com |
1a6991bb0eca5e3ab71d0ba1a68d1884bc4012b5 | 8778be5676e3657be3db6d71bb81b57ef8020dae | /lab2/webcam_prediction.py | e723c7d678f951f7d107b5fe986ae7fd7900e902 | [] | no_license | JerelynCo/Pattern-Recognition | ab922e35c36fb51d2d1c2996d2483d5b152c471e | ff36571abf8c12b5f9bae7e5dd89afcce35f5e1e | refs/heads/master | 2016-08-12T20:16:24.736727 | 2016-04-19T05:40:52 | 2016-04-19T05:40:52 | 53,573,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,886 | py | import numpy as np
import cv2
import math
import json
def calculateProbability(x, mean, stdev):
    """Return the Gaussian probability density of x under N(mean, stdev**2)."""
    variance = stdev ** 2
    scale = 1.0 / (math.sqrt(2.0 * math.pi) * stdev)
    return scale * math.exp(-((x - mean) ** 2) / (2.0 * variance))
def calculateClassProbabilities(summaries, inputVector):
    """Return the naive-Bayes likelihood of inputVector for every class.

    ``summaries`` maps class label -> list of (mean, stdev) per feature; the
    likelihood is the product of the per-feature Gaussian densities.
    """
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        likelihood = 1
        for featureIndex, (mean, stdev) in enumerate(classSummaries):
            likelihood *= calculateProbability(inputVector[featureIndex], mean, stdev)
        probabilities[classValue] = likelihood
    return probabilities
def predict(summaries, inputVector):
    """Return the class label with the highest likelihood for inputVector.

    Ties keep the class encountered first; returns None when ``summaries``
    is empty.
    """
    probabilities = calculateClassProbabilities(summaries, inputVector)
    if not probabilities:
        return None
    return max(probabilities, key=probabilities.get)
def auto_canny(image, sigma=0.33):
    """Run Canny edge detection with thresholds derived from the median
    pixel intensity: [(1-sigma)*median, (1+sigma)*median], clamped to
    [0, 255]."""
    median = np.median(image)
    lower = int(max(0, (1.0 - sigma) * median))
    upper = int(min(255, (1.0 + sigma) * median))
    return cv2.Canny(image, lower, upper)
# Stream frames from the default webcam, find candidate regions via contours
# on auto-thresholded Canny edges, and classify each region's 8x8 normalized
# Sobel patch with the naive-Bayes model trained offline. Press 'q' to quit.
# PERF FIX: load the classifier summary once up front; it was previously
# re-read from disk for every contour of every frame.
with open('classifier/train_summary.json', 'r') as f:
    train_summary = json.loads(f.read())
cap = cv2.VideoCapture(0)
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Preprocess: grayscale + slight blur, then median-based Canny edges.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = auto_canny(blurred)
    contoured_image, contours, hierarchy = cv2.findContours(
        edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # ddepth for sobel
    ddepth = cv2.CV_8U
    sobel_x = cv2.Sobel(blurred, ddepth, 1, 0)
    # NOTE(review): both calls take the derivative in x (dx=1, dy=0), so
    # sobel_y duplicates sobel_x; the y-gradient is presumably meant to be
    # cv2.Sobel(blurred, ddepth, 0, 1). Left unchanged because the stored
    # classifier may have been trained on these exact features — confirm
    # against the training pipeline before fixing.
    sobel_y = cv2.Sobel(blurred, ddepth, 1, 0)
    sobel = cv2.addWeighted(sobel_x, 0.5, sobel_y, 0.5, 0)
    counter = 0
    image_out = frame.copy()
    for contour in contours:
        # get box bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # Skip regions smaller than 5% of the frame.
        # NOTE(review): h (box height) is compared against shape[1] (width)
        # and w against shape[0] (height); confirm the axes are intentional.
        if h < image_out.shape[1] * 0.05 or w < image_out.shape[0] * 0.05:
            continue
        crop_img_sobel = sobel[y:y + h, x:x + w]
        resized = cv2.resize(crop_img_sobel, (8, 8)).flatten() / 255
        # Class '1' is drawn in blue (BGR), anything else in red.
        if(predict(train_summary, resized) == '1'):
            cv2.rectangle(edges, (x, y), (x + w, y + h), (255, 0, 0), 2)
        else:
            cv2.rectangle(edges, (x, y), (x + w, y + h), (0, 0, 255), 2)
        counter += 1
    # Display the resulting frame
    cv2.imshow('frame', edges)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"hadrianpaulo@gmail.com"
] | hadrianpaulo@gmail.com |
0df124adf5c65ca34ee6bc7af7252e7d3ae24fad | 0b0570c7799b5997bd7ed828d52142de591229b8 | /fizzbuzz.py | 178fcfb5fd13c1d0e2a9d0257365682dc67f1543 | [] | no_license | triaddojo/fizzbuzz-jiro | 2c8c7d56fe702cf023010d45d5d7c897f8266874 | c643d4ea0c861aa49e61f554398597272a1cd572 | refs/heads/master | 2020-05-26T09:05:34.137623 | 2019-05-23T07:26:45 | 2019-05-23T07:26:45 | 188,178,480 | 0 | 2 | null | 2019-05-23T07:52:48 | 2019-05-23T06:55:20 | Python | UTF-8 | Python | false | false | 208 | py | for number in range(1, 101):
if number % 15 == 0:
print("fizzbuzz")
elif number % 3 == 0:
print("fizz")
elif number % 5 == 0:
print("buzz")
else:
print(number)
| [
"akira.wakatsuki@tri-ad.global"
] | akira.wakatsuki@tri-ad.global |
f0d08ba48ad4eb4e0d09ebaa588e70674bf6d362 | 3248dd56d1b6ed34eec801a16752328a6016e9fe | /SentimentAnalysis/asgi.py | 83b2a88105aacd9830a3a04bad7f6ff45a8787db | [] | no_license | shamil-t/sentiment-analysis-imdb-django | d17905e3fdcde770ba987e37272bd0a4047b3e79 | fea9349bd3cc55cf550920e65a53254e23aff20b | refs/heads/main | 2023-03-17T01:14:41.376834 | 2021-02-27T03:36:02 | 2021-02-27T03:36:02 | 342,748,491 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for SentimentAnalysis project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SentimentAnalysis.settings')
application = get_asgi_application()
| [
"tshamil90@gmail.com"
] | tshamil90@gmail.com |
f1648f00e3328ab6336ed5fbb02a0739fce86d09 | c545e20c20d4f3a9e580b543e0ff79555972ab94 | /prepare_seq2seq.py | fb95072fec543d356525e3bf627763e0b5eab1e9 | [] | no_license | StNiki/MT_CW3 | d6cd6f33865653413e4bc1c7c992cfb96e28d894 | b3c4f276396e8539f7473390a1cbf84770d28780 | refs/heads/master | 2021-01-21T14:43:24.967123 | 2017-06-26T21:34:06 | 2017-06-26T21:34:06 | 95,326,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,261 | py | # coding: utf-8
# ## Prepare parallel corpus
#
# **Based on TensorFlow code: https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/data_utils.py**
# In[ ]:
import os
import re
import pickle
from tqdm import tqdm
import sys
# In[ ]:
from nmt_config import *
# In[ ]:
data_fname = {"en": os.path.join(data_dir, "text_all.en"),
"fr": os.path.join(data_dir, "text_all.fr")}
# In[ ]:
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile(b"([.,!?\"':~;)(])")
_DIGIT_RE = re.compile(br"\d")
# In[ ]:
def basic_tokenizer(sentence):
    """Split a byte-string sentence into lowercase tokens with tokenizer
    punctuation stripped; empty fragments are dropped."""
    tokens = []
    for fragment in sentence.strip().split():
        for piece in _WORD_SPLIT.split(fragment):
            tokens.append(_WORD_SPLIT.sub(b"", piece))
    return [token.lower() for token in tokens if token]
# In[ ]:
def extract_k_lines(fr_fname, en_fname, k):
    """Copy up to *k* parallel sentence pairs from the raw corpus.

    Reads the module-level ``data_fname`` sources in lockstep, tokenizes each
    pair with basic_tokenizer, and writes the space-joined tokens to
    *fr_fname*/*en_fname*.  Pairs where either side tokenizes to nothing are
    skipped and do not count toward *k*.
    """
    num_lines = 0
    with open(data_fname["fr"],"rb") as f_fr, open(data_fname["en"],"rb") as f_en:
        with open(fr_fname,"wb") as out_fr, open(en_fname,"wb") as out_en:
            for i, (line_fr, line_en) in enumerate(zip(f_fr, f_en)):
                if num_lines >= k:
                    break
                words_fr = basic_tokenizer(line_fr)
                words_en = basic_tokenizer(line_en)
                if len(words_fr) > 0 and len(words_en) > 0:
                    # write to tokens file
                    out_fr.write(b" ".join(words_fr) + b"\n")
                    out_en.write(b" ".join(words_en) + b"\n")
                    num_lines += 1
    # NOTE(review): ``i`` is undefined here if the source files are empty.
    print("Total lines={0:d}, valid lines={1:d}".format(i, num_lines))
    print("finished writing {0:s} and {1:s}".format(fr_fname, en_fname))
# In[ ]:
def create_vocab(text_fname, num_train, max_vocabulary_size, freq_thresh):
    """Build the vocabulary and word<->id maps for one side of the corpus.

    Reads at most *num_train* tokenized lines from *text_fname*, normalizes
    each token (digits collapsed to b"0", tokenizer punctuation stripped),
    counts frequencies, drops tokens seen <= *freq_thresh* times, and caps
    the final list (special symbols first, then descending frequency) at
    *max_vocabulary_size*.

    Returns (vocab, w2i, i2w): the post-threshold frequency dict and the
    word->index / index->word maps over the capped list.
    """
    vocab = {}
    w2i = {}
    i2w = {}
    with open(text_fname,"rb") as in_f:
        for i, line in enumerate(in_f):
            if i >= num_train:
                break
            words = line.strip().split()
            for w in words:
                word = _DIGIT_RE.sub(b"0", w)
                # BUG FIX: the punctuation strip previously re-applied to the
                # raw token ``w``, silently discarding the digit
                # normalization performed on the line above.
                word = _WORD_SPLIT.sub(b"", word)
                if word in vocab:
                    vocab[word] += 1
                else:
                    vocab[word] = 1
    print("vocab length before: {0:d}".format(len(vocab)))
    vocab = {k:vocab[k] for k in vocab if vocab[k] > freq_thresh}
    print("vocab length after: {0:d}".format(len(vocab)))
    # Special symbols always come first so their indices stay fixed.
    vocab_list = START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
    print("Finished generating vocabulary")
    if len(vocab_list) > max_vocabulary_size:
        print("Vocab size={0:d}, trimmed to max={1:d}".format(len(vocab_list), max_vocabulary_size))
        vocab_list = vocab_list[:max_vocabulary_size]
    else:
        print("Vocab size={0:d}".format(len(vocab_list)))
    for i, w in enumerate(vocab_list):
        w2i[w] = i
        i2w[i] = w
    print("finished vocab processing for {0:s}".format(text_fname))
    # (A leftover debug loop re-checking the frequency threshold was removed;
    # the dict comprehension above guarantees it could never trigger.)
    return vocab, w2i, i2w
# In[ ]:
def create_input_config(k, num_train=NUM_TRAINING_SENTENCES, freq_thresh=FREQ_THRESH):
    """Extract *k* sentence pairs, build both vocabularies and pickle them.

    Writes ``text.en`` / ``text.fr`` plus ``vocab.dict`` / ``w2i.dict`` /
    ``i2w.dict`` into the module-level ``input_dir``.

    Args:
        k: number of sentence pairs to extract.
        num_train: number of leading lines used for vocabulary building.
        freq_thresh: minimum word frequency (exclusive) to keep a word.
    """
    # Output file names
    if not os.path.exists(input_dir):
        os.makedirs(input_dir)
    en_name = os.path.join(input_dir, "text.en")
    fr_name = os.path.join(input_dir, "text.fr")
    vocab_path = os.path.join(input_dir, "vocab.dict")
    w2i_path = os.path.join(input_dir, "w2i.dict")
    i2w_path = os.path.join(input_dir, "i2w.dict")
    # (the unused tokens.en / tokens.fr path locals were removed)
    # extract k lines
    extract_k_lines(fr_name, en_name, k)
    # create vocabularies (en first, then fr, as before)
    vocab = {"en": {}, "fr": {}}
    w2i = {"en": {}, "fr": {}}
    i2w = {"en": {}, "fr": {}}
    for lang, fname in (("en", en_name), ("fr", fr_name)):
        print("*" * 50)
        print("{0:s} file".format(lang))
        print("*" * 50)
        # bug fix: the module-level constants were passed here even though the
        # caller-supplied num_train / freq_thresh parameters exist for this
        vocab[lang], w2i[lang], i2w[lang] = create_vocab(
            fname,
            num_train=num_train,
            max_vocabulary_size=max_vocab_size[lang],
            freq_thresh=freq_thresh)
    print("*" * 50)
    # close the pickle files deterministically instead of leaking the handles
    with open(vocab_path, "wb") as f:
        pickle.dump(vocab, f)
    with open(w2i_path, "wb") as f:
        pickle.dump(w2i, f)
    with open(i2w_path, "wb") as f:
        pickle.dump(i2w, f)
    print("finished creating input config for {0:d} lines".format(k))
# In[ ]:
# Build the full input configuration (text extraction + vocabularies + pickles).
create_input_config(k=NUM_SENTENCES, num_train=NUM_TRAINING_SENTENCES, freq_thresh=FREQ_THRESH)
# In[ ]:
| [
"noreply@github.com"
] | StNiki.noreply@github.com |
12d670bebcfaf06a04d1a065f01c5f4d66c7390e | 71ae0df6d2c10f2c37c40f527a78716edb23275e | /main.spec | 3188163d8287f3b0768c29b22de3a711232c7c87 | [] | no_license | MediaNik5/CannonPy | b1637aabe9fe45441fa27515cd7eef649a175f08 | 0390c4aebb2a3c56073fb490fe5d350cfe995d02 | refs/heads/master | 2023-03-18T12:30:22.621039 | 2021-03-10T13:31:06 | 2021-03-10T13:31:06 | 346,366,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | spec | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build spec: bundles main.py into a single console executable.
# Analysis, PYZ and EXE are names injected by PyInstaller when it executes
# this spec file; they are not importable Python symbols.
block_cipher = None  # no bytecode encryption
# Collect the script, its imports and data files.
a = Analysis(['main.py'],
             pathex=['C:\\gdrive\\Works\\projects\\python\\Cannon'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of the collected pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# One-file executable with a console window (console=True).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='main',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
| [
"stafilopok@mail.ru"
] | stafilopok@mail.ru |
09eb58fd8a9817910ab1f48df9c299357ccadf50 | 1dff039deadc84ee5d9c92f6bee99baae03fb0dd | /horizons/gui/style.py | fd601b65699f124417027f6556eaad13bc7f351b | [] | no_license | totycro/unknown-horizons-quadtree | bf9321a204c907fea877125b95519d8574c9543a | 6112ee6961714f6b963652d4ec25e2823732be8d | refs/heads/master | 2021-01-18T14:21:53.161268 | 2011-03-22T23:04:27 | 2011-03-22T23:04:27 | 851,912 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,414 | py | # ###################################################
# Copyright (C) 2009 The Unknown Horizons Team
# team@unknown-horizons.org
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from fife import fife
# GUI style sheets, keyed by style name -> widget class name (or tuple of
# names) -> pychan/fifechan property dict. The 'default' sub-key of each
# style is the fallback applied to every widget of that style.
STYLES= {
'default': {
	'default' : {
		'border_size': 2,
		'margins': (0, 0),
		'base_color' : fife.Color(40, 40, 40, 0),
		'foreground_color' : fife.Color(255, 255, 255),
		'background_color' : fife.Color(40, 40, 40, 255),
		'selection_color' : fife.Color(80, 80, 80, 255),
		'font' : 'libertine_small'
	},
	'Button' : {
		'border_size': 1,
		'margins' : (10, 5)
	},
	'CheckBox' : {
		'border_size': 0,
		'background_color' : fife.Color(0, 0, 0, 0)
	},
	'RadioButton' : {
		'border_size': 0,
		'background_color' : fife.Color(0, 0, 0, 0)
	},
	'Label' : {
		'border_size': 0,
		'background_color' : fife.Color(40, 40, 40, 0),
		'font' : 'libertine_small'
	},
	'ClickLabel' : {
		'border_size': 0,
		'font' : 'libertine_small'
	},
	'ListBox' : {
		'border_size': 0,
		'font' : 'libertine_small'
	},
	'Window' : {
		'border_size': 1,
		'margins': (10, 10),
		'titlebar_height' : 30,
		'font' : 'libertine_large',
		'base_color' : fife.Color(60, 60, 60)
	},
	'TextBox' : {
		'font' : 'libertine_small'
	},
	# a tuple key applies the same properties to several widget classes
	('Container','HBox','VBox') : {
		'opaque' : 0,
		'border_size': 0
	},
	('Icon', 'ImageButton', 'DropDown') : {
		'border_size': 0
	},
	'ScrollArea' : {
		'border_size': 0,
		'horizontal_scrollbar' : 1,
		'base_color' : fife.Color(60, 60, 60),
		'background_color' : fife.Color(60, 60, 60)
	},
	#'TextField' : {
	#	'base_color' : fife.Color(60, 60, 60),
	#	'background_color' : fife.Color(0, 0, 0)
	#}
	'Slider' : {
		'base_color' : fife.Color(80,80,40,50),
	},
},
'menu': { #Used in the main menu and game menu
	'default' : {
		'border_size': 0,
		'margins': (0, 0),
		'opaque': 0,
		'base_color' : fife.Color(0, 0, 0, 0),
		'foreground_color' : fife.Color(255, 255, 255),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0, 0, 0, 0),
		'font' : 'libertine_mainmenu'
	},
	'Button' : {
		'border_size': 0,
		'margins' : (10, 5)
	},
	'Label' : {
		'border_size': 0,
		'font' : 'libertine_mainmenu'
	}
},
'menu_black': { # style for build menu etc.
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'opaque': 0,
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(255,255,255),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0,0,0,0),
		'font' : 'libertine_small_black'
	},
	'Button' : {
		'border_size': 0,
		'margins' : (0,0)
	},
	'Label' : {
		'margins': (0,0),
		'font' : 'libertine_14_black'
	}
},
'resource_bar': {
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'opaque': 0,
		'base_color' : fife.Color(0, 0, 0, 0),
		'foreground_color' : fife.Color(0, 0, 0, 0),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0, 0, 0, 0),
		'font' : 'libertine_small_black'
	},
	'Button' : {
		'border_size': 0,
		'margins' : (0,0)
	},
	'Label' : {
		'alpha':0,
		'font' : 'libertine_small_black'
	}
},
'message_text': {
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'opaque': 0,
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(255,255,255),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0,0,0,0),
		'font' : 'libertine_small'
	},
	'Button' : {
		'border_size': 0,
		'margins' : (0,0)
	},
	'Label' : {
		'margins': (0,0),
		'font' : 'libertine_small'
	}
},
'city_info': { # style for city info
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'opaque': 0,
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(255,255,255),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0,0,0,0),
		'font' : 'libertine_large'
	},
	'Button' : {
		'font' : 'libertine_18',
		'border_size': 0,
		'margins' : (0,0)
	},
	'Label' : {
		'font' : 'libertine_18'
	},
	'TooltipLabel': {
		'font' : 'libertine_18'
	}
},
'headline': { # style for headlines
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'opaque': 0,
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(255,255,255),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0,0,0,0),
		'font' : 'libertine_headline'
	},
	'Button' : {
		'border_size': 0,
		'margins' : (0,0)
	},
	'Label' : {
		'font' : 'libertine_headline'
	}
},
'book': { # style for book widgets
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'font' : 'libertine_14_black',
		'foreground_color' : fife.Color(80,80,40),
	},
	'Label' : {
		'font' : 'libertine_14_black',
	},
	'CheckBox' : {
		'selection_color' : fife.Color(255,255,255,200),
		'background_color' : fife.Color(255,255,255,128),
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(80,80,40),
	},
	'DropDown' : {
		'selection_color' : fife.Color(255,255,255,200),
		'background_color' : fife.Color(255,255,255,128),
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(80,80,40),
		'font' : 'libertine_14_black',
	},
	'Slider' : {
		'base_color' : fife.Color(80,80,40,128),
	},
	'TextBox' : {
		'font' : 'libertine_14_black',
		'opaque': 0
	},
	'ListBox' : {
		'background_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(80,80,40),
		'selection_color' : fife.Color(255,255,255,128),
		'font' : 'libertine_14_black',
	},
	'ScrollArea' : {
		'background_color' : fife.Color(255,255,255,64),
		'foreground_color' : fife.Color(80,80,40),
		'base_color' : fife.Color(0,0,0,0),
		'font' : 'libertine_14_black',
		'horizontal_scrollbar' : 0,
	},
	'HBox' : {
		'font' : 'libertine_14_black',
		'foreground_color' : fife.Color(80,80,40),
		'opaque': 0
	},
	'TextField' : {
		'selection_color' : fife.Color(255,255,255),
		'background_color' : fife.Color(255,255,255,64),
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(80,80,40),
		'font' : 'libertine_14_black',
	}
},
'tooltip': { # style for tooltips
	'default' : {
		'border_size': 0,
		'margins': (0,0),
		'opaque': 0,
		'base_color' : fife.Color(0,0,0,0),
		'foreground_color' : fife.Color(255,255,255),
		'background_color' : fife.Color(0, 0, 0, 0),
		'selection_color' : fife.Color(0,0,0,0),
		'font' : 'libertine_headline'
	},
	'Button' : {
		'border_size': 0,
		'margins' : (0,0)
	},
	'Label' : {
		'font' : 'libertine_tooltip'
	}
},
}
| [
"totycro@unknown-horizons.org"
] | totycro@unknown-horizons.org |
1ffc8b3649921a8cf943112df31655726ca74210 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/143/usersdata/210/62277/submittedfiles/av2_p3_m2.py | 26e0d4120d107718e01aaba735ca255a96ae8f9d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # -*- coding: utf-8 -*-
def degrais(a):
    """Return the largest absolute difference ("step") between consecutive
    elements of *a*.

    Returns 0 for sequences with fewer than two elements, matching the
    original accumulator initialization.
    """
    if len(a) < 2:
        return 0
    # idiomatic form of the original manual abs/max loop
    return max(abs(a[i] - a[i + 1]) for i in range(len(a) - 1))
# Read h integers from the user and print the largest step between neighbours.
h=int(input('digite o valor de h:'))  # how many values to read
j=[]
for i in range(0,h,1):
    numero=int(input('digite o numero:'))
    j.append(numero)
x=degrais(j)  # largest absolute difference between consecutive values
print(x)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
55ba0c58bb1c484542456f12df2c5861a5e9826e | 28c155fc7f808365d2f37fff2c794c3994c89353 | /learning_web_2_bug/contact/migrations/0001_initial.py | 7351287297948ce7a16346e6139964d1f4dcbab8 | [] | no_license | bwtzxcvb/rockfracturegroup | 97e454ddda1b4407b835c7ef10da6596c53356d3 | 4e1b61d30928cea82c26ee2d81e8e8b8e391abe5 | refs/heads/master | 2023-04-27T18:58:01.901009 | 2019-10-24T10:42:47 | 2019-10-24T10:42:47 | 211,757,570 | 0 | 0 | null | 2023-04-21T20:38:23 | 2019-09-30T02:14:39 | Python | UTF-8 | Python | false | false | 764 | py | # Generated by Django 2.2.4 on 2019-09-10 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ContactMessage table
    # with name/email/message fields and an auto-set creation timestamp.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ContactMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=254)),
                ('message', models.TextField()),
                # auto_now_add: stamped once on creation, never updated
                ('message_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # newest messages first
                'ordering': ('-message_date',),
            },
        ),
    ]
| [
"1927552890@qq.com"
] | 1927552890@qq.com |
876ca7ebfa0fdf067bdeaee9c405205dda120f80 | fd5010485620821c32e5d2332ff7e4ef0b824dfe | /docs/study/zh-cn/study/QuecPythonTest/code/01_LED.py | 80821f9f2b9f0285a3d728d004a4b991497cc57a | [
"MIT"
] | permissive | jks-liu/wiki | cfc1d1abbfc667dd461c40fe25f9ce96bdb2f8fd | 397b33f00d128b4b8337a750df59e20356831802 | refs/heads/main | 2023-02-23T08:49:12.092973 | 2021-01-27T00:08:39 | 2021-01-27T00:08:39 | 333,250,497 | 0 | 0 | MIT | 2021-01-26T23:56:55 | 2021-01-26T23:56:54 | null | UTF-8 | Python | false | false | 1,967 | py | # 实验1: 跑马灯
# API资料参考连接: https://python.quectel.com/wiki/#/zh-cn/api/?id=pin
from machine import Pin
import utime
IOdictRead = {}  # GPIOs already initialized in input mode (pin -> Pin object)
IOdictWrite = {}  # GPIOs already initialized in output mode (pin -> Pin object)
def GPIO_Read(gpioX, Pull=Pin.PULL_DISABLE, level=1):
    """Read the level of pin *gpioX*, (re)configuring it as an input if needed.

    A pin previously cached in output mode is evicted from the output cache
    so it gets re-initialized here in input mode.
    """
    if IOdictWrite.get(gpioX, None):
        del IOdictWrite[gpioX]
    gpioIO = IOdictRead.get(gpioX, None)
    if gpioIO is None:
        # initialize the pin in input mode and cache the Pin object directly;
        # the original re-fetched the freshly stored entry from the dict
        gpioIO = Pin(gpioX, Pin.IN, Pull, level)
        IOdictRead[gpioX] = gpioIO
    return gpioIO.read()
def GPIO_Write(gpioX, level, Pull=Pin.PULL_DISABLE):
    """Drive pin *gpioX* to *level*, (re)configuring it as an output if needed.

    A pin previously cached in input mode is evicted from the input cache
    so it gets re-initialized here in output mode.
    """
    if IOdictRead.get(gpioX, None):
        del IOdictRead[gpioX]
    gpioIO = IOdictWrite.get(gpioX, None)
    if gpioIO is None:
        # initialize the pin in output mode and cache the Pin object directly;
        # the original re-fetched the freshly stored entry from the dict
        gpioIO = Pin(gpioX, Pin.OUT, Pull, level)
        IOdictWrite[gpioX] = gpioIO
    gpioIO.write(level)
LED1 = Pin.GPIO1 # LED pin definition
LED2 = Pin.GPIO2 # LED pin definition
LED3 = Pin.GPIO3 # LED pin definition
LED4 = Pin.GPIO4 # LED pin definition
LED5 = Pin.GPIO5 # LED pin definition
def IO_On(gpioX): # set the pin to 0 ("on" — the LEDs appear to be active-low)
    GPIO_Write(gpioX, 0) # delegate to the write helper
def IO_Off(gpioX): # set the pin to 1 ("off" — the LEDs appear to be active-low)
    GPIO_Write(gpioX, 1) # delegate to the write helper
def IO_All_Off(): # set all five LED pins to 1 (everything off)
    IO_Off(LED1)
    IO_Off(LED2)
    IO_Off(LED3)
    IO_Off(LED4)
    IO_Off(LED5)
def main():
    """Run the LED chaser forever: light each of the five LEDs for 200 ms
    in turn (all others off), then start over."""
    leds = (LED1, LED2, LED3, LED4, LED5)
    while True:
        # same off/on/delay sequence as the original five copy-pasted stanzas
        for led in leds:
            IO_All_Off()          # everything off
            IO_On(led)            # light the current LED
            utime.sleep_ms(200)   # hold for 200 ms
# Run the LED chaser when executed as a script.
if __name__ == "__main__":
    main()
| [
"rivern.yuan@quectel.com"
] | rivern.yuan@quectel.com |
7e70251ae9261b6cc83c7ebf3233459f5515f267 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/oslogin/v1beta/oslogin-v1beta-py/google/cloud/oslogin_v1beta/services/os_login_service/transports/grpc_asyncio.py | 1c6fbed331f6c7d8d3fb9b348ed3be8e16af48ff | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,112 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.oslogin.common import common_pb2 # type: ignore
from google.cloud.oslogin_v1beta.types import oslogin
from google.protobuf import empty_pb2 # type: ignore
from .base import OsLoginServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import OsLoginServiceGrpcTransport
class OsLoginServiceGrpcAsyncIOTransport(OsLoginServiceTransport):
    """gRPC AsyncIO backend transport for OsLoginService.
    Cloud OS Login API
    The Cloud OS Login API allows you to manage users and their
    associated SSH public keys for logging into virtual machines on
    Google Cloud Platform.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # NOTE(review): this class appears to be GAPIC-generated (repetitive stub
    # properties, boilerplate docstrings) — prefer regenerating over hand edits.

    _grpc_channel: aio.Channel
    # cache of per-RPC stub callables, keyed by RPC method name
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(cls,
                       host: str = 'oslogin.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )

    def __init__(self, *,
            host: str = 'oslogin.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: aio.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id=None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def delete_posix_account(self) -> Callable[
            [oslogin.DeletePosixAccountRequest],
            Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete posix account method over gRPC.
        Deletes a POSIX account.
        Returns:
            Callable[[~.DeletePosixAccountRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_posix_account' not in self._stubs:
            self._stubs['delete_posix_account'] = self.grpc_channel.unary_unary(
                '/google.cloud.oslogin.v1beta.OsLoginService/DeletePosixAccount',
                request_serializer=oslogin.DeletePosixAccountRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs['delete_posix_account']

    @property
    def delete_ssh_public_key(self) -> Callable[
            [oslogin.DeleteSshPublicKeyRequest],
            Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete ssh public key method over gRPC.
        Deletes an SSH public key.
        Returns:
            Callable[[~.DeleteSshPublicKeyRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_ssh_public_key' not in self._stubs:
            self._stubs['delete_ssh_public_key'] = self.grpc_channel.unary_unary(
                '/google.cloud.oslogin.v1beta.OsLoginService/DeleteSshPublicKey',
                request_serializer=oslogin.DeleteSshPublicKeyRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs['delete_ssh_public_key']

    @property
    def get_login_profile(self) -> Callable[
            [oslogin.GetLoginProfileRequest],
            Awaitable[oslogin.LoginProfile]]:
        r"""Return a callable for the get login profile method over gRPC.
        Retrieves the profile information used for logging in
        to a virtual machine on Google Compute Engine.
        Returns:
            Callable[[~.GetLoginProfileRequest],
                    Awaitable[~.LoginProfile]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_login_profile' not in self._stubs:
            self._stubs['get_login_profile'] = self.grpc_channel.unary_unary(
                '/google.cloud.oslogin.v1beta.OsLoginService/GetLoginProfile',
                request_serializer=oslogin.GetLoginProfileRequest.serialize,
                response_deserializer=oslogin.LoginProfile.deserialize,
            )
        return self._stubs['get_login_profile']

    @property
    def get_ssh_public_key(self) -> Callable[
            [oslogin.GetSshPublicKeyRequest],
            Awaitable[common_pb2.SshPublicKey]]:
        r"""Return a callable for the get ssh public key method over gRPC.
        Retrieves an SSH public key.
        Returns:
            Callable[[~.GetSshPublicKeyRequest],
                    Awaitable[~.SshPublicKey]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_ssh_public_key' not in self._stubs:
            self._stubs['get_ssh_public_key'] = self.grpc_channel.unary_unary(
                '/google.cloud.oslogin.v1beta.OsLoginService/GetSshPublicKey',
                request_serializer=oslogin.GetSshPublicKeyRequest.serialize,
                response_deserializer=common_pb2.SshPublicKey.FromString,
            )
        return self._stubs['get_ssh_public_key']

    @property
    def import_ssh_public_key(self) -> Callable[
            [oslogin.ImportSshPublicKeyRequest],
            Awaitable[oslogin.ImportSshPublicKeyResponse]]:
        r"""Return a callable for the import ssh public key method over gRPC.
        Adds an SSH public key and returns the profile
        information. Default POSIX account information is set
        when no username and UID exist as part of the login
        profile.
        Returns:
            Callable[[~.ImportSshPublicKeyRequest],
                    Awaitable[~.ImportSshPublicKeyResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'import_ssh_public_key' not in self._stubs:
            self._stubs['import_ssh_public_key'] = self.grpc_channel.unary_unary(
                '/google.cloud.oslogin.v1beta.OsLoginService/ImportSshPublicKey',
                request_serializer=oslogin.ImportSshPublicKeyRequest.serialize,
                response_deserializer=oslogin.ImportSshPublicKeyResponse.deserialize,
            )
        return self._stubs['import_ssh_public_key']

    @property
    def update_ssh_public_key(self) -> Callable[
            [oslogin.UpdateSshPublicKeyRequest],
            Awaitable[common_pb2.SshPublicKey]]:
        r"""Return a callable for the update ssh public key method over gRPC.
        Updates an SSH public key and returns the profile
        information. This method supports patch semantics.
        Returns:
            Callable[[~.UpdateSshPublicKeyRequest],
                    Awaitable[~.SshPublicKey]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_ssh_public_key' not in self._stubs:
            self._stubs['update_ssh_public_key'] = self.grpc_channel.unary_unary(
                '/google.cloud.oslogin.v1beta.OsLoginService/UpdateSshPublicKey',
                request_serializer=oslogin.UpdateSshPublicKeyRequest.serialize,
                response_deserializer=common_pb2.SshPublicKey.FromString,
            )
        return self._stubs['update_ssh_public_key']
# Public API of this module.
__all__ = (
    'OsLoginServiceGrpcAsyncIOTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
7c9495dcac44bea7cf3f3cf9a7bd33f81a983d2b | ff3b670a0f618a95d957b813b2607b21947e3dfd | /virtual/lib/python3.7/encodings/koi8_r.py | 3a393b48573bf829d26d6512141cdf0ac75fe1b0 | [] | no_license | iguzmanl/CSC-325-Project1 | 2391f28c5b88136daae631bc806db9c5f075ddf7 | 4c636aad8d7cac5e8f261bdd3b46cf55d77083d7 | refs/heads/master | 2020-04-20T22:15:29.923159 | 2019-03-04T05:40:23 | 2019-03-04T05:40:23 | 169,133,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | /Users/sabaramadan/anaconda3/lib/python3.7/encodings/koi8_r.py | [
"sabaramadan@Sabas-MacBook-Air.local"
] | sabaramadan@Sabas-MacBook-Air.local |
0eabf1e2f017b72b5cf73e83020af51b9ad7596d | d90e21b3250d5b3441465e71ad3e773e78058707 | /rnd/HaskellRSLCompiler/test/parse/test.py | bef9a72ceb82bbb48832da89c306ea29b20a4752 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | staticagent/lucille | 4ecf78ccede3efa9cd305d960c4172d1c2074065 | ff81b332ae78181dbbdc1ec3c3b0f59992e7c0fa | refs/heads/master | 2021-08-26T03:43:00.239208 | 2009-07-02T14:48:55 | 2009-07-02T14:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | #!/usr/bin/env python
import os, sys
import subprocess
import re
import glob
errlog = []
def run(f):
cmd = "../../lslc"
p = subprocess.Popen([cmd, f], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
outs = [l for l in p.stdout]
errs = [l for l in p.stderr]
errline = re.compile("TODO")
failed = False
for l in errs:
if errline.search(l):
failed = True
if failed:
print "[FAIL] ", f
errlog.append("==== [" + f + "] ====")
for l in errs:
errlog.append(l[:-1])
errlog.append("=====================")
errlog.append("\n")
else:
print "[OK ] ", f
def main():
    """Run every *.sl test case in the current directory, then dump the
    collected failure transcripts to errlog.log. (Python 2 code.)"""
    for f in glob.glob("*.sl"):
        run(f)
    # NOTE(review): the handle is never closed; flushing relies on
    # interpreter exit — confirm this is acceptable for this script.
    f = open("errlog.log", "w")
    for l in errlog:
        print >>f, l
# Run the test driver when executed as a script.
if __name__ == '__main__':
    main()
| [
"syoyo@lucillerender.org"
] | syoyo@lucillerender.org |
b12d7388d56385a38eb8484599d6e6bfe729eeb6 | 00fa9db88ddf3cb70bc33a54e28b7ffe9d9bf42f | /scripts/generate-split-params.py | 6a822851c12799fb262cd32c86f22f180b1a0316 | [] | no_license | Sixtease/cz-parliament-speech-corpus | 445cc1ed2b9e73f1b406a8ab41b95a9b96a5a8f1 | e6ba8171ebef8b3f9d8167c3f8561650f43b3130 | refs/heads/master | 2021-05-21T04:23:37.875930 | 2020-11-23T18:58:43 | 2020-11-23T18:58:43 | 252,540,847 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | #!/usr/bin/env python
"""
first argument is file with one split point in seconds per line
on stdin is alignment data in tab-separated format
audio start
audio end
guessed silence length
predicted word
gold-standard word
gold-standard line number
prediction - gold-standard match
output are tab-separated fields:
audio start
audio end
starting line
end line
"""
import sys
import numpy as np
# Load the candidate split points (one float, in seconds, per line) from the
# file named by the first command-line argument. The `with` block replaces the
# original explicit open/close pair so the handle is released even on error.
splitsfn = sys.argv[1]
with open(splitsfn, 'r', encoding = 'UTF-8') as splitsfh:
    splits = [float(x.rstrip()) for x in splitsfh.readlines()]
def init_block():
    """Return a fresh, empty accumulator for one audio segment."""
    block = dict(matches=[], startlineno=None)
    return block
def process_block(block):
    """Emit one accepted audio segment to stdout, or log why it was discarded.

    Quality gates: reliable first/last word (match >= 0.5), mean match >= 0.7,
    at least 5 words, and duration between 12 and 30 seconds.
    Accepted output format: audiostart<TAB>audioend<TAB>startline<TAB>endline.
    """
    if block['matches'][ 0] < 0.5:
        sys.stderr.write('discard: unreliable start\n')
        return
    if block['matches'][-1] < 0.5:
        sys.stderr.write('discard: unreliable end\n')
        return
    if np.mean(block['matches']) < 0.7:
        sys.stderr.write('discard: unreliable mean\n')
        return
    if len(block['matches']) < 5:
        sys.stderr.write('discard: too few words\n')
        return
    # Segment duration in seconds.
    l = block['audioend'] - block['audiostart']
    if l < 12:
        sys.stderr.write('discard: too short\n')
        return
    if l > 30:
        sys.stderr.write('discard: too long\n')
        return
    sys.stderr.write('accept\n')
    print("%f\t%f\t%s\t%s" % (block['audiostart'], block['audioend'], block['startlineno'], block['endlineno']))
# Walk the alignment rows on stdin, cutting a new block at every split point.
start_line = 0
start_time = 0
spliti = 0
block = init_block()
last_split = 0
for line in sys.stdin:
    (startstr, endstr, slenstr, predword, goldword, goldlinenostr, matchstr) = line.rstrip().split("\t")
    if block['startlineno'] == None:
        # First word of a new block: it starts at the previous split point.
        block['startlineno'] = goldlinenostr
        block['audiostart'] = last_split
    match = float(matchstr)
    block['matches'].append(match)
    block['endlineno'] = goldlinenostr
    end = float(endstr)
    if spliti < len(splits) and end > splits[spliti]:
        # Crossed the next split point: close the current block and start over.
        block['audioend'] = end
        last_split = splits[spliti]
        process_block(block)
        block = init_block()
        spliti = spliti + 1
# NOTE(review): this final flush can raise (IndexError/KeyError) — the trailing
# block may be empty or lack 'audioend' if no split followed it; confirm intent.
process_block(block)
| [
"jan@sixtease.net"
] | jan@sixtease.net |
4dd6d2950da0380ec4d98105143ddc84def25d6e | a878fbb811d5f49b0524b4a03710b96d7f6f88b6 | /old/data_suff/utils.py | 5d2180887b52f252c95ad5e3212b82a5e3be003b | [
"MIT"
] | permissive | alonshpigler/CovidScreening | caad134a98aaafe88dbd2cd5d24802ef833f5200 | f057c6197db6550693ff6b606b3d8c264508aba5 | refs/heads/main | 2023-07-02T17:50:09.292805 | 2021-08-15T10:45:47 | 2021-08-15T10:45:47 | 361,657,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | import os
# from skimage.io import imread
import pandas as pd
from util import load_csv
#
# filename = 'D:\\RxRx19a-images.zip'
# with ZipFile(filename) as archive:
# for entry in archive.infolist():
# with archive.open(entry) as file:
# img = Image.open(file)
# print(img.size, img.mode, len(img.getdata()))
#
DEFAULT_BASE_PATH = 'C:/Covid-Screening/data_layer/raw_data'
DEFAULT_METADATA_BASE_PATH = os.path.join(DEFAULT_BASE_PATH, 'metadata')
DEFAULT_IMAGES_BASE_PATH = os.path.join(DEFAULT_BASE_PATH, 'images')
DEFAULT_CHANNELS = (1, 2, 3, 4, 5)
def _load_dataset(base_path, dataset, include_controls=True):
    """Load one dataset CSV (optionally merged with its controls file) and
    duplicate every row for imaging sites 1 and 2, indexed by id_code.

    Treatment rows are tagged well_type='treatment' before being concatenated
    with the control rows; cell_type is the prefix of the experiment name.
    """
    metadata = load_csv(os.path.join(base_path, dataset + '.csv'))
    if include_controls:
        control_rows = load_csv(
            os.path.join(base_path, dataset + '_controls.csv'))
        metadata['well_type'] = 'treatment'
        metadata = pd.concat([control_rows, metadata], sort=True)
    metadata['cell_type'] = metadata.experiment.str.split("-").apply(lambda a: a[0])
    metadata['dataset'] = dataset
    per_site = []
    for site_id in (1, 2):
        site_df = metadata.copy()
        site_df['site'] = site_id
        per_site.append(site_df)
    combined = pd.concat(per_site)
    return combined.sort_values(
        by=['id_code', 'site']).set_index('id_code')
_load_dataset(DEFAULT_BASE_PATH,'metadata',False) | [
"alonshp@post.bgu.ac.il"
] | alonshp@post.bgu.ac.il |
ccb00e1329cbbb5be30d2038403c2e5e4303ba75 | 48660fe39e8b6c49df12bde91e01710f20782f00 | /resources/routes/send_qr.py | 9cc0c022cd5ff0c29dd33ebbf98d16415396e413 | [] | no_license | cringeburger/smart-city-backend | 2ed92de8858cf09642f962d323106b52c183f5ab | d4a186d05a91eae802a93eba9981527e1b6432cd | refs/heads/master | 2023-04-28T14:26:12.884626 | 2021-05-23T20:07:38 | 2021-05-23T20:07:38 | 369,588,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from flask import request, send_file
from resources import app
from resources.modules.qr_generator import generate_qr
from datetime import date
@app.get('/send_qr')
def send_qr():
    """Generate a QR-code PNG named after today's date and the caller's token,
    and return it as an image/png response.

    Query parameters:
        user_token: opaque token appended to the generated file name.
    """
    user_token = request.args['user_token']
    # mail_domen = request.args['mail_domen']
    # subject = request.args['subject']
    # Date-stamped name: repeated requests on the same day reuse the same file.
    filename = 'qr_'+str(date.today()) + '_' + user_token
    # NOTE(review): the encoded URL is hard-coded — presumably a placeholder;
    # confirm what the QR code is meant to encode.
    generate_qr('https://www.youtube.com/watch?v=dQw4w9WgXcQ', filename)
    # NOTE(review): backslash path separator is Windows-specific.
    return send_file('generated_qr\\' + filename+ '.png', mimetype='image/png')
"istrebitel.3.12@gmail.com"
] | istrebitel.3.12@gmail.com |
f589f57cc509b15cda51ec3667766cf1f98dff37 | 988dc7a525c26cc68ce7f64ddcdc77bce0995231 | /lists/tests/test_views.py | a5b3c6df929b5df087c1f226335ace45d16ea0b4 | [] | no_license | bohlool/django-tdd-book | 32be2f7f0ecaa04b6aca040778fc284079179a0e | 0f086df4151176585587555533f41a7754b0bb16 | refs/heads/master | 2023-03-18T01:50:47.545168 | 2018-08-01T08:40:51 | 2018-08-01T08:40:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,810 | py | from django.test import TestCase
from django.utils.html import escape
from lists.models import Item, List
from django.contrib.auth import get_user_model
from lists.forms import (
DUPLICATE_ITEM_ERROR, EMPTY_ITEM_ERROR,
ExistingListItemForm, ItemForm#, NewListForm
)
from unittest.mock import patch, Mock
from django.http import HttpRequest
import unittest
from lists.views import new_list
User = get_user_model()
# Create your tests here.
class HomePageTest(TestCase):
    """Smoke tests for the home page: template and form in context."""

    def test_uses_home_template(self):
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'home.html')

    def test_home_page_uses_item_form(self):
        response = self.client.get('/')
        #print(response.context['form'])
        self.assertIsInstance(response.context['form'], ItemForm)
class NewListViewIntegratedTest(TestCase):
    """End-to-end tests for POST /lists/new (saving, ownership, validation)."""

    def test_can_save_a_POST_request(self):
        self.client.post('/lists/new', data={'text': 'A new list item'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')

    def test_list_owner_is_saved_if_user_is_authenticated(self):
        user = User.objects.create(email='a@b.com')
        self.client.force_login(user)
        self.client.post('/lists/new', data={'text': 'new item'})
        list_ = List.objects.first()
        self.assertEqual(list_.owner, user)

    def test_for_invalid_input_doesnt_save_but_shows_errors(self):
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertContains(response, escape(EMPTY_ITEM_ERROR))
@patch('lists.views.NewListForm')
class NewListViewUnitTest(unittest.TestCase):
    """Isolated unit tests for the new_list view: the form class is mocked,
    so these verify the view's interactions, not database behavior.
    The class-level @patch injects mockNewListForm into every test method.
    """

    def setUp(self):
        self.request = HttpRequest()
        self.request.POST['text'] = 'new list item'
        self.request.user = Mock()

    def test_passes_POST_data_to_NewListForm(self, mockNewListForm):
        new_list(self.request)
        mockNewListForm.assert_called_once_with(data=self.request.POST)

    def test_saves_form_with_owner_if_form_valid(self, mockNewListForm):
        mock_form = mockNewListForm.return_value
        mock_form.is_valid.return_value = True
        new_list(self.request)
        mock_form.save.assert_called_once_with(owner=self.request.user)

    def test_does_not_save_if_form_invalid(self, mockNewListForm):
        mock_form = mockNewListForm.return_value
        mock_form.is_valid.return_value = False
        new_list(self.request)
        self.assertFalse(mock_form.save.called)

    @patch('lists.views.render')
    def test_renders_home_template_with_form_if_form_invalid(
        self, mock_render, mockNewListForm
    ):
        mock_form = mockNewListForm.return_value
        mock_form.is_valid.return_value = False
        response = new_list(self.request)
        self.assertEqual(response, mock_render.return_value)
        mock_render.assert_called_once_with(
            self.request, 'home.html', {'form': mock_form}
        )

    @patch('lists.views.redirect')
    def test_redirects_to_form_returned_object_if_form_valid(
        self, mock_redirect, mockNewListForm
    ):
        mock_form = mockNewListForm.return_value
        mock_form.is_valid.return_value = True
        response = new_list(self.request)
        self.assertEqual(response, mock_redirect.return_value)
        mock_redirect.assert_called_once_with(mock_form.save.return_value)
class ListViewTest(TestCase):
    """Tests for the single-list view: rendering, item isolation between
    lists, POSTing new items, and validation error display.
    """

    def test_uses_list_template(self):
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertTemplateUsed(response, 'list.html')

    def test_passes_correct_list_to_template(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        response = self.client.get(f'/lists/{correct_list.id}/')
        self.assertEqual(response.context['list'], correct_list)
        del(other_list) # getting warnings about unused variables, fixing

    def test_displays_only_items_for_that_list(self):
        correct_list = List.objects.create()
        Item.objects.create(text='itemey 1', list=correct_list)
        Item.objects.create(text='itemey 2', list=correct_list)
        other_list = List.objects.create()
        Item.objects.create(text='other list item 1', list=other_list)
        Item.objects.create(text='other list item 2', list=other_list)
        response = self.client.get(f'/lists/{correct_list.id}/')
        self.assertContains(response, 'itemey 1')
        self.assertContains(response, 'itemey 2')
        self.assertNotContains(response, 'other list item 1')
        self.assertNotContains(response, 'other list item 2')

    def test_can_save_a_POST_request_to_an_existing_list(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        self.client.post(
            f'/lists/{correct_list.id}/',
            data={'text': 'A new item for an existing list'}
        )
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new item for an existing list')
        self.assertEqual(new_item.list, correct_list)
        del(other_list) # getting warnings about unused variables, fixing

    def test_POST_redirects_to_list_view(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        response = self.client.post(
            f'/lists/{correct_list.id}/',
            data={'text' : 'A new item for an existing list'}
        )
        self.assertRedirects(response, f'/lists/{correct_list.id}/')
        del(other_list) # getting warnings about unused variables, fixing

    def test_displays_item_form(self):
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertIsInstance(response.context['form'], ExistingListItemForm)
        self.assertContains(response, 'name="text"')

    def post_invalid_input(self):
        # Helper (not a test): POST an empty item to a fresh list.
        list_ = List.objects.create()
        return self.client.post(
            f'/lists/{list_.id}/',
            data={'text': ''}
        )

    def test_for_invalid_input_nothing_saved_to_db(self):
        self.post_invalid_input()
        self.assertEqual(Item.objects.count(), 0)

    def test_for_invalid_input_renders_list_template(self):
        response = self.post_invalid_input()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')

    def test_for_invalid_input_passes_form_to_template(self):
        response = self.post_invalid_input()
        self.assertIsInstance(response.context['form'], ItemForm)

    def test_for_invalid_input_shows_error_on_page(self):
        response = self.post_invalid_input()
        self.assertContains(response, escape(EMPTY_ITEM_ERROR))

    def test_duplicate_item_validation_errors_end_up_on_lists_page(self):
        list1 = List.objects.create()
        item1 = Item.objects.create(list=list1, text='textey')
        response = self.client.post(
            f'/lists/{list1.id}/',
            data={'text': 'textey'}
        )
        expected_error = escape(DUPLICATE_ITEM_ERROR)
        self.assertContains(response, expected_error)
        self.assertTemplateUsed(response, 'list.html')
        self.assertEqual(Item.objects.all().count(), 1)
        del(item1)
class MyListsTest(TestCase):
    """Tests for the per-user "my lists" page."""

    def test_my_lists_url_renders_my_lists_template(self):
        User.objects.create(email='a@b.com')
        response = self.client.get('/lists/users/a@b.com/')
        self.assertTemplateUsed(response, 'my_lists.html')

    def test_passes_correct_owner_to_template(self):
        User.objects.create(email='wrong@owner.com')
        correct_user = User.objects.create(email='a@b.com')
        response = self.client.get('/lists/users/a@b.com/')
        self.assertEqual(response.context['owner'], correct_user)
class SharingTest(TestCase):
    """Tests for sharing a list with another user via POST /lists/<id>/share."""

    def test_post_redirects_to_list_page(self):
        list_ = List.objects.create()
        response = self.client.post(
            f'/lists/{list_.id}/share',
            data={'sharee': 'share@ab.com'}
        )
        #self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, list_.get_absolute_url())

    def test_created_user_appears_on_shared_list(self):
        sharee = User.objects.create(email='sharee@ab.com')
        # do we need to get a logged in user?
        list_ = List.objects.create()
        self.client.post(
            f'/lists/{list_.id}/share',
            data={'sharee': sharee.email}
        )
        self.assertIn(sharee, list_.shared_with.all())
| [
"ben_jacobson@live.com"
] | ben_jacobson@live.com |
68062176f60959a3d7bd5ed6cfbb91d826843649 | ca5b5c217e0053645c2664d777699e9a5050715e | /python/primitive.py | 1352c69b984be863d2167845164d2ffaa39c93e0 | [
"MIT"
] | permissive | rodluger/starrynight | 1405ffdb5a0dd0fefc0ae34e7cdaf7eab4735356 | d3f015e466621189cb271d4d18b538430b14a557 | refs/heads/master | 2021-10-26T03:32:15.220725 | 2021-10-22T15:16:48 | 2021-10-22T15:16:48 | 236,542,672 | 7 | 1 | MIT | 2020-06-03T19:51:10 | 2020-01-27T16:58:05 | Jupyter Notebook | UTF-8 | Python | false | false | 15,076 | py | from special import hyp2f1, J, ellip
from utils import *
from vieta import Vieta
from linear import dP2
import matplotlib.pyplot as plt
import numpy as np
__ALL__ = ["compute_P", "compute_Q", "comput_T"]
def compute_U(vmax, s1):
    """
    Given s1 = sin(0.5 * kappa), compute the integral of

        cos(x) sin^v(x)

    from 0.5 * kappa1 to 0.5 * kappa2 recursively and return an array
    containing the values of this function from v = 0 to v = vmax.
    """
    U = np.empty(vmax + 1)
    # v = 0: antiderivative of cos(x) is sin(x); pairdiff evaluates it
    # between the paired integration limits.
    U[0] = pairdiff(s1)
    # For v >= 1 the antiderivative is sin^(v+1)(x) / (v + 1).
    term = s1 ** 2
    for v in range(1, vmax + 1):
        U[v] = pairdiff(term) / (v + 1)
        term *= s1
    return U
def compute_I(nmax, kappa, s1, c1):
    """Compute the I integrals (of sin^(2v)) for v = 0 .. nmax by upward
    recursion, where s1 = sin(kappa/2) and c1 = cos(kappa/2).
    """
    # Lower boundary: integral of 1 over the half-angle limits.
    I = np.empty(nmax + 1)
    I[0] = 0.5 * pairdiff(kappa)
    # Recurse upward using the standard sin-power reduction formula.
    s2 = s1 ** 2
    term = s1 * c1
    for v in range(1, nmax + 1):
        I[v] = (1.0 / (2 * v)) * ((2 * v - 1) * I[v - 1] - pairdiff(term))
        term *= s2
    return I
def _compute_W_indef(nmax, s2, q2, q3):
    """
    Compute the expression

        s^(2n + 2) (3 / (n + 1) * 2F1(-1/2, n + 1, n + 2, 1 - q^2) + 2q^3) / (2n + 5)

    evaluated at n = [0 .. nmax], where

        s = sin(1/2 kappa)
        q = (1 - s^2 / k^2)^1/2

    by either upward recursion (stable for |1 - q^2| > 1/2) or downward
    recursion (always stable).
    """
    W = np.empty(nmax + 1)

    if np.abs(1 - q2) < 0.5:
        # Hypergeometric argument is small: recurse downward for stability.

        # Setup
        invs2 = 1 / s2
        z = (1 - q2) * invs2
        s2nmax = s2 ** nmax
        x = q2 * q3 * s2nmax

        # Upper boundary condition (direct evaluation via 2F1)
        W[nmax] = (
            s2
            * s2nmax
            * (3 / (nmax + 1) * hyp2f1(-0.5, nmax + 1, nmax + 2, 1 - q2) + 2 * q3)
            / (2 * nmax + 5)
        )

        # Recurse down
        for b in range(nmax - 1, -1, -1):
            f = 1 / (b + 1)
            A = z * (1 + 2.5 * f)
            B = x * f
            W[b] = A * W[b + 1] + B
            x *= invs2

    else:
        # Upward recursion is stable here.

        # Setup
        z = s2 / (1 - q2)
        x = -2 * q3 * (z - s2) * s2

        # Lower boundary condition
        W[0] = (2 / 5) * (z * (1 - q3) + s2 * q3)

        # Recurse up
        for b in range(1, nmax + 1):
            f = 1 / (2 * b + 5)
            A = z * (2 * b) * f
            B = x * f
            W[b] = A * W[b - 1] + B
            x *= s2

    return W
def compute_W(nmax, s2, q2, q3):
    """Evaluate the indefinite W integrals at each (s2, q2, q3) limit and
    difference them pairwise over the integration limits."""
    indefinite = [_compute_W_indef(nmax, a, b, c) for a, b, c in zip(s2, q2, q3)]
    return pairdiff(np.array(indefinite))
def compute_J(nmax, k2, km2, kappa, s1, s2, c1, q2, dF, dE):
    """
    Return the array J[0 .. nmax], computed recursively using
    a tridiagonal solver and a lower boundary condition
    (analytic in terms of elliptic integrals) and an upper
    boundary condition (computed numerically).

    dF and dE are the (pair-differenced) incomplete elliptic integrals of
    the first and second kind over the kappa limits.
    """
    # Boundary conditions: f0 analytic (elliptic integrals), fN numerical.
    z = s1 * c1 * np.sqrt(q2)
    resid = km2 * pairdiff(z)
    f0 = (1 / 3) * (2 * (2 - km2) * dE + (km2 - 1) * dF + resid)
    fN = J(nmax, k2, kappa)

    # Set up the tridiagonal problem (three-term recurrence in v).
    a = np.empty(nmax - 1)
    b = np.empty(nmax - 1)
    c = np.empty(nmax - 1)
    term = k2 * z * q2 ** 2
    for i, v in enumerate(range(2, nmax + 1)):
        amp = 1.0 / (2 * v + 3)
        a[i] = -2 * (v + (v - 1) * k2 + 1) * amp
        b[i] = (2 * v - 3) * k2 * amp
        c[i] = pairdiff(term) * amp
        term *= s2

    # Add the boundary conditions
    c[0] -= b[0] * f0
    c[-1] -= fN

    # Construct the tridiagonal matrix
    A = np.diag(a, 0) + np.diag(b[1:], -1) + np.diag(np.ones(nmax - 2), 1)

    # Solve
    soln = np.linalg.solve(A, c)
    return np.concatenate(([f0], soln, [fN]))
def K(I, delta, u, v):
    """Return the integral K: a Vieta-weighted sum over the I integrals.

    Args:
        I: array of precomputed I integrals (see compute_I).
        delta: expansion variable passed through to Vieta.
        u, v: non-negative integer indices; the sum has u + v + 1 terms.
    """
    # Generator expression: no need to materialize an intermediate list.
    return sum(Vieta(i, u, v, delta) * I[i + u] for i in range(u + v + 1))
def L(J, k, delta, u, v, t):
    """Return the integral L: k^3 times a Vieta-weighted sum over the J
    integrals, offset by t.

    Args:
        J: array of precomputed J integrals (see compute_J).
        k: elliptic modulus.
        delta: expansion variable passed through to Vieta.
        u, v: non-negative integer indices; the sum has u + v + 1 terms.
        t: extra index offset into J.
    """
    # Generator expression: no need to materialize an intermediate list.
    return k ** 3 * sum(
        Vieta(i, u, v, delta) * J[i + u + t] for i in range(u + v + 1)
    )
def compute_H(uvmax, xi, gradient=False):
    """Compute the (uvmax+1) x (uvmax+1) table of H[u, v] integrals (of
    cos^u sin^v over the paired xi limits) by two-term recursion in u and v.

    If gradient is True, also return dH, the per-limit integrand values
    (shape (uvmax+1, uvmax+1, len(xi))) used by callers for derivatives.
    """
    c = np.cos(xi)
    s = np.sin(xi)
    cs = c * s
    cc = c ** 2
    ss = s ** 2
    H = np.empty((uvmax + 1, uvmax + 1))
    dH = np.empty((uvmax + 1, uvmax + 1, len(xi)))
    # Seed the four lowest-order entries analytically.
    H[0, 0] = pairdiff(xi)
    dH[0, 0] = 1
    H[1, 0] = pairdiff(s)
    dH[1, 0] = c
    H[0, 1] = -pairdiff(c)
    dH[0, 1] = s
    H[1, 1] = -0.5 * pairdiff(cc)
    dH[1, 1] = cs
    # Recurse upward in v (for u = 0, 1), then upward in u.
    for u in range(2):
        for v in range(2, uvmax + 1 - u):
            H[u, v] = (-pairdiff(dH[u, v - 2] * cs) + (v - 1) * H[u, v - 2]) / (u + v)
            dH[u, v] = dH[u, v - 2] * ss
    for u in range(2, uvmax + 1):
        for v in range(uvmax + 1 - u):
            H[u, v] = (pairdiff(dH[u - 2, v] * cs) + (u - 1) * H[u - 2, v]) / (u + v)
            dH[u, v] = dH[u - 2, v] * cc
    if gradient:
        return H, dH
    else:
        return H
def _compute_T2_indef(b, xi):
    """Indefinite form of the special T2 integral, evaluated at angle xi.

    Note: requires b >= 0. The four exact special cases below avoid the
    division by c (and by 2*bbc) that the general expression needs.
    """
    s = np.sin(xi)
    c = np.cos(xi)
    t = s / c
    sgn = np.sign(s)
    bc = np.sqrt(1 - b ** 2)
    bbc = b * bc

    # Special cases (quadrant boundaries)
    if xi == 0:
        return -(np.arctan((2 * b ** 2 - 1) / (2 * bbc)) + bbc) / 3
    elif xi == 0.5 * np.pi:
        return (0.5 * np.pi - np.arctan(b / bc)) / 3
    elif xi == np.pi:
        return (0.5 * np.pi + bbc) / 3
    elif xi == 1.5 * np.pi:
        return (0.5 * np.pi + np.arctan(b / bc) + 2 * bbc) / 3

    # Figure out the offset that makes the antiderivative continuous
    # across quadrant boundaries.
    if xi < 0.5 * np.pi:
        delta = 0
    elif xi < np.pi:
        delta = np.pi
    elif xi < 1.5 * np.pi:
        delta = 2 * bbc
    else:
        delta = np.pi + 2 * bbc

    # We're done
    return (
        np.arctan(b * t)
        - sgn * (np.arctan(((s / (1 + c)) ** 2 + 2 * b ** 2 - 1) / (2 * bbc)) + bbc * c)
        + delta
    ) / 3
def compute_P(ydeg, bo, ro, kappa):
    """Compute the P integral vector (length (ydeg+1)^2) over the kappa
    limits, for impact parameter bo and occultor radius ro.

    Dispatches each spherical-harmonic index (l, m) to the appropriate
    closed-form case in terms of the helper integrals U, I, W and J.
    """

    # Basic variables
    delta = (bo - ro) / (2 * ro)
    k2 = (1 - ro ** 2 - bo ** 2 + 2 * bo * ro) / (4 * bo * ro)
    k = np.sqrt(k2)
    km2 = 1.0 / k2
    fourbr15 = (4 * bo * ro) ** 1.5
    k3fourbr15 = k ** 3 * fourbr15
    # tworo[i] = (2 * ro) ** i
    tworo = np.empty(ydeg + 4)
    tworo[0] = 1.0
    for i in range(1, ydeg + 4):
        tworo[i] = tworo[i - 1] * 2 * ro

    # Pre-compute the helper integrals
    x = 0.5 * kappa
    s1 = np.sin(x)
    s2 = s1 ** 2
    c1 = np.cos(x)
    # Clip s2 / k2 at 1 so q2 stays non-negative despite roundoff.
    q2 = 1 - np.minimum(1.0, s2 / k2)
    q3 = q2 ** 1.5
    U = compute_U(2 * ydeg + 5, s1)
    I = compute_I(ydeg + 3, kappa, s1, c1)
    W = compute_W(ydeg, s2, q2, q3)

    # Compute the elliptic integrals
    F, E, PIprime = ellip(bo, ro, kappa)
    J = compute_J(ydeg + 1, k2, km2, kappa, s1, s2, c1, q2, F, E)

    # Now populate the P array
    P = np.zeros((ydeg + 1) ** 2)
    n = 0
    for l in range(ydeg + 1):
        for m in range(-l, l + 1):

            mu = l - m
            nu = l + m

            # NOTE: mu / 2 is float division, so this branch is taken
            # only when mu is divisible by 4.
            if (mu / 2) % 2 == 0:

                # Same as in starry
                P[n] = 2 * tworo[l + 2] * K(I, delta, (mu + 4) // 4, nu // 2)

            elif mu == 1:

                if l == 1:

                    # Same as in starry, but using expression from Pal (2012)
                    P[2] = dP2(bo, ro, k2, kappa, s1, s2, c1, F, E, PIprime)

                elif l % 2 == 0:

                    # Same as in starry
                    P[n] = (
                        tworo[l - 1]
                        * fourbr15
                        * (
                            L(J, k, delta, (l - 2) // 2, 0, 0)
                            - 2 * L(J, k, delta, (l - 2) // 2, 0, 1)
                        )
                    )

                else:
                    # Same as in starry
                    P[n] = (
                        tworo[l - 1]
                        * fourbr15
                        * (
                            L(J, k, delta, (l - 3) // 2, 1, 0)
                            - 2 * L(J, k, delta, (l - 3) // 2, 1, 1)
                        )
                    )

            elif (mu - 1) / 2 % 2 == 0:

                # Same as in starry
                P[n] = (
                    2
                    * tworo[l - 1]
                    * fourbr15
                    * L(J, k, delta, (mu - 1) // 4, (nu - 1) // 2, 0)
                )

            else:

                """
                A note about these cases. In the original starry code, these integrals
                are always zero because the integrand is antisymmetric about the
                midpoint. Now, however, the integration limits are different, so
                there's no cancellation in general.

                The cases below are just the first and fourth cases in equation (D25)
                of the starry paper. We can re-write them as the first and fourth cases
                in (D32) and (D35), respectively, but note that we pick up a factor
                of `sgn(cos(phi))`, since the power of the cosine term in the integrand
                is odd.

                The other thing to note is that `u` in the call to `K(u, v)` is now
                a half-integer, so our Vieta trick (D36, D37) doesn't work out of the box.
                """

                if nu % 2 == 0:

                    res = 0
                    u = int((mu + 4.0) // 4)
                    v = int(nu / 2)
                    for i in range(u + v + 1):
                        res += Vieta(i, u, v, delta) * U[2 * (u + i) + 1]
                    P[n] = 2 * tworo[l + 2] * res

                else:

                    res = 0
                    u = (mu - 1) // 4
                    v = (nu - 1) // 2
                    for i in range(u + v + 1):
                        res += Vieta(i, u, v, delta) * W[i + u]
                    P[n] = tworo[l - 1] * k3fourbr15 * res

            n += 1

    return P
def compute_Q(ydeg, lam, gradient=False):
    """Compute the Q integral vector (length (ydeg+1)^2) over the lam
    limits; optionally also return dQ/dlam (one column per limit).
    """
    # Pre-compute H
    if gradient:
        H, dH = compute_H(ydeg + 2, lam, gradient=True)
    else:
        H = compute_H(ydeg + 2, lam)

    # Allocate
    Q = np.zeros((ydeg + 1) ** 2)
    dQdlam = np.zeros(((ydeg + 1) ** 2, len(lam)))

    # Note that the linear term is special
    Q[2] = pairdiff(lam) / 3
    dQdlam[2] = np.ones_like(lam) / 3

    # Easy!
    n = 0
    for l in range(ydeg + 1):
        for m in range(-l, l + 1):
            mu = l - m
            nu = l + m
            # Odd-nu terms integrate to zero; only even nu contributes.
            if nu % 2 == 0:
                Q[n] = H[(mu + 4) // 2, nu // 2]
                if gradient:
                    dQdlam[n] = dH[(mu + 4) // 2, nu // 2]
            n += 1

    # Enforce alternating signs for (lower, upper) limits
    dQdlam *= np.repeat([-1, 1], len(lam) // 2).reshape(1, -1)

    if gradient:
        return Q, dQdlam
    else:
        return Q
def compute_T(ydeg, b, theta, xi):
    """Compute the T integral vector (length (ydeg+1)^2) over the xi limits
    for terminator parameter b and rotation angle theta.

    The general path recurses over the starry cases 1/5 and 3/4; the two
    special limits sin(theta) ~ 0 and cos(theta) ~ 0 use dedicated
    closed forms to avoid division by st or ct below.
    """
    # Pre-compute H
    H = compute_H(ydeg + 2, xi)

    # Vars
    ct = np.cos(theta)
    st = np.sin(theta)
    ttinvb = st / (b * ct)
    invbtt = ct / (b * st)
    b32 = (1 - b ** 2) ** 1.5
    bct = b * ct
    bst = b * st

    # Recurse
    T = np.zeros((ydeg + 1) ** 2)

    # Case 2 (special)
    T[2] = pairdiff([np.sign(b) * _compute_T2_indef(np.abs(b), x) for x in xi])

    # Special limit: sin(theta) = 0
    if np.abs(st) < STARRY_T_TOL:

        sgnct = np.sign(ct)
        n = 0
        for l in range(ydeg + 1):
            for m in range(-l, l + 1):
                mu = l - m
                nu = l + m
                if nu % 2 == 0:
                    T[n] = sgnct ** l * b ** (1 + nu // 2) * H[(mu + 4) // 2, nu // 2]
                else:
                    if mu == 1:
                        if (l % 2) == 0:
                            T[n] = -sgnct * b32 * H[l - 2, 4]
                        elif l > 1:
                            T[n] = -b * b32 * H[l - 3, 5]
                    else:
                        T[n] = sgnct ** (l - 1) * (
                            b32 * b ** ((nu + 1) // 2) * H[(mu - 1) // 2, (nu + 5) // 2]
                        )
                n += 1
        return T

    # Special limit: cos(theta) = 0
    elif np.abs(ct) < STARRY_T_TOL:

        sgnst = np.sign(st)
        n = 0
        for l in range(ydeg + 1):
            for m in range(-l, l + 1):
                mu = l - m
                nu = l + m
                if nu % 2 == 0:
                    T[n] = b ** ((mu + 2) // 2) * H[nu // 2, (mu + 4) // 2]
                    if sgnst == 1:
                        T[n] *= (-1) ** (mu // 2)
                    else:
                        T[n] *= (-1) ** (nu // 2)
                else:
                    if mu == 1:
                        if (l % 2) == 0:
                            T[n] = (
                                (-sgnst) ** (l - 1) * b ** (l - 1) * b32 * H[1, l + 1]
                            )
                        elif l > 1:
                            T[n] = b ** (l - 2) * b32 * H[2, l]
                            if sgnst == 1:
                                T[n] *= (-1) ** l
                            else:
                                T[n] *= -1
                    else:
                        T[n] = (
                            b32 * b ** ((mu - 3) // 2) * H[(nu - 1) // 2, (mu + 5) // 2]
                        )
                        if sgnst == 1:
                            T[n] *= (-1) ** ((mu - 1) // 2)
                        else:
                            T[n] *= (-1) ** ((nu - 1) // 2)
                n += 1
        return T

    # Cases 1 and 5 (general path): binomial-style double recursion
    # accumulating Vieta-like coefficients Z into the H table sums.
    jmax = 0
    Z0 = 1
    for nu in range(0, 2 * ydeg + 1, 2):
        kmax = 0
        Z1 = Z0
        for mu in range(0, 2 * ydeg - nu + 1, 2):
            l = (mu + nu) // 2
            n1 = l ** 2 + nu
            n5 = (l + 2) ** 2 + nu + 1
            Z2 = Z1
            for j in range(jmax + 1):
                Z_1 = -bst * Z2
                Z_5 = b32 * Z2
                for k in range(kmax + 1):
                    p = j + k
                    q = l + 1 - (j + k)
                    fac = -invbtt / (k + 1)
                    T[n1] += Z_1 * (bct * H[p + 1, q] - st * H[p, q + 1])
                    Z_1 *= (kmax + 1 - k) * fac
                    if n5 < (ydeg + 1) ** 2:
                        T[n5] += Z_5 * (bct * H[p + 1, q + 2] - st * H[p, q + 3])
                        Z_5 *= (kmax - k) * fac
                T[n1] += Z_1 * (bct * H[p + 2, q - 1] - st * H[p + 1, q])
                Z2 *= (jmax - j) / (j + 1) * ttinvb
            kmax += 1
            Z1 *= -bst
        jmax += 1
        Z0 *= bct

    # Cases 3 and 4
    Z0 = b32
    kmax = 0
    for l in range(2, ydeg + 1, 2):
        n3 = l ** 2 + 2 * l - 1
        n4 = (l + 1) ** 2 + 2 * l + 1
        Z = Z0
        for k in range(kmax + 1):
            p = k
            q = l + 1 - k
            T[n3] -= Z * (bst * H[p + 1, q] + ct * H[p, q + 1])
            if l < ydeg:
                T[n4] -= Z * (
                    bst * st * H[p + 2, q]
                    + bct * ct * H[p, q + 2]
                    + (1 + b ** 2) * st * ct * H[p + 1, q + 1]
                )
            Z *= -(kmax - k) / (k + 1) * invbtt
        kmax += 2
        Z0 *= bst ** 2

    return T
| [
"rodluger@gmail.com"
] | rodluger@gmail.com |
1ba87cd411f46c264b9fd8759ef716c3d9e27938 | c06efd90533c51c2b29b7e92cd13723388de25ee | /actions/patchStorageV1beta1StorageClass.py | a57bbce258efa5ad9e6ef149ec1d897e8648932f | [] | no_license | ajohnstone/stackstorm-kubernetes | 490e4a73daad3713d7c5b5b639d5f30ff1ab3e58 | 99ffad27f5947583a2ab1b56e80c06003d014c47 | refs/heads/master | 2021-01-11T23:29:49.642435 | 2016-12-07T13:20:34 | 2016-12-07T13:20:34 | 78,588,572 | 0 | 0 | null | 2017-01-11T00:48:59 | 2017-01-11T00:48:59 | null | UTF-8 | Python | false | false | 746 | py | from lib import k8s
from st2actions.runners.pythonrunner import Action
class patchStorageV1beta1StorageClass(Action):
    """StackStorm action: patch a v1beta1 StorageClass via the k8s client.

    `body` and `name` are required; `config_override` and `pretty` are
    forwarded only when supplied.
    """

    def run(self, body, name, config_override=None, pretty=None):
        client = k8s.K8sClient(self.config)
        # Validate required parameters before building the call arguments.
        if body is None:
            return (False, "body is a required parameter")
        if name is None:
            return (False, "name is a required parameter")
        args = {'body': body, 'name': name}
        # Optional parameters are included only when explicitly provided.
        if config_override is not None:
            args['config_override'] = config_override
        if pretty is not None:
            args['pretty'] = pretty
        return (True, client.runAction('patchStorageV1beta1StorageClass', **args))
| [
"andy@impulsed.net"
] | andy@impulsed.net |
003433cb893cff17a7ae9e5807ff49deed068997 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2/dircache.pyi | 523b850bc3e93f867de75c9bef0100e3b6d22c54 | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2/dircache.pyi | [
"mersoj@rpi.edu"
] | mersoj@rpi.edu |
5b954273bea26961fa06977aa411ddafbbcf44d9 | a31bb4bb77fb3a40b00ac6b3e4cdaf3bb4fa54d7 | /day1/while.py | 576c054112ff82b57df49e5e7b9422d3faa9b507 | [] | no_license | lin790292154/python_note | 457069a5d0adc6991321f5eb4644e1c91c597fff | 3b35a3087117aadc0b8b63a260ef55e5b20f5e96 | refs/heads/master | 2021-08-31T19:58:15.775229 | 2017-12-22T17:14:40 | 2017-12-22T17:14:40 | 110,909,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # write by kratos
'''
count = 0
while True:
    print("count=",count)
    count = count + 1
'''
# Guessing game: the user gets three attempts to guess the hard-coded age.
age_of_oldboy = 66
count = 0
while count < 3 :
    guess_age = int(input("what is oldboy age ?"))
    if age_of_oldboy == guess_age :
        print("you are right")
        break
    elif age_of_oldboy > guess_age:
        print("To small")
    else:
        print("To big")
    count += 1
else:
    # while/else: this runs only when the loop finishes without a break,
    # i.e. all three guesses were wrong.
    print("you have tried too many time,fuck off")
| [
"lin790292154@163.com"
] | lin790292154@163.com |
571fa44c13cd529ee4eb84aca37e4931096a4b98 | d22dc2f7cbf5370c7afc381b27efffcdf8a7d00a | /prob05.py3 | 42e2e8b5719a19f62ba366bbaed5041b56a6acbd | [] | no_license | xorkevin/CodeWarsHP2016 | 13150154f969bb1765ecf4206c7f5106f93e8089 | 2732c3944e2431d43d574428b70b0e48a1e623ee | refs/heads/master | 2021-01-10T01:21:43.580960 | 2016-03-05T20:11:12 | 2016-03-05T20:11:12 | 53,221,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py3 | import sys
# For each input line "x y": drop every character of y whose index is a
# multiple of x, then print the remaining string and its length.
f = sys.stdin.read()
f = f.split('\n')
f.pop(0)   # discard header line
f.pop()    # discard trailing empty line after the final newline
for line in f:
    x, y = line.split(' ')
    x = int(x)
    y = list(enumerate(y))
    # Keep only characters at indices not divisible by x.
    y = list(filter(lambda tup: tup[0]%x != 0, y))
    k = ''
    for tup in y:
        k += tup[1]
    print('' + str(k) + ' ' + str(len(k)))
"wangkevin448@gmail.com"
] | wangkevin448@gmail.com |
d894c9f24bd98116d77798510af27cc57005c85c | f70c7ed21258a865c702c551aa8e251b88ff8f1d | /galloCRM/venv/bin/pip3 | 98d0d33d109ce13e0b03692ddfa17295bb175409 | [] | no_license | gallofb/apple_CRM | 9318f6bebce9bf81507870d625c72b47a16a46b8 | b8c11a3ebced320af897d23f0954b93da9006a4c | refs/heads/master | 2020-04-23T16:54:50.712398 | 2019-04-07T10:18:08 | 2019-04-07T10:18:08 | 171,314,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/gallo/apple/galloCRM/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"854591086@qq.com"
] | 854591086@qq.com | |
a6630d2aeb6791c9a89d193f911eb7ef74dbe858 | b9da0a46452a67d4d2e1c6e76d982295b8e9bec4 | /double_joint_arm/robot/robot_state_ideal_speed.py | 402873c81f5f1c2078042928112a15291143bf68 | [] | no_license | lessthantrue/RobotProjects | d68d0cfba090ca0e117c78baae7afdc13203ec6c | e84069ad90dca9e3d4873febe1bcb40f396d656e | refs/heads/master | 2023-02-06T00:02:02.070109 | 2020-12-27T06:00:33 | 2020-12-27T06:00:33 | 291,508,533 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | import numpy as np
import kinematics
from matrix_utils import *
def close(v1, v2, tol=0.0001):
    """Return True if vectors v1 and v2 are within Euclidean distance tol.

    Args:
        v1, v2: array-like vectors of equal shape.
        tol: maximum allowed distance (default 1e-4).
    """
    # np.linalg.norm is already non-negative, so the abs() the original
    # wrapped around it was redundant.
    return np.linalg.norm(v1 - v2) < tol
class Robot():
    """Planar two-joint arm with ideal (instantaneous) joint-speed control.

    Joint angles t1, t2 and speeds w1, w2 are integrated trapezoidally in
    act(); homogeneous 2D transforms (from matrix_utils) track the frames.
    """

    def __init__(self, t1_0, t2_0, x0=1, y0=1):
        # Link lengths are fixed.
        self.len1 = 1
        self.len2 = 2

        # frames
        self.Rw_0 = translation2d(x0, y0)                  # world -> base
        self.R0_1_base = translation2d(self.len1, 0)       # base offset of link 1
        self.R1_2_base = translation2d(self.len2, 0)       # base offset of link 2
        self.R0_1 = rotation2d(t1_0) @ self.R0_1_base
        self.R1_2 = rotation2d(t2_0) @ self.R1_2_base

        self.x0, self.y0 = x0, y0
        self.t1, self.t2 = t1_0, t2_0
        self.w1, self.w2 = 0, 0

    # c1, c2 = w1, w2
    def act(self, c1, c2, dt):
        """Command joint speeds c1, c2 for a timestep dt.

        Angles are advanced with the trapezoid rule (average of old and
        commanded speed), then the link frames are rebuilt.
        """
        self.t1 += (self.w1 + c1) / 2 * dt
        self.t2 += (self.w2 + c2) / 2 * dt
        self.w1 = c1
        self.w2 = c2
        self.R0_1 = rotation2d(self.t1) @ self.R0_1_base
        self.R1_2 = rotation2d(self.t2) @ self.R1_2_base

    def getStateVector(self):
        # State is [t1, t2, w1, w2].
        return np.array([self.t1, self.t2, self.w1, self.w2])

    def getVelVector(self):
        # Velocity is [w1, w2, 0, 0] (accelerations are not modeled).
        return np.array([self.w1, self.w2, 0, 0])

    def getJoints(self):
        """Return world-frame homogeneous positions of base, elbow and tip."""
        q0 = self.Rw_0 @ np.array([0, 0, 1])
        q1 = self.Rw_0 @ self.R0_1 @ np.array([0, 0, 1])
        q2 = self.Rw_0 @ self.R0_1 @ self.R1_2 @ np.array([0, 0, 1])
        return (q0, q1, q2)
return (q0, q1, q2) | [
"nickm@outofthisworld.net"
] | nickm@outofthisworld.net |
27e3a773e1f3b1c7193ce9a831b0b54a38653ad7 | cf5f24e5a32f8cafe90d4253d727b1c0457da6a4 | /algorithm/BOJ_1629.py | 11a30af639ff558eb56b49660735d2acd32acf3e | [] | no_license | seoljeongwoo/learn | 537659ca942875f6846646c2e21e1e9f2e5b811e | 5b423e475c8f2bc47cb6dee09b8961d83ab08568 | refs/heads/main | 2023-05-04T18:07:27.592058 | 2021-05-05T17:32:50 | 2021-05-05T17:32:50 | 324,725,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # import sys
# input = sys.stdin.readline
# A,B,C=map(int,input().split())
# def solve(a,b):
# if b==1: return a
# ret = solve(a,b//2)%C
# ret = (ret*ret)%C
# if b%2==1: ret = (ret*a)%C
# return ret
# print(solve(A,B)%C)
print(pow(*map(int,input().split()))) | [
"noreply@github.com"
] | seoljeongwoo.noreply@github.com |
6470a8daf591db0dcf2949014f4adbe1d19ddaf5 | 5d2b04b5454cd3b2d75bf153c96d5fc97e40b2f2 | /apps/registro/forms.py | 3e23888c85c0dcf3edf5a5897bd25393c443e8ae | [
"Unlicense"
] | permissive | hector-delgado/django-crud-in-docker-container | 66162c460c72c6b4bc4d17a13a0e67603f1a5627 | 6fc78f6b8e88cc9836946cb5f77c3fe39575934e | refs/heads/master | 2022-06-18T06:17:25.910698 | 2020-05-04T17:15:00 | 2020-05-04T17:15:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from django import forms
from apps.registro.models import Usuario
class UsuarioForm(forms.ModelForm):
    """ModelForm for Usuario: Spanish labels and Bootstrap-styled inputs."""

    class Meta:
        model = Usuario

        fields = [
            'nombre',
            'apellidos',
            'edad',
            'telefono',
            'email',
            'domicilio',
        ]
        labels = {
            'nombre': 'Nombre',
            'apellidos': 'Apellidos',
            'edad': 'Edad',
            'telefono': 'Telefono',
            'email': 'E-mail',
            'domicilio': 'Domicilio',
        }
        # All fields rendered as text inputs with the Bootstrap form-control class.
        widgets = {
            'nombre': forms.TextInput(attrs={'class':'form-control'}),
            'apellidos': forms.TextInput(attrs={'class':'form-control'}),
            'edad': forms.TextInput(attrs={'class':'form-control'}),
            'telefono': forms.TextInput(attrs={'class':'form-control'}),
            'email': forms.TextInput(attrs={'class':'form-control'}),
            'domicilio': forms.TextInput(attrs={'class':'form-control'}),
        }
"hector.delgado.irt@gmail.com"
] | hector.delgado.irt@gmail.com |
a82e3b55c739f26cf5bb75daf2ce5b13f71cf467 | 7997c79e65a20e80ba76887181378ad110389121 | /making_queries/apps.py | c21846fc2f0c4688682d053d2abe910725fe1e01 | [] | no_license | caerang/django-practice | 68b7940087fdab51dc2e4ab589e0d1bab3e469e6 | cd9497053ec16f19c70daba03c0117d710a8b64b | refs/heads/master | 2022-12-06T04:32:33.084302 | 2022-11-28T07:09:46 | 2022-11-28T07:09:46 | 80,974,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from django.apps import AppConfig
class MakingQueriesConfig(AppConfig):
    """Django application configuration for the making_queries app."""
    name = 'making_queries'
| [
"mylovercorea@gmail.com"
] | mylovercorea@gmail.com |
0cd738fe1ed690b6bda48d0d7ac79cc298310385 | e42174d0b04e2ff33955a1d49297d8deaa6a1cfe | /Main2.py | 75e16c0fa236f8c6b7ec947ccca6ba715002a0bc | [] | no_license | Fudan-iGEM/2021-software-parse | ead1669940aff560ed74ccc54e6d488657792714 | e604a1cedd49f7b50a7d5bdcab2a5f4475d0bcf4 | refs/heads/main | 2023-08-01T08:02:13.727207 | 2021-09-18T14:45:13 | 2021-09-18T14:45:13 | 419,995,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,400 | py | import requests
from bs4 import BeautifulSoup
import time
import re
import multiprocessing
from multiprocessing import Pool
from openpyxl import workbook
from openpyxl import load_workbook
#from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
# TODO: the time.sleep() calls throughout this file could be tuned/optimized
# TODO: pages that fail to open need more robust try/except handling
# Overall design: the parts scraped for a given year live in module-level globals
class Part:
    """One iGEM part scraped from the Registry, with all collected metadata.

    The constructor arguments map one-to-one onto attributes, except
    ``linking_parts``, which is accepted for interface compatibility but
    intentionally not stored (matching the original behaviour).
    """
    # part_num(BBa.), part_name(CAP), part_id(internal code),
    # part_type(com/pro...), stars(includes team-usage / uniqueness flags)
    def __init__(self, part_num, part_name, part_id, part_url,
                 short_desc, part_type, team, year, sequence, contents,
                 stars, assemble_std, linking_parts, parts_used, using_parts, len):
        # Registry identifiers / location.
        self.part_num = part_num
        self.part_name = part_name
        self.part_id = part_id
        self.part_url = part_url
        # Descriptive metadata.
        self.short_desc = short_desc
        self.part_type = part_type
        self.team = team
        self.year = year
        # Content scraped from the part's page.
        self.sequence = sequence
        self.contents = contents
        self.stars = stars              # favourite/usage flag code (string)
        self.assemble_std = assemble_std
        # Relationships to other parts.
        self.parts_used = parts_used    # parts that use this part
        self.using_parts = using_parts  # sub-parts this part is built from
        self.len = len                  # sequence length as reported (string)

    def print_parts(self):
        """Print every stored field, one ``label = value`` line each."""
        labelled = (
            ("part_num", self.part_num),
            ("part_name", self.part_name),
            ("part_id", self.part_id),
            ("part_url", self.part_url),
            ("part_type", self.part_type),
            ("part_team", self.team),
            ("part_year", self.year),
            ("part_sequence", self.sequence),
            ("part_stars", self.stars),
            ("part_desc", self.short_desc),
            ("part_assemble_std", self.assemble_std),
            ("contents", self.contents),
            ("parts_used", self.parts_used),
            ("using_parts", self.using_parts),
            ("len", self.len),
        )
        for label, value in labelled:
            print(f"{label} = {value}")
        print("-" * 30)
all_team_with_urls = []
whole_Parts = []
def inter():
    """Ask the operator which year(s) to scan.

    The expected input format still needs documenting; a future front end
    can replace this console prompt.
    """
    prompt = "Which year would you want to scan for? Input 'years'"
    print(prompt)
    return 0
# Done: returns the URLs of all teams for a given year
def web_analysis_and_get_team_lists(year):
    """Open the iGEM Team_Parts page for *year* and collect every team's
    parts-page URL into the global ``all_team_with_urls`` list.

    Returns the (global) list of ``[year, team_name, url]`` triples.
    """
    # The chromedriver path is machine-specific and may need changing:
    #   desktop: 'D:\chromedriver.exe'
    #   laptop:  'C:\Python x64\Python\chromedriver.exe'
    print(f"---Start getting team lists in {year}---")
    driver = webdriver.Chrome('C:\\Users\GhoST\AppData\Local\Google\Chrome\Application\ChromeDriver.exe')
    front_url = "https://igem.org/Team_Parts?year="
    url = front_url + year
    # Retry until the banner image appears; note there is no retry cap here.
    while 1:
        try:
            driver.get(url)
            WebDriverWait(driver, 60, 1).until(
                EC.presence_of_element_located((By.XPATH, '//*[@id="topBanner"]/a/img')), message='')
            break
        except:
            print("刷新")  # "refreshing" (runtime string kept as-is)
            driver.refresh()
            pass
        time.sleep(1)
    one_team_with_url = []
    the_list = driver.find_elements_by_xpath('/html/body/div/div[3]/div/div/div/div[4]/table/tbody/tr/td/div/a')
    for item in the_list:
        one_team_with_url = [year, str(item.text), str(item.get_attribute('href'))]
        all_team_with_urls.append(one_team_with_url)
    print(f"---Ending getting team lists in {year}---")
    driver.close()
    return all_team_with_urls
# TODO: not implemented yet
def set_star_database():
    """TODO: not implemented -- intended to build the 'stars' rating database."""
    return
# Input: the URLs of all teams for one year; output: basic info for every part
# of that year (stored in the module-level globals)
def get_parts_urls(all_team_with_urls):
    """For every ``[year, team, url]`` triple, scrape both parts tables on the
    team's page and append a basic :class:`Part` (details filled in later) to
    the global ``whole_Parts`` list.
    """
    print("---Start getting parts urls and basic info--- ")
    for a_team in all_team_with_urls:
        year = a_team[0]
        team = a_team[1]
        url = a_team[2]
        # chromedriver path is machine-specific:
        #   desktop: 'D:\chromedriver.exe'
        #   laptop:  'C:\Python x64\Python\chromedriver.exe'
        driver = webdriver.Chrome('C:\\Users\GhoST\AppData\Local\Google\Chrome\Application\ChromeDriver.exe')
        while 1:
            try:
                driver.get(url)
                WebDriverWait(driver, 5, 1).until(EC.presence_of_element_located((By.XPATH, '//*[@id="new_menubar"]/ul/li[1]/div[1]')), message = '')
                break
            except:
                print("刷新")  # "refreshing" (runtime string kept as-is)
                driver.refresh()
                pass
            time.sleep(1)
        # Collect the basic attributes into parallel lists first.
        part_num_list = []
        part_numurl_list = []
        part_type_list = []
        part_desc = []
        part_designer = []
        part_len = []
        # Read the first table (favored parts).
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[1]/tbody/tr/td[3]/a'):
            part_num_list.append(str(item.text))
            part_numurl_list.append(item.get_attribute('href'))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[1]/tbody/tr/td[4]'):
            part_type_list.append(str(item.text))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[1]/tbody/tr/td[5]'):
            part_desc.append(str(item.text))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[1]/tbody/tr/td[6]'):
            part_designer.append(str(item.text))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[1]/tbody/tr/td[7]'):
            part_len.append(str(item.text))
        # Build Part objects for the first (favored) table.
        # The first '1' in the stars field marks "favored".
        for i in range(0, len(part_num_list)):
            new_part = Part(part_num_list[i], '', '', part_numurl_list[i], part_desc[i], part_type_list[i], team, year, '', '', '1', '', [], [], [], part_len[i])
            whole_Parts.append(new_part)
        part_num_list = []
        part_numurl_list = []
        part_type_list = []
        part_desc = []
        part_designer = []
        part_len = []
        # Read the second table (NOT favored parts).
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[2]/tbody/tr/td[3]/a'):
            part_num_list.append(str(item.text))
            part_numurl_list.append(item.get_attribute('href'))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[2]/tbody/tr/td[4]'):
            part_type_list.append(str(item.text))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[2]/tbody/tr/td[5]'):
            part_desc.append(str(item.text))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[2]/tbody/tr/td[6]'):
            part_designer.append(str(item.text))
        for item in driver.find_elements_by_xpath('/html/body/div/div[4]/div/div/table[2]/tbody/tr/td[7]'):
            part_len.append(str(item.text))
        # Build Part objects for the second table; stars '0' marks "not favored".
        for i in range(0, len(part_num_list)):
            new_part = Part(part_num_list[i], '', '', part_numurl_list[i], part_desc[i], part_type_list[i], team, year, '', '', '0', '', [], [], [], part_len[i])
            whole_Parts.append(new_part)
    print("---End getting parts urls and basic info--- ")
    # NOTE(review): only the driver from the last loop iteration is closed
    # here -- earlier drivers appear to leak; confirm intended behaviour.
    driver.close()
    return
# Incomplete: iterates the global whole_Parts list and fills in the details
def get_parts_details():
    """Visit each part page referenced by the global ``whole_Parts`` list and
    fill in the detailed fields (sub-parts, assembly standards, usage,
    sequence), saving the accumulated data via :func:`store_parts`.
    """
    for a_part in whole_Parts:
        print(f"---Start getting details of {a_part.part_num}---")
        url = a_part.part_url
        driver = webdriver.Chrome('C:\\Users\GhoST\AppData\Local\Google\Chrome\Application\ChromeDriver.exe')
        i = 0
        gotten = False
        # Up to 5 retries to load the part's main page.
        while 1:
            try:
                driver.get(url)
                WebDriverWait(driver, 10, 1).until(EC.presence_of_element_located((By.XPATH, '//*[@id="new_menubar"]/ul/li[1]/div[1]')), message = '')
                gotten = True
                break
            except:
                i= i + 1
                if i > 5:
                    break
                print("刷新")  # "refreshing" (runtime string kept as-is)
                driver.refresh()
                pass
            time.sleep(1)
        # At this point the part's main page is open (or we gave up).
        if not gotten :
            #new_part = Part(part_num_list[i], '', '', part_numurl_list[i], part_desc[i], part_type_list[i], team, year, '', '', '0', '', [], [], [], part_len[i])
            continue
        #-------------------------------------------
        get_using_parts_and_other_info(driver, a_part)
        get_assemble_std(driver, a_part)
        get_used_parts(driver, a_part)
        # get_sequence() closes the whole browser window as a side effect, so
        # all other data must be collected before this call.
        get_sequence(driver, a_part)
        # -------------------------------------------
        print(f"---End getting details of {a_part.part_num}---")
        store_parts()
        print(f"---Details of parts in {a_part.year} are saved---")
    return
# Done. "used" = parts that use this part; requires opening an extra page
def get_used_parts(driver, a_part):
    """Follow the "used by" link on the part page and record which other
    parts use this one on ``a_part.parts_used``.

    Stores the string 'None' (or ['None']) when no usage data is found.
    """
    try:
        item = driver.find_elements_by_xpath('//*[@id="part_status_wrapper"]/div[4]/a')
        url = str(item[0].get_attribute('href'))
    except:
        # No "used by" link on the page.
        a_part.parts_used = 'None'
        return
    while 1:
        try:
            driver.get(url)
            WebDriverWait(driver, 10, 1).until(
                EC.presence_of_element_located((By.XPATH,'/html/body')),
                message='')
            break
        except:
            print("刷新")  # "refreshing" (runtime string kept as-is)
            driver.refresh()
            pass
        time.sleep(1)
    used_parts = []
    # NOTE(review): 'list' shadows the builtin of the same name.
    list = driver.find_elements_by_class_name('noul_link.part_link')
    for item in list:
        used_parts.append(str(item.text))
    if len(used_parts) == 0:
        used_parts.append('None')
    a_part.parts_used = used_parts
    # Return to the part's main page for subsequent scraping steps.
    driver.back()
    return
# Done
def get_assemble_std(driver, a_part):
    """Record assembly-standard compatibility on ``a_part.assemble_std`` as a
    list of '1'/'0' flags (a green box in the compatibility widget = '1')."""
    assemble_lists = []
    for item in driver.find_elements_by_xpath('//*[@id="assembly_compatibility"]/div/ul/li'):
        if str(item.get_attribute("class")) == "boxctrl box_green" :
            assemble_lists.append('1')
        else:
            assemble_lists.append('0')
        #assemble_lists.append(str(item.get_attribute("class")))
    a_part.assemble_std = assemble_lists
    return
# This section reads everything from the part's main page. "using" = the
# component parts this part is built from; no extra page load is needed.
# Done; does not close the window.
def get_using_parts_and_other_info(driver, a_part):
    """Record the sub-parts a Composite part is built from on
    ``a_part.using_parts`` (``['self']`` for non-composite parts).

    Reads from the already-open part page and leaves the window open.
    """
    if a_part.part_type != 'Composite':
        a_part.using_parts = ['self']
    else:
        using_parts_list = []
        for item in driver.find_elements_by_xpath('//*[@id="seq_features_div"]/div[1]/div[4]/div/div[2]'):
            using_parts_list.append(str(item.text))
        # Normalise the numbering: ensure every entry carries the 'BBa' prefix.
        for i in range(0, len(using_parts_list)):
            if 'BBa' in using_parts_list[i]:
                continue
            else:
                using_parts_list[i] = 'BBa_'+ using_parts_list[i]
        a_part.using_parts = using_parts_list
    return
# Done; closes the browser window itself
def get_sequence(driver, a_part):
    """Click the sequence link on the part page, read the raw sequence from
    the pop-up window and store it on ``a_part.sequence``.

    WARNING: closes both the pop-up *and* the main browser window, so this
    must be the last scraping step for each part.
    """
    sequence_entrance = driver.find_elements_by_xpath('//*[@id="seq_features_div"]/div[1]/div[1]/span[5]')
    #webdriver.ActionChains(driver).move_to_element(sequence_entrance[0]).click(sequence_entrance[0]).perform().find_elements_by_xpath("/html/body/pre/text()")
    try:
        webdriver.ActionChains(driver).move_to_element(sequence_entrance[0]).click(sequence_entrance[0]).perform()
    except:
        # Runtime message kept as-is: "<part> has no sequence or fetching it failed"
        print(f"{a_part.part_num} 没有序列或序列获取失败")
        return
    time.sleep(1)
    # Switch to the newly opened pop-up window.
    handles = driver.window_handles
    # Note: other browser windows should be closed before this operation.
    index_handle = driver.current_window_handle
    for handle in handles:
        if handle != index_handle:
            driver.switch_to.window(handle)
    # find_elements_by_xpath always returns a list, hence the [0] below.
    sequence = driver.find_elements_by_xpath("/html/body/pre")
    a_part.sequence = str(sequence[0].text)
    driver.close()
    handle = driver.window_handles[0]
    driver.switch_to.window(handle)
    driver.close()
    return
# Next to finish: save once per year
def store_parts():
    """Write every :class:`Part` in the global ``whole_Parts`` list to an
    Excel workbook named ``D:\\<year>collection.xlsx``.

    All parts scraped in one run share a year, so the last part's year is
    used for the file name.  Does nothing when ``whole_Parts`` is empty
    (the original code raised ``NameError`` in that case because the loop
    variable was referenced after a zero-iteration loop).
    """
    if not whole_Parts:
        # Nothing scraped yet -- avoid referencing an undefined loop variable.
        return
    wb = workbook.Workbook()
    ws1 = wb.active
    # Header row mirrors the Part attributes written below.
    ws1.append(['part_num', 'part_name', 'part_id', 'part_url',
                'short_desc', 'part_type', 'team', 'year', 'sequence', 'contents',
                'stars', 'assemble_std', 'parts_used', 'using_parts', 'len'])
    for a_part in whole_Parts:
        ws1.append([a_part.part_num, a_part.part_name, a_part.part_id, a_part.part_url, a_part.short_desc,
                    a_part.part_type, a_part.team, a_part.year, a_part.sequence, a_part.contents, a_part.stars,
                    ' '.join(a_part.assemble_std),
                    ' '.join(a_part.parts_used), ' '.join(a_part.using_parts), a_part.len])
    wb.save(f'D:\\{a_part.year}collection.xlsx')
    return
def main():
    """Scrape every iGEM part for each year in ``years`` and save one
    spreadsheet per year.

    ``get_parts_urls()`` / ``get_parts_details()`` accumulate into the
    module-level lists, so both must be emptied between years.  The original
    code rebound *local* names (``whole_Parts = []``), which left the global
    accumulators growing across years; clearing the lists in place fixes
    that.
    """
    years = [2021]
    for year in years:
        all_team_with_urls = web_analysis_and_get_team_lists(str(year))
        get_parts_urls(all_team_with_urls)   # fills the global whole_Parts
        get_parts_details()                  # fills details; saves per year
        # Reset the module-level accumulators *in place* so the next year's
        # scrape starts fresh (rebinding a local name would not do this).
        whole_Parts.clear()
        all_team_with_urls.clear()
    return 0
main() | [
"TomGhostSmith@gmail.com"
] | TomGhostSmith@gmail.com |
f23530b0fcab203fccb0a43b9d3560015edbb1df | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/safExtCnfg.py | f32c8cf1cee106166c91aab4c960446be0295d8e | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | """safExtCnfg standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.base
from pylon.resources.standard import standard
class safExtCnfg(pylon.resources.base.Inheriting):
"""safExtCnfg standard property type. Safety mode. Mode that a device
has to be brought to when a safety external request state is pending."""
def __init__(self):
super().__init__(
)
self._original_name = 'SCPTsafExtCnfg'
self._property_scope, self._property_key = 0, 257
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = safExtCnfg()
pass
| [
"lcoppa@rocketmail.com"
] | lcoppa@rocketmail.com |
f34d29f947502d325641f91a58d256f2f869ef4f | c106ec4af915b485d625bfa835f769c2af1d34e3 | /trainxgb.py | 0f43497d436d2b81bd39167a3f8480667b78e5cf | [] | no_license | Men0x/aobd_project | 6820f38f13f2d9ce4c175a035aa4ef30e7925495 | d5dcf419775527cfe9025f8d87317ed5a10cd8db | refs/heads/master | 2023-01-04T05:34:43.768099 | 2020-10-30T21:45:06 | 2020-10-30T21:45:06 | 306,396,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,296 | py | import os
import warnings
import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import XGBClassifier
import mlflow
import mlflow.sklearn
from preprocessing_data import preprocessing_train, preprocessing_test
import logging
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    np.random.seed(40)

    # Load the pre-processed training set and split off the label column.
    train = preprocessing_train()
    X = train.drop(columns=['TARGET'])
    y = train['TARGET']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

    # CLI hyper-parameters:
    #   argv[1] = learning_rate, argv[2] = n_estimators, argv[3] = n_jobs.
    lr = float(sys.argv[1]) if len(sys.argv) > 1 else 0.1
    # Bug fix: the original guard was `len(sys.argv) > 500`, which can never
    # be true, so the n_estimators argument was silently ignored.  It must
    # simply check that a third argv entry exists.
    ne = int(sys.argv[2]) if len(sys.argv) > 2 else 100
    nj = int(sys.argv[3]) if len(sys.argv) > 3 else 1

    with mlflow.start_run():
        model = XGBClassifier(learning_rate=lr, n_estimators=ne, n_jobs=nj)
        model.fit(X_train, y_train)
        predicted_qualities = model.predict(X_test)

        # Per-class metrics: index 0 = class 0, index 1 = class 1.
        precision, recall, fscore, support = score(y_test, predicted_qualities)
        precision0 = precision[0]
        precision1 = precision[1]
        recall0 = recall[0]
        recall1 = recall[1]
        fscore0 = fscore[0]
        fscore1 = fscore[1]
        support0 = support[0]
        support1 = support[1]
        accuracy = accuracy_score(y_test, predicted_qualities)

        print("XGB Model (learning_rate=%f, n_estimators=%f, n_jobs=%f):" % (lr, ne, nj))

        # Record the run in MLflow: parameters, metrics and the fitted model.
        mlflow.log_param("learning_rate", lr)
        mlflow.log_param("n_estimators", ne)
        mlflow.log_param("n_jobs", nj)
        mlflow.log_metric("precision0", precision0)
        mlflow.log_metric("precision1", precision1)
        mlflow.log_metric("recall0", recall0)
        mlflow.log_metric("recall1", recall1)
        mlflow.log_metric("fscore0", fscore0)
        mlflow.log_metric("fscore1", fscore1)
        mlflow.log_metric("support0", support0)
        mlflow.log_metric("support1", support1)
        mlflow.log_metric("accuracy", accuracy)
        mlflow.sklearn.log_model(model, "XGBoostClassifier")
"38214356+Men0x@users.noreply.github.com"
] | 38214356+Men0x@users.noreply.github.com |
df760f3fb2bae9441d342cf168781c8ce3d3cf92 | 261fa6004234ccae2b1a4ff455ae54aefecbb172 | /ui_extensions/content_export/views.py | cc9e021e8399ec531eb798666ee498596ae79847 | [
"Apache-2.0"
] | permissive | svang001/cloudbolt-forge | 671575eecd54e1207b7dde144db2fdb6c43c9ddf | 3796900115876f8a9ee333b75f45e3d60d7705d7 | refs/heads/master | 2023-02-23T23:03:33.225739 | 2021-01-19T20:09:21 | 2021-01-19T20:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,587 | py | import requests
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.html import mark_safe
from cbhooks.models import (
HookPointAction, RecurringActionJob, ServerAction, ResourceAction, TriggerPoint
)
from extensions.models import UIExtension, XUIIndexer
from extensions.views import admin_extension
from servicecatalog.models import ServiceBlueprint
from utilities.decorators import dialog_view
from utilities.permissions import cbadmin_required
from xui.content_export.forms import ExportContentForm
@admin_extension(title='Exportable Contents', description='All Exportable CloudBolt Contents')
@cbadmin_required
def export_content_list(request):
    """
    View for listing metadata for all exportable contents.

    Fetches the exportable-content listing from the local v2 API, flattens
    every collection into one list of dicts (each tagged with its database
    ``id`` and source ``collections`` name), and renders the list template.
    """
    proto = request.META['wsgi.url_scheme']
    host = request.META['HTTP_HOST']
    # NOTE(review): verify=False disables TLS verification for this loopback
    # call to our own API.
    resp = requests.get('{}://{}/api/v2/exportable-content/?version=dev'.format(proto, host), verify=False)
    response = resp.json()
    from api.v2.serializers import keys_hyphens_to_underscores

    exportable_contents = []
    # The tuple order below preserves the original display order.  For every
    # collection except UI extensions the id is the second-to-last segment of
    # the package URL; UI extensions are looked up by name (last segment).
    for collection in ('server-actions', 'orchestration-actions',
                       'ui-extension-packages', 'blueprints',
                       'recurring-jobs', 'resource-actions'):
        if collection not in response:
            continue
        if collection == 'ui-extension-packages':
            # Re-index UI extensions so the name -> id lookup below is current.
            XUIIndexer().index()
        for item in response[collection]:
            if collection == 'ui-extension-packages':
                name = item['package-url'].split('/')[-1]
                item['id'] = UIExtension.objects.get(name=name).id
            else:
                item['id'] = item['package-url'].split('/')[-2]
            item['collections'] = collection
            exportable_contents.append(keys_hyphens_to_underscores(item))

    list_context = {
        'exportable_contents': exportable_contents,
        'pagetitle': 'Exportable Contents',
    }
    return render(request, 'content_export/templates/list.html', list_context)
@dialog_view
@cbadmin_required
def export_content_edit(request, id=None, collections=''):
    """
    Edit exportable contents.

    *collections* selects which model *id* refers to.  GET renders the edit
    form; POST validates and saves it, then redirects to the referring page.
    """
    # Resolve the model instance from the collection name.
    if collections == 'blueprints':
        instance = ServiceBlueprint.objects.get(id=id)
    elif collections == 'resource-actions':
        instance = ResourceAction.objects.get(id=id)
    elif collections == 'server-actions':
        instance = ServerAction.objects.get(id=id)
    elif collections == 'recurring-jobs':
        instance = RecurringActionJob.objects.get(id=id)
    elif collections == 'orchestration-actions':
        instance = HookPointAction.objects.get(id=id)
    elif collections == 'ui-extension-packages':
        instance = UIExtension.objects.get(id=id)
    # NOTE(review): an unrecognised *collections* value leaves `instance`
    # unbound and raises NameError below -- confirm callers only pass the
    # values handled above.
    if request.method == 'POST':
        form = ExportContentForm(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            instance = form.save()
            msg = "Metadata details for {} have been saved.".format(instance)
            messages.success(request, msg)
            return HttpResponseRedirect(request.META['HTTP_REFERER'])
    else:
        form = ExportContentForm(instance=instance)
    return {
        'title': 'Edit Exportable Metadata',
        'form': form,
        'action_url': reverse('export_content_edit', args=[id, collections]),
        'use_ajax': True,
        'submit': 'Save',
        'extra_onready_js': mark_safe("$('.render_as_datepicker').datepicker({dateFormat: 'yy-mm-dd'});")
    }
| [
"klaratta@cloudboltsoftware.com"
] | klaratta@cloudboltsoftware.com |
26b05b882a260c212c2dd400b26cb5baa8526671 | f1a91c56cef7f8f21c0e154118278f394acf8a0c | /app/util/import_util.py | ca363568c8866118a1bc41ab376ad7e8f2b11b6a | [] | no_license | imsazzad/fastapi-python-template | ad5935d556a4798a2504deaebf6b6a3a03ebb0a3 | ec28b908e0cf419083ce1ec8235a71ee542d85c8 | refs/heads/main | 2023-08-21T00:09:21.482963 | 2021-10-01T05:24:17 | 2021-10-01T05:24:17 | 412,339,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import glob
import importlib
import os
import pathlib
import sys
class ImportUtil:
    """Helpers for dynamically importing every module found in a directory."""

    @staticmethod
    def import_modules_from_directory_as_list(module_directory: str) -> list:
        """Import each ``*.py`` file in *module_directory* (except
        ``__init__.py``) and return the imported module objects.

        The directory is appended to ``sys.path`` (at most once -- the
        original appended on every call, growing the path with duplicates)
        so ``importlib.import_module`` can resolve the files by bare name.
        """
        if module_directory not in sys.path:
            sys.path.append(module_directory)
        py_files: list = glob.glob(os.path.join(module_directory, '*.py'))
        modules: list = []
        for py_file in py_files:
            module_name = pathlib.Path(py_file).stem
            # Skip the package marker; import everything else.
            if module_name != '__init__':
                modules.append(importlib.import_module(module_name))
        return modules
| [
"abdu.hasib@Infolytx.com"
] | abdu.hasib@Infolytx.com |
5da1e0d740b5a0dba162b287dd9c836577381ca3 | 753410fcbb13267827464af08120543279a4d7dc | /ride_height.py | 5c20d103670047c48b5352f7cd5e4c0247ce6284 | [] | no_license | donour/sharks | 93b18c34419bbd408d5022188f3f7baae233785c | d243408cb68cda18f8121d39ada393d49a01af91 | refs/heads/master | 2020-06-02T07:50:35.925352 | 2013-11-16T21:07:02 | 2013-11-16T21:07:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | #!/usr/bin/python
import quick2wire.i2c as i2c
bus = i2c.I2CMaster()
adc_address1 = 0x68
adc_address2 = 0x69
varDivisior = 16 # from pdf sheet on adc addresses and config
varMultiplier = (2.4705882/varDivisior)/1000
def changechannel(address, adcConfig):
    """Write the one-byte configuration *adcConfig* to the ADC at *address*."""
    bus.transaction(i2c.writing_bytes(address, adcConfig))
def getadcreading(address):
    """Read one conversion result from the ADC at *address* and return it
    scaled by ``varMultiplier``.

    Reads 4 bytes (high, mid, low, status) but combines only the top two,
    i.e. treats the result as a 16-bit word.
    """
    h, m, l ,s = bus.transaction(i2c.reading(address,4))[0]
    t = h << 8 | m
    # check if positive or negative number and invert if needed
    # NOTE(review): the sign test `h > 128` and the 0x020000 two's-complement
    # constant look inconsistent with a 16-bit word -- verify against the
    # ADC datasheet referenced in the config comment above.
    if (h > 128):
        t = ~(0x020000 - t)
    return t * varMultiplier
def setadc(addr):
    """Build the ADC configuration byte and write it to the converter at
    *addr*.

    Sample rate code 2 (15 samples/s) and gain code 0 (2^0 = x1); the
    meaning of ``mode`` bit 4 is presumably the conversion mode -- confirm
    against the converter datasheet.
    """
    mode = 1
    sr = 2  # 0:240, 1:60, 2:15, 3:3.75
    gain = 0  # gain = 2^x
    config_register = 0;
    config_register |= 0 << 5
    config_register |= mode << 4
    config_register |= sr << 2
    config_register |= gain
    bus.transaction(i2c.writing_bytes(addr, config_register))
# One-time setup: configure the first converter and write config byte 0x9C
# to the second one.
start = 0.0
setadc(adc_address1)
changechannel(adc_address2, 0x9C)
def height():
    """Return the current ride-height reading (scaled ADC value) from ADC 1."""
    return getadcreading(adc_address1)
if __name__ == "__main__":
    # Simple console display: overwrite a single line with the latest
    # reading roughly ten times per second.
    import sys,time
    while True:
        s = "\r%.6f" % height()
        sys.stdout.write(s)
        time.sleep(0.1)
| [
"donour@cs.unm.edu"
] | donour@cs.unm.edu |
54d614a2458bb581353bf4aeba881b56472296c9 | 4d69d32a3dd1c45d3e0dafc9bacd4f1de5c6ea6d | /tests/handlers/test_sync.py | a01ab471f5944031638f4f1ffee6f8dc07131890 | [
"Apache-2.0"
] | permissive | codemonk-sunhui/synapse | 6fd1ab9359cbd0c84852cbce5c65858d98c60e66 | 7824a751a769e692113d0dc19fe9a3ba99007940 | refs/heads/master | 2020-03-27T06:52:49.273620 | 2018-11-23T10:19:46 | 2018-11-23T10:19:46 | 146,142,219 | 0 | 0 | Apache-2.0 | 2018-08-26T01:33:32 | 2018-08-26T01:33:32 | null | UTF-8 | Python | false | false | 2,628 | py | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.errors import Codes, ResourceLimitError
from synapse.api.filtering import DEFAULT_FILTER_COLLECTION
from synapse.handlers.sync import SyncConfig, SyncHandler
from synapse.types import UserID
import tests.unittest
import tests.utils
from tests.utils import setup_test_homeserver
class SyncTestCase(tests.unittest.TestCase):
    """ Tests Sync Handler. """

    @defer.inlineCallbacks
    def setUp(self):
        # Build a throw-away homeserver and grab the handler/store under test.
        self.hs = yield setup_test_homeserver(self.addCleanup)
        self.sync_handler = SyncHandler(self.hs)
        self.store = self.hs.get_datastore()

    @defer.inlineCallbacks
    def test_wait_for_sync_for_user_auth_blocking(self):
        """Sync succeeds for an MAU-tracked user, but raises
        ResourceLimitError when the server is disabled or the MAU cap
        blocks an untracked user."""
        user_id1 = "@user1:server"
        user_id2 = "@user2:server"
        sync_config = self._generate_sync_config(user_id1)
        self.hs.config.limit_usage_by_mau = True
        self.hs.config.max_mau_value = 1

        # Check that the happy case does not throw errors
        yield self.store.upsert_monthly_active_user(user_id1)
        yield self.sync_handler.wait_for_sync_for_user(sync_config)

        # Test that global lock works
        self.hs.config.hs_disabled = True
        with self.assertRaises(ResourceLimitError) as e:
            yield self.sync_handler.wait_for_sync_for_user(sync_config)
        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)

        self.hs.config.hs_disabled = False

        # user2 was never marked active, so with max_mau_value=1 their sync
        # must be rejected.
        sync_config = self._generate_sync_config(user_id2)

        with self.assertRaises(ResourceLimitError) as e:
            yield self.sync_handler.wait_for_sync_for_user(sync_config)
        self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEED)

    def _generate_sync_config(self, user_id):
        # Minimal SyncConfig for the given "@local:domain" user id.
        return SyncConfig(
            user=UserID(user_id.split(":")[0][1:], user_id.split(":")[1]),
            filter_collection=DEFAULT_FILTER_COLLECTION,
            is_guest=False,
            request_key="request_key",
            device_id="device_id",
        )
| [
"neil@matrix.org"
] | neil@matrix.org |
b09e483727dc8717cd45a0f85c80edd42152255e | ec09ccf35bcc968b7b1b6cd2aec5a150ecbc2e01 | /test/hpa_test.py | 475e98ef79603c23cd3ba21807622bbec5f1fd3a | [
"CC0-1.0"
] | permissive | ssupdoc/k8-simulation | 1717b35d8aa1b6ab0076ceadacdfe475f81afcd3 | 7834d3faaed3e86b547554c6228540c316621011 | refs/heads/master | 2022-12-23T21:58:27.083986 | 2020-10-04T01:05:24 | 2020-10-04T01:05:24 | 285,696,159 | 1 | 0 | CC0-1.0 | 2020-10-04T01:05:25 | 2020-08-07T00:03:20 | Python | UTF-8 | Python | false | false | 1,068 | py | from src.api_server import APIServer
from src.load_balancer import LoadBalancer
from src.hpa import HPA
from src.pod import Pod
import unittest
# Fixture data for the HPA tests.  DEPLOYMENT_INFO / HPA_INFO field meanings
# come from the src.* constructors -- presumably (label, replicas, cpu) and
# (label, target-util %, ...); confirm against src.hpa / src.api_server.
DEPLOYMENT_INFO = ['Deployment_AA', 2, 2]
HPA_INFO = ['Deployment_AA', 75, 10, 5]
_hpaCtlLoop = 2
# An API server with one deployment registered.
apiServer = APIServer()
apiServer.CreateDeployment(DEPLOYMENT_INFO)
deployment = apiServer.etcd.deploymentList[0]
# One RUNNING pod with a single request in flight and one CPU consumed;
# the pod list deliberately contains the same pod twice.
podName = deployment.deploymentLabel + "_" + str(apiServer.GeneratePodName())
pod = Pod(podName, deployment.cpuCost, deployment.deploymentLabel)
pod.status = "RUNNING"
pod.requests = [ 'Req 1' ]
pod.available_cpu -= 1
podList = [pod, pod]
hpa = HPA(apiServer, _hpaCtlLoop, HPA_INFO)
class TestUtilisation(unittest.TestCase):
    """Average-utilisation calculation over the fixture pod list."""
    def test_average_utilisation(self):
        # Each fixture pod has one CPU consumed, so the expected average is 0.5.
        load = hpa.calculateAvgUtil(deployment, podList)
        self.assertEqual(load, 0.5)
class TestController(unittest.TestCase):
    """updateController should store the new gains on both the controller
    and the HPA itself."""
    def test_controller_update(self):
        hpa.updateController(10, 12)
        self.assertEqual(hpa.controller.kp, 10)
        self.assertEqual(hpa.controller.ki, 12)
        self.assertEqual(hpa.pValue, 10)
        self.assertEqual(hpa.iValue, 12)
| [
"sriramemailsyou@gmail.com"
] | sriramemailsyou@gmail.com |
c0f8ac78473e82ea1426614dacf74fc4180ef9e9 | dd44f2823b7ea9c8e20dce41844627d9371989d8 | /macroPCA-old.py | 2495a1fe23fc8434df5096a63eaf432a0cd4f3a4 | [] | no_license | jmfreeland/macroAnalysis | affa7a3432f76bcee3d968bfd3ec701a7b3c1871 | 4a0e551832f4f2de7fbd2e4060e69ae7e7096068 | refs/heads/master | 2022-12-05T01:28:11.669592 | 2020-08-27T08:04:39 | 2020-08-27T08:04:39 | 257,497,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | # -*- coding: utf-8 -*-
"""
Macro PCA:
Objective: Identify high-level drivers of movement in a collection of macro-sensitive tradable securities
outputs: PCA loadings, time-series of factor movements
todo: try with fractionally differenced series because why not
-Find stationary series for factor 1 and adjust risk appetite accordingly
-Find trends for all factors, invest in alighment with trends
.
-
"""
#import required models
import pandas as pd
import yfinance as yf
import numpy as np
from sklearn.decomposition import PCA
import seaborn as sns
import matplotlib.pyplot as plt
#set tickers for data gathering, collect data
#test_stocks = ['SPY', 'TLT', 'IEF','XLU','IGV','QQQ','GLD','GDX','MBB','XBI','MOO','TIP','EEM','UUP']
test_stocks = ['SPY','QQQ','TLT','IEF','SHY','TIP','AGG','LQD','IWM','IVW','IVE','IYF','IYE','IYM']
tickerData = {}
tickerDF = {}
for ticker in test_stocks:
tickerData[ticker] = yf.Ticker(ticker)
#get the historical prices for this ticker
tickerDF[ticker] = tickerData[ticker].history(period='1d', start='2000-1-1', end='2020-12-31')
#choose column on which to run PCA, organize a historical table for analysis
test_col = 'Close'
pca_data = pd.DataFrame(columns=test_stocks)
for ticker in test_stocks:
pca_data.loc[:,ticker] = tickerDF[ticker].loc[:,test_col]
#diagnostic - see when your series begin
for ticker in test_stocks:
print(ticker , " " , tickerDF[ticker].index[1])
#create pca form sklearn and run it on outrights
pca = PCA(n_components=len(test_stocks)-1)
test_data = pca_data.dropna()
outright_pca = pca.fit(test_data)
outright_loadings = outright_pca.components_
outright_variances = outright_pca.explained_variance_
outright_stdev = np.sqrt(outright_variances)
#for i in range(0,outright_loadings.shape[0]):
for i in range(0,5):
fig = plt.figure(figsize=(16,9), dpi=300)
fig.suptitle(('Macro PCA loadings: factor ' + str(i+1)))
sns.barplot(x=test_stocks,y=outright_loadings[i])
outright_time_series = pd.DataFrame(outright_pca.transform(test_data), index=test_data.index)
outright_time_series.columns = outright_time_series.columns+1
fig = plt.figure(figsize=(16,9), dpi=300)
time_plot = sns.lineplot(data=outright_time_series, dashes=False)
fig = plt.figure(figsize=(16,9), dpi=300)
time_plot = sns.lineplot(data=outright_time_series.iloc[:,1:], dashes=False)
| [
"josh.freeland@gmail.com"
] | josh.freeland@gmail.com |
5e67a2f09a383a84ad7ca7dd67a4e00604fb28ab | 313869ac13ee6cfdaf2de5cb76adf3dec981513f | /venv/Lib/site-packages/pandas/core/groupby/groupby.py | 580a12327ddf5080bc0457a1effcbeade1d3ea69 | [] | no_license | praful-pra1/Machine-Learning-GMCA | c4a5a4fa49b17bd0461d17b40dc169970ee2acde | f93dcf2b8557be4c57ea99f4e8a3756140d2ba6c | refs/heads/master | 2022-12-25T01:09:08.318614 | 2020-10-04T04:57:45 | 2020-10-04T04:57:45 | 301,042,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95,215 | py | """
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
from pandas.core.util.numba_ import maybe_use_numba
# Shared "See Also" docstring fragment; %-formatted with the method name
# via the @Substitution decorator before being appended by @Appender.
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
# Docstring templates for GroupBy.apply; `template` is str.format-ted with
# the input kind ("dataframe"/"series") and the matching examples block.
# Fix: "a wide range of method that" -> "a wide range of methods that".
_apply_docs = dict(
    template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
    dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
    series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
# Docstring template for simple groupby aggregations (sum/prod/min/max...);
# str.format-ted with the function name and default numeric_only/min_count.
_groupby_agg_method_template = """
Compute {fname} of group values.
Parameters
----------
numeric_only : bool, default {no}
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default {mc}
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed {fname} of values within each group.
"""
# Docstring template for GroupBy.pipe; %-substituted with the class name,
# a versionadded note, and class-specific examples.
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
# Docstring template for GroupBy.transform; %-substituted with the class
# name (Series/DataFrame) by @Substitution + @Appender on the subclasses.
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
# Docstring template for GroupBy.aggregate; str.format-ted with the class
# name and class-specific examples (note the doubled {{...}} to survive
# .format on the engine_kwargs default dict).
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a {klass} or when passed to {klass}.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
{klass}
See Also
--------
{klass}.groupby.apply
{klass}.groupby.transform
{klass}.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
{examples}
"""
class GroupByPlot(PandasObject):
    """
    Class implementing the .plot attribute for groupby objects.

    Calling the accessor (or any attribute of it) builds a closure that
    invokes the corresponding plot call on each group via ``apply``.
    """

    def __init__(self, groupby):
        self._groupby = groupby

    def __call__(self, *args, **kwargs):
        def do_plot(grp):
            return grp.plot(*args, **kwargs)

        # name the closure "plot" so downstream plot-method detection works
        do_plot.__name__ = "plot"
        return self._groupby.apply(do_plot)

    def __getattr__(self, name: str):
        def attr(*args, **kwargs):
            def invoke(grp):
                plot_method = getattr(grp.plot, name)
                return plot_method(*args, **kwargs)

            return self._groupby.apply(invoke)

        return attr
@contextmanager
def _group_selection_context(groupby):
    """
    Set / reset the _group_selection_context.

    Fix: wrap the ``yield`` in try/finally so the group selection is
    reset even when the managed block raises; previously an exception
    left the cached group selection set on the groupby object.
    """
    groupby._set_group_selection()
    try:
        yield groupby
    finally:
        groupby._reset_group_selection()
# Accepted types for the ``keys`` argument of ``groupby``: a single label,
# a list of labels, a callable (or list of callables) mapping labels to
# group names, or a mapping of labels to group names.
_KeysArgType = Union[
    Hashable,
    List[Hashable],
    Callable[[Hashable], Hashable],
    List[Callable[[Hashable], Hashable]],
    Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
    """
    Base class implementing the split-apply-combine machinery shared by
    the user-facing GroupBy subclasses (see pandas.core.groupby.generic).
    """

    # Columns implicitly selected when grouping by in-axis keys; see
    # _set_group_selection / _reset_group_selection.
    _group_selection = None
    # Names of obj methods that may be dispatched to groups via _make_wrapper.
    _apply_allowlist: FrozenSet[str] = frozenset()

    def __init__(
        self,
        obj: FrameOrSeries,
        keys: Optional[_KeysArgType] = None,
        axis: int = 0,
        level=None,
        grouper: "Optional[ops.BaseGrouper]" = None,
        exclusions=None,
        selection=None,
        as_index: bool = True,
        sort: bool = True,
        group_keys: bool = True,
        squeeze: bool = False,
        observed: bool = False,
        mutated: bool = False,
        dropna: bool = True,
    ):
        # Store the grouping configuration; resolve the grouper lazily via
        # get_grouper when one was not supplied by the caller.
        self._selection = selection
        assert isinstance(obj, NDFrame), type(obj)
        self.level = level
        if not as_index:
            # as_index=False only makes sense for DataFrame grouped on axis 0
            if not isinstance(obj, DataFrame):
                raise TypeError("as_index=False only valid with DataFrame")
            if axis != 0:
                raise ValueError("as_index=False only valid for axis=0")
        self.as_index = as_index
        self.keys = keys
        self.sort = sort
        self.group_keys = group_keys
        self.squeeze = squeeze
        self.observed = observed
        self.mutated = mutated
        self.dropna = dropna
        if grouper is None:
            from pandas.core.groupby.grouper import get_grouper

            grouper, exclusions, obj = get_grouper(
                obj,
                keys,
                axis=axis,
                level=level,
                sort=sort,
                observed=observed,
                mutated=self.mutated,
                dropna=self.dropna,
            )
        self.obj = obj
        self.axis = obj._get_axis_number(axis)
        self.grouper = grouper
        self.exclusions = set(exclusions) if exclusions else set()

    def __len__(self) -> int:
        # number of groups
        return len(self.groups)

    def __repr__(self) -> str:
        # TODO: Better repr for GroupBy object
        return object.__repr__(self)

    def _assure_grouper(self):
        """
        We create the grouper on instantiation sub-classes may have a
        different policy.
        """
        pass

    @property
    def groups(self):
        """
        Dict {group name -> group labels}.
        """
        self._assure_grouper()
        return self.grouper.groups

    @property
    def ngroups(self):
        # Number of distinct groups, delegated to the grouper.
        self._assure_grouper()
        return self.grouper.ngroups

    @property
    def indices(self):
        """
        Dict {group name -> group indices}.
        """
        self._assure_grouper()
        return self.grouper.indices

    def _get_indices(self, names):
        """
        Safe get multiple indices, translate keys for
        datelike to underlying repr.
        """

        def get_converter(s):
            # possibly convert to the actual key types
            # in the indices, could be a Timestamp or a np.datetime64
            if isinstance(s, datetime.datetime):
                return lambda key: Timestamp(key)
            elif isinstance(s, np.datetime64):
                return lambda key: Timestamp(key).asm8
            else:
                return lambda key: key

        if len(names) == 0:
            return []

        if len(self.indices) > 0:
            index_sample = next(iter(self.indices))
        else:
            index_sample = None  # Dummy sample

        name_sample = names[0]
        if isinstance(index_sample, tuple):
            if not isinstance(name_sample, tuple):
                msg = "must supply a tuple to get_group with multiple grouping keys"
                raise ValueError(msg)
            if not len(name_sample) == len(index_sample):
                try:
                    # If the original grouper was a tuple
                    return [self.indices[name] for name in names]
                except KeyError as err:
                    # turns out it wasn't a tuple
                    msg = (
                        "must supply a same-length tuple to get_group "
                        "with multiple grouping keys"
                    )
                    raise ValueError(msg) from err

            converters = [get_converter(s) for s in index_sample]
            names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
        else:
            converter = get_converter(index_sample)
            names = (converter(name) for name in names)

        return [self.indices.get(name, []) for name in names]

    def _get_index(self, name):
        """
        Safe get index, translate keys for datelike to underlying repr.
        """
        return self._get_indices([name])[0]

    @cache_readonly
    def _selected_obj(self):
        # Note: _selected_obj is always just `self.obj` for SeriesGroupBy
        if self._selection is None or isinstance(self.obj, Series):
            if self._group_selection is not None:
                return self.obj[self._group_selection]
            return self.obj
        else:
            return self.obj[self._selection]

    def _reset_group_selection(self):
        """
        Clear group based selection.

        Used for methods needing to return info on each group regardless of
        whether a group selection was previously set.
        """
        if self._group_selection is not None:
            # GH12839 clear cached selection too when changing group selection
            self._group_selection = None
            self._reset_cache("_selected_obj")

    def _set_group_selection(self):
        """
        Create group based selection.

        Used when selection is not passed directly but instead via a grouper.

        NOTE: this should be paired with a call to _reset_group_selection
        """
        grp = self.grouper
        if not (
            self.as_index
            and getattr(grp, "groupings", None) is not None
            and self.obj.ndim > 1
            and self._group_selection is None
        ):
            return

        groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
        if len(groupers):
            # GH12839 clear selected obj cache when group selection changes
            ax = self.obj._info_axis
            self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
            self._reset_cache("_selected_obj")

    def _set_result_index_ordered(self, result):
        # set the result index on the passed values object and
        # return the new object, xref 8046

        # the values/counts are repeated according to the group index
        # shortcut if we have an already ordered grouper
        if not self.grouper.is_monotonic:
            index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
            result.set_axis(index, axis=self.axis, inplace=True)
            result = result.sort_index(axis=self.axis)

        result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
        return result

    def _dir_additions(self):
        # extra names offered in tab-completion: allowlisted obj methods
        return self.obj._dir_additions() | self._apply_allowlist

    def __getattr__(self, attr: str):
        # Fall back to column selection (self[attr]) for unknown attributes.
        if attr in self._internal_names_set:
            return object.__getattribute__(self, attr)
        if attr in self.obj:
            return self[attr]

        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{attr}'"
        )

    @Substitution(
        klass="GroupBy",
        versionadded=".. versionadded:: 0.21.0",
        examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
    )
    @Appender(_pipe_template)
    def pipe(self, func, *args, **kwargs):
        return com.pipe(self, func, *args, **kwargs)

    # the .plot accessor, implemented by GroupByPlot above
    plot = property(GroupByPlot)

    def _make_wrapper(self, name):
        """
        Build a callable that dispatches ``obj.<name>`` group-wise, used for
        the allowlisted methods surfaced via __getattr__ on subclasses.
        """
        assert name in self._apply_allowlist

        self._set_group_selection()

        # need to setup the selection
        # as are not passed directly but in the grouper
        f = getattr(self._obj_with_exclusions, name)
        if not isinstance(f, types.MethodType):
            return self.apply(lambda self: getattr(self, name))

        f = getattr(type(self._obj_with_exclusions), name)
        sig = inspect.signature(f)

        def wrapper(*args, **kwargs):
            # a little trickery for aggregation functions that need an axis
            # argument
            if "axis" in sig.parameters:
                if kwargs.get("axis", None) is None:
                    kwargs["axis"] = self.axis

            def curried(x):
                return f(x, *args, **kwargs)

            # preserve the name so we can detect it when calling plot methods,
            # to avoid duplicates
            curried.__name__ = name

            # special case otherwise extra plots are created when catching the
            # exception below
            if name in base.plotting_methods:
                return self.apply(curried)

            try:
                return self._python_apply_general(curried, self._obj_with_exclusions)
            except TypeError as err:
                if not re.search(
                    "reduction operation '.*' not allowed for this dtype", str(err)
                ):
                    # We don't have a cython implementation
                    # TODO: is the above comment accurate?
                    raise

            if self.obj.ndim == 1:
                # this can be called recursively, so need to raise ValueError
                raise ValueError

            # GH#3688 try to operate item-by-item
            result = self._aggregate_item_by_item(name, *args, **kwargs)
            return result

        wrapper.__name__ = name
        return wrapper

    def get_group(self, name, obj=None):
        """
        Construct DataFrame from group with provided name.

        Parameters
        ----------
        name : object
            The name of the group to get as a DataFrame.
        obj : DataFrame, default None
            The DataFrame to take the DataFrame out of. If
            it is None, the object groupby was called on will
            be used.

        Returns
        -------
        group : same type as obj
        """
        if obj is None:
            obj = self._selected_obj

        inds = self._get_index(name)
        if not len(inds):
            raise KeyError(name)

        return obj._take_with_is_copy(inds, axis=self.axis)

    def __iter__(self):
        """
        Groupby iterator.

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        return self.grouper.get_iterator(self.obj, axis=self.axis)

    @Appender(
        _apply_docs["template"].format(
            input="dataframe", examples=_apply_docs["dataframe_examples"]
        )
    )
    def apply(self, func, *args, **kwargs):
        func = self._is_builtin_func(func)

        # this is needed so we don't try and wrap strings. If we could
        # resolve functions to their callable functions prior, this
        # wouldn't be needed
        if args or kwargs:
            if callable(func):

                @wraps(func)
                def f(g):
                    with np.errstate(all="ignore"):
                        return func(g, *args, **kwargs)

            elif hasattr(nanops, "nan" + func):
                # TODO: should we wrap this in to e.g. _is_builtin_func?
                f = getattr(nanops, "nan" + func)

            else:
                raise ValueError(
                    "func must be a callable if args or kwargs are supplied"
                )
        else:
            f = func

        # ignore SettingWithCopy here in case the user mutates
        with option_context("mode.chained_assignment", None):
            try:
                result = self._python_apply_general(f, self._selected_obj)
            except TypeError:
                # gh-20949
                # try again, with .apply acting as a filtering
                # operation, by excluding the grouping column
                # This would normally not be triggered
                # except if the udf is trying an operation that
                # fails on *some* columns, e.g. a numeric operation
                # on a string grouper column
                with _group_selection_context(self):
                    return self._python_apply_general(f, self._selected_obj)

        return result

    def _python_apply_general(
        self, f: F, data: FrameOrSeriesUnion
    ) -> FrameOrSeriesUnion:
        """
        Apply function f in python space

        Parameters
        ----------
        f : callable
            Function to apply
        data : Series or DataFrame
            Data to apply f to

        Returns
        -------
        Series or DataFrame
            data after applying f
        """
        keys, values, mutated = self.grouper.apply(f, data, self.axis)

        return self._wrap_applied_output(
            keys, values, not_indexed_same=mutated or self.mutated
        )

    def _iterate_slices(self) -> Iterable[Series]:
        # subclass responsibility: yield 1-D slices of the grouped object
        raise AbstractMethodError(self)

    def transform(self, func, *args, **kwargs):
        # subclass responsibility
        raise AbstractMethodError(self)

    def _cumcount_array(self, ascending: bool = True):
        """
        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from length of group - 1 to 0.

        Notes
        -----
        this is currently implementing sort=False
        (though the default is sort=True) for groupby in general
        """
        ids, _, ngroups = self.grouper.group_info
        sorter = get_group_index_sorter(ids, ngroups)
        ids, count = ids[sorter], len(ids)

        if count == 0:
            return np.empty(0, dtype=np.int64)

        # run marks the start of each group in the sorted id array
        run = np.r_[True, ids[:-1] != ids[1:]]
        rep = np.diff(np.r_[np.nonzero(run)[0], count])
        out = (~run).cumsum()

        if ascending:
            out -= np.repeat(out[run], rep)
        else:
            out = np.repeat(out[np.r_[run[1:], True]], rep) - out

        # undo the sort to align with the original row order
        rev = np.empty(count, dtype=np.intp)
        rev[sorter] = np.arange(count, dtype=np.intp)
        return out[rev].astype(np.int64, copy=False)

    def _transform_should_cast(self, func_nm: str) -> bool:
        """
        Parameters
        ----------
        func_nm: str
            The name of the aggregation function being performed

        Returns
        -------
        bool
            Whether transform should attempt to cast the result of aggregation
        """
        filled_series = self.grouper.size().fillna(0)
        assert filled_series is not None
        return filled_series.gt(0).any() and func_nm not in base.cython_cast_blocklist

    def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
        # Run a cython transform kernel over each 1-D slice, skipping slices
        # the kernel does not support.
        output: Dict[base.OutputKey, np.ndarray] = {}

        for idx, obj in enumerate(self._iterate_slices()):
            name = obj.name
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue

            try:
                result, _ = self.grouper.transform(obj.values, how, **kwargs)
            except NotImplementedError:
                continue

            if self._transform_should_cast(how):
                result = maybe_cast_result(result, obj, how=how)

            key = base.OutputKey(label=name, position=idx)
            output[key] = result

        if len(output) == 0:
            raise DataError("No numeric types to aggregate")

        return self._wrap_transformed_output(output)

    def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
        raise AbstractMethodError(self)

    def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
        raise AbstractMethodError(self)

    def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
        raise AbstractMethodError(self)

    def _agg_general(
        self,
        numeric_only: bool = True,
        min_count: int = -1,
        *,
        alias: str,
        npfunc: Callable,
    ):
        # Shared implementation of the named aggregations (sum/min/max/...):
        # try the cython kernel `alias` first, falling back to applying
        # `npfunc` in python space for unsupported dtypes.
        self._set_group_selection()

        # try a cython aggregation if we can
        try:
            return self._cython_agg_general(
                how=alias, alt=npfunc, numeric_only=numeric_only, min_count=min_count,
            )
        except DataError:
            pass
        except NotImplementedError as err:
            if "function is not implemented for this dtype" in str(
                err
            ) or "category dtype not supported" in str(err):
                # raised in _get_cython_function, in some cases can
                # be trimmed by implementing cython funcs for more dtypes
                pass
            else:
                raise

        # apply a non-cython aggregation
        result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
        return result

    def _cython_agg_general(
        self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
    ):
        output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
        # Ideally we would be able to enumerate self._iterate_slices and use
        # the index from enumeration as the key of output, but ohlc in particular
        # returns a (n x 4) array. Output requires 1D ndarrays as values, so we
        # need to slice that up into 1D arrays
        idx = 0
        for obj in self._iterate_slices():
            name = obj.name
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue

            result, agg_names = self.grouper.aggregate(
                obj._values, how, min_count=min_count
            )

            if agg_names:
                # e.g. ohlc
                assert len(agg_names) == result.shape[1]
                for result_column, result_name in zip(result.T, agg_names):
                    key = base.OutputKey(label=result_name, position=idx)
                    output[key] = maybe_cast_result(result_column, obj, how=how)
                    idx += 1
            else:
                assert result.ndim == 1
                key = base.OutputKey(label=name, position=idx)
                output[key] = maybe_cast_result(result, obj, how=how)
                idx += 1

        if len(output) == 0:
            raise DataError("No numeric types to aggregate")

        return self._wrap_aggregated_output(output)

    def _python_agg_general(
        self, func, *args, engine="cython", engine_kwargs=None, **kwargs
    ):
        func = self._is_builtin_func(func)
        if engine != "numba":
            f = lambda x: func(x, *args, **kwargs)

        # iterate through "columns" ex exclusions to populate output dict
        output: Dict[base.OutputKey, np.ndarray] = {}

        for idx, obj in enumerate(self._iterate_slices()):
            name = obj.name
            if self.grouper.ngroups == 0:
                # agg_series below assumes ngroups > 0
                continue

            if maybe_use_numba(engine):
                result, counts = self.grouper.agg_series(
                    obj,
                    func,
                    *args,
                    engine=engine,
                    engine_kwargs=engine_kwargs,
                    **kwargs,
                )
            else:
                try:
                    # if this function is invalid for this dtype, we will ignore it.
                    result, counts = self.grouper.agg_series(obj, f)
                except TypeError:
                    continue

            assert result is not None
            key = base.OutputKey(label=name, position=idx)
            output[key] = maybe_cast_result(result, obj, numeric_only=True)

        if len(output) == 0:
            return self._python_apply_general(f, self._selected_obj)

        if self.grouper._filter_empty_groups:
            mask = counts.ravel() > 0
            for key, result in output.items():
                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = ensure_float(values)

                output[key] = maybe_cast_result(values[mask], result)

        return self._wrap_aggregated_output(output)

    def _concat_objects(self, keys, values, not_indexed_same: bool = False):
        # Recombine per-group results into a single Series/DataFrame.
        from pandas.core.reshape.concat import concat

        def reset_identity(values):
            # reset the identities of the components
            # of the values to prevent aliasing
            for v in com.not_none(*values):
                ax = v._get_axis(self.axis)
                ax._reset_identity()
            return values

        if not not_indexed_same:
            result = concat(values, axis=self.axis)
            ax = self._selected_obj._get_axis(self.axis)

            # this is a very unfortunate situation
            # we can't use reindex to restore the original order
            # when the ax has duplicates
            # so we resort to this
            # GH 14776, 30667
            if ax.has_duplicates:
                indexer, _ = result.index.get_indexer_non_unique(ax.values)
                indexer = algorithms.unique1d(indexer)
                result = result.take(indexer, axis=self.axis)
            else:
                result = result.reindex(ax, axis=self.axis)

        elif self.group_keys:
            values = reset_identity(values)
            if self.as_index:
                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names

                result = concat(
                    values,
                    axis=self.axis,
                    keys=group_keys,
                    levels=group_levels,
                    names=group_names,
                    sort=False,
                )
            else:
                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(len(values)))
                result = concat(values, axis=self.axis, keys=keys)
        else:
            values = reset_identity(values)
            result = concat(values, axis=self.axis)

        if isinstance(result, Series) and self._selection_name is not None:
            result.name = self._selection_name

        return result

    def _apply_filter(self, indices, dropna):
        # Given the row indices that survive a filter, either take them
        # (dropna=True) or NaN-out the rest (dropna=False).
        if len(indices) == 0:
            indices = np.array([], dtype="int64")
        else:
            indices = np.sort(np.concatenate(indices))
        if dropna:
            filtered = self._selected_obj.take(indices, axis=self.axis)
        else:
            mask = np.empty(len(self._selected_obj.index), dtype=bool)
            mask.fill(False)
            mask[indices.astype(int)] = True
            # mask fails to broadcast when passed to where; broadcast manually.
            mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
            filtered = self._selected_obj.where(mask)  # Fill with NaNs.
        return filtered
# To track operations that expand dimensions, like ohlc
# (bound to NDFrame so both Series- and DataFrame-shaped outputs qualify).
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(bool)
return vals.view(np.uint8), bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
numeric_only=False,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
bool
"""
return self._bool_agg("all", skipna)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def count(self):
        """
        Compute count of group, excluding missing values.

        Returns
        -------
        Series or DataFrame
            Count of values within each group.
        """
        # Defined here (raising) only so the method appears in the API docs;
        # concrete subclasses provide the real implementation.
        raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
return self._get_cythonized_result(
"group_var_float64",
aggregate=True,
needs_counts=True,
needs_values=True,
needs_2d=True,
cython_dtype=np.dtype(np.float64),
post_processing=lambda vals, inference: np.sqrt(vals),
ddof=ddof,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
    @Substitution(name="groupby")
    @Appender(_common_see_also)
    def sem(self, ddof: int = 1):
        """
        Compute standard error of the mean of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        Returns
        -------
        Series or DataFrame
            Standard error of the mean of values within each group.
        """
        # sem = std / sqrt(count), applied group-wise.
        result = self.std(ddof=ddof)
        if result.ndim == 1:
            result /= np.sqrt(self.count())
        else:
            # Only scale the data columns; columns listed in self.exclusions
            # (in-axis grouping columns) are left untouched.
            cols = result.columns.get_indexer_for(
                result.columns.difference(self.exclusions).unique()
            )
            # TODO(GH-22046) - setting with iloc broken if labels are not unique
            # .values to remove labels
            result.iloc[:, cols] = (
                result.iloc[:, cols].values / np.sqrt(self.count().iloc[:, cols]).values
            )
        return result
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self) -> FrameOrSeriesUnion:
"""
Compute group sizes.
Returns
-------
DataFrame or Series
Number of rows in each group as a Series if as_index is True
or a DataFrame if as_index is False.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
if not self.as_index:
result = result.rename("size").reset_index()
return self._reindex_output(result, fill_value=0)
@doc(_groupby_agg_method_template, fname="sum", no=True, mc=0)
def sum(self, numeric_only: bool = True, min_count: int = 0):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="add", npfunc=np.sum
)
@doc(_groupby_agg_method_template, fname="prod", no=True, mc=0)
def prod(self, numeric_only: bool = True, min_count: int = 0):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod
)
@doc(_groupby_agg_method_template, fname="min", no=False, mc=-1)
def min(self, numeric_only: bool = False, min_count: int = -1):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="min", npfunc=np.min
)
@doc(_groupby_agg_method_template, fname="max", no=False, mc=-1)
def max(self, numeric_only: bool = False, min_count: int = -1):
return self._agg_general(
numeric_only=numeric_only, min_count=min_count, alias="max", npfunc=np.max
)
@doc(_groupby_agg_method_template, fname="first", no=False, mc=-1)
def first(self, numeric_only: bool = False, min_count: int = -1):
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
"""Helper function for first item that isn't NA.
"""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="first",
npfunc=first_compat,
)
@doc(_groupby_agg_method_template, fname="last", no=False, mc=-1)
def last(self, numeric_only: bool = False, min_count: int = -1):
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
"""Helper function for last item that isn't NA.
"""
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="last",
npfunc=last_compat,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
    def resample(self, rule, *args, **kwargs):
        """
        Provide resampling when using a TimeGrouper.

        Given a grouper, the function resamples it according to a string
        "string" -> "frequency".

        See the :ref:`frequency aliases <timeseries.offset_aliases>`
        documentation for more details.

        Parameters
        ----------
        rule : str or DateOffset
            The offset string or object representing target grouper conversion.
        *args, **kwargs
            Possible arguments are `how`, `fill_method`, `limit`, `kind` and
            `on`, and other arguments of `TimeGrouper`.

        Returns
        -------
        Grouper
            Return a new grouper with our resampler appended.

        See Also
        --------
        Grouper : Specify a frequency to resample with when
            grouping by a key.
        DatetimeIndex.resample : Frequency conversion and resampling of
            time series.

        Examples
        --------
        >>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
        >>> df = pd.DataFrame(data=4 * [range(2)],
        ...                   index=idx,
        ...                   columns=['a', 'b'])
        >>> df.iloc[2, 0] = 5
        >>> df
                            a  b
        2000-01-01 00:00:00  0  1
        2000-01-01 00:01:00  0  1
        2000-01-01 00:02:00  5  1
        2000-01-01 00:03:00  0  1

        Downsample the DataFrame into 3 minute bins and sum the values of
        the timestamps falling into a bin.

        >>> df.groupby('a').resample('3T').sum()
                                 a  b
        a
        0   2000-01-01 00:00:00  0  2
            2000-01-01 00:03:00  0  1
        5   2000-01-01 00:00:00  5  1

        Upsample the series into 30 second bins.

        >>> df.groupby('a').resample('30S').sum()
                            a  b
        a
        0   2000-01-01 00:00:00  0  1
            2000-01-01 00:00:30  0  0
            2000-01-01 00:01:00  0  1
            2000-01-01 00:01:30  0  0
            2000-01-01 00:02:00  0  0
            2000-01-01 00:02:30  0  0
            2000-01-01 00:03:00  0  1
        5   2000-01-01 00:02:00  5  1

        Resample by month. Values are assigned to the month of the period.

        >>> df.groupby('a').resample('M').sum()
                    a  b
        a
        0   2000-01-31  0  3
        5   2000-01-31  5  1

        Downsample the series into 3 minute bins as above, but close the right
        side of the bin interval.

        >>> df.groupby('a').resample('3T', closed='right').sum()
                                 a  b
        a
        0   1999-12-31 23:57:00  0  1
            2000-01-01 00:00:00  0  2
        5   2000-01-01 00:00:00  5  1

        Downsample the series into 3 minute bins and close the right side of
        the bin interval, but label each bin using the right edge instead of
        the left.

        >>> df.groupby('a').resample('3T', closed='right', label='right').sum()
                                 a  b
        a
        0   2000-01-01 00:00:00  0  1
            2000-01-01 00:03:00  0  2
        5   2000-01-01 00:03:00  5  1
        """
        # Local import avoids a circular dependency between groupby and resample.
        from pandas.core.resample import get_resampler_for_grouping
        return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group.
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result(
"group_fillna_indexer",
numeric_only=False,
needs_mask=True,
cython_dtype=np.dtype(np.int64),
result_is_index=True,
direction=direction,
limit=limit,
)
@Substitution(name="groupby")
def pad(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill("ffill", limit=limit)
ffill = pad
@Substitution(name="groupby")
def backfill(self, limit=None):
"""
Backward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill("bfill", limit=limit)
bfill = backfill
    @Substitution(name="groupby")
    @Substitution(see_also=_common_see_also)
    def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFrame:
        """
        Take the nth row from each group if n is an int, or a subset of rows
        if n is a list of ints.

        If dropna, will take the nth non-null row, dropna is either
        'all' or 'any'; this is equivalent to calling dropna(how=dropna)
        before the groupby.

        Parameters
        ----------
        n : int or list of ints
            A single nth value for the row or a list of nth values.
        dropna : None or str, optional
            Apply the specified dropna operation before counting which row is
            the nth row. Needs to be None, 'any' or 'all'.

        Returns
        -------
        Series or DataFrame
            N-th value within each group.
        %(see_also)s
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
        >>> g = df.groupby('A')
        >>> g.nth(0)
             B
        A
        1  NaN
        2  3.0
        >>> g.nth(1)
             B
        A
        1  2.0
        2  5.0
        >>> g.nth(-1)
             B
        A
        1  4.0
        2  5.0
        >>> g.nth([0, 1])
             B
        A
        1  NaN
        1  2.0
        2  3.0
        2  5.0

        Specifying `dropna` allows count ignoring ``NaN``

        >>> g.nth(0, dropna='any')
             B
        A
        1  2.0
        2  3.0

        NaNs denote group exhausted when using dropna

        >>> g.nth(3, dropna='any')
            B
        A
        1 NaN
        2 NaN

        Specifying `as_index=False` in `groupby` keeps the original index.

        >>> df.groupby('A', as_index=False).nth(1)
           A    B
        1  1  2.0
        4  2  5.0
        """
        valid_containers = (set, list, tuple)
        if not isinstance(n, (valid_containers, int)):
            raise TypeError("n needs to be an int or a list/set/tuple of ints")
        if not dropna:
            # Fast path: select rows whose within-group position matches n,
            # counting from the front (non-negative n) or the back (negative n).
            if isinstance(n, int):
                nth_values = [n]
            elif isinstance(n, valid_containers):
                nth_values = list(set(n))
            nth_array = np.array(nth_values, dtype=np.intp)
            self._set_group_selection()
            # Positions counted from the front match non-negative n values...
            mask_left = np.in1d(self._cumcount_array(), nth_array)
            # ...and positions counted from the back match negative n values.
            mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array)
            mask = mask_left | mask_right
            ids, _, _ = self.grouper.group_info
            # Drop NA values in grouping
            mask = mask & (ids != -1)
            out = self._selected_obj[mask]
            if not self.as_index:
                return out
            result_index = self.grouper.result_index
            out.index = result_index[ids[mask]]
            if not self.observed and isinstance(result_index, CategoricalIndex):
                out = out.reindex(result_index)
            out = self._reindex_output(out)
            return out.sort_index() if self.sort else out
        # dropna is truthy
        if isinstance(n, valid_containers):
            raise ValueError("dropna option with a list of nth values is not supported")
        if dropna not in ["any", "all"]:
            # Note: when agg-ing picker doesn't raise this, just returns NaN
            raise ValueError(
                "For a DataFrame groupby, dropna must be "
                "either None, 'any' or 'all', "
                f"(was passed {dropna})."
            )
        # old behaviour, but with all and any support for DataFrames.
        # modified in GH 7559 to have better perf
        # Groups with fewer than max_len remaining rows are "exhausted" and
        # will be set to NaN below.
        max_len = n if n >= 0 else -1 - n
        dropped = self.obj.dropna(how=dropna, axis=self.axis)
        # get a new grouper for our dropped obj
        if self.keys is None and self.level is None:
            # we don't have the grouper info available
            # (e.g. we have selected out
            # a column that is not in the current object)
            axis = self.grouper.axis
            grouper = axis[axis.isin(dropped.index)]
        else:
            # create a grouper with the original parameters, but on dropped
            # object
            from pandas.core.groupby.grouper import get_grouper

            grouper, _, _ = get_grouper(
                dropped,
                key=self.keys,
                axis=self.axis,
                level=self.level,
                sort=self.sort,
                mutated=self.mutated,
            )
        grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
        sizes, result = grb.size(), grb.nth(n)
        mask = (sizes < max_len)._values
        # set the results which don't meet the criteria
        if len(result) and mask.any():
            result.loc[mask] = np.nan
        # reset/reindex to the original groups
        if len(self.obj) == len(dropped) or len(result) == len(
            self.grouper.result_index
        ):
            result.index = self.grouper.result_index
        else:
            result = result.reindex(self.grouper.result_index)
        return result
    def quantile(self, q=0.5, interpolation: str = "linear"):
        """
        Return group values at the given quantile, a la numpy.percentile.

        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)
            Value(s) between 0 and 1 providing the quantile(s) to compute.
        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
            Method to use when the desired quantile falls between two points.

        Returns
        -------
        Series or DataFrame
            Return type determined by caller of GroupBy object.

        See Also
        --------
        Series.quantile : Similar method for Series.
        DataFrame.quantile : Similar method for DataFrame.
        numpy.percentile : NumPy method to compute qth percentile.

        Examples
        --------
        >>> df = pd.DataFrame([
        ...     ['a', 1], ['a', 2], ['a', 3],
        ...     ['b', 1], ['b', 3], ['b', 5]
        ... ], columns=['key', 'val'])
        >>> df.groupby('key').quantile()
            val
        key
        a    2.0
        b    3.0
        """
        from pandas import concat

        def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
            # Coerce values to float for the Cython kernel, remembering the
            # dtype (``inference``) that the result should be cast back to.
            if is_object_dtype(vals):
                raise TypeError(
                    "'quantile' cannot be performed against 'object' dtypes!"
                )
            inference = None
            if is_integer_dtype(vals.dtype):
                if is_extension_array_dtype(vals.dtype):
                    vals = vals.to_numpy(dtype=float, na_value=np.nan)
                inference = np.int64
            elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
                vals = vals.to_numpy(dtype=float, na_value=np.nan)
            elif is_datetime64_dtype(vals.dtype):
                inference = "datetime64[ns]"
                vals = np.asarray(vals).astype(float)
            return vals, inference

        def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
            if inference:
                # Check for edge case
                # Int results stay float when interpolation can land between
                # two integers.
                if not (
                    is_integer_dtype(inference)
                    and interpolation in {"linear", "midpoint"}
                ):
                    vals = vals.astype(inference)
            return vals

        if is_scalar(q):
            return self._get_cythonized_result(
                "group_quantile",
                aggregate=True,
                numeric_only=False,
                needs_values=True,
                needs_mask=True,
                cython_dtype=np.dtype(np.float64),
                pre_processing=pre_processor,
                post_processing=post_processor,
                q=q,
                interpolation=interpolation,
            )
        else:
            # NOTE(review): unlike the scalar branch, this call omits
            # numeric_only=False — confirm whether that asymmetry is intended.
            results = [
                self._get_cythonized_result(
                    "group_quantile",
                    aggregate=True,
                    needs_values=True,
                    needs_mask=True,
                    cython_dtype=np.dtype(np.float64),
                    pre_processing=pre_processor,
                    post_processing=post_processor,
                    q=qi,
                    interpolation=interpolation,
                )
                for qi in q
            ]
            result = concat(results, axis=0, keys=q)
            # fix levels to place quantiles on the inside
            # TODO(GH-10710): Ideally, we could write this as
            #  >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
            #  but this hits https://github.com/pandas-dev/pandas/issues/10710
            #  which doesn't reorder the list-like `q` on the inner level.
            order = list(range(1, result.index.nlevels)) + [0]
            # temporarily saves the index names
            index_names = np.array(result.index.names)
            # set index names to positions to avoid confusion
            result.index.names = np.arange(len(index_names))
            # place quantiles on the inside
            result = result.reorder_levels(order)
            # restore the index names in order
            result.index.names = index_names[order]
            # reorder rows to keep things sorted
            indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten()
            return result.take(indices)
@Substitution(name="groupby")
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name="groupby")
def cumcount(self, ascending: bool = True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
See Also
--------
.ngroup : Number the groups themselves.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return self._obj_1d_constructor(cumcounts, index)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rank(
self,
method: str = "average",
ascending: bool = True,
na_option: str = "keep",
pct: bool = False,
axis: int = 0,
):
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group.
* min: lowest rank in group.
* max: highest rank in group.
* first: ranks assigned in order they appear in the array.
* dense: like 'min', but rank always increases by 1 between groups.
ascending : bool, default True
False for ranks by high (1) to low (N).
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are.
* top: smallest rank if ascending.
* bottom: smallest rank if descending.
pct : bool, default False
Compute percentage rank of data within each group.
axis : int, default 0
The axis of the object over which to compute the rank.
Returns
-------
DataFrame with ranking of values within each group
"""
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
return self._cython_transform(
"rank",
numeric_only=False,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
axis=axis,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumprod(self, axis=0, *args, **kwargs):
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform("cumprod", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform("cumsum", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummin(self, axis=0, **kwargs):
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform("cummin", numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummax(self, axis=0, **kwargs):
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform("cummax", numeric_only=False)
    def _get_cythonized_result(
        self,
        how: str,
        cython_dtype: np.dtype,
        aggregate: bool = False,
        numeric_only: bool = True,
        needs_counts: bool = False,
        needs_values: bool = False,
        needs_2d: bool = False,
        min_count: Optional[int] = None,
        needs_mask: bool = False,
        needs_ngroups: bool = False,
        result_is_index: bool = False,
        pre_processing=None,
        post_processing=None,
        **kwargs,
    ):
        """
        Get result for Cythonized functions.

        Parameters
        ----------
        how : str, Cythonized function name to be called
        cython_dtype : np.dtype
            Type of the array that will be modified by the Cython call.
        aggregate : bool, default False
            Whether the result should be aggregated to match the number of
            groups
        numeric_only : bool, default True
            Whether only numeric datatypes should be computed
        needs_counts : bool, default False
            Whether the counts should be a part of the Cython call
        needs_values : bool, default False
            Whether the values should be a part of the Cython call
            signature
        needs_2d : bool, default False
            Whether the values and result of the Cython call signature
            are 2-dimensional.
        min_count : int, default None
            When not None, min_count for the Cython call
        needs_mask : bool, default False
            Whether boolean mask needs to be part of the Cython call
            signature
        needs_ngroups : bool, default False
            Whether number of groups is part of the Cython call signature
        result_is_index : bool, default False
            Whether the result of the Cython operation is an index of
            values to be retrieved, instead of the actual values themselves
        pre_processing : function, default None
            Function to be applied to `values` prior to passing to Cython.
            Function should return a tuple where the first element is the
            values to be passed to Cython and the second element is an optional
            type which the values should be converted to after being returned
            by the Cython operation. This function is also responsible for
            raising a TypeError if the values have an invalid type. Raises
            if `needs_values` is False.
        post_processing : function, default None
            Function to be applied to result of Cython function. Should accept
            an array of values as the first argument and type inferences as its
            second argument, i.e. the signature should be
            (ndarray, Type).
        **kwargs : dict
            Extra arguments to be passed back to Cython funcs

        Returns
        -------
        `Series` or `DataFrame` with filled values
        """
        # Validate mutually-dependent options before doing any work.
        if result_is_index and aggregate:
            raise ValueError("'result_is_index' and 'aggregate' cannot both be True!")
        if post_processing:
            if not callable(post_processing):
                raise ValueError("'post_processing' must be a callable!")
        if pre_processing:
            if not callable(pre_processing):
                raise ValueError("'pre_processing' must be a callable!")
            if not needs_values:
                raise ValueError(
                    "Cannot use 'pre_processing' without specifying 'needs_values'!"
                )
        grouper = self.grouper
        labels, _, ngroups = grouper.group_info
        output: Dict[base.OutputKey, np.ndarray] = {}
        base_func = getattr(libgroupby, how)
        error_msg = ""
        for idx, obj in enumerate(self._iterate_slices()):
            name = obj.name
            values = obj._values
            if numeric_only and not is_numeric_dtype(values):
                continue
            if aggregate:
                result_sz = ngroups
            else:
                result_sz = len(values)
            # The Cython kernel writes into ``result`` in place.
            result = np.zeros(result_sz, dtype=cython_dtype)
            if needs_2d:
                result = result.reshape((-1, 1))
            # Build the kernel call incrementally via partial application,
            # in the positional-argument order the kernels expect:
            # result[, counts][, values], labels[, min_count][, mask][, ngroups].
            func = partial(base_func, result)
            inferences = None
            if needs_counts:
                counts = np.zeros(self.ngroups, dtype=np.int64)
                func = partial(func, counts)
            if needs_values:
                vals = values
                if pre_processing:
                    try:
                        vals, inferences = pre_processing(vals)
                    except TypeError as e:
                        # Defer raising: only raise if no column succeeds.
                        error_msg = str(e)
                        continue
                if needs_2d:
                    vals = vals.reshape((-1, 1))
                vals = vals.astype(cython_dtype, copy=False)
                func = partial(func, vals)
            func = partial(func, labels)
            if min_count is not None:
                func = partial(func, min_count)
            if needs_mask:
                mask = isna(values).view(np.uint8)
                func = partial(func, mask)
            if needs_ngroups:
                func = partial(func, ngroups)
            func(**kwargs)  # Call func to modify indexer values in place
            if needs_2d:
                result = result.reshape(-1)
            if result_is_index:
                # Kernel produced positions; gather the actual values.
                result = algorithms.take_nd(values, result)
            if post_processing:
                result = post_processing(result, inferences)
            key = base.OutputKey(label=name, position=idx)
            output[key] = result
        # error_msg is "" on an frame/series with no rows or columns
        if len(output) == 0 and error_msg != "":
            raise TypeError(error_msg)
        if aggregate:
            return self._wrap_aggregated_output(output)
        else:
            return self._wrap_transformed_output(output)
@Substitution(name="groupby")
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Shift each group by periods observations.
If freq is passed, the index will be increased using the periods and the freq.
Parameters
----------
periods : int, default 1
Number of periods to shift.
freq : str, optional
Frequency string.
axis : axis to shift, default 0
Shift direction.
fill_value : optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Object shifted within each group.
See Also
--------
Index.shift : Shift values of Index.
tshift : Shift the time index, using the index’s frequency
if available.
"""
if freq is not None or axis != 0 or not isna(fill_value):
return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))
return self._get_cythonized_result(
"group_shift_indexer",
numeric_only=False,
cython_dtype=np.dtype(np.int64),
needs_ngroups=True,
result_is_index=True,
periods=periods,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, axis=0):
"""
Calculate pct_change of each value to previous entry in group.
Returns
-------
Series or DataFrame
Percentage changes within each group.
"""
if freq is not None or axis != 0:
return self.apply(
lambda x: x.pct_change(
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
axis=axis,
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def head(self, n=5):
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def tail(self, n=5):
"""
Return last n rows of each group.
Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
... columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').tail(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
def _reindex_output(
    self, output: OutputFrameOrSeries, fill_value: Scalar = np.NaN
) -> OutputFrameOrSeries:
    """
    If we have categorical groupers, then we might want to make sure that
    we have a fully re-indexed output to the levels. This means expanding
    the output space to accommodate all values in the cartesian product of
    our groups, regardless of whether they were observed in the data or
    not. This will expand the output space if there are missing groups.

    The method returns early without modifying the input if the number of
    groupings is less than 2, self.observed == True or none of the groupers
    are categorical.

    Parameters
    ----------
    output : Series or DataFrame
        Object resulting from grouping and applying an operation.
    fill_value : scalar, default np.NaN
        Value to use for unobserved categories if self.observed is False.

    Returns
    -------
    Series or DataFrame
        Object (potentially) re-indexed to include all possible groups.
    """
    groupings = self.grouper.groupings
    if groupings is None:
        return output
    elif len(groupings) == 1:
        return output

    # if we only care about the observed values
    # we are done
    elif self.observed:
        return output

    # reindexing only applies to a Categorical grouper
    elif not any(
        isinstance(ping.grouper, (Categorical, CategoricalIndex))
        for ping in groupings
    ):
        return output

    # Build the full cartesian product of every grouping's levels,
    # sorted so the expanded output has a deterministic order.
    levels_list = [ping.group_index for ping in groupings]
    index, _ = MultiIndex.from_product(
        levels_list, names=self.grouper.names
    ).sortlevel()

    if self.as_index:
        # Groups live on the index: a plain reindex along the group axis
        # inserts the unobserved combinations, filled with ``fill_value``.
        d = {
            self.obj._get_axis_name(self.axis): index,
            "copy": False,
            "fill_value": fill_value,
        }
        return output.reindex(**d)

    # GH 13204
    # Here, the categorical in-axis groupers, which need to be fully
    # expanded, are columns in `output`. An idea is to do:
    # output = output.set_index(self.grouper.names)
    #                .reindex(index).reset_index()
    # but special care has to be taken because of possible not-in-axis
    # groupers.
    # So, we manually select and drop the in-axis grouper columns,
    # reindex `output`, and then reset the in-axis grouper columns.

    # Select in-axis groupers
    in_axis_grps = (
        (i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis
    )
    g_nums, g_names = zip(*in_axis_grps)

    output = output.drop(labels=list(g_names), axis=1)

    # Set a temp index and reindex (possibly expanding)
    output = output.set_index(self.grouper.result_index).reindex(
        index, copy=False, fill_value=fill_value
    )

    # Reset in-axis grouper columns
    # (using level numbers `g_nums` because level names may not be unique)
    output = output.reset_index(level=g_nums)

    return output.reset_index(drop=True)
def sample(
    self,
    n: Optional[int] = None,
    frac: Optional[float] = None,
    replace: bool = False,
    weights: Optional[Union[Sequence, Series]] = None,
    random_state=None,
):
    """
    Return a random sample of items from each group.

    You can use `random_state` for reproducibility.

    .. versionadded:: 1.1.0

    Parameters
    ----------
    n : int, optional
        Number of items to return for each group. Cannot be used with
        `frac` and must be no larger than the smallest group unless
        `replace` is True. Default is one if `frac` is None.
    frac : float, optional
        Fraction of items to return. Cannot be used with `n`.
    replace : bool, default False
        Allow or disallow sampling of the same row more than once.
    weights : list-like, optional
        Default None results in equal probability weighting.
        If passed a list-like then values must have the same length as
        the underlying DataFrame or Series object and will be used as
        sampling probabilities after normalization within each group.
        Values must be non-negative with at least one positive element
        within each group.
    random_state : int, array-like, BitGenerator, np.random.RandomState, optional
        If int, array-like, or BitGenerator (NumPy>=1.17), seed for
        random number generator
        If np.random.RandomState, use as numpy RandomState object.

    Returns
    -------
    Series or DataFrame
        A new object of same type as caller containing items randomly
        sampled within each group from the caller object.

    See Also
    --------
    DataFrame.sample: Generate random samples from a DataFrame object.
    numpy.random.choice: Generate a random sample from a given 1-D numpy
        array.

    Examples
    --------
    >>> df = pd.DataFrame(
    ...     {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
    ... )
    >>> df
           a  b
    0    red  0
    1    red  1
    2   blue  2
    3   blue  3
    4  black  4
    5  black  5

    Select one row at random for each distinct value in column a. The
    `random_state` argument can be used to guarantee reproducibility:

    >>> df.groupby("a").sample(n=1, random_state=1)
           a  b
    4  black  4
    2   blue  2
    1    red  1

    Set `frac` to sample fixed proportions rather than counts:

    >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
    5    5
    2    2
    0    0
    Name: b, dtype: int64

    Control sample probabilities within groups by setting weights:

    >>> df.groupby("a").sample(
    ...     n=1,
    ...     weights=[1, 1, 1, 0, 0, 1],
    ...     random_state=1,
    ... )
           a  b
    5  black  5
    2   blue  2
    0    red  0
    """
    from pandas.core.reshape.concat import concat

    # Per-group sampling weights: align the user-supplied weights with the
    # selected object, then slice them out group by group.
    if weights is None:
        group_weights = [None] * self.ngroups
    else:
        weights = Series(weights, index=self._selected_obj.index)
        group_weights = [weights[idx] for idx in self.indices.values()]

    if random_state is not None:
        # Normalize to a shared random state so each group draws from it.
        random_state = com.random_state(random_state)

    sampled = [
        group.sample(
            n=n, frac=frac, replace=replace, weights=w, random_state=random_state
        )
        for (_, group), w in zip(self, group_weights)
    ]
    return concat(sampled, axis=self.axis)
@doc(GroupBy)
def get_groupby(
    obj: NDFrame,
    by: Optional[_KeysArgType] = None,
    axis: int = 0,
    level=None,
    grouper: "Optional[ops.BaseGrouper]" = None,
    exclusions=None,
    selection=None,
    as_index: bool = True,
    sort: bool = True,
    group_keys: bool = True,
    squeeze: bool = False,
    observed: bool = False,
    mutated: bool = False,
    dropna: bool = True,
) -> GroupBy:
    # Reject unsupported objects up front; only Series and DataFrame
    # have concrete GroupBy implementations.
    if not isinstance(obj, (Series, DataFrame)):
        raise TypeError(f"invalid type: {obj}")

    # Imports are deferred to call time — presumably to avoid a circular
    # import with pandas.core.groupby.generic (TODO confirm).
    klass: Type[GroupBy]
    if isinstance(obj, Series):
        from pandas.core.groupby.generic import SeriesGroupBy

        klass = SeriesGroupBy
    else:
        from pandas.core.groupby.generic import DataFrameGroupBy

        klass = DataFrameGroupBy

    return klass(
        obj=obj,
        keys=by,
        axis=axis,
        level=level,
        grouper=grouper,
        exclusions=exclusions,
        selection=selection,
        as_index=as_index,
        sort=sort,
        group_keys=group_keys,
        squeeze=squeeze,
        observed=observed,
        mutated=mutated,
        dropna=dropna,
    )
| [
"prafulpar"
] | prafulpar |
1a07d3114e74fadea676842c7d35a5dae102c80b | 96fd91e48b5e08987206616d4a476f7fcb629742 | /packaging_project/pkg1/imported_module.py | b6a2cba73091d758f76b88bec7b639c917176043 | [
"MIT"
] | permissive | QikaiYang/ultimate-utils | 048efc2dd0812bd9fc9d24206acc753b288594bf | 50db0b96f2b3144ef008e29757990c688615951d | refs/heads/master | 2023-03-31T13:23:17.856199 | 2021-03-30T21:25:03 | 2021-03-30T21:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | print('\n------> you are importing: imported_module.py\n')
def func_from_imported_module():
print("I'm a function from func_from_imported_module") | [
"miranda9@illinois.edu"
] | miranda9@illinois.edu |
490fcdfb16141de4f142150d27b614173af087da | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/quality_control_and_material_testing/doctype/bulk_density_of_masonary/bulk_density_of_masonary.py | 682281f533740a8c16ef57cb3acb6c2e523d8ca2 | [
"MIT"
] | permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class BulkDensityOfmasonary(Document):
pass
| [
"hereabdulla@gmail.com"
] | hereabdulla@gmail.com |
ea5f7a09a051a8dfce6a47bf73c4c80e1df83072 | 146e54c9b4654ae24cf61e7bdc63ec974034b6a4 | /kafka/getFromSQS_kafka.py | 10907cec0257a337a69700a4b20cef6cbca0af78 | [] | no_license | sajal50/twittTrends | 094e86712428e4fc394bb698a70c5574485bb09c | e2b1c2e1ab2f71c14a78a2bc2e37e6282898133a | refs/heads/master | 2020-06-20T12:08:47.926582 | 2017-01-15T12:26:02 | 2017-01-15T12:26:02 | 74,866,195 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,599 | py | from multiprocessing import Pool, TimeoutError, Lock
import threading
import time
import os
import multiprocessing
from watson_developer_cloud import AlchemyLanguageV1
import boto3
import config as Config #Using config file to read the config settings from a separate config file.
from kafka import KafkaConsumer
import json
KAFKA_HOST = 'localhost:9092'
TOPIC = 'test'
lock = Lock()
alchemy_language = AlchemyLanguageV1(api_key=Config.ALCHEMY_API_KEY)
def getFromKafka():
lock.acquire()
consumer = KafkaConsumer(TOPIC, bootstrap_servers=[KAFKA_HOST],consumer_timeout_ms=10000)
for message in consumer:
tweet = json.loads(message.value)
if tweet is not None :
#getting sentiment
result = json.loads(json.dumps( alchemy_language.sentiment( text = tweet['text']), indent = 2))
docSentiment = result['docSentiment']
sentiment = docSentiment['type']
tweet['sentiment'] = sentiment
tweet = json.dumps(tweet)
print tweet
client = boto3.client('sns')
response = client.publish(
TargetArn=Config.SNS_ARN,
Message=json.dumps({'default': tweet}),
MessageStructure='json'
)
lock.release()
if __name__ == '__main__':
#getFromKafka()
pool = Pool(processes=4) # start 4 worker processes
while 1:
multiple_results = [pool.apply_async(getFromKafka, ()) for i in range(4)]
print [res.get(timeout=100) for res in multiple_results]
time.sleep(2)
| [
"sajal50@gmail.com"
] | sajal50@gmail.com |
1a4a25bdcf44d7428170dabdff37efd293eeb2bd | 290c4de61df3c6c1abe3bdf0203145fbe520225c | /celestia/utility/config.py | 1045050afb8152d5fa1e78d2a4ac779c33a1158a | [
"BSD-2-Clause"
] | permissive | kidaak/CelestiaSunrise | e1634b9daf620528a9713ba01a4561de8fa82170 | 5ace8cbf517e09e198a62e6dd1733de588ae2cfb | refs/heads/master | 2021-01-24T21:19:47.927620 | 2015-08-01T03:52:38 | 2015-08-01T03:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: config.py
# by Arzaroth Lekva
# arzaroth@arzaroth.com
#
import sys
import json
from clint import resources
CONFIG_FILE = "config.json"
BASE_CONFIG = {
"startup_check": False
}
class ClassProperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class Config(object):
_config = None
@ClassProperty
@classmethod
def config(cls):
if cls._config is None:
config = resources.user.read(CONFIG_FILE)
if config is None:
config = BASE_CONFIG
cls.commit()
else:
config = json.loads(config)
cls._config = config
return cls._config
@classmethod
def commit(cls):
resources.user.write(CONFIG_FILE, json.dumps(cls.config))
resources.init('Arzaroth', 'CelesiaSunrise')
| [
"lekva@arzaroth.com"
] | lekva@arzaroth.com |
a32469f383f73a5e739265066cfc490c1b37b63d | 82eba08b9a7ee1bd1a5f83c3176bf3c0826a3a32 | /ZmailServer/src/python/pylibs/conf.py | 7077b51dcd1f57e8b2f178471fe6c88beda587ce | [
"MIT"
] | permissive | keramist/zmailserver | d01187fb6086bf3784fe180bea2e1c0854c83f3f | 762642b77c8f559a57e93c9f89b1473d6858c159 | refs/heads/master | 2021-01-21T05:56:25.642425 | 2013-10-21T11:27:05 | 2013-10-22T12:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | #
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2010, 2011, 2012 VMware, Inc.
#
# The contents of this file are subject to the Zimbra Public License
# Version 1.3 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
import os
class Config:
def __init__(self):
self.config = {}
self.progname = "zmconfigd"
if (os.getenv("zmail_server_hostname") is not None):
self.hostname = os.getenv("zmail_server_hostname")
else:
self.hostname = os.popen("/opt/zmail/bin/zmhostname").readline().strip()
if (self.hostname is None or self.hostname == ""):
os._exit(1)
self.wd_all = False
self.debug = False
self.baseDir = "/opt/zmail"
self.logStatus = {
4 : "Debug",
3 : "Info",
2 : "Warning",
1 : "Error",
0 : "Fatal"
}
self.configFile = self.baseDir+"/conf/zmconfigd.cf";
self.logFile = self.baseDir+"/log/"+self.progname+".log";
self.pidFile = self.baseDir+"/log/"+self.progname+".pid";
self.interval = 60
if self.debug:
self.interval = 10
self.restartconfig = False
self.watchdog = True
self.wd_list = [ "antivirus" ]
self.loglevel = 3
def __setitem__(self, key, val):
self.config[key] = val
def __getitem__(self, key):
try:
return self.config[key]
except Exception, e:
return None
def setVals(self, state):
self.ldap_is_master = state.localconfig["ldap_is_master"]
self.ldap_root_password = state.localconfig["ldap_root_password"]
self.ldap_master_url = state.localconfig["ldap_master_url"]
self.loglevel = 3
if state.localconfig["ldap_starttls_required"] is not None:
self.ldap_starttls_required = (state.localconfig["ldap_starttls_required"].upper() != "FALSE")
if state.localconfig["zmconfigd_log_level"] is not None:
self.loglevel = int(state.localconfig["zmconfigd_log_level"])
self.interval = 60
if state.localconfig["zmconfigd_interval"] is not None and state.localconfig["zmconfigd_interval"] != "":
self.interval = int(state.localconfig["zmconfigd_interval"])
self.debug = False
if state.localconfig["zmconfigd_debug"] is not None:
self.debug = state.localconfig["zmconfigd_debug"]
if state.localconfig["zmconfigd_watchdog"] is not None:
self.watchdog = (state.localconfig["zmconfigd_watchdog"].upper() != "FALSE")
if state.localconfig["zmconfigd_enable_config_restarts"] is not None:
self.restartconfig = (state.localconfig["zmconfigd_enable_config_restarts"].upper() != "FALSE")
if state.localconfig["zmconfigd_watchdog_services"] is not None:
self.wd_list = state.localconfig["zmconfigd_watchdog_services"].split()
| [
"bourgerie.quentin@gmail.com"
] | bourgerie.quentin@gmail.com |
0e4607a31bfe4d869574f4f1f64d51451bfc2ea9 | 386e9bf84397502fea2662df723f4b04ea1703c9 | /03_01_find_the_access_codes/answer.py | 19a52eed9d02b49a5efadc33b604fb4f8eba07e7 | [] | no_license | w9/google-foobar | 1f1b3e33519e41dc779a93efdf0514ab12848cfc | 92c9eacfd5eb3f41539bd94e842ea9567a264976 | refs/heads/master | 2021-01-23T19:25:44.403029 | 2017-09-09T23:46:49 | 2017-09-09T23:46:49 | 102,820,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from itertools import repeat
from functools import partial
def transition(m, a):
""" Matrix-vector multiplication.
"""
return [sum([a[i] for i in r]) for r in m]
def get_trans_mat(l):
""" *Divisible and preceeding in list* is a partial order.
This function gives the transition matrix of the corresponding DAG,
in a sparse format.
"""
return [[i for i in range(j) if i < j and l[j] % l[i] == 0]
for j in range(len(l))]
def answer(l):
trans_mat = get_trans_mat(l)
walk = partial(transition, trans_mat)
one_step = walk(list(repeat(1, len(l))))
two_step = walk(one_step)
return sum(two_step)
| [
"zhuxun2@gmail.com"
] | zhuxun2@gmail.com |
56ab994254b3b1de4c46198dd4067152d1c0b8b9 | 47703c8cfd6b6cbbec7ceb2509da1bc049dd621f | /udoy_013.py | de28dafdc46a3bbd08f2137b5bbcbf693cf22f3f | [] | no_license | udoy382/PyCode | 0638a646bd4cac4095a58135aea97ba4ccfd5535 | 69efde580f019cd41061501554b6193688a0a06f | refs/heads/main | 2023-03-26T17:45:15.943887 | 2021-03-25T14:22:42 | 2021-03-25T14:22:42 | 324,485,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # Short Hand If Else Notation In Python #22
a = int(input("enter a\n"))
b = int(input("enter b\n"))
# 1st
# if a>b: print("A B se bada hai bhai")
# 2nd
# print("B A se bada hai bhai") if a<b else print("A B se bada hai bhai") | [
"srudoy436@gmail.com"
] | srudoy436@gmail.com |
c93bf244e3f0b22a16f1e79d6056e4dbe376e1f5 | acba9be504eb44718edd8daa10bd7e36e6d9f3ee | /dstbasic/l4_uname_file.py | 1b9dc4220b8027c17d931d4763e74bb5dc1f46dc | [] | no_license | coderdr31/DNote | 48901b3bb7fb38b0e041eb6f33d547ac44f31b78 | 23d43792befd6457a68f1d3f986d72fdfcdffdad | refs/heads/master | 2021-10-08T02:33:02.484256 | 2018-12-06T14:03:16 | 2018-12-06T14:03:16 | 114,640,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | #!/usr/bin/python3
# coding: utf-8
##################################################################
## uname 查看版本
uname -m # i686-32位系统, x86_64-64位系统
##################################################################
## file 判断文件类型和文件编码格式
file -b # 列出文件辨识结果时,不显示文件名称
| [
"1683751393@qq.com"
] | 1683751393@qq.com |
574587cc19a341530f3d079bed48ec4e649f274e | 07efa2ec19ec3a4090e20be1dff7576a98fd341c | /Project Euler/p36.py | 94de73e6225875ad388c90bf18f65be784051b97 | [] | no_license | raywong220/Backyard | ff3678d208857b63728fd9ebe744884b00ee947b | eab91b3c122aa24c68fc547c485ed3681c8bf5b1 | refs/heads/master | 2023-02-03T16:52:52.141134 | 2020-12-21T11:06:49 | 2020-12-21T11:06:49 | 254,303,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
# Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
from time import process_time
sum = 0
# Since even number always ends with 0 in binary, not Palindromic
for i in range(1, 1000001, 2):
b = bin(i)[2:]
if str(i) == str(i)[::-1] and b == b[::-1]:
sum += i
print(sum)
print(process_time()) | [
"noreply@github.com"
] | raywong220.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.