| blob_id (string, 40–40) | directory_id (string, 40–40) | path (string, 3–616) | content_id (string, 40–40) | detected_licenses (list, 0–112) | license_type (2 classes) | repo_name (string, 5–115) | snapshot_id (string, 40–40) | revision_id (string, 40–40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3–10.2M) | extension (188 classes) | content (string, 3–10.2M) | authors (list, 1–1) | author_id (string, 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
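The rows below follow this schema. A minimal sketch of streaming rows of this shape with the `datasets` library; the dataset path used here is an assumption standing in for whatever dataset this preview was exported from:

```python
# Sketch: stream rows with this schema via the `datasets` library.
# The dataset path below is a placeholder assumption, not taken from this dump.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", split="train", streaming=True)
for row in ds.take(3):
    # Provenance metadata and the full file text travel together in each row.
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])
```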
57db87c9797abc2707a464d3b187ceba70140495
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/CDK2_input/L1Q/1Q-17_MD_NVT_rerun/set_1ns_equi_1.py
|
6e267d5a25d2c1095b57350c91596b9b1a2e1503
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/CDK2/L1Q/MD_NVT_rerun/ti_one-step/1Q_17/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../1Q-17_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
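The script above stamps out one directory per lambda window by shelling out to rm/mkdir/cp/sed/qsub. A minimal in-process sketch of the same file-templating step, assuming the script's own `XXX` placeholder convention:

```python
# Sketch: render one lambda-window input file in-process instead of cp + sed.
from pathlib import Path

def render_template(template_path, out_path, lam):
    text = Path(template_path).read_text()
    Path(out_path).write_text(text.replace("XXX", "%6.5f" % lam))

# e.g. render_template(temp_equiin, eqin, 0.00922)
```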
25d5976832ad59276a925791f17f1af25df88294
|
869153aa415924529a3dc739df098c5d4bb83ce4
|
/17_contour/2_approxpoly.py
|
510e1441c6bea77de31e526f8100e5df2c9a617f
|
[] |
no_license
|
ccwu0918/class_opencv_python
|
b8c4dfb64e3fa84ba4b79b96c31b98600ae4c829
|
fa15bcd23eb388359180a00ce096b79ec0bdc2ec
|
refs/heads/main
| 2023-06-28T05:54:05.010580
| 2021-08-05T12:53:09
| 2021-08-05T12:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
import cv2
RECT, HEXAGON = 0, 1
frame = cv2.imread("./images/poly.png")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edged = cv2.Canny(gray, 50, 150)
edged = cv2.dilate(edged, None, iterations=1)
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print('=== before approximation')
print('rectangle point count: {}'.format(len(contours[RECT])))
print('hexagon point count: {}'.format(len(contours[HEXAGON])))
approx_rect = cv2.approxPolyDP(contours[RECT], 30, True)
approx_hex = cv2.approxPolyDP(contours[HEXAGON], 30, True)
print('=== after approximation')
print('rectangle point count: {}'.format(len(approx_rect)))
print('hexagon point count: {}'.format(len(approx_hex)))
cv2.drawContours(frame, [approx_rect], -1, (0, 0, 255), 5)
cv2.drawContours(frame, [approx_hex], -1, (0, 0, 255), 5)
cv2.imshow('frame', frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"shinjia168@gmail.com"
] |
shinjia168@gmail.com
|
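The demo above passes a fixed epsilon of 30 pixels to `cv2.approxPolyDP`. A common variant (an addition here, not part of the original) scales the tolerance to the contour perimeter so it behaves consistently across contour sizes:

```python
# Sketch: choose epsilon as a fraction of the contour perimeter.
import cv2

def approx_relative(contour, frac=0.02):
    epsilon = frac * cv2.arcLength(contour, True)  # perimeter of a closed contour
    return cv2.approxPolyDP(contour, epsilon, True)
```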
27aab3a9cc7d68c1f9995b8141f2835218ef5fa5
|
9aaa39f200ee6a14d7d432ef6a3ee9795163ebed
|
/Algorithm/Python/099. Recover Binary Search Tree.py
|
e52e96232688e101cc8945d37d8bb4ed53c5c8a1
|
[] |
no_license
|
WuLC/LeetCode
|
47e1c351852d86c64595a083e7818ecde4131cb3
|
ee79d3437cf47b26a4bca0ec798dc54d7b623453
|
refs/heads/master
| 2023-07-07T18:29:29.110931
| 2023-07-02T04:31:00
| 2023-07-02T04:31:00
| 54,354,616
| 29
| 16
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
# -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2016-07-03 20:01:22
# @Last modified by: WuLC
# @Last Modified time: 2016-07-03 20:01:47
# @Email: liangchaowu5@gmail.com
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# O(n) space
class Solution(object):
def recoverTree(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
result, p1, p2= [], None, None
self.inorderTraverse(root, result)
for i in xrange(len(result)):
if i+1<len(result) and result[i].val > result[i+1].val and p1 == None:
p1 = result[i]
elif i-1 >= 0 and result[i].val < result[i-1].val:
p2 = result[i]
if p1 and p2:
p1.val, p2.val = p2.val, p1.val
def inorderTraverse(self, root, result):
if root == None: return
self.inorderTraverse(root.left, result)
result.append(root)
self.inorderTraverse(root.right, result)
|
[
"liangchaowu5@gmail.com"
] |
liangchaowu5@gmail.com
|
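The solution above materializes the whole inorder sequence, the O(n)-space approach its comment names. For contrast, a sketch of the standard O(h)-space variant that remembers only the previously visited node during the walk (an alternative, not the author's code):

```python
# Sketch: recover the BST with one inorder walk and O(h) extra space.
class RecoverBST(object):
    def recoverTree(self, root):
        self.prev = self.first = self.second = None

        def inorder(node):
            if node is None:
                return
            inorder(node.left)
            if self.prev is not None and self.prev.val > node.val:
                if self.first is None:
                    self.first = self.prev  # first node out of order
                self.second = node          # last node out of order
            self.prev = node
            inorder(node.right)

        inorder(root)
        if self.first and self.second:
            self.first.val, self.second.val = self.second.val, self.first.val
```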
1bab0b849a1e1e4e3abed62cacf5cf2d4d838a36
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/E/eirikst/postliste-risr-kommune.py
|
53a2cf8607cef901cdcca0d7cbed4121d934916b
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,408
|
py
|
# -*- coding: UTF-8 -*-
# Based on the scraper advanced-scraping-pdf
# See also
# https://views.scraperwiki.com/run/pdf-to-html-preview-1/?url=http%3A%2F%2Fwww.stortinget.no%2FGlobal%2Fpdf%2Fpostjournal%2Fpj-2012-05-09.pdf
import scraperwiki
import json
from BeautifulSoup import BeautifulSoup
import datetime
import dateutil.parser
import lxml.html
#import resource
import sys
#import urlparse
#import gc
import re
#lazycache=scraperwiki.swimport('lazycache')
#postlistelib=scraperwiki.swimport('postliste-python-lib')
agency = 'Risør kommune'
import mechanize
# ASPX pages are some of the hardest challenges because they use javascript and forms to navigate
# Almost always the links go through the function __doPostBack(eventTarget, eventArgument)
# which you have to simulate in the mechanize form handling library
# This example shows how to follow the Next page link
url = 'http://159.171.0.169/ris/Modules/innsyn.aspx?mode=pl&SelPanel=0&ObjectType=ePhorteRegistryEntry&VariantType=Innsyn&ViewType=List&Query=RecordDate%3a%28-7%29+AND+DocumentType%3a%28I%2cU%29'
br = mechanize.Browser()
# sometimes the server is sensitive to this information
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
response = br.open(url)
html = response.read()
for pagenum in range(6):
print "Page %d page length %d" % (pagenum, len(html))
#print html
#print "Clinicians found:", re.findall("PDetails.aspx\?ProviderId.*?>(.*?)</a>", html)
mnextlink = re.search("javascript:__doPostBack\('ctl00\$ctl00\$ctl00\$WebPartManager\$wp1243460126ViewPart\$ctl04',''\).>Neste", html)
#print mnextlink
if not mnextlink:
break
br.select_form(name='aspnetForm')
br.form.set_all_readonly(False)
br['__EVENTTARGET'] = 'ctl00$ctl00$ctl00$WebPartManager$wp1243460126ViewPart$ctl04' #'ProviderSearchResultsTable1$NextLinkButton'
br['__EVENTARGUMENT'] = ''
br.submit()
html = br.response().read()
#print len(html)
# def report_errors(errors):
# if 0 < len(errors):
# print "Errors:"
# for e in errors:
# print e
# exit(1)
# def out_of_cpu(arg, spent, hard, soft):
# report_errors(arg)
#
# def process_pdf(parser, pdfurl, errors):
# errors = []
# postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
# try:
# pdfcontent = scraperwiki.scrape(pdfurl)
# parser.preprocess(pdfurl, pdfcontent)
# pdfcontent = None
# # except ValueError, e:
# # errors.append(e)
# except IndexError, e:
# errors.append(e)
#
# def process_page_queue(parser, errors):
# try:
# parser.process_pages()
# postlistelib.exit_if_no_cpu_left(0, out_of_cpu, errors)
# except scraperwiki.CPUTimeExceededError, e:
# errors.append("Processing pages interrupted")
#
# def process_journal_pdfs(parser, listurl, errors):
# # print "Finding PDFs on " + listurl
# # u = urllib.parse.urlparse(listurl)
# html = scraperwiki.scrape(listurl)
# root = lxml.html.fromstring(html)
# html = None
# for ahref in root.cssselect("table a"):
# href = ahref.attrib['href']
# url = urlparse.urljoin(listurl, href)
# if -1 != href.find("file://"):
# # print "Skipping non-http URL " + url
# continue
# if parser.is_already_scraped(url):
# True
# # print "Skipping already scraped " + url
# else:
# # print "Will process " + url
# process_pdf(parser, url, errors)
#
# def test_small_pdfs():
# # Test with some smaller PDFs
# errors = []
# process_pdf("http://home.nuug.no/~pere/uio-postjournal/2011-16.pdf", errors)
# process_pdf("http://home.nuug.no/~pere/uio-postjournal/2011-52.pdf", errors)
# process_page_queue(errors)
# report_errors(errors)
# exit(0)
#
# #test_small_pdfs()
# errors = []
# parser = postlistelib.PDFJournalParser(agency=agency)
# process_journal_pdfs(parser, "http://www.havn.oslo.kommune.no/postjournal/", errors)
# process_page_queue(parser, errors)
# report_errors(errors)
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
3dce37f70d0e342a89d689ef5496ace1bf7fb07e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_014/ch47_2020_04_13_13_29_30_615294.py
|
5ec09dffb565c008c5f35752896118970f26a078
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def estritamente_crescente(lista):
    # Keep each element that is strictly greater than its predecessor; the
    # loop must stop at len(lista) - 1 because it reads lista[i + 1].
    i = 0
    lista_nova = []
    while i < len(lista) - 1:
        if lista[i+1] > lista[i]:
            lista_nova.append(lista[i+1])
        i += 1
    return lista_nova
|
[
"you@example.com"
] |
you@example.com
|
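The loop above keeps each element that is strictly greater than its predecessor. The same behavior in a single comprehension (a sketch equivalent to the fixed loop):

```python
# Sketch: pairwise comparison via zip instead of index arithmetic.
def estritamente_crescente_zip(lista):
    return [b for a, b in zip(lista, lista[1:]) if b > a]
```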
0aaa9a9fc44222dce38e3bae24286b8df567512b
|
16e69196886254bc0fe9d8dc919ebcfa844f326a
|
/edc/subject/consent_old/forms/consent_catalogue_form.py
|
b9b24101a14789ffb046f04fa0cfa7ea7a8d8238
|
[] |
no_license
|
botswana-harvard/edc
|
b54edc305e7f4f6b193b4498c59080a902a6aeee
|
4f75336ff572babd39d431185677a65bece9e524
|
refs/heads/master
| 2021-01-23T19:15:08.070350
| 2015-12-07T09:36:41
| 2015-12-07T09:36:41
| 35,820,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
from edc.base.form.forms import BaseModelForm
class ConsentCatalogueForm (BaseModelForm):
def clean(self, consent_instance=None):
cleaned_data = self.cleaned_data
return cleaned_data
|
[
"ew2789@gmail.com"
] |
ew2789@gmail.com
|
a07c61061a32428c289217787b8315b3a1f0900a
|
ab2f1f18f64d9f2d49a4eea5c6a78ee1275662de
|
/trex_client/external_libs/scapy-2.3.1/python2/scapy/__init__.py
|
443b36753f7f543c88564c4b8abe0f9bd5fd84ad
|
[
"MIT"
] |
permissive
|
alwye/trex-http-proxy
|
d09d7fabe60add4a445e5ceb71f5f2a6d209e0a0
|
e30f5af03aaaad518b5def6e1804c3741dd5d0c6
|
refs/heads/master
| 2021-08-16T22:32:56.643253
| 2021-06-08T19:52:35
| 2021-06-08T19:52:35
| 60,734,923
| 4
| 3
|
MIT
| 2021-06-08T19:39:18
| 2016-06-08T22:27:35
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Scapy: create, send, sniff, dissect and manipulate network packets.
Usable either from an interactive console or as a Python library.
http://www.secdev.org/projects/scapy
"""
if __name__ == "__main__":
from scapy.main import interact
interact()
|
[
"alzverev@cisco.com"
] |
alzverev@cisco.com
|
e2478d9052aaf48da3b02682e1e7e55c5fca3f5b
|
db3caf3438942838bd1b948f57cdc9dc729b1ab0
|
/data_loader/multitask_classify_loader.py
|
e05c1d4aaef4e56139a290cb9221becd29aaf961
|
[] |
no_license
|
HELL-TO-HEAVEN/iswc2020_prodcls
|
711fb7506f86f887edcc712bc962d8c42259c9a0
|
ef83dcd6cc7055a2edbc7628ca76a11c0a09d4bd
|
refs/heads/master
| 2023-01-24T11:41:15.259735
| 2020-11-18T11:31:04
| 2020-11-18T11:31:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,378
|
py
|
# -*- coding: utf-8 -*-
"""
@author: Alex Yang
@contact: alex.yang0326@gmail.com
@file: multitask_classify_loader.py
@time: 2020/5/21 21:12
@desc:
"""
import os
import nltk
import numpy as np
from tensorflow.keras.utils import Sequence, to_categorical
from config import NLTK_DATA
from utils import pad_sequences_1d, get_bert_tokenizer
from .base_loader import load_data
class MultiTaskClsDataGenerator(Sequence):
def __init__(self,
data_type,
batch_size,
use_multi_task=True,
input_type='name_desc',
use_word_input=True,
word_vocab=None,
use_bert_input=False,
use_pair_input=False,
bert_model_type=None,
max_len=None,
cate1_vocab=None,
cate2_vocab=None,
cate3_vocab=None,
all_cate_vocab=None,
use_mask_for_cate2=False,
use_mask_for_cate3=False,
cate3_mask_type=None,
cate1_to_cate2=None,
cate_to_cate3=None,
train_on_cv=False,
cv_random_state=42,
cv_fold=5,
cv_index=0,
exchange_pair=False,
exchange_threshold=0.1,
cate3_count_dict=None,
use_pseudo_label=False,
pseudo_path=None,
pseudo_random_state=42,
pseudo_rate=0.1,
pseudo_index=0):
self.data_type = data_type
self.train_on_cv = train_on_cv
self.data = load_data(data_type, train_on_cv=train_on_cv, cv_random_state=cv_random_state,
cv_fold=cv_fold, cv_index=cv_index)
# data augmentation, only for training set
if self.data_type == 'train':
if use_pseudo_label:
self.add_pseudo_label(pseudo_path, pseudo_random_state, pseudo_rate, pseudo_index)
if exchange_pair:
self.exchange_pair_data(cate3_count_dict, exchange_threshold)
self.data_size = len(self.data['name'])
self.indices = np.arange(self.data_size)
if self.data_type == 'train':
            np.random.shuffle(self.indices)  # shuffle only the training set; validation and test order must stay fixed
self.batch_size = batch_size
self.steps = int(np.ceil(self.data_size / self.batch_size))
self.use_multi_task = use_multi_task
self.input_type = input_type
self.use_word_input = use_word_input
self.word_vocab = word_vocab
self.use_bert_input = use_bert_input
if use_word_input:
nltk.data.path.append(NLTK_DATA)
assert word_vocab is not None
self.word_vocab = word_vocab
elif use_bert_input:
assert bert_model_type is not None
assert max_len is not None
self.bert_model_type = bert_model_type
self.bert_tokenizer = get_bert_tokenizer(bert_model_type)
if input_type != 'name_desc':
assert not use_pair_input
self.use_pair_input = use_pair_input
self.max_len = max_len
self.cate1_vocab = cate1_vocab
self.cate2_vocab = cate2_vocab
self.cate3_vocab = cate3_vocab
self.all_cate_vocab = all_cate_vocab
if not use_multi_task:
assert self.all_cate_vocab is not None
else:
assert self.cate1_vocab is not None and self.cate2_vocab is not None and self.cate3_vocab is not None
self.use_mask_for_cate2 = use_mask_for_cate2
self.use_mask_for_cate3 = use_mask_for_cate3
self.cate3_mask_type = cate3_mask_type
if self.use_mask_for_cate2:
assert self.use_multi_task
assert cate1_to_cate2 is not None
self.cate1_to_cate2_matrix = self.create_mask_matrix(cate1_to_cate2, len(cate1_vocab), len(cate2_vocab))
else:
self.cate1_to_cate2_matrix = None
if self.use_mask_for_cate3:
assert self.use_multi_task
assert self.cate3_mask_type in ['cate1', 'cate2']
self.cate_to_cate3_matrix = self.create_mask_matrix(
cate_to_cate3,
len(cate1_vocab) if self.cate3_mask_type == 'cate1' else len(cate2_vocab),
len(cate3_vocab)
)
else:
self.cate_to_cate3_matrix = None
def exchange_pair_data(self, cate3_count_dict, exchange_threshold):
added_data = {
'id': [], 'name': [], 'desc': [], 'cate1': [], 'cate2': [], 'cate3': []
}
for i in range(len(self.data['id'])):
cate3 = self.data['cate3'][i]
if cate3 in cate3_count_dict and cate3_count_dict[cate3] <= exchange_threshold:
added_data['id'].append(self.data['id'][i])
# exchange name and desc
added_data['name'].append(self.data['desc'][i])
added_data['desc'].append(self.data['name'][i])
# keep the labels
added_data['cate1'].append(self.data['cate1'][i])
added_data['cate2'].append(self.data['cate2'][i])
added_data['cate3'].append(self.data['cate3'][i])
for key in added_data:
self.data[key].extend(added_data[key])
def add_pseudo_label(self, pseudo_path, pseudo_random_state=42, pseudo_rate=0.1, pseudo_index=0):
pseudo_label_data = {
'id': [], 'name': [], 'desc': [], 'cate1': [], 'cate2': [], 'cate3': []
}
with open(pseudo_path, 'r', encoding='utf8') as reader:
lines = reader.readlines()
pseudo_data_size = len(lines)
if pseudo_rate < 1:
np.random.seed(pseudo_random_state)
sample_pseudo_size = int(pseudo_data_size * pseudo_rate)
sample_indices = np.random.choice(pseudo_data_size, sample_pseudo_size,
replace=False)
elif pseudo_rate == 1:
sample_indices = range(pseudo_data_size)
else:
sample_pseudo_size = int(pseudo_data_size / pseudo_rate)
start = pseudo_index * sample_pseudo_size
end = (pseudo_index + 1) * sample_pseudo_size
np.random.seed(pseudo_random_state)
indices = np.random.permutation(pseudo_data_size)
sample_indices = indices[start: end]
for idx in sample_indices:
line = lines[idx]
text_id, name, desc, cate1, cate2, cate3 = line.strip().split('##')
pseudo_label_data['id'].append(text_id)
pseudo_label_data['name'].append(name)
pseudo_label_data['desc'].append(desc)
pseudo_label_data['cate1'].append(cate1)
pseudo_label_data['cate2'].append(cate2)
pseudo_label_data['cate3'].append(cate3)
for key in pseudo_label_data:
self.data[key].extend(pseudo_label_data[key])
def __len__(self):
return self.steps
def on_epoch_end(self):
np.random.shuffle(self.indices)
def __getitem__(self, index):
batch_index = self.indices[index * self.batch_size: (index + 1) * self.batch_size]
batch_input_ids, batch_input_masks, batch_input_types = [], [], []
        batch_cate1_ids, batch_cate2_ids, batch_cate3_ids = [], [], []  # labels for multi-task training
        batch_all_cate_ids = []  # labels for single-task training
for i in batch_index:
text = self.prepare_text(self.data['name'][i], self.data['desc'][i])
# prepare input
if self.use_word_input:
word_ids = [self.word_vocab.get(w, 1) for w in nltk.tokenize.word_tokenize(text)]
batch_input_ids.append(word_ids)
elif self.use_bert_input:
if self.use_pair_input:
try:
inputs = self.bert_tokenizer.encode_plus(text=text[0], text_pair=text[1],
max_length=self.max_len,
pad_to_max_length=True,
truncation_strategy='only_second')
except Exception:
inputs = self.bert_tokenizer.encode_plus(text=text[0], text_pair=text[1],
max_length=self.max_len,
pad_to_max_length=True,
truncation_strategy='longest_first')
else:
inputs = self.bert_tokenizer.encode_plus(text=text, max_length=self.max_len, pad_to_max_length=True)
batch_input_ids.append(inputs['input_ids'])
batch_input_masks.append(inputs['attention_mask'])
if 'token_type_ids' in inputs:
batch_input_types.append(inputs['token_type_ids'])
else:
raise ValueError('must use word or bert as input')
if self.data_type == 'test': # no labels for test data
continue
# prepare label for training or validation set
if not self.use_multi_task:
all_cate = f"{self.data['cate1'][i]}|{self.data['cate2'][i]}|{self.data['cate3'][i]}"
if (self.data_type == 'dev' or self.train_on_cv) and all_cate not in self.all_cate_vocab:
batch_all_cate_ids.append(0)
else:
batch_all_cate_ids.append(self.all_cate_vocab[all_cate])
else:
batch_cate1_ids.append(self.cate1_vocab[self.data['cate1'][i]])
if (self.data_type == 'dev' or self.train_on_cv) and self.data['cate2'][i] not in self.cate2_vocab:
batch_cate2_ids.append(0)
else:
batch_cate2_ids.append(self.cate2_vocab[self.data['cate2'][i]])
if (self.data_type == 'dev' or self.train_on_cv) and self.data['cate3'][i] not in self.cate3_vocab:
batch_cate3_ids.append(0)
else:
batch_cate3_ids.append(self.cate3_vocab[self.data['cate3'][i]])
# feature input
if self.use_word_input:
batch_inputs = pad_sequences_1d(batch_input_ids, max_len=self.max_len)
else:
batch_inputs = [np.array(batch_input_ids), np.array(batch_input_masks)]
if batch_input_types:
batch_inputs.append(np.array(batch_input_types))
if self.data_type == 'test': # no labels for test data
return batch_inputs
# label masking (only for training dataset)
if self.use_multi_task and self.data_type == 'train':
if self.use_mask_for_cate2:
if not isinstance(batch_inputs, list):
batch_inputs = [batch_inputs]
batch_inputs.append(self.cate1_to_cate2_matrix[np.array(batch_cate1_ids)])
if self.use_mask_for_cate3:
if not isinstance(batch_inputs, list):
batch_inputs = [batch_inputs]
if self.cate3_mask_type == 'cate1':
batch_inputs.append(self.cate_to_cate3_matrix[np.array(batch_cate1_ids)])
elif self.cate3_mask_type == 'cate2':
batch_inputs.append(self.cate_to_cate3_matrix[np.array(batch_cate2_ids)])
else:
raise ValueError(f'`cate3_mask_type` not understood')
# ground truth labels
if not self.use_multi_task:
batch_labels = to_categorical(batch_all_cate_ids, num_classes=len(self.all_cate_vocab))
else:
batch_labels = [
to_categorical(batch_cate1_ids, num_classes=len(self.cate1_vocab)),
to_categorical(batch_cate2_ids, num_classes=len(self.cate2_vocab)),
to_categorical(batch_cate3_ids, num_classes=len(self.cate3_vocab))
]
return batch_inputs, batch_labels
def prepare_text(self, name, desc):
if self.input_type == 'name':
return name
elif self.input_type == 'desc':
if not desc:
return name
else:
return desc
elif self.input_type == 'name_desc':
if desc:
if self.use_pair_input:
return name, desc
else:
return f"{name} {desc}"
else:
if self.use_pair_input:
return name, name
else:
return name
else:
raise ValueError(f'`input_type` not understood: {self.input_type}')
@staticmethod
def create_mask_matrix(cate1_to_cate2, n_cate1, n_cate2):
mask_matrix = np.zeros(shape=(n_cate1, n_cate2))
for cate1 in cate1_to_cate2:
for cate2 in cate1_to_cate2[cate1]:
mask_matrix[cate1][cate2] = 1
return mask_matrix
|
[
"2041462766@qq.com"
] |
2041462766@qq.com
|
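The `create_mask_matrix` helper above encodes a parent-to-child category constraint as a 0/1 matrix that is then row-indexed by a batch of parent ids. A tiny runnable illustration with synthetic ids (the mapping here is invented for demonstration):

```python
# Sketch: the mask-matrix idea with synthetic category ids.
import numpy as np

cate1_to_cate2 = {0: [0, 1], 1: [2]}   # invented parent id -> child ids
mask = np.zeros((2, 3))
for parent, children in cate1_to_cate2.items():
    for child in children:
        mask[parent][child] = 1

batch_parent_ids = np.array([1, 0])
print(mask[batch_parent_ids])          # one (n_cate2,) mask row per sample
```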
9329443d1478f64209887aa32825726f941e27ba
|
1b36425f798f484eda964b10a5ad72b37b4da916
|
/posthog/migrations/0222_fix_deleted_primary_dashboards.py
|
ef8d4a6cc13d5483ef0aa1cd227c44747efb0d4c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dorucioclea/posthog
|
0408baa2a7ae98e5bea352c516f741ddc17c0a3e
|
8848981baf237117fb22d28af0770a0165881423
|
refs/heads/master
| 2023-01-23T11:01:57.942146
| 2023-01-13T09:03:00
| 2023-01-13T09:03:00
| 241,222,000
| 0
| 0
|
MIT
| 2020-02-17T22:34:37
| 2020-02-17T22:34:36
| null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
import structlog
from django.db import connection, migrations
from django.db.models import Q
# 0220_set_primary_dashboard set the primary dashboard for teams, but
# it didn't account for deleted dashboards. This migration fixes projects
# that have a primary dashboard set to a deleted dashboard.
def fix_for_deleted_primary_dashboards(apps, _):
logger = structlog.get_logger(__name__)
logger.info("starting 0222_fix_deleted_primary_dashboards")
Team = apps.get_model("posthog", "Team")
expected_team_dashboards = []
with connection.cursor() as cursor:
# Fetch a list of teams and the id of the dashboard that should be set as the primary dashboard
# The primary dashboard should be the oldest pinned dashboard, if one exists
# or the oldest dashboard, if no pinned dashboards exist
# Notes:
# - We use id as a proxy for dashboard age because dashboards use a simple incrementing id
# - We ignore teams that already have a primary dashboard set
# - Remove deleted dashboards
cursor.execute(
"""
SELECT posthog_team.id,
COALESCE(
MIN(
CASE
WHEN posthog_dashboard.pinned THEN posthog_dashboard.id
ELSE NULL
END
),
MIN(
CASE
WHEN NOT posthog_dashboard.pinned THEN posthog_dashboard.id
ELSE NULL
END
)
) AS primary_dashboard_id
FROM posthog_team
INNER JOIN posthog_dashboard ON posthog_dashboard.team_id = posthog_team.id
WHERE NOT posthog_dashboard.deleted
GROUP BY posthog_team.id
"""
)
expected_team_dashboards = cursor.fetchall()
team_to_primary_dashboard = dict(expected_team_dashboards)
teams_to_update = Team.objects.filter(Q(primary_dashboard__deleted=True) | Q(primary_dashboard__isnull=True)).only(
"id", "primary_dashboard_id"
)
for team in teams_to_update:
team.primary_dashboard_id = team_to_primary_dashboard.get(team.id, None)
Team.objects.bulk_update(teams_to_update, ["primary_dashboard_id"], batch_size=500)
# Because of the nature of this migration, there's no way to reverse it without potentially destroying customer data
# However, we still need a reverse function, so that we can rollback other migrations
def reverse(apps, _):
pass
class Migration(migrations.Migration):
atomic = False
dependencies = [
("posthog", "0221_add_activity_log_model"),
]
operations = [migrations.RunPython(fix_for_deleted_primary_dashboards, reverse)]
|
[
"noreply@github.com"
] |
dorucioclea.noreply@github.com
|
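The SQL above picks, per team, the oldest pinned dashboard and falls back to the oldest unpinned one via `COALESCE` over two conditional `MIN`s. The same rule sketched in plain Python for clarity (`dashboards` is a hypothetical list of `(id, pinned)` pairs):

```python
# Sketch: the primary-dashboard selection rule from the SQL, in Python.
def pick_primary(dashboards):
    pinned = [d_id for d_id, is_pinned in dashboards if is_pinned]
    unpinned = [d_id for d_id, is_pinned in dashboards if not is_pinned]
    if pinned:
        return min(pinned)    # oldest pinned (ids increment over time)
    if unpinned:
        return min(unpinned)  # else oldest dashboard overall
    return None
```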
ce041d314e8b73e5a06532fc7a6ea5a036ac0a5b
|
6547747b93196f4f4d74248de81e943cdd253d3e
|
/projects/views.py
|
6e9a7b56a0beef4fee5c92c2dfd926ef3ebfe757
|
[] |
no_license
|
cyberkuroneko/PCN_CODEV_project
|
1551fa2768dc1a08f452636c42191e380ea602f6
|
66e5004ad045ad9d83f880affbf6cfa60eab2706
|
refs/heads/master
| 2022-12-18T08:55:07.879527
| 2020-09-12T07:06:43
| 2020-09-12T07:06:43
| 294,866,626
| 0
| 0
| null | 2020-09-12T04:03:16
| 2020-09-12T04:03:16
| null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from django.views.generic import ListView
from .models import Projects
# Create your views here.
class ProjectsHomePageView(ListView):
template_name = 'projects_home.html'
model = Projects
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
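A hypothetical URLconf entry wiring up the view above (the route and module layout are assumptions for illustration):

```python
# Sketch: urls.py for the projects app.
from django.urls import path

from .views import ProjectsHomePageView

urlpatterns = [
    path('', ProjectsHomePageView.as_view(), name='projects_home'),
]
```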
de03e9aca5ea4d28dfd81d74ba7ff87890be777f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03015/s226920110.py
|
5c2c5ed57fb8a4d7c4a1620946f7cbd6703e4a3e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# Count pairs (a, b) with a + b == a XOR b (equivalently a AND b == 0) and
# a + b <= L, where L is given as a binary string; answer modulo 10**9 + 7.
MOD = 10**9+7
L = input()
N = len(L)
ans = 0
c = 0  # number of '1' bits of L seen so far
for i in range(N):
    if L[i] == '1':
        # Drop bit i of a|b below L: the N-i-1 lower bits are then free
        # ((0,0), (0,1) or (1,0): 3 ways each), and every earlier '1' bit
        # goes to either a or b (2 ways each).
        ans += pow(3,N-i-1,MOD)*pow(2,c,MOD)
        ans %= MOD
        c += 1
# The remaining case is a|b == L exactly: each '1' bit goes to a or b.
ans += pow(2,c,MOD)
ans %= MOD
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
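A brute-force cross-check of the counting formula above for small inputs (a sketch, assuming the task is to count pairs `(a, b)` with `a + b == a ^ b` and `a + b <= L`):

```python
# Sketch: O(L^2) reference count to sanity-check the formula on small L.
def brute(L_value):
    return sum(1 for a in range(L_value + 1)
                 for b in range(L_value + 1 - a)
               if a + b == a ^ b)

# e.g. brute(int('110', 2)) should match the fast answer for L = '110'.
```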
3aa2ceaf8175c3dc9b6b0f5c72ae1b49efbe0cc4
|
597c4f48332251552a602122bb3d325bc43a9d7f
|
/chapter11_dynamic_programming/04_recursive_memo/02_memo_recursive.py
|
74fcfce3fb9665a82e4dde4dd6df3caee6c34d14
|
[] |
no_license
|
Kyeongrok/python_algorithm
|
46de1909befc7b17766a57090a7036886361fd06
|
f0cdc221d7908f26572ae67b5c95b12ade007ccd
|
refs/heads/master
| 2023-07-11T03:23:05.782478
| 2023-06-22T06:32:31
| 2023-06-22T06:32:31
| 147,303,654
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
n = 10
memo = [0] * n
print(memo, len(memo))
def checkMemo(memo, n):
    # Fill memo[0..n-1] so that memo[i] == i, recursing down from n.
    if n == 0:
        return memo
    n = n - 1
    memo[n] = n
    return checkMemo(memo, n)
result = checkMemo(memo, n)
print(result)
|
[
"oceanfog1@gmail.com"
] |
oceanfog1@gmail.com
|
359abbbcf3e86f4744a33647da97ba7bd44c27b7
|
6be845bf70a8efaf390da28c811c52b35bf9e475
|
/windows/Resources/Python/Core/Lib/hotshot/log.py
|
a8655b0a99f0d251de073b6e4842948aa55050c8
|
[] |
no_license
|
kyeremalprime/ms
|
228194910bf2ed314d0492bc423cc687144bb459
|
47eea098ec735b2173ff0d4e5c493cb8f04e705d
|
refs/heads/master
| 2020-12-30T15:54:17.843982
| 2017-05-14T07:32:01
| 2017-05-14T07:32:01
| 91,180,709
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,436
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: log.py
import _hotshot
import os.path
import parser
import symbol
from _hotshot import WHAT_ENTER, WHAT_EXIT, WHAT_LINENO, WHAT_DEFINE_FILE, WHAT_DEFINE_FUNC, WHAT_ADD_INFO
__all__ = [
'LogReader', 'ENTER', 'EXIT', 'LINE']
ENTER = WHAT_ENTER
EXIT = WHAT_EXIT
LINE = WHAT_LINENO
class LogReader:
def __init__(self, logfn):
self._filemap = {}
self._funcmap = {}
self._reader = _hotshot.logreader(logfn)
self._nextitem = self._reader.next
self._info = self._reader.info
if 'current-directory' in self._info:
self.cwd = self._info['current-directory']
else:
self.cwd = None
self._stack = []
self._append = self._stack.append
self._pop = self._stack.pop
return
def close(self):
self._reader.close()
def fileno(self):
"""Return the file descriptor of the log reader's log file."""
return self._reader.fileno()
def addinfo(self, key, value):
"""This method is called for each additional ADD_INFO record.
This can be overridden by applications that want to receive
these events. The default implementation does not need to be
called by alternate implementations.
The initial set of ADD_INFO records do not pass through this
mechanism; this is only needed to receive notification when
new values are added. Subclasses can inspect self._info after
calling LogReader.__init__().
"""
pass
def get_filename(self, fileno):
try:
return self._filemap[fileno]
except KeyError:
raise ValueError, 'unknown fileno'
def get_filenames(self):
return self._filemap.values()
def get_fileno(self, filename):
filename = os.path.normcase(os.path.normpath(filename))
for fileno, name in self._filemap.items():
if name == filename:
return fileno
raise ValueError, 'unknown filename'
def get_funcname(self, fileno, lineno):
try:
return self._funcmap[fileno, lineno]
except KeyError:
raise ValueError, 'unknown function location'
def next(self, index=0):
while 1:
what, tdelta, fileno, lineno = self._nextitem()
if what == WHAT_ENTER:
filename, funcname = self._decode_location(fileno, lineno)
t = (filename, lineno, funcname)
self._append(t)
return (
what, t, tdelta)
if what == WHAT_EXIT:
try:
return (
what, self._pop(), tdelta)
except IndexError:
raise StopIteration
if what == WHAT_LINENO:
filename, firstlineno, funcname = self._stack[-1]
return (
what, (filename, lineno, funcname), tdelta)
if what == WHAT_DEFINE_FILE:
filename = os.path.normcase(os.path.normpath(tdelta))
self._filemap[fileno] = filename
elif what == WHAT_DEFINE_FUNC:
filename = self._filemap[fileno]
self._funcmap[fileno, lineno] = (filename, tdelta)
elif what == WHAT_ADD_INFO:
if tdelta == 'current-directory':
self.cwd = lineno
self.addinfo(tdelta, lineno)
else:
raise ValueError, 'unknown event type'
def __iter__(self):
return self
def _decode_location(self, fileno, lineno):
try:
return self._funcmap[fileno, lineno]
except KeyError:
if self._loadfile(fileno):
filename = funcname = None
try:
filename, funcname = self._funcmap[fileno, lineno]
except KeyError:
filename = self._filemap.get(fileno)
funcname = None
self._funcmap[fileno, lineno] = (filename, funcname)
return (
filename, funcname)
def _loadfile(self, fileno):
try:
filename = self._filemap[fileno]
except KeyError:
print 'Could not identify fileId', fileno
return 1
if filename is None:
return 1
else:
absname = os.path.normcase(os.path.join(self.cwd, filename))
try:
fp = open(absname)
except IOError:
return
st = parser.suite(fp.read())
fp.close()
funcdef = symbol.funcdef
lambdef = symbol.lambdef
stack = [
st.totuple(1)]
while stack:
tree = stack.pop()
try:
sym = tree[0]
except (IndexError, TypeError):
continue
if sym == funcdef:
self._funcmap[fileno, tree[2][2]] = (
filename, tree[2][1])
elif sym == lambdef:
self._funcmap[fileno, tree[1][2]] = (
filename, '<lambda>')
stack.extend(list(tree[1:]))
return
|
[
"kyeremalprime@gmail.com"
] |
kyeremalprime@gmail.com
|
620f06ddbdef017d8c36b66631587e5dfd47c971
|
e322d01555aebbcf9f23a68fa9160e75d4397969
|
/YouCompleteMe/third_party/ycmd/third_party/racerd/scripts/run_wrk_benchmarks.py
|
85985137ca89c3ec08b6f907c789b7fd518d499d
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
permissive
|
liqiang0330/i3ForDebian9
|
3b2bb5ce104f25cadab7a57cdc7096fadeb4a9ef
|
37a63bdaf18dab847e57d328cdcb678668ab6207
|
refs/heads/master
| 2022-10-25T13:30:26.723690
| 2018-03-17T05:22:55
| 2018-03-17T05:22:55
| 162,018,419
| 1
| 1
|
Apache-2.0
| 2022-10-08T20:30:23
| 2018-12-16T16:11:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
#!/usr/bin/env python
# vim: ts=4 sw=4 et cc=80 tw=79
import subprocess
import tempfile
import os
from os import path
# Support overriding RACERD. Assume racerd is on path by default.
if os.environ.has_key('RACERD'):
RACERD = os.environ['RACERD']
else:
RACERD = 'racerd'
def get_scripts_dir():
"""
Return absolute path of scripts directory
"""
return path.abspath(path.dirname(__file__))
def write_wrk_script(completion_string):
"""
Read the wrk template script, replace the completion string and column
number, write a temporary file, and return the file path
"""
# Read the file
tpl_file = open(get_wrk_template_path(), 'r')
tpl = tpl_file.read()
# Replace template params
tpl = tpl.replace('[[[completion]]]', completion_string)
tpl = tpl.replace('[[[column]]]', str(len(completion_string)))
# Write temp file
lua_file_fd, lua_file_path = tempfile.mkstemp(suffix='.lua', text=True)
lua_file = os.fdopen(lua_file_fd, 'w')
lua_file.write(tpl)
lua_file.close()
return lua_file_path
def get_wrk_template_path():
"""
A template lua script for wrk is used to generate completion requests. This
function returns the path to the template script
"""
return path.join(get_scripts_dir(), 'wrk_completion_bench.lua.tpl')
def start_racerd():
"""
Spawn a racerd process on random port. Returns the process and host string.
# Example
(process, host) = start_racerd()
"""
process = subprocess.Popen(
[RACERD, 'serve', '--secret-file=hah', '--port=0'],
stdout = subprocess.PIPE
)
racerd_listen_line = process.stdout.readline()
racerd_host = racerd_listen_line.split(' ')[3]
return (process, racerd_host.strip())
def run_wrk(script_path, host):
"""
Spawn a `wrk` process with 1 thread, 1 connection, and run for 1 second.
These should probably be changed to environment variables in the future.
"""
base_url = 'http://' + host
output = subprocess.check_output(
['wrk', '-t1', '-c1', '-d1s', '-s', script_path, base_url]
)
lines = output.splitlines()
    # The line at index 3 holds the latency stats; its second whitespace-delimited column is the average latency.
latency_line = lines[3]
latency_avg = latency_line.split()[1]
return latency_avg
def print_report(completion_str, latency_avg):
"""
Print a report for given run
"""
print 'Completion for "' + completion_str + '" averaged ' + latency_avg
def bench_completion(completion_str):
"""
Start racerd and run wrk for a given completion string
"""
# Write wrk script for this completion
wrk_script_path = write_wrk_script(completion_str)
# Start racerd and run wrk
process, host = start_racerd()
latency_avg = run_wrk(wrk_script_path, host)
# Print a report
print_report(completion_str, latency_avg)
# cleanup
process.terminate()
os.remove(wrk_script_path)
completions = [
'use ::std::',
'use ::std::io::',
'use ::std::path::',
'use ::std::path::P'
]
for c in completions:
bench_completion(c)
|
[
"yuan705791627@gmail.com"
] |
yuan705791627@gmail.com
|
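`run_wrk` above pulls the average latency out of wrk's output by fixed line and column position. A sketch of a less positional parse (regex on the `Latency` line; illustrative only):

```python
# Sketch: locate wrk's "Latency" line by name rather than by position.
import re

def parse_latency_avg(wrk_output):
    m = re.search(r'Latency\s+(\S+)', wrk_output)
    return m.group(1) if m else None
```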
933df71bf9732ccc8ea0ee553ba02320d8126c6e
|
9a8416deb357d9d0714c3b09dc2c70e8fffa05e7
|
/Collect/MOD12/__init__.py
|
5eac4f4f7da1cb1c2bcd13bde5033ead4cf5d375
|
[
"Apache-2.0"
] |
permissive
|
ali1100/wa
|
055d0989bb414e443319ee996a8049b5051a9e74
|
700e5014533c45f38a245c3abdeacc537cb307bc
|
refs/heads/master
| 2021-05-13T12:03:02.161649
| 2018-09-19T06:51:41
| 2018-09-19T06:51:41
| 117,149,867
| 0
| 1
|
Apache-2.0
| 2018-09-19T06:38:19
| 2018-01-11T20:29:13
|
Python
|
UTF-8
|
Python
| false
| false
| 798
|
py
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD12
Description:
This module downloads MOD12 LC data from
http://e4ftl01.cr.usgs.gov/. Use the MOD12.LC_yearly function to
download and create yearly LC images in Gtiff format.
The data is available from 2001-01-01 until 2014-01-01.
Examples:
from wa.Collect import MOD12
MOD12.LC_yearly(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20',
                latlim=[41, 45], lonlim=[-8, -5])
"""
from .LC_yearly import main as LC_yearly
__all__ = ['LC_yearly']
__version__ = '0.1'
|
[
"timhessels@hotmail.com"
] |
timhessels@hotmail.com
|
ba0f5ce1975bf1f8c6ce378a443255fcb0443019
|
79f42fd0de70f0fea931af610faeca3205fd54d4
|
/base_lib/ChartDirector/pythondemo_cgi/finance.py
|
932f4132e25cedc156bfb1d23e9cb27aa6704e2e
|
[
"IJG"
] |
permissive
|
fanwen390922198/ceph_pressure_test
|
a900a6dc20473ae3ff1241188ed012d22de2eace
|
b6a5b6d324e935915090e791d9722d921f659b26
|
refs/heads/main
| 2021-08-27T16:26:57.500359
| 2021-06-02T05:18:39
| 2021-06-02T05:18:39
| 115,672,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,765
|
py
|
#!/usr/bin/python
from FinanceChart import *
# Create a finance chart demo containing 100 days of data
noOfDays = 100
# To compute moving averages starting from the first day, we need to get extra data points before
# the first day
extraDays = 30
# In this example, we use a random number generator utility to simulate the data. We set up the
# random table to create 6 cols x (noOfDays + extraDays) rows, using 9 as the seed.
rantable = RanTable(9, 6, noOfDays + extraDays)
# Set the 1st col to be the timeStamp, starting from Sep 4, 2002, with each row representing one
# day, and counting week days only (jump over Sat and Sun)
rantable.setDateCol(0, chartTime(2002, 9, 4), 86400, 1)
# Set the 2nd, 3rd, 4th and 5th columns to be high, low, open and close data. The open value starts
# from 100, and the daily change is random from -5 to 5.
rantable.setHLOCCols(1, 100, -5, 5)
# Set the 6th column as the vol data from 50 to 250 million
rantable.setCol(5, 50000000, 250000000)
# Now we read the data from the table into arrays
timeStamps = rantable.getCol(0)
highData = rantable.getCol(1)
lowData = rantable.getCol(2)
openData = rantable.getCol(3)
closeData = rantable.getCol(4)
volData = rantable.getCol(5)
# Create a FinanceChart object of width 640 pixels
c = FinanceChart(640)
# Add a title to the chart
c.addTitle("Finance Chart Demonstration")
# Set the data into the finance chart object
c.setData(timeStamps, highData, lowData, openData, closeData, volData, extraDays)
# Add the main chart with 240 pixels in height
c.addMainChart(240)
# Add a 5 period simple moving average to the main chart, using brown color
c.addSimpleMovingAvg(5, 0x663300)
# Add a 20 period simple moving average to the main chart, using purple color
c.addSimpleMovingAvg(20, 0x9900ff)
# Add HLOC symbols to the main chart, using green/red for up/down days
c.addHLOC(0x008000, 0xcc0000)
# Add 20 days bollinger band to the main chart, using light blue (9999ff) as the border and
# semi-transparent blue (c06666ff) as the fill color
c.addBollingerBand(20, 2, 0x9999ff, 0xc06666ff)
# Add a 75 pixels volume bars sub-chart to the bottom of the main chart, using green/red/grey for
# up/down/flat days
c.addVolBars(75, 0x99ff99, 0xff9999, 0x808080)
# Append a 14-days RSI indicator chart (75 pixels high) after the main chart. The main RSI line is
# purple (800080). Set threshold region to +/- 20 (that is, RSI = 50 +/- 25). The upper/lower
# threshold regions will be filled with red (ff0000)/blue (0000ff).
c.addRSI(75, 14, 0x800080, 20, 0xff0000, 0x0000ff)
# Append a 12-days momentum indicator chart (75 pixels high) using blue (0000ff) color.
c.addMomentum(75, 12, 0x0000ff)
# Output the chart
print("Content-type: image/png\n")
binaryPrint(c.makeChart2(PNG))
|
[
"fanwen@sscc.com"
] |
fanwen@sscc.com
|
fa46f790dc1f979f39ed1aee253c88fa73042a0a
|
b8f160d2e8c09d5fdce7171924765b74edb97141
|
/page/gouwuche_page.py
|
c0b3a147f1fc40bfc6b6c43f52c26d943ad66baf
|
[] |
no_license
|
danyubiao/mryx
|
7bf3e0f2bae42ef2fe4c9238569194c767b3f754
|
2325c7854c5625babdb51b5c5e40fa860813a400
|
refs/heads/master
| 2023-01-05T01:17:28.259444
| 2020-10-20T02:02:20
| 2020-10-20T02:02:20
| 304,574,484
| 0
| 6
| null | 2020-10-20T02:02:21
| 2020-10-16T09:04:01
|
Python
|
UTF-8
|
Python
| false
| false
| 933
|
py
|
# @Time : 2020/10/19 11:00
# @Author : 白光华
# @Email : 1277987895@gmail.com
# @File : gouwuche_page
# @Project : app testing
"""Page-object locators for the shopping cart."""
from appium import webdriver
from time import sleep
from appium.webdriver.common.mobileby import MobileBy as By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import TimeoutException
from appium.webdriver.common.touch_action import TouchAction
from model.driver import driver
from page.base_page import BasePage
class GouWu(BasePage):
    # locator for the shopping-cart tab
    gouwuche_location = (By.ID, "cn.missfresh.application:id/cartTab")
    # locator for the element used in assertions
    duanyan_gouwu = (By.ID, "cn.missfresh.application:id/tv_delete")

    # tap the shopping cart
    def click_dianji(self):
        self.driver.find_element(*self.gouwuche_location).click()

    def duanyan_gw(self):
        text = self.text(self.duanyan_gouwu)
|
[
"you@example.com"
] |
you@example.com
|
fdc5040420ae8c8fccd6fcbcfd6571e629ec3fe2
|
3a1fea0fdd27baa6b63941f71b29eb04061678c6
|
/src/ch08/instructions/references/ArrayLength.py
|
1e62ebf2864f856112a9c4c1871c01f914b060df
|
[] |
no_license
|
sumerzhang/JVMByPython
|
56a7a896e43b7a5020559c0740ebe61d608a9f2a
|
1554cf62f47a2c6eb10fe09c7216518416bb65bc
|
refs/heads/master
| 2022-12-02T17:21:11.020486
| 2020-08-18T06:57:10
| 2020-08-18T06:57:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: ArrayLength.py
@time: 2019/9/17 21:31
@desc: The arraylength instruction fetches the length of an array. It takes a
       single operand: the array reference popped from the top of the operand stack.
"""
from ch08.instructions.base.Instruction import NoOperandsInstruction
from ch08.rtda.Frame import Frame
class ARRAY_LENGTH(NoOperandsInstruction):
def execute(self, frame: Frame):
stack = frame.operand_stack
arr_ref = stack.pop_ref()
        # if the array reference is null, raise NullPointerException
        if arr_ref is None:
            raise RuntimeError("java.lang.NullPointerException")
        # otherwise fetch the array length and push it onto the operand stack
        arr_len = arr_ref.array_length()
        stack.push_numeric(arr_len)
|
[
"huruifeng1202@163.com"
] |
huruifeng1202@163.com
|
38475fa87788a196aa49972f1a1cf47a600c69cf
|
000c243b4c30bd089867f73ca1bcfede1c3ef801
|
/catkin_ws/build/mapviz/mapviz/cmake/mapviz-genmsg-context.py
|
a2f6c76c8fcf220d76e8e24263350aab52a67984
|
[] |
no_license
|
dangkhoa1210/SLAM-AND-NAVIGATION-FOR-MOBILE-ROBOT-OUTDOOR-INDOOR-
|
b4d9bf2757d839d9766d512c2272731300320925
|
7273ea9e966353440d3993dcba112bc0a2262b98
|
refs/heads/master
| 2023-07-15T14:07:17.123812
| 2021-09-02T10:12:30
| 2021-09-02T10:12:30
| 402,361,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = ""
services_str = "/home/khoa/catkin_ws/src/mapviz/mapviz/srv/AddMapvizDisplay.srv"
pkg_name = "mapviz"
dependencies_str = "marti_common_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "marti_common_msgs;/home/khoa/catkin_ws/src/marti_messages/marti_common_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"dangkhoaphamdang1210@gmail.com"
] |
dangkhoaphamdang1210@gmail.com
|
905e03a56f2de81f5788eb73a86993424cd54536
|
4a9e5f6f2bd6f8768533bc250b7f0bb8efb9620c
|
/hackerearth/ALGO_Semi/A.py
|
ca9b80f57b8303df7bdccf36bc10c081d92073c9
|
[
"MIT"
] |
permissive
|
akshaynagpal/competitive-programming
|
28c9a6070d82ce9140a7173ddbb0080ac5b3e1fc
|
0a54f43e3e0f2135c9c952400c5a628244b667d1
|
refs/heads/master
| 2021-01-18T22:40:59.078826
| 2017-04-22T18:44:25
| 2017-04-22T18:44:25
| 41,152,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
import itertools
import sys
num_tests = int(raw_input())
for i in range(num_tests):
x, y, z = raw_input().split()
a = int(x)
b = int(y)
q = int(z)
q_list = [int(i) for i in raw_input().split()]
add = a + b
sub = max(a,b) - min(a,b)
adder_list = [a,b,add,sub]
for query in q_list:
if query < min(a,b):
sys.stdout.write('0')
elif query in adder_list:
sys.stdout.write('1')
        else:
            # Try sums of 2..6 values drawn with repetition from adder_list.
            for k in range(2, 7):
                sums = [sum(combo) for combo in
                        itertools.combinations_with_replacement(adder_list, k)]
                if query in sums:
                    sys.stdout.write('1')
                    break
            else:
                # no combination of up to six terms reaches the query value
                sys.stdout.write('0')
print ""
|
[
"akshay2626@gmail.com"
] |
akshay2626@gmail.com
|
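The per-query search above regenerates the combination sums for every query. A sketch of a variant that precomputes all reachable sums once per test case and answers each query with a set lookup (an optimization idea, not the original code):

```python
# Sketch: precompute every sum of 1..6 values from adder_list once.
import itertools

def reachable_sums(adder_list, max_terms=6):
    sums = set()
    for k in range(1, max_terms + 1):
        for combo in itertools.combinations_with_replacement(adder_list, k):
            sums.add(sum(combo))
    return sums
```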
bf6df2386f449624250a877f305d84c85d383dc2
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/projects/sfb532demo/assess_shell/lcc_table_hf_RFEM_.py
|
245d825388e379a9febd6ad314699c3f43a77dc7
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 56,520
|
py
|
'''
Created on Jun 29, 2010
@author: alexander
'''
from enthought.traits.api import \
HasTraits, Directory, List, Int, Float, Any, Enum, \
on_trait_change, File, Constant, Instance, Trait, \
Array, Str, Property, cached_property, WeakRef, \
Dict, Button, Color, Bool, DelegatesTo, Callable
from enthought.util.home_directory import \
get_home_directory
from enthought.traits.ui.api import \
View, Item, DirectoryEditor, TabularEditor, HSplit, Tabbed, VGroup, \
TableEditor, Group, ListEditor, VSplit, HSplit, VGroup, HGroup, Spring, \
Include
from enthought.mayavi import \
mlab
from enthought.traits.ui.table_column import \
ObjectColumn
from enthought.traits.ui.menu import \
OKButton, CancelButton
from enthought.traits.ui.tabular_adapter \
import TabularAdapter
from numpy import \
array, loadtxt, arange, sqrt, zeros, arctan, sin, cos, ones_like, \
vstack, savetxt, hstack, argsort, fromstring, zeros_like, shape, \
copy, c_, newaxis, argmax, where, sqrt, frompyfunc, sum, \
ones, transpose, shape, append, argmin, fabs, identity, unique, vdot, \
max as ndmax, min as ndmin
from ls_table_hf import \
LSTable, ULS, SLS
from math import pi
from string import split
import os
#from scipy.io import read_array
from promod.simdb import \
SimDB
import pickle
import string
import csv
from os.path import join
# Access to the top level directory of the database
#
simdb = SimDB()
class LC( HasTraits ):
'''Loading case class
'''
# path to the directory containing the state data files
#
data_dir = Directory
# name of the file containing the hinge forces
# or the displacements
#
file_name = Str( input = True )
plt_export = Property
def _get_plt_export( self ):
basename = 'hf_' + self.name + '_'
return os.path.join( data_dir, basename )
    # data filter (used to hide unwanted values, e.g. high singularities etc.)
#
data_filter = Callable( input = True )
def _data_filter_default( self ):
return lambda x: x # - do nothing by default
# name of the loading case
#
name = Str( input = True )
# category of the loading case
#
category = Enum( 'dead-load', 'additional dead-load', 'imposed-load', input = True )
# list of keys specifying the names of the loading cases
# that can not exist at the same time, i.e. which are exclusive to each other
#
exclusive_to = List( Str, input = True )
def _exclusive_to_default( self ):
return []
# combination factors (need to be defined in case of imposed loads)
#
psi_0 = Float( input = True )
psi_1 = Float( input = True )
psi_2 = Float( input = True )
# security factors ULS
#
gamma_fav = Float( input = True )
def _gamma_fav_default( self ):
if self.category == 'dead-load':
return 1.00
if self.category == 'additional dead-load':
return 0.00
if self.category == 'imposed-load':
return 0.00
gamma_unf = Float( input = True )
def _gamma_unf_default( self ):
if self.category == 'dead-load':
return 1.35
if self.category == 'additional dead-load':
return 1.35
if self.category == 'imposed-load':
return 1.50
# security factors SLS:
# (used to distinguish combinations where imposed-loads
# or additional-dead-loads are favorable or unfavorable.)
#
gamma_fav_SLS = Float( input = True )
def _gamma_fav_SLS_default( self ):
if self.category == 'dead-load':
return 1.00
elif self.category == 'additional dead-load' or \
self.category == 'imposed-load':
return 0.00
gamma_unf_SLS = Float( input = True )
def _gamma_unf_SLS_default( self ):
return 1.00
def _read_data( self, file_name ):
        '''read state data and geo data from csv-file using ';' as field delimiter and ' ' (blank)
        as text delimiter.
'''
print '*** read state data from file: %s ***' % ( file_name )
input_arr = loadtxt( file_name , delimiter = ';', skiprows = 2, usecols = ( 2, 3, 4, 8, 9 ) )
# hinge position [m]
#
X_hf = input_arr[:, 0][:, None]
Y_hf = input_arr[:, 1][:, None]
Z_hf = input_arr[:, 2][:, None]
#INPUT Matthias - RFEM
X_hf += 3.5
Y_hf += 3.5
# local hinge forces [kN]
#
N_ip = input_arr[:, 3][:, None]
V_op = input_arr[:, 4][:, None]
V_ip = 0. * V_op
return {'X_hf' : X_hf, 'Y_hf' : Y_hf, 'Z_hf' : Z_hf,
'N_ip' : N_ip, 'V_ip' : V_ip, 'V_op' : V_op,
}
# original data (before filtering)
#
data_orig = Property( Dict, depends_on = 'file_name' )
@cached_property
def _get_data_orig( self ):
return self._read_data( self.file_name )
# data (after filtering)
#
data_dict = Property( Dict, depends_on = 'file_name, data_filter' )
@cached_property
def _get_data_dict( self ):
d = {}
for k, arr in self.data_orig.items():
d[k] = self.data_filter( arr )
return d
#---------------------------------------------
# NEEDS TO BE CHANGED FOR DISPLACEMENT OR FORCES DEPENDING ON OPTION
#---------------------------------------------
# use this line to evaluate the displacement files using LCCTable as combination tool
# (for the files with spring elements the displacement in the .csv-files is only given for U_x, U_y and U_z)
# if do == hf (hinge force evaluation)
#
sr_columns = List( ['N_ip', 'V_ip', 'V_op'] )
# sr_columns = List( ['U_x', 'U_y', 'U_z', 'dU_y', 'dU_z'] )
geo_columns = List( ['X_hf', 'Y_hf', 'Z_hf'] )
# if do == dp (edge displacement evaluation)
#
# sr_columns = List( ['U_x', 'U_y', 'U_z'] )
# geo_columns = List( ['X_u', 'Y_u', 'Z_u'] )
sr_arr = Property( Array )
def _get_sr_arr( self ):
'''return the stress resultants of the loading case
as stack of all sr-column arrays.
'''
data_dict = self.data_dict
return hstack( [ data_dict[ sr_key ] for sr_key in self.sr_columns ] )
geo_data_dict = Property( Array )
def _get_geo_data_dict( self ):
'''Dict of coords as sub-Dict of read in data dict
'''
data_dict = self.data_dict
geo_data_dict = {}
for geo_key in self.geo_columns:
geo_data_dict[ geo_key ] = data_dict[ geo_key ]
return geo_data_dict
def _read_data_u( self, file_name_u ):
        '''read state data and geo data from csv-file using ';' as field delimiter and ' ' (blank)
        as text delimiter, for displacements.
'''
print '*** read state data from file: %s ***' % ( file_name_u )
# get the column headings defined in the first row
# of the csv hinge force input file
# column_headings = array(["elem_no", "N_ip", "V_ip", "V_op"])
#
file = open( file_name_u, 'r' )
lines = file.readlines()
column_headings = lines[0].split( ';' )
# remove '\n' from last string element in list
#
column_headings[-1] = column_headings[-1][:-1]
column_headings_arr = array( column_headings )
# geo_data:
#
X_idx = where( 'X[m]' == column_headings_arr )[0]
Y_idx = where( 'Y[m]' == column_headings_arr )[0]
Z_idx = where( 'Z[m]' == column_headings_arr )[0]
# state_data:
#
U_x = where( 'u-x[m]' == column_headings_arr )[0]
U_y = where( 'u-y[m]' == column_headings_arr )[0]
U_z = where( 'u-z[m]' == column_headings_arr )[0]
dU_y_idx = where( 'du-y[m]' == column_headings_arr )[0]
dU_z_idx = where( 'du-z[m]' == column_headings_arr )[0]
file.close()
input_arr = loadtxt( file_name_u , delimiter = ';', skiprows = 1 )
# global coords [m]
#
self.X_u = input_arr[:, X_idx]
self.Y_u = input_arr[:, Y_idx]
self.Z_u = input_arr[:, Z_idx]
        # displacements [m]
        #
        self.U_x = input_arr[:, U_x]
        self.U_y = input_arr[:, U_y]
        self.U_z = input_arr[:, U_z]
        self.dU_y = input_arr[:, dU_y_idx]
        self.dU_z = input_arr[:, dU_z_idx]
class LCC( HasTraits ):
lcc_id = Int
#lcc_table = WeakRef()
ls_table = Instance( LSTable )
assess_value = Property()
def _get_assess_value( self ):
return self.ls_table.assess_value
traits_view = View( Item( 'ls_table@', show_label = False ),
resizable = True,
scrollable = True
)
# The definition of the demo TableEditor:
lcc_list_editor = TableEditor(
columns_name = 'lcc_table_columns',
editable = False,
selection_mode = 'row',
selected = 'object.lcc',
show_toolbar = True,
auto_add = False,
configurable = True,
sortable = True,
reorderable = False,
sort_model = False,
auto_size = False,
)
class LCCTable( HasTraits ):
'''Loading Case Manager.
Generates and sorts the loading case combinations
of all specified loading cases.
'''
# define ls
#
ls = Trait( 'ULS',
{'ULS' : ULS,
'SLS' : SLS } )
# lcc-instance for the view
#
lcc = Instance( LCC )
#-------------------------------
# Define loading cases:
#-------------------------------
# path to the directory containing the state data files
#
data_dir = Directory
# list of load cases
#
lc_list_ = List( Instance( LC ) )
lc_list = Property( List, depends_on = '+filter' )
def _set_lc_list( self, value ):
self.lc_list_ = value
def _get_lc_list( self ):
# for lc in self.lc_list_:
# if lc.data_filter != self.data_filter:
# lc.data_filter = self.data_filter
return self.lc_list_
lcc_table_columns = Property( depends_on = 'lc_list_, +filter' )
def _get_lcc_table_columns( self ):
return [ ObjectColumn( label = 'Id', name = 'lcc_id' ) ] + \
[ ObjectColumn( label = lc.name, name = lc.name )
for idx, lc in enumerate( self.lc_list ) ] + \
[ ObjectColumn( label = 'assess_value', name = 'assess_value' ) ]
geo_columns = Property( List( Str ), depends_on = 'lc_list_, +filter' )
def _get_geo_columns( self ):
'''derive the order of the geo columns
from the first element in 'lc_list'. The internal
consistency is checked separately in the
'check_consistency' method.
'''
return self.lc_list[0].geo_columns
sr_columns = Property( List( Str ), depends_on = 'lc_list_, +filter' )
def _get_sr_columns( self ):
'''derive the order of the stress resultants
from the first element in 'lc_list'. The internal
consistency is checked separately in the
'check_consistency' method.
'''
return self.lc_list[0].sr_columns
#-------------------------------
# check consistency
#-------------------------------
def _check_for_consistency( self ):
        ''' check input files for consistency:
'''
return True
#-------------------------------
# lc_arr
#-------------------------------
lc_arr = Property( Array )
def _get_lc_arr( self ):
'''stack stress resultants arrays of all loading cases together.
This yields an array of shape ( n_lc, n_elems, n_sr )
'''
sr_arr_list = [ lc.sr_arr for lc in self.lc_list ]
# for x in sr_arr_list:
# print x.shape
return array( sr_arr_list )
#-------------------------------
# Array dimensions:
#-------------------------------
n_sr = Property( Int )
def _get_n_sr( self ):
return len( self.sr_columns )
n_lc = Property( Int )
def _get_n_lc( self ):
return len( self.lc_list )
n_lcc = Property( Int )
def _get_n_lcc( self ):
return self.combi_arr.shape[0]
n_elems = Property( Int )
def _get_n_elems( self ):
return self.lc_list[0].sr_arr.shape[0]
#-------------------------------
# auxilary method for get_combi_arr
#-------------------------------
def _product( self, args ):
"""
Get all possible permutations of the security factors
without changing the order of the loading cases.
The method corresponds to the build-in function 'itertools.product'.
Instead of returning a generator object a list of all
possible permutations is returned. As argument a list of list
needs to be defined. In the original version of 'itertools.product'
the function takes a tuple as argument ("*args").
"""
pools = map( tuple, args ) #within original version args defined as *args
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
return result
# ------------------------------------------------------------
# 'combi_arr' - array containing indices of all loading case combinations:
# ------------------------------------------------------------
# list of indices of the position of the imposed loads in 'lc_list'
#
# imposed_idx_list = Property( List, depends_on = 'lc_list_, lc_list_.+input' )
imposed_idx_list = Property( List, depends_on = 'lc_list_' )
@cached_property
def _get_imposed_idx_list( self ):
'''list of indices for the imposed loads
'''
imposed_idx_list = []
for i_lc, lc in enumerate( self.lc_list ):
cat = lc.category
if cat == 'imposed-load':
imposed_idx_list.append( i_lc )
return imposed_idx_list
# array containing the psi with name 'psi_key' for the specified
# loading cases defined in 'lc_list'. For dead-loads no value for
# psi exists. In this case a value of 1.0 is defined.
# This yields an array of shape ( n_lc, )
#
def _get_psi_arr( self, psi_key ):
'''psi_key must be defined as:
'psi_0', 'psi_1', or 'psi_2'
Returns an 1d-array of shape ( n_lc, )
'''
# get list of ones (used for dead-loads):
#
psi_list = [1] * len( self.lc_list )
# overwrite ones with psi-values in case of imposed-loads:
#
for imposed_idx in self.imposed_idx_list:
psi_value = getattr( self.lc_list[ imposed_idx ], psi_key )
psi_list[ imposed_idx ] = psi_value
return array( psi_list, dtype = 'float_' )
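    # Illustrative sketch (assumption): for lc_list = [dead-load 'g',
    # imposed-load 's' with psi_0 = 0.5] the call self._get_psi_arr( 'psi_0' )
    # returns array([ 1.0, 0.5 ]).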
# list containing names of the loading cases
#
lc_name_list = Property( List, depends_on = 'lc_list_' )
@cached_property
def _get_lc_name_list( self ):
'''list of names of all loading cases
'''
return [ lc.name for lc in self.lc_list ]
show_lc_characteristic = Bool( True )
# combination array:
#
combi_arr = Property( Array, depends_on = 'lc_list_, combination_SLS' )
@cached_property
def _get_combi_arr( self ):
'''array containing the security and combination factors
corresponding to the specified loading cases.
This yields an array of shape ( n_lcc, n_lc )
Properties defined in the subclasses 'LCCTableULS', 'LCCTableSLS':
- 'gamma_list' = list of security factors (gamma)
- 'psi_lead' = combination factors (psi) of the leading imposed load
- 'psi_non_lead' = combination factors (psi) of the non-leading imposed loads
'''
# printouts:
#
if self.ls == 'ULS':
print '*** load case combinations for limit state ULS ***'
else:
print '*** load case combinations for limit state SLS ***'
            print '*** SLS combination used: %s ***' % ( self.combination_SLS )
#---------------------------------------------------------------
# get permutations of safety factors ('gamma')
#---------------------------------------------------------------
#
permutation_list = self._product( self.gamma_list )
combi_arr = array( permutation_list )
# check if imposed loads are defined
# if not no further processing of 'combi_arr' is necessary:
#
if self.imposed_idx_list == []:
# if option is set to 'True' the loading case combination table
# is enlarged with an identity matrix in order to see the
# characteristic values of each loading case.
#
if self.show_lc_characteristic:
combi_arr = vstack( [ identity( self.n_lc ), combi_arr ] )
return combi_arr
#---------------------------------------------------------------
# get leading and non leading combination factors ('psi')
#---------------------------------------------------------------
# go through all possible cases of leading imposed loads
# For the currently investigated imposed loading case the
# psi value is taken from 'psi_leading_arr' for all other
# imposed loads the psi value is taken from 'psi_non_lead_arr'
# Properties are defined in the subclasses
#
psi_lead_arr = self.psi_lead_arr
psi_non_lead_arr = self.psi_non_lead_arr
# for SLS limit state case 'rare' all imposed loads are multiplied
# with 'psi_2'. In this case no distinction between leading or
# non-leading imposed loads needs to be performed.
#
if all( psi_lead_arr == psi_non_lead_arr ):
combi_arr_psi = combi_arr * psi_lead_arr
# generate a list or arrays obtained by multiplication
# with the psi-factors.
# This yields a list of length = number of imposed-loads.
#
else:
combi_arr_psi_list = []
for imposed_idx in self.imposed_idx_list:
# copy in order to preserve initial state of the array
# and avoid in place modification
psi_arr = copy( psi_non_lead_arr )
psi_arr[imposed_idx] = psi_lead_arr[imposed_idx]
combi_arr_lead_i = combi_arr[where( combi_arr[:, imposed_idx] != 0 )] * psi_arr
combi_arr_psi_list.append( combi_arr_lead_i )
combi_arr_psi_no_0 = vstack( combi_arr_psi_list )
            # missing cases without any imposed load have to be added
# get combinations with all!! imposed = 0
#
lcc_all_imposed_zero = where( ( combi_arr[:, self.imposed_idx_list] == 0 )
.all( axis = 1 ) )
# add to combinations
#
combi_arr_psi = vstack( ( combi_arr[lcc_all_imposed_zero], combi_arr_psi_no_0 ) )
#---------------------------------------------------------------
# get exclusive loading cases ('exclusive_to')
#---------------------------------------------------------------
# get a list of lists containing the indices of the loading cases
# that are defined exclusive to each other.
# The list still contains duplicates, e.g. [1,2] and [2,1]
#
exclusive_list = []
for i_lc, lc in enumerate( self.lc_list ):
# get related load case number
#
for exclusive_name in lc.exclusive_to:
if exclusive_name in self.lc_name_list:
exclusive_idx = self.lc_name_list.index( exclusive_name )
exclusive_list.append( [ i_lc, exclusive_idx ] )
# eliminate the duplicates in 'exclusive_list'
#
exclusive_list_unique = []
for exclusive_list_entry in exclusive_list:
if sorted( exclusive_list_entry ) not in exclusive_list_unique:
exclusive_list_unique.append( sorted( exclusive_list_entry ) )
# delete the rows in combination array that contain
# loading case combinations with imposed-loads that have been defined
# as exclusive to each other.
#
combi_arr_psi_exclusive = combi_arr_psi
# print 'combi_arr_psi_exclusive', combi_arr_psi_exclusive
for exclusive_list_entry in exclusive_list_unique:
# check where maximum one value of the exclusive load cases is unequal to one
# LC1 LC2 LC3 (all LCs are defined as exclusive to each other)
#
# e.g. 1.5 0.9 0.8 (example of 'combi_arr_psi')
# 1.5 0.0 0.0
        #      0.0   0.0   0.0  (combination with all imposed loads = 0 after multiplication with psi and gamma)
# ... ... ...
#
# this would yield the following mask_arr (containing ones or zeros):
        # e.g. 1.0  1.0  1.0  --> sum = 3 --> more than one exclusive LC active --> combination rejected
        #      1.0  0.0  0.0  --> sum = 1 --> at most one exclusive LC active --> combination accepted
        # e.g. 0.0  0.0  0.0  --> sum = 0 --> no exclusive LC active --> combination accepted (only body-loads)
# ... ... ...
#
mask_arr = where( combi_arr_psi_exclusive[ :, exclusive_list_entry ] != 0, 1.0, 0.0 )
# print 'mask_arr', mask_arr
true_combi = where( sum( mask_arr, axis = 1 ) <= 1.0 )
# print 'true_combi', true_combi
combi_arr_psi_exclusive = combi_arr_psi_exclusive[ true_combi ]
#---------------------------------------------------------------
# create array with only unique load case combinations
#---------------------------------------------------------------
# If the psi values of an imposed-load are defined as zero this
        # may lead to zero entries in 'combi_arr'. This would yield rows
# in 'combi_arr' which are duplicates. Those rows are removed.
# Add first row in 'combi_arr_psi_exclusive' to '_unique' array
# This array must have shape (1, n_lc) in order to use 'axis'-option
#
combi_arr_psi_exclusive_unique = combi_arr_psi_exclusive[0][None, :]
for row in combi_arr_psi_exclusive:
# Check if all factors in one row are equal to the rows in 'unique' array.
# If this is not the case for any row the combination is added to 'unique'.
# Broadcasting is used for the bool evaluation:
#
            if ( row == combi_arr_psi_exclusive_unique ).all( axis = 1 ).any() == False:
combi_arr_psi_exclusive_unique = vstack( ( combi_arr_psi_exclusive_unique, row ) )
# if option is set to 'True' the loading case combination table
# is enlarged with an identity matrix in order to see the
# characteristic values of each loading case.
#
# if self.show_lc_characteristic:
# combi_arr_psi_exclusive_unique = vstack( [ identity( self.n_lc ), combi_arr_psi_exclusive_unique ] )
return combi_arr_psi_exclusive_unique
#-------------------------------
# lcc_arr
#-------------------------------
lcc_arr = Property( Array, depends_on = 'lc_list_' )
@cached_property
def _get_lcc_arr( self ):
'''Array of all loading case combinations following the
        loading cases defined in 'lc_list' and the combinations
defined in 'combi_arr'.
This yields an array of shape ( n_lcc, n_elems, n_sr )
'''
self._check_for_consistency()
combi_arr = self.combi_arr
# 'combi_arr' is of shape ( n_lcc, n_lc )
# 'lc_arr' is of shape ( n_lc, n_elems, n_sr )
#
lc_arr = self.lc_arr
# Broadcasting is used to generate array containing the multiplied lc's
# yielding an array of shape ( n_lcc, n_lc, n_elems, n_sr )
#
lc_combi_arr = lc_arr[ None, :, :, :] * combi_arr[:, :, None, None ]
# Then the sum over index 'n_lc' is evaluated yielding
# an array of all loading case combinations.
# This yields an array of shape ( n_lcc, n_elem, n_sr )
#
lcc_arr = sum( lc_combi_arr, axis = 1 )
return lcc_arr
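    # Equivalent formulation (illustrative sketch, assuming numpy.einsum is
    # available): lcc_arr = einsum( 'cl,les->ces', combi_arr, lc_arr )
    # with c = n_lcc, l = n_lc, e = n_elems, s = n_sr.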
#-------------------------------
# geo_arr
#-------------------------------
geo_data_dict = Property( Dict, depends_on = 'lc_list_' )
@cached_property
def _get_geo_data_dict( self ):
'''Array of global coords derived from the first loading case defined in lc_list.
Coords are identical for all LC's.
'''
return self.lc_list[0].geo_data_dict
#-------------------------------
# min/max-values
#-------------------------------
def get_min_max_state_data( self ):
''' get the surrounding curve of all 'lcc' values
'''
lcc_arr = self.lcc_arr
min_arr = ndmin( lcc_arr, axis = 0 )
max_arr = ndmax( lcc_arr, axis = 0 )
return min_arr, max_arr
max_sr_grouped_dict = Property( Dict )
@cached_property
def _get_max_sr_grouped_dict( self ):
''' get the surrounding curve for each stress resultant
shape lcc_array ( n_lcc, n_elems, n_sr )
'''
sr_columns = self.sr_columns
lcc_arr = self.lcc_arr
dict = {}
for i, sr in enumerate( self.sr_columns ):
idx_1 = argmax( abs( lcc_arr[:, :, i] ), axis = 0 )
idx_2 = arange( 0, idx_1.shape[0], 1 )
dict[sr] = lcc_arr[idx_1, idx_2, :]
return dict
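    # e.g. dict['N_ip'] holds, for every element, the full stress-resultant
    # vector of the combination that maximises |N_ip| at that element.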
# choose linking type (in-plane shear dof blocked or not)
#
link_type = Enum( 'exc_V_ip', 'inc_V_ip' )
# length of the shell (needed to plot the hinge forces plots correctly)
#
length_xy_quarter = 3.5 # m
def export_hf_max_grouped( self, filename ):
"""exports with one leading value
"""
from matplotlib import pyplot
sr_columns = self.sr_columns
dict = self.max_sr_grouped_dict
length_xy_quarter = self.length_xy_quarter
def save_bar_plot( x, y, filename = 'bla', title = 'Title',
                           xlabel = 'xlabel', ylabel = 'ylabel',
width = 0.1, xmin = 0, xmax = 1000 ,
ymin = -1000, ymax = 1000, figsize = [10, 5] ):
fig = pyplot.figure( facecolor = "white", figsize = figsize )
ax1 = fig.add_subplot( 1, 1, 1 )
ax1.bar( x , y , width = width, align = 'center', color = 'green' )
ax1.set_xlim( xmin, xmax )
ax1.set_ylim( ymin, ymax )
ax1.set_xlabel( xlabel, fontsize = 22 )
ax1.set_ylabel( ylabel, fontsize = 22 )
if title == 'N_ip max':
title = 'max $N_{ip}$'
if title == 'V_ip max':
title = 'max $V_{ip}$'
if title == 'V_op max':
title = 'max $V_{op}$'
ax1.set_title( title )
fig.savefig( filename, orientation = 'portrait', bbox_inches = 'tight' )
pyplot.clf()
X = array( self.geo_data_dict['X_hf'] )
Y = array( self.geo_data_dict['Y_hf'] )
# symmetric axes
#
idx_sym = where( abs( Y[:, 0] - 2.0 * length_xy_quarter ) <= 0.0001 )
X_sym = X[idx_sym].reshape( -1 )
idx_r0_r1 = where( abs( X[:, 0] - 2.0 * length_xy_quarter ) <= 0.0001 )
X_r0_r1 = Y[idx_r0_r1].reshape( -1 )
for sr in sr_columns:
F_int = dict[sr] #first row N_ip, second V_ip third V_op
F_sym = F_int[idx_sym, :].reshape( -1, len( sr_columns ) )
F_r0_r1 = F_int[idx_r0_r1, :].reshape( -1, len( sr_columns ) )
save_bar_plot( X_sym, F_sym[:, 0].reshape( -1 ),
xlabel = '$X$ [m]', ylabel = '$N_{ip}$ [kN]',
filename = filename + 'N_ip' + '_sym_' + sr + '_max',
title = sr + ' max',
xmin = 0.0, xmax = 4.0 * length_xy_quarter, figsize = [10, 5], ymin = -40, ymax = +40 )
if self.link_type == 'inc_V_ip':
save_bar_plot( X_sym, F_sym[:, 1].reshape( -1 ),
xlabel = '$X$ [m]', ylabel = '$V_{ip}$ [kN]',
filename = filename + 'V_ip' + '_sym_' + sr + '_max',
title = sr + ' max',
xmin = 0.0, xmax = 4.0 * length_xy_quarter, figsize = [10, 5], ymin = -40, ymax = +40 )
save_bar_plot( X_sym, F_sym[:, 2].reshape( -1 ),
xlabel = '$X$ [m]', ylabel = '$V_{op}$ [kN]',
filename = filename + 'V_op' + '_sym_' + sr + '_max',
title = sr + ' max',
                           xmin = 0.0, xmax = 4.0 * length_xy_quarter, figsize = [10, 5], ymin = -10, ymax = +10 )
# r0_r1
#
save_bar_plot( X_r0_r1, F_r0_r1[:, 0].reshape( -1 ),
xlabel = '$Y$ [m]', ylabel = '$N_{ip}$ [kN]',
filename = filename + 'N_ip' + '_r0_r1_' + sr + '_max',
title = sr + ' max',
xmin = 0.0, xmax = 2.0 * length_xy_quarter, figsize = [5, 5], ymin = -40, ymax = +40 )
if self.link_type == 'inc_V_ip':
save_bar_plot( X_r0_r1, F_r0_r1[:, 1].reshape( -1 ),
xlabel = '$Y$ [m]', ylabel = '$V_{ip}$ [kN]',
filename = filename + 'V_ip' + '_r0_r1_' + sr + '_max',
title = sr + ' max',
xmin = 0.0, xmax = 2.0 * length_xy_quarter, figsize = [5, 5], ymin = -40, ymax = +40 )
save_bar_plot( X_r0_r1, F_r0_r1[:, 2].reshape( -1 ),
xlabel = '$Y$ [m]', ylabel = '$V_{op}$ [kN]',
filename = filename + 'V_op' + '_r0_r1_' + sr + '_max',
title = sr + ' max',
xmin = 0.0, xmax = 2.0 * length_xy_quarter, figsize = [5, 5], ymin = -10, ymax = +10 )
def export_u( self, filename ):
"""exports u values, maximum and minimum of each lc and combination
"""
length_xy_quarter = self.length_xy_quarter
n_lc = self.n_lc
n_sr = self.n_sr
# max value charakteristic
#
max_export_arr = zeros( ( n_lc, n_sr ) )
min_export_arr = zeros( ( n_lc, n_sr ) )
for i in range( 0, n_lc ):
for j in range( 0, n_sr ):
max_export_arr[i, j] = ndmax( self.lc_arr[i, :, j] )
min_export_arr[i, j] = ndmin( self.lc_arr[i, :, j] )
# from combinated values
#
# from outer_edges (= r0_r1_bottom, r0_left, r1_right)
#
idx_bottom = where( abs( self.geo_data_dict['Y_u'].flatten() ) <= 0.001 )
idx_left = where( abs( self.geo_data_dict['X_u'].flatten() ) <= 0.001 )
idx_right = where( abs( self.geo_data_dict['X_u'].flatten() - 4.0 * length_xy_quarter ) <= 0.001 )
idx_out = unique( hstack( [idx_bottom, idx_left, idx_right] ) )
# print 'idx_out', idx_out
# for internal edge
#
idx_sym = where( abs( self.geo_data_dict['Y_u'].reshape( -1 ) - 2.0 * length_xy_quarter ) <= 0.001 )
idx_r0_r1 = where( abs( self.geo_data_dict['X_u'].reshape( -1 ) - 2.0 * length_xy_quarter ) <= 0.001 )
idx_int = unique( hstack( [idx_sym, idx_left, idx_r0_r1] ) )
# look in lcc_arr (= array with the combined values)
# for the combination with the maximum or minimum combined value
# NOTE: lcc_arr.shape = ( n_lcc, n_elems, n_sr )
#
max_komb_out = zeros( ( n_sr ) )
min_komb_out = zeros( ( n_sr ) )
for j in range( 0, n_sr ):
            max_komb_out[j] = ndmax( self.lcc_arr[:, idx_out, j].reshape( -1 ), axis = 0 )
            min_komb_out[j] = ndmin( self.lcc_arr[:, idx_out, j].reshape( -1 ), axis = 0 )
print"\n"
print"-------------------"
print self.sr_columns[j] + " - MAX"
print"-------------------"
idx_max = where ( lct.lcc_arr[:, :, j] == max_komb_out[j] )
print 'idx_max', idx_max
idx_comb = idx_max[0][0]
print 'idx_comb', idx_comb
idx_point = idx_max[1][0]
print 'idx_point', idx_point
idx_sr = j
print 'max_komb_out', max_komb_out[j]
print 'combi_arr', self.combi_arr[idx_comb, :]
print 'lc_arr', self.lc_arr[:, idx_point, j]
print self.lc_arr[:, idx_point, j] * self.combi_arr[idx_comb, :] / max_komb_out[j]
print "design value ", vdot( self.lc_arr[:, idx_point, j], self.combi_arr[idx_comb, :] )
print 'at position X,Y,Z', self.geo_data_dict['X_u'][idx_point], self.geo_data_dict['Y_u'][idx_point], self.geo_data_dict['Z_u'][idx_point]
print"-------------------"
print self.sr_columns[j] + " - MIN"
print"-------------------"
idx_min = where ( lct.lcc_arr[:, :, j] == min_komb_out[j] )
print 'idx_min', idx_min
idx_comb = idx_min[0][0]
print 'idx_comb', idx_comb
idx_point = idx_min[1][0]
print 'idx_point', idx_point
idx_sr = j
print min_komb_out[j]
print self.combi_arr[idx_comb, :]
print self.lc_arr[:, idx_point, j]
print self.lc_arr[:, idx_point, j] * self.combi_arr[idx_comb, :] / min_komb_out[j]
print "design value ", vdot( self.combi_arr[idx_comb, :], self.lc_arr[:, idx_point, j] )
print 'at position X,Y,Z', self.geo_data_dict['X_u'][idx_point], self.geo_data_dict['Y_u'][idx_point], self.geo_data_dict['Z_u'][idx_point]
max_komb_int = zeros( ( n_sr ) )
min_komb_int = zeros( ( n_sr ) )
for j in range( 0, n_sr ):
            max_komb_int[j] = ndmax( self.lcc_arr[:, idx_int, j].reshape( -1 ), axis = 0 )
            min_komb_int[j] = ndmin( self.lcc_arr[:, idx_int, j].reshape( -1 ), axis = 0 )
max_export_arr = vstack( ( max_export_arr, max_komb_out, max_komb_int ) )
min_export_arr = vstack( ( min_export_arr, min_komb_out, min_komb_int ) )
def csv_u( data, filename = 'U_data.csv' ):
'''exports X_U_export and U_export data to csv - worksheet
'''
file = open( filename, 'w' )
writer = csv.writer( file, delimiter = ";", lineterminator = "\n" )
#first row
#
writer.writerow( ['-'] + self.sr_columns )
# not combinated rows
#
for i in range( 0, self.n_lc ):
                a = [self.lc_list[i].name] + list( data[i] )
writer.writerow( a )
# combinated rows
#
writer.writerow( ['komb_out'] + list( data[-2] ) )
writer.writerow( ['komb_int'] + list( data[-1] ) )
file = file.close()
csv_u( max_export_arr, filename = filename + 'max_U' + '.csv' )
csv_u( min_export_arr, filename = filename + 'min_U' + '.csv' )
def plot_interaction_s6cm( self ):
"""get interaction pairs max"""
lcc_arr = self.lcc_arr
        ### N_s6cm_d results from 'V_op_d' scaled by the lever-arm ratio 33/17
        # assume a distribution of stresses as for a simply
        # supported beam with cantilever corresponding
# to the distance of the screws to each other and to the edge
# of the TRC shell (33cm/17cm)
#
N_s6cm_d = lcc_arr[:, :, 2] * 33./17.
        ### V_s6cm_d results from 'N_ip_d'/2 and 'V_ip_d'*1.5
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
#
V_s6cm_d = ( ( lcc_arr[:, :, 0] / 2 ) ** 2 + ( lcc_arr[:, :, 1] * 1.5 ) ** 2 ) ** 0.5
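        # Worked example (illustrative): for V_op_d = 1.0 kN the screw tension
        # becomes N_s6cm_d = 1.0 * 33. / 17. ~ 1.94 kN; for N_ip_d = 2.0 kN and
        # V_ip_d = 0 the screw shear becomes V_s6cm_d = 1.0 kN.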
        # resistance as characteristic value obtained from the
# experiment and EN DIN 1990
#
N_ck = 28.3
V_ck = 63.8
gamma_s = 1.53
beta_N = N_s6cm_d / ( N_ck / gamma_s )
beta_V = abs( V_s6cm_d / ( V_ck / gamma_s ) )
beta_inter = ( beta_N ) + ( beta_V )
idx_max_hinge = beta_inter.argmax( axis = 0 )
idx_hinge = arange( 0, len( idx_max_hinge ), 1 )
plot_beta_N = beta_N[idx_max_hinge, idx_hinge]
plot_beta_V = beta_V[idx_max_hinge, idx_hinge]
self.interaction_plot( plot_beta_N, plot_beta_V )
def interaction_plot( self, eta_N, eta_V ):
from matplotlib import pyplot
fig = pyplot.figure( facecolor = "white", figsize = [10, 10] )
ax1 = fig.add_subplot( 1, 1, 1 )
x = arange( 0, 1.01, 0.01 )
y = ( 1 - x )
limit = eta_N + eta_V
ax1.set_xlabel( '$V_{Ed}/V_{Rd}$' , fontsize = 24 )
ax1.set_ylabel( '$N_{Ed}/N_{Rd}$', fontsize = 24 )
ax1.plot( x , y, '--', color = 'black'
, linewidth = 2.0 )
        ax1.plot( eta_V, eta_N, 'o', color = 'green', markersize = 8 )
# ax1.plot( eta_V[where( limit < 1 )] , eta_N[where( limit < 1 )], 'o', markersize = 8 )
# ax1.plot( eta_V[where( limit > 1 )] , eta_N[where( limit > 1 )], 'o', color = 'red', markersize = 8 )
for xlabel_i in ax1.get_xticklabels():
xlabel_i.set_fontsize( 18 )
for ylabel_i in ax1.get_yticklabels():
ylabel_i.set_fontsize( 18 )
# ax1.plot( x , 1 - x, '--', color = 'black', label = 'lineare Interaktion' )
ax1.set_xlim( 0, 2.0 )
ax1.set_ylim( 0, 2.0 )
ax1.legend()
pyplot.show()
pyplot.clf()
def export_hf_lc( self ):
"""exports with one leading value
"""
from matplotlib import pyplot
sr_columns = self.sr_columns
dict = self.max_sr_grouped_dict
length_xy_quarter = self.length_xy_quarter
def save_bar_plot( x, y, filename = 'bla',
                       xlabel = 'xlabel', ylabel = 'ylabel', ymin = -10 , ymax = 10,
width = 0.1, xmin = 0, xmax = 1000 , figsize = [10, 5] ):
fig = pyplot.figure( facecolor = "white", figsize = figsize )
ax1 = fig.add_subplot( 1, 1, 1 )
ax1.bar( x , y , width = width, align = 'center', color = 'blue' )
ax1.set_xlim( xmin, xmax )
ax1.set_ylim( ymin, ymax )
ax1.set_xlabel( xlabel, fontsize = 22 )
ax1.set_ylabel( ylabel, fontsize = 22 )
fig.savefig( filename, orientation = 'portrait', bbox_inches = 'tight' )
pyplot.clf()
X = array( self.geo_data_dict['X_hf'] )
Y = array( self.geo_data_dict['Y_hf'] )
# symmetric axes
#
idx_sym = where( abs( Y[:, 0] - 2.0 * length_xy_quarter ) <= 0.0001 )
X_sym = X[idx_sym].reshape( -1 )
idx_r0_r1 = where( abs( X[:, 0] - 2.0 * length_xy_quarter ) <= 0.0001 )
X_r0_r1 = Y[idx_r0_r1].reshape( -1 )
F_int = self.lc_arr
for i, lc_name in enumerate( self.lc_name_list ):
filename = self.lc_list[i].plt_export
max_N_ip = max( int( ndmax( F_int[i, :, 0], axis = 0 ) ) + 1, 1 )
max_V_ip = max( int( ndmax( F_int[i, :, 1], axis = 0 ) ) + 1, 1 )
max_V_op = max( int( ndmax( F_int[i, :, 2], axis = 0 ) ) + 1, 1 )
F_int_lc = F_int[i, :, :] #first row N_ip, second V_ip third V_op
F_sym = F_int_lc[idx_sym, :].reshape( -1, len( sr_columns ) )
F_r0_r1 = F_int_lc[idx_r0_r1, :].reshape( -1, len( sr_columns ) )
save_bar_plot( X_sym, F_sym[:, 0].reshape( -1 ),
xlabel = '$X$ [m]', ylabel = '$N_{ip}$ [kN]',
filename = filename + 'N_ip' + '_sym',
xmin = 0.0, xmax = 4.0 * length_xy_quarter,
ymin = -max_N_ip, ymax = max_N_ip, figsize = [10, 5] )
save_bar_plot( X_sym, F_sym[:, 1].reshape( -1 ),
xlabel = '$X$ [m]', ylabel = '$V_{ip}$ [kN]',
filename = filename + 'V_ip' + '_sym',
xmin = 0.0, xmax = 4.0 * length_xy_quarter,
ymin = -max_V_ip, ymax = max_V_ip, figsize = [10, 5] )
save_bar_plot( X_sym, F_sym[:, 2].reshape( -1 ),
xlabel = '$X$ [m]', ylabel = '$V_{op}$ [kN]',
filename = filename + 'V_op' + '_sym',
xmin = 0.0, xmax = 4.0 * length_xy_quarter,
ymin = -max_V_op, ymax = max_V_op, figsize = [10, 5] )
# r0_r1
#
save_bar_plot( X_r0_r1, F_r0_r1[:, 0].reshape( -1 ),
xlabel = '$Y$ [m]', ylabel = '$N_{ip}$ [kN]',
filename = filename + 'N_ip' + '_r0_r1',
xmin = 0.0, xmax = 2.0 * length_xy_quarter,
ymin = -max_N_ip, ymax = max_N_ip, figsize = [5, 5] )
save_bar_plot( X_r0_r1, F_r0_r1[:, 1].reshape( -1 ),
xlabel = '$Y$ [m]', ylabel = '$V_{ip}$ [kN]',
filename = filename + 'V_ip' + '_r0_r1',
xmin = 0.0, xmax = 2.0 * length_xy_quarter,
ymin = -max_V_ip, ymax = max_V_ip, figsize = [5, 5] )
save_bar_plot( X_r0_r1, F_r0_r1[:, 2].reshape( -1 ),
xlabel = '$Y$ [m]', ylabel = '$V_{op}$ [kN]',
filename = filename + 'V_op' + '_r0_r1',
xmin = 0.0, xmax = 2.0 * length_xy_quarter,
ymin = -max_V_op, ymax = max_V_op, figsize = [5, 5] )
#-------------------------------
# lcc_lists
#-------------------------------
lcc_list = Property( List, depends_on = 'lc_list_' )
@cached_property
def _get_lcc_list( self ):
'''list of loading case combinations (instances of LCC)
'''
combi_arr = self.combi_arr
lcc_arr = self.lcc_arr
sr_columns = self.sr_columns
geo_columns = self.geo_columns
n_lcc = self.n_lcc
# print 'combi_arr', combi_arr
# print 'lcc_arr', lcc_arr
# print 'sr_columns', sr_columns
# print 'geo_columns', geo_columns
# print 'n_lcc', n_lcc
# return a dictionary of the stress resultants
# this is used by LSTable to determine the stress
# resultants of the current limit state
#
lcc_list = []
for i_lcc in range( n_lcc ):
state_data_dict = {}
for i_sr, name in enumerate( sr_columns ):
state_data_dict[ name ] = lcc_arr[ i_lcc, :, i_sr ][:, None]
geo_data_dict = self.geo_data_dict
lcc = LCC( #lcc_table = self,
factors = combi_arr[ i_lcc, : ],
lcc_id = i_lcc,
ls_table = LSTable( geo_data = geo_data_dict,
state_data = state_data_dict,
ls = self.ls )
)
for idx, lc in enumerate( self.lc_list ):
lcc.add_trait( lc.name, Int( combi_arr[ i_lcc, idx ] ) )
lcc_list.append( lcc )
return lcc_list
# ------------------------------------------------------------
# View
# ------------------------------------------------------------
traits_view = View( VGroup(
VSplit(
Item( 'lcc_list', editor = lcc_list_editor,
show_label = False ),
Item( 'lcc@', show_label = False ),
),
),
resizable = True,
scrollable = True,
height = 1.0,
width = 1.0
)
class LCCTableULS( LCCTable ):
'''LCCTable for ultimate limit state
'''
# set limit state to 'ULS'
# (attribute is used by 'LSTable')
#
ls = 'ULS'
# 'gamma' - safety factors
#
gamma_list = Property( List, depends_on = 'lc_list_' )
@cached_property
def _get_gamma_list( self ):
return [[ lc.gamma_fav, lc.gamma_unf ] for lc in self.lc_list ]
# 'psi' - combination factors (psi) for leading
# and non leading load cases
#
psi_non_lead_arr = Property( Array, depends_on = 'lc_list_' )
@cached_property
def _get_psi_non_lead_arr( self ):
return self._get_psi_arr( 'psi_0' )
psi_lead_arr = Property( Array, depends_on = 'lc_list_' )
@cached_property
def _get_psi_lead_arr( self ):
return ones( len( self.lc_list ) )
class LCCTableSLS( LCCTable ):
'''LCCTable for serviceability limit state
'''
# set limit state to 'SLS'
# (attribute is used by 'LSTable')
#
ls = 'SLS'
# possible definitions of the serviceability limit state
# are: 'rare', 'freq', 'perm'
#
combination_SLS = Enum( 'rare', 'freq', 'perm' )
def _combination_SLS_default( self ):
return 'rare'
# 'gamma' - safety factors
#
gamma_list = Property( List, depends_on = 'lc_list_' )
@cached_property
def _get_gamma_list( self ):
# generate [1.0]-entry in case of body-loads:
#
gamma_list = [[ 1.0 , 3.0]] * len( self.lc_list ) # for creeping
# gamma_list = [[ 1.0]] * len( self.lc_list ) # for creeping
# overwrite those in case of imposed-loads:
#
for imposed_idx in self.imposed_idx_list:
gamma_fav_SLS = getattr( self.lc_list[ imposed_idx ], 'gamma_fav_SLS' )
gamma_unf_SLS = getattr( self.lc_list[ imposed_idx ], 'gamma_unf_SLS' )
gamma_list[ imposed_idx ] = [ gamma_unf_SLS, gamma_fav_SLS ]
return gamma_list
# 'psi' - combination factors
#
psi_lead_dict = Property( Dict )
def _get_psi_lead_dict( self ):
return {'rare' : ones_like( self._get_psi_arr( 'psi_0' ) ) ,
'freq' : self._get_psi_arr( 'psi_1' ),
'perm' : self._get_psi_arr( 'psi_2' )}
psi_non_lead_dict = Property( Dict )
def _get_psi_non_lead_dict( self ):
return {'rare' : self._get_psi_arr( 'psi_0' ) ,
'freq' : self._get_psi_arr( 'psi_2' ),
'perm' : self._get_psi_arr( 'psi_2' )}
# combination factors (psi) for leading
# and non leading load cases
#
psi_lead_arr = Property( Array, depends_on = 'lc_list_, combination_SLS' )
@cached_property
def _get_psi_lead_arr( self ):
return self.psi_lead_dict[ self.combination_SLS ]
psi_non_lead_arr = Property( Array, depends_on = 'lc_list_, combination_SLS' )
@cached_property
def _get_psi_non_lead_arr( self ):
return self.psi_non_lead_dict[ self.combination_SLS ]
if __name__ == '__main__':
#---------------------------------------------
# 2 shells:
# new geometry 7m x 7m
#---------------------------------------------
#------------------------
# evaluate the combination for the displpacements (SLS) or the hinge forces (ULS)
#------------------------
# choose!
#
do = 'hf'
# do = 'dp'
if do == 'dp':
        # NOTE: switch these values from 'hf' to 'dp' directly in the source code!
        # 'sr_columns' and 'geo_columns' need to be changed for the 'dp' and 'hf'
        # options: for 'hf' use 'N_ip', ...; for 'dp' use 'U_z', ...
        # as stated in the source code above.
sr_columns = ['U_x', 'U_y', 'U_z']
# sr_columns = ['U_x', 'U_y', 'U_z', 'dU_y', 'dU_z']
geo_columns = ['X_u', 'Y_u', 'Z_u']
if do == 'hf':
sr_columns = ['N_ip', 'V_ip', 'V_op']
geo_columns = ['X_hf', 'Y_hf', 'Z_hf']
#------------------------
# specify linking option:
#------------------------
link_case = 'equal_100cm_7m'
link_type = 'exc_V_ip'
# link_type = 'inc_V_ip'
spring = 'no_spring'
# spring = 'spring'
if spring == 'spring':
spring_type = 'spring_1_'
else:
spring_type = ''
#------------------------
    # specify directory containing csv-files of the hinge forces and the displacements:
#------------------------
#---------------------------------------------
# 2 shells:
# new geometry with new loading cases plus waterfilling
#---------------------------------------------
data_dir = os.path.join( simdb.simdb_dir,
'simdata', 'input_data_mushroof_stb',
'ZiE_state_data_2shells_delta_h_865mm_2011-08-16',
'hf' )
#------------------------
# define loading cases:
#------------------------
# NOTE:
#
lc_list = [
# LC1:
# own weight:
#
LC( name = 'g', category = 'dead-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC1.csv' ),
),
# LC2:
# s_sym:
#
LC( name = 's_sym', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC2.csv' ),
exclusive_to = ['s_asym', 'WF'],
psi_0 = 0.5, psi_1 = 0.2, psi_2 = 0.0
),
# LC3:
# s_asym:
#
LC( name = 's_asym', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC3.csv' ),
exclusive_to = ['s_sym', 'WF'],
psi_0 = 0.5, psi_1 = 0.2, psi_2 = 0.0
),
# LC4:
# w_neg:
#
LC( name = 'w_neg', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC4.csv' ),
exclusive_to = ['w_pos', 'w_asym', 'w_int', 'WF'],
psi_0 = 0.6, psi_1 = 0.2, psi_2 = 0.0
),
# LC5:
# w_pos:
#
LC( name = 'w_pos', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC5.csv' ),
exclusive_to = ['w_neg', 'w_asym', 'w_int', 'WF'],
psi_0 = 0.6, psi_1 = 0.2, psi_2 = 0.0
),
# LC6:
# w_asym:
#
LC( name = 'w_asym', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC6.csv' ),
exclusive_to = ['w_pos', 'w_neg', 'w_int', 'WF'],
psi_0 = 0.6, psi_1 = 0.2, psi_2 = 0.0
),
# LC7:
               # w_int: construction stage (Bauzustand)
#
LC( name = 'w_int', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC7.csv' ),
exclusive_to = ['w_pos', 'w_neg', 'w_asym', 'WF', 'T_shrinkage'],
psi_0 = 0.6, psi_1 = 0.2, psi_2 = 0.0
),
# LC8:
# 1 kN man load (corner):
#
LC( name = 'Q_corner', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC8.csv' ),
exclusive_to = ['Q_edge', 'WF'], psi_0 = 0.0, psi_1 = 0.0, psi_2 = 0.0 ),
# LC9:
# 1 kN man load (edge, center):
#
LC( name = 'Q_edge', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC9.csv' ),
exclusive_to = ['Q_corner', 'WF'], psi_0 = 0.0, psi_1 = 0.0, psi_2 = 0.0 ),
# LC10:
               # temperature (summer):
#
LC( name = 'T_pos', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC10.csv' ),
exclusive_to = ['T_neg', 'WF'], psi_0 = 0.6, psi_1 = 0.5, psi_2 = 0.0 ),
# LC11:
# temperature (winter):
#
LC( name = 'T_neg', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC11.csv' ),
exclusive_to = ['T_pos', 'WF'], psi_0 = 0.6, psi_1 = 0.5, psi_2 = 0.0 ),
# LC12:
# shrinkage:
# combination coefficients taken from case 'general imposed load'
#
LC( name = 'T_shrinkage', category = 'imposed-load',
data_dir = data_dir,
file_name = os.path.join( data_dir, 'LC12.csv' ),
exclusive_to = ['WF'], psi_0 = 0.8, psi_1 = 0.7, psi_2 = 0.5 ),
# # LC13:
## # water filling:
## # combination coefficients taken from case 'general imposed load'
## #
# LC( name = 'WF', category = 'imposed-load',
# data_dir = data_dir,
# file_name = os.path.join( data_dir, 'LC13.csv' ),
# gamma_unf = 1.00,
# exclusive_to = ['S_asym', 'S_sym', 'Q_corner', 'Q_edge', 'T_neg', 'T_pos'],
# psi_0 = 0.8, psi_1 = 0.7, psi_2 = 0.5 ),
]
#-------------------------------------------------------
# evaluate the displacement files csv:
#-------------------------------------------------------
#
if do == 'dp':
lct = LCCTableSLS( data_dir = data_dir,
lc_list = lc_list,
# cut_z_fraction = 0.2,
combination_SLS = 'rare',
## combination_SLS = 'freq',
## combination_SLS = 'perm',
## show_lc_characteristic = True
)
# export of the max edge displacement
#
# lct.export_u( filename = spring + "_" + link_type + "_" + link_case )
# LCC-TABLE
lct.configure_traits()
#-------------------------------------------------------
# evaluate the "hinge forces"-csv-files
#-------------------------------------------------------
#
elif do == 'hf':
# combination
#
lct = LCCTableULS( data_dir = data_dir,
lc_list = lc_list,
# cut_z_fraction = 0.05,
# remove only the lowest point = connection shell/column
# as this is a singularity of the FE-shell-model
#
# cut_z_fraction = 0.0000001,
show_lc_characteristic = True
)
# INTERACTION plot
lct.plot_interaction_s6cm()
# lct.plot_interaction_tp()
# LCC-TABLE
# lct.configure_traits()
# Internal Force EXPORT
#
# lct.link_type = link_type
# lct.export_hf_max_grouped( os.path.join( data_dir, 'hf' ) )
# lct.export_hf_lc()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
a9767ff8584097a72971d1e8644b417eb926a01d
|
78d23de227a4c9f2ee6eb422e379b913c06dfcb8
|
/LeetCode/846.py
|
384d03f6466286d2c3fad09a5cdd6413b61dcffb
|
[] |
no_license
|
siddharthcurious/Pythonic3-Feel
|
df145293a3f1a7627d08c4bedd7e22dfed9892c0
|
898b402b7a65073d58c280589342fc8c156a5cb1
|
refs/heads/master
| 2020-03-25T05:07:42.372477
| 2019-09-12T06:26:45
| 2019-09-12T06:26:45
| 143,430,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from collections import Counter
class Solution:
def isNStraightHand(self, hand, W):
"""
:type hand: List[int]
:type W: int
:rtype: bool
"""
L = len(hand)
if L%W != 0:
return False
counter = Counter(hand)
while counter:
tmin = min(counter)
for k in range(tmin, tmin+W):
v = counter.get(k)
if not v:
return False
if v == 1:
del counter[k]
else:
counter[k] = v-1
return True
if __name__ == "__main__":
s = Solution()
hand = [1, 2, 3, 6, 2, 3, 4, 7, 8]
W = 3
    print(s.isNStraightHand(hand, W))  # expected output: True
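    # The hand splits into the consecutive groups [1, 2, 3], [2, 3, 4]
    # and [6, 7, 8], hence the result True.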
|
[
"sandhyalalkumar@gmail.com"
] |
sandhyalalkumar@gmail.com
|
0cf52c06d5eb89b3b7872b6c884f54600c6c493f
|
0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c
|
/custom_components/dyson_local/vacuum.py
|
24dc1f8dd48f166658531d591b7f870cd2bb09db
|
[
"Unlicense"
] |
permissive
|
bacco007/HomeAssistantConfig
|
d91a5368344f50abbea881bd1e6dfc57a0e456ca
|
8548d9999ddd54f13d6a307e013abcb8c897a74e
|
refs/heads/master
| 2023-08-30T07:07:33.571959
| 2023-08-29T20:00:00
| 2023-08-29T20:00:00
| 230,585,631
| 98
| 16
|
Unlicense
| 2023-09-09T08:28:39
| 2019-12-28T09:05:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,333
|
py
|
"""Vacuum platform for Dyson."""
from typing import Any, Callable, List, Mapping
from .vendor.libdyson import (
Dyson360Eye,
VacuumEyePowerMode,
VacuumHeuristPowerMode,
VacuumState,
)
from homeassistant.components.vacuum import (
ATTR_STATUS,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STATUS,
StateVacuumEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, STATE_PAUSED
from homeassistant.core import HomeAssistant
from . import DysonEntity
from .const import DATA_DEVICES, DOMAIN
SUPPORTED_FEATURES = (
SUPPORT_START
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_STATUS
| SUPPORT_STATE
| SUPPORT_BATTERY
)
DYSON_STATUS = {
VacuumState.FAULT_CALL_HELPLINE: "Error: Call helpline",
VacuumState.FAULT_CONTACT_HELPLINE: "Error: Contact helpline",
VacuumState.FAULT_CRITICAL: "Error: Critical",
VacuumState.FAULT_GETTING_INFO: "Error: Getting info",
VacuumState.FAULT_LOST: "Error: Lost",
VacuumState.FAULT_ON_DOCK: "Error: On dock",
VacuumState.FAULT_ON_DOCK_CHARGED: "Error: On dock charged",
VacuumState.FAULT_ON_DOCK_CHARGING: "Error: On dock charging",
VacuumState.FAULT_REPLACE_ON_DOCK: "Error: Replace device on dock",
VacuumState.FAULT_RETURN_TO_DOCK: "Error: Return to dock",
VacuumState.FAULT_RUNNING_DIAGNOSTIC: "Error: Running diagnostic",
VacuumState.FAULT_USER_RECOVERABLE: "Error: Blocked",
VacuumState.FULL_CLEAN_ABANDONED: "Abandoned",
VacuumState.FULL_CLEAN_ABORTED: "Returning home",
VacuumState.FULL_CLEAN_CHARGING: "Charging",
VacuumState.FULL_CLEAN_DISCOVERING: "Discovering",
VacuumState.FULL_CLEAN_FINISHED: "Finished",
VacuumState.FULL_CLEAN_INITIATED: "Initiated",
VacuumState.FULL_CLEAN_NEEDS_CHARGE: "Need charging",
VacuumState.FULL_CLEAN_PAUSED: "Paused",
VacuumState.FULL_CLEAN_RUNNING: "Cleaning",
VacuumState.FULL_CLEAN_TRAVERSING: "Traversing",
VacuumState.INACTIVE_CHARGED: "Stopped - Charged",
VacuumState.INACTIVE_CHARGING: "Stopped - Charging",
VacuumState.INACTIVE_DISCHARGING: "Stopped - Discharging",
VacuumState.MAPPING_ABORTED: "Mapping - Returning home",
VacuumState.MAPPING_CHARGING: "Mapping - Charging",
VacuumState.MAPPING_FINISHED: "Mapping - Finished",
VacuumState.MAPPING_INITIATED: "Mapping - Initiated",
VacuumState.MAPPING_NEEDS_CHARGE: "Mapping - Needs charging",
VacuumState.MAPPING_PAUSED: "Mapping - Paused",
VacuumState.MAPPING_RUNNING: "Mapping - Running",
}
DYSON_STATES = {
VacuumState.FAULT_CALL_HELPLINE: STATE_ERROR,
VacuumState.FAULT_CONTACT_HELPLINE: STATE_ERROR,
VacuumState.FAULT_CRITICAL: STATE_ERROR,
VacuumState.FAULT_GETTING_INFO: STATE_ERROR,
VacuumState.FAULT_LOST: STATE_ERROR,
VacuumState.FAULT_ON_DOCK: STATE_ERROR,
VacuumState.FAULT_ON_DOCK_CHARGED: STATE_ERROR,
VacuumState.FAULT_ON_DOCK_CHARGING: STATE_ERROR,
VacuumState.FAULT_REPLACE_ON_DOCK: STATE_ERROR,
VacuumState.FAULT_RETURN_TO_DOCK: STATE_ERROR,
VacuumState.FAULT_RUNNING_DIAGNOSTIC: STATE_ERROR,
VacuumState.FAULT_USER_RECOVERABLE: STATE_ERROR,
VacuumState.FULL_CLEAN_ABANDONED: STATE_RETURNING,
VacuumState.FULL_CLEAN_ABORTED: STATE_RETURNING,
VacuumState.FULL_CLEAN_CHARGING: STATE_DOCKED,
VacuumState.FULL_CLEAN_DISCOVERING: STATE_CLEANING,
VacuumState.FULL_CLEAN_FINISHED: STATE_DOCKED,
VacuumState.FULL_CLEAN_INITIATED: STATE_CLEANING,
VacuumState.FULL_CLEAN_NEEDS_CHARGE: STATE_RETURNING,
VacuumState.FULL_CLEAN_PAUSED: STATE_PAUSED,
VacuumState.FULL_CLEAN_RUNNING: STATE_CLEANING,
VacuumState.FULL_CLEAN_TRAVERSING: STATE_CLEANING,
VacuumState.INACTIVE_CHARGED: STATE_DOCKED,
VacuumState.INACTIVE_CHARGING: STATE_DOCKED,
VacuumState.INACTIVE_DISCHARGING: STATE_DOCKED,
VacuumState.MAPPING_ABORTED: STATE_RETURNING,
VacuumState.MAPPING_CHARGING: STATE_PAUSED,
VacuumState.MAPPING_FINISHED: STATE_CLEANING,
VacuumState.MAPPING_INITIATED: STATE_CLEANING,
VacuumState.MAPPING_NEEDS_CHARGE: STATE_RETURNING,
VacuumState.MAPPING_PAUSED: STATE_PAUSED,
VacuumState.MAPPING_RUNNING: STATE_CLEANING,
}
EYE_POWER_MODE_ENUM_TO_STR = {
VacuumEyePowerMode.QUIET: "Quiet",
VacuumEyePowerMode.MAX: "Max",
}
EYE_POWER_MODE_STR_TO_ENUM = {
value: key for key, value in EYE_POWER_MODE_ENUM_TO_STR.items()
}
HEURIST_POWER_MODE_ENUM_TO_STR = {
VacuumHeuristPowerMode.QUIET: "Quiet",
VacuumHeuristPowerMode.HIGH: "High",
VacuumHeuristPowerMode.MAX: "Max",
}
HEURIST_POWER_MODE_STR_TO_ENUM = {
value: key for key, value in HEURIST_POWER_MODE_ENUM_TO_STR.items()
}
ATTR_POSITION = "position"
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable
) -> None:
"""Set up Dyson vacuum from a config entry."""
device = hass.data[DOMAIN][DATA_DEVICES][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
if isinstance(device, Dyson360Eye):
entity = Dyson360EyeEntity(device, name)
else: # Dyson360Heurist
entity = Dyson360HeuristEntity(device, name)
async_add_entities([entity])
class DysonVacuumEntity(DysonEntity, StateVacuumEntity):
"""Dyson vacuum entity base class."""
@property
def state(self) -> str:
"""Return the state of the vacuum."""
return DYSON_STATES[self._device.state]
@property
def status(self) -> str:
"""Return the status of the vacuum."""
return DYSON_STATUS[self._device.state]
@property
def battery_level(self) -> int:
"""Return the battery level of the vacuum cleaner."""
return self._device.battery_level
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._device.is_connected
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORTED_FEATURES
@property
def extra_state_attributes(self) -> Mapping[str, Any]:
"""Expose the status to state attributes."""
return {
ATTR_POSITION: str(self._device.position),
ATTR_STATUS: self.status,
}
def pause(self) -> None:
"""Pause the device."""
self._device.pause()
def return_to_base(self, **kwargs) -> None:
"""Return the device to base."""
self._device.abort()
class Dyson360EyeEntity(DysonVacuumEntity):
"""Dyson 360 Eye robot vacuum entity."""
@property
def fan_speed(self) -> str:
"""Return the fan speed of the vacuum cleaner."""
return EYE_POWER_MODE_ENUM_TO_STR[self._device.power_mode]
@property
def fan_speed_list(self) -> List[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(EYE_POWER_MODE_STR_TO_ENUM.keys())
def start(self) -> None:
"""Start the device."""
if self.state == STATE_PAUSED:
self._device.resume()
else:
self._device.start()
def set_fan_speed(self, fan_speed: str, **kwargs) -> None:
"""Set fan speed."""
self._device.set_power_mode(EYE_POWER_MODE_STR_TO_ENUM[fan_speed])
class Dyson360HeuristEntity(DysonVacuumEntity):
"""Dyson 360 Heurist robot vacuum entity."""
@property
def fan_speed(self) -> str:
"""Return the fan speed of the vacuum cleaner."""
return HEURIST_POWER_MODE_ENUM_TO_STR[self._device.current_power_mode]
@property
def fan_speed_list(self) -> List[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(HEURIST_POWER_MODE_STR_TO_ENUM.keys())
def start(self) -> None:
"""Start the device."""
if self.state == STATE_PAUSED:
self._device.resume()
else:
self._device.start_all_zones()
def set_fan_speed(self, fan_speed: str, **kwargs) -> None:
"""Set fan speed."""
self._device.set_default_power_mode(HEURIST_POWER_MODE_STR_TO_ENUM[fan_speed])
|
[
"thomas@thomasbaxter.info"
] |
thomas@thomasbaxter.info
|
2fecfee104269eac54eb1ff97981413680f0aca5
|
60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14
|
/beginner_contest/183/A.py
|
0c3a7cb3882bd72537672f3fac59c59a477676b3
|
[
"MIT"
] |
permissive
|
FGtatsuro/myatcoder
|
12a9daafc88efbb60fc0cd8840e594500fc3ee55
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
refs/heads/master
| 2021-06-13T15:24:07.906742
| 2021-05-16T11:47:09
| 2021-05-16T11:47:09
| 195,441,531
| 0
| 0
|
MIT
| 2021-05-16T11:47:10
| 2019-07-05T16:47:58
|
Python
|
UTF-8
|
Python
| false
| false
| 131
|
py
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
x = int(input())
if x >= 0:
print(x)
else:
print(0)
|
[
"204491+FGtatsuro@users.noreply.github.com"
] |
204491+FGtatsuro@users.noreply.github.com
|
03873d4ea28c94d0fce1511dcf5e24a1225b1d9c
|
3fa7b041caf5dfe8e10d2086bc3127859e610227
|
/python2/2/knock12.py
|
5f22d6b6bfe7e69c59c395befd593b2fb8ad9add
|
[] |
no_license
|
himkt/nlp-100knock
|
693b71e1d3ef5d65276d2694f309a7e39b5e002c
|
3be136307271f0aa5f46aef366bac0c53513c5ca
|
refs/heads/master
| 2020-04-16T15:14:19.962351
| 2016-08-19T10:30:43
| 2016-08-19T10:30:43
| 32,132,964
| 31
| 4
| null | 2021-03-05T14:39:46
| 2015-03-13T06:02:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 623
|
py
|
# -*- coding: utf-8 -*-
# @author = himkt
# 2015/07/16
'''
12. Save the 1st column to col1.txt and the 2nd column to col2.txt
Extract only the first column of each line and save it to col1.txt,
and only the second column to col2.txt.
Use the cut command for verification.
'''
f = open('../data/hightemp.txt', 'r')
f_col1 = open('./col1.txt','w')
f_col2 = open('./col2.txt','w')
for line in f:
    col1, col2, temperature, timestamp = line.split("\t")
    f_col1.write("%s\n" % col1)
    f_col2.write("%s\n" % col2)
f.close()
f_col1.close()
f_col2.close()
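# verification with the cut command (as required by the task), e.g.:
#   cut -f 1 ../data/hightemp.txt | diff - col1.txt
#   cut -f 2 ../data/hightemp.txt | diff - col2.txt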
|
[
"himkt@klis.tsukuba.ac.jp"
] |
himkt@klis.tsukuba.ac.jp
|
9de9ff070cc553ba6c7017ee27c8c3c37ec80577
|
aa0d55b2aa22da0af6545ce0da46d04dbdc3bffc
|
/cpgames/core/games/breakoutclone/modules/utils.py
|
f8df36ce3d52ef56483f54fbabdceb11d2d70859
|
[
"Apache-2.0"
] |
permissive
|
cyanghsieh/Games
|
19fdad463cf12cbd503a399ed2700c0dae615714
|
07767df6d181b9eae89ce0a8b883d19afb450cc1
|
refs/heads/master
| 2023-05-11T11:11:09.777569
| 2023-02-22T14:28:18
| 2023-02-22T14:28:18
| 283,113,319
| 0
| 0
|
MIT
| 2020-07-28T05:49:13
| 2020-07-28T05:49:12
| null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
'''
Function:
    Utility functions
Author:
    Charles
WeChat official account:
    Charles的皮卡丘
'''
'''Load a level file.'''
def loadLevel(levelpath):
brick_positions = []
fp = open(levelpath, 'r', encoding='utf-8')
y = -1
for line in fp.readlines():
if (not line.strip()) or (line.startswith('#')):
continue
else:
y += 1
x = -1
for c in line:
x += 1
if c == 'B':
brick_positions.append([x, y])
    fp.close()
    return brick_positions
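# Example (illustrative): a level file containing the two lines
#   B B
#    B
# yields brick_positions == [[0, 0], [2, 0], [1, 1]].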
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
f6e89855368091cfa463917ef3abf2fed8677d25
|
e540a64d8a23ee83b3d2a5842636a2ea7486a52f
|
/test_petcd/test_unit/client_get_64a46cd84bc94765b8a167e7f6582eab.py
|
31be52dca66015fffe2eeb5d46d462c3d7b5578f
|
[
"Apache-2.0"
] |
permissive
|
alunduil/petcd
|
27066c735667d1de2308a165b5c8dfe59f3d2dc7
|
a9579259b6ecc12182cc57a0549a7618c0c70a27
|
refs/heads/master
| 2021-01-22T16:26:14.682404
| 2015-07-25T15:26:54
| 2015-07-25T15:26:54
| 46,867,580
| 0
| 0
| null | 2015-11-25T14:47:20
| 2015-11-25T14:47:19
| null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from torment import fixtures
from torment import helpers
from test_petcd import test_helpers
from test_petcd.test_unit import AsyncEtcdClientGetFixture
expected = {
'key': '/foo',
'quorum': False,
'recursive': False,
'sorted': False,
'wait': False,
'wait_index': None,
}
arguments = [
{ 'quorum': ( True, ), },
{ 'recursive': ( True, ), },
{ 'sorted': ( True, ), },
{ 'wait_index': ( 0, 1, ), },
{ 'wait': ( True, ), },
]
for combination in test_helpers.powerset(arguments):
for subset in test_helpers.evert(combination):
fixtures.register(globals(), ( AsyncEtcdClientGetFixture, ), {
'parameters': {
'kwargs': functools.reduce(helpers.extend, list(subset), { 'key': '/foo', }),
},
'expected': functools.reduce(helpers.extend, [ expected ] + list(subset), { 'key': '/foo', }),
})
|
[
"alunduil@alunduil.com"
] |
alunduil@alunduil.com
|
4e5e90b4a520933dfbc2f413ddc16e9fa3593d1a
|
d7315d1769ea60f50604673bc7b677a69de2a940
|
/code/step1/fdr_test.py
|
fdc6c0bfa44279a1f9ebd4de060b0dfc1cc8c1b9
|
[] |
no_license
|
hoon4233/Stock-Prediction
|
405226de24be6751e5d3bb6f481cae98f6053959
|
10ffdd3399fc8e207637cc85e8450aa4f9bd47b2
|
refs/heads/master
| 2023-04-27T19:50:13.768265
| 2021-05-06T03:12:53
| 2021-05-06T03:12:53
| 364,770,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
import FinanceDataReader as fdr
df = fdr.DataReader('005930')
print(df)
|
[
"wogns3141@gmail.com"
] |
wogns3141@gmail.com
|
0d04e6bc9a229cb2cd1c8e3d668a8821c929b8d8
|
223b7276b54fee31f1510145ac4c4e13c6b6a3e0
|
/HearthStoneSpider/tools/remote_server.py
|
3cb8649cedbf55d55671b9f396de4bb25c38b519
|
[] |
no_license
|
CoderEnko007/HearthStoneSpider
|
6c2d5e63fc6ee64e4838d5b77937da66972fcd82
|
7bafa78c10dcdf3d17dd33a0fbf9abfe7726284a
|
refs/heads/master
| 2023-02-18T11:05:16.644753
| 2022-09-18T07:01:58
| 2022-09-18T07:01:58
| 144,394,353
| 0
| 0
| null | 2023-02-07T21:53:30
| 2018-08-11T15:00:25
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
import json
import requests
from urllib.parse import urljoin
class HSServer(object):
def __init__(self, path=None):
# self.host = 'http://127.0.0.1:8001'
self.host = 'http://47.98.187.217'
self.url = urljoin(self.host, path)
def set_url_path(self, path):
self.url = urljoin(self.host, path)
def list(self, params):
response = requests.get(self.url, params=params)
re_dict = json.loads(response.text)
print(response.status_code, re_dict)
if response.status_code == 200:
res = dict(re_dict, **{
'status_code': 200
})
return res
else:
return {
'status_code': response.status_code
}
def get(self, id):
url = "{0}{1}/".format(self.url, str(id))
response = requests.get(url)
re_dict = json.loads(response.text)
print('get', re_dict)
if response.status_code == 200:
res = dict(re_dict, **{
'status_code': 200
})
return res
else:
return {
'status_code': response.status_code
}
def post(self, data):
headers = {
'Content-Type': 'application/json'
}
data = json.dumps(data)
response = requests.post(self.url, headers=headers, data=data)
re_dict = json.loads(response.text)
print('post', re_dict)
if response.status_code == 200:
res = dict(re_dict, **{
'status_code': 200
})
return res
else:
return {
'status_code': response.status_code
}
def put(self, id, data):
url = "{0}{1}/".format(self.url, str(id))
response = requests.put(url, json=data)
re_dict = json.loads(response.text)
print('put', re_dict)
if response.status_code == 200:
res = dict(re_dict, **{
'status_code': 200
})
return res
else:
return {
'status_code': response.status_code
}
def delete(self, id):
url = "{0}{1}/".format(self.url, id)
response = requests.delete(url)
status = response.status_code
print(status)
        return 'success' if status == 204 else 'failed'
if __name__ == '__main__':
url = 'http://127.0.0.1:8001/winrate/'
server = HSServer(url)
params = {
'rank_range': 'TOP_1000_LEGEND',
'faction': 'Hunter',
'archetype': 'Highlander Hunter',
'create_time': '2020-7-19'
}
server.list(params=params)
data = {
'rank_range': 'TOP_1000_LEGEND',
'faction': 'Hunter',
'archetype': 'test',
'winrate': '99.99'
# 'create_time': str(datetime.datetime.now())
}
# server.put(id=117265, data=data)
# server.delete(id=117264)
|
[
"yf381966217@163.com"
] |
yf381966217@163.com
|
4cb20d9c4f026d16c353ff24d766471117d06273
|
d4eb113c44c86322b3811513a7286d176f106eb6
|
/experiments/convolutional_autoencoder_perturbations/rotated+scaled-precip/autoencoder.py
|
9f8144f085411691ebeb36578a3dcb59a2b63136
|
[] |
no_license
|
philip-brohan/Machine-Learning
|
67a2eb780383b3436da4fef1d763f39d255ae696
|
dc53b9c336d5f12272257f327abe49dec436ea04
|
refs/heads/master
| 2021-03-27T12:33:07.518279
| 2020-04-30T19:38:02
| 2020-04-30T19:38:02
| 56,614,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,609
|
py
|
#!/usr/bin/env python
# Convolutional autoencoder for 20CR prmsl fields.
# This version is all-convolutional - it uses strided convolutions
# instead of max-pooling, and transpose convolution instead of
# upsampling.
# This version uses scaled input fields that have a size (79x159) that
# match the strided convolution upscaling and downscaling.
# It also works on tensors with a rotated pole - so the data boundary
# is the equator - this limits the problems with boundary conditions.
# This version looks at precip
import os
import tensorflow as tf
import ML_Utilities
import pickle
import numpy
# How many epochs to train for
n_epochs=50
# Create TensorFlow Dataset object from the prepared training data
(tr_data,n_steps) = ML_Utilities.dataset(purpose='training',
source='rotated_pole/20CR2c',
variable='prate')
tr_data = tr_data.repeat(n_epochs)
# Also produce a tuple (source,target) for model
def to_model(ict):
ict=tf.reshape(ict,[79,159,1])
return(ict,ict)
tr_data = tr_data.map(to_model)
tr_data = tr_data.batch(1)
# Similar dataset from the prepared test data
(tr_test,test_steps) = ML_Utilities.dataset(purpose='test',
source='rotated_pole/20CR2c',
variable='prate')
tr_test = tr_test.repeat(n_epochs)
tr_test = tr_test.map(to_model)
tr_test = tr_test.batch(1)
# Input placeholder
original = tf.keras.layers.Input(shape=(79,159,1,))
# Encoding layers
x = tf.keras.layers.Conv2D(16, (3, 3), padding='same')(original)
x = tf.keras.layers.LeakyReLU()(x)
x = tf.keras.layers.Conv2D(8, (3, 3), strides= (2,2), padding='valid')(x)
x = tf.keras.layers.LeakyReLU()(x)
x = tf.keras.layers.Conv2D(8, (3, 3), strides= (2,2), padding='valid')(x)
x = tf.keras.layers.LeakyReLU()(x)
x = tf.keras.layers.Conv2D(8, (3, 3), strides= (2,2), padding='valid')(x)
x = tf.keras.layers.LeakyReLU()(x)
encoded = x
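# Shape sketch (illustrative, for the 79x159 input): each strided 3x3 'valid'
# convolution maps 79x159 -> 39x79 -> 19x39 -> 9x19; the three transpose
# convolutions below invert this exactly back to 79x159.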
# Decoding layers
x = tf.keras.layers.Conv2DTranspose(8, (3, 3), strides= (2,2), padding='valid')(encoded)
x = tf.keras.layers.LeakyReLU()(x)
x = tf.keras.layers.Conv2DTranspose(8, (3, 3), strides= (2,2), padding='valid')(x)
x = tf.keras.layers.LeakyReLU()(x)
x = tf.keras.layers.Conv2DTranspose(8, (3, 3), strides= (2,2), padding='valid')(x)
x = tf.keras.layers.LeakyReLU()(x)
decoded = tf.keras.layers.Conv2D(1, (3, 3), padding='same')(x)
# Model relating original to output
autoencoder = tf.keras.models.Model(original,decoded)
# Choose a loss metric to minimise (RMS)
# and an optimiser to use (adadelta)
autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
# Train the autoencoder
history=autoencoder.fit(x=tr_data,
epochs=n_epochs,
steps_per_epoch=n_steps,
validation_data=tr_test,
validation_steps=test_steps,
verbose=2) # One line per epoch
# Save the model
save_file=("%s/Machine-Learning-experiments/"+
"convolutional_autoencoder_perturbations/"+
"rotated+scaled_precip/saved_models/Epoch_%04d") % (
os.getenv('SCRATCH'),n_epochs)
if not os.path.isdir(os.path.dirname(save_file)):
os.makedirs(os.path.dirname(save_file))
tf.keras.models.save_model(autoencoder,save_file)
history_file=("%s/Machine-Learning-experiments/"+
"convolutional_autoencoder_perturbations/"+
"rotated+scaled_precip/saved_models/history_to_%04d.pkl") % (
os.getenv('SCRATCH'),n_epochs)
pickle.dump(history.history, open(history_file, "wb"))
|
[
"philip@brohan.org"
] |
philip@brohan.org
|
ea167fb8ab273f422a67ad5fd1577b556d59819c
|
04a540847c1333c987a1957fd8d31197c594f6bb
|
/programmers/64063_1_2.py
|
bb87f123a44803709df076a015cef03e5372985c
|
[] |
no_license
|
k8440009/Algorithm
|
fd148269b264b580876c7426e19dbe2425ddc1ab
|
a48eba0ac5c9f2e10f3c509ce9d349c8a1dc3f0c
|
refs/heads/master
| 2023-04-02T16:06:10.260768
| 2023-04-02T11:04:32
| 2023-04-02T11:04:32
| 200,506,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# Hotel room assignment (does not pass the efficiency tests)
# https://programmers.co.kr/learn/courses/30/lessons/64063
def solution(k, room_number):
answer = []
room = dict()
for room_key in room_number:
key = room_key
        while key in room:
key += 1
room[key] = 1
answer.append(key)
#print(answer)
return answer
solution(10, [1,3,4,1,3,1])
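# A sketch of a variant that should also pass the efficiency tests (not part
# of the original submission): keep a "next candidate room" table and compress
# the lookup chains, disjoint-set style, so repeated collisions stay cheap.
def solution_fast(k, room_number):
    next_free = dict()  # room -> next room worth trying
    answer = []
    for want in room_number:
        # walk the chain of taken rooms, remembering the path we followed
        path = []
        room = want
        while room in next_free:
            path.append(room)
            room = next_free[room]
        answer.append(room)
        # path compression: every room on the chain now points past `room`
        for p in path:
            next_free[p] = room + 1
        next_free[room] = room + 1
    return answer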
|
[
"k8440009@gmail.com"
] |
k8440009@gmail.com
|
1f7e64d2a7a01b64856c7764d77116c496b38347
|
4f4776eb69cbea9ee1c87a22732c5d778855c83a
|
/leetcode/Number_of_1_Bits.py
|
836df79c90ca983c9a66722751cf1ec02cb6fddf
|
[] |
no_license
|
k4u5h4L/algorithms
|
4a0e694109b8aadd0e3b7a66d4c20692ecdef343
|
b66f43354792b1a6facff90990a7685f5ed36a68
|
refs/heads/main
| 2023-08-19T13:13:14.931456
| 2021-10-05T13:01:58
| 2021-10-05T13:01:58
| 383,174,341
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
'''
Number of 1 Bits
Easy
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
Note:
Note that in some languages, such as Java, there is no unsigned integer type. In this case, the input will be given as a signed integer type. It should not affect your implementation, as the integer's internal binary representation is the same, whether it is signed or unsigned.
In Java, the compiler represents the signed integers using 2's complement notation. Therefore, in Example 3, the input represents the signed integer -3.
'''
class Solution:
def hammingWeight(self, n: int) -> int:
res = []
while n > 0:
            res.append(n % 2)
            n //= 2  # integer floor-division; int(n/2) goes through float and can lose precision for large n
return res.count(1)
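# An alternative sketch (not part of the original submission) using
# Kernighan's trick: `n &= n - 1` clears the lowest set bit, so the loop
# iterates once per 1-bit rather than once per bit position.
def hamming_weight_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count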
|
[
"kaushal.v.bhat@gmail.com"
] |
kaushal.v.bhat@gmail.com
|
48435a69a8fa1ee0cb211e9fecb08f31c62ba1b8
|
bdc5e23423fa88fa98940950515325d4e5d516b4
|
/xpath.py
|
3c143c849e6fae213cdcabc2a00a4cb573af6fd9
|
[
"MIT"
] |
permissive
|
kolabszar/Xpath
|
7a2c6792e909b51df3bdee032eaf3d172221f5ea
|
d708b3ef199ca6b99dd4ca343ddac450516d1b58
|
refs/heads/master
| 2023-03-22T14:06:07.251488
| 2021-03-12T09:24:30
| 2021-03-12T09:24:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,118
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# pylint: disable=R,W,E,C
"""
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2016-2025 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xpath
import argparse
from xpath.logger.colored_logger import logger
def main():
examples = "python %(prog)s http://www.site.com/vuln.php?id=1 --dbs\n\n"
version = "Xpath {version}".format(version=f"{xpath.__version__}")
description = "A cross-platform python based automated tool to detect and exploit error-based sql injections."
parser = argparse.ArgumentParser(
usage="python %(prog)s -u URL [OPTIONS]",
description=description,
conflict_handler="resolve",
formatter_class=argparse.RawTextHelpFormatter,
)
general = parser.add_argument_group("General")
general.add_argument("-h", "--help", action="help", help="Shows the help.")
general.add_argument(
"--version", action="version", version=version, help="Shows the version."
)
general.add_argument(
"-v",
dest="verbose",
type=int,
default=1,
help="Verbosity level: 1-5 (default 1).",
)
general.add_argument(
"--batch",
dest="batch",
action="store_true",
help="Never ask for user input, use the default behavior",
)
general.add_argument(
"--flush-session",
dest="flush_session",
action="store_true",
help="Flush session files for current target",
)
target = parser.add_argument_group(
"Target",
description="At least one of these options has to be provided to define the\ntarget(s)",
)
target.add_argument(
"-u",
"--url",
dest="url",
type=str,
help="Target URL (e.g. 'http://www.site.com/vuln.php?id=1).",
required=True,
)
request = parser.add_argument_group(
"Request",
description="These options can be used to specify how to connect to the target URL",
)
request.add_argument(
"-A",
"--user-agent",
dest="user_agent",
type=str,
help="HTTP User-Agent header value",
default="",
metavar="",
)
request.add_argument(
"-H",
"--header",
dest="header",
type=str,
help='Extra header (e.g. "X-Forwarded-For: 127.0.0.1")',
default="",
metavar="",
)
request.add_argument(
"--host",
dest="host",
type=str,
help="HTTP Host header value",
default="",
metavar="",
)
request.add_argument(
"--data",
dest="data",
type=str,
help='Data string to be sent through POST (e.g. "id=1")',
default="",
metavar="",
)
request.add_argument(
"--cookie",
dest="cookie",
type=str,
help='HTTP Cookie header value (e.g. "PHPSESSID=a8d127e..")',
default="",
metavar="",
)
request.add_argument(
"--referer",
dest="referer",
type=str,
help="HTTP Referer header value",
default="",
metavar="",
)
request.add_argument(
"--headers",
dest="headers",
type=str,
help='Extra headers (e.g. "Accept-Language: fr\\nETag: 123")',
default="",
metavar="",
)
request.add_argument(
"--proxy",
dest="proxy",
type=str,
help="Use a proxy to connect to the target URL",
default="",
metavar="",
)
request.add_argument(
"--force-ssl",
dest="force_ssl",
action="store_true",
help="Force usage of SSL/HTTPS",
)
detection = parser.add_argument_group(
"Detection",
description="These options can be used to customize the detection phase",
)
detection.add_argument(
"--level",
dest="level",
type=int,
help="Level of tests to perform (1-3, default 1)",
default=1,
metavar="",
)
techniques = parser.add_argument_group(
"Techniques",
description="These options can be used to tweak testing of specific SQL injection\ntechniques",
)
techniques.add_argument(
"--technique",
dest="tech",
type=str,
        help='SQL injection techniques to use (default "XEFDBGO")',
default="XEFDBGO",
)
enumeration = parser.add_argument_group(
"Enumeration",
description=(
"These options can be used to enumerate the back-end database"
"\nmanagment system information, structure and data contained in the\ntables."
),
)
enumeration.add_argument(
"-b",
"--banner",
dest="banner",
action="store_true",
help="Retrieve DBMS banner",
)
enumeration.add_argument(
"--current-user",
dest="current_user",
action="store_true",
help="Retrieve DBMS current user",
)
enumeration.add_argument(
"--current-db",
dest="current_db",
action="store_true",
help="Retrieve DBMS current database",
)
enumeration.add_argument(
"--hostname",
dest="hostname",
action="store_true",
help="Retrieve DBMS server hostname",
)
enumeration.add_argument(
"--dbs", dest="dbs", action="store_true", help="Enumerate DBMS databases",
)
enumeration.add_argument(
"--tables",
dest="tables",
action="store_true",
help="Enumerate DBMS database tables",
)
enumeration.add_argument(
"--columns",
dest="columns",
action="store_true",
help="Enumerate DBMS database table columns",
)
enumeration.add_argument(
"--dump",
dest="dump",
action="store_true",
help="Dump DBMS database table entries",
)
enumeration.add_argument(
"--search",
dest="search",
action="store_true",
help="Search column(s), table(s) and/or database name(s)",
)
enumeration.add_argument(
"-D", dest="db", type=str, help="DBMS database to enumerate", default=None,
)
enumeration.add_argument(
"-T",
dest="tbl",
type=str,
help="DBMS database tables(s) to enumerate",
default=None,
)
enumeration.add_argument(
"-C",
dest="col",
type=str,
help="DBMS database table column(s) to enumerate",
default=None,
)
examples = parser.add_argument_group("Example", description=examples)
args = parser.parse_args()
if not args.url:
parser.print_help()
exit(0)
resp = xpath.perform_injection(
url=args.url,
data=args.data,
host=args.host,
header=args.header,
cookies=args.cookie,
headers=args.headers,
referer=args.referer,
user_agent=args.user_agent,
level=args.level,
verbosity=args.verbose,
techniques=args.tech,
batch=args.batch,
flush_session=args.flush_session,
proxy=args.proxy,
force_ssl=args.force_ssl,
)
if resp.is_injected:
injection_type = resp.injection_type
injected_param = resp.injected_param
session_filepath = resp.session_filepath
recommended_payload = resp.recommended_payload
recommended_payload_type = resp.recommended_payload_type
headers = resp.headers
proxy = resp.proxy
dbms = resp.dbms
target = xpath.XPATHInjector(
url=args.url,
data=args.data,
headers=headers,
payload=recommended_payload,
regex=recommended_payload_type,
injected_param=injected_param,
injection_type=injection_type,
session_filepath=session_filepath,
proxy=proxy,
dbms=dbms
)
if args.search:
target.search_for(database=args.db, table=args.tbl, column=args.col)
else:
if not args.dbs and (
args.hostname or args.current_user or args.current_db or args.banner
):
if args.banner:
target.extract_banner()
if args.current_user:
target.extract_current_user()
if args.current_db:
target.extract_current_db()
if args.hostname:
target.extract_hostname()
if args.dbs:
target.extract_dbs()
if args.db and args.tables:
target.extract_tables(database=args.db)
if args.db and args.tbl and args.columns:
target.extract_columns(database=args.db, table=args.tbl)
if args.db and args.tbl and args.col:
target.extract_records(
database=args.db, table=args.tbl, columns=args.col
)
if __name__ == "__main__":
main()
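# Example invocations (illustrative only; the database/table names are
# hypothetical, the flags are the ones defined above):
#   python xpath.py -u "http://www.site.com/vuln.php?id=1" --dbs
#   python xpath.py -u "http://www.site.com/vuln.php?id=1" -D testdb --tables
#   python xpath.py -u "http://www.site.com/vuln.php?id=1" -D testdb -T users -C id,name --dump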
|
[
"r0oth3x49@gmail.com"
] |
r0oth3x49@gmail.com
|
cb3e9d5028d506ecb974097cec8f232e8f767507
|
9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c
|
/baomoicrawl/venv/Lib/site-packages/hyperlink/_url.py
|
993333f25cd5d26614b2dcf5be0a7ab5c515342e
|
[] |
no_license
|
thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler
|
b0fdedee2942a12d9f64dfed93f43802dc5ab340
|
87c8c07433466bbc43a24ea089f75baeb467c356
|
refs/heads/master
| 2022-11-27T21:36:33.917491
| 2020-08-10T23:24:42
| 2020-08-10T23:24:42
| 286,583,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85,134
|
py
|
# -*- coding: utf-8 -*-
u"""Hyperlink provides Pythonic URL parsing, construction, and rendering.
Usage is straightforward::
>>> import hyperlink
>>> url = hyperlink.parse(u'http://github.com/mahmoud/hyperlink?utm_source=docs')
>>> url.host
u'github.com'
>>> secure_url = url.replace(scheme=u'https')
>>> secure_url.get('utm_source')[0]
u'docs'
Hyperlink's API centers on the :class:`DecodedURL` type, which wraps
the lower-level :class:`URL`, both of which can be returned by the
:func:`parse()` convenience function.
""" # noqa: E501
import re
import sys
import string
import socket
from socket import AF_INET, AF_INET6
try:
from socket import AddressFamily
except ImportError:
AddressFamily = int # type: ignore[assignment,misc]
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Text,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from unicodedata import normalize
from ._socket import inet_pton
try:
from collections.abc import Mapping as MappingABC
except ImportError: # Python 2
from collections import Mapping as MappingABC
from idna import encode as idna_encode, decode as idna_decode
PY2 = sys.version_info[0] == 2
try:
unichr
except NameError: # Py3
unichr = chr # type: Callable[[int], Text]
NoneType = type(None) # type: Type[None]
QueryPairs = Tuple[Tuple[Text, Optional[Text]], ...] # internal representation
QueryParameters = Union[
Mapping[Text, Optional[Text]],
QueryPairs,
Sequence[Tuple[Text, Optional[Text]]],
]
T = TypeVar("T")
# from boltons.typeutils
def make_sentinel(name="_MISSING", var_name=""):
# type: (str, str) -> object
"""Creates and returns a new **instance** of a new class, suitable for
usage as a "sentinel", a kind of singleton often used to indicate
a value is missing when ``None`` is a valid input.
Args:
name: Name of the Sentinel
var_name: Set this name to the name of the variable in its respective
module enable pickle-ability.
>>> make_sentinel(var_name='_MISSING')
_MISSING
The most common use cases here in boltons are as default values
for optional function arguments, partly because of its
less-confusing appearance in automatically generated
documentation. Sentinels also function well as placeholders in queues
and linked lists.
.. note::
By design, additional calls to ``make_sentinel`` with the same
values will not produce equivalent objects.
>>> make_sentinel('TEST') == make_sentinel('TEST')
False
>>> type(make_sentinel('TEST')) == type(make_sentinel('TEST'))
False
"""
class Sentinel(object):
def __init__(self):
# type: () -> None
self.name = name
self.var_name = var_name
def __repr__(self):
# type: () -> str
if self.var_name:
return self.var_name
return "%s(%r)" % (self.__class__.__name__, self.name)
if var_name:
# superclass type hints don't allow str return type, but it is
# allowed in the docs, hence the ignore[override] below
def __reduce__(self):
# type: () -> str
return self.var_name
def __nonzero__(self):
# type: () -> bool
return False
__bool__ = __nonzero__
return Sentinel()
_unspecified = _UNSET = make_sentinel("_UNSET") # type: Any
# RFC 3986 Section 2.3, Unreserved URI Characters
# https://tools.ietf.org/html/rfc3986#section-2.3
_UNRESERVED_CHARS = frozenset(
"~-._0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"
)
# URL parsing regex (based on RFC 3986 Appendix B, with modifications)
_URL_RE = re.compile(
r"^((?P<scheme>[^:/?#]+):)?"
r"((?P<_netloc_sep>//)"
r"(?P<authority>[^/?#]*))?"
r"(?P<path>[^?#]*)"
r"(\?(?P<query>[^#]*))?"
r"(#(?P<fragment>.*))?$"
)
_SCHEME_RE = re.compile(r"^[a-zA-Z0-9+-.]*$")
_AUTHORITY_RE = re.compile(
r"^(?:(?P<userinfo>[^@/?#]*)@)?"
r"(?P<host>"
r"(?:\[(?P<ipv6_host>[^[\]/?#]*)\])"
r"|(?P<plain_host>[^:/?#[\]]*)"
r"|(?P<bad_host>.*?))?"
r"(?::(?P<port>.*))?$"
)
_HEX_CHAR_MAP = dict(
[
((a + b).encode("ascii"), unichr(int(a + b, 16)).encode("charmap"))
for a in string.hexdigits
for b in string.hexdigits
]
)
_ASCII_RE = re.compile("([\x00-\x7f]+)")
# RFC 3986 section 2.2, Reserved Characters
# https://tools.ietf.org/html/rfc3986#section-2.2
_GEN_DELIMS = frozenset(u":/?#[]@")
_SUB_DELIMS = frozenset(u"!$&'()*+,;=")
_ALL_DELIMS = _GEN_DELIMS | _SUB_DELIMS
_USERINFO_SAFE = _UNRESERVED_CHARS | _SUB_DELIMS | set(u"%")
_USERINFO_DELIMS = _ALL_DELIMS - _USERINFO_SAFE
_PATH_SAFE = _USERINFO_SAFE | set(u":@")
_PATH_DELIMS = _ALL_DELIMS - _PATH_SAFE
_SCHEMELESS_PATH_SAFE = _PATH_SAFE - set(":")
_SCHEMELESS_PATH_DELIMS = _ALL_DELIMS - _SCHEMELESS_PATH_SAFE
_FRAGMENT_SAFE = _UNRESERVED_CHARS | _PATH_SAFE | set(u"/?")
_FRAGMENT_DELIMS = _ALL_DELIMS - _FRAGMENT_SAFE
_QUERY_VALUE_SAFE = _UNRESERVED_CHARS | _FRAGMENT_SAFE - set(u"&+")
_QUERY_VALUE_DELIMS = _ALL_DELIMS - _QUERY_VALUE_SAFE
_QUERY_KEY_SAFE = _UNRESERVED_CHARS | _QUERY_VALUE_SAFE - set(u"=")
_QUERY_KEY_DELIMS = _ALL_DELIMS - _QUERY_KEY_SAFE
def _make_decode_map(delims, allow_percent=False):
# type: (Iterable[Text], bool) -> Mapping[bytes, bytes]
ret = dict(_HEX_CHAR_MAP)
if not allow_percent:
delims = set(delims) | set([u"%"])
for delim in delims:
_hexord = "{0:02X}".format(ord(delim)).encode("ascii")
_hexord_lower = _hexord.lower()
ret.pop(_hexord)
if _hexord != _hexord_lower:
ret.pop(_hexord_lower)
return ret
def _make_quote_map(safe_chars):
# type: (Iterable[Text]) -> Mapping[Union[int, Text], Text]
ret = {} # type: Dict[Union[int, Text], Text]
# v is included in the dict for py3 mostly, because bytestrings
# are iterables of ints, of course!
for i, v in zip(range(256), range(256)):
c = chr(v)
if c in safe_chars:
ret[c] = ret[v] = c
else:
ret[c] = ret[v] = "%{0:02X}".format(i)
return ret
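# Illustrative behaviour (added note, not in the original source):
# _make_quote_map(_UNRESERVED_CHARS) maps safe characters to themselves and
# everything else to its uppercase percent escape, e.g.
#   _make_quote_map(_UNRESERVED_CHARS)[u'a'] == u'a'
#   _make_quote_map(_UNRESERVED_CHARS)[u' '] == u'%20'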
_USERINFO_PART_QUOTE_MAP = _make_quote_map(_USERINFO_SAFE)
_USERINFO_DECODE_MAP = _make_decode_map(_USERINFO_DELIMS)
_PATH_PART_QUOTE_MAP = _make_quote_map(_PATH_SAFE)
_SCHEMELESS_PATH_PART_QUOTE_MAP = _make_quote_map(_SCHEMELESS_PATH_SAFE)
_PATH_DECODE_MAP = _make_decode_map(_PATH_DELIMS)
_QUERY_KEY_QUOTE_MAP = _make_quote_map(_QUERY_KEY_SAFE)
_QUERY_KEY_DECODE_MAP = _make_decode_map(_QUERY_KEY_DELIMS)
_QUERY_VALUE_QUOTE_MAP = _make_quote_map(_QUERY_VALUE_SAFE)
_QUERY_VALUE_DECODE_MAP = _make_decode_map(_QUERY_VALUE_DELIMS)
_FRAGMENT_QUOTE_MAP = _make_quote_map(_FRAGMENT_SAFE)
_FRAGMENT_DECODE_MAP = _make_decode_map(_FRAGMENT_DELIMS)
_UNRESERVED_QUOTE_MAP = _make_quote_map(_UNRESERVED_CHARS)
_UNRESERVED_DECODE_MAP = dict(
[
(k, v)
for k, v in _HEX_CHAR_MAP.items()
if v.decode("ascii", "replace") in _UNRESERVED_CHARS
]
)
_ROOT_PATHS = frozenset(((), (u"",)))
def _encode_reserved(text, maximal=True):
# type: (Text, bool) -> Text
"""A very comprehensive percent encoding for encoding all
delimiters. Used for arguments to DecodedURL, where a % means a
percent sign, and not the character used by URLs for escaping
bytes.
"""
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_UNRESERVED_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[
_UNRESERVED_QUOTE_MAP[t] if t in _UNRESERVED_CHARS else t
for t in text
]
)
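# Illustrative behaviour (added note, not in the original source):
#   _encode_reserved(u'a/b') == u'a%2Fb'  # '/' is a delimiter, so it is escaped
#   _encode_reserved(u'a-b') == u'a-b'    # '-' is unreserved, left as-is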
def _encode_path_part(text, maximal=True):
# type: (Text, bool) -> Text
"Percent-encode a single segment of a URL path."
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_PATH_PART_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[_PATH_PART_QUOTE_MAP[t] if t in _PATH_DELIMS else t for t in text]
)
def _encode_schemeless_path_part(text, maximal=True):
# type: (Text, bool) -> Text
"""Percent-encode the first segment of a URL path for a URL without a
scheme specified.
"""
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_SCHEMELESS_PATH_PART_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[
_SCHEMELESS_PATH_PART_QUOTE_MAP[t]
if t in _SCHEMELESS_PATH_DELIMS
else t
for t in text
]
)
def _encode_path_parts(
text_parts, # type: Sequence[Text]
rooted=False, # type: bool
has_scheme=True, # type: bool
has_authority=True, # type: bool
maximal=True, # type: bool
):
# type: (...) -> Sequence[Text]
"""
Percent-encode a tuple of path parts into a complete path.
Setting *maximal* to False percent-encodes only the reserved
characters that are syntactically necessary for serialization,
preserving any IRI-style textual data.
Leaving *maximal* set to its default True percent-encodes
everything required to convert a portion of an IRI to a portion of
a URI.
RFC 3986 3.3:
If a URI contains an authority component, then the path component
must either be empty or begin with a slash ("/") character. If a URI
does not contain an authority component, then the path cannot begin
with two slash characters ("//"). In addition, a URI reference
(Section 4.1) may be a relative-path reference, in which case the
first path segment cannot contain a colon (":") character.
"""
if not text_parts:
return ()
if rooted:
text_parts = (u"",) + tuple(text_parts)
# elif has_authority and text_parts:
# raise Exception('see rfc above') # TODO: too late to fail like this?
encoded_parts = [] # type: List[Text]
if has_scheme:
encoded_parts = [
_encode_path_part(part, maximal=maximal) if part else part
for part in text_parts
]
else:
encoded_parts = [_encode_schemeless_path_part(text_parts[0])]
encoded_parts.extend(
[
_encode_path_part(part, maximal=maximal) if part else part
for part in text_parts[1:]
]
)
return tuple(encoded_parts)
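# Illustrative behaviour (added note, not in the original source):
#   _encode_path_parts((u'a b', u'c'), rooted=True) == (u'', u'a%20b', u'c')
# rooting prepends the empty leading segment (so the path renders with a
# leading slash) and maximal encoding percent-escapes the space.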
def _encode_query_key(text, maximal=True):
# type: (Text, bool) -> Text
"""
Percent-encode a single query string key or value.
"""
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_QUERY_KEY_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[_QUERY_KEY_QUOTE_MAP[t] if t in _QUERY_KEY_DELIMS else t for t in text]
)
def _encode_query_value(text, maximal=True):
# type: (Text, bool) -> Text
"""
Percent-encode a single query string key or value.
"""
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_QUERY_VALUE_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[
_QUERY_VALUE_QUOTE_MAP[t] if t in _QUERY_VALUE_DELIMS else t
for t in text
]
)
def _encode_fragment_part(text, maximal=True):
# type: (Text, bool) -> Text
"""Quote the fragment part of the URL. Fragments don't have
subdelimiters, so the whole URL fragment can be passed.
"""
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_FRAGMENT_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[_FRAGMENT_QUOTE_MAP[t] if t in _FRAGMENT_DELIMS else t for t in text]
)
def _encode_userinfo_part(text, maximal=True):
# type: (Text, bool) -> Text
"""Quote special characters in either the username or password
section of the URL.
"""
if maximal:
bytestr = normalize("NFC", text).encode("utf8")
return u"".join([_USERINFO_PART_QUOTE_MAP[b] for b in bytestr])
return u"".join(
[
_USERINFO_PART_QUOTE_MAP[t] if t in _USERINFO_DELIMS else t
for t in text
]
)
# This port list painstakingly curated by hand searching through
# https://www.iana.org/assignments/uri-schemes/uri-schemes.xhtml
# and
# https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
SCHEME_PORT_MAP = {
"acap": 674,
"afp": 548,
"dict": 2628,
"dns": 53,
"file": None,
"ftp": 21,
"git": 9418,
"gopher": 70,
"http": 80,
"https": 443,
"imap": 143,
"ipp": 631,
"ipps": 631,
"irc": 194,
"ircs": 6697,
"ldap": 389,
"ldaps": 636,
"mms": 1755,
"msrp": 2855,
"msrps": None,
"mtqp": 1038,
"nfs": 111,
"nntp": 119,
"nntps": 563,
"pop": 110,
"prospero": 1525,
"redis": 6379,
"rsync": 873,
"rtsp": 554,
"rtsps": 322,
"rtspu": 5005,
"sftp": 22,
"smb": 445,
"snmp": 161,
"ssh": 22,
"steam": None,
"svn": 3690,
"telnet": 23,
"ventrilo": 3784,
"vnc": 5900,
"wais": 210,
"ws": 80,
"wss": 443,
"xmpp": None,
}
# This list of schemes that don't use authorities is also from the link above.
NO_NETLOC_SCHEMES = set(
[
"urn",
"about",
"bitcoin",
"blob",
"data",
"geo",
"magnet",
"mailto",
"news",
"pkcs11",
"sip",
"sips",
"tel",
]
)
# As of Mar 11, 2017, there were 44 netloc schemes, and 13 non-netloc
def register_scheme(text, uses_netloc=True, default_port=None):
# type: (Text, bool, Optional[int]) -> None
"""Registers new scheme information, resulting in correct port and
slash behavior from the URL object. There are dozens of standard
schemes preregistered, so this function is mostly meant for
proprietary internal customizations or stopgaps on missing
standards information. If a scheme seems to be missing, please
`file an issue`_!
Args:
text (Text): A string representation of the scheme.
(the 'http' in 'http://hatnote.com')
uses_netloc (bool): Does the scheme support specifying a
network host? For instance, "http" does, "mailto" does
not. Defaults to True.
default_port (Optional[int]): The default port, if any, for
netloc-using schemes.
.. _file an issue: https://github.com/mahmoud/hyperlink/issues
"""
text = text.lower()
if default_port is not None:
try:
default_port = int(default_port)
except (ValueError, TypeError):
raise ValueError(
"default_port expected integer or None, not %r"
% (default_port,)
)
if uses_netloc is True:
SCHEME_PORT_MAP[text] = default_port
elif uses_netloc is False:
if default_port is not None:
raise ValueError(
"unexpected default port while specifying"
" non-netloc scheme: %r" % default_port
)
NO_NETLOC_SCHEMES.add(text)
else:
raise ValueError("uses_netloc expected bool, not: %r" % uses_netloc)
return
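# Illustrative usage (hypothetical scheme name, not in the original source):
#   register_scheme(u'myproto', uses_netloc=True, default_port=8042)
#   URL.from_text(u'myproto://example.com/x').port  # -> 8042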
def scheme_uses_netloc(scheme, default=None):
# type: (Text, Optional[bool]) -> Optional[bool]
"""Whether or not a URL uses :code:`:` or :code:`://` to separate the
scheme from the rest of the URL depends on the scheme's own
standard definition. There is no way to infer this behavior
from other parts of the URL. A scheme either supports network
locations or it does not.
The URL type's approach to this is to check for explicitly
registered schemes, with common schemes like HTTP
preregistered. This is the same approach taken by
:mod:`urlparse`.
URL adds two additional heuristics if the scheme as a whole is
not registered. First, it attempts to check the subpart of the
scheme after the last ``+`` character. This adds intuitive
behavior for schemes like ``git+ssh``. Second, if a URL with
an unrecognized scheme is loaded, it will maintain the
separator it sees.
"""
if not scheme:
return False
scheme = scheme.lower()
if scheme in SCHEME_PORT_MAP:
return True
if scheme in NO_NETLOC_SCHEMES:
return False
if scheme.split("+")[-1] in SCHEME_PORT_MAP:
return True
return default
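# Illustrative behaviour (added note, not in the original source):
#   scheme_uses_netloc(u'http')     # -> True, registered with a default port
#   scheme_uses_netloc(u'mailto')   # -> False, registered as non-netloc
#   scheme_uses_netloc(u'git+ssh')  # -> True, 'ssh' after the last '+' is registered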
class URLParseError(ValueError):
"""Exception inheriting from :exc:`ValueError`, raised when failing to
parse a URL. Mostly raised on invalid ports and IPv6 addresses.
"""
pass
def _optional(argument, default):
# type: (Any, Any) -> Any
if argument is _UNSET:
return default
else:
return argument
def _typecheck(name, value, *types):
# type: (Text, T, Type[Any]) -> T
"""
Check that the given *value* is one of the given *types*, or raise an
exception describing the problem using *name*.
"""
if not types:
raise ValueError("expected one or more types, maybe use _textcheck?")
if not isinstance(value, types):
raise TypeError(
"expected %s for %s, got %r"
% (" or ".join([t.__name__ for t in types]), name, value)
)
return value
def _textcheck(name, value, delims=frozenset(), nullable=False):
# type: (Text, T, Iterable[Text], bool) -> T
if not isinstance(value, Text):
if nullable and value is None:
# used by query string values
return value # type: ignore[unreachable]
else:
str_name = "unicode" if PY2 else "str"
exp = str_name + " or NoneType" if nullable else str_name
raise TypeError("expected %s for %s, got %r" % (exp, name, value))
if delims and set(value) & set(delims): # TODO: test caching into regexes
raise ValueError(
"one or more reserved delimiters %s present in %s: %r"
% ("".join(delims), name, value)
)
return value # type: ignore[return-value] # T vs. Text
def iter_pairs(iterable):
# type: (Iterable[Any]) -> Iterator[Any]
"""
Iterate over the (key, value) pairs in ``iterable``.
This handles dictionaries sensibly, and falls back to assuming the
iterable yields (key, value) pairs. This behaviour is similar to
what Python's ``dict()`` constructor does.
"""
if isinstance(iterable, MappingABC):
iterable = iterable.items()
return iter(iterable)
def _decode_unreserved(text, normalize_case=False, encode_stray_percents=False):
# type: (Text, bool, bool) -> Text
return _percent_decode(
text,
normalize_case=normalize_case,
encode_stray_percents=encode_stray_percents,
_decode_map=_UNRESERVED_DECODE_MAP,
)
def _decode_userinfo_part(
text, normalize_case=False, encode_stray_percents=False
):
# type: (Text, bool, bool) -> Text
return _percent_decode(
text,
normalize_case=normalize_case,
encode_stray_percents=encode_stray_percents,
_decode_map=_USERINFO_DECODE_MAP,
)
def _decode_path_part(text, normalize_case=False, encode_stray_percents=False):
# type: (Text, bool, bool) -> Text
"""
>>> _decode_path_part(u'%61%77%2f%7a')
u'aw%2fz'
>>> _decode_path_part(u'%61%77%2f%7a', normalize_case=True)
u'aw%2Fz'
"""
return _percent_decode(
text,
normalize_case=normalize_case,
encode_stray_percents=encode_stray_percents,
_decode_map=_PATH_DECODE_MAP,
)
def _decode_query_key(text, normalize_case=False, encode_stray_percents=False):
# type: (Text, bool, bool) -> Text
return _percent_decode(
text,
normalize_case=normalize_case,
encode_stray_percents=encode_stray_percents,
_decode_map=_QUERY_KEY_DECODE_MAP,
)
def _decode_query_value(
text, normalize_case=False, encode_stray_percents=False
):
# type: (Text, bool, bool) -> Text
return _percent_decode(
text,
normalize_case=normalize_case,
encode_stray_percents=encode_stray_percents,
_decode_map=_QUERY_VALUE_DECODE_MAP,
)
def _decode_fragment_part(
text, normalize_case=False, encode_stray_percents=False
):
# type: (Text, bool, bool) -> Text
return _percent_decode(
text,
normalize_case=normalize_case,
encode_stray_percents=encode_stray_percents,
_decode_map=_FRAGMENT_DECODE_MAP,
)
def _percent_decode(
text, # type: Text
normalize_case=False, # type: bool
subencoding="utf-8", # type: Text
raise_subencoding_exc=False, # type: bool
encode_stray_percents=False, # type: bool
_decode_map=_HEX_CHAR_MAP, # type: Mapping[bytes, bytes]
):
# type: (...) -> Text
"""Convert percent-encoded text characters to their normal,
human-readable equivalents.
All characters in the input text must be encodable by
*subencoding*. All special characters underlying the values in the
percent-encoding must be decodable as *subencoding*. If a
non-*subencoding*-valid string is passed, the original text is
returned with no changes applied.
Only called by field-tailored variants, e.g.,
:func:`_decode_path_part`, as every percent-encodable part of the
URL has characters which should not be percent decoded.
>>> _percent_decode(u'abc%20def')
u'abc def'
Args:
text: Text with percent-encoding present.
normalize_case: Whether undecoded percent segments, such as encoded
delimiters, should be uppercased, per RFC 3986 Section 2.1.
See :func:`_decode_path_part` for an example.
subencoding: The name of the encoding underlying the percent-encoding.
raise_subencoding_exc: Whether an error in decoding the bytes
underlying the percent-decoding should be raised.
Returns:
Text: The percent-decoded version of *text*, decoded by *subencoding*.
"""
try:
quoted_bytes = text.encode(subencoding)
except UnicodeEncodeError:
return text
bits = quoted_bytes.split(b"%")
if len(bits) == 1:
return text
res = [bits[0]]
append = res.append
for item in bits[1:]:
hexpair, rest = item[:2], item[2:]
try:
append(_decode_map[hexpair])
append(rest)
except KeyError:
pair_is_hex = hexpair in _HEX_CHAR_MAP
if pair_is_hex or not encode_stray_percents:
append(b"%")
else:
# if it's undecodable, treat as a real percent sign,
# which is reserved (because it wasn't in the
# context-aware _decode_map passed in), and should
# stay in an encoded state.
append(b"%25")
if normalize_case and pair_is_hex:
append(hexpair.upper())
append(rest)
else:
append(item)
unquoted_bytes = b"".join(res)
try:
return unquoted_bytes.decode(subencoding)
except UnicodeDecodeError:
if raise_subencoding_exc:
raise
return text
def _decode_host(host):
# type: (Text) -> Text
"""Decode a host from ASCII-encodable text to IDNA-decoded text. If
the host text is not ASCII, it is returned unchanged, as it is
presumed that it is already IDNA-decoded.
Some technical details: _decode_host is built on top of the "idna"
package, which has some quirks:
Capital letters are not valid IDNA2008. The idna package will
raise an exception like this on capital letters:
> idna.core.InvalidCodepoint: Codepoint U+004B at position 1 ... not allowed
However, if a segment of a host (i.e., something in
url.host.split('.')) is already ASCII, idna doesn't perform its
usual checks. In fact, for capital letters it automatically
lowercases them.
This check and some other functionality can be bypassed by passing
uts46=True to idna.encode/decode. This allows a more permissive and
convenient interface. So far it seems like the balanced approach.
Example output (from idna==2.6):
>> idna.encode(u'mahmöud.io')
'xn--mahmud-zxa.io'
>> idna.encode(u'Mahmöud.io')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/mahmoud/virtualenvs/hyperlink/local/lib/python2.7/site-packages/idna/core.py", line 355, in encode
result.append(alabel(label))
File "/home/mahmoud/virtualenvs/hyperlink/local/lib/python2.7/site-packages/idna/core.py", line 276, in alabel
check_label(label)
File "/home/mahmoud/virtualenvs/hyperlink/local/lib/python2.7/site-packages/idna/core.py", line 253, in check_label
raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
idna.core.InvalidCodepoint: Codepoint U+004D at position 1 of u'Mahm\xf6ud' not allowed
>> idna.encode(u'Mahmoud.io')
'Mahmoud.io'
# Similar behavior for decodes below
>> idna.decode(u'Mahmoud.io')
    u'mahmoud.io'
>> idna.decode(u'Méhmoud.io', uts46=True)
u'm\xe9hmoud.io'
""" # noqa: E501
if not host:
return u""
try:
host_bytes = host.encode("ascii")
except UnicodeEncodeError:
host_text = host
else:
try:
host_text = idna_decode(host_bytes, uts46=True)
except ValueError:
# only reached on "narrow" (UCS-2) Python builds <3.4, see #7
# NOTE: not going to raise here, because there's no
# ambiguity in the IDNA, and the host is still
# technically usable
host_text = host
return host_text
def _resolve_dot_segments(path):
# type: (Sequence[Text]) -> Sequence[Text]
"""Normalize the URL path by resolving segments of '.' and '..'. For
more details, see `RFC 3986 section 5.2.4, Remove Dot Segments`_.
Args:
path: sequence of path segments in text form
Returns:
A new sequence of path segments with the '.' and '..' elements removed
and resolved.
.. _RFC 3986 section 5.2.4, Remove Dot Segments: https://tools.ietf.org/html/rfc3986#section-5.2.4
""" # noqa: E501
segs = [] # type: List[Text]
for seg in path:
if seg == u".":
pass
elif seg == u"..":
if segs:
segs.pop()
else:
segs.append(seg)
if list(path[-1:]) in ([u"."], [u".."]):
segs.append(u"")
return segs
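# Illustrative behaviour (added note, not in the original source):
#   _resolve_dot_segments([u'a', u'..', u'b', u'.'])  # -> [u'b', u'']
# the trailing u'' preserves the directory-style trailing slash implied by
# a final '.' or '..' segment.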
def parse_host(host):
# type: (Text) -> Tuple[Optional[AddressFamily], Text]
"""Parse the host into a tuple of ``(family, host)``, where family
is the appropriate :mod:`socket` module constant when the host is
an IP address. Family is ``None`` when the host is not an IP.
Will raise :class:`URLParseError` on invalid IPv6 constants.
Returns:
family (socket constant or None), host (string)
>>> import socket
>>> parse_host('googlewebsite.com') == (None, 'googlewebsite.com')
True
>>> parse_host('::1') == (socket.AF_INET6, '::1')
True
>>> parse_host('192.168.1.1') == (socket.AF_INET, '192.168.1.1')
True
"""
if not host:
return None, u""
if u":" in host:
try:
inet_pton(AF_INET6, host)
except socket.error as se:
raise URLParseError("invalid IPv6 host: %r (%r)" % (host, se))
except UnicodeEncodeError:
pass # TODO: this can't be a real host right?
else:
family = AF_INET6 # type: Optional[AddressFamily]
else:
try:
inet_pton(AF_INET, host)
except (socket.error, UnicodeEncodeError):
family = None # not an IP
else:
family = AF_INET
return family, host
class URL(object):
r"""From blogs to billboards, URLs are so common, that it's easy to
overlook their complexity and power. With hyperlink's
:class:`URL` type, working with URLs doesn't have to be hard.
URLs are made of many parts. Most of these parts are officially
named in `RFC 3986`_ and this diagram may prove handy in identifying
them::
foo://user:pass@example.com:8042/over/there?name=ferret#nose
\_/ \_______/ \_________/ \__/\_________/ \_________/ \__/
| | | | | | |
scheme userinfo host port path query fragment
While :meth:`~URL.from_text` is used for parsing whole URLs, the
:class:`URL` constructor builds a URL from the individual
components, like so::
>>> from hyperlink import URL
>>> url = URL(scheme=u'https', host=u'example.com', path=[u'hello', u'world'])
>>> print(url.to_text())
https://example.com/hello/world
The constructor runs basic type checks. All strings are expected
to be decoded (:class:`unicode` in Python 2). All arguments are
optional, defaulting to appropriately empty values. A full list of
constructor arguments is below.
Args:
scheme (Optional[Text]): The text name of the scheme.
host (Optional[Text]): The host portion of the network location
port (Optional[int]): The port part of the network location. If
``None`` or no port is passed, the port will default to
the default port of the scheme, if it is known. See the
``SCHEME_PORT_MAP`` and :func:`register_default_port`
for more info.
path (Iterable[Text]): A tuple of strings representing the
slash-separated parts of the path.
query (Sequence[Tuple[Text, Optional[Text]]]): The query parameters, as
            a dictionary or as a sequence of key-value pairs.
fragment (Text): The fragment part of the URL.
rooted (bool): A rooted URL is one which indicates an absolute path.
This is True on any URL that includes a host, or any relative URL
that starts with a slash.
userinfo (Text): The username or colon-separated
username:password pair.
uses_netloc (Optional[bool]): Indicates whether ``://`` (the "netloc
separator") will appear to separate the scheme from the *path* in
cases where no host is present. Setting this to ``True`` is a
non-spec-compliant affordance for the common practice of having URIs
that are *not* URLs (cannot have a 'host' part) but nevertheless use
the common ``://`` idiom that most people associate with URLs;
e.g. ``message:`` URIs like ``message://message-id`` being
equivalent to ``message:message-id``. This may be inferred based on
the scheme depending on whether :func:`register_scheme` has been
used to register the scheme and should not be passed directly unless
you know the scheme works like this and you know it has not been
registered.
All of these parts are also exposed as read-only attributes of
URL instances, along with several useful methods.
.. _RFC 3986: https://tools.ietf.org/html/rfc3986
.. _RFC 3987: https://tools.ietf.org/html/rfc3987
""" # noqa: E501
def __init__(
self,
scheme=None, # type: Optional[Text]
host=None, # type: Optional[Text]
path=(), # type: Iterable[Text]
query=(), # type: QueryParameters
fragment=u"", # type: Text
port=None, # type: Optional[int]
rooted=None, # type: Optional[bool]
userinfo=u"", # type: Text
uses_netloc=None, # type: Optional[bool]
):
# type: (...) -> None
if host is not None and scheme is None:
scheme = u"http" # TODO: why
if port is None and scheme is not None:
port = SCHEME_PORT_MAP.get(scheme)
if host and query and not path:
# per RFC 3986 6.2.3, "a URI that uses the generic syntax
# for authority with an empty path should be normalized to
# a path of '/'."
path = (u"",)
# Now that we're done detecting whether they were passed, we can set
# them to their defaults:
if scheme is None:
scheme = u""
if host is None:
host = u""
if rooted is None:
rooted = bool(host)
# Set attributes.
self._scheme = _textcheck("scheme", scheme)
if self._scheme:
if not _SCHEME_RE.match(self._scheme):
raise ValueError(
'invalid scheme: %r. Only alphanumeric, "+",'
' "-", and "." allowed. Did you meant to call'
" %s.from_text()?" % (self._scheme, self.__class__.__name__)
)
_, self._host = parse_host(_textcheck("host", host, "/?#@"))
if isinstance(path, Text):
raise TypeError(
"expected iterable of text for path, not: %r" % (path,)
)
self._path = tuple(
(_textcheck("path segment", segment, "/?#") for segment in path)
)
self._query = tuple(
(
_textcheck("query parameter name", k, "&=#"),
_textcheck("query parameter value", v, "&#", nullable=True),
)
for k, v in iter_pairs(query)
)
self._fragment = _textcheck("fragment", fragment)
self._port = _typecheck("port", port, int, NoneType)
self._rooted = _typecheck("rooted", rooted, bool)
self._userinfo = _textcheck("userinfo", userinfo, "/?#@")
if uses_netloc is None:
uses_netloc = scheme_uses_netloc(self._scheme, uses_netloc)
self._uses_netloc = _typecheck(
"uses_netloc", uses_netloc, bool, NoneType
)
will_have_authority = self._host or (
self._port and self._port != SCHEME_PORT_MAP.get(scheme)
)
if will_have_authority:
# fixup for rooted consistency; if there's any 'authority'
# represented in the textual URL, then the path must be rooted, and
# we're definitely using a netloc (there must be a ://).
self._rooted = True
self._uses_netloc = True
if (not self._rooted) and self.path[:1] == (u"",):
self._rooted = True
self._path = self._path[1:]
if not will_have_authority and self._path and not self._rooted:
# If, after fixing up the path, there *is* a path and it *isn't*
# rooted, then we are definitely not using a netloc; if we did, it
# would make the path (erroneously) look like a hostname.
self._uses_netloc = False
def get_decoded_url(self, lazy=False):
# type: (bool) -> DecodedURL
try:
return self._decoded_url
except AttributeError:
self._decoded_url = DecodedURL(self, lazy=lazy) # type: DecodedURL
return self._decoded_url
@property
def scheme(self):
# type: () -> Text
"""The scheme is a string, and the first part of an absolute URL, the
part before the first colon, and the part which defines the
semantics of the rest of the URL. Examples include "http",
"https", "ssh", "file", "mailto", and many others. See
:func:`~hyperlink.register_scheme()` for more info.
"""
return self._scheme
@property
def host(self):
# type: () -> Text
"""The host is a string, and the second standard part of an absolute
URL. When present, a valid host must be a domain name, or an
IP (v4 or v6). It occurs before the first slash, or the second
colon, if a :attr:`~hyperlink.URL.port` is provided.
"""
return self._host
@property
def port(self):
# type: () -> Optional[int]
"""The port is an integer that is commonly used in connecting to the
:attr:`host`, and almost never appears without it.
When not present in the original URL, this attribute defaults
to the scheme's default port. If the scheme's default port is
not known, and the port is not provided, this attribute will
be set to None.
>>> URL.from_text(u'http://example.com/pa/th').port
80
>>> URL.from_text(u'foo://example.com/pa/th').port
>>> URL.from_text(u'foo://example.com:8042/pa/th').port
8042
.. note::
            Per the standard, when the port is the same as the scheme's
            default port, it will be omitted in the text URL.
"""
return self._port
@property
def path(self):
# type: () -> Sequence[Text]
"""A tuple of strings, created by splitting the slash-separated
hierarchical path. Started by the first slash after the host,
terminated by a "?", which indicates the start of the
:attr:`~hyperlink.URL.query` string.
"""
return self._path
@property
def query(self):
# type: () -> QueryPairs
"""Tuple of pairs, created by splitting the ampersand-separated
mapping of keys and optional values representing
non-hierarchical data used to identify the resource. Keys are
always strings. Values are strings when present, or None when
missing.
For more operations on the mapping, see
:meth:`~hyperlink.URL.get()`, :meth:`~hyperlink.URL.add()`,
:meth:`~hyperlink.URL.set()`, and
:meth:`~hyperlink.URL.delete()`.
"""
return self._query
@property
def fragment(self):
# type: () -> Text
"""A string, the last part of the URL, indicated by the first "#"
after the :attr:`~hyperlink.URL.path` or
:attr:`~hyperlink.URL.query`. Enables indirect identification
of a secondary resource, like an anchor within an HTML page.
"""
return self._fragment
@property
def rooted(self):
# type: () -> bool
"""Whether or not the path starts with a forward slash (``/``).
This is taken from the terminology in the BNF grammar,
specifically the "path-rootless", rule, since "absolute path"
and "absolute URI" are somewhat ambiguous. :attr:`path` does
not contain the implicit prefixed ``"/"`` since that is
somewhat awkward to work with.
"""
return self._rooted
@property
def userinfo(self):
# type: () -> Text
"""The colon-separated string forming the username-password
combination.
"""
return self._userinfo
@property
def uses_netloc(self):
# type: () -> Optional[bool]
"""
Indicates whether ``://`` (the "netloc separator") will appear to
separate the scheme from the *path* in cases where no host is present.
"""
return self._uses_netloc
@property
def user(self):
# type: () -> Text
"""
The user portion of :attr:`~hyperlink.URL.userinfo`.
"""
return self.userinfo.split(u":")[0]
def authority(self, with_password=False, **kw):
# type: (bool, Any) -> Text
"""Compute and return the appropriate host/port/userinfo combination.
>>> url = URL.from_text(u'http://user:pass@localhost:8080/a/b?x=y')
>>> url.authority()
u'user:@localhost:8080'
>>> url.authority(with_password=True)
u'user:pass@localhost:8080'
Args:
with_password (bool): Whether the return value of this
method include the password in the URL, if it is
set. Defaults to False.
Returns:
Text: The authority (network location and user information) portion
of the URL.
"""
# first, a bit of twisted compat
with_password = kw.pop("includeSecrets", with_password)
if kw:
raise TypeError("got unexpected keyword arguments: %r" % kw.keys())
host = self.host
if ":" in host:
hostport = ["[" + host + "]"]
else:
hostport = [self.host]
if self.port != SCHEME_PORT_MAP.get(self.scheme):
hostport.append(Text(self.port))
authority = []
if self.userinfo:
userinfo = self.userinfo
if not with_password and u":" in userinfo:
userinfo = userinfo[: userinfo.index(u":") + 1]
authority.append(userinfo)
authority.append(u":".join(hostport))
return u"@".join(authority)
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, self.__class__):
return NotImplemented
for attr in [
"scheme",
"userinfo",
"host",
"query",
"fragment",
"port",
"uses_netloc",
"rooted",
]:
if getattr(self, attr) != getattr(other, attr):
return False
if self.path == other.path or (
self.path in _ROOT_PATHS and other.path in _ROOT_PATHS
):
return True
return False
def __ne__(self, other):
# type: (Any) -> bool
if not isinstance(other, self.__class__):
return NotImplemented
return not self.__eq__(other)
def __hash__(self):
# type: () -> int
return hash(
(
self.__class__,
self.scheme,
self.userinfo,
self.host,
self.path,
self.query,
self.fragment,
self.port,
self.rooted,
self.uses_netloc,
)
)
@property
def absolute(self):
# type: () -> bool
"""Whether or not the URL is "absolute". Absolute URLs are complete
enough to resolve to a network resource without being relative
to a base URI.
>>> URL.from_text(u'http://wikipedia.org/').absolute
True
>>> URL.from_text(u'?a=b&c=d').absolute
False
Absolute URLs must have both a scheme and a host set.
"""
return bool(self.scheme and self.host)
def replace(
self,
scheme=_UNSET, # type: Optional[Text]
host=_UNSET, # type: Optional[Text]
path=_UNSET, # type: Iterable[Text]
query=_UNSET, # type: QueryParameters
fragment=_UNSET, # type: Text
port=_UNSET, # type: Optional[int]
rooted=_UNSET, # type: Optional[bool]
userinfo=_UNSET, # type: Text
uses_netloc=_UNSET, # type: Optional[bool]
):
# type: (...) -> URL
""":class:`URL` objects are immutable, which means that attributes
are designed to be set only once, at construction. Instead of
modifying an existing URL, one simply creates a copy with the
desired changes.
If any of the following arguments is omitted, it defaults to
the value on the current URL.
Args:
scheme (Optional[Text]): The text name of the scheme.
host (Optional[Text]): The host portion of the network location.
path (Iterable[Text]): A tuple of strings representing the
slash-separated parts of the path.
query (Sequence[Tuple[Text, Optional[Text]]]): The query
                parameters, as a dictionary or as a sequence of key-value
pairs.
fragment (Text): The fragment part of the URL.
port (Optional[int]): The port part of the network location.
rooted (Optional[bool]): Whether or not the path begins with a
slash.
userinfo (Text): The username or colon-separated username:password
pair.
uses_netloc (bool): Indicates whether ``://`` (the "netloc
separator") will appear to separate the scheme from the *path*
in cases where no host is present. Setting this to ``True`` is
a non-spec-compliant affordance for the common practice of
having URIs that are *not* URLs (cannot have a 'host' part) but
nevertheless use the common ``://`` idiom that most people
associate with URLs; e.g. ``message:`` URIs like
``message://message-id`` being equivalent to
``message:message-id``. This may be inferred based on the
scheme depending on whether :func:`register_scheme` has been
used to register the scheme and should not be passed directly
unless you know the scheme works like this and you know it has
not been registered.
Returns:
URL: A copy of the current :class:`URL`, with new values for
parameters passed.
"""
if scheme is not _UNSET and scheme != self.scheme:
# when changing schemes, reset the explicit uses_netloc preference
# to honor the new scheme.
uses_netloc = None
return self.__class__(
scheme=_optional(scheme, self.scheme),
host=_optional(host, self.host),
path=_optional(path, self.path),
query=_optional(query, self.query),
fragment=_optional(fragment, self.fragment),
port=_optional(port, self.port),
rooted=_optional(rooted, self.rooted),
userinfo=_optional(userinfo, self.userinfo),
uses_netloc=_optional(uses_netloc, self.uses_netloc),
)
@classmethod
def from_text(cls, text):
# type: (Text) -> URL
"""Whereas the :class:`URL` constructor is useful for constructing
URLs from parts, :meth:`~URL.from_text` supports parsing whole
URLs from their string form::
>>> URL.from_text(u'http://example.com')
URL.from_text(u'http://example.com')
>>> URL.from_text(u'?a=b&x=y')
URL.from_text(u'?a=b&x=y')
As you can see above, it's also used as the :func:`repr` of
:class:`URL` objects. The natural counterpart to
:func:`~URL.to_text()`. This method only accepts *text*, so be
sure to decode those bytestrings.
Args:
text (Text): A valid URL string.
Returns:
URL: The structured object version of the parsed string.
.. note::
Somewhat unexpectedly, URLs are a far more permissive
format than most would assume. Many strings which don't
look like URLs are still valid URLs. As a result, this
method only raises :class:`URLParseError` on invalid port
and IPv6 values in the host portion of the URL.
"""
um = _URL_RE.match(_textcheck("text", text))
if um is None:
raise URLParseError("could not parse url: %r" % text)
gs = um.groupdict()
au_text = gs["authority"] or u""
au_m = _AUTHORITY_RE.match(au_text)
if au_m is None:
raise URLParseError(
"invalid authority %r in url: %r" % (au_text, text)
)
au_gs = au_m.groupdict()
if au_gs["bad_host"]:
raise URLParseError(
"invalid host %r in url: %r" % (au_gs["bad_host"], text)
)
userinfo = au_gs["userinfo"] or u""
host = au_gs["ipv6_host"] or au_gs["plain_host"]
port = au_gs["port"]
if port is not None:
try:
port = int(port) # type: ignore[assignment] # FIXME, see below
except ValueError:
if not port: # TODO: excessive?
raise URLParseError("port must not be empty: %r" % au_text)
raise URLParseError("expected integer for port, not %r" % port)
scheme = gs["scheme"] or u""
fragment = gs["fragment"] or u""
uses_netloc = bool(gs["_netloc_sep"])
if gs["path"]:
path = tuple(gs["path"].split(u"/"))
if not path[0]:
path = path[1:]
rooted = True
else:
rooted = False
else:
path = ()
rooted = bool(au_text)
if gs["query"]:
query = tuple(
(
qe.split(u"=", 1) # type: ignore[misc]
if u"=" in qe
else (qe, None)
)
for qe in gs["query"].split(u"&")
) # type: QueryPairs
else:
query = ()
return cls(
scheme,
host,
path,
query,
fragment,
port, # type: ignore[arg-type] # FIXME, see above
rooted,
userinfo,
uses_netloc,
)
def normalize(
self,
scheme=True,
host=True,
path=True,
query=True,
fragment=True,
userinfo=True,
percents=True,
):
# type: (bool, bool, bool, bool, bool, bool, bool) -> URL
"""Return a new URL object with several standard normalizations
applied:
* Decode unreserved characters (`RFC 3986 2.3`_)
* Uppercase remaining percent-encoded octets (`RFC 3986 2.1`_)
* Convert scheme and host casing to lowercase (`RFC 3986 3.2.2`_)
* Resolve any "." and ".." references in the path (`RFC 3986 6.2.2.3`_)
* Ensure an ending slash on URLs with an empty path (`RFC 3986 6.2.3`_)
* Encode any stray percent signs (`%`) in percent-encoded
fields (path, query, fragment, userinfo) (`RFC 3986 2.4`_)
All are applied by default, but normalizations can be disabled
per-part by passing `False` for that part's corresponding
name.
Args:
scheme (bool): Convert the scheme to lowercase
host (bool): Convert the host to lowercase
path (bool): Normalize the path (see above for details)
query (bool): Normalize the query string
fragment (bool): Normalize the fragment
userinfo (bool): Normalize the userinfo
percents (bool): Encode isolated percent signs for any
percent-encoded fields which are being normalized
(defaults to True).
>>> url = URL.from_text(u'Http://example.COM/a/../b/./c%2f?%61%')
>>> print(url.normalize().to_text())
http://example.com/b/c%2F?a%25
.. _RFC 3986 3.2.2: https://tools.ietf.org/html/rfc3986#section-3.2.2
.. _RFC 3986 2.3: https://tools.ietf.org/html/rfc3986#section-2.3
.. _RFC 3986 2.1: https://tools.ietf.org/html/rfc3986#section-2.1
.. _RFC 3986 6.2.2.3: https://tools.ietf.org/html/rfc3986#section-6.2.2.3
.. _RFC 3986 6.2.3: https://tools.ietf.org/html/rfc3986#section-6.2.3
.. _RFC 3986 2.4: https://tools.ietf.org/html/rfc3986#section-2.4
""" # noqa: E501
kw = {} # type: Dict[str, Any]
if scheme:
kw["scheme"] = self.scheme.lower()
if host:
kw["host"] = self.host.lower()
def _dec_unres(target):
# type: (Text) -> Text
return _decode_unreserved(
target, normalize_case=True, encode_stray_percents=percents
)
if path:
if self.path:
kw["path"] = [
_dec_unres(p) for p in _resolve_dot_segments(self.path)
]
else:
kw["path"] = (u"",)
if query:
kw["query"] = [
(_dec_unres(k), _dec_unres(v) if v else v)
for k, v in self.query
]
if fragment:
kw["fragment"] = _dec_unres(self.fragment)
if userinfo:
kw["userinfo"] = u":".join(
[_dec_unres(p) for p in self.userinfo.split(":", 1)]
)
return self.replace(**kw)
def child(self, *segments):
# type: (Text) -> URL
"""Make a new :class:`URL` where the given path segments are a child
of this URL, preserving other parts of the URL, including the
query string and fragment.
For example::
>>> url = URL.from_text(u'http://localhost/a/b?x=y')
>>> child_url = url.child(u"c", u"d")
>>> child_url.to_text()
u'http://localhost/a/b/c/d?x=y'
Args:
segments (Text): Additional parts to be joined and added to
the path, like :func:`os.path.join`. Special characters
in segments will be percent encoded.
Returns:
URL: A copy of the current URL with the extra path segments.
"""
if not segments:
return self
segments = [ # type: ignore[assignment] # variable is tuple
_textcheck("path segment", s) for s in segments
]
new_path = tuple(self.path)
if self.path and self.path[-1] == u"":
new_path = new_path[:-1]
new_path += tuple(_encode_path_parts(segments, maximal=False))
return self.replace(path=new_path)
def sibling(self, segment):
# type: (Text) -> URL
"""Make a new :class:`URL` with a single path segment that is a
sibling of this URL path.
Args:
segment (Text): A single path segment.
Returns:
URL: A copy of the current URL with the last path segment
replaced by *segment*. Special characters such as
``/?#`` will be percent encoded.
"""
_textcheck("path segment", segment)
new_path = tuple(self.path)[:-1] + (_encode_path_part(segment),)
return self.replace(path=new_path)
def click(self, href=u""):
# type: (Union[Text, URL]) -> URL
"""Resolve the given URL relative to this URL.
The resulting URI should match what a web browser would
generate if you visited the current URL and clicked on *href*.
>>> url = URL.from_text(u'http://blog.hatnote.com/')
>>> url.click(u'/post/155074058790').to_text()
u'http://blog.hatnote.com/post/155074058790'
>>> url = URL.from_text(u'http://localhost/a/b/c/')
>>> url.click(u'../d/./e').to_text()
u'http://localhost/a/b/d/e'
Args (Text):
href: A string representing a clicked URL.
Return:
A copy of the current URL with navigation logic applied.
For more information, see `RFC 3986 section 5`_.
.. _RFC 3986 section 5: https://tools.ietf.org/html/rfc3986#section-5
"""
if href:
if isinstance(href, URL):
clicked = href
else:
# TODO: This error message is not completely accurate,
# as URL objects are now also valid, but Twisted's
# test suite (wrongly) relies on this exact message.
_textcheck("relative URL", href)
clicked = URL.from_text(href)
if clicked.absolute:
return clicked
else:
clicked = self
query = clicked.query
if clicked.scheme and not clicked.rooted:
# Schemes with relative paths are not well-defined. RFC 3986 calls
# them a "loophole in prior specifications" that should be avoided,
# or supported only for backwards compatibility.
raise NotImplementedError(
"absolute URI with rootless path: %r" % (href,)
)
else:
if clicked.rooted:
path = clicked.path
elif clicked.path:
path = tuple(self.path)[:-1] + tuple(clicked.path)
else:
path = self.path
if not query:
query = self.query
return self.replace(
scheme=clicked.scheme or self.scheme,
host=clicked.host or self.host,
port=clicked.port or self.port,
path=_resolve_dot_segments(path),
query=query,
fragment=clicked.fragment,
)
def to_uri(self):
# type: () -> URL
u"""Make a new :class:`URL` instance with all non-ASCII characters
appropriately percent-encoded. This is useful to do in preparation
for sending a :class:`URL` over a network protocol.
For example::
>>> URL.from_text(u'https://ايران.com/foo⇧bar/').to_uri()
URL.from_text(u'https://xn--mgba3a4fra.com/foo%E2%87%A7bar/')
Returns:
URL: A new instance with its path segments, query parameters, and
hostname encoded, so that they are all in the standard
US-ASCII range.
"""
new_userinfo = u":".join(
[_encode_userinfo_part(p) for p in self.userinfo.split(":", 1)]
)
new_path = _encode_path_parts(
self.path, has_scheme=bool(self.scheme), rooted=False, maximal=True
)
new_host = (
self.host
if not self.host
else idna_encode(self.host, uts46=True).decode("ascii")
)
return self.replace(
userinfo=new_userinfo,
host=new_host,
path=new_path,
query=tuple(
[
(
_encode_query_key(k, maximal=True),
_encode_query_value(v, maximal=True)
if v is not None
else None,
)
for k, v in self.query
]
),
fragment=_encode_fragment_part(self.fragment, maximal=True),
)
def to_iri(self):
# type: () -> URL
u"""Make a new :class:`URL` instance with all but a few reserved
characters decoded into human-readable format.
Percent-encoded Unicode and IDNA-encoded hostnames are
decoded, like so::
>>> url = URL.from_text(u'https://xn--mgba3a4fra.example.com/foo%E2%87%A7bar/')
>>> print(url.to_iri().to_text())
https://ايران.example.com/foo⇧bar/
.. note::
As a general Python issue, "narrow" (UCS-2) builds of
Python may not be able to fully decode certain URLs, and
            in those cases, this method will return a best-effort,
            partially-decoded URL which is still valid. This issue
            does not affect Python builds 3.4+.
Returns:
URL: A new instance with its path segments, query parameters, and
hostname decoded for display purposes.
""" # noqa: E501
new_userinfo = u":".join(
[_decode_userinfo_part(p) for p in self.userinfo.split(":", 1)]
)
host_text = _decode_host(self.host)
return self.replace(
userinfo=new_userinfo,
host=host_text,
path=[_decode_path_part(segment) for segment in self.path],
query=tuple(
(
_decode_query_key(k),
_decode_query_value(v) if v is not None else None,
)
for k, v in self.query
),
fragment=_decode_fragment_part(self.fragment),
)
def to_text(self, with_password=False):
# type: (bool) -> Text
"""Render this URL to its textual representation.
By default, the URL text will *not* include a password, if one
is set. RFC 3986 considers using URLs to represent such
sensitive information as deprecated. Quoting from RFC 3986,
`section 3.2.1`:
"Applications should not render as clear text any data after the
first colon (":") character found within a userinfo subcomponent
unless the data after the colon is the empty string (indicating no
password)."
Args (bool):
with_password: Whether or not to include the password in the URL
text. Defaults to False.
Returns:
Text: The serialized textual representation of this URL, such as
``u"http://example.com/some/path?some=query"``.
The natural counterpart to :class:`URL.from_text()`.
.. _section 3.2.1: https://tools.ietf.org/html/rfc3986#section-3.2.1
"""
scheme = self.scheme
authority = self.authority(with_password)
path = "/".join(
_encode_path_parts(
self.path,
rooted=self.rooted,
has_scheme=bool(scheme),
has_authority=bool(authority),
maximal=False,
)
)
query_parts = []
for k, v in self.query:
if v is None:
query_parts.append(_encode_query_key(k, maximal=False))
else:
query_parts.append(
u"=".join(
(
_encode_query_key(k, maximal=False),
_encode_query_value(v, maximal=False),
)
)
)
query_string = u"&".join(query_parts)
fragment = self.fragment
parts = [] # type: List[Text]
_add = parts.append
if scheme:
_add(scheme)
_add(":")
if authority:
_add("//")
_add(authority)
elif scheme and path[:2] != "//" and self.uses_netloc:
_add("//")
if path:
if scheme and authority and path[:1] != "/":
_add("/") # relpaths with abs authorities auto get '/'
_add(path)
if query_string:
_add("?")
_add(query_string)
if fragment:
_add("#")
_add(fragment)
return u"".join(parts)
def __repr__(self):
# type: () -> str
"""Convert this URL to an representation that shows all of its
constituent parts, as well as being a valid argument to
:func:`eval`.
"""
return "%s.from_text(%r)" % (self.__class__.__name__, self.to_text())
def _to_bytes(self):
# type: () -> bytes
"""
Allows for direct usage of URL objects with libraries like
requests, which automatically stringify URL parameters. See
issue #49.
"""
return self.to_uri().to_text().encode("ascii")
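    # Usage sketch (illustrative, not part of the original module): libraries
    # like requests stringify URL parameters, so e.g. `requests.get(url)` ends
    # up calling the __str__/__bytes__ hooks assigned just below.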
if PY2:
__str__ = _to_bytes
__unicode__ = to_text
else:
__bytes__ = _to_bytes
__str__ = to_text
# # Begin Twisted Compat Code
asURI = to_uri
asIRI = to_iri
@classmethod
def fromText(cls, s):
# type: (Text) -> URL
return cls.from_text(s)
def asText(self, includeSecrets=False):
# type: (bool) -> Text
return self.to_text(with_password=includeSecrets)
def __dir__(self):
# type: () -> Sequence[Text]
try:
ret = object.__dir__(self)
except AttributeError:
# object.__dir__ == AttributeError # pdw for py2
ret = dir(self.__class__) + list(self.__dict__.keys())
ret = sorted(set(ret) - set(["fromText", "asURI", "asIRI", "asText"]))
return ret
# # End Twisted Compat Code
def add(self, name, value=None):
# type: (Text, Optional[Text]) -> URL
"""Make a new :class:`URL` instance with a given query argument,
*name*, added to it with the value *value*, like so::
>>> URL.from_text(u'https://example.com/?x=y').add(u'x')
URL.from_text(u'https://example.com/?x=y&x')
>>> URL.from_text(u'https://example.com/?x=y').add(u'x', u'z')
URL.from_text(u'https://example.com/?x=y&x=z')
Args:
name (Text): The name of the query parameter to add.
The part before the ``=``.
value (Optional[Text]): The value of the query parameter to add.
The part after the ``=``. Defaults to ``None``, meaning no
value.
Returns:
URL: A new :class:`URL` instance with the parameter added.
"""
return self.replace(query=self.query + ((name, value),))
def set(self, name, value=None):
# type: (Text, Optional[Text]) -> URL
"""Make a new :class:`URL` instance with the query parameter *name*
        set to *value*. All existing occurrences, if any, are replaced
by the single name-value pair.
>>> URL.from_text(u'https://example.com/?x=y').set(u'x')
URL.from_text(u'https://example.com/?x')
>>> URL.from_text(u'https://example.com/?x=y').set(u'x', u'z')
URL.from_text(u'https://example.com/?x=z')
Args:
name (Text): The name of the query parameter to set.
The part before the ``=``.
value (Optional[Text]): The value of the query parameter to set.
The part after the ``=``. Defaults to ``None``, meaning no
value.
Returns:
URL: A new :class:`URL` instance with the parameter set.
"""
# Preserve the original position of the query key in the list
q = [(k, v) for (k, v) in self.query if k != name]
idx = next(
(i for (i, (k, v)) in enumerate(self.query) if k == name), -1
)
q[idx:idx] = [(name, value)]
return self.replace(query=q)
def get(self, name):
# type: (Text) -> List[Optional[Text]]
"""Get a list of values for the given query parameter, *name*::
>>> url = URL.from_text(u'?x=1&x=2')
>>> url.get('x')
[u'1', u'2']
>>> url.get('y')
[]
If the given *name* is not set, an empty list is returned. A
list is always returned, and this method raises no exceptions.
Args:
name (Text): The name of the query parameter to get.
Returns:
List[Optional[Text]]: A list of all the values associated with the
key, in string form.
"""
return [value for (key, value) in self.query if name == key]
def remove(
self,
name, # type: Text
value=_UNSET, # type: Text
limit=None, # type: Optional[int]
):
# type: (...) -> URL
"""Make a new :class:`URL` instance with occurrences of the query
parameter *name* removed, or, if *value* is set, parameters
matching *name* and *value*. No exception is raised if the
parameter is not already set.
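        For example::
            >>> url = URL.from_text(u'https://example.com/?a=1&a=2&b=3')
            >>> print(url.remove(u'a').to_text())
            https://example.com/?b=3
            >>> print(url.remove(u'a', value=u'1').to_text())
            https://example.com/?a=2&b=3
            >>> print(url.remove(u'a', limit=1).to_text())
            https://example.com/?a=2&b=3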
Args:
name (Text): The name of the query parameter to remove.
value (Text): Optional value to additionally filter on.
Setting this removes query parameters which match both name
and value.
limit (Optional[int]): Optional maximum number of parameters to
remove.
Returns:
URL: A new :class:`URL` instance with the parameter removed.
"""
if limit is None:
if value is _UNSET:
nq = [(k, v) for (k, v) in self.query if k != name]
else:
nq = [
(k, v)
for (k, v) in self.query
if not (k == name and v == value)
]
else:
nq, removed_count = [], 0
for k, v in self.query:
if (
k == name
and (value is _UNSET or v == value)
and removed_count < limit
):
removed_count += 1 # drop it
else:
nq.append((k, v)) # keep it
return self.replace(query=nq)
EncodedURL = URL # An alias better describing what the URL really is
_EMPTY_URL = URL()
class DecodedURL(object):
"""
:class:`DecodedURL` is a type designed to act as a higher-level
interface to :class:`URL` and the recommended type for most
operations. By analogy, :class:`DecodedURL` is the
:class:`unicode` to URL's :class:`bytes`.
:class:`DecodedURL` automatically handles encoding and decoding
all its components, such that all inputs and outputs are in a
maximally-decoded state. Note that this means, for some special
cases, a URL may not "roundtrip" character-for-character, but this
is considered a good tradeoff for the safety of automatic
encoding.
Otherwise, :class:`DecodedURL` has almost exactly the same API as
:class:`URL`.
Where applicable, a UTF-8 encoding is presumed. Be advised that
some interactions can raise :exc:`UnicodeEncodeErrors` and
:exc:`UnicodeDecodeErrors`, just like when working with
bytestrings. Examples of such interactions include handling query
strings encoding binary data, and paths containing segments with
special characters encoded with codecs other than UTF-8.
Args:
url (URL): A :class:`URL` object to wrap.
        lazy (bool): Set to True to avoid pre-decoding all parts of the
            URL to check for validity. Defaults to False.
.. note::
The :class:`DecodedURL` initializer takes a :class:`URL` object,
not URL components, like :class:`URL`. To programmatically
construct a :class:`DecodedURL`, you can use this pattern:
>>> print(DecodedURL().replace(scheme=u'https',
... host=u'pypi.org', path=(u'projects', u'hyperlink')).to_text())
https://pypi.org/projects/hyperlink
.. versionadded:: 18.0.0
"""
def __init__(self, url=_EMPTY_URL, lazy=False):
# type: (URL, bool) -> None
self._url = url
if not lazy:
# cache the following, while triggering any decoding
# issues with decodable fields
self.host, self.userinfo, self.path, self.query, self.fragment
return
@classmethod
def from_text(cls, text, lazy=False):
# type: (Text, bool) -> DecodedURL
"""\
Make a `DecodedURL` instance from any text string containing a URL.
Args:
text (Text): Text containing the URL
            lazy (bool): Set to True to skip pre-decoding all parts of the
                URL to check for validity. Defaults to False.
"""
_url = URL.from_text(text)
return cls(_url, lazy=lazy)
@property
def encoded_url(self):
# type: () -> URL
"""Access the underlying :class:`URL` object, which has any special
characters encoded.
"""
return self._url
def to_text(self, with_password=False):
# type: (bool) -> Text
"Passthrough to :meth:`~hyperlink.URL.to_text()`"
return self._url.to_text(with_password)
def to_uri(self):
# type: () -> URL
"Passthrough to :meth:`~hyperlink.URL.to_uri()`"
return self._url.to_uri()
def to_iri(self):
# type: () -> URL
"Passthrough to :meth:`~hyperlink.URL.to_iri()`"
return self._url.to_iri()
def click(self, href=u""):
# type: (Union[Text, URL, DecodedURL]) -> DecodedURL
"""Return a new DecodedURL wrapping the result of
:meth:`~hyperlink.URL.click()`
"""
if isinstance(href, DecodedURL):
href = href._url
return self.__class__(self._url.click(href=href))
def sibling(self, segment):
# type: (Text) -> DecodedURL
"""Automatically encode any reserved characters in *segment* and
return a new `DecodedURL` wrapping the result of
:meth:`~hyperlink.URL.sibling()`
"""
return self.__class__(self._url.sibling(_encode_reserved(segment)))
def child(self, *segments):
# type: (Text) -> DecodedURL
"""Automatically encode any reserved characters in *segments* and
return a new `DecodedURL` wrapping the result of
:meth:`~hyperlink.URL.child()`.
"""
if not segments:
return self
new_segs = [_encode_reserved(s) for s in segments]
return self.__class__(self._url.child(*new_segs))
def normalize(
self,
scheme=True,
host=True,
path=True,
query=True,
fragment=True,
userinfo=True,
percents=True,
):
# type: (bool, bool, bool, bool, bool, bool, bool) -> DecodedURL
"""Return a new `DecodedURL` wrapping the result of
:meth:`~hyperlink.URL.normalize()`
"""
return self.__class__(
self._url.normalize(
scheme, host, path, query, fragment, userinfo, percents
)
)
@property
def absolute(self):
# type: () -> bool
return self._url.absolute
@property
def scheme(self):
# type: () -> Text
return self._url.scheme
@property
def host(self):
# type: () -> Text
return _decode_host(self._url.host)
@property
def port(self):
# type: () -> Optional[int]
return self._url.port
@property
def rooted(self):
# type: () -> bool
return self._url.rooted
@property
def path(self):
# type: () -> Sequence[Text]
if not hasattr(self, "_path"):
self._path = tuple(
[
_percent_decode(p, raise_subencoding_exc=True)
for p in self._url.path
]
)
return self._path
@property
def query(self):
# type: () -> QueryPairs
if not hasattr(self, "_query"):
self._query = cast(
QueryPairs,
tuple(
tuple(
_percent_decode(x, raise_subencoding_exc=True)
if x is not None
else None
for x in (k, v)
)
for k, v in self._url.query
),
)
return self._query
@property
def fragment(self):
# type: () -> Text
if not hasattr(self, "_fragment"):
frag = self._url.fragment
self._fragment = _percent_decode(frag, raise_subencoding_exc=True)
return self._fragment
@property
def userinfo(self):
# type: () -> Union[Tuple[str], Tuple[str, str]]
if not hasattr(self, "_userinfo"):
self._userinfo = cast(
Union[Tuple[str], Tuple[str, str]],
tuple(
tuple(
_percent_decode(p, raise_subencoding_exc=True)
for p in self._url.userinfo.split(":", 1)
)
),
)
return self._userinfo
@property
def user(self):
# type: () -> Text
return self.userinfo[0]
@property
def uses_netloc(self):
# type: () -> Optional[bool]
return self._url.uses_netloc
def replace(
self,
scheme=_UNSET, # type: Optional[Text]
host=_UNSET, # type: Optional[Text]
path=_UNSET, # type: Iterable[Text]
query=_UNSET, # type: QueryParameters
fragment=_UNSET, # type: Text
port=_UNSET, # type: Optional[int]
rooted=_UNSET, # type: Optional[bool]
userinfo=_UNSET, # type: Union[Tuple[str], Tuple[str, str]]
uses_netloc=_UNSET, # type: Optional[bool]
):
# type: (...) -> DecodedURL
"""While the signature is the same, this `replace()` differs a little
from URL.replace. For instance, it accepts userinfo as a
tuple, not as a string, handling the case of having a username
containing a `:`. As with the rest of the methods on
DecodedURL, if you pass a reserved character, it will be
automatically encoded instead of an error being raised.
"""
if path is not _UNSET:
path = tuple(_encode_reserved(p) for p in path)
if query is not _UNSET:
query = cast(
QueryPairs,
tuple(
tuple(
_encode_reserved(x) if x is not None else None
for x in (k, v)
)
for k, v in iter_pairs(query)
),
)
if userinfo is not _UNSET:
if len(userinfo) > 2:
raise ValueError(
'userinfo expected sequence of ["user"] or'
' ["user", "password"], got %r' % (userinfo,)
)
userinfo_text = u":".join([_encode_reserved(p) for p in userinfo])
else:
userinfo_text = _UNSET
new_url = self._url.replace(
scheme=scheme,
host=host,
path=path,
query=query,
fragment=fragment,
port=port,
rooted=rooted,
userinfo=userinfo_text,
uses_netloc=uses_netloc,
)
return self.__class__(url=new_url)
def get(self, name):
# type: (Text) -> List[Optional[Text]]
"Get the value of all query parameters whose name matches *name*"
return [v for (k, v) in self.query if name == k]
def add(self, name, value=None):
# type: (Text, Optional[Text]) -> DecodedURL
"""Return a new DecodedURL with the query parameter *name* and *value*
added."""
return self.replace(query=self.query + ((name, value),))
def set(self, name, value=None):
# type: (Text, Optional[Text]) -> DecodedURL
"Return a new DecodedURL with query parameter *name* set to *value*"
query = self.query
q = [(k, v) for (k, v) in query if k != name]
idx = next((i for (i, (k, v)) in enumerate(query) if k == name), -1)
q[idx:idx] = [(name, value)]
return self.replace(query=q)
def remove(
self,
name, # type: Text
value=_UNSET, # type: Text
limit=None, # type: Optional[int]
):
# type: (...) -> DecodedURL
"""Return a new DecodedURL with query parameter *name* removed.
Optionally also filter for *value*, as well as cap the number
of parameters removed with *limit*.
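        For example::
            >>> u = DecodedURL.from_text(u'https://example.com/?a=1&a=2')
            >>> print(u.remove(u'a', limit=1).to_text())
            https://example.com/?a=2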
"""
if limit is None:
if value is _UNSET:
nq = [(k, v) for (k, v) in self.query if k != name]
else:
nq = [
(k, v)
for (k, v) in self.query
if not (k == name and v == value)
]
else:
nq, removed_count = [], 0
for k, v in self.query:
if (
k == name
and (value is _UNSET or v == value)
and removed_count < limit
):
removed_count += 1 # drop it
else:
nq.append((k, v)) # keep it
return self.replace(query=nq)
def __repr__(self):
# type: () -> str
cn = self.__class__.__name__
return "%s(url=%r)" % (cn, self._url)
def __str__(self):
# type: () -> str
# TODO: the underlying URL's __str__ needs to change to make
# this work as the URL, see #55
return str(self._url)
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, self.__class__):
return NotImplemented
return self.normalize().to_uri() == other.normalize().to_uri()
def __ne__(self, other):
# type: (Any) -> bool
if not isinstance(other, self.__class__):
return NotImplemented
return not self.__eq__(other)
def __hash__(self):
# type: () -> int
return hash(
(
self.__class__,
self.scheme,
self.userinfo,
self.host,
self.path,
self.query,
self.fragment,
self.port,
self.rooted,
self.uses_netloc,
)
)
# # Begin Twisted Compat Code
asURI = to_uri
asIRI = to_iri
@classmethod
def fromText(cls, s, lazy=False):
# type: (Text, bool) -> DecodedURL
return cls.from_text(s, lazy=lazy)
def asText(self, includeSecrets=False):
# type: (bool) -> Text
return self.to_text(with_password=includeSecrets)
def __dir__(self):
# type: () -> Sequence[Text]
try:
ret = object.__dir__(self)
except AttributeError:
# object.__dir__ == AttributeError # pdw for py2
ret = dir(self.__class__) + list(self.__dict__.keys())
ret = sorted(set(ret) - set(["fromText", "asURI", "asIRI", "asText"]))
return ret
# # End Twisted Compat Code
def parse(url, decoded=True, lazy=False):
# type: (Text, bool, bool) -> Union[URL, DecodedURL]
"""
Automatically turn text into a structured URL object.
>>> url = parse(u"https://github.com/python-hyper/hyperlink")
>>> print(url.to_text())
https://github.com/python-hyper/hyperlink
Args:
url (str): A text string representation of a URL.
decoded (bool): Whether or not to return a :class:`DecodedURL`,
which automatically handles all
encoding/decoding/quoting/unquoting for all the various
accessors of parts of the URL, or a :class:`URL`,
which has the same API, but requires handling of special
characters for different parts of the URL.
lazy (bool): In the case of `decoded=True`, this controls
whether the URL is decoded immediately or as accessed. The
default, `lazy=False`, checks all encoded parts of the URL
for decodability.
.. versionadded:: 18.0.0
"""
enc_url = EncodedURL.from_text(url)
if not decoded:
return enc_url
dec_url = DecodedURL(enc_url, lazy=lazy)
return dec_url
|
[
"thuy4tbn99@gmail.com"
] |
thuy4tbn99@gmail.com
|
a6f1ee88d3be933a37a62eb2d070d8032201a492
|
24f664aa2344d4f5d5e7b048ac4e85231715c4c8
|
/datasets/me_db/tests/integration_tests/conftest.py
|
205f1f009297ae600813291058172b6b97e9018b
|
[
"MIT"
] |
permissive
|
speycode/clfuzz
|
79320655e879d1e0a06a481e8ec2e293c7c10db7
|
f2a96cf84a7971f70cb982c07b84207db407b3eb
|
refs/heads/master
| 2020-12-05T13:44:55.486419
| 2020-01-03T14:14:03
| 2020-01-03T14:15:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,358
|
py
|
# Copyright 2018, 2019 Chris Cummins <chrisc.101@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pytest fixtures for me.db tests."""
import tempfile
from datasets.me_db import me_db
from labm8.py import app
from labm8.py import bazelutil
from labm8.py import test
FLAGS = app.FLAGS
app.DEFINE_string(
"integration_tests_inbox",
None,
"If set, this sets the inbox path to be used by the "
"integration tests. This overrides the default in "
"//datasets/me_db/integration_tests/inbox.",
)
TEST_INBOX_PATH = bazelutil.DataPath("phd/datasets/me_db/tests/test_inbox")
@test.Fixture(scope="function")
def mutable_db() -> me_db.Database:
"""Returns a populated database for the scope of the function."""
with tempfile.TemporaryDirectory(prefix="phd_") as d:
db = me_db.Database(f"sqlite:///{d}/me.db")
db.ImportMeasurementsFromInboxImporters(TEST_INBOX_PATH)
yield db
@test.Fixture(scope="session")
def db() -> me_db.Database:
"""Returns a populated database that is reused for all tests.
DO NOT MODIFY THE TEST DATABASE. This will break other tests. For a test that
modifies the database, use the `mutable_db` fixture.
"""
with tempfile.TemporaryDirectory(prefix="phd_") as d:
db = me_db.Database(f"sqlite:///{d}/me.db")
db.ImportMeasurementsFromInboxImporters(TEST_INBOX_PATH)
yield db
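# Minimal usage sketch (illustrative, not part of the original file): pytest
# injects a fixture into any test that names it as a parameter.
#
# def test_db_fixture(db):
#     assert isinstance(db, me_db.Database)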
|
[
"chrisc.101@gmail.com"
] |
chrisc.101@gmail.com
|
7def0c19f5d2bf3212b2a2cf63953f42fc2debb6
|
2fb7188adb6f52023485d8a775e4ecbf96331b81
|
/PixelGeometry/calculateRMS.py
|
eaba86ec104f0376ab2dd280fa35e239332310ec
|
[] |
no_license
|
amassiro/dEdxCalibration
|
f5c580247041b4477a118772bbda9b924ae1103a
|
837eb440711983ddd0a283ec0cf3582c4a454d24
|
refs/heads/master
| 2020-03-24T03:31:45.099672
| 2019-11-08T13:09:40
| 2019-11-08T13:09:40
| 142,422,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,185
|
py
|
import os
# example of file name:
# plots_run/cc_add_layer_3_eta_5_BPIX.root
list_files = []
for root, dirs, files in os.walk("plots_run/"):
for filename in files:
if "cc_add_" in filename and ".root" in filename :
#print(filename)
list_files.append(filename)
#print "list_files = " , list_files
eta_edges = {}
eta_edges[0] = [0.0, 0.3]
eta_edges[1] = [0.3, 0.6]
eta_edges[2] = [0.6, 1.0]
eta_edges[3] = [1.0, 1.3]
eta_edges[4] = [1.3, 1.6]
eta_edges[5] = [1.6, 2.1]
eta_edges[6] = [2.1, 2.5]
print " writing results into : ", "smear_for_cmssw.txt"
file_out = open("smear_for_cmssw.txt","w")
file_out.write("# " + "\n")
file_out.write("# pix layerorside etaMin etaMax value iedge " + "\n")
file_out.write("# " + "\n")
import ROOT
for name_file in list_files:
f = ROOT.TFile.Open("plots_run/" + name_file)
canvas_name = name_file.replace('.root', '')
canvas = f.Get(canvas_name)
#print " histograms = ", canvas.GetListOfPrimitives()
histograms = canvas.GetListOfPrimitives()
rms_mc = 0.0
rms_data = 0.0
scale_rms = -1
ilayer = -1
iedge = -1
isBPIX = -1
for histo in histograms:
#print histo.GetName()
if "_mc" in histo.GetName():
# this is the MC histogram
if histo.GetEntries() > 10:
rms_mc = histo.GetRMS()
if "_data" in histo.GetName():
if histo.GetEntries() > 10:
rms_data = histo.GetRMS()
# example name histogram: h_ilayer_1__iEdge_2__dedxById_BPIX_data
name_histo = histo.GetName()
" ilayer_1 --> 1"
position_layer = name_histo.find("ilayer")
if position_layer != -1 :
#print " name_histo = ", name_histo, " --> position_layer = ", position_layer
ilayer = name_histo[position_layer+7]
#print " name_histo = ", name_histo, " --> ", ilayer
position_edge = name_histo.find("iEdge")
if position_edge != -1 :
iedge = name_histo[position_edge+6]
isBPIX = name_histo.find("BPIX")
if isBPIX == -1 :
isBPIX = 2 # FPIX = 2 !
else :
isBPIX = 1
# pix = 1 (BPIX), 2 (FPIX)
if rms_mc != 0 and rms_data != 0:
scale_rms = rms_data/rms_mc - 1.0
print " file = ", name_file, " name_histo = ", name_histo, " --> data = ", rms_data, " ; mc = ", rms_mc
#print " name_histo = ", name_histo, " --> ", ilayer, " ; ", iedge, " : ", isBPIX
#print " ---> " + " " + str(isBPIX) + " " + str(ilayer) + " " + str(eta_edges[iedge][0]) + " " + str(eta_edges[iedge][1]) + " " + str(scale_rms) + " " + str(iedge) + "\n"
#print " ---> iedge = ", iedge, " => ", eta_edges[iedge]
#print " iedge = ", iedge
#print eta_edges[int(iedge)][1]
file_out.write(" " + str(isBPIX) + " " + str(ilayer) + " " + str(eta_edges[int(iedge)][0]) + " " + str(eta_edges[int(iedge)][1]) + " " + str(scale_rms) + " " + str(iedge) + "\n")
#print eta_edges
#print eta_edges[0]
#print eta_edges[0][1]
#print str(eta_edges[0][1])
# pix layerorside etaMin etaMax value
#
# 1 1 0.000 0.300 0.01
#
file_out.close()
|
[
"massironi.andrea@gmail.com"
] |
massironi.andrea@gmail.com
|
0ad22ff0946074d30f09ea1f71b87a7bc97ec4db
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/8/u9a.py
|
2ac685ce8df552fd9ee52ecdb37a8d04c6f14181
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'u9a':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
77976fb5e42151e942abbc439cf84f1d10eb52d3
|
066ee4df594a5dc90335d271b9d5a1b1e2a4d34c
|
/y/google-cloud-sdk/platform/google_appengine/lib/docker/tests/fake_api.py
|
861c8b35104b0f17514fbb2acea7c40a4e1866dd
|
[
"LGPL-2.1-or-later",
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ychen820/microblog
|
a2d82447525325ec58285c2e5db58b79cceaca1b
|
d379afa2db3582d5c3be652165f0e9e2e0c154c6
|
refs/heads/master
| 2021-01-20T05:58:48.424357
| 2015-04-28T22:03:09
| 2015-04-28T22:03:09
| 32,948,331
| 0
| 2
|
BSD-3-Clause
| 2020-07-25T05:04:35
| 2015-03-26T19:45:07
|
Python
|
UTF-8
|
Python
| false
| false
| 10,155
|
py
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CURRENT_VERSION = 'v1.12'
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
FAKE_TAG_NAME = 'tag'
FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
def get_fake_version():
status_code = 200
response = {'GoVersion': '1', 'Version': '1.1.1',
'GitCommit': 'deadbeef+CHANGES'}
return status_code, response
def get_fake_info():
status_code = 200
response = {'Containers': 1, 'Images': 1, 'Debug': False,
'MemoryLimit': False, 'SwapLimit': False,
'IPv4Forwarding': True}
return status_code, response
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
return status_code, response
def get_fake_images():
status_code = 200
response = [{
'Id': FAKE_IMAGE_ID,
'Created': '2 days ago',
'Repository': 'busybox',
'RepoTags': ['busybox:latest', 'busybox:1.0'],
}]
return status_code, response
def get_fake_image_history():
status_code = 200
response = [
{
"Id": "b750fe79269d",
"Created": 1364102658,
"CreatedBy": "/bin/bash"
},
{
"Id": "27cf78414709",
"Created": 1364068391,
"CreatedBy": ""
}
]
return status_code, response
def post_fake_import_image():
status_code = 200
response = 'Import messages...'
return status_code, response
def get_fake_containers():
status_code = 200
response = [{
'Id': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Created': '2 days ago',
'Command': 'true',
'Status': 'fake status'
}]
return status_code, response
def post_fake_start_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_resize_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_create_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def get_fake_inspect_container():
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
'Config': {'Privileged': True},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
"State": {
"Running": True,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-09-25T14:01:18.869545111+02:00",
"Ghost": False
},
}
return status_code, response
def get_fake_inspect_image():
status_code = 200
response = {
'id': FAKE_IMAGE_ID,
'parent': "27cf784147099545",
'created': "2013-03-23T22:24:18.818426-07:00",
'container': FAKE_CONTAINER_ID,
'container_config':
{
"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"PortSpecs": "",
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"Env": "",
"Cmd": ["/bin/bash"],
"Dns": "",
"Image": "base",
"Volumes": "",
"VolumesFrom": "",
"WorkingDir": ""
},
'Size': 6823592
}
return status_code, response
def get_fake_port():
status_code = 200
response = {
'HostConfig': {
'Binds': None,
'ContainerIDFile': '',
'Links': None,
'LxcConf': None,
'PortBindings': {
'1111': None,
'1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
'2222': None
},
'Privileged': False,
'PublishAllPorts': False
},
'NetworkSettings': {
'Bridge': 'docker0',
'PortMapping': None,
'Ports': {
'1111': None,
'1111/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '4567'}],
'2222': None}
}
}
return status_code, response
def get_fake_insert_image():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_wait():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_logs():
status_code = 200
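    # Each fake frame mimics Docker's multiplexed stream format: 1 stream-type
    # byte (0x01 = stdout), 3 padding bytes, then a 4-byte big-endian payload
    # length (0x11 = 17 bytes for "Flowering Nights\n").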
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
return status_code, response
def get_fake_diff():
status_code = 200
response = [{'Path': '/test', 'Kind': 1}]
return status_code, response
def get_fake_export():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_stop_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_kill_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_restart_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def delete_fake_remove_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_image_create():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def delete_fake_remove_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_get_image():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_load_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_commit():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_push():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_build_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_tag_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
# Maps real api url to fake response callback
prefix = 'http+unix://var/run/docker.sock'
fake_responses = {
'{1}/{0}/version'.format(CURRENT_VERSION, prefix):
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
get_fake_images,
'{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
get_fake_image_history,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_import_image,
'{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
get_fake_containers,
'{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
post_fake_start_container,
'{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
post_fake_resize_container,
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_container,
'{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
post_fake_tag_image,
'{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
get_fake_wait,
'{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
get_fake_logs,
'{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
get_fake_diff,
'{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
get_fake_export,
'{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
post_fake_stop_container,
'{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
post_fake_kill_container,
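    # NOTE: duplicate key -- this '.../containers/3cc2351ab11b/json' entry
    # repeats the one above, so in this dict literal get_fake_port overrides
    # get_fake_inspect_container.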
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_port,
'{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
post_fake_restart_container,
'{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
delete_fake_remove_container,
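    # NOTE: duplicate key -- '.../images/create' already appears above mapped
    # to post_fake_import_image; this later post_fake_image_create entry wins.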
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_image_create,
'{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
delete_fake_remove_image,
'{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
get_fake_get_image,
'{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
post_fake_load_image,
'{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_image,
'{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
get_fake_insert_image,
'{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
post_fake_push,
'{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
post_fake_commit,
'{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
post_fake_create_container,
'{1}/{0}/build'.format(CURRENT_VERSION, prefix):
post_fake_build_container
}
|
[
"ychen207@binghamton.edu"
] |
ychen207@binghamton.edu
|
7a567594c1e7adc2858272c848dcbaa3f69ad25b
|
a9c3db07c29a46baf4f88afe555564ed0d8dbf2e
|
/src/1018-largest-perimeter-triangle/largest-perimeter-triangle.py
|
0a77f6046ef72192ef87f5b340d5bb1cb593a6eb
|
[] |
no_license
|
HLNN/leetcode
|
86d2f5b390be9edfceadd55f68d94c78bc8b7644
|
35010d67341e6038ae4ddffb4beba4a9dba05d2a
|
refs/heads/master
| 2023-03-13T16:44:58.901326
| 2023-03-03T00:01:05
| 2023-03-03T00:01:05
| 165,402,662
| 6
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# Given an integer array nums, return the largest perimeter of a triangle with a non-zero area, formed from three of these lengths. If it is impossible to form any triangle of a non-zero area, return 0.
#
#
# Example 1:
#
#
# Input: nums = [2,1,2]
# Output: 5
# Explanation: You can form a triangle with three side lengths: 1, 2, and 2.
#
#
# Example 2:
#
#
# Input: nums = [1,2,1,10]
# Output: 0
# Explanation:
# You cannot use the side lengths 1, 1, and 2 to form a triangle.
# You cannot use the side lengths 1, 1, and 10 to form a triangle.
# You cannot use the side lengths 1, 2, and 10 to form a triangle.
# As we cannot use any three side lengths to form a triangle of non-zero area, we return 0.
#
#
#
# Constraints:
#
#
# 3 <= nums.length <= 10^4
# 1 <= nums[i] <= 10^6
#
#
class Solution:
def largestPerimeter(self, nums: List[int]) -> int:
nums.sort()
for i in range(len(nums) - 3, -1, -1):
if nums[i] + nums[i + 1] > nums[i + 2]:
return nums[i] + nums[i + 1] + nums[i + 2]
return 0
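# Why sorting plus consecutive triples suffices (sketch): for a fixed largest
# side nums[i + 2], the two largest remaining sides nums[i] and nums[i + 1]
# are the best candidates; if even nums[i] + nums[i + 1] fails the triangle
# inequality, no other pair can pass it. E.g. nums = [2, 1, 2] sorts to
# [1, 2, 2]; 1 + 2 > 2, so the answer is 1 + 2 + 2 = 5.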
|
[
"Huangln555@gmail.com"
] |
Huangln555@gmail.com
|
a40ca002ab664eb7d143fa43e4065c311d103456
|
8c7be58d2ddb6d1d10b93cb1139d2376be9fd07d
|
/naive_chatbot/predictor_pb2.py
|
c5bb06e1718c3cad08cebb4bfb529397d2cb7a66
|
[] |
no_license
|
zhouziqunzzq/MiraiGo-DD-naive-chatbot
|
c47e630cf38dd1b67ec4ccda2769a5db4a49608c
|
a69143c306a7e91e9262cec82ea1a8af8afecb45
|
refs/heads/main
| 2023-04-27T19:43:57.272470
| 2021-05-16T23:40:53
| 2021-05-16T23:40:53
| 343,130,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 7,834
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: predictor.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='predictor.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0fpredictor.proto\"o\n\x0ePredictRequest\x12\x0b\n\x03msg\x18\x01 \x02(\t\x12\x17\n\x0cn_prediction\x18\x02 \x01(\x03:\x01\x35\x12 \n\x13time_offset_seconds\x18\x03 \x01(\x03:\x03\x33\x30\x30\x12\x15\n\nsim_cutoff\x18\x04 \x01(\x02:\x01\x30\"l\n\x0cPredictReply\x12.\n\x06result\x18\x01 \x03(\x0b\x32\x1e.PredictReply.PredictReplyElem\x1a,\n\x10PredictReplyElem\x12\x0b\n\x03msg\x18\x01 \x02(\t\x12\x0b\n\x03sim\x18\x02 \x02(\x02\x32?\n\rChatPredictor\x12.\n\nPredictOne\x12\x0f.PredictRequest\x1a\r.PredictReply\"\x00'
)
_PREDICTREQUEST = _descriptor.Descriptor(
name='PredictRequest',
full_name='PredictRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='msg', full_name='PredictRequest.msg', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='n_prediction', full_name='PredictRequest.n_prediction', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time_offset_seconds', full_name='PredictRequest.time_offset_seconds', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=True, default_value=300,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sim_cutoff', full_name='PredictRequest.sim_cutoff', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=130,
)
_PREDICTREPLY_PREDICTREPLYELEM = _descriptor.Descriptor(
name='PredictReplyElem',
full_name='PredictReply.PredictReplyElem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='msg', full_name='PredictReply.PredictReplyElem.msg', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sim', full_name='PredictReply.PredictReplyElem.sim', index=1,
number=2, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=240,
)
_PREDICTREPLY = _descriptor.Descriptor(
name='PredictReply',
full_name='PredictReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='PredictReply.result', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_PREDICTREPLY_PREDICTREPLYELEM, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=240,
)
_PREDICTREPLY_PREDICTREPLYELEM.containing_type = _PREDICTREPLY
_PREDICTREPLY.fields_by_name['result'].message_type = _PREDICTREPLY_PREDICTREPLYELEM
DESCRIPTOR.message_types_by_name['PredictRequest'] = _PREDICTREQUEST
DESCRIPTOR.message_types_by_name['PredictReply'] = _PREDICTREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PredictRequest = _reflection.GeneratedProtocolMessageType('PredictRequest', (_message.Message,), {
'DESCRIPTOR': _PREDICTREQUEST,
'__module__': 'predictor_pb2'
# @@protoc_insertion_point(class_scope:PredictRequest)
})
_sym_db.RegisterMessage(PredictRequest)
PredictReply = _reflection.GeneratedProtocolMessageType('PredictReply', (_message.Message,), {
'PredictReplyElem': _reflection.GeneratedProtocolMessageType('PredictReplyElem', (_message.Message,), {
'DESCRIPTOR': _PREDICTREPLY_PREDICTREPLYELEM,
'__module__': 'predictor_pb2'
# @@protoc_insertion_point(class_scope:PredictReply.PredictReplyElem)
})
,
'DESCRIPTOR': _PREDICTREPLY,
'__module__': 'predictor_pb2'
# @@protoc_insertion_point(class_scope:PredictReply)
})
_sym_db.RegisterMessage(PredictReply)
_sym_db.RegisterMessage(PredictReply.PredictReplyElem)
_CHATPREDICTOR = _descriptor.ServiceDescriptor(
name='ChatPredictor',
full_name='ChatPredictor',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=242,
serialized_end=305,
methods=[
_descriptor.MethodDescriptor(
name='PredictOne',
full_name='ChatPredictor.PredictOne',
index=0,
containing_service=None,
input_type=_PREDICTREQUEST,
output_type=_PREDICTREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CHATPREDICTOR)
DESCRIPTOR.services_by_name['ChatPredictor'] = _CHATPREDICTOR
# @@protoc_insertion_point(module_scope)
|
[
"zhouziqun@cool2645.com"
] |
zhouziqun@cool2645.com
|
6ec8eb0c7925f1935b91bc24f5fc9ba37f80f94e
|
3a4549470cb0e6e55c98522ba08ce629d60960ea
|
/froide/foirequest/migrations/0014_auto_20180111_0738.py
|
6ac2c84671c22abd5063ab69c08f7dae915f3e1f
|
[
"MIT"
] |
permissive
|
lanmarc77/froide
|
4e28d3e33017b3e776a7eb13d63c7b71bdb3bc68
|
bddc8bb27c8a7c2a959003dda724194948bc381a
|
refs/heads/main
| 2023-03-17T03:02:01.277465
| 2021-03-06T16:37:26
| 2021-03-06T16:37:26
| 345,137,125
| 0
| 0
|
MIT
| 2021-03-06T16:13:09
| 2021-03-06T16:13:09
| null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-11 06:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0001_initial'),
('foirequest', '0013_auto_20171220_1718'),
]
operations = [
migrations.AddField(
model_name='foiproject',
name='team',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='team.Team', verbose_name='Team'),
),
migrations.AlterField(
model_name='foiproject',
name='last_update',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
a9f89df989abb980a1262104c411c0d54f5ed4f1
|
f75609812d20d46a9f94ee0cfdb91c321d26b63d
|
/_python/python_OOP/ReverseList.py
|
bce461e45a9c3d243043de9f60d6131bff3880a7
|
[] |
no_license
|
IanAranha/Python2021
|
eff47a20451f61b144b17f48321a7b06308aadca
|
d9769b8b387b77753b77f6efe3a9a270a1f158d3
|
refs/heads/main
| 2023-04-02T08:20:24.382913
| 2021-04-10T22:27:10
| 2021-04-10T22:27:10
| 345,918,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
def reverseList(lst):
    # Swap symmetric elements in place, working from both ends toward the middle.
    mid = len(lst) // 2
    for i in range(0, mid):
        print("Swapping", lst[i], lst[len(lst) - 1 - i])
        lst[i], lst[len(lst) - 1 - i] = lst[len(lst) - 1 - i], lst[i]
    return lst
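# Example (illustrative): reverseList([1, 2, 3, 4]) prints
#   Swapping 1 4
#   Swapping 2 3
# and returns [4, 3, 2, 1], reversing the list in place.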
|
[
"ianorama@gmail.com"
] |
ianorama@gmail.com
|
9a1921c0fab1929c7b32e8be96af2137df25ab32
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_014/ch21_2020_03_04_16_45_58_943718.py
|
00d24fdcc27af6720298b24527260aeea05098a5
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
a = input ('Quantos dias?')
b = input ('Quantas horas?')
c = input ('Quantos minutos?')
d = input ('Quantos segundos?')
print(a,b,c,d)
|
[
"you@example.com"
] |
you@example.com
|
271dec9f5adcad66f6210b0577ceeb4e167480f9
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/scratch/jakub/xdomain/examples/2D_sep_f.py
|
1084bb4e78de58c3c36540538e88662be0ba3cde
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 4,901
|
py
|
'''
Created on Sep 23, 2009
@author: jakub
'''
if __name__ == '__main__':
from ibvpy.api import FEDomain, FERefinementGrid, FEGrid, TStepper as TS,\
BCDofGroup,BCDof, RTraceDomainListField
from ibvpy.core.tloop import TLoop, TLine
from ibvpy.mesh.xfe_subdomain import XFESubDomain
from ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import MATS2DElastic
from ibvpy.fets.fets2D.fets2D4q import FETS2D4Q
from ibvpy.fets.fets2D.fets2D4q8u import FETS2D4Q8U
from ibvpy.fets.fets2D.fets2D4q9u import FETS2D4Q9U
from ibvpy.fets.fets2D.fets2D9q import FETS2D9Q
from ibvpy.fets.fets_ls.fets_crack import FETSCrack
fets_eval = FETS2D4Q(mats_eval = MATS2DElastic(E= 1.,nu=0.))
xfets_eval = FETSCrack( parent_fets = fets_eval, int_order = 5 )
# Discretization
fe_domain = FEDomain()
fe_level1 = FERefinementGrid( domain = fe_domain, fets_eval = fets_eval )
fe_grid1 = FEGrid( coord_max = (1.,1.,0.),
shape = (1,1),
fets_eval = fets_eval,
level = fe_level1 )
# fe_grid1.deactivate( (1,0) )
# fe_grid1.deactivate( (1,1) )
fe_xdomain = XFESubDomain( domain = fe_domain,
fets_eval = xfets_eval,
#fe_grid_idx_slice = fe_grid1[1,0],
fe_grid_slice = fe_grid1['X - 0.5 '] )
ts = TS( dof_resultants = True,
sdomain = fe_domain,
bcond_list = [BCDofGroup(var='u', value = 0., dims = [0,1],
get_dof_method = fe_grid1.get_left_dofs ),
BCDofGroup(var='u', value = 0., dims = [0],
get_dof_method = fe_grid1.get_right_dofs ),
BCDofGroup(var='u', value = 0., dims = [1],
get_dof_method = fe_grid1.get_bottom_right_dofs ),
BCDofGroup(var='f', value = -.2, dims = [1],
get_dof_method = fe_grid1.get_top_right_dofs ),
BCDof(var='u', value = 0.,
dof = 9 ),
BCDof(var='f', value = -0.4/3,
dof = 11 ),
],
rtrace_list = [
# RTraceGraph(name = 'Fi,right over u_right (iteration)' ,
# var_y = 'F_int', idx_y = 0,
# var_x = 'U_k', idx_x = 1),
RTraceDomainListField(name = 'Stress' ,
var = 'sig_app', idx = 0, warp = True ),
RTraceDomainListField(name = 'Displacement' ,
var = 'u', idx = 0,
warp = True),
# RTraceDomainField(name = 'N0' ,
# var = 'N_mtx', idx = 0,
# record_on = 'update')
]
)
#
# # Add the time-loop control
tloop = TLoop( tstepper = ts,
# tolerance = 1e-4, KMAX = 4,
# debug = True, RESETMAX = 2,
tline = TLine( min = 0.0, step = 1., max = 1.0 ))
#print "elements ",fe_xdomain.elements[0]
fe_xdomain.deactivate_sliced_elems()
print 'parent elems ',fe_xdomain.fe_grid_slice.elems
print 'parent dofs ',fe_xdomain.fe_grid_slice.dofs
print "dofmap ",fe_xdomain.elem_dof_map
print "ls_values ", fe_xdomain.dots.dof_node_ls_values
print 'intersection points ',fe_xdomain.fe_grid_slice.r_i
print "triangles ", fe_xdomain.dots.rt_triangles
print "vtk points ", fe_xdomain.dots.vtk_X
print "vtk data ", fe_xdomain.dots.get_vtk_cell_data('blabla',0,0)
print 'ip_triangles', fe_xdomain.dots.int_division
print 'ip_coords', fe_xdomain.dots.ip_coords
print 'ip_weigths', fe_xdomain.dots.ip_weights
print 'ip_offset', fe_xdomain.dots.ip_offset
print 'ip_X_coords', fe_xdomain.dots.ip_X
print 'ip_ls', fe_xdomain.dots.ip_ls_values
print 'vtk_ls', fe_xdomain.dots.vtk_ls_values
print 'J_det ',fe_xdomain.dots.J_det_grid
print tloop.eval()
# #ts.setup()
from ibvpy.plugins.ibvpy_app import IBVPyApp
ibvpy_app = IBVPyApp( ibv_resource = ts )
ibvpy_app.main()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
cff565e39aba802e39091d4bd9ce442f20ad1c56
|
87ad372898e793faf1ad89f4bb3b6e84a8002131
|
/tests/unit/FundManager/test_set_next_time_lock.py
|
8ce922c052d511d6829b68fa62421a68b8fab166
|
[] |
no_license
|
atsignhandle/unagii-vault-v2
|
6a9a96c11d34257bc3fdae57455ec3b2f9c0029a
|
548f715f34329eb5abebffe40acbeb56a31cb6f3
|
refs/heads/main
| 2023-08-27T00:59:48.080152
| 2021-09-28T02:47:36
| 2021-09-28T02:47:36
| 413,448,825
| 0
| 0
| null | 2021-10-04T14:07:37
| 2021-10-04T14:07:36
| null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
import brownie
import pytest
def test_set_next_time_lock(fundManager, user):
timeLock = fundManager.timeLock()
# not time lock
with brownie.reverts("!time lock"):
fundManager.setNextTimeLock(user, {"from": user})
tx = fundManager.setNextTimeLock(user, {"from": timeLock})
assert fundManager.nextTimeLock() == user
assert tx.events["SetNextTimeLock"].values() == [user]
|
[
"tsk.nakamura@gmail.com"
] |
tsk.nakamura@gmail.com
|
152a0575c29496fc966528cfe473fc4a02bb05aa
|
a4364a0ee3dd9aa057c049771319c628b88b1b8d
|
/week5/file.py
|
90250ed1abed55b0f11a3670aecbae07c9106e37
|
[] |
no_license
|
lsh931125/altole
|
22e69c98af2fd673fa2dc40d58b64a6ebc0ce577
|
15afc3d511ed5be56ea091e9842408b362ad79e3
|
refs/heads/main
| 2023-04-10T16:15:14.865549
| 2021-04-12T14:13:59
| 2021-04-12T14:13:59
| 336,302,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# writedata.py
f = open("새파일.txt", 'w',encoding='utf-8')
for i in range(1, 11):
data = "%d번째 줄입니다.\n" % i
f.write(data)
f.close()
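# Equivalent sketch using a context manager, which closes the file even if a
# write fails:
# with open("새파일.txt", 'w', encoding='utf-8') as f:
#     for i in range(1, 11):
#         f.write("%d번째 줄입니다.\n" % i)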
|
[
"you@example.com"
] |
you@example.com
|
88cf61b2fac3023bb2a72c2fb2afef1483328050
|
29da2ca6def1270be13a3096685a8e5d82828dff
|
/CIM15/IEC61970/Informative/InfWork/WorkStatusEntry.py
|
e3719f0ca36369573702570e2383844ff6dcbd92
|
[
"MIT"
] |
permissive
|
rimbendhaou/PyCIM
|
75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3
|
d578bb0bf1af344342bd23344385ed9c06c2d0ee
|
refs/heads/master
| 2022-04-28T01:16:12.673867
| 2020-04-16T02:19:09
| 2020-04-16T02:19:09
| 256,085,381
| 0
| 0
|
MIT
| 2020-04-16T02:15:20
| 2020-04-16T02:08:14
| null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.ActivityRecord import ActivityRecord
class WorkStatusEntry(ActivityRecord):
"""A type of ActivityRecord that records information about the status of an item, such as a Work or WorkTask, at a point in time.A type of ActivityRecord that records information about the status of an item, such as a Work or WorkTask, at a point in time.
"""
def __init__(self, percentComplete=0.0, *args, **kw_args):
"""Initialises a new 'WorkStatusEntry' instance.
@param percentComplete: Estimated percentage of completion of this individual work task or overall work order.
"""
#: Estimated percentage of completion of this individual work task or overall work order.
self.percentComplete = percentComplete
super(WorkStatusEntry, self).__init__(*args, **kw_args)
_attrs = ["percentComplete"]
_attr_types = {"percentComplete": float}
_defaults = {"percentComplete": 0.0}
_enums = {}
_refs = []
_many_refs = []
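# Usage sketch (illustrative values, not from the original file):
#     entry = WorkStatusEntry(percentComplete=75.0)
#     entry.percentComplete  # -> 75.0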
|
[
"rwl@thinker.cable.virginmedia.net"
] |
rwl@thinker.cable.virginmedia.net
|
a65c657ece078bdf1546ffad0971554575ffe0ec
|
3d947e8502aa677398f4f8e102f6dc10dfa4a8ae
|
/apps/workpoint/admin.py
|
1dd68861a9eef366fa3b43767a789c59b15a8e05
|
[] |
no_license
|
wd5/3gspeed
|
400f65996a168efb2322e736f47de7d5667bb568
|
9451605898541bd8f912ed84d23c74e500b49595
|
refs/heads/master
| 2016-08-08T22:02:08.798728
| 2012-09-11T01:50:37
| 2012-09-11T01:50:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django import forms
from apps.utils.widgets import Redactor
from sorl.thumbnail.admin import AdminImageMixin
from models import Ability, Distinct, City, Point, SpeedAtPoint, ModemType, Operator
class AbilityAdmin(AdminImageMixin, admin.ModelAdmin):
list_display = ('id','title','download_speed',)
list_display_links = ('id','title',)
list_editable = ('download_speed',)
search_fields = ('title','download_speed',)
list_filter = ('download_speed',)
admin.site.register(Ability, AbilityAdmin)
class CityAdminForm(forms.ModelForm):
class Meta:
model = City
class Media:
js = (
'/media/js/jquery.js',
'http://api-maps.yandex.ru/2.0/?load=package.full&mode=debug&lang=ru-RU',
'/media/js/ymaps_form.js',
)
class DistinctInline(admin.TabularInline):
fields = ('title', 'is_published')
model = Distinct
class CityAdmin(admin.ModelAdmin):
list_display = ('id','title','coord','is_published',)
list_display_links = ('id','title','coord',)
list_editable = ('is_published',)
search_fields = ('title',)
list_filter = ('is_published',)
form = CityAdminForm
inlines = [DistinctInline,]
admin.site.register(City, CityAdmin)
class SpeedAtPointInline(admin.TabularInline):
fields = ('operator', 'modem_type', 'internet_speed')
model = SpeedAtPoint
class PointAdminForm(forms.ModelForm):
class Meta:
model = Point
class Media:
js = (
'/media/js/jquery.js',
'http://api-maps.yandex.ru/2.0/?load=package.full&mode=debug&lang=ru-RU',
'/media/js/ymaps_form.js',
'/media/js/js_loads.js',
)
class PointAdmin(admin.ModelAdmin):
list_display = ('id','distinct','coord','datetime_create',)
list_display_links = ('id','distinct','coord','datetime_create',)
list_filter = ('datetime_create','distinct',)
inlines = [SpeedAtPointInline,]
form = PointAdminForm
admin.site.register(Point, PointAdmin)
class ModemTypeInline(admin.TabularInline):
fields = ('vendor', 'model', 'download_speed')
model = ModemType
class OperatorAdmin(AdminImageMixin, admin.ModelAdmin):
list_display = ('id','title','order', 'is_published',)
list_display_links = ('id','title',)
list_editable = ('order', 'is_published',)
search_fields = ('title',)
list_filter = ('is_published',)
inlines = [ModemTypeInline,]
admin.site.register(Operator, OperatorAdmin)
|
[
"steeg.xs@gmail.com"
] |
steeg.xs@gmail.com
|
72934dd176732ebd3730bd794e8e20805878afb9
|
3ab5cc24ce16f937fbc0d8906b5a3a3e95fd08f2
|
/Kapittel-03/fasit_3.py
|
1ec0549290b8def11279486f0b77f7251cdb2572
|
[] |
no_license
|
magnusoy/Python-Grunnleggende
|
8aaf4ed5b373f17233bdcf38f52f5a1fa90db5bd
|
984e04e285b46d920c51c92095223fee0766fa29
|
refs/heads/master
| 2023-02-24T21:38:35.823047
| 2021-01-19T19:37:02
| 2021-01-19T19:37:02
| 295,389,253
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
"""
Oppgave 1
Gjør om all teksten i variabelen til store bokstaver.
"""
txt = "vI LæReR om PYTon i dETte kurSEt"
txt2 = txt.upper()
print(txt2)
"""
Oppgave 2
Slutter teksten med: pappesker?
"""
txt = "Her var det veldig mange tomme pappesker"
result = txt.endswith("pappesker")
print(result)
"""
Oppgave 3
Finn ut om teksten inneholder ordet "is" og print ut indeksen.
"""
txt = "På tivoli er det mange som spiser is, og tar karuseller."
result = txt.find("is")
print(result)
"""
Oppgave 4
Bytt ut alle komma med mellomrom.
"""
txt = "Hund,Katt,Geit,Bjørn,Gaupe,Ørn,Spurv"
result = txt.replace(",", " ")
print(result)
|
[
"magnus.oye@gmail.com"
] |
magnus.oye@gmail.com
|
11cc25e0f718e43bfcb3c39dc12f902e4724bdf2
|
c5be6a92f216957d340474b58507606a38c10f5f
|
/course-files/projects/project_01/option1_graphics/main.py
|
5b91603f485c3f9fdd9db91d718b32be8446b708
|
[] |
no_license
|
eecs110/winter2019
|
0b314c35e886b8099368ed7dfd51b707ab73c0c2
|
f4107207ca1c9c10b78bdbb74fd82410b00ee363
|
refs/heads/master
| 2020-04-11T10:09:28.100445
| 2019-03-21T18:00:25
| 2019-03-21T18:00:25
| 161,705,160
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
from tkinter import Canvas, Tk
import helpers
import utilities
import time
gui = Tk()
gui.title('My Terrarium')
# initialize canvas:
window_width = gui.winfo_screenwidth()
window_height = gui.winfo_screenheight()
canvas = Canvas(gui, width=window_width, height=window_height, background='white')
canvas.pack()
########################## YOUR CODE BELOW THIS LINE ##############################
# sample code to make a creature:
helpers.make_creature(canvas, (200, 200), fill='white')
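# A second call is a minimal sketch of reuse -- position and fill are the only
# arguments assumed here, matching the sample call above:
# helpers.make_creature(canvas, (400, 300), fill='green')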
########################## YOUR CODE ABOVE THIS LINE ##############################
# makes sure the canvas keeps running:
canvas.mainloop()
|
[
"vanwars@gmail.com"
] |
vanwars@gmail.com
|
fb6cac8ca7336dff94c7a117a60d6edc63f4e026
|
19316c08712a502b1124f2b55cb98bfcbcca7af5
|
/dev/docs-old/advanced/v1 cookbook/uses/2017-12-19 search abf for comments/go.py
|
742202ac1771508baccbc0fe5b6b22965932ae34
|
[
"MIT"
] |
permissive
|
swharden/pyABF
|
49a50d53015c50f1d5524242d4192718e6f7ccfa
|
06247e01ca3c19f5419c3b9b2207ee544e30dbc5
|
refs/heads/main
| 2023-08-28T02:31:59.540224
| 2023-08-17T16:34:48
| 2023-08-17T16:34:48
| 109,707,040
| 92
| 39
|
MIT
| 2023-04-06T00:37:29
| 2017-11-06T14:39:21
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
import glob
import sys
import os
sys.path.insert(0,os.path.abspath("../../../src/"))
import pyabf
if __name__=="__main__":
PATH=R"X:\Data\projects\2017-01-09 AT1-Cre mice\2017-01-09 global expression NTS\data"
for fname in sorted(glob.glob(PATH+"/*.abf")):
abf=pyabf.ABF(fname)
if not abf.commentsExist:
continue
print(abf.ID,abf.commentTags)
print("DONE")
|
[
"swharden@gmail.com"
] |
swharden@gmail.com
|
ddd8b19f908ab3f029b6724dfada7b82a7cf029c
|
19af5a32468f39e8fa198901b6585ca485aca0ca
|
/sketches/particle/particlesystem.py
|
669112dca09c760ea2c1c201df79b2ce2046e4dd
|
[
"MIT"
] |
permissive
|
kantel/nodebox1
|
9bf8f4bd5cd8fde569cb235a60be2bda4607ab76
|
9f9e8fe3cb7f033e55c4a52fe106c6b3fd849406
|
refs/heads/master
| 2022-05-18T12:12:14.314978
| 2022-04-09T16:04:00
| 2022-04-09T16:04:00
| 92,529,020
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
# Particle system
# After the Processing (Java) sketch by Daniel Shiffman
# from: The Nature of Code, n.p. 2012, pp. 149ff.
size(560, 315)
speed(30)
colormode(RGB)
from pvector import PVector
#--------------------- Class definitions ------------------------
class Particle(object):
def __init__(self, l):
self.acceleration = PVector(0, 0.05)
self.velocity = PVector(random(-1.5, 1.5), random(-2.0, 2.0))
self.location = l.get()
self.lifespan = 255
def run(self):
self.update()
self.display()
def update(self):
self.velocity.add(self.acceleration)
self.location.add(self.velocity)
self.lifespan -= 2
def display(self):
colorrange(255)
stroke(0, 0, 0)
fill(255, 140, 0, self.lifespan)
ellipse(self.location.x, self.location.y, 20, 20)
def isDead(self):
if self.lifespan <= 0:
return True
else:
return False
#----------------------------------------------------------------------
particles = []
def setup():
global loc
loc = PVector(WIDTH/2, 50)
def draw():
global loc
background("#1f2838")
particles.append(Particle(loc))
    for i in range(len(particles) - 1, -1, -1):  # count down to index 0 so pop(i) removes safely without skipping the first particle
particles[i].run()
if particles[i].isDead():
particles.pop(i)
# print(len(particles))
|
[
"joerg@kantel.de"
] |
joerg@kantel.de
|
952ec57240ec0cd076b3210096804a21425ada68
|
839ed24e7ecf66b6983f4f2629ef87b90c02d41e
|
/C/python3/c025.py
|
babcf7bbffb550765c70a42db073bacaff42438b
|
[] |
no_license
|
ht0919/ProgrammingExercises
|
86634153be11d08ba45c8d06bf2574b48974a2a6
|
5a9dc356603e3a54d7fdd9d8780c8851f7f37928
|
refs/heads/master
| 2020-04-04T07:32:09.083569
| 2018-10-17T04:28:37
| 2018-10-17T04:28:37
| 25,193,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
M = int(input())  # number of pages that can be carried per trip
N = int(input())  # number of times a fax arrives
first = True
cnt = 0
for i in range(N):
    tmp = input().rstrip().split(' ')
    x = int(tmp[0])  # hour
    y = int(tmp[1])  # minute
    c = int(tmp[2])  # number of pages
    if first:
        # first record
        h = x
        p = c
        first = False
    else:
        # second and later records
        if h == x:
            # same hour slot
            p += c
        else:
            # the hour slot changed
            # tally the previous hour slot
            cnt += int(p / M)
            if p % M != 0:
                cnt += 1
            # reset the variables
            h = x
            p = c
# process the data not yet tallied
cnt += int(p / M)
if p % M != 0:
    cnt += 1
print(cnt)
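# Worked check (sketch): with M = 3 and faxes of 4 pages at 9:00 and 2 pages
# at 9:30, the 9 o'clock slot totals 6 pages, so the loop above counts
# int(6 / 3) = 2 trips with no remainder trip -- i.e. cnt == 2.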
|
[
"ht0919@gmail.com"
] |
ht0919@gmail.com
|
a7b89ed666f90f741189709714a0d769e456603a
|
9dc496421cc79235c6ad4a5207de52ec2298b0dd
|
/post/migrations/0005_auto_20190117_1443.py
|
1eab8ae327ec6f5836545c39540e0aa3ad2f1b77
|
[] |
no_license
|
m0nte-cr1st0/my-first-blog
|
f9494189566984c62f16e579830074e539d19bd8
|
56aa3804253ea6c80560b6b5fcc6534614a0ac5f
|
refs/heads/master
| 2020-04-17T14:21:11.979436
| 2019-01-22T11:36:20
| 2019-01-22T11:36:20
| 155,852,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# Generated by Django 2.1.5 on 2019-01-17 12:43
from django.db import migrations, models
import post.models
class Migration(migrations.Migration):
dependencies = [
('post', '0004_auto_20190117_1429'),
]
operations = [
migrations.AlterField(
model_name='post',
name='logo',
field=models.ImageField(blank=True, default='media/def-avatar.png', height_field='height_field', null=True, upload_to=post.models.upload_location, width_field='width_field'),
),
]
|
[
"dinamo.mutu111@gmail.com"
] |
dinamo.mutu111@gmail.com
|
261b1e853ef38fe2040e5be44aa5017d5c600f5c
|
f76b743338f48bef09bdf9447cf230719b0a3c76
|
/深度学习/分数阶微分/赵骞/mnist_loader.py
|
e0a9c01d325e95807daebd1d75c81bde7434d8c0
|
[] |
no_license
|
shao1chuan/regression
|
ca71a165067f7407a598f593d61e1df24632a42e
|
4ec9e40b1eafb697f89e956d1c625d8bb8c10ada
|
refs/heads/master
| 2023-08-23T01:30:05.924203
| 2021-10-13T14:32:43
| 2021-10-13T14:32:43
| 329,070,549
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,480
|
py
|
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network mycode.
"""
#### Libraries
# Standard library
import pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = pickle.load(f, encoding='latin1')
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
numpy.ndarry containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
mycode."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = list(zip(training_inputs, training_results))
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = list(zip(validation_inputs, va_d[1]))
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = list(zip(test_inputs, te_d[1]))
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
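# Usage sketch, assuming data/mnist.pkl.gz exists relative to the working
# directory (see load_data above):
# training_data, validation_data, test_data = load_data_wrapper()
# x, y = training_data[0]
# x.shape, y.shape  # -> ((784, 1), (10, 1))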
|
[
"4448445@qq.com"
] |
4448445@qq.com
|
4ac51470ec597cda7528073e0c5c0b5d8d5a926f
|
4a50774fb00196e0bb57edf47a442e30dc1aa5be
|
/ฝึกเขียน/For_loop/mainloop.py
|
614d7b696686ddbf50faf163e35196e13cb38ec3
|
[] |
no_license
|
Sittisukintaruk/learning-Coding-Self-Python
|
1124581aba4ebb01bcdee85e328a67d9c061931f
|
b0d2cad76a54ab94ceed3b456eca48eb09225aa3
|
refs/heads/master
| 2023-02-16T21:48:57.310708
| 2021-01-13T18:10:57
| 2021-01-13T18:10:57
| 298,890,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
import math
def fact(n):
    if n >= 0:
        if n == 0 or n == 1:
            return 1
        else:
            f = 1
            for i in range(2, n + 1):
                f = f * i
            return f
    else:
        return math.nan
def permut(n, k):
    return fact(n) / fact(n - k)
def combition(n, k):
    # return fact(n) / (fact(k) * fact(n - k))
    return permut(n, k) / fact(k)
if __name__ == '__main__':
    print(fact(1))
    print(fact(5))
    print(fact(-7))
    print(fact(0))
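    # Sanity checks (sketch): 5P2 = 120/6 = 20 and 5C2 = 20/2 = 10.
    print(permut(5, 2))     # -> 20.0
    print(combition(5, 2))  # -> 10.0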
|
[
"fah78963214@gmail.com"
] |
fah78963214@gmail.com
|
5c5537d89bd2cf5d80512c0fb6160539d94352fb
|
870599f68f29701f6dc0c9911183ee1127a0e8f1
|
/attend/solver.py
|
179bdf6840c56b4794e687c8988ec6f2264ce95d
|
[] |
no_license
|
rubenvereecken/attend
|
dc8168df8fde45421ed1f02a8dc519fc07b836ee
|
d5fedeec589956ba86c11a93709ad2c7d4f01fcf
|
refs/heads/master
| 2021-03-27T20:50:00.543071
| 2017-09-29T09:42:16
| 2017-09-29T09:42:16
| 95,661,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,082
|
py
|
import tensorflow as tf
import numpy as np
import time  # the module is used as time.time() throughout this file
from collections import OrderedDict
from .util import *
from attend.log import Log; log = Log.get_logger(__name__)
from attend import util, tf_util
import attend
class AttendSolver():
def __init__(self, model, update_rule, learning_rate, stats_every):
self.model = model
self.update_rule = update_rule
self.learning_rate = learning_rate
self.summary_producer = None
if self.update_rule == 'adam':
self.optimizer = tf.train.AdamOptimizer
else:
raise Exception()
self.loss_names = ['mse', 'pearson_r', 'icc']
from attend import SummaryProducer
self.summary_producer = SummaryProducer(self.loss_names)
self.stats_every = stats_every
# TODO
# This will contain the whole validation set, for losses
# that are not implemented streaming
self.placeholders_for_loss = {}
def test(self, graph, saver, save_path, provider, init_op, context_ops, loss_ops,
output_op,
summary_writer, global_step):
# Reasons for building a new session every validation step:
# - There is no way to keep track of epochs OR to restart queues
# so a new session keeps it easy to loop through the input
sess = tf.Session(graph=graph)
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,
coord=coord, collection=attend.GraphKeys.VAL_INPUT_RUNNERS)
# Load trained variables into the shared graph inside the val session
saver.restore(sess, save_path)
# These are batch losses per key
loss_names = self.loss_names # TODO
losses_by_loss_by_key = {k:OrderedDict() for k in loss_names}
seq_lengths_by_key = OrderedDict()
start = time.time()
# TODO rid of this, memory expensive
# Save the sequences so we can do an in-memory icc test
predictions_by_key = OrderedDict()
targets_by_key = OrderedDict()
try:
for i in range(1000000000):
context_ops = context_ops.copy()
context_ops.update(loss_ops['context'])
# Note I used to also run loss_ops['all'] and loss_ops['batch'],
# I think for sequence-wise losses
# They've all currently been replaced with streaming 'total'
# losses
ctx, total_loss, predictions, targets = sess.run(
[context_ops, loss_ops['total'],
output_op, provider.targets])
keys = list(map(lambda x: x.decode(), ctx['key']))
for i, key in enumerate(keys):
# if key not in seq_lengths_by_key:
# seq_lengths_by_key[key] = ctx['length'][i]
predictions_bits = predictions_by_key.get(key, [])
targets_bits = targets_by_key.get(key, [])
predictions_bits.append(predictions[i])
targets_bits.append(targets[i])
predictions_by_key[key] = predictions_bits
targets_by_key[key] = targets_bits
if coord.should_stop():
log.warning('Validation stopping because coord said so')
except tf.errors.OutOfRangeError:
log.info('Finished validation in %.2fs', time.time() - start)
seq_lengths_by_key = OrderedDict(zip(ctx['all_keys'],
ctx['all_lengths']))
mean_by_loss = {}
# mean_by_loss = { k: np.mean(v) for k, v in all_loss.items() }
mean_by_loss.update(total_loss)
# n_keys = len(ctx['all_keys'])
# for k, v in total_loss.items():
# all_loss.update({ k: np.zeros((n_keys, *v.shape)) })
# Compute icc manually
def _piece_together(d):
return OrderedDict((k, np.concatenate(v)) for k, v in d.items())
max_length = max(seq_lengths_by_key.values())
predictions_by_key = _piece_together(predictions_by_key)
targets_by_key = _piece_together(targets_by_key)
max_padded_length = max(map(lambda v: v.shape[0], targets_by_key.values()))
from attend.util import pad_and_stack
predictions = pad_and_stack(predictions_by_key.values())
targets = pad_and_stack(targets_by_key.values())
del predictions_by_key, targets_by_key
icc_op = loss_ops['icc']
icc_score = sess.run(icc_op, {
self.placeholders_for_loss['predictions']: predictions,
self.placeholders_for_loss['targets']: targets,
self.placeholders_for_loss['lengths']: list(seq_lengths_by_key.values()),
})
mean_by_loss['icc'] = icc_score
del predictions, targets
summary = self.summary_producer.create_loss_summary(sess, mean_by_loss)
# all_loss)
summary_writer.add_summary(summary, global_step)
# TODO threads are joined successfully but weird warnings about queues
coord.request_stop()
coord.join(threads)
sess.close()
def train(self, num_epochs, steps_per_epoch, batch_size, time_steps,
provider,
encoder,
log_dir,
val_provider=None,
debug=False,
save_eval_graph=True,
restore_if_possible=True,
keep_all_checkpoints=False,
show_progress_bar=None):
if show_progress_bar is None and debug is False:
show_progress_bar = True
if show_progress_bar:
from tqdm import tqdm
progress_wrapper = tqdm
else:
progress_wrapper = lambda i, **kwargs: i
total_steps = num_epochs * steps_per_epoch
g = tf.get_default_graph()
# For now, its results are stored inside the provider
provider.batch_sequences_with_states(is_training=True, reuse=False)
global_step = tf.Variable(0, trainable=False, name='global_step')
# Prediction and loss
with tf.variable_scope(tf.get_variable_scope()):
out, ctx = self.model.build_model(provider, True, total_steps)
outputs = out['output']
loss_op = self.model.calculate_loss(outputs, provider.targets, ctx['length'])
# This bit is for alpha regularization loss
reg_losses = g.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
assert len(reg_losses) <= 1, 'I know of only one, just making sure'
reg_beta = .5
if len(reg_losses) > 0:
log.info('Adding regularization to loss function')
reg_loss = reg_losses[0]
loss_op += reg_beta * reg_loss
else:
log.info('No regularization to perform')
        if val_provider is not None:
n_vars = len(tf.trainable_variables())
val_provider.batch_sequences_with_states(1, is_training=False,
reuse=True,
collection=attend.GraphKeys.VAL_INPUT_RUNNERS)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
# tf.get_variable_scope().reuse_variables()
val_out, val_ctx = self.model.build_model(val_provider, False)
val_outputs = val_out['output']
# Should not reuse variables, so back out of the reusing scope
val_losses, _ = self.model.calculate_losses(val_outputs,
val_provider.targets, val_ctx['key'], val_ctx['length'], 'val_loss')
loss_predictions = tf.placeholder(tf.float32, [None, None, 1])
loss_targets = tf.placeholder(tf.float32, [None, None, 1])
loss_lengths = tf.placeholder(tf.int32, [None])
self.placeholders_for_loss.update(dict(predictions=loss_predictions,
targets=loss_targets,
lengths=loss_lengths))
from .losses import icc
icc_loss = self.model.calculate_loss(loss_predictions, loss_targets,
loss_lengths, icc(3,1))
val_losses['icc'] = icc_loss
if debug:
assert n_vars == len(tf.trainable_variables()), 'New vars were created for val'
if save_eval_graph:
log.info('Creating eval graph')
eval_graph = self.create_test_graph(**provider.__dict__)
# tf.train.write_graph(eval_graph, log_dir, 'eval_model.graph.proto',
# as_text=False)
# These saveables prevent the graph from being reconstructed so remove
# for serialization
saveables = eval_graph.get_collection_ref('saveable_objects')
backup = saveables.copy()
saveables.clear()
tf.train.export_meta_graph(log_dir + '/eval_model.meta.proto',
graph=eval_graph, as_text=False, clear_devices=True)
eval_graph.get_collection_ref('saveable_objects').extend(backup)
log.info('Exported eval_model')
with tf.variable_scope('optimizer', reuse=False):
optimizer = self.optimizer(learning_rate=self.learning_rate)
# train_op = optimizer.minimize(loss_op, global_step=global_step,
# var_list=tf.trainable_variables())
grads = tf.gradients(loss_op, tf.trainable_variables())
grads_and_vars = list(zip(grads, tf.trainable_variables()))
# All updates that aren't part of the graph
# Currently just batch norm moving averages
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.apply_gradients(grads_and_vars=grads_and_vars,
global_step=global_step)
num_vars = np.sum(list(map(lambda v: np.prod(v.shape), tf.trainable_variables())))
log.info('Total trainable vars {}'.format(num_vars))
# Initialize variables
# The Supervisor actually runs this automatically, but I'm keeping this
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
# Summary op
tf.summary.scalar('batch_loss', loss_op, family='train')
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
# for grad, var in grads_and_vars:
# tf.summary.histogram(var.op.name+'/gradient', grad)
# The Supervisor already merges all summaries but I like explicit
summary_op = tf.summary.merge_all()
# The Supervisor saves summaries after X seconds, not good for model progressions
# sv = tf.train.Supervisor(logdir=log_dir, summary_op=summary_op,
# save_summaries_secs=0)
# coord = sv.coord
coord = tf.train.Coordinator()
if debug:
config = tf.ConfigProto(
# intra_op_parallelism_threads=1
)
else:
config = tf.ConfigProto()
train_sess = tf.Session(graph=g)
sess = train_sess
# from tensorflow.python import debug as tf_debug
# sess = tf_debug.LocalCLIDebugWrapperSession(sess, thread_name_filter="MainThread$")
sess.run(init_op)
saver = tf.train.Saver(save_relative_paths=True,
max_to_keep=num_epochs if keep_all_checkpoints else 2)
if restore_if_possible:
states = tf.train.get_checkpoint_state(log_dir)
            if states is not None:
checkpoint_paths = states.all_model_checkpoint_paths
last_checkpoint = tf.train.latest_checkpoint(log_dir)
log.info('Resuming training from checkpoint {}'.format(last_checkpoint))
saver.restore(sess, last_checkpoint)
saver.recover_last_checkpoints(checkpoint_paths)
# Special input runners run separately because the supervisor can't
# serialize them
input_threads = tf.train.start_queue_runners(sess=sess, coord=coord,
collection='input_runners')
# input_threads = []
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
g.finalize() # No ops can be added after this
log.info('Started training')
losses = np.empty(self.stats_every) # Keep losses to average every so often
global_step_value = sess.run(global_step)
t_start = time.time()
t_stats = time.time()
try:
# while not coord.should_stop():
while global_step_value < num_epochs * steps_per_epoch:
step_i = global_step_value % num_epochs
# for epoch_i in progress_wrapper(range(num_epochs)):
# for step_i in progress_wrapper(range(steps_per_epoch)):
if (global_step_value) % self.stats_every == 0:
t_stats = time.time()
try:
loss, _, summary, keys = sess.run([loss_op, train_op,
summary_op, ctx['key']])
# np.savez('{}/output.{:05d}'.format(log_dir, global_step_value),
# output=outputs_arr,target=targets_arr)
losses[step_i % self.stats_every] = loss # Circular buffer
# keys = list(map(lambda x: x.decode(), keys))
# If duplicate key is encountered this could happen rarely
except tf.errors.InvalidArgumentError as e:
# log.exception(e)
raise e
global_step_value = tf.train.global_step(sess, global_step)
log.debug('TRAIN %s - %s', global_step_value, loss)
summary_writer.add_summary(summary, global_step_value)
# Runtime stats every so often
if global_step_value % self.stats_every == 0:
stats_summary = self.summary_producer.create_stats_summary(
sess, time.time() - t_stats, global_step_value,
np.mean(losses))
summary_writer.add_summary(stats_summary, global_step_value)
if coord.should_stop():
break
# END STEP
if global_step_value % steps_per_epoch == 0:
## END OF EPOCH
# SAVING
save_path = saver.save(sess, log_dir + '/model.ckpt',
global_step_value, write_meta_graph=False)
# Validation after every epoch
if val_provider:
self.test(
graph=g, saver=saver,
save_path=save_path,
provider=val_provider,
init_op=init_op,
loss_ops=val_losses,
context_ops=val_ctx,
output_op=val_outputs,
summary_writer=summary_writer,
global_step = global_step_value
)
coord.request_stop()
except tf.errors.OutOfRangeError:
log.info('Done training -- epoch limit reached')
notify('Done training', 'Took {:.1f}s'.format(time.time() - t_start))
except Exception as e:
log.exception(e)
notify('Error occurred', 'Took {:.1f}s'.format(time.time() - t_start))
finally:
log.debug('Joining threads - ...')
# Requests the coordinator to stop, joins threads
# and closes the summary writer if enabled through supervisor
coord.join(threads + input_threads)
# sv.stop()
# coord.stop() DOESNT EXIST
sess.close()
def create_test_graph(self, **kwargs):
from attend.provider import InMemoryProvider, Provider
graph = tf.Graph()
provider = InMemoryProvider(kwargs.pop('feature_dims'),
**util.pick(kwargs, util.params_for(Provider.__init__)))
scope = ''
with graph.as_default():
with tf.variable_scope(scope, reuse=False):
provider.batch_sequences_with_states(is_training=False, reuse=False)
out_ops, ctx_ops = self.model.build_model(provider, False)
reset_op = provider.state_saver.reset_states()
tf_util.add_to_collection(attend.GraphKeys.STATE_RESET, reset_op, graph)
return graph
|
[
"rubenvereecken@gmail.com"
] |
rubenvereecken@gmail.com
|
16786f8c9d2bef2bd6ac9518fcf57cc7fc19e607
|
73770ddb5441f589742dd600545c555dd9922ae8
|
/db_scripts/versions/d2c4a25c3c1d_v30_course_tree.py
|
824db58eae92c5499013a31689236ed782979526
|
[] |
no_license
|
annndrey/trainingsite
|
c200a203c7a5fe832de70e9497a93df3745d63ef
|
6dce55dc25b2a6f5285a8cfba1fbd7d83dbbe96b
|
refs/heads/master
| 2020-03-29T16:11:06.336532
| 2018-09-24T12:53:55
| 2018-09-24T12:53:55
| 150,102,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,792
|
py
|
"""V30_course_tree
Revision ID: d2c4a25c3c1d
Revises: a24e5727c770
Create Date: 2017-08-11 17:37:18.038419
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'd2c4a25c3c1d'
down_revision = 'a24e5727c770'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('elements',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('name', sa.String(length=200), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('is_published', sa.Integer(), nullable=True),
sa.Column('is_archived', sa.Integer(), nullable=True),
sa.Column('lastchanged', sa.DateTime(), nullable=False),
sa.Column('headerimage', sa.Text(), nullable=True),
sa.Column('preview', sa.Text(), nullable=True),
sa.Column('elemtype', sa.Enum('course', 'week', 'workout', 'excercise'), nullable=True),
sa.Column('weektype', sa.Enum('power', 'endurance', 'fingers', 'projecting', 'rest', 'test'), nullable=True),
sa.Column('descr', sa.Text(), nullable=True),
sa.Column('timetotal', sa.String(length=200), nullable=True),
sa.Column('numreps', sa.Integer(), nullable=True),
sa.Column('numsets', sa.Integer(), nullable=True),
sa.Column('resttime', sa.Text(), nullable=True),
sa.Column('perftime', sa.String(length=200), nullable=True),
sa.Column('comments', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], name=op.f('fk_elements_author_id_users')),
sa.ForeignKeyConstraint(['parent_id'], [u'elements.id'], name=op.f('fk_elements_parent_id_elements')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_elements'))
)
op.drop_table('excersise_workout_association')
op.drop_table('courses')
op.drop_table('workout_week_association')
op.drop_table('weeks')
op.drop_table('excersises')
op.drop_table('workouts')
op.drop_table('week_course_association')
op.drop_constraint(u'fk_media_excercise_id_excersises', 'media', type_='foreignkey')
op.create_foreign_key(op.f('fk_media_excercise_id_elements'), 'media', 'elements', ['excercise_id'], ['id'])
op.drop_constraint(u'fk_subscriptions_course_id_courses', 'subscriptions', type_='foreignkey')
op.create_foreign_key(op.f('fk_subscriptions_course_id_elements'), 'subscriptions', 'elements', ['course_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(op.f('fk_subscriptions_course_id_elements'), 'subscriptions', type_='foreignkey')
op.create_foreign_key(u'fk_subscriptions_course_id_courses', 'subscriptions', 'courses', ['course_id'], ['id'])
op.drop_constraint(op.f('fk_media_excercise_id_elements'), 'media', type_='foreignkey')
op.create_foreign_key(u'fk_media_excercise_id_excersises', 'media', 'excersises', ['excercise_id'], ['id'])
op.create_table('week_course_association',
sa.Column('weeks_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('courses_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['courses_id'], [u'courses.id'], name=u'fk_week_course_association_courses_id_courses'),
sa.ForeignKeyConstraint(['weeks_id'], [u'weeks.id'], name=u'fk_week_course_association_weeks_id_weeks'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table('workouts',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=200), nullable=True),
sa.Column('descr', mysql.MEDIUMTEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.Column('time', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=200), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table('excersises',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=200), nullable=True),
sa.Column('descr', mysql.MEDIUMTEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.Column('numreps', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('numsets', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('comments', mysql.MEDIUMTEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.Column('rest', mysql.TEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.Column('perftime', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=200), nullable=True),
sa.Column('time', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table('weeks',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=200), nullable=True),
sa.Column('comments', mysql.MEDIUMTEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table('workout_week_association',
sa.Column('workouts_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('weeks_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['weeks_id'], [u'weeks.id'], name=u'fk_workout_week_association_weeks_id_weeks'),
sa.ForeignKeyConstraint(['workouts_id'], [u'workouts.id'], name=u'fk_workout_week_association_workouts_id_workouts'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table('courses',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('author_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('is_published', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('headerimage', mysql.TEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.Column('preview', mysql.TEXT(collation=u'utf8_unicode_ci'), nullable=True),
sa.Column('is_archived', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('lastchanged', mysql.DATETIME(), nullable=False),
sa.Column('header', mysql.VARCHAR(collation=u'utf8_unicode_ci', length=200), nullable=True),
sa.ForeignKeyConstraint(['author_id'], [u'users.id'], name=u'fk_courses_author_id_users'),
sa.PrimaryKeyConstraint('id'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.create_table('excersise_workout_association',
sa.Column('excersise_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('workout_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['excersise_id'], [u'excersises.id'], name=u'fk_excersise_workout_association_excersise_id_excersises'),
sa.ForeignKeyConstraint(['workout_id'], [u'workouts.id'], name=u'fk_excersise_workout_association_workout_id_workouts'),
mysql_collate=u'utf8_unicode_ci',
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
op.drop_table('elements')
# ### end Alembic commands ###
|
[
"annndrey@trololo.info"
] |
annndrey@trololo.info
|
6c833c96e9b2d68d9022c12b7ad1c058fa95fa67
|
4252102a1946b2ba06d3fa914891ec7f73570287
|
/pylearn2/datasets/tfd.py
|
2a8cff8eaf7353dfa2b022826fe526b825b96b87
|
[] |
no_license
|
lpigou/chalearn2014
|
21d487f314c4836dd1631943e20f7ab908226771
|
73b99cdbdb609fecff3cf85e500c1f1bfd589930
|
refs/heads/master
| 2020-05-17T00:08:11.764642
| 2014-09-24T14:42:00
| 2014-09-24T14:42:00
| 24,418,815
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
import numpy as np
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils.rng import make_np_rng
class TFD(dense_design_matrix.DenseDesignMatrix):
"""
Pylearn2 wrapper for the Toronto Face Dataset.
http://aclab.ca/users/josh/TFD.html
Parameters
----------
which_set : WRITEME
Dataset to load. One of ['train','valid','test','unlabeled'].
fold : WRITEME
TFD contains 5 official folds for train, valid and test.
image_size : WRITEME
One of [48,96]. Load smaller or larger dataset variant.
example_range : WRITEME
Array_like. Load only examples in range
[example_range[0]:example_range[1]].
center : WRITEME
Move data from range [0.,255.] to [-127.5,127.5]
scale : WRITEME
shuffle : WRITEME
one_hot : WRITEME
rng : WRITEME
seed : WRITEME
preprocessor : WRITEME
axes : WRITEME
"""
mapper = {'unlabeled': 0, 'train': 1, 'valid': 2, 'test': 3,
'full_train': 4}
def __init__(self, which_set, fold = 0, image_size = 48,
example_range = None, center = False, scale = False,
shuffle=False, one_hot = False, rng=None, seed=132987,
preprocessor = None, axes = ('b', 0, 1, 'c')):
"""
Creates a DenseDesignMatrix object for the Toronto Face Dataset.
Parameters
----------
which_set : str
dataset to load. One of ['train','valid','test','unlabeled'].
center : Bool
if True, move data from range [0.,255.] to [-127.5,127.5]
example_range : array_like.
Load only examples in range [example_range[0]:example_range[1]].
fold : int in {0,1,2,3,4}
TFD contains 5 official folds for train, valid and test.
image_size : one of [48,96].
Load smaller or larger dataset variant.
"""
if which_set not in self.mapper.keys():
raise ValueError("Unrecognized which_set value: %s. Valid values are %s." % (str(which_set), str(self.mapper.keys())))
assert (fold >=0) and (fold <5)
# load data
path = '${PYLEARN2_DATA_PATH}/faces/TFD/'
if image_size == 48:
data = load(path + 'TFD_48x48.mat')
elif image_size == 96:
data = load(path + 'TFD_96x96.mat')
else:
raise ValueError("image_size should be either 48 or 96.")
# retrieve indices corresponding to `which_set` and fold number
if self.mapper[which_set] == 4:
set_indices = (data['folds'][:, fold] == 1) + (data['folds'][:,fold] == 2)
else:
set_indices = data['folds'][:, fold] == self.mapper[which_set]
assert set_indices.sum() > 0
# limit examples returned to `example_range`
ex_range = slice(example_range[0], example_range[1]) \
if example_range else slice(None)
# get images and cast to float32
data_x = data['images'][set_indices]
data_x = np.cast['float32'](data_x)
data_x = data_x[ex_range]
# create dense design matrix from topological view
data_x = data_x.reshape(data_x.shape[0], image_size ** 2)
if center:
data_x -= 127.5
if scale:
assert not center
data_x /= 255.
if shuffle:
rng = make_np_rng(rng, seed, which_method='permutation')
rand_idx = rng.permutation(len(data_x))
data_x = data_x[rand_idx]
# get labels
if which_set != 'unlabeled':
data_y = data['labs_ex'][set_indices]
data_y = data_y[ex_range] -1
data_y_identity = data['labs_id'][set_indices]
data_y_identity = data_y_identity[ex_range]
if shuffle:
data_y = data_y[rand_idx]
data_y_identity = data_y_identity[rand_idx]
self.one_hot = one_hot
if one_hot:
one_hot = np.zeros((data_y.shape[0], 7), dtype = 'float32')
for i in xrange(data_y.shape[0]):
one_hot[i, data_y[i]] = 1.
data_y = one_hot
else:
data_y = None
data_y_identity = None
# create view converting for retrieving topological view
view_converter = dense_design_matrix.DefaultViewConverter((image_size, image_size, 1),
axes)
# init the super class
super(TFD, self).__init__(X = data_x, y = data_y, view_converter = view_converter)
assert not np.any(np.isnan(self.X))
self.y_identity = data_y_identity
self.axes = axes
if preprocessor is not None:
preprocessor.apply(self)
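# Usage sketch, assuming PYLEARN2_DATA_PATH points at a directory containing
# faces/TFD/TFD_48x48.mat:
# train_set = TFD(which_set='train', fold=0, image_size=48, one_hot=True)
# train_set.X.shape  # -> (n_train_examples, 2304), since 48 * 48 = 2304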
|
[
"lionelpigou@gmail.com"
] |
lionelpigou@gmail.com
|
7445fa91b3c0c18b7a57868865369d11968c26c3
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_196/ch25_2020_03_11_11_18_42_760637.py
|
290e11d5e107eaf55948946794c96f9f115582db
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
a = float(input("Qual a velocidade?"))
b = float(input("Qual o ângulo de lançamento?"))
g = 9.8
from math import sin
# sin is a function and must be called, not multiplied; the angle is assumed
# to be entered in radians (use math.radians(b) first if it is in degrees).
d = ((a**2) * sin(2*b)) / g
if abs(100 - d) <= 2:  # within 2 m of the 100 m target, on either side
    print("Acertou!")
elif d < 98:
    print("Muito perto")
else:
    print("Muito longe")
|
[
"you@example.com"
] |
you@example.com
|
3860aeb9a171e0c0407f824c7a3b97b4b7518ba3
|
ed682f663f29d43f45ece61afe8925055ecec161
|
/wsconnections/counter/views.py
|
0621dfc7e6f7a03bad8cbd26459756653f7f3035
|
[] |
no_license
|
aaronbassett/django-ws-connections
|
0f24493c157748d809d623d610f6f21e27dbdba2
|
675b2431ee80bf76aee744a904eee0e284324678
|
refs/heads/master
| 2020-05-09T13:28:11.611876
| 2019-04-10T19:56:55
| 2019-04-10T19:56:55
| 181,153,357
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
from django.views.generic import TemplateView
from nanoid import generate
class CountersWrapperView(TemplateView):
template_name = "counter/index.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['nanoid'] = generate(size=6)
return context
class CountView(TemplateView):
template_name = "counter/count.html"
|
[
"me@aaronbassett.com"
] |
me@aaronbassett.com
|
6477f7f8502a7098015934e5eb7e18a92b6d2f88
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03469/s126935227.py
|
78a20b768e62235c2f99c198722cfc536c92ce38
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
wrong_date = input()
exact_date = wrong_date.replace('7', '8', 1)
print(exact_date)
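# Example (sketch): '2017/01/07' -> '2018/01/07'; the count argument of 1
# makes str.replace change only the first '7' in the string.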
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
82b25688d4151650c665230d32673cf1ff5e9ffc
|
038a78e9496a4887fde09ebd406d3c23866a7b80
|
/lookout/core/tests/__init__.py
|
b585b6e9dfbe6d5e1b466609f330c1b9da2d0b7d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
warenlg/lookout-sdk-ml
|
e07305c913b6a933634f669cb425ea2dffd92937
|
de36358b711baff61ee407c01b8086cea45537e5
|
refs/heads/master
| 2020-04-06T11:37:23.060886
| 2019-02-28T10:59:36
| 2019-02-28T10:59:36
| 157,424,280
| 0
| 0
| null | 2018-11-13T18:07:37
| 2018-11-13T18:07:36
| null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
from lookout.core.test_helpers import server
if not server.exefile.exists():
server.fetch()
|
[
"vadim@sourced.tech"
] |
vadim@sourced.tech
|
1a4049be4c964247c6ed69d9dbb126d7e5729c14
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/deviceupdate/v20200301preview/__init__.py
|
ed784172151ec8f045708ca4679c6f49841e4023
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .account import *
from .get_account import *
from .get_instance import *
from .get_private_endpoint_connection import *
from .get_private_endpoint_connection_proxy import *
from .instance import *
from .private_endpoint_connection import *
from .private_endpoint_connection_proxy import *
from ._inputs import *
from . import outputs
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
3557914301b9c6b30c03271309e557c40ac31c24
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/devices/v20200301/iot_dps_resource.py
|
24ff4aa176709c49c612e62feff015417f73edda
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 7,270
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['IotDpsResource']
class IotDpsResource(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['IotDpsPropertiesDescriptionArgs']]] = None,
provisioning_service_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['IotDpsSkuInfoArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The description of the provisioning service.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[pulumi.InputType['IotDpsPropertiesDescriptionArgs']] properties: Service specific properties for a provisioning service
:param pulumi.Input[str] provisioning_service_name: Name of provisioning service to create or update.
:param pulumi.Input[str] resource_group_name: Resource group identifier.
:param pulumi.Input[pulumi.InputType['IotDpsSkuInfoArgs']] sku: Sku info for a provisioning Service.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['etag'] = etag
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if properties is None:
raise TypeError("Missing required property 'properties'")
__props__['properties'] = properties
if provisioning_service_name is None:
raise TypeError("Missing required property 'provisioning_service_name'")
__props__['provisioning_service_name'] = provisioning_service_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if sku is None:
raise TypeError("Missing required property 'sku'")
__props__['sku'] = sku
__props__['tags'] = tags
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devices/latest:IotDpsResource"), pulumi.Alias(type_="azure-nextgen:devices/v20170821preview:IotDpsResource"), pulumi.Alias(type_="azure-nextgen:devices/v20171115:IotDpsResource"), pulumi.Alias(type_="azure-nextgen:devices/v20180122:IotDpsResource"), pulumi.Alias(type_="azure-nextgen:devices/v20200101:IotDpsResource"), pulumi.Alias(type_="azure-nextgen:devices/v20200901preview:IotDpsResource")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IotDpsResource, __self__).__init__(
'azure-nextgen:devices/v20200301:IotDpsResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IotDpsResource':
"""
Get an existing IotDpsResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return IotDpsResource(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.IotDpsPropertiesDescriptionResponse']:
"""
Service specific properties for a provisioning service
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.IotDpsSkuInfoResponse']:
"""
Sku info for a provisioning Service.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
5cede28da970acd0decfb0e8f209f2c2e9e56a86
|
9d734ff697cadaf79ec645a1440a8d690977a97e
|
/code/py/robbie/execution/execsrclink.py
|
4bbb0873fab2dd33561a1cd60a3c30b1abdddb70
|
[] |
no_license
|
karapuz/quad
|
85ed26f6479e58c3d88b50ac98c51007fcefaf83
|
e2e436eedfb01212a112e787b483db925b063f96
|
refs/heads/master
| 2021-06-05T12:19:35.298605
| 2016-10-26T12:07:55
| 2016-10-26T12:07:55
| 58,860,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,192
|
py
|
'''
AUTHOR : ilya presman, 2016
TYPE: : lib
DESCRIPTION : execution.execsrclink - fixlink for the exec source
'''
import traceback
import quickfix
import robbie.fix.util as fut
import robbie.execution.util as execut
from robbie.util.logging import logger
from robbie.echo.stratutil import EXECUTION_MODE
class Application( quickfix.Application ):
_cx2orig = {}
def onCreate(self, sessionID):
logger.debug('onCreate sessionID=%s', sessionID)
self._sessionID = sessionID
self._session = quickfix.Session.lookupSession( sessionID )
self._cx2orig[ self._sessionID ] = {}
def setMode(self, mode):
self._mode = mode
def getSessionID(self):
return self._sessionID
def getSession(self):
return self._session
def registerPriceStrip( self, priceStrip ):
'''setup price strip '''
self._priceStrip = priceStrip
def registerStratManager( self, signalStrat ):
'''callback into the execution sink'''
self._signalStrat = signalStrat
def addMessageAdapter( self, msgAdapter ):
'''callback into the execution sink'''
self._msgAdapter = msgAdapter
def onLogon(self, sessionID ):
logger.debug( 'onLogon: sessionID=%s', sessionID )
def onLogout(self, sessionID ):
logger.debug( 'onLogout: sessionID=%s', sessionID )
def toAdmin(self, message, sessionID ):
try:
if self._msgAdapter:
with self._msgAdapter(message, 'fromApp'):
return self.onToAdmin( sessionID, message )
else:
return self.onToAdmin( sessionID, message )
except:
logger.error( 'toAdmin: ERROR %s', traceback.format_exc() )
def fromAdmin(self, message, sessionID ):
try:
if self._msgAdapter:
with self._msgAdapter(message, 'fromApp'):
return self.onFromAdmin( sessionID, message )
else:
return self.onFromAdmin( sessionID, message )
except:
logger.error( 'fromAdmin: ERROR %s', traceback.format_exc() )
def toApp(self, message, sessionID ):
try:
if self._mode == EXECUTION_MODE.NEW_FILL_CX:
onToApp = self.onToApp
elif self._mode == EXECUTION_MODE.FILL_ONLY:
onToApp = self.onToAppFillOnly
else:
raise ValueError('Unknown mode=%s' % self._mode)
if self._msgAdapter:
with self._msgAdapter(message, 'fromApp'):
onToApp( sessionID, message )
else:
return onToApp( sessionID, message )
except:
logger.error( 'toApp: ERROR %s', traceback.format_exc() )
def fromApp(self, message, sessionID):
try:
if self._msgAdapter:
with self._msgAdapter(message, 'fromApp'):
return self.onFromApp( sessionID, message )
else:
return self.onFromApp( sessionID, message )
except:
logger.error( 'fromApp: ERROR %s', traceback.format_exc() )
''' onEvent handlers '''
def onToAdmin( self, sessionID, message ):
hdr = message.getHeader()
msgType = hdr.getField( fut.Tag_MsgType )
if msgType == fut.Msg_Heartbeat:
# logger.debug( msgType )
return
logger.debug('onToAdmin msgType=%s', fut.msgVal2Name(msgType))
try:
if msgType == fut.Msg_Logout or msgType == fut.Msg_Logon:
logger.debug('onToAdmin msgType=%s message=%s', msgType, message)
execut.resetSeqNum( sessionID, message, 1 )
except:
            print 'onToAdmin ERROR e=%s' % str(traceback.format_exc())
def onFromAdmin( self, sessionID, message ):
hdr = message.getHeader()
msgType = hdr.getField( fut.Tag_MsgType )
if msgType == fut.Msg_Heartbeat:
# logger.debug( msgType )
return
logger.debug('onFromAdmin msgType=%s', fut.msgVal2Name(msgType))
try:
if msgType == fut.Msg_Logout or msgType == fut.Msg_Logon:
execut.resetSeqNum( sessionID, message )
except:
print 'onFromAdmin ERROR e=%s' % str(traceback.format_exc() )
def onFromApp( self, sessionID, message ):
hdr = message.getHeader()
msgType = hdr.getField( fut.Tag_MsgType )
logger.debug('onFromApp msgType=%s', fut.msgVal2Name(msgType))
if msgType == fut.Msg_ExecReport:
execType = message.getField( fut.Tag_ExecType )
orderStatus = message.getField( fut.Tag_OrderStatus )
orderId = message.getField( fut.Tag_ClientOrderId )
if execType == fut.Val_ExecType_New:
if orderId in self._seenOrderId:
logger.error('onFromApp: see duplicate orderId=%s', orderId)
return
self._seenOrderId.add( orderId )
return self.onSubmit( orderId=orderId, message=message, execType=execType, orderStatus=orderStatus )
elif orderStatus in ( fut.Val_OrderStatus_Fs ):
return self.onOrderFill( orderId=orderId, message=message, execType=execType, orderStatus=orderStatus )
elif orderStatus == fut.Val_OrderStatus_Cx:
return self.onOrderCancel( orderId=orderId, message=message, execType=execType, orderStatus=orderStatus )
elif orderStatus == fut.Val_OrderStatus_Rx:
return self.onOrderReject( orderId=orderId, message=message, execType=execType, orderStatus=orderStatus )
elif orderStatus == fut.Val_OrderStatus_Pnd_Cx:
return self.onOrderPendingCancel( orderId=orderId, message=message, execType=execType, orderStatus=orderStatus )
else:
logger.error( 'onFromApp [1] unhandled %s %s %s' % ( msgType, execType, orderStatus ) )
else:
logger.error( 'onFromApp [2] unhandled %s' % ( msgType ) )
def onToAppFillOnly( self, sessionID, message ):
hdr = message.getHeader()
msgType = hdr.getField( fut.name2tag('MsgType') )
logger.debug('onToAppFillOnly msgType=%s', fut.msgVal2Name(msgType))
def onToApp( self, sessionID, message ):
hdr = message.getHeader()
msgType = hdr.getField( fut.name2tag('MsgType') )
logger.debug('onToApp msgType=%s', fut.msgVal2Name(msgType))
orderId = message.getField( fut.Tag_ClientOrderId )
if orderId in self._seenOrderId:
logger.error('onToApp: see duplicate orderId=%s', orderId)
return
self._seenOrderId.add( orderId )
if msgType == fut.Msg_NewOrderSingle:
return self.onSubmitToApp( orderId=orderId, message=message )
else:
logger.error( 'onToApp [1] unhandled %s' % ( msgType ) )
def onOrderFill( self, orderId, message, execType, orderStatus ):
''' '''
logger.debug( 'onOrderFill %s %s' % ( execType, orderStatus ) )
txTime = message.getField( fut.Tag_TransactTime )
lastPx = float ( message.getField( fut.Tag_LastPx ) )
side = message.getField( fut.Tag_Side )
symbol = message.getField( fut.Tag_Symbol )
account = message.getField( fut.Tag_Account )
# orderType = message.getField( fut.Tag_OrderType )
# cumQty = int ( message.getField( fut.Tag_CumQty ) )
# leavesQty = int ( message.getField( fut.Tag_LeavesQty ) )
lastShares = int ( message.getField( fut.Tag_LastShares ) )
qty = fut.convertQty( side, lastShares )
mktPrice = self.getMarketPrices(symbol=symbol)
self._signalStrat.onFill(
signalName = account,
execTime = txTime,
orderId = orderId,
symbol = symbol,
qty = qty,
price = lastPx,
mktPrice = mktPrice)
logger.debug( 'fix.fill oid=%s s=%-4s q=%4d p=%f' % ( orderId, symbol, qty, lastPx ))
def onOrderCancelToApp( self, orderId, message):
return self.onOrderCancel( orderId=orderId, message=message, execType=None, orderStatus=None )
def onOrderCancel( self, orderId, message, execType, orderStatus ):
''' '''
# logger.debug( 'onOrderCancel %s %s' % ( execType, orderStatus ) )
logger.debug( 'onOrderCancel %s' % str(message) )
# orderId = message.getField( fut.Tag_ClientOrderId )
txTime = message.getField( fut.Tag_TransactTime )
lastShares = int( message.getField( fut.Tag_LastShares ))
symbol = message.getField( fut.Tag_Symbol )
account = message.getField( fut.Tag_Account )
# orderType = message.getField( fut.Tag_OrderType )
side = message.getField( fut.Tag_Side )
if lastShares == 0:
cumQty = fut.convertQty( side, int( message.getField( fut.Tag_CumQty ) ) )
orderQty= fut.convertQty( side, int( message.getField( fut.Tag_OrderQty ) ) )
cxqty = orderQty - cumQty
else:
cxqty = fut.convertQty( side, lastShares )
if cxqty == 0:
cxqty = fut.convertQty( side, int( message.getField( fut.Tag_LeavesQty ) ) )
try:
origOrderId = message.getField( fut.Tag_OrigClOrdID )
except quickfix.FieldNotFound as _e:
if orderId not in self._cx2orig[ self._sessionID ]:
logger.error( 'fix.cxed Absent originalId - cx not issued by echo. oid=%s s=%-4s q=%4d' % ( orderId, symbol, cxqty ))
return
origOrderId = self._cx2orig[ self._sessionID ][ orderId ]
mktPrice = self.getMarketPrices(symbol=symbol)
logger.debug( 'fix.cxed oid=%s s=%-4s q=%4d' % ( orderId, symbol, cxqty ))
self._signalStrat.onCxRx(
signalName = account,
execTime = txTime,
orderId = orderId,
symbol = symbol,
qty = cxqty,
origOrderId = origOrderId,
mktPrice = mktPrice)
def onOrderReject( self, orderId, message, execType, orderStatus ):
''' '''
logger.debug( 'onOrderReject %s %s' % ( execType, orderStatus ) )
# orderId = message.getField( fut.Tag_ClientOrderId )
txTime = message.getField( fut.Tag_TransactTime )
lastShares = int ( message.getField( fut.Tag_LastShares ) )
side = message.getField( fut.Tag_Side )
symbol = message.getField( fut.Tag_Symbol )
account = message.getField( fut.Tag_Account )
# orderType = message.getField( fut.Tag_OrderType )
if lastShares == 0:
cumQty = fut.convertQty( side, int( message.getField( fut.Tag_CumQty ) ) )
orderQty= fut.convertQty( side, int( message.getField( fut.Tag_OrderQty ) ) )
rxqty = orderQty - cumQty
else:
rxqty = fut.convertQty( side, lastShares )
mktPrice = self.getMarketPrices(symbol=symbol)
self._signalStrat.onCxRx(
signalName = account,
execTime = txTime,
orderId = orderId,
symbol = symbol,
qty = rxqty,
origOrderId = None,
mktPrice = mktPrice)
logger.debug( 'fix.rxed oid=%s s=%-4s q=%4d' % ( orderId, symbol, rxqty ))
def onSubmit( self, orderId, message, execType, orderStatus ):
logger.debug( 'onSubmit %s %s' % ( execType, orderStatus ) )
txTime = message.getField( fut.Tag_TransactTime )
side = message.getField( fut.Tag_Side )
symbol = message.getField( fut.Tag_Symbol )
account = message.getField( fut.Tag_Account )
orderType = message.getField( fut.Tag_OrderType )
timeInForce = message.getField( fut.Tag_TimeInForce )
# cumQty = int ( message.getField( fut.Tag_CumQty ) )
# leavesQty = int ( message.getField( fut.Tag_LeavesQty ) )
lastPx = float ( message.getField( fut.Tag_LastPx ) )
lastShares = int ( message.getField( fut.Tag_LastShares ) )
qty = fut.convertQty( side, lastShares )
mktPrice = self.getMarketPrices(symbol=symbol)
self._signalStrat.onNew(
timeInForce = timeInForce,
signalName = account,
execTime = txTime,
orderId = orderId,
symbol = symbol,
orderType = orderType,
qty = qty,
price = lastPx,
mktPrice = mktPrice)
logger.debug( 'fix.new formApp onSubmit oid=%s s=%-4s q=%4d p=%f' % ( orderId, symbol, qty, lastPx ))
def onSubmitToApp( self, orderId, message ):
logger.debug( 'onSubmitToApp' )
# orderId = message.getField( fut.Tag_ClientOrderId )
txTime = message.getField( fut.Tag_TransactTime )
side = message.getField( fut.Tag_Side )
symbol = message.getField( fut.Tag_Symbol )
account = message.getField( fut.Tag_Account )
orderType = message.getField( fut.Tag_OrderType )
timeInForce = message.getField( fut.Tag_TimeInForce )
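        # The price can arrive in either LastPx or Price depending on the
        # message; try both tags and fall back to 0 if neither is present.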
price = 0
try:
price = float ( message.getField( fut.Tag_LastPx ) )
except quickfix.FieldNotFound as _e:
pass
try:
price = float ( message.getField( fut.Tag_Price ) )
except quickfix.FieldNotFound as _e:
pass
# cumQty = int ( message.getField( fut.Tag_CumQty ) )
# leavesQty = int ( message.getField( fut.Tag_LeavesQty ) )
lastShares = int ( message.getField( fut.Tag_OrderQty ) )
qty = fut.convertQty( side, lastShares )
mktPrice = self.getMarketPrices(symbol=symbol)
self._signalStrat.onNew(
signalName = account,
execTime = txTime,
orderId = orderId,
symbol = symbol,
qty = qty,
price = price,
orderType = orderType,
timeInForce = timeInForce,
mktPrice = mktPrice)
logger.debug( 'fix.new onSubmitToApp oid=%s s=%-4s q=%4d p=%f' % ( orderId, symbol, qty, price ))
def onOrderPendingCancel( self, orderId, message, execType, orderStatus ):
logger.debug( 'onOrderPendingCancel %s %s' % ( execType, orderStatus ) )
account = message.getField( fut.Tag_Account )
symbol = message.getField( fut.Tag_Symbol )
mktPrice = self.getMarketPrices(symbol=symbol)
logger.error( 'ERROR!!!!!!!! onOrderPendingCancel: toApp oid=%s s=%-4s' % ( orderId, symbol ))
def getMarketPrices(self, symbol):
''' get Market Prices '''
if self._priceStrip is None:
            logger.error( 'ERROR!!!!!!!! getMarketPrices: no price strip for %s' % symbol)
return {}
trade = self._priceStrip.getInstantPriceByName(priceType='TRADE', symbol=symbol)
bid = self._priceStrip.getInstantPriceByName(priceType='BID', symbol=symbol)
ask = self._priceStrip.getInstantPriceByName(priceType='ASK', symbol=symbol)
return {'TRADE': trade, 'BID': bid, 'ASK': ask}
''' order issuing block '''
def sendOrder( self, senderCompID, targetCompID, account, orderId, symbol, qty, price, timeInForce=fut.Val_TimeInForce_DAY, orderType=None ):
logger.debug( 'fix.lnk.send account=%s, orderId=%s, symbol=%s, qty=%s, price=%s, timeInForce=%s, orderType=%s',
account, orderId, symbol, qty, price, timeInForce, orderType)
msg = fut.form_NewOrder(
senderCompID = senderCompID,
targetCompID = targetCompID,
account = account,
timeInForce = timeInForce,
orderId = orderId,
symbol = symbol,
qty = qty,
price = price,
orderType = orderType )
session = self.getSession()
session.sendToTarget( msg )
logger.debug( 'fix.lnk.send id=%s s=%-4s q=%4d p=%s' % ( orderId, symbol, qty, price ))
def cancelOrder( self, senderCompID, targetCompID, account, orderId, origOrderId, symbol, qty ):
logger.debug( 'fix.lnk.cx enter' )
msg = fut.form_Cancel(
senderCompID = senderCompID,
targetCompID = targetCompID,
account = account,
orderId = orderId,
origOrderId = origOrderId,
symbol = symbol,
ccy = None,
qty = qty )
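        # Remember which original order this cancel refers to, so onOrderCancel
        # can recover origOrderId when the venue omits Tag_OrigClOrdID (see above).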
#self._cx2orig[ orderId ] = origOrderId
self._cx2orig[ self._sessionID ][ orderId ] = origOrderId
session = self.getSession()
session.sendToTarget( msg )
logger.debug( 'fix.lnk.cx msg=%s' % ( msg ))
logger.debug( 'fix.lnk.cx id=%s s=%-4s q=%4d' % ( orderId, symbol, qty ))
def init(tweakName, signalStrat, mode, pricestrip, cleanSlate=False, msgAdapter=None):
''' '''
cfgpath = execut.initFixConfig( tweakName, cleanSlate=cleanSlate )
app = Application( )
app.setMode(mode=mode)
app.registerStratManager( signalStrat )
app.addMessageAdapter( msgAdapter )
app._seenOrderId = set()
app.registerPriceStrip(pricestrip)
appThread = execut.AppThread( app=app, cfgpath=cfgpath, useLogger=True )
appThread.run()
return appThread, app
|
[
"ilya.presman@gmail.com"
] |
ilya.presman@gmail.com
|
2976ea7ae45914e9623b137b7d0c8dbd805a632f
|
f71273e5a2c4382ad990e350dfd58b702db56fdf
|
/app/migrations/0002_hashtag.py
|
79996242ffd39f4eb93d96ffb79c5a15687fff46
|
[] |
no_license
|
jangheeseung/Django_pr
|
dcb6c4596b61a8f524964f735b726087c763ad7c
|
94dfe789b361ddb6cf28575d42238f85f8d25129
|
refs/heads/master
| 2023-04-27T08:07:16.398124
| 2019-06-02T07:19:07
| 2019-06-02T07:19:07
| 189,816,258
| 0
| 0
| null | 2022-11-22T03:33:52
| 2019-06-02T07:15:27
|
Python
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
# Generated by Django 2.2 on 2019-05-21 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
]
|
[
"gmltmd121@gmail.com"
] |
gmltmd121@gmail.com
|
bb4c204fd838cabab66d96bd22ffe3b75aba2054
|
2bba4782f9085d2c0c324f6668709a81e482e095
|
/secao06/ex39.py
|
60905c0de2ffaf50db15faaf89b89564324145f2
|
[] |
no_license
|
Saccha/Exercicios_Python
|
886ae07392f006226688b8817bf17a7a52020ef9
|
e54203cb8754180f0fe120ee60c462c2e74c86e3
|
refs/heads/main
| 2023-04-18T06:44:42.243579
| 2021-04-18T03:49:30
| 2021-04-18T03:49:30
| 346,230,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
"""
39. Faça um programa que calcule a área de um triângulo, cuja base e altura
são fornecida pelo usuário. Esse programa não pode permitir a entrada de dados inválidos,
ou seja, medidas menores ou iguais a 0.
"""
base = int(input("Digite o tamanho da base do triângulo: "))
if base > 0:
altura = int(input("Digite o tamanho da altura do triângulo: "))
if altura > 0:
area_triangulo = (base * altura) / 2
print(f"A aréa do triângulo é {area_triangulo}")
else:
print("Altura inválida!")
else:
print("Base inválida!")
|
[
"noreply@github.com"
] |
Saccha.noreply@github.com
|
421b77d86e1828dc04ab6239fe78cafe93a062c5
|
d6e53e37c394acab024696b15c2da0a53a00d317
|
/zanhu/news/tests/test_urls.py
|
d11cf8b1a85d2fb9e98a5ab9fc6d0c7b5a5636e7
|
[] |
no_license
|
3pacccccc/zanhu
|
04943ebefb32bac5668287c75dd1e6793010760d
|
c4ee76ef6018ae98cab7ea53255feeae83c8dfaf
|
refs/heads/master
| 2022-12-13T21:24:18.136698
| 2019-07-21T17:19:15
| 2019-07-21T17:19:15
| 191,209,450
| 4
| 0
| null | 2022-12-08T05:17:22
| 2019-06-10T16:57:02
|
CSS
|
UTF-8
|
Python
| false
| false
| 636
|
py
|
from test_plus import TestCase
from django.urls import reverse, resolve
class TestUserURLs(TestCase):
def setUp(self):
self.user = self.make_user()
def test_detail_reverse(self):
self.assertEqual(reverse('users:detail', kwargs={'username': 'testuser'}), '/users/testuser/')
def test_detail_resolve(self):
self.assertEqual(resolve('/users/testuser/').view_name, 'users:detail')
def test_update_reverse(self):
self.assertEqual(reverse('users:update'), '/users/update/')
def test_update_resolve(self):
self.assertEqual(resolve('/users/update/').view_name, 'users:update')
|
[
"351489917@qq.com"
] |
351489917@qq.com
|
8fbf4dbd5b542b886e1093c883ec70240afca7aa
|
3be42b83a15d022f5863c96ec26e21bac0f7c27e
|
/tensorflow_probability/python/bijectors/categorical_to_discrete_test.py
|
9ed4b7e54dc20c538567c8a95c95853e66ff5711
|
[
"Apache-2.0"
] |
permissive
|
ogrisel/probability
|
846f5c13cddee5cf167b215e651b7479003f15d2
|
8f67456798615f9bf60ced2ce6db5d3dba3515fe
|
refs/heads/master
| 2022-11-09T10:53:23.000918
| 2020-07-01T23:16:03
| 2020-07-01T23:17:25
| 276,580,359
| 2
| 1
|
Apache-2.0
| 2020-07-02T07:37:58
| 2020-07-02T07:37:57
| null |
UTF-8
|
Python
| false
| false
| 5,842
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests CategoricalToDiscrete bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.bijectors import categorical_to_discrete
from tensorflow_probability.python.internal import test_util
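# CategoricalToDiscrete maps integer category indices i to map_values[i] on the
# forward pass and looks values back up on the inverse pass, as the tests below
# exercise (e.g. forward([0, 1]) with map_values=[0.1, 0.2] gives [0.1, 0.2]).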
@test_util.test_all_tf_execution_regimes
class CategoricalToDiscreteTest(test_util.TestCase):
def testUnsortedValuesRaises(self):
with self.assertRaisesOpError('map_values is not strictly increasing'):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[1, 3, 2], validate_args=True)
self.evaluate(bijector.forward([0, 1, 2]))
def testMapValuesRankNotEqualToOneRaises(self):
with self.assertRaisesWithPredicateMatch(ValueError,
'Rank of map_values must be 1'):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[[1, 2], [3, 4]], validate_args=True)
self.evaluate(bijector.map_values)
def testMapValuesSizeZeroRaises(self):
with self.assertRaisesWithPredicateMatch(
ValueError, 'Size of map_values must be greater than 0'):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[], validate_args=True)
self.evaluate(bijector.map_values)
def testBijectorForward(self):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[0.1, 0.2, 0.3, 0.4], validate_args=True)
self.assertAllClose([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]],
self.evaluate(
bijector.forward([[0, 1, 2, 3], [3, 2, 1, 0]])))
def testBijectorForwardOutOfBoundIndicesRaises(self):
with self.assertRaisesOpError('indices out of bound'):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[0.1, 0.2, 0.3, 0.4], validate_args=True)
self.evaluate(bijector.forward([5]))
def testBijectorInverse(self):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[0.1, 0.2, 0.3, 0.4], validate_args=True)
self.assertAllEqual([[3, 3, 3], [0, 1, 2]],
self.evaluate(
bijector.inverse([[0.400001, 0.4, 0.399999],
[0.1, 0.2, 0.3]])))
def testBijectorInverseValueNotFoundRaises(self):
with self.assertRaisesOpError('inverse value not found'):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[0.1, 0.2, 0.3, 0.4], validate_args=True)
self.evaluate(bijector.inverse([0.21, 0.4]))
def testInverseLogDetJacobian(self):
bijector = categorical_to_discrete.CategoricalToDiscrete(
map_values=[0.1, 0.2], validate_args=True)
self.assertAllClose(
0,
self.evaluate(
bijector.inverse_log_det_jacobian([0.1, 0.2], event_ndims=0)))
def testBijectiveAndFinite32bit(self):
x = np.arange(100).astype(np.int32)
y = np.logspace(-10, 10, 100).astype(np.float32)
bijector = categorical_to_discrete.CategoricalToDiscrete(map_values=y)
bijector_test_util.assert_bijective_and_finite(
bijector, x, y, eval_func=self.evaluate, event_ndims=0)
def testBijectiveAndFinite16bit(self):
x = np.arange(100).astype(np.int32)
y = np.logspace(-5, 4, 100).astype(np.float16)
bijector = categorical_to_discrete.CategoricalToDiscrete(map_values=y)
bijector_test_util.assert_bijective_and_finite(
bijector, x, y, eval_func=self.evaluate, event_ndims=0)
@test_util.jax_disable_variable_test
@test_util.numpy_disable_gradient_test
def testVariableGradients(self):
map_values = tf.Variable([0.3, 0.5])
b = categorical_to_discrete.CategoricalToDiscrete(map_values=map_values,
validate_args=True)
with tf.GradientTape() as tape:
y = tf.reduce_sum(b.forward([0, 1]))
grads = tape.gradient(y, [map_values])
self.assertAllNotNone(grads)
@test_util.numpy_disable_gradient_test
def testNonVariableGradients(self):
map_values = tf.convert_to_tensor([0.3, 0.5])
def _func(map_values):
b = categorical_to_discrete.CategoricalToDiscrete(map_values=map_values,
validate_args=True)
return tf.reduce_sum(b.forward([0, 1]))
grads = tfp.math.value_and_gradient(_func, [map_values])
self.assertAllNotNone(grads)
def testModifiedMapValuesIncreasingAssertion(self):
map_values = tf.Variable([0.1, 0.2])
b = categorical_to_discrete.CategoricalToDiscrete(map_values=map_values,
validate_args=True)
self.evaluate(map_values.initializer)
with self.assertRaisesOpError('map_values is not strictly increasing.'):
with tf.control_dependencies([map_values.assign([0.2, 0.1])]):
self.evaluate(b.forward([0, 1]))
if __name__ == '__main__':
tf.test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
cb90f482637e3cc11e4a5c03d5017b231addd2d6
|
2e5c9b0e0cd6a9e0185a27675b405f0458334c1c
|
/pyarchinit/pyarchinit_Schedaind_mainapp.py
|
b876200713b124ed23f490bdd04a81ccf093ab9b
|
[] |
no_license
|
pyarchinit/deleting_pyarchinit_beta_test_qgis2
|
80e59501dcf9f444e255c7a9111af3b19f2c1a6b
|
f0e0e2fad43e55207c4a122f4fa4083ce0e9eef2
|
refs/heads/master
| 2021-01-10T18:37:36.546306
| 2013-09-29T13:23:24
| 2013-09-29T13:23:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,710
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
/***************************************************************************
pyArchInit Plugin - A QGIS plugin to manage archaeological dataset
stored in Postgres
-------------------
begin : 2007-12-01
copyright : (C) 2008 by Luca Mandolesi
email : mandoluca at gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import sys, os
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import PyQt4.QtGui
try:
from qgis.core import *
from qgis.gui import *
except:
pass
from datetime import date
from psycopg2 import *
#--import pyArchInit modules--#
from pyarchinit_schedaind_ui import Ui_DialogInd
from pyarchinit_schedaind_ui import *
from pyarchinit_utility import *
from pyarchinit_pyqgis import Pyarchinit_pyqgis
from sortpanelmain import SortPanelMain
try:
from pyarchinit_db_manager import *
except:
pass
from pyarchinit_exp_Individui_pdf import *
from delegateComboBox import *
class pyarchinit_Schedaind(QDialog, Ui_DialogInd):
MSG_BOX_TITLE = "PyArchInit - pyarchinit_US_version 0.4 - Scheda Individuo"
DATA_LIST = []
DATA_LIST_REC_CORR = []
DATA_LIST_REC_TEMP = []
REC_CORR = 0
REC_TOT = 0
BROWSE_STATUS = "b"
STATUS_ITEMS = {"b": "Usa", "f": "Trova", "n": "Nuovo Record"}
SORT_MODE = 'asc'
SORTED_ITEMS = {"n": "Non ordinati", "o": "Ordinati"}
SORT_STATUS = "n"
UTILITY = Utility()
DB_MANAGER = ""
TABLE_NAME = 'individui_table'
MAPPER_TABLE_CLASS = "SCHEDAIND"
NOME_SCHEDA = "Scheda Individuo"
ID_TABLE = "id_scheda_ind"
CONVERSION_DICT = {
ID_TABLE:ID_TABLE,
"Sito":"sito",
"US":"us",
"Area": "area",
"Nr. Individuo":"nr_individuo",
"Data Schedatura":"data_schedatura",
"Schedatore":"schedatore",
"Stima del sesso":"sesso",
"Stima dell'eta' di morte min":"eta_min",
"Stima dell'eta' di morte max":"eta_max",
"Classi di eta'":"classi_eta",
"Osservazioni":"osservazioni"
}
SORT_ITEMS = [
ID_TABLE,
"Sito",
"Area",
"US",
"Nr. Individuo",
"Data schedatura",
"Schedatore",
"Stima del sesso",
"Stima dell'eta' di morte min",
"Stima dell'eta' di morte max",
"Classi di eta'",
"Osservazioni"
]
TABLE_FIELDS = [
'sito',
'area',
'us',
'nr_individuo',
'data_schedatura',
'schedatore',
'sesso',
'eta_min',
'eta_max',
'classi_eta',
'osservazioni'
]
def __init__(self, iface):
self.iface = iface
self.pyQGIS = Pyarchinit_pyqgis(self.iface)
QDialog.__init__(self)
self.setupUi(self)
self.customize_GUI() #call for GUI customizations
self.currentLayerId = None
try:
self.on_pushButton_connect_pressed()
except:
pass
def enable_button(self, n):
self.pushButton_connect.setEnabled(n)
self.pushButton_new_rec.setEnabled(n)
self.pushButton_view_all.setEnabled(n)
self.pushButton_first_rec.setEnabled(n)
self.pushButton_last_rec.setEnabled(n)
self.pushButton_prev_rec.setEnabled(n)
self.pushButton_next_rec.setEnabled(n)
self.pushButton_delete.setEnabled(n)
self.pushButton_new_search.setEnabled(n)
self.pushButton_search_go.setEnabled(n)
self.pushButton_sort.setEnabled(n)
def enable_button_search(self, n):
self.pushButton_connect.setEnabled(n)
self.pushButton_new_rec.setEnabled(n)
self.pushButton_view_all.setEnabled(n)
self.pushButton_first_rec.setEnabled(n)
self.pushButton_last_rec.setEnabled(n)
self.pushButton_prev_rec.setEnabled(n)
self.pushButton_next_rec.setEnabled(n)
self.pushButton_delete.setEnabled(n)
self.pushButton_save.setEnabled(n)
self.pushButton_sort.setEnabled(n)
def on_pushButton_connect_pressed(self):
from pyarchinit_conn_strings import *
conn = Connection()
conn_str = conn.conn_str()
try:
self.DB_MANAGER = Pyarchinit_db_management(conn_str)
self.DB_MANAGER.connection()
self.charge_records() #charge records from DB
#check if DB is empty
if bool(self.DATA_LIST) == True:
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.label_sort.setText(self.SORTED_ITEMS["n"])
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
self.charge_list()
self.fill_fields()
else:
QMessageBox.warning(self, "BENVENUTO", "Benvenuto in pyArchInit" + self.NOME_SCHEDA + ". Il database e' vuoto. Premi 'Ok' e buon lavoro!", QMessageBox.Ok)
self.charge_list()
self.on_pushButton_new_rec_pressed()
except Exception, e:
e = str(e)
if e.find("no such table"):
QMessageBox.warning(self, "Alert", "La connessione e' fallita <br><br> Tabella non presente. E' NECESSARIO RIAVVIARE QGIS" , QMessageBox.Ok)
else:
QMessageBox.warning(self, "Alert", "La connessione e' fallita <br> Errore: <br>" + str(e) , QMessageBox.Ok)
def customize_GUI(self):
pass
def loadMapPreview(self, mode = 0):
pass
def charge_list(self):
sito_vl = self.UTILITY.tup_2_list_III(self.DB_MANAGER.group_by('site_table', 'sito', 'SITE'))
try:
sito_vl.remove('')
except:
pass
self.comboBox_sito.clear()
sito_vl.sort()
self.comboBox_sito.addItems(sito_vl)
def charge_periodo_list(self):
pass
def charge_fase_iniz_list(self):
pass
def charge_fase_fin_list(self):
pass
#buttons functions
def generate_list_pdf(self):
data_list = []
for i in range(len(self.DATA_LIST)):
data_list.append([
str(self.DATA_LIST[i].sito), #1 - Sito
int(self.DATA_LIST[i].area), #2 - Area
int(self.DATA_LIST[i].us), #3 - us
int(self.DATA_LIST[i].nr_individuo), #4 - nr individuo
str(self.DATA_LIST[i].data_schedatura), #5 - data schedatura
str(self.DATA_LIST[i].schedatore), #6 - schedatore
str(self.DATA_LIST[i].sesso), #7 - sesso
str(self.DATA_LIST[i].eta_min), #8 - eta' minima
str(self.DATA_LIST[i].eta_max), #9- eta massima
str(self.DATA_LIST[i].classi_eta), #10 - classi di eta'
str(self.DATA_LIST[i].osservazioni) #11 - osservazioni
])
return data_list
def on_pushButton_pdf_exp_pressed(self):
Individui_pdf_sheet = generate_pdf()
data_list = self.generate_list_pdf()
Individui_pdf_sheet.build_Individui_sheets(data_list)
"""
def on_toolButtonPan_toggled(self):
self.toolPan = QgsMapToolPan(self.mapPreview)
self.mapPreview.setMapTool(self.toolPan)
"""
"""
def on_pushButton_showSelectedFeatures_pressed(self):
field_position = self.pyQGIS.findFieldFrDict(self.ID_TABLE)
field_list = self.pyQGIS.selectedFeatures()
id_list_sf = self.pyQGIS.findItemInAttributeMap(field_position, field_list)
id_list = []
for idl in id_list_sf:
sid = idl.toInt()
id_list.append(sid[0])
items,order_type = [self.ID_TABLE], "asc"
self.empty_fields()
self.DATA_LIST = []
temp_data_list = self.DB_MANAGER.query_sort(id_list, items, order_type, self.MAPPER_TABLE_CLASS, self.ID_TABLE)
for us in temp_data_list:
self.DATA_LIST.append(us)
self.fill_fields()
self.label_status.setText(self.STATUS["usa"])
if type(self.REC_CORR) == "<type 'str'>":
corr = 0
else:
corr = self.REC_CORR
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
"""
#buttons functions
def on_pushButton_sort_pressed(self):
dlg = SortPanelMain(self)
dlg.insertItems(self.SORT_ITEMS)
dlg.exec_()
items,order_type = dlg.ITEMS, dlg.TYPE_ORDER
self.SORT_ITEMS_CONVERTED = []
for i in items:
self.SORT_ITEMS_CONVERTED.append(self.CONVERSION_DICT[unicode(i)])
self.SORT_MODE = order_type
self.empty_fields()
id_list = []
for i in self.DATA_LIST:
id_list.append(eval("i." + self.ID_TABLE))
self.DATA_LIST = []
temp_data_list = self.DB_MANAGER.query_sort(id_list, self.SORT_ITEMS_CONVERTED, self.SORT_MODE, self.MAPPER_TABLE_CLASS, self.ID_TABLE)
for i in temp_data_list:
self.DATA_LIST.append(i)
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
        if isinstance(self.REC_CORR, str):
corr = 0
else:
corr = self.REC_CORR
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
self.SORT_STATUS = "o"
self.label_sort.setText(self.SORTED_ITEMS[self.SORT_STATUS])
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
self.fill_fields()
def on_toolButtonGis_toggled(self):
if self.toolButtonGis.isChecked() == True:
QMessageBox.warning(self, "Messaggio", "Modalita' GIS attiva. Da ora le tue ricerche verranno visualizzate sul GIS", QMessageBox.Ok)
else:
QMessageBox.warning(self, "Messaggio", "Modalita' GIS disattivata. Da ora le tue ricerche non verranno piu' visualizzate sul GIS", QMessageBox.Ok)
def on_toolButtonPreview_toggled(self):
if self.toolButtonPreview.isChecked() == True:
QMessageBox.warning(self, "Messaggio", "Modalita' Preview US attivata. Le piante delle US saranno visualizzate nella sezione Piante", QMessageBox.Ok)
self.loadMapPreview()
else:
self.loadMapPreview(1)
"""
def on_pushButton_addRaster_pressed(self):
if self.toolButtonGis.isChecked() == True:
self.pyQGIS.addRasterLayer()
"""
def on_pushButton_new_rec_pressed(self):
#set the GUI for a new record
if self.BROWSE_STATUS != "n":
self.BROWSE_STATUS = "n"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.empty_fields()
self.label_sort.setText(self.SORTED_ITEMS["n"])
self.setComboBoxEditable(["self.comboBox_sito"],0)
self.setComboBoxEnable(["self.comboBox_sito"],"True")
self.setComboBoxEnable(["self.lineEdit_area"],"True")
self.setComboBoxEnable(["self.lineEdit_us"],"True")
self.setComboBoxEnable(["self.lineEdit_individuo"],"True")
self.set_rec_counter('', '')
self.enable_button(0)
def on_pushButton_save_pressed(self):
#save record
if self.BROWSE_STATUS == "b":
if self.records_equal_check() == 1:
self.update_if(QMessageBox.warning(self,'ATTENZIONE',"Il record e' stato modificato. Vuoi salvare le modifiche?", QMessageBox.Cancel,1))
self.label_sort.setText(self.SORTED_ITEMS["n"])
self.enable_button(1)
else:
QMessageBox.warning(self, "ATTENZIONE", "Non è stata realizzata alcuna modifica.", QMessageBox.Ok)
else:
if self.data_error_check() == 0:
test_insert = self.insert_new_rec()
if test_insert == 1:
self.empty_fields()
self.label_sort.setText(self.SORTED_ITEMS["n"])
self.charge_list()
self.charge_records()
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), len(self.DATA_LIST)-1
self.set_rec_counter(self.REC_TOT, self.REC_CORR+1)
self.fill_fields(self.REC_CORR)
self.set_rec_counter(self.REC_TOT, self.REC_CORR+1)
self.setComboBoxEditable(["self.comboBox_sito"],1)
self.setComboBoxEnable(["self.comboBox_sito"],"False")
self.setComboBoxEnable(["self.lineEdit_area"],"False")
self.setComboBoxEnable(["self.lineEdit_us"],"False")
self.setComboBoxEnable(["self.lineEdit_individuo"],"False")
self.enable_button(1)
else:
pass
def data_error_check(self):
test = 0
#EC = Error_check()
#somes check here
return test
def insert_new_rec(self):
if self.comboBox_eta_min.currentText() == "":
eta_min = None
else:
eta_min = int(self.comboBox_eta_min.currentText())
if self.comboBox_eta_max.currentText() == "":
eta_max = None
else:
eta_max = int(self.comboBox_eta_max.currentText())
if self.comboBox_classi_eta.currentText() == "":
classi_eta = ''
else:
classi_eta = str(self.comboBox_classi_eta.currentText())
try:
data = self.DB_MANAGER.insert_values_ind(
self.DB_MANAGER.max_num_id(self.MAPPER_TABLE_CLASS, self.ID_TABLE)+1,
str(self.comboBox_sito.currentText()), #1 - Sito
str(self.lineEdit_area.text()), #2 - area
int(self.lineEdit_us.text()), #3 - US
int(self.lineEdit_individuo.text()), #4 - individuo
str(self.lineEdit_data_schedatura.text()), #5 - data schedatura
str(self.lineEdit_schedatore.text()), #6 - schedatore
str(self.comboBox_sesso.currentText()), #7 - sesso
eta_min, #8 - eta' min
eta_max, #9 - eta' max
classi_eta, #10 - classi eta
str(self.textEdit_osservazioni.toPlainText()) #11 - osservazioni
)
try:
self.DB_MANAGER.insert_data_session(data)
return 1
except Exception, e:
e_str = str(e)
if e_str.__contains__("Integrity"):
msg = self.ID_TABLE + " gia' presente nel database"
else:
msg = e
QMessageBox.warning(self, "Errore", "Attenzione 1 ! \n"+ str(msg), QMessageBox.Ok)
return 0
except Exception, e:
QMessageBox.warning(self, "Errore", "Attenzione 2 ! \n"+str(e), QMessageBox.Ok)
return 0
#insert new row into tableWidget
def on_pushButton_insert_row_rapporti_pressed(self):
self.insert_new_row('self.tableWidget_rapporti')
def on_pushButton_insert_row_inclusi_pressed(self):
self.insert_new_row('self.tableWidget_inclusi')
def on_pushButton_insert_row_campioni_pressed(self):
self.insert_new_row('self.tableWidget_campioni')
def on_pushButton_view_all_pressed(self):
self.empty_fields()
self.charge_records()
self.fill_fields()
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
        if isinstance(self.REC_CORR, str):
corr = 0
else:
corr = self.REC_CORR
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
self.label_sort.setText(self.SORTED_ITEMS["n"])
#records surf functions
def on_pushButton_first_rec_pressed(self):
if self.records_equal_check() == 1:
self.update_if(QMessageBox.warning(self,'Errore',"Il record e' stato modificato. Vuoi salvare le modifiche?", QMessageBox.Cancel,1))
try:
self.empty_fields()
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.fill_fields(0)
self.set_rec_counter(self.REC_TOT, self.REC_CORR+1)
except Exception, e:
QMessageBox.warning(self, "Errore", str(e), QMessageBox.Ok)
def on_pushButton_last_rec_pressed(self):
if self.records_equal_check() == 1:
self.update_if(QMessageBox.warning(self,'Errore',"Il record e' stato modificato. Vuoi salvare le modifiche?", QMessageBox.Cancel,1))
try:
self.empty_fields()
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), len(self.DATA_LIST)-1
self.fill_fields(self.REC_CORR)
self.set_rec_counter(self.REC_TOT, self.REC_CORR+1)
except Exception, e:
QMessageBox.warning(self, "Errore", str(e), QMessageBox.Ok)
def on_pushButton_prev_rec_pressed(self):
if self.records_equal_check() == 1:
self.update_if(QMessageBox.warning(self,'Errore',"Il record e' stato modificato. Vuoi salvare le modifiche?", QMessageBox.Cancel,1))
self.REC_CORR = self.REC_CORR-1
if self.REC_CORR == -1:
self.REC_CORR = 0
QMessageBox.warning(self, "Errore", "Sei al primo record!", QMessageBox.Ok)
else:
try:
self.empty_fields()
self.fill_fields(self.REC_CORR)
self.set_rec_counter(self.REC_TOT, self.REC_CORR+1)
except Exception, e:
QMessageBox.warning(self, "Errore", str(e), QMessageBox.Ok)
def on_pushButton_next_rec_pressed(self):
if self.records_equal_check() == 1:
self.update_if(QMessageBox.warning(self,'Errore',"Il record e' stato modificato. Vuoi salvare le modifiche?", QMessageBox.Cancel,1))
self.REC_CORR = self.REC_CORR+1
if self.REC_CORR >= self.REC_TOT:
self.REC_CORR = self.REC_CORR-1
QMessageBox.warning(self, "Errore", "Sei all'ultimo record!", QMessageBox.Ok)
else:
try:
self.empty_fields()
self.fill_fields(self.REC_CORR)
self.set_rec_counter(self.REC_TOT, self.REC_CORR+1)
except Exception, e:
QMessageBox.warning(self, "Errore", str(e), QMessageBox.Ok)
def on_pushButton_delete_pressed(self):
msg = QMessageBox.warning(self,"Attenzione!!!","Vuoi veramente eliminare il record? \n L'azione e' irreversibile", QMessageBox.Cancel,1)
if msg != 1:
QMessageBox.warning(self,"Messagio!!!","Azione Annullata!")
else:
try:
id_to_delete = eval("self.DATA_LIST[self.REC_CORR]." + self.ID_TABLE)
self.DB_MANAGER.delete_one_record(self.TABLE_NAME, self.ID_TABLE, id_to_delete)
self.charge_records() #charge records from DB
QMessageBox.warning(self,"Messaggio!!!","Record eliminato!")
self.charge_list()
except Exception, e:
QMessageBox.warning(self, "Attenzione", "Il database e' vuoto!" + str(e), QMessageBox.Ok)
if bool(self.DATA_LIST) == False:
self.DATA_LIST = []
self.DATA_LIST_REC_CORR = []
self.DATA_LIST_REC_TEMP = []
self.REC_CORR = 0
self.REC_TOT = 0
self.empty_fields()
self.set_rec_counter(0, 0)
#check if DB is empty
if bool(self.DATA_LIST) == True:
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
self.fill_fields()
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
self.label_sort.setText(self.SORTED_ITEMS["n"])
def on_pushButton_new_search_pressed(self):
if self.records_equal_check() == 1 and self.BROWSE_STATUS == "b":
msg = self.update_if(QMessageBox.warning(self,'Errore',"Il record e' stato modificato. Vuoi salvare le modifiche?", QMessageBox.Cancel,1))
#else:
self.enable_button_search(0)
#set the GUI for a new search
if self.BROWSE_STATUS != "f":
self.BROWSE_STATUS = "f"
###
self.setComboBoxEditable(["self.comboBox_sito"],1)
self.setComboBoxEnable(["self.comboBox_sito"],"True")
self.setComboBoxEnable(["self.lineEdit_area"],"True")
self.setComboBoxEnable(["self.lineEdit_us"],"True")
self.setComboBoxEnable(["self.lineEdit_individuo"],"True")
###
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.set_rec_counter('','')
self.label_sort.setText(self.SORTED_ITEMS["n"])
self.charge_list()
self.empty_fields()
def on_pushButton_search_go_pressed(self):
if self.BROWSE_STATUS != "f":
QMessageBox.warning(self, "ATTENZIONE", "Per eseguire una nuova ricerca clicca sul pulsante 'new search' ", QMessageBox.Ok)
else:
if self.lineEdit_us.text() != "":
us = int(self.lineEdit_us.text())
else:
us = ""
if self.lineEdit_individuo.text() != "":
individuo = int(self.lineEdit_individuo.text())
else:
individuo = ""
if self.comboBox_eta_min.currentText() != "":
eta_min = int(self.comboBox_eta_min.currentText())
else:
eta_min = ""
if self.comboBox_eta_max.currentText() != "":
eta_max = int(self.comboBox_eta_max.currentText())
else:
eta_max = ""
search_dict = {
self.TABLE_FIELDS[0] : "'" + str(self.comboBox_sito.currentText())+"'", #1 - Sito
self.TABLE_FIELDS[1] : "'" + str(self.lineEdit_area.text()) + "'", #2 - Area
self.TABLE_FIELDS[2] : us, #3 - US
self.TABLE_FIELDS[3] : individuo, #4 - individuo
self.TABLE_FIELDS[4] : "'" + str(self.lineEdit_data_schedatura.text()) + "'", #5 - data schedatura
self.TABLE_FIELDS[5] : "'" + str(self.lineEdit_schedatore.text())+"'", #6 - schedatore
self.TABLE_FIELDS[6] : "'" + str(self.comboBox_sesso.currentText())+"'", #7 - sesso
self.TABLE_FIELDS[7] : eta_min, #8 - eta min
self.TABLE_FIELDS[8] : eta_max, #9 - eta max
self.TABLE_FIELDS[9] : "'" + str(self.comboBox_classi_eta.currentText())+"'", #10 - classi eta
self.TABLE_FIELDS[10] : str(self.textEdit_osservazioni.toPlainText()) #11 - osservazioni
}
u = Utility()
search_dict = u.remove_empty_items_fr_dict(search_dict)
if bool(search_dict) == False:
QMessageBox.warning(self, "ATTENZIONE", "Non e' stata impostata alcuna ricerca!!!", QMessageBox.Ok)
else:
res = self.DB_MANAGER.query_bool(search_dict, self.MAPPER_TABLE_CLASS)
if bool(res) == False:
QMessageBox.warning(self, "ATTENZIONE", "Non e' stato trovato alcun record!", QMessageBox.Ok)
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
self.fill_fields(self.REC_CORR)
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.setComboBoxEnable(["self.comboBox_sito"],"False")
self.setComboBoxEnable(["self.lineEdit_area"],"False")
self.setComboBoxEnable(["self.lineEdit_us"],"False")
self.setComboBoxEnable(["self.lineEdit_individuo"],"False")
else:
self.DATA_LIST = []
for i in res:
self.DATA_LIST.append(i)
self.REC_TOT, self.REC_CORR = len(self.DATA_LIST), 0
self.DATA_LIST_REC_TEMP = self.DATA_LIST_REC_CORR = self.DATA_LIST[0]
self.fill_fields()
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
self.set_rec_counter(len(self.DATA_LIST), self.REC_CORR+1)
if self.REC_TOT == 1:
strings = ("E' stato trovato", self.REC_TOT, "record")
if self.toolButtonGis.isChecked() == True:
id_us_list = self.charge_id_us_for_individuo()
self.pyQGIS.charge_individui_us(id_us_list)
else:
strings = ("Sono stati trovati", self.REC_TOT, "records")
if self.toolButtonGis.isChecked() == True:
id_us_list = self.charge_id_us_for_individuo()
self.pyQGIS.charge_individui_us(id_us_list)
self.setComboBoxEnable(["self.comboBox_sito"],"False")
self.setComboBoxEnable(["self.lineEdit_area"],"False")
self.setComboBoxEnable(["self.lineEdit_us"],"False")
self.setComboBoxEnable(["self.lineEdit_individuo"],"False")
QMessageBox.warning(self, "Messaggio", "%s %d %s" % strings, QMessageBox.Ok)
self.enable_button_search(1)
def update_if(self, msg):
rec_corr = self.REC_CORR
self.msg = msg
if self.msg == 1:
test = self.update_record()
if test == 1:
id_list = []
for i in self.DATA_LIST:
id_list.append(eval("i."+ self.ID_TABLE))
self.DATA_LIST = []
if self.SORT_STATUS == "n":
temp_data_list = self.DB_MANAGER.query_sort(id_list, [self.ID_TABLE], 'asc', self.MAPPER_TABLE_CLASS, self.ID_TABLE) #self.DB_MANAGER.query_bool(self.SEARCH_DICT_TEMP, self.MAPPER_TABLE_CLASS) #
else:
temp_data_list = self.DB_MANAGER.query_sort(id_list, self.SORT_ITEMS_CONVERTED, self.SORT_MODE, self.MAPPER_TABLE_CLASS, self.ID_TABLE)
for i in temp_data_list:
self.DATA_LIST.append(i)
self.BROWSE_STATUS = "b"
self.label_status.setText(self.STATUS_ITEMS[self.BROWSE_STATUS])
                if isinstance(self.REC_CORR, str):
corr = 0
else:
corr = self.REC_CORR
return 1
elif test == 0:
return 0
#custom functions
def charge_records(self):
self.DATA_LIST = []
id_list = []
for i in self.DB_MANAGER.query(eval(self.MAPPER_TABLE_CLASS)):
id_list.append(eval("i."+ self.ID_TABLE))
temp_data_list = self.DB_MANAGER.query_sort(id_list, [self.ID_TABLE], 'asc', self.MAPPER_TABLE_CLASS, self.ID_TABLE)
for i in temp_data_list:
self.DATA_LIST.append(i)
def datestrfdate(self):
now = date.today()
today = now.strftime("%d-%m-%Y")
return today
def table2dict(self, n):
self.tablename = n
row = eval(self.tablename+".rowCount()")
col = eval(self.tablename+".columnCount()")
lista=[]
for r in range(row):
sub_list = []
for c in range(col):
value = eval(self.tablename+".item(r,c)")
if value != None:
sub_list.append(str(value.text()))
if bool(sub_list) == True:
lista.append(sub_list)
return lista
def tableInsertData(self, t, d):
pass
"""
self.table_name = t
self.data_list = eval(d)
self.data_list.sort()
#column table count
table_col_count_cmd = ("%s.columnCount()") % (self.table_name)
table_col_count = eval(table_col_count_cmd)
#clear table
table_clear_cmd = ("%s.clearContents()") % (self.table_name)
eval(table_clear_cmd)
for i in range(table_col_count):
table_rem_row_cmd = ("%s.removeRow(%d)") % (self.table_name, i)
eval(table_rem_row_cmd)
#for i in range(len(self.data_list)):
#self.insert_new_row(self.table_name)
for row in range(len(self.data_list)):
cmd = ('%s.insertRow(%s)') % (self.table_name, row)
eval(cmd)
for col in range(len(self.data_list[row])):
#item = self.comboBox_sito.setEditText(self.data_list[0][col]
item = QTableWidgetItem(self.data_list[row][col])
exec_str = ('%s.setItem(%d,%d,item)') % (self.table_name,row,col)
eval(exec_str)
"""
def insert_new_row(self, table_name):
"""insert new row into a table based on table_name"""
cmd = table_name+".insertRow(0)"
eval(cmd)
def empty_fields(self):
#rapporti_row_count = self.tableWidget_rapporti.rowCount()
#campioni_row_count = self.tableWidget_campioni.rowCount()
#inclusi_row_count = self.tableWidget_inclusi.rowCount()
self.comboBox_sito.setEditText("") #1 - Sito
self.lineEdit_area.clear() #2 - area
self.lineEdit_us.clear() #3 - US
self.lineEdit_data_schedatura.clear() #4 - data schedatura
self.lineEdit_schedatore.clear() #5 - schedatore
self.lineEdit_individuo.clear() #6 - individuo
self.comboBox_sesso.setEditText("") #7 - sesso
self.comboBox_eta_min.setEditText("") #8 - eta' minima
self.comboBox_eta_max.setEditText("") #9 - eta' massima
self.comboBox_classi_eta.setEditText("") #10 - classi di eta'
self.textEdit_osservazioni.clear() #11 - osservazioni
def fill_fields(self, n=0):
self.rec_num = n
try:
self.comboBox_sito.setEditText(str(self.DATA_LIST[self.rec_num].sito)) #1 - Sito
self.lineEdit_area.setText(str(self.DATA_LIST[self.rec_num].area)) #2 - area
self.lineEdit_us.setText(str(self.DATA_LIST[self.rec_num].us)) #3 - us
self.lineEdit_individuo.setText(str(self.DATA_LIST[self.rec_num].nr_individuo)) #4 - nr individuo
self.lineEdit_data_schedatura.setText(str(self.DATA_LIST[self.rec_num].data_schedatura)) #5 - data schedatura
self.lineEdit_schedatore.setText(str(self.DATA_LIST[self.rec_num].schedatore)) #6 - schedatore
self.comboBox_sesso.setEditText(str(self.DATA_LIST[self.rec_num].sesso)) #7 - sesso
if self.DATA_LIST[self.rec_num].eta_min == None: #8 - eta minima
self.comboBox_eta_min.setEditText("")
else:
self.comboBox_eta_min.setEditText(str(self.DATA_LIST[self.rec_num].eta_min))
if self.DATA_LIST[self.rec_num].eta_max == None: #9 - eta massima
self.comboBox_eta_max.setEditText("")
else:
self.comboBox_eta_max.setEditText(str(self.DATA_LIST[self.rec_num].eta_max))
self.comboBox_classi_eta.setEditText(str(self.DATA_LIST[self.rec_num].classi_eta)) #10 - classi di eta
            self.textEdit_osservazioni.setText(unicode(self.DATA_LIST[self.rec_num].osservazioni)) #11 - osservazioni
if self.toolButtonPreview.isChecked() == True:
self.loadMapPreview()
except Exception, e:
QMessageBox.warning(self, "Errore", str(e), QMessageBox.Ok)
def set_rec_counter(self, t, c):
self.rec_tot = t
self.rec_corr = c
self.label_rec_tot.setText(str(self.rec_tot))
self.label_rec_corrente.setText(str(self.rec_corr))
def set_LIST_REC_TEMP(self):
if self.comboBox_eta_min.currentText() == "":
eta_min = None
else:
eta_min = self.comboBox_eta_min.currentText()
if self.comboBox_eta_max.currentText() == "":
eta_max = None
else:
eta_max = self.comboBox_eta_max.currentText()
#data
self.DATA_LIST_REC_TEMP = [
str(self.comboBox_sito.currentText()), #1 - Sito
str(self.lineEdit_area.text()), #2 - Area
str(self.lineEdit_us.text()), #3 - US
str(self.lineEdit_individuo.text()), #4 - individuo
str(self.lineEdit_data_schedatura.text()), #5 - data schedatura
str(self.lineEdit_schedatore.text()), #6 - schedatore
str(self.comboBox_sesso.currentText()), #7 - sesso
str(eta_min), #8- eta minima
str(eta_max), #9 - eta massima
str(self.comboBox_classi_eta.currentText()), #10 - classi eta
str(self.textEdit_osservazioni.toPlainText().toLatin1())] #11 - osservazioni
def set_LIST_REC_CORR(self):
self.DATA_LIST_REC_CORR = []
for i in self.TABLE_FIELDS:
self.DATA_LIST_REC_CORR.append(eval("str(self.DATA_LIST[self.REC_CORR]." + i + ")"))
def records_equal_check(self):
self.set_LIST_REC_TEMP()
self.set_LIST_REC_CORR()
if self.DATA_LIST_REC_CORR == self.DATA_LIST_REC_TEMP:
return 0
else:
return 1
def setComboBoxEditable(self, f, n):
field_names = f
value = n
for fn in field_names:
cmd = ('%s%s%d%s') % (fn, '.setEditable(', n, ')')
eval(cmd)
def setComboBoxEnable(self, f, v):
field_names = f
value = v
for fn in field_names:
cmd = ('%s%s%s%s') % (fn, '.setEnabled(', v, ')')
eval(cmd)
def update_record(self):
try:
self.DB_MANAGER.update(self.MAPPER_TABLE_CLASS,
self.ID_TABLE,
[eval("int(self.DATA_LIST[self.REC_CORR]." + self.ID_TABLE+")")],
self.TABLE_FIELDS,
self.rec_toupdate())
return 1
except Exception, e:
QMessageBox.warning(self, "Messaggio", "Problema di encoding: sono stati inseriti accenti o caratteri non accettati dal database. Se chiudete ora la scheda senza correggere gli errori perderete i dati. Fare una copia di tutto su un foglio word a parte. Errore :" + str(e), QMessageBox.Ok)
return 0
def rec_toupdate(self):
rec_to_update = self.UTILITY.pos_none_in_list(self.DATA_LIST_REC_TEMP)
#f = open('/test_rec_to_update_ind.txt', 'w')
#f.write(str(rec_to_update))
#f.close()
return rec_to_update
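    # Resolves, for every record currently loaded, the id_us of the matching
    # row in the US table; used by the GIS toggle to highlight the
    # corresponding US features via pyQGIS.charge_individui_us.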
def charge_id_us_for_individuo(self):
data_list_us = []
for rec in range(len(self.DATA_LIST)):
sito = "'"+str(self.DATA_LIST[rec].sito)+"'"
area = "'"+str(self.DATA_LIST[rec].area)+"'"
us = int(self.DATA_LIST[rec].us)
            search_dict_us = {'sito': sito, 'area': area, 'us': us}
            us_ind = self.DB_MANAGER.query_bool(search_dict_us, "US")
data_list_us.append(us_ind)
data_list_id_us = []
for us in range(len(data_list_us)):
data_list_id_us.append(data_list_us[us][0].id_us)
return data_list_id_us
def testing(self, name_file, message):
f = open(str(name_file), 'w')
f.write(str(message))
f.close()
## Class end
|
[
"pyarchinit@gmail.com"
] |
pyarchinit@gmail.com
|
d1110e2cb826366becbe3ad092ac71f92eaa8b9b
|
bb3c7f1af8520ec1ba117353819af1066d098b35
|
/Backend/ShipEzy/settings.py
|
eead43054d37953888167f2c0e1321fc750a47f1
|
[] |
no_license
|
RitikaSingh02/ShipEzyy
|
fc36cab264848d61090d4a0824cde7f453b8fcc6
|
a00e86afd39efcfe99a2e770cbde5d15ff8fdae0
|
refs/heads/master
| 2023-07-11T21:50:58.788964
| 2021-07-30T15:56:05
| 2021-07-30T15:56:05
| 394,031,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,777
|
py
|
import os
from dotenv import load_dotenv
load_dotenv('./.env')
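# The .env file is expected to provide: secret_key, host, port, username,
# password and authSource (all read via os.environ below).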
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('secret_key')
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',  # required for corsheaders.middleware.CorsMiddleware below
    'Main',
    'Admin',
    'Customer',
    'Driver',
    'Rides',
    'Vehicles'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must sit above CommonMiddleware (and any other middleware
    # that can generate responses) for django-cors-headers to work correctly.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ShipEzy.urls'
DATABASES = {
'default': {
'ENGINE': 'djongo',
'NAME': 'ShipEzy',
'CLIENT': {
'host':os.environ.get('host'),
'port': int(os.environ.get('port')),
'username': os.environ.get('username'),
'password': os.environ.get('password'),
'authSource': os.environ.get('authSource'),
'authMechanism': 'SCRAM-SHA-1'
},
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ShipEzy.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
SESSION_COOKIE_SECURE = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"ritika2002singh@gmail.com"
] |
ritika2002singh@gmail.com
|
9e0d5240635da3a56ff4da7586e70ff3ae30616e
|
84abce44bd0278fa99e9556168290675f399834c
|
/EcalAlCaRecoProducers/config/reRecoTags/Cal_Nov2016_ped_v1.py
|
d5be47190da438134c7f8e99b01ad280f2f89499
|
[] |
no_license
|
ECALELFS/ECALELF
|
7c304c6b544b0f22a4b62cf942f47fa8b58abef0
|
62a046cdf59badfcb6281a72923a0f38fd55e183
|
refs/heads/master
| 2021-01-23T13:36:31.574985
| 2017-06-22T12:26:28
| 2017-06-22T12:26:28
| 10,385,620
| 1
| 9
| null | 2017-06-30T12:59:05
| 2013-05-30T15:18:55
|
C++
|
UTF-8
|
Python
| false
| false
| 725
|
py
|
import FWCore.ParameterSet.Config as cms
# Official GT for september re-reco
# pedestal tags produced by Jean for pedestal studies:
# 6a9a2818932fce79d8222768ba4f2ad3f60f894c payload is used (first Bon pedestal run of 2016, Apr)
from CondCore.ESSources.CondDBESSource_cfi import *
#CondDBConnection.connect = cms.string( 'frontier://FrontierProd/CMS_CONDITIONS' )
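# The toGet block below overrides the EcalPedestalsRcd payload of the base
# global tag with the dedicated laser-study pedestal tag.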
RerecoGlobalTag = GlobalTag.clone(
globaltag = cms.string('80X_dataRun2_2016SeptRepro_v4'),
toGet = cms.VPSet(
cms.PSet(record = cms.string("EcalPedestalsRcd"),
tag = cms.string("EcalPedestals_laser_2016"),
connect = cms.string("frontier://FrontierPrep/CMS_CONDITIONS"),
),
),
)
|
[
"shervin.nourbakhsh@cern.ch"
] |
shervin.nourbakhsh@cern.ch
|
f7a2fb5e4a3b14443a444dcc6a0e365dd6f4e98b
|
4076cbced07fa1d62f8f947471899bdb19ce2624
|
/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py
|
78f2a82d5d2af91b925b39ea33cf7d93ddde0746
|
[
"Apache-2.0"
] |
permissive
|
andresriancho/moto
|
7afbc8b543342d9774bb865cdaeccbefa5adcb87
|
c65ff170b2f483b4ff630d3256959751c7afe3a5
|
refs/heads/master
| 2020-12-26T04:55:27.092585
| 2014-08-20T17:52:23
| 2014-08-20T17:52:23
| 15,454,934
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,770
|
py
|
template = {
"Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.",
"Parameters": {
"SSHLocation": {
"ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.",
"Description": " The IP address range that can be used to SSH to the EC2 instances",
"Default": "0.0.0.0/0",
"MinLength": "9",
"AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
"MaxLength": "18",
"Type": "String"
},
"KeyName": {
"Type": "String",
"Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance",
"MinLength": "1",
"AllowedPattern": "[\\x20-\\x7E]*",
"MaxLength": "255",
"ConstraintDescription": "can contain only ASCII characters."
},
"InstanceType": {
"Default": "m1.small",
"ConstraintDescription": "must be a valid EC2 instance type.",
"Type": "String",
"Description": "WebServer EC2 instance type",
"AllowedValues": [
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.xlarge",
"m3.2xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
]
}
},
"AWSTemplateFormatVersion": "2010-09-09",
"Outputs": {
"URL": {
"Description": "Newly created application URL",
"Value": {
"Fn::Join": [
"",
[
"http://",
{
"Fn::GetAtt": [
"WebServerInstance",
"PublicIp"
]
}
]
]
}
}
},
"Resources": {
"Subnet": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"CidrBlock": "10.0.0.0/24",
"Tags": [
{
"Value": {
"Ref": "AWS::StackId"
},
"Key": "Application"
}
]
}
},
"WebServerWaitHandle": {
"Type": "AWS::CloudFormation::WaitConditionHandle"
},
"Route": {
"Type": "AWS::EC2::Route",
"Properties": {
"GatewayId": {
"Ref": "InternetGateway"
},
"DestinationCidrBlock": "0.0.0.0/0",
"RouteTableId": {
"Ref": "RouteTable"
}
},
"DependsOn": "AttachGateway"
},
"SubnetRouteTableAssociation": {
"Type": "AWS::EC2::SubnetRouteTableAssociation",
"Properties": {
"SubnetId": {
"Ref": "Subnet"
},
"RouteTableId": {
"Ref": "RouteTable"
}
}
},
"InternetGateway": {
"Type": "AWS::EC2::InternetGateway",
"Properties": {
"Tags": [
{
"Value": {
"Ref": "AWS::StackId"
},
"Key": "Application"
}
]
}
},
"RouteTable": {
"Type": "AWS::EC2::RouteTable",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"Tags": [
{
"Value": {
"Ref": "AWS::StackId"
},
"Key": "Application"
}
]
}
},
"WebServerWaitCondition": {
"Type": "AWS::CloudFormation::WaitCondition",
"Properties": {
"Handle": {
"Ref": "WebServerWaitHandle"
},
"Timeout": "300"
},
"DependsOn": "WebServerInstance"
},
"VPC": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": "10.0.0.0/16",
"Tags": [
{
"Value": {
"Ref": "AWS::StackId"
},
"Key": "Application"
}
]
}
},
"InstanceSecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"SecurityGroupIngress": [
{
"ToPort": "22",
"IpProtocol": "tcp",
"CidrIp": {
"Ref": "SSHLocation"
},
"FromPort": "22"
},
{
"ToPort": "80",
"IpProtocol": "tcp",
"CidrIp": "0.0.0.0/0",
"FromPort": "80"
}
],
"VpcId": {
"Ref": "VPC"
},
"GroupDescription": "Enable SSH access via port 22"
}
},
"WebServerInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"#!/bin/bash\n",
"yum update -y aws-cfn-bootstrap\n",
"# Helper function\n",
"function error_exit\n",
"{\n",
" /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '",
{
"Ref": "WebServerWaitHandle"
},
"'\n",
" exit 1\n",
"}\n",
"# Install the simple web page\n",
"/opt/aws/bin/cfn-init -s ",
{
"Ref": "AWS::StackId"
},
" -r WebServerInstance ",
" --region ",
{
"Ref": "AWS::Region"
},
" || error_exit 'Failed to run cfn-init'\n",
"# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n",
"/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n",
"# All done so signal success\n",
"/opt/aws/bin/cfn-signal -e 0 -r \"WebServer setup complete\" '",
{
"Ref": "WebServerWaitHandle"
},
"'\n"
]
]
}
},
"Tags": [
{
"Value": {
"Ref": "AWS::StackId"
},
"Key": "Application"
}
],
"SecurityGroupIds": [
{
"Ref": "InstanceSecurityGroup"
}
],
"KeyName": {
"Ref": "KeyName"
},
"SubnetId": {
"Ref": "Subnet"
},
"ImageId": {
"Fn::FindInMap": [
"RegionMap",
{
"Ref": "AWS::Region"
},
"AMI"
]
},
"InstanceType": {
"Ref": "InstanceType"
}
},
"Metadata": {
"Comment": "Install a simple PHP application",
"AWS::CloudFormation::Init": {
"config": {
"files": {
"/etc/cfn/cfn-hup.conf": {
"content": {
"Fn::Join": [
"",
[
"[main]\n",
"stack=",
{
"Ref": "AWS::StackId"
},
"\n",
"region=",
{
"Ref": "AWS::Region"
},
"\n"
]
]
},
"owner": "root",
"group": "root",
"mode": "000400"
},
"/etc/cfn/hooks.d/cfn-auto-reloader.conf": {
"content": {
"Fn::Join": [
"",
[
"[cfn-auto-reloader-hook]\n",
"triggers=post.update\n",
"path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n",
"action=/opt/aws/bin/cfn-init -s ",
{
"Ref": "AWS::StackId"
},
" -r WebServerInstance ",
" --region ",
{
"Ref": "AWS::Region"
},
"\n",
"runas=root\n"
]
]
}
},
"/var/www/html/index.php": {
"content": {
"Fn::Join": [
"",
[
"<?php\n",
"echo '<h1>AWS CloudFormation sample PHP application</h1>';\n",
"?>\n"
]
]
},
"owner": "apache",
"group": "apache",
"mode": "000644"
}
},
"services": {
"sysvinit": {
"httpd": {
"ensureRunning": "true",
"enabled": "true"
},
"sendmail": {
"ensureRunning": "false",
"enabled": "false"
}
}
},
"packages": {
"yum": {
"httpd": [],
"php": []
}
}
}
}
}
},
"IPAddress": {
"Type": "AWS::EC2::EIP",
"Properties": {
"InstanceId": {
"Ref": "WebServerInstance"
},
"Domain": "vpc"
},
"DependsOn": "AttachGateway"
},
"AttachGateway": {
"Type": "AWS::EC2::VPCGatewayAttachment",
"Properties": {
"VpcId": {
"Ref": "VPC"
},
"InternetGatewayId": {
"Ref": "InternetGateway"
}
}
}
},
"Mappings": {
"RegionMap": {
"ap-southeast-1": {
"AMI": "ami-74dda626"
},
"ap-southeast-2": {
"AMI": "ami-b3990e89"
},
"us-west-2": {
"AMI": "ami-16fd7026"
},
"us-east-1": {
"AMI": "ami-7f418316"
},
"ap-northeast-1": {
"AMI": "ami-dcfa4edd"
},
"us-west-1": {
"AMI": "ami-951945d0"
},
"eu-west-1": {
"AMI": "ami-24506250"
},
"sa-east-1": {
"AMI": "ami-3e3be423"
}
}
}
}
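# A minimal usage sketch for this fixture (hypothetical test code; assumes
# boto 2 and moto as used elsewhere in this test suite):
#
#   import json
#   import boto.cloudformation
#   from moto import mock_cloudformation
#
#   @mock_cloudformation
#   def test_stack():
#       conn = boto.cloudformation.connect_to_region("us-west-1")
#       conn.create_stack("test_stack", template_body=json.dumps(template))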
|
[
"spulec@gmail.com"
] |
spulec@gmail.com
|
1f802b207229f09d76b481f39de935bcd569ef13
|
6db8aba817161dc573f16cde185f4a1c02c753e0
|
/KadaneAlgo.py
|
b019a9517f32915d97e6d71d177e4028b4821033
|
[] |
no_license
|
Prakashchater/Leetcode-array-easy-questions
|
456153a13397c895acae6550dad8f1b1851ff854
|
7c5d40f9d68dbf61f4a61a33d9b54f769473b057
|
refs/heads/main
| 2023-06-19T14:01:52.483440
| 2021-07-22T19:44:40
| 2021-07-22T19:44:40
| 354,926,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
"""
# Time: O(N) Space: O(1)
def kadaneAlgo(arr):
maxsum = 0
currsum = 0
for i in range(len(arr)):
currsum = currsum + arr[i]
if currsum > maxsum:
maxsum = currsum
if currsum < 0:
currsum = 0
return maxsum
if __name__ == '__main__':
arr = [5, -4, -2, 6, -1]
print(kadaneAlgo(arr))
"""
# Time : O(N) Space: O(1)
def kadaneAlgorithm(arr):
maxEnding = arr[0]
maxSoFar = arr[0]
for num in arr[1:]:
maxEnding = max(num, maxEnding + num)
maxSoFar = max(maxSoFar, maxEnding)
return maxSoFar
if __name__ == '__main__':
# arr = [3, 5, -9, 1, 3, -2, 3, 4, 7, 2, -9, 6, 3, 1, -5, 4]
arr = [5, -4, -2, 6, -1]
print(kadaneAlgorithm(arr))
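# A hedged illustration (not part of the original file): why kadaneAlgorithm
# initializes both trackers with arr[0]. On an all-negative array the
# commented-out version above, which starts maxsum at 0, would return 0,
# whereas Kadane's algorithm proper returns the largest single element.
def _demo_all_negative():
    arr = [-3, -5, -2, -8]
    assert kadaneAlgorithm(arr) == -2  # best subarray is the single element -2
if __name__ == '__main__':
    _demo_all_negative()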
|
[
"prakashchater@gmail.com"
] |
prakashchater@gmail.com
|
1331240bbd0b83d5db30d30dc807f04821e2ffaa
|
927b50cdaf1c384c8bbf6f13816d0ba465852fd8
|
/main/admin.py
|
28531a2a583252a8277b2b1e07b6cbe1b50faae0
|
[
"MIT"
] |
permissive
|
jhabarsingh/DOCMED
|
f37d336483cffd874b0a7db43677c08a47bd639c
|
8a831886d3dd415020699491687fb73893e674c5
|
refs/heads/main
| 2023-04-26T06:45:10.409633
| 2021-05-19T14:37:53
| 2021-05-19T14:37:53
| 316,683,855
| 3
| 5
|
MIT
| 2021-02-21T13:32:33
| 2020-11-28T07:51:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
from django.contrib import admin
from main.models import UserCategory, Patient, Doctor, Appointment, Prescription, Contact
# Register your models here.
admin.site.register(UserCategory)
admin.site.register(Patient)
admin.site.register(Contact)
admin.site.register(Doctor)
admin.site.register(Appointment)
admin.site.register(Prescription)
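# A hedged sketch (not part of the original file): the registrations above use
# the default ModelAdmin. A customized registration would look like this; only
# "id" is assumed as a list column, since the model fields are not shown here.
class AppointmentAdmin(admin.ModelAdmin):
    list_display = ("id",)
admin.site.unregister(Appointment)
admin.site.register(Appointment, AppointmentAdmin)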
|
[
"jhabarsinghbhati23@gmail.com"
] |
jhabarsinghbhati23@gmail.com
|
f485063b77515a897b7551e37b28a77deb3059e6
|
700577285824a21df647aba584d51420db59c598
|
/OpenColibri/allauth/socialaccount/providers/oauth2/views.py
|
669a16b17622bb341dac4351bf60494a8bf28ae9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
epu-ntua/opencolibri
|
2c05acc43ef1b1c86608f6e729a4f83773b01b73
|
78e2411f78a0213b3961145cfe67cd52398cea70
|
refs/heads/master
| 2016-09-11T02:39:43.798777
| 2014-04-06T11:30:39
| 2014-04-06T11:30:39
| 15,764,540
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from allauth.socialaccount.helpers import render_authentication_error
from allauth.socialaccount import providers
from allauth.socialaccount.providers.oauth2.client import (OAuth2Client,
OAuth2Error)
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.models import SocialToken, SocialLogin
class OAuth2Adapter(object):
def get_provider(self):
return providers.registry.by_id(self.provider_id)
def complete_login(self, request, app, access_token):
"""
Returns a SocialLogin instance
"""
raise NotImplementedError
class OAuth2View(object):
@classmethod
def adapter_view(cls, adapter):
def view(request, *args, **kwargs):
self = cls()
self.request = request
self.adapter = adapter()
return self.dispatch(request, *args, **kwargs)
return view
def get_client(self, request, app):
callback_url = reverse(self.adapter.provider_id + "_callback")
callback_url = request.build_absolute_uri(callback_url)
client = OAuth2Client(self.request, app.client_id, app.secret,
self.adapter.authorize_url,
self.adapter.access_token_url,
callback_url,
self.adapter.get_provider().get_scope())
return client
class OAuth2LoginView(OAuth2View):
def dispatch(self, request):
app = self.adapter.get_provider().get_app(self.request)
client = self.get_client(request, app)
client.state = SocialLogin.marshall_state(request)
try:
return HttpResponseRedirect(client.get_redirect_url())
except OAuth2Error:
return render_authentication_error(request)
class OAuth2CallbackView(OAuth2View):
def dispatch(self, request):
        if 'error' in request.GET or 'code' not in request.GET:
# TODO: Distinguish cancel from error
return render_authentication_error(request)
app = self.adapter.get_provider().get_app(self.request)
client = self.get_client(request, app)
try:
access_token = client.get_access_token(request.GET['code'])
token = SocialToken(app=app,
token=access_token)
login = self.adapter.complete_login(request,
app,
token)
token.account = login.account
login.token = token
login.state = SocialLogin.unmarshall_state(request.REQUEST
.get('state'))
return complete_social_login(request, login)
except OAuth2Error:
return render_authentication_error(request)
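# A hedged sketch (not part of the original file) of how these base classes are
# meant to be combined: a concrete adapter supplies provider_id plus endpoint
# URLs and implements complete_login. The provider id and URLs below are
# hypothetical placeholders, not a real provider.
class ExampleOAuth2Adapter(OAuth2Adapter):
    provider_id = 'example'                                # hypothetical id
    authorize_url = 'https://example.com/oauth/authorize'  # hypothetical URL
    access_token_url = 'https://example.com/oauth/token'   # hypothetical URL
    def complete_login(self, request, app, access_token):
        # a real adapter would call the provider's profile API here and
        # build a SocialLogin from the response
        raise NotImplementedError
oauth2_login = OAuth2LoginView.adapter_view(ExampleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(ExampleOAuth2Adapter)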
|
[
"smouzakitis@epu.ntua.gr"
] |
smouzakitis@epu.ntua.gr
|
eca051c8d2c5c42eb02bcae3847065a964ac1b97
|
721c11ed6bc1bfd1898290baebea5cd065ab84d1
|
/train/jfda/detector.py
|
f7f25571a52fe73b3147ab2d87628a8ff80ddb94
|
[
"MIT"
] |
permissive
|
cnanyi/MTCNN
|
1cea91027bbcb750a04e9c324f9319daf74bbf74
|
33d171a93e25f65e3de4e39aa31b75585cd70be2
|
refs/heads/master
| 2023-02-08T22:39:35.000375
| 2021-01-03T09:29:00
| 2021-01-06T14:50:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,981
|
py
|
# pylint: disable=bad-indentation, no-member, invalid-name, line-too-long
import math
import cv2
import caffe
import numpy as np
from utils import crop_face, Timer
class JfdaDetector:
'''JfdaDetector
'''
def __init__(self, nets):
assert len(nets) in [2, 4, 6, 8], 'wrong number of nets'
self.pnet, self.rnet, self.onet, self.lnet = None, None, None, None
if len(nets) >= 2:
self.pnet = caffe.Net(nets[0], caffe.TEST, weights=nets[1])
if len(nets) >= 4:
self.rnet = caffe.Net(nets[2], caffe.TEST, weights=nets[3])
if len(nets) >= 6:
self.onet = caffe.Net(nets[4], caffe.TEST, weights=nets[5])
if len(nets) >= 8:
self.lnet = caffe.Net(nets[6], caffe.TEST, weights=nets[7])
self.pnet_single_forward = False
def set_pnet_single_forward(self, single_forward=True):
'''convert image pyramid to a single image and forward once
'''
self.pnet_single_forward = single_forward
def detect(self, img, ths, min_size, factor, debug=False):
'''detect face, return bboxes, [bbox score offset landmark]
if debug is on, return bboxes of every stage and time consumption
'''
timer = Timer()
ts = [0, 0, 0, 0]
bb = [[], [], [], []]
# stage-1
timer.tic()
base = 12. / min_size
height, width = img.shape[:-1]
l = min(width, height)
l *= base
scales = []
while l > 12:
scales.append(base)
base *= factor
l *= factor
if not self.pnet_single_forward or len(scales) <= 1:
bboxes = np.zeros((0, 4 + 1 + 4 + 10), dtype=np.float32)
for scale in scales:
w, h = int(math.ceil(scale * width)), int(math.ceil(scale * height))
data = cv2.resize(img, (w, h))
data = data.transpose((2, 0, 1)).astype(np.float32)
data = (data - 128) / 128
data = data.reshape((1, 3, h, w))
prob, bbox_pred, landmark_pred = self._forward(self.pnet, data, ['prob', 'bbox_pred', 'landmark_pred'])
_bboxes = self._gen_bbox(prob[0][1], bbox_pred[0], landmark_pred[0], scale, ths[0])
keep = nms(_bboxes, 0.5)
_bboxes = _bboxes[keep]
bboxes = np.vstack([bboxes, _bboxes])
else:
# convert to a single image
data, pyramid_info = convert_image_pyramid(img, scales, interval=2)
# forward pnet
data = data.astype(np.float32)
data = (data.transpose((2, 0, 1)) - 128) / 128
data = data[np.newaxis, :, :, :]
prob, bbox_pred, landmark_pred = self._forward(self.pnet, data, ['prob', 'bbox_pred', 'landmark_pred'])
bboxes = self._gen_bbox(prob[0][1], bbox_pred[0], landmark_pred[0], 1, ths[0])
# nms over every pyramid
keep = nms(bboxes, 0.5)
bboxes = bboxes[keep]
# map to original image
bboxes = get_original_bboxes(bboxes, pyramid_info)
keep = nms(bboxes, 0.7)
bboxes = bboxes[keep]
bboxes = self._bbox_reg(bboxes)
bboxes = self._make_square(bboxes)
timer.toc()
ts[0] = timer.elapsed()
bb[0] = bboxes.copy()
self._clear_network_buffer(self.pnet)
# stage-2
if self.rnet is None or len(bboxes) == 0:
if debug is True:
return bb, ts
else:
return bboxes
timer.tic()
n = len(bboxes)
data = np.zeros((n, 3, 24, 24), dtype=np.float32)
for i, bbox in enumerate(bboxes):
face = crop_face(img, bbox[:4])
data[i] = cv2.resize(face, (24, 24)).transpose((2, 0, 1))
data = (data - 128) / 128
prob, bbox_pred, landmark_pred = self._forward(self.rnet, data, ['prob', 'bbox_pred', 'landmark_pred'])
prob = prob.reshape(n, 2)
bbox_pred = bbox_pred.reshape(n, 4)
landmark_pred = landmark_pred.reshape(n, 10)
keep = prob[:, 1] > ths[1]
bboxes = bboxes[keep]
bboxes[:, 4] = prob[keep, 1]
bboxes[:, 5:9] = bbox_pred[keep]
bboxes[:, 9:] = landmark_pred[keep]
keep = nms(bboxes, 0.7)
bboxes = bboxes[keep]
bboxes = self._bbox_reg(bboxes)
bboxes = self._make_square(bboxes)
timer.toc()
ts[1] = timer.elapsed()
bb[1] = bboxes.copy()
self._clear_network_buffer(self.rnet)
# stage-3
if self.onet is None or len(bboxes) == 0:
if debug is True:
return bb, ts
else:
return bboxes
timer.tic()
n = len(bboxes)
data = np.zeros((n, 3, 48, 48), dtype=np.float32)
for i, bbox in enumerate(bboxes):
face = crop_face(img, bbox[:4])
data[i] = cv2.resize(face, (48, 48)).transpose((2, 0, 1))
data = (data - 128) / 128
prob, bbox_pred, landmark_pred = self._forward(self.onet, data, ['prob', 'bbox_pred', 'landmark_pred'])
prob = prob.reshape(n, 2)
bbox_pred = bbox_pred.reshape(n, 4)
landmark_pred = landmark_pred.reshape(n, 10)
keep = prob[:, 1] > ths[2]
bboxes = bboxes[keep]
bboxes[:, 4] = prob[keep, 1]
bboxes[:, 5:9] = bbox_pred[keep]
bboxes[:, 9:] = landmark_pred[keep]
bboxes = self._locate_landmark(bboxes)
bboxes = self._bbox_reg(bboxes)
keep = nms(bboxes, 0.7, 'Min')
bboxes = bboxes[keep]
timer.toc()
ts[2] = timer.elapsed()
bb[2] = bboxes.copy()
self._clear_network_buffer(self.onet)
# stage-4
if self.lnet is None or len(bboxes) == 0:
if debug is True:
return bb, ts
else:
return bboxes
timer.tic()
n = len(bboxes)
data = np.zeros((n, 15, 24, 24), dtype=np.float32)
w, h = bboxes[:, 2]-bboxes[:, 0], bboxes[:, 3]-bboxes[:, 1]
l = np.maximum(w, h) * 0.25
for i in range(len(bboxes)):
x1, y1, x2, y2 = bboxes[i, :4]
landmark = bboxes[i, 9:].reshape((5, 2))
for j in range(5):
x, y = landmark[j]
patch_bbox = [x-l[i]/2, y-l[i]/2, x+l[i]/2, y+l[i]/2]
patch = crop_face(img, patch_bbox)
patch = cv2.resize(patch, (24, 24))
patch = patch.transpose((2, 0, 1))
data[i, (3*j):(3*j+3)] = patch
data = (data - 128) / 128
offset = self._forward(self.lnet, data, ['landmark_offset'])[0]
offset = offset.reshape(n, 10)
offset *= l.reshape((-1, 1))
bboxes[:, 9:] += offset
timer.toc()
ts[3] = timer.elapsed()
bb[3] = bboxes.copy()
self._clear_network_buffer(self.lnet)
if debug is True:
return bb, ts
else:
return bboxes
def _forward(self, net, data, outs):
'''forward a net with given data, return blobs[out]
'''
net.blobs['data'].reshape(*data.shape)
net.blobs['data'].data[...] = data
net.forward()
return [net.blobs[out].data for out in outs]
def _clear_network_buffer(self, net):
if net is self.pnet:
fake = np.zeros((1, 3, 12, 12), dtype=np.float32)
elif net is self.rnet:
fake = np.zeros((1, 3, 24, 24), dtype=np.float32)
elif net is self.onet:
fake = np.zeros((1, 3, 48, 48), dtype=np.float32)
else:
fake = np.zeros((1, 15, 24, 24), dtype=np.float32)
net.blobs['data'].reshape(*fake.shape)
net.blobs['data'].data[...] = fake
net.forward()
def _gen_bbox(self, hotmap, offset, landmark, scale, th):
'''[x1, y1, x2, y2, score, offset_x1, offset_y1, offset_x2, offset_y2]
'''
h, w = hotmap.shape
stride = 2
win_size = 12
hotmap = hotmap.reshape((h, w))
keep = hotmap > th
pos = np.where(keep)
score = hotmap[keep]
offset = offset[:, keep]
landmark = landmark[:, keep]
x, y = pos[1], pos[0]
x1 = stride * x
y1 = stride * y
x2 = x1 + win_size
y2 = y1 + win_size
x1 = x1 / scale
y1 = y1 / scale
x2 = x2 / scale
y2 = y2 / scale
bbox = np.vstack([x1, y1, x2, y2, score, offset, landmark]).transpose()
return bbox.astype(np.float32)
def _locate_landmark(self, bboxes):
w = bboxes[:, 2] - bboxes[:, 0]
h = bboxes[:, 3] - bboxes[:, 1]
bboxes[:, 9::2] = bboxes[:, 9::2] * w.reshape((-1, 1)) + bboxes[:, 0].reshape((-1, 1))
bboxes[:, 10::2] = bboxes[:, 10::2] * h.reshape((-1, 1)) + bboxes[:, 1].reshape((-1, 1))
return bboxes
def _bbox_reg(self, bboxes):
w = bboxes[:, 2] - bboxes[:, 0]
h = bboxes[:, 3] - bboxes[:, 1]
bboxes[:, 0] += bboxes[:, 5] * w
bboxes[:, 1] += bboxes[:, 6] * h
bboxes[:, 2] += bboxes[:, 7] * w
bboxes[:, 3] += bboxes[:, 8] * h
return bboxes
def _make_square(self, bboxes):
    '''make bboxes square
'''
x_center = (bboxes[:, 0] + bboxes[:, 2]) / 2
y_center = (bboxes[:, 1] + bboxes[:, 3]) / 2
w = bboxes[:, 2] - bboxes[:, 0]
h = bboxes[:, 3] - bboxes[:, 1]
size = np.vstack([w, h]).max(axis=0).transpose()
bboxes[:, 0] = x_center - size / 2
bboxes[:, 2] = x_center + size / 2
bboxes[:, 1] = y_center - size / 2
bboxes[:, 3] = y_center + size / 2
return bboxes
def nms(dets, thresh, meth='Union'):
'''nms from py-faster-rcnn
'''
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if meth == 'Union':
ovr = inter / (areas[i] + areas[order[1:]] - inter)
else:
ovr = inter / np.minimum(areas[i], areas[order[1:]])
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def convert_image_pyramid(img, scales, interval=2):
"""convert image pyramid to a single image
Parameters
==========
img: image
scales: pyramid scales
interval: interval pixels between pyramid images
Returns
=======
result: image pyramid in a single image
bboxes: every pyramid image in the result image with position and scale information,
(x, y, w, h, scale)
"""
assert len(scales) >= 2
height, width = img.shape[:2]
pyramids = []
for scale in scales:
w, h = int(math.ceil(scale*width)), int(math.ceil(scale*height))
img_pyramid = cv2.resize(img, (w, h))
pyramids.append(img_pyramid)
input_h, input_w = pyramids[0].shape[:2]
# x, y, w, h
bboxes = [[0, 0, img.shape[1], img.shape[0], scale] for img, scale in zip(pyramids, scales)]
if input_h < input_w:
output_h = input_h + interval + pyramids[1].shape[0]
output_w = 0
available = [[0, 0]]
for bbox in bboxes:
min_used_width = 3 * width
      chosen = -1
      for i, (x, y) in enumerate(available):
        if y + bbox[3] <= output_h and x + bbox[2] < min_used_width:
          min_used_width = x + bbox[2]
          bbox[0], bbox[1] = x, y
          chosen = i
      assert chosen != -1, "No suitable position for this pyramid scale"
      # extend available positions
      x, y = available[chosen]
      w, h = bbox[2:4]
      available[chosen][0] = x + interval + w
      available[chosen][1] = y
available.append([x, y + interval + h])
output_w = max(output_w, min_used_width)
else:
output_w = input_w + interval + pyramids[1].shape[1]
output_h = 0
available = [[0, 0]]
for bbox in bboxes:
min_used_height = 3 * height
      chosen = -1
      for i, (x, y) in enumerate(available):
        if x + bbox[2] <= output_w and y + bbox[3] < min_used_height:
          min_used_height = y + bbox[3]
          bbox[0], bbox[1] = x, y
          chosen = i
      assert chosen != -1, "No suitable position for this pyramid scale"
      # extend available positions
      x, y = available[chosen]
      w, h = bbox[2:4]
      available[chosen][0] = x + interval + w
      available[chosen][1] = y
available.append([x, y + interval + h])
output_h = max(output_h, min_used_height)
# convert to a single image
result = np.zeros((output_h, output_w, 3), dtype=np.uint8)
for bbox, pyramid in zip(bboxes, pyramids):
x, y, w, h, scale = bbox
assert pyramid.shape[0] == h and pyramid.shape[1] == w
result[y:y+h, x:x+w, :] = pyramid
return result, bboxes
def get_original_bboxes(bboxes, pyramid_info):
"""get original bboxes
Parameters
==========
bboxes: detected bboxes
pyramid_info: information of pyramid from `convert_image_pyramid`
Returns
=======
bboxes_ori: bboxes in original image
"""
count = 0
bboxes_ori = np.zeros((0, bboxes.shape[1]), dtype=np.float32)
for x, y, w, h, scale in pyramid_info:
x1, y1, x2, y2 = x, y, x+w, y+h
idx = np.logical_and(
np.logical_and(bboxes[:, 0] >= x1, bboxes[:, 1] >= y1),
np.logical_and(bboxes[:, 2] <= x2, bboxes[:, 3] <= y2))
bboxes[idx, 0] = (bboxes[idx, 0] - x1) / scale
bboxes[idx, 1] = (bboxes[idx, 1] - y1) / scale
bboxes[idx, 2] = (bboxes[idx, 2] - x1) / scale
bboxes[idx, 3] = (bboxes[idx, 3] - y1) / scale
bboxes_ori = np.vstack([bboxes_ori, bboxes[idx]])
count += idx.sum()
#assert count == len(bboxes), "generate bboxes gives wrong number"
return bboxes_ori
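# A hedged usage sketch (not part of the original file). The prototxt/caffemodel
# paths and the test image are hypothetical; the thresholds and pyramid settings
# below are common MTCNN-style defaults, not values taken from this file.
if __name__ == '__main__':
  nets = ['p.prototxt', 'p.caffemodel',  # hypothetical file names
          'r.prototxt', 'r.caffemodel',
          'o.prototxt', 'o.caffemodel']
  detector = JfdaDetector(nets)
  img = cv2.imread('test.jpg')           # hypothetical input image
  bboxes = detector.detect(img, ths=[0.6, 0.7, 0.8], min_size=24, factor=0.709)
  print('%d faces detected' % len(bboxes))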
|
[
"1293830063@qq.com"
] |
1293830063@qq.com
|
c738ca9f29e6ac11acd68e45f8559240446bfa5b
|
b7da8d949067a6a7c3a177693ba1bc1f1fe4d533
|
/quantdigger/branches/quantweb/demo/backup/dstushare.py
|
5a9cb6ddd582847d0365cd348ccd8339f0c35624
|
[] |
no_license
|
sunlandli/stock
|
422722d14405e368f7f295ef2ef7c40e880cd4ef
|
0112f70335d5ae17b05093ac7c36824a41caf88c
|
refs/heads/master
| 2021-07-15T15:21:45.834055
| 2017-10-19T07:05:13
| 2017-10-19T07:05:13
| 100,480,563
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,510
|
py
|
# -*- coding: utf-8 -*-
import pandas as pd
import tushare as ts
import cache as cc
# tushare's get_h_data interface only fetches one year of data by default,
# so we use a very early start date here to force fetching the full history.
_VERY_EARLY_START = '1988-12-12'
def _process_dt(dt):
return str(pd.to_datetime(dt).date()) if dt else None
def _process_tushare_data(data):
data.open = data.open.astype(float)
data.close = data.close.astype(float)
data.high = data.high.astype(float)
data.low = data.low.astype(float)
    ## @todo bug: data.volume contains float values!
data.volume = data.volume.astype(int)
data.amount = data.amount.astype(float)
data.index.names = ['datetime']
data.index = pd.to_datetime(data.index)
return data
class StockTsSource(object):
    '''tushare stock data source'''
def __init__(self):
pass
def load_data(self, pcontract, dt_start=None, dt_end=None):
dt_start = _process_dt(dt_start)
if not dt_start: dt_start = _VERY_EARLY_START
dt_end = _process_dt(dt_end)
data = ts.get_h_data(pcontract.contract.code,
start=dt_start, end=dt_end)
if data is None:
return None
else:
return _process_tushare_data(data.iloc[::-1])
class CachedStockTsSource(cc.CachedDatasource):
def __init__(self, base_path):
datasource = StockTsSource()
cache = cc.LocalFsCache(base_path)
super(CachedStockTsSource, self).__init__(datasource, cache)
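# A hedged usage sketch (not part of the original file). The cache path is a
# placeholder, and pcontract stands for any object exposing a contract.code
# attribute, which is all load_data above relies on.
#
# source = CachedStockTsSource('/tmp/ts_cache')
# data = source.load_data(pcontract, dt_start='2015-01-01', dt_end='2016-01-01')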
|
[
"sunlandli@tom.com"
] |
sunlandli@tom.com
|
667ab42b912802cfb95982deef3a8ed88a2b595b
|
0a88cb38439a3e19dd4d4f052895c94f19979649
|
/box/Scripts/migrate-script.py
|
ceadc22465e8cc343d82766ee1a183e9d804415d
|
[] |
no_license
|
pace-noge/online_shop
|
a9427fbb2eff205af87f03c0bbb3585a37bb697f
|
ac20010d4046c86b399217cab09267c890a777c2
|
refs/heads/master
| 2020-05-18T03:03:16.364749
| 2013-02-15T06:28:28
| 2013-02-15T06:28:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
#!C:\EmeraldBox\box\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'sqlalchemy-migrate==0.7.2','console_scripts','migrate'
__requires__ = 'sqlalchemy-migrate==0.7.2'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('sqlalchemy-migrate==0.7.2', 'console_scripts', 'migrate')()
)
|
[
"nasa.freaks@gmail.com"
] |
nasa.freaks@gmail.com
|
61c2511bc853396c22902d39e5574a0bff6f5c68
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D12A/CODECOD12AUN.py
|
b336b95e699e8b394f68b938224e628f94b6e90f
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD12AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'TDT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'GID', MIN: 0, MAX: 999, LEVEL: [
{ID: 'HAN', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'PIA', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'TMP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'SGP', MIN: 0, MAX: 999},
{ID: 'DGS', MIN: 0, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
]},
{ID: 'EQD', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 99},
{ID: 'TMD', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 99},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 99},
{ID: 'DIM', MIN: 0, MAX: 99},
{ID: 'SEL', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'PCD', MIN: 0, MAX: 9},
{ID: 'EQA', MIN: 0, MAX: 9},
{ID: 'COD', MIN: 0, MAX: 1},
{ID: 'HAN', MIN: 0, MAX: 9},
{ID: 'DAM', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COD', MIN: 0, MAX: 1},
]},
{ID: 'TDT', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
{ID: 'NAD', MIN: 0, MAX: 9},
{ID: 'DGS', MIN: 0, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'TMP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 1},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
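# A hedged reading aid (not part of the generated file): each dict above is one
# EDIFACT segment -- ID is the segment tag, MIN/MAX bound how often it may occur,
# and LEVEL nests the segments that belong to its group. The NAD group, for
# instance, allows 1-9 name/address segments, each with up to 9 contacts (CTA)
# carrying up to 9 communication channels (COM).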
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
5e5f8bba2d3970f3c9198111bf0ec501b57389a9
|
7e4c86df42e3c4780e9b01026c904769f6dc39ec
|
/class-35/demos/graph/test_graph.py
|
2f7a9fb4b5e07ca80c434c89c3572f1c68ba882f
|
[] |
no_license
|
everydaytimmy/seattle-python-401d16
|
3a85c55204471ae11ce31cf0b6fc688eea7aee9f
|
372a4c097f6c217098d1f8a420a37168e2c6229f
|
refs/heads/main
| 2023-06-25T03:55:06.288698
| 2021-07-13T19:08:31
| 2021-07-13T19:08:31
| 370,841,803
| 0
| 0
| null | 2021-05-25T22:20:32
| 2021-05-25T22:20:32
| null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
"""
Implement your own Graph. The graph should be represented as an adjacency list, and should include the following methods:
add node
Arguments: value
Returns: The added node
Add a node to the graph
add edge
Arguments: 2 nodes to be connected by the edge, weight (optional)
Returns: nothing
Adds a new edge between two nodes in the graph
If specified, assign a weight to the edge
Both nodes should already be in the Graph
get nodes
Arguments: none
Returns all of the nodes in the graph as a collection (set, list, or similar)
get neighbors
Arguments: node
Returns a collection of edges connected to the given node
Include the weight of the connection in the returned collection
size
Arguments: none
Returns the total number of nodes in the graph
TESTS
An empty graph properly returns None
"""
from graph import Graph, Vertex
def test_add_node():
graph = Graph()
expected_value = "spam"
actual = graph.add_node("spam")
assert actual.value == expected_value
def test_get_nodes_one():
graph = Graph()
graph.add_node("spam")
actual = graph.get_nodes()
expected = 1
assert len(actual) == expected
assert isinstance(actual[0], Vertex)
assert actual[0].value == "spam"
# REFACTOR to not do so much
def test_get_nodes_two():
graph = Graph()
graph.add_node("spam")
graph.add_node("eggs")
actual = graph.get_nodes()
expected = 2
assert len(actual) == expected
assert isinstance(actual[0], Vertex)
assert isinstance(actual[1], Vertex)
assert actual[0].value == "spam"
assert actual[1].value == "eggs"
def test_size_two():
graph = Graph()
graph.add_node("spam")
graph.add_node("eggs")
actual = graph.size()
expected = 2
assert actual == expected
def test_add_edge_no_weight():
graph = Graph()
spam_vertex = graph.add_node("spam")
eggs_vertex = graph.add_node("eggs")
return_val = graph.add_edge(spam_vertex, eggs_vertex)
assert return_val is None
def test_get_neighbors():
graph = Graph()
spam_vertex = graph.add_node("spam")
eggs_vertex = graph.add_node("eggs")
graph.add_edge(spam_vertex, eggs_vertex, 5)
neighbors = graph.get_neighbors(spam_vertex)
assert len(neighbors) == 1
single_edge = neighbors[0]
assert single_edge.vertex.value == "eggs"
assert single_edge.weight == 5
def test_get_neighbors_solo():
graph = Graph()
spam_vertex = graph.add_node("spam")
graph.add_edge(spam_vertex, spam_vertex)
neighbors = graph.get_neighbors(spam_vertex)
assert len(neighbors) == 1
single_edge = neighbors[0]
assert single_edge.vertex.value == "spam"
assert single_edge.weight == 0
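# A hedged sketch (not part of the original file) of the graph module these
# tests assume. The attribute names (Vertex.value, Edge.vertex, Edge.weight) and
# the default weight of 0 are taken from the assertions above; everything else
# is inferred, so it is kept commented out to avoid shadowing the real import.
#
# class Vertex:
#     def __init__(self, value):
#         self.value = value
#
# class Edge:
#     def __init__(self, vertex, weight=0):
#         self.vertex = vertex
#         self.weight = weight
#
# class Graph:
#     def __init__(self):
#         self._adjacency = {}
#     def add_node(self, value):
#         vertex = Vertex(value)
#         self._adjacency[vertex] = []
#         return vertex
#     def add_edge(self, start, end, weight=0):
#         self._adjacency[start].append(Edge(end, weight))
#     def get_nodes(self):
#         return list(self._adjacency)
#     def get_neighbors(self, vertex):
#         return self._adjacency[vertex]
#     def size(self):
#         return len(self._adjacency)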
|
[
"jb.tellez@gmail.com"
] |
jb.tellez@gmail.com
|
b68210d8b6efbf80cbe001da472b636f94873e39
|
18be742be30452764865a830abb2327ab779b01d
|
/ARP/ARPClient/main.py
|
bb2b1e3ee48188b03819be13f30bea51a684ac9a
|
[] |
no_license
|
mhdr/Thesis
|
06823914a1f9eca7d75501844c15ac20435cf754
|
9242d25143cc2fcbf768ca0d01dc80a7521fdf6c
|
refs/heads/master
| 2021-01-10T06:30:44.860331
| 2016-01-02T05:28:48
| 2016-01-02T05:28:48
| 46,107,026
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,572
|
py
|
import socket
import threading
from colorama import Fore
from datetime import datetime
from datetime import timedelta
import os
import time
from Message import Message
counter=0
mac_list=[]
timer_initial_value=5
timer=timer_initial_value
is_traffic_blocked=False
# A single module-level lock; the original created a fresh BoundedSemaphore
# inside each call, which never actually synchronized anything across threads.
lock=threading.Lock()
def get_counter():
    global counter
    lock.acquire()
    counter=counter+1
    lock.release()
    return counter
def fetch():
while True:
global HOST
global PORT
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall(b'1')
data_recv=s.recv(1024)
data=bytearray()
s.settimeout(1)
while len(data_recv)>0:
data.extend(data_recv)
if len(data_recv) < 1024:
break
try:
data_recv=s.recv(1024)
except socket.timeout:
break
s.close()
if len(data)>0:
message= Message.loads(data)
            if False: assert isinstance(message,Message)  # IDE type-hint hack; never executed
current_time=datetime.now()
if len(message.macs)>0:
if message.verify():
if current_time - message.time < timedelta(seconds=5):
for mac in message.macs:
global mac_list
if mac not in mac_list:
cmd1="arptables -A INPUT --source-mac {0} -j DROP".format(mac)
cmd2="iptables -A INPUT -m mac --mac-source {0} -j DROP".format(mac)
cmd3="ip neighbour flush all"
os.system(cmd1)
os.system(cmd2)
os.system(cmd3)
mac_list.append(mac)
print("{0} - ".format(get_counter()) + Fore.MAGENTA + "MAC : {0}".format(mac) + Fore.RESET)
reset_timer()
else:
print("{0} - ".format(get_counter()) + Fore.RED +
"We received some data,but it seems they are manipulated." + Fore.RESET)
else:
                global mac_list
                if False: assert isinstance(mac_list,list)  # IDE type-hint hack; never executed
                for mac in list(mac_list):  # iterate over a copy: removing while iterating skips elements
cmd1="arptables -D INPUT --source-mac {0} -j DROP".format(mac)
cmd2="iptables -D INPUT -m mac --mac-source {0} -j DROP".format(mac)
os.system(cmd1)
os.system(cmd2)
mac_list.remove(mac)
print("{0} - ".format(get_counter()) + Fore.GREEN + "Safe" + Fore.RESET)
reset_timer()
time.sleep(1)
def get_arp_server_mac():
while True:
global HOST
global PORT
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall(b'2')
data_recv=s.recv(1024)
data=bytearray()
s.settimeout(1)
while len(data_recv)>0:
data.extend(data_recv)
if len(data_recv) < 1024:
break
try:
data_recv=s.recv(1024)
except socket.timeout:
break
s.close()
mac=data
return mac
def block_traffic():
global is_traffic_blocked
global arp_server_mac
cmd1="arptables -P INPUT DROP"
cmd2="arptables -A INPUT --source-mac {0} -j ACCEPT".format(arp_server_mac)
os.system(cmd1)
os.system(cmd2)
is_traffic_blocked=True
def allow_traffic():
global is_traffic_blocked
os.system("arptables -P INPUT ACCEPT")
os.system("arptables -F")
os.system("ip neighbour flush all")
is_traffic_blocked=False
def run_timer():
global timer
global is_traffic_blocked
while True:
time.sleep(1)
if timer>0:
timer=timer-1
else:
            if not is_traffic_blocked:
block_traffic()
def reset_timer():
    global timer
    global timer_initial_value
    global is_traffic_blocked
    lock.acquire()
    timer=timer_initial_value
    if is_traffic_blocked:
        allow_traffic()
    lock.release()
HOST = '192.168.1.104'
PORT = 11000
os.system("arptables -F")
os.system("iptables -F")
arp_server_mac=get_arp_server_mac()
threading.Thread(target=fetch).start()
threading.Thread(target=run_timer).start()
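# A hedged note (not part of the original file): this script must run as root,
# since it shells out to arptables/iptables/ip. The protocol, as read from the
# code above: the client polls HOST:PORT every second, sending b'1' to fetch the
# signed MAC blocklist and b'2' (once, at startup) to fetch the ARP server's own
# MAC; if no valid message arrives before the 5-second timer expires, all ARP
# traffic except the server's is dropped until a fresh message resets the timer.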
|
[
"ramzani.mahmood@gmail.com"
] |
ramzani.mahmood@gmail.com
|
48c55f7e8add217fd0d8ebd93b3e5374465ab76c
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/gaming/v1/gaming-v1-py/google/cloud/gaming/__init__.py
|
89c8b7badaaf852bab45a5e7dbfdc0a57e9e0f73
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,404
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.gaming_v1.services.game_server_clusters_service.async_client import GameServerClustersServiceAsyncClient
from google.cloud.gaming_v1.services.game_server_clusters_service.client import GameServerClustersServiceClient
from google.cloud.gaming_v1.services.game_server_configs_service.async_client import GameServerConfigsServiceAsyncClient
from google.cloud.gaming_v1.services.game_server_configs_service.client import GameServerConfigsServiceClient
from google.cloud.gaming_v1.services.game_server_deployments_service.async_client import GameServerDeploymentsServiceAsyncClient
from google.cloud.gaming_v1.services.game_server_deployments_service.client import GameServerDeploymentsServiceClient
from google.cloud.gaming_v1.services.realms_service.async_client import RealmsServiceAsyncClient
from google.cloud.gaming_v1.services.realms_service.client import RealmsServiceClient
from google.cloud.gaming_v1.types.common import DeployedFleetDetails
from google.cloud.gaming_v1.types.common import LabelSelector
from google.cloud.gaming_v1.types.common import OperationMetadata
from google.cloud.gaming_v1.types.common import OperationStatus
from google.cloud.gaming_v1.types.common import RealmSelector
from google.cloud.gaming_v1.types.common import Schedule
from google.cloud.gaming_v1.types.common import SpecSource
from google.cloud.gaming_v1.types.common import TargetDetails
from google.cloud.gaming_v1.types.common import TargetState
from google.cloud.gaming_v1.types.game_server_clusters import CreateGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_clusters import DeleteGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_clusters import GameServerCluster
from google.cloud.gaming_v1.types.game_server_clusters import GameServerClusterConnectionInfo
from google.cloud.gaming_v1.types.game_server_clusters import GetGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_clusters import GkeClusterReference
from google.cloud.gaming_v1.types.game_server_clusters import ListGameServerClustersRequest
from google.cloud.gaming_v1.types.game_server_clusters import ListGameServerClustersResponse
from google.cloud.gaming_v1.types.game_server_clusters import PreviewCreateGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_clusters import PreviewCreateGameServerClusterResponse
from google.cloud.gaming_v1.types.game_server_clusters import PreviewDeleteGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_clusters import PreviewDeleteGameServerClusterResponse
from google.cloud.gaming_v1.types.game_server_clusters import PreviewUpdateGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_clusters import PreviewUpdateGameServerClusterResponse
from google.cloud.gaming_v1.types.game_server_clusters import UpdateGameServerClusterRequest
from google.cloud.gaming_v1.types.game_server_configs import CreateGameServerConfigRequest
from google.cloud.gaming_v1.types.game_server_configs import DeleteGameServerConfigRequest
from google.cloud.gaming_v1.types.game_server_configs import FleetConfig
from google.cloud.gaming_v1.types.game_server_configs import GameServerConfig
from google.cloud.gaming_v1.types.game_server_configs import GetGameServerConfigRequest
from google.cloud.gaming_v1.types.game_server_configs import ListGameServerConfigsRequest
from google.cloud.gaming_v1.types.game_server_configs import ListGameServerConfigsResponse
from google.cloud.gaming_v1.types.game_server_configs import ScalingConfig
from google.cloud.gaming_v1.types.game_server_deployments import CreateGameServerDeploymentRequest
from google.cloud.gaming_v1.types.game_server_deployments import DeleteGameServerDeploymentRequest
from google.cloud.gaming_v1.types.game_server_deployments import FetchDeploymentStateRequest
from google.cloud.gaming_v1.types.game_server_deployments import FetchDeploymentStateResponse
from google.cloud.gaming_v1.types.game_server_deployments import GameServerConfigOverride
from google.cloud.gaming_v1.types.game_server_deployments import GameServerDeployment
from google.cloud.gaming_v1.types.game_server_deployments import GameServerDeploymentRollout
from google.cloud.gaming_v1.types.game_server_deployments import GetGameServerDeploymentRequest
from google.cloud.gaming_v1.types.game_server_deployments import GetGameServerDeploymentRolloutRequest
from google.cloud.gaming_v1.types.game_server_deployments import ListGameServerDeploymentsRequest
from google.cloud.gaming_v1.types.game_server_deployments import ListGameServerDeploymentsResponse
from google.cloud.gaming_v1.types.game_server_deployments import PreviewGameServerDeploymentRolloutRequest
from google.cloud.gaming_v1.types.game_server_deployments import PreviewGameServerDeploymentRolloutResponse
from google.cloud.gaming_v1.types.game_server_deployments import UpdateGameServerDeploymentRequest
from google.cloud.gaming_v1.types.game_server_deployments import UpdateGameServerDeploymentRolloutRequest
from google.cloud.gaming_v1.types.realms import CreateRealmRequest
from google.cloud.gaming_v1.types.realms import DeleteRealmRequest
from google.cloud.gaming_v1.types.realms import GetRealmRequest
from google.cloud.gaming_v1.types.realms import ListRealmsRequest
from google.cloud.gaming_v1.types.realms import ListRealmsResponse
from google.cloud.gaming_v1.types.realms import PreviewRealmUpdateRequest
from google.cloud.gaming_v1.types.realms import PreviewRealmUpdateResponse
from google.cloud.gaming_v1.types.realms import Realm
from google.cloud.gaming_v1.types.realms import UpdateRealmRequest
__all__ = (
'CreateGameServerClusterRequest',
'CreateGameServerConfigRequest',
'CreateGameServerDeploymentRequest',
'CreateRealmRequest',
'DeleteGameServerClusterRequest',
'DeleteGameServerConfigRequest',
'DeleteGameServerDeploymentRequest',
'DeleteRealmRequest',
'DeployedFleetDetails',
'FetchDeploymentStateRequest',
'FetchDeploymentStateResponse',
'FleetConfig',
'GameServerCluster',
'GameServerClusterConnectionInfo',
'GameServerClustersServiceAsyncClient',
'GameServerClustersServiceClient',
'GameServerConfig',
'GameServerConfigOverride',
'GameServerConfigsServiceAsyncClient',
'GameServerConfigsServiceClient',
'GameServerDeployment',
'GameServerDeploymentRollout',
'GameServerDeploymentsServiceAsyncClient',
'GameServerDeploymentsServiceClient',
'GetGameServerClusterRequest',
'GetGameServerConfigRequest',
'GetGameServerDeploymentRequest',
'GetGameServerDeploymentRolloutRequest',
'GetRealmRequest',
'GkeClusterReference',
'LabelSelector',
'ListGameServerClustersRequest',
'ListGameServerClustersResponse',
'ListGameServerConfigsRequest',
'ListGameServerConfigsResponse',
'ListGameServerDeploymentsRequest',
'ListGameServerDeploymentsResponse',
'ListRealmsRequest',
'ListRealmsResponse',
'OperationMetadata',
'OperationStatus',
'PreviewCreateGameServerClusterRequest',
'PreviewCreateGameServerClusterResponse',
'PreviewDeleteGameServerClusterRequest',
'PreviewDeleteGameServerClusterResponse',
'PreviewGameServerDeploymentRolloutRequest',
'PreviewGameServerDeploymentRolloutResponse',
'PreviewRealmUpdateRequest',
'PreviewRealmUpdateResponse',
'PreviewUpdateGameServerClusterRequest',
'PreviewUpdateGameServerClusterResponse',
'Realm',
'RealmSelector',
'RealmsServiceAsyncClient',
'RealmsServiceClient',
'ScalingConfig',
'Schedule',
'SpecSource',
'TargetDetails',
'TargetState',
'UpdateGameServerClusterRequest',
'UpdateGameServerDeploymentRequest',
'UpdateGameServerDeploymentRolloutRequest',
'UpdateRealmRequest',
)
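# A hedged usage sketch (not part of the generated file); the project id and
# location below are placeholders, and credentials are resolved from the
# environment as usual for Google Cloud clients.
#
# client = RealmsServiceClient()
# for realm in client.list_realms(parent="projects/my-project/locations/global"):
#     print(realm.name)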
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
393109ee8fa16eb9b5501ef98f949f56c88d7743
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/cpmpy/euler1.py
|
6979ed7c3f6ecb14b65e563e98144779fb1776eb
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
"""
Project Euler problem 1 in cpmpy.
http://projecteuler.net/index.php?section=problems&id=1
'''
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
Using Constraint Modeling for this problem is probably
a tad overkill...
This model (using ipython):
'''
In [1]: %time run euler1.py
z: 233168
status: ExitStatus.OPTIMAL (0.006237117 seconds)
CPU times: user 1.2 s, sys: 1.34 s, total: 2.53 s
Wall time: 197 ms
'''
Compare with this ipython oneliner.
'''
% time sum([i for i in range(1000) if (i %3 == 0 or i %5 == 0)])
CPU times: user 66 µs, sys: 12 µs, total: 78 µs
Wall time: 80.3 µs
Out[1]: 233168
'''
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my CPMpy page: http://www.hakank.org/cpmpy/
"""
from cpmpy import *
import numpy as np
from cpmpy_hakank import *
def euler1():
n = 1000
x = boolvar(shape=n,name="x")
z = intvar(0,sum(range(n)),name="z")
model = Model([x[0] == 0,
[x[i] == ((i % 3==0) | (i % 5==0)) for i in range(1,n)],
z==sum([i*x[i] for i in range(n)])
])
ss = CPM_ortools(model)
if ss.solve():
print("z:", z.value())
print("status:", ss.status())
euler1()
|
[
"hakank@gmail.com"
] |
hakank@gmail.com
|
cbffa1fcedb73a4050e1a2aa7b4fd171ff4888b8
|
c71f575fd537a7eb1fe2a68e7f350e8461baeeae
|
/pytext/data/contextual_intent_slot_data_handler.py
|
cf3870a7d2fc5a10f48a9eb30866a5d6fcada624
|
[
"BSD-3-Clause"
] |
permissive
|
orchestor/pytext
|
bfe6fc5e6010fd1cadf374cb05c18f892ebee0f4
|
eaba52f9757654dc384adee0d37732bdda7634de
|
refs/heads/master
| 2020-05-14T11:03:14.747987
| 2019-04-15T23:24:34
| 2019-04-15T23:29:17
| 181,771,040
| 1
| 0
|
NOASSERTION
| 2019-04-16T21:36:43
| 2019-04-16T21:36:43
| null |
UTF-8
|
Python
| false
| false
| 9,389
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List
from pytext.config.contextual_intent_slot import (
ExtraField,
ModelInput,
ModelInputConfig,
TargetConfig,
)
from pytext.config.field_config import DocLabelConfig, WordLabelConfig
from pytext.data.featurizer import InputRecord
from pytext.fields import (
CharFeatureField,
DictFeatureField,
DocLabelField,
Field,
FloatField,
FloatVectorField,
PretrainedModelEmbeddingField,
RawField,
SeqFeatureField,
TextFeatureField,
WordLabelField,
create_fields,
create_label_fields,
)
from pytext.utils import data
from .joint_data_handler import JointModelDataHandler
class RawData:
DOC_LABEL = "doc_label"
WORD_LABEL = "word_label"
TEXT = "text"
DICT_FEAT = "dict_feat"
DOC_WEIGHT = "doc_weight"
WORD_WEIGHT = "word_weight"
DENSE_FEAT = "dense_feat"
class ContextualIntentSlotModelDataHandler(JointModelDataHandler):
"""
Data Handler to build pipeline to process data and generate tensors to be consumed
by ContextualIntentSlotModel. Columns of Input data includes:
1. doc label for intent classification
2. word label for slot tagging of the last utterance
3. a sequence of utterances (e.g., a dialog)
4. Optional dictionary feature contained in the last utterance
5. Optional doc weight that stands for the weight of intent task in joint loss.
6. Optional word weight that stands for the weight of slot task in joint loss.
Attributes:
raw_columns: columns to read from data source. In case of files, the order
should match the data stored in that file. Raw columns include
::
[
RawData.DOC_LABEL,
RawData.WORD_LABEL,
RawData.TEXT,
RawData.DICT_FEAT (Optional),
RawData.DOC_WEIGHT (Optional),
RawData.WORD_WEIGHT (Optional),
]
labels: doc labels and word labels
features: embeddings generated from sequences of utterances and
dictionary features of the last utterance
extra_fields: doc weights, word weights, and etc.
"""
class Config(JointModelDataHandler.Config):
columns_to_read: List[str] = [
RawData.DOC_LABEL,
RawData.WORD_LABEL,
RawData.TEXT,
RawData.DICT_FEAT,
RawData.DOC_WEIGHT,
RawData.WORD_WEIGHT,
]
@classmethod
def from_config(
cls,
config: Config,
feature_config: ModelInputConfig,
target_config: TargetConfig,
**kwargs,
):
"""Factory method to construct an instance of
ContextualIntentSlotModelDataHandler object from the module's config,
model input config and target config.
Args:
config (Config): Configuration object specifying all the
parameters of ContextualIntentSlotModelDataHandler.
feature_config (ModelInputConfig): Configuration object specifying
model input.
target_config (TargetConfig): Configuration object specifying target.
Returns:
type: An instance of ContextualIntentSlotModelDataHandler.
"""
features: Dict[str, Field] = create_fields(
feature_config,
{
ModelInput.TEXT: TextFeatureField,
ModelInput.DICT: DictFeatureField,
ModelInput.CHAR: CharFeatureField,
ModelInput.PRETRAINED: PretrainedModelEmbeddingField,
ModelInput.SEQ: SeqFeatureField,
ModelInput.DENSE: FloatVectorField,
},
)
# Label fields.
labels: Dict[str, Field] = create_label_fields(
target_config,
{
DocLabelConfig._name: DocLabelField,
WordLabelConfig._name: WordLabelField,
},
)
extra_fields: Dict[str, Field] = {
ExtraField.DOC_WEIGHT: FloatField(),
ExtraField.WORD_WEIGHT: FloatField(),
ExtraField.RAW_WORD_LABEL: RawField(),
ExtraField.TOKEN_RANGE: RawField(),
ExtraField.UTTERANCE: RawField(),
}
kwargs.update(config.items())
return cls(
raw_columns=config.columns_to_read,
labels=labels,
features=features,
extra_fields=extra_fields,
**kwargs,
)
def preprocess_row(self, row_data: Dict[str, Any]) -> Dict[str, Any]:
"""Preprocess steps for a single input row: 1. apply tokenization to a
sequence of utterances; 2. process dictionary features to align with
the last utterance. 3. align word labels with the last utterance.
Args:
row_data (Dict[str, Any]): Dict of one row data with column names as keys.
Keys includes "doc_label", "word_label", "text", "dict_feat",
"word weight" and "doc weight".
Returns:
Dict[str, Any]: Preprocessed dict of one row data includes:
"seq_word_feat" (list of list of string)
tokenized words of sequence of utterances
"word_feat" (list of string)
tokenized words of last utterance
"raw_word_label" (string)
raw word label
"token_range" (list of tuple)
token ranges of word labels, each tuple contains the start
position index and the end position index
"utterance" (list of string)
raw utterances
"word_label" (list of string)
list of labels of words in last utterance
"doc_label" (string)
doc label for intent classification
"word_weight" (float)
weight of word label
"doc_weight" (float)
weight of document label
"dict_feat" (tuple, optional)
tuple of three lists, the first is the label of each words,
the second is the weight of the feature, the third is the
length of the feature.
"""
sequence = data.parse_json_array(row_data[RawData.TEXT])
# ignore dictionary feature for context sentences other than the last one
features_list = [
self.featurizer.featurize(InputRecord(raw_text=utterance))
for utterance in sequence[:-1]
]
# adding dictionary feature for the last (current) message
features_list.append(
self.featurizer.featurize(
InputRecord(
raw_text=sequence[-1],
raw_gazetteer_feats=row_data.get(ModelInput.DICT, ""),
)
)
)
res = {
# features
ModelInput.SEQ: [utterance.tokens for utterance in features_list],
ModelInput.TEXT: features_list[-1].tokens,
ModelInput.DICT: (
features_list[-1].gazetteer_feats,
features_list[-1].gazetteer_feat_weights,
features_list[-1].gazetteer_feat_lengths,
),
ModelInput.CHAR: features_list[-1].characters,
ModelInput.PRETRAINED: features_list[-1].pretrained_token_embedding,
# labels
DocLabelConfig._name: row_data[RawData.DOC_LABEL],
# extra data
# TODO move the logic to FloatField
ExtraField.DOC_WEIGHT: row_data.get(RawData.DOC_WEIGHT) or 1.0,
ExtraField.WORD_WEIGHT: row_data.get(RawData.WORD_WEIGHT) or 1.0,
ExtraField.RAW_WORD_LABEL: row_data[RawData.WORD_LABEL],
ExtraField.UTTERANCE: row_data[RawData.TEXT],
ExtraField.TOKEN_RANGE: features_list[-1].token_ranges,
}
if RawData.DENSE_FEAT in row_data:
res[ModelInput.DENSE] = row_data.get(RawData.DENSE_FEAT)
if WordLabelConfig._name in self.labels:
# TODO move it into word label field
res[WordLabelConfig._name] = data.align_slot_labels(
features_list[-1].token_ranges,
row_data[RawData.WORD_LABEL],
self.labels[WordLabelConfig._name].use_bio_labels,
)
return res
def _train_input_from_batch(self, batch):
text_input = getattr(batch, ModelInput.TEXT)
seq_input = getattr(batch, ModelInput.SEQ)
result = (
# text_input[0] contains the word embeddings,
# text_input[1] contains the lengths of each word
text_input[0],
*(
getattr(batch, key)
for key in self.features
if key not in [ModelInput.TEXT, ModelInput.SEQ, ModelInput.DENSE]
),
seq_input[0],
text_input[1],
seq_input[1],
)
        # Append dense feature to the decoder layer at the end.
if ModelInput.DENSE in self.features:
result = result + (getattr(batch, ModelInput.DENSE),)
return result
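# A hedged example (not part of the original file) of one raw input row in the
# shape this handler reads, per the RawData columns above. The label names,
# utterances, and empty dict-feature string are illustrative placeholders only.
#
# row = {
#     RawData.DOC_LABEL: "greeting",                        # hypothetical intent
#     RawData.WORD_LABEL: "[]",                             # hypothetical slots
#     RawData.TEXT: '["how are you?", "fine, and you?"]',   # JSON array of utterances
#     RawData.DICT_FEAT: "",
#     RawData.DOC_WEIGHT: 1.0,
#     RawData.WORD_WEIGHT: 1.0,
# }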
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
15779bacd0de6d82ef5a73e93bd98f8348762423
|
8e939e0f075a14377d87e0eb7729e4f0818f1df9
|
/zarc/old/views_2017-05-21-00:37:34.py
|
94112e6a1affd874baef018a6d3f7c1f0238331d
|
[
"MIT"
] |
permissive
|
mimcomp/caseke
|
072d076c9442c19916d8f71ec25fa45676031914
|
3c0749a431bb2e2c82bcb292d528c748bea8b1ba
|
refs/heads/master
| 2020-06-02T15:42:24.159728
| 2019-06-03T16:34:09
| 2019-06-03T16:34:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 63,520
|
py
|
# coding: utf-8
# AUTOGENERATED BY gen_script.sh from kpony4.py
# Copyright (C) Nyimbi Odero, Sun May 21 00:31:54 EAT 2017
import calendar
from flask import redirect, flash, url_for, Markup
from flask import render_template
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.views import ModelView, BaseView, MasterDetailView, MultipleView, RestCRUDView, CompactCRUDMixin
from flask_appbuilder import ModelView, CompactCRUDMixin, aggregate_count, action, expose, BaseView, has_access
from flask_appbuilder.charts.views import ChartView, TimeChartView, GroupByChartView
from flask_appbuilder.models.group import aggregate_count
from flask_appbuilder.widgets import ListThumbnail, ListWidget
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import appbuilder, db
from .models import *
# Basic Lists
hide_list = ['created_by', 'changed_by', 'created_on', 'changed_on']
# To pretty-print from PersonMixin
def pretty_month_year(value):
return calendar.month_name[value.month] + ' ' + str(value.year)
def pretty_year(value):
return str(value.year)
def fill_gender():
try:
db.session.add(Gender(name='Male'))
db.session.add(Gender(name='Female'))
db.session.commit()
    except Exception:
db.session.rollback()
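# A hedged note (not part of the original file): fill_gender seeds the Gender
# lookup table; the try/except wrapper suggests it is meant to be callable more
# than once, presumably relying on a uniqueness constraint on name to make
# repeat inserts fail and roll the session back.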
class AttorneyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Attorney, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class BailView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Bail, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CaseView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Case, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CauseofactionView(CompactCRUDMixin, ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Causeofaction, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ConstituencyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Constituency, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CountyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(County, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CourtView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Court, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CourtlevelView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Courtlevel, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class DefendantView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Defendant, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class DoctemplateView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Doctemplate, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class DocumentView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Document, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class FilingView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Filing, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class FilingtypeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Filingtype, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class HearingView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Hearing, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class HearingtypeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Hearingtype, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class InvestigationView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Investigation, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class JudgeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Judge, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class LawfirmView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Lawfirm, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class NatureofsuitView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Natureofsuit, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PlaintiffView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Plaintiff, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PolicemanView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policeman, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PoliceroleView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policerole, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PolicestationView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policestation, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PrisonView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prison, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PrisonremandView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prisonremand, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ProsecutorView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prosecutor, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ProsecutorteamView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prosecutorteam, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class SubcountyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Subcounty, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class SuretyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Surety, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class TownView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Town, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class WitnesView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Witnes, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
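# Demographic chart views: for each person-like model, a PieChart groups record
# counts by age_today and by gender, and an AreaChart buckets the same counts by
# month/year via the pretty_month_year / pretty_year formatters.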
class AttorneyChartView(GroupByChartView):
datamodel = SQLAInterface(Attorney , db.session)
    chart_title = 'Attorneys Grouped by Birth'
label_columns = AttorneyView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class AttorneyTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Attorney , db.session)
    chart_title = 'Attorneys Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = AttorneyView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class PlaintiffChartView(GroupByChartView):
datamodel = SQLAInterface(Plaintiff , db.session)
    chart_title = 'Plaintiffs Grouped by Birth'
label_columns = PlaintiffView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class PlaintiffTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Plaintiff , db.session)
    chart_title = 'Plaintiffs Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = PlaintiffView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class ObserverChartView(GroupByChartView):
datamodel = SQLAInterface(Observer , db.session)
    chart_title = 'Observers Grouped by Birth'
label_columns = ObserverView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class ObserverTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Observer , db.session)
    chart_title = 'Observers Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = ObserverView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class SuretyChartView(GroupByChartView):
datamodel = SQLAInterface(Surety , db.session)
    chart_title = 'Sureties Grouped by Birth'
label_columns = SuretyView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class SuretyTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Surety , db.session)
    chart_title = 'Sureties Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = SuretyView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class ProsecutorChartView(GroupByChartView):
datamodel = SQLAInterface(Prosecutor , db.session)
    chart_title = 'Prosecutors Grouped by Birth'
label_columns = ProsecutorView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class ProsecutorTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Prosecutor , db.session)
    chart_title = 'Prosecutors Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = ProsecutorView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class PolicemanChartView(GroupByChartView):
datamodel = SQLAInterface(Policeman , db.session)
    chart_title = 'Policemen Grouped by Birth'
label_columns = PolicemanView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class PolicemanTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Policeman , db.session)
    chart_title = 'Policemen Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = PolicemanView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class JudgeChartView(GroupByChartView):
datamodel = SQLAInterface(Judge , db.session)
    chart_title = 'Judges Grouped by Birth'
label_columns = JudgeView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class JudgeTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Judge , db.session)
    chart_title = 'Judges Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = JudgeView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class DefendantChartView(GroupByChartView):
datamodel = SQLAInterface(Defendant , db.session)
    chart_title = 'Defendants Grouped by Birth'
label_columns = DefendantView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class DefendantTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Defendant , db.session)
    chart_title = 'Defendants Grouped by Birth over Time'
chart_type = 'AreaChart'
label_columns = DefendantView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
# How to create a MasterDetailView
#class DetailView(ModelView):
# datamodel = SQLAInterface(DetailTable, db.session)
#class MasterView(MasterDetailView):
# datamodel = SQLAInterface(MasterTable, db.session)
# related_views = [DetailView]
# How to create a MultipleView
#class MultipleViewsExp(MultipleView):
# views = [GroupModelView, ContactModelView]
#View Registration
db.create_all()
fill_gender()
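# Register every model view under the "Setup" menu and the chart views under
# "Reports"; db.create_all() creates any missing tables first, and fill_gender()
# presumably seeds the gender lookup data (defined earlier in this module).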
appbuilder.add_view(AttorneyView(), "Attorneys", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(BailView(), "Bails", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CaseView(), "Cases", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CauseofactionView(), "Causeofactions", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ConstituencyView(), "Constituencies", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CountyView(), "Counties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtView(), "Courts", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtlevelView(), "Courtlevels", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DefendantView(), "Defendants", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DoctemplateView(), "Doctemplates", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DocumentView(), "Documents", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(FilingView(), "Filings", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(FilingtypeView(), "Filingtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(HearingView(), "Hearings", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(HearingtypeView(), "Hearingtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(InvestigationView(), "Investigations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(JudgeView(), "Judges", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(LawfirmView(), "Lawfirms", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(NatureofsuitView(), "Natureofsuits", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PlaintiffView(), "Plaintiffs", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicemanView(), "Policemen", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PoliceroleView(), "Policeroles", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicestationView(), "Policestations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PrisonView(), "Prisons", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PrisonremandView(), "Prisonremands", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ProsecutorView(), "Prosecutors", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ProsecutorteamView(), "Prosecutorteams", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(SubcountyView(), "Subcounties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(SuretyView(), "Sureties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(TownView(), "Towns", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(WitnesView(), "Witnesses", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(AttorneyChartView(), 'Attorney Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(AttorneyTimeChartView(), 'Attorney Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PlaintiffChartView(), 'Plaintiff Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PlaintiffTimeChartView(), 'Plaintiff Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ObserverChartView(), 'Observer Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ObserverTimeChartView(), 'Observer Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(SuretyChartView(), 'Surety Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(SuretyTimeChartView(), 'Surety Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ProsecutorChartView(), 'Prosecutor Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ProsecutorTimeChartView(), 'Prosecutor Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PolicemanChartView(), 'Policeman Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PolicemanTimeChartView(), 'Policeman Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(JudgeChartView(), 'Judge Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(JudgeTimeChartView(), 'Judge Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(DefendantChartView(), 'Defendant Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(DefendantTimeChartView(), 'Defendant Time Chart', icon='fa-dashboard', category='Reports')
#appbuilder.add_separator("Setup")
#appbuilder.add_separator("My Views")
#appbuilder.add_link(name, href, icon='', label='', category='', category_icon='', category_label='', baseview=None)
|
[
"nyimbi@gmail.com"
] |
nyimbi@gmail.com
|
0c15e3f95e19147d72f251d03c7fadef4f1ac626
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_041/ch23_2019_04_02_12_34_31_027536.py
|
3c83817e385509191b67c91a0d4fb30eb41e21ab
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
def verifica_idade(idade):
    """Return a drinking-age status string for the given age in years."""
    if idade > 21:
        return 'Liberado EUA e Brasil'  # of age in both the USA and Brazil
    elif 18 <= idade <= 21:
        return 'Liberado BRASIL'  # of age in Brazil only
    else:
        return 'Não está liberado'  # not of age
|
[
"you@example.com"
] |
you@example.com
|
b5d8e0977a892778bc9c2ddbb5b7885a79e76cee
|
20021a5bc80f2649269cfb7e1b7f10d6f6b6a839
|
/POCScan/information/jsp_conf_find.py
|
1374f06350e84617bc93cab0f8a3c56987e7a055
|
[] |
no_license
|
polosec/SZhe_Scan
|
bddd1f53452c44837a6ddf902546975879449e11
|
d5e20991530de763c374b5a3c6e8db689bff5265
|
refs/heads/master
| 2022-07-19T09:31:05.840747
| 2020-05-19T14:57:07
| 2020-05-19T14:57:07
| 265,748,880
| 0
| 1
| null | 2020-05-21T03:49:48
| 2020-05-21T03:49:47
| null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Java configuration file disclosure
referer: unknown
author: Lucifer
description: web.xml is the configuration file used by Java web frameworks; retrieving it can expose sensitive information
'''
import sys
import requests
import warnings
from termcolor import cprint
class jsp_conf_find_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
payload = "/WEB-INF/web.xml"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, timeout=10, verify=False)
if req.headers["Content-Type"] == "application/xml":
                return True, vulnurl, "Java configuration file disclosure", payload, req.text
else:
return False, None, None, None, None
        except Exception:
return False, None, None, None, None
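# Command-line usage (hypothetical target URL): python jsp_conf_find.py http://target.example/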
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = jsp_conf_find_BaseVerify(sys.argv[1])
testVuln.run()
|
[
"2585614464@qq.com"
] |
2585614464@qq.com
|
dbf639cc2ed00a1b48e93b1cfeba7c265bbfc258
|
fdfd0d6cf04509eb642c1065240df297b8b7bf91
|
/tests/helpers.py
|
7b1d59c37a6f8cad92e6a3c07223c8a013893308
|
[
"BSD-3-Clause"
] |
permissive
|
lnielsen/cernservicexml
|
747902c102f3e25e4c941c8aef29acb62bf73078
|
3f1691a81fd0534678a32acd2a352de4de55a82e
|
refs/heads/master
| 2021-01-20T13:47:33.100484
| 2015-06-16T15:10:48
| 2015-06-16T15:10:48
| 37,516,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Service XML
# Copyright (C) 2015 CERN.
#
# CERN Service XML is free software; you can redistribute it and/or modify
# it under the terms of the Revised BSD License; see LICENSE file for
# more details.
"""Test helpers."""
from __future__ import absolute_import, print_function, unicode_literals
import functools
import sys
PY34 = sys.version_info[0:2] >= (3, 4)
def import_httpretty():
"""Import HTTPretty and monkey patch Python 3.4 issue.
See https://github.com/gabrielfalcao/HTTPretty/pull/193 and
as well as https://github.com/gabrielfalcao/HTTPretty/issues/221.
"""
if not PY34:
import httpretty
else:
import socket
old_SocketType = socket.SocketType
import httpretty
from httpretty import core
def sockettype_patch(f):
@functools.wraps(f)
def inner(*args, **kwargs):
f(*args, **kwargs)
socket.SocketType = old_SocketType
socket.__dict__['SocketType'] = old_SocketType
return inner
core.httpretty.disable = sockettype_patch(
httpretty.httpretty.disable
)
return httpretty
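# A minimal usage sketch (not part of the original module; register_uri is the
# standard HTTPretty API):
#
#   httpretty = import_httpretty()
#   httpretty.enable()
#   httpretty.register_uri(httpretty.GET, 'http://example.org/', body='ok')
#   ...exercise the code under test...
#   httpretty.disable()  # on Python 3.4 this also restores socket.SocketType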
|
[
"lars.holm.nielsen@cern.ch"
] |
lars.holm.nielsen@cern.ch
|
dfe3ebea885fdd94e4bb398302f5e26a34f93f98
|
df5e91385e3a4c89116a111a10ff460e4e380b86
|
/manage.py
|
a8efacc67cdd1f80836e5c4953be5ad41c29f50d
|
[] |
no_license
|
taojy123/TBspider
|
820a2da618fff5c49633954e7f411bcff263158c
|
8b6844f2d8d0d9639e0e69af29517ef639ec4d26
|
refs/heads/master
| 2020-12-24T15:49:13.380774
| 2014-03-07T12:49:58
| 2014-03-07T12:49:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
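    # Point Django at this project's settings module before dispatching the CLI command.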
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tbspider.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"taojy123@163.com"
] |
taojy123@163.com
|
3ae564ec2995618a677d2ebe236c5f0ae443ef5e
|
e6683202f97190f8a5230fbb0b99d9692f10872c
|
/core/lib/dns/rdtypes/ANY/SSHFP.py
|
b6ed396f1d2897efeb5c6ce2155c58ad87a41ff7
|
[
"MIT"
] |
permissive
|
swagkarna/arissploit
|
0ae6ba9c91aa8a1160597f052462a1030276688d
|
b0a58f61afc12ac78c65e0275dfa5e4d1e44989e
|
refs/heads/master
| 2020-09-28T18:51:40.741777
| 2019-12-08T21:54:15
| 2019-12-08T21:54:15
| 226,839,315
| 3
| 0
|
MIT
| 2019-12-09T09:57:43
| 2019-12-09T09:57:43
| null |
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import binascii
import dns.exception  # dns.exception.SyntaxError is raised in from_text() below
import dns.rdata
import dns.rdatatype
class SSHFP(dns.rdata.Rdata):
"""SSHFP record
@ivar algorithm: the algorithm
@type algorithm: int
@ivar fp_type: the digest type
@type fp_type: int
@ivar fingerprint: the fingerprint
@type fingerprint: string
@see: draft-ietf-secsh-dns-05.txt"""
__slots__ = ['algorithm', 'fp_type', 'fingerprint']
def __init__(self, rdclass, rdtype, algorithm, fp_type,
fingerprint):
super(SSHFP, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.fp_type = fp_type
self.fingerprint = fingerprint
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %s' % (self.algorithm,
self.fp_type,
dns.rdata._hexify(self.fingerprint,
chunksize=128))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
algorithm = tok.get_uint8()
fp_type = tok.get_uint8()
chunks = []
        while True:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value.encode())
fingerprint = b''.join(chunks)
fingerprint = binascii.unhexlify(fingerprint)
return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
def to_wire(self, file, compress=None, origin=None):
header = struct.pack("!BB", self.algorithm, self.fp_type)
file.write(header)
file.write(self.fingerprint)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
header = struct.unpack("!BB", wire[current: current + 2])
current += 2
rdlen -= 2
fingerprint = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], fingerprint)
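# Wire format recap (RFC 4255, which superseded the draft cited in the docstring):
# one byte for the algorithm, one byte for the fingerprint type, then the raw
# fingerprint bytes. A hypothetical zone-file line this class can parse:
#   host.example.com. IN SSHFP 2 1 123456789abcdef67890123456789abcdef67890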
|
[
"noreply@github.com"
] |
swagkarna.noreply@github.com
|
b639fbb2299a12acf5adfa56c7eb10aefd3818fb
|
3b5c46ce2daa75e1e157838d0f6cfd92469471a0
|
/plastering/inferencers/scrabble_helper.py
|
a69252ea56f5e2fdc8ae172faddd96b0a3c939dc
|
[
"MIT"
] |
permissive
|
plastering/plastering
|
1b4e9c04fce4b26b22fe5ade05af9baf644b4eaa
|
26ffeecb38844ebb122fde5d9bd2276a7b4150a0
|
refs/heads/master
| 2023-04-04T07:50:59.087529
| 2021-05-17T23:31:40
| 2021-05-17T23:31:40
| 149,086,461
| 37
| 17
|
MIT
| 2023-03-24T23:19:24
| 2018-09-17T07:32:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
from collections import defaultdict
import pdb
from pkg_resources import resource_string
from io import StringIO
import pandas as pd
from ..metadata_interface import LabeledMetadata, RawMetadata
def elem2list(elem):
if isinstance(elem, str):
return elem.split('_')
else:
return []
def csv2json(df, key_idx, value_idx):
keys = df[key_idx].tolist()
values = df[value_idx].tolist()
return {k: elem2list(v) for k, v in zip(keys, values)}
def load_data(target_building,
source_buildings,
unit_mapping_file=resource_string('config', 'unit_mapping.csv'),
bacnettype_mapping_file=resource_string('config', 'bacnettype_mapping.csv'),
#unit_mapping_file='config/unit_mapping.csv',
#bacnettype_mapping_file='config/bacnettype_mapping.csv',
bacnettype_flag=False,
metadata_types=['VendorGivenName'],
):
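    # Build per-building dicts of character-level "sentences", token-level labels
    # (from fullparsing), and tagsets from the labeled/raw metadata collections;
    # BACnet units and, optionally, BACnet type strings add known tags per srcid.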
building_sentence_dict = dict()
building_label_dict = dict()
building_tagsets_dict = dict()
known_tags_dict = defaultdict(list)
units = csv2json(pd.read_csv(StringIO(unit_mapping_file.decode('utf-8'))),
'unit',
'word',
)
units[None] = []
units[''] = []
bacnettypes = csv2json(pd.read_csv(StringIO(bacnettype_mapping_file.decode('utf-8'))),
'bacnet_type_str',
'candidates',
)
bacnettypes[None] = []
bacnettypes[''] = []
for building in source_buildings:
true_tagsets = {}
label_dict = {}
for labeled in LabeledMetadata.objects(building=building):
srcid = labeled.srcid
true_tagsets[srcid] = labeled.tagsets
fullparsing = labeled.fullparsing
labels = {}
for metadata_type, pairs in fullparsing.items():
labels[metadata_type] = [pair[1] for pair in pairs]
label_dict[srcid] = labels
building_tagsets_dict[building] = true_tagsets
building_label_dict[building] = label_dict
sentence_dict = dict()
for raw_point in RawMetadata.objects(building=building):
srcid = raw_point.srcid
metadata = raw_point['metadata']
sentences = {}
for clm in metadata_types:
if clm not in ['BACnetUnit', 'BACnetTypeStr']:
sentences[clm] = [c for c in metadata.get(clm, '').lower()]
sentence_dict[srcid] = sentences
bacnet_unit = metadata.get('BACnetUnit')
if bacnet_unit:
known_tags_dict[srcid] += units[bacnet_unit]
if bacnettype_flag:
known_tags_dict[srcid] += bacnettypes[metadata.get('BACnetTypeStr')]
building_sentence_dict[building] = sentence_dict
target_srcids = list(building_label_dict[target_building].keys())
return building_sentence_dict, target_srcids, building_label_dict,\
building_tagsets_dict, known_tags_dict
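# --- Self-contained sketch of the csv2json/elem2list helpers (illustrative
# data). load_data itself needs the package's Mongo-backed metadata models,
# and the relative import above means this module only runs in package context. ---
if __name__ == '__main__':
    _df = pd.DataFrame({'unit': ['degC', 'percent'],
                        'word': ['temperature_sensor', 'humidity']})
    print(csv2json(_df, 'unit', 'word'))
    # -> {'degC': ['temperature', 'sensor'], 'percent': ['humidity']}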
|
[
"bk7749@gmail.com"
] |
bk7749@gmail.com
|
50854a154c4b01f862c232713478f59d981e5b6d
|
f34a81fa55a80130e2f70197e011cccb9dee063f
|
/python/oreilly_intermediate_python/unit6_scraping/scraper3.py
|
53731b7e6bcce199400ed90c7903128ae2c3e0bd
|
[] |
no_license
|
ilyarudyak/data_science
|
7d71ecdf17cc68be33b598ae2e51cba48b5b3aab
|
deb992b2760c4fea7f39f64089f6c1884f3fdb51
|
refs/heads/master
| 2020-05-27T11:10:32.679938
| 2017-02-22T21:29:26
| 2017-02-22T21:29:26
| 82,545,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
from urllib.request import urlopen, urlretrieve
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import os
baseUrl = "https://apod.nasa.gov/apod/archivepix.html"
downloadDir = 'apod_pictures'
os.makedirs(downloadDir, exist_ok=True)  # ensure the target directory exists
archiveHtmlStr = urlopen(baseUrl).read()
for link in BeautifulSoup(archiveHtmlStr, "html.parser").findAll("a", limit=10):
    imgBaseUrl = urljoin(baseUrl, link['href'])
    # follow the link to the image page
    imgHtmlStr = urlopen(imgBaseUrl).read()
    img = BeautifulSoup(imgHtmlStr, "html.parser").img
    if img is None:  # some APOD pages embed a video instead of an image
        continue
    imgUrl = urljoin(imgBaseUrl, img['src'])
    imgName = imgUrl.split('/')[-1]
    print(imgName, imgUrl)
    # download and store the image
    urlretrieve(imgUrl, os.path.join(downloadDir, imgName))
|
[
"ilyarudyak@yahoo.com"
] |
ilyarudyak@yahoo.com
|
3f9e10788f14ab2cd56b8d0fcd5113a3d30b5a99
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Hans.CMN/Serif_8/pdf_to_json_test_Hans.CMN_Serif_8.py
|
2f64816c501918d27c2ac1dc0830fcb5c2325910
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Hans.CMN/Serif_8/udhr_Hans.CMN_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
7048b0d769e8eb1e550e5e242e33d057e16460c5
|
fe4df940d16f8a9657028ee53a1780cccd32c817
|
/Python/test_roman_numerals.py
|
6fdd59f316e790b6f9ddf90e47a3baa9b1f53240
|
[
"BSD-2-Clause"
] |
permissive
|
Kwpolska/roman_numerals
|
2e6d700d9852d5072d96524d73169445e7288753
|
887e648a39fa73583f4b7cf330436f94bf88325e
|
refs/heads/master
| 2020-08-01T16:01:15.539378
| 2018-01-01T00:00:00
| 2017-12-31T18:33:35
| 73,572,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
# -*- encoding: utf-8 -*-
# Test Suite for Roman Numeral Converter
# Copyright © MMXVI-MMXVIII, Chris Warrick.
# All rights reserved.
# License: 3-clause BSD, see main file for details.
"""Test suite for Roman Numeral Converter."""
import pytest
from roman_numerals import to_roman, from_roman
def test_to_roman():
assert to_roman(1) == 'I'
assert to_roman(2) == 'II'
assert to_roman(3) == 'III'
assert to_roman(4) == 'IV'
assert to_roman(5) == 'V'
assert to_roman(6) == 'VI'
assert to_roman(1234) == 'MCCXXXIV'
assert to_roman(1958) == 'MCMLVIII'
assert to_roman(2222) == 'MMCCXXII'
assert to_roman(3999) == 'MMMCMXCIX'
def test_to_roman_invalid():
with pytest.raises(ValueError):
to_roman(0)
with pytest.raises(ValueError):
to_roman(-1)
with pytest.raises(ValueError):
to_roman(4000)
def test_to_roman_file():
with open('../test_data.txt') as fh:
for line in fh:
integer, roman = line.strip().split(' ')
integer = int(integer)
assert to_roman(integer) == roman
def test_from_roman():
assert from_roman("I") == 1
assert from_roman("Ii") == 2
assert from_roman("iii") == 3
assert from_roman(" iv ") == 4
assert from_roman("V") == 5
assert from_roman("ViIi") == 8
def test_from_roman_file():
with open('../test_data.txt') as fh:
for line in fh:
integer, roman = line.strip().split(' ')
integer = int(integer)
assert from_roman(roman) == integer
if __name__ == '__main__':
print("Please run with py.test.")
|
[
"kwpolska@gmail.com"
] |
kwpolska@gmail.com
|
70e24b5ae7a71fda9781af5c5d9bfe6cd088dd29
|
e70b678712a355a0b51632728c7781b0bdcf29f4
|
/Algorithms/Python/Contains-Duplicate-II.py
|
fdaf86c10320f2d83fdd79f38c2eb8f4dff4022d
|
[] |
no_license
|
keyi/Leetcode_Solutions
|
b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af
|
69e4e969b435ff2796bd7c4b5dad9284a853ab54
|
refs/heads/master
| 2020-05-21T23:36:20.450053
| 2018-11-11T03:45:28
| 2018-11-11T03:45:28
| 33,714,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
import collections
class Solution(object):
def containsNearbyDuplicate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
dic = collections.defaultdict(list)
for i in range(len(nums)):
if nums[i] in dic:
for x in dic[nums[i]]:
if i - x <= k:
return True
dic[nums[i]].append(i)
return False
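# --- Quick illustrative check ---
if __name__ == '__main__':
    _s = Solution()
    print(_s.containsNearbyDuplicate([1, 2, 3, 1], 3))  # True: indices 0 and 3
    print(_s.containsNearbyDuplicate([1, 2, 3, 1], 2))  # False: gap of 3 > k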
|
[
"yike921012@gmail.com"
] |
yike921012@gmail.com
|
6e1c6809f055a02678b42764cbc0f8d5eb360592
|
d9b53673b899a9b842a42060740b734bf0c63a31
|
/leetcode/python/easy/p119_getRow.py
|
f635d4dcaee648ef09c71c18a2b7eb6440bc7cc5
|
[
"Apache-2.0"
] |
permissive
|
kefirzhang/algorithms
|
a8d656774b576295625dd663154d264cd6a6a802
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
refs/heads/master
| 2021-06-13T13:05:40.851704
| 2021-04-02T07:37:59
| 2021-04-02T07:37:59
| 173,903,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
class Solution:
    def getRow(self, numRows):  # follow-up: O(k) extra space; the key idea is to grow the previous row in place
l_data = [1]
if numRows == 0:
return [1]
for i in range(numRows):
for j in range(i + 1):
if j != i:
l_data[j] = l_data[j] + l_data[j + 1]
l_data = [1] + l_data
return l_data
def getRow1(self, numRows):
numRows += 1
l_data = []
for i in range(1, numRows + 1):
l_cur = []
for j in range(i):
if j == 0 or j == i - 1:
l_cur.append(1)
else:
l_cur.append(l_data[-1][j - 1] + l_data[-1][j])
l_data.append(l_cur)
return l_data[-1]
slu = Solution()
print(slu.getRow(4))
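# Quick sanity check: both variants should agree on an illustrative row.
assert slu.getRow(4) == slu.getRow1(4) == [1, 4, 6, 4, 1]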
|
[
"8390671@qq.com"
] |
8390671@qq.com
|
61cfc91b56eeabeaa0e59ead7dfb135142cd761e
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/anagram/src/334.py
|
dcd298320ef99f702d1ae67f9b29bf6905ed5ce5
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
def detect_anagrams(word, anagrams):
    # Comparing sorted letters is equivalent to the original permutation test,
    # but avoids materializing the factorial number of permutations of `word`.
    target = sorted(word.lower())
    return [x for x in anagrams
            if x.lower() != word.lower() and sorted(x.lower()) == target]
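# --- Illustrative check ---
if __name__ == '__main__':
    print(detect_anagrams('listen', ['enlists', 'google', 'inlets', 'banana']))
    # -> ['inlets']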
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
4a6218d1fe1d17ba0f622a6721c1ecf9f6a001cf
|
f75ec2c20c3208350d310038a2cd0a67253b44df
|
/example/petstore/apis/pet/updatePetWithForm.py
|
858c0b878074e4e9ffc1a6be75fb2b60d1c1cbb1
|
[] |
no_license
|
vichooz/swagger_codegen
|
e53f59f3cd2c080157863698f932a606705db4e4
|
8238356075eea4218b2e6a645c7ea2b8826b1044
|
refs/heads/master
| 2022-08-03T04:32:49.291426
| 2020-05-27T06:09:28
| 2020-05-27T06:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from __future__ import annotations
from swagger_codegen.api.request import ApiRequest
def make_request(self, petid: int, name: str = ..., status: str = ...,) -> None:
"""Updates a pet in the store with form data"""
m = ApiRequest(
method="POST",
path="/api/v3/pet/{petId}".format(petId=petid,),
content_type=None,
body=None,
headers=self._only_provided({}),
query_params=self._only_provided({"name": name, "status": status,}),
cookies=self._only_provided({}),
)
return self.make_request({}, m)
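# --- Hedged usage sketch: swagger_codegen binds make_request onto a generated
# client adapter, so a call would look roughly like the line below (attribute
# names are assumptions based on the petstore example layout, not verified):
#     client.pet.updatePetWithForm(petid=42, name="Rex", status="sold")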
|
[
"n10101010@gmail.com"
] |
n10101010@gmail.com
|