blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f418b7bee3176104ad9a89194ca1f4a3fa27ee1 | 3a9b154aa9d5e379683476f80f30630bf44d2102 | /Server_v1/api/urls.py | d9480d7b24288746c33ed7570e2a86d0a4d9360c | [] | no_license | KevinDon/py_amazon_analysis | 81995e360d2b536e1df6e515aae9457054edae29 | 13b5fbb046ca6516ac3a47e8f7867baf358011f4 | refs/heads/master | 2022-12-13T00:27:27.511783 | 2019-08-14T11:45:53 | 2019-08-14T11:45:53 | 185,160,162 | 0 | 1 | null | 2022-12-10T05:38:15 | 2019-05-06T08:56:40 | TSQL | UTF-8 | Python | false | false | 8,094 | py | # coding:utf-8
from django.conf.urls import url
from django.urls import include, path
# from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from api.views import *
# router = routers.DefaultRouter() # 创建路由对象
# router.register(r'statvisitqrcodeskus', StatVisitQrcodeSkusSet, basename='sku')
# router.register(r'statvisitqrcodeskuday', StatVisitQrcodeSkuDaySet, basename='sku')
# schema_view = get_schema_view(title='API DOCS', renderer_classes=[OpenAPIRenderer, SwaggerUIRenderer])
# NOTE(review): the original patterns used (?P<version>[v1|v2]+) — a character
# CLASS that matches any run of the characters 'v', '1', '|', '2' (so e.g.
# "vv21" was accepted as a version). Rewritten as the intended alternation
# (?P<version>v1|v2), which matches exactly "v1" or "v2".
urlpatterns = [
    url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
    # url(r'^docs$',schema_view),
    # url(r'^(?P<version>v1|v2)/',include(router.urls)),
    # url(r'^test$',TestSet.as_view()),
    url(r'^docs$', get_swagger_view(title='Docs API')),
    url(r'^(?P<version>v1|v2)/apitokenauth', LoginSet.as_view(), name='apitokenauth'),
    url(r'^(?P<version>v1|v2)/userprofile', UserProfileSet.as_view(), name='userprofile'),
    # url(r'^api-token-auth', obtain_jwt_token),
    url(r'^(?P<version>v1|v2)/statamazonsku/', StatAmazonSkuSet.as_view(), name='statamazonsku'),
    url(r'^(?P<version>v1|v2)/statamazonskulist/', StatAmazonSkuListGet.as_view(), name='statamazonskulist'),
    url(r'^(?P<version>v1|v2)/statamazonskuuvday/', StatAmazonSkuUvDaySet.as_view(), name='statamazonskuuvday'),
    url(r'^(?P<version>v1|v2)/statamazonskuuvmonth/', StatAmazonSkuUvMonthSet.as_view(), name='statamazonskuuvmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskuuvweek/', StatAmazonSkuUvWeekSet.as_view(), name='statamazonskuuvweek'),
    url(r'^(?P<version>v1|v2)/statamazonskupvday/', StatAmazonSkuPvDaySet.as_view(), name='statamazonskupvday'),
    url(r'^(?P<version>v1|v2)/statamazonskupvmonth/', StatAmazonSkuPvMonthSet.as_view(), name='statamazonskupvmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskupvweek/', StatAmazonSkuPvWeekSet.as_view(), name='statamazonskupvweek'),
    url(r'^(?P<version>v1|v2)/statamazonskutotalitemsday/', StatAmazonSkuTotalItemsDaySet.as_view(), name='statamazonskutotalitemsday'),
    url(r'^(?P<version>v1|v2)/statamazonskutotalitemsmonth/', StatAmazonSkuTotalItemsMonthSet.as_view(), name='statamazonskutotalitemsmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskutotalitemsweek/', StatAmazonSkuTotalItemsWeekSet.as_view(), name='statamazonskutotalitemsweek'),
    url(r'^(?P<version>v1|v2)/statamazonskubuyboxday/', StatAmazonSkuBuyBoxDaySet.as_view(), name='statamazonskubuyboxday'),
    # amazon category
    # NOTE(review): "statmazon..." (missing 'a') is kept byte-identical below;
    # changing the public URL would break existing clients — confirm before fixing.
    url(r'^(?P<version>v1|v2)/statmazoncategorylist/', StatAmazonCategoryListSet.as_view(), name='statamazoncategorylistset'),
    url(r'^(?P<version>v1|v2)/statmazoncategorys/', StatAmazonCategorySet.as_view(), name='statamazoncategorysset'),
    # keyword
    url(r'^(?P<version>v1|v2)/statamazonkeywordlistset/', StatAmazonKeywordListSet.as_view(), name='statamazonkeywordlistset'),
    url(r'^(?P<version>v1|v2)/statamazonkeywordsset/', StatAmazonKeywordsSet.as_view(), name='statamazonkeywordsset'),
    # template variant
    url(r'^(?P<version>v1|v2)/statamazonvariantlistset/', StatAmazonVariantListSet.as_view(), name='statamazonvariantlistset'),
    # proxy ip
    url(r'^(?P<version>v1|v2)/statamazonproxyiplistset/', StatAmazonProxyIpListSet.as_view(), name='statamazonproxyiplistset'),
    # line
    url(r'^(?P<version>v1|v2)/statamazonline/', StatAmazonLineSet.as_view(), name='statamazonline'),
    url(r'^(?P<version>v1|v2)/statamazonlineuvday/', StatAmazonLineUvDaySet.as_view(), name='statamazonlineuvday'),
    url(r'^(?P<version>v1|v2)/statamazonlineuvmonth/', StatAmazonLineUvMonthSet.as_view(), name='statamazonlineuvmonth'),
    url(r'^(?P<version>v1|v2)/statamazonlineuvweek/', StatAmazonLineUvWeekSet.as_view(), name='statamazonlineuvweek'),
    url(r'^(?P<version>v1|v2)/statamazonlinepvday/', StatAmazonLinePvDaySet.as_view(), name='statamazonlinepvday'),
    url(r'^(?P<version>v1|v2)/statamazonlinepvmonth/', StatAmazonLinePvMonthSet.as_view(), name='statamazonlinepvmonth'),
    url(r'^(?P<version>v1|v2)/statamazonlinepvweek/', StatAmazonLinePvWeekSet.as_view(), name='statamazonlinepvweek'),
    url(r'^(?P<version>v1|v2)/statamazonlinetotalitemsday/', StatAmazonLineTotalItemsDaySet.as_view(), name='statamazonlinetotalitemsday'),
    url(r'^(?P<version>v1|v2)/statamazonlinetotalitemsmonth/', StatAmazonLineTotalItemsMonthSet.as_view(), name='statamazonlinetotalitemsmonth'),
    url(r'^(?P<version>v1|v2)/statamazonlinetotalitemsweek/', StatAmazonLineTotalItemsWeekSet.as_view(), name='statamazonlinetotalitemsweek'),
    url(r'^(?P<version>v1|v2)/statamazonlinebuyboxday/', StatAmazonLineBuyBoxDaySet.as_view(), name='statamazonlinebuyboxday'),
    url(r'^(?P<version>v1|v2)/statamazonlineuvitemsconversionrateday/', StatAmazonLineUvItemsConversionRateDaySet.as_view(), name='statamazonlineuvitemsconversionrateday'),
    # Category Rank
    url(r'^(?P<version>v1|v2)/statamazonskucategoryrankday/', StatAmazonSkuCategoryRankDaySet.as_view(), name='statamazonskucategoryrankday'),
    url(r'^(?P<version>v1|v2)/statamazonskucategoryrankmonth/', StatAmazonSkuCategoryRankMonthSet.as_view(), name='statamazonskucategoryrankmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskucategoryrankweek/', StatAmazonSkuCategoryRankWeekSet.as_view(), name='statamazonskucategoryrankweek'),
    # Keyword Rank
    url(r'^(?P<version>v1|v2)/statamazonskukeywordrankday/', StatAmazonSkuKeywordRankDaySet.as_view(), name='statamazonskukeywordrankday'),
    url(r'^(?P<version>v1|v2)/statamazonskukeywordrankmonth/', StatAmazonSkuKeywordRankMonthSet.as_view(), name='statamazonskukeywordrankmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskukeywordrankweek/', StatAmazonSkuKeywordRankWeekSet.as_view(), name='statamazonskukeywordrankweek'),
    # Review Rank
    url(r'^(?P<version>v1|v2)/statamazonskureviewrankday/', StatAmazonSkuReviewRankDaySet.as_view(), name='statamazonskureviewrankday'),
    url(r'^(?P<version>v1|v2)/statamazonskureviewrankmonth/', StatAmazonSkuReviewRankMonthSet.as_view(), name='statamazonskureviewrankmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskureviewrankweek/', StatAmazonSkuReviewRankWeekSet.as_view(), name='statamazonskureviewrankweek'),
    # Composite Report
    url(r'^(?P<version>v1|v2)/statamazonskucompositereportday/', StatAmazonSkuCompositeReportDaySet.as_view(), name='statamazonskucompositereportday'),
    url(r'^(?P<version>v1|v2)/statamazonskucompositereportmonth/', StatAmazonSkuCompositeReportMonthSet.as_view(), name='statamazonskucompositereportmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskucompositereportweek/', StatAmazonSkuCompositeReportWeekSet.as_view(), name='statamazonskucompositereportweek'),
    # Price Log
    url(r'^(?P<version>v1|v2)/statamazonskupricelogday/', StatAmazonSkuPriceLogDaySet.as_view(), name='statamazonskupricelogday'),
    url(r'^(?P<version>v1|v2)/statamazonskupricelogmonth/', StatAmazonSkuPriceLogMonthSet.as_view(), name='statamazonskupricelogmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskupricelogweek/', StatAmazonSkuPriceLogWeekSet.as_view(), name='statamazonskupricelogweek'),
    # Bestseller Rank
    url(r'^(?P<version>v1|v2)/statamazonskubestsellerrankday/', StatAmazonSkuBestsellerRankDaySet.as_view(), name='statamazonskubestsellerrankday'),
    url(r'^(?P<version>v1|v2)/statamazonskubestsellerrankmonth/', StatAmazonSkuBestsellerRankMonthSet.as_view(), name='statamazonskubestsellerrankmonth'),
    url(r'^(?P<version>v1|v2)/statamazonskubestsellerrankweek/', StatAmazonSkuBestsellerRankWeekSet.as_view(), name='statamazonskubestsellerrankweek'),
    # Auth Department
    url(r'^(?P<version>v1|v2)/statauthdepartment/', StatAuthDepartmentView.as_view(), name='statauthdepartment'),
]
'''
request params:
{
"pager":{"size":5, "page":1}
,"order":["dy", "-sku"]
,"filter": [[{"sku-eq":"WBLANKET-PLUSH-5KG"},{"sku-eq":"HM-BED-TASSEL-COT-CR"}],[{"dy-lk-and":"2019-03-19"}]]
}
'''
| [
"kevintang002@gmail.com"
] | kevintang002@gmail.com |
8996a492d2fae89a7cdfe698186f932481087617 | 1ca466de0ffc59b48ab63afdda369ccc13fe4fd3 | /python_import/test_audio_sum_use_01.py | 9b81b7f39cbb116710eaff58efe5025bac9c108d | [] | no_license | forwiat/youngri | 380df95b6eb5c6eaa070099530b5ff9ba39cc8d0 | 9ed93838db56f202153413095b661273c1e33ddb | refs/heads/main | 2023-05-06T06:36:50.651872 | 2021-06-04T02:30:18 | 2021-06-04T02:30:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | # 국립 국어원 발화 말뭉치의 여성 화자를 화자별, 토픽9만 합쳐보자!!
import librosa
from pydub import AudioSegment
import soundfile as sf
import os
from voice_handling import import_test, voice_sum
import_test()  # NOTE(review): presumably a smoke test that voice_handling imported correctly — confirm
# ==== it will be great ====
# ---------------------------------------------------------------
# voice_sum: 오디오 한 wav 파일로 합쳐서 저장하기
# def voice_sum(form, pathaudio, save_dir, out_dir):
# **** example ****
# form(파일 형식): 'wav' or 'flac'
# audio_dir(여러 오디오가 있는 파일경로) = 'C:/nmb/nmb_data/F1F2F3/F3/'
# save_dir(flac일 경우 wav파일로 저장할 경로) = 'C:/nmb/nmb_data/F1F2F3/F3_to_wave/'
# out_dir(wav파일을 합쳐서 저장할 경로+파일명까지) = "C:/nmb/nmb_data/combine_test/F3_sum.wav"
# 1) wav일 때
# FIX(review): resolved a leftover, unparseable git merge conflict; kept the
# HEAD branch's paths. The discarded branch used
# 'C:/nmb/gan_0504/audio/b100_e5000_n100_male' /
# '..._total05000_sum.wav' — restore those if that was the intended run.
path_wav = 'C:/nmb/gan_0504/audio'
path_out = 'C:/nmb/gan_0504/audio/b96_e10000_n100_total10000_sum.wav'
# Concatenate every .wav under path_wav into the single file path_out.
voice_sum(form='wav', audio_dir=path_wav, save_dir=None, out_dir=path_out)
# confirmed working  (translated from Korean: "잘 되는 것 확인!")
'''
# 2) flac일 때
path_flac = 'C:/nmb/nmb_data/channel_split/pansori_fandm/'
path_save = 'C:/nmb/nmb_data/channel_split/pansori_fandm_wav/'
path_out = 'C:/nmb/nmb_data/channel_split/pansori_fandm.wav'
voice_sum(form='flac', audio_dir=path_flac, save_dir=path_save, out_dir=path_out)
# 잘 되는 것 확인!
''' | [
"lemontleo0311@gmail.com"
] | lemontleo0311@gmail.com |
5cbdb08ef6c1b94df2eec04e2133cd087b486f96 | 0d86bb399a13152cd05e3ba5684e4cb22daeb247 | /python-exercise/6-regex/py151_match_address.py | 8d42e31cc168df7f51b8b1e726739eb9d57c1862 | [] | no_license | tazbingor/learning-python2.7 | abf73f59165e09fb19b5dc270b77324ea00b047e | f08c3bce60799df4f573169fcdb1a908dcb8810f | refs/heads/master | 2021-09-06T05:03:59.206563 | 2018-02-02T15:22:45 | 2018-02-02T15:22:45 | 108,609,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18/1/12 下午5:47
# @Author : Aries
# @Site :
# @File : py151_match_address.py
# @Software: PyCharm
'''
15-5 匹配住址.
1180 Bordeaux Drive
3120 De la Cruz Boulevard
'''
from re import match
def match_address(address=''):
    """Return *address* unchanged when it consists solely of CJK ideographs,
    ASCII letters/digits, underscores and spaces; otherwise return None."""
    m = match(r'^[\u4E00-\u9FA5A-Za-z0-9_ ]+$', address)
    return m.group() if m is not None else None
def match_chinese_address(address=''):
    """Like match_address() but written with a unicode pattern for Chinese text.

    Fixed: the original used a ``ur'...'`` literal, which is a syntax error on
    Python 3. A plain ``u'...'`` literal keeps the ``\\uXXXX`` escapes working
    on both Python 2 and Python 3.
    """
    try:
        return match(u'^[\u4E00-\u9FA5A-Za-z0-9_ ]+$', address).group()
    except AttributeError:
        # re.match returned None -> .group() raised; treat as "no match".
        return None
if __name__ == '__main__':
    # FIX(review): the original used Python-2 print statements, which are a
    # syntax error under Python 3 (the rest of the file is py3-compatible).
    print(match_address('1180 Bordeaux Drive'))  # 1180 Bordeaux Drive
    print(match_address('3120 De la Cruz Boulevard'))  # 3120 De la Cruz Boulevard
    # py2 "print i," (space-separated, no newline) becomes end=' ' in py3
    for i in match_chinese_address(u'新街口南大街 百花深处胡同'):
        print(i, end=' ')
| [
"852353298@qq.com"
] | 852353298@qq.com |
41e85e05c129f9c11ad1d862da42bb45eac84f4b | 9a2ea68439d24632cdf1321db0137f412ad2b1ed | /analyzePcapWithScapy.py | 61ddb90dc464a22310a33722337e2244798418df | [] | no_license | parahaoer/AnalyzePcap | 91ec1656fd65e0aa51ce1fbc14d2cb7aac18fd22 | c67e6f0d72a9351c86c8ae05e55426e21ad2ec02 | refs/heads/master | 2022-12-13T05:19:19.832656 | 2020-08-28T07:28:16 | 2020-08-28T07:28:16 | 279,524,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,500 | py | from scapy.all import *
import re
def analyzePcap(filepath):
    """Scan one pcap file for RFB/VNC-looking TCP packets.

    Reads packets one by one, counts VNC-style heuristic hits
    (hasFeature01..04); when a file accumulates >= 30 hits it is recorded in
    the result text file and the module-global ``vnc_file_count`` counter is
    incremented.
    """
    s1 = PcapReader(filepath)
    # results are appended, so repeated runs accumulate into the same file
    ftxt = open('scapy_analyze_result/result_feature_great_than_30.txt','a', encoding="utf-8")
    write_line = []          # per-packet descriptions, flushed only on threshold
    vnc_feature_count = 0    # heuristic hits for this one file
    global vnc_file_count    # files-over-threshold counter shared with the caller
    No = 1                   # 1-based packet number, mirrors wireshark numbering
    try:
        # data is an Ethernet frame (translated from Chinese comment)
        data = s1.read_packet()
        while data is not None:
            if(is_ipv4_tcp(data)):
                # check the most specific signatures first
                if(hasFeature01(data)):
                    write_line.append("No." + str(No) + " maybe RFB 协议")
                    vnc_feature_count +=1
                elif(hasFeature03(data)):
                    write_line.append("No." + str(No) + " maybe pointerEvent")
                    vnc_feature_count +=1
                elif(hasFeature02(data)):
                    write_line.append("No." + str(No) + " maybe security types supported package")
                    vnc_feature_count +=1
                elif(hasFeature04(data)):
                    write_line.append("No." + str(No) + " maybe KeyEvent")
                    vnc_feature_count +=1
            data = s1.read_packet()
            No += 1
        s1.close()
    except:
        # NOTE(review): bare except silently stops at the first malformed
        # packet / EOF; partial counts collected so far are still evaluated.
        pass
    if(vnc_feature_count >= 30):
        vnc_file_count += 1
        ftxt.write(filepath + "\n")
        ftxt.write("vnc_feature_count=" + str(vnc_feature_count) + "\n")
        for line in write_line:
            ftxt.write("\t" + line + "\n")
    ftxt.close()
#print(type(data.payload)) #==><class 'scapy.layers.inet.IP'> 可以使用 help(scapy.layers.inet.IP) 查看帮助文档
def is_ipv4_tcp(data):
    """True when *data* is an Ethernet frame carrying IPv4 (ethertype 0x0800)
    whose IP protocol field is TCP (6)."""
    ip = data.payload
    is_ipv4_frame = data.fields['type'] == 2048  # 0x0800
    return is_ipv4_frame and ip.fields['version'] == 4 and ip.fields['proto'] == 6
def getTcpPayloadLen(data):
    """Compute the TCP payload size in bytes from the IP/TCP header fields.

    payload = IP total length - IP header length - TCP header length,
    where IHL and the TCP data offset are both expressed in 32-bit words.
    """
    ip = data.payload
    tcp = ip.payload
    ip_header_bytes = ip.fields['ihl'] * 4
    tcp_header_bytes = tcp.fields['dataofs'] * 4
    tcp_segment_bytes = ip.fields['len'] - ip_header_bytes
    return tcp_segment_bytes - tcp_header_bytes
def getTcpPayload(data):
    """Return the raw TCP payload of an Ethernet frame as a ``bytes`` object.

    On scapy layers ``.original`` is ``bytes``: indexing yields ints
    (e.g. 82 == 0x52, matching what wireshark displays) and its repr shows
    the ASCII view, e.g. b'RFB 003.008\\n'.
    """
    ip_packet = data.payload
    tcp_packet = ip_packet.payload
    return tcp_packet.payload.original
# tcp_payload 的长度为12字节, 且包含字符串“RFB”
def hasFeature01(data):
    """RFB/VNC version-handshake heuristic: a 12-byte payload mentioning "RFB".

    Returns a truthy re.Match on a hit, otherwise False/None (callers only
    test truthiness).
    """
    if getTcpPayloadLen(data) != 12:
        return False
    return re.search("RFB", str(getTcpPayload(data)))
# tcp_payload的第一个字节内容等于tcp_payload的长度 减一。则该数据包是服务器向客户端发送其支持的security type
def hasFeature02(data):
    """Security-types-list heuristic: the first payload byte (the number of
    supported types) is non-zero and equals payload length - 1."""
    first_byte = getTcpPayload(data)[0]
    return first_byte != 0 and first_byte == getTcpPayloadLen(data) - 1
# tcp_payload的长度为6字节,且tcp_payload的第一个字节内容为5.则该数据包是一个pointerEvent
def hasFeature03(data):
    """PointerEvent heuristic: a 6-byte payload whose message-type byte is 5."""
    return getTcpPayloadLen(data) == 6 and getTcpPayload(data)[0] == 5
# tcp_payload的长度为8字节,且tcp_payload的第一个字节内容为4.则该数据包是一个KeyEvent.
def hasFeature04(data):
    """KeyEvent heuristic: an 8-byte payload whose message-type byte is 4."""
    return getTcpPayloadLen(data) == 8 and getTcpPayload(data)[0] == 4
def get_filelist(dir):
    """Depth-first walk: run analyzePcap() on every regular file under *dir*.

    Captures that scapy cannot parse (Scapy_Exception) are skipped silently.
    """
    if os.path.isfile(dir):
        try:
            analyzePcap(dir)
        except Scapy_Exception:
            pass
    elif os.path.isdir(dir):
        for entry in os.listdir(dir):
            get_filelist(os.path.join(dir, entry))
get_filelist(newDir)
# count of pcap files whose VNC feature hits reached the threshold (see analyzePcap)
vnc_file_count = 0
get_filelist('C:\\Users\\dong\\Desktop\\workAtHome\\dridex\\dridexPcap')
print(vnc_file_count)
| [
"884101054@qq.com"
] | 884101054@qq.com |
0890a6bf27d61c2f9589587a2bffc15d5faec9cc | 06e10ace821eb75f88299b8721f7e42ad497ca4c | /libby/main.py | 25cb7a143748aef15cab1aebc3c877138cff324b | [] | no_license | kwarwp/henrietta | a36f4e7fecf652e6fb00600aeed92fe18acc481b | 76963014bb7d47c0713cc7b43d61fe1292794f72 | refs/heads/master | 2022-11-23T22:37:35.872643 | 2022-11-08T17:39:41 | 2022-11-08T17:39:41 | 128,998,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # henrietta.libby.main.py
class Templo:
    """Base temple area that tracks whether the player is currently inside."""

    def __init__(self):
        # player starts outside the area
        self._esta_no_templo = False

    def entra(self):
        """Mark this area as entered."""
        self._esta_no_templo = True

    def sai(self):
        """Mark this area as left."""
        self._esta_no_templo = False

    def entrou(self):
        """Return True while the player is inside this area."""
        return self._esta_no_templo
class EntradaDoTemplo(Templo):
    """Temple entrance; leaving it automatically enters its corridor."""

    def __init__(self):
        super().__init__()
        self.corredor = CorredorDoTemplo()

    def sai(self):
        """Leave the entrance and step into the corridor."""
        super().sai()
        self.corredor.entra()
class CorredorDoTemplo(Templo):
    """Corridor connecting the three themed rooms of the temple."""

    def __init__(self):
        super().__init__()
        # one room per theme: music, ocean, forest
        self.musica = SalaDoTemplo()
        self.oceano = SalaDoTemplo()
        self.floresta = SalaDoTemplo()
class SalaDoTemplo(Templo):
    """A themed room; behaves exactly like the base Templo."""

    def __init__(self):
        Templo.__init__(self)
def mostra_templo():
    """Print the entered/left state of every temple area (module globals)."""
    # "cd" in the output is the corridor ("corredor")
    print("musica:{}, oceano:{}, floresta:{}, entrada: {}, cd:{}".format(
        musica.entrou(), oceano.entrou(),
        floresta.entrou(), entrada.entrou(), corredor.entrou()
        )
    )
# Demo scenario: build the stand-alone rooms and the entrance, walk in,
# then leave (which moves the player into the corridor -- see EntradaDoTemplo.sai).
musica = SalaDoTemplo()
oceano = SalaDoTemplo()
floresta = SalaDoTemplo()
#floresta.entra()
#oceano.entra()
entrada = EntradaDoTemplo()
corredor = entrada.corredor
entrada.entra()
mostra_templo()
entrada.sai()
mostra_templo()
| [
"38007182+kwarwp@users.noreply.github.com"
] | 38007182+kwarwp@users.noreply.github.com |
d01ab306dfeee67e9bda6895a5e86a518044d490 | d14032ed6f3ec3b4f149a02df9e5bf6fbd8fda44 | /app/modules/auth/active.py | d58070692fb442d44cd4994d430d41a8cfbe55ad | [] | no_license | tomszhou/pony | 6bae77c6188c710eaf82898b6e792234ec773161 | 1fa6ab22a04f3cd2c1a130803833c5c22460a382 | refs/heads/master | 2021-05-17T23:21:42.023804 | 2018-07-09T05:54:45 | 2018-07-09T05:54:45 | 250,999,515 | 1 | 0 | null | 2020-03-29T09:50:46 | 2020-03-29T09:50:45 | null | UTF-8 | Python | false | false | 1,375 | py | from django.shortcuts import redirect
from app.models.account.account import UserAccount
from app.models.account.token import AccessToken
from app.models.account.info import UserInfo
from app.modules.common.util_struct import *
from app.modules.common.secret import verify_password
from app.modules.common.easemob import register_ease_mob
def active_account_handler(request):
    """Activate a user account from an activation link.

    Validates the access token and its signature, flips the account to
    active, registers the user with EaseMob IM, then redirects to login.
    """
    raw_token = request.GET.get("access_token")
    signature = request.GET.get("pass_port")

    # 1) the token must exist, be correctly signed and still be valid
    try:
        token_record = AccessToken.objects.get(access_token=raw_token)
    except AccessToken.DoesNotExist:
        return json_fail_response("请求无效!")
    if not verify_password(token_record.access_token + token_record.salt, signature):
        return json_fail_response("无效的用户请求")
    if token_record.status == 0:
        return json_fail_response("token失效")

    # 2) the linked account must exist and not be active yet
    try:
        account = UserAccount.objects.get(id=token_record.user_id)
    except UserAccount.DoesNotExist:
        return json_fail_response("激活用户不存在")
    if account.status == 1:
        return json_fail_response("当前用户已经激活")

    # 3) activate, then register with EaseMob IM (translated: 注册环信)
    account.status = 1
    account.save()
    user_info = UserInfo.query_format_info_by_user_id(account.id, use_cache=False)
    register_ease_mob(user_info['ease_mob'])
    return redirect("/auth/login")
| [
"wudong@eastwu.cn"
] | wudong@eastwu.cn |
8965f08a72396840cde95e71a464254a0bf45145 | 3bb57eb1f7c1c0aced487e7ce88f3cb84d979054 | /paetzold_nns/scripts/rankers/Run_Glavas.py | 081d7414d035f7ae7b6995b54c84c075c76a41a6 | [] | no_license | ghpaetzold/phd-backup | e100cd0bbef82644dacc73a8d1c6b757b2203f71 | 6f5eee43e34baa796efb16db0bc8562243a049b6 | refs/heads/master | 2020-12-24T16:41:21.490426 | 2016-04-23T14:50:07 | 2016-04-23T14:50:07 | 37,981,094 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | from lexenstein.rankers import *
from lexenstein.features import *
import sys
# CLI: train corpus, test corpus (VICTOR format), output path
victor_corpus = sys.argv[1]
test_victor_corpus = sys.argv[2].strip()
output_path = sys.argv[3].strip()
# Stanford POS tagger resources used by the context-similarity feature
model = '/export/data/ghpaetzold/benchmarking/lexmturk/scripts/evaluators/stanford-postagger-full-2015-04-20/models/english-bidirectional-distsim.tagger'
tagger = '/export/data/ghpaetzold/benchmarking/lexmturk/scripts/evaluators/stanford-postagger-full-2015-04-20/stanford-postagger.jar'
java = '/usr/bin/java'
# Simplicity-oriented n-gram probability features over a SubtlexIMDB 5-gram LM
fe = FeatureEstimator()
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 1, 0, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 0, 1, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 1, 1, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 2, 0, 'Simplicity')
fe.addNGramProbabilityFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 0, 2, 'Simplicity')
#fe.addCollocationalFeature('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt', 1, 1, 'Simplicity')
# GloVe word-vector similarity features
w2vmodel = '/export/data/ghpaetzold/word2vecvectors/models/word_vectors_all_200_glove.bin'
fe.addWordVectorSimilarityFeature(w2vmodel, 'Simplicity')
fe.addWordVectorContextSimilarityFeature(w2vmodel, model, tagger, java, 'Simplicity')
# Rank candidate substitutions with the Glavas ranker
br = GlavasRanker(fe)
ranks = br.getRankings(test_victor_corpus)
lm = kenlm.LanguageModel('/export/data/ghpaetzold/subtitlesimdb/corpora/160715/subtleximdb.5gram.unk.bin.txt')
o = open(output_path, 'w')
f = open(test_victor_corpus)
# NOTE(review): f is never closed and is assumed to have one line per ranking -- confirm
for rank in ranks:
    # column 1 of the VICTOR line is the target (complex) word
    target = f.readline().strip().split('\t')[1].strip()
    targetp = lm.score(target)
    newline = ''
    if len(rank)>0:
        candp = lm.score(rank[0])
        # keep the target first when the LM scores it at least as high
        # as the best-ranked candidate
        if targetp>=candp:
            newline = target + '\t'
        else:
            newline = ''
        for r in rank:
            newline += r + '\t'
    else:
        newline = target
    o.write(newline.strip() + '\n')
o.close()
| [
"ghpaetzold@outlook.com"
] | ghpaetzold@outlook.com |
790f2c451355c35536dceb9e440223556ded9d71 | 1bad7fc3fdd9e38b7ff50a7825565b7b190fa5b7 | /qrback/migrations/0026_company_slogan.py | c8bcd1c71abe99686a4ad68070fc1d0f902b136d | [] | no_license | furkankykc/QRforAll | d4be43e403d75c86436ed9d9e2b222619ecf92b1 | 6cc0555fdc27797586628f2012523dce5212b321 | refs/heads/master | 2023-07-10T13:02:27.618792 | 2021-08-05T07:22:29 | 2021-08-05T07:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 3.0.8 on 2020-08-22 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the optional ``slogan`` field to Company."""

    dependencies = [
        ('qrback', '0025_company_counter'),
    ]

    operations = [
        migrations.AddField(
            model_name='company',
            name='slogan',
            # optional free-text slogan, up to 50 characters
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| [
"furkanfbr@gmail.com"
] | furkanfbr@gmail.com |
63389afedfb107a2984a334fcbf2d0ddd4c0af9e | d1d9b21a81a354baf1c5bc1b3db4ee38825f794b | /_eh.py | 52189ad74a92a321aac330cbb9af43576d83b004 | [] | no_license | pytsite/plugin-seo | 5c235630490fea8d0067d8c03c76a9b1678d6c51 | 486d4a8e8ab42938ca73b7bd757b7f8bee51ed78 | refs/heads/master | 2021-01-11T18:49:19.960731 | 2018-08-02T11:13:42 | 2018-08-02T11:13:42 | 79,632,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | """PytSite SEO Plugin Event Handlers.
"""
__author__ = 'Alexander Shepetko'
__email__ = 'a@shepetko.com'
__license__ = 'MIT'
from pytsite import metatag as _metatag, reg as _reg
def router_dispatch():
    """On each request dispatch, register the globally configured SEO meta tags."""
    configured_tags = _reg.get('seo.global_metatags', [])
    for tag in configured_tags:
        _metatag.t_set(tag['name'], tag['content'])
| [
"a@shepetko.com"
] | a@shepetko.com |
9a8df1df3b7aaeea7f01727f104107208d1bf7fd | 02b1eccce01f515089ecb40862fc01c8b214fc50 | /auth.py | cb6da04d15be14443a0013fbfededf9ac506b531 | [
"MIT"
] | permissive | Nekmo/nekutils | 1f8a1f4e309733d31d16ca34c266367369f2cb45 | 1de28bb810625db1d4c575f61426ab67e7d1f1e0 | refs/heads/master | 2016-08-12T16:10:13.838188 | 2016-04-01T14:45:20 | 2016-04-01T14:45:20 | 43,179,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py |
class AuthAddress(object):
    """Lazily parse a "user@host:port" style address string.

    Each component is computed on first access and cached; components that
    are absent cache as ``False`` (``None`` keeps meaning "not parsed yet").
    """

    def __init__(self, address):
        self._user = None
        self._host = None
        self._port = None
        self._endpoint = None
        self.address = address

    @property
    def user(self):
        """User part before '@', or False when the address has none."""
        if self._user is None:
            parts = self.address.split('@')
            self._user = parts[0] if len(parts) > 1 else False
        return self._user

    @property
    def endpoint(self):
        """The "host[:port]" part (everything after the optional user)."""
        if self._endpoint is None:
            parts = self.address.split('@')
            self._endpoint = parts[1] if len(parts) > 1 else parts[0]
        return self._endpoint

    @property
    def host(self):
        """Host name part of the endpoint."""
        if self._host is None:
            self._host = self.endpoint.split(':')[0]
        return self._host

    @property
    def port(self):
        """Port part as a string, or False when the endpoint has none."""
        if self._port is None:
            parts = self.endpoint.split(':')
            self._port = parts[1] if len(parts) > 1 else False
        return self._port

    def __str__(self):
        return self.address
class UserPassword(object):
    """Lazily parse a "user:password" credential string."""

    def __init__(self, user_password):
        self._user = None
        self._password = None
        self.user_password = user_password

    @property
    def user(self):
        """User part before the first ':'.

        Fixed: the original cached the whole ``split(':')`` list instead of
        its first element, so ``user`` returned e.g. ``['alice', 'secret']``
        rather than ``'alice'`` (contrast with the ``password`` property).
        """
        if self._user is None:
            self._user = self.user_password.split(':')[0]
        return self._user

    @property
    def password(self):
        """Password part after the first ':', or False when absent."""
        if self._password is None:
            parts = self.user_password.split(':')
            self._password = parts[1] if len(parts) > 1 else False
        return self._password
"contacto@nekmo.com"
] | contacto@nekmo.com |
0db417c20a5d963481fb0f4b056258b3c8389ac1 | 7d5e694aba546c166004cab8e592a000fb7283ef | /PyQt5_Udemy/01_Basic_Widgets/07_comboBox_2.py | 72affbce9bf5aa4a6a113f381cf8eab7b81f8c4c | [] | no_license | OnurKaraguler/PyQt5 | 45ffe320911f25f2ad0e318de2c7e3851db7be0c | 909546b53c0f80c1eae27c660f47cd5ded3ff1a6 | refs/heads/master | 2022-12-21T09:06:24.063816 | 2020-09-24T14:54:11 | 2020-09-24T14:54:11 | 298,299,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | import sys, os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
class Main(QWidget):
    """Demo window showing QComboBox usage (add/modify items, signals)."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('combo box demo')
        self.setGeometry(500,150,300,300)
        self.setFixedSize(self.size()) # lock the window size (translated from Turkish)
        self.UI()
        self.show()

    def UI(self):
        """Build the user interface."""
        self.window()

    def window(self):
        """Create the combo box, populate it and wire its signals."""
        self.cb = QComboBox()
        self.cb.addItem("C")
        self.cb.addItem("C++")
        self.cb.addItems(["Java", "C#", "Python"])
        self.cb.currentIndexChanged.connect(self.selectionchange)
        print(self.cb.count())
        print(self.cb.itemText(2))
        # replace the item at index 2 ("Java") in place
        self.cb.setItemText(2,'Onur')
        # self.cb.activated[str].connect(self.activated)
        # highlighted fires while hovering the popup, before a selection is made
        self.cb.highlighted[str].connect(self.activated)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.cb)
        self.mainLayout = QHBoxLayout()
        self.mainLayout.addLayout(self.layout)
        self.setLayout(self.mainLayout)

    def selectionchange(self, i):
        # slot for currentIndexChanged; demo code below kept for reference
        pass
        # print("Items in the list are :")
        # for count in range(self.cb.count()):
        #     print(self.cb.itemText(count))
        # print("Current index", i, "selection changed ", self.cb.currentText())
        # self.cb.clear()

    def activated(self,text):
        """Slot for highlighted[str]: echo the hovered item's text."""
        print(text)
if __name__=='__main__':
    # standard Qt bootstrap: create the app, show the window, run the event loop
    app = QApplication(sys.argv)
    window = Main()
    sys.exit(app.exec_())
"onurkaraguler@hotmail.com"
] | onurkaraguler@hotmail.com |
c3765a33bb6228a494b01e9c2042906c4ff81caf | 8412b576f09202e8b07a241749d31fd6ef5380c3 | /rpc_interface.py | fe2bc978670679ced000f0f3ccc914e095611aff | [
"MIT"
] | permissive | meeh420/ngcccbase | 2d7f64e16972904a4c4a97d300f3e301632b98d0 | 1c15e9f813076151b9c758e2b8c7de086fccedc0 | refs/heads/master | 2020-12-31T01:11:23.659834 | 2013-11-18T01:10:09 | 2013-11-18T01:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,652 | py | """
rpc_interface.py
This file connects ngccc-server.py to wallet_controller.py
The main functions that this file has are to take the
JSON-RPC commands from the server and pass them through to
the wallet controller.
Note console_interface.py does a similar thing for ngccc.py
to wallet_controller.py
"""
from wallet_controller import WalletController
from pwallet import PersistentWallet
import pyjsonrpc
import json
# Module-level singletons shared by every RPC handler below: the persistent
# wallet, its data model and the controller that operates on it.
wallet = PersistentWallet()
wallet.init_model()
model = wallet.get_model()
controller = WalletController(model)
def get_asset_definition(moniker):
    """Look up the asset/color definition registered under *moniker*.

    Raises Exception when no asset with that moniker exists.
    """
    adm = model.get_asset_definition_manager()
    asset = adm.get_asset_by_moniker(moniker)
    if not asset:
        raise Exception("asset %s not found" % moniker)
    return asset
def balance(moniker):
    """Return the balance in atoms (Satoshi for the generic "bitcoin" color)
    for the asset registered under *moniker*."""
    return controller.get_balance(get_asset_definition(moniker))
def newaddr(moniker):
    """Create a fresh receiving address for the given asset/color and
    return it as a string."""
    new_address = controller.get_new_address(get_asset_definition(moniker))
    return new_address.get_address()
def alladdresses(moniker):
    """Return every known address string for the given asset/color."""
    asset = get_asset_definition(moniker)
    return [address.get_address()
            for address in controller.get_all_addresses(asset)]
def addasset(moniker, color_description):
    """Import an externally issued color definition so this wallet can
    receive coins of that color."""
    asset_data = {
        "monikers": [moniker],
        "color_set": [color_description],
    }
    controller.add_asset_definition(asset_data)
def dump_config():
    """Return the current wallet configuration as pretty-printed JSON."""
    dict_config = dict(wallet.wallet_config.iteritems())
    return json.dumps(dict_config, indent=4)
def setval(key, value):
    """Set a configuration value.

    *key* is a dotted path ("key.subkey.subsubkey"); *value* is parsed as
    JSON before being stored.

    Fixed: this module-level function is registered directly in the JSON-RPC
    ``methods`` table, yet it kept the ``self`` parameter and ``self.wallet``
    references from the console-interface class it was copied from, so every
    RPC call would fail. It now uses the module-level ``wallet`` instance
    (print calls use the parenthesized form, valid on py2 and py3).
    """
    if not (key and value):
        print("setval command expects: key value")
        return
    kpath = key.split('.')
    try:
        value = json.loads(value)
    except ValueError:
        print("didn't understand the value: %s" % value)
        return
    try:
        # traverse the path until we get to the value we need to set
        if len(kpath) > 1:
            branch = wallet.wallet_config[kpath[0]]
            cdict = branch
            for k in kpath[1:-1]:
                cdict = cdict[k]
            cdict[kpath[-1]] = value
            value = branch
        wallet.wallet_config[kpath[0]] = value
    except TypeError:
        print("could not set the key: %s" % key)
def getval(key):
    """Print (as JSON) the configuration value at dotted path *key*.

    Fixed: dropped the stray ``self`` parameter and ``self.wallet``
    references copied from the console-interface class so the function works
    when dispatched through the JSON-RPC ``methods`` table; it now reads the
    module-level ``wallet``. Output stays print-based to preserve the
    original behaviour (the RPC result is None).
    """
    if not key:
        print("getval command expects: key")
        return
    kpath = key.split('.')
    cv = wallet.wallet_config
    try:
        # traverse the path until we get the value
        for k in kpath:
            cv = cv[k]
        print(json.dumps(cv))
    except (KeyError, TypeError):
        print("could not find the key: %s" % key)
def send(moniker, address, amount):
    """Send *amount* (in atoms) of the asset/color *moniker* to *address*."""
    controller.send_coins(address, get_asset_definition(moniker), amount)
def issue(moniker, pck, units, atoms_in_unit):
    """Issue a new color named *moniker* using coloring scheme *pck*,
    creating *units* shares of *atoms_in_unit* atoms each."""
    controller.issue_coins(moniker, pck, units, atoms_in_unit)
def scan():
    """Rescan the blockchain to refresh the per-address UTXO database."""
    controller.scan_utxos()
def history(moniker):
    """Return the transaction history for the asset/color *moniker*.

    Fixed: this module-level function is registered in the JSON-RPC
    ``methods`` table but kept a ``self`` parameter and
    ``self.get_asset_definition`` / ``self.controller`` attribute lookups
    copied from the console-interface class; it now calls the module-level
    helpers directly (callers passing ``moniker=...`` keep working).
    """
    asset = get_asset_definition(moniker)
    return controller.get_history(asset)
class RPCRequestHandler(pyjsonrpc.HttpRequestHandler):
    """JSON-RPC handler for ngccc's commands.
    The command-set is identical to the console interface.
    """
    # Maps RPC method names to the module-level command functions above.
    # NOTE(review): ``setval``/``getval``/``history`` are written with a
    # ``self``/kwargs signature while the others are plain functions --
    # confirm pyjsonrpc dispatches compatible arguments to each.
    methods = {
        "balance": balance,
        "newaddr": newaddr,
        "alladdresses": alladdresses,
        "addasset": addasset,
        "dump_config": dump_config,
        "setval": setval,
        "getval": getval,
        "send": send,
        "issue": issue,
        "scan": scan,
        "history": history,
    }
| [
"jaejoon@gmail.com"
] | jaejoon@gmail.com |
b4050eea1c4ce4a8157554ca83fc9bbb7fcd3a6d | d8cbe9ce0469f72b8929af01538b6ceddff10a38 | /homeassistant/components/scrape/config_flow.py | cbd0ed7d525a4948b89d1fa79f9dd60463e63892 | [
"Apache-2.0"
] | permissive | piitaya/home-assistant | 9c1ba162dac9604e4d43e035e74bad7bba327f0b | 48893738192431f96966998c4ff7a3723a2f8f4a | refs/heads/dev | 2023-03-07T16:13:32.117970 | 2023-01-10T17:47:48 | 2023-01-10T17:47:48 | 172,578,293 | 3 | 1 | Apache-2.0 | 2023-02-22T06:15:56 | 2019-02-25T20:19:40 | Python | UTF-8 | Python | false | false | 9,208 | py | """Adds config flow for Scrape integration."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any
import uuid
import voluptuous as vol
from homeassistant.components.rest import create_rest_data_from_config
from homeassistant.components.rest.data import DEFAULT_TIMEOUT
from homeassistant.components.rest.schema import DEFAULT_METHOD, METHODS
from homeassistant.components.sensor import (
CONF_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
CONF_ATTRIBUTE,
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_RESOURCE,
CONF_TIMEOUT,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
UnitOfTemperature,
)
from homeassistant.core import async_get_hass
from homeassistant.helpers import config_validation as cv, entity_registry as er
from homeassistant.helpers.schema_config_entry_flow import (
SchemaCommonFlowHandler,
SchemaConfigFlowHandler,
SchemaFlowError,
SchemaFlowFormStep,
SchemaFlowMenuStep,
)
from homeassistant.helpers.selector import (
BooleanSelector,
NumberSelector,
NumberSelectorConfig,
NumberSelectorMode,
ObjectSelector,
SelectSelector,
SelectSelectorConfig,
SelectSelectorMode,
TemplateSelector,
TextSelector,
TextSelectorConfig,
TextSelectorType,
)
from . import COMBINED_SCHEMA
from .const import CONF_INDEX, CONF_SELECT, DEFAULT_NAME, DEFAULT_VERIFY_SSL, DOMAIN
# Form fields describing the REST resource to scrape (URL, auth, TLS, timeout).
RESOURCE_SETUP = {
    vol.Required(CONF_RESOURCE): TextSelector(
        TextSelectorConfig(type=TextSelectorType.URL)
    ),
    vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): SelectSelector(
        SelectSelectorConfig(options=METHODS, mode=SelectSelectorMode.DROPDOWN)
    ),
    vol.Optional(CONF_AUTHENTICATION): SelectSelector(
        SelectSelectorConfig(
            options=[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION],
            mode=SelectSelectorMode.DROPDOWN,
        )
    ),
    vol.Optional(CONF_USERNAME): TextSelector(),
    vol.Optional(CONF_PASSWORD): TextSelector(
        TextSelectorConfig(type=TextSelectorType.PASSWORD)
    ),
    vol.Optional(CONF_HEADERS): ObjectSelector(),
    vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): BooleanSelector(),
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): NumberSelector(
        NumberSelectorConfig(min=0, step=1, mode=NumberSelectorMode.BOX)
    ),
}
# Form fields describing one scraped sensor (CSS select, index, attribute,
# template, and the usual sensor metadata).
SENSOR_SETUP = {
    vol.Required(CONF_SELECT): TextSelector(),
    vol.Optional(CONF_INDEX, default=0): NumberSelector(
        NumberSelectorConfig(min=0, step=1, mode=NumberSelectorMode.BOX)
    ),
    vol.Optional(CONF_ATTRIBUTE): TextSelector(),
    vol.Optional(CONF_VALUE_TEMPLATE): TemplateSelector(),
    vol.Optional(CONF_DEVICE_CLASS): SelectSelector(
        SelectSelectorConfig(
            options=[cls.value for cls in SensorDeviceClass],
            mode=SelectSelectorMode.DROPDOWN,
        )
    ),
    vol.Optional(CONF_STATE_CLASS): SelectSelector(
        SelectSelectorConfig(
            options=[cls.value for cls in SensorStateClass],
            mode=SelectSelectorMode.DROPDOWN,
        )
    ),
    vol.Optional(CONF_UNIT_OF_MEASUREMENT): SelectSelector(
        SelectSelectorConfig(
            options=[cls.value for cls in UnitOfTemperature],
            custom_value=True,
            mode=SelectSelectorMode.DROPDOWN,
        )
    ),
}
async def validate_rest_setup(
    handler: SchemaCommonFlowHandler, user_input: dict[str, Any]
) -> dict[str, Any]:
    """Validate rest setup by actually fetching the configured resource."""
    hass = async_get_hass()
    rest_config: dict[str, Any] = COMBINED_SCHEMA(user_input)
    try:
        rest = create_rest_data_from_config(hass, rest_config)
        await rest.async_update()
    # Broad on purpose: any fetch/parse failure is surfaced to the user as
    # the single "resource_error" form error.
    except Exception as err:
        raise SchemaFlowError("resource_error") from err
    if rest.data is None:
        raise SchemaFlowError("resource_error")
    return user_input
async def validate_sensor_setup(
    handler: SchemaCommonFlowHandler, user_input: dict[str, Any]
) -> dict[str, Any]:
    """Normalize a new sensor config and append it to the stored sensor list."""
    # Coerce the index to int and tag the sensor with a fresh unique id.
    user_input[CONF_INDEX] = int(user_input[CONF_INDEX])
    user_input[CONF_UNIQUE_ID] = str(uuid.uuid1())
    # The flow normally merges the returned dict into the options; here the
    # options are mutated directly so the sensor lands in its sub-list, and
    # an empty dict is returned so nothing else gets merged.
    if SENSOR_DOMAIN not in handler.options:
        handler.options[SENSOR_DOMAIN] = []
    handler.options[SENSOR_DOMAIN].append(user_input)
    return {}
async def validate_select_sensor(
    handler: SchemaCommonFlowHandler, user_input: dict[str, Any]
) -> dict[str, Any]:
    """Remember which sensor index the user picked, for the edit step."""
    chosen = int(user_input[CONF_INDEX])
    handler.flow_state["_idx"] = chosen
    return {}
async def get_select_sensor_schema(handler: SchemaCommonFlowHandler) -> vol.Schema:
    """Return schema for selecting a sensor."""
    # Dropdown of existing sensors: value is the stringified list index,
    # label is the sensor's configured name.
    return vol.Schema(
        {
            vol.Required(CONF_INDEX): vol.In(
                {
                    str(index): config[CONF_NAME]
                    for index, config in enumerate(handler.options[SENSOR_DOMAIN])
                },
            )
        }
    )
async def get_edit_sensor_suggested_values(
    handler: SchemaCommonFlowHandler,
) -> dict[str, Any]:
    """Return suggested values for sensor editing."""
    # "_idx" was stashed by validate_select_sensor in the previous step.
    idx: int = handler.flow_state["_idx"]
    return handler.options[SENSOR_DOMAIN][idx]
async def validate_sensor_edit(
    handler: SchemaCommonFlowHandler, user_input: dict[str, Any]
) -> dict[str, Any]:
    """Merge the edited values into the previously selected sensor."""
    user_input[CONF_INDEX] = int(user_input[CONF_INDEX])
    # The selected sensor's position was stashed by validate_select_sensor.
    position: int = handler.flow_state["_idx"]
    # Update the stored sub-item in place instead of letting the flow merge
    # the result into the top-level options; return nothing to merge.
    target = handler.options[SENSOR_DOMAIN][position]
    target.update(user_input)
    return {}
async def get_remove_sensor_schema(handler: SchemaCommonFlowHandler) -> vol.Schema:
    """Return schema for sensor removal."""
    # Multi-select of existing sensors, keyed by stringified list index.
    return vol.Schema(
        {
            vol.Required(CONF_INDEX): cv.multi_select(
                {
                    str(index): config[CONF_NAME]
                    for index, config in enumerate(handler.options[SENSOR_DOMAIN])
                },
            )
        }
    )
async def validate_remove_sensor(
    handler: SchemaCommonFlowHandler, user_input: dict[str, Any]
) -> dict[str, Any]:
    """Validate remove sensor."""
    removed_indexes: set[str] = set(user_input[CONF_INDEX])
    # Standard behavior is to merge the result with the options.
    # In this case, we want to remove sub-items so we update the options directly.
    entity_registry = er.async_get(handler.parent_handler.hass)
    sensors: list[dict[str, Any]] = []
    sensor: dict[str, Any]
    # Keep the sensors the user did not select; for the removed ones, also
    # drop their entity from the registry so no orphan entity remains.
    for index, sensor in enumerate(handler.options[SENSOR_DOMAIN]):
        if str(index) not in removed_indexes:
            sensors.append(sensor)
        elif entity_id := entity_registry.async_get_entity_id(
            SENSOR_DOMAIN, DOMAIN, sensor[CONF_UNIQUE_ID]
        ):
            entity_registry.async_remove(entity_id)
    handler.options[SENSOR_DOMAIN] = sensors
    return {}
# Compiled schemas: resource form, edit-sensor form (no name field), and the
# add-sensor form which prepends an optional name to the sensor fields.
DATA_SCHEMA_RESOURCE = vol.Schema(RESOURCE_SETUP)
DATA_SCHEMA_EDIT_SENSOR = vol.Schema(SENSOR_SETUP)
DATA_SCHEMA_SENSOR = vol.Schema(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): TextSelector(),
        **SENSOR_SETUP,
    }
)
# Initial config flow: configure the resource, then the first sensor.
CONFIG_FLOW = {
    "user": SchemaFlowFormStep(
        schema=DATA_SCHEMA_RESOURCE,
        next_step="sensor",
        validate_user_input=validate_rest_setup,
    ),
    "sensor": SchemaFlowFormStep(
        schema=DATA_SCHEMA_SENSOR,
        validate_user_input=validate_sensor_setup,
    ),
}
# Options flow: menu of resource editing plus add/edit/remove sensor steps.
OPTIONS_FLOW = {
    "init": SchemaFlowMenuStep(
        ["resource", "add_sensor", "select_edit_sensor", "remove_sensor"]
    ),
    "resource": SchemaFlowFormStep(
        DATA_SCHEMA_RESOURCE,
        validate_user_input=validate_rest_setup,
    ),
    "add_sensor": SchemaFlowFormStep(
        DATA_SCHEMA_SENSOR,
        suggested_values=None,
        validate_user_input=validate_sensor_setup,
    ),
    "select_edit_sensor": SchemaFlowFormStep(
        get_select_sensor_schema,
        suggested_values=None,
        validate_user_input=validate_select_sensor,
        next_step="edit_sensor",
    ),
    "edit_sensor": SchemaFlowFormStep(
        DATA_SCHEMA_EDIT_SENSOR,
        suggested_values=get_edit_sensor_suggested_values,
        validate_user_input=validate_sensor_edit,
    ),
    "remove_sensor": SchemaFlowFormStep(
        get_remove_sensor_schema,
        suggested_values=None,
        validate_user_input=validate_remove_sensor,
    ),
}
class ScrapeConfigFlowHandler(SchemaConfigFlowHandler, domain=DOMAIN):
    """Handle a config flow for Scrape."""
    # Schema-driven flows defined above; the base class does the dispatching.
    config_flow = CONFIG_FLOW
    options_flow = OPTIONS_FLOW
    def async_config_entry_title(self, options: Mapping[str, Any]) -> str:
        """Return config entry title."""
        # The scraped URL doubles as the entry title.
        return options[CONF_RESOURCE]
| [
"noreply@github.com"
] | piitaya.noreply@github.com |
052d73f6b96d29283777078b074e925cc5d8b8f4 | ac1fdf53359b53e183fb9b2602328595b07cf427 | /ParlAI/parlai/scripts/convert_data_to_fasttext_format.py | 70ebc7703659461c6ac56e9bc58a7c97fc00ca52 | [] | no_license | Ufukdogann/MasterThesis | 780410c5df85b789136b525bce86ba0831409233 | b09ede1e3c88c4ac3047800f5187c671eeda18be | refs/heads/main | 2023-01-24T18:09:52.285718 | 2020-11-27T16:14:29 | 2020-11-27T16:14:29 | 312,416,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:efecd6dfb74a652f16bcda15d3f0cf10eb85b19973aaaee4dabc722b6798caf9
size 3094
| [
"134679852Ufuk*"
] | 134679852Ufuk* |
95a028c6657a6a3a6252707015f2e449e578cd0c | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-servermanager/azure/mgmt/servermanager/models/__init__.py | e6e542614005fecb3af1a7d6d4f16fb74b7017be | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 2,795 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
from .encryption_jwk_resource import EncryptionJwkResource
from .gateway_status import GatewayStatus
from .gateway_resource import GatewayResource
from .gateway_profile import GatewayProfile
from .gateway_parameters import GatewayParameters
from .node_resource import NodeResource
from .node_parameters import NodeParameters
from .session_resource import SessionResource
from .session_parameters import SessionParameters
from .version import Version
from .power_shell_session_resource import PowerShellSessionResource
from .prompt_field_description import PromptFieldDescription
from .power_shell_command_result import PowerShellCommandResult
from .power_shell_command_results import PowerShellCommandResults
from .power_shell_command_status import PowerShellCommandStatus
from .power_shell_session_resources import PowerShellSessionResources
from .power_shell_command_parameters import PowerShellCommandParameters
from .prompt_message_response import PromptMessageResponse
from .power_shell_tab_completion_parameters import PowerShellTabCompletionParameters
from .power_shell_tab_completion_results import PowerShellTabCompletionResults
from .error import Error, ErrorException
from .gateway_resource_paged import GatewayResourcePaged
from .node_resource_paged import NodeResourcePaged
from .server_management_enums import (
UpgradeMode,
RetentionPeriod,
CredentialDataFormat,
PromptFieldType,
GatewayExpandOption,
PowerShellExpandOption,
)
__all__ = [
'Resource',
'EncryptionJwkResource',
'GatewayStatus',
'GatewayResource',
'GatewayProfile',
'GatewayParameters',
'NodeResource',
'NodeParameters',
'SessionResource',
'SessionParameters',
'Version',
'PowerShellSessionResource',
'PromptFieldDescription',
'PowerShellCommandResult',
'PowerShellCommandResults',
'PowerShellCommandStatus',
'PowerShellSessionResources',
'PowerShellCommandParameters',
'PromptMessageResponse',
'PowerShellTabCompletionParameters',
'PowerShellTabCompletionResults',
'Error', 'ErrorException',
'GatewayResourcePaged',
'NodeResourcePaged',
'UpgradeMode',
'RetentionPeriod',
'CredentialDataFormat',
'PromptFieldType',
'GatewayExpandOption',
'PowerShellExpandOption',
]
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
f39c368702daa9eef179818bc2e09dc0458cd47a | 863b664aa0849c9c90124e0c644490feae42b9e9 | /python3-demo/app/log.py | 70e7f2a8c22ad938b31a26f384a052bea88aa920 | [] | no_license | mingz2013/study.python | 75d856a77c752a6b6c58b8fcdbd4c2c2bb9189fe | d65017912aa8f8b2ec932518a95990d1ff0c8c6e | refs/heads/master | 2021-12-28T04:57:11.266866 | 2021-08-03T02:59:10 | 2021-08-03T02:59:10 | 78,043,106 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | # -*- coding: utf-8 -*-
"""
@FileName: log
@Time: 2020/5/19 15:41
@Author: zhaojm
Module Description
"""
from datetime import datetime
# from app.config import config
def register_logging():
    """Configure and return the module logger.

    Writes daily-named info and error logs under ``logs/`` via rotating
    file handlers (the ``logs`` directory must already exist).
    """
    import logging
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    from logging.handlers import RotatingFileHandler
    # Internal log files, one pair per day.
    f = datetime.now().strftime('%Y-%m-%d')
    rotating_handler1 = RotatingFileHandler('logs/info-' + f + '.log', maxBytes=1 * 1024 * 1024 * 1024, backupCount=100)
    rotating_handler2 = RotatingFileHandler('logs/error-' + f + '.log', maxBytes=1 * 1024 * 1024 * 1024,
                                            backupCount=100)
    formatter1 = logging.Formatter(
        '%(asctime)s %(levelname)s - ''in %(funcName)s [%(filename)s:%(lineno)d]: %(message)s')
    rotating_handler1.setFormatter(formatter1)
    rotating_handler2.setFormatter(formatter1)
    # NOTE(review): the literal 'name' looks like it was meant to be
    # ``__name__`` -- confirm before changing, all wrappers below share it.
    logger = logging.getLogger('name')
    logger.addHandler(rotating_handler1)
    logger.addHandler(rotating_handler2)
    logger.setLevel(logging.INFO)  # overridden to DEBUG a few lines below
    rotating_handler2.setLevel(logging.ERROR)
    # if config.debug:
    #     app.logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    return logger
# Module-level singleton used by the wrapper functions below.
logger = register_logging()
# def _logFunc(*argl, **argd):
# # ftlog.xxx(... caller=self) for instance method
# # ftlog.xxx(... caller=cls) for @classmethod
# callerClsName = ""
# try:
# _caller = argd.get("caller", None)
# if _caller:
# if not hasattr(_caller, "__name__"):
# _caller = _caller.__class__
# callerClsName = _caller.__name__
# del argd["caller"]
# except:
# pass
# if log_level > LOG_LEVEL_DEBUG:
# print "[ ]",
# else:
# print "[" + callerClsName + "." + sys._getframe().f_back.f_back.f_code.co_name + "]",
# return argd
def _log(*argl, **argd):
_log_msg = ""
for l in argl:
if type(l) == tuple:
ps = str(l)
else:
try:
ps = "%r" % l
except:
try:
ps = str(l)
except:
ps = 'ERROR LOG OBJECT'
if type(l) == str:
_log_msg += ps[1:-1] + ' '
# elif type(l) == unicode:
# _log_msg += ps[2:-1] + ' '
else:
_log_msg += ps + ' '
if len(argd) > 0:
_log_msg += str(argd)
# ct = datetime.now().strftime('%m-%d %H:%M:%S.%f')
# _log_msg = ct + " " + _log_msg
return _log_msg
# Thin module-level wrappers: render the arguments via _log and forward to
# the shared module logger, one wrapper per severity level.
def debug(*args, **kwargs):
    msg = _log(*args, **kwargs)
    logger.debug(msg)
def info(*args, **kwargs):
    msg = _log(*args, **kwargs)
    logger.info(msg)
def error(*args, **kwargs):
    msg = _log(*args, **kwargs)
    logger.error(msg)
def exception(*args, **kwargs):
    # Use inside an ``except`` block: includes the active traceback.
    msg = _log(*args, **kwargs)
    logger.exception(msg)
def warn(*args, **kwargs):
    # NOTE: Logger.warn is deprecated in favor of Logger.warning.
    msg = _log(*args, **kwargs)
    logger.warn(msg)
def warning(*args, **kwargs):
    msg = _log(*args, **kwargs)
    logger.warning(msg)
def critical(*args, **kwargs):
    msg = _log(*args, **kwargs)
    logger.critical(msg)
| [
"305603665@qq.com"
] | 305603665@qq.com |
2d401730bc0c78d7c4c300b3aec2845406bb0f39 | b885eaf4df374d41c5a790e7635726a4a45413ca | /LeetCode/Session3/MinimumDepth.py | f6f31c751b0207b9ab055f367ff94a5a73cd8970 | [
"MIT"
] | permissive | shobhitmishra/CodingProblems | 2a5de0850478c3c2889ddac40c4ed73e652cf65f | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | refs/heads/master | 2021-01-17T23:22:42.442018 | 2020-04-17T18:25:24 | 2020-04-17T18:25:24 | 84,218,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    def minDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the shortest root-to-leaf path."""
        if not root:
            return 0
        # Breadth-first level sweep: the first leaf encountered is on the
        # shallowest level, so we can return immediately.
        depth = 1
        level = [root]
        while level:
            next_level = []
            for node in level:
                if not node.left and not node.right:
                    return depth
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
            depth += 1
        return depth  # unreachable for a well-formed tree
# Ad-hoc smoke test: a root with a single left child has min depth 2.
ob = Solution()
root = TreeNode(1)
root.left = TreeNode(2)
# root.right = TreeNode(20)
# root.right.left = TreeNode(15)
# root.right.right = TreeNode(7)
print(ob.minDepth(root))
"shmishra@microsoft.com"
] | shmishra@microsoft.com |
b0697372f6464df3cdb5fcb923c349a26573ab08 | 02e2e17aeebe1e9e69a955f88686edab7efbe5a8 | /kiyoshi_ni_shokuhatsu/update_objects.py | 7f636d04e83eeebce0822a2e9369f71f0f8acdc7 | [
"MIT"
] | permissive | grokit/grokit.github.io | 948d893010ed3203f43a54af2d75259b69e2a895 | 4150b013eacb9bbdbc1a5046bbc8355d8306a9bc | refs/heads/master | 2021-07-17T21:06:08.951517 | 2020-04-26T18:58:22 | 2020-04-26T18:58:22 | 136,870,651 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | #!/usr/bin/python3
import dcore.env_setup as env_setup
import glob
import os
def gen_generic_objects():
    """Regenerate the object-factory registration list.

    Scans the tree for OB*.js files and rewrites the marked section of
    ObjectFactory.js with one factory-push line per object class.
    """
    files = list(glob.iglob('.' + '/**/*.**', recursive=True))
    exts = ['.js']
    files = [os.path.splitext(os.path.split(f)[1])[0] for f in files
             if os.path.splitext(f)[1] in exts and os.path.split(f)[1][0:2] == 'OB']
    # De-duplicate, then sort: set iteration order is not stable across
    # runs, so an unsorted set would make the generated file churn.
    files = sorted(set(files))
    tagBegin = '// Reflect objects START.'
    tagEnd = '// Reflect objects END.'
    template = 'objs.push( function(){return new __file__();});'
    insert = "\n".join(template.replace('__file__', f) for f in files)
    env_setup.updateFileContentBetweenMarks('./src/objects/ObjectFactory.js', tagBegin, tagEnd, insert, False)
def gen_surfaces():
    """Regenerate the surface-image list inside OBSurface.js."""
    files = list(glob.iglob('.' + '/**/*.**', recursive=True))
    exts = ['.png']
    # Keep only file names that live somewhere under a /surface/ directory...
    files = [os.path.split(f)[1] for f in files if '/surface/' in f]
    # ...and that are PNG images.
    files = [f for f in files if os.path.splitext(f)[1] in exts]
    tagBegin = '// Reflect objects category: surface START.'
    tagEnd = '// Reflect objects category: surface END.'
    template = 'this._filesMappingToThis.add("__file__");'
    insert = [template.replace('__file__', f) for f in files]
    insert = "\n".join(insert)
    env_setup.updateFileContentBetweenMarks('./src/objects/OBSurface.js', tagBegin, tagEnd, insert, False)
if __name__ == '__main__':
    gen_generic_objects()
    gen_surfaces()
| [
"you@example.com"
] | you@example.com |
7dfa6fce30442805c5ee7317697fc349a849a656 | 5094868ffc84f6591ee4ec6feb25b10b549aef2b | /inwin/fund/orderform.py | 2435061a767cd8d8161548e88387a2065c4af9ab | [] | no_license | 137996047/finance_trading | c8d9606cfb67525d79a9e60d5cb36b1c293fcc3c | d97edfbfbafc9eea7c47f30064b7aeb3f6e4bf55 | refs/heads/master | 2020-12-10T08:49:02.272634 | 2013-11-19T08:12:11 | 2013-11-19T08:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | '''
Created on 2013/2/13
@author: yhuang
'''
from django import forms
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, MultiField, Div, Field,Button
from crispy_forms.bootstrap import AppendedText,FormActions
# (stored value, translated label) pairs for the trading-type dropdown.
TSTYPE_CHOICES = (
    ('1', _('purchase')),
    ('2', _('withdraw')),
    ('3', _('dividend')),
    ('4', _('interest')),
)
class orderform(forms.Form):
    """Fund trading order entry form (crispy-forms rendered).

    NOTE(review): class name breaks PascalCase convention but renaming
    would break importers; ``form_action = 'submit_survey'`` looks like a
    leftover from an example -- confirm the real endpoint.
    """
    # One field per order attribute; labels are translatable.
    F_Date= forms.DateTimeField(label=_('Trading Date'),)
    F_SKID= forms.CharField(label=_('FundID'),max_length=8)
    F_TSType= forms.ChoiceField(label=_('Trading Type'),choices=TSTYPE_CHOICES)
    F_CurID=forms.CharField(label=_('Currency'),max_length=8)
    F_Amt=forms.DecimalField(label=_('Amount'),max_digits=28, decimal_places=4)
    F_Qty=forms.DecimalField(label=_('Quantity'),max_digits=28, decimal_places=4)
    F_Rate=forms.DecimalField(label=_('Rate'),max_digits=28, decimal_places=4)
    F_Nav=forms.DecimalField(label=_('Nav'),max_digits=28, decimal_places=4)
    F_Fee=forms.DecimalField(label=_('Fee'),max_digits=10, decimal_places=4)
    F_Exp=forms.DecimalField(label=_('Expense'),max_digits=10, decimal_places=4)
    F_Payable=forms.DecimalField(label=_('Pay Amount'),max_digits=28, decimal_places=4)
    F_Receivable=forms.DecimalField(label=_('Receive Amount'),max_digits=28, decimal_places=4)
    F_Note=forms.CharField(label=_('Note'),max_length=128)
    def __init__(self, *args, **kwargs):
        # Crispy-forms layout is configured before delegating to the base
        # Form constructor; the helper only touches class-level state.
        self.helper = FormHelper()
        self.helper.form_id = 'orderform'
        self.helper.form_class = 'blueForms'
        self.helper.form_method = 'post'
        self.helper.form_action = 'submit_survey'
        self.helper.layout = Layout(
            MultiField(
                'first arg is the legend of the fieldset',
                Div('F_Date',
                    style="background: white;", title="Explication title", css_class="bigdivs"
                ),
                'F_SKID',
                'F_TSType',
                'F_CurID',
                'F_Qty',
                'F_Rate',
                'F_Nav',
                'F_Fee',
                'F_Exp',
                'F_Payable',
                'F_Receivable',
            ),
            AppendedText('F_Amt', '$', active=True),
            Field('F_Note', id="password-field", css_class="passwordfields", title="Explanation"),
            #Field('slider', template="custom-slider.html"),
            ButtonHolder(
                Submit('submit', 'Submit', css_class='button white')
            ),
            FormActions(
                Submit('save', 'Save changes'),
                Button('cancel', 'Cancel')
            )
        )
        super(orderform, self).__init__(*args, **kwargs)
"yingchauhuang@gmail.com"
] | yingchauhuang@gmail.com |
41fd56496294aa28b4df70baf4467a20cfc53bc6 | ea5b4fdf353e76c44a8de71fa16aa8bae88c726a | /heap/613.highFive.py | 9f8641a749e1afe5a6f792b433db444a691bcab7 | [] | no_license | umnstao/lintcode-practice | dd61c66950ae89abec000063fe0d1a33f13ce6ec | e73b495e23c4dcb0421ab09133e573aaba23c431 | refs/heads/master | 2021-01-23T02:48:26.294160 | 2018-03-27T21:54:26 | 2018-03-27T21:54:26 | 86,024,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | '''
Definition for a Record
class Record:
def __init__(self, id, score):
self.id = id
self.score = score
'''
class Solution:
    # @param {Record[]} results a list of <student_id, score>
    # @return {dict(id, average)} find the average of 5 highest scores for each person
    # <key, value> (student_id, average_score)
    def highFive(self, results):
        """Return each student's average over their five highest scores.

        Like the original, the sum is always divided by 5, which assumes
        every student has at least five records.
        """
        scores_by_id = {}
        for record in results:
            scores_by_id.setdefault(record.id, []).append(record.score)
        # Sorting each bucket and slicing the top five replaces the
        # original hand-rolled "evict the minimum once the bucket exceeds
        # five" bookkeeping.
        return {
            student: sum(sorted(vals, reverse=True)[:5]) / 5.0
            for student, vals in scores_by_id.items()
        }
"umnstao@gmail.com"
] | umnstao@gmail.com |
1ef60f5fc25c6b4427ff0a3202d65fbdb4d2172c | f039b3665b5ca29a5e197ed05a9860f9180a16aa | /maxProfit.py | 946506e98ea2316952ca5664c4aa99c22eb4f464 | [] | no_license | NeilWangziyu/HighPerformancwAlgorithm | 895a0e9d78aee9a0eacc6f81352f8fde10b9310b | 7e3fba6879bbe25b738989ef550fd71c7a49dab0 | refs/heads/master | 2020-04-17T04:36:35.178522 | 2019-08-09T16:11:34 | 2019-08-09T16:11:34 | 166,237,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
profit = 0
for i in range(1, len(prices)):
if prices[i] - prices[i - 1] > 0:
profit += prices[i] - prices[i - 1]
return profit
| [
"noreply@github.com"
] | NeilWangziyu.noreply@github.com |
45978b08a29506f6bd384b7c4cc8c361fc40d77b | 62442c2547b22aae27f3bb3a0d3f84a9e8e535a0 | /python/djangopro/mysite/polls/admin.py | 55708fd52ed1fb701b7c1cd1b86a4096caca8aef | [] | no_license | jsdelivrbot/demos | 935729fe9afde33709c4e4e74863b64c16c33b33 | 01a97eda371c2d832c9f2c907a945310662e0710 | refs/heads/master | 2020-04-10T10:35:44.039560 | 2018-12-08T10:53:29 | 2018-12-08T10:53:29 | 160,970,588 | 0 | 0 | null | 2018-12-08T19:13:00 | 2018-12-08T19:13:00 | null | UTF-8 | Python | false | false | 872 | py | from mysite.polls.models import Poll
from mysite.polls.models import Choice
from django.contrib import admin
#class ChoiceInline(admin.StackedInline):
class ChoiceInline(admin.TabularInline):
    # Inline editor so Choices can be added directly on the Poll admin page.
    model = Choice
    extra = 3  # number of empty choice rows shown by default
class PollAdmin(admin.ModelAdmin):
    """Admin customisation for Poll: grouped fields, inline choices,
    list columns and a date filter."""
    # Show the fields in the following order
    #fields = ['pub_date', 'question']
    #Use the corresponding label for each field
    fieldsets = [
        (None, {'fields': ['question']}),
        ('Date information', {'fields':['pub_date'], 'classes':['collapse']}),
    ]
    # Quickly add new choices while adding the poll
    inlines = [ChoiceInline]
    # In the 'Select a Poll to change' menu, display following fields for each poll
    list_display = ('question', 'pub_date', 'was_published_today')
    # Shows filter/facet for the below fields
    list_filter = ['pub_date']
admin.site.register(Poll, PollAdmin)
admin.site.register(Choice)
| [
"amjedonline@gmail.com"
] | amjedonline@gmail.com |
2374b67ce1f63682539314996c2c82b71ee4b6df | cc8f8030d143f21e885995f97fd146d3dcc5fa40 | /sbb/tools/instances.py | b6775a7f93f1bdd48286bca7253a32349a5af83c | [
"MIT"
] | permissive | DanielLSM/train-sbb-challenge | 6719cb197df4eb16ef56e0ee3dbe267400cc5fcf | 9779b178c1e31f445d136d567e9f62390b0d2c5e | refs/heads/master | 2020-03-28T20:54:50.023088 | 2018-10-17T16:25:46 | 2018-10-17T16:25:46 | 149,113,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,913 | py | import logging
import sbb.tools.logger
import pprint
import networkx
from collections import defaultdict
from itertools import chain, product, starmap
from functools import partial
from sbb.tools.parsers import parse_input_paths, parse_json_file
from sbb.tools import input_dir, input_samples
from sbb.tools.route_graph import generate_route_graphs
class Instances:
    """Thin API over one parsed SBB challenge instance.

    Loads the JSON instance file, builds per-service-intention route graphs
    and several lookup tables, and offers path/graph helpers on top of them.
    """

    def __init__(self, ninstance: int = 0, input_dir: str = input_samples):
        self._input_dir = input_dir
        self._ipaths = parse_input_paths(input_dir)
        self.logger = logging.getLogger('APIlogger')
        self.logger.setLevel(logging.INFO)
        self.data = self._load_data(ninstance)
        self.route_graphs = generate_route_graphs(self.data)
        self._fname = self.data['label']
        self._hash = self.data['hash']
        self._generate_route2markers2sections()
        self._generate_route2sections2nodes()
        self._generate_service_intentions()
        self.logger.info('api for the instances initialized')

    def __str__(self):
        return 'API to interface instance {}'.format(self._fname)

    def __getitem__(self, key):
        # Dict-style access forwards straight to the raw instance data.
        return self.data[key]

    def keys(self):
        return self.data.keys()

    def _load_data(self, ninstance: int) -> dict:
        """Parse and return the instance file at position *ninstance*."""
        try:
            self.logger.info('loaded {}'.format(
                self._ipaths[ninstance].parts[-1]))
            return parse_json_file(self._ipaths[ninstance])
        except IndexError as e:
            # Fixes two bugs: an out-of-range index raises IndexError (the
            # previous ``except ValueError`` never matched), and Logger has
            # no ``ERROR`` method -- it is ``error``. Also the valid range
            # ends at len-1, not len.
            self.logger.error("select an instance from 0 to {}".format(
                len(self._ipaths) - 1))
            raise e

    def _generate_service_intentions(self) -> None:
        """ Creates a dict from train to service intentions, and stores
            more approprietly
        """
        self.service_intentions = {
            train['id']: train for train in self.data['service_intentions']
        }

    def _generate_route2markers2sections(self) -> None:
        """ Creates a dict where the key is route_id
            to markers, each marker has a list of possible required
            sections
        """
        # TODO: add route_alternative_marker_at_exit to compute paths
        # TODO: finish this
        self.route2marker2sections = {}
        for route in self.data['routes']:
            self.route2marker2sections[route['id']] = defaultdict(list)
            for route_path in route['route_paths']:
                for route_section in route_path['route_sections']:
                    # Only some sections carry a marker; index [0] takes the
                    # first marker of the section.
                    if 'section_marker' in route_section:
                        self.route2marker2sections[route['id']][route_section[
                            'section_marker'][0]].append(
                                route_section['sequence_number'])
        # TODO: Put more things such as time rectritions on this dict

    def _generate_route2sections2nodes(self) -> None:
        """ Creates a dict where the key is route_id
            to sections, to each 'in' and 'out' node
        """
        self.route2sections2nodes = {}
        for key in self.route_graphs.keys():
            self.route2sections2nodes[key] = {}
            edges_info = self.route_graphs[key].edges()
            for edges in edges_info:
                # Each edge carries the section's sequence_number; map it to
                # the edge's endpoint nodes.
                self.route2sections2nodes[key][edges_info[edges[0], edges[1]][
                    'sequence_number']] = {
                        'in': edges[0],
                        'out': edges[1]
                    }

    def paths_from_nodes(self, route_id, nodes) -> list:
        """ Given a list of nodes by ORDER, get all possible paths (lists of lists) """
        ppaths = []
        for i in range(len(nodes) - 1):
            paths = self.generate_edge_paths(route_id, nodes[i], nodes[i + 1])
            if i != 0:  # was ``i is not 0`` -- identity test on an int
                # Cross-product the partial paths so far with the new leg.
                pathsi = []
                for path in list(product(ppaths, paths)):
                    pathsi.append(list(chain(*path)))
                ppaths = pathsi
            else:
                ppaths = paths
        return ppaths

    def paths_from_arcs(self, route_id, arcs) -> list:
        """ Given a list of arcs by ORDER, get all possible paths (lists of lists) """
        nodes = self.transform_arcs2nodes(route_id, arcs)
        return self.paths_from_nodes(route_id, nodes)

    def transform_arcs2nodes(self, route_id, arcs) -> list:
        """Expand each arc (section) into its 'in' and 'out' nodes, in order."""
        nodes = []
        for arc in arcs:
            nodes.append(self.route2sections2nodes[route_id][arc]['in'])
            nodes.append(self.route2sections2nodes[route_id][arc]['out'])
        return nodes

    def nodes(self, route_id) -> list:
        return list(self.route_graphs[route_id].nodes())

    def edges(self, route_id) -> list:
        return list(self.route_graphs[route_id].edges())

    def edges_sn(self, route_id):
        # Edge view with attribute access (used to read sequence_number).
        return self.route_graphs[route_id].edges()

    def generate_edge_paths(self, route_id: int, start: str, end: str) -> list:
        """All simple paths from *start* to *end*, expressed as sequence numbers."""
        all_paths = partial(networkx.algorithms.simple_paths.all_simple_paths,
                            self.route_graphs[route_id])
        path_iter = all_paths(start, end)
        paths = []
        edges_info = self.edges_sn(route_id)
        for path in path_iter:
            edges_path = []
            for i in range(len(path) - 1):
                edges_path.append(
                    edges_info[path[i], path[i + 1]]['sequence_number'])
            paths.append(edges_path)
        return paths

    def generate_paths(self, route_id: int, start: str, end: str) -> list:
        """All simple paths from *start* to *end*, expressed as node lists."""
        all_paths = partial(networkx.algorithms.simple_paths.all_simple_paths,
                            self.route_graphs[route_id])
        path_iter = all_paths(start, end)
        return list(path_iter)

    def generate_all_paths(self, route_id: int) -> list:
        """All simple paths from every source (in-degree 0) to every sink."""
        roots = (
            v for v, d in self.route_graphs[route_id].in_degree() if d == 0)
        leaves = (
            v for v, d in self.route_graphs[route_id].out_degree() if d == 0)
        all_paths = partial(networkx.algorithms.simple_paths.all_simple_paths,
                            self.route_graphs[route_id])
        return list(
            chain.from_iterable(starmap(all_paths, product(roots, leaves))))

    def from_paths_to_arcs(self, route_id: int, path: list) -> list:
        """Translate a node path into the matching list of sequence numbers."""
        edges_info = self.edges_sn(route_id)
        edges_path = []
        for i in range(len(path) - 1):
            edges_path.append(
                edges_info[path[i], path[i + 1]]['sequence_number'])
        return edges_path


if __name__ == "__main__":
    i = Instances()
"daniellsmarta@gmail.com"
] | daniellsmarta@gmail.com |
c8fad1f100e4968fe5c63524938cdcb4c7395128 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_chart_pattern05.py | 4ffee075709f4ba1541f72c152fcaf13ae9b4934 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 3,121 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('chart_pattern05.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        chart.axis_ids = [110902272, 110756608]

        # Eight identical three-row data columns in A..H.
        for index in range(8):
            cell = '%s1' % chr(ord('A') + index)
            worksheet.write_column(cell, [2, 2, 2])

        # One series per column, each with its own fill pattern. Only the
        # first series sets an explicit background colour.
        series_patterns = [
            {'pattern': 'percent_25', 'fg_color': '#C00000',
             'bg_color': '#FFFFFF'},
            {'pattern': 'percent_75', 'fg_color': '#FF0000'},
            {'pattern': 'dark_upward_diagonal', 'fg_color': '#FFC000'},
            {'pattern': 'narrow_horizontal', 'fg_color': '#FFFF00'},
            {'pattern': 'dashed_vertical', 'fg_color': '#92D050'},
            {'pattern': 'horizontal_brick', 'fg_color': '#00B050'},
            {'pattern': 'shingle', 'fg_color': '#00B0F0'},
            {'pattern': 'large_check', 'fg_color': '#0070C0'},
        ]
        for index, pattern in enumerate(series_patterns):
            column = chr(ord('A') + index)
            chart.add_series({
                'values': '=Sheet1!$%s$1:$%s$3' % (column, column),
                'pattern': pattern,
            })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
b75df8010682fe9df788a973a54b7c29ad65d8bb | 11aac96a622eadf3992d12659eaf0a450b9398bf | /Assignment/settings.py | 95126fea50afc7271ec767a5834dbca2a8746130 | [] | no_license | naveenkumar2505/Assignment | 36604d0545c10a4bcce5606ea26dbbf1c7596159 | 635c64c8116ad17a2893aa86f498cf8ecdc1f944 | refs/heads/master | 2020-06-01T03:54:23.802759 | 2019-06-07T05:57:41 | 2019-06-07T05:57:41 | 190,622,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | """
Django settings for Assignment project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from the
# environment before any production deployment.
SECRET_KEY = 'upo7q5y25)mlvl#%@q7r%*37h$iq2am71j)nm21qnecon49kj_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG=True Django only serves localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app plus Django REST Framework for the API endpoints.
    'User.apps.UserConfig',
    'rest_framework'
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# API list endpoints are paginated five records at a time.
REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    #'DEFAULT_PAGINATION_CLASS': 'apps.core.pagination.StandardResultsSetPagination',
    'PAGE_SIZE': 5
}
ROOT_URLCONF = 'Assignment.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Assignment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): no HOST/PORT configured, so MySQL on localhost is assumed;
# root credentials are hard-coded -- move them to the environment.
DATABASES = {
    'default': {
        'ENGINE':'django.db.backends.mysql',
        'NAME':'assigndb',
        'USER':'root',
        'PASSWORD':'root'
    }
}
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
#
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"naveentechhie@gmail.com"
] | naveentechhie@gmail.com |
dbd7245e581ae91c182ba9ce192bb227f29d3af5 | e4266d7995c6952a374037e6809678a28e2972f4 | /abs/abs_project_task_template/models/task_template.py | 5fc1f01895efab778ea89c021e1665e3f7aa96f9 | [] | no_license | h3llopy/addons_12 | cdd3957faa46be9beb20239b713bcde7d3fb24bf | 7440086ae976754b0d268986519705cbc9ea0a8a | refs/heads/master | 2023-08-31T12:33:54.645648 | 2021-11-01T02:21:06 | 2021-11-01T02:21:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,772 | py | # -*- coding: utf-8 -*-
#################################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2018-Today Ascetic Business Solution <www.asceticbs.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from odoo import api,fields,models,_
# Template model holding task definitions that are copied into new projects.
class TaskTemplate(models.Model):
    """Task template: a predefined task copied into projects on creation."""
    _name='project.task.template'

    # NOTE(review): track_visibility only takes effect on models that also
    # inherit mail.thread -- confirm tracking is wired up as intended.
    name = fields.Char(string='Task Title', track_visibility='always', required=True, help=" The Title Of Task")
    user_id = fields.Many2one('res.users', string='Assigned to', index=True, track_visibility='always', help="Many2one Field Related To res user")
    date_deadline = fields.Date(string='Deadline', copy=False, help="Date Field For Deadline")
    description = fields.Html(string='Description', help="Html Field For Description")
    # Inactive (archived) templates are skipped by Project.create below.
    active = fields.Boolean(default=True, help="Boolean Field For Task Status")
# project.project is extended to optionally seed tasks from the templates.
class Project(models.Model):
    """project.project extended to seed tasks from active task templates."""
    _inherit = 'project.project'

    use_task_template = fields.Boolean(string="Use Active Task Templates", help="Use Task Templates for creating Tasks of the Project")

    @api.model
    def create(self, vals):
        """Create the project and, when requested, one task per active
        project.task.template record."""
        project = super(Project, self).create(vals)
        if vals.get('use_task_template'):
            templates = self.env['project.task.template'].search(
                [('active', '=', True)])
            # An empty recordset simply yields no tasks.
            for template in templates:
                self.env['project.task'].create({
                    'name': template.name,
                    'user_id': template.user_id.id,
                    'date_deadline': template.date_deadline,
                    'description': template.description,
                    'project_id': project.id,
                })
        return project
| [
"diegobgajardo@gmail.com"
] | diegobgajardo@gmail.com |
66b94cd88087c441f60c732183e04658634fc47f | 3dff4bef08954fadb7cc83c4f212fffa81b7d27e | /pub_site/src/pub_site/transfer/forms.py | e39f18c35f5843e52340d97b53ee14bb5ffa4b0e | [] | no_license | webee/pay | 3ec91cb415d9e3addabe961448533d861c0bd67a | b48c6892686bf3f9014bb67ed119506e41050d45 | refs/heads/master | 2020-04-29T14:31:09.643993 | 2016-02-02T07:14:14 | 2016-02-02T07:14:14 | 176,198,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from flask.ext.wtf import Form
from pub_site.commons import amount_less_than_balance, MyRegexp
from wtforms import StringField, SubmitField, ValidationError, DecimalField
from wtforms.validators import DataRequired, NumberRange, Length
from pub_site import dba
def username_should_exists(form, field):
    """WTForms inline validator: fail when the entered username is unknown."""
    if not dba.is_username_exists(field.data):
        raise ValidationError(u"用户不存在")
class TransferForm(Form):
    """Transfer form: target username, amount in yuan, optional note.

    Fields:
      username: must be non-empty and registered (username_should_exists).
      amount: decimal with at most 2 fraction digits, > 0 and within balance.
      info: optional note, at most 50 characters.
    """
    username = StringField(u"用户名", validators=[DataRequired(u"用户名不能为空"), username_should_exists])
    # BUG FIX 1: the decimal point in the pattern must be escaped -- the bare
    # '.' previously matched any character (e.g. "1x23" passed the regex).
    # BUG FIX 2: Decimal(0.01) builds an inexact value slightly above 0.01,
    # so an amount of exactly 0.01 was rejected; Decimal("0.01") is exact.
    amount = DecimalField(u"转账金额(元)",
                          validators=[DataRequired(u'请输入数字,小数点后最多2位, 例如"8.88"'), MyRegexp(r'^\d+(\.\d{1,2})?$', message=u'请输入数字,小数点后最多2位, 例如"8.88"'),
                                      amount_less_than_balance,
                                      NumberRange(min=Decimal("0.01"), message=u"提现金额必须大于0")])
    info = StringField(u"备注", validators=[Length(max=50, message=u"备注不能超过50个字")])
    submit = SubmitField(u"提交")
| [
"yiwang@lvye.com"
] | yiwang@lvye.com |
26bcb4a835dda6d70c80ca374cf533025eb70965 | c1a2befc19abff0cb476618e33004a2c8ed3b01f | /tensorflow/contrib/rnn/python/ops/rnn_cell.py | 72206daff4c0dd5cd4f161984d26bb89c77d056e | [
"Apache-2.0"
] | permissive | SatishGitHubs/TensorFlow | 842ede88c31157ab886c8e01b91f3170c38d5395 | 422b17b34f4f1380d2e487b3509bb97ff726edca | refs/heads/master | 2022-10-20T05:14:39.748631 | 2016-09-20T13:19:29 | 2016-09-20T13:19:29 | 68,717,129 | 0 | 1 | Apache-2.0 | 2022-09-30T19:31:40 | 2016-09-20T13:55:53 | C++ | UTF-8 | Python | false | false | 36,709 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
  """Get a sharded variable concatenated into one tensor."""
  shards = _get_sharded_variable(name, shape, dtype, num_shards)
  # A single shard needs no concatenation.
  if len(shards) == 1:
    return shards[0]

  concat_name = name + "/concat"
  full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
  # Reuse a previously built concat tensor if one is already registered.
  for existing in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
    if existing.name == full_name:
      return existing

  concatenated = array_ops.concat(0, shards, name=concat_name)
  ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
                        concatenated)
  return concatenated
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  # Split rows as evenly as possible: the first `extra` shards get one
  # additional row each.
  base_rows = shape[0] // num_shards
  extra = shape[0] - base_rows * num_shards
  shards = []
  for idx in range(num_shards):
    rows = base_rows + 1 if idx < extra else base_rows
    shards.append(vs.get_variable(name + "_%d" % idx, [rows] + shape[1:],
                                  dtype=dtype))
  return shards
class CoupledInputForgetGateLSTMCell(rnn_cell.RNNCell):
  """Long short-term memory unit (LSTM) recurrent network cell.
  The default non-peephole implementation is based on:
  http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
  S. Hochreiter and J. Schmidhuber.
  "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
  The peephole implementation is based on:
  https://research.google.com/pubs/archive/43905.pdf
  Hasim Sak, Andrew Senior, and Francoise Beaufays.
  "Long short-term memory recurrent neural network architectures for
   large scale acoustic modeling." INTERSPEECH, 2014.
  The coupling of input and forget gate is based on:
  http://arxiv.org/pdf/1503.04069.pdf
  Greff et al. "LSTM: A Search Space Odyssey"
  The class uses optional peep-hole connections, and an optional projection
  layer.
  """
  def __init__(self, num_units, use_peepholes=False,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=math_ops.tanh):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      num_proj_shards: How to split the projection matrix. If >1, the
        projection matrix is stored across num_proj_shards.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`. By default (False), they are concatenated
        along the column axis. This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated. Use state_is_tuple=True." % self)
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    # With a projection layer, the emitted output (and the m half of the
    # state) has num_proj columns instead of num_units.
    if num_proj:
      self._state_size = (
          rnn_cell.LSTMStateTuple(num_units, num_proj)
          if state_is_tuple else num_units + num_proj)
      self._output_size = num_proj
    else:
      self._state_size = (
          rnn_cell.LSTMStateTuple(num_units, num_units)
          if state_is_tuple else 2 * num_units)
      self._output_size = num_units
  @property
  def state_size(self):
    # LSTMStateTuple(c, m) or the concatenated width, per state_is_tuple.
    return self._state_size
  @property
  def output_size(self):
    # num_proj when a projection layer is configured, else num_units.
    return self._output_size
  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: if `state_is_tuple` is False, this must be a state Tensor,
        `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
        tuple of state Tensors, both `2-D`, with column sizes `c_state` and
        `m_state`.
      scope: VariableScope for the created subgraph; defaults to "LSTMCell".
    Returns:
      A tuple containing:
      - A `2-D, [batch x output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`. Same type and shape(s) as `state`.
    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    sigmoid = math_ops.sigmoid
    num_proj = self._num_units if self._num_proj is None else self._num_proj
    # Unpack (c, m) from either the tuple state or the concatenated tensor.
    if self._state_is_tuple:
      (c_prev, m_prev) = state
    else:
      c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
      m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
    dtype = inputs.dtype
    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "LSTMCell"
      # Only 3 gate blocks (not 4): the input gate is derived from the
      # forget gate below.
      concat_w = _get_concat_variable(
          "W", [input_size.value + num_proj, 3 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B", shape=[3 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      # j = new_input, f = forget_gate, o = output_gate
      cell_inputs = array_ops.concat(1, [inputs, m_prev])
      lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
      j, f, o = array_ops.split(1, 3, lstm_matrix)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      if self._use_peepholes:
        f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
      else:
        f_act = sigmoid(f + self._forget_bias)
      # Coupled gates: the input gate is tied to the forget gate, i = 1 - f.
      c = (f_act * c_prev + (1 - f_act) * self._activation(j))
      if self._use_peepholes:
        m = sigmoid(o + w_o_diag * c) * self._activation(c)
      else:
        m = sigmoid(o) * self._activation(c)
      # Optional linear projection of the output (with optional clipping).
      if self._num_proj is not None:
        concat_w_proj = _get_concat_variable(
            "W_P", [self._num_units, self._num_proj],
            dtype, self._num_proj_shards)
        m = math_ops.matmul(m, concat_w_proj)
        if self._proj_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
          # pylint: enable=invalid-unary-operand-type
    new_state = (rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple
                 else array_ops.concat(1, [c, m]))
    return m, new_state
class TimeFreqLSTMCell(rnn_cell.RNNCell):
  """Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
  This implementation is based on:
    Tara N. Sainath and Bo Li
    "Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
    for LVCSR Tasks." submitted to INTERSPEECH, 2016.
  It uses peep-hole connections and optional cell clipping.
  """
  def __init__(self, num_units, use_peepholes=False,
               cell_clip=None, initializer=None,
               num_unit_shards=1, forget_bias=1.0,
               feature_size=None, frequency_skip=None):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_unit_shards: int, How to split the weight matrix. If >1, the weight
        matrix is stored across num_unit_shards.
      forget_bias: float, Biases of the forget gate are initialized by default
        to 1 in order to reduce the scale of forgetting at the beginning
        of the training.
      feature_size: int, The size of the input feature the LSTM spans over.
      frequency_skip: int, The amount the LSTM filter is shifted by in
        frequency.
    """
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_unit_shards = num_unit_shards
    self._forget_bias = forget_bias
    self._feature_size = feature_size
    self._frequency_skip = frequency_skip
    # NOTE(review): __call__ slices `state` once per frequency block, so the
    # actual state tensor width is 2 * num_units * num_frequency_blocks,
    # while state_size here reports only 2 * num_units -- confirm callers
    # size the state accordingly.
    self._state_size = 2 * num_units
    self._output_size = num_units
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._state_size
  def __call__(self, inputs, state, scope=None):
    """Run one step of LSTM.
    Args:
      inputs: input Tensor, 2D, batch x num_units.
      state: state Tensor, 2D, batch x state_size.
      scope: VariableScope for the created subgraph; defaults to
        "TimeFreqLSTMCell".
    Returns:
      A tuple containing:
      - A 2D, batch x output_dim, Tensor representing the output of the LSTM
        after reading "inputs" when previous state was "state".
        Here output_dim is num_units.
      - A 2D, batch x state_size, Tensor representing the new state of LSTM
        after reading "inputs" when previous state was "state".
    Raises:
      ValueError: if an input_size was specified and the provided inputs have
        a different dimension.
    """
    sigmoid = math_ops.sigmoid
    tanh = math_ops.tanh
    # Split the input into overlapping frequency blocks; one LSTM step is
    # run per block, sharing the same weights.
    freq_inputs = self._make_tf_features(inputs)
    dtype = inputs.dtype
    actual_input_size = freq_inputs[0].get_shape().as_list()[1]
    with vs.variable_scope(scope or type(self).__name__,
                           initializer=self._initializer):  # "TimeFreqLSTMCell"
      concat_w = _get_concat_variable(
          "W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
          dtype, self._num_unit_shards)
      b = vs.get_variable(
          "B", shape=[4 * self._num_units],
          initializer=array_ops.zeros_initializer, dtype=dtype)
      # Diagonal connections
      if self._use_peepholes:
        w_f_diag = vs.get_variable(
            "W_F_diag", shape=[self._num_units], dtype=dtype)
        w_i_diag = vs.get_variable(
            "W_I_diag", shape=[self._num_units], dtype=dtype)
        w_o_diag = vs.get_variable(
            "W_O_diag", shape=[self._num_units], dtype=dtype)
      # initialize the first freq state to be zero
      m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
                                     self._num_units], dtype)
      for fq in range(len(freq_inputs)):
        # Per-block (c, m) slices from the packed state tensor.
        c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
                                 [-1, self._num_units])
        m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
                                 [-1, self._num_units])
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        cell_inputs = array_ops.concat(1, [freq_inputs[fq], m_prev,
                                           m_prev_freq])
        lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
        i, j, f, o = array_ops.split(1, 4, lstm_matrix)
        if self._use_peepholes:
          c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
               sigmoid(i + w_i_diag * c_prev) * tanh(j))
        else:
          c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
        if self._cell_clip is not None:
          # pylint: disable=invalid-unary-operand-type
          c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
          # pylint: enable=invalid-unary-operand-type
        if self._use_peepholes:
          m = sigmoid(o + w_o_diag * c) * tanh(c)
        else:
          m = sigmoid(o) * tanh(c)
        # The current block's output feeds the next frequency block.
        m_prev_freq = m
        if fq == 0:
          state_out = array_ops.concat(1, [c, m])
          m_out = m
        else:
          state_out = array_ops.concat(1, [state_out, c, m])
          m_out = array_ops.concat(1, [m_out, m])
    return m_out, state_out
  def _make_tf_features(self, input_feat):
    """Make the frequency features.
    Args:
      input_feat: input Tensor, 2D, batch x num_units.
    Returns:
      A list of frequency features, with each element containing:
      - A 2D, batch x output_dim, Tensor representing the time-frequency feature
        for that frequency index. Here output_dim is feature_size.
    Raises:
      ValueError: if input_size cannot be inferred from static shape inference.
    """
    input_size = input_feat.get_shape().with_rank(2)[-1].value
    if input_size is None:
      raise ValueError("Cannot infer input_size from static shape inference.")
    # Number of windows of width feature_size, advanced by frequency_skip.
    num_feats = int((input_size - self._feature_size) / (
        self._frequency_skip)) + 1
    freq_inputs = []
    for f in range(num_feats):
      cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
                                  [-1, self._feature_size])
      freq_inputs.append(cur_input)
    return freq_inputs
class GridLSTMCell(rnn_cell.RNNCell):
"""Grid Long short-term memory unit (LSTM) recurrent network cell.
The default is based on:
Nal Kalchbrenner, Ivo Danihelka and Alex Graves
"Grid Long Short-Term Memory," Proc. ICLR 2016.
http://arxiv.org/abs/1507.01526
When peephole connections are used, the implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
The code uses optional peephole connections, shared_weights and cell clipping.
"""
def __init__(self, num_units, use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
num_frequency_blocks=1,
couple_input_forget_gates=False,
state_is_tuple=False):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, default False. Set True to enable diagonal/peephole
connections.
share_time_frequency_weights: bool, default False. Set True to enable
shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_unit_shards: int, How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
forget_bias: float, Biases of the forget gate are initialized by default
to 1 in order to reduce the scale of forgetting at the beginning
of the training.
feature_size: int, The size of the input feature the LSTM spans over.
frequency_skip: int, The amount the LSTM filter is shifted by in
frequency.
num_frequency_blocks: int, The total number of frequency blocks needed to
cover the whole input feature.
couple_input_forget_gates: bool, Whether to couple the input and forget
gates, i.e. f_gate = 1.0 - i_gate, to reduce model parameters and
computation cost.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. By default (False), they are concatenated
along the column axis. This default behavior will soon be deprecated.
"""
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._share_time_frequency_weights = share_time_frequency_weights
self._couple_input_forget_gates = couple_input_forget_gates
self._state_is_tuple = state_is_tuple
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._num_frequency_blocks = int(num_frequency_blocks)
if state_is_tuple:
state_names = ""
for freq_index in range(self._num_frequency_blocks):
name_prefix = "state_f%02d" % freq_index
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"GridLSTMStateTuple", state_names.strip(','))
self._state_size = self._state_tuple_type(
*([num_units, num_units] * self._num_frequency_blocks))
else:
self._state_tuple_type = None
self._state_size = num_units * self._num_frequency_blocks * 2
self._output_size = num_units * self._num_frequency_blocks * 2
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
@property
def state_tuple_type(self):
return self._state_tuple_type
def __call__(self, inputs, state, scope=None):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
scope: VariableScope for the created subgraph; defaults to "LSTMCell".
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
num_gates = 3 if self._couple_input_forget_gates else 4
freq_inputs = self._make_tf_features(inputs)
dtype = inputs.dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
with vs.variable_scope(scope or type(self).__name__,
initializer=self._initializer): # "GridLSTMCell"
concat_w_f = _get_concat_variable(
"W_f", [actual_input_size + 2 * self._num_units,
num_gates * self._num_units],
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f", shape=[num_gates * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable(
"W_t", [actual_input_size + 2 * self._num_units,
num_gates * self._num_units],
dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t", shape=[num_gates * self._num_units],
initializer=array_ops.zeros_initializer, dtype=dtype)
if self._use_peepholes:
# Diagonal connections
if not self._couple_input_forget_gates:
w_f_diag_freqf = vs.get_variable(
"W_F_diag_freqf", shape=[self._num_units], dtype=dtype)
w_f_diag_freqt = vs.get_variable(
"W_F_diag_freqt", shape=[self._num_units], dtype=dtype)
w_i_diag_freqf = vs.get_variable(
"W_I_diag_freqf", shape=[self._num_units], dtype=dtype)
w_i_diag_freqt = vs.get_variable(
"W_I_diag_freqt", shape=[self._num_units], dtype=dtype)
w_o_diag_freqf = vs.get_variable(
"W_O_diag_freqf", shape=[self._num_units], dtype=dtype)
w_o_diag_freqt = vs.get_variable(
"W_O_diag_freqt", shape=[self._num_units], dtype=dtype)
if not self._share_time_frequency_weights:
if not self._couple_input_forget_gates:
w_f_diag_timef = vs.get_variable(
"W_F_diag_timef", shape=[self._num_units], dtype=dtype)
w_f_diag_timet = vs.get_variable(
"W_F_diag_timet", shape=[self._num_units], dtype=dtype)
w_i_diag_timef = vs.get_variable(
"W_I_diag_timef", shape=[self._num_units], dtype=dtype)
w_i_diag_timet = vs.get_variable(
"W_I_diag_timet", shape=[self._num_units], dtype=dtype)
w_o_diag_timef = vs.get_variable(
"W_O_diag_timef", shape=[self._num_units], dtype=dtype)
w_o_diag_timet = vs.get_variable(
"W_O_diag_timet", shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros(
[int(inputs.get_shape()[0]), self._num_units], dtype)
c_prev_freq = array_ops.zeros(
[int(inputs.get_shape()[0]), self._num_units], dtype)
for freq_index in range(len(freq_inputs)):
if self._state_is_tuple:
name_prefix = "state_f%02d" % freq_index
c_prev_time = getattr(state, name_prefix + "_c")
m_prev_time = getattr(state, name_prefix + "_m")
else:
c_prev_time = array_ops.slice(
state, [0, 2 * freq_index * self._num_units],
[-1, self._num_units])
m_prev_time = array_ops.slice(
state, [0, (2 * freq_index + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(1, [freq_inputs[freq_index], m_prev_time,
m_prev_freq])
# F-LSTM
lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
concat_w_f), b_f)
if self._couple_input_forget_gates:
i_freq, j_freq, o_freq = array_ops.split(1, num_gates,
lstm_matrix_freq)
f_freq = None
else:
i_freq, j_freq, f_freq, o_freq = array_ops.split(1, num_gates,
lstm_matrix_freq)
# T-LSTM
if self._share_time_frequency_weights:
i_time = i_freq
j_time = j_freq
f_time = f_freq
o_time = o_freq
else:
lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
concat_w_t), b_t)
if self._couple_input_forget_gates:
i_time, j_time, o_time = array_ops.split(1, num_gates,
lstm_matrix_time)
f_time = None
else:
i_time, j_time, f_time, o_time = array_ops.split(1, 4,
lstm_matrix_time)
# F-LSTM c_freq
# input gate activations
if self._use_peepholes:
i_freq_g = sigmoid(i_freq +
w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_freq_g = sigmoid(i_freq)
# forget gate activations
if self._couple_input_forget_gates:
f_freq_g = 1.0 - i_freq_g
else:
if self._use_peepholes:
f_freq_g = sigmoid(f_freq + self._forget_bias +
w_f_diag_freqf * c_prev_freq +
w_f_diag_freqt * c_prev_time)
else:
f_freq_g = sigmoid(f_freq + self._forget_bias)
# cell state
c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# T-LSTM c_freq
# input gate activations
if self._use_peepholes:
if self._share_time_frequency_weights:
i_time_g = sigmoid(i_time +
w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_time_g = sigmoid(i_time +
w_i_diag_timef * c_prev_freq +
w_i_diag_timet * c_prev_time)
else:
i_time_g = sigmoid(i_time)
# forget gate activations
if self._couple_input_forget_gates:
f_time_g = 1.0 - i_time_g
else:
if self._use_peepholes:
if self._share_time_frequency_weights:
f_time_g = sigmoid(f_time + self._forget_bias +
w_f_diag_freqf * c_prev_freq +
w_f_diag_freqt * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias +
w_f_diag_timef * c_prev_freq +
w_f_diag_timet * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias)
# cell state
c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# F-LSTM m_freq
if self._use_peepholes:
m_freq = sigmoid(o_freq +
w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_freq)
else:
m_freq = sigmoid(o_freq) * tanh(c_freq)
# T-LSTM m_time
if self._use_peepholes:
if self._share_time_frequency_weights:
m_time = sigmoid(o_time +
w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time +
w_o_diag_timef * c_freq +
w_o_diag_timet * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time) * tanh(c_time)
m_prev_freq = m_freq
c_prev_freq = c_freq
# Concatenate the outputs for T-LSTM and F-LSTM for each shift
if freq_index == 0:
state_out_lst = [c_time, m_time]
m_out_lst = [m_time, m_freq]
else:
state_out_lst.extend([c_time, m_time])
m_out_lst.extend([m_time, m_freq])
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
state_out = array_ops.concat(1, state_out_lst)
# Outputs are always concated as it is never used separately.
m_out = array_ops.concat(1, m_out_lst)
return m_out, state_out
def _make_tf_features(self, input_feat):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, batch x num_units.
Returns:
A list of frequency features, with each element containing:
- A 2D, batch x output_dim, Tensor representing the time-frequency feature
for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
num_feats = int((input_size - self._feature_size) / (
self._frequency_skip)) + 1
if num_feats != self._num_frequency_blocks:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please check"
" the input size and filter config are correct." % (
self._num_frequency_blocks, num_feats))
freq_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
[-1, self._feature_size])
freq_inputs.append(cur_input)
return freq_inputs
# pylint: disable=protected-access
_linear = rnn_cell._linear
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell.RNNCell):
"""Basic attention cell wrapper.
Implementation based on https://arxiv.org/pdf/1601.06733.pdf.
"""
def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None,
input_size=None, state_is_tuple=False):
"""Create a cell with attention.
Args:
cell: an RNNCell, an attention is added to it.
attn_length: integer, the size of an attention window.
attn_size: integer, the size of an attention vector. Equal to
cell.output_size by default.
attn_vec_size: integer, the number of convolutional features calculated
on attention state and a size of the hidden layer built from
base cell state. Equal attn_size to by default.
input_size: integer, the size of a hidden linear layer,
built from inputs and attention. Derived from the input tensor
by default.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. By default (False), the states are all
concatenated along the column axis.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if cell returns a state tuple but the flag
`state_is_tuple` is `False` or if attn_length is zero or less.
"""
if not isinstance(cell, rnn_cell.RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if nest.is_sequence(cell.state_size) and not state_is_tuple:
raise ValueError("Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: %s"
% str(cell.state_size))
if attn_length <= 0:
raise ValueError("attn_length should be greater than zero, got %s"
% str(attn_length))
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True." % self)
if attn_size is None:
attn_size = cell.output_size
if attn_vec_size is None:
attn_vec_size = attn_size
self._state_is_tuple = state_is_tuple
self._cell = cell
self._attn_vec_size = attn_vec_size
self._input_size = input_size
self._attn_size = attn_size
self._attn_length = attn_length
@property
def state_size(self):
size = (self._cell.state_size, self._attn_size,
self._attn_size * self._attn_length)
if self._state_is_tuple:
return size
else:
return sum(list(size))
@property
def output_size(self):
return self._attn_size
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell with attention (LSTMA)."""
with vs.variable_scope(scope or type(self).__name__):
if self._state_is_tuple:
state, attns, attn_states = state
else:
states = state
state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
attns = array_ops.slice(
states, [0, self._cell.state_size], [-1, self._attn_size])
attn_states = array_ops.slice(
states, [0, self._cell.state_size + self._attn_size],
[-1, self._attn_size * self._attn_length])
attn_states = array_ops.reshape(attn_states,
[-1, self._attn_length, self._attn_size])
input_size = self._input_size
if input_size is None:
input_size = inputs.get_shape().as_list()[1]
inputs = _linear([inputs, attns], input_size, True)
lstm_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
new_state_cat = array_ops.concat(1, nest.flatten(new_state))
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("AttnOutputProjection"):
output = _linear([lstm_output, new_attns], self._attn_size, True)
new_attn_states = array_ops.concat(1, [new_attn_states,
array_ops.expand_dims(output, 1)])
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
new_state = array_ops.concat(1, list(new_state))
return output, new_state
def _attention(self, query, attn_states):
conv2d = nn_ops.conv2d
reduce_sum = math_ops.reduce_sum
softmax = nn_ops.softmax
tanh = math_ops.tanh
with vs.variable_scope("Attention"):
k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
v = vs.get_variable("AttnV", [self._attn_vec_size])
hidden = array_ops.reshape(attn_states,
[-1, self._attn_length, 1, self._attn_size])
hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
y = _linear(query, self._attn_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
a = softmax(s)
d = reduce_sum(
array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
new_attns = array_ops.reshape(d, [-1, self._attn_size])
new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
return new_attns, new_attn_states
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
19ef3e931eaaa31f4ee7726864baf8d4c408bd89 | a5d22c99e781270317078f8980c934bcc71e6e8b | /samples/misc/opencv_samples/mqtt_cam/config.py | c1ba8083f6f40c04138fb58ff10003c9d0deedcf | [
"Apache-2.0"
] | permissive | aivclab/vision | dda3b30648b01c2639d64a016b8dbcfccb87b27f | 06839b08d8e8f274c02a6bcd31bf1b32d3dc04e4 | refs/heads/master | 2023-08-21T22:35:10.114394 | 2022-11-02T10:14:08 | 2022-11-02T10:14:08 | 172,566,233 | 1 | 3 | Apache-2.0 | 2023-08-16T05:11:30 | 2019-02-25T19:00:57 | Python | UTF-8 | Python | false | false | 979 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
"""
from pathlib import Path
from warg import NOD
MQTT_CAM_CONFIG = NOD(
mqtt=NOD(
broker="localhost", port=1883, QOS=1
), # or an ip address like 192.168.1.74
camera=NOD(
video_source=0,
fps=30, # 2
mqtt_topic="video/video0/capture",
# If your desired camera is listed as source 0 you will configure video_source: 0. Alternatively
# you can configure the video source as an MJPEG or RTSP stream. For example in config.yml you may
# configure something like video_source: "rtsp://admin:password@192.168.1.94:554/11" for a RTSP
# camera.
),
processing=NOD(
subscribe_topic="video/video0/capture",
publish_topic="video/video0/capture/rotated",
),
save_captures=NOD(
mqtt_topic="video/video0/capture", captures_directory=Path("captures")
),
)
| [
"christian.heider@alexandra.dk"
] | christian.heider@alexandra.dk |
42e17a6a17075549bcba19d12ccfd1b3f4983c35 | f525a67f7920d6d35077e60bbe3012ffd455ebdb | /sorting/reorder_data_log_files.py | d2cffa475979985bf5fd5fcbac86803e9689541e | [] | no_license | uma-c/CodingProblemSolving | c29671a76762ba34af0cab05d68e86f798616cab | b7d3b9e2f45ba68a121951c0ca138bf94f035b26 | refs/heads/master | 2023-05-02T05:38:43.666829 | 2021-05-19T02:23:13 | 2021-05-19T02:23:13 | 286,168,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | '''
You have an array of logs. Each log is a space delimited string of words.
For each log, the first word in each log is an alphanumeric identifier. Then, either:
Each word after the identifier will consist only of lowercase letters, or;
Each word after the identifier will consist only of digits.
We will call these two varieties of logs letter-logs and digit-logs. It is guaranteed that each log has at least one word after its identifier.
Reorder the logs so that all of the letter-logs come before any digit-log. The letter-logs are ordered lexicographically ignoring identifier, with the identifier used in case of ties. The digit-logs should be put in their original order.
Return the final order of the logs.
'''
from typing import List
def reorder_logs(logs: List[int]) -> List[str]:
let_logs = []
dig_logs = []
for log in logs:
if '0' <= log[-1] <= '9':
dig_logs.append(log)
else:
ident_after_idx = log.index(' ')
let_logs.append([log[(ident_after_idx+1):], log[0:ident_after_idx]])
let_logs.sort()
result = []
for let_log in let_logs:
result.append(let_log[1] + ' ' + let_log[0])
result += dig_logs
return result | [
"chowtoori@live.com"
] | chowtoori@live.com |
bccfed5d348f8c095814aa00c8d5e77feb4040ee | 05e454259b44882a1bfff0ba82475374b36b74f0 | /vision/utils/video_writer.py | 099a8a4933882341dd3d3cd0c9295757e019ac70 | [
"BSD-3-Clause"
] | permissive | TeamAutonomousCarOffenburg/TACO_2017 | ec49f539528388f28114cca9787c1ab7db880e64 | 724c37188209818c22046d2229f67d882c36e2f4 | refs/heads/master | 2021-08-14T18:33:24.203830 | 2017-11-16T13:48:57 | 2017-11-16T13:48:57 | 110,350,009 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | import os
import time as t
from threading import Thread
import utils.tools as tools
import cv2
class VideoWriter:
def __init__(self, im_width, im_height, folder, video_queue):
self.video_queue = video_queue
self.folder = folder
self.im_width = im_width
self.im_height = im_height
self.writer = None
self.file = None
self.fps = None
self.stopped = True
self.thread = Thread(target=self.update, args=())
def init(self, file_prefix="output", fps=30):
filename = "{}_{}.avi".format(file_prefix, tools.get_timestamp_ms())
self.file = os.path.join(self.folder, filename)
self.fps = fps
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
self.writer = cv2.VideoWriter(self.file, fourcc,
float(self.fps), (self.im_width,
self.im_height))
return self
def start(self):
self.stopped = False
self.thread.start()
print("[VIDEO WRITER] Thread for writing video started")
return self
def update(self):
while True:
# if self.stopped and self.video_queue.empty():
if self.stopped:
return
# wait for element in queue
try:
image = self.video_queue.get_nowait()
except Exception as e:
t.sleep(0.02)
continue
self.writer.write(image)
def stop(self):
while not self.video_queue.empty():
t.sleep(0.1)
self.stopped = True
self.writer.release()
print('[VIDEO WRITER] Video written to file: {}'.format(self.file))
def is_running(self):
return not self.stopped
def is_thread_alive(self):
return self.thread.is_alive()
def get_video_file_name(self):
return self.file
| [
"jensfischer95@gmail.com"
] | jensfischer95@gmail.com |
a9ea65ef0f77600f090da1acf54b75f98d380c1c | c2643d37464d847facfaa39eca662578b6744c39 | /async_www/app.py | a365c66be8802682fdcaba1b4ff00589baf96892 | [] | no_license | Jelair/TMS_back_end | c85cd8dd74792a88354c8c2d85ff7e99dfd92677 | be267a70741cf7b6810bcc165fbe383c809f24ff | refs/heads/master | 2021-09-07T08:36:58.711793 | 2018-02-20T11:52:11 | 2018-02-20T11:52:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,866 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
-------------------------------------------------
File Name: app
Description :
Author : simplefly
date: 2018/2/3
-------------------------------------------------
Change Activity:
2018/2/3:
-------------------------------------------------
"""
__author__ = 'simplefly'
from jinja2 import Environment, FileSystemLoader
import asyncio, os, json, time
from datetime import datetime
from async_www import orm
from aiohttp import web
from async_www.config import configs
import logging; logging.basicConfig(level=logging.INFO)
from async_www.coreweb import add_routes, add_static
from async_www.handlers import cookie2user, COOKIE_NAME
# 初始化渲染模板
def init_jinja2(app, **kw):
logging.info('init jinja2...')
options = dict(
autoescape = kw.get('autoescape', True),
block_start_string = kw.get('block_start_string', '{%'),
block_end_string = kw.get('block_end_string', '%}'),
variable_start_string = kw.get('variable_start_string', '{{'),
variable_end_string = kw.get('variable_end_string', '}}'),
auto_reload = kw.get('auto_reload', True)
)
path = kw.get('path', None)
if path is None:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
logging.info('set jinja2 template path:%s' % path)
env = Environment(loader=FileSystemLoader(path), **options)
filters = kw.get('filters', None)
if filters is not None:
for name, f in filters.items():
env.filters[name] = f
app['__templating__'] = env
@asyncio.coroutine
def logger_factory(app, handler):
@asyncio.coroutine
def logger(request):
logging.info('Request: %s %s' % (request.method, request.path))
return (yield from handler(request))
return logger
@asyncio.coroutine
def auth_factory(app, handler):
@asyncio.coroutine
def auth(request):
logging.info('check user: %s %s' % (request.method, request.path))
request.__user__ = None
cookie_str = request.cookies.get(COOKIE_NAME)
if cookie_str:
user = yield from cookie2user(cookie_str)
if user:
logging.info('set current user: %s' % user.email)
request.__user__ = user
if request.path.startswith('/manage/') and (request.__user__ is None or not request.__user__.admin):
return web.HTTPFound('/signin')
return (yield from handler(request))
return auth
@asyncio.coroutine
def data_factory(app, handler):
@asyncio.coroutine
def parse_data(request):
if request.method == 'POST':
if request.content_type.startswith('application/json'):
request.__data__ = yield from request.json()
logging.info('request json: %s' % str(request.__data__))
elif request.content_type.startswith('application/x-www-form-urlencoded'):
request.__data__ = yield from request.post()
logging.info('request form: %s' % str(request.__data__))
return (yield from handler(request))
return parse_data
@asyncio.coroutine
def response_factory(app, handler):
@asyncio.coroutine
def response(request):
logging.info('Response handler...')
r = yield from handler(request)
if isinstance(r, web.StreamResponse):
return r
if isinstance(r, bytes):
resp = web.Response(body=r)
resp.content_type = 'application/octet-stream'
return resp
if isinstance(r, str):
if r.startswith('redirect:'):
return web.HTTPFound(r[9:])
resp = web.Response(body=r.encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, dict):
template = r.get('__template__')
if template is None:
resp = web.Response(body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
resp.content_type = 'application/json;charset=utf-8'
return resp
else:
r['__user__'] = request.__user__
resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
resp.content_type = 'text/html;charset=utf-8'
return resp
if isinstance(r, int) and t >= 100 and t < 600:
return web.Response(t)
if isinstance(r, tuple) and len(r) == 2:
t, m = r
if isinstance(t, int) and t >= 100 and t < 600:
return web.Response(t, str(m))
# default
resp = web.Response(body=str(r).encode('utf-8'))
resp.content_type = 'text/plain;charset=utf-8'
return resp
return response
def datetime_filter(t):
delta = int(time.time() - t)
if delta < 60:
return u'1分钟前'
if delta < 3600:
return u'%s分钟前' % (delta // 60)
if delta < 86400:
return u'%s小时前' % (delta // 3600)
if delta < 604800:
return u'%s天前' % (delta // 86400)
dt = datetime.fromtimestamp(t)
return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)
@asyncio.coroutine
def init(loop):
yield from orm.create_pool(loop=loop, **configs.db)
app = web.Application(loop=loop, middlewares=[
logger_factory, auth_factory, response_factory
])
init_jinja2(app, filters=dict(datetime=datetime_filter))
add_routes(app, 'handlers')
add_static(app)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | [
"1059229782@qq.com"
] | 1059229782@qq.com |
6823ae91cfcabb0d9c1f3bdc24adb4ffb866e73c | 3e30f89790a93e715ef7eb396575e28ae5849cf0 | /SurfaceTest.py | d4faf5549dc422114ab10b5dc19c567fb65d69b3 | [] | no_license | sulantha2006/Surface | 090d5d56fbe778de0b6c3a75cfb6cc3a2ebe12f3 | ccc58cbd206da5063c880927c8ba130b6fe6e097 | refs/heads/master | 2021-01-10T18:49:45.489158 | 2013-12-11T19:01:48 | 2013-12-11T19:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | __author__ = 'sulantha'
import numpy
from mayavi.mlab import *
from mayavi import mlab
def test_contour3d():
x, y, z = numpy.ogrid[-100:100:1, -100:100:1, -100:100:1]
scalars = (x*2*y*z*z)+(x*y*z)
obj = contour3d(scalars, contours=4, transparent=True)
return obj
test_contour3d()
mlab.show()
| [
"sulantha.s@gmail.com"
] | sulantha.s@gmail.com |
81dac263c0eb19bc1f2482b80239c0c651db6ed4 | 45fd54ecc12334806b4a285ca3886f3fe0d191c4 | /tests/fixtures/entities.py | 3d1cb92dff353405a98989dc76ded7ab47a091a7 | [
"BSD-3-Clause"
] | permissive | azthief/pontoon | 124fcb4b36ecbe7dc288df8d49ac4ed8e02b9d71 | 14f9de9b020e45c375311181ed32e487e76d28f8 | refs/heads/master | 2021-08-23T21:05:16.511013 | 2017-12-01T15:01:30 | 2017-12-01T15:01:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # -*- coding: utf-8 -*-
import functools
import pytest
from pontoon.base.models import Entity
@pytest.fixture
def entity0(resource0):
"""Entity 0"""
return Entity.objects.get(resource=resource0, string="entity0")
@pytest.fixture
def entity1(resource1):
"""Entity 1"""
return Entity.objects.get(resource=resource1, string="entity1")
@pytest.fixture
def entity_factory(factory):
"""Entity factory
create entities in a hurry!
Provides an entity factory function that accepts the following args:
:arg int `batch`: number of entities to instantiate, defaults to len of
`batch_kwargs` or 1
:arg list `batch_kwargs`: a list of kwargs to instantiate the entities
"""
def instance_attrs(instance, i):
if not instance.string:
instance.string = "Entity %s" % i
return functools.partial(
factory, Model=Entity, instance_attrs=instance_attrs)
| [
"ryan@synca.io"
] | ryan@synca.io |
a00525930a6cd48eadc9c0a8846ad4b1f4204286 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_070/ch159_2020_06_21_20_15_37_551744.py | 5e4469af1a68b27e0cd0f94e4e1993cac9b58f68 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import json
with open('estoque.json', 'r') as arquivo:
estoque = arquivo.read()
estoque = json.loads(estoque)
valorfinal = 0
for produto in estoque["produtos"]:
qntd = produto["quantidade"]
valor = produto["valor"]
valorfinal += (qntd * valor)
print(valorfinal) | [
"you@example.com"
] | you@example.com |
6a11e069625a1828e9ba4064e026411c93586e9f | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_load_balancer_backend_address_pools_operations.py | 9307da4a8071e2a55f8c2e4a3ff1f14923674312 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 22,448 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
    self,
    resource_group_name: str,
    load_balancer_name: str,
    backend_address_pool_name: str,
    **kwargs: Any
) -> "_models.BackendAddressPool":
    """Gets load balancer backend address pool.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :param backend_address_pool_name: The name of the backend address pool.
    :type backend_address_pool_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: BackendAddressPool, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_11_01.models.BackendAddressPool
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackendAddressPool"]
    # Map well-known failure status codes to typed azure-core exceptions;
    # callers may extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    # Construct URL from the route template stored on this method's metadata.
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for this operation; anything else raises a
    # mapped typed exception or a generic HttpResponseError.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('BackendAddressPool', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    load_balancer_name: str,
    backend_address_pool_name: str,
    parameters: "_models.BackendAddressPool",
    **kwargs: Any
) -> "_models.BackendAddressPool":
    """Initial PUT request of the create-or-update long-running operation.

    Serializes ``parameters`` as the request body and returns the
    deserialized ``BackendAddressPool`` from a 200 or 201 response.
    Used by :meth:`begin_create_or_update`; not part of the public API.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackendAddressPool"]
    # Map well-known failure status codes to typed azure-core exceptions.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body and issue the PUT.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'BackendAddressPool')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry the resource body.
    if response.status_code == 200:
        deserialized = self._deserialize('BackendAddressPool', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('BackendAddressPool', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    load_balancer_name: str,
    backend_address_pool_name: str,
    parameters: "_models.BackendAddressPool",
    **kwargs: Any
) -> AsyncLROPoller["_models.BackendAddressPool"]:
    """Creates or updates a load balancer backend address pool.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :param backend_address_pool_name: The name of the backend address pool.
    :type backend_address_pool_name: str
    :param parameters: Parameters supplied to the create or update load balancer backend address
     pool operation.
    :type parameters: ~azure.mgmt.network.v2020_11_01.models.BackendAddressPool
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either BackendAddressPool or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.BackendAddressPool]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackendAddressPool"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a continuation token.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            load_balancer_name=load_balancer_name,
            backend_address_pool_name=backend_address_pool_name,
            parameters=parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization once the LRO reaches a terminal state.
        deserialized = self._deserialize('BackendAddressPool', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # The ARM poller uses these path arguments to resolve relative polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state is read from the Azure-AsyncOperation header for this operation.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    load_balancer_name: str,
    backend_address_pool_name: str,
    **kwargs: Any
) -> None:
    """Initial DELETE request of the delete long-running operation.

    Accepts 200/202/204 as success and returns ``None`` (or ``cls(...)``).
    Used by :meth:`begin_delete`; not part of the public API.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known failure status codes to typed azure-core exceptions.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = deleted, 202 = deletion in progress, 204 = already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    load_balancer_name: str,
    backend_address_pool_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified load balancer backend address pool.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :param backend_address_pool_name: The name of the backend address pool.
    :type backend_address_pool_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a continuation token.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            load_balancer_name=load_balancer_name,
            backend_address_pool_name=backend_address_pool_name,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete yields no body: returns cls(...) if given, otherwise None.
        if cls:
            return cls(pipeline_response, None, {})

    # The ARM poller uses these path arguments to resolve relative polling URLs.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state is read from the Location header for this operation.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}  # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
4ce36af0556c38c0b99aa0f46457cfeee2e0ccb1 | 318572c21d892155e7418e7eee88057a4f3c721d | /test/test_csr.py | a5958650d7ea97ccde7757fc32b9ade9bdb92cdc | [
"BSD-2-Clause"
] | permissive | goran-mahovlic/litex | 69a1b1d8b1e0c1e3788c5691888527ae7bc74506 | 8030c691137d294043d797ff140de3c65aefc086 | refs/heads/master | 2020-07-11T08:49:58.894980 | 2019-08-26T21:02:03 | 2019-08-26T21:02:03 | 204,495,234 | 1 | 0 | NOASSERTION | 2019-08-26T16:36:25 | 2019-08-26T14:36:25 | null | UTF-8 | Python | false | false | 3,085 | py | # This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
import unittest
from migen import *
from litex.soc.interconnect import csr
from litex.soc.interconnect import csr_bus
def csr32_write(dut, adr, dat):
    """Write a 32-bit word as four byte-wide CSR accesses, MSB at adr + 3."""
    for offset in (3, 2, 1, 0):
        byte = (dat >> (8 * (3 - offset))) & 0xff
        yield from dut.csr.write(adr + offset, byte)
def csr32_read(dut, adr):
    """Read a 32-bit word back as four byte-wide CSR accesses, MSB at adr + 3."""
    word = 0
    shift = 0
    for offset in (3, 2, 1, 0):
        byte = yield from dut.csr.read(adr + offset)
        word |= byte << shift
        shift += 8
    return word
class CSRModule(Module, csr.AutoCSR):
    """Toy migen module exposing one plain CSR, one CSRStorage and one CSRStatus.

    Writing the plain CSR makes the fabric overwrite the storage register
    with 0xdeadbeef and latch the storage value into the status register.
    """
    def __init__(self):
        self._csr = csr.CSR()
        # write_from_dev=True lets the fabric (not only the bus) update storage.
        self._storage = csr.CSRStorage(32, reset=0x12345678, write_from_dev=True)
        self._status = csr.CSRStatus(32, reset=0x12345678)

        # # #

        # When csr is written:
        # - set storage to 0xdeadbeef
        # - set status to storage value
        self.comb += [
            If(self._csr.re,
                self._storage.we.eq(1),
                self._storage.dat_w.eq(0xdeadbeef)
            )
        ]
        # NOTE(review): the status latch is in the synchronous domain, so it
        # presumably captures the storage value as of the clock edge on which
        # the write strobe fires — confirm against the simulation tests below.
        self.sync += [
            If(self._csr.re,
                self._status.status.eq(self._storage.storage)
            )
        ]
class CSRDUT(Module):
    """Device under test: a CSRModule wired to a CSR bus through a bank array."""
    def address_map(self, name, memory):
        # Single peripheral, mapped at CSR bank address 0.
        return {"csrmodule": 0}[name]

    def __init__(self):
        self.csr = csr_bus.Interface()
        self.submodules.csrmodule = CSRModule()
        self.submodules.csrbankarray = csr_bus.CSRBankArray(
            self, self.address_map)
        self.submodules.csrcon = csr_bus.Interconnect(
            self.csr, self.csrbankarray.get_buses())
class TestCSR(unittest.TestCase):
    """Simulation tests for the CSRStorage / CSRStatus behaviour of CSRModule."""

    def test_csr_storage(self):
        """Storage register: readable, bus-writable, and device-updatable."""
        def generator(dut):
            # check init value
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0x12345678))
            # check writes
            yield from csr32_write(dut, 1, 0x5a5a5a5a)
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0x5a5a5a5a))
            yield from csr32_write(dut, 1, 0xa5a5a5a5)
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0xa5a5a5a5))
            # check update from dev: strobing the plain CSR at address 0 makes
            # the module overwrite its own storage with 0xdeadbeef
            yield from dut.csr.write(0, 1)
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0xdeadbeef))
        dut = CSRDUT()
        run_simulation(dut, generator(dut))

    def test_csr_status(self):
        """Status register: read-only from the bus, updated from the device side."""
        def generator(dut):
            # check init value
            self.assertEqual(hex((yield from csr32_read(dut, 1))), hex(0x12345678))
            # check writes (no effect)
            yield from csr32_write(dut, 5, 0x5a5a5a5a)
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0x12345678))
            yield from csr32_write(dut, 5, 0xa5a5a5a5)
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0x12345678))
            # check update from dev
            # NOTE(review): strobed twice — presumably the synchronous status
            # latch needs a second strobe before the new storage value is
            # visible; confirm against CSRModule's sync block.
            yield from dut.csr.write(0, 1)
            yield from dut.csr.write(0, 1)
            self.assertEqual(hex((yield from csr32_read(dut, 5))), hex(0xdeadbeef))
        dut = CSRDUT()
        run_simulation(dut, generator(dut))
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
85d1b8bfb200d8f6023817335f55d8cc0ce0daa0 | 6a0cb1571b72b3f5708bb861b303380cc57a9a16 | /English/prepare_flickr_train_chunks.py | 3fa5629d374d4e1aa59fb12e265eddbe8553ad13 | [
"Apache-2.0"
] | permissive | cltl/Spoken-versus-Written | 5bb8f5c46bba2594e86bcaeb12b63c29f78aa443 | 997024ae60a3f1dacf87162aa3c82439393c1bf2 | refs/heads/master | 2020-03-21T13:35:22.902840 | 2018-12-07T20:56:28 | 2018-12-07T20:56:28 | 138,614,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | from collections import defaultdict
def chunks(l, n):
    """Yield successive n-sized chunks from l (last chunk may be shorter)."""
    starts = range(0, len(l), n)
    for start in starts:
        stop = start + n
        yield l[start:stop]
# Only focus on the training data.
with open('./Resources/Flickr30K/splits/train_images.txt') as f:
    # Image id = filename with its extension stripped.
    train_ids = {line.split('.')[0] for line in f}

# Compile index for the written Flickr30K descriptions.
# Each token line looks like '<image>.<ext>#<k>\t<caption>' — presumably the
# standard Flickr30K format; confirmed only by the splits used below.
flickr_index = defaultdict(list)
with open('./Resources/Flickr30K/results_20130124.token') as f:
    for line in f:
        identifier, description = line.strip().split('\t')
        identifier = identifier.split('.')[0]
        if identifier in train_ids:
            flickr_index[identifier].append(description + '\n')

# zip(*descriptions) regroups the per-image caption lists so that caption #0
# of every image comes first, then caption #1, etc. (zip truncates to the
# shortest list, so images with fewer captions bound the output).
# NOTE(review): train_ids is a set, so the image order here is not
# deterministic across runs — confirm that chunk contents need not be stable.
descriptions = [flickr_index[imgid] for imgid in train_ids]
flattened_descriptions = [description for split in zip(*descriptions)
                          for description in split]
# Write the first 100 chunks of 1000 captions each to numbered files.
gen = chunks(flattened_descriptions, 1000)
for i in range(100):
    lines = next(gen)
    with open('./Resources/Flickr30K/train_chunks/flickr_chunk.{0:03}'.format(i), 'w') as f:
        f.writelines(lines)
| [
"emielonline@gmail.com"
] | emielonline@gmail.com |
0f736fdf633fa85c109716227574ac1e44c6a553 | bfb036667018dd50883f03ccc51b2d7cbe93b94e | /SignIn/urls.py | d7e0b30211a9534c025dc93ee04b8e09a3c42ea1 | [] | no_license | uniquehou/txj-php | 845589bd256237d133471d51e4501a06082ff6c7 | 05edbbcfac541a03754421850c7d0767d12030cc | refs/heads/master | 2021-07-21T05:10:01.513922 | 2017-10-30T23:27:50 | 2017-10-30T23:27:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from django.conf.urls import url
from . import views
app_name = "SignIn"  # URL namespace, so reverse lookups are 'SignIn:index' etc.

urlpatterns = [
    # Bare prefix serves the index view (no name, not reversible).
    url(r'^$', views.index),
    # NOTE(review): these patterns are unanchored (no ^...$), so r'index'
    # also matches paths like 'reindex' — confirm this is intended.
    url(r'index', views.index, name='index'),
    url(r'submit', views.submit, name='submit'),
]
| [
"919863463@qq.com"
] | 919863463@qq.com |
f465a631611beafd6ed28baa3a9cd236e84b711e | 45e03dd61493195cbbbce14fa54a787715c7c1fb | /Python_String_Methods/Encode().py | 66027a0d7c89956c85b20b4cacfba92d51cd82c2 | [] | no_license | VaishnaviReddyGuddeti/Python_programs | c55ee69c05d78a70a44385ee2e66365f69546035 | 309a1786fa5a3886d516cd49eb09f9cd847389df | refs/heads/master | 2023-02-17T03:41:31.480858 | 2021-01-14T15:39:52 | 2021-01-14T15:39:52 | 279,762,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # Encode() - Returns an encoded version of the string
# Syntax - string.encode(encoding=encoding, errors=errors)
# UTF-8 encode the string:
txt = "My name is Sushanth"
x = txt.encode()
print(x)
# These examples uses ascii encoding, and a character that cannot be encoded, showing the result with different errors:
txt = "My name is Ståle"
print(txt.encode(encoding="ascii",errors="backslashreplace"))
print(txt.encode(encoding="ascii",errors="ignore"))
print(txt.encode(encoding="ascii",errors="namereplace"))
print(txt.encode(encoding="ascii",errors="replace"))
print(txt.encode(encoding="ascii",errors="xmlcharrefreplace"))
print(txt.encode(encoding="ascii",errors="strict"))
| [
"vaishnavireddyguddeti@gmail.com"
] | vaishnavireddyguddeti@gmail.com |
390c4e261333eb5c95eb3d2d31bbf17f59221205 | 0ffed23713096d9034efdc44a9f1740f79ddc9e5 | /scripts/QMDP_RCNN/IRL_linear_decay.py | 3e763658e69cb3f7aea43ee969982ea4cbb4f5d1 | [] | no_license | rohitsemwal16/RCNN_MDP | 1f559a725195fbaf59e6f2f375695372251b8e55 | 1c493d17d71c470ebc1dbd1795a75d8ed11eb00a | refs/heads/master | 2021-09-04T17:24:04.348415 | 2018-01-20T10:52:17 | 2018-01-20T10:52:17 | 118,078,756 | 0 | 0 | null | 2018-01-19T04:52:16 | 2018-01-19T04:52:16 | null | UTF-8 | Python | false | false | 5,552 | py | #!/usr/bin/env python
import numpy as npy
from variables import *
# Number of discrete actions — presumably the 8 grid move directions;
# confirm against the transition kernels defined in variables.py.
action_size = 8
def initialize_state():
    """Put all initial belief mass on the currently observed grid cell."""
    # global current_pose, from_state_belief, observed_state
    global observed_state
    # from_state_belief is a module-level array from 'variables'; mutated in place.
    from_state_belief[observed_state[0],observed_state[1]] = 1.
def initialize_observation():
    """Define the 3x3 observation likelihood kernel and normalise it to sum to 1."""
    global observation_model
    # Earlier kernel candidates, kept for reference:
    # observation_model = npy.array([[0.05,0.05,0.05],[0.05,0.6,0.05],[0.05,0.05,0.05]])
    # observation_model = npy.array([[0.05,0.05,0.05],[0.05,0.6,0.05],[0.05,0.05,0.05]])
    observation_model = npy.array([[0.05,0.1,0.05],[0.1,0.4,0.1],[0.05,0.1,0.05]])
    # Epsilon keeps every cell strictly positive before renormalisation.
    epsilon=0.0001
    observation_model += epsilon
    observation_model /= observation_model.sum()
def display_beliefs():
    """Print a 10x10 window of the from/to/target beliefs centred on current_pose.

    Debug helper; uses Python 2 print statements like the rest of the script.
    """
    global from_state_belief,to_state_belief,target_belief,current_pose
    print "From:"
    for i in range(current_pose[0]-5,current_pose[0]+5):
        print from_state_belief[i,current_pose[1]-5:current_pose[1]+5]
    print "To:"
    for i in range(current_pose[0]-5,current_pose[0]+5):
        print to_state_belief[i,current_pose[1]-5:current_pose[1]+5]
    print "Target:"
    for i in range(current_pose[0]-5,current_pose[0]+5):
        print target_belief[i,current_pose[1]-5:current_pose[1]+5]
def bayes_obs_fusion():
    """Bayes-correct the propagated belief with the observation kernel around observed_state."""
    global to_state_belief, current_pose, observation_model, obs_space, observed_state, corr_to_state_belief
    dummy = npy.zeros(shape=(discrete_size,discrete_size))
    # Half-width of the observation window (integer division under Python 2;
    # assumes obs_space is an odd integer — TODO confirm in variables.py).
    h = obs_space/2
    for i in range(-h,h+1):
        for j in range(-h,h+1):
            # Pointwise product of predicted belief and observation likelihood.
            dummy[observed_state[0]+i,observed_state[1]+j] = to_state_belief[observed_state[0]+i,observed_state[1]+j] * observation_model[h+i,h+j]
    # Renormalise the fused window into a proper distribution.
    corr_to_state_belief[:,:] = copy.deepcopy(dummy[:,:]/dummy.sum())
def initialize_all():
    """Reset belief state and observation model at the start of each trajectory."""
    initialize_state()
    initialize_observation()
def construct_from_ext_state():
    """Copy the belief into the centre of the padded 'extended' grid (pad width w)."""
    global from_state_ext, from_state_belief,discrete_size
    d=discrete_size
    from_state_ext[w:d+w,w:d+w] = copy.deepcopy(from_state_belief[:,:])
def belief_prop_extended(action_index):
    """Propagate belief one step: convolve with the action's transition kernel,
    then fold any mass that leaked into the padding back toward the interior.

    :param action_index: index into trans_mat selecting the transition kernel.
    """
    global trans_mat, from_state_ext, to_state_ext, w, discrete_size
    to_state_ext = signal.convolve2d(from_state_ext,trans_mat[action_index],'same')
    d=discrete_size
    ##NOW MUST FOLD THINGS:
    # Shift mass inward from both borders so no probability is lost at the
    # map boundary (rows and columns handled symmetrically).
    for i in range(0,2*w):
        to_state_ext[i+1,:]+=to_state_ext[i,:]
        to_state_ext[i,:]=0
        to_state_ext[:,i+1]+=to_state_ext[:,i]
        to_state_ext[:,i]=0
        to_state_ext[d+2*w-i-2,:]+= to_state_ext[d+2*w-i-1,:]
        to_state_ext[d+2*w-i-1,:]=0
        to_state_ext[:,d+2*w-i-2]+= to_state_ext[:,d+2*w-i-1]
        to_state_ext[:,d+2*w-i-1]=0
    # Crop the padded result back to the working grid.
    to_state_belief[:,:] = copy.deepcopy(to_state_ext[w:d+w,w:d+w])
def feedforward_recurrence():
    """Feed this step's propagated belief forward as the next step's prior."""
    global from_state_belief, to_state_belief, corr_to_state_belief
    # Observation-corrected variant (unused while bayes_obs_fusion is disabled):
    # from_state_belief = copy.deepcopy(corr_to_state_belief)
    from_state_belief = copy.deepcopy(to_state_belief)
def calc_softmax():
    """Convert the Q-value estimates into a softmax action distribution.

    Writes the probabilities into the module-level qmdp_values_softmax array
    (one entry per action).
    """
    global qmdp_values, qmdp_values_softmax
    # The exponentials and their sum are loop-invariant: the original
    # re-evaluated npy.exp(qmdp_values) and its npy.sum on every iteration.
    # Hoisting them yields identical values with O(action_size) work.
    exp_values = npy.exp(qmdp_values)
    denominator = npy.sum(exp_values, axis=0)
    for act in range(0,action_size):
        qmdp_values_softmax[act] = exp_values[act] / denominator
def dummy_softmax():
    """Greedy one-hot alternative to calc_softmax: all mass on the argmax action."""
    global qmdp_values, qmdp_values_softmax, action_size
    # for act in range(0,action_size):
    qmdp_values_softmax = npy.zeros(action_size)
    qmdp_values_softmax[npy.argmax(qmdp_values)]=1.
def update_QMDP_values():
    """QMDP value of each action: expectation of its Q map under the current belief."""
    global to_state_belief, q_value_estimate, qmdp_values, from_state_belief
    for act in range(0,action_size):
        # Post-propagation variant, kept for reference:
        # qmdp_values[act] = npy.sum(q_value_estimate[act]*to_state_belief)
        qmdp_values[act] = npy.sum(q_value_estimate[act]*from_state_belief)
# def IRL_backprop():
def Q_backprop():
    """One gradient step on the Q-value maps toward the demonstrated action.

    Softmax-cross-entropy style update, weighted by the belief over states;
    the learning rate decays linearly with time_index (hence 'linear decay').
    """
    global to_state_belief, q_value_estimate, qmdp_values_softmax, learning_rate, annealing_rate
    global trajectory_index, length_index, target_actions, time_index
    update_QMDP_values()
    calc_softmax()
    # dummy_softmax()
    # Linearly annealed step size.
    alpha = learning_rate - annealing_rate * time_index
    for act in range(0,action_size):
        # (softmax - one-hot target) is the cross-entropy gradient per action.
        q_value_estimate[act,:,:] = q_value_estimate[act,:,:] - alpha*(qmdp_values_softmax[act]-target_actions[act])*from_state_belief[:,:]
        # print "Ello", alpha*(qmdp_values_softmax[act]-target_actions[act])*from_state_belief[:,:]
def parse_data():
    """Load the demo sample at (trajectory_index, length_index) into the working globals."""
    global observed_state, trajectory_index, length_index, target_actions, current_pose, trajectories
    observed_state[:] = observed_trajectories[trajectory_index,length_index,:]
    # One-hot encode the demonstrated action.
    target_actions[:] = 0
    target_actions[actions_taken[trajectory_index,length_index]] = 1
    current_pose[:] = trajectories[trajectory_index,length_index,:]
def master():
    """One training step: propagate belief for the taken action, load the
    current sample, apply the Q backprop update, then recur the belief."""
    global trans_mat_unknown, to_state_belief, from_state_belief, target_belief, current_pose
    global trajectory_index, length_index
    construct_from_ext_state()
    belief_prop_extended(actions_taken[trajectory_index,length_index])
    print observed_state, current_pose, target_actions, qmdp_values_softmax
    # Observation fusion currently disabled:
    # bayes_obs_fusion()
    parse_data()
    Q_backprop()
    # display_beliefs()
    feedforward_recurrence()
def Inverse_Q_Learning():
    """Run inverse Q-learning over every step of every demonstration trajectory."""
    global trajectories, trajectory_index, length_index, trajectory_length, number_trajectories, time_index
    time_index = 0
    for trajectory_index in range(0,number_trajectories):
        initialize_all()
        for length_index in range(0,trajectory_length):
            # Guard: only step while the belief still carries probability mass.
            if (from_state_belief.sum()>0):
                master()
                time_index += 1
                print time_index
            else:
                print "We've got a problem"
# Script entry: seed the first sample, train, then visualise and dump results.
trajectory_index = 0
length_index = 0
parse_data()
Inverse_Q_Learning()

# Value function = max over actions of the learned Q maps.
value_function = npy.amax(q_value_estimate, axis=0)
plt.imshow(value_function, interpolation='nearest', origin='lower', extent=[0,50,0,50], aspect='auto')
plt.show(block=False)
plt.colorbar()
plt.show()

# Save each action's Q map as text (the 'file' builtin => Python 2 script).
with file('Q_Value_Estimate.txt','w') as outfile:
    for data_slice in q_value_estimate:
        outfile.write('#Q_Value_Estimate.\n')
        npy.savetxt(outfile,data_slice,fmt='%-7.2f')
"tanmay.shankar@gmail.com"
] | tanmay.shankar@gmail.com |
9985f02a3baeb6a8113ad32eba14a61b5d1e079b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnurethra.py | e6482a1e9e0c51deb67c1bc5d89eac47143830f5 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('LeakWTI2.py', 1), ('ClarGE2.py', 5)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
97852b5e54f297008951ce01fea42b20236751c7 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Algorithms/Pascal's Triangle grid.py | 7171af45039db025d7f6251f269ee3a26bdb34cf | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py |
# Lattice-path grid: grid[i][j] counts monotone paths to cell (i, j),
# i.e. the binomial coefficient C(i + j, i); edges are all ones.
grid = []
n = 7
for _ in range(n + 1):
    grid.append([1] * (n + 1))
for row in range(1, n + 1):
    for col in range(1, n + 1):
        grid[row][col] = grid[row - 1][col] + grid[row][col - 1]
print(grid)
print(str(grid[n][n]))
'''
1
1 1
1 2 1
1 3 3 1
1 4 6 4 1
1 5 10 10 5 1
1 6 15 20 15 6 1
1 7 21 35 35 21 7 1
'''
print('\n--------------')


def main():
    """Build a 4x4 lattice-path (Pascal-style) grid and print it plus the corner value."""
    size = 3
    points = size + 1
    table = []
    for row_idx in range(points):
        row = []
        for col_idx in range(points):
            if row_idx == 0 or col_idx == 0:
                # First row/column: exactly one path.
                row.append(1)
            else:
                row.append(row[col_idx - 1] + table[row_idx - 1][col_idx])
        table.append(row)
    print(table)
    print(table[size][size])


main()
print('\n-------------------- OnE MATRIX ROW - PASCAL TRIANGLE----------------------\n')
def generate_Pascal_Triangle(row_nr):
    '''Generate Pascal's triangle (binomial coefficients) in staircase form.

    Row 0 is a run of ones of length row_nr + 1; each later row is the
    running prefix sum of the previous one, one element shorter, so the
    triangle's diagonals appear as the rows of the returned matrix.

    :param row_nr: int, number of derived rows to build.
    :return: nested list of ints (row_nr + 1 rows).
    '''
    rows = [[1] * (row_nr + 1)]
    for step in range(row_nr):
        previous = rows[-1]
        running = 0
        nxt = []
        for idx in range(row_nr - step):
            running += previous[idx]
            nxt.append(running)
        rows.append(nxt)
    return rows
# Demo: build the 8-row staircase Pascal triangle and print it row by row.
print('\n-----------Pascal s Triangle --------------' )
Pasc = generate_Pascal_Triangle(7)
print(Pasc,'\n')
for i in range(len(Pasc)):
    print(Pasc[i])
# print(T)
# for i in Pascal:
# for j in range(i, rows+1):
# comb[j] += comb[j-i]
# print(comb)
#
# print(comb)
#
# rows = 7
# comb = [1] + [0]*rows
# # print(comb)
# Pascal = [1]*rows
# # print(Pascal)
#
# for i in Pascal:
# for j in range(i, rows+1):
# comb[j] += comb[j-i]
# print(comb)
#
# print(comb)
| [
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
50c850718ee41b0daaf57cbf5aa1c0f224458fa1 | 6aab2d11b3ab7619ee26319886dcfc771cbcaba5 | /0x0A-python-inheritance/4-inherits_from.py | 7c1e345e609de731fc003fa63903631ed93486f7 | [] | no_license | IhebChatti/holbertonschool-higher_level_programming | ef592f25eb077e182a0295cb5f2f7d69c7a8ab67 | ca58262c6f82f98b2022344818e20d382cf82592 | refs/heads/master | 2022-12-18T10:06:30.443550 | 2020-09-24T17:31:30 | 2020-09-24T17:31:30 | 259,174,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | #!/usr/bin/python3
"""check if obj inhertied directly of a class
"""
def inherits_from(obj, a_class):
    """Check whether obj is an instance of a strict subclass of a_class.

    Args:
        obj: the object to check.
        a_class: the class to compare against.

    Returns:
        True if type(obj) inherits (directly or indirectly) from a_class
        without being exactly a_class; False otherwise.
    """
    # Identity ('is not'), not equality, is the idiomatic exact-class test;
    # the boolean result can be returned directly instead of branching.
    return issubclass(type(obj), a_class) and type(obj) is not a_class
| [
"iheb.chatti@holbertonschool.com"
] | iheb.chatti@holbertonschool.com |
f4fdf0ae11d02017349137cfed039cd42492a91b | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /pytorch/source/caffe2/python/modeling/gradient_clipping_test.py | ca5c2ba8e22b62f2beb49d53b2707c3b0769a17a | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 10,161 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.gradient_clipping import GradientClipping
import numpy as np
class GradientClippingTest(unittest.TestCase):
def test_gradient_clipping_by_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 17)
def test_gradient_clipping_by_norm_l1_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l1_norm',
clip_threshold=0.1,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (2 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 15)
def test_gradient_clipping_by_norm_using_param_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
use_parameter_norm=True,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (5 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 21)
def test_gradient_clipping_by_norm_compute_norm_ratio(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
use_parameter_norm=True,
compute_norm_ratio=True,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (6 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 23)
def test_gradient_clipping_by_value(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
clip_max = 1e-8
clip_min = 0
net_modifier = GradientClipping(
grad_clip_method='by_value',
clip_max=clip_max,
clip_min=clip_min,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (1 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 13)
fc1_w_grad = workspace.FetchBlob('fc1_w_grad')
self.assertLessEqual(np.amax(fc1_w_grad), clip_max)
self.assertGreaterEqual(np.amin(fc1_w_grad), clip_min)
def test_gradient_clipping_by_norm_including_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
blobs_to_include=['fc1_w'],
blobs_to_exclude=None
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 1 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 14)
def test_gradient_clipping_by_norm_excluding_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
blobs_to_include=None,
blobs_to_exclude=['fc1_w', 'fc2_w']
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 0 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 11)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
316acafff434b705f475ce32dac493a6ae7e8acc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02921/s577429851.py | 3c521007cd28832c9426b343761ba33af753eeb7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | s = input()
t = input()
ans = 0
for i in range(3):
if s[i] == t[i]:
ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0c79eacddb49731e119b453110e076bd5b9ca5da | 9ba2b89dbdeefa54c6b6935d772ce36be7b05292 | /devilry/devilry_group/cradmin_instances/crinstance_base.py | f01a0b0b7c01fb528306066de649c1c7909be4c5 | [] | no_license | kristtuv/devilry-django | 0ffcd9d2005cad5e51f6377484a83d778d65050f | dd2a4e5a887b28268f3a45cc3b25a40c0e313fd3 | refs/heads/master | 2020-04-27T06:02:45.518765 | 2019-02-15T13:28:20 | 2019-02-15T13:28:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,093 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.functions import Lower, Concat
from devilry.apps.core.models import Examiner, Candidate, AssignmentGroup
from devilry.devilry_dbcache.models import AssignmentGroupCachedData
class DevilryGroupCrInstanceMixin(object):
roleclass = AssignmentGroup
rolefrontpage_appname = 'feedbackfeed'
def _get_base_rolequeryset(self):
"""Get base rolequerysets used by subclasses.
Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related
:class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s.
Returns:
QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.
"""
return AssignmentGroup.objects \
.annotate_with_is_waiting_for_feedback_count() \
.annotate_with_is_waiting_for_deliveries_count() \
.annotate_with_is_corrected_count() \
.select_related('parentnode__parentnode__parentnode') \
.prefetch_related(
models.Prefetch('candidates',
queryset=self._get_candidatequeryset())) \
.prefetch_related(
models.Prefetch('examiners',
queryset=self._get_examinerqueryset())) \
.prefetch_related(
models.Prefetch('cached_data',
queryset=self._get_assignment_group_cacheddata_queryset()))
def _get_candidatequeryset(self):
"""Get candidates.
Returns:
QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s.
"""
return Candidate.objects \
.select_related('relatedstudent') \
.order_by(
Lower(Concat('relatedstudent__user__fullname',
'relatedstudent__user__shortname')))
def _get_examinerqueryset(self):
"""Get examiners.
Returns:
QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s.
"""
return Examiner.objects \
.select_related('relatedexaminer') \
.order_by(
Lower(Concat('relatedexaminer__user__fullname',
'relatedexaminer__user__shortname')))
def _get_assignment_group_cacheddata_queryset(self):
return AssignmentGroupCachedData.objects\
.select_related(
'group',
'first_feedbackset',
'last_feedbackset',
'last_published_feedbackset')
def get_titletext_for_role(self, role):
"""String representation for the role.
Args:
role: An :obj:`~devilry.apps.core.models.AssignmentGroup`
instance of the roleclass for the crinstance.
Returns:
str: Formatted string reprensentation of the crinstance role.
"""
return "{} - {}".format(role.period, role.assignment.short_name)
| [
"stianjul@gmail.com"
] | stianjul@gmail.com |
9ce297a89bcb7527f0066f244fe50fac15f47f23 | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part3/updated_part2_batch/jm/parser_errors_2/158698348.py | 07c5e587ea7fc17c711767e5940198e5050b7d40 | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 3,078 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 158698348
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 3, 3, 7)
assert board is not None
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_move(board, 2, 4, 2) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 3, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 4) == 0
assert gamma_move(board, 3, 2, 2) == 0
board413242309 = gamma_board(board)
assert board413242309 is not None
assert board413242309 == ("..132\n" "2211.\n" ".....\n")
del board413242309
board413242309 = None
assert gamma_move(board, 1, 1, 4) == 0
assert gamma_busy_fields(board, 1) == 3
assert gamma_free_fields(board, 1) == 8
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_move(board, 3, 2, 0) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 4, 1) == 1
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_free_fields(board, 1) == 3
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_golden_move(board, 2, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 4, 0) == 1
assert gamma_golden_move(board, 3, 2, 0) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_busy_fields(board, 1) == 4
assert gamma_free_fields(board, 1) == 2
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_busy_fields(board, 1) == 4
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_busy_fields(board, 2) == 7
board793231563 = gamma_board(board)
assert board793231563 is not None
assert board793231563 == ("22132\n" "22111\n" "223.3\n")
del board793231563
board793231563 = None
assert gamma_move(board, 3, 0, 1) == 0
gamma_delete(board)
| [
"noreply@github.com"
] | kozakusek.noreply@github.com |
fb618bb99c8eb4fecc90504df2e15c24a4405d5e | 50eb4e3092fadb9af8f5ad33f2d37edce43633ed | /okfncart/tests/test_promotion_loader.py | fe9378704631eaf15f98a06f8b440fc913f5d10a | [] | no_license | tomwardill/okfncart | 8723cf42955f1393deeebadc4e7dbaa0de5b435e | 89759bf18efb7a49e16492dabdcf23fca41f49c9 | refs/heads/master | 2020-04-06T04:01:39.242094 | 2014-09-08T14:23:29 | 2014-09-08T14:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import unittest
from okfncart.promotions.promotion_loader import PromotionLoader
class TestPromotionLoader(unittest.TestCase):
def setUp(self):
self.loader = PromotionLoader()
def test_load_promotions_empty(self):
promotions = self.loader.load_promotions()
self.assertTrue(promotions) | [
"tom@howrandom.net"
] | tom@howrandom.net |
12b07ab23fed1bee315b968b910789912c086c85 | e0d9844e123fa0706388814b9f29758258589487 | /torch/jit/_fuser.py | 349ecbea75621a03adfa9001d1d4c5bbd82370e0 | [] | no_license | pigpigman8686/seg | b5cf5261a5744e89ed5e5b145f60b0ccc3ba2c0c | 61c3816f7ba76243a872fe5c5fc0dede17026987 | refs/heads/master | 2023-04-10T22:22:35.035542 | 2021-04-22T06:24:36 | 2021-04-22T06:24:36 | 360,398,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,647 | py | import contextlib
import torch
@contextlib.contextmanager
def optimized_execution(should_optimize):
"""
A context manager that controls whether the JIT's executor will run
optimizations before executing a function.
"""
stored_flag = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(should_optimize)
try:
yield
finally:
torch._C._set_graph_executor_optimize(stored_flag)
@contextlib.contextmanager
def fuser(name):
"""
A context manager that facilitates switching between
backend fusers.
Valid names:
* ``fuser0`` - enables only legacy fuser
* ``fuser1`` - enables only NNC
* ``fuser2`` - enables only nvFuser
"""
old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
old_nvfuser_state = torch._C._jit_nvfuser_enabled()
if name == 'fuser0': # legacy fuser
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser1': # NNC
old_profiling_executor = torch._C._jit_set_profiling_executor(True)
old_profiling_mode = torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser2': # nvFuser
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
else:
raise Exception("unrecognized fuser option")
try:
yield
finally:
if name == 'fuser1': # NNC
torch._C._jit_set_profiling_executor(old_profiling_executor)
torch._C._jit_set_profiling_mode(old_profiling_mode)
# recover the previous values
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def _graph_for(self, *args, **kwargs):
self(*args, **kwargs)
return last_executed_optimized_graph()
| [
"952361195@qq.com"
] | 952361195@qq.com |
0beb595ac8b8afe6fe9f98094c63c6054d060ac7 | df1eea603a7adbdd3f81e06800f788ee97ecefe1 | /0x11-python-network_1/101-starwars.py | bf5bd2f574c972d35b0370cd4d2369fe11c16ebf | [] | no_license | ledbagholberton/holbertonschool-higher_level_programming | be0b4423beb8331bd5915f065870a2cbcd8c6008 | df937fd4888dc64470f0068323a9fa6ad400e56d | refs/heads/master | 2021-06-12T15:16:57.812663 | 2019-10-01T06:00:34 | 2019-10-01T06:00:34 | 184,110,453 | 0 | 0 | null | 2021-04-30T21:18:08 | 2019-04-29T16:59:29 | TSQL | UTF-8 | Python | false | false | 550 | py | #!/usr/bin/python3
""" With request ask for header"""
import requests
import sys
if __name__ == "__main__":
url = "http://swapi.co/api/people/?all=true"
if len(sys.argv) < 2:
sys.exit(1)
else:
data = {'search': sys.argv[1]}
html = requests.get(url, params=data)
try:
my_json = html.json()
print("Number of results: ", my_json.get('count'))
list_results = my_json.get('results')
for dict_results in list_results:
print(dict_results.get('name'))
except:
pass
| [
"789@holbertonschool.com"
] | 789@holbertonschool.com |
da5af4886df7355553c45475f6a10ea3d749dcbb | 18e10db2ac29420dadf40fc1185091a1e827d6b8 | /lib/core/model/facebox/net.py | 521cbca006167d62a739d88199d16dcd83b6996c | [
"Apache-2.0"
] | permissive | liuxiaoliu321/faceboxes-tensorflow | a853fb86b8c1ee895028e4ce35f141f8f3ff158c | 39d12704cf9c1da324bb439b6e8a72c8b4d01d34 | refs/heads/master | 2020-08-08T08:05:57.379527 | 2019-09-25T04:06:25 | 2019-09-25T04:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,032 | py | import tensorflow.contrib.slim as slim
import tensorflow as tf
import numpy as np
from lib.core.model.facebox.losses_and_ohem import localization_loss, ohem_loss
from lib.core.model.facebox.utils.box_utils import batch_decode
from lib.core.model.facebox.utils.nms import batch_non_max_suppression
from train_config import config as cfg
def facebox_arg_scope(weight_decay=0.00001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': True, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d,slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.glorot_normal_initializer(),
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
def inception_block(x,scope):
# path 1
x1 = slim.conv2d(x, 32, (1, 1), scope=scope + '/conv_1x1_path1')
# path 2
y = slim.avg_pool2d(x, (3, 3), stride=1, padding='SAME', scope=scope + '/pool_3x3_path2')
x2 = slim.conv2d(y, 32, (1, 1), scope=scope + '/conv_1x1_path2')
# path 3
y = slim.conv2d(x, 24, (1, 1), scope=scope + '/conv_1x1_path3')
x3 = slim.conv2d(y, 32, (3, 3), scope=scope + '/conv_3x3_path3')
# path 4
y = slim.conv2d(x, 24, (1, 1), scope=scope + '/conv_1x1_path4')
y = slim.conv2d(y, 32, (3, 3), scope=scope + '/conv_3x3_path4')
x4 = slim.conv2d(y, 32, (3, 3), scope=scope + '/conv_3x3_second_path4')
return tf.concat([x1, x2, x3, x4], axis=3, name=scope + '/concat')
return net_out
### RDCL in the papre
# def RDCL(net_in):
# with tf.name_scope('RDCL'):
# net = slim.conv2d(net_in, 24, [7, 7], stride=2,activation_fn=tf.nn.crelu, scope='init_conv')
# net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name='init_pool')
# net = slim.conv2d(net, 64, [5, 5], stride=2,activation_fn=tf.nn.crelu,scope='conv1x1_before')
# net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name='init_pool2')
# return net
### the new one
def RDCL(net_in):
with tf.name_scope('RDCL'):
net = slim.conv2d(net_in, 12, [7, 7], stride=2,activation_fn=tf.nn.relu, scope='init_conv1')
net = slim.conv2d(net, 24, [3, 3], stride=2, activation_fn=tf.nn.crelu, scope='init_conv2')
#net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name='init_pool')
net = slim.conv2d(net, 32, [3, 3], stride=2,activation_fn=tf.nn.relu,scope='conv1x1_before1')
net = slim.conv2d(net, 64, [3, 3], stride=2, activation_fn=tf.nn.crelu, scope='conv1x1_before2')
#net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name='init_pool2')
return net
def MSCL(net):
with tf.name_scope('MSCL'):
feature_maps = []
net = inception_block(net, 'inception1')
net = inception_block(net, 'inception2')
net = inception_block(net, 'inception3')
feature_maps.append(net)
net = slim.conv2d(net, 128, (1, 1), scope='conv3_1')
net = slim.conv2d(net, 256, (3, 3), stride=2, scope='conv3_2')
feature_maps.append(net)
net = slim.conv2d(net, 128, (1, 1), scope='conv4_1')
net = slim.conv2d(net, 256, (3, 3), stride=2, scope='conv4_2')
feature_maps.append(net)
print("feature_maps shapes:", feature_maps)
return feature_maps
def output(feature_maps):
feature_1,feature_2,feature_3=feature_maps
with tf.name_scope('out'):
###level 1
reg_1 = slim.conv2d(feature_1, 4*21, [3, 3], stride=1, activation_fn=None,
normalizer_fn=None, scope='level1_reg_out')
reg_1=tf.reshape(reg_1, ([-1, 32, 32, 21, 4]))
reg_1 = tf.reshape(reg_1, ([-1,32* 32*21, 4]))
###level 2
reg_2 = slim.conv2d(feature_2, 4*1, [3, 3], stride=1, activation_fn=None,
normalizer_fn=None, scope='level2_reg_out')
reg_2 = tf.reshape(reg_2, ([-1, 16, 16, 1, 4]))
reg_2 = tf.reshape(reg_2, ([-1,16*16, 4]))
###level 3
reg_3 = slim.conv2d(feature_3, 4*1, [3, 3], stride=1, activation_fn=None,
normalizer_fn=None, scope='level3_reg_out')
reg_3=tf.reshape(reg_3, ([-1, 8, 8, 1, 4]))
reg_3 = tf.reshape(reg_3, ([-1,8*8, 4]))
reg=tf.concat([reg_1,reg_2,reg_3],axis=1)
##cla
cla_1 = slim.conv2d(feature_1, 2 * 21, [3, 3], stride=1, activation_fn=None,
normalizer_fn=None, scope='level1_cla_out')
cla_1 = tf.reshape(cla_1, ([-1, 32, 32, 21, 2]))
cla_1 = tf.reshape(cla_1, ([-1, 32 * 32 * 21, 2]))
cla_2 = slim.conv2d(feature_2, 2 * 1, [3, 3], stride=1, activation_fn=None,
normalizer_fn=None, scope='level2_cla_out')
cla_2 = tf.reshape(cla_2, ([-1, 16, 16, 1, 2]))
cla_2 = tf.reshape(cla_2, ([-1, 16 * 16, 2]))
cla_3 = slim.conv2d(feature_3, 2 * 1, [3, 3], stride=1, activation_fn=None,
normalizer_fn=None, scope='level3_cla_out')
cla_3=tf.reshape(cla_3, ([-1, 8, 8, 1, 2]))
cla_3 = tf.reshape(cla_3, ([-1,8*8, 2]))
cla = tf.concat([cla_1, cla_2, cla_3], axis=1)
return reg,cla
def preprocess(image):
with tf.name_scope('image_preprocess'):
if image.dtype.base_dtype != tf.float32:
image = tf.cast(image, tf.float32)
mean = cfg.DATA.PIXEL_MEAN
#std = np.asarray(cfg.DATA.PIXEL_STD)
image_mean = tf.constant(mean, dtype=tf.float32)
#image_invstd = tf.constant(1.0 / std, dtype=tf.float32)
image = (image - image_mean)#*image_invstd
return image
def facebox_backbone(inputs,L2_reg,training=True):
inputs=preprocess(inputs)
arg_scope = facebox_arg_scope(weight_decay=L2_reg)
with slim.arg_scope(arg_scope):
with slim.arg_scope([slim.batch_norm], is_training=training):
with tf.name_scope('Facebox'):
net=RDCL(inputs)
fms=MSCL(net)
reg,cla =output(fms)
return reg,cla
def facebox(inputs, reg_targets, matches, L2_reg, training):
loc_predict, cla_predict = facebox_backbone(inputs, L2_reg, training)
with tf.name_scope('losses'):
# whether anchor is matched
is_matched = tf.greater_equal(matches, 0)
weights = tf.to_float(is_matched)
# shape [batch_size, num_anchors]
# we have binary classification for each anchor
cls_targets = tf.to_int32(is_matched)
with tf.name_scope('classification_loss'):
cls_losses = ohem_loss(
cla_predict,
cls_targets,
is_matched
)
with tf.name_scope('localization_loss'):
location_losses = localization_loss(
loc_predict,
reg_targets, weights
)
# they have shape [batch_size, num_anchors]
with tf.name_scope('normalization'):
matches_per_image = tf.reduce_sum(weights, axis=1) # shape [batch_size]
num_matches = tf.reduce_sum(matches_per_image) # shape []
normalizer = tf.maximum(num_matches, 1.0)
reg_loss = tf.reduce_sum(location_losses) / normalizer
cla_loss = tf.reduce_sum(cls_losses) / normalizer
######add nms in the graph
get_predictions(loc_predict, cla_predict, anchors=cfg.MODEL.anchors)
return {'localization_loss': reg_loss, 'classification_loss':cla_loss}
def get_predictions(box_encodings,cla,anchors, score_threshold=cfg.TEST.score_threshold, iou_threshold=cfg.TEST.iou_threshold, max_boxes=cfg.TEST.max_boxes):
"""Postprocess outputs of the network.
Returns:
boxes: a float tensor with shape [batch_size, N, 4].
scores: a float tensor with shape [batch_size, N].
num_boxes: an int tensor with shape [batch_size], it
represents the number of detections on an image.
where N = max_boxes.
"""
with tf.name_scope('postprocessing'):
boxes = batch_decode(box_encodings, anchors)
# if the images were padded we need to rescale predicted boxes:
boxes = tf.clip_by_value(boxes, 0.0, 1.0)
# it has shape [batch_size, num_anchors, 4]
scores = tf.nn.softmax(cla, axis=2)[:, :, 1]
# it has shape [batch_size, num_anchors], background are ignored
with tf.device('/cpu:0'), tf.name_scope('nms'):
boxes, scores, num_detections = batch_non_max_suppression(
boxes, scores, score_threshold, iou_threshold, max_boxes
)
boxes=tf.identity(boxes,name='boxes')
scores = tf.identity(scores, name='scores')
num_detections = tf.identity(num_detections, name='num_detections')
return {'boxes': boxes, 'scores': scores, 'num_boxes': num_detections}
| [
"2120140200@mail.nankai.edu.cn"
] | 2120140200@mail.nankai.edu.cn |
7fe9f839b8d31d5cc614f940a265d49226e3dd4c | 0326f06f68fb0d919f8467f4744dfd60a654836a | /eggs/Django-1.6.5-py2.7.egg/django/contrib/gis/db/models/query.py | 1f2e5196a4b36b6f6d7b2ae9c794a847e4f479a0 | [] | no_license | ethirajit/onlinepos | 67de6023241339ae08c3b88a9e7b62b837ec17a3 | 186ba6585d0b29f96a5c210462764515cccb3b47 | refs/heads/master | 2021-01-17T13:23:36.490727 | 2014-07-01T10:30:17 | 2014-07-01T10:30:17 | 34,388,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,047 | py | from django.db import connections
from django.db.models.query import QuerySet, ValuesQuerySet, ValuesListQuerySet
from django.contrib.gis import memoryview
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import get_srid_info, PointField, LineStringField
from django.contrib.gis.db.models.sql import AreaField, DistanceField, GeomField, GeoQuery
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
from django.utils import six
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
### Methods overloaded from QuerySet ###
def __init__(self, model=None, query=None, using=None):
super(GeoQuerySet, self).__init__(model=model, query=query, using=using)
self.query = query or GeoQuery(self.model)
def values(self, *fields):
return self._clone(klass=GeoValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=GeoValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
### GeoQuerySet Methods ###
    def area(self, tolerance=0.05, **kwargs):
        """
        Returns the area of the geographic field in an `area` attribute on
        each element of this GeoQuerySet.

        `tolerance` is only used on Oracle backends (meters, default 0.05).
        Raises an Exception for geodetic fields on backends without
        geography support.
        """
        # Performing setup here rather than in `_spatial_attribute` so that
        # we can get the units for `AreaField`.
        procedure_args, geo_field = self._spatial_setup('area', field_name=kwargs.get('field_name', None))
        # `s` collects the per-backend options forwarded to _spatial_attribute;
        # 'setup': False signals that setup has already been done above.
        s = {'procedure_args' : procedure_args,
             'geo_field' : geo_field,
             'setup' : False,
            }
        connection = connections[self.db]
        backend = connection.ops
        if backend.oracle:
            # Oracle's area function takes an explicit tolerance argument.
            s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
            s['procedure_args']['tolerance'] = tolerance
            s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
        elif backend.postgis or backend.spatialite:
            if backend.geography:
                # Geography fields support area calculation, returns square meters.
                s['select_field'] = AreaField('sq_m')
            elif not geo_field.geodetic(connection):
                # Projected coordinates: derive the area unit from the field's SRS.
                s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
            else:
                # TODO: Do we want to support raw number areas for geodetic fields?
                raise Exception('Area on geodetic coordinate systems not supported.')
        return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analagous to a union operation, but much faster because
boundaries are not dissolved.
"""
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geomtry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the users wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ '
'support GeoJSON serialization.')
if not isinstance(precision, six.integer_types):
raise TypeError('Precision keyword must be set with an integer.')
# Setting the options flag -- which depends on which version of
# PostGIS we're using. SpatiaLite only uses the first group of options.
if backend.spatial_version >= (1, 4, 0):
options = 0
if crs and bbox: options = 3
elif bbox: options = 1
elif crs: options = 2
else:
options = 0
if crs and bbox: options = 3
elif crs: options = 1
elif bbox: options = 2
s = {'desc' : 'GeoJSON',
'procedure_args' : {'precision' : precision, 'options' : options},
'procedure_fmt' : '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
The `precision` keyword may be used to custom the number of
_characters_ used in the output GeoHash, the default is 20.
"""
s = {'desc' : 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc' : 'GML', 'procedure_args' : {'precision' : precision}}
if backend.postgis:
# PostGIS AsGML() aggregate function parameter order depends on the
# version -- uggh.
if backend.spatial_version > (1, 3, 1):
s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s'
else:
s['procedure_fmt'] = '%(geo_col)s,%(precision)s,%(version)s'
s['procedure_args'] = {'precision' : precision, 'version' : version}
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc' : 'KML',
'procedure_fmt' : '%(geo_col)s,%(precision)s',
'procedure_args' : {'precision' : kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
Creates a linestring from all of the PointField geometries in the
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
the sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field' : GeomField(),}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s',
'procedure_args' : {'x' : x, 'y' : y},
'select_field' : GeomField(),
}
else:
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
'select_field' : GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]:
raise TypeError('Size argument(s) for the grid must be a float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size' : size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize' : xsize, 'ysize' : ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize' : xsize, 'ysize' : ysize,
'xorigin' : xorigin, 'yorigin' : yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt' : procedure_fmt,
'procedure_args' : procedure_args,
'select_field' : GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, six.integer_types):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {'desc' : 'SVG',
'procedure_fmt' : '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args' : {'rel' : relative,
'precision' : precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s',
'procedure_args' : {'x' : x, 'y' : y},
'select_field' : GeomField(),
}
else:
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
'select_field' : GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, six.integer_types):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name', None)
tmp, geo_field = self._spatial_setup('transform', field_name=field_name)
# Getting the selection SQL for the given geographic field.
field_col = self._geocol_select(geo_field, field_name)
# Why cascading substitutions? Because spatial backends like
# Oracle and MySQL already require a function call to convert to text, thus
# when there's also a transformation we need to cascade the substitutions.
# For example, 'SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM( ... )'
geo_col = self.query.custom_select.get(geo_field, field_col)
# Setting the key for the field's column with the custom SELECT SQL to
# override the geometry column returned from the database.
custom_sel = '%s(%s, %s)' % (connections[self.db].ops.transform, geo_col, srid)
# TODO: Should we have this as an alias?
# custom_sel = '(%s(%s, %s)) AS %s' % (SpatialBackend.transform, geo_col, srid, qn(geo_field.name))
self.query.transformed_srid = srid # So other GeoQuerySet methods
self.query.custom_select[geo_field] = custom_sel
return self._clone()
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
return self._spatial_aggregate(aggregates.Union, **kwargs)
### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None: desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function' : func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle: agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
Dictonary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
backend = connection.ops
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type', None))
for k, v in six.iteritems(default_args): settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, six.string_types): model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field', False):
sel_fld = settings['select_field']
if isinstance(sel_fld, GeomField) and backend.select:
self.query.custom_select[model_att] = backend.select
if connection.ops.oracle:
sel_fld.empty_strings_allowed = False
self.query.extra_select_fields[model_att] = sel_fld
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
return self.extra(select={model_att : fmt % settings['procedure_args']},
select_params=settings['select_params'])
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))
# If geodetic defaulting distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
if self.query.transformed_srid:
u, unit_name, s = get_srid_info(self.query.transformed_srid, connection)
geodetic = unit_name in geo_field.geodetic_units
if backend.spatialite and geodetic:
raise ValueError('SQLite does not support linear distance calculations on geodetic coordinate systems.')
if distance:
if self.query.transformed_srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, self.query.transformed_srid)
if geom.srid is None or geom.srid == self.query.transformed_srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, self.query.transformed_srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += ', %s(%s(%%%%s, %s), %s)' % (backend.transform, backend.from_text,
geom.srid, self.query.transformed_srid)
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, self.query.transformed_srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
# procedures may only do queries from point columns to point geometries
# some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(memoryview(params[0].ewkb)).geom_type) == 'Point':
raise ValueError('Spherical distance calculation only supported with Point Geometry parameters')
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function' : backend.distance_spheroid, 'spheroid' : params[1]})
else:
procedure_args.update({'function' : backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function' : backend.length_spheroid, 'spheroid' : params[1]})
elif geom_3d and backend.postgis:
# Use 3D variants of perimeter and length routines on PostGIS.
if perimeter:
procedure_args.update({'function' : backend.perimeter3d})
elif length:
procedure_args.update({'function' : backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field' : DistanceField(dist_att),
'setup' : False,
'geo_field' : geo_field,
'procedure_args' : procedure_args,
'procedure_fmt' : procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field' : GeomField(),}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance' : tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute and takes a Geoemtry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {'geom_args' : ('geom',),
'select_field' : GeomField(),
'procedure_fmt' : '%(geo_col)s,%(geom)s',
'procedure_args' : {'geom' : geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
opts = self.model._meta
if not geo_field in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
self.query.add_select_related([field_name])
compiler = self.query.get_compiler(self.db)
compiler.pre_sql_setup()
for (rel_table, rel_col), field in self.query.related_select_cols:
if field == geo_field:
return compiler._field_column(geo_field, rel_table)
raise ValueError("%r not in self.query.related_select_cols" % geo_field)
elif not geo_field in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
tmp_fld, parent_model, direct, m2m = opts.get_field_by_name(geo_field.name)
return self.query.get_compiler(self.db)._field_column(geo_field, parent_model._meta.db_table)
else:
return self.query.get_compiler(self.db)._field_column(geo_field)
class GeoValuesQuerySet(ValuesQuerySet):
    """ValuesQuerySet variant that converts geometry columns to Geometry objects."""

    def __init__(self, *args, **kwargs):
        super(GeoValuesQuerySet, self).__init__(*args, **kwargs)
        # This flag tells `resolve_columns` to run the values through
        # `convert_values`. This ensures that Geometry objects instead
        # of string values are returned with `values()` or `values_list()`.
        self.query.geo_values = True
class GeoValuesListQuerySet(GeoValuesQuerySet, ValuesListQuerySet):
    """`values_list()` counterpart of GeoValuesQuerySet; behavior comes
    entirely from the two bases (geometry conversion + tuple output)."""
    pass
| [
"root@server.onlinepos.co.in"
] | root@server.onlinepos.co.in |
18f4edd212936a2d9ad07e7a58d32021e5000f79 | cc619d6e81c39fe54d4875e3c6936e25bb8a7ebd | /Python/src/17 Scientific Python/SciKitImage/03_tablets.py | 35e9a6bcf6f9bf53efee9a38eb2c8fe75f4f809d | [] | no_license | joshua4289/python3-examples | cb01060f649c7dc97185566b00fa0d59a1ffdca3 | 70c1fd0b1e5bf25e82697257fb9f92cd06e922b7 | refs/heads/master | 2020-12-19T08:19:13.310071 | 2017-04-28T13:48:01 | 2017-04-28T13:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,018 | py | import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import skimage.morphology as morphology
import skimage.feature as feature
import PIL.Image as Image
def load_image(infilename):
    """Load an image file into a numpy int32 array.

    Parameters
    ----------
    infilename : str
        Path of the image file to read (any format PIL understands).

    Returns
    -------
    numpy.ndarray
        Pixel data with dtype int32; for the RGB JPEGs used in this demo
        the shape is (height, width, 3).
    """
    # Use the context manager so the underlying file handle is closed
    # deterministically -- the original version leaked it (Image.open is
    # lazy and keeps the file open until garbage collection).
    with Image.open(infilename) as img:
        img.load()  # force the pixel data to be read before the file closes
        return np.asarray(img, dtype="int32")
def set_title(title):
    """Put *title* on the window of the current matplotlib figure."""
    plt.gcf().canvas.set_window_title(title)
# Demo script: detect tablet (pill) outlines in a photo with the Canny edge
# detector, then close and fill the outlines with morphological operations.
# Each section pops up a matplotlib window so the effect of the parameters
# can be inspected interactively.

# use PIL to show the true (unmodified) image first
img = Image.open("images/tablets.jpg")
img.show()

# image is an int numpy array [0 ... 255]
set_title("numpy int array 0 ... 255")
image = load_image("images/tablets.jpg")
plt.imshow(image, interpolation="none")
plt.show()

# convert image to floats
# NOTE(review): dividing by 256.0 maps 255 -> ~0.996, not 1.0 exactly;
# presumably intentional to keep values strictly below 1 -- confirm.
image = image / 256.0
set_title("numpy float array 0.0 ... 1.0")
plt.imshow(image, interpolation="none")
plt.show()

# algorithms expect monochrome images
# so just use the RED part of the image
image = image[:,:,0]

# use the Canny algorithm to detect edges;
# first vary sigma (Gaussian blur width) to see its effect
for i in range(2,10):
    set_title("sigma = {}".format(i))
    edges = feature.canny(image, sigma=i, low_threshold=40/256.0, high_threshold=50/256.0)
    plt.imshow(edges, cmap=plt.cm.gray)
    plt.show()

# then vary the hysteresis thresholds at fixed sigma
for i in range(5, 60, 5):
    low = i / 256.0
    high = (i + 5) / 256.0
    set_title("low = {}, high = {}".format(low*256, high*256))
    edges = feature.canny(image, sigma=4, low_threshold=low, high_threshold=high)
    plt.imshow(edges, cmap=plt.cm.gray)
    plt.show()

# choose the best parameters found above
sigma = 4
low = 40/256.0
high = 45/256.0
set_title("Best choice: sigma = {}, low = {}, high = {}".format(sigma, low*256, high*256))
edges = feature.canny(image, sigma=sigma, low_threshold=low, high_threshold=high)

# close gaps in the detected edges with increasingly large structuring
# elements, then fill the closed outlines
for i in range(1,10):
    closed = morphology.binary_closing(edges, morphology.square(i)).astype(int)
    # fill the (now closed) circular outlines
    set_title("fill factor = {}".format(i))
    filled = nd.binary_fill_holes(closed).astype(int)
    plt.imshow(filled, cmap=plt.cm.gray)
    plt.show()
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
6a300cad68c584580fc8f8d23564a9d3917e56de | 73d9b5664d6949140b13e92d8b91a01e8502752a | /good_spot/images/migrations/0006_auto_20180313_1440.py | d6a7bce62944f54512cae8f23ae1618d32f2648b | [
"MIT"
] | permissive | jasmine92122/NightClubBackend | 3ed46cce0f6b534b4b49829f53fe7cb6a42ae42e | 7f59129b78baaba0e0c25de2b493033b858f1b00 | refs/heads/master | 2022-11-23T00:42:25.606762 | 2019-10-02T01:56:29 | 2019-10-02T01:56:29 | 212,234,882 | 0 | 0 | MIT | 2022-11-22T02:10:16 | 2019-10-02T01:47:52 | JavaScript | UTF-8 | Python | false | false | 462 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-13 14:40
from __future__ import unicode_literals
from django.db import migrations
def reorder(apps, schema_editor):
    """Migration hook: renumber PlaceImage ordering via the `reorder` command."""
    from django.core import management as mgmt
    mgmt.call_command('reorder', 'images.PlaceImage')
class Migration(migrations.Migration):
    """Data migration: re-run ordering for PlaceImage after 0005."""

    dependencies = [
        ('images', '0005_auto_20180313_1438'),
    ]

    operations = [
        # No schema change -- just runs the reorder() data function.
        migrations.RunPython(reorder)
    ]
| [
"jasminegarcia111@outlook.com"
] | jasminegarcia111@outlook.com |
20c0be5ceca17532092c08704ef8644540114ee4 | 936dc2666f27de7a7d1428c7ad2ded62a722b8fa | /src/geofr/tasks.py | cccc966051394561c8ed36e13ac6c969e30e66cd | [
"ISC"
] | permissive | MTES-MCT/aides-territoires | 03451a32bdeaab3812b8593bfe3a27c1b1d9a182 | af9f6e6e8b1918363793fbf291f3518ef1454169 | refs/heads/master | 2023-09-04T22:15:17.819264 | 2023-08-25T13:19:17 | 2023-08-25T13:19:17 | 124,301,398 | 21 | 11 | NOASSERTION | 2023-09-12T13:46:49 | 2018-03-07T22:19:11 | Python | UTF-8 | Python | false | false | 206 | py | from core.celery import app
from django.core import management
@app.task
def count_by_department():
    """Count backers and programs by department.

    Celery task that simply delegates to the Django management command of
    the same name, so the computation can also be triggered from the CLI.
    """
    management.call_command("count_by_department")
| [
"noreply@github.com"
] | MTES-MCT.noreply@github.com |
df71e788bcfd8b63c8f6aabc31fca3443c9f04b4 | df716b2868b289a7e264f8d2b0ded52fff38d7fc | /tests/parsers/trendmicroav.py | a8f3cf80dcbcd5830b11ee17d920946a8d4d990f | [
"Apache-2.0"
] | permissive | ir4n6/plaso | 7dd3cebb92de53cc4866ae650d41c255027cf80a | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | refs/heads/master | 2021-04-25T05:50:45.963652 | 2018-03-08T15:11:58 | 2018-03-08T15:11:58 | 122,255,666 | 0 | 0 | Apache-2.0 | 2018-02-20T21:00:50 | 2018-02-20T21:00:50 | null | UTF-8 | Python | false | false | 1,777 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Trend Micro AV Log parser."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import trendmicroav as _ # pylint: disable=unused-import
from plaso.parsers import trendmicroav
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class TrendMicroUnitTest(test_lib.ParserTestCase):
  """Tests for the Trend Micro AV Log parser."""

  @shared_test_lib.skipUnlessHasTestFile(['pccnt35.log'])
  def testParse(self):
    """Tests the Parse function."""
    parser = trendmicroav.OfficeScanVirusDetectionParser()
    storage_writer = self._ParseFile(['pccnt35.log'], parser)

    # The file contains 3 lines which results in 3 events.
    self.assertEqual(storage_writer.number_of_events, 3)

    # The order in which DSVParser generates events is nondeterministic
    # hence we sort the events.
    events = list(storage_writer.GetSortedEvents())

    event = events[1]
    self.CheckTimestamp(event.timestamp, '2018-01-30 14:45:32.000000')

    # The third and last event has been edited to match the older, documented
    # format for log lines (without a Unix timestamp).
    event = events[2]
    self.CheckTimestamp(event.timestamp, '2018-01-30 14:46:00.000000')

    # Test the third event.
    self.assertEqual(event.path, 'C:\\temp\\')
    self.assertEqual(event.filename, 'eicar.com_.gstmp')

    expected_message = (
        r'Path: C:\temp\ File name: eicar.com_.gstmp '
        r'Eicar_test_1 : Failure (clean), moved (Real-time scan)')
    expected_short_message = r'C:\temp\ eicar.com_.gstmp Failure (clean), moved'
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| [
"onager@deerpie.com"
] | onager@deerpie.com |
4d1e24b85cf62a6aab7cfc79383b9f0d1481768f | 5b4fe473179b5fadaf59ec96d55b2ec4cb326f65 | /src/graph_transpiler/webdnn/backend/webgl/optimize_rules/insert_channel_mode_conversion.py | 121a0b3cbf32a9c693a52efa3c3fdef3de9bc1d1 | [
"Zlib",
"MIT"
] | permissive | TarrySingh/webdnn | 13d3f1ec4936916abacfb67e270f48571e2fcff2 | b31b19de0798d8ca198b78d19cb06e4fce1bc260 | refs/heads/master | 2021-05-07T02:24:47.500746 | 2017-11-13T13:00:24 | 2017-11-13T13:00:24 | 110,582,816 | 0 | 1 | null | 2017-11-13T18:03:46 | 2017-11-13T18:03:46 | null | UTF-8 | Python | false | false | 2,948 | py | from typing import Tuple
from webdnn.backend.webgl.attributes.channel_mode import ChannelModeEnum, ChannelMode
from webdnn.backend.webgl.attributes.texture_shape import TextureShape
from webdnn.backend.webgl.operators.convert_r_to_rgba import ConvertRtoRGBA, convert_r_to_rgba
from webdnn.backend.webgl.operators.convert_rgba_to_r import ConvertRGBAtoR, convert_rgba_to_r
from webdnn.graph import traverse
from webdnn.graph.graph import Graph
from webdnn.graph.operator import Operator
from webdnn.graph.operators.sgemm import Sgemm
from webdnn.graph.operators.tensordot import Tensordot
from webdnn.graph.optimize_rule import OptimizeRule
from webdnn.graph.variable import Variable
def _replace_input(op: Operator, var_name: str, target: ChannelModeEnum):
    """Insert a channel-mode conversion in front of one input of *op*.

    before)
        v -{op}-

    after)
        v -{conversion}- v' -{op}-

    Returns True if a conversion was inserted, False when the input
    already has the requested channel mode.
    """
    original = op.inputs[var_name]
    if ChannelMode.get(original) == target:
        # Nothing to do: already in the requested mode.
        return False

    converter = convert_r_to_rgba if target == ChannelModeEnum.RGBA else convert_rgba_to_r
    converted = converter(original)

    # The converted variable occupies a texture of the same size.
    shape = TextureShape.get(original)
    TextureShape.set(converted, height=shape[0], width=shape[1])

    op.replace_input(original, converted)
    return True
def _replace_output(op: Operator, var_name: str, target: ChannelModeEnum):
    """Insert a channel-mode conversion behind one output of *op*.

    before)
        -{op}- v

    after)
        -{op}- v' -{conversion}- v

    Returns True if a conversion was inserted, False when the output
    already has the requested channel mode.
    """
    original = op.outputs[var_name]
    if ChannelMode.get(original) == target:
        return False

    # Fresh intermediate variable produced directly by the operator,
    # carrying the requested channel mode.
    intermediate = Variable(original.shape, original.order)
    ChannelMode.set(intermediate, target)
    op.replace_output(original, intermediate)

    # Convert back so downstream consumers keep seeing the original mode.
    back_converter = convert_rgba_to_r if target == ChannelModeEnum.RGBA else convert_r_to_rgba
    back_converter(intermediate).change_order(original.order).replace(original)
    return True
def _replace_input_all(op: Operator, target: ChannelModeEnum):
    """Convert every input of *op* to the *target* channel mode.

    The per-input results are materialized in a list before calling any():
    any() over a generator short-circuits at the first True, which would
    leave the remaining inputs unconverted in this pass and force extra
    optimize-rule iterations.

    Returns True if at least one input was converted.
    """
    changed = [_replace_input(op, var_name, target)
               for var_name in list(op.inputs.keys())]
    return any(changed)
def _replace_output_all(op: Operator, target: ChannelModeEnum):
    """Convert every output of *op* to the *target* channel mode.

    Results are materialized before any() so that every output is
    processed (any() over a generator would stop at the first True).

    Returns True if at least one output was converted.
    """
    changed = [_replace_output(op, var_name, target)
               for var_name in list(op.outputs.keys())]
    return any(changed)
class InsertChannelModeConversion(OptimizeRule):
    """Optimize rule that inserts ConvertRtoRGBA / ConvertRGBAtoR operators
    so every operator receives inputs and produces outputs in the channel
    mode it expects (single-channel R by default).
    """

    def optimize(self, graph: Graph) -> Tuple[Graph, bool]:
        # Returns the (possibly modified) graph and whether anything changed,
        # so the optimizer can iterate until a fixpoint.
        flag_changed = False
        for op in traverse.listup_operators(graph):
            if isinstance(op, (Sgemm, Tensordot)):
                # Skipped here — presumably these kernels handle channel
                # modes themselves; confirm against their implementations.
                pass
            elif isinstance(op, ConvertRGBAtoR):
                # Conversion operators have fixed modes on each side.
                flag_changed |= _replace_input(op, "x0", ChannelModeEnum.RGBA)
                flag_changed |= _replace_output(op, "y", ChannelModeEnum.R)
            elif isinstance(op, ConvertRtoRGBA):
                flag_changed |= _replace_input(op, "x0", ChannelModeEnum.R)
                flag_changed |= _replace_output(op, "y", ChannelModeEnum.RGBA)
            else:
                # Default case: all other operators work in R mode.
                flag_changed |= _replace_input_all(op, ChannelModeEnum.R)
                flag_changed |= _replace_output_all(op, ChannelModeEnum.R)
        return graph, flag_changed
| [
"y.kikura@gmail.com"
] | y.kikura@gmail.com |
cb0ecd20b0fdca9b9abf3647279afb77cc77ecbb | 318270aeab9182a42482c33167f90b3e6bb8a77b | /Pattern exercise/pattern 8.py | 69e53ceffc379c4d66b91586da5711cef2d1d07f | [] | no_license | Raj-kar/Python | b857214392384752855f6ab5d673b0218ce3ecd7 | 7eab4705eda566827ad01b3285095d253e55a7dc | refs/heads/master | 2023-01-21T07:11:03.054162 | 2020-11-30T08:47:11 | 2020-11-30T08:47:11 | 286,132,816 | 3 | 11 | null | 2020-10-13T21:47:31 | 2020-08-08T23:09:42 | Python | UTF-8 | Python | false | false | 428 | py | # * *
# ** **
# *** ***
# **** ****
# ***** *****
# ****** ******
# *************
# Draw the pattern row by row: each row shows the symbol on the two outer
# wedges (left wedge grows, right wedge mirrors it) with spaces in between.
num = int(input("Enter a range :: "))
symbol = input("Enter a symbol :: ")  # any character works as the drawing symbol
num += 1
width = (num * 2) - 2
for row in range(1, num):
    line = "".join(
        symbol if col <= row or col >= width - row else " "
        for col in range(1, width)
    )
    print(line)
| [
"rajkar921@gmail.com"
] | rajkar921@gmail.com |
cfd6f557a31e4bd3f453e4ab345336c9aedd49ed | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/KoubeiMallScanpurchaseDiscountdetailModifyRequest.py | 5233478aa7ed053e012b041bd7d35200c1f49d5d | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,051 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMallScanpurchaseDiscountdetailModifyModel import KoubeiMallScanpurchaseDiscountdetailModifyModel
class KoubeiMallScanpurchaseDiscountdetailModifyRequest(object):
    """Auto-generated request wrapper for the Alipay OpenAPI method
    `koubei.mall.scanpurchase.discountdetail.modify`.

    Holds the business payload (`biz_model` / `biz_content`) together with
    common gateway parameters, and serializes everything via `get_params()`.
    """

    def __init__(self, biz_model=None):
        # Typed business model; serialized in preference to raw biz_content.
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accepts either a ready model instance or a plain dict, which is
        # converted through the generated model class.
        if isinstance(value, KoubeiMallScanpurchaseDiscountdetailModifyModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiMallScanpurchaseDiscountdetailModifyModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        # Lazily create the user-defined parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Assemble the flat parameter dict sent to the gateway.

        `biz_model` (typed) wins over `biz_content` when both are set,
        since it is written to P_BIZ_CONTENT after biz_content.
        """
        params = dict()
        params[P_METHOD] = 'koubei.mall.scanpurchase.discountdetail.modify'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        # This API has no file-upload parameters.
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
0346683d74959bf1bc9cf8400043c01c34de5b01 | 8200122ad875e73f627f5d1eca29c778167cb5fb | /tests/test_documenter.py | a9b851c1b0a4b51e81f69479b92cd72e904c4922 | [
"ISC"
] | permissive | jaimergp/mkdocstrings | bb1a5ad2360f051e17e4af0c854119dcc6b652ac | 895c3192cb9328d0800234a8732745ecae840d97 | refs/heads/master | 2021-02-17T17:47:03.476674 | 2020-03-04T20:30:36 | 2020-03-04T20:30:36 | 245,115,235 | 1 | 0 | ISC | 2020-03-05T09:05:19 | 2020-03-05T09:05:19 | null | UTF-8 | Python | false | false | 862 | py | from mkdocstrings.documenter import Documenter
def test_getattr_dunder():
    """A dunder overridden without a docstring must not inherit the parent's.

    `Child.__getitem__` redefines `Base.__getitem__` but omits a docstring;
    the documenter should record an empty docstring rather than falling back
    to the inherited "Written docstring.".
    """
    class Base:
        def __init__(self):
            pass

        def __getitem__(self, item):
            """Written docstring."""
            return item

    class Child(Base):
        def __init__(self):
            super().__init__()

        def __getitem__(self, item):
            # Intentionally undocumented override.
            return item

    doc = Documenter()
    class_doc = doc.get_class_documentation(Child)
    for child in class_doc.children:
        if child.name == "__getitem__":
            assert child.docstring.original_value == ""
def test_no_filter():
    """A documenter created without filters keeps every name."""
    documenter = Documenter()
    assert not documenter.filter_name_out("hello")
def test_filter():
    """Names matching a negated pattern are dropped; the rest are kept."""
    documenter = Documenter(["!^_[^_]", "!^__C$"])
    for dropped in ("_B", "__C"):
        assert documenter.filter_name_out(dropped)
    assert not documenter.filter_name_out("__A")
| [
"pawamoy@pm.me"
] | pawamoy@pm.me |
28a809f729a9e54614cfc5b64ccef5cd57046d51 | 5f73d71c47ecac793e2e1a1ce14ca5c24483d45a | /tools/vis_gt_stats.py | a64d6b583c3b35e1c28d921cf1f608683c3aa6c4 | [] | no_license | GeeeG/sixd_toolkit | 694db642c9f2a179353284dfcf7b16f45722aaae | ec914f3a9d2ced9a5b6d87722f342c6bef1b95b7 | refs/heads/master | 2021-08-08T13:32:29.018936 | 2017-11-10T12:10:36 | 2017-11-10T12:10:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,069 | py | # Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz)
# Center for Machine Perception, Czech Technical University in Prague
# Plots statistics of the ground truth poses.
import os
import sys
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout
from params.dataset_params import get_dataset_params
# Select which SIXD dataset to visualize (uncomment exactly one).
# dataset = 'hinterstoisser'
dataset = 'tless'
# dataset = 'tudlight'
# dataset = 'rutgers'
# dataset = 'tejani'
# dataset = 'doumanoglou'

# 'train' iterates over object IDs, 'test' over scene IDs.
dataset_part = 'train'
# dataset_part = 'test'

delta = 15 # Tolerance used in the visibility test [mm]

# Load dataset parameters
dp = get_dataset_params(dataset)

# Pick the ID range and the path-template keys for the chosen part.
if dataset_part == 'train':
    data_ids = range(1, dp['obj_count'] + 1)
    gt_mpath_key = 'obj_gt_mpath'
    gt_stats_mpath_key = 'obj_gt_stats_mpath'
else: # 'test'
    data_ids = range(1, dp['scene_count'] + 1)
    gt_mpath_key = 'scene_gt_mpath'
    gt_stats_mpath_key = 'scene_gt_stats_mpath'

# Load the GT statistics and flatten them into one list of per-GT dicts,
# each annotated with its data/image/GT/object ID.
gt_stats = []
for data_id in data_ids:
    print('Loading GT stats: {}, {}'.format(dataset, data_id))
    gts = inout.load_gt(dp[gt_mpath_key].format(data_id))
    gt_stats_curr = inout.load_yaml(
        dp[gt_stats_mpath_key].format(data_id, delta))
    for im_id, gt_stats_im in gt_stats_curr.items():
        for gt_id, p in enumerate(gt_stats_im):
            p['data_id'] = data_id
            p['im_id'] = im_id
            p['gt_id'] = gt_id
            p['obj_id'] = gts[im_id][gt_id]['obj_id']
            gt_stats.append(p)

print('GT count: {}'.format(len(gt_stats)))

# Collect the data: pixel counts, visible fractions, and bounding boxes
# (both of the full and of the visible object mask).
px_count_all = [p['px_count_all'] for p in gt_stats]
px_count_valid = [p['px_count_valid'] for p in gt_stats]
px_count_visib = [p['px_count_visib'] for p in gt_stats]
visib_fract = [p['visib_fract'] for p in gt_stats]
bbox_all_x = [p['bbox_all'][0] for p in gt_stats]
bbox_all_y = [p['bbox_all'][1] for p in gt_stats]
bbox_all_w = [p['bbox_all'][2] for p in gt_stats]
bbox_all_h = [p['bbox_all'][3] for p in gt_stats]
bbox_visib_x = [p['bbox_visib'][0] for p in gt_stats]
bbox_visib_y = [p['bbox_visib'][1] for p in gt_stats]
bbox_visib_w = [p['bbox_visib'][2] for p in gt_stats]
bbox_visib_h = [p['bbox_visib'][3] for p in gt_stats]

# 2x2 grid: pixel-count histograms, visible-fraction histogram, and
# bounding-box position/size histograms.
f, axs = plt.subplots(2, 2)
f.canvas.set_window_title(dataset)
axs[0, 0].hist([px_count_all, px_count_valid, px_count_visib],
               bins=20, range=(min(px_count_visib), max(px_count_all)))
axs[0, 0].legend([
    'All object mask pixels',
    'Valid object mask pixels',
    'Visible object mask pixels'
])
axs[0, 1].hist(visib_fract, bins=50, range=(0.0, 1.0))
axs[0, 1].set_xlabel('Visible fraction')
axs[1, 0].hist([bbox_all_x, bbox_all_y, bbox_visib_x, bbox_visib_y], bins=20)
axs[1, 0].legend([
    'Bbox all - x',
    'Bbox all - y',
    'Bbox visib - x',
    'Bbox visib - y'
])
axs[1, 1].hist([bbox_all_w, bbox_all_h, bbox_visib_w, bbox_visib_h], bins=20)
axs[1, 1].legend([
    'Bbox all - width',
    'Bbox all - height',
    'Bbox visib - width',
    'Bbox visib - height'
])
f.tight_layout()
plt.show()
| [
"tom.hodan@gmail.com"
] | tom.hodan@gmail.com |
0370c67e4ad6a1c9a00ba24860cd52675781253d | 616cc6c05f525dd2cb67916601f6ecd2c8242f24 | /homework/hw05/problems/client/protocols/collaborate.py | eee5309d70dd21fd0d26006009d80ae2aa9e2c93 | [] | no_license | cookieli/cs61a_li | 6f1d51aad7cd32fb27f64c855b3803bd2f8d9aad | 6ee0df9c64842bde9e30a0484e661abf04212358 | refs/heads/master | 2020-04-07T14:32:38.337554 | 2018-03-07T10:18:03 | 2018-03-07T10:18:03 | 124,218,933 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,240 | py | from client.protocols.common import models
from client.protocols.grading import grade
from client.utils import output
from client.utils import auth
from client.utils.firebase import pyrebase
import client
import requests
import os
import sys
import shutil
import json
import logging
import urllib.error
import urllib.request
import platform
import time
import webbrowser
log = logging.getLogger(__name__)
class CollaborateProtocol(models.Protocol):
    """Real-time collaboration mode for ok.

    Starts a session on the collab server, mirrors it through Firebase,
    reacts to remote 'save' and 'grade' actions, and blocks until the
    local user types 'exit'.
    """

    # Timeouts are specified in seconds.
    LONG_TIMEOUT = 30
    API_ENDPOINT = '{prefix}://{server}'
    FIREBASE_CONFIG = {
        'apiKey': "AIzaSyAFJn-q5SbxJnJcPVFhjxd25DA5Jusmd74",
        'authDomain': "ok-server.firebaseapp.com",
        'databaseURL': "https://ok-server.firebaseio.com",
        'storageBucket': "ok-server.appspot.com"
    }
    FILE_TIME_FORMAT = '%m_%d_%H_%M_%S'
    TIME_FORMAT = '%m/%d %H:%M:%S'
    BACKUP_DIRECTORY = 'ok-collab'
    COLLAB_SERVER = 'collab.cs61a.org'
    # COLLAB_SERVER = '127.0.0.1:8000' # Dev Server

    def run(self, messages):
        """Entry point: validate preconditions, then run the session loop."""
        if not self.args.collab:
            return
        elif self.args.local:
            log.info("Collaborate requires network access.")
            return

        # The file-contents and analytics protocols must have run first.
        if not messages.get('file_contents'):
            log.warning("Collaborate needs to be after file contents")
            return
        if not messages.get('analytics'):
            log.warning("Collaborate needs to be after analytics")
            return

        self.file_contents = messages.get('file_contents', {})
        self.collab_analytics = {'save': [], 'grade': []}
        messages['collaborate'] = self.collab_analytics

        self.collab_analytics['launch'] = time.strftime(self.TIME_FORMAT)

        try:
            print("Starting collaboration mode.")
            exit_val = self.start_firebase(messages)
            if exit_val is None:
                return
        except (KeyboardInterrupt, Exception) as e:
            # KeyboardInterrupt is not an Exception subclass, so it is
            # listed explicitly; Exception already covers AttributeError,
            # RuntimeError and OSError, which the original listed redundantly.
            print("Exiting collaboration mode (Run with --debug if this was unexpected)")
            self.log_event('exit', {'method': str(e)})
            # Best-effort cleanup of the Firebase stream and presence entry.
            if hasattr(self, 'stream') and self.stream:
                self.stream.close()
            if hasattr(self, 'presence'):
                (self.get_firebase()
                     .child('clients').child(self.presence['name'])
                     .remove(self.fire_user['idToken']))
            log.warning("Exception while waiting", exc_info=True)

    def start_firebase(self, messages):
        """Create a collab-server session, log into Firebase, open the UI.

        Blocks in an input loop; exits by raising ValueError('Done with
        session') when the user types 'exit'. Returns None on setup failure.
        """
        access_token = auth.authenticate(False)
        email = auth.get_student_email(access_token)
        identifier = auth.get_identifier(token=access_token, email=email)
        firebase = pyrebase.initialize_app(self.FIREBASE_CONFIG)
        self.fire_auth = firebase.auth()
        self.fire_db = firebase.database()

        self.user_email = email
        self.hostname = platform.node()

        data = {
            'access_token': access_token,
            'email': email,
            'identifier': identifier,
            'assignment': self.assignment.endpoint,
            'file_contents': messages.get('file_contents'),
            'analytics': messages.get('analytics'),
        }

        # Check for existing sessions first - TBD Future
        # existing_sessions = self.send_messages(data, endpoint='/collab/list')
        # response = self.prompt_for_existing_session(existing_sessions.get('sessions'))
        # if response:
        #     data['desired_session'] = response

        # Send data to collaborate server
        response_data = self.send_messages(data, self.LONG_TIMEOUT)
        if 'error' in response_data or 'session' not in response_data:
            print("There was an error while starting the session: {} Try again later"
                  .format(response_data.get('error')))
            log.warning("Error: {}".format(response_data.get('error')))
            return

        self.session_id = response_data['session']
        self.short_url = response_data['short_url']
        self.login_user = response_data.get('login_user')

        # Login as the firebase user provisioned by the collab server.
        email, password = response_data.get('login_user'), response_data.get('password')
        try:
            self.fire_user = self.fire_auth.sign_in_with_email_and_password(email,
                                                                            password)
            self.fire_uid = self.fire_user['localId']
        except (ValueError, KeyError) as e:
            log.warning("Could not login", exc_info=True)
            print("Could not login to the collaboration server.")
            return

        # Subscribe to remote actions and register this client's presence.
        self.stream = (self.get_firebase()
                           .child('actions').stream(self.stream_listener,
                                                    self.fire_user['idToken']))
        self.presence = (self.get_firebase()
                             .child('clients').push({'computer': platform.node(),
                                                     'uid': self.fire_uid,
                                                     'owner': self.user_email,
                                                     'email': self.user_email},
                                                    self.fire_user['idToken']))

        # Open the collaboration UI in a browser.
        if response_data:
            open_url = response_data['url']
            if 'access_token' not in open_url:
                open_url = open_url + "?access_token={}".format(access_token)
            could_open = webbrowser.open_new(open_url)
            if not could_open:
                print("Could not open browser. Go to {}".format(open_url))
        else:
            log.error("There was an error with the server. Please try again later!")
            return

        print("Tell your group members or course staff to go to {}"
              .format(self.short_url))
        while True:
            data = input("[{}] Type exit to disconnect: ".format(self.short_url))
            if data.strip().lower() == 'exit':
                raise ValueError('Done with session')

    def prompt_for_existing_session(self, sessions):
        """Prompt the user to resume one of *sessions* or create a new one.

        Returns the chosen session dict, or None to create a new session.
        """
        if not sessions:
            return None
        print("Would you like to join a previous session or create a new session?")
        for index, session in enumerate(sessions):
            print(("{id} : {creator} started at {timestamp} ({hashid})"
                   .format(id=index + 1, creator=session.get('creator'),
                           timestamp=session.get('created'), hashid=session.get('id'))))
        print("{new_id} : Create a new session with the current files?"
              .format(new_id=len(sessions) + 1))
        desired = input("Type the number of the session you'd like to join: ")
        try:
            outcome = int(desired.strip())
        except ValueError:
            # Unparseable answers default to "new session".
            outcome = len(sessions) + 1
            log.warning("Could not parse int for choice")
        # Bug fixes vs. the original: `>=` made the last listed session
        # unselectable, and the function returned the loop variable
        # `session` (always the last one) instead of the user's choice.
        if outcome < 1 or outcome > len(sessions):
            log.info("Chose to start new session")
            return None
        log.info("Resuming session {}".format(outcome - 1))
        return sessions[outcome - 1]

    def send_messages(self, data, timeout=30, endpoint='/collab/start/'):
        """Send messages to server, along with user authentication."""
        serialized_data = json.dumps(data).encode(encoding='utf-8')
        server = self.COLLAB_SERVER + endpoint
        prefix = 'http' if self.args.insecure else 'https'
        address = self.API_ENDPOINT.format(server=server, prefix=prefix)
        params = {
            'client_name': 'ok-client',
            'client_version': client.__version__,
        }
        headers = {"Content-Type": "application/json"}
        log.info('Sending messages to %s', address)
        try:
            r = requests.post(address, params=params, data=serialized_data,
                              headers=headers, timeout=timeout)
            r.raise_for_status()
            return r.json()
        except Exception as ex:
            # requests.RequestException and urllib's HTTPError are Exception
            # subclasses, so one clause covers all network failures.
            message = '{}: {}'.format(ex.__class__.__name__, str(ex))
            log.warning(message)
            # Fixed user-facing message: the original concatenated the two
            # literals without a separating space ("server.Run with ...").
            print("There was an error connecting to the server. "
                  "Run with --debug for more details")
            return

    def log_event(self, name, data):
        """Record a timestamped event under *name* in the collab analytics."""
        if not self.collab_analytics.get(name):
            self.collab_analytics[name] = []
        log_data = {
            'time': time.strftime(self.TIME_FORMAT),
            'data': data
        }
        self.collab_analytics[name].append(log_data)

    ############
    # Firebase #
    ############

    def get_firebase(self):
        """Return the Firebase node for this user's current session."""
        return (self.fire_db.child('ok-sessions')
                    .child(self.fire_uid)
                    .child(self.session_id))

    def send_firebase(self, channel, data):
        """Push *data* onto *channel* of the current session."""
        return (self.get_firebase().child(channel)
                    .push(data, self.fire_user['idToken']))

    def stream_listener(self, message):
        """Dispatch a streamed Firebase action to save() or run_tests()."""
        data = message.get('data')
        if not data:
            log.info("Irrelevant message logged while listening")
            return
        action = data.get('action')
        sender = data.get('user')
        log.debug('Received new {} message from {}'.format(action, sender))
        file_name = data.get('fileName')
        if action == "save":
            print("Saving {} locally (initiated by {})"
                  .format(file_name, data.get('user')))
            self.log_event('save', data)
            return self.save(data)
        elif action == "grade":
            print("Running tests locally (initiated by {})".format(data.get('user')))
            self.log_event('grade', data)
            return self.run_tests(data)
        else:
            print("Unknown action {}".format(action))

    def run_tests(self, data):
        """Save the remote file, run the autograder, and publish results."""
        backup = self.save(data)

        # Perform reload of some modules for file change
        if self.assignment.src:
            for module in self.assignment.src:
                module_name = module.replace('.py', '')
                if '/' not in module_name:
                    # Ignore subdirectories for now.
                    if module_name in sys.modules:
                        del sys.modules[module_name]

        if not backup:
            (self.get_firebase().child('term')
                 .push({"status": 'Failed',
                        "computer": self.hostname,
                        "time": time.strftime(self.TIME_FORMAT),
                        "email": self.user_email,
                        'text': "Unknown files. Could not run autograding\n"},
                       self.fire_user['idToken']))
            return

        test_names = [t.name for t in list(self.assignment.test_map.values())]
        desired_test = data.get('run_test')
        if desired_test:
            test_names = [t for t in test_names if t.lower() == desired_test.lower()]

        (self.get_firebase().child('term')
             .push({"status": 'Running',
                    "computer": self.hostname,
                    "time": time.strftime(self.TIME_FORMAT),
                    "email": self.user_email,
                    'text': "Running tests for: {}\n".format(test_names)},
                   self.fire_user['idToken']))

        # NOTE(review): test_names only affects the announcement above;
        # grading always runs the full test map — confirm this is intended.
        grading_results = self.grade(list(self.assignment.test_map.values()))

        (self.get_firebase().child('term')
             .push({"status": 'Done',
                    "computer": self.hostname,
                    "email": self.user_email,
                    "time": time.strftime(self.TIME_FORMAT),
                    'text': str(grading_results['output'])[:6000],
                    'grading': grading_results['grading']},
                   self.fire_user['idToken']))

        # Treat autograde attempts like a backup if the source wasn't from
        # the same user: restore the pre-grade file contents.
        if data['user'] != self.user_email:
            if backup and backup != data.get('fileName'):
                shutil.move(backup, data.get('fileName'))

    def save(self, data):
        """Overwrite a local source file with remote contents.

        Returns the path of the backup made beforehand (or the original
        file name if there was nothing to back up), or None for files
        that are not part of the assignment.
        """
        file_name = data['fileName'].strip()
        if file_name not in self.assignment.src or file_name.endswith('.ok'):
            if file_name != 'submit':
                log.warning("Unknown filename {}".format(file_name))
                print("Unknown file - Not saving {}".format(file_name))
            return

        if not os.path.isfile(file_name):
            log.warning('File {} does not exist. Not backing up'.format(file_name))
            backup_dst = file_name
        else:
            # Backup the file before overwriting it.
            log.debug("Backing up file")
            backup_dst = self.backup_file(file_name)
            print("Backed up file to {}".format(backup_dst))

        log.debug("Beginning overwriting file")
        contents = data['file']
        with open(file_name, 'w') as f:
            f.write(contents)
        log.debug("Done replacing file")

        # Update file contents for backup
        self.file_contents[file_name] = contents
        return backup_dst

    def backup_file(self, file_name):
        """Copy *file_name* into the backup directory; return the copy's path."""
        if not os.path.exists(self.BACKUP_DIRECTORY):
            os.makedirs(self.BACKUP_DIRECTORY)
        safe_file_name = file_name.replace('/', '').replace('.py', '')
        backup_name = '{}/{}-{}.txt'.format(self.BACKUP_DIRECTORY, safe_file_name,
                                            time.strftime(self.FILE_TIME_FORMAT))
        log.info("Backing up {} to {}".format(file_name, backup_name))
        shutil.copyfile(file_name, backup_name)
        return backup_name

    def grade(self, tests):
        """Run the module-level grade() over *tests*, capturing printed output."""
        data = {}
        print("Starting grading from external request")
        log_id = output.new_log()
        grade(tests, data, verbose=self.args.verbose)
        printed_output = ''.join(output.get_log(log_id))
        output.remove_log(log_id)
        data['output'] = printed_output
        return data

protocol = CollaborateProtocol
| [
"you@example.com"
] | you@example.com |
f760f1df9186ccfbc96386c84461deddfae9e773 | d420b5780e26071caa34de8bc0cfcaf431ae4dcd | /testing/test_disjoint_catstrat.py | 90d218907624eea220027bd9b546e12261795221 | [
"MIT"
] | permissive | parrt/stratx | 2a09fdacd6b7a0f9b065feada1ecdfdfa0f0b513 | 97741afcead00c83dbe7fce37a42c4a4d4890a1c | refs/heads/master | 2022-04-30T13:57:50.037114 | 2022-04-24T16:55:58 | 2022-04-24T16:55:58 | 185,261,236 | 61 | 14 | MIT | 2019-10-21T21:14:14 | 2019-05-06T19:46:39 | Jupyter Notebook | UTF-8 | Python | false | false | 14,278 | py | """
MIT License
Copyright (c) 2019 Terence Parr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
from sklearn.ensemble import RandomForestRegressor
from timeit import default_timer as timer
from sklearn.utils import resample
import shap
import stratx.partdep
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy import nan
# TEST CATSTRATPD ON SYNTHETIC (SOME COLINEAR) DATASETS WITH KNOWN ANSWERS
def check(X, y, colname,
          expected_deltas, expected_avg_per_cat,
          expected_ignored=0,
          min_samples_leaf=15):
    """Run cat_partial_dependence on (X, y, colname) and compare to expectations.

    Both the computed and the expected per-category averages are shifted so
    their NaN-ignoring minimum is 0 before comparing, since partial
    dependences are only defined up to an additive constant.
    NaNs compare equal under np.testing.assert_array_equal.
    """
    leaf_deltas, leaf_counts, avg_per_cat, count_per_cat, ignored = \
        stratx.partdep.cat_partial_dependence(X, y, colname,
                                              min_samples_leaf=min_samples_leaf)
    print(leaf_deltas, avg_per_cat)

    # Normalize so all 0-based. Use out-of-place subtraction: the original
    # `-=` mutated the caller's expected_avg_per_cat array in place.
    expected_avg_per_cat = expected_avg_per_cat - np.nanmin(expected_avg_per_cat)
    avg_per_cat = avg_per_cat - np.nanmin(avg_per_cat)

    assert ignored == expected_ignored, \
        f"Expected ignored {expected_ignored} got {ignored}"
    assert len(leaf_deltas) == len(expected_deltas), \
        f"Expected ranges {expected_deltas}"
    np.testing.assert_array_equal(leaf_deltas, expected_deltas,
                                  f"Expected deltas {expected_deltas} got {leaf_deltas}")
    assert len(avg_per_cat) == len(expected_avg_per_cat), \
        f"Expected slopes {expected_avg_per_cat}"
    np.testing.assert_array_equal(avg_per_cat, expected_avg_per_cat,
                                  f"Expected slopes {expected_avg_per_cat} got {avg_per_cat}")
def test_binary_one_region():
    """Two rows in a single stratification group: deltas 0 and 30 for cats 3, 4."""
    df = pd.DataFrame({
        'x1': [1, 1],
        'x2': [3, 4],
        'y': [100, 130],
    })
    X, y = df.drop('y', axis=1), df['y']

    expected_deltas = np.array([nan, nan, nan, 0, 30]).reshape(-1, 1)
    expected_avg_per_cat = np.array([nan, nan, nan, 0, 30])
    check(X, y, "x2", expected_deltas, expected_avg_per_cat, min_samples_leaf=2)
def test_one_region():
    """Three rows in one group: steps of 5 between consecutive x2 categories."""
    df = pd.DataFrame({
        'x1': [1, 1, 1],
        'x2': [3, 4, 5],
        'y': [10, 15, 20],
    })
    X, y = df.drop('y', axis=1), df['y']

    expected_deltas = np.array([nan, nan, nan, 0, 5, 10]).reshape(-1, 1)
    expected_avg_per_cat = np.array([nan, nan, nan, 0, 5, 10])
    check(X, y, "x2", expected_deltas, expected_avg_per_cat, min_samples_leaf=3)
def test_disjoint_regions():
    """
    What happens when we have two disjoint regions in x_j space?
    Does the 2nd start with 0 again with cumsum?
    """
    df = pd.DataFrame()
    df['x1'] = [1, 1, 1,  # stratify first three x2
                3, 3, 3]  # stratify 2nd three x2
    df['x2'] = [1, 2, 3,
                5, 6, 7]
    df['y'] = [10, 13, 16,  # first x2 region +3 slope (comment fixed; was "+2")
               50, 60, 70]  # second x2 region +10 slope
    X = df.drop('y', axis=1)
    y = df['y']

    # Offsets of each group's mean y relative to the first group's mean.
    # Use an explicit ndarray instead of `list - scalar`, which only worked
    # because np.mean returns numpy scalars whose reflected ops coerce lists.
    group_averages = np.array([np.mean([10, 13, 16]), np.mean([50, 60, 70])])
    d = group_averages - group_averages[0]

    expected_deltas = np.array([[nan, nan],
                                [ 0, nan],
                                [ 3, nan],
                                [ 6, nan],
                                [nan, nan],
                                [nan, 0],  # index cat 5
                                [nan, 10],
                                [nan, 20]])
    expected_avg_per_cat = np.array([nan, d[0]+0, d[0]+3, d[0]+6, nan, d[1]+0, d[1]+10, d[1]+20])
    check(X, y, "x2", expected_deltas, expected_avg_per_cat, min_samples_leaf=3)
def test_disjoint_regions_with_isolated_single_x_in_between():
    """Three groups where the middle group collapses to a single x2 category."""
    df = pd.DataFrame()
    df['x1'] = [1, 1, 1,  # stratify first three x2
                3, 3, 3,  # stratify middle group
                5, 5, 5]  # stratify 3rd group x2
    df['x2'] = [1, 2, 3,
                4, 4, 4,  # middle of other groups and same cat
                5, 6, 7]
    df['y'] = [10, 11, 12,  # first x2 region +1 slope
               9, 7, 8,
               20, 19, 18]  # 3rd x2 region -1 slope but from higher y downwards
    X = df.drop('y', axis=1)
    y = df['y']

    # Offsets of each group's mean y relative to the first group's mean.
    # Explicit ndarray instead of relying on numpy-scalar reflected
    # arithmetic to coerce a Python list.
    group_averages = np.array([np.mean([10, 11, 12]),
                               np.mean([9, 7, 8]),
                               np.mean([20, 19, 18])])
    d = group_averages - group_averages[0]

    expected_deltas = np.array([[nan, nan, nan],
                                [0, nan, nan],
                                [1, nan, nan],
                                [2, nan, nan],
                                [nan, 0, nan],
                                [nan, nan, 2],
                                [nan, nan, 1],
                                [nan, nan, 0]])
    expected_avg_per_cat = np.array([nan, d[0]+0, d[0]+1, d[0]+2, d[1]+0, d[2]+2, d[2]+1, d[2]+0])
    check(X, y, "x2",
          expected_deltas, expected_avg_per_cat,
          min_samples_leaf=3)
def test_sawtooth_derivative_disjoint_regions_bulldozer():
    """Four disjoint ModelID regions with alternating +/-1 slopes (sawtooth)."""
    df = pd.DataFrame()
    df['YearMade'] = [1, 1, 1,  # stratify into 4 groups
                      3, 3, 3,
                      5, 5, 5,
                      7, 7, 7]
    df['ModelID'] = [1, 2, 3,
                     5, 6, 7,
                     9, 10, 11,
                     13, 14, 15]
    df['y'] = [10, 9, 8,
               12, 13, 14,
               6, 5, 4,
               16, 17, 18]
    # NOTE(review): leftover debug print? It is unused by the assertions.
    print((df['y']+10).values)
    X = df.drop('y', axis=1)
    y = df['y']
    # Computed once here so avg_per_cat is available for the plot below;
    # check() recomputes the same partial dependence for the assertions.
    leaf_deltas, leaf_counts, avg_per_cat, count_per_cat, ignored = \
        stratx.partdep.cat_partial_dependence(X, y, "ModelID", min_samples_leaf=3)
    expected_deltas = np.array([[nan, nan, nan, nan],
                                [ 2, nan, nan, nan],
                                [ 1, nan, nan, nan],
                                [ 0, nan, nan, nan],
                                [nan, nan, nan, nan],
                                [nan, 0, nan, nan],
                                [nan, 1, nan, nan],
                                [nan, 2, nan, nan],
                                [nan, nan, nan, nan],
                                [nan, nan, 2, nan],
                                [nan, nan, 1, nan],
                                [nan, nan, 0, nan],
                                [nan, nan, nan, nan],
                                [nan, nan, nan, 0],
                                [nan, nan, nan, 1],
                                [nan, nan, nan, 2]])
    expected_avg_per_cat = np.array([nan, 2, 1, 0, nan, 4, 5, 6, nan, -2, -3, -4, nan, 8, 9, 10])
    check(X, y, "ModelID", expected_deltas, expected_avg_per_cat, min_samples_leaf=3)

    # Visual comparison of actual y vs the computed PDP values.
    plt.figure(figsize=(4,3))
    plt.scatter(df['ModelID'], y, s=8, label="Actual", c='#4A4A4A')
    plt.scatter(df['ModelID'], avg_per_cat[np.where(~np.isnan(avg_per_cat))],
                marker="s",
                s=10, label="PDP", c='#FEAE61')
    plt.xlabel("ModelID")
    plt.ylabel("SalePrice")
    plt.legend(loc="upper left")
    # NOTE(review): plt.show() blocks non-interactive test runs — confirm
    # this test is meant to be run manually.
    plt.show()
def test_sawtooth_derivative_disjoint_regions_bulldozer_some_negative():
    # Same 4-leaf sawtooth layout as the test above, but the y values are
    # shifted so some are negative.  The expected deltas/averages are the
    # same literal arrays as in the non-negative variant.
    df = pd.DataFrame()
    df['YearMade'] = [1, 1, 1,  # stratify into 4 groups
                      3, 3, 3,
                      5, 5, 5,
                      7, 7, 7]
    df['ModelID'] = [1, 2, 3,
                     5, 6, 7,
                     9, 10, 11,
                     13, 14, 15]
    df['y'] = [ 0, -1, -2, # first x2 region -1 slope
                2, 3, 4, # second x2 region +1 slope
               -4, -5, -6,
                6, 7, 8]
    X = df.drop('y', axis=1)
    y = df['y']
    leaf_deltas, leaf_counts, avg_per_cat, count_per_cat, ignored = \
        stratx.partdep.cat_partial_dependence(X, y, "ModelID", min_samples_leaf=3)
    # Row index is the ModelID category; categories 0, 4, 8, 12 are absent.
    expected_deltas = np.array([[nan, nan, nan, nan],
                                [ 2, nan, nan, nan],
                                [ 1, nan, nan, nan],
                                [ 0, nan, nan, nan],
                                [nan, nan, nan, nan],
                                [nan, 0, nan, nan],
                                [nan, 1, nan, nan],
                                [nan, 2, nan, nan],
                                [nan, nan, nan, nan],
                                [nan, nan, 2, nan],
                                [nan, nan, 1, nan],
                                [nan, nan, 0, nan],
                                [nan, nan, nan, nan],
                                [nan, nan, nan, 0],
                                [nan, nan, nan, 1],
                                [nan, nan, nan, 2]])
    expected_avg_per_cat = np.array([nan, 2, 1, 0, nan, 4, 5, 6, nan, -2, -3, -4, nan, 8, 9, 10])
    check(X, y, "ModelID", expected_deltas, expected_avg_per_cat, min_samples_leaf=3)
    # Visual comparison of actual y vs PD; see review note in the sibling test
    # about plt.show() under non-interactive backends.
    plt.figure(figsize=(4,3))
    plt.scatter(df['ModelID'], y, s=8, label="Actual", c='#4A4A4A')
    plt.scatter(df['ModelID'], avg_per_cat[np.where(~np.isnan(avg_per_cat))],
                marker="s",
                s=10, label="PDP", c='#FEAE61')
    plt.xlabel("ModelID")
    plt.ylabel("SalePrice")
    plt.legend(loc="upper left")
    plt.show()
# ------ Stuff below is mostly for exploring disjoint regions ------------
def synthetic_bulldozer(n:int, gaps_in_ModelID=False, drop_modelIDs=None):
    """
    Bulldozer with ModelID, MachineHours, YearMade, EROP -> SalePrice
    where EROP (cage description) in {1,2,3,4},
    YearMade in {2000..2010},
    ModelID in {100,500},
    MachineHours in 0..1000.
    EROP tied to ModelID
    ModelID tied to YearMade; randomize ID within tranches
    MachineHours is independent

    Returns (df, X, y) where X = df without SalePrice and y = SalePrice.
    Side effect: shows a scatter plot of ModelID vs model value.
    """
    # First, set up random column values but tie them to other columns
    df = pd.DataFrame()
    df['YearMade'] = np.random.randint(2000,2010+1, size=(n,))
    df['MachineHours'] = np.random.random(size=n)*1000
    df['ModelID'] = 0
    # Row counts per YearMade tranche; each mask below reuses its count.
    # NOTE(review): the tranches overlap (2000 is in the first two masks,
    # 2005 in the 2nd and 3rd), so later assignments overwrite earlier ones
    # for those years — confirm this is the intended randomization.
    n1 = np.sum(df['YearMade'].isin([2000,2001,2002]))
    n2 = np.sum(df['YearMade'].isin([2000,2003,2005]))
    n3 = np.sum(df['YearMade'].isin(range(2005,2007+1)))
    n4 = np.sum(df['YearMade'].isin(range(2008,2010+1)))
    df.loc[df['YearMade'].isin([2000,2001,2002]), 'ModelID'] = np.random.randint(100, 200+1, size=(n1,))
    df.loc[df['YearMade'].isin([2000,2003,2005]), 'ModelID'] = np.random.randint(201, 300+1, size=(n2,))
    df.loc[df['YearMade'].isin(range(2005,2007+1)), 'ModelID'] = np.random.randint(301, 400+1, size=(n3,))
    df.loc[df['YearMade'].isin(range(2008,2010+1)), 'ModelID'] = np.random.randint(401, 500+1, size=(n4,))
    # Any rows whose year fell in no tranche get a fully random ModelID.
    df.loc[df['ModelID']==0, "ModelID"] = np.random.randint(100,500+1, np.sum(df['ModelID']==0))
    if gaps_in_ModelID:
        df = df[(df['ModelID']<300)|(df['ModelID']>=400)] # kill 300..399
    if drop_modelIDs is not None:
        # NOTE(review): drops rows matching `drop_modelIDs` randomly drawn
        # IDs (with possible repeats), not exactly that many distinct IDs.
        df = df.iloc[np.where(~df['ModelID'].isin(np.random.randint(100,500+1,size=drop_modelIDs)))] # drop some
    print("n =", len(df))
    # EROP derived from ModelID divisibility; later rules overwrite earlier.
    df['EROP'] = 1 # None
    df.loc[df['ModelID'] % 2==0, 'EROP'] = 2 # Sides only
    df.loc[df['ModelID'] % 3==0, 'EROP'] = 3 # Full
    df.loc[df['ModelID'] % 4==0, 'EROP'] = 4 # With AC
    # Compute a sawtooth for known values of different models
    modelvalue = df['ModelID'].isin(range(100,199+1)) * -df['ModelID'] + \
                 df['ModelID'].isin(range(200,299+1)) * df['ModelID'] + \
                 df['ModelID'].isin(range(300,399+1)) * -df['ModelID'] + \
                 df['ModelID'].isin(range(400,500+1)) * df['ModelID']
    age = np.max(df['YearMade']) - df['YearMade']
    # SalePrice is a linear blend of age, hours, cage type and model value.
    df['SalePrice'] = 15_000 \
                      - age*1000 \
                      - df['MachineHours']*1.5 \
                      + (df['EROP']-1) * 200 \
                      + modelvalue*10
    X = df.drop('SalePrice', axis=1)
    y = df['SalePrice']
    # plt.scatter(df['ModelID'], y, s=3)
    # plt.show()
    plt.scatter(df['ModelID'], modelvalue*10, s=1)
    plt.show()
    return df, X, y
def random_sawtooth_derivative_disjoint_regions_bulldozer():
    # Exploration helper (not a pytest test): generates a random synthetic
    # bulldozer dataset and computes the categorical partial dependence of
    # SalePrice on ModelID.  The plotting/RF snippets below are kept
    # commented out for manual experimentation.
    df, X, y = synthetic_bulldozer(n=100,
                                   gaps_in_ModelID=False,
                                   drop_modelIDs=None)
    # rf = RandomForestRegressor(n_estimators=50, oob_score=True)
    # rf.fit(X,y)
    # print("OOB", rf.oob_score_)
    leaf_deltas, leaf_counts, avg_per_cat, count_per_cat, ignored = \
        stratx.partdep.cat_partial_dependence(X, y, "ModelID", min_samples_leaf=10)
    # plot_catstratpd(X, y, colname='ModelID', targetname='SalePrice',
    #                 n_trials=1,
    #                 min_samples_leaf=15,
    #                 show_xticks=False,
    #                 show_impact=True,
    #                 # min_y_shifted_to_zero=True,
    #                 figsize=(10,5),
    #                 # yrange=(-150_000, 150_000),
    #                 verbose=False)
    # plt.show()
    #
    # plot_stratpd(X, y, colname='ModelID', targetname='SalePrice',
    #              show_impact=True,
    #              min_slopes_per_x=1,
    #              min_samples_leaf=15)
    # plt.show()
| [
"parrt@cs.usfca.edu"
] | parrt@cs.usfca.edu |
01986fb8b82dfc269f8acfe87fc88ab902ec5cd7 | a9868b17ddc5f7f28911c57870e327238a2432d8 | /python_Pandas_Numpy/Pandas/Pandas08_05_Oper_최윤종.py | 8e68d21c628a7da3d95fdb66df283dc46f2a3a4d | [] | no_license | ChoiYoonJong/DataScience | 439568a668307ed0cab0cffb688fd832b10047ab | 3cab98eacecd8c1782e6f91b2b7ffa0ecefe4ed1 | refs/heads/main | 2023-06-07T02:56:08.335411 | 2021-07-09T13:23:58 | 2021-07-09T13:23:58 | 378,833,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py |
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
# Notebook-exported demo of elementwise Series arithmetic and index alignment.
scientists = pd.read_csv('../data/scientists.csv')
# In[3]:
# Assumes the CSV has an 'Age' column — TODO confirm against the data file.
ages = scientists['Age']
print(ages.max())
# In[5]:
# Elementwise operations between two Series of the same index.
print(ages + ages)
# In[6]:
print(ages * ages)
# In[7]:
# Scalar operations broadcast over every element.
print(ages + 100)
# In[8]:
print(ages * 2)
# In[9]:
print(pd.Series([1,100]))
# In[10]:
# Adding Series of different lengths aligns on index labels; labels present
# in only one operand yield NaN in the result.
print(ages,"\n\n")
print(pd.Series([1,100]),"\n\n")
print(ages + pd.Series([1,100]))
# In[11]:
print(ages)
# In[14]:
# Reversing the index order does not change alignment semantics below.
rev_ages = ages.sort_index(ascending=False)
print(rev_ages)
# In[15]:
print(ages * 2)
# In[16]:
# Alignment is by label, so this equals ages + ages despite the reversal.
print(ages + rev_ages)
| [
"noreply@github.com"
] | ChoiYoonJong.noreply@github.com |
18843cd91a77a4b123c23a31259d14bb5f63f9a9 | f83934dd60d4961848c0a86f6d7fbe07b79a1d63 | /examples/skybox.py | 35338a6b839cf8d1a1925b6a5bd7f559fced491a | [] | no_license | brianholland/glumpy | 2a31e2f5fd039d1debb30dd010ad36c458f329cf | a691082385e02db9b1d461847b9e36d8534630fa | refs/heads/master | 2020-12-25T21:43:58.743259 | 2015-11-30T11:04:46 | 2015-11-30T11:04:46 | 46,670,630 | 0 | 0 | null | 2015-11-30T11:04:46 | 2015-11-22T17:10:24 | Python | UTF-8 | Python | false | false | 2,225 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gloo, gl, data
from glumpy.transforms import Trackball, Position
vertex = """
attribute vec3 position;
attribute vec3 texcoord;
varying vec3 v_texcoord;
void main()
{
gl_Position = <transform(position)> * vec4(-1,-1,1,1);
v_texcoord = texcoord;
}
"""
fragment = """
uniform samplerCube texture;
varying vec3 v_texcoord;
void main()
{
gl_FragColor = textureCube(texture, v_texcoord);
}
"""
window = app.Window(width=1024, height=1024)
@window.event
def on_draw(dt):
    # Redraw the cube each frame; `program`/`indices` are module globals
    # assigned below (resolved at call time, after setup completes).
    window.clear()
    program.draw(gl.GL_TRIANGLES, indices)
@window.event
def on_init():
    # Depth testing so the cube faces occlude correctly.
    gl.glEnable(gl.GL_DEPTH_TEST)
# Unit cube corners; texcoords equal the positions so the cubemap is
# sampled by direction from the cube center.
vertices = np.array([[+1,+1,+1], [-1,+1,+1], [-1,-1,+1], [+1,-1,+1],
                     [+1,-1,-1], [+1,+1,-1], [-1,+1,-1], [-1,-1,-1]])
texcoords = np.array([[+1,+1,+1], [-1,+1,+1], [-1,-1,+1], [+1,-1,+1],
                      [+1,-1,-1], [+1,+1,-1], [-1,+1,-1], [-1,-1,-1]])
# Expand the 8 corners into 6 quad faces (4 vertices each, 24 total).
faces = np.array([vertices[i] for i in [0,1,2,3, 0,3,4,5, 0,5,6,1,
                                        6,7,2,1, 7,4,3,2, 4,7,6,5]])
# Two triangles per quad face: pattern 0,1,2 / 0,2,3 offset by 4 per face.
indices = np.resize(np.array([0,1,2,0,2,3], dtype=np.uint32), 36)
indices += np.repeat(4 * np.arange(6, dtype=np.uint32), 6)
indices = indices.view(gloo.IndexBuffer)
# Six 1024x1024 RGB float faces for the cubemap texture.
texture = np.zeros((6,1024,1024,3),dtype=np.float32).view(gloo.TextureCube)
texture.interpolation = gl.GL_LINEAR
program = gloo.Program(vertex, fragment, count=24)
program['position'] = faces*10
program['texcoord'] = faces
program['texture'] = texture
program['transform'] = Trackball(Position(), distance=0)
# Load the six sky images (normalized to [0,1]) into the cubemap faces.
texture[2] = data.get("sky-left.png")/255.
texture[3] = data.get("sky-right.png")/255.
texture[0] = data.get("sky-front.png")/255.
texture[1] = data.get("sky-back.png")/255.
texture[4] = data.get("sky-up.png")/255.
texture[5] = data.get("sky-down.png")/255.
window.attach(program["transform"])
app.run()
| [
"Nicolas.Rougier@inria.fr"
] | Nicolas.Rougier@inria.fr |
29610a74a78bbc0a0b58a407062d860d22fc3808 | f65eefd3a4835f5fabcd2000540f352309c79451 | /nomos/__init__.py | aed7179c7ef31fcf7e5325ff2e30b79cb450f1d6 | [
"Apache-2.0"
] | permissive | whiteclover/Nomos | b3610e5b1e450d96b99d492476ead092887cb48b | 13d52f8bc728982496948b0b2a23e1bf60de60c0 | refs/heads/master | 2018-07-10T16:21:07.312647 | 2018-06-01T07:11:59 | 2018-06-01T07:11:59 | 103,917,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | #!/usr/bin/env python
#
# Copyright 2017 Nomos
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = "0.1.0"
| [
"lyanghwy@gmail.com"
] | lyanghwy@gmail.com |
8d14fad12f09747881d57daf744f6a3832bf66d5 | f4d0c26d3aa27c77a7c27d9002a08465a0638cbb | /csv_schema/apps.py | cc68a34da948dcfb3886e8505753ddb7fcf4e90f | [] | no_license | uk-gov-mirror/nhsengland.NCDR-reference-library | 3afe0711f47dc1b5fa25646bc870a806b3512ce5 | cac30ee0787e81fb9868731576c242c7ea3dbde8 | refs/heads/master | 2023-04-03T15:10:19.320708 | 2017-11-03T15:03:27 | 2017-11-03T15:03:27 | 356,799,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class CsvSchemaConfig(AppConfig):
    """Django application configuration for the ``csv_schema`` app."""
    # Label under which Django registers this application.
    name = 'csv_schema'
| [
"fredkingham@gmail.com"
] | fredkingham@gmail.com |
d41949b29005b96b1b15f022fe7c76b524793263 | f5d3d2f2a79b07bf71a0f1bbbb50385f135f5dd3 | /jina/peapods/runtimes/base.py | 504d65cff495cf7ce2b57c50bf5d6701964b5b87 | [
"Apache-2.0"
] | permissive | Rohitpandit021/jina | 5ab9be96eebeb6ec1a7cfae78a47e9b71789158e | f3db4d5e480375d8dc3bceda814ac1963dee76d7 | refs/heads/master | 2023-06-02T14:46:16.445246 | 2021-06-21T10:18:01 | 2021-06-21T10:18:01 | 378,832,389 | 0 | 0 | Apache-2.0 | 2021-06-21T10:22:10 | 2021-06-21T06:42:59 | Python | UTF-8 | Python | false | false | 3,683 | py | import argparse
from ...logging.logger import JinaLogger
class BaseRuntime:
    """Base class for blocking procedures managed by a :class:`BasePea`.

    A Runtime wraps any program/library/package that blocks its hosting
    thread or process.  The owning :class:`BasePea` (running in the main
    context ``M``) spawns a separate thread/process ``S`` and drives the
    runtime through this lifecycle:

    0. :meth:`__init__` — construction.
    1. :meth:`run_forever` — blocks ``S`` until it is unblocked.
    2. :meth:`teardown` — runs in ``S`` once :meth:`run_forever` returns
       or raises; it is about *cleaning*, not cancelling.

    Unblocking happens from ``M`` via some cancel mechanism — e.g. a
    :class:`threading.Event`/`multiprocessing.Event` polled by
    :meth:`run_forever`, a ZMQ message, or an HTTP/REST request — or by
    raising an exception inside :meth:`run_forever`, which jumps straight
    to :meth:`teardown`.

    .. note::
        Do not speculatively catch exceptions in :meth:`run_forever` or
        :meth:`teardown`; exceptions are much better handled by the
        managing :class:`BasePea`.

    .. seealso::
        :class:`BasePea` for managing a :class:`Runtime` object's lifecycle.
    """

    def __init__(self, args: 'argparse.Namespace', **kwargs):
        super().__init__()
        self.args = args
        cls_name = self.__class__.__name__
        # Prefix with the user-supplied pea name when one is given.
        self.name = f'{args.name}/{cls_name}' if args.name else cls_name
        self.logger = JinaLogger(self.name, **vars(self.args))

    def run_forever(self):
        """Block the hosting thread/process ``S`` until cancelled.

        Subclasses must override this.  Any exception raised here moves
        control directly to :meth:`teardown`.

        .. seealso::
            :meth:`cancel` for cancelling the forever loop.
        """
        raise NotImplementedError

    def teardown(self):
        """Tidy up after :meth:`run_forever` has been unblocked.

        Optional in subclasses; the base implementation only closes the
        logger.
        """
        self.logger.close()
| [
"rajashree.patil@embold.io"
] | rajashree.patil@embold.io |
0b283b8c8bf43055210cd3e22e33b88cdbe61862 | e4428f7635a978e3e68f0a94b736c3626f0ea66d | /src/basic/server/server.py | e17e91830e4dfa9c9aceece4d5eb3c8a59fa0211 | [] | no_license | rodrigoms2004/pythonWebSocketsNumpy | df297908f796dd5b9c7ff69d82cad3c06b240c46 | 353af18cc1303dfa0a0f2da7533eb603b7fe6bcb | refs/heads/master | 2020-08-04T04:39:28.198215 | 2019-10-01T04:08:30 | 2019-10-01T04:08:30 | 212,009,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/env python
# WS server example
import asyncio
import websockets
import numpy as np
from pickle import dumps, loads
async def inverseMatrix(websocket, path):
    # Handle one client connection: receive a pickled numpy matrix and
    # reply with its pickled inverse.
    buffer_matrix = await websocket.recv()
    # NOTE(review): unpickling data received from the network is unsafe if
    # clients are untrusted — pickle can execute arbitrary code on load.
    matrix = loads(buffer_matrix)
    print("Receiving matrix:\n", matrix)
    # Raises numpy.linalg.LinAlgError for singular/non-square input,
    # which would terminate this handler.
    inverse = np.linalg.inv(matrix)
    buffer_inverse = dumps(inverse)
    await websocket.send(buffer_inverse)
    print("Sending inverse:\n", inverse)
# Serve the handler on localhost:8765 and run the event loop forever.
start_server = websockets.serve(inverseMatrix, "localhost", 8765)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
"rodrigoms2004@gmail.com"
] | rodrigoms2004@gmail.com |
148085e914624bff502e11b0827994b9858b1877 | 7d98c95227ff36e7735eaf05857507baa8ecfaff | /myproject/settings.py | 2521c71c6540df85adc9b41e75e259df36493067 | [] | no_license | KANISHK-VERMA/Blogapp | 55876b58f1a5cb9db252571ad0898fb92c1df85a | 87e384139c453bff3e1487992449fed2e3e6d045 | refs/heads/master | 2022-11-11T13:25:57.291910 | 2020-07-02T17:35:53 | 2020-07-02T17:35:53 | 254,918,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,326 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and read
# it from the environment before any deployment.
SECRET_KEY = 'js)^obc*7izn8512y#-^1_!g&7p^1+f20r3gw(p=$@&on#k-dx'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'crispy_forms',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'Blogapp',
    'user.apps.UserConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'myproject.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'myproject.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

# Uploaded media configuration.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# BUGFIX: django-crispy-forms reads CRISPY_TEMPLATE_PACK (singular).  The
# original misspelled name was silently ignored, so crispy fell back to its
# default template pack instead of bootstrap4.
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Misspelled alias kept for backward compatibility with any code that read it.
CRISPY_TEMPLATES_PACK = CRISPY_TEMPLATE_PACK

# Auth redirect targets (URL pattern names).
LOGIN_REDIRECT_URL = 'nblog'
LOGIN_URL = 'nlogin'
"you@example.com"
] | you@example.com |
3bb3f731ef2c9152d232482fc1e57fa643925b8e | ca59d18e503ef22fbc920c6de48ffc8eac5a1443 | /tools/Polygraphy/examples/api/02_using_real_data/example.py | c59a454b5dbe79668694119aaf766617c9d0051b | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | boh-inspur/TensorRT | 9fc0ae0ad4e31da040d10728b63d9dc284852b67 | e4d2f7f4406f1c8f4632cc67de33728cef90ca29 | refs/heads/master | 2023-04-13T21:24:13.912673 | 2021-04-23T09:55:18 | 2021-04-23T09:55:18 | 265,431,588 | 0 | 0 | Apache-2.0 | 2021-04-23T09:55:19 | 2020-05-20T02:49:58 | null | UTF-8 | Python | false | false | 2,068 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script uses the Polygraphy Runner API to validate the outputs
of an identity model using a trivial dataset.
"""
import os
import numpy as np
from polygraphy.backend.trt import (EngineFromNetwork, NetworkFromOnnxPath,
TrtRunner)
# NCHW shape expected by the identity.onnx model's "x" input.
INPUT_SHAPE = (1, 1, 2, 2)
REAL_DATASET = [ # Definitely real data
    np.ones(INPUT_SHAPE, dtype=np.float32),
    np.zeros(INPUT_SHAPE, dtype=np.float32),
    np.ones(INPUT_SHAPE, dtype=np.float32),
    np.zeros(INPUT_SHAPE, dtype=np.float32),
]
# For our identity network, the golden output values are the same as the input values.
# Though this network appears to do nothing, it can be incredibly useful in some cases (like here!).
GOLDEN_VALUES = REAL_DATASET
# Path to the bundled identity model: ../../models/identity.onnx.
MODEL = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "models", "identity.onnx")
# Lazy loader: the TensorRT engine is only built when the runner activates.
build_engine = EngineFromNetwork(NetworkFromOnnxPath(MODEL))
# Activate the runner using a context manager. For TensorRT, this will build an engine,
# then destroy it upon exiting the context.
# NOTE: You can also use the activate() function for this, but you will need to make sure to
# deactivate() to avoid a memory leak. For that reason, a context manager is the safer option.
with TrtRunner(build_engine) as runner:
    for (data, golden) in zip(REAL_DATASET, GOLDEN_VALUES):
        # Feed the "x" input and check the "y" output matches exactly.
        outputs = runner.infer(feed_dict={"x": data})
        assert np.all(outputs["y"] == golden)
| [
"rajeevsrao@users.noreply.github.com"
] | rajeevsrao@users.noreply.github.com |
5bec41631c278d7d249f2265987181c7564e255d | 8a936bd3e28c9ec116244df37d3ba5aedd48c9cc | /dashboard/internal/pages/frmcmd4.py | b751b2e3718719d22d233b122acc197ad1d5ed43 | [
"Apache-2.0"
] | permissive | AssassinDev422/PHP_Minera | a66bd23610cbcfd43545e5b6a689c2c1b1248814 | f507dbcc4b4609990f14995754d54f42dcaaa618 | refs/heads/master | 2020-03-15T13:16:30.708491 | 2018-05-04T16:20:05 | 2018-05-04T16:20:05 | 132,162,749 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import os
import sys
print '<form action="process_order.php?sid=' + sys.argv[1] + '" method="post" role="form">'
| [
"saas.exp7@gmail.com"
] | saas.exp7@gmail.com |
b4a25d41e9e2a05fc29f834942d9ff3e5a17ecf4 | e0bd328b49ac5992316961aa8a1b17bbffa535f9 | /gnn/segments.py | 0d176ea1619c96c723515ee25b92c8670a54bd87 | [
"MIT"
] | permissive | BartoszPiotrowski/ATPboost | 2e71e3c98793567c1217f9fb9af99e2d7b644e77 | ae65e7b1ad41eea37bf27ad94cb9307c0adb1854 | refs/heads/master | 2021-05-04T14:50:36.639256 | 2020-07-02T15:51:40 | 2020-07-02T15:51:40 | 120,211,828 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,983 | py | import tensorflow as tf
import os
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
from .debug_node import tf_debug
from .tf_helpers import *
from collections.abc import Iterable
# class encapsulating tf.segment_sum etc. offering basic functionalities
# class encapsulating tf.segment_sum etc. offering basic functionalities
class Segments:
    """Ragged-array helper over TF1 segment ops.

    A `Segments` object describes a batch of variable-length segments laid
    out back-to-back in one flat data tensor.  `lens` holds the per-segment
    lengths; most methods come in a plain variant and a `_nonzero` variant
    that assumes/uses only the segments with length > 0.  Passing
    ``nonzero=True`` asserts (via tf.assert_less) that no segment is empty
    and lets the plain methods alias the cheaper `_nonzero` paths.
    """
    def __init__(self, lens, nonzero = False):
        # lens: int32 tensor [num_segments] of per-segment lengths.
        self.nonzero_guarantee = nonzero
        if nonzero: assertions = [tf.assert_less(0, lens)]
        else: assertions = []
        with tf.name_scope(None, "segments") as scope:
            with tf.control_dependencies(assertions):
                self.lens = lens
                # Offset of each segment's first element in the flat data.
                self.start_indices = tf.cumsum(self.lens, exclusive = True)
                self.segment_num = tf.size(self.lens)
                if nonzero:
                    # All segments are non-empty, so the "nonzero" views are
                    # just aliases of the full views.
                    self.nonzero_indices = tf.range(self.segment_num)
                    self.nonzero_lens = self.lens
                    self.nonzero_num = self.segment_num
                    self.start_indices_nonzero = self.start_indices
                else:
                    mask = tf.greater(self.lens, 0)
                    self.nonzero_indices = tf.boolean_mask(
                        tf.range(self.segment_num), mask
                    )
                    self.nonzero_lens = tf.boolean_mask(self.lens, mask)
                    self.nonzero_num = tf.size(self.nonzero_lens)
                    self.start_indices_nonzero = tf.cumsum(self.nonzero_lens, exclusive = True)
                self.empty = tf.equal(self.nonzero_num, 0)
                self.data_len = tf.reduce_sum(self.nonzero_lens)
                # Build segment ids for every flat-data position: scatter a 1
                # at each segment boundary, then cumsum.  tf.cond guards the
                # degenerate no-nonzero-segments case.
                def scatter_empty():
                    return tf.zeros([0], dtype=tf.int32)
                def scatter_nonempty():
                    return tf.scatter_nd(
                        tf.expand_dims(self.start_indices_nonzero[1:], 1),
                        tf.ones([self.nonzero_num-1], dtype=tf.int32),
                        [self.data_len],
                    )
                scattered = tf.cond(self.empty, scatter_empty, scatter_nonempty)
                self.segment_indices_nonzero = tf.cumsum(scattered)
                if nonzero: self.segment_indices = self.segment_indices_nonzero
                else:
                    # Map nonzero-segment ids back to original segment ids.
                    self.segment_indices = self.fill_nonzero(
                        tf.boolean_mask(tf.range(self.segment_num), mask),
                    )
    def fill(self, constants):
        # Broadcast one value per segment out to every data position.
        return tf.gather(constants, self.segment_indices)
    def fill_nonzero(self, constants):
        # As fill(), but `constants` is indexed by nonzero-segment position.
        return tf.gather(constants, self.segment_indices_nonzero)
    def collapse_nonzero(self, data, operations = [tf.segment_max, tf.segment_sum]):
        # Reduce each non-empty segment with each op and concat the results
        # along the last axis (single op: returned as-is).
        with tf.name_scope(None, "collapse_nonzero") as scope:
            res = [
                op(data, self.segment_indices_nonzero)
                for op in operations
            ]
            if len(res) == 1: return res[0]
            else: return tf.concat(res, -1)
    def add_zeros(self, data):
        # Re-insert zero rows for empty segments so the result is indexed by
        # original segment position again.
        if self.nonzero_guarantee: return data
        with tf.name_scope(None, "add_zeros") as scope:
            out_dim = [self.segment_num] + data.shape.dims[1:]
            return tf.scatter_nd(
                tf.expand_dims(self.nonzero_indices, 1),
                data, out_dim,
            )
    def collapse(self, data, *args, **kwargs):
        # Per-segment reduction with zeros for empty segments.
        x = self.collapse_nonzero(data, *args, **kwargs)
        x = self.add_zeros(x)
        return x
    def mask_segments(self, data, mask, nonzero = False):
        # Keep only whole segments selected by `mask` (one bool per segment).
        if self.nonzero_guarantee: nonzero = True
        with tf.name_scope(None, "mask_segments") as scope:
            mask = tf.cast(mask, bool)
            data_mask = self.fill(mask)
            masked_lens = tf.boolean_mask(self.lens, mask)
            masked_data = tf.boolean_mask(data, data_mask)
            return Segments(masked_lens, nonzero), masked_data
    def mask_data(self, data, mask, nonzero = False):
        # Keep only individual data elements selected by `mask` (one bool per
        # element); segment lengths shrink accordingly.
        if nonzero: assert(self.nonzero_guarantee)
        with tf.name_scope(None, "mask_data") as scope:
            new_lens = self.segment_sum(tf.cast(mask, tf.int32))
            new_data = tf.boolean_mask(data, tf.cast(mask, bool))
            return Segments(new_lens, nonzero), new_data
    def partition_segments(self, data, partitions, num_parts, nonzero = False):
        # Split segments (and their data) into `num_parts` groups by the
        # per-segment integer `partitions`; inverse of unpartition_segments.
        if self.nonzero_guarantee: nonzero = True
        if not isinstance(nonzero, Iterable):
            nonzero = [nonzero]*num_parts
        with tf.name_scope(None, "partition_segments") as scope:
            data_parts = self.fill(partitions)
            parted_lens = tf.dynamic_partition(self.lens, partitions, num_parts)
            parted_data = tf.dynamic_partition(data, data_parts, num_parts)
            return [
                (Segments(lens, nzg), parted_data_part)
                for lens, parted_data_part, nzg in zip(parted_lens, parted_data, nonzero)
            ]
    def segment_sum_nonzero(self, data):
        return tf.segment_sum(data, self.segment_indices_nonzero)
    def segment_sum(self, data):
        # Per-segment sum, producing zeros for empty segments.
        if self.nonzero_guarantee:
            return self.segment_sum_nonzero(data)
        else:
            return tf.unsorted_segment_sum(
                data = data,
                segment_ids = self.segment_indices,
                num_segments = self.segment_num,
            )
    def log_softmax(self, logits):
        # Per-segment log-softmax over the flat `logits`.
        with tf.name_scope(None, "segments.log_softmax") as scope:
            # numeric stability
            offset = tf.segment_max(logits, self.segment_indices_nonzero)
            logits_stab = logits - self.fill_nonzero(offset)
            # softmax denominators
            sums_e = self.fill_nonzero(
                self.segment_sum_nonzero(tf.exp(logits_stab)),
            )
            return logits_stab - tf.log(sums_e)
    def gather_nonzero(self, data, indices):
        # Pick one element per non-empty segment; `indices` are
        # segment-relative and bounds-checked by the assertions.
        with tf.name_scope(None, "segments.gather") as scope:
            with tf.control_dependencies([tf.assert_non_negative(indices),
                                          tf.assert_less(indices, self.nonzero_lens)]):
                return tf.gather(data, self.start_indices_nonzero + indices)
    def gather(self, data, indices):
        assert(self.nonzero_guarantee)
        return self.gather_nonzero(data, indices)
    def cumsum_ex(self, data):
        # Exclusive cumulative sum restarted at each segment boundary.
        all_cumsum = tf.cumsum(data, exclusive = True)
        seg_cumsum = tf.gather(all_cumsum, self.start_indices_nonzero)
        return all_cumsum - self.fill_nonzero(seg_cumsum)
    def sample_nonzero(self, probs):
        # Sample one segment-relative index per segment from the per-element
        # distribution `probs` via inverse-CDF: count how many CDF steps a
        # uniform draw exceeds.
        x = self.segment_sum(
            tf.cast(dtype = tf.int32, x=tf.greater_equal(
                self.fill_nonzero(tf.random_uniform([self.nonzero_num])),
                self.cumsum_ex(probs),
            ))
        )
        x = tf.maximum(0, x-1)
        return x
    def sample(self, probs):
        assert(self.nonzero_guarantee)
        return self.sample_nonzero(probs)
    def argmax_nonzero(self, data):
        # Segment-relative index of the first maximum in each segment:
        # count positions not strictly after the first max, minus one.
        max_vals = tf.segment_max(data, self.segment_indices_nonzero)
        is_max = tf.equal(self.fill_nonzero(max_vals), data)
        not_after_max = tf.equal(0, self.cumsum_ex(tf.cast(is_max, tf.int32)))
        return self.segment_sum_nonzero(tf.cast(not_after_max, tf.int32)) - 1
    def argmax(self, data):
        assert(self.nonzero_guarantee)
        return self.argmax_nonzero(data)
    def sparse_cross_entropy(self, log_probs, labels, weights = None, aggregate = mean_or_zero):
        # Cross entropy against one integer label per segment.
        result = self.gather(-log_probs, labels)
        if weights is not None: result = result * weights
        if aggregate: result = aggregate(result)
        return result
    def cross_entropy(self, log_probs, target_probs, aggregate = mean_or_zero):
        # Cross entropy against a full per-element target distribution.
        result = -self.segment_sum(log_probs * target_probs)
        if aggregate: result = aggregate(result)
        return result
    def entropy(self, probs = None, log_probs = None, aggregate = mean_or_zero):
        # Accepts probs, log-probs, or both; zero-prob entries contribute 0.
        assert(probs is not None or log_probs is not None)
        if probs is None: probs = tf.exp(log_probs)
        elif log_probs is None:
            log_probs = tf.where(tf.greater(probs, 0), tf.log(probs),
                                 tf.zeros_like(probs))
        return self.cross_entropy(log_probs, probs, aggregate)
    def kl_divergence(self, log_probs, target_probs, aggregate = mean_or_zero):
        # KL(target || predicted) = H(target, predicted) - H(target).
        return self.cross_entropy(log_probs, target_probs, aggregate) \
               - self.entropy(probs = target_probs, aggregate = aggregate)
class SegmentsPH(Segments):
    """Segments backed by tf.placeholders, fed from Python lists of arrays.

    `data_shape` is the per-element trailing shape (``()`` for scalars);
    pass ``data_shape=None`` to create only the lengths placeholder.
    """
    def __init__(self, data_shape = (), dtype = tf.int32, nonzero = False):
        Segments.__init__(self, tf.placeholder(tf.int32, [None], name="segment_lens"), nonzero = nonzero)
        if data_shape is None:
            self.data = None
        else:
            self.data_shape = tuple(data_shape)
            self.data = tf.placeholder(dtype, (None,)+self.data_shape, name="segment_data")
            # Reusable zero-length array for feeding when all segments are empty.
            self.empty_input = np.zeros((0,)+self.data_shape, dtype = dtype.as_numpy_dtype)
    def feed(self, d, data):
        """Fill feed_dict `d` from `data`, a list of per-segment arrays."""
        lens = list(map(len, data))
        if self.nonzero_guarantee: assert(0 not in lens)
        d[self.lens] = lens
        if self.data is not None:
            # np.concatenate rejects an empty sequence, so filter first and
            # fall back to the preallocated empty array.
            # (The lambda parameter shadows the outer feed_dict name `d`.)
            nonempty_data = tuple(filter(lambda d: len(d) > 0, data))
            if len(nonempty_data) == 0: flattened = self.empty_input
            else: flattened = np.concatenate(nonempty_data)
            d[self.data] = flattened
class MergedSegments(Segments):
    """Element-wise merge of several Segments objects of equal segment count.

    Segment i of the result is the concatenation of segment i from every
    input, in list order; `merge_data` interleaves the flat data tensors
    to match.
    """
    def __init__(self, segments_list, nonzero = False):
        if not nonzero:
            # The merge is guaranteed non-empty only if every input is.
            for segments in segments_list:
                if not segments.nonzero_guarantee:
                    break
            else: nonzero = True
        with tf.name_scope(None, "merged_segments") as scope:
            # [num_segments, num_inputs]: lengths side by side per segment.
            merged_lens = tf.stack([segments.lens for segments in segments_list], axis = 1)
            Segments.__init__(self, tf.reduce_sum(merged_lens, axis = 1), nonzero = nonzero)
            # Cumsum over the row-major flattened lens gives, per (segment,
            # input), the destination offset of that piece in merged data.
            offsets_list = tf.unstack(
                tf.reshape(
                    tf.cumsum(
                        tf.reshape(merged_lens, [-1]),
                        exclusive = True
                    ),
                    [-1, len(segments_list)],
                ),
                axis = 1
            )
            # For each input: destination index of every one of its data
            # elements, consumed later by tf.dynamic_stitch in merge_data.
            self.stitch_indices = [
                segments.fill(offsets-segments.start_indices)
                + tf.range(segments.data_len)
                for segments, offsets in zip(segments_list, offsets_list)
            ]
    def merge_data(self, data_list):
        """Interleave the inputs' flat data tensors into merged order."""
        with tf.name_scope(None, "merge_data") as scope:
            return tf.dynamic_stitch(self.stitch_indices, data_list)
def merge_segments(seg_data_list, nonzero = False):
    """Merge (segments, data) pairs into one (MergedSegments, data) pair."""
    structures = [pair[0] for pair in seg_data_list]
    tensors = [pair[1] for pair in seg_data_list]
    merged = MergedSegments(structures, nonzero)
    merged_data = merged.merge_data(tensors)
    return merged, merged_data
def unpartition_segments(seg_data_list, partitions):
    """Inverse of Segments.partition_segments.

    Reassembles several (segments, data) parts into one Segments/data pair;
    `partitions[i]` names which part the i-th output segment comes from.
    Parts are consumed in order within each partition id.
    """
    with tf.name_scope(None, "unpartition_segments") as scope:
        segments_list, data_list = zip(*seg_data_list)
        segment_num = 0
        data_len = 0
        nonzero = True
        for s in segments_list:
            segment_num = segment_num + s.segment_num
            data_len = data_len + s.data_len
            # The result can only guarantee non-emptiness if all parts do.
            if not s.nonzero_guarantee: nonzero = False
        # Destination segment positions for each part's lens...
        lens_stitch_indices = tf.dynamic_partition(
            tf.range(segment_num),
            partitions,
            len(seg_data_list),
        )
        # ...stitched back into one lengths tensor.
        segments = Segments(tf.dynamic_stitch(
            lens_stitch_indices,
            [ s.lens for s in segments_list ],
        ), nonzero = nonzero)
        # Same trick element-wise: broadcast partition ids to data positions,
        # partition the destination indices, then stitch the data tensors.
        data_stitch_indices = tf.dynamic_partition(
            tf.range(data_len),
            segments.fill(partitions),
            len(seg_data_list),
        )
        data = tf.dynamic_stitch(
            data_stitch_indices,
            data_list,
        )
        return segments, data
| [
"bartoszpiotrowski@post.pl"
] | bartoszpiotrowski@post.pl |
2a897f6362fbdff9b1df3a593a276ff405e2436c | b2e1d96c0551b6b31ef85353f9b6e5b6354d64e8 | /datafaucet/spark/data.py | b701af9a518224f407232a2f190558970479dda2 | [
"MIT"
] | permissive | SylarCS/datafaucet-1 | 8bd7b96cecc5592e153b61367892e2a63a96119d | a63074ba1fb1a6d15f06e2bfff05df754aaaa452 | refs/heads/master | 2020-09-15T06:04:31.999012 | 2019-11-18T20:00:55 | 2019-11-18T20:00:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from pyspark.sql import DataFrame
from datafaucet.data import _Data
class Data(_Data):
    """Accessor exposing a small pandas sample of a pyspark DataFrame."""

    def collect(self, n=1000, axis=0):
        """Return up to `n` rows of the selected columns as a pandas
        DataFrame; transposed when `axis` is truthy."""
        sample = self.df.select(self.columns).limit(n).toPandas()
        if axis:
            return sample.T
        return sample
def _data(self):
    # Factory for the property below: wrap the pyspark DataFrame in Data.
    return Data(self)

# Monkey-patch: expose the accessor as `df.data` on every pyspark DataFrame.
DataFrame.data = property(_data)
| [
"natalino.busa@gmail.com"
] | natalino.busa@gmail.com |
f52739e9263506b2a11cd2811dcdee2f33f93112 | d2cc4c74652edcc1e877e47ae549a7a99be70bf8 | /DEMscripts/dem_headwall_medial_axis.py | 5247e5c6bbbaa3c4253dcb6451f85071606ba29a | [
"MIT"
] | permissive | sun1753814280/rs_data_proc | 2bc87812650d394af38e42d6f628e06db425fe53 | c815dccc940034ad20bbdefaf2ca478f65db47ee | refs/heads/main | 2023-08-25T01:28:19.608056 | 2021-11-10T18:29:20 | 2021-11-10T18:29:20 | 394,591,797 | 0 | 0 | MIT | 2021-08-10T09:12:26 | 2021-08-10T09:12:25 | null | UTF-8 | Python | false | false | 40,094 | py | #!/usr/bin/env python
# Filename: dem_headwall_medial_axis.py
"""
introduction: try to extract headwall based on skimage.morphology.medial_axis
authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 26 August, 2021
"""
import os,sys
from optparse import OptionParser
import time
from skimage import measure
from skimage import morphology
import numpy as np
import pandas as pd
deeplabforRS = os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS')
sys.path.insert(0, deeplabforRS)
import vector_gpd
import raster_io
import rasterio
from rasterio.features import rasterize
import math
import cv2
import raster_statistic
import basic_src.io_function as io_function
import basic_src.map_projection as map_projection
import basic_src.basic as basic
###################################################################################################
# copy the following codes from:
# https://github.com/gabyx/WormAnalysis/blob/master/SkeletonTest/Skeletonize.ipynb
# and modified by Lingcao Huang
import collections
import itertools
import networkx as nx
class Vertex:
    """Graph node anchored at a 2-D pixel coordinate of a skeleton image."""

    def __init__(self, point, degree=0, edges=None):
        # pixel coordinate (row, col) stored as a numpy array
        self.point = np.asarray(point)
        self.degree = degree
        # fresh list per instance; never share a mutable default
        self.edges = [] if edges is None else edges
        self.visited = False

    def __str__(self):
        return str(self.point)
class Edge:
    """Graph edge between two Vertex endpoints, with its chain of pixels."""

    def __init__(self, start, end=None, pixels=None):
        self.start = start
        self.end = end
        # fresh list per instance; never share a mutable default
        self.pixels = [] if pixels is None else pixels
        self.visited = False
def buildTree(img, start=None,weight_img=None):
    """
    Trace the white (truthy) pixels of a skeleton image into networkx graphs.

    Pixels are visited breadth-first with 8-connectivity; every white pixel
    becomes exactly one Vertex (it is blacked out as soon as it is queued),
    and adjacent pixels are linked by an Edge whose 'weight' attribute is
    taken from weight_img at the *current* pixel.  One graph is produced per
    connected component of the skeleton.

    :param img: 2-D array; truthy pixels are skeleton, falsy are background
    :param start: optional (row, col) seed; if None, the first white pixel
                  found is used (and re-determined for every component)
    :param weight_img: per-pixel edge weights; defaults to all ones
    :return: (list of networkx graphs, the blacked-out copy of img)
    """
    # copy image since we set visited pixels to black
    img = img.copy()
    shape = img.shape
    nWhitePixels = np.sum(img)
    if weight_img is None:
        weight_img = np.ones_like(img)

    # neighbor offsets (8 nbors)
    nbPxOff = np.array([[-1, -1], [-1, 0], [-1, 1],
                        [0, -1], [0, 1],
                        [1, -1], [1, 0], [1, 1]
                        ])
    queue = collections.deque()

    # a list of all graphs extracted from the skeleton
    graphs = []

    blackedPixels = 0
    # we build our graph as long as we have not blacked all white pixels!
    while nWhitePixels != blackedPixels:

        # if start not given: determine the first white pixel
        if start is None:
            it = np.nditer(img, flags=['multi_index'])
            while not it[0]:
                it.iternext()
            start = it.multi_index

        startV = Vertex(start)
        queue.append(startV)

        # set start pixel to False (visited)
        img[startV.point[0], startV.point[1]] = False
        blackedPixels += 1

        # create a new graph for this connected component
        G = nx.Graph()
        G.add_node(startV)

        # build graph in a breath-first manner by adding
        # new nodes to the right and popping handled nodes to the left in queue
        while len(queue):
            currV = queue[0]  # get current vertex

            # check all neigboor pixels
            for nbOff in nbPxOff:

                # pixel index
                pxIdx = currV.point + nbOff

                if (pxIdx[0] < 0 or pxIdx[0] >= shape[0]) or (pxIdx[1] < 0 or pxIdx[1] >= shape[1]):
                    continue  # current neigbor pixel out of image

                if img[pxIdx[0], pxIdx[1]]:
                    # pixel is white: make it a vertex and link it
                    newV = Vertex([pxIdx[0], pxIdx[1]])

                    # add edge from currV <-> newV; weight comes from the
                    # current pixel of the weight image
                    G.add_edge(currV, newV, object=Edge(currV, newV),weight=weight_img[currV.point[0], currV.point[1]])

                    # add node newV
                    G.add_node(newV)

                    # push vertex to queue
                    queue.append(newV)

                    # black out the neighbor so it is visited exactly once
                    img[pxIdx[0], pxIdx[1]] = False
                    blackedPixels += 1

            # pop currV
            queue.popleft()
        # end while

        # current graph is finished ->store it
        graphs.append(G)

        # reset start so the next component finds its own seed pixel
        start = None

    # end while

    return graphs, img
def getEndNodes(g):
    """Return the end nodes (degree == 1 vertices) of graph g."""
    end_nodes = []
    for node in nx.nodes(g):
        if nx.degree(g, node) == 1:
            end_nodes.append(node)
    return end_nodes
def getLongestPath(graph, endNodes):
    """
    Find the maximum-weight shortest path between any pair of end nodes.

    graph must be fully reachable (every node can be reached from every
    node), so a dijkstra path exists between any two end nodes.

    :param graph: networkx graph whose edges carry a 'weight' attribute
    :param endNodes: degree-1 nodes (line end points); at least 2 required
    :return: (node list of the longest path, its weighted length)
    :raises ValueError: if fewer than 2 end nodes are given
    """
    if len(endNodes) < 2:
        raise ValueError("endNodes need to contain at least 2 nodes!")

    # get all shortest paths from each endpoint to another endpoint
    allEndPointsComb = itertools.combinations(endNodes, 2)

    # bug fix: start below any valid length. Dijkstra weights are
    # non-negative, so with the old initialisation (maxLength = 0) a graph
    # whose path lengths were all 0 (e.g. all-zero weights) left maxPath as
    # None and raised "No path found!" although paths exist.
    maxLength = -1
    maxPath = None

    for ePoints in allEndPointsComb:

        # get shortest path length for this end point pair
        # dijkstra can throw if no path, but we are sure we have a path
        sL = nx.dijkstra_path_length(graph,
                                     source=ePoints[0],
                                     target=ePoints[1],
                                     weight='weight')

        # store maximum
        if (sL > maxLength):
            maxPath = ePoints
            maxLength = sL

    if maxPath is None:
        # defensive: unreachable with >= 2 end nodes and non-negative weights
        raise ValueError("No path found!")

    return nx.dijkstra_path(graph,
                            source=maxPath[0],
                            target=maxPath[1]), maxLength
##########################################################################################
def slope_tif_to_slope_bin(slope_tif,slope_bin_path,slope_threshold):
    """Threshold a slope raster into a 0/255 binary raster (nodata = 0).

    Pixels steeper than slope_threshold become 255; slopes above 88 degrees
    are treated as artifacts and dropped.  Returns slope_bin_path, or None
    when writing the raster fails.
    """
    if os.path.isfile(slope_bin_path):
        print('%s exist'%slope_bin_path)
        return slope_bin_path

    slope_data, nodata = raster_io.read_raster_one_band_np(slope_tif)
    steep_mask = np.zeros_like(slope_data, dtype=np.uint8)
    steep_mask[slope_data > slope_threshold] = 1
    # if slope is too large, it may be caused by artifacts, so remove them
    steep_mask[slope_data > 88] = 0

    # save with 0 as nodata
    saved = raster_io.save_numpy_array_to_rasterfile(steep_mask * 255, slope_bin_path, slope_tif,
                                                     nodata=0, compress='lzw', tiled='yes',
                                                     bigtiff='if_safer')
    if saved is not True:
        return None
    return slope_bin_path
def slope_bin_to_medial_axis_raster(in_image_path, out_image_path):
    """Compute the medial axis of a binary slope raster and save it.

    Also writes a companion *_dist raster holding the distance-to-background
    on medial-axis pixels (zero elsewhere).  Returns out_image_path.
    """
    if os.path.isfile(out_image_path):
        print('%s exists, skip slope_bin_to_medial_axis_raster'%out_image_path)
        return out_image_path

    image_np, nodata = raster_io.read_raster_one_band_np(in_image_path)
    axis_np, dist_np = morphology.medial_axis(image_np, mask=None, return_distance=True)

    # bool -> uint8 for saving
    axis_np = axis_np.astype(np.uint8)
    # keep distances only on skeleton pixels
    dist_np = (dist_np * axis_np).astype(np.float32)

    raster_io.save_numpy_array_to_rasterfile(axis_np, out_image_path, in_image_path,
                                             compress='lzw', tiled='yes', bigtiff='if_safer')

    dist_path = io_function.get_name_by_adding_tail(out_image_path,'dist')
    raster_io.save_numpy_array_to_rasterfile(dist_np, dist_path, in_image_path,
                                             compress='lzw', tiled='yes', bigtiff='if_safer')
    return out_image_path
def slope_bin_to_skeleton(in_image_path, out_image_path):
    """Skeletonize a binary slope raster and save the result.

    Note: the output is similar to the medial axis and was found not useful.
    Returns out_image_path.
    """
    if os.path.isfile(out_image_path):
        print('%s exists, skip slope_bin_to_skeleton'%out_image_path)
        return out_image_path

    image_np, nodata = raster_io.read_raster_one_band_np(in_image_path)
    # skeletonize expects values in {0, 1}
    image_np[image_np>1] = 1
    skeleton_np = morphology.skeletonize(image_np).astype(np.uint8)

    raster_io.save_numpy_array_to_rasterfile(skeleton_np, out_image_path, in_image_path,
                                             compress='lzw', tiled='yes', bigtiff='if_safer')
    return out_image_path
def medial_axis_raster_to_vector(in_medial_axis_tif, medial_axis_dist_tif,out_vector_shp, raster_res=2):
    '''
    convert a medial-axis raster (1-pixel-wide lines) to polygons, buffer them
    slightly, and attach attributes: area, length, pixel count, hole count and
    the maximum axis width estimated from the distance raster.
    :param in_medial_axis_tif: medial axis raster (1 on axis, 0 elsewhere)
    :param medial_axis_dist_tif: raster of distance-to-background on axis pixels
    :param out_vector_shp: output shapefile of the raw (un-buffered) polygons
    :param raster_res: raster resolution in meters (2 for 2-m ArcticDEM)
    :return: path of the buffered shapefile with attributes, or None on failure
    '''
    if vector_gpd.raster2shapefile(in_medial_axis_tif,out_shp=out_vector_shp,connect8=True) is None:
        return None
    vector_shp_buff = io_function.get_name_by_adding_tail(out_vector_shp,'buff')
    if os.path.isfile(vector_shp_buff):
        print('%s exists, skip buffering'%vector_shp_buff)
        return vector_shp_buff

    polys = vector_gpd.read_polygons_gpd(out_vector_shp,b_fix_invalid_polygon=False)
    if len(polys) < 1:
        basic.outputlogMessage('warning, no medial_axis in %s'%in_medial_axis_tif)
        return None
    # calculate the attributes before buffering
    # from the area, we can tell how many pixels are in each line segment; each pixel covers raster_res^2 m^2
    id_list = [item for item in range(len(polys))]
    area_list = [ item.area for item in polys]
    length_list = [ item.length for item in polys]
    pixel_size = raster_res*raster_res
    pixel_count_list = [ item/pixel_size for item in area_list]

    # buffer by 0.01 meters, so the polygons (~2.02 m wide for 2-m ArcticDEM) still represent one pixel width
    polys_buff = [item.buffer(0.01) for item in polys]

    # after buffering, a complex medial axis may produce holes in the polygons; count them
    hole_count_list =[len(list(item.interiors)) for item in polys_buff]

    # save, overwrite out_vector_shp
    wkt = map_projection.get_raster_or_vector_srs_info_proj4(out_vector_shp)
    save_pd = pd.DataFrame({'id':id_list,'area':area_list,'length':length_list,'pixels':pixel_count_list,
                            'holes':hole_count_list,'Polygon':polys_buff})
    vector_gpd.save_polygons_to_files(save_pd, 'Polygon', wkt, vector_shp_buff)

    # get the width based on the medial axis: max distance-to-background inside each polygon
    raster_statistic.zonal_stats_multiRasters(vector_shp_buff,medial_axis_dist_tif,nodata=0,stats = ['max'],prefix='axisR',all_touched=False)
    # convert pixel radius to a width in meters (width = 2 * radius * resolution)
    width_max_list = vector_gpd.read_attribute_values_list(vector_shp_buff,'axisR_max')
    width_max_list = [ item*raster_res*2 for item in width_max_list]
    attributes = {'width_max':width_max_list}
    vector_gpd.add_attributes_to_shp(vector_shp_buff,attributes)

    return vector_shp_buff
def remove_based_on_length_pixel(medial_axis_shp,min_length, max_length, max_axis_width,wkt, rm_length_shp):
    """Keep medial-axis polygons whose pixel length lies in
    [min_length, max_length] and whose maximum width does not exceed
    max_axis_width; save the kept subset to rm_length_shp.

    Returns rm_length_shp, or False when nothing remains.
    (wkt is unused but kept for interface compatibility.)
    """
    polygons, attributes = vector_gpd.read_polygons_attributes_list(
        medial_axis_shp, ['pixels','width_max'], b_fix_invalid_polygon=False)
    pixel_lengths = attributes[0]
    max_widths = attributes[1]

    keep_idx = []
    rm_by_length = 0
    rm_by_width = 0
    for idx, (length, width) in enumerate(zip(pixel_lengths, max_widths)):
        # drop too long / too short axes, then overly wide (non-headwall) ones
        if length > max_length or length < min_length:
            rm_by_length += 1
        elif width > max_axis_width:
            rm_by_width += 1
        else:
            keep_idx.append(idx)

    basic.outputlogMessage('remove %d polygons based on length in pixel, %d polygon based on max width, remain %d ones saving to %s' %
                           (rm_by_length, rm_by_width, len(keep_idx), rm_length_shp))

    if len(keep_idx) < 1:
        return False

    vector_gpd.save_shapefile_subset_as(keep_idx, medial_axis_shp, rm_length_shp)
    return rm_length_shp
def remove_based_on_hole(medial_axis_shp, max_hole,wkt, rm_hole_shp):
    """Drop medial-axis polygons containing more than max_hole holes
    (too many holes indicates a complex shape, not a headwall) and save the
    remainder to rm_hole_shp.

    Returns rm_hole_shp, or False when nothing remains.
    (wkt is unused but kept for interface compatibility.)
    """
    polygons, holes_count = vector_gpd.read_polygons_attributes_list(
        medial_axis_shp, 'holes', b_fix_invalid_polygon=False)

    keep_idx = [idx for idx, n_holes in enumerate(holes_count) if n_holes <= max_hole]
    removed = len(holes_count) - len(keep_idx)

    basic.outputlogMessage('remove %d polygons based on holes, remain %d ones saving to %s' %
                           (removed, len(keep_idx), rm_hole_shp))

    if len(keep_idx) < 1:
        return False

    vector_gpd.save_shapefile_subset_as(keep_idx, medial_axis_shp, rm_hole_shp)
    return rm_hole_shp
def remove_based_on_slope_area(slope_bin_path, min_slope_area_size,max_slope_area_size):
    '''
    remove connected steep-slope regions based on their area, e.g. steep
    slopes in a valley or on a mountain ridge that are too large (or too
    tiny) to be headwalls.
    :param slope_bin_path: binary slope raster (0 background, 255 steep slope)
    :param min_slope_area_size: minimum region area to keep, in m^2 (holes included)
    :param max_slope_area_size: maximum region area to keep, in m^2 (holes included)
    :return: path of the edited binary raster (existing file returned as-is)
    '''
    slope_bin_edit_path = io_function.get_name_by_adding_tail(slope_bin_path,'edit')
    if os.path.isfile(slope_bin_edit_path):
        print('%s exists, skip remove_based_on_slope_area'%slope_bin_edit_path)
        return slope_bin_edit_path

    # NOTE: an earlier approach polygonized the raster, filtered the polygons
    # by area, filled their holes, and burned them back.  It was abandoned
    # because polygonizing a large, complex slope_bin raster could take more
    # than 12 hours.  The raster is edited directly with skimage instead.
    resx, resy = raster_io.get_xres_yres_file(slope_bin_path)
    slope_bin_np,nodata = raster_io.read_raster_one_band_np(slope_bin_path)

    # set background, label 0 will be ignored
    labels = measure.label(slope_bin_np,background=nodata,connectivity=2)  # 2-connectivity, 8 neighbours

    # get regions
    regions = measure.regionprops(labels)

    # based on each region, to edit raster
    edit_slope_np = np.zeros_like(slope_bin_np)
    remove_count = 0
    remain_regions = []
    for idx, reg in enumerate(regions):
        # area = reg.area * resx * resy # pixel to m^2
        # filled_area also counts hole pixels, so holes don't shrink a region below min size
        area = reg.filled_area * resx * resy # pixel to m^2  filled_area or area_filled
        if area > max_slope_area_size or area < min_slope_area_size:
            remove_count += 1
            continue
        remain_regions.append(reg)
    basic.outputlogMessage('remove %d large or tiny slope areas based on area, remain %d ones' % (remove_count, len(remain_regions)))

    # write the kept regions back, with their holes filled (makes the later
    # medial axis / skeleton extraction easier)
    for idx, reg in enumerate(remain_regions):
        # bbox: (min_row, min_col, max_row, max_col); box pixels lie in the
        # half-open intervals [min_row, max_row) and [min_col, max_col)
        bbox = reg.bbox
        # filled_image is the region mask (holes filled) relative to the bbox
        loc = np.where(reg.filled_image==True)
        row = loc[0] + bbox[0]
        col = loc[1] + bbox[1]
        edit_slope_np[row, col] = 255

    raster_io.save_numpy_array_to_rasterfile(edit_slope_np,slope_bin_edit_path,slope_bin_path,compress='lzw',tiled='yes',bigtiff='if_safer')
    return slope_bin_edit_path
# def calculate_remove_based_on_line_segments(medial_axis_shp, max_line_segments,wkt, rm_line_segment_shp):
#
# # note: eventually, this is the same to hole count and does not provide new information.
# # so it's not necessary to use it.
#
# # calculate the number of line segments
# polygons = vector_gpd.read_polygons_gpd(medial_axis_shp,b_fix_invalid_polygon=False)
# line_segments_list = []
# for idx, poly in enumerate(polygons):
# # out_line = poly.exterior
# in_lines = list(poly.interiors)
# count = 1 + len(in_lines)
# line_segments_list.append(count)
# add_attributes = {'lines':line_segments_list}
# vector_gpd.add_attributes_to_shp(medial_axis_shp,add_attributes)
#
# # remove based on the number of line segments
# remain_polygons_idx = []
# # remove relative large but narrow ones.
# remove_count = 0
# for idx, lines in enumerate(line_segments_list):
# # remove too long or too short ones
# if lines > max_line_segments:
# remove_count += 1
# continue
# remain_polygons_idx.append(idx)
#
# basic.outputlogMessage('remove %d polygons based on the count of line segments, remain %d ones saving to %s' %
# (remove_count, len(remain_polygons_idx), rm_line_segment_shp))
#
# if len(remain_polygons_idx) < 1:
# return False
#
# vector_gpd.save_shapefile_subset_as(remain_polygons_idx,medial_axis_shp,rm_line_segment_shp)
#
# return rm_line_segment_shp
def calculate_line_segment_polygon_pixels(polygon, raster_res=2):
    '''
    rasterize a 1-pixel-wide medial-axis polygon, build a pixel graph, remove
    the longest path, and return how many pixels remain on the other
    (unwanted) line segments.
    :param polygon: a buffered medial-axis polygon (one pixel wide)
    :param raster_res: raster resolution in meters
    :return: count of pixels not on the longest path (isolated pixels excluded)
    '''
    # rasterize the polygon to pixels (original from medial axis, width is one pixel)
    # based on the pixels, find the longest line segment, and other segments.
    minx, miny, maxx, maxy = vector_gpd.get_polygon_bounding_box(polygon)
    poly_transform = rasterio.transform.from_origin(minx,maxy,raster_res,raster_res)
    height = math.ceil((maxy - miny)/raster_res)
    width = math.ceil((maxx - minx)/raster_res)
    save_dtype = rasterio.uint8
    burn_out = np.zeros((height, width))
    # rasterize the shapes
    burn_shapes = [(item_shape, item_int) for (item_shape, item_int) in zip([polygon], [1])]
    out_label = rasterize(burn_shapes, out=burn_out, transform=poly_transform,
                          fill=0, all_touched=False, dtype=save_dtype)

    graphs, imgB = buildTree(out_label)
    if len(graphs) != 1:
        raise ValueError('should only have one graph')
    line_graph = graphs[0]
    longest_path,maxLength = getLongestPath(line_graph,getEndNodes(line_graph))

    # remove longest path
    line_graph.remove_nodes_from(longest_path)

    # remove isolated nodes (left behind once the longest path is cut out)
    remove_list = []
    for idx,node in enumerate(line_graph.nodes):
        neighbours = [n for n in line_graph[node]]
        if len(neighbours) < 1:
            remove_list.append(node)
    line_graph.remove_nodes_from(remove_list)

    # remaining nodes are pixels on unwanted (non-main) line segments
    remain_pixel_count = len(line_graph.nodes)
    return remain_pixel_count

    # NOTE: earlier approaches were abandoned and their code removed:
    # (1) counting end points via a 3x3 convolution (cv2.filter2D) to derive
    #     the number of line segments -- the end-point idea does not always
    #     work; cross points joining several segments break it;
    # (2) iteratively trimming one-pixel line segments by deleting end points.
def get_longest_line_one_polygon(polygon, distance_np, dist_raster_transform, raster_res=2):
    '''
    get the (weighted) longest line inside a 1-pixel-wide medial-axis polygon
    :param polygon: a buffered medial-axis polygon
    :param distance_np: distance raster (edge weights) covering the whole image
    :param dist_raster_transform: affine transform of the distance raster
    :param raster_res: raster resolution in meters
    :return: a LineString of the longest path, in map coordinates
    '''
    # rasterize the polygon into its own small bounding-box grid
    minx, miny, maxx, maxy = vector_gpd.get_polygon_bounding_box(polygon)
    poly_transform = rasterio.transform.from_origin(minx,maxy,raster_res,raster_res)
    height = math.ceil((maxy - miny)/raster_res)
    width = math.ceil((maxx - minx)/raster_res)
    save_dtype = rasterio.uint8
    burn_out = np.zeros((height, width))
    # rasterize the shapes
    burn_shapes = [(item_shape, item_int) for (item_shape, item_int) in zip([polygon], [1])]
    out_label = rasterize(burn_shapes, out=burn_out, transform=poly_transform,
                          fill=0, all_touched=False, dtype=save_dtype)

    # locate the polygon's bounding box inside the full distance raster
    xs = [minx,maxx]
    ys = [maxy,miny]    # maxy (uppper left), miny (lower right)
    rows, cols = rasterio.transform.rowcol(dist_raster_transform,xs,ys)
    # there is one pixel offset
    rows = [ item+1 for item in rows]
    cols = [ item+1 for item in cols]
    # crop the weights to the polygon's window
    weight_np = distance_np[rows[0]:rows[1], cols[0]:cols[1]]

    graphs, imgB = buildTree(out_label,weight_img=weight_np)
    if len(graphs) != 1:
        raise ValueError('should only have one graph')
    line_graph = graphs[0]
    longest_path,maxLength = getLongestPath(line_graph,getEndNodes(line_graph))

    #Vertex to point list
    point_list = [ (item.point[0],item.point[1]) for item in longest_path]
    # convert pixel (row, col) within the crop to (row, col) of the entire image
    line_rows = [item[0] + rows[0] for item in point_list]
    line_cols = [item[1] + cols[0] for item in point_list]
    # rasterio.transform.xy returns the x and y coordinates of pixels at rows/cols;
    # the pixel's center is returned by default (offset='center')
    line_xs, line_ys = rasterio.transform.xy(dist_raster_transform,line_rows,line_cols,offset='center')
    point_list = [ (x,y) for x,y in zip(line_xs, line_ys)]

    # save the longest_path as a LineString
    main_line = vector_gpd.points_to_LineString(point_list)
    return main_line
def get_main_longest_line_segments(medial_axis_shp,medial_axis_dist_tif, wkt, main_line_segment_shp,raster_res=2):
    '''
    calculate the main (weighted longest) line segment of each medial-axis
    polygon and save them all to a line shapefile.
    :param medial_axis_shp: shapefile of buffered medial-axis polygons
    :param medial_axis_dist_tif: distance raster used as edge weights
    :param wkt: projection (WKT) of the output shapefile
    :param main_line_segment_shp: output line shapefile
    :param raster_res: raster resolution in meters
    :return: main_line_segment_shp
    '''
    polygons = vector_gpd.read_polygons_gpd(medial_axis_shp, b_fix_invalid_polygon=False)
    distance_np, nodata = raster_io.read_raster_one_band_np(medial_axis_dist_tif)
    dist_transform = raster_io.get_transform_from_file(medial_axis_dist_tif)

    # one weighted-longest line per polygon
    main_lines = [get_longest_line_one_polygon(poly, distance_np, dist_transform, raster_res=raster_res)
                  for poly in polygons]
    ids = list(range(len(main_lines)))
    # length in meters
    lengths_m = [line.length for line in main_lines]

    save_pd = pd.DataFrame({'id': ids, 'length_m': lengths_m, 'Line': main_lines})
    vector_gpd.save_lines_to_files(save_pd, 'Line', wkt, main_line_segment_shp)
    return main_line_segment_shp
def calculate_remove_based_on_pixel_line_segments(medial_axis_shp,wkt, rm_line_segment_shp,raster_res=2,max_unwant_line_pixel=10):
    '''
    rasterize each polygon again, count pixels NOT on its longest line
    segment, store that count as attribute 'unwant_pi', and drop polygons
    whose count exceeds max_unwant_line_pixel.
    :param medial_axis_shp: shapefile of buffered medial-axis polygons
    :param wkt: projection (unused; kept for interface compatibility)
    :param rm_line_segment_shp: output shapefile of kept polygons
    :param raster_res: raster resolution in meters
    :param max_unwant_line_pixel: max pixels allowed off the main segment
    :return: rm_line_segment_shp, or False when nothing remains
    '''
    polygons = vector_gpd.read_polygons_gpd(medial_axis_shp, b_fix_invalid_polygon=False)
    unwanted_counts = [calculate_line_segment_polygon_pixels(poly, raster_res=raster_res)
                       for poly in polygons]

    # record the counts as a shapefile attribute
    vector_gpd.add_attributes_to_shp(medial_axis_shp, {'unwant_pi': unwanted_counts})

    keep_idx = [idx for idx, count in enumerate(unwanted_counts) if count <= max_unwant_line_pixel]
    removed = len(unwanted_counts) - len(keep_idx)

    basic.outputlogMessage('remove %d polygons based on the count of unwanted line pixels, remain %d ones saving to %s' %
                           (removed, len(keep_idx), rm_line_segment_shp))

    if len(keep_idx) < 1:
        return False

    vector_gpd.save_shapefile_subset_as(keep_idx, medial_axis_shp, rm_line_segment_shp)
    return rm_line_segment_shp
# def get_main_medial_axis_raster(medial_axis_tif, medial_axis_dist_tif, out_main_medial_axis_tif):
# ## build graphs from the entire imagery is time-consuming, cancel this idea.
#
# if os.path.isfile(out_main_medial_axis_tif):
# print('%s exists, skip get_main_medial_axis_raster')
# return out_main_medial_axis_tif
#
# medial_axis_np,nodata = raster_io.read_raster_one_band_np(medial_axis_tif)
# medial_axis_dist_np,nodta2 = raster_io.read_raster_one_band_np(medial_axis_dist_tif)
#
# graphs, imgB = buildTree(medial_axis_np,weight_img=medial_axis_dist_np)
# print('print, built %d graphs'%len(graphs))
#
# for idx, graph in enumerate(graphs):
# longest_path, maxLength = getLongestPath(graph, getEndNodes(graph))
# print('note of longest_path and length',len(longest_path), maxLength)
def extract_headwall_based_medial_axis_from_slope(idx, total, slope_tif, work_dir, save_dir,slope_threshold, min_area_size, max_area_size,
                                                  min_length, max_length, max_hole_count,max_axis_width,process_num):
    '''
    extract headwall lines from a slope raster based on the medial axis
    (skimage.morphology.medial_axis) of steep-slope regions.
    :param idx: tif index (for progress printing)
    :param total: total slope file count
    :param slope_tif: slope file
    :param work_dir: directory for intermediate files
    :param save_dir: directory for the final headwall shapefile
    :param slope_threshold: slope threshold for the binary steep-slope mask
    :param min_area_size: minimum steep-slope region area to keep (m^2)
    :param max_area_size: maximum steep-slope region area to keep (m^2)
    :param min_length: min length; the medial axis from skimage.morphology is one
                       pixel wide, so length is the pixel count of a line segment
    :param max_length: max length (pixel count)
    :param max_hole_count: max holes; complex line segments may enclose holes
                           when buffered into polygons
    :param max_axis_width: maximum axis width in meters
    :param process_num: number of processes (currently unused here; kept for API compatibility)
    :return: path of the saved headwall shapefile, or False on failure
    '''
    headwall_shp = os.path.splitext(os.path.basename(io_function.get_name_by_adding_tail(slope_tif, 'headwall')))[0] + '.shp'
    save_headwall_shp = os.path.join(save_dir, headwall_shp)
    if os.path.isfile(save_headwall_shp):
        print('%s exists, skip' % save_headwall_shp)
        return save_headwall_shp

    print('(%d/%d) extracting headwall from %s' % (idx, total, slope_tif))
    wkt = map_projection.get_raster_or_vector_srs_info_wkt(slope_tif)

    # binary slope
    slope_bin_path = os.path.join(work_dir, os.path.basename(io_function.get_name_by_adding_tail(slope_tif, 'bin')))
    if slope_tif_to_slope_bin(slope_tif, slope_bin_path, slope_threshold) is None:
        return False

    # remove some large and small slope areas and save to slope_bin_edit_path
    slope_bin_edit_path = remove_based_on_slope_area(slope_bin_path, min_area_size, max_area_size)
    if slope_bin_edit_path is False:
        return False

    # get medial axis raster (a *_dist raster is written alongside it)
    medial_axis_tif = io_function.get_name_by_adding_tail(slope_bin_edit_path, 'medial_axis')
    if slope_bin_to_medial_axis_raster(slope_bin_edit_path, medial_axis_tif) is None:
        return False
    medial_axis_dist_tif = io_function.get_name_by_adding_tail(medial_axis_tif, 'dist')
    if os.path.isfile(medial_axis_dist_tif) is False:
        medial_axis_dist_tif = None

    # get medial axis vector (polygons with the width of one pixel)
    medial_axis_poly_shp = os.path.join(work_dir, io_function.get_name_no_ext(medial_axis_tif) + '_poly.shp')
    medial_axis_poly_shp_buff = medial_axis_raster_to_vector(medial_axis_tif, medial_axis_dist_tif, medial_axis_poly_shp)
    if medial_axis_poly_shp_buff is None:
        return False

    # only keep line segments that are neither too long, too short, nor too wide
    rm_length_shp = io_function.get_name_by_adding_tail(medial_axis_poly_shp_buff, 'rmLength')
    if os.path.isfile(rm_length_shp):
        print('%s exists, skip removing based on length in pixels' % rm_length_shp)
    else:
        if remove_based_on_length_pixel(medial_axis_poly_shp_buff, min_length, max_length, max_axis_width, wkt, rm_length_shp) is False:
            return False

    # remove based on hole count: too many holes -> not a headwall
    rm_hole_shp = io_function.get_name_by_adding_tail(rm_length_shp, 'rmHole')
    if os.path.isfile(rm_hole_shp):
        print('%s exists, skip removing based holes' % rm_hole_shp)
    else:
        if remove_based_on_hole(rm_length_shp, max_hole_count, wkt, rm_hole_shp) is False:
            return False

    # NOTE: filtering on the count of line segments was tried and abandoned:
    # it duplicates the hole-count information, and headwalls of thaw slumps
    # may legitimately contain many line segments.

    # extract the main (weighted longest) line of each remaining medial axis
    main_line_shp = io_function.get_name_by_adding_tail(rm_hole_shp, 'mainLine')
    if os.path.isfile(main_line_shp):
        print('%s exists, skip getting main line' % main_line_shp)
    else:
        get_main_longest_line_segments(rm_hole_shp, medial_axis_dist_tif, wkt, main_line_shp, raster_res=2)

    # copy the results
    io_function.copy_shape_file(main_line_shp, save_headwall_shp)
    # bug fix: return the output path on success; the function previously fell
    # through returning None, inconsistent with the early-exit branch above.
    return save_headwall_shp
def test_slope_bin_to_medial_axis():
# data_dir = os.path.expanduser('~/Data/dem_processing/headwall_shp_sub_6174/20080511_dem_slope')
# slope_bin_path = os.path.join(data_dir, '20080511_dem_slope_bin.tif' )
# medial_axis_tif = os.path.join(data_dir, '20080511_dem_slope_bin_medial_axis.tif')
data_dir = os.path.expanduser('~/Data/dem_processing/grid_9274_tmp_files')
slope_bin_path = os.path.join(data_dir, '20150507_dem_slope_bin.tif')
medial_axis_tif = os.path.join(data_dir, '20150507_dem_slope_bin_medial_axis.tif')
medial_axis_tif = slope_bin_to_medial_axis_raster(slope_bin_path,medial_axis_tif)
medial_axis_poly_shp = os.path.join(data_dir, '20080511_dem_slope_bin_medial_axis_poly.shp')
medial_axis_raster_to_vector(medial_axis_tif, medial_axis_poly_shp)
def test_slope_bin_to_skeleton():
data_dir = os.path.expanduser('~/Data/dem_processing/grid_9274_tmp_files')
slope_bin_path = os.path.join(data_dir, '20150507_dem_slope_bin.tif')
skeleton_path = os.path.join(data_dir, '20150507_dem_slope_bin_skeleton.tif')
slope_bin_to_skeleton(slope_bin_path, skeleton_path)
def test_extract_headwall_based_medial_axis_from_slope():
# data_dir = os.path.expanduser('~/Data/dem_processing/grid_6174_tmp_files/slope_sub_6174')
# slope_tif = os.path.join(data_dir,'20080511_dem_slope.tif')
# work_dir = os.path.expanduser('~/Data/dem_processing')
# save_dir = os.path.expanduser('~/Data/dem_processing/grid_6174_tmp_files')
data_dir = os.path.expanduser('~/Data/dem_processing/grid_9053_tmp_files/slope_sub_9053')
slope_tif = os.path.join(data_dir,'20140701_dem_slope.tif')
work_dir = os.path.expanduser('~/Data/dem_processing/grid_9053_tmp_files')
save_dir = os.path.expanduser('~/Data/dem_processing/grid_9053_tmp_files')
slope_threshold = 20
min_area_size = 200 # in m^2
max_area_size = 50000 # in m^2
max_axis_width = 80 # in meters
min_length = 15 # in pixel
max_length = 1000 # in pixel
max_hole_count = 0
process_num = 1
extract_headwall_based_medial_axis_from_slope(0, 1, slope_tif, work_dir, save_dir, slope_threshold,min_area_size,max_area_size,
min_length, max_length, max_hole_count, max_axis_width,process_num)
def test_calculate_line_segment_polygon_pixels():
one_line_poly_shp = os.path.expanduser('~/Data/dem_processing/test_cal_line_segment/one_line_segment.shp')
polygons = vector_gpd.read_polygons_gpd(one_line_poly_shp, b_fix_invalid_polygon=False)
# print(polygons[0])
line_segment = calculate_line_segment_polygon_pixels(polygons[0])
print('remain pixel count in lines', line_segment)
def test_get_longest_line_one_polygon():
one_line_poly_shp = os.path.expanduser('~/Data/dem_processing/grid_9274_tmp_files/one_medial_axis/one_medial_axis.shp')
polygons = vector_gpd.read_polygons_gpd(one_line_poly_shp, b_fix_invalid_polygon=False)
distance_tif = os.path.expanduser('~/Data/dem_processing/grid_9274_tmp_files/20150507_dem_slope_bin_medial_axis_dist.tif')
distance_np,nodata = raster_io.read_raster_one_band_np(distance_tif)
dist_transform = raster_io.get_transform_from_file(distance_tif)
line_string = get_longest_line_one_polygon(polygons[0],distance_np,dist_transform)
# print('remain pixel count in lines', line_segment)
# save to file
save_pd = pd.DataFrame({'Line':[line_string]})
wkt = map_projection.get_raster_or_vector_srs_info_proj4(one_line_poly_shp)
save_shp = io_function.get_name_by_adding_tail(one_line_poly_shp,'main')
vector_gpd.save_lines_to_files(save_pd,'Line',wkt,save_shp)
def main():
# test_slope_bin_to_medial_axis()
test_extract_headwall_based_medial_axis_from_slope()
# test_calculate_line_segment_polygon_pixels()
# test_slope_bin_to_skeleton()
# test_get_longest_line_one_polygon()
pass
if __name__ == '__main__':
main()
pass | [
"huanglingcao@gmail.com"
] | huanglingcao@gmail.com |
7c881f192f7bb5eea325cab96de8af8dd74bfdf8 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/Resnet50_Cifar_for_PyTorch/configs/_base_/models/regnet/regnetx_12gf.py | d0b11c71bd70e06fedb1869a0fa5f51e24fc5d1b | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 928 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='RegNet', arch='regnetx_12gf'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=2240,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
8a4a6045331fbe880e465f92ec60997cf8936fb5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_cabbie.py | 6e6ef5a74bd29c23a2f80dd665c5137381783643 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py |
#calss header
class _CABBIE():
def __init__(self,):
self.name = "CABBIE"
self.definitions = [u'a driver of a taxi']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9be3e219222181adcea02d90ac202c71fab57999 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4051/867004051.py | 975e246781a36ab16a69e9109f3e1c5a1418a908 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 3,180 | py | from bots.botsconfig import *
from records004051 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'PT',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BPT', MIN: 1, MAX: 1},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'MEA', MIN: 0, MAX: 20},
{ID: 'PSA', MIN: 0, MAX: 10},
{ID: 'N1', MIN: 0, MAX: 5, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 12},
{ID: 'PER', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
]},
]},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
{ID: 'LCD', MIN: 0, MAX: 2},
]},
{ID: 'PTD', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'PRF', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'MAN', MIN: 0, MAX: 1},
{ID: 'LCD', MIN: 0, MAX: 2},
{ID: 'LQ', MIN: 0, MAX: 99999},
{ID: 'MEA', MIN: 0, MAX: 99999},
{ID: 'N1', MIN: 0, MAX: 5, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 20},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'SII', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N9', MIN: 0, MAX: 1},
]},
]},
{ID: 'QTY', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LIN', MIN: 0, MAX: 1},
{ID: 'PO3', MIN: 0, MAX: 25},
{ID: 'PO4', MIN: 0, MAX: 1},
{ID: 'UIT', MIN: 0, MAX: 12},
{ID: 'AMT', MIN: 0, MAX: 12},
{ID: 'ITA', MIN: 0, MAX: 10},
{ID: 'PID', MIN: 0, MAX: 200},
{ID: 'MEA', MIN: 0, MAX: 40},
{ID: 'PWK', MIN: 0, MAX: 25},
{ID: 'PKG', MIN: 0, MAX: 25},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'PER', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'CUR', MIN: 0, MAX: 1},
{ID: 'DD', MIN: 0, MAX: 99999},
{ID: 'LDT', MIN: 0, MAX: 1},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 0, MAX: 100},
]},
{ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
]},
{ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'FA2', MIN: 1, MAX: 99999},
]},
]},
]},
{ID: 'CTT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'AMT', MIN: 0, MAX: 12},
{ID: 'ITA', MIN: 0, MAX: 10},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
4ca43d2b719997ab42b0ad4b0cd4e19ca724f756 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /storagegateway_write_f/working-storage_add.py | fdffa92a273c0f4515c3752bc83ee18b075b6423 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
describe-working-storage : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/storagegateway/describe-working-storage.html
"""
write_parameter("storagegateway", "add-working-storage") | [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
e6e142ba00e50a37c2699eb8e541e435194bb656 | f09e98bf5de6f6c49df2dbeea93bd09f4b3b902f | /google-cloud-sdk/lib/surface/compute/operations/__init__.py | 62bb5791805c19a38a10ac3b652bdc619bac8e65 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Peterfeng100/notepal | 75bfaa806e24d85189bd2d09d3cb091944dc97e6 | d5ba3fb4a06516fec4a4ae3bd64a9db55f36cfcd | refs/heads/master | 2021-07-08T22:57:17.407571 | 2019-01-22T19:06:01 | 2019-01-22T19:06:01 | 166,490,067 | 4 | 1 | null | 2020-07-25T04:37:35 | 2019-01-19T00:37:04 | Python | UTF-8 | Python | false | false | 1,057 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Operations(base.Group):
"""Read and manipulate Google Compute Engine operations."""
Operations.category = 'Info'
Operations.detailed_help = {
'brief': 'Read and manipulate Google Compute Engine operations',
}
| [
"kevinhk.zhang@mail.utoronto.ca"
] | kevinhk.zhang@mail.utoronto.ca |
30e09cae99d89491f961c166bc2f75e473bea427 | 0809673304fe85a163898983c2cb4a0238b2456e | /src/lesson_algorithms/functools_reduce_short_sequences.py | 55bf8b3a815b7250c2e53d854cee96174ee95735 | [
"Apache-2.0"
] | permissive | jasonwee/asus-rt-n14uhp-mrtg | 244092292c94ff3382f88f6a385dae2aa6e4b1e1 | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | refs/heads/master | 2022-12-13T18:49:02.908213 | 2018-10-05T02:16:41 | 2018-10-05T02:16:41 | 25,589,776 | 3 | 1 | Apache-2.0 | 2022-11-27T04:03:06 | 2014-10-22T15:42:28 | Python | UTF-8 | Python | false | false | 487 | py | import functools
def do_reduce(a, b):
print('do_reduce({}, {})'.format(a, b))
return a + b
print('Single item in sequence:',
functools.reduce(do_reduce, [1]))
print('Single item in sequence with initializer:',
functools.reduce(do_reduce, [1], 99))
print('Empty sequence with initializer:',
functools.reduce(do_reduce, [], 99))
try:
print('Empty sequence:', functools.reduce(do_reduce, []))
except TypeError as err:
print('ERROR: {}'.format(err))
| [
"peichieh@gmail.com"
] | peichieh@gmail.com |
eb748af2736e8a4cc158f58a4ef86fc66c6f8b14 | aa480d8b09dd7ad92c37c816ebcace24a35eb34c | /third-round/540.有序数组中的单一元素.py | 98dff56fd19fa1ef3b473daf1bbf5de8db64538d | [] | no_license | SR2k/leetcode | 7e701a0e99f9f05b21216f36d2f5ac07a079b97f | de131226159865dcb7b67e49a58d2ddc3f0a82c7 | refs/heads/master | 2023-03-18T03:37:02.916453 | 2022-09-16T01:28:13 | 2022-09-16T01:28:13 | 182,083,445 | 0 | 0 | null | 2023-03-08T05:44:26 | 2019-04-18T12:27:12 | Python | UTF-8 | Python | false | false | 1,739 | py | #
# @lc app=leetcode.cn id=540 lang=python3
#
# [540] 有序数组中的单一元素
#
# https://leetcode-cn.com/problems/single-element-in-a-sorted-array/description/
#
# algorithms
# Medium (61.02%)
# Likes: 480
# Dislikes: 0
# Total Accepted: 88.8K
# Total Submissions: 145.7K
# Testcase Example: '[1,1,2,3,3,4,4,8,8]'
#
# 给你一个仅由整数组成的有序数组,其中每个元素都会出现两次,唯有一个数只会出现一次。
#
# 请你找出并返回只出现一次的那个数。
#
# 你设计的解决方案必须满足 O(log n) 时间复杂度和 O(1) 空间复杂度。
#
#
#
# 示例 1:
#
#
# 输入: nums = [1,1,2,3,3,4,4,8,8]
# 输出: 2
#
#
# 示例 2:
#
#
# 输入: nums = [3,3,7,7,10,11,11]
# 输出: 10
#
#
#
#
#
#
# 提示:
#
#
# 1 <= nums.length <= 10^5
# 0 <= nums[i] <= 10^5
#
#
#
# @lc code=start
class Solution:
def singleNonDuplicate(self, nums: list[int]) -> int:
left, right = 0, len(nums) - 1
while left + 1 < right:
middle = (left + right) >> 1
if self.check(nums, middle):
left = middle
else:
right = middle
if not self.check(nums, left):
return nums[left]
return nums[right]
def check(self, nums: list[int], i: int):
if i % 2:
return i - 1 >= 0 and nums[i] == nums[i - 1]
else:
return i + 1 < len(nums) and nums[i] == nums[i + 1]
# @lc code=end
print(Solution().singleNonDuplicate([1,1,2,3,3,4,4,8,8]))
print(Solution().singleNonDuplicate([3,3,7,7,10,11,11]))
print(Solution().singleNonDuplicate([1,3,3]))
print(Solution().singleNonDuplicate([3,3,1]))
print(Solution().singleNonDuplicate([1]))
| [
"luozhou.csy@alibaba-inc.com"
] | luozhou.csy@alibaba-inc.com |
d66c544c7a655a84d2c0492fddee34becf974d0a | 1e987bd8b8be0dc1c139fa6bf92e8229eb51da27 | /util/freefocus/server/flask/flask.py | a41889348901545b057d2d513b7db5ebe3f9a5ae | [] | no_license | tszdanger/phd | c97091b4f1d7712a836f0c8e3c6f819d53bd0dd5 | aab7f16bd1f3546f81e349fc6e2325fb17beb851 | refs/heads/master | 2023-01-01T00:54:20.136122 | 2020-10-21T18:07:42 | 2020-10-21T18:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,738 | py | #!/usr/bin/env python
"""Run a REST server."""
import typing
from argparse import ArgumentParser
from contextlib import contextmanager
import flask
import flask_cors
import sqlalchemy
from flask import abort
from flask import request
from util.freefocus import freefocus
from util.freefocus import sql
app = flask.Flask(__name__)
flask_cors.CORS(app)
app.config.from_object("config")
make_session = None
@contextmanager
def Session(commit: bool = False) -> sqlalchemy.orm.session.Session:
"""Provide a transactional scope around a series of operations."""
session = make_session()
try:
yield session
if commit:
session.commit()
except:
session.rollback()
raise
finally:
session.close()
API_BASE = f"/api/v{freefocus.SPEC_MAJOR}.{freefocus.SPEC_MINOR}"
URL_STUB = "http://" + app.config.get("SERVER_NAME", "") + API_BASE
def active_task_graph():
def build_graph(session, task: sql.Task):
return {
"id": task.id,
"body": task.body.split("\n")[0],
"completed": True if task.completed else False,
"children": [
build_graph(session, child)
for child in session.query(sql.Task)
.filter(sql.Task.parent_id == task.id)
.order_by(sql.Task.created.desc())
],
}
with Session() as session:
# List 'root' tasks
q = (
session.query(sql.Task)
.filter(sql.Task.parent_id == None)
.order_by(sql.Task.created.desc())
)
r = [build_graph(session, t) for t in q]
return r
@app.route("/")
def index():
data = {
"freefocus": {
"version": f"{freefocus.SPEC_MAJOR}.{freefocus.SPEC_MINOR}.{freefocus.SPEC_MICRO}",
},
"assets": {
"cache_tag": 1,
"bootstrap_css": flask.url_for("static", filename="bootstrap.css"),
"styles_css": flask.url_for("static", filename="styles.css"),
"site_js": flask.url_for("static", filename="site.js"),
},
"tasks": active_task_graph(),
}
return flask.render_template("lists.html", **data)
def response(data):
""" make an API response """
return jsonify(data)
def paginated_response(iterable: typing.Iterable):
""" make a paginated API response """
# TODO: chunk and paginate
return response(list(iterable))
def truncate(string: str, maxlen=144):
suffix = "..."
if len(string) > maxlen:
truncated = string[: maxlen - len(suffix)] + suffix
return {"data": truncated, "truncated": True}
else:
return {"data": string, "truncated": False}
def task_url(task: sql.Task):
return URL_STUB + f"/tasks/{task.id}"
def group_url(group: sql.Group):
return URL_STUB + f"/groups/{group.id}"
def asset_url(asset: sql.Asset):
return URL_STUB + f"/assets/{group.id}"
def date(d):
if d:
return d.isoformat()
else:
return None
@app.errorhandler(404)
def not_found(error):
""" 404 error handler """
return make_response(jsonify({"error": "Not found"}), 404)
@app.errorhandler(400)
def not_found(error):
""" 400 Bad Request """
return make_response(jsonify({"error": "Bad Request"}), 400)
@app.route(API_BASE + "/persons", methods=["GET"])
def get_persons():
with Session() as session:
q = session.query(sql.Person)
return paginated_response(p.json() for p in q)
@app.route(API_BASE + "/persons/<int:person_uid>", methods=["GET"])
def get_person(person_uid: int):
with Session() as session:
p = session.query(sql.Person).filter(sql.Person.uid == person_uid).first()
if not p:
abort(404)
return response(p.json())
@app.route(API_BASE + "/persons/<int:person_uid>/groups", methods=["GET"])
def get_person_groups(person_uid: int):
with Session() as session:
p = session.query(sql.Person).filter(sql.Person.uid == person_uid).first()
if not p:
abort(404)
return paginated_response(g.json() for g in p.groups)
@app.route(API_BASE + "/tasks", methods=["GET"])
def get_tasks():
def build_graph(session, task: sql.Task = None):
parent = None if task is None else task.id
q = (
session.query(sql.Task)
.filter(sql.Task.parent_id == parent)
.order_by(sql.Task.created.desc())
)
# "Completed" request parameter
completed = request.args.get("completed", None)
if completed is not None:
if completed == "true":
q = q.filter(sql.Task.completed)
elif completed == "false":
q = q.filter(sql.Task.completed == None)
else:
abort(400)
children = [build_graph(session, t) for t in q]
if task is None:
return children
else:
return {
"url": task_url(task),
"body": truncate(task.body),
"status": task.status,
"assigned": [g.id for g in task.assigned],
"children": children,
}
with Session() as session:
return paginated_response(build_graph(session))
@app.route(API_BASE + "/tasks/<int:task_id>", methods=["GET"])
def get_task(task_id: int):
with Session() as session:
t = session.query(sql.Task).filter(sql.Task.id == task_id).first()
if not t:
abort(404)
return response(
{
"body": t.body,
"assigned": t.is_assigned,
"blocked": t.is_blocked,
"defer_until": date(t.defer_until),
"start_on": date(t.start_on),
"estimated_duration": t.duration,
"due": date(t.due),
"started": date(t.started),
"completed": date(t.completed),
"created": {"at": date(t.created), "by": group_url(t.created_by),},
}
)
@app.route(API_BASE + "/tasks/<int:task_id>/owners", methods=["GET"])
def get_task_owners(task_id: int):
with Session() as session:
t = session.query(sql.Task).filter(sql.Task.id == task_id).first()
if not t:
abort(404)
# TODO: summary
return paginated_response(group_url(g) for g in t.owners)
@app.route(API_BASE + "/tasks/<int:task_id>/assigned", methods=["GET"])
def get_task_assigned(task_id: int):
with Session() as session:
t = session.query(sql.Task).filter(sql.Task.id == task_id).first()
if not t:
abort(404)
# TODO: summary
return paginated_response(group_url(g) for g in t.assigned)
@app.route(API_BASE + "/tasks", methods=["POST"])
def add_task():
with Session(commit=True) as session:
pass
def main():
global make_session
parser = ArgumentParser(description=__doc__)
parser.add_argument("uri")
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
engine = sqlalchemy.create_engine(args.uri, echo=args.verbose)
sql.Base.metadata.create_all(engine)
sql.Base.metadata.bind = engine
make_session = sqlalchemy.orm.sessionmaker(bind=engine)
app.RunWithArgs(debug=True, host="0.0.0.0")
if __name__ == "__main__":
main()
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
77331412b268e9fbf82fdb836edc42208c80e3a2 | bc2742cac4347eb8652295a0d4aeb8633eea7c1b | /tests/layers/test_gated_average_layer.py | 9b7827828d7f8e22c374d627ce99185f293b49e2 | [
"MIT"
] | permissive | temp3rr0r/neupy | 933648658cc2a5e85e0fc3955de0a3de65ea97c1 | f36071f5f46bf79ffd18485acca941db578656e8 | refs/heads/master | 2023-05-25T09:55:10.654122 | 2018-12-17T16:56:30 | 2018-12-17T16:56:30 | 163,117,180 | 0 | 0 | MIT | 2023-05-22T21:44:38 | 2018-12-25T23:24:17 | Python | UTF-8 | Python | false | false | 5,202 | py | import numpy as np
from neupy import layers
from neupy.utils import asfloat
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class GatedAverageTestCase(BaseTestCase):
def test_gated_average_layer_negative_index(self):
gated_avg_layer = layers.GatedAverage(gating_layer_index=-1)
layers.join([
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(8),
layers.Input(10) > layers.Softmax(2),
], gated_avg_layer)
self.assertEqual(gated_avg_layer.output_shape, (8,))
self.assertEqual(gated_avg_layer.input_shape, [(8,), (8,), (2,)])
gated_avg_layer = layers.GatedAverage(gating_layer_index=-3)
layers.join([
layers.Input(10) > layers.Softmax(2),
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
self.assertEqual(gated_avg_layer.output_shape, (8,))
self.assertEqual(gated_avg_layer.input_shape, [(2,), (8,), (8,)])
def test_gated_average_layer_exceptions_index_position(self):
gated_avg_layer = layers.GatedAverage(gating_layer_index=3)
with self.assertRaisesRegexp(LayerConnectionError, "Invalid index"):
layers.join([
layers.Input(20) > layers.Relu(8),
layers.Input(10) > layers.Softmax(2),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
gated_avg_layer = layers.GatedAverage(gating_layer_index=-4)
with self.assertRaisesRegexp(LayerConnectionError, "Invalid index"):
layers.join([
layers.Input(10) > layers.Softmax(2),
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
def test_gated_average_layer_exceptions(self):
gated_avg_layer = layers.GatedAverage()
with self.assertRaisesRegexp(LayerConnectionError, "should be vector"):
layers.join([
layers.Input((10, 3, 3)), # shape not 1d
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
gated_avg_layer = layers.GatedAverage()
error_message = "only 3 networks, got 2 networks"
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join([
layers.Input(10) > layers.Softmax(3),
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
gated_avg_layer = layers.GatedAverage()
error_message = "expect to have the same shapes"
with self.assertRaisesRegexp(LayerConnectionError, error_message):
layers.join([
layers.Input(10) > layers.Softmax(2),
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(10),
], gated_avg_layer)
def test_gated_average_layer_non_default_index(self):
gated_avg_layer = layers.GatedAverage(gating_layer_index=1)
layers.join([
layers.Input(20) > layers.Relu(8),
layers.Input(10) > layers.Softmax(2),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
self.assertEqual(gated_avg_layer.output_shape, (8,))
self.assertEqual(gated_avg_layer.input_shape, [(8,), (2,), (8,)])
def test_gated_average_layer_output_shape(self):
gated_avg_layer = layers.GatedAverage()
self.assertIsNone(gated_avg_layer.output_shape)
layers.join([
layers.Input(10) > layers.Softmax(2),
layers.Input(20) > layers.Relu(8),
layers.Input(20) > layers.Relu(8),
], gated_avg_layer)
self.assertEqual(gated_avg_layer.output_shape, (8,))
self.assertEqual(gated_avg_layer.input_shape, [(2,), (8,), (8,)])
def test_gated_average_layer_output(self):
input_layer = layers.Input(10)
network = layers.join(
[
input_layer > layers.Softmax(2),
input_layer > layers.Relu(8),
input_layer > layers.Relu(8),
],
layers.GatedAverage()
)
random_input = asfloat(np.random.random((20, 10)))
actual_output = self.eval(network.output(random_input))
self.assertEqual(actual_output.shape, (20, 8))
def test_gated_average_layer_multi_dimensional_inputs(self):
input_layer = layers.Input((5, 5, 1))
network = layers.join(
[
input_layer > layers.Reshape() > layers.Softmax(2),
input_layer > layers.Convolution((2, 2, 3)),
input_layer > layers.Convolution((2, 2, 3)),
],
layers.GatedAverage()
)
self.assertEqual(network.input_shape, (5, 5, 1))
self.assertEqual(network.output_shape, (4, 4, 3))
random_input = asfloat(np.random.random((8, 5, 5, 1)))
actual_output = self.eval(network.output(random_input))
self.assertEqual(actual_output.shape, (8, 4, 4, 3))
| [
"mail@itdxer.com"
] | mail@itdxer.com |
6329de61ee60371c8076e5ae6630d63412935d5a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_106/ch60_2020_05_05_08_13_18_583778.py | 43482c80ca128e108aa30532e4ae4edf9f5a01b4 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | def eh_palindromo(string):
new=[]
for i in string:
new.append(i)
new.reverse()
news=''.join(new)
if news==string:
return True
else:
return False | [
"you@example.com"
] | you@example.com |
d98a36fe8eefb7fd90bd8d44aeabd6b7876417a2 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/1/test_20200606181205.py | 39015065b66f8a71c94844dc817ec7ffaed21592 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | people = ["Влад", "Яромир"]
for name in people
print(people) | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
69540f2a46c06f8779897219544ba72f26af92e3 | bbf874cf4abb20e7ec5c66e808e97ae6f2043c3f | /0x01-python-if_else_loops_functions/1-last_digit.py | b40981bd458d6316d7bf311194b37fbc97f191f0 | [] | no_license | imperfectskillz/holbertonschool-higher_level_programming | 105fd80c2bea8fbb60eb786ce9019b3f63188342 | 704e99b29125d6449db32b9d52ede443318df620 | refs/heads/master | 2021-09-14T10:44:22.551896 | 2018-05-12T03:38:59 | 2018-05-12T03:38:59 | 113,130,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number >= 0:
last = number % 10
if last == 0:
print("Last digit of {} is {} and is 0".format(number, last))
elif last < 6:
print("last digit of {} is {} and is less than 6 and not 0".format(number, last))
elif last > 5:
print("Last digit of {} is {} and is greater than 5".format(number, last))
elif number < 0:
last = number % -10
print("Last digit of {} is {} and is less than 6 and not 0".format(number, last))
| [
"j.choi.89@gmail.com"
] | j.choi.89@gmail.com |
629f048bdb22a01508441f0c624ddca24a37e392 | bcca6c84d7fd2cbb782b38c68425b24a2dedeaee | /tests/chainsync/test_chainsync_adapter.py | 854d25620dc372d8124deb67d56a268041972a18 | [
"MIT"
] | permissive | dpays/chainsync | 8ba3f057e889f6b3b7d6405cbea1bf350493164a | 1277363787e37aa595571ab8a789831aadf0d3e6 | refs/heads/master | 2020-03-28T12:05:39.448076 | 2018-06-15T04:16:56 | 2018-06-15T04:16:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | from chainsync import ChainSync
from chainsync.adapters.steem import SteemAdapter
import unittest
class ChainSyncAdapterTestCase(unittest.TestCase):
def setUp(self):
self.chainsync = ChainSync(adapter=SteemAdapter)
def test_adapter_init_default_adapter(self):
self.assertNotEqual(self.chainsync.adapter, None)
def test_adapter_init_custom_adapter_custom_endpoint_no_endpoints(self):
adapter = SteemAdapter()
custom = ChainSync(adapter)
self.assertEqual(custom.adapter.endpoint, 'http://localhost:8090')
def test_adapter_init_custom_adapter_custom_endpoint_string(self):
adapter = SteemAdapter(endpoints='http://localhost:8091')
custom = ChainSync(adapter)
self.assertEqual(custom.adapter.endpoint, 'http://localhost:8091')
def test_adapter_init_custom_adapter_custom_endpoint_list(self):
endpoints = ['http://localhost:8091', 'http://localhost:8090']
adapter = SteemAdapter(endpoints=endpoints)
custom = ChainSync(adapter)
self.assertEqual(custom.adapter.endpoint, 'http://localhost:8091')
self.assertEqual(custom.adapter.endpoints, endpoints)
def test_adapter_debug_flag_default_false(self):
self.assertEqual(self.chainsync.adapter.debug, False)
def test_adapter_debug_flag_set_true(self):
adapter = SteemAdapter(debug=True)
custom = ChainSync(adapter)
self.assertEqual(custom.adapter.debug, True)
def test_adapter_debug_flag_set_true_from_main(self):
adapter = SteemAdapter()
custom = ChainSync(adapter, debug=True)
self.assertEqual(custom.adapter.debug, True)
def test_adapter_debug_flag_set_true_from_main_false_for_adapter(self):
adapter = SteemAdapter(debug=False)
# main debug flag should override adapter
custom = ChainSync(adapter, debug=True)
self.assertEqual(custom.adapter.debug, True)
| [
"aaron.cox@greymass.com"
] | aaron.cox@greymass.com |
2e47504f5f032988f0852462405fce0dfe5432fc | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_run/reduce_min_run.py | e46d731933bfc91f2158e842557a774c331fd039 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reduce_min_run"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import reduce_min
from akg.utils.dsl_create import get_reduce_out_shape
from tests.common.gen_random import random_gaussian
from tests.common.base import get_rtol_atol
def reduce_min_run(shape, axis, keepdims, dtype, kernel_name="reduce_min", attrs=None):
    """
    Run function for the dsl function reduce_min.

    Builds the reduce_min kernel, launches it against randomly generated
    input data, and compares the result against numpy's ``np.amin``.  When
    a 'tuning' key is present in ``attrs``, the kernel module (and
    optionally the test data) is returned without launching.
    """
    attrs = {} if attrs is None else attrs
    op_attrs = [axis, keepdims]
    if 'tuning' in attrs:
        tuning = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(reduce_min.reduce_min, [shape], [dtype],
                                  op_attrs=op_attrs, kernel_name=kernel_name,
                                  attrs=attrs, tuning=tuning)
        if not tuning:
            return mod
        expect, inputs, output = gen_data(axis, dtype, keepdims, shape)
        return mod, expect, (inputs, output)
    mod = utils.op_build_test(reduce_min.reduce_min, [shape], [dtype],
                              op_attrs=op_attrs, kernel_name=kernel_name,
                              attrs=attrs)
    expect, inputs, output = gen_data(axis, dtype, keepdims, shape)
    output = utils.mod_launch(mod, (inputs, output), expect=expect)
    rtol, atol = get_rtol_atol("reduce_min", dtype)
    comparison = compare_tensor(output, expect, rtol=rtol, atol=atol,
                                equal_nan=True)
    return inputs, output, expect, comparison
def gen_data(axis, dtype, keepdims, shape):
    """
    Generate input, expected, and output-placeholder arrays.

    The input is Gaussian noise cast through float16 to the target dtype;
    the expected result is ``np.amin`` over ``axis``; the output
    placeholder is NaN-filled with the reduced shape.
    """
    input_data = random_gaussian(shape, miu=0, sigma=100.0)
    input_data = input_data.astype("float16").astype(dtype.lower())
    expected = np.amin(input_data, axis=axis, keepdims=keepdims)
    result_shape = get_reduce_out_shape(shape, axis=axis, keepdims=keepdims)
    placeholder = np.full(result_shape, np.nan, dtype)
    return expected, input_data, placeholder
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
71f3d25a4e999716de853943d0178735c0406b4c | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/toolkit/splines/spline.py | 69fdd56907113a61dddfbedbe4fe3afdc16bce09 | [
"BSD-3-Clause"
] | permissive | SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 42,195 | py | import itertools
import numpy as np
from sofia_redux.toolkit.splines.spline_utils import (
flat_index_mapping, build_observation,
find_knots, create_ordering, solve_observation,
add_knot, knot_fit, calculate_minimum_bandwidth, check_input_arrays,
determine_smoothing_spline, perform_fit)
__all__ = ['Spline']
class Spline(object):
    def __init__(self, *args, weights=None, limits=None, degrees=3,
                 smoothing=None, knots=None, knot_estimate=None, eps=1e-8,
                 fix_knots=None, tolerance=1e-3, max_iteration=20, exact=False,
                 reduce_degrees=False, solve=True):
        """
        Initialize a Spline object.
        This is a Python implementation of the Fortran fitpack spline based
        routines (Diercchx 1993) but is not limited to a maximum of
        2-dimensions. Fast numerical processing is achieved using the `numba`
        Python package (Lam et. al., 2015).
        The actual spline fitting (representation) is performed during
        initialization from user supplied data values and coordinates and other
        parameters (see below). Spline evaluations at other coordinates may
        then be retrieved using the __call__() method.
        Spline coefficients and knots are derived iteratively, and will be
        deemed acceptable once:
        abs(sum(residuals^2) - smoothing) <= tolerance * smoothing
        However, iterations may cease in a variety of scenarios. Exit
        conditions should be examined prior to evaluating the spline and can
        be retrieved from the `exit_code` attribute or `exit_message` property.
        References
        ----------
        Dierckx, P. Curve and Surface Fitting with Splines (Oxford Univ. Press,
        1993).
        Lam, S. K., Pitrou, A., & Seibert, S. (2015). Numba: A llvm-based
        python jit compiler. In Proceedings of the Second Workshop on
        the LLVM Compiler Infrastructure in HPC (pp. 1–6).
        Parameters
        ----------
        args : n-tuple (numpy.ndarray) or numpy.ndarray
            The input arguments of the form (c1, ..., cn, d) or d where c
            signifies data coordinates and d are the data values.  If a single
            data array is passed in, the coordinates are derived from the data
            dimensions.  For example if d is an array of shape (a, b, c), c1
            will range from 0 -> a - 1 etc.  If coordinates are specified, the
            coordinates for each dimension and the data array should be
            one-dimensional.
        weights : numpy.ndarray, optional
            Optional weights to supply to the spline fit for each data point.
            Should be the same shape as the supplied data values.
        limits : numpy.ndarray (float), optional
            An array of shape (n_dimensions, 2) that may be supplied to set the
            minimum and maximum coordinate values used during the spline fit.
            For example, limits[1, 0] sets the minimum knot value in the second
            dimensions and limits[1, 1] sets the maximum knot value in the
            second dimension.  By default this is set to the minimum and
            maximum values of the coordinates in each dimension.
        degrees : int or numpy.ndarray (int), optional
            The degree of spline to fit in each dimension.  Either a scalar can
            be supplied pertaining to all dimensions, or an array of shape
            (n_dimensions,) can be used.
        smoothing : float, optional
            Used to specify the smoothing factor.  If set to `None`, the
            smoothing will be determined based on user settings or input data.
            If `exact` is `True`, smoothing will be disabled (zero).  If
            `exact` is `False`, smoothing will be set to n - sqrt(2 * n)
            where n is the number of data values.  If supplied, smoothing
            must be greater than zero.  See above for further details.  Note
            that if smoothing is zero, and the degrees are not equal over
            each dimension, smoothing will be set to `eps` due to numerical
            instabilities.
        knots : list or tuple or numpy.ndarray, optional
            A set of starting knot coordinates for each dimension.  If a list
            or tuple is supplied it should be of length n_dimensions where
            element i is an array of shape (n_knots[i]) for dimension i.
            If an array is supplied, it should be of shape
            (n_dimension, max(n_knots)).  Note that there must be at least
            2 * (degree + 1) knots for each dimension.  Unused or invalid
            knots may be set to NaN, at the end of each array.  Knot
            coordinates must also be monotonically increasing in each
            dimension.
        knot_estimate : numpy.ndarray (int), optional
            The maximum number of knots required for the spline fit in each
            dimension and of shape (n_dimensions,).  If not supplied, the knot
            estimate will be set to
            int((n / n_dimensions) ** n_dimensions^(-1)) or n_knots if
            knots were supplied and fixed.
        eps : float, optional
            A value where 0 < eps < 1.  This defines the magnitude used to
            identify singular values in the spline observation matrix (A).  If
            any row of A[:, 0] < (eps * max(A[:,0])) it will be considered
            singular.
        fix_knots : bool, optional
            If `True`, do not attempt to modify or add knots to the spline fit.
            Only the initial supplied user knots will be used.
        tolerance : float, optional
            A value in the range 0 < tolerance < 1 used to determine the exit
            criteria for the spline fit.  See above for further details.
        max_iteration : int, optional
            The maximum number of iterations to perform when solving for the
            spline fit.
        exact : bool, optional
            If `True`, the initial knots used will coincide with the actual
            input coordinates and smoothing will be set to zero.  No knots
            should be supplied by the user in this instance.
        reduce_degrees : bool, optional
            Only relevant if `exact` is `True`.  If set to `True`, the maximum
            allowable degree in each dimension will be limited to
            (len(unique(x)) // 2) - 1 where x are the coordinate values in any
            dimension.
        solve : bool, optional
            If `True`, solve for the knots and spline coefficients.  Otherwise,
            leave for later processing.
        """
        # Input data and spline description; populated by parse_inputs().
        self.coordinates = None  # float: (n_dimensions, n_data)
        self.values = None  # float: (n_data,)
        self.weights = None  # float: (n_data,)
        self.limits = None  # float: (n_dimensions, 2)
        self.knots = None  # list (n_dimensions) of float: (knot_size,)
        self.degrees = None  # int (n_dimensions,)
        self.k1 = None  # int (n_dimensions,)
        self.knot_estimate = None  # int (n_dimensions,)
        self.knot_indices = None  # int : (n_dimensions, n_data)
        self.knot_coordinates = None  # float : (n_dimensions, knot_estimate)
        self.knot_weights = None  # float : (n_dimensions, knot_estimate)
        # Flat index maps allowing 1-D representation of N-D structures;
        # populated by create_mapping_indices() / initialize_iteration().
        self.panel_mapping = None  # int : (n_dimensions, n_panels)
        self.panel_steps = None  # int : (n_dimensions,)
        self.knot_mapping = None  # int : (n_dimensions, nk1)
        self.knot_steps = None  # int : (n_dimensions,)
        self.panel_indices = None  # int : (n_data,)
        self.amat = None  # float : (max_possible_knots, n_coefficients)
        self.beta = None  # float : (max_possible_knots,)
        self.spline_steps = None  # int : (n_dimensions,)
        self.spline_mapping = None  # int : (n_dimensions, bandwidth)
        self.n_knots = None  # int : (n_dimensions,)
        self.start_indices = None  # int : (n_data,)
        self.next_indices = None  # int : (n_data,)
        self.splines = None  # float : (n_dimensions, n_data, max(k1))
        self.coefficients = None  # float : (n_coefficients,)
        self.n_dimensions = 0
        self.dimension_permutations = None
        self.dimension_order = None
        self.permutation = None
        self.change_order = False
        # Fit configuration and iteration state.
        self.smoothing = 0.0
        self.accuracy = 0.0  # absolute tolerance
        self.fix_knots = False
        self.eps = 1e-8
        self.tolerance = 1e-3
        self.max_iteration = 20
        self.exit_code = 20  # Return code (20 = knots not initialized)
        self.panel_shape = None  # panel dimensions (solution space) (nxx)
        self.nk1 = None  # Last valid knot (n_dimensions,)
        self.iteration = -1
        self.smoothing_difference = np.nan
        self.n_panels = 0  # nreg
        self.n_intervals = 0  # nrint
        self.sum_square_residual = 0.0
        self.initial_sum_square_residual = np.nan
        self.bandwidth = 0  # iband
        self.n_coefficients = 0
        self.rank = 0
        self.fitted_knots = None
        self.fit_coordinates = None
        self.fit = None
        self.grid_reduction = None
        self.exact = False
        # Validate and store all user inputs before (optionally) solving.
        self.parse_inputs(
            *args, weights=weights, limits=limits, degrees=degrees,
            smoothing=smoothing, knots=knots, knot_estimate=knot_estimate,
            eps=eps, tolerance=tolerance, max_iteration=max_iteration,
            fix_knots=fix_knots, exact=exact, reduce_degrees=reduce_degrees)
        if solve:
            # Solve immediately, restore the user's dimension ordering, and
            # ensure knot fit values are available for later evaluation.
            self.iterate()
            self.final_reorder()
            if self.fitted_knots is None:
                self.fit_knots()
@property
def exit_message(self):
"""
Returns an exit message for the spline fit. Error codes in the range
-2 -> 0 generally indicate a successful fit.
Returns
-------
message : str
"""
if self.exit_code == 0:
msg = (f"The spline has a residual sum of squares fp such that "
f"abs(fp-s)/s <= {self.tolerance}")
elif self.exit_code == -1:
msg = "The spline is an interpolating spline (fp=0)"
elif self.exit_code == -2:
msg = (f"The spline is a weighted least-squares polynomial of "
f"degree {self.degrees}. fp gives the upper bound for fp0 "
f"for the smoothing factor s = {self.smoothing}.")
elif self.exit_code == -3:
msg = ("Warning. The coefficients of the spline have been "
"computed as the minimal norm least-squares solution of "
"a rank deficient system.")
elif self.exit_code < 0:
if self.rank >= self.n_coefficients:
msg = f"Rank={self.rank} (full rank)"
else:
msg = (f"Rank={self.rank} (rank deficient "
f"{self.rank}/{self.n_coefficients})")
elif self.exit_code == 1:
msg = ("The required storage space exceeds the available storage "
"space. Probable causes: knot_estimate too small or s is "
"too small. (fp>s)")
elif self.exit_code == 2:
msg = (f"A theoretically impossible result when finding a "
f"smoothing spline with fp=s. Probable causes: s too "
f"small or badly chosen eps."
f"(abs(fp-s)/s>{self.tolerance})")
elif self.exit_code == 3:
msg = (f"The maximal number of iterations ({self.max_iteration}) "
f"allowed for finding smoothing spline with fp=s has been "
f"reached. Probable cause: s too small."
f"(abs(fp-s)/s>{self.tolerance})")
elif self.exit_code == 4:
msg = ("No more knots can be added because the number of B-spline"
"coefficients already exceeds the number of data points m."
"Probable causes: either s or m too small. (fp>s)")
elif self.exit_code == 5:
msg = ("No more knots can be added because the additional knot "
"would coincide with an old one. Probable cause: s too "
"small or too large a weight to an inaccurate data point. "
"(fp>s)")
elif self.exit_code == 20:
msg = "Knots are not initialized."
else:
msg = "An unknown error occurred."
return msg
@property
def size(self):
"""
Return the number of values used for the spline fit.
Returns
-------
n : int
"""
return self.values.size
@property
def knot_size(self):
"""
Return the number of knots in each dimension.
Returns
-------
n_knots : numpy.ndarray (int)
An array of shape (n_dimensions,).
"""
n_knots = np.zeros(self.n_dimensions, dtype=int) # nx, ny
for dimension in range(self.n_dimensions):
knot = self.knots[dimension]
for i in range(knot.size):
if np.isnan(knot[i]):
break
n_knots[dimension] += 1
return n_knots
    def parse_inputs(self, *args, weights=None, limits=None, degrees=3,
                     smoothing=None, knots=None, fix_knots=None,
                     knot_estimate=None, exact=False, reduce_degrees=False,
                     eps=1e-8, tolerance=1e-3, max_iteration=20):
        """
        Parse and apply user inputs to the spline fit.
        Parameters
        ----------
        args : n-tuple (numpy.ndarray) or numpy.ndarray
            The input arguments of the form (c1, ..., cn, d) or d where c
            signifies data coordinates and d are the data values.  If a single
            data array is passed in, the coordinates are derived from the data
            dimensions.  For example if d is an array of shape (a, b, c), c1
            will range from 0 -> a - 1 etc.  If coordinates are specified, the
            coordinates for each dimension and the data array should be
            one-dimensional.
        weights : numpy.ndarray, optional
            Optional weights to supply to the spline fit for each data point.
            Should be the same shape as the supplied data values.
        limits : numpy.ndarray (float), optional
            An array of shape (n_dimensions, 2) that may be supplied to set the
            minimum and maximum coordinate values used during the spline fit.
            For example, limits[1, 0] sets the minimum knot value in the second
            dimensions and limits[1, 1] sets the maximum knot value in the
            second dimension.  By default this is set to the minimum and
            maximum values of the coordinates in each dimension.
        degrees : int or numpy.ndarray (int), optional
            The degree of spline to fit in each dimension.  Either a scalar can
            be supplied pertaining to all dimensions, or an array of shape
            (n_dimensions,) can be used.
        smoothing : float, optional
            Used to specify the smoothing factor.  If set to `None`, the
            smoothing will be determined based on user settings or input data.
            If `exact` is `True`, smoothing will be disabled (zero).  If
            `exact` is `False`, smoothing will be set to n - sqrt(2 * n)
            where n is the number of data values.  If supplied, smoothing
            must be greater than zero.  See __init__() for further details.
            Note that if smoothing is zero, and the degrees are not equal
            over each dimension, smoothing will be set to `eps` due to
            numerical instabilities.
        knots : list or tuple or numpy.ndarray, optional
            A set of starting knot coordinates for each dimension.  If a list
            or tuple is supplied it should be of length n_dimensions where
            element i is an array of shape (n_knots[i]) for dimension i.  If
            an array is supplied, it should be of shape
            (n_dimension, max(n_knots)).  Note that there must be at least
            2 * (degree + 1) knots for each dimension.  Unused or invalid
            knots may be set to NaN, at the end of each array.  Knot
            coordinates must also be monotonically increasing in each
            dimension.
        fix_knots : bool, optional
            If `True`, do not attempt to modify or add knots to the spline fit.
            Only the initial supplied user knots will be used.
        knot_estimate : numpy.ndarray (int), optional
            The maximum number of knots required for the spline fit in each
            dimension and of shape (n_dimensions,).  If not supplied, the knot
            estimate will be set to
            int((n / n_dimensions) ** n_dimensions^(-1)) or n_knots
            if knots were supplied and fixed.
        exact : bool, optional
            If `True`, the initial knots used will coincide with the actual
            input coordinates and smoothing will be set to zero.  No knots
            should be supplied by the user in this instance.
        reduce_degrees : bool, optional
            Only relevant if `exact` is `True`.  If set to `True`, the maximum
            allowable degree in each dimension will be limited to
            (len(unique(x)) // 2) - 1 where x are the coordinate values in any
            dimension.
        eps : float, optional
            A value where 0 < eps < 1.  This defines the magnitude used to
            identify singular values in the spline observation matrix (A).  If
            any row of A[:, 0] < (eps * max(A[:,0])) it will be considered
            singular.
        tolerance : float, optional
            A value in the range 0 < tolerance < 1 used to determine the exit
            criteria for the spline fit.  See __init__() further details.
        max_iteration : int, optional
            The maximum number of iterations to perform when solving for the
            spline fit.
        Returns
        -------
        None
        """
        if len(args) == 1:
            # Assume a regularly spaced grid of data values
            indices = np.indices(np.asarray(args[0]).shape)[::-1]
            self.coordinates = np.stack(
                [np.asarray(x, dtype=float).ravel() for x in indices])
        else:
            # Explicit coordinates: args = (c1, ..., cn, data)
            self.coordinates = np.stack(
                [np.asarray(x, dtype=float).ravel() for x in args[:-1]])
        self.n_dimensions = self.coordinates.shape[0]
        self.values = np.asarray(args[-1], dtype=float).ravel()
        if weights is None:
            # Uniform weighting by default.
            self.weights = np.ones(self.size, dtype=float)
        else:
            self.weights = np.asarray(weights, dtype=float).ravel()
        # Broadcast a scalar degree to all dimensions.
        self.degrees = np.atleast_1d(np.asarray(degrees, dtype=int))
        if self.degrees.size != self.n_dimensions:
            self.degrees = np.full(self.n_dimensions, self.degrees[0])
        self.k1 = self.degrees + 1
        if exact:
            if knots is not None:
                raise ValueError(
                    "Cannot use the 'exact' option if knots are supplied")
            # Exact fit: knots coincide with the unique input coordinates
            # and smoothing defaults to zero (interpolating spline).
            knots = []
            for dimension in range(self.n_dimensions):
                knots.append(np.unique(self.coordinates[dimension]))
            if smoothing is None:
                smoothing = 0.0
            self.exact = True
            if reduce_degrees:
                # Cap degrees so that 2 * (degree + 1) knots exist per
                # dimension.
                n_knots = np.asarray([knot_line.size for knot_line in knots])
                max_degrees = (n_knots // 2) - 1
                self.degrees = np.clip(self.degrees, None, max_degrees)
                self.k1 = self.degrees + 1
        else:
            self.exact = False
        self.dimension_permutations = np.asarray(
            list(itertools.permutations(np.arange(self.n_dimensions))))
        # Drop NaN/zero-weight samples before any size-dependent checks.
        self.check_array_inputs()
        if not self.exact and reduce_degrees:
            nx = np.asarray([np.unique(x).size for x in self.coordinates])
            max_degrees = (nx // 2) - 1
            self.degrees = np.clip(self.degrees, None, max_degrees)
            self.k1 = self.degrees + 1
        if not (0 < eps < 1):
            raise ValueError(f"eps not in range (0 < eps < 1): {eps}")
        self.eps = float(eps)
        if not (0 < tolerance < 1):
            raise ValueError(
                f"tolerance not in range (0 < tolerance < 1): {tolerance}")
        self.tolerance = float(tolerance)
        if self.size < np.prod(self.k1):
            raise ValueError("Data size >= product(degrees + 1) "
                             "not satisfied.")
        if smoothing is None:
            # Default smoothing heuristic: n - sqrt(2n).
            self.smoothing = self.size - np.sqrt(2 * self.size)
        else:
            if smoothing < 0:
                raise ValueError(f"smoothing must be >= 0: {smoothing}")
            self.smoothing = float(smoothing)
        if fix_knots is None:
            # Knots are fixed by default whenever the user supplies them.
            fix_knots = knots is not None
        self.fix_knots = bool(fix_knots)
        self.knots = []
        if knots is None and self.fix_knots:
            raise ValueError('Knots must be supplied if fixed.')
        if limits is None:
            if not self.fix_knots:
                self.limits = np.stack(
                    [np.array([x.min(), x.max()], dtype=float)
                     for x in self.coordinates])
            else:
                # With fixed knots the limits span the supplied knots.
                self.limits = np.stack(
                    [np.array([min(k), max(k)], dtype=float)
                     for k in knots])
        else:
            self.limits = np.atleast_2d(limits).astype(float)
            if self.limits.shape != (self.n_dimensions, 2):
                raise ValueError(
                    f"limits must be of shape ({self.n_dimensions}, 2).")
        if knots is None:
            # Minimal initial knots: the limits repeated (degree + 1) times
            # at each end via edge padding.
            for dimension in range(self.n_dimensions):
                self.knots.append(np.pad(self.limits[dimension],
                                         self.degrees[dimension],
                                         mode='edge'))
        else:
            for dimension in range(self.n_dimensions):
                knot_line = np.unique(np.asarray(knots[dimension],
                                                 dtype=float))
                self.knots.append(knot_line)
        self.n_knots = self.knot_size
        if self.fix_knots:
            for dimension in range(self.n_dimensions):
                if self.n_knots[dimension] < (2 * self.k1[dimension]):
                    raise ValueError(
                        f"There must be at least 2 * (degree + 1) knots in "
                        f"dimension {dimension} for fixed knots. "
                        f"knots={self.n_knots[dimension]}, "
                        f"degree={self.degrees[dimension]}.")
            self.knot_estimate = self.n_knots.copy()
        elif knot_estimate is not None:
            self.knot_estimate = np.asarray(
                np.atleast_1d(knot_estimate), dtype=int)
            if self.knot_estimate.size == 1 < self.n_dimensions:
                self.knot_estimate = np.full(
                    self.n_dimensions, self.knot_estimate[0])
            elif self.knot_estimate.size != self.n_dimensions:
                raise ValueError(f"Knot estimate must be a scalar or have "
                                 f"size {self.n_dimensions}")
        elif self.smoothing == 0:
            # Interpolating spline: needs a larger knot budget.
            add = (3 * self.size) ** (1 / self.n_dimensions)
            self.knot_estimate = (self.degrees + add).astype(int)
        else:
            add = (self.size / self.n_dimensions) ** (1 / self.n_dimensions)
            self.knot_estimate = (self.degrees + add).astype(int)
        self.knot_estimate = np.clip(self.knot_estimate,
                                     (2 * self.degrees) + 3, None)
        # Expand knot arrays if necessary
        max_k1 = np.max(self.k1)
        max_estimate = np.max(self.knot_estimate)
        max_knot = np.max(self.knot_estimate + self.k1)
        # Pack the per-dimension knot lists into one NaN-padded 2-D array.
        new_knots = np.full((self.n_dimensions, max_estimate), np.nan)
        for dimension in range(self.n_dimensions):
            knot_line = self.knots[dimension]
            new_knots[dimension, :knot_line.size] = knot_line.copy()
        self.knots = new_knots
        self.sum_square_residual = 0.0
        self.max_iteration = int(max_iteration)
        self.exit_code = -2
        # Initialize some work arrays
        self.knot_coordinates = np.zeros((2, max_knot))
        self.knot_weights = np.zeros((2, max_knot))
        self.splines = np.zeros((self.n_dimensions, self.size, max_k1),
                                dtype=float)
        # Absolute convergence criterion: tolerance * smoothing.
        self.accuracy = self.tolerance * self.smoothing
        self.dimension_order = np.arange(self.n_dimensions)
def check_array_inputs(self):
"""
Remove zero weights and invalid data points.
Invalid data points are those that contain NaN values, weights, or
coordinates, or zero weights.
Returns
-------
None
"""
valid = check_input_arrays(self.values, self.coordinates, self.weights)
if valid.all():
return
self.values = self.values[valid]
self.weights = self.weights[valid]
self.coordinates = self.coordinates[:, valid]
def initialize_iteration(self):
"""
Initialize the iteration for the number of current panels.
Creates array maps that represents N-dimensional data flattened to
a single dimension for fast access and the ability to pass these
structures to numba JIT compiled functions.
Returns
-------
None
"""
# The number of panels in which the approximation domain is divided.
self.panel_shape = self.n_knots - (2 * self.k1) + 1
self.n_panels = int(np.prod(self.panel_shape))
self.n_intervals = int(np.sum(self.panel_shape))
self.nk1 = self.n_knots - self.k1 # last valid knot index
self.n_coefficients = int(np.prod(self.nk1))
# Find the bandwidth of the observation matrix (amat)
# # Never change
# self.dimension_permutations = self.dimension_permutations[0][None]
self.bandwidth, self.permutation, self.change_order = (
calculate_minimum_bandwidth(
self.degrees, self.n_knots, self.dimension_permutations))
if self.change_order:
# Reordering dimensions creates the mapping indices.
self.reorder_dimensions(self.permutation)
else:
self.create_mapping_indices()
def create_mapping_indices(self):
"""
Mapping indices allow 1-D representation of N-D data.
Returns
-------
None
"""
# index mapping
self.panel_mapping, _, self.panel_steps = flat_index_mapping(
self.panel_shape)
self.knot_mapping, _, self.knot_steps = flat_index_mapping(self.nk1)
self.spline_mapping, _, self.spline_steps = flat_index_mapping(self.k1)
# find the panel and knot indices for the data values.
self.order_points()
    def reorder_dimensions(self, order):
        """
        Re-order the dimensions in various attributes.
        Occasionally it is beneficial to re-order the dimensions of the
        data structures such that a minimal bandwidth for the observation
        matrix is achievable.  This reduces the amount of processing time
        required to reach a solution.
        Parameters
        ----------
        order : numpy.ndarray (int)
            An array of shape (n_dimensions,) indicating the new order of the
            dimensions.  E.g., to re-order dimensions [1, 2, 3] to [3, 1, 2],
            order should be [1, 2, 0].
        Returns
        -------
        None
        """
        # Nothing to do for 1-D data or an identity permutation.
        if self.n_dimensions < 2:
            return
        if np.allclose(order, np.arange(self.n_dimensions)):
            return
        # Map each flat coefficient index in the new ordering back to its
        # flat index in the old ordering (c_order).
        old_mapping, _, old_steps = flat_index_mapping(self.nk1)
        new_mapping, _, new_steps = flat_index_mapping(self.nk1[order])
        reverse_order = np.argsort(order)
        c_order = np.sum(new_mapping[reverse_order] * old_steps[:, None],
                         axis=0)
        # The easy to reorder attributes...
        for attribute in ['coordinates', 'limits', 'degrees', 'k1', 'nk1',
                          'knots', 'n_knots', 'knot_estimate',
                          'knot_coordinates', 'knot_weights', 'panel_shape',
                          'splines']:
            value = getattr(self, attribute)
            if not isinstance(value, np.ndarray):
                continue
            # Only arrays whose leading axis is the dimension axis.
            if value.shape[0] != self.n_dimensions:
                continue
            setattr(self, attribute, value[order])
        self.n_knots = self.knot_size
        # The attributes that are dependent on a specific coefficient order...
        for attribute in ['coefficients', 'amat', 'beta']:
            value = getattr(self, attribute)
            if not isinstance(value, np.ndarray):
                continue
            # Zero-pad up to n_coefficients before permuting flat indices.
            diff = self.n_coefficients - value.shape[0]
            if diff > 0:
                padding = [(0, 0)] * value.ndim
                padding[0] = (0, diff)
                value = np.pad(value, padding, mode='constant')
            setattr(self, attribute, value[c_order])
        # Track the cumulative permutation so it can be inverted later.
        self.dimension_order = self.dimension_order[order].copy()
        self.create_mapping_indices()
def final_reorder(self):
"""
Re-order the dimensions of various arrays to match those of the inputs.
The dimensions of the various data structures may have changed during
the course of processing to reduce the bandwidth of the observation
matrix. This step correctly reorders all dimensions such that they
match those initially provided by the user.
Returns
-------
None
"""
self.reorder_dimensions(np.argsort(self.dimension_order))
def order_points(self):
"""
Sort the data points according to which panel they belong to.
This is based on the fporde Fortran fitpack routine.
Returns
-------
None
"""
self.knot_indices = find_knots(
coordinates=self.coordinates,
knots=self.knots,
valid_knot_start=self.degrees,
valid_knot_end=self.nk1)
self.panel_indices = self.knot_indices_to_panel_indices(
self.knot_indices)
start_indices, next_indices = create_ordering(
self.panel_indices, self.size)
self.n_panels = np.nonzero(start_indices != -1)[0][-1] + 1
self.start_indices = start_indices
self.next_indices = next_indices
def knot_indices_to_panel_indices(self, knot_indices):
"""
Convert knot indices to flat panel indices.
Parameters
----------
knot_indices : numpy.ndarray (int)
An array of shape (n_dimensions, n_knots).
Returns
-------
panel_indices : numpy.ndarray (int)
The flat 1-D panel indices for the knots.
"""
panel_indices = knot_indices - self.degrees[:, None]
panel_indices *= self.panel_steps[:, None]
return np.sum(panel_indices, axis=0)
def panel_indices_to_knot_indices(self, panel_indices):
"""
Convert panel indices to dimensional knot indices.
Parameters
----------
panel_indices : numpy.ndarray (int)
An array of shape (n_knots,).
Returns
-------
panel_indices : numpy.ndarray (int)
knot.
"""
return self.panel_mapping[:, panel_indices] + self.degrees[:, None]
def iterate(self):
"""
Iteratively determine the spline fit.
Calculates the splines and coefficients for the provided data. If
this cannot be accomplished before reaching the maximum number of
iterations, a smoothing spline will be calculated instead.
Returns
-------
None
"""
self.iteration = -1
for iteration in range(1, self.size + 1):
self.iteration = iteration
if not self.next_iteration():
break
else: # pragma: no cover
# This should never happen - but just in case...
self.determine_smoothing_spline()
return
if self.fix_knots:
return
if abs(self.smoothing_difference) <= self.accuracy:
return
if self.smoothing_difference < 0:
self.determine_smoothing_spline()
    def next_iteration(self):
        """
        Perform a single iteration of the spline fit.
        During each iteration, the observation matrix is built and solved.  An
        exit code will be generated in cases where no further modifications to
        the solution are appropriate.  Additional knots will also be added if
        required and possible.
        Returns
        -------
        continue_iterations : bool
            If `False` no further iterations should occur due to an acceptable
            solution being reached or due to a given limitation.  If `True`,
            subsequent iterations are deemed appropriate.
        """
        self.initialize_iteration()
        # Build the banded observation matrix (amat), right-hand side
        # (beta), spline basis values, and the residual sum of squares.
        amat, beta, splines, ssr = build_observation(
            coordinates=self.coordinates,
            values=self.values,
            weights=self.weights,
            n_coefficients=self.n_coefficients,
            bandwidth=self.bandwidth,
            degrees=self.degrees,
            knots=self.knots,
            knot_steps=self.knot_steps,
            start_indices=self.start_indices,
            next_indices=self.next_indices,
            panel_mapping=self.panel_mapping,
            spline_mapping=self.spline_mapping)
        self.amat = amat
        self.beta = beta
        self.splines = splines
        self.sum_square_residual = ssr
        # Solve for the coefficients; ssr_solve is the additional residual
        # arising from the (possibly rank-deficient) solution.
        self.coefficients, self.rank, ssr_solve = solve_observation(
            amat=self.amat, beta=self.beta, n_coefficients=self.n_coefficients,
            bandwidth=self.bandwidth, eps=self.eps)
        self.sum_square_residual += ssr_solve
        if self.exit_code == -2:
            # First solve: record the initial residual for later smoothing.
            self.initial_sum_square_residual = self.sum_square_residual
        if self.fix_knots:  # do not find knots
            self.exit_code = -self.rank
            return False
        # Test whether the lsq spline is acceptable
        self.smoothing_difference = (
            self.sum_square_residual - self.smoothing)
        if abs(self.smoothing_difference) <= self.accuracy:
            if self.sum_square_residual <= 0:
                # Perfect (interpolating) fit.
                self.exit_code = -1
                self.sum_square_residual = 0.0
            if self.n_coefficients != self.rank:
                # Rank-deficient solution; encode the rank in the exit code.
                self.exit_code = -self.rank
            return False
        # Test whether we can accept the choice of knots
        if self.smoothing_difference < 0:
            return False  # Do smoothing
        if self.n_coefficients > self.size:
            # More coefficients than data points: cannot add knots.
            self.exit_code = 4
            return False
        # Add a new knot
        self.exit_code = 0
        self.fit_knots()
        self.exit_code = add_knot(
            knot_weights=self.knot_weights,
            knot_coords=self.knot_coordinates,
            panel_shape=self.panel_shape,
            knots=self.knots,
            n_knots=self.n_knots,
            knot_estimate=self.knot_estimate,
            k1=self.k1)
        return self.exit_code == 0  # False if an error was encountered
def fit_knots(self):
"""
Derive fits at the current knot locations.
In addition to finding the value of the function at each knot,
the knot weights and weight normalized coordinates are also determined.
These a subsequently used to decide where a new knot should be placed.
Returns
-------
None
"""
fitted_knots, knot_weights, knot_coordinates = knot_fit(
splines=self.splines,
coefficients=self.coefficients,
start_indices=self.start_indices,
next_indices=self.next_indices,
panel_mapping=self.panel_mapping,
spline_mapping=self.spline_mapping,
knot_steps=self.knot_steps,
panel_shape=self.panel_shape,
k1=self.k1,
weights=self.weights,
values=self.values,
coordinates=self.coordinates)
self.fitted_knots = fitted_knots
self.knot_weights = knot_weights
self.knot_coordinates = knot_coordinates
    def determine_smoothing_spline(self, smoothing=None):
        """
        Smooth the interpolating spline to the required level.
        Parameters
        ----------
        smoothing : float, optional
            Used to specify an alternate smoothing factor.  Note that this
            should be very close to the original smoothing factor in order to
            succeed.
        Returns
        -------
        None
        """
        # Only treat the supplied smoothing as a change if it differs from
        # the current value.
        if smoothing is not None and smoothing != self.smoothing:
            change_smoothing = True
        else:
            change_smoothing = False
        # exit_code == -2 indicates no fit has been performed yet; there is
        # nothing to smooth unless a new smoothing factor was requested.
        if self.exit_code == -2 and not change_smoothing:
            return
        if change_smoothing:
            if smoothing < 0:
                raise ValueError(f"smoothing must be >= 0: {smoothing}")
            # Calculate this before setting new smoothing...
            self.smoothing = float(smoothing)
            self.smoothing_difference = (
                self.sum_square_residual - self.smoothing)
            self.accuracy = self.tolerance * self.smoothing
        # Delegate the iterative smoothing solve to the JIT-compiled
        # routine; it returns the updated coefficients, residual, exit
        # code, and knot fit values.
        coefficients, sq, exit_code, fitted_knots = determine_smoothing_spline(
            knots=self.knots,
            n_knots=self.n_knots,
            knot_estimate=self.knot_estimate,
            degrees=self.degrees,
            initial_sum_square_residual=self.initial_sum_square_residual,
            smoothing=self.smoothing,
            smoothing_difference=self.smoothing_difference,
            n_coefficients=self.n_coefficients,
            bandwidth=self.bandwidth,
            amat=self.amat,
            beta=self.beta,
            max_iteration=self.max_iteration,
            knot_steps=self.knot_steps,
            knot_mapping=self.knot_mapping,
            eps=self.eps,
            splines=self.splines,
            start_indices=self.start_indices,
            next_indices=self.next_indices,
            panel_mapping=self.panel_mapping,
            spline_mapping=self.spline_mapping,
            coordinates=self.coordinates,
            values=self.values,
            weights=self.weights,
            panel_shape=self.panel_shape,
            accuracy=self.accuracy)
        self.coefficients = coefficients
        self.sum_square_residual = sq
        self.exit_code = exit_code
        self.fitted_knots = fitted_knots
def __call__(self, *args):
    """
    Evaluate the spline at given coordinates.

    Parameters
    ----------
    args : tuple (numpy.ndarray) or numpy.ndarray (float)
        The coordinate arguments.  If supplied as a tuple, should be of
        length n_dimensions where each element of the tuple defines grid
        coordinates along the (x, y, z,...) dimensions.  Arbitrary
        coordinates may be supplied as an array of shape (n_dimensions, n)
        where n is the number of coordinates.  A singular coordinate may
        also be supplied as an array of shape (n_dimensions,).

    Returns
    -------
    fit : float or numpy.ndarray (float)
        An (x[n], x[n-1], ..., x[0]) shaped array of values if args was
        provided in tuple form, or an array of shape (n,) if a
        2-dimensional array of arbitrary coordinates were provided.
        If a single coordinate was provided, the resulting output will
        be a float.
    """
    if len(args) == 1:
        # In cases where an array of coordinates are supplied.
        fit_coordinates = np.asarray(np.atleast_2d(args[0]), dtype=float)
        if fit_coordinates.shape[0] != self.n_dimensions:
            if (fit_coordinates.shape[0] == 1
                    and fit_coordinates.shape[1] == self.n_dimensions):
                # A single (n_dimensions,) coordinate: transpose to the
                # (n_dimensions, 1) layout and remember to return a scalar.
                singular = True
                fit_coordinates = fit_coordinates.T
            else:
                raise ValueError("Coordinate shape[0] does not match "
                                 "number of spline dimensions.")
        else:
            singular = False
        self.grid_reduction = False
        self.fit_coordinates = fit_coordinates
        out_shape = fit_coordinates.shape
    elif len(args) != self.n_dimensions:
        raise ValueError("Number of arguments does not match number of "
                         "spline dimensions.")
    else:
        # In cases where grid coordinates are provided.
        self.grid_reduction = True
        # Expand the per-dimension grid vectors into a full
        # (n_dimensions, n_points) coordinate array (note the reversed
        # dimension order used throughout).
        self.fit_coordinates = np.vstack(
            [np.asarray(x, dtype=float).ravel() for x in
             np.meshgrid(*args[::-1], indexing='ij')[::-1]])
        out_shape = tuple([len(x) if hasattr(x, '__len__') else 1
                           for x in args[::-1]])
        # Every grid axis of length 1 -> collapse the result to a scalar.
        singular = np.allclose(out_shape, 1)
    # Evaluate the fitted spline at the requested coordinates.
    self.fit = perform_fit(
        coordinates=self.fit_coordinates,
        knots=self.knots,
        coefficients=self.coefficients,
        degrees=self.degrees,
        panel_mapping=self.panel_mapping,
        panel_steps=self.panel_steps,
        knot_steps=self.knot_steps,
        nk1=self.nk1,
        spline_mapping=self.spline_mapping,
        n_knots=self.n_knots)
    if self.grid_reduction:
        self.fit = self.fit.reshape(out_shape)
    if singular:
        self.fit = self.fit.ravel()[0]
    # Return a copy so callers cannot mutate the cached result.
    return self.fit.copy()
| [
"mclarke@sofia.usra.edu"
] | mclarke@sofia.usra.edu |
1aa9df92148ee3c66a03c148fee3e222dc4f2641 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_240/ch152_2020_04_13_19_48_06_345499.py | e688570e1002fdff7ff6b90dde697c99074708c1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | def verifica_preco(l, d, t):
return t[d[l]] | [
"you@example.com"
] | you@example.com |
275470b552dbd57a9f1a0a80b53b397c66d110f4 | 88bd9a59edb18ecc51b3c0939cd96fb5baebf18d | /tests/test_linkedlist.py | 777e69c1866af79b65e9f2b806460f6737d2df3c | [] | no_license | LennyBoyatzis/compsci | 244d540485155d5846a93a84399d18097925468e | 9c1ca2288090ad7c1c7151202bb28990760b1997 | refs/heads/master | 2022-07-20T22:25:42.549428 | 2019-09-05T05:26:18 | 2019-09-05T05:26:18 | 91,846,937 | 0 | 0 | null | 2022-06-21T21:45:37 | 2017-05-19T21:31:42 | Python | UTF-8 | Python | false | false | 305 | py | from datastructures.linkedlist import SingleLinkedList
def test_single_linked_list_init():
    """A freshly constructed list starts out empty: no head node."""
    fresh_list = SingleLinkedList()
    assert fresh_list.head is None
def test_push_front_method():
    """push_front on an empty list installs a head node."""
    target = SingleLinkedList()
    target.push_front(7)
    assert target.head is not None
| [
"lennyboyatzis@gmail.com"
] | lennyboyatzis@gmail.com |
8beaeecef14307357208d23d9892986cb71a2f27 | 93ceca6312bbbee196d57df3a6634bd66800bd26 | /hgsc_vcf/io.py | bc241057fc45947ee6620d435b769723830ab730 | [] | no_license | OpenGenomics/muse-tool | a9c058bddce466a6af64447a7fbdfc68a593f7c0 | cd59173c8e69c2a4f073ac2f1e07402835400b85 | refs/heads/master | 2021-01-24T05:25:28.170881 | 2019-09-23T07:50:54 | 2019-09-23T07:50:54 | 59,329,634 | 1 | 5 | null | 2018-02-10T17:53:16 | 2016-05-20T22:17:03 | Python | UTF-8 | Python | false | false | 4,494 | py |
import csv
from hgsc_vcf.metainfo import *
from collections import *
class Reader(object):
    """Iterator over the data records of a VCF file.

    The header is parsed on construction (``VCFHeader`` comes from the
    star-import of ``hgsc_vcf.metainfo``); each subsequent call to
    ``next()`` parses one tab-separated data line into an OrderedDict.
    NOTE: Python 2 style iterator protocol (``next``) and ``print``
    statements.
    """
    def __init__(self, fobj):
        # fobj: open file-like object positioned at the start of the VCF;
        # header lines are consumed here, leaving fobj at the first record.
        self.fobj = fobj
        self.header = VCFHeader()
        self.header.load(self.fobj)
        # One-record lookahead buffer used by peek()/take().
        self._next = None
    @staticmethod
    def parse_info_field(info):
        # Parse the semicolon-separated INFO column into an ordered map:
        # "KEY=a,b" -> {'KEY': ['a', 'b']}; bare keys become flags (True).
        infos = info.split(';')
        result = OrderedDict()
        for i in infos:
            if '=' in i:
                k, v = i.split('=',1)
                result[k] = v.split(',')
            else:
                result[i] = True # True indicates that the flag is active
        return result
    def peek(self):
        # Return the buffered record without consuming it.
        # NOTE(review): the buffer is only populated once take()/next()
        # has been called at least once.
        return self._next
    def take(self):
        # Return the buffered record and advance the lookahead by one;
        # at end of file the buffer simply stays on its last value.
        old = self._next
        try:
            self._next = self.next()
        except StopIteration:
            pass # swallow the error
        return old
    @staticmethod
    def parse_sample(format_keys, slist):
        # Pair FORMAT keys with one sample's values, splitting each value
        # on ',' to support multi-valued entries.
        return OrderedDict(zip(format_keys, [i.split(',') for i in slist]))
    def __iter__(self):
        return self
    def next(self):
        """Parse and return the next data line; raises StopIteration at EOF."""
        line = [c.strip() for c in self.fobj.readline().split('\t')]
        if len(line) < 1 or line[0] == '':
            self._next = None
            raise StopIteration
        try:
            record = OrderedDict()
            # The eight fixed VCF columns; a '.' QUAL is kept as-is rather
            # than converted to float.
            for k, v in (
                ('CHROM', line[0]),
                ('POS', int(line[1])),
                ('ID', line[2].split(';')),
                ('REF', line[3]),
                ('ALT', line[4].split(',')),
                ('QUAL', float(line[5]) if line[5] != '.' else '.'),
                ('FILTER', line[6].split(';')),
                ('INFO', Reader.parse_info_field(line[7]))
            ):
                record[k] = v
            # Optional FORMAT column followed by one column per sample.
            if len(line) > 8:
                record['FORMAT'] = line[8].split(':')
            if len(line) > 9:
                record['SAMPLES'] = OrderedDict(zip(
                    self.header.samples,
                    [Reader.parse_sample(record['FORMAT'], s.split(':')) for s in line[9:]]
                ))
            self._next = record
            return record
        except:
            # Dump the offending line for debugging, then re-raise.
            print line
            raise
class Writer(object):
    """Writes a VCF header and records (as produced by Reader) to a stream."""
    def __init__(self, fobj, header):
        # Fail fast on anything that is not parsed VCF metadata.
        assert isinstance(header, VCFHeader), "header must be a VCFHeader"
        self.header = header
        # Enforces the header-before-records ordering below.
        self.header_written = False
        self.fobj = fobj
    def write_header(self):
        """Write the meta-information lines and the #CHROM column line.

        Raises ValueError if called more than once.
        """
        if self.header_written:
            raise ValueError("Can't write the header twice")
        for h in self.header.headers:
            self.fobj.write(str(h) + '\n')
        header_cols = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']
        # FORMAT and per-sample columns are only emitted when samples exist.
        if len(self.header.samples) > 0:
            header_cols.append('FORMAT')
            for s in self.header.samples:
                header_cols.append(s)
        self.fobj.write('#' + '\t'.join(header_cols) + '\n')
        self.header_written = True
    def write_record(self, record):
        """Serialize one record dict as a single tab-separated VCF line.

        Raises ValueError if the header has not been written yet.
        """
        if not self.header_written:
            raise ValueError("Must write the header first")
        field_parts = []
        # Fixed columns: a joiner string means the value is a list to be
        # re-joined; None means the value is written with str().
        for k, joiner in (('CHROM', None), ('POS', None), ('ID', ';'), ('REF', None), ('ALT', ','), ('QUAL', None), ('FILTER', ';')):
            if joiner:
                try:
                    field_parts.append(joiner.join(record[k]))
                except:
                    print k, joiner, record[k]
                    raise
            else:
                field_parts.append(str(record[k]))
        # INFO is a bit trickier: list values become KEY=a,b while flag
        # entries (stored as True by the reader) are emitted as bare keys.
        info_parts = []
        for k, v in record['INFO'].items():
            if k == '.' and len(record['INFO']) > 1:
                continue # this is a leftover empty marker
            if isinstance(v, list):
                info_parts.append('%s=%s' % (k, ','.join(v)))
            else:
                info_parts.append(k)
        field_parts.append(';'.join(info_parts))
        # FORMAT plus one column per sample; each sample's values are
        # re-joined as comma lists in FORMAT key order.
        if len(self.header.samples) > 0:
            field_parts.append(':'.join(record['FORMAT']))
            for s in self.header.samples:
                sinfo = record['SAMPLES'][s]
                # sinfo is a dict (OrderedDict ideally)
                try:
                    field_parts.append(':'.join([','.join(sinfo[k]) for k in record['FORMAT']]))
                except:
                    print sinfo
                    raise
        self.fobj.write('\t'.join(field_parts) + '\n')
| [
"kellrott@gmail.com"
] | kellrott@gmail.com |
dbdbac578d96de37dc813bbbeee84b1773aabd04 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_30/ar_/test_artificial_128_Anscombe_Lag1Trend_30__100.py | 5582c252a3315ff23753b7b0b1494b978e138ecb | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 270 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
22857c0d3ef535f2ee9b813146680f4fb73bde70 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/11/usersdata/64/5178/submittedfiles/jogo.py | 5dd413f8c66a659270296c78990142bc4fae2baa | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
Cv = input('Digite o número de vitótias do Cormengo: ')
Ce = input('Digite o número de empates do Cormengo: ')
Cs = input('Digite o saldo de gols: ')
Fv = input('Digite o número de vitótias do Flaminthians: ')
Fe = input('Digite o número de empates do Flaminthians: ')
Fs = input('Digite o saldo de gols: ')
if Cv > Fv:
print "'C'"
elif Cv < Fv:
print "'F'"
elif Cv == Fv:
if Cs > Fs:
print "'C'"
elif Cs < Fs:
print "'F'"
elif Cs == Fs:
if Ce > Fe:
print "'C'"
elif Ce < Fe:
print "'V'"
else:
print "'='"
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8a552617657674743ae61f13d460d8618ba37d1e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_frenzy.py | 5638f60ebd7f097df0883fb40ad7ee40d162e391 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py |
#calss header
class _FRENZY():
def __init__(self,):
self.name = "FRENZY"
self.definitions = [u'(an example of) uncontrolled and excited behaviour or emotion that is sometimes violent: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
173535ac75a264211a5a21d5ad8aa98a968678e0 | e2a9c92dc7a4b2e73f2bcc1a2d507b591b75f814 | /Many-Tricks-for-ImageCalssification/model/__init__.py | abb0584a4cdb1e1724c348121213c2b82920356c | [] | no_license | ForrestPi/Tricks | f5f74f47e6e6ec24a5a544837eba35c5ce8e773e | d874318d4662c0192bcd147e1aad1e92cd3ea8b6 | refs/heads/master | 2022-12-08T18:06:24.390238 | 2020-10-20T04:01:04 | 2020-10-20T04:01:04 | 220,658,244 | 4 | 2 | null | 2022-12-08T03:44:45 | 2019-11-09T14:59:23 | Jupyter Notebook | UTF-8 | Python | false | false | 580 | py | # -*- encoding: utf-8 -*-
'''
@File : __init__.py.py
@Contact : whut.hexin@foxmail.com
@License : (C)Copyright 2017-2018, HeXin
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2020/2/21 10:18 xin 1.0 None
'''
from .baseline import BaseLine
def build_model(cfg):
    """Build and return the model instance described by ``cfg``.

    Parameters
    ----------
    cfg : config object
        Must expose ``cfg.MODEL.NAME`` plus the BaseLine constructor
        arguments used below.

    Returns
    -------
    The constructed model.

    Raises
    ------
    ValueError
        If ``cfg.MODEL.NAME`` is not a recognised model name.  (Fixed:
        previously an unknown name fell through to ``return model`` and
        raised ``UnboundLocalError`` instead.)
    """
    if cfg.MODEL.NAME == "baseline":
        model = BaseLine(cfg.MODEL.N_CHANNEL, cfg.MODEL.N_CLASS, cfg.MODEL.BACKBONE,
                         cfg.MODEL.DROPOUT, cfg.MODEL.USE_NONLOCAL, cfg.MODEL.USE_SCSE,
                         cfg.MODEL.USE_ATTENTION)
        return model
    raise ValueError("unknown model name: %r" % cfg.MODEL.NAME)
| [
"forrest_zhu@foxmail.com"
] | forrest_zhu@foxmail.com |
68551acd8cab934463307fbf762962f323034d23 | b4e072d0759775836155ae97e06f6d1f0fce7500 | /dasss/__init__.py | 7722037351c57feb8d85a4b22e638d7da7153ed5 | [] | no_license | David-Hakobyan1/_different | 99abdfd62db8224dcc1a944537f43e3d83d34921 | 7c57f626cac7acfb2666d28f1a623ed60f8020d4 | refs/heads/master | 2023-06-18T00:27:18.745467 | 2021-07-19T09:01:52 | 2021-07-19T09:01:52 | 387,401,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import os
from flask import Flask
# fixed: the package is ``flask_sqlalchemy`` (was misspelled ``flask_sqalchemy``,
# which raises ImportError at startup).
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager

login_manager = LoginManager()

app = Flask(__name__)
# NOTE(review): hard-coded secret key; load from an environment variable
# before deploying this app.
app.config['SECRET_KEY'] = 'fhuiefh236374623ewhfwwhfeu'

# Absolute path of the package directory, used to place the SQLite file.
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# fixed: ``os.path`` is a module and is not callable; use ``os.path.join``
# to build the database path.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')

db = SQLAlchemy(app)
Migrate(app, db)

login_manager.init_app(app)
login_manager.login_view = 'login'
| [
"my@mail.ru"
] | my@mail.ru |
ab583ceecd8854228670358e21d0f10d0cbf031d | 114b61513733083555924fc8ab347335e10471ae | /stackone/stackone/model/LDAPGroupManager.py | 0a863ce54ec5c114a96c1a39e5db5fec2260546d | [] | no_license | smarkm/ovm | 6e3bea19816affdf919cbd0aa81688e6c56e7565 | cd30ad5926f933e6723805d380e57c638ee46bac | refs/heads/master | 2021-01-21T04:04:28.637901 | 2015-08-31T03:05:03 | 2015-08-31T03:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,081 | py | from stackone.core.utils.utils import to_unicode, to_str, get_ldap_module
import tg
import logging
LOGGER = logging.getLogger('stackone.model')
from pprint import pprint
LDAP_Groups_Dict = {}
class LDAPBaseGroup():
    """Shared behaviour for the LDAP group flavours registered in
    ``LDAP_Groups_Dict``.

    The attribute names used for lookups come from the TurboGears config
    (``user_key`` / ``group_key`` / ``email_key``).  NOTE: Python 2 code
    (uses ``dict.has_key``).
    """
    def __init__(self):
        self.ldap = get_ldap_module()
        self.SCOPE_BASE = self.ldap.SCOPE_BASE
        # Configurable schema attribute names with sensible defaults.
        self.user_key = tg.config.get('user_key', 'uid')
        self.group_key = tg.config.get('group_key', 'groupMembership')
        self.email_key = tg.config.get('email_key', 'email')
    def get_all_user(self, ldapcon, group_base_dn):
        # Public wrapper; concrete subclasses implement _get_all_user.
        return self._get_all_user(ldapcon, group_base_dn)
    def get_user_by_dn(self, ldapcon, user_dn, scope=None):
        """Best-effort lookup of a single entry; returns None on any error."""
        try:
            if not scope:
                scope = self.SCOPE_BASE
            results = ldapcon.search_s(user_dn,scope)
            return results
        except Exception as e:
            # Deliberate: a failed lookup is reported to callers as None.
            pass
    def get_user_from_user_dn(self, dn):
        """Extract the user-name component (``user_key``) from a DN string."""
        # NOTE(review): assumes every RDN is exactly "attr=value"; a DN
        # component without '=' would break the dict() call -- confirm inputs.
        dn_dict = dict([item.split('=') for item in dn.split(',')])
        if dn_dict.has_key(self.user_key):
            if not dn_dict.get(self.user_key):
                LOGGER.error('%s is None in user DN: %s' % (self.user_key, dn))
            return dn_dict.get(self.user_key)
        LOGGER.error('Can not find %s in user DN: %s' % (self.user_key, dn))
class LDAPGroupOfNames(LDAPBaseGroup):
    """Group handling for the LDAP ``groupOfNames`` object class, whose
    members are DNs listed in the group entry's ``member`` attribute."""

    MEMBER_ATTR = 'member'

    def get_user_groups(self, user_details, group_key):
        """Return the parsed group/user names found in a user entry's
        ``group_key`` attribute."""
        group_details = self._get_user_groups(user_details, group_key)
        group_names = self.parse_group(group_details)
        return group_names

    def parse_group(self, group_details):
        """Public wrapper around :meth:`_parse_group`."""
        group_names = self._parse_group(group_details)
        return group_names

    def _get_user_groups(self, user_details, group_key):
        # Best effort: returns None when the attribute lookup fails.
        try:
            group_details = user_details.get(group_key)
            return group_details
        except Exception as e:
            # fixed: was a bare ``print`` statement; log through LOGGER
            # like the rest of the module.
            LOGGER.error(e)

    def _parse_group(self, group_details):
        """Extract names from DN-style strings: bare components are kept
        as-is, and the value of any ``user_key`` RDN is collected."""
        l = []
        try:
            for gp_name_str in group_details:
                for item in gp_name_str.split(','):
                    splt = item.split('=')
                    if len(splt) == 1:
                        l.append(splt[0])
                    if splt[0] == self.user_key:
                        l.append(splt[1])
        except Exception as ex:
            raise ex
        return l

    def _get_all_user(self, ldapcon, group_base_dn, scope=None):
        """Return a list of member dicts (user name, groups, email) for
        every DN listed in the group entry's ``member`` attribute."""
        try:
            if not scope:
                scope = self.SCOPE_BASE
            result = []
            result_list = []
            result_data_dict = {}
            try:
                result = ldapcon.search_s(group_base_dn, scope)
            except Exception as e:
                # Search failure is handled below as "group not found".
                LOGGER.error(e)
            if not len(result):
                LOGGER.info('Could not find group: %s' % group_base_dn)
            else:
                g_dn, result_data_dict = result[0]
                users = result_data_dict.get(self.MEMBER_ATTR)
                LOGGER.info('Members of Group:%s from LDAP: ===== %s \n' % (group_base_dn, users))
                for user in users:
                    user_info = self.get_user_by_dn(ldapcon, user)
                    LOGGER.info('Info of User:%s ==== %s' % (user, user_info))
                    if not user_info:
                        LOGGER.info('Could not find user: %s' % user)
                        # fixed: previously fell through to user_info[0]
                        # and crashed on a failed (None) lookup.
                        continue
                    u_dn, usr_info = user_info[0]
                    LOGGER.info('DN and Info of User:%s ==== DN:%s ==== Info:%s' % (user, u_dn, usr_info))
                    # fixed: was ``self.get_user_from_dn`` which does not
                    # exist; the base class defines ``get_user_from_user_dn``.
                    res_dict = {self.user_key: self.get_user_from_user_dn(u_dn),
                                self.group_key: usr_info.get(self.group_key),
                                self.email_key: usr_info.get(self.email_key)}
                    result_list.append(res_dict)
            LOGGER.info('Members of Group:%s after Parsing: ===== %s \n' % (group_base_dn, result_list))
            return result_list
        except Exception as e:
            import traceback
            traceback.print_exc()
            LOGGER.error(e)
            raise e
LDAP_Groups_Dict['groupOfNames'] = LDAPGroupOfNames
| [
"18614072558@163.com"
] | 18614072558@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.