blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
43866c23e7957b764f0b579688d0275579b2fd44 | ef2e2a40c9e03173ee936f6672a90a794db5b2a0 | /app/search.py | 5dbfba175888cd77005d66737abc91a5e3083ee9 | [] | no_license | crazynayan/flask-tutorial | fc2fbc3bd7e7f30d48dd2abce5ea05ef3168fc6b | 6e51323bf086cadd39a4860388e07b047b8c6fbe | refs/heads/master | 2022-12-13T23:13:08.832155 | 2019-10-30T12:16:54 | 2019-10-30T12:16:54 | 182,255,340 | 0 | 0 | null | 2022-12-08T05:01:38 | 2019-04-19T11:36:10 | Python | UTF-8 | Python | false | false | 969 | py | from flask import current_app
def add_to_index(index, model):
    """Index every field listed in *model*.__searchable__ in Elasticsearch.

    Silently does nothing when the application has no Elasticsearch client
    configured (``current_app.elasticsearch`` is falsy).
    """
    if not current_app.elasticsearch:
        return
    document = {field: getattr(model, field) for field in model.__searchable__}
    current_app.elasticsearch.index(index=index, id=model.id, body=document)
def remove_from_index(index, model):
    """Delete *model*'s document from the given Elasticsearch index.

    No-op when no Elasticsearch client is configured.
    """
    if not current_app.elasticsearch:
        return
    # BUG FIX: was "current_app.elaseticsearch" (typo), which raised
    # AttributeError whenever a client was actually configured.
    current_app.elasticsearch.delete(index=index, id=model.id)
def query_index(index, query, page, per_page):
    """Full-text search over all fields of *index*.

    :param query: free-text query string (multi_match over all fields).
    :param page: 1-based page number.
    :param per_page: results per page.
    :return: tuple ``(ids, total)`` -- list of matching document ids as ints,
        and the total number of hits.
    """
    if not current_app.elasticsearch:
        # BUG FIX: was a bare "return" (i.e. None); callers unpack two
        # values, so an unconfigured client must yield an empty result pair.
        return [], 0
    query_body = {
        'query': {
            'multi_match': {
                'query': query,
                'fields': ['*'],
            },
        },
        # pagination window
        'from': (page - 1) * per_page,
        'size': per_page,
    }
    search = current_app.elasticsearch.search(index=index, body=query_body)
    ids = [int(hit['_id']) for hit in search['hits']['hits']]
    return ids, search['hits']['total']['value']
"nayan@crazyideas.co.in"
] | nayan@crazyideas.co.in |
7b98acc53d76f81399ffb120b7e715a6c5608d0a | 00c9701cfc7b1b0bff6a72319d02cd59dc1eca9c | /ros_ws/src/regulation_imugps/src/regulation_from_err_alpha_dist.py | 146f95c8f23cd620b7aa61a5194cd0db3ac032a3 | [] | no_license | EnstaBretagneClubRobo/GuerledanDamScanning | ae80340556898ec6a39395e11975e21272c16c31 | 4309412f0dc883db3e5e4415539f38b5baaa762d | refs/heads/master | 2021-06-14T16:11:16.907465 | 2017-03-03T14:10:51 | 2017-03-03T14:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,085 | py | #!/usr/bin/env python
"""
This regulateur is just a template and publish a forward command only
"""
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Float32
from math import atan, pi, tan
def update_err_d(message):
    """ROS subscriber callback: cache the latest distance error in eD."""
    global eD
    eD = message.data
def update_err_cap(message):
    """ROS subscriber callback: cache the latest heading error in ecap."""
    global ecap
    ecap = message.data
# --- Node setup: heading regulator fed by distance/heading error topics ---
rospy.init_node('regulation_cap')
cmd_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
imu_sub = rospy.Subscriber('err_d', Float32, update_err_d)
gps_sub = rospy.Subscriber('err_cap', Float32, update_err_cap)
# heading and distance errors (updated by the subscriber callbacks above)
ecap, eD = 0, 0
K = -3 / pi # proportional gain, rad/s
radius = 5 # width of effect of the line-following corridor
v = -5.0 # todo: find out why the forward speed must be negative
cmd = Twist()
rate = rospy.Rate(20) # a sufficiently high control frequency is required
while not rospy.is_shutdown():
    # error = heading (w.r.t. the wall) - desired heading
    err = ecap - atan(eD / radius)
    err = err / 2 # map from [-pi,pi] down to [-pi/2,pi/2]
    cmd.angular.z = K * atan(tan((err)))
    print ecap, atan(eD)  # Python 2 print statement (this file targets py2)
    cmd.linear.x = v
    cmd_pub.publish(cmd)
    rate.sleep()
| [
"ejalaa12@gmail.com"
] | ejalaa12@gmail.com |
264248272a1c358a4acd5d74b1c03580e66eaedb | 7807d8d9d109a3e272fffed91bf841201da39256 | /trans_ITP1_8_A/tsuru_aji_ITP1_8_A_kotonoha.py | 235487016630d8bb7d2384be3761ff1a3e9e983b | [] | no_license | y-akinobu/AOJ_to_Kotonoha | 0e8df43393964fcdd5df06c75545091bd6c0c2e2 | 5a694a55a3d85e3fbc4a07b57edc4374556db9a1 | refs/heads/main | 2023-02-05T15:33:16.581177 | 2020-12-30T16:14:44 | 2020-12-30T16:14:44 | 325,524,216 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # strと入力された文字列の英大文字を英小文字、英小文字を英大文字に変換した文字列を出力する
print(str.swapcase(input())) | [
"sx2_vn_yuka@outlook.jp"
] | sx2_vn_yuka@outlook.jp |
ebbdd594ec1e0b143441c4a911fcf81481ed0acf | 4ae1879c21a4193da3df6ae740674ee0655a8beb | /drawDeviation.py | a8b9efe078feb123768f809991f2275a25cac77e | [] | no_license | cynerelee/collision-avoidance | 68bccce1a54009ce7b3bee1bf2adc571b6cde956 | c269b7040b68b91eb5e7e1134feb8363da1091f0 | refs/heads/master | 2023-07-09T02:40:23.760176 | 2023-06-24T03:44:02 | 2023-06-24T03:44:02 | 281,842,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import xlrd #读取excel的库
x=np.arange(0, 2.01,0.01)
#print(x)
#print(x.shape)
data1 = xlrd.open_workbook("deviation_k1.xlsx")
table1 = data1.sheet_by_index(0)
line=table1.col_values(0)
base=np.array(line)
base=base.T
resArray=[] #先声明一个空list
data = xlrd.open_workbook("deviation_k3.xlsx") #读取文件
table = data.sheet_by_index(0) #按索引获取工作表,0就是工作表1
for i in range(table.nrows): #table.nrows表示总行数
line=table.row_values(i) #读取每行数据,保存在line里面,line是list
resArray.append(line) #将line加入到resArray中,resArray是二维list
resArray=np.array(resArray) #将resArray从二维list变成数组
font1 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size':15,
}
font2 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size':10,
}
color=['#377eb8', '#ff7f00', '#4daf4a','#f781bf', '#a65628', '#984ea3','#999999', '#e41a1c']
alpha=0.6
figure, ax = plt.subplots()
# 设置matplotlib正常显示中文和负号
matplotlib.rcParams['font.sans-serif']=['SimHei'] # 用黑体显示中文
matplotlib.rcParams['axes.unicode_minus']=False # 正常显示负号
# 显示横轴标签
plt.xlabel("Time(s)",font1)
# 显示纵轴标签
plt.ylabel("Deviation(cm)",font1)
plt.axis([0, 2, 0, 6])
plt.tick_params(labelsize=15)
plt.xticks([0,0.2,0.4,0.6,0.8,1,1.2,1.4,1.6,1.8,2])
plt.yticks([0,1,2,3,4,5,6])
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
# 显示图标题
#plt.title("频数/频率分布直方图")
#plt.legend(loc = 'upper right',prop=font2)
plt.plot(x, base,alpha=0.6,label='Baseline',color=color[0],linewidth=2)
plt.plot(x, resArray[:,1],alpha=0.6,label='K2=0.1',color=color[1],linewidth=2)
plt.plot(x, resArray[:,2],alpha=0.6,label='K2=1',color=color[2],linewidth=2)
plt.plot(x, resArray[:,3],alpha=0.6,label='K2=5',color=color[3],linewidth=2)
plt.plot(x, resArray[:,4],alpha=0.6,label='K2=10',color=color[4],linewidth=2)
plt.legend(loc = 0,prop=font2)
plt.savefig('./Deviation_k3.png')
plt.show() | [
"l"
] | l |
3a2127cf485882ad716605f78202ae8536f46498 | f453897fccafc2278f959010c6bad52c7802a2fe | /sidebarUpdate.py | ec7becd648760176a127d1c08e6db75bb5c76b28 | [] | no_license | ColinHaley/Python | 4977c325c13652251386e5a5e3f65d55a3f13a07 | bbef9fc8c4e1d31fe5e1142cf7506fc4738295dd | refs/heads/master | 2021-01-25T08:28:17.231365 | 2018-05-09T21:46:32 | 2018-05-09T21:46:32 | 42,951,804 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,866 | py | """
__author__ = 'Colin Haley, aka Kazra'
__purpose__ = 'Update the /r/asov sidebar with online players from asov Vanilla'
Steps:
1. Create upload variables: [string]CSS, [string]Sidebar
2. Get current players
a. If 0:
i. Clear Sidebar Playerheads
ii. Set to "No Players Online."
ii. Exit()
b. If >= 1:
i. For each player online:
- If their img exists in /data && newer than GETDATE()-3:
1. Add Strings to CSS and Sidebar variables.
- If not:
1. If older than GETDATE()-7, delete old playerhead icon.
2. wget or python equivalent to ~/srv/_config/data/ their player head icon
3. rename from 32.png to playername.png
4. Upload image
- Update Users table with:
1. UPDATE Users set Timestamp = NOW() WHERE Username = 'playername'
# Other Resources
http://cravatar.us/head/__playername__/32.png
Even unclaimed names return a 'Steve' head, no error handling needed? Dangerzone
https://www.reddit.com/dev/api
#POST_api_upload_sr_img
#POST_api_delete_sr_img
https://github.com/reddit/reddit/wiki/OAuth2
# Mandatory External Libraries
Praw: https://gist.github.com/shrayasr/100005943
Mcstatus: https://github.com/Dinnerbone/mcstatus
"""
# Imports
import praw
import time
import datetime
from mcstatus import MinecraftServer
import urllib
#Static Variables
# NOTE(review): these are placeholder credentials/settings; fill in real
# values before running the bot.
__clientID__ = 'redditClientID'
__secretkey__ = 'redditSecretKey'
__subreddit__ = 'subredditName'
__username__ = 'redditUsername'
__password__ = 'redditPassword'
__serveraddress__ = 'minecraftAddress'
# BUG FIX: the assignment had no right-hand side ("__serverport__ = "),
# which is a SyntaxError.  25565 is the Minecraft default query port;
# replace with the real RCON/query port of the server.
__serverport__ = 25565
__datadirectory__ = '/dir/to/location/to/store/playerheads'
# Section to display playerheads within on the sidebar on reddit.
__sidebarheader__ = '[](/STARTONLINEPLAYERS)'
__sidebarfooter__ = '[](/ENDONLINEPLAYERS)'
# Header for CSS to update playerheads online.
__cssheader__ = '/* END ONLINE PLAYER HEADS DO NOT DELETE OR MOVE FROM HEADER POSITION */'
def generate_css(playerName):
    """Return the subreddit-stylesheet rule that renders *playerName*'s head.

    Produces: a[href="/name"]:after { content: url(%%name%%) }
    """
    # BUG FIX: the original string omitted the closing double quote after
    # the player name, producing invalid CSS (a[href="/name]:after ...).
    return 'a[href="/' + playerName + '"]:after { content: url(%%' + playerName + '%%) }'
def generate_sidebar(playerName):
    """Return the sidebar markdown token for *playerName*: [](/name)."""
    return '[](/{0})'.format(playerName)
def clear_sidebar():
    """Remove everything between the playerhead markers in the sidebar.

    Keeps the header and footer markers themselves and deletes the player
    links between them.  Requires the global praw connection ``r``.
    """
    # NOTE(review): praw's get_settings returns lowercase keys; the original
    # used 'Description', which would raise KeyError -- confirm against the
    # praw version in use.
    sidebar = r.get_settings(__subreddit__)['description']
    # BUG FIX: the original slice expression was missing the closing "]" of
    # the first slice and the "+" joining the two halves (SyntaxError).
    head = sidebar[:sidebar.index(__sidebarheader__) + len(__sidebarheader__)]
    tail = sidebar[sidebar.index(__sidebarfooter__):]
    clearString = head + tail
    r.update_settings(r.get_subreddit(__subreddit__), description = clearString)
def get_css():
    """Fetch and return the subreddit's current stylesheet via praw."""
    return r.get_stylesheet(__subreddit__)
def clear_css():
    """Strip the auto-generated playerhead rules from the stylesheet.

    Requires the global praw connection ``r``.
    """
    # BUG FIX: the original call "r.set_stylesheet(__subreddit__,
    # [__header__:])" was a SyntaxError and referenced an undefined name
    # (the marker constant in this file is __cssheader__).
    subCSS = get_css()
    # NOTE(review): assumes the generated playerhead rules live BEFORE the
    # marker comment, so everything from the marker onward is kept --
    # confirm against the live stylesheet layout.
    r.set_stylesheet(__subreddit__, subCSS[subCSS.index(__cssheader__):])
def upload_css_to_reddit(stringCSS):
    """Push *stringCSS* (the joined generate_css() rules) as the stylesheet."""
    r.set_stylesheet(__subreddit__, stringCSS)
def upload_sidebar_to_reddit(stringSidebar):
    """Write *stringSidebar* into the subreddit sidebar (description).

    BUG FIX: the original ``def`` had no body at all, which is a
    SyntaxError that broke the whole module.

    NOTE(review): the body mirrors clear_sidebar()'s update call -- confirm
    this is the intended praw endpoint; the caller is expected to pass the
    full sidebar text including the joined generate_sidebar() tokens.
    """
    # takes .join() list of generateSidebar(playername) as a string for upload
    r.update_settings(r.get_subreddit(__subreddit__), description = stringSidebar)
def getCurrentPlayers():
    """Query the Minecraft server for the online player list.

    :return: dict ``{'Count': <online count>, 'Players': <list of names>}``.

    Exits the whole script when the server cannot be queried, matching the
    original best-effort behaviour.
    """
    server = MinecraftServer(__serveraddress__, __serverport__)
    try:
        query = server.query()
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        exit()
    return {'Count': query.players.online, 'Players': query.players.names}
def download_playerhead(playername):
    """Fetch *playername*'s 32px head icon from cravatar into the data dir.

    Saves the PNG as <__datadirectory__><playername>.png.
    """
    # BUG FIX: the original defined "downloadPath"/"savepath" but called
    # urlretrieve with "savePath" (undefined name -> NameError at runtime).
    download_url = 'http://cravatar.eu/head/' + playername + '/32.png'
    save_path = __datadirectory__ + playername + '.png'
    urllib.urlretrieve(download_url, save_path)
def upload_image_to_reddit(playername):
    """Upload the cached head icon for *playername* to the subreddit."""
    image_path = __datadirectory__ + playername + '.png'
    r.upload_image(__subreddit__, image_path, playername)
def delete_image_from_reddit(playername):
    """Remove *playername*'s head image from the subreddit stylesheet images."""
    r.delete_image(__subreddit__, name=playername, header=False)
def parse_players_from_sidebar():
    """Query online players and refresh the sidebar/CSS accordingly.

    BUG FIX: the original ``def`` line and the ``else`` branch were both
    missing their colons (SyntaxErrors) and the success branch had no
    statements.  The control flow is preserved; the unfinished branch is
    marked with an explicit TODO.
    """
    # Get the players online from the server via RCON
    # if unsure of the address use MinecraftServer.lookup()
    server = MinecraftServer(__serveraddress__, __serverport__)
    try:
        query = server.query()
        if query.players.online > 0:
            # TODO: build and upload the playerhead sidebar/CSS entries
            pass
        else:
            # set sidebar to 'No Players Online'
            clear_css()
            clear_sidebar()
    except Exception:  # was a bare "except:"
        exit()
#Define the Praw useragent
# NOTE(review): the praw connection "r" is never created in this file even
# though every helper above relies on it; this tail section appears
# unfinished and will raise NameError as written.
settings = r.get_settings(__subreddit__)
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
8ad0f2dc3caaa8c82153abee8dbfd4ae141503a1 | 9321d3460ffbbb6cd7917b2bac77ce8321e04737 | /contributions/Legacy/JPlus/Old/EDMFunctions with infiltration not working.py | 0e4abdb725c1f2e288370fefe3c7374edd491e9d | [
"MIT"
] | permissive | muehleisen/CEAforArcGIS | b820d837cd5373b95851b4e5dda609d69f054b97 | b6aeca5a9d70835381625a9162d5695714e1a02b | refs/heads/master | 2021-01-11T21:24:18.482264 | 2017-01-06T05:28:48 | 2017-01-06T05:28:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73,097 | py | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ####MODULES
# <codecell>
from __future__ import division
import arcpy
from arcpy import sa
import sys,os
import pandas as pd
import datetime
import jdcal
import numpy as np
import math
import sympy as sp
import scipy
import scipy.optimize
sys.path.append("C:\console\sandbox")
from pyGDsandbox.dataIO import df2dbf, dbf2df
arcpy.env.overwriteOutput = True
arcpy.CheckOutExtension("spatial")
arcpy.CheckOutExtension("3D")
# <markdowncell>
# ##RADIATION MODEL
# <markdowncell>
# ###1. Calculation of hourly radiation in a day
# <codecell>
def CalcRadiation(day, CQ_name, DEMfinal, Observers, T_G_day, latitude, locationtemp1):
    """Run the ArcGIS Points Solar Radiation tool for one day of the year.

    Computes hourly solar radiation (1 h interval over hours 0-24 of *day*)
    at every observer point over the burned DEM, using the day's diffuse
    proportion and transmittivity from *T_G_day*, and writes the result to
    <locationtemp1>\\<CQ_name>\\radiation\\Day_<day>.shp.

    :param day: day of the year (1-365).
    :param T_G_day: DataFrame with per-day 'diff' (diffuse proportion) and
        'ttr' (transmittivity) columns, positionally indexed from 0.
    :return: the ArcPy message log of the run.
    """
    # Local Variables
    Latitude = str(latitude)
    skySize = '3000'
    dayInterval = '1'
    hourInterval = '1'
    calcDirections = '32'
    zenithDivisions = '1500'
    azimuthDivisions = '160'
    diffuseProp = str(T_G_day.loc[day-1,'diff'])
    transmittivity = str(T_G_day.loc[day-1,'ttr'])
    heightoffset = '5'  # observers are evaluated 5 m above the surface
    global_radiation = locationtemp1+'\\'+CQ_name+'\\'+'radiation'+'\\'+'Day_'+str(day)+'.shp'
    timeConfig = 'WithinDay '+str(day)+', 0, 24'
    #Run the extension of arcgis
    arcpy.gp.PointsSolarRadiation_sa(DEMfinal, Observers, global_radiation, heightoffset,
                                     Latitude, skySize, timeConfig, dayInterval, hourInterval, "INTERVAL", "1", "FROM_DEM",
                                     calcDirections, zenithDivisions, azimuthDivisions, "STANDARD_OVERCAST_SKY",
                                     diffuseProp, transmittivity, "#", "#", "#")
    return arcpy.GetMessages()
# <markdowncell>
# 1.1 Sub-function to calculate radiation non-sunshinehours
# <codecell>
def calc_radiationday(day, CQ_name, T_G_day, locationtemp1):
    """Expand one day's sunshine-hour radiation table to a 24 h layout.

    Reads Day_<day>.dbf (one column per sunny hour, produced by
    CalcRadiation), shifts the values to their true clock hours using the
    day's sunrise time, and renames the hourly columns to the running hour
    of the year (T1..T8760) so the daily tables can later be concatenated.

    :return: copy of the day's table with 24 hourly columns plus 'ID'.
    """
    radiation_sunnyhours = dbf2df(locationtemp1+'\\'+CQ_name+'\\'+'radiation'+'\\'+'Day_'+str(day)+'.dbf')
    #Obtain the number of points modeled to do the iterations
    radiation_sunnyhours['ID'] = 0
    counter = radiation_sunnyhours.ID.count()
    value = counter+1
    radiation_sunnyhours['ID'] = range(1, value)
    # Table with empty values with the same range as the points.
    Table = pd.DataFrame.copy(radiation_sunnyhours)
    Names = ['T0','T1','T2','T3','T4','T5','T6','T7','T8','T9','T10','T11','T12','T13','T14','T15','T16','T17','T18','T19','T20','T21','T22','T23']
    for Name in Names:
        Table[Name]= 0
    #Counter of Columns in the Initial Table (number of sunny hours in the day)
    Counter = radiation_sunnyhours.count(1)
    Value = Counter[0]-1
    #Condition to take into account daylight-saving time in Switzerland, as the radiation data in ArcGIS is calculated for 2013.
    # NOTE(review): D is computed here but never used below -- confirm whether
    # the DST shift was meant to be applied to Sunrise_time.
    if 90 <= day <300:
        D = 1
    else:
        D = 0
    # Calculation of Sunrise time
    Sunrise_time = T_G_day.loc[day-1,'sunrise']
    # Shift each sunny-hour column to its true clock hour
    for time in range(Value):
        Hour = int(Sunrise_time)+ int(time)
        Table['T'+str(Hour)] = radiation_sunnyhours['T'+str(time)]
    #rename the table for every T to get in 1 to 8760 hours.
    if day == 1:
        name = 1
    else:
        name = int(day-1)*24+1
    Table.rename(columns={'T0':'T'+str(name),'T1':'T'+str(name+1),'T2':'T'+str(name+2),'T3':'T'+str(name+3),'T4':'T'+str(name+4),
                          'T5':'T'+str(name+5),'T6':'T'+str(name+6),'T7':'T'+str(name+7),'T8':'T'+str(name+8),'T9':'T'+str(name+9),
                          'T10':'T'+str(name+10),'T11':'T'+str(name+11),'T12':'T'+str(name+12),'T13':'T'+str(name+13),'T14':'T'+str(name+14),
                          'T15':'T'+str(name+15),'T16':'T'+str(name+16),'T17':'T'+str(name+17),'T18':'T'+str(name+18),'T19':'T'+str(name+19),
                          'T20':'T'+str(name+20),'T21':'T'+str(name+21),'T22':'T'+str(name+22),'T23':'T'+str(name+23),'ID':'ID'},inplace=True)
    return Table.copy()
# <markdowncell>
# ###2. Burn buildings into DEM
# <codecell>
def Burn(Buildings,DEM,DEMfinal,locationtemp1, locationtemp2, database, DEM_extent = '676682, 218586, 684612, 229286'):
    """Burn the building footprints (with their heights) into the terrain DEM.

    Rasterizes the *Buildings* features at 0.5 m resolution using their
    'height' field, replaces NoData cells with 0 and adds the result on top
    of *DEM*, saving the combined raster as *DEMfinal*.

    NOTE(review): *locationtemp2* and *database* are accepted but never used.

    :param DEM_extent: processing extent 'xmin, ymin, xmax, ymax'.
    :return: the ArcPy message log of the run.
    """
    #Create a raster with all the buildings
    Outraster = locationtemp1+'\\'+'AllRaster'
    arcpy.env.extent = DEM_extent #These coordinates are extracted from the environment settings/once the DEM raster is selected directly in ArcGIS,
    arcpy.FeatureToRaster_conversion(Buildings,'height',Outraster,'0.5') #creating raster of the footprints of the buildings
    #Clear non values and add all the Buildings to the DEM
    OutNullRas = sa.IsNull(Outraster) # identify noData Locations
    Output = sa.Con(OutNullRas == 1,0,Outraster)
    RadiationDEM = sa.Raster(DEM) + Output
    RadiationDEM.save(DEMfinal)
    return arcpy.GetMessages()
# <markdowncell>
# ###3. Calculate Boundaries - Factor Height and Factor Shade
# <codecell>
def CalcBoundaries (Simple_CQ,locationtemp1, locationtemp2, DataFactorsCentroids, DataFactorsBoundaries):
    """Identify shared wall segments between adjacent buildings.

    Splits every building outline into short segments, matches each segment
    centroid to the nearest segment of a neighbouring building, and derives
    per-segment shading factors:

    * FactorShade -- 0 when the segment faces a neighbour of equal or
      greater height (i.e. the wall is obstructed), 1 otherwise.
    * Freeheight  -- the wall height not obstructed by the neighbour.

    Writes per-segment results to *DataFactorsCentroids* and per-matched-pair
    results to *DataFactorsBoundaries* (both CSV).

    :return: the ArcPy message log of the run.
    """
    #local variables
    NearTable = locationtemp1+'\\'+'NearTable.dbf'
    CQLines = locationtemp2+'\\'+'\CQLines'
    CQVertices = locationtemp2+'\\'+'CQVertices'
    CQSegments = locationtemp2+'\\'+'CQSegment'
    CQSegments_centroid = locationtemp2+'\\'+'CQSegmentCentro'
    centroidsTable_name = 'CentroidCQdata.dbf'
    centroidsTable = locationtemp1+'\\'+centroidsTable_name
    Overlaptable = locationtemp1+'\\'+'overlapingTable.csv'
    #Create points in the centroid of segment line and table with near features:
    # identifying for each segment of line of building A the segment of line of building B in common.
    arcpy.FeatureToLine_management(Simple_CQ,CQLines)
    arcpy.FeatureVerticesToPoints_management(Simple_CQ,CQVertices,'ALL')
    arcpy.SplitLineAtPoint_management(CQLines,CQVertices,CQSegments,'2 METERS')
    arcpy.FeatureVerticesToPoints_management(CQSegments,CQSegments_centroid,'MID')
    arcpy.GenerateNearTable_analysis(CQSegments_centroid,CQSegments_centroid,NearTable,"1 Meters","NO_LOCATION","NO_ANGLE","CLOSEST","0")
    #Import the table with NearMatches
    NearMatches = dbf2df(NearTable)
    # Import the table with attributes of the centroids of the Segments
    arcpy.TableToTable_conversion(CQSegments_centroid, locationtemp1, centroidsTable_name)
    DataCentroids = dbf2df(centroidsTable, cols={'Name','height','ORIG_FID'})
    # CreateJoin to Assign a Factor to every Centroid of the lines,
    FirstJoin = pd.merge(NearMatches,DataCentroids,left_on='IN_FID', right_on='ORIG_FID')
    SecondaryJoin = pd.merge(FirstJoin,DataCentroids,left_on='NEAR_FID', right_on='ORIG_FID')
    # delete matches within the same polygon Name (it can happen that lines are too close one to the other)
    # also delete matches with a distance of more than 20 cm, making room for mistakes during the simplification of buildings but avoiding deleting boundaries
    rows = SecondaryJoin.IN_FID.count()
    for row in range(rows):
        if SecondaryJoin.loc[row,'Name_x'] == SecondaryJoin.loc[row,'Name_y'] or SecondaryJoin.loc[row,'NEAR_DIST'] > 0.2:
            SecondaryJoin = SecondaryJoin.drop(row)
    SecondaryJoin.reset_index(inplace=True)
    #FactorShade = 0 if the line exists in a building totally covered by another one, and Freeheight is equal to the height of the line
    # that is not obstructed by the other building
    rows = SecondaryJoin.IN_FID.count()
    SecondaryJoin['FactorShade']=0
    SecondaryJoin['Freeheight']=0
    for row in range(rows):
        if SecondaryJoin.loc[row,'height_x'] <= SecondaryJoin.loc[row,'height_y']:
            SecondaryJoin.loc[row,'FactorShade'] = 0
            SecondaryJoin.loc[row,'Freeheight'] = 0
        elif SecondaryJoin.loc[row,'height_x'] > SecondaryJoin.loc[row,'height_y'] and SecondaryJoin.loc[row,'height_x']-1 <= SecondaryJoin.loc[row,'height_y']:
            # neighbour is less than 1 m shorter: treat the wall as fully shaded
            SecondaryJoin.loc[row,'FactorShade'] = 0
        else:
            SecondaryJoin.loc[row,'FactorShade'] = 1
            SecondaryJoin.loc[row,'Freeheight'] = abs(SecondaryJoin.loc[row,'height_y']- SecondaryJoin.loc[row,'height_x'])
    #Create and export Secondary Join with results, it will be useful for the function CalcObservers
    SecondaryJoin.to_csv(DataFactorsBoundaries,index=False)
    #Update table Datacentroids with the fields Freeheight and FactorShade. For those buildings without
    #shading boundaries these factors are equal to 1 and the field 'height' respectively.
    DataCentroids['FactorShade'] = 1
    DataCentroids['Freeheight'] = DataCentroids['height']
    Results = DataCentroids.merge(SecondaryJoin, left_on='ORIG_FID', right_on='ORIG_FID_x', how='outer')
    Results.FactorShade_y.fillna(Results['FactorShade_x'],inplace=True)
    Results.Freeheight_y.fillna(Results['Freeheight_x'],inplace=True)
    Results.rename(columns={'FactorShade_y':'FactorShade','Freeheight_y':'Freeheight'},inplace=True)
    FinalDataCentroids = pd.DataFrame(Results,columns={'ORIG_FID','height','FactorShade','Freeheight'})
    FinalDataCentroids.to_csv(DataFactorsCentroids,index=False)
    return arcpy.GetMessages()
# <markdowncell>
# ###4. Calculate observation points
# <codecell>
def CalcObservers(Simple_CQ,Observers, DataFactorsBoundaries, locationtemporal2):
    """Create the solar-radiation observation points around every building.

    Buffers each building outline, converts the buffer edges to mid-point
    observers, adds extra observers at aggregated-polygon vertices, and then
    removes observers that fall inside buildings which are not overlapped by
    taller neighbours (using the FactorShade results from CalcBoundaries).
    The final point feature class is written to *Observers*.

    :return: the ArcPy message log of the run.
    """
    #local variables
    Buffer_CQ = locationtemporal2+'\\'+'BufferCQ'
    temporal_lines = locationtemporal2+'\\'+'lines'
    Points = locationtemporal2+'\\'+'Points'
    AggregatedBuffer = locationtemporal2+'\\'+'BufferAggregated'
    temporal_lines3 = locationtemporal2+'\\'+'lines3'
    Points3 = locationtemporal2+'\\'+'Points3'
    Points3Updated = locationtemporal2+'\\'+'Points3Updated'
    EraseObservers = locationtemporal2+'\\'+'eraseobservers'
    Observers0 = locationtemporal2+'\\'+'observers0'
    NonoverlappingBuildings = locationtemporal2+'\\'+'Non_overlap'
    templines = locationtemporal2+'\\'+'templines'
    templines2 = locationtemporal2+'\\'+'templines2'
    Buffer_CQ0 = locationtemporal2+'\\'+'Buffer_CQ0'
    Buffer_CQ = locationtemporal2+'\\'+'Buffer_CQ'
    Buffer_CQ1 = locationtemporal2+'\\'+'Buffer_CQ1'
    Simple_CQcopy = locationtemporal2+'\\'+'Simple_CQcopy'
    #First increase the boundaries in 2m of each surface in the community to
    #analyze- this will avoid that the observers overlap the buildings and Simplify
    #the community vertices to only create 1 point per surface
    arcpy.CopyFeatures_management(Simple_CQ,Simple_CQcopy)
    #Make Square-like buffers
    arcpy.PolygonToLine_management(Simple_CQcopy,templines,"IGNORE_NEIGHBORS")
    arcpy.SplitLine_management(templines,templines2)
    arcpy.Buffer_analysis(templines2,Buffer_CQ0,"0.75 Meters","FULL","FLAT","NONE","#")
    arcpy.Append_management(Simple_CQcopy,Buffer_CQ0,"NO_TEST")
    arcpy.Dissolve_management(Buffer_CQ0,Buffer_CQ1,"Name","#","SINGLE_PART","DISSOLVE_LINES")
    arcpy.SimplifyBuilding_cartography(Buffer_CQ1,Buffer_CQ,simplification_tolerance=8, minimum_area=None)
    #arcpy.Buffer_analysis(Simple_CQ,Buffer_CQ,buffer_distance_or_field=1, line_end_type='FLAT') # buffer with a flat finishing
    #arcpy.Generalize_edit(Buffer_CQ,"2 METERS")
    #Transform all polygons of the simplified areas to observation points
    arcpy.SplitLine_management(Buffer_CQ,temporal_lines)
    arcpy.FeatureVerticesToPoints_management(temporal_lines,Points,'MID') # Second the transformation of Lines to a mid point
    #Join all the polygons to get extra vertices, make lines and then get points.
    #these points should be added to the original observation points
    arcpy.AggregatePolygons_cartography(Buffer_CQ,AggregatedBuffer,"0.5 Meters","0 SquareMeters","0 SquareMeters","ORTHOGONAL") # aggregate polygons
    arcpy.SplitLine_management(AggregatedBuffer,temporal_lines3) #make lines
    arcpy.FeatureVerticesToPoints_management(temporal_lines3,Points3,'MID')# create extra points
    # add information to Points3 about their buildings
    arcpy.SpatialJoin_analysis(Points3,Buffer_CQ,Points3Updated,"JOIN_ONE_TO_ONE","KEEP_ALL",match_option="CLOSEST",search_radius="5 METERS")
    arcpy.Erase_analysis(Points3Updated,Points,EraseObservers,"2 Meters")# erase overlapping points
    arcpy.Merge_management([Points,EraseObservers],Observers0)# merge the two observer sets
    # Eliminate Observation points above roofs of the highest surfaces
    #Import Overlaptable from function CalcBoundaries containing the data about overlapping buildings; eliminate duplicates, choose only the non-overlapped ones and reindex
    DataNear = pd.read_csv(DataFactorsBoundaries)
    CleanDataNear = DataNear[DataNear['FactorShade'] == 1]
    # NOTE(review): drop_duplicates(cols=...) is the pre-0.17 pandas keyword
    # (modern pandas uses subset=); this file targets that old pandas API.
    CleanDataNear.drop_duplicates(cols='Name_x',inplace=True)
    CleanDataNear.reset_index(inplace=True)
    rows = CleanDataNear.Name_x.count()
    for row in range(rows):
        Field = "Name" # select field where the name exists to iterate
        Value = CleanDataNear.loc[row,'Name_x'] # set the value or name of the City quarter
        Where_clausule = ''''''+'"'+Field+'"'+"="+"\'"+str(Value)+"\'"+'''''' # strange writing to introduce in ArcGIS
        if row == 0:
            arcpy.MakeFeatureLayer_management(Simple_CQ, 'Simple_lyr')
            arcpy.SelectLayerByAttribute_management('Simple_lyr',"NEW_SELECTION",Where_clausule)
        else:
            arcpy.SelectLayerByAttribute_management('Simple_lyr',"ADD_TO_SELECTION",Where_clausule)
    arcpy.CopyFeatures_management('simple_lyr', NonoverlappingBuildings)
    arcpy.ErasePoint_edit(Observers0,NonoverlappingBuildings,"INSIDE")
    arcpy.CopyFeatures_management(Observers0,Observers)#copy features to reset the OBJECTID
    return arcpy.GetMessages()
# <markdowncell>
# ###5. Radiation results to surfaces
# <codecell>
def CalcRadiationSurfaces(Observers, Radiationyearfinal, DataFactorsCentroids, DataradiationLocation, locationtemp1, locationtemp2):
    """Join the annual point-radiation table back onto building wall segments.

    Spatially joins the observers to the wall-segment centroids, merges the
    per-segment shading factors (from CalcBoundaries) and the hourly
    radiation table, and writes the combined table to *Radiationyearfinal*.

    :return: the ArcPy message log of the run.
    """
    # local variables
    CQSegments_centroid = locationtemp2+'\\'+'CQSegmentCentro'
    Outjoin = locationtemp2+'\\'+'Join'
    CQSegments = locationtemp2+'\\'+'CQSegment'
    OutTable = 'CentroidsIDobserv.dbf'
    # Create Join of features Observers and CQ_sementscentroids to
    # assign Names and IDS of observers (field TARGET_FID) to the centroids of the lines of the buildings,
    # then create a table to import as a Dataframe
    arcpy.SpatialJoin_analysis(CQSegments_centroid,Observers,Outjoin,"JOIN_ONE_TO_ONE","KEEP_ALL",match_option="CLOSEST",search_radius="10 METERS")
    arcpy.JoinField_management(Outjoin,'OBJECTID',CQSegments, 'OBJECTID') # add the lengths of the Lines to the File
    arcpy.TableToTable_conversion(Outjoin, locationtemp1, OutTable)
    Centroids_ID_observers = dbf2df(locationtemp1+'\\'+OutTable, cols={'Name_12','height','ORIG_FID','Shape_Leng'})
    Centroids_ID_observers.rename(columns={'Name_12':'Name'},inplace=True)
    #Create a Join of the Centroid_ID_observers and Datacentroids in the Second Chapter to get values of surfaces Shaded.
    Datacentroids = pd.read_csv(DataFactorsCentroids)
    DataCentroidsFull = pd.merge(Centroids_ID_observers,Datacentroids,left_index=True,right_index=True)
    #Read again the radiation table and merge values with the Centroid_ID_observers under the field ID in Radiationtable and 'ORIG_ID' in Centroids...
    Radiationtable = pd.read_csv(DataradiationLocation,index_col='Unnamed: 0')
    DataRadiation = pd.merge(DataCentroidsFull,Radiationtable, left_on='ORIG_FID_x',right_on='ID')
    DataRadiation.to_csv(Radiationyearfinal,index=False)
    return arcpy.GetMessages()
# <markdowncell>
# ##DETERMINISTIC ENERGY MODEL
# <markdowncell>
# ###1. Thermal properties and geometry of buildings
# <codecell>
def CalcProperties(CQ, CQproperties, RadiationFile,locationtemp1):
    """Derive thermal/geometric building properties per EN ISO 13790.

    Combines the wall-segment radiation file, the building property table
    and the shapefile attributes into one DataFrame with envelope areas,
    steady-state transmission coefficients (Htr_*), effective mass area,
    internal heat capacity (Cm) and the building year category.

    :return: DataFrame *AllProperties*, one row per building.
    """
    #Local Variables
    OutTable = 'CQshape3.dbf'
    # Set of estimated constants
    Z = 3 # height of basement for every building in m
    Bf = 0.7 # It calculates the coefficient of reduction in transmittance for surfaces in contact with the ground according to values of SIA 380/1
    # Set of constants according to EN 13790
    his = 3.45 #heat transfer coefficient between air and the surface in W/(m2K)
    hms = 9.1 # Heat transfer coefficient between nodes m and s in W/m2K
    # Set of estimated constants
    #Import RadiationFile and Properties of the shapefiles
    rf = pd.read_csv(RadiationFile)
    arcpy.TableToTable_conversion(CQ, locationtemp1, OutTable)
    CQShape_properties = dbf2df(locationtemp1+'\\'+OutTable)
    #Areas above ground #get the area of each wall in the buildings
    rf['Awall'] = rf['Shape_Leng']*rf['Freeheight']*rf['FactorShade']
    Awalls0 = pd.pivot_table(rf,rows='Name',values='Awall',aggfunc=np.sum); Awalls = pd.DataFrame(Awalls0) #get the area of walls in the whole buildings
    Areas = pd.merge(Awalls,CQproperties, left_index=True,right_on='Name')
    Areas['Aw'] = Areas['Awall']*Areas['fwindow']*Areas['PFloor'] # Finally get the Area of windows
    Areas['Aop_sup'] = Areas['Awall']*Areas['PFloor'] #....and Opaque areas PFloor represents a factor according to the amount of floors heated
    #Areas below ground
    AllProperties = pd.merge(Areas,CQShape_properties,on='Name')# Join both properties files (Shape and areas)
    AllProperties['Aop_bel'] = Z*AllProperties['Shape_Leng']+AllProperties['Shape_Area'] # Opaque areas in m2 below ground including floor
    AllProperties['Atot'] = AllProperties['Aop_sup']+AllProperties['Aop_bel']+AllProperties['Shape_Area'] # Total area of the building envelope m2, it is considered the roof to be flat
    AllProperties['Af'] = AllProperties['Shape_Area']*AllProperties['Floors_y']*AllProperties['Hs_y']# conditioned area
    AllProperties['Aef'] = AllProperties['Shape_Area']*AllProperties['Floors_y']*AllProperties['Es']# conditioned area only those for electricity
    AllProperties['Am'] = AllProperties.Construction.apply(lambda x:AmFunction(x))*AllProperties['Af'] # Effective mass area in m2
    #Steady-state Thermal transmittance coefficients and Internal heat Capacity
    AllProperties ['Htr_w'] = AllProperties['Aw']*AllProperties['Uwindow'] # Thermal transmission coefficient for windows and glazing. in W/K
    AllProperties ['HD'] = AllProperties['Aop_sup']*AllProperties['Uwall']+AllProperties['Shape_Area']*AllProperties['Uroof'] # Direct Thermal transmission coefficient to the external environment in W/K
    AllProperties ['Hg'] = Bf*AllProperties ['Aop_bel']*AllProperties['Ubasement'] # steady-state Thermal transmission coefficient to the ground. in W/K
    AllProperties ['Htr_op'] = AllProperties ['Hg']+ AllProperties ['HD']
    AllProperties ['Htr_ms'] = hms*AllProperties ['Am'] # Coupling conductance 1 in W/K
    AllProperties ['Htr_em'] = 1/(1/AllProperties['Htr_op']-1/ AllProperties['Htr_ms']) # Coupling conductance 2 in W/K
    AllProperties ['Htr_is'] = his*AllProperties ['Atot']
    AllProperties['Cm'] = AllProperties.Construction.apply(lambda x:CmFunction(x))*AllProperties['Af'] # Internal heat capacity in J/K
    # Year Category of building
    AllProperties['YearCat'] = AllProperties.apply(lambda x: YearCategoryFunction(x['Year_y'], x['Renovated']), axis=1)
    AllProperties.rename(columns={'Hs_y':'Hs','Floors_y':'Floors','PFloor_y':'PFloor','Year_y':'Year','fwindow_y':'fwindow'},inplace=True)
    return AllProperties
# <codecell>
def CalcIncidentRadiation(AllProperties, Radiationyearfinal):
    """Aggregate hourly wall-segment radiation to whole-building totals.

    Multiplies each segment's hourly radiation (columns T1..T8760) by its
    exposed area (length * free height * shading factor), sums per building
    via a pivot table, and merges the result into *AllProperties*.

    :return: DataFrame *Solar* -- total solar radiation on exposed areas, in W.
    """
    #Import Radiation table and compute the Irradiation in W in every building's surface
    Radiation_Shading2 = pd.read_csv(Radiationyearfinal)
    Columns = 8761  # T1..T8760: one column per hour of the year
    Radiation_Shading2['AreaExposed'] = Radiation_Shading2['Shape_Leng']*Radiation_Shading2['FactorShade']*Radiation_Shading2['Freeheight']
    for Column in range(1, Columns):
        #transform all the points of solar radiation into Wh
        Radiation_Shading2['T'+str(Column)] = Radiation_Shading2['T'+str(Column)]*Radiation_Shading2['AreaExposed']
    #Do pivot table to sum up the irradiation in every surface to the building
    #and merge the result with the table allProperties
    PivotTable3 = pd.pivot_table(Radiation_Shading2,rows='Name',margins='Add all row')
    RadiationLoad = pd.DataFrame(PivotTable3)
    Solar = AllProperties.merge(RadiationLoad, left_on='Name',right_index=True)
    return Solar # total solar radiation in areas exposed to radiation in Watts
# <markdowncell>
# 1.1 Sub-functions of Thermal mass
# <codecell>
def CmFunction (x):
    """Internal heat capacity per m2 of conditioned floor area (J/K*m2)
    by construction class, per EN ISO 13790; unknown classes fall back to
    the 'Medium' value."""
    capacities = {'Light': 110000, 'Medium': 165000, 'Heavy': 300000}
    return capacities.get(x, 165000)
# <codecell>
def AmFunction (x):
    """Effective mass area factor (dimensionless, multiplies Af) by
    construction class, per EN ISO 13790; unknown classes fall back to
    the 'Medium' value."""
    factors = {'Light': 2.5, 'Medium': 2.5, 'Heavy': 3.2}
    return factors.get(x, 2.5)
# <markdowncell>
# 1.2. Sub- Function Hourly thermal transmission coefficients
# <codecell>
def calc_Htr(Hve, Htr_is, Htr_ms, Htr_w):
    """Hourly thermal transmission coefficients Htr_1..3 (W/K, EN 13790).

    Combines the ventilation, surface, mass and window conductances into
    the three coupling coefficients of the 5R1C network and returns them
    as the list [Htr_1, Htr_2, Htr_3].
    """
    def _series(a, b):
        # combined conductance of two conductances coupled in series
        return 1/(1/a + 1/b)

    h1 = _series(Hve, Htr_is)
    h2 = h1 + Htr_w
    h3 = _series(h2, Htr_ms)
    return [h1, h2, h3]
# <markdowncell>
# ###2. Calculation of thermal and Electrical loads - No processes
# <codecell>
def CalcThermalLoads(i, AllProperties, locationFinal, Solar, Profiles, Profiles_names, Temp, Seasonhours, Servers, Coolingroom):
    """Hourly thermal and electrical loads of building `i` (no processes).

    Runs a two-pass EN 13790 simple-hourly (5R1C) simulation: a first pass
    without infiltration to obtain air temperatures, then a second pass with
    the infiltration-corrected ventilation conductance, followed by emission
    and distribution losses, distribution temperatures and auxiliary/electric
    demand.

    Returns a one-row DataFrame of annual totals (MWh) plus design values
    (tsh0/trh0/tsc0/trc0, Qhs0/Qcs0, mwh0/mwc0, Af) and writes the hourly
    result table to `locationFinal`.

    Fixes vs. original: the second simulation loop re-reads the per-hour state
    (tm_t0, te_t, set points, gains) that was previously left frozen at the
    last hour of the first pass, the first pass now propagates tm_t, and
    mwh0/mwc0 are initialised in the Af <= 0 branch (previously a NameError).
    """
    # Local variables
    Name = AllProperties.loc[i,'Name']
    # Set of constants according to EN 13790
    g_gl = 0.9*0.75  # solar transmittance: 0.9 reduction factor, 0.75 for double glazing
    pa_ca = 1200     # volumetric heat capacity of air, J/m3K
    F_f = 0.3        # frame area fraction of windows
    Bf = 0.7         # transmittance reduction for ground-coupled surfaces (SIA 380/1)
    tw = 10          # temperature of the water intake for hot water, degC
    # Frequently used building properties
    nf = AllProperties.loc[i,'Floors']
    nfpercent = AllProperties.loc[i,'PFloor']
    height = AllProperties.loc[i,'height']
    Lw = AllProperties.loc[i,'MBG_Width']
    Ll = AllProperties.loc[i,'MBG_Length']
    Awall = AllProperties.loc[i,'Awall']
    footprint = AllProperties.loc[i,'Shape_Area']
    Year = AllProperties.loc[i,'Year']
    Yearcat = AllProperties.loc[i,'YearCat']
    Af = AllProperties.loc[i,'Af']
    Aef = AllProperties.loc[i,'Aef']
    SystemH = AllProperties.loc[i,'Emission_heating']
    SystemC = AllProperties.loc[i,'Emission_cooling']
    tsh0 = AllProperties.loc[i,'tsh0']
    trh0 = AllProperties.loc[i,'trh0']
    tsc0 = AllProperties.loc[i,'tsc0']
    trc0 = AllProperties.loc[i,'trc0']
    te_min = Temp.te.min()
    te_max = Temp.te.max()
    # Equivalent occupancy profile for this building's mix of uses
    Occupancy0 = calc_Type(Profiles, Profiles_names, AllProperties, i, Servers, Coolingroom)
    # Create result columns to iterate over
    Columns = ['IH_nd_ac','IC_nd_ac','g_gl','Htr_1','Htr_2','Htr_3','tm_t','tair_ac','top_ac','IHC_nd_ac', 'Asol', 'I_sol','te',
               'Eal','Qhsf','Qcsf','Qhs','Qcs','Qwwf','Qww','tair','top','tsc','trc','tsh','trh','Qhs_em_ls','Qcs_em_ls',
               'Qhs_d_ls','Qcs_d_ls','Qww_dh_ls','Qww_d_ls','tamb','Qcs_dis_em_ls','Qhs_dis_em_ls',
               'Eaux_hs', 'Eaux_cs', 'Eaux_ww']
    for Label in Columns:
        Occupancy0[Label] = 0
    if Af > 0:
        # Assign temperature data to the table
        Occupancy0['te'] = Temp['te']
        # Hourly ventilation conductance in W/K, without infiltration
        # (infiltration is added in the second pass below)
        Occupancy0['Hve'] = pa_ca*(Occupancy0['Ve']* Af/3600)
        # Hot water use at 60 degC, 45 degC for new buildings
        if AllProperties.loc[i,'Year'] >= 2020:
            twws = 45
        else:
            twws = 60
        Occupancy0['Qww'] = Occupancy0['Mww']*Af*4.184*(twws-tw)*0.277777777777778  # in Wh
        # Distribution losses of the domestic hot water system
        Occupancy = calc_Qww_dis_ls(nf, nfpercent, Lw, Ll, Year, Af, twws, Bf, AllProperties.loc[i,'Renovated'],
                                    Occupancy0, Seasonhours, footprint, 1)  # 1: internal-loads mode
        # Add hot-water losses to internal gains; 80% is assumed recoverable
        Occupancy['I_int'] = Occupancy['I_int']*Af + Occupancy['Qww_dh_ls']*0.8
        # Heat flow injected directly to the air node, W
        Occupancy['I_ia'] = 0.5*Occupancy['I_int']
        # Shading factor per hour (external shading deployed when I > 300 W/m2)
        Rf_sh = Calc_Rf_sh(AllProperties.loc[i,'Shading_Po'], AllProperties.loc[i,'Shading_Ty'])
        # Effective solar area per hour (opaque areas neglected, overhangs Fov = 0)
        Num_Hours = Occupancy.tamb.count()
        for hour in range(Num_Hours):
            Occupancy.loc[hour,'g_gl'] = calc_gl(Solar.loc[i,'T'+str(hour+1)]/AllProperties.loc[i,'Awall'], g_gl, Rf_sh)
            Occupancy.loc[hour,'Asol'] = Occupancy.loc[hour,'g_gl']*(1-F_f)*AllProperties.loc[i,'Aw']
            # Solar gains, W; extra radiation flow to the sky neglected (Fr_k*Ir_k = 0)
            Occupancy.loc[hour,'I_sol'] = Occupancy.loc[hour,'Asol']*(Solar.loc[i,'T'+str(hour+1)]/AllProperties.loc[i,'Awall'])
            # Hourly transmission coefficients of the 5R1C network, W/K
            Coefficients = calc_Htr(Occupancy.loc[hour,'Hve'], AllProperties.loc[i,'Htr_is'], AllProperties.loc[i,'Htr_ms'], AllProperties.loc[i,'Htr_w'])
            Occupancy.loc[hour,'Htr_1'] = Coefficients[0]
            Occupancy.loc[hour,'Htr_2'] = Coefficients[1]
            Occupancy.loc[hour,'Htr_3'] = Coefficients[2]
        # Heat flows to the mass and surface nodes
        Occupancy['I_m'] = (AllProperties.loc[i,'Am']/AllProperties.loc[i,'Atot'])*(Occupancy['I_ia']+Occupancy['I_sol'])
        Occupancy['I_st'] = (1-(AllProperties.loc[i,'Am']/AllProperties.loc[i,'Atot'])-(AllProperties.loc[i,'Htr_w']/(9.1*AllProperties.loc[i,'Atot'])))*(Occupancy['I_ia']+Occupancy['I_sol'])
        # Set-point corrections for emission/control losses of the heating/cooling systems
        tHC_corr = [0,0]
        tHC_corr = calc_Qem_ls(str(SystemH), str(SystemC))
        tHset_corr = tHC_corr[0]
        tCset_corr = tHC_corr[1]
        # Seed the mass-node temperature with the first outdoor temperature
        Occupancy.loc[0,'tm_t'] = Occupancy.loc[0,'te']
        # FIRST PASS: no losses, no infiltration — obtain air temperatures
        for j in range(1, Num_Hours):
            Losses = 0
            tm_t0 = Occupancy.loc[j-1,'tm_t']
            te_t = Occupancy.loc[j,'te']
            tintH_set = Occupancy.loc[j,'tintH_set']
            tintC_set = Occupancy.loc[j,'tintC_set']
            Htr_em = AllProperties.loc[i,'Htr_em']
            Htr_ms = AllProperties.loc[i,'Htr_ms']
            Htr_is = AllProperties.loc[i,'Htr_is']
            Htr_1 = Occupancy.loc[j,'Htr_1']
            Htr_2 = Occupancy.loc[j,'Htr_2']
            Htr_3 = Occupancy.loc[j,'Htr_3']
            Hve = Occupancy.loc[j,'Hve']
            Htr_w = AllProperties.loc[i,'Htr_w']
            I_st = Occupancy.loc[j,'I_st']
            I_ia = Occupancy.loc[j,'I_ia']
            I_m = Occupancy.loc[j,'I_m']
            Cm = AllProperties.loc[i,'Cm']
            Results0 = calc_TL(str(SystemH), str(SystemC), te_min, te_max, tm_t0, te_t, tintH_set, tintC_set, Htr_em, Htr_ms, Htr_is, Htr_1,
                               Htr_2, Htr_3, I_st, Hve, Htr_w, I_ia, I_m, Cm, Af, Losses, tHset_corr,
                               tCset_corr)
            # fixed: propagate the mass temperature so hour j+1 starts from hour j's state
            Occupancy.loc[j,'tm_t'] = Results0[0]
            Occupancy.loc[j,'tair'] = Results0[1]  # inside air temperature
        # SECOND PASS: now considering infiltration
        Temp0 = calc_infiltration(Temp, Occupancy, Awall, Yearcat, height, nfpercent)
        Occupancy['Hve'] = pa_ca*(Occupancy['Ve']* Af/3600 + Temp0['Ve_inf'])
        Num_Hours = Occupancy.tamb.count()
        for hour in range(Num_Hours):
            Coefficients = calc_Htr(Occupancy.loc[hour,'Hve'], AllProperties.loc[i,'Htr_is'], AllProperties.loc[i,'Htr_ms'], AllProperties.loc[i,'Htr_w'])
            Occupancy.loc[hour,'Htr_1'] = Coefficients[0]
            Occupancy.loc[hour,'Htr_2'] = Coefficients[1]
            Occupancy.loc[hour,'Htr_3'] = Coefficients[2]
        Occupancy['I_m'] = (AllProperties.loc[i,'Am']/AllProperties.loc[i,'Atot'])*(Occupancy['I_ia']+Occupancy['I_sol'])
        Occupancy['I_st'] = (1-(AllProperties.loc[i,'Am']/AllProperties.loc[i,'Atot'])-(AllProperties.loc[i,'Htr_w']/(9.1*AllProperties.loc[i,'Atot'])))*(Occupancy['I_ia']+Occupancy['I_sol'])
        for j in range(1, Num_Hours):
            # Net thermal loads and temperatures, with and without emission losses.
            # fixed: the per-hour reads below were commented out, freezing the state
            # at the last hour of the first pass for the whole second pass.
            Losses = 0
            tm_t0 = Occupancy.loc[j-1,'tm_t']
            te_t = Occupancy.loc[j,'te']
            tintH_set = Occupancy.loc[j,'tintH_set']
            tintC_set = Occupancy.loc[j,'tintC_set']
            Htr_em = AllProperties.loc[i,'Htr_em']
            Htr_ms = AllProperties.loc[i,'Htr_ms']
            Htr_is = AllProperties.loc[i,'Htr_is']
            Htr_1 = Occupancy.loc[j,'Htr_1']
            Htr_2 = Occupancy.loc[j,'Htr_2']
            Htr_3 = Occupancy.loc[j,'Htr_3']
            Hve = Occupancy.loc[j,'Hve']
            Htr_w = AllProperties.loc[i,'Htr_w']
            I_st = Occupancy.loc[j,'I_st']
            I_ia = Occupancy.loc[j,'I_ia']
            I_m = Occupancy.loc[j,'I_m']
            Cm = AllProperties.loc[i,'Cm']
            Results0 = calc_TL(str(SystemH), str(SystemC), te_min, te_max, tm_t0, te_t, tintH_set, tintC_set, Htr_em, Htr_ms, Htr_is, Htr_1,
                               Htr_2, Htr_3, I_st, Hve, Htr_w, I_ia, I_m, Cm, Af, Losses, tHset_corr,
                               tCset_corr)
            Occupancy.loc[j,'tm_t'] = Results0[0]
            Occupancy.loc[j,'tair'] = Results0[1]  # inside air temperature
            Occupancy.loc[j,'top'] = Results0[2]   # operative temperature
            Occupancy.loc[j,'Qhs'] = Results0[3]   # net heating load
            Occupancy.loc[j,'Qcs'] = Results0[4]   # net cooling load
            # Same hour with emission/control losses; the difference is the emission loss
            Losses = 1
            Results1 = calc_TL(str(SystemH), str(SystemC), te_min, te_max, tm_t0, te_t, tintH_set, tintC_set, Htr_em, Htr_ms, Htr_is, Htr_1,
                               Htr_2, Htr_3, I_st, Hve, Htr_w, I_ia, I_m, Cm, Af, Losses, tHset_corr, tCset_corr)
            Occupancy.loc[j,'Qhs_em_ls'] = Results1[3] - Occupancy.loc[j,'Qhs']  # emission and control losses
            Occupancy.loc[j,'Qcs_em_ls'] = Results1[4] - Occupancy.loc[j,'Qcs']  # emission and control losses
        # Emission factor of the distribution system
        Emissionfactor = calc_em_t(str(SystemH), str(SystemC))
        nh = Emissionfactor[4]
        # Final energy up to generation, first iteration
        Occupancy['Qhsf'] = Occupancy['Qhs']
        Occupancy['Qcsf'] = -Occupancy['Qcs']  # Qc must be positive
        Occupancy['Qwwf'] = Occupancy['Qww']
        Occupancy.to_csv(r'C:\ArcGIS\Toerase0.csv')  # debug dump
        # Distribution-system temperatures over time
        Results2 = calc_temperatures(str(SystemH), str(SystemC), Occupancy, Temp0, tsh0, trh0, tsc0, trc0, nh, nf, Af)
        Occupancy2 = Results2[0]
        # Distribution losses for space heating and cooling
        Occupancy3 = calc_Qdis_ls(str(SystemH), str(SystemC), nf, nfpercent, Lw, Ll, Year, Af, twws, Bf, AllProperties.loc[i,'Renovated'],
                                  Occupancy2, Seasonhours, footprint)
        # Distribution losses for domestic hot water (real-loads mode)
        Occupancy4 = calc_Qww_dis_ls(nf, nfpercent, Lw, Ll, Year, Af, twws, Bf, AllProperties.loc[i,'Renovated'],
                                     Occupancy3, Seasonhours, footprint, 0)
        Occupancy4.to_csv(r'C:\ArcGIS\Toerase.csv')  # debug dump
        Occupancy4['Qww_dis_ls'] = Occupancy4['Qww_d_ls'] + Occupancy4['Qww_dh_ls']
        Occupancy4['Qcs_dis_em_ls'] = -(Occupancy4['Qcs_em_ls'] + Occupancy4['Qcs_d_ls'])
        Occupancy4['Qhs_dis_em_ls'] = Occupancy4['Qhs_em_ls'] + Occupancy4['Qhs_d_ls']
        # Final energy up to generation, including distribution and emission losses
        Occupancy4['Qhsf'] = Occupancy4['Qhs'] + Occupancy4['Qhs_dis_em_ls']
        Occupancy4['Qcsf'] = -Occupancy4['Qcs'] + Occupancy4['Qcs_dis_em_ls']
        Occupancy4['Qwwf'] = Occupancy4['Qww'] + Occupancy4['Qww_dis_ls']
        Occupancy4.to_csv(r'C:\ArcGIS\Toerase2.csv')  # debug dump
        # Distribution-system temperatures, second iteration with losses included
        Results3 = calc_temperatures(str(SystemH), str(SystemC), Occupancy4, Temp0, tsh0, trh0, tsc0, trc0, nh, nf, Af)
        Occupancy5 = Results3[0]
        Qhs0 = Results3[1]/1000  # design heating load, kW
        Qcs0 = Results3[2]/1000  # design cooling load, kW
        mwh0 = Results3[3]/4190  # design hot-water flow, kg/s
        mwc0 = Results3[4]/4190  # design cold-water flow, kg/s
        tsh0 = Results3[5]
        trh0 = Results3[6]
        tsc0 = Results3[7]
        trc0 = Results3[8]
        Occupancy5.to_csv(r'C:\ArcGIS\Toerase3.csv')  # debug dump
        # Zero heating outside the heating season and cooling outside the cooling season.
        # NOTE(review): this zeroes Occupancy4 after Occupancy5 was derived from it;
        # Occupancy5 (a copy) is unaffected — preserved as in the original.
        for j in range(1, Num_Hours):
            if Seasonhours[0] < j < Seasonhours[1]:
                Occupancy4.loc[j,'Qhs'] = 0
                Occupancy4.loc[j,'Qhsf'] = 0
                Occupancy4.loc[j,'Qhs_em_ls'] = 0
                Occupancy4.loc[j,'Qhs_d_ls'] = 0
                Occupancy4.loc[j,'tsh'] = 0
                Occupancy4.loc[j,'trh'] = 0
            elif 0 <= j <= Seasonhours[0] or Seasonhours[1] <= j <= 8759:
                Occupancy4.loc[j,'Qcs'] = 0
                Occupancy4.loc[j,'Qcsf'] = 0
                Occupancy4.loc[j,'Qcs_em_ls'] = 0
                Occupancy4.loc[j,'Qcs_d_ls'] = 0
                Occupancy4.loc[j,'tsc'] = 0
                Occupancy4.loc[j,'trc'] = 0
        # Pumping energy of all systems (no air conditioning)
        Occupancy6 = calc_Aux_hscs(nf, nfpercent, Lw, Ll, footprint, Year, Qhs0, tsh0, trh0, Occupancy5, Qcs0, tsc0, trc0,
                                   str(SystemH), str(SystemC), twws, tw)
        # Electrical demand.
        # NOTE(review): when neither system is air conditioning (or ceiling cooling),
        # 'Eal' keeps its initial 0 — confirm this is intended.
        if SystemC == 'Air conditioning' or SystemC == 'Ceiling cooling':
            for j in range(Num_Hours):
                if Seasonhours[0] < j < Seasonhours[1]:  # cooling season, 15 May - 15 Sept
                    Occupancy6.loc[j,'Eal'] = (Occupancy6.loc[j,'Ealf_ve'] + Occupancy6.loc[j,'Ealf_nove'])*AllProperties.loc[i,'Aef']
                else:
                    Occupancy6.loc[j,'Eal'] = (Occupancy6.loc[j,'Ealf_nove'])*Aef
        if SystemH == 'Air conditioning':
            for j in range(Num_Hours):
                if 0 <= j <= Seasonhours[0]:  # heating season
                    Occupancy6.loc[j,'Eal'] = (Occupancy6.loc[j,'Ealf_ve'] + Occupancy6.loc[j,'Ealf_nove'])*AllProperties.loc[i,'Aef']
                elif Seasonhours[1] <= j <= 8759:  # heating season, after summer
                    Occupancy6.loc[j,'Eal'] = (Occupancy6.loc[j,'Ealf_ve'] + Occupancy6.loc[j,'Ealf_nove'])*AllProperties.loc[i,'Aef']
                else:
                    Occupancy6.loc[j,'Eal'] = (Occupancy6.loc[j,'Ealf_nove'])*AllProperties.loc[i,'Aef']
    else:
        # No conditioned area: only non-ventilation appliance loads
        Occupancy0['Eal'] = Occupancy0['Ealf_nove']*Aef
        Occupancy6 = Occupancy0
        Qhs0 = 0
        Qcs0 = 0
        # fixed: previously undefined in this branch, raising NameError below
        mwh0 = 0
        mwc0 = 0
    Occupancy6['Eaux'] = Occupancy6['Eaux_hs'] + Occupancy6['Eaux_cs'] + Occupancy6['Eaux_ww']
    Occupancy6['Ealf'] = Occupancy6['Eal'] + Occupancy6['Eaux']
    Occupancy6['NAME'] = AllProperties.loc[i,'Name']
    # Occupancy in persons
    Occupancy6['Occupancy'] = Occupancy6['People']*Af
    # Results
    Result_TL = pd.DataFrame(Occupancy6, columns=['DATE','NAME','Qhs_dis_em_ls','Qcs_dis_em_ls','Qww_dis_ls','Qhs','Qcs','Qww','Qhsf','Qcsf','Qwwf','Ealf','Eaux',
                                                  'I_sol','I_int','tsh','trh','tsc','trc','tair','top','te','Occupancy'])
    Totals_TL = pd.DataFrame(Result_TL.sum()).T/1000000  # annual totals in MWh
    GT = {'Name':[AllProperties.loc[i,'Name']],'Qhs_dis_em_ls':Totals_TL.Qhs_dis_em_ls,'Qhsf':Totals_TL.Qhsf,'Qcs_dis_em_ls':Totals_TL.Qcs_dis_em_ls,'Qcsf':Totals_TL.Qcsf,
          'Qhs':Totals_TL.Qhs,'Qcs':Totals_TL.Qcs,'Qww':Totals_TL.Qww,'Qww_dis_ls':Totals_TL.Qww_dis_ls,'Qwwf':Totals_TL.Qwwf,
          'Ealf':Totals_TL.Ealf,'Eaux':Totals_TL.Eaux,'Occupancy':Totals_TL.Occupancy,'tsh0':tsh0,'trh0':trh0,'tsc0':tsc0,'trc0':trc0,'Qhs0':Qhs0,'Qcs0':Qcs0,'mwh0':mwh0,'mwc0':mwc0,'Af':Af}
    Grandtotal = pd.DataFrame(GT)
    # EXPORT RESULTS
    Result_TL.to_csv(locationFinal+'\\'+Name+'.csv', index=False)
    Grandtotal.to_csv(locationFinal+'\\'+Name+'T'+'.csv')
    return Grandtotal
# <codecell>
def calc_infiltration(Temp, Occupancy, Awall, Yearcat, height, nfpercent):
    """Add wind-corrected speed and infiltration air flow columns to `Temp`.

    Computes 'Wind_net' (wind speed corrected to building height for an
    urban/city-centre terrain) and 'Ve_inf' (infiltration flow, currently
    disabled and hard-coded to 0). Returns a copy of `Temp`.

    Fix vs. original: the first branch tested `Yearcat <= 5`, which swallowed
    categories 3-4 and made the "medium" `elif` unreachable; per the comments
    the tight class is the newest/renovated stock, so the test is inverted.
    Behaviour is unchanged because K1..K3 feed only the disabled Ve_inf
    formula. NOTE(review): confirm the YearCategoryFunction ordering
    (assumed higher category = newer/renovated).
    """
    # Airtightness coefficients (leakage model constants)
    if Yearcat >= 5:  # renovated buildings and those from 2000 on: tight
        K1 = 0.1
        K2 = 0.011
        K3 = 0.034
    elif 2 < Yearcat <= 4:  # medium tightness
        K1 = 0.1
        K2 = 0.017
        K3 = 0.049
    else:  # up to 1970 and not renovated: poor tightness
        K1 = 0.1
        K2 = 0.023
        K3 = 0.007
    # Wind speed at building height, city-centre (urban) terrain conditions
    Temp['Wind_net'] = 0.21*Temp['Wind']*height**0.33
    # Infiltration disabled; formula kept for reference:
    # (K1 + K2*abs(Temp['te'] - Occupancy['tair']) + K3*Temp['Wind_net'])*Awall*nfpercent*3/3600
    Temp['Ve_inf'] = 0
    return Temp.copy()
# <markdowncell>
# Calc temperatures distribution system
# <codecell>
def calc_temperatures(SystemH, SystemC, DATA, Temp0, tsh0, trh0, tsc0, trc0, nh, Af, Floors):
    """Distribution-system supply/return temperatures and design loads.

    Dispatches to the emission-system model matching `SystemH`/`SystemC`
    (radiator, floor heating/TABS, ceiling cooling, air conditioning) and
    returns (hourly DataFrame, Qhsmax, Qcsmax, mwh0, mwc0, tsh0, trh0,
    tsc0, trc0) with flows in kg/s (mCw/4190).

    Fixes vs. original: the floor-heating branch referenced the undefined
    name `fh` (should be `fH`) and took design temperatures from `rad`
    (the radiator branch's variable); RESULT/mwh0/mwc0 are now initialised
    so 'No'/unmodelled systems no longer raise NameError at return.

    NOTE(review): the caller passes (..., nh, nf, Af) into (..., nh, Af,
    Floors), and calc_TABSH is declared (DATA, Qh0, tair0, Floors, Af) but
    called with (DATA, Qh0, tair0, Af, Floors) — the argument order looks
    swapped at both ends; confirm before changing either signature.
    """
    RESULT = DATA  # fall-back when no system model produces a profile
    mwh0 = 0       # nominal water flows default to 0 when a system is absent
    mwc0 = 0
    # Heating system
    if SystemH == 'No':
        Qhsmax = 0
    else:
        Qh0 = Qhsmax = DATA['Qhsf'].max()
        tair0 = DATA['tintH_set'].max()
        if SystemH == 'Air conditioning':
            # NOTE(review): calc_HVAC is a stub returning 0, so HVAC[0] would fail
            HVAC = calc_HVAC(DATA, Temp0, tsh0, trh0, Qh0, tair0, nh)
            RESULT = HVAC[0]
        elif SystemH == 'Radiator':
            rad = calc_RAD(DATA, tsh0, trh0, Qh0, tair0, nh)
            RESULT = rad[0]
            mwh0 = rad[1]/4190
        elif SystemH == 'Floor heating':
            fH = calc_TABSH(DATA, Qh0, tair0, Af, Floors)
            RESULT = fH[0]   # fixed: was `fh` (undefined name)
            mwh0 = fH[1]/4190
            tsh0 = fH[2]     # fixed: was `rad[2]` from the radiator branch
            trh0 = fH[3]     # design values for this building's conditions
    # Cooling system
    if SystemC == 'No':
        Qcsmax = 0
    else:
        Qc0 = Qcsmax = DATA['Qcsf'].max()
        tair0 = DATA['tintC_set'].min()
        if SystemC == 'Ceiling cooling':  # assumed to include ventilation for moisture control
            fc = calc_TABSC(DATA, Qc0, tair0, Af)
            RESULT = fc[0]
            mwc0 = fc[1]/4190
            tsc0 = fc[2]
            trc0 = fc[3]
    return RESULT.copy(), Qhsmax, Qcsmax, mwh0, mwc0, tsh0, trh0, tsc0, trc0
# <markdowncell>
# 2.1 Sub-function temperature radiator systems
# <codecell>
def calc_RAD(DATA, tsh0, trh0, Qh0, tair0, nh):
    """Hourly supply/return water temperatures of a radiator system.

    For every hour with a heating load at set-point air temperature, solves
    the radiator characteristic equation (log-mean temperature difference
    model with emission exponent `nh`) for the return temperature via
    Newton iteration, then derives the supply temperature.

    Returns (DATA copy with 'tsh'/'trh' filled, mCw0 nominal capacity flow
    rate in W/K, tsh0, trh0).
    NOTE(review): tsh0/trh0 are returned with the +273 offset still applied
    (Kelvin), unlike calc_TABSC which converts back to Celsius — confirm
    which unit the callers expect.
    """
    # design conditions in Kelvin
    tair0 = tair0 + 273
    tsh0 = tsh0 + 273
    trh0 = trh0 + 273
    mCw0 = Qh0/(tsh0-trh0)  # nominal capacity flow rate, W/K
    # log-mean temperature difference at design conditions
    LMRT = (tsh0-trh0)/scipy.log((tsh0-tair0)/(trh0-tair0))
    k1 = 1/mCw0
    def fh(x):
        # Residual of the radiator characteristic for return temperature x.
        # k2 and tair are rebound per hour in the loop below (late-binding closure).
        Eq = mCw0*k2-Qh0*(k2/(scipy.log((x+k2-tair)/(x-tair))*LMRT))**(nh+1)
        return Eq
    rows = DATA.Qhsf.count()
    for row in range(rows):
        # only hours at the heating set point (or 16 degC setback: hotel/residential case)
        if DATA.loc[row,'Qhsf'] != 0 and (DATA.loc[row,'tair'] == (tair0-273) or DATA.loc[row,'tair'] == 16):
            k2 = DATA.loc[row,'Qhsf']*k1  # hourly temperature spread Qhsf/mCw0
            tair = DATA.loc[row,'tair']+ 273
            result = scipy.optimize.newton(fh, trh0, maxiter=100,tol=0.01) - 273
            DATA.loc[row,'trh'] = result.real
            DATA.loc[row,'tsh'] = DATA.loc[row,'trh'] + k2
    return DATA.copy(), mCw0, tsh0, trh0
# <markdowncell>
# 2.1 Sub-function temperature Floor activated slabs
# <codecell>
def calc_TABSH(DATA, Qh0, tair0, Floors, Af):
    """Hourly supply/return temperatures of a floor-heating (TABS) system.

    Sizes the slab circuit at nominal conditions (EN 1264: mean surface
    temperature limited to tair + 10 K, design water temperature drop
    S0 = 5 K), then for every hour with a heating load at set point solves
    the slab characteristic for the return temperature by Newton iteration.

    Returns (DATA copy with 'tsh'/'trh'/'surface' filled, mCw0 nominal
    capacity flow rate in W/K, tsh0, trh0 design temperatures taken as the
    maxima of the hourly results).

    Fixes vs. original: the undefined name `n` is replaced by the local
    exponent `nh`; the hour filter was missing its `and`
    (`!= 0 (...)` called an int); the final design temperatures used
    `.loc[row, ...].max()` on a scalar (AttributeError) and now reduce the
    whole column; `scipy.log` (a removed numpy alias) is replaced by
    `math.log` (all arguments here are scalars).
    """
    from math import log  # scipy.log was a deprecated numpy alias, removed in modern SciPy
    tair0 = tair0 + 273
    # EN 1264 simplification: mean surface temperature at most 9 K above room
    # (15 K for perimeter/bathroom); +10 used here
    tmean_max = tair0 + 10
    nh = 0.025  # emission exponent of the slab characteristic
    q0 = Qh0/Af
    S0 = 5  # design temperature drop of the supplied water, K
    U0 = q0/(tmean_max-tair0)
    deltaH0 = (Qh0/(U0*Af))
    if S0/deltaH0 <= 0.5:  # water temperature drop should stay in this range
        deltaV0 = deltaH0 + S0/2
    else:
        deltaV0 = deltaH0 + S0/2+(S0**2/(12*deltaH0))
    tsh0 = deltaV0 + tair0
    trh0 = tsh0 - S0
    # NOTE(review): tair0 is already in Kelvin, so this second +273 double-offsets
    # the design temperatures; preserved because mCw0/LMRT downstream were
    # calibrated against it — confirm against the original model.
    tsh0 = tsh0 + 273
    trh0 = trh0 + 273
    mCw0 = q0*Af/(tsh0-trh0)  # nominal capacity flow rate, W/K
    LMRT = (tsh0-trh0)/log((tsh0-tair0)/(trh0-tair0))
    qh0 = 8.92*(tmean_max-tair0)**1.1
    kH0 = qh0*Af/(LMRT**(1+nh))  # fixed: `n` was undefined
    k1 = 1/mCw0
    def fh(x):
        # Residual of the slab characteristic for return temperature x.
        # k2 and tair are rebound per hour in the loop below (late-binding closure).
        return mCw0*k2-kH0*(k2/(log((x+k2-tair)/(x-tair))))**(1+nh)
    rows = DATA.Qhsf.count()
    DATA['surface'] = 0
    for row in range(rows):
        # fixed: missing `and` between the two conditions
        if DATA.loc[row,'Qhsf'] != 0 and (DATA.loc[row,'tair'] == (tair0-273) or DATA.loc[row,'tair'] == 16):
            Q = DATA.loc[row,'Qhsf']
            q = Q/Af
            k2 = Q*k1
            tair = DATA.loc[row,'tair'] + 273
            result = scipy.optimize.newton(fh, trh0, maxiter=100, tol=0.01) - 273
            DATA.loc[row,'trh'] = result.real
            DATA.loc[row,'tsh'] = DATA.loc[row,'trh'] + k2
            DATA.loc[row,'surface'] = (q/U0)**(1/1.1)+ DATA.loc[row,'tair']
    # Design temperatures for the building: peak of the hourly results
    # (fixed: was `.loc[row,...].max()` on a scalar)
    tsh0 = DATA['tsh'].max()
    trh0 = DATA['trh'].max()
    return DATA.copy(), mCw0, tsh0, trh0
# <markdowncell>
# 2.1 Subfunction temperature and flow TABS Cooling
# <codecell>
def calc_TABSC(DATA, Qc0, tair0, Af):
    """Hourly supply/return temperatures of a cooled-ceiling (TABS) system.

    Sizes the panel circuit at nominal conditions (50% of the floor area
    active, 8 K room-panel temperature difference, 2.5 K water temperature
    rise, supply limited by an 18 degC dew point), then for every hour with
    a cooling load at set point solves the panel characteristic for the
    return temperature by Newton iteration.

    Returns (DATA copy with 'tsc'/'trc'/'surfaceC' filled, mCw0 nominal
    capacity flow rate in W/K, tsc0, trc0 design temperatures in degC).

    Fixes vs. original: `scipy.log` (a numpy alias removed from modern
    SciPy) is replaced by `math.log` (all arguments here are scalars), and
    the dead chained assignment `tmean_min = dewP = 18` is untangled.
    """
    from math import log  # scipy.log was a deprecated numpy alias, removed in modern SciPy
    tair0 = tair0 + 273
    qc0 = Qc0/(Af*0.5)  # 50% of the area available for heat exchange (panel size)
    dewP = 18           # dew-point limit to avoid condensation, degC
    deltaC_N = 8        # design room-panel temperature difference, K
    Sc0 = 2.5           # design rise of the supplied water temperature, K
    delta_in_des = deltaC_N + Sc0/2
    U0 = qc0/deltaC_N
    tsc0 = tair0 - 273 - delta_in_des
    if tsc0 <= dewP:
        tsc0 = dewP - 1  # keep the supply 1 K above the dew point
    trc0 = tsc0 + Sc0
    tsc0 = tsc0 + 273
    trc0 = trc0 + 273
    tmean_min = (tsc0+trc0)/2  # design mean water temperature
    mCw0 = Qc0/(trc0-tsc0)     # nominal capacity flow rate, W/K
    LMRT = (trc0-tsc0)/log((tsc0-tair0)/(trc0-tair0))
    kC0 = Qc0/(LMRT)
    k1 = 1/mCw0
    def fc(x):
        # Residual of the panel characteristic for return temperature x.
        # k2 and tair are rebound per hour in the loop below (late-binding closure).
        return mCw0*k2-kC0*(k2/(log((x-k2-tair)/(x-tair))))
    rows = DATA.Qcsf.count()
    DATA['surfaceC'] = 0
    for row in range(rows):
        # only hours at the cooling set point (or 30 degC setback: hotel case)
        if DATA.loc[row,'Qcsf'] != 0 and (DATA.loc[row,'tair'] == (tair0-273) or DATA.loc[row,'tair'] == 30):
            Q = DATA.loc[row,'Qcsf']
            q = Q/(Af*0.5)
            k2 = Q*k1
            tair = DATA.loc[row,'tair'] + 273
            DATA.loc[row,'trc'] = scipy.optimize.newton(fc, trc0, maxiter=100, tol=0.01) - 273
            DATA.loc[row,'tsc'] = DATA.loc[row,'trc'] - k2
            DATA.loc[row,'surfaceC'] = DATA.loc[row,'tair'] - (q/U0)
    # back to Celsius for the returned design values; losses considered negligible
    tsc0 = (tsc0-273)
    trc0 = (trc0-273)
    return DATA.copy(), mCw0, tsc0, trc0
# <markdowncell>
# 2.1 Sub-function temperature Air conditioning
# <codecell>
def calc_HVAC(Temp, DATA, tsh0, trh0, Qh0, tair0, nh):
    """Placeholder for the air-conditioning supply/return temperature model.

    Not implemented yet: always returns 0 regardless of the inputs. The
    intended calculation (kept from the draft) would derive the required net
    ventilation from the demanded flow plus infiltration, corrected for
    control (Cctr), duct/AHU leakage and recirculation factors divided by the
    ventilation effectiveness, e.g.:
    DATA['Ve_req'] = (DATA['Ve'] + Temp0['Ve_inf'])*Cctr*Ci_lea*CRCA/ev
    with assumed ev = 1, nrec_teta = 0.75, Cctr = 0.8.
    """
    return 0
# <markdowncell>
# 2.1. Sub-Function Hourly thermal load
# <codecell>
def calc_TL(SystemH, SystemC, te_min, te_max, tm_t0, te_t, tintH_set, tintC_set, Htr_em, Htr_ms, Htr_is, Htr_1, Htr_2, Htr_3, I_st, Hve, Htr_w, I_ia, I_m, Cm, Af, Losses, tHset_corr, tCset_corr):
    """One hour of the EN ISO 13790 simple-hourly (5R1C) thermal balance.

    Implements the standard three-step solution: (1) free-floating, (2) a
    10 W/m2 probe load to linearise the response, (3) interpolated demand,
    capped at the installed capacity. TABS (floor heating/cooling) adds an
    extra heat flow to the mass node driven by a weather-compensated slab
    temperature. Installed capacities are assumed effectively unlimited
    (500 W/m2 each way).

    Returns [tm_t, tair_ac, top_ac, IH_nd_ac, IC_nd_ac]: mass-node
    temperature, actual air and operative temperatures, and the heating
    (>=0) and cooling (<=0) demands in W.

    Fix vs. original: in the capacity-limited recomputation (case 3) the
    air temperature used `IHC_nd` (the probe load) instead of `IHC_nd_ac`
    (the capped load), inconsistent with the surface-node line beside it.
    NOTE(review): top0 uses 0.31/0.69 weights while top10 uses 0.3/0.7 —
    preserved as found; confirm which pair is intended.
    """
    IC = 500  # assumed installed cooling capacity, W/m2 (effectively unlimited)
    IH = 500  # assumed installed heating capacity, W/m2
    if Losses == 1:
        # shift set points to account for emission and control losses
        tintH_set = tintH_set + tHset_corr
        tintC_set = tintC_set + tCset_corr
    # Case 1: free floating, IHC_nd = 0
    IHC_nd = 0
    IC_nd_ac = 0
    IH_nd_ac = 0
    Im_tot = I_m + Htr_em * te_t + Htr_3*(I_st + Htr_w*te_t + Htr_1*(((I_ia + IHC_nd)/Hve)+ te_t))/Htr_2
    tm_t = (tm_t0 *((Cm/3600)-0.5*(Htr_3+ Htr_em))+ Im_tot)/((Cm/3600)+0.5*(Htr_3+Htr_em))
    tm = (tm_t+tm_t0)/2
    if SystemH == 'Floor heating' or SystemC == 'Floor cooling':
        # TABS: slab temperature limited to 29 degC, weather-compensated
        t_TABS = 29 - (29-15)*(te_t-te_min)/(te_max-te_min)
        I_TABS = Af/0.08*(t_TABS-tm)
        Im_tot = Im_tot+I_TABS
        tm_t = (tm_t0 *((Cm/3600)-0.5*(Htr_3+ Htr_em))+ Im_tot)/((Cm/3600)+0.5*(Htr_3+Htr_em))
        tm = (tm_t+tm_t0)/2
    ts = (Htr_ms * tm + I_st + Htr_w*te_t + Htr_1*(te_t+(I_ia+IHC_nd)/Hve))/(Htr_ms+Htr_w+Htr_1)
    tair0 = (Htr_is*ts + Hve*te_t + I_ia + IHC_nd)/(Htr_is+Hve)
    top0 = 0.31*tair0+0.69*ts
    if (tintH_set <= tair0) and (tair0 <= tintC_set):
        # free-floating temperature is inside the comfort band: no demand
        tair_ac = tair0
        top_ac = top0
        IHC_nd_ac = 0
        IH_nd_ac = IHC_nd_ac
        IC_nd_ac = IHC_nd_ac
    else:
        if tair0 > tintC_set:
            tair_set = tintC_set
        else:
            tair_set = tintH_set
        # Case 2: probe load IHC_nd = 10 W/m2
        IHC_nd = IHC_nd_10 = 10*Af
        Im_tot = I_m + Htr_em * te_t + Htr_3*(I_st + Htr_w*te_t + Htr_1*(((I_ia + IHC_nd)/Hve)+ te_t))/Htr_2
        tm_t = (tm_t0 *((Cm/3600)-0.5*(Htr_3+ Htr_em))+ Im_tot)/((Cm/3600)+0.5*(Htr_3+Htr_em))
        tm = (tm_t+tm_t0)/2
        if SystemH == 'Floor heating' or SystemC == 'Floor cooling':
            t_TABS = 29 - (29-15)*(te_t-te_min)/(te_max-te_min)
            I_TABS = Af/0.08*(t_TABS-tm)
            Im_tot = Im_tot+I_TABS
            tm_t = (tm_t0 *((Cm/3600)-0.5*(Htr_3+ Htr_em))+ Im_tot)/((Cm/3600)+0.5*(Htr_3+Htr_em))
            tm = (tm_t+tm_t0)/2
        ts = (Htr_ms * tm + I_st + Htr_w*te_t + Htr_1*(te_t+(I_ia+IHC_nd)/Hve))/(Htr_ms+Htr_w+Htr_1)
        tair10 = (Htr_is*ts + Hve*te_t + I_ia + IHC_nd)/(Htr_is+Hve)
        top10 = 0.3*tair10+0.7*ts
        # linear interpolation to the load that reaches the set point
        IHC_nd_un = IHC_nd_10*(tair_set - tair0)/(tair10-tair0)
        IC_max = -IC*Af
        IH_max = IH*Af
        if IC_max < IHC_nd_un < IH_max:
            # demand within capacity: set point is met
            tair_ac = tair_set
            top_ac = 0.31*tair_ac+0.69*ts
            IHC_nd_ac = IHC_nd_un
        else:
            if IHC_nd_un > 0:
                IHC_nd_ac = IH_max
            else:
                IHC_nd_ac = IC_max
            # Case 3: maximum power exceeded — recompute at the capped load
            Im_tot = I_m + Htr_em * te_t + Htr_3*(I_st + Htr_w*te_t + Htr_1*(((I_ia + IHC_nd_ac)/Hve)+ te_t))/Htr_2
            tm_t = (tm_t0 *((Cm/3600)-0.5*(Htr_3+ Htr_em))+ Im_tot)/((Cm/3600)+0.5*(Htr_3+Htr_em))
            tm = (tm_t+tm_t0)/2
            if SystemH == 'Floor heating' or SystemC == 'Floor cooling':
                t_TABS = 29 - (29-15)*(te_t-te_min)/(te_max-te_min)
                I_TABS = Af/0.08*(t_TABS-tm)
                Im_tot = Im_tot+I_TABS
                tm_t = (tm_t0 *((Cm/3600)-0.5*(Htr_3+ Htr_em))+ Im_tot)/((Cm/3600)+0.5*(Htr_3+Htr_em))
                tm = (tm_t+tm_t0)/2
            ts = (Htr_ms * tm + I_st + Htr_w*te_t + Htr_1*(te_t+(I_ia+IHC_nd_ac)/Hve))/(Htr_ms+Htr_w+Htr_1)
            # fixed: was IHC_nd (the probe load) instead of the capped IHC_nd_ac
            tair_ac = (Htr_is*ts + Hve*te_t + I_ia + IHC_nd_ac)/(Htr_is+Hve)
            top_ac = 0.31*tair_ac+0.69*ts
    # Results: positive demand is heating, negative is cooling
    if IHC_nd_ac > 0:
        IH_nd_ac = IHC_nd_ac
    else:
        IC_nd_ac = IHC_nd_ac
    Results = [tm_t, tair_ac, top_ac, IH_nd_ac, IC_nd_ac]
    return list(Results)
# <markdowncell>
# 2.1. Sub-Function Shading Factors of movable parts
# <codecell>
#It calculates the reduction factor of shading due to the type of shading device
def Calc_Rf_sh(ShadingPosition, ShadingType):
    """Reduction factor of solar gains for movable shading devices.

    Shading type codes: 0 none, 1 louvres, 2 rollo, 3 venetian blinds,
    4 curtain, 5 solar-control glass. Position: 1 = exterior, 0 = interior.
    Returns the matching factor, or None when the type is unknown or the
    position is neither 0 nor 1.
    """
    # type code -> (factor when mounted inside, factor when mounted outside)
    factors = {
        0: (1, 1),
        1: (0.2, 0.08),
        2: (0.2, 0.08),
        3: (0.3, 0.15),
        4: (0.77, 0.57),
        5: (0.1, 0.1),
    }
    if ShadingType in factors:
        value_in, value_out = factors[ShadingType]
        if ShadingPosition == 1:
            return value_out
        if ShadingPosition == 0:
            return value_in
    return None
# <codecell>
def calc_gl(radiation, g_gl, Rf_sh):
    """Effective solar transmittance for one hour.

    Movable shading is assumed deployed when incident radiation exceeds
    300 W/m2, in which case the transmittance is reduced by `Rf_sh`.
    """
    return g_gl * Rf_sh if radiation > 300 else g_gl
# <markdowncell>
# 2.2. Sub-Function equivalent profile of Occupancy
# <codecell>
def calc_Type(Profiles, Profiles_names, AllProperties, i, Servers, Coolingroom):
    """Equivalent occupancy profile for building `i`.

    Combines the per-use hourly profiles into one weighted-average profile,
    with weights taken from the building's use-share columns
    (`AllProperties.loc[i, Profiles_names[k]]`). When the building has no
    servers, profile 1 is replaced by profile 0; when it has no cooling
    room, profile 10 is replaced by profile 15.

    Returns a new DataFrame shaped like Profiles[0] with the weighted
    columns filled in.

    Fixes vs. original: the combination is computed into a fresh copy
    instead of mutating Profiles[0]'s columns in place — the in-place
    version corrupted the shared profile list for every subsequent
    building, and the `Profiles[1] = Profiles[0]` alias then double-scaled
    profile 1 because profile 0 had already been overwritten.
    """
    profiles_num = len(Profiles)
    if Servers == 0:
        Profiles[1] = Profiles[0]
    if Coolingroom == 0:
        Profiles[10] = Profiles[15]
    # columns that are blended by use-share weight
    fields = ['Ve', 'I_int', 'tintH_set', 'tintC_set', 'Mww', 'Mw',
              'Ealf_ve', 'Ealf_nove', 'People']
    result = Profiles[0].copy()
    for field in fields:
        combined = AllProperties.loc[i, Profiles_names[0]] * Profiles[0][field]
        for num in range(1, profiles_num):
            combined = combined + AllProperties.loc[i, Profiles_names[num]] * Profiles[num][field]
        result[field] = combined
    return result
# <markdowncell>
# 2.3 Sub-Function calculation of thermal losses of emission systems differet to air conditioning
# <codecell>
def calc_Qem_ls(SystemH, SystemC):
    """Set-point corrections (K) for emission and control losses.

    Values per SIA 2044 (the national standard replacing EN 15243
    suggestions): each correction is emission loss + control loss. Returns
    [heating correction (positive), cooling correction (negative)].

    Fix vs. original: `if SystemH == 'Ceiling heating' or 'Radiator':`
    was always truthy (the string literal is truthy), so every heating
    system — including floor heating and air conditioning — fell into the
    first branch.
    """
    tHC_corr = [0, 0]
    # heating: emission + control loss
    if SystemH == 'Ceiling heating' or SystemH == 'Radiator':
        tHC_corr[0] = 0.5 + 1.2
    elif SystemH == 'Floor heating':
        tHC_corr[0] = 0 + 1.2
    elif SystemH == 'Air conditioning':
        # no emission losses, but losses for ventilation; regulation not accounted here
        tHC_corr[0] = 0.5 + 1
    else:
        tHC_corr[0] = 0.5 + 1.2
    # cooling: emission + control loss (negative shift of the set point)
    if SystemC == 'Ceiling cooling':
        tHC_corr[1] = 0 - 1.8
    elif SystemC == 'Floor cooling':
        tHC_corr[1] = -0.4 - 1.8
    elif SystemC == 'Air conditioning':
        # no emission losses, but losses for ventilation; regulation not accounted here
        tHC_corr[1] = 0 - 1
    else:
        tHC_corr[1] = 0 - 1.2
    return list(tHC_corr)
# <markdowncell>
# 2.1. Sub-Function losses heating system distribution
# <codecell>
def calc_Qdis_ls(SystemH,SystemC,nf,nfpercent, Lw,Ll,year,Af,twws, Bf, Renovated, Occupancy,Seasonhours,footprint):
    """Distribution losses of the heating, cooling and hot-water circuits.

    Follows the SIA 2044 method: equivalent pipe lengths are estimated from
    the building footprint and floor count, and only the non-recoverable
    losses (distribution in the basement) are counted for space heating and
    cooling. Mutates `Occupancy` in place ('tamb', 'Qhs_d_ls', 'Qcs_d_ls',
    'Qww_d_ls', 'Qww_dh_ls') and returns a copy of it.

    NOTE(review): 'tamb' is written twice — first as the basement ambient
    (set-point corrected by Bf) for the basement losses, then overwritten
    with the seasonal set-point extremes for the in-space hot-water losses;
    the order of the blocks below is therefore significant.
    """
    # Local variables
    D = 20   # pipe diameter in mm used for the loss estimate
    tws = 32 # temperature at the spurs according to EN 1516 3-2
    # Linear transmittance coefficients W/(m.K), by construction year and
    # renovation state: [basement, in-space, reserved]
    if year >= 1995 or Renovated == 'Yes':
        Y = [0.2,0.3,0.3]
    elif 1985 <= year < 1995 and Renovated == 'No':
        Y = [0.3,0.4,0.4]
    else:
        Y = [0.4,0.4,0.4]
    # shape correction: footprint vs. bounding rectangle
    fforma = Calc_form(Lw,Ll,footprint)
    # Equivalent pipe lengths (m); hf = standard storey height * (floors - 1 for distribution)
    hf = 3*(nf-1)
    Lv = (2*Ll+0.0325*Ll*Lw+6)*fforma          # space heating/cooling, basement
    Lvww_c = (2*Ll+0.0125*Ll*Lw)*fforma        # DHW circulation loop, basement
    Lvww_dis = (Ll+0.0625*Ll*Lw)*fforma        # DHW distribution, basement
    Lsww_c = (0.075*Ll*Lw*nf*nfpercent*hf)*fforma    # DHW circulation, in-space
    Lsww_dis = (0.038*Ll*Lw*nf*nfpercent*hf)*fforma  # DHW distribution, in-space
    Lslww_dis = (0.05*Ll*Lw*nf*nfpercent)*fforma     # DHW spur lines, in-space
    # Basement ambient temperature per EN: set point corrected by the ground factor Bf
    hours = Occupancy.tamb.count()
    for hour in range(hours):
        if Seasonhours[0] < hour < Seasonhours[1]: # cooling season
            Occupancy.loc[hour,'tamb'] = Occupancy.loc[hour,'tintC_set'] - Bf*(Occupancy.loc[hour,'tintC_set']-Occupancy.loc[hour,'te'])
        elif 0 <= hour <= Seasonhours[0] or Seasonhours[1] <= hour <= 8759:
            Occupancy.loc[hour,'tamb'] = Occupancy.loc[hour,'tintH_set'] - Bf*(Occupancy.loc[hour,'tintH_set']-Occupancy.loc[hour,'te'])
    # Non-recoverable basement distribution losses for space heating and cooling,
    # scaled by the hourly part-load ratio (SIA 2044)
    if SystemH != 'No':
        if Occupancy['Qhs'].max()!=0:
            Occupancy['Qhs_d_ls'] = ((Occupancy['tsh']+Occupancy['trh'])/2-Occupancy['tamb'])*(Occupancy['Qhs']/Occupancy['Qhs'].max())*(Lv*Y[0])
        else:
            Occupancy['Qhs_d_ls'] = 0
    if SystemC != 'No':
        if Occupancy['Qcs'].min()!=0:
            # part-load ratio uses min() because cooling loads are negative
            Occupancy['Qcs_d_ls'] = ((Occupancy['tsc']+Occupancy['trc'])/2-Occupancy['tamb'])*(Occupancy['Qcs']/Occupancy['Qcs'].min())*(Lv*Y[0])
        else:
            Occupancy['Qcs_d_ls']=0
    # Basement losses of the DHW distribution and circulation loop,
    # assuming a flow velocity of 12 l/min
    Occupancy['Qww_d_ls'] = (twws-Occupancy['tamb'])*Y[0]*(Lvww_c+Lvww_dis)*(Occupancy['Mww']*Af)/(12*60)
    # In-space losses: ambient switched to the seasonal set-point extremes
    hours = Occupancy.tamb.count()
    for hour in range(hours):
        if Seasonhours[0] < hour < Seasonhours[1]: # cooling season
            Occupancy.loc[hour,'tamb'] = Occupancy['tintC_set'].min()
        else:
            Occupancy.loc[hour,'tamb'] = Occupancy['tintH_set'].max()
    Occupancy['Qww_dh_ls'] = ((twws-Occupancy['tamb'])*Y[1]*(Lsww_c+Lsww_dis)*((Occupancy['Mww']*Af)/1000)+
                             (tws-Occupancy['tamb'])*Y[1]*(Lslww_dis)*((Occupancy['Mww']*Af)/1000))
    return Occupancy.copy()
# <codecell>
def calc_Qww_dis_ls(nf,nfpercent,Lw,Ll,year,Af,twws, Bf, Renovated, Occupancy,Seasonhours,footprint,calcintload):
    """In-space distribution losses of the domestic hot water system.

    Estimates equivalent pipe lengths from building geometry and writes the
    hourly loss column 'Qww_dh_ls' into `Occupancy` (mutated in place; a
    copy is returned). `calcintload` selects the ambient temperature used:
    1 = internal-loads mode (seasonal set-point extremes, used before the
    thermal simulation), 0 = real-loads mode (simulated air temperature).
    """
    # Local variables
    D = 20   # pipe diameter in mm used for the loss estimate
    tws = 32 # temperature at the spurs according to EN 1516 3-2
    # Linear transmittance coefficients W/(m.K), by construction year and
    # renovation state: [basement, in-space, reserved]
    if year >= 1995 or Renovated == 'Yes':
        Y = [0.2,0.3,0.3]
    elif 1985 <= year < 1995 and Renovated == 'No':
        Y = [0.3,0.4,0.4]
    else:
        Y = [0.4,0.4,0.4]
    # shape correction: footprint vs. bounding rectangle
    fforma = Calc_form(Lw,Ll,footprint)
    # Equivalent pipe lengths (m); hf = standard storey height * (floors - 1)
    hf = 3*(nf-1)
    Lsww_c = 0.075*Ll*Lw*nf*nfpercent*hf*fforma    # circulation loop, in-space
    Lsww_dis = 0.038*Ll*Lw*nf*nfpercent*hf*fforma  # distribution, in-space
    Lslww_dis = (0.05*Ll*Lw*nf*nfpercent)*fforma   # spur lines, in-space
    # Ambient temperature per EN, depending on the calculation mode
    if calcintload == 1:
        hours = Occupancy.tamb.count()
        for hour in range(hours):
            if Seasonhours[0] < hour < Seasonhours[1]: # cooling season
                Occupancy.loc[hour,'tamb'] = Occupancy['tintC_set'].min()
            else:
                Occupancy.loc[hour,'tamb'] = Occupancy['tintH_set'].max()
    else:
        # real loads: use the simulated indoor air temperature
        Occupancy['tamb'] = Occupancy['tair']
    # Losses of circulation/distribution (at twws) plus spur lines (at tws)
    Occupancy['Qww_dh_ls'] = ((twws-Occupancy['tamb'])*Y[1]*(Lsww_c+Lsww_dis)*((Occupancy['Mww']*Af)/1000)+
                             (tws-Occupancy['tamb'])*Y[1]*(Lslww_dis)*((Occupancy['Mww']*Af)/1000))
    return Occupancy.copy()
# <codecell>
#a factor taking into account that Ll and Lw are measured from an approximated rectangular footprint
def Calc_form(Lw, Ll, footprint):
    """Shape correction factor.

    Ratio of the true footprint area to the approximating bounding
    rectangle Lw x Ll; corrects equivalent pipe lengths measured from
    that rectangle.
    """
    return footprint / (Lw * Ll)
# <codecell>
def calc_Aux_hscs(nf,nfpercent,Lw,Ll,footprint,Year,Qhs0,tsh0,trh0,data,Qcs0,tsc0,trc0,SystemH,SystemC,twws,tw):
    """Hourly auxiliary (circulation pump) electricity, according to SIA 2044.

    Adds the columns 'Eaux_hs', 'Eaux_cs' and 'Eaux_ww' (pump power) to
    ``data`` and returns a copy of it.

    Args:
        nf, nfpercent: number of floors and share of occupied floors.
        Lw, Ll, footprint: building width, length and footprint area, used to
            estimate equivalent pipe lengths.
        Year: construction year (selects the efficiency factor ``b``).
        Qhs0, tsh0, trh0: nominal heating load and supply/return temperatures.
        data: hourly DataFrame with 'tamb', 'Qhsf', 'Qcsf', 'Qwwf' columns.
        Qcs0, tsc0, trc0: nominal cooling load and supply/return temperatures.
        SystemH, SystemC: heating/cooling emission system names.
        twws, tw: hot water supply and cold (mains) water temperatures.

    Returns:
        pandas.DataFrame: copy of ``data`` with the auxiliary power columns.

    Fixes relative to the original:
      * the guards ``SystemH != 'Air conditioning' or SystemH != 'No'`` were
        always true (``or`` of two inequalities); they now use ``and``, and
        the cooling guard tests ``SystemC`` instead of ``SystemH``;
      * the heating hydraulic power used 0.2278 where every other section
        uses 0.2778 (= 1/3.6 unit conversion) -- assumed a typo;
      * hours without demand now always receive an explicit 0 instead of
        leaving NaN (DHW) or an undefined column (skipped heating/cooling);
      * dead pre-loop ``feff``/``Ppu_dis`` computations were removed.
    """
    hf = 3  # storey height in m
    fforma = Calc_form(Lw,Ll,footprint)
    # constants
    deltaP_l = 0.1   # pressure loss per metre of pipe
    fsr = 0.3        # surcharge for special resistances
    cp = 1000*4.186  # heat capacity of water
    # efficiency factor depending on new or old building; 2000 as time line
    if Year >= 2000:
        b = 1
    else:
        b = 1.2
    hours = data.tamb.count()
    # --- heating system pump ---
    if SystemH != 'Air conditioning' and SystemH != 'No':
        fctr = 1.05
        # NOTE(review): heating scales Qhs0 by 1000 here but uses plain Qhs0
        # in the part-load test below, while cooling does the opposite -- the
        # unit convention of Qhs0/Qcs0 should be confirmed.
        qV_des = Qhs0*1000/((tsh0-trh0)*cp)  # design flow
        Imax = 2*(Ll+Lw/2+hf+(nf*nfpercent)+10)*fforma  # equivalent pipe length
        deltaP_des = Imax*deltaP_l*(1+fsr)
        Phy_des = 0.2778*deltaP_des*qV_des  # design hydraulic power
        for hour in range(hours):
            if data.loc[hour,'Qhsf'] > 0:
                # Above 67% part load the pump runs at design power,
                # otherwise at the SIA reduced-power fraction.
                if data.loc[hour,'Qhsf']/Qhs0 > 0.67:
                    Ppu_dis_hy_i = Phy_des
                else:
                    Ppu_dis_hy_i = 0.0367*Phy_des
                feff = (1.25*(200/Ppu_dis_hy_i)**0.5)*fctr*b
                data.loc[hour,'Eaux_hs'] = Ppu_dis_hy_i*feff
            else:
                data.loc[hour,'Eaux_hs'] = 0
    else:
        data['Eaux_hs'] = 0
    # --- cooling system pump ---
    if SystemC != 'Air conditioning' and SystemC != 'No':
        fctr = 1.10
        qV_des = Qcs0/((trc0-tsc0)*cp)
        Imax = 2*(Ll+Lw/2+hf+(nf*nfpercent)+10)*fforma
        deltaP_des = Imax*deltaP_l*(1+fsr)
        Phy_des = 0.2778*deltaP_des*qV_des
        for hour in range(hours):
            if data.loc[hour,'Qcsf'] > 0:
                if data.loc[hour,'Qcsf']/(Qcs0*1000) > 0.67:
                    Ppu_dis_hy_i = Phy_des
                else:
                    Ppu_dis_hy_i = 0.0367*Phy_des
                feff = (1.25*(200/Ppu_dis_hy_i)**0.5)*fctr*b
                data.loc[hour,'Eaux_cs'] = Ppu_dis_hy_i*feff
            else:
                data.loc[hour,'Eaux_cs'] = 0
    else:
        data['Eaux_cs'] = 0
    # --- domestic hot water pump (always present) ---
    Qww_des = data['Qwwf'].max()  # hoisted out of the hourly loop
    qV_des = Qww_des/((twws-tw)*cp)
    Imax = 2*(Ll+2.5+hf+(nf*nfpercent))*fforma
    deltaP_des = Imax*deltaP_l*(1+fsr)
    Phy_des = 0.2778*deltaP_des*qV_des
    for hour in range(hours):
        if data.loc[hour,'Qwwf'] > 0:
            if data.loc[hour,'Qwwf']/Qww_des > 0.67:
                Ppu_dis_hy_i = Phy_des
            else:
                Ppu_dis_hy_i = 0.0367*Phy_des
            # The DHW loop applies no fctr, matching the original per-hour code.
            feff = (1.25*(200/Ppu_dis_hy_i)**0.5)*b
            data.loc[hour,'Eaux_ww'] = Ppu_dis_hy_i*feff
        else:
            data.loc[hour,'Eaux_ww'] = 0
    return data.copy()
# <markdowncell>
# 2.1. Sub-Function calculation of nominal temperatures of system
# <codecell>
def calc_em_t(SystemH,SystemC):
    """Return nominal system temperatures and the heating emission factor.

    References: radiators 70/50 C (several authors); floor/ceiling cooling
    18-22 C (thermofloor.co.uk); floor/ceiling heating EN 1264-3; emission
    factors extracted from SIA 384/2, 1984.

    Args:
        SystemH: name of the heating emission system.
        SystemC: name of the cooling emission system.

    Returns:
        (tsh0, trh0, tsc0, trc0, nh): nominal heating supply/return and
        cooling supply/return temperatures, plus the heating emission factor.
        Unknown system names fall back to the defaults (70, 50, 7, 12, 0.3).
    """
    heating_specs = {
        'Ceiling heating': (35, 25, 0.22),
        'Radiator': (70, 50, 0.33),
        'Floor heating': (35, 25, 0.24),
        'Air conditioning': (60, 50, 0.3),
    }
    cooling_specs = {
        'Ceiling cooling': (15, 20),
        'Floor cooling': (15, 20),
        'Air conditioning': (7, 12),
    }
    tsh0, trh0, nh = heating_specs.get(SystemH, (70, 50, 0.3))
    tsc0, trc0 = cooling_specs.get(SystemC, (7, 12))
    return tsh0,trh0,tsc0,trc0,nh
# <markdowncell>
# ##STATISTICAL ENERGY MODEL
# <codecell>
def Querystatistics(CQ, CQ_name, Model, locationtemp1,locationFinal):
    """Join a city-quarter table with the statistical model and export results.

    Converts the ArcGIS table ``CQ`` to a DataFrame, classifies each building
    (main use + year category), joins it with ``Model`` on the combined code,
    and exports building properties, equipment flags and annual energy loads.

    NOTE(review): depends on ``arcpy`` and ``dbf2df`` (imported elsewhere in
    the file), writes to the hard-coded path ``c:\\ArcGIS\\EDMdata`` and uses
    the legacy ``pd.pivot_table(rows=..., cols=...)`` signature, so it only
    runs on very old pandas (< 0.14) -- confirm the target environment.

    Args:
        CQ: ArcGIS feature class / table of the city quarter.
        CQ_name: name of the city quarter, used to build output paths.
        Model: DataFrame with the statistical model, keyed by 'Code'.
        locationtemp1: scratch directory for the intermediate DBF table.
        locationFinal: base directory for the final 'Loads.csv'.

    Returns:
        DataFrame with total annual loads per building (presumably MWh --
        see the in-line comments below).
    """
    #Create the table or database of the CQ to generate the values
    OutTable = 'Database.dbf'
    arcpy.TableToTable_conversion(CQ, locationtemp1, OutTable)
    Database0 = dbf2df(locationtemp1+'\\'+OutTable)
    #THE FIRST PART RELATED TO THE BUILDING PROPERTIES
    #Assing main use of the building To assign systems of heating or cooling in a building basis.
    Database = MainUse(Database0)
    # assign the year of each category and create a new code
    Database['YearCat'] = Database.apply(lambda x: YearCategoryFunction(x['Year'], x['Renovated']), axis=1)
    Database['CODE'] = Database.Type + Database.YearCat
    # Create join with the model
    Joineddata = pd.merge(Database, Model, left_on='CODE', right_on='Code')
    Joineddata.rename(columns={'Hs_x':'Hs'},inplace=True)
    # EXPORT PROPERTIES
    # NOTE(review): 'cols' is passed as a set, so column order in the Excel
    # export is not deterministic -- confirm that consumers do not rely on it.
    Joineddata.to_excel('c:\ArcGIS\EDMdata\Statistical'+'\\'+CQ_name+'\\'+'Properties.xls',
                        sheet_name='Values',index=False,cols={'Name','tsh0','trh0','tsc0','trc0','Hs','Es','PFloor','Year','fwindow',
                        'Floors','Construction','Emission_heating','Emission_cooling',
                        'Uwall','Uroof','Ubasement','Uwindow'})
    #EXPORT PROPERTIES RELATED TO PROCESEES AND EQUIPMENT
    Counter = Joineddata.INDUS.count()
    # Flag buildings that contain industry, server rooms, cooling rooms or
    # ice rinks (any strictly positive share counts).
    Joineddata['E4'] = Joineddata['SRFlag'] = Joineddata['CRFlag'] = Joineddata['ICEFlag'] = 0
    for row in range(Counter):
        if Joineddata.loc[row,'INDUS'] >0:
            Joineddata.loc[row,'E4'] = 1
        if Joineddata.loc[row,'SR'] >0:
            Joineddata.loc[row,'SRFlag'] = 1
        if Joineddata.loc[row,'ICE'] >0:
            Joineddata.loc[row,'ICEFlag'] = 1
        if Joineddata.loc[row,'CR'] >0:
            Joineddata.loc[row,'CRFlag'] = 1
    Joineddata.to_excel('c:\ArcGIS\EDMdata\Statistical'+'\\'+CQ_name+'\\'+'Equipment.xls',
                        sheet_name='Values',index=False,cols={'Name','CRFlag','SRFlag','ICEFlag',
                        'E4'})
    #THE OTHER PART RELATED TO THE ENERGY VALUES'
    # Unpivot so every (building, use) pair becomes one row; 'variable' holds
    # the use name and 'value' its share of the building.
    DatabaseUnpivoted = pd.melt(Database, id_vars=('Name','Shape_Area','YearCat','Hs','Floors'))
    DatabaseUnpivoted['CODE'] = DatabaseUnpivoted.variable + DatabaseUnpivoted.YearCat
    #Now both Database with the new codification is merged or joined to the values of the Statistical model
    DatabaseModelMerge = pd.merge(DatabaseUnpivoted, Model, left_on='CODE', right_on='Code')
    #Now the values are created. as all the intensity values are described in MJ/m2.
    ##they are transformed into MWh, Heated space is assumed as an overall 90% of the gross area according to the standard SIA,
    ##unless it is known (Siemens buildings and surroundings, Obtained during visual inspection a report of the area Grafenau)
    counter = DatabaseModelMerge.value.count()
    # Prefer the measured heated-share Hs_x over the model default Hs_y.
    for r in range (counter):
        if DatabaseModelMerge.loc[r,'Hs_x']>0:
            DatabaseModelMerge.loc[r,'Hs_y'] = DatabaseModelMerge.loc[r,'Hs_x']
    # Load = use share * gross area * floors * heated share * intensity / 1000.
    DatabaseModelMerge['Qhsf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qhsf_kWhm2/1000
    DatabaseModelMerge['Qhpf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qhpf_kWhm2/1000
    DatabaseModelMerge['Qwwf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qwwf_kWhm2/1000
    DatabaseModelMerge['Qcsf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qcsf_kWhm2/1000
    DatabaseModelMerge['Qcdataf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qcdataf_kWhm2/1000
    DatabaseModelMerge['Qcicef'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qcicef_kWhm2/1000
    DatabaseModelMerge['Qcpf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.qcpf_kWhm2/1000
    DatabaseModelMerge['Ealf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.Ealf_kWhm2/1000
    DatabaseModelMerge['Edataf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Hs_y* DatabaseModelMerge.Edataf_kWhm2/1000
    # NOTE(review): Epf scales by Es (serviced share) instead of Hs_y -- this
    # looks intentional (process energy), but confirm.
    DatabaseModelMerge['Epf'] = DatabaseModelMerge.value * DatabaseModelMerge.Shape_Area * DatabaseModelMerge.Floors * DatabaseModelMerge.Es* DatabaseModelMerge.Epf_kWhm2/1000
    DatabaseModelMerge['Ecaf'] = 0 #compressed air is 0 for all except siemens where data is measured.
    # Pivoting the new table and summing rows all in MWh
    Qhsf = pd.pivot_table(DatabaseModelMerge, values='Qhsf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Qhpf = pd.pivot_table(DatabaseModelMerge, values='Qhpf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Qwwf = pd.pivot_table(DatabaseModelMerge, values='Qwwf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Qcsf = pd.pivot_table(DatabaseModelMerge, values='Qcsf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Qcdataf = pd.pivot_table(DatabaseModelMerge, values='Qcdataf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Qcicef = pd.pivot_table(DatabaseModelMerge, values='Qcicef', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Qcpf = pd.pivot_table(DatabaseModelMerge, values='Qcpf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Ealf = pd.pivot_table(DatabaseModelMerge, values = 'Ealf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Edataf = pd.pivot_table(DatabaseModelMerge, values='Edataf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Epf = pd.pivot_table(DatabaseModelMerge, values='Epf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    Ecaf = pd.pivot_table(DatabaseModelMerge, values='Ecaf', rows='Name', cols='CODE', aggfunc='sum', margins='add all rows')
    # The 'All' margin column holds each building's total over all use codes.
    Total = pd.DataFrame({'Qhsf': Qhsf['All'],'Qhpf': Qhpf['All'],'Qwwf': Qwwf['All'],'Qcsf': Qcsf['All'],'Qcpf': Qcpf['All'],
                          'Ealf': Ealf['All'],'Epf': Epf['All'],'Edataf': Edataf['All'],'Qcdataf': Qcdataf['All'],
                          'Ecaf': Ecaf['All'],'Qcicef': Qcicef['All'] })
    # reset index
    Total['Name'] = Total.index
    counter = Total.Qhsf.count()
    Total.index = range(counter)
    Total.to_csv(locationFinal+'\\'+CQ_name+'\\'+'Loads.csv', index=False)
    return Total
# <markdowncell>
# This function estimates the main type of ocupation in the building. as a result those values such as coefficients of trasnmittance, temperatures of operation and type of emission systems are selected in a mayority basis.
# <codecell>
def MainUse(Database0):
    """Assign the dominant occupancy type of every building.

    Any use whose share is at least 0.5 becomes the building's 'Type'; if
    several qualify, the last one in the ``uses`` list wins (same scan order
    as the original implementation). Buildings without a dominant use keep
    the default 'MDU'.
    """
    uses = ['ADMIN','SR','INDUS','REST','RESTS','DEPO','COM','MDU','SDU','EDU','CR','HEALTH','SPORT',
            'SWIM','PUBLIC','SUPER','ICE','HOT']
    Database0['Type'] = 'MDU'
    n_buildings = Database0.ADMIN.count()
    for row in range(n_buildings):
        for use in uses:
            if Database0.loc[row, use] >= 0.5:
                Database0.loc[row, 'Type'] = use
    return Database0.copy()
# <markdowncell>
# Sub-function: assign As the values in the statistical model are codified according to a secuence of 1, 2, 3, 4 and 5, a function has to be define to codify in the same therms the Database, a new filed (YearCAt) is assigned to the Database
# <codecell>
def YearCategoryFunction(x,y):
    """Map construction year ``x`` and renovation flag ``y`` to a category code.

    Codes '1'-'6' index construction periods (<=1920, <=1970, <=1980, <=2000,
    <=2020, >2020); codes '7'-'10' are the renovated ('Yes') counterparts of
    the periods up to 2000. Renovated buildings newer than 2000 keep their
    plain period code.
    """
    if y == 'Yes' and x <= 2000:
        # Renovated stock has its own categories up to the year 2000.
        if x <= 1920:
            result = '7'
        elif x <= 1970:
            result = '8'
        elif x <= 1980:
            result = '9'
        else:
            result = '10'
    elif x <= 1920:
        result = '1'
    elif x <= 1970:
        result = '2'
    elif x <= 1980:
        result = '3'
    elif x <= 2000:
        result = '4'
    elif x <= 2020:
        result = '5'
    elif x > 2020:
        result = '6'
    return result
| [
"fonseca@arch.ethz.ch"
] | fonseca@arch.ethz.ch |
78449bf47c907409436262751fab4a0327e9bb74 | ad5d38fce4785037c108186f17eb1c64380355ef | /sddsd/google-cloud-sdk.staging/lib/googlecloudsdk/api_lib/cloudbuild/cloudbuild_util.py | c5720e7008ffdafaf648390fa1b04db8874cdcd5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | saranraju90/multik8s | 75864b605a139ddb7947ed4de4ae8466bdd49acb | 428576dedef7bb9cd6516e2c1ab2714581e1137c | refs/heads/master | 2023-03-03T21:56:14.383571 | 2021-02-20T14:56:42 | 2021-02-20T14:56:42 | 339,665,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,887 | py | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the cloudbuild API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from apitools.base.protorpclite import messages as proto_messages
from apitools.base.py import encoding as apitools_encoding
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import yaml
from googlecloudsdk.core.resource import resource_property
from googlecloudsdk.core.util import files
import six
_API_NAME = 'cloudbuild'
_GA_API_VERSION = 'v1'
_BETA_API_VERSION = 'v1beta1'
RELEASE_TRACK_TO_API_VERSION = {
base.ReleaseTrack.GA: _GA_API_VERSION,
base.ReleaseTrack.BETA: _BETA_API_VERSION,
base.ReleaseTrack.ALPHA: _BETA_API_VERSION,
}
REGIONAL_WORKERPOOL_NAME_MATCHER = r'projects/.*/locations/.*/workerPools/.*'
REGIONAL_WORKERPOOL_NAME_SELECTOR = r'projects/.*/locations/.*/workerPools/(.*)'
REGIONAL_WORKERPOOL_REGION_SELECTOR = r'projects/.*/locations/(.*)/workerPools/.*'
# Default for optionally-regional requests when the user does not specify.
DEFAULT_REGION = 'global'
def GetMessagesModule(release_track=base.ReleaseTrack.GA):
  """Return the generated proto message module for the Cloud Build API.

  Args:
    release_track: googlecloudsdk.calliope.base.ReleaseTrack enum value
      selecting which API version's messages to load.

  Returns:
    Module containing the definitions of messages for Cloud Build.
  """
  api_version = RELEASE_TRACK_TO_API_VERSION[release_track]
  return apis.GetMessagesModule(_API_NAME, api_version)
def GetClientClass(release_track=base.ReleaseTrack.GA):
  """Return the API client class for Cloud Build.

  Args:
    release_track: googlecloudsdk.calliope.base.ReleaseTrack enum value
      selecting which API version's client class to load.

  Returns:
    base_api.BaseApiClient, Client class for Cloud Build.
  """
  api_version = RELEASE_TRACK_TO_API_VERSION[release_track]
  return apis.GetClientClass(_API_NAME, api_version)
def GetClientInstance(release_track=base.ReleaseTrack.GA, use_http=True):
  """Instantiate a client for the Cloud Build API.

  Args:
    release_track: googlecloudsdk.calliope.base.ReleaseTrack enum value
      selecting which API version to talk to.
    use_http: bool, True to create an http object for this client.

  Returns:
    base_api.BaseApiClient, An instance of the Cloud Build client.
  """
  api_version = RELEASE_TRACK_TO_API_VERSION[release_track]
  return apis.GetClientInstance(_API_NAME, api_version, no_http=(not use_http))
def EncodeSubstitutions(substitutions, messages):
  """Convert a substitutions dict into a Build.SubstitutionsValue message.

  Returns None when the dict is empty or None. Entries are sorted by key so
  the encoded message is deterministic for tests.
  """
  if not substitutions:
    return None
  # TODO(b/35470611): Use map encoder function instead when implemented
  substitution_properties = [
      messages.Build.SubstitutionsValue.AdditionalProperty(key=key, value=value)
      for key, value in sorted(six.iteritems(substitutions))
  ]
  return messages.Build.SubstitutionsValue(
      additionalProperties=substitution_properties)
def EncodeTriggerSubstitutions(substitutions, messages):
  """Convert a substitutions dict into a BuildTrigger.SubstitutionsValue.

  Returns None when the dict is empty or None. Entries are sorted by key so
  the encoded message is deterministic for tests.
  """
  if not substitutions:
    return None
  substitution_properties = [
      messages.BuildTrigger.SubstitutionsValue.AdditionalProperty(
          key=key, value=value)
      for key, value in sorted(six.iteritems(substitutions))
  ]
  return messages.BuildTrigger.SubstitutionsValue(
      additionalProperties=substitution_properties)
class ParserError(exceptions.Error):
  """Error parsing YAML into a dictionary."""

  def __init__(self, path, msg):
    # Prefix the failing path so the user knows which input was bad.
    formatted = 'parsing {path}: {msg}'.format(path=path, msg=msg)
    super(ParserError, self).__init__(formatted)
class ParseProtoException(exceptions.Error):
  """Error interpreting a dictionary as a specific proto message."""

  def __init__(self, path, proto_name, msg):
    # Report both the input path and the target message type.
    formatted = 'interpreting {path} as {proto_name}: {msg}'.format(
        path=path, proto_name=proto_name, msg=msg)
    super(ParseProtoException, self).__init__(formatted)
def SnakeToCamelString(snake):
  """Change a snake_case string into a camelCase string.

  Leading underscores are preserved: any run of '_' at the start is collapsed
  into a prefix of the first real part. A string of only underscores is
  returned unchanged.

  Args:
    snake: str, the string to be transformed.

  Returns:
    str, the transformed string.
  """
  pieces = snake.split('_')
  if not pieces:
    return snake
  # Count leading empty pieces, i.e. leading underscores in the input.
  blanks = 0
  for piece in pieces:
    if piece:
      break
    blanks += 1
  if blanks:
    pieces = pieces[blanks:]
    if not pieces:
      # The whole string was underscores; split() over-counts by one.
      return '_' * (blanks - 1)
    pieces[0] = '_' * blanks + pieces[0]
  head, rest = pieces[0], pieces[1:]
  return head + ''.join(word.capitalize() for word in rest)
def SnakeToCamel(msg, skip=None):
  """Recursively transform all keys and values from snake_case to camelCase.

  If a key is listed in ``skip``, its value is left alone.

  Args:
    msg: dict, list, or other. Non-container values are returned unchanged.
    skip: contains dict keys whose values should not have camel case applied.

  Returns:
    Same type as msg, with snake_case strings converted to camelCase except
    under dict keys contained in skip.
  """
  if skip is None:
    skip = []
  if isinstance(msg, list):
    return [SnakeToCamel(item, skip) for item in msg]
  if not isinstance(msg, dict):
    return msg
  converted = {}
  for key, val in six.iteritems(msg):
    new_val = val if key in skip else SnakeToCamel(val, skip)
    converted[SnakeToCamelString(key)] = new_val
  return converted
def MessageToFieldPaths(msg):
  """Produce field paths from a message object.

  The result is used to create a FieldMask proto message that contains all
  field paths presented in the object.
  https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto

  Args:
    msg: A user defined message object that extends the messages.Message
      class.
      https://github.com/google/apitools/blob/master/apitools/base/protorpclite/messages.py

  Returns:
    The list of field paths.
  """
  paths = []
  for field in msg.all_fields():
    value = msg.get_assigned_value(field.name)
    if field.repeated and not value:
      # Repeated fields are initialized as empty lists; not real assignments.
      continue
    if value is None:
      continue
    name = resource_property.ConvertToSnakeCase(field.name)
    if hasattr(value, 'all_fields'):
      # Sub-message: expand into dotted sub-paths.
      paths.extend('{}.{}'.format(name, sub)
                   for sub in MessageToFieldPaths(value))
    else:
      paths.append(name)
  return paths
def _UnpackCheckUnused(obj, msg_type):
  """Stuff a dict into a proto message, and fail if there are unused values.

  Unlike a plain DictToMessage call, this also walks the resulting message
  recursively and raises if any input key was not consumed, reporting the
  offending entry as a dotted path (e.g. '.foo.bar.x' or '.items[2].y').

  Args:
    obj: dict(), The structured data to be reflected into the message type.
    msg_type: type, The proto message type.

  Raises:
    ValueError: If there is an unused value in obj.

  Returns:
    Proto message, The message that was created from obj.
  """
  # apitools keeps keys it could not map as "unrecognized fields" on the
  # nearest enclosing message; the recursive check below looks for those.
  msg = apitools_encoding.DictToMessage(obj, msg_type)
  def _CheckForUnusedFields(obj):
    """Check for any unused fields in nested messages or lists."""
    if isinstance(obj, proto_messages.Message):
      unused_fields = obj.all_unrecognized_fields()
      if unused_fields:
        if len(unused_fields) > 1:
          # Because this message shows up in a dotted path, use braces.
          # eg .foo.bar.{x,y,z}
          unused_msg = '{%s}' % ','.join(sorted(unused_fields))
        else:
          # For single items, omit the braces.
          # eg .foo.bar.x
          unused_msg = unused_fields[0]
        raise ValueError('.%s: unused' % unused_msg)
      for used_field in obj.all_fields():
        try:
          field = getattr(obj, used_field.name)
          _CheckForUnusedFields(field)
        except ValueError as e:
          # Prefix the current field name so the error carries the full
          # dotted path by the time it reaches the top-level caller.
          raise ValueError('.%s%s' % (used_field.name, e))
    if isinstance(obj, list):
      for i, item in enumerate(obj):
        try:
          _CheckForUnusedFields(item)
        except ValueError as e:
          # Same idea for repeated fields: record the offending index.
          raise ValueError('[%d]%s' % (i, e))
  _CheckForUnusedFields(msg)
  return msg
def LoadMessageFromStream(stream,
                          msg_type,
                          msg_friendly_name,
                          skip_camel_case=None,
                          path=None):
  """Decode one YAML/JSON document from a stream into a proto message.

  Args:
    stream: file-like object containing the JSON or YAML data to be decoded.
    msg_type: The protobuf message type to create.
    msg_friendly_name: A readable name for the message type, for use in error
      messages.
    skip_camel_case: Contains proto field names or map keys whose values
      should not have camel case applied.
    path: str or None. Optional path to be used in error messages.

  Raises:
    ParserError: If there was a problem parsing the stream as a dict.
    ParseProtoException: If there was a problem interpreting the stream as
      the given message type.

  Returns:
    Proto message, The message that got decoded.
  """
  if skip_camel_case is None:
    skip_camel_case = []
  # First parse the raw text into a dictionary.
  try:
    parsed = yaml.load(stream, file_hint=path)
  except yaml.Error as e:
    raise ParserError(path, e.inner_error)
  if not isinstance(parsed, dict):
    raise ParserError(path, 'Could not parse as a dictionary.')
  # Then interpret the dictionary as the requested message type.
  return _YamlToMessage(parsed, msg_type, msg_friendly_name, skip_camel_case,
                        path)
def LoadMessagesFromStream(stream,
                           msg_type,
                           msg_friendly_name,
                           skip_camel_case=None,
                           path=None):
  """Decode a stream of YAML/JSON documents into a list of proto messages.

  Args:
    stream: file-like object containing the JSON or YAML data to be decoded.
    msg_type: The protobuf message type to create for each document.
    msg_friendly_name: A readable name for the message type, for use in error
      messages.
    skip_camel_case: Contains proto field names or map keys whose values
      should not have camel case applied.
    path: str or None. Optional path to be used in error messages.

  Raises:
    ParserError: If there was a problem parsing the stream.
    ParseProtoException: If there was a problem interpreting a document as
      the given message type.

  Returns:
    Proto message list of the messages that got decoded.
  """
  if skip_camel_case is None:
    skip_camel_case = []
  # Parse the raw text into a sequence of dictionaries.
  try:
    documents = yaml.load_all(stream, file_hint=path)
  except yaml.Error as e:
    raise ParserError(path, e.inner_error)
  decoded = []
  for document in documents:
    decoded.append(
        _YamlToMessage(document, msg_type, msg_friendly_name, skip_camel_case,
                       path))
  return decoded
def _YamlToMessage(structured_data,
                   msg_type,
                   msg_friendly_name,
                   skip_camel_case=None,
                   path=None):
  """Interpret an already-parsed dictionary as a proto message.

  Args:
    structured_data: Dict containing the decoded YAML data.
    msg_type: The protobuf message type to create.
    msg_friendly_name: A readable name for the message type, for use in error
      messages.
    skip_camel_case: Contains proto field names or map keys whose values
      should not have camel case applied.
    path: str or None. Optional path to be used in error messages.

  Raises:
    ParseProtoException: If there was a problem interpreting the dict as the
      given message type.

  Returns:
    Proto message, The message that got decoded.
  """
  # Normalize snake_case keys/values to the camelCase the proto expects.
  camel_data = SnakeToCamel(structured_data, skip_camel_case)
  try:
    return _UnpackCheckUnused(camel_data, msg_type)
  except Exception as e:
    # A valid YAML document is not necessarily a valid message, so any error
    # raised by the dict-to-message conversion is reported as a parse
    # failure for this message type.
    raise ParseProtoException(path, msg_friendly_name, '%s' % e)
def LoadMessageFromPath(path,
                        msg_type,
                        msg_friendly_name,
                        skip_camel_case=None):
  """Decode one YAML/JSON file into a proto message.

  Args:
    path: The path to a file containing the JSON or YAML data to be decoded.
    msg_type: The protobuf message type to create.
    msg_friendly_name: A readable name for the message type, for use in error
      messages.
    skip_camel_case: Contains proto field names or map keys whose values
      should not have camel case applied.

  Raises:
    files.MissingFileError: If the file does not exist.
    ParserError: If there was a problem parsing the file as a dict.
    ParseProtoException: If there was a problem interpreting the file as the
      given message type.

  Returns:
    Proto message, The message that got decoded.
  """
  # files.FileReader yields user-friendly error messages on I/O failure.
  with files.FileReader(path) as reader:
    return LoadMessageFromStream(reader, msg_type, msg_friendly_name,
                                 skip_camel_case, path)
def LoadMessagesFromPath(path,
                         msg_type,
                         msg_friendly_name,
                         skip_camel_case=None):
  """Decode a YAML/JSON file into a list of proto messages.

  Args:
    path: The path to a file containing the JSON or YAML data to be decoded.
    msg_type: The protobuf message type to create for each document.
    msg_friendly_name: A readable name for the message type, for use in error
      messages.
    skip_camel_case: Contains proto field names or map keys whose values
      should not have camel case applied.

  Raises:
    files.MissingFileError: If the file does not exist.
    ParseProtoException: If there was a problem interpreting the file as the
      given message type.

  Returns:
    Proto message list of the messages that got decoded.
  """
  # files.FileReader yields user-friendly error messages on I/O failure.
  with files.FileReader(path) as reader:
    return LoadMessagesFromStream(reader, msg_type, msg_friendly_name,
                                  skip_camel_case, path)
def IsRegionalWorkerPool(resource_name):
  """Determine if the provided full resource name is a regional worker pool.

  Args:
    resource_name: str, The string to test.

  Returns:
    bool, True if the string is a regional worker pool's full resource name.
  """
  return re.match(REGIONAL_WORKERPOOL_NAME_MATCHER, resource_name) is not None
def RegionalWorkerPoolShortName(resource_name):
  """Get the name part of a regional worker pool's full resource name.

  For example, "projects/abc/locations/def/workerPools/ghi" returns "ghi".

  Args:
    resource_name: A regional worker pool's full resource name.

  Raises:
    ValueError: If the full resource name was not well-formatted.

  Returns:
    The worker pool's short name.
  """
  match = re.search(REGIONAL_WORKERPOOL_NAME_SELECTOR, resource_name)
  if not match:
    raise ValueError('The worker pool resource name must match "%s"' %
                     (REGIONAL_WORKERPOOL_NAME_MATCHER,))
  return match.group(1)
def RegionalWorkerPoolRegion(resource_name):
  """Get the region part of a regional worker pool's full resource name.

  For example, "projects/abc/locations/def/workerPools/ghi" returns "def".

  Args:
    resource_name: str, A regional worker pool's full resource name.

  Raises:
    ValueError: If the full resource name was not well-formatted.

  Returns:
    str, The worker pool's region string.
  """
  match = re.search(REGIONAL_WORKERPOOL_REGION_SELECTOR, resource_name)
  if not match:
    raise ValueError('The worker pool resource name must match "%s"' %
                     (REGIONAL_WORKERPOOL_NAME_MATCHER,))
  return match.group(1)
def GitHubEnterpriseConfigFromArgs(args, update=False):
  """Construct the GitHubEnterpriseConfig resource from command line args.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.
    update: bool, if the args are for an update.

  Returns:
    A populated GitHubEnterpriseConfig message.
  """
  messages = GetMessagesModule()
  config = messages.GitHubEnterpriseConfig()
  config.hostUrl = args.host_uri
  config.appId = args.app_id
  if args.webhook_key is not None:
    config.webhookKey = args.webhook_key
  # The peered network may not be changed on update.
  if not update and args.peered_network is not None:
    config.peeredNetwork = args.peered_network
  if args.gcs_bucket is not None:
    # App configuration supplied as a JSON object in Cloud Storage.
    gcs = messages.GCSLocation()
    gcs.bucket = args.gcs_bucket
    gcs.object = args.gcs_object
    if args.generation is not None:
      gcs.generation = args.generation
    config.appConfigJson = gcs
  else:
    # App configuration supplied through Secret Manager references.
    secret_refs = messages.GitHubEnterpriseSecrets()
    secret_refs.privateKeyName = args.private_key_name
    secret_refs.webhookSecretName = args.webhook_secret_name
    secret_refs.oauthSecretName = args.oauth_secret_name
    secret_refs.oauthClientIdName = args.oauth_client_id_name
    config.secrets = secret_refs
  return config
| [
"saranraju90@gmail.com"
] | saranraju90@gmail.com |
2c827b70acdad62ca67fd30e1824c1fba685a3ec | 492c1e1dabb84ec4efb874b3d9228d31a675a38f | /121.py | bd46672c3c29a00f05e67a8d9d5a65edbc8accd8 | [] | no_license | ksnt/leet | 65f3c36c8a524e1cc1a5d00bb7a840222ecc9dfe | 6680ff978b88d3c44e538b4d5f0e6805ed85f9cf | refs/heads/master | 2022-09-24T10:59:18.740314 | 2022-09-01T19:06:12 | 2022-09-01T19:06:12 | 136,970,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | import sys
class Solution:
    def maxProfit(self, prices):
        """Maximum profit from one buy/sell of a stock.

        Single pass: track the lowest price seen so far and the best profit
        achievable by selling at the current price.

        :type prices: List[int]
        :rtype: int
        """
        best_profit = 0
        lowest = float('inf')
        for price in prices:
            if price < lowest:
                lowest = price
            elif price - lowest > best_profit:
                best_profit = price - lowest
        return best_profit
"ksn0215@gmail.com"
] | ksn0215@gmail.com |
33bd9813fab74f630b0d6986aa9f4747cd2d0f9b | 18f2d1458103e1aacaaa14d9ff52654da0154dc8 | /src/layers/cnn.py | a65eefba9fdcd3fd3a51a8020d43ef2cd3f172b7 | [] | no_license | yamad07/IADA | 4fbda5b2e7cdb5efd83f2bd2960bfb8dcfd0d455 | 7dbda1eb336f44e57567f4541e14b31304a4e381 | refs/heads/master | 2020-04-10T23:18:01.809883 | 2019-01-30T16:05:21 | 2019-01-30T16:05:21 | 161,347,800 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import torch.nn as nn
def conv_layer(in_dim, out_dim, kernel_size):
    """Three conv+ELU stages, then batch norm and 2x2 average pooling.

    The first convolution maps in_dim -> out_dim channels; the next two keep
    out_dim channels. Padding preserves spatial size for odd kernel sizes.
    """
    pad = int((kernel_size - 1) / 2)
    stages = []
    for channels_in in (in_dim, out_dim, out_dim):
        stages.append(nn.Conv2d(channels_in, out_dim,
                                kernel_size=kernel_size, padding=pad))
        stages.append(nn.ELU(inplace=True))
    stages.append(nn.BatchNorm2d(out_dim))
    stages.append(nn.AvgPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*stages)
| [
"yuhsukeshootsfc@gmail.com"
] | yuhsukeshootsfc@gmail.com |
a82c891c8c753024768d78e5716329e714114205 | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /xlsxwriter/test/comparison/test_chart_drop_lines01.py | 6e303f1bb4c31e9ce82494adcc98a6d81795dacb | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('chart_drop_lines01.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with drop down lines."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'line'})
        chart.axis_ids = [48034944, 48036864]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # Write each data series into its own column.
        for cell, column in zip(('A1', 'B1', 'C1'), data):
            worksheet.write_column(cell, column)

        chart.set_drop_lines()

        # Both series share the same category range.
        for values in ('=Sheet1!$B$1:$B$5', '=Sheet1!$C$1:$C$5'):
            chart.add_series({
                'categories': '=Sheet1!$A$1:$A$5',
                'values': values,
            })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
440d85991f4a5c63c993bfa5575e75c0fe80b2eb | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY001~099/DAY25-BOJ1068-트리/shinjam.py | 7db78b4398a5df90c58f272225b3fb2e50d4feb0 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | from collections import defaultdict
# BOJ 1068: count the leaves of a tree after deleting one node (and,
# implicitly, its whole subtree -- edges touching the deleted node are
# simply never added, so its descendants are unreachable from the roots).
node_total = int(input())
parent_of = map(int, input().split())
removed = int(input())

adjacency = defaultdict(list)
dfs_stack = []          # seeded with the surviving root(s)
seen = [0] * node_total
for child, parent in enumerate(parent_of):
    if removed in (child, parent):
        continue  # drop every edge that touches the deleted node
    if parent == -1:
        dfs_stack.append(child)
        continue
    adjacency[child].append(parent)
    adjacency[parent].append(child)

leaf_count = 0
while dfs_stack:
    current = dfs_stack.pop()
    seen[current] = 1
    childless = True
    for neighbor in adjacency[current]:
        if not seen[neighbor]:
            dfs_stack.append(neighbor)
            childless = False
    if childless:
        leaf_count += 1
print(leaf_count)
| [
"noreply@github.com"
] | tachyon83.noreply@github.com |
c27db6a1a5fe6540f5fe1c700d2b2ee27a972c38 | 21b39d50e4df56ea01453001845d1580729af1df | /jdcloud_sdk/services/waf/models/WafConf.py | 27441dad49c7751c59eb9cce3518e52ea22c2365 | [
"Apache-2.0"
] | permissive | Tanc009/jdcloud-sdk-python | ef46eac7731aa8a1839b1fc1efd93249b7a977f0 | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | refs/heads/master | 2021-08-09T14:49:16.177709 | 2021-06-25T02:38:41 | 2021-06-25T02:38:41 | 141,714,695 | 0 | 0 | Apache-2.0 | 2018-07-20T13:21:17 | 2018-07-20T13:21:16 | null | UTF-8 | Python | false | false | 1,201 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class WafConf(object):
    """WAF configuration payload for the JDCloud API.

    Fields (all optional, default None):
      enable:      whether the WAF is enabled; 0 means disabled
      wafMode:     0 = protect, 1 = alert only
      wafLevel:    0 = loose, 1 = normal, 2 = strict
      redirection: name of the custom response page
    """

    def __init__(self, enable=None, wafMode=None, wafLevel=None, redirection=None):
        # Mirror every constructor argument onto an attribute of the same name.
        for attr, value in (
            ('enable', enable),
            ('wafMode', wafMode),
            ('wafLevel', wafLevel),
            ('redirection', redirection),
        ):
            setattr(self, attr, value)
| [
"tancong@jd.com"
] | tancong@jd.com |
4a59a6d730c7d42759eeb4c97d075bd0b74a5420 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/vns/rsvdevdomainrefconttodomainref.py | 6f6631bb9d8ebd61481610df7c86e13fd1a69120 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,979 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsVDevDomainRefContToDomainRef(Mo):
    """
    Named N-to-1 relation from a VDev DomainRef container
    (cobra.model.vns.VDevDomainRefCont) to an AAA DomainRef
    (cobra.model.aaa.DomainRef), resolved via ``tnAaaDomainRefName``.

    Auto-generated by the cobra code generator -- do not modify by hand
    (see the notice at the top of this file).
    """

    # Class-level relationship metadata: source/target classes, rn format,
    # cardinality, and access flags (the MO is read-only and not deletable).
    meta = NamedSourceRelationMeta("cobra.model.vns.RsVDevDomainRefContToDomainRef", "cobra.model.aaa.DomainRef")

    meta.targetNameProps["name"] = "tnAaaDomainRefName"
    meta.cardinality = SourceRelationMeta.N_TO_ONE

    meta.moClassName = "vnsRsVDevDomainRefContToDomainRef"
    meta.rnFormat = "rsVDevDomainRefContToDomainRef"

    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "Relation from VDev DomainRef Container To AAA Domain Ref"
    meta.writeAccessMask = 0x6000000000000001
    meta.readAccessMask = 0x6000000000000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # Fault/health MOs that may appear as children of this relation.
    meta.childClasses.add("cobra.model.fault.Inst")

    meta.childClasses.add("cobra.model.fault.Counts")

    meta.childClasses.add("cobra.model.health.Inst")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))

    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))

    meta.parentClasses.add("cobra.model.vns.VDevDomainRefCont")

    meta.superClasses.add("cobra.model.reln.Inst")

    meta.superClasses.add("cobra.model.reln.To")

    meta.superClasses.add("cobra.model.pol.NToRef")

    meta.rnPrefixes = [
        ('rsVDevDomainRefContToDomainRef', False),
    ]

    # --- property metadata (one PropMeta per MO property) ---

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "monPolDn", "monPolDn", 18098, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)

    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)

    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # Target resolution properties (tCl/tDn/tRn/... describe the DomainRef target).
    prop = PropMeta("str", "tCl", "tCl", 18094, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1562
    prop.defaultValueStr = "aaaDomainRef"
    prop._addConstant("aaaDomainRef", None, 1562)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)

    prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
    prop.label = "Target-context"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tContextDn", prop)

    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)

    prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
    prop.label = "Target-rn"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("tRn", prop)

    prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "name"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)

    # The only configurable property: name of the target AAA DomainRef.
    prop = PropMeta("str", "tnAaaDomainRefName", "tnAaaDomainRefName", 18093, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("tnAaaDomainRefName", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # This rn has no naming properties, so namingVals stays empty.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
2599f43c702b477847beae310b71941347de3dfc | d5492bcc77824e29669400622fd89b1349c90caf | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/电商项目/阿里1688_淘宝_天猫_京东_折800_卷皮_拼多多/my_flask_server/tools/时间戳_to_时间.py | bb9790a02ba469733ed07993cf5d5bc247faef0e | [] | no_license | XCodeAny/python | d88980682ba4db839911a5de8c073fa33a63da80 | 35991daf6c7eff4197662b9d07cb9fcdee6a0c02 | refs/heads/master | 2021-08-30T20:00:14.231120 | 2017-12-19T07:55:15 | 2017-12-19T07:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | # coding:utf-8
'''
@author = super_fazai
@File : 时间戳_to_时间.py
@Time : 2017/11/15 17:13
@connect : superonesfazai@gmail.com
'''
import time
def timestamp_to_regulartime(timestamp):
'''
将时间戳转换成时间
'''
# 利用localtime()函数将时间戳转化成localtime的格式
# 利用strftime()函数重新格式化时间
# 转换成localtime
time_local = time.localtime(int(timestamp))
# print(time_local)
# 转换成新的时间格式(2016-05-05 20:28:54)
dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
return dt
# Example usage: format a fixed timestamp (the printed string depends on
# the machine's local timezone).
timestamp = 1511625600
dt = timestamp_to_regulartime(timestamp)
print(dt)
def is_recent_time(timestamp):
    """Return True when *timestamp* is a valid flash-sale time.

    A timestamp is valid when it falls on today or a later date of the
    current year AND its hour (local time) is between 8 and 16 inclusive.
    Diagnostic messages are printed for each rejection reason.

    :param timestamp: Unix timestamp (int or numeric string)
    :return: bool
    """
    time_1 = time.localtime(int(timestamp))
    time_2 = time.localtime(time.time())  # current local time
    if time_1.tm_year != time_2.tm_year:
        print('非本年度的限时秒杀时间,此处跳过')
        return False
    if time_1.tm_mon < time_2.tm_mon:
        print('该月份时间已过期,此处跳过')
        return False
    # Bugfix: compare (month, day) as a pair. The old code compared the
    # day of month on its own, so e.g. the 1st of a *later* month was
    # wrongly rejected as expired.
    if (time_1.tm_mon, time_1.tm_mday) < (time_2.tm_mon, time_2.tm_mday):
        print('该日时间已过期, 此处跳过')
        return False
    if 8 <= time_1.tm_hour <= 16:
        print('合法时间')
        return True
    print('该小时在8点到16点以外,此处不处理跳过')
    return False
# while True:
# timestamp = input('请输入要判断的时间戳: ')
# print(is_recent_time(timestamp)) | [
"superonesfazai@gmail.com"
] | superonesfazai@gmail.com |
44bf8f5d04ab2ef20b3544249cd1b6392eb19290 | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /w3schools/python/reference/builtInFunctions.py | b9e411f63673bbb33d19faf1d68a200cdb99c7a9 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 3,948 | py | # Built in Functions
# abs()-Returns the absolute value of a number
print(abs(-7.52))
# for a complex argument, abs() returns its magnitude
print(abs(3+5j))
# all()-Returns True if all items in an iterable object are true
mylist = [True, True, True]
print(all(mylist)) # True
print(all([1, 1, 1])) # True
print(all([0, 1, 1])) # False
print(all([])) # True (vacuously true for an empty iterable)
print(all((0, True, False))) # False
# any()-Returns True if any item in an iterable object is true
"""ascii()-Returns a readable version of an object.
Replaces none-ascii characters with escape character"""
# bin()-Returns the binary version of a number
# bool()-Returns the boolean value of the specified object
# bytearray()-Returns an array of bytes
# bytes()-Returns a bytes object
# callable()-Returns True if the specified object is callable, otherwise False
# chr()-Returns a character from the specified Unicode code.
# classmethod()-Converts a method into a class method
# compile()-Returns the specified source as an object, ready to be executed
# complex()-Returns a complex number
"""
delattr()-Deletes the specified attribute
(property or method) from the specified object
"""
# dict()-Returns a dictionary (Array)
# dir()-Returns a list of the specified object's properties and methods
"""
divmod()-Returns the quotient and the remainder
when argument1 is divided by argument2
"""
"""
enumerate()-Takes a collection (e.g. a tuple)
and returns it as an enumerate object
"""
# eval()-Evaluates and executes an expression
# exec()-Executes the specified code (or object)
# filter()-Use a filter function to exclude items in an iterable object
# float()-Returns a floating point number
# format()-Formats a specified value
# frozenset()-Returns a frozenset object
# getattr()-Returns the value of the specified attribute (property or method)
# globals()-Returns the current global symbol table as a dictionary
"""hasattr()-Returns True if the specified object
has the specified attribute (property/method)"""
# hash()-Returns the hash value of a specified object
# help()-Executes the built-in help system
# hex()-Converts a number into a hexadecimal value
# id()-Returns the id of an object
# input()-Allowing user input
# int()-Returns an integer number
"""isinstance()-Returns True if a specified object
is an instance of a specified object"""
"""issubclass()-Returns True if a specified class is
a subclass of a specified object"""
# iter()-Returns an iterator object
# len()-Returns the length of an object
# list()-Returns a list
# locals()-Returns an updated dictionary of the current local symbol table
"""map()-Returns the specified iterator with
the specified function applied to each item"""
# max()-Returns the largest item in an iterable
# memoryview()-Returns a memory view object
# min()-Returns the smallest item in an iterable
# next()-Returns the next item in an iterable
# object()-Returns a new object
# oct()-Converts a number into an octal
# open()-Opens a file and returns a file object
# ord()-Convert an integer representing the Unicode of the specified character
# pow()-Returns the value of x to the power of y
# print()-Prints to the standard output device
# property()-Gets, sets, deletes a property
"""range()-Returns a sequence of numbers,
starting from 0 and increments by 1 (by default)"""
# repr()-Returns a readable version of an object
# reversed()-Returns a reversed iterator
# round()-Rounds a numbers
# set()-Returns a new set object
# setattr()-Sets an attribute (property/method) of an object
# slice()-Returns a slice object
# sorted()-Returns a sorted list
# staticmethod()-Converts a method into a static method
# str()-Returns a string object
# sum()-Sums the items of an iterator
# super()-Returns an object that represents the parent class
# tuple()-Returns a tuple
# type()-Returns the type of an object
# vars()-Returns the __dict__ property of an object
# zip()-Returns an iterator, from two or more iterators
| [
"sagarnikam123@gmail.com"
] | sagarnikam123@gmail.com |
05bf10e915b53d57bb3f0174801892d61daffed8 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/search/azure-search-documents/azure/search/documents/_internal/_generated/aio/__init__.py | fa69578ea7f244621643bd7e1b4c113301d9ff0d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 552 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._search_index_client import SearchIndexClient
__all__ = ['SearchIndexClient']
| [
"noreply@github.com"
] | yunhaoling.noreply@github.com |
ae18e15d31cb04495f56ec8136afcdb8cbf22861 | 6ecf8227cc63ea5c8f05fdd6a7d28b3167119367 | /blueking_forum/wsgi.py | 9b85fd8c45ff19aed7455d4ee3ba00e35d2a3b0a | [] | no_license | doraemonext/blueking_forum | 5ad0f46780e785a5af4db6f171654e351f509aa1 | f5737dcdeaef15c37b37a0988aa1be98f6283834 | refs/heads/master | 2020-12-28T21:29:19.982785 | 2015-11-04T04:15:20 | 2015-11-04T04:15:20 | 44,859,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for blueking_forum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Use the project's settings module unless DJANGO_SETTINGS_MODULE is already set.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blueking_forum.settings")

# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"doraemonext@gmail.com"
] | doraemonext@gmail.com |
9d8eef47748cb50afa81f15fa27c8d75bfaca146 | 08351ac650385e2ee0f4fc08ab8ef0978bc5bf3c | /Module2_HTTP/Request_response/Request.py | 981163757b7ae56b101453c505885d2f3f2dcdcd | [] | no_license | tertiarycourses/PythonNetworkingTraining | d3c02488e91d318874558130a89fb112a2c95d55 | 9c5f223a4b83d21a791ac0d322306c3a78c4122f | refs/heads/master | 2019-07-13T07:59:49.241235 | 2017-05-11T14:48:19 | 2017-05-11T14:48:19 | 83,748,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | #Requests with urllib
# from urllib.request import urlopen
# from urllib.request import Request
# response = urlopen('http://www.debian.org')
# print(response)
# print(response.readline())
# ##response object
# print(response.url)
# print(response.status)
# print(response.headers['content-type'])
#response = urlopen('http://www.debian.org')
#print(response.read(50))
#response = urlopen('http://www.debian.org')
#print(response.read())
##print(response.read())
##Status Code
#print(response.status)
#-------------------------------------
#custom request
#req = Request('http://www.debian.org')
#req.add_header('Accept-Language', 'sv')
#response = urlopen(req)
#print(response.readlines()[:5])
#----------------------------------------
#Content Compression
#with decompression cannot see data
#from urllib.request import Request
#from urllib.request import urlopen
#req = Request('http://www.debian.org')
#req.add_header('Accept-Encoding', 'gzip')
#response = urlopen(req)
#print(response.getheader('Content-Encoding'))
#print(response.read())
#With Decompression can view data
#from urllib.request import Request
#from urllib.request import urlopen
#import gzip
#req = Request('http://www.debian.org')
#req.add_header('Accept-Encoding', 'gzip')
#response = urlopen(req)
#content = gzip.decompress(response.read())
#result=content.splitlines()[:5]
#print(result)
#--------------------------------------
#Content Negotiation
#from urllib.request import urlopen
#import gzip
#req = Request('http://www.debian.org')
#req.add_header('Accept-Content-Type', 'text/plain')
#response = urlopen(req)
#content = response.read()
#result=content.splitlines()[:5]
#print(result)
#-------------------------------------------
#User Agent
#from urllib.request import Request
#from urllib.request import urlopen
#req = Request('http://www.debian.org')
#req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64;rv:24.0) Gecko/20140722 Firefox/24.0 Iceweasel/24.7.0')
#response = urlopen(req)
#print(response.readline())
#---------------------------------------------
#Cookie
#from http.cookiejar import CookieJar
#cookie_jar = CookieJar()
#from urllib.request import build_opener, HTTPCookieProcessor
#opener = build_opener(HTTPCookieProcessor(cookie_jar))
#opener.open('http://www.github.com')
#print(len(cookie_jar))
#cookies = list(cookie_jar)
#print(cookies)
#---------------------------------------------\
#Redirect
#from urllib.request import Request
#from urllib.request import urlopen
#req = Request('http://www.gmail.com')
#response = urlopen(req)
#print(response.url)
#print(req.redirect_dict)
#---------------------------------------
#HTTP Methods
#GET
import requests
# NOTE(review): runs at import time and needs network access to debian.org
response = requests.get('http://www.debian.org')
print(response.content)  # raw response body (bytes)
print(response.status_code)  # e.g. 200 on success
#POST
# import requests
# r = requests.post("http://bugs.python.org", data={'number': 12524, 'type': 'issue', 'action': 'show'})
# print(r.status_code, r.reason)
# print(r.text)
| [
"angch@tertiaryinfotech.com"
] | angch@tertiaryinfotech.com |
e223b08659d04f02b9ff57fd9cc627a0bfbc4420 | 63ba933a294865f65409635f62e0f1d59f725f37 | /src/arrays/bagOfTokensScore.py | 86ce1032d9eb0987f1da6b22e658f67679b0f34d | [
"CC0-1.0"
] | permissive | way2arun/datastructures_algorithms | fc4302bdbb923ef8912a4acf75a286f2b695de2a | 4ea4c1579c28308455be4dfa02bd45ebd88b2d0a | refs/heads/master | 2021-12-07T04:34:35.732026 | 2021-09-30T12:11:32 | 2021-09-30T12:11:32 | 203,658,808 | 1 | 0 | null | 2020-08-08T15:55:09 | 2019-08-21T20:23:46 | Python | UTF-8 | Python | false | false | 2,716 | py | """
Bag of Tokens
You have an initial power of P, an initial score of 0, and a bag of tokens where tokens[i] is the value of the ith token (0-indexed).
Your goal is to maximize your total score by potentially playing each token in one of two ways:
If your current power is at least tokens[i], you may play the ith token face up, losing tokens[i] power and gaining 1 score.
If your current score is at least 1, you may play the ith token face down, gaining tokens[i] power and losing 1 score.
Each token may be played at most once and in any order. You do not have to play all the tokens.
Return the largest possible score you can achieve after playing any number of tokens.
Example 1:
Input: tokens = [100], P = 50
Output: 0
Explanation: Playing the only token in the bag is impossible because you either have too little power or too little score.
Example 2:
Input: tokens = [100,200], P = 150
Output: 1
Explanation: Play the 0th token (100) face up, your power becomes 50 and score becomes 1.
There is no need to play the 1st token since you cannot play it face up to add to your score.
Example 3:
Input: tokens = [100,200,300,400], P = 200
Output: 2
Explanation: Play the tokens in this order to get a score of 2:
1. Play the 0th token (100) face up, your power becomes 100 and score becomes 1.
2. Play the 3rd token (400) face down, your power becomes 500 and score becomes 0.
3. Play the 1st token (200) face up, your power becomes 300 and score becomes 1.
4. Play the 2nd token (300) face up, your power becomes 0 and score becomes 2.
Constraints:
0 <= tokens.length <= 1000
0 <= tokens[i], P < 104
"""
from collections import deque
from typing import List
class Solution:
    def bagOfTokensScore(self, tokens: List[int], P: int) -> int:
        """Return the maximum score reachable with power P.

        Greedy two-pointer strategy on the sorted tokens: always buy the
        cheapest token face up while affordable; when power runs out, sell
        the most expensive remaining token face down for power -- but only
        if more than one other token remains, so the trade can pay off.

        Note: sorts *tokens* in place, like the original implementation.

        :param tokens: token values (may be empty)
        :param P: initial power
        :return: the largest achievable score
        """
        tokens.sort()
        # No tokens, or we cannot even afford the cheapest one: score 0.
        if not tokens or P < tokens[0]:
            return 0
        score = 0
        left, right = 0, len(tokens) - 1
        while left <= right:
            if P >= tokens[left]:
                # Buy the cheapest token face up: -power, +1 score.
                P -= tokens[left]
                left += 1
                score += 1
            elif right - left > 1:
                # Sell the priciest token face down: +power, -1 score.
                P += tokens[right]
                right -= 1
                score -= 1
            else:
                # Trading the last remaining token can never increase score.
                break
        return score
tokens = [100, 200]
P = 150
solution = Solution()
print(solution.bagOfTokensScore(tokens, P))
| [
"way2aru@yahoo.com"
] | way2aru@yahoo.com |
a8b32038a3ade070c8f67b3eed0e66408c072e48 | 25d4c31d5ebe470118b14beb84f3cd1e53d99c15 | /01_Tutorials/PyQt5_GUI_Tutorial/09_2_Tutorial_Progressbar_Button.py | 195496bbd802cc5cf6756f04db46337e8a71d385 | [] | no_license | daltdoerfer/Python_Templates-1 | ea4b59489feb7b7617e81b7c94d4375dbf25def3 | c2471cebeaf20bbfdfd3fd263d458e5a67ad8d1e | refs/heads/master | 2023-05-10T15:07:10.109280 | 2021-06-08T06:45:53 | 2021-06-08T06:45:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | # Dieses Tutorial beinhaltet das einfügen von:
# Progressbar mit ButtonS und (Multi-)Threading (Programm muss weiterlaufen und lagert andere Prozesse aus)
# https://riptutorial.com/pyqt5/example/29500/basic-pyqt-progress-bar
import sys
import time
from PyQt5 import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
TIME_LIMIT = 100 # Ausgelagertes TIME Limit, da mehrere Klassen darauf zugreifen
class External(QThread):
    """
    Counter worker thread.

    Sleeps one second per step and emits ``countChanged`` with the values
    1..TIME_LIMIT, driving the progress bar in the GUI thread.
    """
    countChanged = pyqtSignal(int)

    def run(self):
        for tick in range(1, TIME_LIMIT + 1):
            time.sleep(1)
            self.countChanged.emit(tick)
class Fenster(QDialog): # note: inherit from QMainWindow instead to get a status bar and menu bar
    """Dialog window with a progress bar driven by a background counter thread."""
    def __init__(self):
        super().__init__()
        self.initMe()
    def initMe(self):
        """Create the progress bar and start button, then configure and show the window."""
        #################################
        # Progress bar
        #################################
        self.pb1 = QProgressBar(self)
        self.pb1.setGeometry(0, 0, 300, 25)
        self.pb1.move(50, 50)
        # Range 0..100 -- matches TIME_LIMIT used by the External worker.
        self.pb1.setMaximum(100)

        self.bt1 = QPushButton("Start", self)
        self.bt1.move(50, 75)
        self.bt1.clicked.connect(self.onButtonClick)

        #################################
        # General window config (main window)
        #################################
        self.setGeometry(50, 50, 1000, 500)
        self.setWindowTitle("My First GUI")
        self.setWindowIcon(QIcon("icon.png"))
        self.show()
    def onButtonClick(self):
        """Start a new counter thread and route its updates to the progress bar."""
        self.calc = External()
        self.calc.countChanged.connect(self.onCountChanged)
        self.calc.start()
    def onCountChanged(self, value):
        """Slot: reflect the worker's current count in the progress bar."""
        self.pb1.setValue(value)
if __name__ == "__main__":
    app = QApplication(sys.argv) # create the default application instance
    w = Fenster() # build a simple window -> new instance w
    sys.exit(app.exec_()) # exit the Python script when the window is closed
"daltdoerfer@yahoo.com"
] | daltdoerfer@yahoo.com |
78b480c59e1129fef3f5117392043d5251f5e5cb | 7c551e749064b25af706b9167211050f8c6ad0a9 | /signatures/windows/trojan_rovnix.py | f9b6b29446060b6a111cd040ea82c6e53ff79178 | [] | no_license | dashjuvi/Cuckoo-Sandbox-vbox-win7 | fa382828b4895c5e1ee60b37a840edd395bf1588 | a3a26b539b06db15176deadeae46fc0476e78998 | refs/heads/master | 2020-03-12T08:33:06.231245 | 2019-01-14T23:09:02 | 2019-01-14T23:09:02 | 130,529,882 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Rovnix(Signature):
    """Flags Rovnix banking-trojan IOCs: known mutex, registry-key and
    file-path patterns observed during analysis."""

    name = "rovnix"
    description = "Rovnix Trojan"
    severity = 3
    categories = ["banker", "trojan"]
    authors = ["Mikael Keri"]
    minimum = "2.0"

    files_re = [
        ".*\\\\AppData\\\\Local\\\\Temp\\\\L[0-9]{9}",
        ".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\RSA[0-9]{9}.dll",
        ".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\KEYS\\\\CFG[0-9]{9}.dll",
        ".*\\\\AppData\\\\Roaming\\\\Microsoft\\\\Crypto\\\\RSA\\\\KEYS\\\\DB[0-9]{9}.dll",
    ]

    regkeys_re = [
        ".*\\\\Software\\\\Microsoft\\\\Installer\\\\Products\\\\B[0-9]{9}",
    ]

    mutexes_re = [
        ".*UACNTFS[0-9]{9}",
        ".*INSNTFS[0-9]{9}",
        ".*BDNTFS[0-9]{9}",
        ".*PL6NTFS[0-9]{9}",
        ".*PL1NTFS[0-9]{9}",
    ]

    def on_complete(self):
        # One (ioc_type, check function, pattern list) triple per indicator
        # family, checked in the same order as before: mutex, registry, file.
        checks = (
            ("mutex", self.check_mutex, self.mutexes_re),
            ("registry", self.check_key, self.regkeys_re),
            ("file", self.check_file, self.files_re),
        )
        for ioc_type, checker, patterns in checks:
            for pattern in patterns:
                for match in checker(pattern=pattern, regex=True, all=True):
                    self.mark_ioc(ioc_type, match)
        return self.has_marks()
| [
"diegovm14@gmail.com"
] | diegovm14@gmail.com |
e243451ce164809caa479471221ee886f2b8c8da | 41c605bf3a002a757cb2344cff526d7a7ae56ea9 | /plotly/validators/choropleth/unselected/__init__.py | 6b386c7525f160cb5f23f28d158a37c663b847da | [
"MIT"
] | permissive | Jonathan-MW/plotly.py | 9674b90b5de11fd9089e6afefd04b57bc4587829 | 7528c00772f44dee24c0df7e15d70a4852f171a8 | refs/heads/master | 2020-05-30T06:04:13.621478 | 2019-05-31T10:34:15 | 2019-05-31T10:34:15 | 189,571,988 | 2 | 0 | MIT | 2019-05-31T09:59:53 | 2019-05-31T09:59:53 | null | UTF-8 | Python | false | false | 684 | py |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``marker`` property of
    ``choropleth.unselected`` (wraps the ``Marker`` data class)."""

    def __init__(
        self,
        plotly_name='marker',
        parent_name='choropleth.unselected',
        **kwargs
    ):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop() lets a caller override these defaults through kwargs.
            data_class_str=kwargs.pop('data_class_str', 'Marker'),
            data_docs=kwargs.pop(
                'data_docs', """
            opacity
                Sets the marker opacity of unselected points,
                applied only when a selection exists.
"""
            ),
            **kwargs
        )
| [
"noreply@github.com"
] | Jonathan-MW.noreply@github.com |
75b7140688bd7f5663275f7481f344ba0990f781 | 4e04f819e376c3fba7b6a57c228c289b2c3dde12 | /compass/ocean/tests/global_ocean/mesh/so12to60/dynamic_adjustment/__init__.py | c183fae208713987c10bf3bf3c959e87c5ac2da9 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | Rihui-L/compass | 65e88253f24240a4376a9f04c047c2756848a45a | 4446f76222be26996fc44569a2047bdfb22e33ff | refs/heads/master | 2023-06-19T12:45:30.190857 | 2021-07-20T19:48:43 | 2021-07-20T19:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,066 | py | from compass.ocean.tests.global_ocean.dynamic_adjustment import \
DynamicAdjustment
from compass.ocean.tests.global_ocean.forward import ForwardStep
class SO12to60DynamicAdjustment(DynamicAdjustment):
"""
A test case performing dynamic adjustment (dissipating fast-moving waves)
from an initial condition on the SO12to60 MPAS-Ocean mesh
Attributes
----------
restart_filenames : list of str
A list of restart files from each dynamic-adjustment step
"""
def __init__(self, test_group, mesh, init, time_integrator):
"""
Create the test case
Parameters
----------
test_group : compass.ocean.tests.global_ocean.GlobalOcean
The global ocean test group that this test case belongs to
mesh : compass.ocean.tests.global_ocean.mesh.Mesh
The test case that produces the mesh for this run
init : compass.ocean.tests.global_ocean.init.Init
The test case that produces the initial condition for this run
time_integrator : {'split_explicit', 'RK4'}
The time integrator to use for the forward run
"""
if time_integrator != 'split_explicit':
raise ValueError('{} dynamic adjustment not defined for {}'.format(
mesh.mesh_name, time_integrator))
restart_times = ['0001-01-03_00:00:00', '0001-01-07_00:00:00',
'0001-01-11_00:00:00', '0001-01-21_00:00:00']
restart_filenames = [
'restarts/rst.{}.nc'.format(restart_time.replace(':', '.'))
for restart_time in restart_times]
super().__init__(test_group=test_group, mesh=mesh, init=init,
time_integrator=time_integrator,
restart_filenames=restart_filenames)
module = self.__module__
# first step
step_name = 'damped_adjustment_1'
step = ForwardStep(test_case=self, mesh=mesh, init=init,
time_integrator=time_integrator, name=step_name,
subdir=step_name)
namelist_options = {
'config_run_duration': "'00-00-02_00:00:00'",
'config_dt': "'00:05:00'",
'config_btr_dt': "'00:00:20'",
'config_Rayleigh_friction': '.true.',
'config_Rayleigh_damping_coeff': '1.0e-4'}
step.add_namelist_options(namelist_options)
stream_replacements = {
'output_interval': '00-00-10_00:00:00',
'restart_interval': '00-00-02_00:00:00'}
step.add_streams_file(module, 'streams.template',
template_replacements=stream_replacements)
step.add_output_file(filename='../{}'.format(restart_filenames[0]))
self.add_step(step)
# second step
step_name = 'damped_adjustment_2'
step = ForwardStep(test_case=self, mesh=mesh, init=init,
time_integrator=time_integrator, name=step_name,
subdir=step_name)
namelist_options = {
'config_run_duration': "'00-00-04_00:00:00'",
'config_dt': "'00:07:30'",
'config_btr_dt': "'00:00:20'",
'config_Rayleigh_friction': '.true.',
'config_Rayleigh_damping_coeff': '4.0e-5',
'config_do_restart': '.true.',
'config_start_time': "'{}'".format(restart_times[0])}
step.add_namelist_options(namelist_options)
stream_replacements = {
'output_interval': '00-00-10_00:00:00',
'restart_interval': '00-00-02_00:00:00'}
step.add_streams_file(module, 'streams.template',
template_replacements=stream_replacements)
step.add_input_file(filename='../{}'.format(restart_filenames[0]))
step.add_output_file(filename='../{}'.format(restart_filenames[1]))
self.add_step(step)
# third step
step_name = 'damped_adjustment_3'
step = ForwardStep(test_case=self, mesh=mesh, init=init,
time_integrator=time_integrator, name=step_name,
subdir=step_name)
namelist_options = {
'config_run_duration': "'00-00-04_00:00:00'",
'config_dt': "'00:10:00'",
'config_btr_dt': "'00:00:20'",
'config_Rayleigh_friction': '.true.',
'config_Rayleigh_damping_coeff': '1.0e-5',
'config_do_restart': '.true.',
'config_start_time': "'{}'".format(restart_times[1])}
step.add_namelist_options(namelist_options)
stream_replacements = {
'output_interval': '00-00-10_00:00:00',
'restart_interval': '00-00-02_00:00:00'}
step.add_streams_file(module, 'streams.template',
template_replacements=stream_replacements)
step.add_input_file(filename='../{}'.format(restart_filenames[1]))
step.add_output_file(filename='../{}'.format(restart_filenames[2]))
self.add_step(step)
# final step
step_name = 'simulation'
step = ForwardStep(test_case=self, mesh=mesh, init=init,
time_integrator=time_integrator, name=step_name,
subdir=step_name)
namelist_options = {
'config_run_duration': "'00-00-10_00:00:00'",
'config_do_restart': '.true.',
'config_start_time': "'{}'".format(restart_times[2])}
step.add_namelist_options(namelist_options)
stream_replacements = {
'output_interval': '00-00-10_00:00:00',
'restart_interval': '00-00-10_00:00:00'}
step.add_streams_file(module, 'streams.template',
template_replacements=stream_replacements)
step.add_input_file(filename='../{}'.format(restart_filenames[2]))
step.add_output_file(filename='../{}'.format(restart_filenames[3]))
self.add_step(step)
self.restart_filenames = restart_filenames
| [
"xylarstorm@gmail.com"
] | xylarstorm@gmail.com |
06dec5bffda4f9bce976bfa3abf34ab323768695 | c29de7ce2d91f572aeb4da56801de7a1dc034054 | /st2/experiments/cifar10/exp011.py | f2fd3d516a219c49e1c585326c4a98eaf1043f51 | [] | no_license | kzky/works | 18b8d754bfc2b1da22022926d882dfe92ea785e6 | b8708c305e52f924ea5a7071e0dfe5f2feb7a0a3 | refs/heads/master | 2021-01-10T08:04:44.831232 | 2018-03-01T15:09:47 | 2018-03-01T15:09:47 | 54,316,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla
from nnabla.contrib.context import extension_context
import numpy as np
import os
import time
import argparse
from st2.cifar10.cnn_model_011 import cnn_model_003, ce_loss, sr_loss, er_loss, \
GradScaleContainer
from st2.cifar10.datasets import Cifar10DataReader, Separator
"""
The same script as the `st` module but with nnabla.
- ConvPool-CNN-C (Springenberg et al., 2014, Salimans&Kingma (2016))
- Stochastic Regularization
- Entropy Regularization for the outputs before CE loss and SR loss
- Gradient scaling: just consider large gradients of g_u
"""
def categorical_error(pred, label):
"""
Compute categorical error given score vectors and labels as
numpy.ndarray.
"""
pred_label = pred.argmax(1)
return (pred_label != label.flat).mean()
def main(args):
# Settings
device_id = args.device_id
batch_size = 100
batch_size_eval = 100
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = n_train_data / batch_size
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model
## supervised
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l = nn.Variable((batch_size, m, h, w))
y_l = nn.Variable((batch_size, 1))
pred = cnn_model_003(ctx, x_l)
loss_ce = ce_loss(ctx, pred, y_l)
loss_er = er_loss(ctx, pred)
loss_supervised = loss_ce + loss_er
## stochastic regularization
x_u0 = nn.Variable((batch_size, m, h, w))
x_u1 = nn.Variable((batch_size, m, h, w))
pred_x_u0 = cnn_model_003(ctx, x_u0)
pred_x_u1 = cnn_model_003(ctx, x_u1)
loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
loss_er0 = er_loss(ctx, pred_x_u0)
loss_er1 = er_loss(ctx, pred_x_u1)
loss_unsupervised = loss_sr + loss_er0 + loss_er1
## evaluate
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
pred_eval = cnn_model_003(ctx, x_eval, test=True)
# Solver
with nn.context_scope(ctx):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
# Gradient Scale Container
gsc = GradScaleContainer(len(nn.get_parameters()))
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=True, #TODO: use F.image_augmentation
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
for i in range(n_iter):
# Get data and set it to the varaibles
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
x_l.d, _ , y_l.d= x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d= x_u0_data, x_u1_data
# Train
loss_supervised.forward(clear_no_need_grad=True)
loss_unsupervised.forward(clear_no_need_grad=True)
solver.zero_grad()
loss_unsupervised.backward(clear_buffer=True)
gsc.scale_grad(ctx, nn.get_parameters())
loss_supervised.backward(clear_buffer=True)
## update
solver.update()
# Evaluate
if (i+1) % iter_epoch == 0:
# Get data and set it to the varaibles
x_data, y_data = data_reader.get_test_batch()
# Evaluation loop
ve = 0.
iter_val = 0
for k in range(0, len(x_data), batch_size_eval):
x_eval.d = x_data[k:k+batch_size_eval, :]
label = y_data[k:k+batch_size_eval, :]
pred_eval.forward(clear_buffer=True)
ve += categorical_error(pred_eval.d, label)
iter_val += 1
msg = "Epoch:{},ElapsedTime:{},Acc:{:02f}".format(
epoch,
time.time() - st,
(1. - ve / iter_val) * 100)
print(msg)
st = time.time()
epoch +=1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--device_id", "-d", type=int, default=0)
parser.add_argument('--context', '-c', type=str,
default="cpu", help="Extension modules. ex) 'cpu', 'cuda.cudnn'.")
args = parser.parse_args()
main(args)
| [
"rkzfilter@gmail.com"
] | rkzfilter@gmail.com |
fb4055ed05dc497e1fbf506797c4d8371e6725f6 | bd053d2bf5444ab8f0b8b0ff56772fa75281e38d | /qchem/tests/test_observable.py | 09b74d615188f94b288e790d8fe1d3a885eb13cd | [
"Apache-2.0"
] | permissive | johannesjmeyer/pennylane | bcb762583e95537b04a9b38756369571f957d2e5 | 8f602312baea107d5248267fb3dc1593722810e0 | refs/heads/master | 2023-07-11T18:21:31.086858 | 2021-08-14T19:21:42 | 2021-08-14T19:21:42 | 341,190,636 | 3 | 1 | Apache-2.0 | 2021-06-16T09:01:58 | 2021-02-22T12:19:10 | Python | UTF-8 | Python | false | false | 4,359 | py | import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion import FermionOperator, QubitOperator
t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", -0.5)
v = (
FermionOperator("0^ 0^ 0 0", 0.25)
+ FermionOperator("0^ 1^ 1 0", -0.25)
+ FermionOperator("1^ 0^ 0 1", -0.5)
)
v1 = (
FermionOperator("0^ 0^ 0 0", 0.25)
+ FermionOperator("0^ 1^ 1 0", -0.25)
+ FermionOperator("0^ 2^ 2 0", 0.25)
+ FermionOperator("0^ 3^ 3 0", -0.25)
+ FermionOperator("1^ 0^ 0 1", -0.25)
+ FermionOperator("2^ 0^ 0 2", 0.25)
)
v2 = (
FermionOperator("0^ 0^ 0 0", 0.5)
+ FermionOperator("0^ 1^ 1 0", -0.25)
+ FermionOperator("0^ 2^ 2 0", 0.5)
+ FermionOperator("0^ 3^ 3 0", -0.25)
+ FermionOperator("1^ 0^ 0 1", -0.25)
+ FermionOperator("2^ 0^ 0 2", -0.25)
)
@pytest.mark.parametrize(
("fermion_ops", "init_term", "mapping", "terms_exp"),
[
(
[t, v],
1 / 4,
"bravyi_KITAEV",
{
(): (0.0625 + 0j),
((0, "Z"),): (-0.0625 + 0j),
((0, "Z"), (1, "Z")): (0.4375 + 0j),
((1, "Z"),): (-0.1875 + 0j),
},
),
(
[t, v],
1 / 4,
"JORDAN_wigner",
{
(): (0.0625 + 0j),
((0, "Z"),): (-0.0625 + 0j),
((1, "Z"),): (0.4375 + 0j),
((0, "Z"), (1, "Z")): (-0.1875 + 0j),
},
),
(
[t],
1 / 2,
"JORDAN_wigner",
{(): (0.5 + 0j), ((0, "Z"),): (-0.25 + 0j), ((1, "Z"),): (0.25 + 0j)},
),
(
[t],
0,
"JORDAN_wigner",
{((0, "Z"),): (-0.25 + 0j), ((1, "Z"),): (0.25 + 0j)},
),
(
[v1],
1 / 2,
"JORDAN_wigner",
{
(): (0.4375 + 0j),
((1, "Z"),): (0.125 + 0j),
((0, "Z"), (1, "Z")): (-0.125 + 0j),
((2, "Z"),): (-0.125 + 0j),
((0, "Z"), (2, "Z")): (0.125 + 0j),
((0, "Z"),): (0.0625 + 0j),
((3, "Z"),): (0.0625 + 0j),
((0, "Z"), (3, "Z")): (-0.0625 + 0j),
},
),
(
[v2],
1 / 4,
"bravyi_KITAEV",
{
(): (0.125 + 0j),
((0, "Z"), (1, "Z")): (0.125 + 0j),
((1, "Z"),): (-0.125 + 0j),
((2, "Z"),): (-0.0625 + 0j),
((0, "Z"), (2, "Z")): (0.0625 + 0j),
((1, "Z"), (2, "Z"), (3, "Z")): (0.0625 + 0j),
((0, "Z"), (1, "Z"), (2, "Z"), (3, "Z")): (-0.0625 + 0j),
((0, "Z"),): (0.125 + 0j),
},
),
],
)
def test_observable(fermion_ops, init_term, mapping, terms_exp, custom_wires, monkeypatch):
r"""Tests the correctness of the 'observable' function used to build many-body observables.
The parametrized inputs `terms_exp` are `.terms` attribute of the corresponding
`QubitOperator. The equality checking is implemented in the `qchem` module itself
as it could be something useful to the users as well.
"""
res_obs = qchem.observable(
fermion_ops, init_term=init_term, mapping=mapping, wires=custom_wires
)
qubit_op = QubitOperator()
monkeypatch.setattr(qubit_op, "terms", terms_exp)
assert qchem._qubit_operators_equivalent(qubit_op, res_obs, wires=custom_wires)
msg1 = "Elements in the lists are expected to be of type 'FermionOperator'"
msg2 = "Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'"
@pytest.mark.parametrize(
("fermion_ops", "mapping", "msg_match"),
[
([FermionOperator("0^ 0", 0.5), "notFermionOperator"], "JORDAN_wigner", msg1),
([FermionOperator("0^ 0", 0.5)], "no_valid_transformation", msg2),
],
)
def test_exceptions_observable(fermion_ops, mapping, msg_match):
"""Test that the 'observable' function throws an exception if any element
in the list 'fermion_ops' is not a FermionOperator objector or if the
fermionic-to-qubit transformation is not properly defined."""
with pytest.raises(TypeError, match=msg_match):
qchem.observable(fermion_ops, mapping=mapping)
| [
"noreply@github.com"
] | johannesjmeyer.noreply@github.com |
c94b2c053a007e87154dc677ea8df2d8d6db02e4 | e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551 | /uri/uri_python/iniciante/p1038.py | e994b8738d0a17022bb596d11005b2a23996e826 | [] | no_license | GabrielEstevam/icpc_contest_training | b8d97184ace8a0e13e1c0bf442baa36c853a6837 | 012796c2ceb901cf7aa25d44a93614696a7d9c58 | refs/heads/master | 2020-04-24T06:15:16.826669 | 2019-10-08T23:13:15 | 2019-10-08T23:13:15 | 171,758,893 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | valor = input().split(" ")
codigo = int(valor[0])
quantidade = int(valor[1])
preco = [4, 4.5, 5, 2, 1.5]
print("Total: R$ %.2f" % (quantidade*preco[codigo-1]))
| [
"gabrielestevam@hotmail.com"
] | gabrielestevam@hotmail.com |
98e5bb02b2f1e5c29f9b110dae3b25cd10b004f1 | d75703c2083dfc508c5608c4c35167b67d1a4308 | /2nd Chapter/graphTwo.py | 13107201655ba9be1fc0423142010b1927106346 | [] | no_license | vubon/Python-core | e8159763d281152a1b64da3a0534899fd3def2b5 | a415ef3c6159f0c85afa3240a762a00b2c68bd02 | refs/heads/master | 2020-07-03T17:08:10.091827 | 2016-12-09T19:26:51 | 2016-12-09T19:26:51 | 67,540,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | import networkx as nx
G=nx.Graph()
G.add_node("A")
G.add_node("B")
G.add_none("C")
G.add_edge("A","B")
G.add_edge("B", "C")
G.add_edge("C", "A")
print("Nodes: " + str(G.nodes()))
print("Edges: " + str(G.edge()))
| [
"vubon.roy@gmail.com"
] | vubon.roy@gmail.com |
354596a7e215dbda43d8b2a0e5becc1707e1fa44 | e3946d91dc5fe71989c2f4b6390232865fcb5d1b | /fjord/flags/tests/test_tasks.py | cc907a579b61d1e71e3621331f63e2dfa138d835 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | zeusintuivo/fjord | 61b632fd6df0e1b3508e628fe4f682a937cc0244 | 3bd227004d369df1fdc39f06acff12ebc8f0fe34 | refs/heads/master | 2021-01-16T18:28:52.564638 | 2014-09-24T21:02:51 | 2014-09-24T21:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | from mock import patch
from nose.tools import eq_
# These tests require that tasks be imported so that the post_save
# signal is connected. Don't remove this.
import fjord.flags.tasks # noqa
from fjord.base.tests import TestCase
from fjord.feedback.tests import ResponseFactory
from fjord.flags.spicedham_utils import get_spicedham, tokenize
class TestClassifyTask(TestCase):
def test_classify_task(self):
"""flags should be created if classifier returns True"""
with patch('fjord.flags.tasks.classify') as classify_mock:
classify_mock.return_value = True
# This creates the response and saves it which kicks off
# the classifier task. It should be classified as abuse.
resp1 = ResponseFactory(locale=u'en-US', description=u'ou812')
eq_(classify_mock.call_count, 1)
eq_(sorted([f.name for f in resp1.flag_set.all()]),
['abuse'])
def test_classify_false_task(self):
"""flags shouldn't be created if classifier returns False"""
with patch('fjord.flags.tasks.classify') as classify_mock:
classify_mock.return_value = False
# This creates the response and saves it which kicks off
# the classifier task. It should not be classified as
# abuse.
resp1 = ResponseFactory(locale=u'en-US', description=u'ou812')
eq_(classify_mock.call_count, 1)
eq_([f.name for f in resp1.flag_set.all()], [])
def test_ignore_non_english(self):
"""non-en-US responses should be ignored"""
with patch('fjord.flags.tasks.classify') as classify_mock:
# This response is not en-US, so classify should never get
# called.
resp1 = ResponseFactory(locale=u'es', description=u'ou812')
eq_(classify_mock.called, False)
eq_([f.name for f in resp1.flag_set.all()], [])
class TestClassification(TestCase):
def train(self, descriptions, is_abuse=True):
# Note: This is probably a cached Spicedham object.
sham = get_spicedham()
for desc in descriptions:
sham.train(tokenize(desc), match=is_abuse)
def test_abuse(self):
self.train([
'gross gross is gross gross gross browser',
'gross icky gross gross browser',
'gross is mcgrossy gross',
'omg worst gross',
'browser worst'
], is_abuse=True)
self.train([
'Firefox is super!',
'Great browser!',
'Super fast!',
'Not gross!',
'super not gross!'
], is_abuse=False)
# This creates the response and saves it which kicks off
# the classifier task. It should be classified as abuse.
resp = ResponseFactory(
locale=u'en-US', description=u'browser is gross!')
eq_(sorted([f.name for f in resp.flag_set.all()]),
['abuse'])
| [
"willkg@mozilla.com"
] | willkg@mozilla.com |
ff0180f0924a802c747f04609234a645c5b90d6f | 69c33fcad69a2e61cc60209401215530d033e712 | /Python/Python Basics/32.opr.py | dae93683c4b9860393bb38c99d87a73107461c88 | [] | no_license | KULDEEPMALIKM41/Practices | 7659b895ea959c7df2cdbc79c0b982b36f2bde63 | 193abe262ff281a384aac7895bb66dc39ee6e88d | refs/heads/master | 2023-08-17T11:01:11.694282 | 2021-09-30T08:12:41 | 2021-09-30T08:12:41 | 289,527,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # 4.Logical Operator => and , or, not
# and or not
# A B A and B | A B A or B | A not(A)
# F F F F F F T F
# F T F F T T
# T F F T F T F T
# T T T T T T
a,b,c=10,20,5
res= b>a and b>c
print('result : ',res)
a,b,c=10,5,15
res=a>b and a>c
print('Result and = ',res)
res=a>b or a>c
print('Result or = ',res)
res=not(a>b) and not(a>c)
print('Result not = ',res)
res=not(a>b and a>c)
print('Result and,not = ',res) | [
"Kuldeepmalikm41@gmail.com"
] | Kuldeepmalikm41@gmail.com |
42c2877963f0980cf4683bb135d63c3593ccd77c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch3_2020_04_10_19_31_46_547518.py | f2b28c9beb403cc065d37ef9eac91b50609cac83 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import math
def calcula_gaussiana(x, mi, sigma):
a = sigma*((2*math.pi)**(1/2))
b = -0.5*((x-mi)/sigma)**2
gaussiana = ((1/a)*(math.exp**b))
return gaussiana | [
"you@example.com"
] | you@example.com |
459af06cd809435cbcaf4c1ecd35e0e3e713e427 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2981/60712/256536.py | 0d28d89f2d62c8f4b6a26f95305cd6fb99293d9d | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py |
import re
p =list(map(int,input().split()))
s=re.split(r"([-])",input())
for i in range(len(s)):
if s[i]=='-':
pre = list(s[i-1])
pres =0
post=list(s[i+1])
posts=0
if 'a'<=pre[len(pre)-1]<='z':
pres=1
else:
pres=2
if 'a'<=post[0]<='z':
posts=1
else:
posts=2
if pres==posts and pre[len(pre)-1]<post[0]:
preascii=ord(pre[len(pre)-1])
postascii = ord(post[0])
if postascii - preascii>1:
s2=""
start=0
end=0
x=0
if p[2]!=2:
start = 1
end = postascii-preascii
x=1
else:
start=postascii-preascii-1
end=0
x=-1
for j in range(start,end,x):
for k in range(p[1]):
if p[0]==2 and pres==1:
s2=s2+chr(preascii+j).upper()
elif p[0]==3:
s2=s2+'*'
else:
s2=s2+chr(preascii+j)
s[i]=s2
else:
s[i]=''
print("".join(s))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
e1c57de141179aa79660288b3b8d5125a1fa9cbc | 027a6b2db66fad3a0242fe92124400fc013b16f2 | /SConstruct | b6cfd439ce8889defd6482e6239f07baae37e3cc | [] | no_license | cfobel/cpp_simple_serialize | aa091e9ebe14bfe8972e7d07a5afbf337147f332 | 63fcb68d784b5b87276340e1b4c703c8e9708794 | refs/heads/master | 2021-01-22T04:49:19.677185 | 2010-12-16T04:42:39 | 2010-12-16T04:42:39 | 1,833,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | Program('simple_serialize.cpp')
| [
"christian@fobel.net"
] | christian@fobel.net | |
2cb05fd34b8861c334ba7dbc9667a5c72875a592 | 9e3f3c377018a8b04ad8d96a71b2e9cc1e7e82c6 | /python/gr_digital_rf/raster.py | 473de938a1daf09c6161ba0e0dd45c63928f9137 | [
"BSD-3-Clause"
] | permissive | w2naf/digital_rf | 8079fa391eee246d9031849df719345b949c39fd | 482608dcc5608b9d9a0aacf77e75f83edbec1f0e | refs/heads/master | 2020-03-28T02:17:07.717118 | 2019-05-02T15:00:03 | 2019-05-02T15:00:03 | 147,560,147 | 0 | 0 | NOASSERTION | 2019-05-02T15:00:04 | 2018-09-05T18:10:47 | Python | UTF-8 | Python | false | false | 22,768 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2018 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Module defining raster (periodic window) tools for GNU Radio."""
from __future__ import absolute_import, division, print_function
import numpy as np
import pmt
from gnuradio import gr
__all__ = ('raster_chunk', 'raster_select_aggregate', 'raster_tag')
class raster_chunk(gr.basic_block):
"""Block for chunking periodic rasters into fixed-size vectors."""
def __init__(
self, dtype=np.complex64, vlen=1, raster_length=10000, nperseg=1,
noverlap=0, max_raster_length=None, max_noverlap=None,
):
"""Chunk periodic rasters into vectors with optional overlap.
The input data is provided as samples with length `vlen` and type
`dtype`. It is then divided into raster windows with a number of
samples equal to `raster_length`. Each raster window is then broken
into chunks of `nperseg` samples with an overlap of `noverlap` samples.
The output may be zero-padded at the end to ensure that all of the
samples in the raster window are included in an output chunk. Each
chunk is output as a vector whose total length is ``nperseg * vlen``.
The advantage of a raster of data is that its size can be changed in
a running flowgraph, but it can be useful to interface raster data
with fixed-size vectors (such as for FFTs).
Parameters
----------
dtype : numpy.dtype
Data type of the input and output data.
vlen : int
Vector length of the *input* data (NOT the output vector length).
raster_length : int
Length of the raster window.
nperseg : int
Fixed length of each output chunk. If the input data is itself a
vector, then each output vector will have a length of
``nperseg * vlen``.
noverlap : int
Number of samples to overlap for each output chunk.
Other Parameters
----------------
max_raster_length : int
Maximum possible raster length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`raster_length` will be used.
max_noverlap : int
Maximum possible number of overlap samples, to allow for changes
while the block is running. Knowing the maximum number allows for
allocation of appropriately-sized buffers. If None, two thirds of
`nperseg` will be used.
"""
if max_raster_length is None:
max_raster_length = 4*raster_length
if max_noverlap is None:
max_noverlap = 2 * nperseg // 3
gr.basic_block.__init__(
self,
name='Raster Chunk',
in_sig=[(dtype, vlen)],
out_sig=[(dtype, vlen*nperseg)],
)
self._dtype = dtype
self._vlen = vlen
self._nperseg = max(1, nperseg)
self._max_raster_length = max_raster_length
self._max_noverlap = max_noverlap
if raster_length < nperseg or raster_length > max_raster_length:
errstr = 'raster_length {0} must be between {1} and {2}'
raise ValueError(
errstr.format(raster_length, nperseg, max_raster_length)
)
if noverlap < 0 or noverlap > min(nperseg - 1, max_noverlap):
errstr = 'noverlap {0} must be between 0 and {1}'
raise ValueError(
errstr.format(noverlap, min(nperseg - 1, max_noverlap))
)
# set parameters to max values to size buffer, then set to true values
self.set_raster_length(max_raster_length)
self.set_noverlap(max_noverlap)
# makes sure the buffers have the max size
self._set_params()
# now the true values
self.set_raster_length(raster_length)
self.set_noverlap(noverlap)
# tags become meaningless on vector output
self.set_tag_propagation_policy(gr.TPP_DONT)
def _set_params(self):
"""Finalize given parameter values and calculate derived values."""
self._raster_length = min(
max(self._next_raster_length, self._nperseg),
self._max_raster_length,
)
self._noverlap = min(
max(self._next_noverlap, 0), self._nperseg - 1, self._max_noverlap,
)
nstep = self._nperseg - self._noverlap
nchunks = int(np.ceil(float(self._raster_length) / nstep))
self._nstep = nstep
self._nchunks = nchunks
# prepare zero-padded array for strided view of input raster
padded_len = (self._nchunks - 1)*self._nstep + self._nperseg
self._zeropadded = np.zeros(
(padded_len, self._vlen), dtype=self._dtype,
)
self._in_raster = self._zeropadded[:self._raster_length]
stride_shape = (self._nchunks, self._nperseg, self._vlen)
strides = (
self._nstep*self._zeropadded.strides[0],
) + self._zeropadded.strides
self._strided = np.lib.stride_tricks.as_strided(
self._zeropadded, stride_shape, strides,
)
self._out_raster = self._strided.reshape(
(self._nchunks, self._nperseg*self._vlen)
)
# set rate parameters
self.set_output_multiple(self._nchunks)
rate = float(self._nchunks) / self._raster_length
self.set_relative_rate(rate)
self._params_set = True
def _adjust_params(self):
"""Check if the parameter values have changed and set them if so."""
if not self._params_set:
self._set_params()
return True
else:
return False
def set_raster_length(self, raster_length):
"""Set a new raster length."""
self._next_raster_length = raster_length
self._params_set = False
def set_noverlap(self, noverlap):
"""Set a new number of overlap samples for the output chunks."""
self._next_noverlap = noverlap
self._params_set = False
def forecast(self, noutput_items, ninput_items_required):
"""Determine number of input items required given an output number."""
# since we set output_multiple, noutput_items is a multiple of
# self._nchunks
n = noutput_items // self._nchunks
ninput_items_required[0] = n*self._raster_length
def general_work(self, input_items, output_items):
"""Perform the block tasks on given input and output buffers."""
in_arr = input_items[0].reshape((-1, self._vlen))
out_arr = output_items[0].reshape((-1, self._nperseg*self._vlen))
noutput_items = len(out_arr)
# check if params changed, adjust and restart work if they have
if self._adjust_params():
return 0
# noutput_items is a multiple of self._nchunks because we set
# output_multiple to be self._nchunks
nrasters = noutput_items // self._nchunks
for k_raster in range(nrasters):
in_idx = k_raster*self._raster_length
out_idx = k_raster*self._nchunks
# copy input raster into zeropadded memory
self._in_raster[...] = in_arr[
in_idx:(in_idx + self._raster_length), :,
]
# copy strided chunks to output
out_arr[out_idx:(out_idx + self._nchunks), :] = self._out_raster
self.consume(0, nrasters*self._raster_length)
return noutput_items
class raster_select_aggregate(gr.basic_block):
"""Block for selecting data from a raster and optionally aggregating it."""
def __init__(
self, dtype=np.complex64, vlen=1, raster_length=10000, select_start=0,
select_length=None, nagg=1, agg_op='take', agg_op_args=(0,),
max_raster_length=None, max_select_length=None, max_nagg=None,
):
"""Select data from a periodic raster window and optionally aggregate.
The input data is provided as samples with length `vlen` and type
`dtype`. It is then divided into raster windows with a number of
samples equal to `raster_length`. Within and relative to each raster
window, samples are selected to be output using `select_start` and
`select_length`. The output rasters can optionally be aggregated
together from `nagg` outputs to one using the specified operation.
The advantage of a raster of data is that its size can be changed in
a running flowgraph.
Parameters
----------
dtype : numpy.dtype
Data type of the input and output data.
vlen : int
Vector length of the *input* data (NOT the output vector length).
raster_length : int
Length of the raster window.
select_start : int
Index relative to the start of the raster window that indicates the
start of the output raster.
select_length : int
Number of samples to include in the selection from the raster
window. The equivalent indexing of the raster window would then be
``raster[select_start:(select_start + select_length)]``. If None,
then the length of entire remaining raster window from
`select_start` will be used.
nagg : int
Number of output rasters to aggregate together. The output is thus
downsampled by `nagg` in whole chunks of the selected raster
window.
agg_op : str
String giving the name of a numpy array method to use for the
aggregation operation. For `nagg` output rasters organized as an
``(nagg, select_length, vlen)``-shaped array called ``selections``,
the aggregation operation would then be
``selections.agg_op(*agg_op_args, axis=0)``.
agg_op_args : tuple
Positional arguments to be passed to the aggregation operation
method specified by `agg_op`. See above.
Other Parameters
----------------
max_raster_length : int
Maximum possible raster length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`raster_length` will be used.
max_select_length : int
Maximum possible selection length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`select_length` will be used.
max_nagg : int
Maximum possible output aggregation, to allow for changes while the
block is running. Knowing the maximum aggregation size allows for
allocation of appropriately-sized buffers. If None, a default of
four times the initial `nagg` will be used.
"""
if max_raster_length is None:
max_raster_length = 4*raster_length
if max_select_length is None:
length = raster_length if select_length is None else select_length
max_select_length = 4*length
if max_nagg is None:
max_nagg = 4*nagg
gr.basic_block.__init__(
self,
name='Raster Select',
in_sig=[(dtype, vlen)],
out_sig=[(dtype, vlen)],
)
self._dtype = dtype
self._vlen = vlen
self._max_raster_length = max_raster_length
self._max_select_length = max_select_length
self._max_nagg = max_nagg
self.set_agg_op(agg_op)
self.set_agg_op_args(agg_op_args)
# set parameters to max values to size buffer, then set to true values
self.set_raster_length(max_raster_length)
self.set_select_start(0)
self.set_select_length(max_select_length)
self.set_nagg(max_nagg)
# makes sure the buffers have the max size
self._adjust_params()
# now the true values
self.set_raster_length(raster_length)
self.set_select_start(select_start)
self.set_select_length(select_length)
self.set_nagg(nagg)
# we will propogate tags manually
self.set_tag_propagation_policy(gr.TPP_DONT)
def _set_params(self):
"""Finalize given parameter values and calculate derived values."""
# raster parameters
self._raster_length = max(1, min(
self._next_raster_length, self._max_raster_length,
))
self._nagg = max(1, min(self._next_nagg, self._max_nagg))
self._ninput_multiple = self._raster_length*self._nagg
# selection parameters
self._select_start = self._next_select_start % self._raster_length
if self._next_select_length is None:
select_length = self._raster_length - self._select_start
else:
select_length = max(1, self._next_select_length)
self._select_length = min(
select_length,
self._raster_length - self._select_start,
self._max_select_length,
)
self._select_stop = self._select_start + self._select_length
self.set_output_multiple(self._select_length)
# hint to the scheduler and buffer allocator about rate ratio of output
# to input
rate = float(self._select_length) / self._ninput_multiple
self.set_relative_rate(rate)
self._params_set = True
def _adjust_params(self):
"""Check if the parameter values have changed and set them if so."""
if not self._params_set:
self._set_params()
return True
else:
return False
def set_raster_length(self, raster_length):
    """Request a new raster length; applied (and clamped) at the next work call."""
    # Invalidate the derived parameters so _adjust_params picks this up.
    self._params_set = False
    self._next_raster_length = raster_length
def set_select_start(self, select_start):
    """Request a new selection start index; wrapped into the raster at apply time."""
    # Invalidate the derived parameters so _adjust_params picks this up.
    self._params_set = False
    self._next_select_start = select_start
def set_select_length(self, select_length):
    """Request a new selection length; None selects through the raster end."""
    # Invalidate both the derived parameters and the cached relative rate.
    self._params_set = False
    self._rate_set = False
    self._next_select_length = select_length
def set_nagg(self, nagg):
    """Request a new aggregation count; applied (and clamped) at the next work call."""
    # Invalidate the derived parameters so _adjust_params picks this up.
    self._params_set = False
    self._next_nagg = nagg
def set_agg_op(self, agg_op):
    """Set a new aggregation operation.

    `agg_op` is the name of an ndarray method (e.g. 'mean', 'max'); it is
    looked up with getattr at work time. Note that `_params_set` is not
    invalidated here: the operation does not affect any derived buffer
    sizing, so the change simply takes effect on the next work call.
    """
    self._agg_op = agg_op
def set_agg_op_args(self, agg_op_args):
    """Set new aggregation arguments.

    Positional arguments passed to the aggregation method ahead of the
    implicit `axis=0`. Like `set_agg_op`, this does not invalidate
    `_params_set`: no derived parameters depend on it.
    """
    self._agg_op_args = agg_op_args
def forecast(self, noutput_items, ninput_items_required):
    """Tell the scheduler how many input items a given output count needs."""
    # Because we called set_output_multiple with _select_length,
    # noutput_items is always a whole number of selections; each selection
    # consumes _ninput_multiple (= raster_length * nagg) input items.
    nselections = noutput_items // self._select_length
    ninput_items_required[0] = nselections * self._ninput_multiple
def general_work(self, input_items, output_items):
    """Perform the block tasks on given input and output buffers.

    For each group of `nagg` consecutive rasters at the input: take the
    selected window from every raster, aggregate across rasters with the
    configured ndarray method, write the result to the output, and
    re-offset any stream tags that fall inside the first raster's
    selection window.
    """
    in_arr = input_items[0].reshape((-1, self._vlen))
    out_arr = output_items[0].reshape((-1, self._vlen))
    noutput_items = len(out_arr)
    nread = self.nitems_read(0)
    nwritten = self.nitems_written(0)
    # check if params changed, adjust and restart work if they have
    # (returning 0 lets the scheduler call again with the new multiples)
    if self._adjust_params():
        return 0
    # noutput_items is a multiple of self._select_length because we set
    # output_multiple to be self._select_length
    nrasters = noutput_items // self._select_length
    for k_raster in range(nrasters):
        in_idx = k_raster*self._ninput_multiple
        out_idx = k_raster*self._select_length
        # forecast makes sure we have at least nagg rasters at input
        raster_samples = in_arr[in_idx:(in_idx + self._ninput_multiple)]
        in_rasters = raster_samples.reshape(
            (self._nagg, self._raster_length, self._vlen)
        )
        # Axis 0 indexes the rasters being aggregated; axis 1 is the
        # selection window within each raster.
        in_selects = in_rasters[:, self._select_start:self._select_stop, :]
        if self._nagg > 1:
            # perform operation on rasters
            # (self._agg_op is the name of an ndarray method, e.g. 'mean')
            op_method = getattr(in_selects, self._agg_op)
            out_rasters = op_method(*self._agg_op_args, axis=0)
        else:
            # no operation to perform if we're only aggregating one raster
            out_rasters = in_selects[0]
        # copy result to output
        out_arr[out_idx:(out_idx + self._select_length)] = out_rasters
        # read tags for selected input (only first raster if nagg > 1)
        tags = self.get_tags_in_window(
            0, in_idx + self._select_start, in_idx + self._select_stop,
        )
        # write tags to output
        for tag in tags:
            # Absolute input offset -> position within this selection
            # window -> absolute output offset.
            offset_in_select = (
                tag.offset - nread - in_idx - self._select_start
            )
            offset = nwritten + out_idx + offset_in_select
            self.add_item_tag(
                0, offset, tag.key, tag.value,
            )
    self.consume(0, nrasters * self._ninput_multiple)
    return noutput_items
class raster_tag(gr.sync_block):
    """Block for applying stream tags within a periodic raster window.

    Samples pass through unchanged; the only effect is that the configured
    tags are added to the output stream at fixed offsets relative to the
    start of each raster window of `raster_length` items.
    """

    def __init__(
        self, dtype=np.complex64, vlen=1, raster_length=10000,
        tags=None, max_raster_length=None,
    ):
        """Add tags within a periodic raster window.

        Parameters
        ----------
        dtype : numpy.dtype
            Data type of the input and output data.
        vlen : int
            Vector length of the input data.
        raster_length : int
            Length of the raster window, in items.
        tags : list of (int, str, object) tuples, optional
            Tags to add relative to each raster window. Each tuple gives
            (index within the raster, tag name, tag value); the value is
            converted with ``pmt.to_pmt``. Defaults to
            ``[(0, 'raster_start', True)]``.
        max_raster_length : int, optional
            Maximum raster length allowed while the block is running,
            used to size buffers up front. Defaults to
            ``4 * raster_length``.
        """
        # Use a None sentinel instead of a mutable list default so the
        # default tag list is never shared between constructor calls.
        if tags is None:
            tags = [(0, 'raster_start', True)]
        if max_raster_length is None:
            max_raster_length = 4*raster_length
        gr.sync_block.__init__(
            self,
            name='Tag Raster',
            in_sig=[(dtype, vlen)],
            out_sig=[(dtype, vlen)]
        )
        self._dtype = dtype
        self._vlen = vlen
        self._max_raster_length = max_raster_length
        # Set parameters to max values first so buffer allocation (via
        # set_output_multiple) sees the largest possible size...
        self.set_raster_length(max_raster_length)
        self.set_tags(tags)
        # makes sure the buffers have the max size
        self._set_params()
        # ...then switch to the requested values.
        self.set_raster_length(raster_length)
        self.set_tags(tags)

    def _set_params(self):
        """Finalize given parameter values and calculate derived values."""
        # raster length: work is always done in whole rasters
        self._raster_length = self._next_raster_length
        self.set_output_multiple(self._raster_length)
        # tags: wrap indices into the raster, intern names and convert
        # values to pmt once, then sort by offset within the raster
        t = []
        for idx, name, val in self._next_tags:
            o = idx % self._raster_length
            n = pmt.intern(name)
            v = pmt.to_pmt(val)
            t.append((o, n, v))
        self._tags = sorted(t)
        self._params_set = True

    def _adjust_params(self):
        """Check if the parameter values have changed and set them if so."""
        if not self._params_set:
            self._set_params()
            return True
        else:
            return False

    def set_raster_length(self, raster_length):
        """Set a new raster length (applied at the next work call)."""
        self._next_raster_length = raster_length
        self._params_set = False

    def set_tags(self, tags):
        """Set new parameters for all of the tags to be added."""
        self._next_tags = tags
        self._params_set = False

    def work(self, input_items, output_items):
        """Copy input to output, adding the configured per-raster tags."""
        in_arr = input_items[0]
        out_arr = output_items[0]
        noutput_items = len(out_arr)
        nwritten = self.nitems_written(0)
        # If parameters changed, apply them and restart this work call so
        # the new output_multiple takes effect.
        if self._adjust_params():
            return 0
        # noutput_items is a multiple of self._raster_length because we
        # set output_multiple to be self._raster_length
        nrasters = noutput_items // self._raster_length
        # copy data through unchanged
        out_arr[...] = in_arr[:nrasters*self._raster_length]
        # add tags for each complete raster window
        for k_raster in range(nrasters):
            out_idx = k_raster*self._raster_length
            for raster_offset, name, val in self._tags:
                self.add_item_tag(
                    0, nwritten + out_idx + raster_offset, name, val,
                )
        return noutput_items
| [
"rvolz@mit.edu"
] | rvolz@mit.edu |
4e23d1b44a12c5b9a789d8eec13f30ebec60eef8 | 750c45da795fe15d7ef40d09660742b650631967 | /snippet_builder/settings.py | 7b0c8a5ce9b385cd0ef5bb27441918dd22218181 | [] | no_license | John-W-Stevens/django_snippet_builder | cd984b69b3499136c4757bbae11d3f4701ef132e | 58d2ecd2432d1c288969cffdd6de5c0ad546306e | refs/heads/master | 2022-11-12T06:48:35.642128 | 2020-07-06T17:18:49 | 2020-07-06T17:18:49 | 258,379,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | """
Django settings for snippet_builder project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'd$h3%k&!&3hm^^#katx-5g+&mw=i)pm=0@(ot&ow9fga(uk$_#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'snippet_builder_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'snippet_builder.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'snippet_builder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"john.william.stevens1@gmail.com"
] | john.william.stevens1@gmail.com |
3e263de797c280c76236971f3eb0a070cc15d6d8 | 559e336386e02c0e5ebc7316424c3b4a41380d99 | /fullstack/python/github_examples/magenta/generating_piano_music_with_transformer.py | ada7b928d7c7edc9bb5153e667d08f3de6b87097 | [] | no_license | maranemil/howto | edf1e294544ef6980894dcd345d73160d8aa9620 | f6270ed0affcdbd899dd8a2ff9b0b98625e63a5a | refs/heads/master | 2023-09-05T03:02:18.526914 | 2023-09-04T11:27:52 | 2023-09-04T11:27:52 | 22,177,757 | 48 | 26 | null | 2022-10-17T19:43:31 | 2014-07-23T21:04:50 | Python | UTF-8 | Python | false | false | 14,457 | py | # -*- coding: utf-8 -*-
"""Generating Piano Music with Transformer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/notebooks/magenta/piano_transformer/piano_transformer.ipynb
##### Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
# Generating Piano Music with Transformer
### ___Ian Simon, Anna Huang, Jesse Engel, Curtis "Fjord" Hawthorne___
This Colab notebook lets you play with pretrained [Transformer](https://arxiv.org/abs/1706.03762) models for piano music generation, based on the [Music Transformer](http://g.co/magenta/music-transformer) model introduced by [Huang et al.](https://arxiv.org/abs/1809.04281) in 2018.
The models used here were trained on over 10,000 hours of piano recordings from YouTube, transcribed using [Onsets and Frames](http://g.co/magenta/onsets-frames) and represented using the event vocabulary from [Performance RNN](http://g.co/magenta/performance-rnn).
Unlike the original Music Transformer paper, this notebook uses attention based on absolute instead of relative position; we may add models that use relative attention at some point in the future.
# Environment Setup
"""
#@title Setup Environment
#@markdown Copy model checkpoints and some auxiliary data from
#@markdown Google Cloud Storage. Also install and import
#@markdown Python dependencies needed for running the
#@markdown Transformer models.
#@markdown
#@markdown This cell may take a few minutes to run.
print('Copying checkpoints and Salamander piano SoundFont (via https://sites.google.com/site/soundfonts4u) from GCS...')
!gsutil -q -m cp -r gs://magentadata/models/music_transformer/* /content/
!gsutil -q -m cp gs://magentadata/soundfonts/Yamaha-C5-Salamander-JNv5.1.sf2 /content/
print('Installing dependencies...')
!apt-get update -qq && apt-get install -qq libfluidsynth1 build-essential libasound2-dev libjack-dev
!pip install -qU google-cloud magenta pyfluidsynth
import ctypes.util
def proxy_find_library(lib):
    """find_library replacement that pins fluidsynth to the bundled .so name."""
    if lib != 'fluidsynth':
        return ctypes.util.find_library(lib)
    return 'libfluidsynth.so.1'
ctypes.util.find_library = proxy_find_library
print('Importing libraries...')
import numpy as np
import os
import tensorflow as tf
from google.colab import files
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
import magenta.music as mm
from magenta.models.score2perf import score2perf
print('Done!')
#@title Definitions
#@markdown Define a few constants and helper functions.
SF2_PATH = '/content/Yamaha-C5-Salamander-JNv5.1.sf2'
SAMPLE_RATE = 16000
# Upload a MIDI file and convert to NoteSequence.
def upload_midi():
    """Prompt for a MIDI upload in Colab and return it as a NoteSequence."""
    uploaded = list(files.upload().values())
    if len(uploaded) > 1:
        print('Multiple files uploaded; using only one.')
    # Only the first uploaded file is converted.
    return mm.midi_to_note_sequence(uploaded[0])
# Decode a list of IDs.
def decode(ids, encoder):
    """Decode a list of token IDs, truncating at the EOS token if present."""
    ids = list(ids)
    try:
        ids = ids[:ids.index(text_encoder.EOS_ID)]
    except ValueError:
        # No EOS token: decode the whole sequence.
        pass
    return encoder.decode(ids)
"""# Piano Performance Language Model"""
#@title Setup and Load Checkpoint
#@markdown Set up generation from an unconditional Transformer
#@markdown model.
model_name = 'transformer'
hparams_set = 'transformer_tpu'
ckpt_path = '/content/checkpoints/unconditional_model_16.ckpt'
class PianoPerformanceLanguageModelProblem(score2perf.Score2PerfProblem):
    """Unconditional piano-performance language-modeling problem."""
    @property
    def add_eos_symbol(self):
        # Append an EOS token so generated performances can terminate.
        return True
problem = PianoPerformanceLanguageModelProblem()
unconditional_encoders = problem.get_feature_encoders()
# Set up HParams.
hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
trainer_lib.add_problem_hparams(hparams, problem)
hparams.num_hidden_layers = 16
hparams.sampling_method = 'random'
# Set up decoding HParams.
decode_hparams = decoding.decode_hparams()
decode_hparams.alpha = 0.0
decode_hparams.beam_size = 1
# Create Estimator.
run_config = trainer_lib.create_run_config(hparams)
estimator = trainer_lib.create_estimator(
model_name, hparams, run_config,
decode_hparams=decode_hparams)
# Create input generator (so we can adjust priming and
# decode length on the fly).
def input_generator():
    """Endlessly yield feeds built from the current `targets`/`decode_length`.

    Reads the module-level globals on every iteration, so priming and
    decode length can be changed between predictions without restarting
    the Estimator.
    """
    global targets
    global decode_length
    while True:
        feed = {
            'targets': np.array([targets], dtype=np.int32),
            'decode_length': np.array(decode_length, dtype=np.int32),
        }
        yield feed
# These values will be changed by subsequent cells.
targets = []
decode_length = 0
# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(input_generator())
unconditional_samples = estimator.predict(
input_fn, checkpoint_path=ckpt_path)
# "Burn" one.
_ = next(unconditional_samples)
#@title Generate from Scratch
#@markdown Generate a piano performance from scratch.
#@markdown
#@markdown This can take a minute or so depending on the length
#@markdown of the performance the model ends up generating.
#@markdown Because we use a
#@markdown [representation](http://g.co/magenta/performance-rnn)
#@markdown where each event corresponds to a variable amount of
#@markdown time, the actual number of seconds generated may vary.
targets = []
decode_length = 1024
# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']
# Decode to NoteSequence.
midi_filename = decode(
sample_ids,
encoder=unconditional_encoders['targets'])
unconditional_ns = mm.midi_file_to_note_sequence(midi_filename)
# Play and plot.
mm.play_sequence(
unconditional_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(unconditional_ns)
#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).
mm.sequence_proto_to_midi_file(
unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')
#@title Choose Priming Sequence
#@markdown Here you can choose a priming sequence to be continued
#@markdown by the model. We have provided a few, or you can
#@markdown upload your own MIDI file.
#@markdown
#@markdown Set `max_primer_seconds` below to trim the primer to a
#@markdown fixed number of seconds (this will have no effect if
#@markdown the primer is already shorter than `max_primer_seconds`).
filenames = {
'C major arpeggio': '/content/primers/c_major_arpeggio.mid',
'C major scale': '/content/primers/c_major_scale.mid',
'Clair de Lune': '/content/primers/clair_de_lune.mid',
}
primer = 'C major scale' #@param ['C major arpeggio', 'C major scale', 'Clair de Lune', 'Upload your own!']
if primer == 'Upload your own!':
primer_ns = upload_midi()
else:
# Use one of the provided primers.
primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
# Handle sustain pedal in the primer.
primer_ns = mm.apply_sustain_control_changes(primer_ns)
# Trim to desired number of seconds.
max_primer_seconds = 20 #@param {type:"slider", min:1, max:120}
if primer_ns.total_time > max_primer_seconds:
print('Primer is longer than %d seconds, truncating.' % max_primer_seconds)
primer_ns = mm.extract_subsequence(
primer_ns, 0, max_primer_seconds)
# Remove drums from primer if present.
if any(note.is_drum for note in primer_ns.notes):
print('Primer contains drums; they will be removed.')
notes = [note for note in primer_ns.notes if not note.is_drum]
del primer_ns.notes[:]
primer_ns.notes.extend(notes)
# Set primer instrument and program.
for note in primer_ns.notes:
note.instrument = 1
note.program = 0
# Play and plot the primer.
mm.play_sequence(
primer_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(primer_ns)
#@title Generate Continuation
#@markdown Continue a piano performance, starting with the
#@markdown chosen priming sequence.
targets = unconditional_encoders['targets'].encode_note_sequence(
primer_ns)
# Remove the end token from the encoded primer.
targets = targets[:-1]
decode_length = max(0, 4096 - len(targets))
if len(targets) >= 4096:
print('Primer has more events than maximum sequence length; nothing will be generated.')
# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']
# Decode to NoteSequence.
midi_filename = decode(
sample_ids,
encoder=unconditional_encoders['targets'])
ns = mm.midi_file_to_note_sequence(midi_filename)
# Append continuation to primer.
continuation_ns = mm.concatenate_sequences([primer_ns, ns])
# Play and plot.
mm.play_sequence(
continuation_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(continuation_ns)
#@title Download Continuation as MIDI
#@markdown Download performance (primer + generated continuation)
#@markdown as MIDI (optional).
mm.sequence_proto_to_midi_file(
continuation_ns, '/tmp/continuation.mid')
files.download('/tmp/continuation.mid')
"""# Melody-Conditioned Piano Performance Model"""
#@title Setup and Load Checkpoint
#@markdown Set up generation from a melody-conditioned
#@markdown Transformer model.
model_name = 'transformer'
hparams_set = 'transformer_tpu'
ckpt_path = '/content/checkpoints/melody_conditioned_model_16.ckpt'
class MelodyToPianoPerformanceProblem(score2perf.AbsoluteMelody2PerfProblem):
    """Melody-conditioned piano-performance generation problem."""
    @property
    def add_eos_symbol(self):
        # Append an EOS token so generated performances can terminate.
        return True
problem = MelodyToPianoPerformanceProblem()
melody_conditioned_encoders = problem.get_feature_encoders()
# Set up HParams.
hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
trainer_lib.add_problem_hparams(hparams, problem)
hparams.num_hidden_layers = 16
hparams.sampling_method = 'random'
# Set up decoding HParams.
decode_hparams = decoding.decode_hparams()
decode_hparams.alpha = 0.0
decode_hparams.beam_size = 1
# Create Estimator.
run_config = trainer_lib.create_run_config(hparams)
estimator = trainer_lib.create_estimator(
model_name, hparams, run_config,
decode_hparams=decode_hparams)
# These values will be changed by the following cell.
inputs = []
decode_length = 0
# Create input generator.
def input_generator():
    """Endlessly yield feeds for the melody-conditioned model.

    Reads the module-level `inputs` (and `decode_length`) on every
    iteration so the melody can be changed between predictions.
    """
    global inputs
    while True:
        feed = {
            'inputs': np.array([[inputs]], dtype=np.int32),
            'targets': np.zeros([1, 0], dtype=np.int32),
            'decode_length': np.array(decode_length, dtype=np.int32),
        }
        yield feed
# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(input_generator())
melody_conditioned_samples = estimator.predict(
input_fn, checkpoint_path=ckpt_path)
# "Burn" one.
_ = next(melody_conditioned_samples)
#@title Choose Melody
#@markdown Here you can choose a melody to be accompanied by the
#@markdown model. We have provided a few, or you can upload a
#@markdown MIDI file; if your MIDI file is polyphonic, the notes
#@markdown with highest pitch will be used as the melody.
# Tokens to insert between melody events.
event_padding = 2 * [mm.MELODY_NO_EVENT]
melodies = {
'Mary Had a Little Lamb': [
64, 62, 60, 62, 64, 64, 64, mm.MELODY_NO_EVENT,
62, 62, 62, mm.MELODY_NO_EVENT,
64, 67, 67, mm.MELODY_NO_EVENT,
64, 62, 60, 62, 64, 64, 64, 64,
62, 62, 64, 62, 60, mm.MELODY_NO_EVENT,
mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT
],
'Row Row Row Your Boat': [
60, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
60, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
60, mm.MELODY_NO_EVENT, 62,
64, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
64, mm.MELODY_NO_EVENT, 62,
64, mm.MELODY_NO_EVENT, 65,
67, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
72, 72, 72, 67, 67, 67, 64, 64, 64, 60, 60, 60,
67, mm.MELODY_NO_EVENT, 65,
64, mm.MELODY_NO_EVENT, 62,
60, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT
],
'Twinkle Twinkle Little Star': [
60, 60, 67, 67, 69, 69, 67, mm.MELODY_NO_EVENT,
65, 65, 64, 64, 62, 62, 60, mm.MELODY_NO_EVENT,
67, 67, 65, 65, 64, 64, 62, mm.MELODY_NO_EVENT,
67, 67, 65, 65, 64, 64, 62, mm.MELODY_NO_EVENT,
60, 60, 67, 67, 69, 69, 67, mm.MELODY_NO_EVENT,
65, 65, 64, 64, 62, 62, 60, mm.MELODY_NO_EVENT
]
}
melody = 'Twinkle Twinkle Little Star' #@param ['Mary Had a Little Lamb', 'Row Row Row Your Boat', 'Twinkle Twinkle Little Star', 'Upload your own!']
if melody == 'Upload your own!':
# Extract melody from user-uploaded MIDI file.
melody_ns = upload_midi()
melody_instrument = mm.infer_melody_for_sequence(melody_ns)
notes = [note for note in melody_ns.notes
if note.instrument == melody_instrument]
del melody_ns.notes[:]
melody_ns.notes.extend(
sorted(notes, key=lambda note: note.start_time))
for i in range(len(melody_ns.notes) - 1):
melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time
inputs = melody_conditioned_encoders['inputs'].encode_note_sequence(
melody_ns)
else:
# Use one of the provided melodies.
events = [event + 12 if event != mm.MELODY_NO_EVENT else event
for e in melodies[melody]
for event in [e] + event_padding]
inputs = melody_conditioned_encoders['inputs'].encode(
' '.join(str(e) for e in events))
melody_ns = mm.Melody(events).to_sequence(qpm=150)
# Play and plot the melody.
mm.play_sequence(
melody_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(melody_ns)
#@title Generate Accompaniment for Melody
#@markdown Generate a piano performance consisting of the chosen
#@markdown melody plus accompaniment.
# Generate sample events.
decode_length = 4096
sample_ids = next(melody_conditioned_samples)['outputs']
# Decode to NoteSequence.
midi_filename = decode(
sample_ids,
encoder=melody_conditioned_encoders['targets'])
accompaniment_ns = mm.midi_file_to_note_sequence(midi_filename)
# Play and plot.
mm.play_sequence(
accompaniment_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(accompaniment_ns)
#@title Download Accompaniment as MIDI
#@markdown Download accompaniment performance as MIDI (optional).
mm.sequence_proto_to_midi_file(
accompaniment_ns, '/tmp/accompaniment.mid')
files.download('/tmp/accompaniment.mid') | [
"maran.emil@gmail.com"
] | maran.emil@gmail.com |
a0857f8022a9592f8bee88c3e97a5859915ed831 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/Scaleform/daapi/settings/__init__.py | 9aa47f81067051a2e6f0d8d3d3e359503585fce2 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 494 | py | # 2016.11.19 19:49:21 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/settings/__init__.py
class BUTTON_LINKAGES(object):
    """Scaleform linkage names for the standard button styles."""
    BUTTON_BLACK = 'ButtonBlack'
    BUTTON_RED = 'ButtonRed'
    BUTTON_NORMAL = 'ButtonNormal'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\Scaleform\daapi\settings\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:49:21 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
6796605bf711e664aab04f2e9738a8154d274c11 | 5851bfab6684e49c808bcc724437a601107d17a8 | /cnova_api_lojista_v2/model/TicketStatus.py | daf055a860a0629849121960142851503970bdd2 | [] | no_license | ballke-dev/ViaVarejoSDK | 18e3224a9d1faebfa00803dd41d4e70fe392e51e | 90875423e4a5382faac926036de3cbc243a5c97f | refs/heads/master | 2020-04-17T17:57:12.034404 | 2019-01-21T11:55:24 | 2019-01-21T11:55:24 | 166,805,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | #!/usr/bin/env python
class TicketStatus:
    """Swagger-style model for a ticket status change request."""

    def __init__(self):
        # Attribute name -> swagger type, used by the (de)serializer.
        self.swaggerTypes = {'ticket_status': 'str'}
        # Python attribute name -> JSON field name.
        self.attributeMap = {'ticket_status': 'ticketStatus'}
        # Desired new ticket status: 'closed' (Fechado) or
        # 'attendance' (Em Acompanhamento).
        self.ticket_status = None  # str
| [
"ti2@ballke.com.br"
] | ti2@ballke.com.br |
642d2eb59544d36fa4596d933f4e433abb98af6d | 3fc3c2707a4618f81cc308a15abeea11c3d0101e | /neural_network.py | 2b4d75fd9475c6b50031f7e57050c11e451422b3 | [] | no_license | Louis-Saglio/Connect4 | 734fdfcf8c68f452db03f7f91827a02a8ae9049a | bbf7538ebd03c9b3be996b915546951cde15d209 | refs/heads/master | 2023-07-15T09:27:31.423144 | 2021-09-04T19:35:05 | 2021-09-04T19:35:05 | 385,692,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from __future__ import annotations
import random
from typing import Iterable
import numpy as np
def sigmoid(array):
    """Elementwise logistic sigmoid, computed in a numerically stable way.

    Equivalent to 1 / (1 + exp(-x)), but only ever exponentiates
    non-positive values, so large-magnitude negative inputs no longer
    trigger overflow warnings.
    """
    x = np.asarray(array, dtype=float)
    z = np.exp(-np.abs(x))
    # For x >= 0: 1/(1+e^-x); for x < 0: e^x/(1+e^x). Both reduce to the
    # z-based forms below with z = e^-|x|.
    return np.where(x >= 0, 1.0 / (1.0 + z), z / (1.0 + z))
def relu(array):
    """Elementwise rectified linear unit: negative entries become zero."""
    return np.maximum(array, 0)
class NeuralNetwork:
    """Minimal dense feed-forward network with mutation support.

    Each layer is a dict with 'weights' (out x in), 'bias' (out,) and an
    elementwise 'activation' callable. The class is intended for
    neuro-evolution: `clone` + `mutate` produce perturbed offspring.
    """

    def __init__(self, input_size: int, layers_size: Iterable[int]):
        """Build randomly-initialized layers chained from `input_size`."""
        self.layers = []
        for layer_size in layers_size:
            self.layers.append(
                {
                    "weights": np.random.random((layer_size, input_size)),
                    "bias": np.random.random(layer_size),
                    "activation": relu,
                }
            )
            # The next layer's input width is this layer's output width.
            input_size = layer_size

    def feedforward(self, input_data) -> np.ndarray:
        """Run `input_data` through all layers; return a sum-normalized vector.

        NOTE(review): if every output unit is zero after the activation,
        the final normalization divides by zero and yields NaNs — assumed
        acceptable in this evolutionary setting; confirm before reuse.
        """
        for layer in self.layers:
            input_data = layer["activation"](
                np.dot(layer["weights"], input_data) + layer["bias"]
            )
        return input_data / np.sum(input_data)

    def clone(self) -> "NeuralNetwork":
        """Return an independent copy (weights/biases copied, activations shared)."""
        new = NeuralNetwork(0, [])
        for layer in self.layers:
            new.layers.append(
                {
                    "weights": layer["weights"].copy(),
                    "bias": layer["bias"].copy(),
                    "activation": layer["activation"],
                }
            )
        return new

    def mutate(self):
        """Perturb one randomly chosen weight by a uniform value in [-5, 5)."""
        # randrange(n) draws the same distribution as choice(range(n))
        # without materializing the range object.
        layer = random.randrange(len(self.layers))
        neuron = random.randrange(len(self.layers[layer]["weights"]))
        weight = random.randrange(len(self.layers[layer]["weights"][neuron]))
        self.layers[layer]["weights"][neuron][weight] += (random.random() - 0.5) * 10
if __name__ == '__main__':
    # Smoke test: build a network, clone it, and mutate the original.
    nn = NeuralNetwork(input_size=100, layers_size=[50, 25, 10])
    # NOTE(review): `data` is never used — presumably intended for a
    # nn.feedforward(data) call that was removed or not yet written.
    data = np.random.random(100)
    clone = nn.clone()
    nn.mutate()
| [
"louis.saglio@sfr.fr"
] | louis.saglio@sfr.fr |
2aad9c9d2778ce0c1f3d99e883d37d9ca996fc08 | f1cb02057956e12c352a8df4ad935d56cb2426d5 | /LeetCode/1928. Minimum Cost to Reach Destination in Time/Solution.py | cdd9f72047c03732266a7e77c97893623d41598b | [] | no_license | nhatsmrt/AlgorithmPractice | 191a6d816d98342d723e2ab740e9a7ac7beac4ac | f27ba208b97ed2d92b4c059848cc60f6b90ce75e | refs/heads/master | 2023-06-10T18:28:45.876046 | 2023-05-26T07:46:42 | 2023-05-26T07:47:10 | 147,932,664 | 15 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | class Solution:
def minCost(self, maxTime: int, edges: List[List[int]], passingFees: List[int]) -> int:
    """Minimum total passing fee to reach node n-1 within maxTime, or -1.

    Conceptually works on the augmented graph whose states are
    (node, time remaining): an original edge (u, v, t) links (u, r) to
    (v, r - t) whenever r >= t. That graph is a DAG, so the memoized
    search in getCost terminates. Time O(maxTime * (V + E)), space
    O(maxTime * V) for the memo table.
    """
    neighbors = {v: set() for v in range(len(passingFees))}
    for u, v, t in edges:
        neighbors[u].add((v, t))
        neighbors[v].add((u, t))
    return self.getCost(maxTime, 0, {}, neighbors, passingFees)
def getCost(self, remain: int, node: int, dp, adj_lists, passingFees: List[int]) -> int:
    """Cheapest fee from `node` to the last node using at most `remain` time.

    Memoized on (remain, node); returns -1 when the destination is
    unreachable within the remaining time budget.
    """
    key = (remain, node)
    if key in dp:
        return dp[key]
    if node == len(passingFees) - 1:
        # Arrived: pay the destination's fee and stop.
        return passingFees[-1]
    if remain == 0:
        return -1
    best = -1
    for nxt, travel in adj_lists.get(node, []):
        if travel > remain:
            continue
        sub = self.getCost(remain - travel, nxt, dp, adj_lists, passingFees)
        if sub >= 0 and (best == -1 or sub < best):
            best = sub
    if best >= 0:
        best += passingFees[node]
    dp[key] = best
    return best
| [
"nphamcs@gmail.com"
] | nphamcs@gmail.com |
20ddf389d4c12ba065c2b68d113e910ddc480ebe | cc411c170c37397480d4ec2e290ffb66a57fefb5 | /src/cfd/demo.py | 984aeeffd301e42e563e1fd44d6e3fe3ea3d1ebe | [] | no_license | chenmaoshan/xmw_seismic | 0ab8c6672e4cced75c4dc3341a7d4c679146fa48 | c45d6795d74f657aba1ca7e291f533e9a21341ab | refs/heads/master | 2021-01-17T08:43:46.733403 | 2016-07-08T15:26:18 | 2016-07-08T15:26:18 | 62,929,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,766 | py | """
Demonstrate simultaneous multiple-well ties
Author: Xinming Wu, Colorado School of Mines
Version: 2016.05.11
"""
from utils import *
# Select the dataset subset and pull its sampling grids.
setupForSubset("cfd2007")
s1,s2,s3 = getSamplings()
n1,n2,n3 = s1.count,s2.count,s3.count
d1,d2,d3 = s1.delta,s2.delta,s3.delta
f1,f2,f3 = s1.first,s2.first,s3.first
# Names and descriptions of image files used below.
sfile = "cfs" # input seismic image
ssfile = "cfss" # smoothed seismic image
# Choose which log type to grid: velocity (active) or density (commented).
logType = "v"; logLabel = "Velocity (km/s)"; vmin,vmax,cit = 2.4,5.0,1.0
#logType = "d"; logLabel = "Density (g/cc)"; vmin,vmax,cit = 2.2,2.8,0.2
gfile = "cfg"+logType # simple gridding with null for unknown samples
pfile = "cfp"+logType # values of nearest known samples
qfile = "cfq"+logType # output of blended gridder
q1file = "cfq1"+logType # output of blended gridder
q2file = "cfq2"+logType # output of blended gridder
q3file = "cfq3"+logType # output of blended gridder
q4file = "cfq4"+logType # output of blended gridder
q5file = "cfq5"+logType # output of blended gridder
tfile = "cft"+logType # times to nearest known samples
p2file = "p2"
p3file = "p3"
epfile = "ep"
gffile = "gf"
u1file = "u1"
fpfile = "fp"
flfile = "fl"
ftfile = "ft"
dwfile = "dw"
gufile = "gu"
fskbase = "fsk"
fskgood = "fsg"
fslbase = "fsl"
# These parameters control the scan over fault strikes and dips.
# See the class FaultScanner for more information.
minPhi,maxPhi = 180,300
minTheta,maxTheta = 75,85
sigmaPhi,sigmaTheta = 20,40
# These parameters control the construction of fault skins.
# See the class FaultSkinner for more information.
lowerLikelihood = 0.3
upperLikelihood = 0.7
minSkinSize = 20000
# These parameters control the computation of fault dip slips.
# See the class FaultSlipper for more information.
minThrow = 0.01
maxThrow = 15.0
# Directory for saved png images. If None, png images will not be saved;
# otherwise, must create the specified directory before running this script.
#pngDir = ".././../png/swt/print/"
plotOnly = True
# NOTE(review): pngDir is assigned None and immediately overwritten below;
# toggle the last assignment to disable png output.
pngDir = None
pngDir = "../../../png/cfd/"
# Processing begins here. When experimenting with one part of this demo, we
# can comment out earlier parts that have already written results to files.
def main(args):
  """Entry point: each go*/grid* call is one stage of the workflow.

  Stages are commented in/out by hand; by default only the CO2 plume
  before/after comparison plots are produced from previously written files.
  """
  #goSeisAndWells()
  #goSlopes()
  #goScan()
  #goSkin()
  #goReSkin()
  #gridNearest()
  #gridBlendedP()
  #gridBlendedQ()
  #goFigures()
  #goHorizon()
  '''
  go1stCo2()
  go2ndCo2()
  go3rdCo2()
  go4thCo2()
  go5thCo2()
  '''
  goCo2Plot()
def goCo2Plot():
gx = readImageL(sfile)
q0 = readImageL(qfile)
qc = readImageL(q5file)
#plot3(gx,q,cmin=vmin,cmax=vmax)
plot3(gx,q0,cmin=vmin,cmax=vmax,png="co2Initial")
plot3(gx,qc,cmin=vmin,cmax=vmax,png="co2Final")
def go1stCo2():
  """Simulate the 1st CO2 injection stage.

  Lowers the interpolated velocity by 0.38 inside a disk of radius 15
  traces around (c2,c3), for 10 samples just below the picked horizon,
  and writes the result to q1file.
  """
  gx = readImageL(sfile)
  q = readImageL(qfile)
  surf = readImage2(n2,n3,"surf")  # horizon depth (in samples) per trace
  c1,c2,c3 = 820,120,130  # injection center (sample, inline, crossline)
  qc = copy(q)
  # NOTE(review): range(-15,15) excludes +15, so the footprint is slightly
  # asymmetric — confirm intended.
  for k3 in range(-15,15,1):
    for k2 in range(-15,15,1):
      ds = sqrt(k2*k2+k3*k3)
      if (ds<=15):  # keep only traces inside the circular plume footprint
        for k1 in range(-10,0,1):
          # surf appears indexed [i2][i3] while volumes are [i3][i2][i1];
          # +10 shifts the plume just below the horizon
          i1 = round(surf[c2+k2][c3+k3])+10
          qc[c3+k3][c2+k2][i1+k1] = q[c3+k3][c2+k2][i1+k1]-0.38
  writeImage(q1file,qc)
  #plot3(gx,q,cmin=vmin,cmax=vmax)
  #plot3(gx,qc,cmin=vmin,cmax=vmax)
def go2ndCo2():
gx = readImageL(sfile)
q0 = readImageL(qfile)
q1 = readImageL(q1file)
surf = readImage2(n2,n3,"surf")
c1,c2,c3 = 820,120,130
qc = copy(q1)
for k3 in range(-25,25,1):
for k2 in range(-25,25,1):
ds = sqrt(k2*k2+k3*k3)
if (ds<=25):
for k1 in range(-20,-10):
i1 = round(surf[c2+k2][c3+k3])+10
qc[c3+k3][c2+k2][i1+k1] = q1[c3+k3][c2+k2][i1+k1]-0.38
writeImage(q2file,qc)
#plot3(gx,q,cmin=vmin,cmax=vmax)
#plot3(gx,qc,cmin=vmin,cmax=vmax)
def go3rdCo2():
gx = readImageL(sfile)
q0 = readImageL(qfile)
q2 = readImageL(q2file)
surf = readImage2(n2,n3,"surf")
c1,c2,c3 = 820,120,130
qc = copy(q2)
for k3 in range(-35,35,1):
for k2 in range(-35,35,1):
ds = sqrt(k2*k2+k3*k3)
if (ds<=35):
for k1 in range(-30,-20):
i1 = round(surf[c2+k2][c3+k3])+10
qc[c3+k3][c2+k2][i1+k1] = q2[c3+k3][c2+k2][i1+k1]-0.38
writeImage(q3file,qc)
#plot3(gx,q,cmin=vmin,cmax=vmax)
#plot3(gx,qc,cmin=vmin,cmax=vmax)
def go4thCo2():
gx = readImageL(sfile)
q0 = readImageL(qfile)
q3 = readImageL(q3file)
surf = readImage2(n2,n3,"surf")
c1,c2,c3 = 820,120,130
o1,o2,o3 = 776, 93, 99
qc = copy(q3)
for k3 in range(-45,45,1):
for k2 in range(-45,45,1):
ds = sqrt(k2*k2+k3*k3)
if (ds<=45):
for k1 in range(-30,-20):
i1 = round(surf[c2+k2][c3+k3])+10
qc[c3+k3][c2+k2][i1+k1] = q0[c3+k3][c2+k2][i1+k1]-0.38
for k3 in range(-15,15,1):
for k2 in range(-15,15,1):
ds = sqrt(k2*k2+k3*k3)
if (ds<=15):
d1 = round(surf[o2][o3]-o1)
for k1 in range(-20, 0):
i1 = round(surf[o2+k2][o3+k3])-d1+10
qc[o3+k3][o2+k2][i1+k1] = q0[o3+k3][o2+k2][i1+k1]-0.38
writeImage(q4file,qc)
#plot3(gx,q,cmin=vmin,cmax=vmax)
#plot3(gx,qc,cmin=vmin,cmax=vmax)
def go5thCo2():
gx = readImageL(sfile)
q0 = readImageL(qfile)
q4 = readImageL(q4file)
surf = readImage2(n2,n3,"surf")
c1,c2,c3 = 820,120,130
o1,o2,o3 = 770, 93, 99
qc = copy(q4)
for k3 in range(-55,55,1):
for k2 in range(-55,55,1):
ds = sqrt(k2*k2+k3*k3)
if (ds<=55):
for k1 in range(-30,-20):
i1 = round(surf[c2+k2][c3+k3])+10
qc[c3+k3][c2+k2][i1+k1] = q0[c3+k3][c2+k2][i1+k1]-0.38
for k3 in range(-25,25,1):
for k2 in range(-25,25,1):
ds = sqrt(k2*k2+k3*k3)
if (ds<=25):
d1 = round(surf[o2][o3]-o1)
for k1 in range(-30, 0):
i1 = round(surf[o2+k2][o3+k3])-d1+10
qc[o3+k3][o2+k2][i1+k1] = q0[o3+k3][o2+k2][i1+k1]-0.38
writeImage(q5file,qc)
#plot3(gx,q,cmin=vmin,cmax=vmax)
plot3(gx,qc,cmin=vmin,cmax=vmax)
plot3(gx,sub(q0,qc),cmin=0.0,cmax=0.2,surf=surf)
def goHorizon():
k13 = [110]#, 32, 87]
k12 = [120]#,148,151]
k11 = [820]#,826,822]
q = readImageL(qfile)
gx = readImageL(sfile)
p2 = readImageL(p2file)
p3 = readImageL(p3file)
ep = readImageL(epfile)
wp = pow(ep,4)
se = SurfaceExtractorC()
se.setWeights(0.0)
se.setSmoothings(6.0,6.0)
se.setCG(0.01,200)
surf = se.surfaceInitialization(n2,n3,n1-1,k11,k12,k13)
se.surfaceUpdateFromSlopes(wp,p2,p3,k11,k12,k13,surf)
plot3(gx,surf=surf)
plot3(gx,q,cmin=vmin,cmax=vmax,surf=surf)
writeImage("surf",surf)
def goFigures():
g = readImageL(sfile)
q = readImageL(qfile)
mds=[]
x12,x13,w1s = getLog242()
x22,x23,w2s = getLog281()
mds.append(SynSeis.getModel(x12,x13,w1s[0],w1s[1],w1s[2]))
mds.append(SynSeis.getModel(x22,x23,w2s[0],w2s[1],w2s[2]))
swt = SeismicWellTie()
sps = swt.getSamples(s1,mds)
if logType=="v":
spc = sps[0]
if logType=="d":
spc = sps[1]
plot3(g,sps=spc,wmin=vmin,wmax=vmax,clab=logLabel,cint=cit,png="seis"+logType)
plot3(g,q,cmin=vmin,cmax=vmax,sps=spc,wmin=vmin,wmax=vmax,
clab=logLabel,cint=cit,png="interp"+logType)
def goSeisAndWells():
gx = readImage(sfile)
x12,x13,w1s = getLog242()
x22,x23,w2s = getLog281()
mds=[]
mds.append(SynSeis.getModel(x12,x13,w1s[0],w1s[1],w1s[2]))
mds.append(SynSeis.getModel(x22,x23,w2s[0],w2s[1],w2s[2]))
swt = SeismicWellTie()
sps = swt.getSamples(s1,mds)
plot3(gx,sps=sps[1],wmin=2.2,wmax=2.8,clab="Density (g/cc)",png="seisDen")
plot3(gx,sps=sps[0],wmin=2.4,wmax=5.0,clab="Velocity (km/s)",png="seisVel")
def goSlopes():
print "goSlopes ..."
#gx = readImageL(sfile)
gx = readImageL(qfile)
sigma1,sigma2,sigma3,pmax = 2.0,2.0,2.0,5.0
p2,p3,ep = FaultScanner.slopes(sigma1,sigma2,sigma3,pmax,gx)
zm = ZeroMask(0.1,5,1,1,gx)
zero,tiny=0.0,0.01
zm.setValue(zero,p2)
zm.setValue(zero,p3)
zm.setValue(tiny,ep)
writeImage(p2file,p2)
writeImage(p3file,p3)
writeImage(epfile,ep)
print "p2 min =",min(p2)," max =",max(p2)
print "p3 min =",min(p3)," max =",max(p3)
plot3(gx,p2, cmin=-1,cmax=1,cmap=bwrNotch(1.0),
clab="Inline slope (sample/sample)",png="p2")
plot3(gx,p3, cmin=-1,cmax=1,cmap=bwrNotch(1.0),
clab="Crossline slope (sample/sample)",png="p3")
plot3(gx,ep,cmin=0,cmax=1,cmap=jetRamp(1.0),
clab="Planarity")
def goScan():
print "goScan ..."
gx = readImage(sfile)
if not plotOnly:
p2 = readImage(p2file)
p3 = readImage(p3file)
gx = FaultScanner.taper(10,0,0,gx)
fs = FaultScanner(sigmaPhi,sigmaTheta)
fl,fp,ft = fs.scan(minPhi,maxPhi,minTheta,maxTheta,p2,p3,gx)
zm = ZeroMask(0.3,5,1,1,gx)
zero=0.0
zm.setValue(zero,fl)
zm.setValue(zero,fp)
zm.setValue(zero,ft)
print "fl min =",min(fl)," max =",max(fl)
print "fp min =",min(fp)," max =",max(fp)
print "ft min =",min(ft)," max =",max(ft)
writeImage(flfile,fl)
writeImage(fpfile,fp)
writeImage(ftfile,ft)
else:
fl = readImage(flfile)
fp = readImage(fpfile)
ft = readImage(ftfile)
plot3(gx,clab="Amplitude")
plot3(gx,fl,cmin=0.25,cmax=1,cmap=jetRamp(1.0),
clab="Fault likelihood",png="fl")
plot3(gx,fp,cmin=minPhi,cmax=maxPhi,cmap=jetFill(1.0),
clab="Fault strike (degrees)",cint=45,png="fp")
plot3(gx,ft,cmin=minTheta,cmax=maxTheta,cmap=jetFill(1.0),
clab="Fault dip (degrees)",png="ft")
def goSkin():
print "goSkin ..."
gx = readImage(sfile)
p2 = readImage(p2file)
p3 = readImage(p3file)
fl = readImage(flfile)
fp = readImage(fpfile)
ft = readImage(ftfile)
fs = FaultSkinner()
for i3 in range(n3):
for i2 in range(n2):
for i1 in range(690):
fl[i3][i2][i1] = 0
fs.setGrowLikelihoods(lowerLikelihood,upperLikelihood)
fs.setMinSkinSize(minSkinSize)
fs.setMaxDeltaStrike(10)
fs.setMaxPlanarDistance(0.2)
cells = fs.findCells([fl,fp,ft])
skins = fs.findSkins(cells)
for skin in skins:
skin.smoothCellNormals(4)
print "total number of cells =",len(cells)
print "total number of skins =",len(skins)
print "number of cells in skins =",FaultSkin.countCells(skins)
removeAllSkinFiles(fskbase)
writeSkins(fskbase,skins)
plot3F(gx,cells=cells,png="cells")
plot3F(gx,skins=skins,png="skins")
def goReSkin():
print "goReSkin ..."
useOldCells = True
gx = readImage(sfile)
if not plotOnly:
fl = readImage(flfile)
sk = readSkins(fskbase)
fsx = FaultSkinnerX()
fsx.setParameters(10,10,0.2)
fsx.setGrowLikelihoods(lowerLikelihood,upperLikelihood)
fsx.setMinSkinSize(minSkinSize)
fsx.setMaxPlanarDistance(0.2)
fsx.setSkinning(useOldCells)
cells = FaultSkin.getCells(sk)
fsx.resetCells(cells)
skins = fsx.findSkinsXX(cells,fl)
removeAllSkinFiles(fskgood)
writeSkins(fskgood,skins)
skins = readSkins(fskgood)
for skin in skins:
skin.smoothCellNormals(4)
plot3F(gx,skins=skins,png="skinsNew")
plot3F(gx,skins=skins,links=True,png="skinsNewLinks")
def goSmooth():
print "goSmooth ..."
flstop = 0.1
fsigma = 8.0
gx = readImage(sfile)
skins = readSkins(fskgood)
flt = zerofloat(n1,n2,n3)
fsx = FaultSkinnerX()
fsx.getFl(skins,flt)
p2,p3,ep = FaultScanner.slopes(4.0,2.0,2.0,5.0,gx)
gsx = FaultScanner.smooth(flstop,fsigma,p2,p3,flt,gx)
writeImage(p2file,p2)
writeImage(p3file,p3)
writeImage(epfile,ep)
writeImage(ssfile,gsx)
plot3(gx,flt,cmin=0.25,cmax=1,cmap=jetRamp(1.0),
clab="Fault likelihood",png="fli")
plot3(gsx,png="gsx")
def goInterp():
gx = readImage(sfile)
tm = TensorMaker()
mk = tm.mask(0.3,5.0,1.0,1.0,gx)
et = tm.applyForTensors(4.0,2.0,2.0,mk,gx)
et.setEigenvalues(0.0001,1.0,1.0)
k1,k2,k3,fx=getSamples()
wp = fillfloat(1.0,n1,n2,n3)
fp = FastInterp(6.0,6.0)
fp.setTensors(et)
fp.setIterations(0.001,500)
px = fp.interpolate(wp,k1,k2,k3,fx)
writeImage(qfile,px)
mds=[]
x12,x13,w1s = getLog242()
x22,x23,w2s = getLog281()
mds.append(SynSeis.getModel(x12,x13,w1s[0],w1s[1],w1s[2]))
mds.append(SynSeis.getModel(x22,x23,w2s[0],w2s[1],w2s[2]))
swt = SeismicWellTie()
sps = swt.getSamples(s1,mds)
plot3(gx,px,cmin=2.2,cmax=2.8,sps=sps[1],wmin=2.2,wmax=2.8,
clab="Density (g/cc)",png="seisDen")
def gridBlendedP():
tm = TensorMaker()
gx = readImage(sfile)
mk = tm.mask(0.3,5.0,1.0,1.0,gx)
et = tm.applyForTensors(4.0,2.0,2.0,mk,gx)
fs = FaultSkinnerX()
sks = readSkins(fskgood)
fls = fillfloat(0.01,n1,n2,n3)
fs.getFls(sks,fls)
et.scale(fls) # scale structure tensors by fls
et.invertStructure(1.0,1.0,1.0) # invert and normalize
et.setEigenvalues(0.001,1.0,1.0)
bi = BlendedGridder3(et)
p = readImage(gfile)
t = bi.gridNearest(0.0,p)
writeImage(pfile,p)
writeImage(tfile,t)
def gridBlendedQ():
tm = TensorMaker()
gx = readImage(sfile)
mk = tm.mask(0.3,5.0,1.0,1.0,gx)
et = tm.applyForTensors(4.0,2.0,2.0,mk,gx)
fs = FaultSkinnerX()
sks = readSkins(fskgood)
fls = fillfloat(0.01,n1,n2,n3)
fs.getFls(sks,fls)
et.scale(fls) # scale structure tensors by fls
et.invertStructure(1.0,1.0,1.0) # invert and normalize
eu = fillfloat(0.001,n1,n2,n3)
ev = fillfloat(1.000,n1,n2,n3)
ew = fillfloat(1.000,n1,n2,n3)
et.setEigenvalues(eu,ev,ew)
bg = BlendedGridder3(et)
bg.setSmoothness(1.0)
p = readImage(pfile)
t = readImage(tfile)
t = clip(0.0,50.0,t)
q = copy(p)
bg.gridBlended(t,p,q)
writeImage(qfile,q)
def makeImageTensors(s):
"""
Returns tensors for guiding along features in specified image.
"""
sigma = 3
n1,n2 = len(s[0]),len(s)
lof = LocalOrientFilter(sigma)
t = lof.applyForTensors(s) # structure tensors
c = coherence(sigma,t,s) # structure-oriented coherence c
c = clip(0.0,0.99,c) # c clipped to range [0,1)
t.scale(sub(1.0,c)) # scale structure tensors by 1-c
t.invertStructure(1.0,1.0) # invert and normalize
return t
def getSamples():
  """Collect grid indices and log values for the two well logs.

  Returns (k1s,k2s,k3s,fxs): parallel lists of depth/inline/crossline grid
  indices and the corresponding log values — density when logType=="d",
  velocity otherwise — ready to pass to a gridder.
  """
  mds=[]
  x12,x13,w1s = getLog242()
  x22,x23,w2s = getLog281()
  mds.append(SynSeis.getModel(x12,x13,w1s[0],w1s[1],w1s[2]))
  mds.append(SynSeis.getModel(x22,x23,w2s[0],w2s[1],w2s[2]))
  swt = SeismicWellTie()
  sps = swt.getSamples(s1,mds)
  # nearest grid indices of the two well locations
  i12 = round((x12-f2)/d2)
  i13 = round((x13-f3)/d3)
  i22 = round((x22-f2)/d2)
  i23 = round((x23-f3)/d3)
  i2s = [i12,i22]
  i3s = [i13,i23]
  k1s,k2s,k3s,fxs=[],[],[],[]
  for il in range(2):
    i2 = i2s[il]
    i3 = i3s[il]
    w1 = sps[0][1][il]  # depth samples
    wv = sps[0][0][il]  # velocity samples
    wd = sps[1][0][il]  # density samples
    for k1 in range(len(w1)):
      i1 = round((w1[k1]-f1)/d1)
      k1s.append(i1)
      k2s.append(i2)
      k3s.append(i3)
      if logType=="d":
        fxs.append(wd[k1])
      else:
        # bug fix: was fxs.add(wv[k1]) — Python lists have no .add(), which
        # raised AttributeError whenever logType != "d" (the default "v" path)
        fxs.append(wv[k1])
  return k1s,k2s,k3s,fxs
def gridNearest():
mds=[]
x12,x13,w1s = getLog242()
x22,x23,w2s = getLog281()
mds.append(SynSeis.getModel(x12,x13,w1s[0],w1s[1],w1s[2]))
mds.append(SynSeis.getModel(x22,x23,w2s[0],w2s[1],w2s[2]))
swt = SeismicWellTie()
sps = swt.getSamples(s1,mds)
i12 = round((x12-f2)/d2)
i13 = round((x13-f3)/d3)
i22 = round((x22-f2)/d2)
i23 = round((x23-f3)/d3)
i2s = [i12,i22]
i3s = [i13,i23]
gvs = zerofloat(n1,n2,n3)
gds = zerofloat(n1,n2,n3)
for il in range(2):
i2 = i2s[il]
i3 = i3s[il]
w1 = sps[0][1][il]
wv = sps[0][0][il]
wd = sps[1][0][il]
for k1 in range(len(w1)):
i1 = round((w1[k1]-f1)/d1)
gvs[i3][i2][i1] = wv[k1]
gds[i3][i2][i1] = wd[k1]
writeImage("cfgv",gvs)
writeImage("cfgd",gds)
def like(x):
  """Return a zero-filled 2D array with the same shape as x."""
  return zerofloat(len(x[0]),len(x))
def gain(x):
  """Automatic gain control: normalize x by a smoothed amplitude envelope.

  Squares the samples, smooths them along the first axis with an
  exponential filter (sigma=10 samples), and divides x by the square
  root of that smoothed power.
  """
  g = mul(x,x)
  ref = RecursiveExponentialFilter(10.0)
  ref.apply1(g,g)
  y = like(x)
  div(x,sqrt(g),y)
  return y
def slice12(k3,f):
  """Extract the constant-i3 slice f[k3] as a new (n1,n2) array."""
  n1,n2,n3 = len(f[0][0]),len(f[0]),len(f)
  s = zerofloat(n1,n2)
  SimpleFloat3(f).get12(n1,n2,0,0,k3,s)
  return s
def slice13(k2,f):
  """Extract the constant-i2 slice f[:][k2][:] as a new (n1,n3) array."""
  n1,n2,n3 = len(f[0][0]),len(f[0]),len(f)
  s = zerofloat(n1,n3)
  SimpleFloat3(f).get13(n1,n3,0,k2,0,s)
  return s
def slice23(k1,f):
  """Extract the constant-i1 slice f[:][:][k1] as a new (n2,n3) array."""
  n1,n2,n3 = len(f[0][0]),len(f[0]),len(f)
  s = zerofloat(n2,n3)
  SimpleFloat3(f).get23(n2,n3,k1,0,0,s)
  return s
def plot1(s1,ys,hlabel="Seismic traces",vlabel="depth (km)",png=None):
sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
for y in ys:
pv = sp.addPoints(s1,y)
pv.setLineColor(Color.BLACK)
#sp.setVLimits(0.1,1.1)
sp.setSize(800,800)
sp.setHLabel(hlabel)
sp.setVLabel(vlabel)
if png and pngDir:
sp.paintToPng(300,7.0,pngDir+png+".png")
def plot1s(s1,ss,ys,rs=None,vmin=None,vmax=None,color=Color.RED,
hlabel="Log index",vlabel="Time (s)",png=None):
sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
sf = 1.0
yf = sf
sp.setVLimits(0.1,1.0)
if vmin and vmax:
sp.setVLimits(vmin,vmax)
sp.setHLimits(0.5,11.5)
sp.setHInterval(2)
for il,y in enumerate(ys):
ya = sum(y)/len(y)
y = sub(y,ya)
y = div(y,10)
y = add(y,yf)
pv = sp.addPoints(ss[il],y)
pv.setLineColor(color)
pv.setLineWidth(1.5)
yf = yf+sf
rf = sf
if rs:
for il,r in enumerate(rs):
ra = sum(r)/len(r)
r = sub(r,ra)
r = div(r,10)
r = add(r,rf)
pv = sp.addPoints(s1,r)
pv.setLineColor(Color.BLACK)
pv.setLineWidth(1.5)
rf = rf+sf
sp.setSize(600,650)
sp.setHLabel(hlabel)
sp.setVLabel(vlabel)
#sp.setFontSize(20) #for print
sp.setFontSize(30) #for slides
sp.setVInterval(0.2)
if png and pngDir:
sp.paintToPng(300,7.0,pngDir+png+".png")
def plot2(w,sz,sl,wmin=0.0,wmax=0.0,vlabel="Time (s)",cbar=None,png=None):
sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
sp.setSize(500,900)
sp.setVLabel(vlabel)
sp.setHLabel("Log index")
sp.addColorBar(cbar)
sp.plotPanel.setColorBarWidthMinimum(90)
pv = sp.addPixels(sz,sl,w)
pv.setInterpolation(PixelsView.Interpolation.NEAREST)
pv.setColorModel(ColorMap.GRAY)
pv.setClips(wmin,wmax)
if png and pngDir:
sp.paintToPng(300,7.0,pngDir+png+".png")
def plot3F(f,g=None,cmin=None,cmax=None,cmap=None,clab=None,cint=None,
xyz=None,cells=None,skins=None,fbs=None,smax=0.0,
links=False,curve=False,trace=False,png=None):
n1 = len(f[0][0])
n2 = len(f[0])
n3 = len(f)
sf = SimpleFrame(AxesOrientation.XRIGHT_YOUT_ZDOWN)
cbar = None
if g==None:
ipg = sf.addImagePanels(s1,s2,s3,f)
if cmap!=None:
ipg.setColorModel(cmap)
if cmin!=None and cmax!=None:
ipg.setClips(cmin,cmax)
else:
ipg.setClips(-3.0,3.0)
if clab:
cbar = addColorBar(sf,clab,cint)
ipg.addColorMapListener(cbar)
else:
ipg = ImagePanelGroup2(s1,s2,s3,f,g)
ipg.setClips1(-3.0,3.0)
if cmin!=None and cmax!=None:
ipg.setClips2(cmin,cmax)
if cmap==None:
cmap = jetFill(0.8)
ipg.setColorModel2(cmap)
if clab:
cbar = addColorBar(sf,clab,cint)
ipg.addColorMap2Listener(cbar)
sf.world.addChild(ipg)
if cbar:
cbar.setWidthMinimum(120)
if xyz:
pg = PointGroup(0.2,xyz)
ss = StateSet()
cs = ColorState()
cs.setColor(Color.YELLOW)
ss.add(cs)
pg.setStates(ss)
#ss = StateSet()
#ps = PointState()
#ps.setSize(5.0)
#ss.add(ps)
#pg.setStates(ss)
sf.world.addChild(pg)
if cells:
ss = StateSet()
lms = LightModelState()
lms.setTwoSide(True)
ss.add(lms)
ms = MaterialState()
ms.setSpecular(Color.GRAY)
ms.setShininess(100.0)
ms.setColorMaterial(GL_AMBIENT_AND_DIFFUSE)
ms.setEmissiveBack(Color(0.0,0.0,0.5))
ss.add(ms)
cmap = ColorMap(0.0,1.0,ColorMap.JET)
xyz,uvw,rgb = FaultCell.getXyzUvwRgbForLikelihood(0.5,cmap,cells,False)
qg = QuadGroup(xyz,uvw,rgb)
qg.setStates(ss)
sf.world.addChild(qg)
if fbs:
mc = MarchingCubes(s1,s2,s3,fbs)
ct = mc.getContour(0.0)
tg = TriangleGroup(ct.i,ct.x,ct.u)
states = StateSet()
cs = ColorState()
cs.setColor(Color.CYAN)
states.add(cs)
lms = LightModelState()
lms.setTwoSide(True)
states.add(lms)
ms = MaterialState()
ms.setColorMaterial(GL_AMBIENT_AND_DIFFUSE)
ms.setSpecular(Color.WHITE)
ms.setShininess(100.0)
states.add(ms)
tg.setStates(states);
sf.world.addChild(tg)
if skins:
sg = Group()
ss = StateSet()
lms = LightModelState()
lms.setTwoSide(True)
ss.add(lms)
ms = MaterialState()
ms.setSpecular(Color.GRAY)
ms.setShininess(100.0)
ms.setColorMaterial(GL_AMBIENT_AND_DIFFUSE)
if not smax:
ms.setEmissiveBack(Color(0.0,0.0,0.5))
ss.add(ms)
sg.setStates(ss)
size = 2.0
if links:
size = 0.5
for skin in skins:
if smax>0.0: # show fault throws
cmap = ColorMap(0.0,smax,ColorMap.JET)
xyz,uvw,rgb = skin.getCellXyzUvwRgbForThrow(size,cmap,False)
else: # show fault likelihood
cmap = ColorMap(0.0,1.0,ColorMap.JET)
xyz,uvw,rgb = skin.getCellXyzUvwRgbForLikelihood(size,cmap,False)
qg = QuadGroup(xyz,uvw,rgb)
qg.setStates(None)
sg.addChild(qg)
if curve or trace:
cell = skin.getCellNearestCentroid()
if curve:
xyz = cell.getFaultCurveXyz()
pg = PointGroup(0.5,xyz)
sg.addChild(pg)
if trace:
xyz = cell.getFaultTraceXyz()
pg = PointGroup(0.5,xyz)
sg.addChild(pg)
if links:
xyz = skin.getCellLinksXyz()
lg = LineGroup(xyz)
sg.addChild(lg)
sf.world.addChild(sg)
#ipg.setSlices(198,0,89)
ipg.setSlices(198,0,58)
if cbar:
sf.setSize(837,600)
else:
sf.setSize(700,600)
vc = sf.getViewCanvas()
vc.setBackground(Color.WHITE)
radius = 0.5*sqrt(n1*n1+n2*n2+n3*n3)
ov = sf.getOrbitView()
ov.setWorldSphere(BoundingSphere(0.5*n1,0.5*n2,0.5*n3,radius))
ov.setAzimuthAndElevation(-55.0,25.0)
ov.setTranslate(Vector3(0.03,0.33,0.15))
ov.setScale(1.4)
sf.setVisible(True)
if png and pngDir:
sf.paintToFile(pngDir+png+".png")
if cbar:
cbar.paintToPng(137,1,pngDir+png+"cbar.png")
def plot3X(s1,f,g=None,cmin=None,cmax=None,cmap=None,clab=None,cint=None,
slices=None,surf=None,hs=None,logs=None,sps=None,curve=None,
wmin=0,wmax=0,png=None):
n1,n2,n3 = s1.count,s2.count,s3.count
d1,d2,d3 = s1.delta,s2.delta,s3.delta
f1,f2,f3 = s1.first,s2.first,s3.first
l1,l2,l3 = s1.last,s2.last,s3.last
sf = SimpleFrame(AxesOrientation.XRIGHT_YOUT_ZDOWN)
cbar = None
if g==None:
ipg = sf.addImagePanels(s1,s2,s3,f)
if cmap!=None:
ipg.setColorModel(cmap)
if wmin!=0 and wmax!=0:
ipg.setClips(wmin,wmax)
if cmin!=None and cmax!=None:
ipg.setClips(cmin,cmax)
else:
#ipg.setClips(-2.0,2.0)
ipg.setClips(-2.0,1.5) # use for subset plots
if clab:
cbar = addColorBar(sf,clab,cint)
ipg.addColorMapListener(cbar)
else:
ipg = ImagePanelGroup2(s1,s2,s3,f,g)
ipg.setClips1(-2.0,1.5)
if cmin!=None and cmax!=None:
ipg.setClips2(cmin,cmax)
if cmap==None:
cmap = jetFill(0.8)
ipg.setColorModel2(cmap)
if clab:
cbar = addColorBar(sf,clab,cint)
ipg.addColorMap2Listener(cbar)
sf.world.addChild(ipg)
if cbar:
cbar.setWidthMinimum(120) # for slides
#cbar.setWidthMinimum(80)
if logs:
wg = wellGroup(logs,curve,wmin,wmax)
sf.world.addChild(wg)
if sps:
#samples = sps[0],sps[1],sps[2],sps[3]
wg = makeLogPoints(sps,wmin,wmax,cbar)
sf.world.addChild(wg)
if hs:
x1 = readImage(ghfile)
u1 = readImage(gtfile)
hfr = HorizonFromRgt(s1,s2,s3,x1,u1)
for hi in hs:
[xyz,rgb] = hfr.singleHorizon(hi)
tg = TriangleGroup(True,xyz,rgb)
sf.world.addChild(tg)
if surf:
tgs = Triangle()
xyz = tgs.trianglesForSurface(surf,0,n1-1)
tg = TriangleGroup(True,xyz)
sf.world.addChild(tg)
ipg.setSlices(924,224,68)
#ipg.setSlices(n1,0,n3) # use only for subset plots
if cbar:
sf.setSize(837,700)
else:
sf.setSize(700,700) # for slides
#sf.setSize(740,700)
vc = sf.getViewCanvas()
vc.setBackground(Color.WHITE)
ov = sf.getOrbitView()
zscale = 0.8*max(n2*d2,n3*d3)/(n1*d1)
ov.setAxesScale(1.0,1.0,zscale)
ov.setScale(1.1)
ov.setAzimuthAndElevation(235,25)
ov.setWorldSphere(BoundingSphere(BoundingBox(f3,f2,f1,l3,l2,l1)))
ov.setTranslate(Vector3(0.0,0.05,0.08))
sf.setVisible(True)
if png and pngDir:
sf.paintToFile(pngDir+png+".png")
if cbar:
cbar.setFont(Font("Arial", Font.PLAIN, 36)) #for slides
#cbar.setFont(Font("Arial", Font.PLAIN, 24)) #for print
cbar.setInterval(0.5)
cbar.paintToPng(720,1,pngDir+png+"cbar.png")
def plot3(f,g=None,cmin=None,cmax=None,cmap=None,clab=None,cint=None,
slices=None,surf=None,hs=None,logs=None,sps=None,curve=None,
wmin=0,wmax=0,png=None):
n1,n2,n3 = s1.count,s2.count,s3.count
d1,d2,d3 = s1.delta,s2.delta,s3.delta
f1,f2,f3 = s1.first,s2.first,s3.first
l1,l2,l3 = s1.last,s2.last,s3.last
sf = SimpleFrame(AxesOrientation.XRIGHT_YOUT_ZDOWN)
cbar = None
if g==None:
ipg = sf.addImagePanels(s1,s2,s3,f)
if cmap!=None:
ipg.setColorModel(cmap)
if wmin!=0 and wmax!=0:
ipg.setClips(wmin,wmax)
if cmin!=None and cmax!=None:
ipg.setClips(cmin,cmax)
else:
#ipg.setClips(-2.0,2.0)
ipg.setClips(-2.0,1.5) # use for subset plots
if clab:
cbar = addColorBar(sf,clab,cint)
ipg.addColorMapListener(cbar)
else:
ipg = ImagePanelGroup2(s1,s2,s3,f,g)
ipg.setClips1(-2.0,1.5)
if cmin!=None and cmax!=None:
ipg.setClips2(cmin,cmax)
if cmap==None:
cmap = jetFill(1.0)
ipg.setColorModel2(cmap)
if clab:
cbar = addColorBar(sf,clab,cint)
ipg.addColorMap2Listener(cbar)
sf.world.addChild(ipg)
if cbar:
cbar.setWidthMinimum(120) # for slides
#cbar.setWidthMinimum(80)
if logs:
wg = wellGroup(logs,curve,wmin,wmax)
sf.world.addChild(wg)
if sps:
#samples = sps[0],sps[1],sps[2],sps[3]
wg = makeLogPoints(sps,wmin,wmax,cbar)
sf.world.addChild(wg)
if hs:
x1 = readImage(ghfile)
u1 = readImage(gtfile)
hfr = HorizonFromRgt(s1,s2,s3,x1,u1)
for hi in hs:
[xyz,rgb] = hfr.singleHorizon(hi)
tg = TriangleGroup(True,xyz,rgb)
sf.world.addChild(tg)
if surf:
tgs = Triangle()
xyz = tgs.trianglesForSurface(surf,0,n1-1)
tg = TriangleGroup(True,xyz)
sf.world.addChild(tg)
#ipg.setSlices(924,202,26)
#ipg.setSlices(834,202,26)
ipg.setSlices(834,120,110)
#ipg.setSlices(n1,0,n3) # use only for subset plots
if cbar:
sf.setSize(837,700)
else:
sf.setSize(700,700) # for slides
#sf.setSize(740,700)
vc = sf.getViewCanvas()
vc.setBackground(Color.WHITE)
ov = sf.getOrbitView()
zscale = 0.9*max(n2*d2,n3*d3)/(n1*d1)
ov.setAxesScale(1.0,1.0,zscale)
ov.setScale(1.1)
ov.setAzimuthAndElevation(125,15)
ov.setWorldSphere(BoundingSphere(BoundingBox(f3,f2,f1,l3,l2,l1)))
ov.setTranslate(Vector3(0.0,0.05,0.08))
sf.setVisible(True)
if png and pngDir:
sf.paintToFile(pngDir+png+".png")
if cbar:
cbar.setFont(Font("Arial", Font.PLAIN, 36)) #for slides
#cbar.setFont(Font("Arial", Font.PLAIN, 24)) #for print
cbar.setInterval(cint)
cbar.paintToPng(720,1,pngDir+png+"cbar.png")
def wellGroup(logs,curve,cmin=0,cmax=0,cbar=None):
print "number of logs =",len(logs)
#s1 = Sampling(2762,0.002,0.000)
#s2 = Sampling(357,0.025,0.000)
#s3 = Sampling(161,0.025,0.000)
fl,x1l,x2l,x3l = [],[],[],[]
for log in logs:
samples = log.getSamples(curve,s1,s2,s3)
f,x1,x2,x3 = samples
fl.append(f)
x1l.append(x1)
x2l.append(x2)
x3l.append(x3)
samples = fl,x1l,x2l,x3l
lg = makeLogPoints(samples,cmin,cmax,cbar)
return lg
def makeLogPoints(samples,cmin,cmax,cbar):
lg = Group()
fl,x1l,x2l,x3l = samples
for i,f in enumerate(fl):
f = fl[i]
x1 = x1l[i]
x2 = x2l[i]
x3 = x3l[i]
pg = makePointGroup(f,x1,x2,x3,cmin,cmax,cbar)
lg.addChild(pg)
return lg
def makePoint(f,x1,x2,x3,cmin,cmax,cbar):
xyz = zerofloat(3)
xyz[0],xyz[1],xyz[2]=x3,x2,x1
rgb = None
if cmin<cmax:
cmap = ColorMap(cmin,cmax,ColorMap.JET)
if cbar:
cmap.addListener(cbar)
rgb = cmap.getRgbFloats([f])
pg = PointGroup(xyz,rgb)
ps = PointState()
ps.setSize(4)
ps.setSmooth(False)
ss = StateSet()
ss.add(ps)
pg.setStates(ss)
return pg
def makePointGroup(f,x1,x2,x3,cmin,cmax,cbar):
n = len(x1)
xyz = zerofloat(3*n)
copy(n,0,1,x3,0,3,xyz)
copy(n,0,1,x2,1,3,xyz)
copy(n,0,1,x1,2,3,xyz)
rgb = None
if cmin<cmax:
cmap = ColorMap(cmin,cmax,ColorMap.JET)
if cbar:
cmap.addListener(cbar)
rgb = cmap.getRgbFloats(f)
pg = PointGroup(xyz,rgb)
ps = PointState()
ps.setSize(4)
ps.setSmooth(False)
ss = StateSet()
ss.add(ps)
pg.setStates(ss)
return pg
def jetFill(alpha):
return ColorMap.setAlpha(ColorMap.JET,alpha)
def jetFillExceptMin(alpha):
a = fillfloat(alpha,256)
a[0] = 0.0
return ColorMap.setAlpha(ColorMap.JET,a)
def jetRamp(alpha):
return ColorMap.setAlpha(ColorMap.JET,rampfloat(0.0,alpha/256,256))
def bwrFill(alpha):
return ColorMap.setAlpha(ColorMap.BLUE_WHITE_RED,alpha)
def bwrNotch(alpha):
  """Blue-white-red colormap with alpha notched at the middle.

  Alpha ramps linearly from `alpha` at both ends of the map down to ~0 at
  the center, so near-zero (white) values become transparent.
  """
  a = zerofloat(256)
  for i in range(len(a)):
    if i<128:
      a[i] = alpha*(128.0-i)/128.0
    else:
      a[i] = alpha*(i-127.0)/128.0
  return ColorMap.setAlpha(ColorMap.BLUE_WHITE_RED,a)
def hueFill(alpha):
return ColorMap.getHue(0.0,1.0,alpha)
def hueFillExceptMin(alpha):
a = fillfloat(alpha,256)
a[0] = 0.0
return ColorMap.setAlpha(ColorMap.getHue(0.0,1.0),a)
def addColorBar(frame,clab=None,cint=None):
  """Attach a ColorBar labeled clab (with tic interval cint) to the east side of frame."""
  cbar = ColorBar(clab)
  if cint:
    cbar.setInterval(cint)
  cbar.setFont(Font("Arial",Font.PLAIN,32)) # size by experimenting
  # bug fix: removed bare "cbar.setWidthMinimum" — an attribute reference
  # without a call, a silent no-op; callers set the width minimum themselves.
  cbar.setBackground(Color.WHITE)
  frame.add(cbar,BorderLayout.EAST)
  return cbar
def convertDips(ft):
  """Convert fault-dip values for display; 0.2 (=1/5) accounts for 5:1 vertical exaggeration."""
  return FaultScanner.convertDips(0.2,ft) # 5:1 vertical exaggeration
#############################################################################
run(main)
| [
"xinwucwp@gmail.com"
] | xinwucwp@gmail.com |
bcf5a5f0b3af1dc80a764845106316ccaa7392fc | 3939c1fc17fc5ad77b28c3da3b18ac3aeafc0fa8 | /neighbour/migrations/0009_auto_20210726_2340.py | f38682d79eed9cc3ad4caba45fb83916b383eb65 | [
"MIT"
] | permissive | ObadiaH123/neighbour | 1bca1ba7e9ecbc330f4c8b9337b05bdba1b0e1da | e30085236ddb2048f751400805784241eec44d9f | refs/heads/master | 2023-06-27T19:08:46.054615 | 2021-07-26T22:34:30 | 2021-07-26T22:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # Generated by Django 3.1.7 on 2021-07-26 20:40
import cloudinary.models
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Emergency model and add a required image to HealthCenter."""

    dependencies = [
        ('neighbour', '0008_auto_20210726_2221'),
    ]

    operations = [
        # New Emergency model: a name plus a Cloudinary-hosted image.
        migrations.CreateModel(
            name='Emergency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, null=True)),
                ('image', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
            ],
        ),
        # Non-null image on HealthCenter; timezone.now is only a one-off
        # default used to populate existing rows (preserve_default=False),
        # so it is not kept on the model field afterwards.
        migrations.AddField(
            model_name='healthcenter',
            name='image',
            field=cloudinary.models.CloudinaryField(default=django.utils.timezone.now, max_length=255, verbose_name='image'),
            preserve_default=False,
        ),
    ]
| [
"ronohkelvin99@gmail.com"
] | ronohkelvin99@gmail.com |
0db13be2b7ae900ae80f4762afe3fbd131182e3b | d4a4b42fc7ce9f88f241f884d1b8f9f227c92b33 | /examples/neq/loopunreach300/loopunreach300_1.py | da08d79d547395ba0c9a2859fa093f5647795141 | [] | no_license | Client-Specific-Equivalence-Checker/CLEVER | a5cabcc6a127c80b7e8193f885bca8e5cf64b204 | 44a9027be67dcd94e0b4a30eb3cb5e7aeb0ab163 | refs/heads/master | 2022-02-13T04:23:29.654278 | 2019-02-06T17:10:48 | 2019-02-06T17:10:48 | 114,670,863 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def lib(a, b):
c = 0
if a < 0:
i = 1
while i <= a:
c += b
i += 1
return c
def loopunreach300(x):
    """Delegate to lib(x, 300) for x in [273, 327); return 0 otherwise."""
    return lib(x, 300) if 273 <= x < 327 else 0
| [
"fmorarocha@gmail.com"
] | fmorarocha@gmail.com |
c1c6117e9c201aada7c6b6791f1d3dbfd252238e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /N5JhvabK6DTD5t6gS_15.py | f6baaf716744a9bad75f70f01fe4407df3c0b5a2 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
import string
def markdown(symb):
    """Return a formatter that wraps every word matching `word` in `symb` markers.

    Matching is case-insensitive and ignores punctuation attached to the
    token; the original token (punctuation included) is what gets wrapped.
    """
    def formatter(sentence, word):
        target = word.lower()
        strip_punct = str.maketrans('', '', string.punctuation)
        pieces = []
        for token in sentence.split(' '):
            if token.lower().translate(strip_punct) == target:
                pieces.append(symb + token + symb)
            else:
                pieces.append(token)
        return ' '.join(pieces)
    return formatter
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
2a06d8169969719f56f6c0c63ed1ca5648bc7854 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02959/s028217014.py | 14c06dba3326e2507be8d625653369abe7bef9af | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
ans = 0
for i in range(N+1):
if i >= 1:
p = min(A[i], B[i-1])
ans += p
A[i] -= p
if i < N:
p = min(A[i], B[i])
ans += p
B[i] -= p
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2c80a63e8968899fc9de36d41cef0107e0562572 | ee4db47ccecd23559b3b6f3fce1822c9e5982a56 | /Build Chatbots/ChunkVerbPhrase.py | a1dfee73cfa4abcf2db55bb45550a1293288388d | [] | no_license | meoclark/Data-Science-DropBox | d51e5da75569626affc89fdcca1975bed15422fd | 5f365cedc8d0a780abeb4e595cd0d90113a75d9d | refs/heads/master | 2022-10-30T08:43:22.502408 | 2020-06-16T19:45:05 | 2020-06-16T19:45:05 | 265,558,242 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from nltk import RegexpParser
from pos_tagged_oz import pos_tagged_oz
from vp_chunk_counter import vp_chunk_counter
# define verb phrase chunk grammar here
# VP = verb, optional determiner, any adjectives, a noun, optional adverb
chunk_grammar = "VP: {<VB.*><DT>?<JJ>*<NN><RB.?>?}"
#chunk_grammar = "VP: {<DT>?<JJ>*<NN><VB.*><RB.?>?}"
# create RegexpParser object here
chunk_parser = RegexpParser(chunk_grammar)
# create a list to hold verb-phrase chunked sentences
vp_chunked_oz = list()
# create for loop through each pos-tagged sentence in pos_tagged_oz here
for pos_tagged_sentence in pos_tagged_oz:
  # chunk each sentence and append to vp_chunked_oz here
  vp_chunked_oz.append(chunk_parser.parse(pos_tagged_sentence))
# store and print the most common vp-chunks here
most_common_vp_chunks = vp_chunk_counter(vp_chunked_oz)
print(most_common_vp_chunks)
"oluchukwuegbo@gmail.com"
] | oluchukwuegbo@gmail.com |
0e4aaae67303557ecd576180c2a28859058ec15e | 0cb38adedbe3a5192076de420e1aa0fd10ae3311 | /returned_items/urls.py | 6100088eab96c5170acd4668bd1525d0d7c18808 | [] | no_license | fogcitymarathoner/rma | 73ada816b98f068b6c00b2e1fcf39461259453fa | 133d6026f99820d0702f0578b8a3b4574671f888 | refs/heads/master | 2021-01-11T00:32:47.797673 | 2016-10-10T18:34:54 | 2016-10-10T18:35:11 | 70,516,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from django.conf.urls import patterns, include, url
from returned_items.views import index
from returned_items.views import move_items
from returned_items.views import ReturnedItemCreateView
from returned_items.views import ReturnedItemUpdateView
from returned_items.views import ReturnedItemDeleteView
from returned_items.views import move_items_confirm
from django.contrib.auth.decorators import login_required
# URL routes for the returned-items app (legacy Django `patterns()` API).
# All class-based views are gated behind authentication via login_required;
# the function views (index, move_items, move_items_confirm) are not —
# NOTE(review): confirm whether that asymmetry is intentional.
urlpatterns = patterns('',
    # List all returned items.
    url(r'^$', index, name='list_returned_items'),
    # Bulk-move flow: selection step, then confirmation step.
    url(r'move_items$', move_items, name='move_returned_items'),
    url(r'move_items_confirm$', move_items_confirm, name='confirm_move_items'),
    # CRUD on a single returned item, keyed by numeric id.
    url(r'create/(?P<id>\d+)$', login_required(ReturnedItemCreateView.as_view()), name='create_returned_item'),
    url(r'edit/(?P<id>\d+)$', login_required(ReturnedItemUpdateView.as_view()), name='edit_returned_item'),
    url(r'delete/(?P<id>\d+)$', login_required(ReturnedItemDeleteView.as_view()), name='delete_returned_item'),
    )
| [
"marc@fogtest.com"
] | marc@fogtest.com |
4ecdb5f970f3ac775afb8cb4bbf3db8350538c59 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_tb.py | d43de208d660ddec2bbd491b4d2a8ecbfce158ae | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py |
#calss header
class _TB():
def __init__(self,):
self.name = "TB"
self.definitions = [u'abbreviation for tuberculosis ', u'written abbreviation for terabyte ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
fd357530dfd0ab6b2300482b496fcc90edc6ae81 | 1491bc7c6c1a8e025f84e7ceaeb389f109b3d37c | /Eapp/modals/product.py | e04b28b97bcb9f6ce8df74429d25969063f27c9c | [] | no_license | Taraltinu/Second-project | 47516dbce9255e0b0a9452accce178e7a2a9ec55 | e052457afb1559b572331f5e62840f78d5b07b07 | refs/heads/master | 2022-12-29T18:15:19.251268 | 2020-10-02T17:40:20 | 2020-10-02T17:40:20 | 300,688,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | from django.db import models
from django.contrib.auth.models import User
from Eapp.modals.category import CategoryModel
class ProductModel(models.Model):
    """A product listed by a seller, belonging to one category."""
    # The user account that listed this product.
    seller = models.ForeignKey(User,on_delete=models.CASCADE)
    product_name = models.CharField(max_length=250)
    # Category the product is filed under.
    product_Cat = models.ForeignKey(CategoryModel,on_delete=models.CASCADE)
    # NOTE(review): prices are stored as CharField, not DecimalField, so no
    # numeric validation or arithmetic is possible at the DB level — confirm
    # whether this is intentional.
    product_price = models.CharField(max_length=10)
    sale_price = models.CharField(max_length=10)
    # Uploaded images are bucketed by year/month/day.
    product_image = models.ImageField(upload_to="product/%y/%m/%d")
    description = models.TextField()
    quality = models.CharField(max_length=250,default="")
    size = models.FloatField( default=0)
    color = models.CharField(max_length=50,default="")
    # Set once when the row is first created.
    add_date = models.DateTimeField(auto_now_add=True,null=True,blank=True)
| [
"tinu1316@gmail.com"
] | tinu1316@gmail.com |
6abdc35a733f4947b3ff36352ec09f4ccde3faeb | a1cbf221a6befed3891d75c69e2a546effd2499d | /payroll/models.py | 12882e648d086a944362c8df582726c35acbbf23 | [] | no_license | Coder339/V-django-newCRM | 9a93efbb252ba814241076ece17088af8dd15935 | 2182266204f54d301b7c087a99627d441e00fe54 | refs/heads/master | 2022-12-24T15:12:47.081949 | 2020-08-24T12:15:13 | 2020-08-24T12:15:13 | 247,274,031 | 0 | 2 | null | 2022-12-08T04:19:35 | 2020-03-14T12:39:13 | Python | UTF-8 | Python | false | false | 2,281 | py | from django.db import models
from authentication.models import EmployeeProfile
class EmployeePackage(models.Model): # to be send
    """Salary package assigned to an employee: pay amount, payment details
    and the leave allowances that monthly payroll is computed against."""
    # Commented-out month choices kept from an earlier salaryMonth design:
    # months = (
    #     ('JAN','JAN'),('FEB','FEB'),('MAR','MAR'),('APR','APR'),
    #     ('MAY','MAY'),('JUN','JUN'),('JULY','JULY'),('AUG','AUG'),
    #     ('SEP','SEP'),('OCT','OCT'),('NOV','NOV'),('DEC','DEC'),
    # )
    Name = models.CharField(max_length=20,null=True)
    # Owning employee; default=1 points at an existing seed profile —
    # NOTE(review): confirm profile id 1 always exists.
    empId = models.ForeignKey(EmployeeProfile,on_delete=models.CASCADE,null=True,default = 1)
    packageId = models.CharField(max_length=20,null=True)
    # packageId = models.ForeignKey(SalaryPackage,on_delete=models.CASCADE,null=True,editable=False)
    salary = models.IntegerField() # paid_amount
    # salaryMonth = models.CharField(max_length=20,choices=months,null=True)
    dateOfPayment = models.DateField(null=True)
    modeOfPayment = models.CharField(max_length=10)
    # Leave allowances used when deriving each MonthlySalary row.
    unpaid_leaves_allowed = models.PositiveIntegerField()
    paid_leaves_allowed = models.PositiveIntegerField()
    comments = models.CharField(max_length=100,null=True)
    def __str__(self):
        # Display name of the package (CharField, so already a str).
        return self.Name
    class Meta:
        verbose_name_plural = 'employeeSalary'
class MonthlySalary(models.Model): #dynamic
    """One payroll row per employee per month, derived from the employee's
    salary package, attendance (active/working days) and leave counts."""
    # userId = models.CharField(max_length=20, primary_key=True)
    EmpId = models.ForeignKey(EmployeeProfile,on_delete=models.CASCADE,null=True)
    salaryMonth = models.DateField(null=True)
    # Package the amounts were computed from; fixed after creation.
    salaryId = models.ForeignKey(EmployeePackage, on_delete=models.CASCADE,editable=False,null=True)
    unpaid_leaves = models.PositiveIntegerField(null=True)
    paid_leaves = models.PositiveIntegerField(null=True)
    activeDays = models.PositiveIntegerField()
    workingDays = models.PositiveIntegerField()
    # paymentReceipt = models.ForeignKey(UserPaymentReceipt, on_delete=models.CASCADE)
    total_Salary_Amount = models.PositiveIntegerField() # according to no. of days spent
    def __str__(self):
        # BUG FIX: __str__ must return a str. The original returned
        # self.EmpId, a related EmployeeProfile instance (or None), which
        # raises TypeError in the admin/shell under Python 3.
        return str(self.EmpId)
    class Meta:
        verbose_name_plural = 'monthlySalary'
| [
"amanpreet.leanvia@gmail.com"
] | amanpreet.leanvia@gmail.com |
9d52bc51d884bd0bd422bf9fc7be75aa01e5af19 | 1538320b5419539879c76f923206753fc0746b4a | /proteus/tests/SWFlows/dam3Bumps.py | dc0732ed3e744a0559ff73677e586e6efa738a5a | [
"MIT"
] | permissive | dloney/proteus | 829169228221e2ca8bffad2c518d8d858da6af48 | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | refs/heads/master | 2020-06-30T00:05:53.816553 | 2019-08-01T07:41:15 | 2019-08-01T07:41:15 | 200,662,475 | 0 | 0 | MIT | 2019-08-05T13:38:18 | 2019-08-05T13:38:17 | null | UTF-8 | Python | false | false | 3,481 | py | from __future__ import division
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
from proteus.mprans import SW2D
from proteus.mprans import SW2DCV
from proteus.Domain import RectangularDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
import proteus.SWFlows.SWFlowProblem as SWFlowProblem
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
# Command-line/context options for this simulation case:
# each tuple is (name, default, help text).
opts= Context.Options([
    ('sw_model',0,"sw_model = {0,1} for {SWEs,DSWEs}"),
    ("final_time",3.0,"Final time for simulation"),
    ("dt_output",1.0,"Time interval to output solution"),
    ("refinement",2,"Level of refinement"),
    ("cfl",0.33,"Desired CFL restriction"),
    ("reflecting_BCs",True,"Use reflecting BCs")
    ])
###################
# DOMAIN AND MESH #
###################
# Rectangular domain: 75 x 30 (units per the problem setup).
L=(75.0,30.0)
refinement = opts.refinement
domain = RectangularDomain(L=L)
# CREATE REFINEMENT #
# Structured grid: nnx nodes in x (doubles per refinement level),
# roughly half as many in y; `he` is the resulting element size in x.
nnx0=6
nnx = (nnx0-1)*(2**refinement)+1
nny = old_div((nnx-1),2)+1
he = old_div(L[0],float(nnx-1))
# Triangle mesh options with max area tied to he (used if unstructured).
triangleOptions="pAq30Dena%f" % (0.5*he**2,)
######################
##### BATHYMETRY #####
######################
# Physical constants for this case. NOTE(review): h0/a/B/p/s appear to be
# carried over from a parabolic-bowl template and only k (friction) and g
# are used below — confirm before removing.
h0=10
a=3000
B=5
k=0.002
g = SWFlowProblem.default_physical_parameters['gravity']
p = old_div(np.sqrt(8*g*h0),a)
s = old_div(np.sqrt(p**2 - k**2),2.)
mannings = k
def bathymetry_function(X):
    """Bed elevation at point X = (x, y): a flat bottom with three
    conical bumps (two of height 1 near x = 30, one of height 3 at
    (47.5, 15)). Negative cone values are clipped by the flat bed."""
    x, y = X[0], X[1]
    cones = (
        1 - 1. / 8 * np.sqrt((x - 30) ** 2 + (y - 6) ** 2),
        1 - 1. / 8 * np.sqrt((x - 30) ** 2 + (y - 24) ** 2),
        3 - 3. / 10 * np.sqrt((x - 47.5) ** 2 + (y - 15) ** 2),
    )
    bed = 0.
    for cone in cones:
        bed = np.maximum(bed, cone)
    return bed
##############################
##### INITIAL CONDITIONS #####
##############################
class water_height_at_t0(object):
    """Initial water height: a flat free surface at elevation 1.875
    upstream of x = 16 (the dam), dry bed elsewhere."""

    def uOfXT(self, X, t):
        # Free-surface elevation: reservoir level behind the dam, zero beyond it.
        eta = 1.875 if X[0] <= 16 else 0.
        # Depth is surface minus bed, clipped so dry regions stay at zero.
        return max(eta - bathymetry_function(X), 0.)
class Zero(object):
    """Identically-zero field; used as the initial x/y momentum."""

    def uOfXT(self, x, t):
        # Same value for every point and time.
        return 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(opts.final_time,dt_output=opts.dt_output)
# Still water plus the upstream reservoir; momentum starts at rest.
initialConditions = {'water_height': water_height_at_t0(),
                     'x_mom': Zero(),
                     'y_mom': Zero()}
# No Dirichlet conditions anywhere; walls come from reflectingBCs below.
boundaryConditions = {'water_height': lambda x,flag: None,
                      'x_mom': lambda x,flag: None,
                      'y_mom': lambda x,flag: None}
# NOTE(review): cfl is hard-coded to 0.33 and sw_model to 0 here, so the
# opts.cfl and opts.sw_model options above are ignored — confirm intent.
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=0,
                                              cfl=0.33,
                                              outputStepping=outputStepping,
                                              structured=True,
                                              he=he,
                                              nnx=nnx,
                                              nny=nny,
                                              domain=domain,
                                              initialConditions=initialConditions,
                                              boundaryConditions=boundaryConditions,
                                              reflectingBCs=opts.reflecting_BCs,
                                              bathymetry=bathymetry_function)
mySWFlowProblem.physical_parameters['LINEAR_FRICTION']=0
# NOTE(review): this overrides the k=0.002 assigned to `mannings` earlier.
mySWFlowProblem.physical_parameters['mannings']=0.02
| [
"cekees@gmail.com"
] | cekees@gmail.com |
05c338ef5fbde0852cfd563177a583f42c08bcd4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/308/85637/submittedfiles/testes.py | b124f9a58f1dbe095c14a4e1a353c59ee692bba3 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Read how many greetings to print; range(n) runs the body exactly n times.
# BUG FIX: the original used range(0, n + 1, 1), which printed n + 1 lines —
# one more than the "quantidade de vezes" the user asked for.
n = int(input("Digite a quantidade de vezes: "))
for _ in range(n):
    print('Olá mundo')
"""
visual = [[' ',' ', ' '], [' ', ' ',' '], [' ', ' ', ' ']]
for i in range(0, 10, 1):
a = str(input('Selecione a posição: '))
if i%2==0:
visual[int(a[0])][int(a[2])]='X'
else:
visual[int(a[0])][int(a[2])]='O'
for i in range (0, 3, 1):
print(str(visual[i][0]) + ' | '+ str(visual[i][1]) + ' | '+ str(visual[i][2]))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
cc2cdf4e5ff6349c49bcbc52e970a773bbc84e63 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/NISS/shamil_v3/fuel_management/wizard/fuel_slice_report.py | 65ec4386e868bb9ae0da2e2aba4d26c188ce0b93 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,407 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2016-2017 NCTR (<http://www.nctr.sd>).
#
##############################################################################
from osv import fields, osv
import time
from datetime import datetime,date,timedelta
from tools.translate import _
class vehicle_report_wiz(osv.osv_memory):
    """Transient (memory-only) wizard collecting the filters used to print
    the fuel slice report: date range, department, vehicle category, model
    year and process type."""
    _name = "fuel.slice.report.wiz"
    _description = "Fuel Slice Report Wizard"
    def _selection_year(self, cr, uid, context=None):
        """
        Build the model-year selection: every year from next year down to 1971.
        @return: list of (year, year) string tuples, newest first
        """
        return [(str(years), str(years)) for years in range(int(datetime.now().year) + 1, 1970, -1)]
    _columns = {
        'date_from': fields.date('Date From'),
        'date_to': fields.date('Date To'),
        'process_type': fields.selection([('modify','Modify'),('insert','Insert')],'Process Type'),
        'department_id': fields.many2one('hr.department',string='Department'),
        'category_id': fields.many2one('vehicle.category',string='Vehicle Category'),
        'year': fields.selection(_selection_year, 'Model'),
        'included_department': fields.boolean('Includes sub-departments'),
        'company_id': fields.many2one('res.company', 'Company'),
    }
    _defaults = {
        # Default company: the company of the user running the wizard.
        'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, context=c).company_id.id,
        'included_department': False,
    }
    def check_date(self, cr, uid, ids, context=None):
        """
        Constraint: ensure the selected start date is not after the end date.
        @return: boolean True, or raise if the range is invalid
        """
        for rec in self.browse(cr, uid, ids, context=context):
            if rec.date_from > rec.date_to:
                raise osv.except_osv(_('ERROR'), _('The Start Date Must Be Before or Equal To the End Date'))
        return True
    _constraints = [
        (check_date, '', []),
    ]
    def print_report(self, cr, uid, ids, context=None):
        """
        Launch the fuel slice report with this wizard's field values.
        @return: ir.actions.report.xml action dict for 'fuel_slice_report'
        """
        datas = {}
        if context is None:
            context = {}
        data = self.read(cr, uid, ids)[0]
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'vehicle.fuel.slice',
            'form': data
        }
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'fuel_slice_report',
            'datas':datas,
        }
        # NOTE(review): everything below is unreachable (after the return) —
        # a dead draft from the vehicle-report variant; candidate for removal.
        #if data['total_report'] == True:
        '''if data['report_type'] in ['total_report']:
            return {
                'type': 'ir.actions.report.xml',
                'report_name': 'total_vehicle_report',
                'datas':datas,
            }
        elif data['report_type'] in ['total_number_report']:
            return {
                'type': 'ir.actions.report.xml',
                'report_name': 'total_vehicle_number_report',
                'datas':datas,
            }
        else:
            return {
                'type': 'ir.actions.report.xml',
                'report_name': 'vehicle_report',
                'datas':datas,
            }'''
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
86ca81dd397b3c5d0ab8e21eff47015b9d41eb08 | 629090051b975b5814b4b48e2cb2c784fa6705e4 | /ossdbtoolsservice/disaster_recovery/contracts/backup.py | 6ca7b7260d3d8d9ca0367e9874f41da05f6cd99f | [
"MIT"
] | permissive | microsoft/pgtoolsservice | 3d3597821c7cae1d216436d4f8143929e2c8a82a | 24a048226f7f30c775bbcbab462d499a465be5da | refs/heads/master | 2023-08-28T12:55:47.817628 | 2023-08-25T22:47:53 | 2023-08-25T22:47:53 | 80,681,087 | 68 | 35 | NOASSERTION | 2023-09-13T21:46:55 | 2017-02-02T01:00:33 | Python | UTF-8 | Python | false | false | 14,673 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Module containing contracts for backup operations"""
import enum
from typing import List # noqa
from ossdbtoolsservice.capabilities.contracts import CategoryValue, FeatureMetadataProvider, ServiceOption
from ossdbtoolsservice.hosting import IncomingMessageConfiguration
from ossdbtoolsservice.serialization import Serializable
class BackupParams(Serializable):
    """Parameters for a backup request (deserialized from the JSON-RPC message)."""
    @classmethod
    def get_child_serializable_types(cls):
        # Maps the nested 'backup_info' attribute to its concrete type
        # so the deserializer can build it recursively.
        return {'backup_info': BackupInfo}
    def __init__(self):
        # URI identifying the connection/editor the backup runs against.
        self.owner_uri: str = None
        # The backup options payload (see BackupInfo).
        self.backup_info: BackupInfo = None
        # Execution mode supplied by the host; type not constrained here.
        self.task_execution_mode = None
class BackupInfo(Serializable):
    """Options for a requested backup.

    Field names mirror pg_dump flags; most are passed through verbatim by the
    disaster recovery service (the option name becomes the flag name)."""
    @classmethod
    def get_child_serializable_types(cls):
        # 'type' deserializes into the BackupType enum.
        return {'type': BackupType}
    @classmethod
    def ignore_extra_attributes(cls):
        # Tolerate unknown keys in the incoming payload.
        return True
    def __init__(self):
        # Output format and destination.
        self.type: BackupType = None
        self.path: str = None
        self.jobs: int = None
        self.compress: int = None
        # What to include/exclude.
        self.data_only: bool = None
        self.blobs: bool = None
        self.clean: bool = None
        self.create: bool = None
        self.encoding: str = None
        self.schema: str = None
        self.exclude_schema: str = None
        self.oids: bool = None
        self.no_owner: bool = None
        self.schema_only: bool = None
        self.superuser: str = None
        self.table: str = None
        self.exclude_table: str = None
        self.no_privileges: bool = None
        # Output formatting / restore behavior flags.
        self.column_inserts: bool = None
        self.disable_dollar_quoting: bool = None
        self.disable_triggers: bool = None
        self.enable_row_security: bool = None
        self.exclude_table_data: str = None
        self.if_exists: bool = None
        self.inserts: bool = None
        self.no_security_labels: bool = None
        self.no_synchronized_snapshots: bool = None
        self.no_tablespaces: bool = None
        self.no_unlogged_table_data: bool = None
        self.quote_all_identifiers: bool = None
        self.section: str = None
        self.serializable_deferrable: bool = None
        self.snapshot: str = None
        self.strict_names: bool = None
        self.use_set_session_authorization: bool = None
class BackupType(enum.Enum):
    """Enum for the type of backups that are supported.

    Values double as the output file extension / pg_dump format keyword."""
    PG_DUMP = 'dump'
    DIRECTORY = 'directory'
    TAR = 'tar'
    PLAIN_TEXT = 'sql'
# Registers the 'backup/backup' JSON-RPC method; its params deserialize into BackupParams.
BACKUP_REQUEST = IncomingMessageConfiguration('backup/backup', BackupParams)
# These options are handled in the disaster recovery service's _perform_backup method. A few have
# special-case handling, but most are handled automatically by using the option's name as the flag
# name and the setting as the value. The BackupInfo contract above has a field per option.
# TODO: Localize the display names and descriptions
BACKUP_OPTIONS = FeatureMetadataProvider(
True,
'backup',
[
ServiceOption(
name='type',
display_name='Backup type',
description='The type of backup to perform',
value_type=ServiceOption.VALUE_TYPE_CATEGORY,
is_required=True,
category_values=[
CategoryValue(
display_name='pg_dump/pg_restore (.dump)',
name='dump'
),
CategoryValue(
display_name='Directory',
name='directory'
),
CategoryValue(
display_name='Archive (.tar)',
name='tar'
),
CategoryValue(
display_name='Plain text (.sql)',
name='sql'
),
],
default_value='sql'
),
ServiceOption(
name='path',
display_name='Output path',
description='The path to the backup file/directory that will be produced',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=True
),
ServiceOption(
name='jobs',
display_name='Number of jobs',
description='The number of parallel jobs to use for the dump',
value_type=ServiceOption.VALUE_TYPE_NUMBER,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='compress',
display_name='Compression level',
description='The compression level (for compressed formats)',
value_type=ServiceOption.VALUE_TYPE_CATEGORY,
is_required=False,
group_name='Advanced',
category_values=[CategoryValue('0', '0'), CategoryValue('1', '1'), CategoryValue('2', '2'), CategoryValue('3', '3'), CategoryValue('4', '4'),
CategoryValue('5', '5'), CategoryValue('6', '6'), CategoryValue('7', '7'), CategoryValue('8', '8'), CategoryValue('9', '9')]
),
ServiceOption(
name='dataOnly',
display_name='Data only',
description='Dump only the data, not the schema',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='blobs',
display_name='Blobs',
description='Include large objects in dump',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='clean',
display_name='Clean',
description='Clean (drop) database objects before recreating',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='create',
display_name='Create',
description='Include commands to create database in dump',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='encoding',
display_name='Encoding',
description='Dump the data in the given encoding',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='schema',
display_name='Schema',
description='Dump the named schema(s) only',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='excludeSchema',
display_name='Exclude schema',
description='Do not dump the named schema(s)',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='oids',
display_name='OIDs',
description='Include OIDs in the dump',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='noOwner',
display_name='No owner',
description='Skip restoration of object ownership in plain-text format',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='schemaOnly',
display_name='Schema only',
description='Dump only the schema, no data',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='superuser',
display_name='Superuser',
description='Superuser user name to use in plain-text format',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='table',
display_name='Table',
description='Dump the named table(s) only',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='excludeTable',
display_name='Exclude table',
description='Do not dump the named table(s)',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='noPrivileges',
display_name='No privileges',
description='Do not dump privileges (grant/revoke)',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='columnInserts',
display_name='Column inserts',
description='Dump data as INSERT commands with column names',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='disableDollarQuoting',
display_name='Disable dollar quoting',
description='Disable dollar quoting; use SQL standard quoting',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='disableTriggers',
display_name='Disable triggers',
description='Disable triggers during data-only restore',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='enable_row_security',
display_name='Enable row security',
description='Dump only content user has access to',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='excludeDataTable',
display_name='Exclude data table',
description='Do not dump data for the named table(s)',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='ifExists',
display_name='Use IF EXISTS',
description='Use IF EXISTS when dropping objects',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='inserts',
display_name='Inserts',
description='Dump data as INSERT commands, rather than COPY',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='noSecurityLabels',
display_name='No security labels',
description='Do not dump security label assignments',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='noSynchronizedSnapshots',
display_name='No synchronized snapshots',
description='Do not use synchronized snapshots in parallel jobs',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='noTablespaces',
display_name='No tablespaces',
description='Do not dump tablespace assignments',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='noUnloggedTableData',
display_name='No unlogged table data',
description='Do not dump unlogged table data',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='quoteAllIidentifiers',
display_name='Quote all identifiers',
description='Quote all identifiers, even if not key words',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='section',
display_name='Section',
description='Dump named section (pre-data, data, or post-data)',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='serializableDeferrable',
display_name='Serializable deferrable',
description='Wait until the dump can run without anomalies',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='snapshot',
display_name='Snapshot',
description='Use given snapshot for the dump',
value_type=ServiceOption.VALUE_TYPE_STRING,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='strictNames',
display_name='Strict names',
description='Require table and/or schema include patterns to match at least one entity each',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
),
ServiceOption(
name='useSetSessionAuthorization',
display_name='Use SET SESSION AUTHORIZATION',
description='Use SET SESSION AUTHORIZATION commands instead of ALTER OWNER commands to set ownership',
value_type=ServiceOption.VALUE_TYPE_BOOLEAN,
is_required=False,
group_name='Advanced'
)])
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
94b322e6f3fc89092bd1e3c38f205837a8b9d53b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_sundowns.py | e5bbcdb49d823bb48b71f7c94f658c3e6ef988cd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _SUNDOWNS():
def __init__(self,):
self.name = "SUNDOWNS"
self.definitions = sundown
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['sundown']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
3899deffeb67cb5493f8f39cb8bfcdca7f348d1b | 1ed4e96c20da03fbd3aa4f18d4b004a59d8f89e5 | /Repo/venv/Lib/site-packages/torch/optim/lbfgs.py | 171d214d6abc7cc4e02bd5840c49216bdc2d65d6 | [] | no_license | donhatkha/CS2225.CH1501 | eebc854864dc6fe72a3650f640787de11d4e82b7 | 19d4dd3b11f8c9560d0d0a93882298637cacdc80 | refs/heads/master | 2023-07-19T13:27:17.862158 | 2021-02-08T07:19:05 | 2021-02-08T07:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,724 | py | import torch
from functools import reduce
from .optimizer import Optimizer
def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
# ported from https://github.com/torch/optim/blob/master/polyinterp.lua
# Compute bounds of interpolation area
if bounds is not None:
xmin_bound, xmax_bound = bounds
else:
xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)
# Code for most common case: cubic interpolation of 2 points
# w/ function and derivative values for both
# Solution in this case (where x2 is the farthest point):
# d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
# d2 = sqrt(d1^2 - g1*g2);
# min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
# t_new = min(max(min_pos,xmin_bound),xmax_bound);
d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
d2_square = d1**2 - g1 * g2
if d2_square >= 0:
d2 = d2_square.sqrt()
if x1 <= x2:
min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
else:
min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
return min(max(min_pos, xmin_bound), xmax_bound)
else:
return (xmin_bound + xmax_bound) / 2.
def _strong_wolfe(obj_func,
                  x,
                  t,
                  d,
                  f,
                  g,
                  gtd,
                  c1=1e-4,
                  c2=0.9,
                  tolerance_change=1e-9,
                  max_ls=25):
    """Line search satisfying the strong Wolfe conditions.

    obj_func(x, t, d) must evaluate loss and flat gradient at x + t*d
    (without permanently moving the parameters). x is the starting point,
    t the initial step, d the search direction; f, g, gtd are the loss,
    gradient and directional derivative at t=0. c1/c2 are the Armijo and
    curvature constants. Returns (f_new, g_new, t, ls_func_evals).
    """
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    g = g.clone(memory_format=torch.contiguous_format)
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)

    # Phase 1: bracket an interval containing a point satisfying the Wolfe
    # criteria, growing the step until a bracket is found or max_ls is hit.
    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    done = False
    ls_iter = 0
    while ls_iter < max_ls:
        # check conditions: Armijo violated (or no longer decreasing) means
        # the minimizer is bracketed between the previous and current step.
        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # curvature condition already satisfied: current t is acceptable.
        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]
            bracket_f = [f_new]
            bracket_g = [g_new]
            done = True
            break

        # positive slope: we overshot the minimizer, so it is bracketed.
        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # interpolate a larger trial step within [t + 1% of span, 10*t].
        min_step = t + 0.01 * (t - t_prev)
        max_step = t * 10
        tmp = t
        t = _cubic_interpolate(
            t_prev,
            f_prev,
            gtd_prev,
            t,
            f_new,
            gtd_new,
            bounds=(min_step, max_step))

        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone(memory_format=torch.contiguous_format)
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

    # reached max number of iterations? fall back to bracketing [0, t].
    if ls_iter == max_ls:
        bracket = [0, t]
        bracket_f = [f, f_new]
        bracket_g = [g, g_new]

    # Phase 2 (zoom): we now have a point satisfying the criteria, or
    # a bracket around it. We refine the bracket until we find the
    # exact point satisfying the criteria.
    insuf_progress = False
    # find high and low points in bracket
    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)
    while not done and ls_iter < max_ls:
        # line-search bracket is so small that further refinement is moot.
        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:
            break

        # compute new trial value
        t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0],
                               bracket[1], bracket_f[1], bracket_gtd[1])

        # test that we are making sufficient progress:
        # in case `t` is so close to boundary, we mark that we are making
        # insufficient progress, and if
        #   + we have made insufficient progress in the last step, or
        #   + `t` is at one of the boundary,
        # we will move `t` to a position which is `0.1 * len(bracket)`
        # away from the nearest boundary point.
        eps = 0.1 * (max(bracket) - min(bracket))
        if min(max(bracket) - t, t - min(bracket)) < eps:
            # interpolation close to boundary
            if insuf_progress or t >= max(bracket) or t <= min(bracket):
                # evaluate at 0.1 away from boundary
                if abs(t - max(bracket)) < abs(t - min(bracket)):
                    t = max(bracket) - eps
                else:
                    t = min(bracket) + eps
                insuf_progress = False
            else:
                insuf_progress = True
        else:
            insuf_progress = False

        # Evaluate new point
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
            # Armijo condition not satisfied or not lower than lowest point:
            # shrink the bracket toward the low end.
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied
                done = True
            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
                # old high becomes new low
                bracket[high_pos] = bracket[low_pos]
                bracket_f[high_pos] = bracket_f[low_pos]
                bracket_g[high_pos] = bracket_g[low_pos]
                bracket_gtd[high_pos] = bracket_gtd[low_pos]

            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[low_pos] = gtd_new

    # return stuff: the low end of the final bracket.
    t = bracket[low_pos]
    f_new = bracket_f[low_pos]
    g_new = bracket_g[low_pos]
    return f_new, g_new, t, ls_func_evals
class LBFGS(Optimizer):
"""Implements L-BFGS algorithm, heavily inspired by `minFunc
<https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`.
.. warning::
This optimizer doesn't support per-parameter options and parameter
groups (there can be only one).
.. warning::
Right now all parameters have to be on a single device. This will be
improved in the future.
.. note::
This is a very memory intensive optimizer (it requires additional
``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
try reducing the history size, or use a different algorithm.
Arguments:
lr (float): learning rate (default: 1)
max_iter (int): maximal number of iterations per optimization step
(default: 20)
max_eval (int): maximal number of function evaluations per optimization
step (default: max_iter * 1.25).
tolerance_grad (float): termination tolerance on first order optimality
(default: 1e-5).
tolerance_change (float): termination tolerance on function
value/parameter changes (default: 1e-9).
history_size (int): update history size (default: 100).
line_search_fn (str): either 'strong_wolfe' or None (default: None).
"""
def __init__(self,
params,
lr=1,
max_iter=20,
max_eval=None,
tolerance_grad=1e-7,
tolerance_change=1e-9,
history_size=100,
line_search_fn=None):
if max_eval is None:
max_eval = max_iter * 5 // 4
defaults = dict(
lr=lr,
max_iter=max_iter,
max_eval=max_eval,
tolerance_grad=tolerance_grad,
tolerance_change=tolerance_change,
history_size=history_size,
line_search_fn=line_search_fn)
super(LBFGS, self).__init__(params, defaults)
if len(self.param_groups) != 1:
raise ValueError("LBFGS doesn't support per-parameter options "
"(parameter groups)")
self._params = self.param_groups[0]['params']
self._numel_cache = None
def _numel(self):
if self._numel_cache is None:
self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
return self._numel_cache
def _gather_flat_grad(self):
views = []
for p in self._params:
if p.grad is None:
view = p.new(p.numel()).zero_()
elif p.grad.is_sparse:
view = p.grad.to_dense().view(-1)
else:
view = p.grad.view(-1)
views.append(view)
return torch.cat(views, 0)
def _add_grad(self, step_size, update):
offset = 0
for p in self._params:
numel = p.numel()
# view as to avoid deprecated pointwise semantics
p.add_(update[offset:offset + numel].view_as(p), alpha=step_size)
offset += numel
assert offset == self._numel()
def _clone_param(self):
return [p.clone(memory_format=torch.contiguous_format) for p in self._params]
def _set_param(self, params_data):
for p, pdata in zip(self._params, params_data):
p.copy_(pdata)
def _directional_evaluate(self, closure, x, t, d):
self._add_grad(t, d)
loss = float(closure())
flat_grad = self._gather_flat_grad()
self._set_param(x)
return loss, flat_grad
@torch.no_grad()
def step(self, closure):
"""Performs a single optimization step.
Arguments:
closure (callable): A closure that reevaluates the model
and returns the loss.
"""
assert len(self.param_groups) == 1
# Make sure the closure is always called with grad enabled
closure = torch.enable_grad()(closure)
group = self.param_groups[0]
lr = group['lr']
max_iter = group['max_iter']
max_eval = group['max_eval']
tolerance_grad = group['tolerance_grad']
tolerance_change = group['tolerance_change']
line_search_fn = group['line_search_fn']
history_size = group['history_size']
# NOTE: LBFGS has only global state, but we register it as state for
# the first param, because this helps with casting in load_state_dict
state = self.state[self._params[0]]
state.setdefault('func_evals', 0)
state.setdefault('n_iter', 0)
# evaluate initial f(x) and df/dx
orig_loss = closure()
loss = float(orig_loss)
current_evals = 1
state['func_evals'] += 1
flat_grad = self._gather_flat_grad()
opt_cond = flat_grad.abs().max() <= tolerance_grad
# optimal condition
if opt_cond:
return orig_loss
# tensors cached in state (for tracing)
d = state.get('d')
t = state.get('t')
old_dirs = state.get('old_dirs')
old_stps = state.get('old_stps')
ro = state.get('ro')
H_diag = state.get('H_diag')
prev_flat_grad = state.get('prev_flat_grad')
prev_loss = state.get('prev_loss')
n_iter = 0
# optimize for a max of max_iter iterations
while n_iter < max_iter:
# keep track of nb of iterations
n_iter += 1
state['n_iter'] += 1
############################################################
# compute gradient descent direction
############################################################
if state['n_iter'] == 1:
d = flat_grad.neg()
old_dirs = []
old_stps = []
ro = []
H_diag = 1
else:
# do lbfgs update (update memory)
y = flat_grad.sub(prev_flat_grad)
s = d.mul(t)
ys = y.dot(s) # y*s
if ys > 1e-10:
# updating memory
if len(old_dirs) == history_size:
# shift history by one (limited-memory)
old_dirs.pop(0)
old_stps.pop(0)
ro.pop(0)
# store new direction/step
old_dirs.append(y)
old_stps.append(s)
ro.append(1. / ys)
# update scale of initial Hessian approximation
H_diag = ys / y.dot(y) # (y*y)
# compute the approximate (L-BFGS) inverse Hessian
# multiplied by the gradient
num_old = len(old_dirs)
if 'al' not in state:
state['al'] = [None] * history_size
al = state['al']
# iteration in L-BFGS loop collapsed to use just one buffer
q = flat_grad.neg()
for i in range(num_old - 1, -1, -1):
al[i] = old_stps[i].dot(q) * ro[i]
q.add_(old_dirs[i], alpha=-al[i])
# multiply by initial Hessian
# r/d is the final direction
d = r = torch.mul(q, H_diag)
for i in range(num_old):
be_i = old_dirs[i].dot(r) * ro[i]
r.add_(old_stps[i], alpha=al[i] - be_i)
if prev_flat_grad is None:
prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format)
else:
prev_flat_grad.copy_(flat_grad)
prev_loss = loss
############################################################
# compute step length
############################################################
# reset initial guess for step size
if state['n_iter'] == 1:
t = min(1., 1. / flat_grad.abs().sum()) * lr
else:
t = lr
# directional derivative
gtd = flat_grad.dot(d) # g * d
# directional derivative is below tolerance
if gtd > -tolerance_change:
break
# optional line search: user function
ls_func_evals = 0
if line_search_fn is not None:
# perform line search, using user function
if line_search_fn != "strong_wolfe":
raise RuntimeError("only 'strong_wolfe' is supported")
else:
x_init = self._clone_param()
def obj_func(x, t, d):
return self._directional_evaluate(closure, x, t, d)
loss, flat_grad, t, ls_func_evals = _strong_wolfe(
obj_func, x_init, t, d, loss, flat_grad, gtd)
self._add_grad(t, d)
opt_cond = flat_grad.abs().max() <= tolerance_grad
else:
# no line search, simply move with fixed-step
self._add_grad(t, d)
if n_iter != max_iter:
# re-evaluate function only if not in last iteration
# the reason we do this: in a stochastic setting,
# no use to re-evaluate that function here
with torch.enable_grad():
loss = float(closure())
flat_grad = self._gather_flat_grad()
opt_cond = flat_grad.abs().max() <= tolerance_grad
ls_func_evals = 1
# update func eval
current_evals += ls_func_evals
state['func_evals'] += ls_func_evals
############################################################
# check conditions
############################################################
if n_iter == max_iter:
break
if current_evals >= max_eval:
break
# optimal condition
if opt_cond:
break
# lack of progress
if d.mul(t).abs().max() <= tolerance_change:
break
if abs(loss - prev_loss) < tolerance_change:
break
state['d'] = d
state['t'] = t
state['old_dirs'] = old_dirs
state['old_stps'] = old_stps
state['ro'] = ro
state['H_diag'] = H_diag
state['prev_flat_grad'] = prev_flat_grad
state['prev_loss'] = prev_loss
return orig_loss
| [
"59596379+khado2359@users.noreply.github.com"
] | 59596379+khado2359@users.noreply.github.com |
a9cb150fe24e6d478a7062b79a9926b1c2c792b8 | bd6fd6bb82bf3179a4571c7a2ca3a030f5684c5c | /mundo3-EstruturasCompostas/081-ExtraindoDadosDeUmaLista.py | bad61e35e4d14143637f0d9408833e3179835979 | [
"MIT"
] | permissive | jonasht/CursoEmVideo-CursoDePython3 | b3e70cea1df9f33f409c4c680761abe5e7b9e739 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | refs/heads/master | 2023-08-27T12:12:38.103023 | 2021-10-29T19:05:01 | 2021-10-29T19:05:01 | 276,724,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | lista = []
print('type s to exit\ndigite s para sair')
while 1:
num = input('N: ')
if num == 's': break
else: lista.append(int(num))
lista.reverse()
print('lista reversa', lista)
print('foram digitados', len(lista), ' numeros')
print('numero 5 foi digitado' if 5 in lista else 'sem 5')
#Exercício Python 081:
# Crie um programa que vai ler vários números e colocar em uma lista.
# Depois disso, mostre:
#A) Quantos números foram digitados.
#B) A lista de valores, ordenada de forma decrescente.
#C) Se o valor 5 foi digitado e está ou não na lista. | [
"jhenriquet@outlook.com.br"
] | jhenriquet@outlook.com.br |
1e3ed3f3fd8d5b2c94d100304c32df7f9ac83452 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/72/usersdata/212/39304/submittedfiles/tomadas.py | ef757877e9f3f17521a58f298b71fb444b7e3dab | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CODIGO AQUI
t1=int(input('digite o número de tomadas da régua do integrante 1:'))
t2=int(input('digite o número de tomadas da régua do integrante 2:'))
t3=int(input('digite o número de tomadas da régua do integrante 3:'))
t4=int(input('digite o número de tomadas da régua do integrante 4:'))
nt=(t1-1)+(t2-1)+(t3-1)+t4
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a3fa214725782f13d23188575d3fb3f8a5983142 | c554fc8b12fb893b6782d64e7d7f856742297ca8 | /scripts/draw_tmaps.py | 37b2d17f7d2ad203cf6004b1db7669e031825cf5 | [
"MIT"
] | permissive | AdamWu1979/dwilib | 81d3f746d6575cb77831e36f15404447d536f5f9 | 6655eea21037977ed528b992b3a8471393127b77 | refs/heads/master | 2022-02-15T12:12:04.734050 | 2019-08-28T10:39:18 | 2019-08-28T10:39:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,308 | py | #!/usr/bin/python3
"""Draw some texture maps with focus on lesions."""
# This is for the MedPhys texturepaper.
import argparse
import logging
import numpy as np
import dwi.files
import dwi.mask
import dwi.patient
import dwi.paths
import dwi.plot
from dwi.types import ImageMode, Path, TextureSpec
import dwi.util
def parse_args():
"""Parse command-line arguments."""
p = argparse.ArgumentParser(description=__doc__)
p.add_argument('-v', '--verbose', action='count',
help='increase verbosity')
p.add_argument('-f', '--featlist', default='feats.txt')
p.add_argument('-s', '--samplelist', default='all')
p.add_argument('-o', '--outdir', default='figs')
return p.parse_args()
def show_image(plt, image, colorbar=True, scale=None, **kwargs):
"""Show image."""
d = {}
if scale is not None:
d['vmin'], d['vmax'] = scale
d.update(kwargs)
im = plt.imshow(image, **d)
if colorbar:
dwi.plot.add_colorbar(im, pad_fraction=0, format='')
def show_outline(plt, masks, cmaps=None):
"""Show outline."""
if cmaps is None:
# cmaps = ('coolwarm', 'viridis', 'hot')
# cmaps = ['spring'] * 3
# cmaps = ['rainbow'] * 3
# cmaps = 'spring', 'summer', 'autumn', 'winter'
cmaps = ['Wistia', 'cool_r', 'spring']
assert len(masks) <= len(cmaps)
for mask, cmap in zip(masks, cmaps):
view = np.full_like(mask, np.nan, dtype=np.float)
view = dwi.mask.border(mask, out=view)
d = dict(cmap=cmap, interpolation='nearest', vmin=0, vmax=1, alpha=1.0)
plt.imshow(view, **d)
def get_lesion_mask(masks, slice_index=None):
"""Get unified single-slice lesion mask and index to most relevan slice."""
def max_slices(mask):
"""Return indices of maximum slices."""
counts = [np.count_nonzero(x) for x in mask]
maxcount = max(counts)
return [i for i, c in enumerate(counts) if c == maxcount]
# Use slice with maximum lesion volume.
mask = dwi.util.unify_masks(masks)
centroids = [int(round(np.mean(max_slices(x)))) for x in masks]
centroid = int(round(np.mean(max_slices(mask))))
# centroids = [int(round(dwi.util.centroid(x)[0])) for x in masks]
# centroid = int(round(dwi.util.centroid(mask)[0]))
logging.debug('Lesion centroids (total): %s (%s)', centroids, centroid)
logging.info('Mask shape: %s, centroid: %i, slice: %s', mask.shape,
centroid, slice_index)
if slice_index is None:
slice_index = centroid
mask = mask[slice_index]
return mask, slice_index
def read_lmask(mode, case, scan):
mode = ImageMode(mode)
paths = []
try:
for i in range(1, 4):
path = Path(dwi.paths.mask_path(mode, 'lesion', case, scan,
lesion=i))
if path.exists():
paths.append(path)
except IOError:
pass
masks = [dwi.files.read_mask(x) for x in paths]
# # Manually override slice index.
# slice_indices = {
# (64, '1a', 'T2w-std'): 7,
# (64, '1a', 'T2-fitted'): 5,
# }
# slice_index = slice_indices.get((case, scan, str(mode)))
slice_index = None
lmask, img_slice = get_lesion_mask(masks, slice_index=slice_index)
return lmask, img_slice, [x[img_slice] for x in masks]
def read_pmap(mode, case, scan, img_slice):
mode = ImageMode(mode)
path = dwi.paths.pmap_path(mode, case, scan)
pmap, _ = dwi.files.read_pmap(path, ondisk=True, params=[0])
pmap = pmap[img_slice, :, :, 0]
pmap = dwi.util.normalize(pmap, mode)
return pmap
def read_tmap(mode, case, scan, img_slice, texture_spec):
mode = ImageMode(mode)
path = dwi.paths.texture_path(mode, case, scan, None, 'prostate', 'all', 0,
texture_spec, voxel='all')
# TODO: Kludge to remove `_mbb` from `glcm_mbb`. Filenames don't have it.
t = texture_spec._replace(method=texture_spec.method.split('_')[0])
param = '{t.winsize}-{t.method}({t.feature})'.format(t=t)
tmap, attrs = dwi.files.read_pmap(path, ondisk=True, params=[param])
tscale = tuple(np.nanpercentile(tmap[:, :, :, 0], (1, 99)))
tmap = tmap[img_slice, :, :, 0]
assert param == attrs['parameters'][0]
return tmap, param, tscale
def read_histology(case):
"""Read histology section image."""
from glob import glob
import PIL
pattern = '/mri/hist/pink_images/extracted/{}-*'.format(case)
paths = glob(pattern)
if not paths:
raise IOError('Histology image not found: {}'.format(pattern))
images = [np.array(PIL.Image.open(x)) for x in sorted(paths)]
# If several, concatenate by common width.
min_width = min(x.shape[1] for x in images)
images = [x[:, 0:min_width, :] for x in images]
image = np.concatenate(images)
return image
# def rescale(image, factor, order=0):
# """Rescale."""
# from scipy.ndimage import interpolation
# return interpolation.zoom(image, factor, order=order)
# def rescale_as_float(image, factor):
# """Convert to float, rescale, convert back. Special boolean handling."""
# from scipy.ndimage import interpolation
# typ = image.dtype
# image = image.astype(np.float)
# image = interpolation.zoom(image, factor)
# if typ == np.bool:
# image = dwi.util.asbool(image)
# else:
# image = image.astype(typ)
# return image
rescale = dwi.util.zoom
rescale_as_float = dwi.util.zoom_as_float
def read(mode, case, scan, texture_spec):
"""Read files."""
try:
histology = read_histology(case)
except IOError:
# histology = np.eye(5)
raise
lmask, img_slice, lmasks = read_lmask(mode, case, scan)
pmap = read_pmap(mode, case, scan, img_slice)
tmap, param, tscale = read_tmap(mode, case, scan, img_slice, texture_spec)
bb = dwi.util.bbox(np.isfinite(tmap), 10)
pmap = pmap[bb].copy()
tmap = tmap[bb].copy()
lmask = lmask[bb].copy()
lmasks = [x[bb].copy() for x in lmasks]
# if mode.startswith('DWI'):
# pmap = rescale(pmap, 2)
# tmap = rescale(tmap, 2)
# lmask = rescale_as_float(lmask, 2)
# lmasks = [rescale_as_float(x, 2) for x in lmasks]
# Remove lesion voxels outside prostate.
lmask[np.isnan(tmap)] = False
for mask in lmasks:
lmask[np.isnan(tmap)] = False
pmap_prostate = np.where(np.isfinite(tmap), pmap, np.nan)
tmap_lesion = np.where(lmask, tmap, np.nan)
pmask = np.isfinite(tmap)
images = dict(pmap=pmap, tmap=tmap, lmask=lmask,
pmap_prostate=pmap_prostate, tmap_lesion=tmap_lesion,
pmask=pmask)
assert len({x.shape for x in images.values()} |
{x.shape for x in lmasks}) == 1
images['lmasks'] = lmasks
images['histology'] = histology
images['tscale'] = tscale
return images, param
def plot(images, title, path):
"""Plot."""
pscale = (0, 1)
# tscale = tuple(np.nanpercentile(images['tmap'], (1, 99)))
tscale = images['tscale']
def histology_image(plt):
plt.imshow(images['histology'])
# plt.title('histology section')
# def pmap(plt):
# show_image(plt, images['pmap'], scale=pscale, cmap='gray')
#
def prostate_pmap(plt):
# XXX: Scale in these funcs?
show_image(plt, images['pmap_prostate'], scale=pscale, cmap='gray')
show_outline(plt, images['lmasks'])
def prostate_texture(plt):
show_image(plt, images['tmap'], scale=tscale)
show_image(plt, images['tmap_lesion'])
show_outline(plt, images['lmasks'])
def lesion_texture(plt):
# show_image(plt, images['tmap_lesion'], scale=tscale)
show_image(plt, images['tmap_lesion'])
funcs = [histology_image, prostate_pmap, prostate_texture]
it = dwi.plot.generate_plots(ncols=len(funcs), suptitle=title, path=path)
for i, plt in enumerate(it):
plt.rcParams['savefig.dpi'] = '300'
dwi.plot.noticks(plt)
f = funcs[i]
# plt.title(f.__name__.replace('_', ' '))
plt.title('')
f(plt)
def cases_scans_lesions(mode, samplelist, thresholds=None):
"""Iterate (case_id, scan_id, lesions)."""
mode = ImageMode(mode)
path = dwi.paths.samplelist_path(mode, samplelist)
patients = dwi.files.read_patients_file(path)
dwi.patient.label_lesions(patients, thresholds=thresholds)
return ((p.num, s, p.lesions) for p in patients for s in p.scans)
def main():
"""Main."""
args = parse_args()
logging.basicConfig()
# logging.basicConfig(level=logging.INFO)
# thresholds = None
# thresholds = ('3+3', '3+4')
thresholds = ('3+3',)
blacklist = [] # + [21, 22, 27, 42, 74, 79]
# whitelist = [] # + [23, 24, 26, 29, 64]
whitelist = [26, 42, 64]
for i, line in enumerate(dwi.files.valid_lines(args.featlist)):
words = line.split()
mode = words[0]
texture_spec = TextureSpec(*words[1:])
it = cases_scans_lesions(mode, args.samplelist, thresholds=thresholds)
for c, s, l in it:
if blacklist and c in blacklist:
continue
if whitelist and c not in whitelist:
continue
# if 0 not in (x.label for x in l):
# continue # Exclude if there's no first score group present.
print(i, mode, texture_spec, c, s, l)
try:
images, _ = read(mode, c, s, texture_spec)
except IOError as e:
logging.error(e)
continue
labelnames = ['low', 'high']
lesions = ', '.join('{} {} {}'.format(x.score, x.location,
labelnames[x.label])
for x in l)
d = dict(m=mode, c=c, s=s, l=lesions, tw=texture_spec.winsize,
tm=texture_spec.method, tf=texture_spec.feature,
suffix='png')
title = '{c}-{s} ({l})\n{m} {tm}({tf})-{tw}'.format(**d)
path = '{c:03}-{s}_{m}_{tm}({tf})-{tw}.{suffix}'.format(**d)
plot(images, title, Path(args.outdir, path))
if __name__ == '__main__':
main()
| [
"jupito@iki.fi"
] | jupito@iki.fi |
1024a326cae1b15ef82188bdaf3d59809f4f0394 | 77353aa80cefff9856c423acdb1313f6f7239bc4 | /dictionary/dict_count_item.py | d5935917c6b69b866c300d20b63c95f6c0688023 | [] | no_license | upasek/python-learning | ed21bc555bd684fbb432d852a274dc5a8fff38de | 026c73fe8369254bffb3f78cfd80fb152648cffa | refs/heads/master | 2023-03-18T19:34:11.297607 | 2021-03-12T17:51:54 | 2021-03-12T17:51:54 | 284,996,974 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #Write a Python program to count number of items in a dictionary value that is a list.
dict = {'Alex': ['subj1', 'subj2', 'subj3'], 'David': ['subj1', 'subj2', 'subj3']}
print("Original dictionary :",dict)
count = 0
for values in dict.values():
count += len(values)
print("Number of items in a dictionary value that is a list is :",count)
| [
"kvsupase@gmail.com"
] | kvsupase@gmail.com |
e8e60e921e6a70c094582e28cb843115b58d78a3 | a7901e211b781e55eec8e2ecb1a7ad3b37b82fa8 | /datapackage_pipelines_budgetkey/pipelines/budgetkey/elasticsearch/activity_fetch_extra_data.py | e32cf6f4c3be7c9be95015ad6762010446924dc9 | [] | no_license | inbalme/budgetkey-data-pipelines | 1c3a23d666e4643e6f4e5399a76625ab6129b2e1 | 6b219f4286d29fcaa1ac539187606c77cb397344 | refs/heads/master | 2023-01-09T08:06:39.939480 | 2020-10-20T18:24:13 | 2020-10-20T18:24:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,193 | py | import os
from decimal import Decimal
import json
from sqlalchemy import create_engine
from sqlalchemy.sql import text
import dataflows as DF
engine = None
MAPPINGS = {
'activities/שירות חברתי/משרד החינוך/תכנית קדם עתידים': [
dict(code='0020460242', year=2019, part=100)
],
'activities/שירות חברתי/משרד הבריאות/מכשירי שיקום וניידות – אספקה, התאמה, תיקון וחלוקת מכשירי שיקום וניידות': [
dict(code='0024070311', year=2019, part=50),
dict(code='0024070511', year=2017, part=50),
],
'activities/שירות חברתי/משרד הבריאות/שיקום נכי נפש בקהילה- שירותי שיקום בדיור (הוסטלים)': [
dict(code='0024071460', year=2019, part=100)
]
}
def expand_mappings(mappings):
ret = []
for mapping in mappings:
TITLE_QUERY = text('SELECT title from raw_budget where code=:code and year=:year')
mapping['title'] = engine.execute(TITLE_QUERY, **mapping).fetchone().title
ITEMS_QUERY = text('SELECT year, code, title, net_allocated, net_revised, net_executed from raw_budget where code=:code and title=:title and net_revised > 0')
for r in engine.execute(ITEMS_QUERY, **mapping).fetchall():
ret.append(dict(
code=r.code,
year=r.year,
title=r.title,
net_allocated=r.net_allocated,
net_revised=r.net_revised,
net_executed=r.net_executed,
part=mapping['part']
))
return ret
def fetch_spending(budget_code):
SPENDING = text('''
SELECT volume, executed, currency,
min_year, max_year,
purpose,
'contract-spending/' || publisher_name || '/' || order_id || '/' || budget_code AS cs_item_id,
case when entity_name is null then supplier_name->>0 else entity_name end as supplier,
case when entity_id is null
then ('s?q=' || (supplier_name->>0))
else ('i/org/' || entity_kind || '/' || entity_id) end as entity_item_id,
purchase_method->>0 AS purchase_method,
((publisher->>0) || '/' || (purchasing_unit->>0)) AS purchasing_unit,
order_date, start_date, end_date,
tender_key
FROM contract_spending
WHERE budget_code=:code
ORDER BY volume desc nulls last
''')
return [dict(r) for r in engine.execute(SPENDING, code=budget_code).fetchall()]
def fetch_tenders(**kw):
TENDER = text('''
SELECT publication_id, tender_id, tender_type, tender_type_he,
start_date, claim_date, last_update_date, end_date,
contact, contact_email,
decision, description, reason, regulation, page_title, page_url,
publisher, publisher_id,
entity_id, entity_kind, entity_name, volume, contract_volume
FROM procurement_tenders_processed
WHERE publication_id=:publication_id AND tender_id=:tender_id AND tender_type=:tender_type
''')
return dict(engine.execute(TENDER, **kw).fetchone())
def format_date(x):
if x:
return x.strftime('%d/%m/%Y')
else:
return ''
def fetch_extra_data(row):
if row['doc_id'] in MAPPINGS:
mappings = MAPPINGS[row['doc_id']]
mappings = expand_mappings(mappings)
budget_composition = dict(
title='תקנות תקציביות',
long_title='מהן התקנות התקציביות מהן יוצא התקציב?',
type='template',
template_id='table',
chart=dict(
item=dict(
headers=['שנה', 'קוד', 'כותרת', 'אחוז תרומה לתקציב'],
data=[
[
r['year'],
'.'.join(r['code'][i:i+2] for i in range(2, 10, 2)),
'<a href="/i/budget/{code}/{year}">{title}</a>'.format(**r),
'{part}%'.format(**r),
]
for r in sorted(
mappings,
key=lambda m: '{year}/{code}'.format(**m)
)
]
)
)
)
# Budget
budget = dict()
for mapping in mappings:
year = mapping['year']
budget.setdefault(year, dict(year=year))
for f in ('net_allocated', 'net_revised', 'net_executed'):
if mapping[f] is not None:
budget[year].setdefault(f, 0)
budget[year][f] += int(mapping[f]) * mapping['part'] / 100
budget = sorted(budget.values(), key=lambda x: x['year'])
budget_history = dict(
title='התקציב המוקצה לשירות זה',
long_title='מה היה התקציב שהוקצה לשירות זה במהלך השנים?',
type='plotly',
chart=[
dict(
x=[i['year'] for i in budget],
y=[i.get(measure) for i in budget],
mode='lines+markers',
name=name
)
for measure, name in (
('net_allocated', 'תקציב מקורי'),
('net_revised', 'אחרי שינויים'),
('net_executed', 'ביצוע בפועל')
)
],
layout=dict(
xaxis=dict(
title='שנה',
type='category'
),
yaxis=dict(
title='תקציב ב-₪',
rangemode='tozero',
separatethousands=True
)
)
)
# Spending
budget_codes = list(set(r['code'] for r in mappings))
spending = []
for budget_code in budget_codes:
spending.extend(fetch_spending(budget_code))
top_contracts = dict(
title='התקשרויות',
long_title='אילו התקשרויות רכש משויכות לשירות זה?',
description='100 ההתקשרויות בעלות ההיקף הגדול ביותר מוצגות מתוך {}'.format(len(spending)) if len(spending) > 100 else None,
type='template',
template_id='table',
chart=dict(
item=dict(
headers=['יחידה רוכשת', 'ספק', 'כותרת', 'היקף', 'ביצוע', 'אופן רכישה', 'מועד הזמנה', 'מועד סיום'],
data=[
[
r['purchasing_unit'],
'<a href="/{entity_item_id}">{supplier}</a>'.format(**r),
r['purpose'],
'₪{volume:,.2f}'.format(**r),
'₪{executed:,.2f}'.format(**r),
r['purchase_method'],
format_date(r['order_date']),
format_date(r['end_date']),
]
for r in spending[:100]
]
)
)
)
per_tender_spending = dict()
for r in spending:
if r.get('tender_key'):
tks = r['tender_key']
tks = [tuple(json.loads(t)) for t in tks]
for tk in tks:
dd = per_tender_spending.setdefault(tk, dict(svc_executed=0, svc_volume=0))
dd['svc_executed'] += r['executed']
dd['svc_volume'] += r['volume']
# Suppliers
suppliers_grouped = dict()
for c in spending:
suppliers_grouped.setdefault(c['entity_item_id'], []).append(c)
supplier_table = []
for eid, contracts in suppliers_grouped.items():
supplier_table.append([
'<a href="/{eid}">{supplier}</a>'.format(eid=eid, supplier=max(x['supplier'] for x in contracts)),
'₪{:,.2f}'.format(sum(x['volume'] for x in contracts)),
'₪{:,.2f}'.format(sum(x['executed'] for x in contracts)),
'{}-{}'.format(
min(x['min_year'] for x in contracts if x['min_year']),
max(x['max_year'] for x in contracts if x['max_year']),
)
])
top_suppliers = dict(
title='ספקים',
long_title='מול אילו ספקים קיימות התקשרויות במסגרת שירות זה?',
type='template',
template_id='table',
chart=dict(
item=dict(
headers=[
'שם הספק',
'סך היקף ההתקשרויות',
'סך ביצוע ההתקשרויות',
'תקופת הפעילות',],
data=sorted(supplier_table, key=lambda x: float(x[1][1:].replace(',', '')), reverse=True)
)
)
)
# Tenders
tender_keys = []
for x in spending:
if x['tender_key']:
tk = [tuple(json.loads(t)) for t in x['tender_key']]
tender_keys.extend(tk)
tender_keys = list(set(tender_keys))
tenders = []
for tk in tender_keys:
tender = fetch_tenders(publication_id=tk[0], tender_type=tk[1], tender_id=tk[2])
tender.update(per_tender_spending[tk])
tenders.append(tender)
top_tenders = dict(
title='מכרזים',
long_title='אילו מכרזים משויכים לשירות זה?',
type='template',
template_id='table',
chart=dict(
item=dict(
headers=[
'מפרסם',
'סוג המכרז',
'סטטוס',
'כותרת',
'סך התקשרויות בשירות זה',
'פרסום במנו״ף',
'מועד תחילה',
'מועד סיום',
'לפי תקנה'],
data=[
[
r['publisher'],
r['tender_type_he'],
r['decision'],
'<a href="/i/tenders/{tender_type}/{publication_id}/{tender_id}">{description}</a>'.format(**r),
'₪{svc_volume:,.2f}'.format(**r),
'<a href="{page_url}">{publication_id}</a>'.format(**r),
format_date(r['start_date']),
format_date(r['end_date']),
r['regulation'],
]
for r in sorted(tenders, key=lambda r: r['svc_volume'] or 0, reverse=True)
]
)
)
)
row['charts'] = [
budget_history,
top_tenders,
top_suppliers,
top_contracts,
budget_composition,
]
def flow(*_):
global engine
engine = create_engine(os.environ['DPP_DB_ENGINE'])
return DF.Flow(
DF.add_field(
'charts', 'array', default=[], **{
'es:itemType': 'object',
'es:index': False
}
),
fetch_extra_data
)
if __name__ == '__main__':
os.environ['DPP_DB_ENGINE'] = 'postgresql://readonly:readonly@data-next.obudget.org/budgetkey'
DF.Flow(
[{'doc_id': doc_id} for doc_id in MAPPINGS.keys()],
flow(),
DF.printer()
).process() | [
"adam.kariv@gmail.com"
] | adam.kariv@gmail.com |
5e6c0294c8f9f716e5347736ce9e9ba02b6e07b6 | 09e7c3aab7cd34c6caf701ec7224581f68c246b0 | /zkmanager/filters.py | 2743c148ad9fe1cd62398f3656f2e839414f9f73 | [] | no_license | itimor/kaoqin | d383430b29b67152469cf652690aa1ad4fd3c4eb | 8113f393c5375295494890a5d17fea2d47b30599 | refs/heads/master | 2021-04-15T03:49:19.965242 | 2018-05-03T05:38:24 | 2018-05-03T05:38:24 | 126,454,042 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # -*- coding: utf-8 -*-
# author: itimor
from .models import Punch
from django_filters import rest_framework as filters
from django_filters import DateFromToRangeFilter
class PunchFilter(filters.FilterSet):
create_date = DateFromToRangeFilter()
class Meta:
model = Punch
fields = ['create_date', 'user_id__username']
| [
"kevin@126.com"
] | kevin@126.com |
a5e568a740bc7c1933dca314a4f0ac92f09cf855 | 68ee9027d4f780e1e5248a661ccf08427ff8d106 | /extra/unused/pxfuncs.py | 4a2657a3ec4be89c1f254a3781efc76635d6c2af | [
"MIT"
] | permissive | whyjz/CARST | 87fb9a6a62d39fd742bb140bddcb95a2c15a144c | 4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b | refs/heads/master | 2023-05-26T20:27:38.105623 | 2023-04-16T06:34:44 | 2023-04-16T06:34:44 | 58,771,687 | 17 | 4 | MIT | 2021-03-10T01:26:04 | 2016-05-13T20:54:42 | Python | UTF-8 | Python | false | false | 42,430 | py | #!/usr/bin/python
# Author: Andrew Kenneth Melkonian
# All rights reserved
#import calendar;
#import fileinput;
from makeAzo import *;
import math;
import numpy;
import os;
#from pxfuncs import *;
import pylab;
import re;
import scipy;
import shutil;
import subprocess;
#import sys;
#import time;
def adjustPhase(radar_path, wavelength, width):
radar_dir = ".";
index = radar_path.rfind("/");
if index > -1:
radar_dir = radar_path[ : index];
radar_name = radar_path[index + 1 : ];
new_radar_path = radar_dir + "/new_" + radar_name;
infile = open(radar_path, "rb");
radar_unw_data = scipy.matrix(numpy.fromfile(infile,numpy.float32, -1)).reshape(int(width), -1);
radar_unw_data = radar_unw_data * float(wavelength) / 4 / numpy.pi;
infile.close();
radar_unw_data = scipy.matrix(radar_unw_data,scipy.float32);
radar_unw_data.tofile(new_radar_path);
radar_unw_data = None;
return(new_radar_path);
def ampcor(path, rwin, awin, search_x, search_y, wsamp, numproc):
cwd = os.getcwd();
import glob;
cull_paths = glob.glob(path + "/int*/*_cull.off");
for i in range(0,len(cull_paths)):
cull_name = cull_paths[i].strip()[cull_paths[i].rfind("/")+1:];
cull_dir = cull_paths[i][:cull_paths[i].rfind("/")];
if not re.search("\d{6}",cull_name):
continue;
already_processed=False;
contents=os.listdir(cull_dir);
for item in contents:
if re.search("azo_" + wsamp + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y,item) > -1:
already_processed=True;
break;
if already_processed:
print("\n***** WARNING, " + cull_dir + " contains \"" + item +"\", \"ampcor\" step already run, exiting...\n");
continue;
index1 = re.search("\d{6}",cull_name).start(0);
index2 = re.search("\d{6}",cull_name).end(0);
index3 = re.search("\d{6}",cull_name[index2:]).start(0)+index2;
index4 = re.search("\d{6}",cull_name[index2:]).end(0)+index2;
date2 = cull_name[index1:index2];
date1 = cull_name[index3:index4];
slc1 = path + "/" + date1 + "/" + date1 + ".slc";
if not os.path.exists(slc1):
print("\n***** ERROR, could not find \"" + date1 + ".slc\" in \"" + path + "/" + date1 + "/\"\n");
break;
slc2 = path + "/" + date2 + "/" + date2 + ".slc";
if not os.path.exists(slc2):
print("\n***** ERROR, could not find \"" + date2 + ".slc\" in \"" + path + "/" + date2 + "/\"\n");
break;
slc1_rsc_file = open(slc1 + ".rsc","r");
while 1:
line = slc1_rsc_file.readline();
if not line:
break;
elif line.find("WIDTH") > -1:
width = line.split()[1].strip();
slc1_rsc_file.close();
amp1 = cull_dir + "/" + date1 + ".amp";
amp2 = cull_dir + "/" + date2 + ".amp";
if not os.path.exists(amp1):
cmd = "\ncpx2mag_phs " + slc1 + " " + cull_dir + "/" + date1 + ".amp " + cull_dir + "/" + date1 + ".phs " + width + "\n";
cmd += "\ncp -pr " + slc1 + ".rsc " + cull_dir + "/" + date1 + ".amp.rsc\n";
cmd += "\nrm " + cull_dir + "/" + date1 + ".phs\n";
subprocess.call(cmd,shell=True);
slc2_rsc_file = open(slc2 + ".rsc","r");
while 1:
line = slc2_rsc_file.readline();
if not line:
break;
elif line.find("WIDTH") > -1:
width = line.split()[1].strip();
slc2_rsc_file.close();
if not os.path.exists(amp2):
cmd = "\ncpx2mag_phs " + slc2 + " " + cull_dir + "/" + date2 + ".amp " + cull_dir + "/" + date2 + ".phs " + width + "\n";
cmd += "\ncp -pr " + slc2 + ".rsc " + cull_dir + "/" + date2 + ".amp.rsc\n";
cmd += "\nrm " + cull_dir + "/" + date2 + ".phs\n";
subprocess.call(cmd,shell=True);
cmd = "\ncp -pr azo_real.pl " + cull_dir + "\n";
subprocess.call(cmd,shell=True);
cmd = "\ncd " + cull_dir + "\n";
cmd += "\nperl azo_real.pl " + amp2 + " " + amp1 + " " + cull_name[0:cull_name.rfind(".")] + " " + cull_name[index1:index4] + "_azo_" + wsamp + " " + rwin + " " + awin + " " + search_x + " " + search_y + " " + wsamp + " " + numproc + " &\n";
cmd += "\ncd " + cwd + "\n";
print(cmd);
#subprocess.call(cmd,shell=True);
return;
def makeUNW(path, rwin, awin, search_x, search_y, wsamp, angle, data_type):
    """Compose ampcor offset results and convert them to azimuth/range unw files.

    Walks `path` for *.off results matching the given ampcor window/search
    parameters, concatenates chunked results into a single .off file per
    directory, reads geometry parameters from the matching .off.rsc file and
    calls makeAzo() to produce the azimuth/range .unw files (their .rsc
    metadata is copied from a looked-down slc .rsc).

    For TSX data the per-date mid-swath incidence angle is read from the
    T*X*.xml leader files instead of the supplied `angle`.

    Fixes vs. the original: the three missing-parameter checks for
    FILE_LENGTH / WIDTH / AZIMUTH_PIXEL_SIZE all tested `da_p`; warning
    messages printed `ampoff_rsc_path[0].strip()` (a single character) and an
    undefined `amcporDir`; per-directory failures now `continue` (as the
    "skipping these results" messages state) instead of aborting the loop.
    """
    cmd = "\nfind " + path + " -name \"*azo_" + wsamp + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "*.off\" -print\n";
    pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
    ampoff_paths = pipe.read().split();
    pipe.close();
    ampoff_dirs={};
    cat_cmds={};
    angles = {};
    max_inc_angle = "";
    min_inc_angle = "";
    if data_type.lower().find("tsx") > -1:
        # harvest per-date incidence angles from the TSX xml leader files
        cmd = "\nfind " + path + " -name \"T*X*.xml\"\n";
        pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
        leader_file_paths = pipe.read().split();
        pipe.close();
        for leader_path in leader_file_paths:  # renamed: original shadowed `path`
            date = "";
            infile = open(leader_path,"r");
            for line in infile:
                if line.find("timeUTC") > -1:
                    index = re.search("timeUTC>",line).end(0);
                    year = line[index + 2 : index + 4];
                    month = line[index + 5 : index + 7];
                    day = line[index + 8 : index + 10];
                    date = year + month + day;
                elif line.find("coverageRegionMin incidenceAngle") > -1:
                    min_inc_angle = line[re.search("\">",line).end(0) : re.search("</",line).start(0)];
                elif line.find("coverageRegionMax incidenceAngle") > -1:
                    max_inc_angle = line[re.search("\">",line).end(0) : re.search("</",line).start(0)];
            infile.close();
            angles[date] = str((float(max_inc_angle) + float(min_inc_angle)) / 2.);
    # group the .off chunks by directory and build one cat command per dir
    for i in range(0,len(ampoff_paths)):
        ampoff_dir = ampoff_paths[i].strip()[0:ampoff_paths[i].strip().rfind("/")];
        if ampoff_dir not in ampoff_dirs:
            ampoff_dirs[ampoff_dir] = ampoff_paths[i];
            cat_cmds[ampoff_dir] = "\ncat " + ampoff_paths[i];
        else:
            cat_cmds[ampoff_dir] += " " + ampoff_paths[i];
    for ampoff_dir in cat_cmds:
        cmd = cat_cmds[ampoff_dir];
        elements = cmd.split();
        if len(elements) < 3:
            continue;
        else:
            if not re.search("_\d\.off",elements[1]):
                # single, un-chunked result: use it directly
                ampoff_dirs[ampoff_dir] = elements[1];
                continue;
            else:
                composite_ampoff_path = elements[1][:re.search("_\d\.off",elements[1]).start(0)] + ".off";
                ampoff_dirs[ampoff_dir]=composite_ampoff_path;
                if os.path.exists(composite_ampoff_path):
                    continue;
                cat_cmds[ampoff_dir] += " > " + composite_ampoff_path + "\n";
                print("\n***** pixelTrack - step \"make_unw\" - running cat to compose ampcor results into single file...\n");
                subprocess.call(cat_cmds[ampoff_dir],shell=True);
    for ampoff_dir in ampoff_dirs:
        ampoff_dir_contents = os.listdir(ampoff_dir);
        already_done = False;
        item="";
        for i in range(0,len(ampoff_dir_contents)):
            item = ampoff_dir_contents[i];
            if re.search(".*azimuth_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw",item) or \
               re.search(".*range_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw",item):
                already_done=True;
                break;
        if already_done:
            print("\n****** \"" + item +"\" already exists in \"" + ampoff_dir + "\", make_unw step likely already done for this directory, skipping...\n");
            continue;
        ampoff_path = ampoff_dirs[ampoff_dir];
        date = ampoff_path[re.search("/\d{6}[\-_]\d{6}",ampoff_path).start(0) + 1 : re.search("/\d{6}[\-_]\d{6}", ampoff_path).start(0) + 7];
        cmd = "\nls " + ampoff_path[0:ampoff_path.rfind("azo")+3]+"*.off.rsc\n";
        pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
        ampoff_rsc_paths = pipe.read().split();
        pipe.close();
        if len(ampoff_rsc_paths) < 1:
            print("\n***** WARNING, could not find any azo rsc file in \"" + ampoff_dir + "\", skipping these results\n");
            continue;
        ampoff_rsc_path = ampoff_rsc_paths[0];
        da_p = "";
        r_e = "";
        p_h = "";
        dr = "";
        endRefSample = "";
        endRefLine = "";
        # pull geometry parameters out of the .off.rsc file
        ampoff_rsc_file = open(ampoff_rsc_path,"r");
        while 1:
            line = ampoff_rsc_file.readline();
            if not line:
                break;
            elif line.find("RANGE_PIXEL_SIZE") > -1:
                dr = line.split()[1].strip();
            elif line.find("FILE_LENGTH") > -1:
                endRefLine = line.split()[1].strip();
            elif line.find("WIDTH") > -1:
                endRefSample = line.split()[1].strip();
            elif line.find("EARTH_RADIUS") > -1:
                r_e = line.split()[1].strip();
            elif re.search("^HEIGHT\s+",line):
                p_h = line.split()[1].strip();
            elif line.find("AZIMUTH_PIXEL_SIZE") > -1:
                da_p = line.split()[1].strip();
        ampoff_rsc_file.close();
        if endRefLine == "":
            print("\n***** WARNING, could not find parameter \"FILE_LENGTH\" in \"" + ampoff_rsc_path + "\", skipping these results\n");
            continue;
        if endRefSample == "":
            print("\n***** WARNING, could not find parameter \"WIDTH\" in \"" + ampoff_rsc_path + "\", skipping these results\n");
            continue;
        if da_p == "":
            print("\n***** WARNING, could not find parameter \"AZIMUTH_PIXEL_SIZE\" in \"" + ampoff_rsc_path + "\", skipping these results\n");
            continue;
        if r_e == "":
            print("\n***** WARNING, could not find parameter \"EARTH_RADIUS\" in \"" + ampoff_rsc_path + "\", skipping these results\n");
            continue;
        if p_h == "":
            print("\n***** WARNING, could not find parameter \"HEIGHT\" in \"" + ampoff_rsc_path + "\", skipping these results\n");
            continue;
        if dr == "":
            print("\n***** WARNING, could not find parameter \"RANGE_PIXEL_SIZE\" in \"" + ampoff_rsc_path + "\", skipping these results\n");
            continue;
        input_angle = angle;
        if data_type.lower().find("tsx") > -1:
            input_angle = angles[date];
        print("\n***** pixelTrack - step \"make_unw\" - running makeAzo in " + ampoff_dir + " to generate azimuth and range unw files ...\n");
        makeAzo(ampoff_path, float(da_p), float(r_e), float(p_h), float(dr), float(input_angle), int(wsamp), int(rwin), int(awin), search_x, search_y, int(endRefSample), int(endRefLine));
        if not os.path.exists(ampoff_dir+"/azimuth_" + rwin + "x" + awin + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc"):
            date = ampoff_path[re.search("/\d{6}[\-_]\d{6}",ampoff_path).start(0)+1:re.search("/\d{6}[\-_]\d{6}",ampoff_path).start(0)+7];
            cmd = "";
            if not os.path.exists(ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc"):
                cmd += "\nlook.pl " + ampoff_dir + "/" + date + ".slc " + str(int(rwin)/int(wsamp)) + " " + str(int(awin)/int(wsamp)) + "\n";
            cmd += "\ncp -p " + ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc " + ampoff_dir + "/azimuth_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc\n";
            cmd += "\ncp -p " + ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc " + ampoff_dir + "/range_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc\n";
            cmd += "\ncp -p " + ampoff_dir + "/" + date + "_" + str(int(rwin)/int(wsamp)) + "rlks.slc.rsc " + ampoff_dir + "/snr_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin)/int(wsamp)) + "rlks.unw.rsc\n";
            subprocess.call(cmd,shell=True);
    return;
def beamTable():
    """Fill the module-level beam_angle table with nominal beam incidence
    angles in degrees (keys look like Radarsat standard/fine beam modes --
    TODO confirm)."""
    beam_angle.update({
        "ST1": "23.7",
        "ST2": "27.7",
        "ST3": "33.7",
        "ST4": "36.6",
        "ST5": "39.4",
        "ST6": "44.0",
        "ST7": "47.2",
        "F1": "38.5",
        "F2": "40.8",
        "F3": "42.9",
        "F4": "44.8",
        "F5": "46.6",
    });
    return;
#def densifyAmpmag(path, date):
#
# if
#
# return;
def findAzimuthPixelSize(path, date, orbit):
    """Return the AZIMUTH_PIXEL_SIZE (as a string) for *date* under *path*.

    Looks for an existing <date>.slc.rsc; when absent, builds a temporary
    <date>_APS working directory, runs dopav.pl/roi_prep.pl on the raw data
    to produce one, then reads the parameter out of it.  Returns "-1" when
    the value cannot be determined.  The temporary _APS directory is removed
    on the way out.

    Fix vs. the original: the final cleanup called shutil.rmtree() on the
    _APS directory unconditionally, crashing on the failure path when that
    directory was never created.
    """
    cwd = os.getcwd();
    cmd = "find " + path + " -name \"" + date + ".slc.rsc\" -print";
    pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
    slc_rsc_paths = pipe.read().split();
    pipe.close();
    slc_rsc_path = "";
    if len(slc_rsc_paths) < 1:
        # no slc.rsc yet: locate the raw data + hdr and generate one
        cmd = "find " + path + " -name \"" + date + ".raw\" -print";
        pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
        raw_paths = pipe.read().split();
        pipe.close();
        cmd = "find " + path + " -name \"hdr*"+date+"*.rsc\" -print";
        pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
        hdr_paths = pipe.read().split();
        pipe.close();
        if len(raw_paths) < 1:
            print("\n***** WARNING, could not find \"" + date + ".raw\", necessary to determine azimuth pixel size\n");
            return "-1";
        raw_path = raw_paths[0];
        if not os.path.exists(raw_path + ".rsc"):
            print("\n***** WARNING, could not find \"" + date + ".raw.rsc\", necessary to determine azimuth pixel size\n");
            return "-1";
        if len(hdr_paths) < 1:
            print("\n***** WARNING, could not find \"hdr*" + date + "*.rsc\", necessary to determine azimuth pixel size\n");
            return "-1";
        hdr_path = hdr_paths[0];
        cmd = "\nmkdir " + path + "/" + date + "_APS\n";
        cmd += "\ncd " + path + "/" + date + "_APS\n";
        cmd += "\nln -s " + raw_path + " " + raw_path[raw_path.rfind("/") + 1 : ] + "\n";
        cmd += "\nln -s " + raw_path + ".rsc " + raw_path[raw_path.rfind("/") + 1 : ] + ".rsc\n";
        cmd += "\nln -s " + hdr_path + " " + hdr_path[hdr_path.rfind("/") + 1 : ]+"\n";
        cmd += "\ndopav.pl . . " + date + " " + date + " \"\"\n";
        cmd += "\nroi_prep.pl " + date + " " + orbit + " " + date + "-" + date + "\n";
        cmd += "\ncd " + cwd + "\n";
        subprocess.call(cmd,shell=True);
        slc_rsc_path = path + "/" + date + "_APS/" + date + ".slc.rsc";
    else:
        slc_rsc_path = slc_rsc_paths[0];
    slc_rsc_file = open(slc_rsc_path,"r");
    while 1:
        line = slc_rsc_file.readline();
        if not line:
            break;
        if line.find("AZIMUTH_PIXEL_SIZE") > -1:
            slc_rsc_file.close();
            if os.path.exists(path + "/" + date + "_APS"):
                shutil.rmtree(path + "/" + date + "_APS");
            return line[re.search("\d+\.*\d*",line).start(0) : re.search("\d+\.*\d*",line).end(0)];
    slc_rsc_file.close();
    print("\n***** WARNING, unable to determine azimuth pixel size\n");
    if os.path.exists(path + "/" + date + "_APS"):
        shutil.rmtree(path + "/" + date + "_APS");
    return "-1";
def GCF(num):
    """Return the greatest common factor of a list of positive integers.

    Folds the subtraction form of Euclid's algorithm across the list.
    Fix vs. the original: a single-element list returned the unbound name
    `num1` (NameError); it now returns that element.
    """
    result = num[0];
    for i in range(len(num)-1):
        num1 = result;
        num2 = num[i+1];
        if num1 < num2:
            num1,num2=num2,num1;
        while num1 - num2:
            num3 = num1 - num2;
            num1 = max(num2,num3);
            num2 = min(num2,num3);
        result = num1;
    return result;
def has_value(self, value):
    """Dict helper: return True when *value* occurs among self's values."""
    return value in tuple(self.values());
def LCM(num):
    """Return the least common multiple of a list of positive integers.

    Folds lcm(a, b) = a // gcf(a, b) * b across the list.  Fix vs. the
    original: the expression `t_gcf * num1/t_gcf * num2/t_gcf` relied on
    true division, producing a float under Python 3; floor division keeps
    the result an exact integer.
    """
    temp = num[0];
    for i in range(len(num)-1):
        num1 = temp;
        num2 = num[i+1];
        t_gcf = GCF([num1,num2]);
        temp = num1 // t_gcf * num2;
    return temp;
def makeProcFile(path, date2, date1, angle, dem, orbit):
    """Write int_<date2>_<date1>.proc under *path* for a date pair.

    Emits the standard SarDir/IntDir/DEM/orbit/Rlooks variables; when *angle*
    contains digits, additionally derives pixel_ratio from the range pixel
    size, the incidence angle and the azimuth pixel size.  Skips (with a
    message) when the proc file already exists.

    Fixes vs. the original: the sentinel was initialized as `pixelRatio` but
    tested as `pixel_ratio` (NameError whenever the angle branch was
    skipped), and `str(round(...))` followed by rfind(".") truncation broke
    under Python 3 where round() returns an int; int(round(...)) works on
    both.
    """
    proc_file_path = path + "/int_" + date2 + "_" + date1 + ".proc";
    print(proc_file_path);
    if os.path.exists(proc_file_path):
        print("\n\"" + proc_file_path + "\" already exists, skipping\n");
        return;
    int_path = path + "/int_" + date2 + "_" + date1;
    proc_file = open(proc_file_path,"w");
    proc_file.write("SarDir1=" + path + "/" + date2 + "\n");
    proc_file.write("SarDir2=" + path + "/" + date1 + "\n");
    proc_file.write("IntDir=" + int_path + "\n");
    proc_file.write("SimDir=" + int_path + "/SIM\n");
    proc_file.write("GeoDir=" + int_path + "/GEO\n");
    proc_file.write("flattening=orbit\n");
    proc_file.write("DEM=" + dem + "\n");
    proc_file.write("OrbitType=" + orbit + "\n");
    proc_file.write("Rlooks_sim=1\n");
    proc_file.write("Rlooks_unw=1\n");
    proc_file.write("Rlooks_geo=1\n");
    proc_file.write("Rlooks_int=1\n");
    pixel_ratio = "-1";
    if re.search("\d+", angle):
        azimuth_pixel_size = findAzimuthPixelSize(path, date1, orbit);
        range_pixel_size = "-1";
        if azimuth_pixel_size != "-1":
            cmd = "\nfind " + path + " -name \"" + date1 + ".raw.rsc\" -print\n";
            pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
            raw_rsc_paths = pipe.read().split();
            pipe.close();
            if len(raw_rsc_paths) > 0:
                raw_rsc_file = open(raw_rsc_paths[0],"r");
                while 1:
                    line = raw_rsc_file.readline();
                    if not line:
                        break;
                    if line.find("RANGE_PIXEL_SIZE") > -1:
                        raw_rsc_file.close();
                        range_pixel_size = line[re.search("\d+\.*\d*",line).start(0) : re.search("\d+\.*\d*",line).end(0)];
                        # ground-range pixel / azimuth pixel, rounded to the nearest int
                        pixel_ratio = str(int(round(float(range_pixel_size) / math.sin(math.radians(float(angle))) / float(azimuth_pixel_size))));
                        break;
                raw_rsc_file.close();
    if pixel_ratio != "-1":
        proc_file.write("pixel_ratio=" + pixel_ratio + "\n");
    proc_file.close();
def getPixelRatios(path):
    """Placeholder -- pixel-ratio collection is not implemented yet."""
    return None;
def readProcFile(path,date2,date1):
    """Locate and parse the proc file for a date pair under *path*.

    Runs find for *<date2>*<date1>*.proc, then parses name=value lines into
    a dict.  Exits the program when no proc file exists or a line is
    malformed; warns (and uses the first hit) when several match.

    Fix vs. the original: subprocess.Popen() was called without shell=True
    or stdout=PIPE and then .read()/.close() were invoked on the Popen
    object itself, which always raised; use the shell+PIPE pattern the rest
    of this file uses.
    """
    procCmd = "find " + path + " -name \"*" + date2 + "*" + date1 + "*.proc\" -print";
    procStream = subprocess.Popen(procCmd,shell=True,stdout=subprocess.PIPE).stdout;
    procOutput = procStream.read();
    procStream.close();
    if not isinstance(procOutput, str):
        procOutput = procOutput.decode();  # Python 3: the pipe yields bytes
    procFilePath = procOutput.strip().split();
    if len(procFilePath) < 1:
        print("\n***** ERROR, no proc file found for dates \"" + date2 + ", " + date1 + "\" in \"" + path + "\"\n");
        sys.exit();
    if len(procFilePath) > 1:
        print("\n***** WARNING, found more than one proc file for dates \"" + date2 + ", " + date1 + "\", using \"" + procFilePath[0] + "\"\n");
    procFile = open(procFilePath[0],"r");
    procHash = {};
    while 1:
        line = procFile.readline();
        if not line:
            break;
        line = line.strip();
        elements = line.split("=");
        if len(elements) < 2 or len(elements[0]) < 1 or len(elements[1]) < 1:
            print("\n***** ERROR, proc file line format is \"varName=varValue\", \"" + line + "\" does not conform to this format\n");
            sys.exit();
        procHash[elements[0]] = elements[1];
    procFile.close();
    return procHash;
def gausshpfilt(data,kernel):
    """Convolve *data* with *kernel* using edge-replicating padding.

    Pads `data` by half the kernel size on every side: edges are mirrored
    copies of the nearest rows/columns and corners are filled by averaging
    their two already-filled neighbours, then a direct (same-size) 2-D
    convolution is performed.  Returns an array the same shape as `data`.

    Fix vs. the original: `padSize` used true division (`/ 2`), which yields
    a float under Python 3 and breaks every range() below; floor division
    behaves identically on Python 2 for the even sizes involved.
    """
    padSize = numpy.size(kernel,axis=0) // 2;
    temp = numpy.zeros((numpy.size(data,axis=0)+2*padSize,numpy.size(data,axis=1)+2*padSize));
    #fill temp with data values
    for i in range(padSize,numpy.size(temp,axis=0)-padSize):
        for j in range(padSize,numpy.size(temp,axis=1)-padSize):
            temp[i,j] = data[i-padSize,j-padSize];
    #pad left
    for i in range(0,padSize):
        for j in range(padSize,padSize+numpy.size(data,axis=0)):
            temp[j,padSize-1-i] = data[j-padSize,i];
    #pad top
    for i in range(0,padSize):
        for j in range(padSize,padSize+numpy.size(data,axis=1)):
            temp[padSize-1-i,j] = data[i,j-padSize];
    #pad right
    for i in range(0,padSize):
        for j in range(padSize,padSize+numpy.size(data,axis=0)):
            temp[j,numpy.size(temp,axis=1)-padSize+i] = data[j-padSize,numpy.size(data,axis=1)-1-i];
    #pad bottom
    for i in range(0,padSize):
        for j in range(padSize,padSize+numpy.size(data,axis=1)):
            temp[numpy.size(temp,axis=0)-padSize+i,j] = data[numpy.size(data,axis=0)-1-i,j-padSize];
    #fill top-left corner
    for i in range(0,padSize):
        for j in range(0, padSize):
            temp[padSize-i-1,padSize-j-1] = int((temp[padSize-i-1,padSize-j] + temp[padSize-i,padSize-j-1]) / 2);
    #fill top-right corner
    for i in range(0,padSize):
        for j in range(0, padSize):
            temp[padSize-i-1,numpy.size(temp,axis=1)-padSize+j] = int((temp[padSize-i-1,numpy.size(temp,axis=1)-padSize+j-1] + temp[padSize-i,numpy.size(temp,axis=1)-padSize+j]) / 2);
    #fill bottom-right corner
    for i in range(0,padSize):
        for j in range(0, padSize):
            temp[numpy.size(temp,axis=0)-padSize+i,numpy.size(temp,axis=1)-padSize+j] = int((temp[numpy.size(temp,axis=0)-padSize+i,numpy.size(temp,axis=1)-padSize+j-1] + temp[numpy.size(temp,axis=0)-padSize+i-1,numpy.size(temp,axis=1)-padSize+j]) / 2);
    #fill bottom-left corner
    for i in range(0,padSize):
        for j in range(0, padSize):
            temp[numpy.size(temp,axis=0)-padSize+i,padSize-j-1] = (temp[numpy.size(temp,axis=0)-padSize+i,padSize-j] + temp[numpy.size(temp,axis=0)-padSize+i-1,padSize-j-1]) / 2;
    #perform convolution
    ghp_data = numpy.zeros((numpy.size(data,axis=0),numpy.size(data,axis=1)));
    for i in range(numpy.size(ghp_data,axis=0)):
        for j in range(numpy.size(ghp_data,axis=1)):
            ghp_data[i,j] = numpy.sum(kernel*temp[i:i+numpy.size(kernel,axis=0),j:j+numpy.size(kernel,axis=1)]);
    return ghp_data;
def geocode(path, rwin, awin, search_x, search_y, wsamp, orbit, dem_path):
    """Geocode azimuth/range .unw offset maps found under *path*.

    Finds every .unw produced with the given window/search/looks
    combination, then (per pair directory) runs make_geomap.pl/geocode.pl
    to produce a geo_*.unw.  A (currently disabled, see NOTE below) branch
    removes a range ramp using a radar_<orbit>.unw simulation before
    geocoding range offsets.

    Fix vs. the original: the "already exists ... skipping" branches printed
    their message but then fell through and re-processed the pair anyway;
    they now restore the cwd and continue.
    """
    import fnmatch;
    cwd = os.getcwd();
    azo_unw_paths = [];
    for root, dirnames, filenames in os.walk(path):
        for filename in fnmatch.filter(filenames, "*.unw"):
            if re.search("r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + str(int(rwin) / int(wsamp)), filename):
                azo_unw_paths.append(root + "/" + filename);
    ld_range = str(int(rwin) / int(wsamp));
    ld_azimuth = str(int(awin) / int(wsamp));
    for azo_unw_path in azo_unw_paths:
        index = re.search("\d{6}_\d{6}", azo_unw_path).start(0);
        later_date = azo_unw_path[index : index + 6];
        early_date = azo_unw_path[index + 7 : index + 13];
        print(azo_unw_path);
        azo_unw_dir = ".";
        index = azo_unw_path.rfind("/");
        if index > -1:
            azo_unw_dir = azo_unw_path[ : index];
            azo_unw_name = azo_unw_path[index + 1 : ];
        os.chdir(azo_unw_dir);
        geo_unw = "geo_" + azo_unw_name[ : azo_unw_name.find("_")] + "_" + later_date + "-" + early_date + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + ld_range + "rlks.unw";
        if os.path.exists(geo_unw):
            print("\n**** WARNING, \"" + geo_unw + "\" already exists in \"" + azo_unw_dir + "\", skipping " + azo_unw_name + "...\n");
            os.chdir(cwd);
            continue;
        elif geo_unw.find("range") > -1 and os.path.exists(geo_unw.replace("range", "adj_range")):
            print("\n**** WARNING, \"" + geo_unw.replace("range", "adj_range") + "\" already exists in \"" + azo_unw_dir + "\", skipping " + azo_unw_name + "...\n");
            os.chdir(cwd);
            continue;
        radar_name = "radar_" + orbit + ".unw";
        radar_rsc_name = radar_name + ".rsc";
        if not os.path.exists(radar_name):
            print("\n**** WARNING, \"" + radar_name + "\" not found in \"" + azo_unw_dir + "\", skipping range ramp-removal for this pair...\n");
        if not os.path.exists(radar_rsc_name):
            print("\n***** WARNING, \"" + radar_rsc_name + "\" not found in \"" + azo_unw_dir + "\", skipping range ramp-removal for this pair...\n");
        # NOTE(review): the "blalbalb" prefix makes this pattern match nothing,
        # so the range ramp-removal below is effectively disabled -- this looks
        # like a deliberate debug switch; behavior preserved as-is.
        if re.search("^blalbalbrange", azo_unw_name) and os.path.exists(radar_name) and os.path.exists(radar_name + ".rsc"):
            cmd = "\nlook.pl " + radar_name + " " + ld_range + " " + ld_azimuth + "\n";
            subprocess.call(cmd, shell=True);
            radar_ld_name = "radar_" + orbit + "_" + ld_range + "rlks";
            radar_ld_unw = "radar_" + orbit + "_" + ld_range + "rlks.unw";
            width = "";
            wavelength = "";
            radar_rsc_file = open(radar_ld_unw + ".rsc", "r");
            while 1:
                line = radar_rsc_file.readline();
                if not line:
                    break;
                if line.find("WIDTH") > -1:
                    elements = line.split();
                    width = elements[1];
                if line.find("WAVELENGTH") > -1:
                    elements = line.split();
                    wavelength = elements[1];
            radar_rsc_file.close();
            if width == "":
                print("\n***** WARNING, could not find \"WIDTH\" in \"" + radar_ld_unw + ".rsc\", skipping range ramp-removal for \"" + azo_unw_dir + "\"...\n");
                continue;
            if wavelength == "":
                print("\n***** WARNING, could not find \"WAVELENGTH\" in \"" + radar_ld_unw + ".rsc\", skipping range ramp-removal for \"" + azo_unw_dir + "\"...\n");
                continue;
            cmd = "\nrmg2mag_phs " + radar_ld_unw + " " + radar_ld_name + ".mag " + radar_ld_name + ".phs " + width + "\n";
            subprocess.call(cmd, shell=True);
            adj_radar_ld_phs = adjustPhase(radar_ld_name + ".phs", str(100 * float(wavelength)), width);
            cmd = "\nmag_phs2rmg " + radar_ld_name + ".mag " + adj_radar_ld_phs + " " + radar_ld_unw + " " + width + "\n";
            subprocess.call(cmd, shell=True);
            adj_range_unw_name = "adj_" + azo_unw_name;
            cmd = "\nadd_rmg.pl " + azo_unw_name + " " + radar_ld_unw + " " + adj_range_unw_name + " -1 1\n";
            subprocess.call(cmd, shell=True);
            azo_unw_name = adj_range_unw_name;
        cmd = "";
        if not os.path.exists(azo_unw_dir + "/" + later_date + "_" + ld_range + "rlks.slc.rsc"):
            cmd += "\nlook.pl " + later_date + ".slc " + ld_range + " " + ld_azimuth + "\n";
        cmd += "\ncp -pr " + later_date + "_" + ld_range + "rlks.slc.rsc " + azo_unw_path + ".rsc\n";
        cmd += "\nmake_geomap.pl ./GEO " + azo_unw_name + " azm.trans " + orbit + " " + dem_path + " " + later_date + "-" + early_date + "_SIM.aff " + ld_range + " " + later_date + " yes ../SIM\n";
        cmd += "\ngeocode.pl ./GEO/azm.trans " + azo_unw_name + " geo_" + azo_unw_name[ : azo_unw_name.find("_")] + "_" + later_date + "-" + early_date + "_r" + rwin + "x" + awin + "_s" + search_x + "x" + search_y + "_" + ld_range + "rlks.unw\n";
        subprocess.call(cmd,shell=True);
        os.chdir(cwd);
    return;
def generateProfiles(path):
    """Grid north/east/mag xyz result files found under *path*.

    Finds northxyz.txt / eastxyz.txt / magxyz.txt files and runs grid.py on
    each one that has no .grd yet.  The *.distance profile extraction
    (grdtrack) remains disabled, as in the original.

    Fix vs. the original: every subprocess.Popen() call passed the command
    string without shell=True/stdout=PIPE and then called .read() on the
    Popen object, which always raised; it also ran Popen on an empty command
    when no gridding was needed.
    """
    currentDir = os.getcwd();  # kept: referenced by the disabled grdtrack block below
    profilesCmd = "find " + path + " -name \"*.distance\" -print";
    profilesStream = subprocess.Popen(profilesCmd,shell=True,stdout=subprocess.PIPE).stdout;
    profilesOutput = profilesStream.read();
    profilesStream.close();
    profiles = profilesOutput.split();
    xyzCmd = "find " + path + " -name \"northxyz.txt\" -print";
    xyzStream = subprocess.Popen(xyzCmd,shell=True,stdout=subprocess.PIPE).stdout;
    xyzOutput = xyzStream.read();
    xyzStream.close();
    xyzCmd = "find " + path + " -name \"eastxyz.txt\" -print";
    xyzStream = subprocess.Popen(xyzCmd,shell=True,stdout=subprocess.PIPE).stdout;
    xyzOutput = xyzOutput + xyzStream.read();
    xyzStream.close();
    xyzCmd = "find " + path + " -name \"magxyz.txt\" -print";
    xyzStream = subprocess.Popen(xyzCmd,shell=True,stdout=subprocess.PIPE).stdout;
    xyzOutput = xyzOutput + xyzStream.read();
    xyzStream.close();
    xyzFileList = xyzOutput.split();
    for i in range(0,len(xyzFileList)):
        xyzPath = xyzFileList[i].strip()[0:xyzFileList[i].strip().rfind("/")];
        xyzFileName = xyzFileList[i].strip()[xyzFileList[i].strip().rfind("/")+1:];
        xyzName = xyzFileName[0:xyzFileName.find(".")];
        gridCmd = "";
        if not os.path.exists(xyzPath + "/" + xyzName + ".grd"):
            gridCmd = gridCmd + "\npython grid.py " + xyzFileList[i].strip() + "\n";
        if gridCmd != "":
            gridCmdStream = subprocess.Popen(gridCmd,shell=True,stdout=subprocess.PIPE).stdout;
            gridCmdStream.read();
            gridCmdStream.close();
    #for i in range(0,len(profiles)):
    #    genProfileCmd = "\ncd " + xyzPath + "\ngrdtrack " + profiles[i] + " -G" + xyzName + ".grd > " + profiles[i][profiles[i].rfind("/")+1:profiles[i].find(".")] + "_" + xyzName + ".txt\ncd " + currentDir + "\n";
    #    print(genProfileCmd);
    #genProfileStream = subprocess.Popen(genProfileCmd);
    #genProfileStream.close();
def generatePNGs(path):
    """Convert rendered .ps files (next to .grd files under *path*) to PNG
    via GMT ps2raster, skipping ones already converted.

    Fix vs. the original: subprocess.Popen() was called without
    shell=True/stdout and .close() was invoked on the Popen object; the
    accumulated command is now executed with subprocess.call(), matching the
    rest of this file.
    """
    currentDir = os.getcwd();
    findGRDsCmd = "find " + path + " -name \"*.grd\" -print";
    findGRDsStream = subprocess.Popen(findGRDsCmd,shell=True,stdout=subprocess.PIPE).stdout;
    findGRDsOutput = findGRDsStream.read().split();
    findGRDsStream.close();
    pngCmd = "";
    for i in range(0,len(findGRDsOutput)):
        psName = findGRDsOutput[i][0:findGRDsOutput[i].rfind(".")] + ".ps";
        psPath = findGRDsOutput[i][0:findGRDsOutput[i].rfind("/")];
        pngName = findGRDsOutput[i][0:findGRDsOutput[i].rfind(".")] + ".png";
        if os.path.exists(psName) and not os.path.exists(pngName):
            pngCmd += "\ncd " + psPath + "\nps2raster -A -TG " + psName + "\ncd " + currentDir + "\n";
    if pngCmd != "":
        subprocess.call(pngCmd,shell=True);
def getAffineTrans(path):
    """Build (and currently just print) the process_2pass_glac.pl commands
    that produce the SIM affine transform for every .proc file in *path*
    whose *_SIM.aff output is missing.

    Fix vs. the original: os.listdir() returns bare file names, but
    readIntProcFile() asserts on os.path.exists() -- the name is now joined
    with *path* so the lookup works regardless of the current directory.
    The actual subprocess.call remains commented out, as in the original.
    """
    cwd = os.getcwd();
    contents = os.listdir(path);
    proc_paths = [item for item in contents if ".proc" in item];
    if len(proc_paths) < 1:
        print("\n***** WARNING, no *.proc files found in " + path + ", not running \"affine\" step...\n");
        return;
    cmd = "";
    for proc_path in proc_paths:
        int_vars = readIntProcFile(os.path.join(path, proc_path));
        date1 = int_vars["SarDir1"];
        date2 = int_vars["SarDir2"];
        int_dir = int_vars["IntDir"];
        rlooks = int_vars["Rlooks_geo"];
        aff_path = path + "/" + int_dir + "/" + date1 + "-" + date2 + "_" + rlooks + "rlks_SIM.aff";
        if os.path.exists(aff_path):
            print("\n***** WARNING, " + aff_path + " already exists in " + int_dir + ", skipping...\n");
            continue;
        cmd += "\ncd " + path + "\n";
        cmd += "\nprocess_2pass_glac.pl " + proc_path + " offsets done_sim_removal &\n";
        cmd += "\ncd " + cwd + "\n";
        print(cmd);
        #subprocess.call(cmd,shell=True);
    return;
def getGRDCorners(path):
    """Write a <name>_corners.dat file (x/y min/max as LL, TR, TL, LR rows)
    next to every .grd file under *path*, using GMT grdinfo output.

    Fix vs. the original: both subprocess.Popen() calls passed the command
    string without shell=True/stdout=PIPE and called .read() on the Popen
    object itself, which always raised.
    """
    findGRDsCmd = "find " + path + " -name \"*.grd\" -print";
    findGRDsStream = subprocess.Popen(findGRDsCmd,shell=True,stdout=subprocess.PIPE).stdout;
    findGRDsOutput = findGRDsStream.read().split();
    findGRDsStream.close();
    for i in range(0,len(findGRDsOutput)):
        grdPath = findGRDsOutput[i][0:findGRDsOutput[i].rfind("/")];
        grdName = findGRDsOutput[i][findGRDsOutput[i].rfind("/")+1:findGRDsOutput[i].rfind(".")];
        if not os.path.exists(grdPath + "/" + grdName + "_corners.dat"):
            grdinfoCmd = "\ngrdinfo " + findGRDsOutput[i].strip() + "\n";
            grdinfoStream = subprocess.Popen(grdinfoCmd,shell=True,stdout=subprocess.PIPE).stdout;
            grdinfoOutput = grdinfoStream.read();
            grdinfoStream.close();
            # slice the bounds out of the grdinfo report
            x_min = grdinfoOutput[grdinfoOutput.find("x_min:")+6:grdinfoOutput.find("x_max:")].strip();
            x_max = grdinfoOutput[grdinfoOutput.find("x_max:")+6:grdinfoOutput.find("x_inc:")].strip();
            y_min = grdinfoOutput[grdinfoOutput.find("y_min:")+6:grdinfoOutput.find("y_max:")].strip();
            y_max = grdinfoOutput[grdinfoOutput.find("y_max:")+6:grdinfoOutput.find("y_inc:")].strip();
            cornersFileName = grdPath + "/" + grdName + "_corners.dat";
            cornersFile = open(cornersFileName,"w");
            cornersFile.write(x_min + " " + y_min + " LL\n");
            cornersFile.write(x_max + " " + y_max + " TR\n");
            cornersFile.write(x_min + " " + y_max + " TL\n");
            cornersFile.write(x_max + " " + y_min + " LR\n");
            cornersFile.close()
def generateKML(path):
    """Stub: locate rendered .png files under *path*; KML generation itself
    is not implemented yet.

    Fix vs. the original: subprocess.Popen() was called without
    shell=True/stdout=PIPE and .read()/.close() were invoked on the Popen
    object, which always raised.
    """
    findPNGsCmd = "find " + path + " -name \"*.png\" -print";
    findPNGsStream = subprocess.Popen(findPNGsCmd,shell=True,stdout=subprocess.PIPE).stdout;
    findPNGsOutput = findPNGsStream.read().split();
    findPNGsStream.close();
def createMatlabGetXYZ(matlabPath,ampcorInFilePath):
    """Instantiate the getxyzs.m matlab template for one ampcor result set
    and run matlab on it.

    Reads window/skip geometry from the ampcor .in file and the upper-left
    corner from corners.dat in the same directory, then copies the template
    at *matlabPath* to <dir>/getxyzs.m filling the empty placeholder
    assignments in this fixed order: rwin, awin, load, indat, width0,
    length0, ul_long, ul_lat, x_step, y_step.  NOTE(review): the template's
    "rwin"/"awin" receive the ampcor *skip* values -- presumably intended;
    confirm against the template.

    Fix vs. the original: the final subprocess.Popen() ran the multi-line
    command string without shell=True (and called .close() on the Popen
    object), which always raised; the command now runs via subprocess.call.
    """
    startRefSample = "";
    endRefSample = "";
    skipRefSample = "";
    startRefLine = "";
    endRefLine = "";
    skipRefLine = "";
    ampcorInFile = open(ampcorInFilePath,"r");
    ampoff_dir = ampcorInFilePath[0:ampcorInFilePath.rfind("/")];
    ampoff_name = ampcorInFilePath[0:ampcorInFilePath.rfind(".")];
    cornersFilePath = ampoff_dir + "/corners.dat";
    cornersFile = open(cornersFilePath,"r");
    ul_long = "";
    ul_lat = "";
    while 1:
        line = cornersFile.readline();
        if not line:
            break;
        line = line.strip();
        if line.find("ul_long") > -1:
            ul_long = line.split("=")[1];
        elif line.find("ul_lat") > -1:
            ul_lat = line.split("=")[1];
    cornersFile.close();
    while 1:
        line = ampcorInFile.readline();
        if not line:
            break;
        if line.find("Start, End and Skip Samples in Reference Image") > -1:
            line = line.strip().split("=");
            sampleInfo = line[1].split();
            startRefSample = sampleInfo[0];
            endRefSample = sampleInfo[1];
            skipRefSample = sampleInfo[2];
        elif line.find("Start, End and Skip Lines in Reference Image") > -1:
            line = line.strip().split("=");
            lineInfo = line[1].split();
            startRefLine = lineInfo[0];
            endRefLine = lineInfo[1];
            skipRefLine = lineInfo[2];
    ampcorInFile.close();
    matlabFile = open(matlabPath,"r");
    outputMatlabFile = open(ampoff_dir + "/getxyzs.m","w");
    # each loop below copies template lines until it reaches the next empty
    # placeholder assignment, substitutes the value, then hands off
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("rwin\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",skipRefSample+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("awin\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",skipRefLine+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("load\s*;",line):
            outputMatlabFile.write(line.replace(";",ampoff_name[ampoff_name.rfind("/")+1:]+".off;"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("indat\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",ampoff_name[ampoff_name.rfind("/")+1:]+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("width0\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",endRefSample+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("length0\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",endRefLine+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("ul_long\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",ul_long+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("ul_lat\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",ul_lat+";"));
            break;
        else:
            outputMatlabFile.write(line);
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("x_step\s*=\s*;",line):
            outputMatlabFile.write(line.replace(";",str(15*int(skipRefSample))+";"));
            break;
        else:
            outputMatlabFile.write(line);
    # final loop intentionally has no break: it writes the rest of the
    # template, substituting the y_step placeholder when it appears
    while 1:
        line = matlabFile.readline();
        if not line:
            break;
        elif re.search("y_step\s*=\s*",line):
            outputMatlabFile.write(line.replace(";",str(15*int(skipRefLine))+";"));
        else:
            outputMatlabFile.write(line);
    outputMatlabFile.close();
    matlabFile.close();
    currentDir = os.getcwd();
    getXYZCmd = "\ncd " + ampoff_dir + "\nmatlab -nodesktop -nosplash -r getxyzs\ncd " + currentDir;
    subprocess.call(getXYZCmd,shell=True);
def makeRawALOS(WorkPath):
    """Run make_raw_alos.pl in every six-digit date directory under
    *WorkPath* that contains an ALOS LED leader file.

    When the linked IMG-HH image has an HV sibling the acquisition is dual
    polarization, so the FBD2FBS conversion flag is passed.
    """
    cmd = "";
    for entry in os.listdir(WorkPath):
        if not re.search("^\d\d\d\d\d\d$", entry):
            continue;
        for item in os.listdir(WorkPath + "/" + entry):
            if item.find("LED") < 0:
                continue;
            fbd2fbs = "NO";
            img_path = item.replace("LED", "IMG-HH");
            img_full_path = os.readlink(WorkPath + "/" + entry + "/" + img_path);
            img_alt_path = img_full_path.replace("HH","HV");
            if os.path.exists(img_alt_path):
                fbd2fbs = "FBD2FBS";
            cwd = os.getcwd();
            cmd = cmd + "\ncd " + WorkPath + "/" + entry + "\nmake_raw_alos.pl IMG " + entry + " " + fbd2fbs + "\ncd " + cwd + "\n";
            break;
    subprocess.call(cmd,shell=True);
    return;
def makeRawENVISAT(WorkPath, orbit):
    """Run make_raw_envi.pl in every six-digit date directory under
    *WorkPath* that contains an ENVISAT ASA_* product."""
    cmd = "";
    for entry in os.listdir(WorkPath):
        if not re.search("^\d\d\d\d\d\d$", entry):
            continue;
        for item in os.listdir(WorkPath + "/" + entry):
            if item.find("ASA_") < 0:
                continue;
            cwd = os.getcwd();
            cmd = cmd + "\ncd " + WorkPath + "/" + entry + "\nmake_raw_envi.pl " + item + " " + orbit + " " + entry + "\ncd " + cwd + "\n";
            break;
    subprocess.call(cmd,shell=True);
    return;
def makeRawERS(WorkPath, orbit):
    """Run make_raw.pl in every six-digit date directory under *WorkPath*
    that contains a SARLEADER file.

    (An ASF-specific variant, make_raw_ASF.pl, was previously used here.)
    """
    cmd = "";
    for entry in os.listdir(WorkPath):
        if not re.search("^\d\d\d\d\d\d$", entry):
            continue;
        for item in os.listdir(WorkPath + "/" + entry):
            if item.find("SARLEADER") < 0:
                continue;
            cwd = os.getcwd();
            cmd = cmd + "\ncd " + WorkPath + "/" + entry + "\nmake_raw.pl " + orbit + " " + item + " " + entry + "\ncd " + cwd + "\n";
            break;
    subprocess.call(cmd,shell=True);
    return;
def makeRawTSX(WorkPath):
    """For each TDX*.xml leader under WorkPath, run make_slc_tsx.csh in the
    matching per-date directory, then fake the .raw/.raw.rsc files (copied
    from the slc .rsc / touched empty) so downstream steps that expect raw
    products keep working.
    """
    cwd = os.getcwd();
    cmd = "\nfind " + WorkPath + " -name \"TDX*.xml\"\n";
    pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
    leader_file_paths = pipe.read().split();
    pipe.close();
    # map YYMMDD -> leader path; the date comes from the first timeUTC tag
    dates = {};
    for path in leader_file_paths:
        infile = open(path,"r");
        for line in infile:
            if line.find("timeUTC") > -1:
                # assumes the tag text starts with a 4-digit year, e.g.
                # "2011-03-05..." -- index+2 skips the century -- TODO confirm
                index = re.search("timeUTC>",line).end(0);
                year = line[index + 2 : index + 4];
                month = line[index + 5 : index + 7];
                day = line[index + 8 : index + 10];
                date = year + month + day;
                dates[date] = path;
                break;
        infile.close();
    for date in dates:
        cmd = "\ncd " + WorkPath + "/" + date + "\n";
        cmd += "\nmake_slc_tsx.csh " + dates[date] + " " + date + "\n";
        cmd += "\ncp -p " + WorkPath + "/" + date + "/" + date + ".slc.rsc " + WorkPath + "/" + date + "/" + date + ".raw.rsc\n";
        cmd += "\ntouch " + WorkPath + "/" + date + "/" + date + ".raw\n";
        cmd += "\ncd " + cwd + "\n";
        subprocess.call(cmd,shell=True);
    return;
def readIntProcFile(proc_path):
    """Parse an interferogram .proc file into a {name: value} dict.

    Lines have the form ``name = value``; blank lines are skipped and a
    malformed line aborts the program.
    """
    assert os.path.exists(proc_path), "***** ERROR: " + proc_path + " not found, cannot read proc file\n";
    int_vars = {};
    proc_file = open(proc_path,"r");
    for raw_line in proc_file:
        stripped = raw_line.strip();
        if not stripped:
            continue;
        parts = stripped.split("=");
        if len(parts) < 2 or len(parts[0]) < 1 or len(parts[1]) < 1:
            print("\n***** ERROR, proc file line format is \"name = value\", \"" + stripped + "\" does not conform to this format\n");
            sys.exit();
        int_vars[parts[0].strip()] = parts[1].strip();
    proc_file.close();
    return int_vars;
def setupALOS(WorkPath, leader_file_paths):
    """Link ALOS leader (LED-*) and raw image (IMG-HH) files into per-date
    directories under WorkPath.

    The acquisition date is recovered by scanning each leader file for the
    first whitespace-preceded 8-digit run; its YYMMDD part names the
    directory.  NOTE(review): the leader is opened in "rb" but matched with
    str regexes, and dict.has_key() is used -- this function is Python 2
    only.
    """
    for leader_path in leader_file_paths:
        existingSARLeaderFiles = {};
        sarNumber = {};
        dateName = "";
        extension = leader_path[leader_path.rfind("."):];
        leader_name = leader_path[leader_path.rfind("/") + 1 : ];
        leaderFile = open(leader_path,"rb");
        while 1:
            line = leaderFile.readline();
            if not line:
                break;
            # first "<space>YYYYMMDD" run in the leader is taken as the date
            searchExp = "\s\d\d\d\d\d\d\d\d";
            if re.search(searchExp,line):
                index = re.search(searchExp,line).start(0);
                dateName = line[index:index+9].strip();
                dateName = dateName[2:8];  # drop century -> YYMMDD
                if not os.path.isdir(WorkPath + "/" + dateName):
                    cmd = "mkdir " + WorkPath + "/" + dateName;
                    subprocess.call(cmd,shell=True);
                if not existingSARLeaderFiles.has_key(leader_path):
                    leader_link_path = WorkPath + "/" + dateName + "/" + leader_name;
                    os.symlink(leader_path, leader_link_path);
                    existingSARLeaderFiles[leader_path] = leader_link_path;
                break;
        leaderFile.close();
        if re.search("LED-A",leader_path):
            # link the matching HH image next to the leader (HV linking is
            # currently disabled below)
            raw_path = leader_path.replace("LED","IMG-HH");
            raw_alt_path = leader_path.replace("LED","IMG-HV");
            raw_name = raw_path[raw_path.rfind("IMG") : ];
            raw_alt_name = raw_alt_path[raw_alt_path.rfind("IMG") : ];
            raw_link_path = WorkPath + "/" + dateName + "/" + raw_name;
            raw_alt_link_path = WorkPath + "/" + dateName + "/" + raw_alt_name;
            if os.path.exists(raw_path) and not os.path.exists(raw_link_path):
                os.symlink(raw_path, raw_link_path);
    #        if os.path.exists(raw_alt_path) and not os.path.exists(raw_alt_link_path):
    #            os.symlink(raw_alt_path, raw_alt_link_path);
            if not os.path.exists(raw_path):
                print("\n***** WARNING, could not find corresponding raw file for leader file \"" + leader_path + "\"\nPlease make sure the raw file is in the same directory and is named \"IMG-HH*"+leader_path.replace("LED","")+"\"\n");
                continue;
    return;
def setupTSX(WorkPath, leader_file_paths):
for path in leader_file_paths:
infile = open(path,"r");
for path in leader_file_paths:
print(path);
return;
def setupENVISAT(WorkPath, leader_file_paths):
for path in leader_file_paths:
print(path);
return;
def setupERS(WorkPath, leader_file_paths):
for path in leader_file_paths:
existingSARLeaderFiles = {};
sarNumber = {};
dateName = "";
extension = path[path.rfind("."):];
leaderFile = open(path,"rb");
while 1:
line = leaderFile.readline();
if not line:
break;
searchExp = "\s\d\d\d\d\d\d\d\d";
if re.search(searchExp,line):
index = re.search(searchExp,line).start(0);
dateName = line[index:index+9].strip();
dateName = dateName[2:8];
if not os.path.isdir(WorkPath + "/" + dateName):
cmd = "mkdir " + WorkPath + "/" + dateName;
subprocess.call(cmd,shell=True);
if not existingSARLeaderFiles.has_key(path):
if not sarNumber.has_key(dateName):
sarNumber[dateName] = 1;
else:
sarNumber[dateName] = sarNumber[dateName] + 1;
sarNumberStr = str(sarNumber[dateName])
if sarNumber[dateName] < 10:
sarNumberStr = "0" + sarNumberStr;
tempPath = WorkPath + "/" + dateName + "/SARLEADER" + sarNumberStr;
while has_value(existingSARLeaderFiles,tempPath):
sarNumber[dateName] = sarNumber[dateName] + 1;
sarNumberStr = str(sarNumber[dateName]);
if sarNumber[dateName] < 10:
sarNumberStr = "0" + sarNumberStr;
tempPath = WorkPath + "/" + dateName + "/SARLEADER" + sarNumberStr;
os.symlink(path,tempPath);
existingSARLeaderFiles[path] = tempPath;
break;
leaderFile.close();
rawFileName = "rawness";
if re.search("LEA.*\.001",path):
rawFileName = path.replace("LEA","DAT");
else:
rawFileName = path[0:path.find(".ldr")] + ".raw";
if not os.path.exists(rawFileName):
rawFileName = rawFileName[0:rawFileName.find(".raw")] + ".RAW";
if not os.path.exists(rawFileName):
rawFileName = rawFileName[0:rawFileName.find(".RAW")] + ".Raw";
if not os.path.exists(rawFileName):
if DataType.lower().find("alos") > -1:
print("\n***** WARNING, could not find corresponding raw file for leader file \"" + path + "\"\nPlease make sure the raw file is in the same directory and is named \"IMG*"+path.replace("LED","")+"\"\n");
else:
print("\n***** WARNING, could not find corresponding raw file for leader file \"" + path + "\"\nPlease make sure the raw file is in the same directory and has the extension \".raw\"\n");
continue;
tempImagePath = "";
if re.search("SARLEADER", existingSARLeaderFiles[path]):
tempImagePath = existingSARLeaderFiles[path].replace("SARLEADER","IMAGERY");
if not os.path.exists(tempImagePath):
os.symlink(rawFileName, tempImagePath);
return;
def setupTSX(WorkPath, leader_file_paths):
for path in leader_file_paths:
infile = open(path,"r");
for line in infile:
if line.find("timeUTC") > -1:
index = re.search("timeUTC>",line).end(0);
year = line[index + 2 : index + 4];
month = line[index + 5 : index + 7];
day = line[index + 8 : index + 10];
date = year + month + day;
if not os.path.exists(date):
os.mkdir(WorkPath + "/" + date);
break;
infile.close();
return;
| [
"wz278@cornell.edu"
] | wz278@cornell.edu |
f72f5f0f60b80e25ca104b1a21939043d23487b3 | 133e8c9df1d1725d7d34ea4317ae3a15e26e6c66 | /Selenium/QQ/robot.py | 28a88422bf9a82f59aec6f1ccfb9be5e67243631 | [
"Apache-2.0"
] | permissive | 425776024/Learn | dfa8b53233f019b77b7537cc340fce2a81ff4c3b | 3990e75b469225ba7b430539ef9a16abe89eb863 | refs/heads/master | 2022-12-01T06:46:49.674609 | 2020-06-01T08:17:08 | 2020-06-01T08:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,793 | py | import os
import cv2
import time
import uuid
import traceback
from selenium import webdriver
from pyvirtualdisplay import Display
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from sys import platform
BASE_DIR = os.path.realpath(os.path.split(__file__)[0])
IMG_DIR = os.path.join(BASE_DIR, "tmp")
from utils.utils import log, LoginError, login_required, get_qq_captcha_code
from utils.eml import make_message
class QQRobot(object):
LOGIN_URL = "https://xui.ptlogin2.qq.com/cgi-bin/xlogin?appid=522005705&daid=4&s_url=https://mail.qq.com/cgi-bin/login?vt=passport%26vm=wpt%26ft=loginpage%26target=&style=25&low_login=1&proxy_url=https://mail.qq.com/proxy.html&need_qr=0&hide_border=1&border_radius=0&self_regurl=http://zc.qq.com/chs/index.html?type=1&app_id=11005?t=regist&pt_feedback_link=http://support.qq.com/discuss/350_1.shtml&css=https://res.mail.qq.com/zh_CN/htmledition/style/ptlogin_input24e6b9.css"
def __init__(self, username, passwd, proxy_ip=None, proxy_port=None):
"""
:param username: 用户名
:param passwd: 密码
:param proxy_ip: 访问QQ邮箱使用的IP, 为空是,默认选择本地IP
:param proxy_port: 当IP不为空是, 通过端口port与IP通信, 默认为3128, 就是代理服务squid的默认端口
"""
self.username = username
self.passwd = passwd
self.proxy_ip = proxy_ip
self.proxy_port = proxy_port or 31218
self.is_login = False
self.platform = platform
if self.platform == "win32":
self.geckopath = "F:\software\geckodriver\geckodriver.exe"
else:
self.geckopath = "/usr/bin/geckodriver"
def refresh(self):
log.info("refresh firefox, user: {}, proxy_ip: {}".format(self.username, self.proxy_ip))
self.driver.refresh()
def quit(self):
log.info("quit user: {}, proxy_ip: {}".format(self.username, self.proxy_ip))
try:
self.driver.quit()
except BaseException as e:
log.info(e)
if self.platform == "linux":
try:
self.display.stop()
except BaseException as e:
log.info(e)
def login(self):
self.set_driver()
self.set_login()
if self.set_login_check(timeout=1):
return True
self.set_login_verify()
if self.set_login_check(timeout=3):
return True
self.quit()
raise ValueError(u"不能登录QQ邮箱,重试")
def set_profile(self):
""" 设置代理 """
profile = None
if self.proxy_ip:
profile = webdriver.FirefoxProfile()
profile.set_preference('network.proxy.type', 1)
profile.set_preference('network.proxy.http', self.proxy_ip)
profile.set_preference('network.proxy.http_port', self.proxy_port)
profile.set_preference('network.proxy.ssl', self.proxy_ip)
profile.set_preference('network.proxy.ssl_port', self.proxy_port)
profile.update_preferences()
return profile
def set_driver(self):
""" 设置浏览器 """
try:
if self.platform == "linux":
self.display = Display(visible=0, size=(800, 600))
self.display.start()
self.driver = webdriver.Firefox(executable_path=self.geckopath, firefox_profile=self.set_profile())
self.driver.delete_all_cookies()
# 防止页面加载个没完
self.driver.set_page_load_timeout(300)
self.driver.implicitly_wait(10)
self.wait = WebDriverWait(self.driver, 30)
# 设置初始登录页面
self.driver.get(self.LOGIN_URL)
except BaseException as e:
self.quit()
log.error(traceback.format_exc())
raise LoginError("WebDriverException, can not set driver...")
def set_login(self):
""" 登录 """
try:
self.set_login_submit()
# 断言登陆成功
assert "退出" in self.driver.page_source
# self.driver.find_element_by_xpath('''//div[@id="newVcodeIframe"]/iframe[1]''')
except BaseException as e:
try:
log.info("login user: {}, retry login...".format(self.username))
self.set_login_submit()
except:
pass
def set_login_check(self, timeout=5):
""" 检测是否已经登录 """
index = 3
while index:
if self.driver.title.strip() == u"QQ邮箱":
self.is_login = True
return True
index -= 1
time.sleep(timeout)
return False
def set_login_submit(self):
""" 登录提交 """
self.driver.find_element_by_id("switcher_plogin").click()
# self.wait.until(EC.presence_of_element_located((By.ID, 'u')))
elem_user = self.driver.find_element_by_name("u")
elem_user.clear()
time.sleep(0.1)
elem_user.send_keys(self.username)
elem_pwd = self.driver.find_element_by_name("p")
elem_pwd.clear()
time.sleep(0.1)
elem_pwd.send_keys(self.passwd)
elem_but = self.driver.find_element_by_id("login_button")
# elem_pwd.send_keys(Keys.RETURN)
time.sleep(0.1)
elem_but.click()
def set_login_verify(self):
""" 遇到验证码登录 """
index = 3
while index:
try:
time.sleep(0.5)
log.info("get captcha_img user: {}, index: {}".format(self.username, index))
newVcodeIframe = self.driver.find_element_by_xpath('''//div[@id="newVcodeIframe"]/iframe[1]''')
self.driver.switch_to.frame(newVcodeIframe)
captcha_img = self.set_login_save_img('capImg')
rs, verify_code = get_qq_captcha_code(captcha_img)
log.info(
'login user: {} captcha_img: {}, verifycode: {}'.format(self.username, captcha_img, verify_code))
if not rs:
log.error('login user: {}, verify img fail'.format(self.username))
index -= 1
continue
ele_verifycode = self.driver.find_element_by_id("capAns")
ele_verifycode.send_keys(verify_code)
self.driver.find_element_by_id("submit").click()
except BaseException as e:
log.error('user: %s, verifycode err, msg: %s' % (self.username, e))
# log.error(traceback.format_exc())
index -= 1
if index == 1:
log.info("verify_login user: {}, retry login...".format(self.username))
self.set_login()
def set_login_save_img(self, imgid, uid=None):
""" 保存验证码 """
if not uid:
uid = str(uuid.uuid1())
screenshot_img = os.path.join(IMG_DIR, "screenshot_{}.png".format(uid))
captcha_img = os.path.join(IMG_DIR, "captcha_{}.png".format(uid))
self.driver.save_screenshot(screenshot_img)
img = self.driver.find_element_by_id(imgid)
loc = img.location
print("loc:")
print(loc)
image = cv2.imread(screenshot_img, True)
# roi = image[int(loc['y']):int(loc['y']) + 40, int(loc['x']):int(loc['x']) + 138]
roi = image[int(loc['y']):int(loc['y'])+48, int(loc['x']):int(loc['x'])+130]
cv2.imwrite(captcha_img, roi)
return captcha_img
@login_required
def check(self, addrs):
res = None
index = 3
while index:
try:
if index == 2: self.refresh()
if index == 1: time.sleep(5)
# 直接跳出所有frame
self.driver.switch_to.default_content()
# 点击写信
# self.wait.until(EC.presence_of_element_located((By.ID, 'composebtn')))
elem_but_w = self.driver.find_element_by_id("composebtn")
elem_but_w.click()
# 切换至右侧 主iframe
main_Frame1 = self.driver.find_element_by_id("mainFrame")
self.driver.switch_to.frame(main_Frame1)
# 发件人
check_addrs = "{};1@qq.com;".format(addrs) if addrs else "1@qq.com;"
self.driver.find_element_by_xpath('''//div[@id="toAreaCtrl"]/div[2]/input''').send_keys(check_addrs)
count = 30
while count:
_t = self.driver.find_element_by_xpath('''//div[@id="toAreaCtrl"]''')
errors = _t.find_elements_by_css_selector("div.addr_base.addr_error")
res = [e.text.strip().replace(";", "") for e in errors]
if res and res[-1] == '1@qq.com':
break
count -= 1
time.sleep(0.5)
index = 0
except BaseException as e:
log.error('user: %s, check err, msg: %s' % (self.username, e))
log.error(traceback.format_exc())
index -= 1
if res is None:
self.is_login = False
return res
@login_required
def send_email(self, addrs, subject, content, subtype="html"):
try:
self.driver.switch_to.default_content()
# 点击写信
# self.wait.until(EC.presence_of_element_located((By.ID, 'composebtn')))
elem_but_w = self.driver.find_element_by_id("composebtn")
elem_but_w.click()
# 切换至右侧 主iframe
main_Frame1 = self.driver.find_element_by_id("mainFrame")
self.driver.switch_to.frame(main_Frame1)
# 发件人
self.driver.find_element_by_xpath('''//div[@id="toAreaCtrl"]/div[2]/input''').send_keys(addrs)
# 输入主题
# self.driver.find_element_by_xpath('''//input[@id="subject"]''').send_keys(subject)
self.driver.find_element_by_id('subject').send_keys(subject)
# self.driver.find_element_by_xpath('''//input[@id="subject"]''').send_keys(subject)
# 输入正文
o = self.driver.find_elements_by_class_name("qmEditorIfrmEditArea")
o[0].click() # !!!!!!!must click!!!!!!!
o[0].send_keys(content)
time.sleep(1)
# 点击发送按钮
self.driver.find_element_by_xpath("//*[@id='toolbar']/div/a[1]").click()
# driver.find_element_by_xpath('//a[@name="sendbtn" and @tabindex="9"]').click()
time.sleep(3)
# 断言发送成功
assert "再写一封" in self.driver.page_source
except:
log.error("弹出验证框")
self.refresh()
return
try:
self.driver.switch_to.default_content()
log.error("弹出验证框")
# time.sleep(600)
captcha_img = self.set_login_save_img('QMVerify_QMDialog_verify_img_code')
rs, verify_code = get_qq_captcha_code(captcha_img)
log.info(
'send email user: {} captcha_img: {}, verifycode: {}'.format(
self.username, captcha_img, verify_code))
if not rs:
log.error('login user: {}, verify img fail'.format(self.username))
raise
ele_verifycode = self.driver.find_element_by_id("QMVerify_QMDialog_verifycodeinput")
ele_verifycode.send_keys(verify_code)
self.driver.find_element_by_id("QMVerify_QMDialog_btnConfirm").click()
time.sleep(3)
assert "再写一封" in self.driver.page_source
except:
log.error(traceback.format_exc())
self.is_login = False
time.sleep(3600)
# 关闭浏览器
self.quit()
if __name__ == "__main__":
v = QQRobot("2948906420@qq.com", "lanlan13266734099", None, None)
v.login()
# v.check("1248644045@qq.com,1@qq.com")
while 1:
subject, content, subtype = make_message()
v.send_email("2948906420@qq.com", subject, content, subtype)
log.info(subtype)
log.info(subject)
log.info(content)
time.sleep(5)
| [
"cheng.yang@salezoom.io"
] | cheng.yang@salezoom.io |
7bd962e4114a78c5aa9d3f87534c875261886917 | 13f33343e701fbfb4306c6835c24877e81dba12e | /backend/epic_kidz_3889/settings.py | 0e8f599bee2d8fae95582265c65cfb7a1d4d5a77 | [] | no_license | crowdbotics-apps/epic-kidz-3889 | 386f8b944b2c31438a6e5ae277c866ac0eb87921 | 64ced56bcffe1fa0e7d4d17de7b60e26ad1a7f91 | refs/heads/master | 2022-12-12T21:07:15.985176 | 2019-05-27T02:47:13 | 2019-05-27T02:47:13 | 188,760,034 | 0 | 0 | null | 2022-12-03T11:08:16 | 2019-05-27T02:47:10 | JavaScript | UTF-8 | Python | false | false | 4,752 | py | """
Django settings for epic_kidz_3889 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'epic_kidz_3889.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'epic_kidz_3889.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'epic_kidz_3889',
'USER': 'epic_kidz_3889',
'PASSWORD': 'epic_kidz_3889',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except:
pass
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
db04a848e4b84dbd17930a7c2f34b562f45e428c | b13a1a96e9f1dddb3a3a44b636ca939b85962899 | /LevelFive/template_project/app_template/views.py | 76972ad28a08916481e475c5e6e8f27f5d09afed | [] | no_license | jspw/Django-Test | f266331c73c34b83b1189811a163567b6b4cc60b | 13a6d0146c9c78f8fa03c269e4546b5bbdb146bd | refs/heads/master | 2021-03-23T17:50:21.764636 | 2020-10-18T09:21:23 | 2020-10-18T09:21:23 | 247,472,132 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | from django.shortcuts import render
from app_template.forms import UserProfileInfoForm,UserForm
from django.core import validators
from django import forms
#
from django.contrib.auth import authenticate,login,logout
from django.http import HttpResponseRedirect,HttpResponse
# from django.core.urlresolvers import reverse #django 2 removes urlresolvers
from django.urls import reverse
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
contest_dict = {'text':"Hello world!"}
return render(request,'app_template/index.html',contest_dict)
@login_required
def special(request):
return HttpResponse("You are loggedin , Nice!")
@login_required
def user_logout(request):
logout(request)
return HttpResponseRedirect(reverse('index'))
def basic(request):
return render(request,'app_template/basic.html')
def other(request):
return render(request,'app_template/other.html')
def relateive_template(request):
return render(
request,
'app_template/relative_url_template.html'
)
def signupform(request):
registered = False
if request.method == "POST":
user_form = UserForm(data=request.POST)
profile_form = UserProfileInfoForm(data=request.POST)
if user_form.is_valid() and profile_form.is_valid():
user = user_form.save()
user.set_password(user.password) #hashing the password
user.save()
profile = profile_form.save(commit=False)
profile.user = user
if 'profile_pic' in request.FILES:
profile.profile_pic = request.FILES['profile_pic']
profile.save()
registered = True
else :
print(user_form.errors,profile_form.errors)
else :
user_form = UserForm()
profile_form = UserProfileInfoForm()
return render(
request,
'app_template/signup.html',
{
'user_form':user_form,
'profile_form':profile_form,
'registered':registered,
}
)
def user_login(request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username = username,password=password)
if user :
if user.is_active:
login(request,user)
return HttpResponseRedirect(reverse('index'))
else :
return HttpResponse("Account is not Active")
else :
print("Someone tried to login and failed")
print("Username : {} and password {}".format(username,password))
return HttpResponse("Invalid login detailed supplied")
else :
return render(request,'app_template/login.html')
| [
"mhshifat757@gmail.com"
] | mhshifat757@gmail.com |
66a1617fd944f84ba67cfff2a6a9a9b743131465 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/util/state/FunctionAnalyzer.pyi | 8ef7e534299b278b57e27b3297e6d55cfed74262 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,284 | pyi | from typing import List
import ghidra.program.model.address
import ghidra.program.model.pcode
import ghidra.program.model.symbol
import ghidra.util.state
import ghidra.util.task
import java.lang
class FunctionAnalyzer(object):
def dataReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, storageVarnode: ghidra.program.model.pcode.Varnode, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
"""
Callback indicating that an absolute memory reference was encountered
@param op pcode operation
@param instrOpIndex opIndex associated with reference or -1 if it could not be determined
@param storageVarnode absolute storage Varnode
@param refType read/write/data reference type
@param monitor task monitor
@throws CancelledException if callback canceled by monitor
"""
...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def indirectDataReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, offsetVarnode: ghidra.program.model.pcode.Varnode, size: int, storageSpaceID: int, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
"""
Callback indicating that an indirect/computed memory reference was encountered using an indirect/computed offset
@param op pcode operation
@param instrOpIndex opIndex associated with reference or -1 if it could not be determined
@param offsetVarnode indirect/computed offset
@param size access size or -1 if not applicable
@param storageSpaceID storage space ID
@param refType read/write/data reference type
@param monitor task monitor
@throws CancelledException if callback canceled by monitor
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def resolvedFlow(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, destAddr: ghidra.program.model.address.Address, currentState: ghidra.util.state.ContextState, results: ghidra.util.state.ResultsState, monitor: ghidra.util.task.TaskMonitor) -> bool:
"""
Callback indicating that a call/branch destination was identified.
Analyzer should create reference if appropriate
Keep in mind that there could be other unidentified destinations.
@param op branch or call flow operation
@param instrOpIndex opIndex associated with reference or -1 if it could not be determined
@param destAddr destination address
@param results contains previous states leading upto the currentState
@param currentState current state at the branch/call
@param monitor task monitor
@return true if destination should be disassembled if not already
@throws CancelledException if callback canceled by monitor
"""
...
@overload
def stackReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, stackOffset: int, size: int, storageSpaceID: int, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
"""
Callback indicating that an absolute stack reference was encountered. A non-load/store
operation will have a -1 for both storageSpaceId and size.
@param op pcode operation
@param instrOpIndex opIndex associated with reference or -1 if it could not be determined
@param stackOffset stack offset
@param size access size or -1 if not applicable
@param storageSpaceID storage space ID or -1 if not applicable
@param refType read/write/data reference type
@param monitor task monitor
@throws CancelledException if callback canceled by monitor
"""
...
@overload
def stackReference(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, computedStackOffset: ghidra.util.state.VarnodeOperation, size: int, storageSpaceID: int, refType: ghidra.program.model.symbol.RefType, monitor: ghidra.util.task.TaskMonitor) -> None:
"""
Callback indicating that a computed stack reference was encountered. A non-load/store
operation will have a -1 for both storageSpaceId and size.
@param op pcode operation
@param instrOpIndex opIndex associated with reference or -1 if it could not be determined
@param computedStackOffset stack offset computation (i.e., VarnodeOperation w/ stack pointer)
@param size access size or -1 if not applicable
@param storageSpaceID storage space ID or -1 if not applicable
@param refType read/write/data reference type
@param monitor task monitor
@throws CancelledException if callback canceled by monitor
"""
...
def toString(self) -> unicode: ...
def unresolvedIndirectFlow(self, op: ghidra.program.model.pcode.PcodeOp, instrOpIndex: int, destination: ghidra.program.model.pcode.Varnode, currentState: ghidra.util.state.ContextState, results: ghidra.util.state.ResultsState, monitor: ghidra.util.task.TaskMonitor) -> List[ghidra.program.model.address.Address]:
"""
Callback indicating that a computed call/branch destination was not resolved.
@param op indirect branch or call flow operation
@param instrOpIndex opIndex associated with reference or -1 if it could not be determined
@param destination destination identified as a Varnode (may be an expression represented by
a {@link VarnodeOperation}
@param results contains previous states leading upto the currentState
@param currentState current state at the branch/call
@param monitor task monitor
@return list of resolved destinations which should be used or null. List of destination
addresses will trigger disassembly where necessary.
@throws CancelledException if callback cancelled by monitor
"""
...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
3fb9d6f478528789a6f211aea81aac01dd9a6fe1 | b7eb41b068614e04f38a969326f43d8f8119cb05 | /897__increasing_order_search_tree.py | 82e08117e36c06d7d355bd81c0c774907a48e697 | [] | no_license | YI-DING/daily-leetcode | ddfb6985bf5014886cba8d6219da243e0aa28d71 | a6d3898d900f2063302dc1ffc3dafd61eefa79b7 | refs/heads/master | 2020-05-19T06:07:21.557077 | 2019-07-19T16:31:46 | 2019-07-19T16:31:46 | 184,866,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,475 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def increasingBST(self, root: TreeNode):
print(f'{root.val} is what we are examining. Its left is {None if not root.left else root.left.val} and right is {None if not root.right else root.right.val}')
if not root:return root
if root.right:
print(f'we are iBSTing {root.right.val}')
root.right=Solution.increasingBST(self,root.right)
if not root.left:
print(f'{root.val} is done iBST')
return root
if not root.left.right:
root.left.right=root
print(f'we have lifted {root.left.val} and planted {root.val} to its right')
return root.left
left_subtree_right=root.left.right
while True:
if not left_subtree_right.right:
left_subtree_right.right=root
print(f'we have planted {root.val} to the right of {left_subtree_right.val}')
return Solution.increasingBST(self,root.left)
left_subtree_right=left_subtree_right.right
def increasingBST(self, root, tail = None):
if not root: return tail
res = self.increasingBST(root.left, root)
root.left = None
root.right = self.increasingBST(root.right, tail)
return res | [
"yiding1@uchicago.edu"
] | yiding1@uchicago.edu |
05d317851bc0a3a46cc148bd399a725b7cd60215 | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/neutron/plugins/ml2/ovo_rpc.py | ec7d84d2665cd4773f8a00eb74734e919975206b | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 6,363 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import traceback
import eventlet
from oslo_concurrency import lockutils
from oslo_log import log as logging
from neutron._i18n import _LE
from neutron.api.rpc.callbacks import events as rpc_events
from neutron.api.rpc.handlers import resources_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron import context as n_ctx
from neutron.db import api as db_api
from neutron.objects import network
from neutron.objects import ports
from neutron.objects import securitygroup
from neutron.objects import subnet
LOG = logging.getLogger(__name__)
class _ObjectChangeHandler(object):
    # Subscribes to AFTER_CREATE/UPDATE/DELETE registry events for one
    # resource type and pushes the latest DB state of the object to agents
    # over the resources push RPC API.

    def __init__(self, resource, object_class, resource_push_api):
        # resource: callback-registry resource name this handler watches
        # object_class: OVO class used to reload the freshest state from DB
        # resource_push_api: RPC API used to fan the object out to agents
        self._resource = resource
        self._obj_class = object_class
        self._resource_push_api = resource_push_api
        # Map of resource_id -> serialized request context of the event
        # that most recently touched it; drained by dispatch_events().
        self._resources_to_push = {}
        self._worker_pool = eventlet.GreenPool()
        for event in (events.AFTER_CREATE, events.AFTER_UPDATE,
                      events.AFTER_DELETE):
            registry.subscribe(self.handle_event, resource, event)

    def wait(self):
        """Waits for all outstanding events to be dispatched."""
        self._worker_pool.waitall()

    @staticmethod
    def _is_session_semantic_violated(context, resource, event):
        """Return True and print an ugly error on transaction violation.

        This code is to print ugly errors when AFTER_CREATE/UPDATE
        event transaction semantics are violated by other parts of
        the code.
        """
        # An active session means the AFTER_* event fired while the
        # transaction was still open, i.e. before commit.
        if not context.session.is_active:
            return False
        stack = traceback.extract_stack()
        stack = "".join(traceback.format_list(stack))
        LOG.error(_LE("This handler is supposed to handle AFTER "
                      "events, as in 'AFTER it's committed', "
                      "not BEFORE. Offending resource event: "
                      "%(r)s, %(e)s. Location:\n%(l)s"),
                  {'r': resource, 'e': event, 'l': stack})
        return True

    def handle_event(self, resource, event, trigger,
                     context, *args, **kwargs):
        """Callback handler for resource change that pushes change to RPC.

        We always retrieve the latest state and ignore what was in the
        payload to ensure that we don't get any stale data.
        """
        if self._is_session_semantic_violated(context, resource, event):
            return
        resource_id = self._extract_resource_id(kwargs)
        # we preserve the context so we can trace a receive on the agent back
        # to the server-side event that triggered it
        self._resources_to_push[resource_id] = context.to_dict()
        # spawn worker so we don't block main AFTER_UPDATE thread
        self._worker_pool.spawn(self.dispatch_events)

    @lockutils.synchronized('event-dispatch')
    def dispatch_events(self):
        # this is guarded by a lock to ensure we don't get too many concurrent
        # dispatchers hitting the database simultaneously.
        # Atomically swap out the pending map so events queued while we run
        # are handled by the next dispatcher.
        to_dispatch, self._resources_to_push = self._resources_to_push, {}
        # TODO(kevinbenton): now that we are batching these, convert to a
        # single get_objects call for all of them
        for resource_id, context_dict in to_dispatch.items():
            context = n_ctx.Context.from_dict(context_dict)
            # attempt to get regardless of event type so concurrent delete
            # after create/update is the same code-path as a delete event
            with db_api.context_manager.independent.reader.using(context):
                obj = self._obj_class.get_object(context, id=resource_id)
            # CREATE events are always treated as UPDATE events to ensure
            # listeners are written to handle out-of-order messages
            if obj is None:
                rpc_event = rpc_events.DELETED
                # construct a fake object with the right ID so we can
                # have a payload for the delete message.
                obj = self._obj_class(id=resource_id)
            else:
                rpc_event = rpc_events.UPDATED
            LOG.debug("Dispatching RPC callback event %s for %s %s.",
                      rpc_event, self._resource, resource_id)
            self._resource_push_api.push(context, [obj], rpc_event)

    def _extract_resource_id(self, callback_kwargs):
        # Callbacks may pass either a '<resource>_id' kwarg or the full
        # '<resource>' dict; accept both forms.
        id_kwarg = '%s_id' % self._resource
        if id_kwarg in callback_kwargs:
            return callback_kwargs[id_kwarg]
        if self._resource in callback_kwargs:
            return callback_kwargs[self._resource]['id']
        raise RuntimeError("Couldn't find resource ID in callback event")
class OVOServerRpcInterface(object):
    """ML2 server-side RPC interface.

    Generates RPC callback notifications on ML2 object changes.
    """

    def __init__(self):
        self._rpc_pusher = resources_rpc.ResourcesPushRpcApi()
        self._setup_change_handlers()
        LOG.debug("ML2 OVO RPC backend initialized.")

    def _setup_change_handlers(self):
        """Setup all of the local callback listeners for resource changes."""
        # (resource name, OVO class) pairs that get a change handler each.
        watched = (
            (resources.PORT, ports.Port),
            (resources.SUBNET, subnet.Subnet),
            (resources.NETWORK, network.Network),
            (resources.SECURITY_GROUP, securitygroup.SecurityGroup),
            (resources.SECURITY_GROUP_RULE, securitygroup.SecurityGroupRule),
        )
        self._resource_handlers = {}
        for res_name, obj_cls in watched:
            self._resource_handlers[res_name] = _ObjectChangeHandler(
                res_name, obj_cls, self._rpc_pusher)

    def wait(self):
        """Wait for all handlers to finish processing async events."""
        for change_handler in self._resource_handlers.values():
            change_handler.wait()
| [
"jcdiaztorres96@gmail.com"
] | jcdiaztorres96@gmail.com |
04054e64b66cefba84ca094869841104b29f8fdb | 14d8adc86adc14c1d64a5550b1bbd5663e984545 | /链条/reverse_linked_list.py | 2a4d84cb454b930a26ef0694c55d5098cf8338e6 | [] | no_license | milllu/leetcode | e1b68ef7774cc0c1b49325ec1b87280d27570d94 | 458b3e72cd82a203b10bdca747c4c3ba85708f75 | refs/heads/master | 2020-03-30T23:41:46.180308 | 2018-10-11T01:08:31 | 2018-10-11T01:08:31 | 151,709,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | """
反转一个单链表。
示例:
输入: 1->2->3->4->5->NULL
输出: 5->4->3->2->1->NULL
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseList(self, head):
        """Iteratively reverse a singly linked list; return the new head."""
        prev = None
        node = head
        while node:
            # RHS is evaluated first, so this relinks the current node and
            # advances in a single tuple assignment.
            node.next, prev, node = prev, node, node.next
        return prev

    def reverseList2(self, head):
        """Recursively reverse a singly linked list; return the new head."""
        def _reverse(node, acc):
            # acc accumulates the already-reversed prefix.
            if node is None:
                return acc
            tail = node.next
            node.next = acc
            return _reverse(tail, node)
        return _reverse(head, None)
| [
"3351440959@qq.com"
] | 3351440959@qq.com |
6b78d7a78da0104dfaf801b0a3d4c88df879eda6 | d68766d43725c4ba69b34a408f6aff9868eb0fc7 | /minimal_mypy_import/__init__.py | ac6633309814c3bd808d3a526d310f2b07505cad | [] | no_license | saulshanabrook/minimal_mypy_import | f04e347519ccef405c58f5c89106a58dd630f16a | a7f27c751ae9f1062ae926bf0d71aabad083eea8 | refs/heads/master | 2020-04-21T22:37:47.124351 | 2019-02-09T22:04:32 | 2019-02-09T22:04:32 | 169,918,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | from .a import *
a_value
| [
"s.shanabrook@gmail.com"
] | s.shanabrook@gmail.com |
cb474534c6555535ead6b6ef459893799675e547 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /python/ray/tune/tests/test_trial_executor_inheritance.py | 0ced35aff17c1af561f0108d7e295cdf648f720c | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 1,700 | py | import unittest
class TestTrialExecutorInheritance(unittest.TestCase):
    # Verifies ray.tune's deprecation policy for TrialExecutor:
    # subclassing it directly must raise a DeprecationWarning, while
    # inheriting indirectly through RayTrialExecutor must stay allowed.

    def test_direct_inheritance_not_ok(self):
        from ray.tune.trial_executor import TrialExecutor
        msg = (
            "_MyTrialExecutor inherits from TrialExecutor, which is being "
            "deprecated. "
            "RFC: https://github.com/ray-project/ray/issues/17593. "
            "Please reach out on the Ray Github if you have any concerns."
        )
        # The warning fires at class-creation time, so the subclass is
        # deliberately defined inside the assertRaisesRegex context.
        with self.assertRaisesRegex(DeprecationWarning, msg):

            class _MyTrialExecutor(TrialExecutor):
                # Minimal stub implementations of the abstract interface;
                # their bodies are irrelevant to the deprecation check.
                def __init__(self):
                    pass

                def start_trial(self, trial):
                    return True

                def stop_trial(self, trial):
                    pass

                def restore(self, trial):
                    pass

                def save(self, trial):
                    return None

                def reset_trial(self, trial, new_config, new_experiment_tag):
                    return False

                def debug_string(self):
                    return "This is a debug string."

                def export_trial_if_needed(self):
                    return {}

                def fetch_result(self):
                    return []

                def get_next_available_trial(self):
                    return None

                def get_running_trials(self):
                    return []

    def test_indirect_inheritance_ok(self):
        from ray.tune.ray_trial_executor import RayTrialExecutor

        # Neither a direct nor a second-level subclass of RayTrialExecutor
        # should trigger the deprecation machinery.
        class _MyRayTrialExecutor(RayTrialExecutor):
            pass

        class _AnotherMyRayTrialExecutor(_MyRayTrialExecutor):
            pass
| [
"noreply@github.com"
] | pdames.noreply@github.com |
3508427df51b9b799e24ea86dfef9e9c939e0510 | 9d5c9d9373002ab4ed1b493136517e8b4ab160e5 | /saas/backend/apps/role/filters.py | f626398fc2b1aff6f2a1f372c0d99597f01bf2aa | [
"MIT"
] | permissive | robert871126/bk-iam-saas | f8299bb632fc853ef0131d445f84c6084fc84aba | 33c8f4ffe8697081abcfc5771b98a88c0578059f | refs/heads/master | 2023-08-23T19:23:01.987394 | 2021-10-22T09:45:28 | 2021-10-22T09:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django_filters import rest_framework as filters
from backend.apps.role.models import Role, RoleCommonAction
class RatingMangerFilter(filters.FilterSet):
    """Filter set for Role querysets; supports case-insensitive name search."""
    # NOTE(review): "Manger" looks like a misspelling of "Manager", but the
    # class name is imported elsewhere, so it is kept unchanged here.
    name = filters.CharFilter(lookup_expr="icontains", label="名称")

    class Meta:
        model = Role
        fields = ["name"]
class RoleCommonActionFilter(filters.FilterSet):
    """Filter set for RoleCommonAction querysets; exact match on system_id."""
    system_id = filters.CharFilter(label="系统id")

    class Meta:
        model = RoleCommonAction
        fields = ["system_id"]
| [
"zhu327@gmail.com"
] | zhu327@gmail.com |
1beee7a48a2061f0237ff1299fb0a91d09dcbc80 | c85aede0797e73dd719646a0f7671594b0d4e4e9 | /sbin/coveragerc_manager.py | 9292485b449a1bbfe9fe5d1bd624d95e573d4a61 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mtiid/putil | c0493535ed5ee7694546ee9193cad0a764c440fc | a99c84ee781aa9eb6e45272f95b82ac35648ba4b | refs/heads/master | 2021-01-18T09:05:50.437577 | 2016-01-20T16:01:12 | 2016-01-20T16:01:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,345 | py | #!/usr/bin/env python
# coveragerc_manager.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111
# Standard library imports
from __future__ import print_function
import os
import sys
###
# Global variables
###
SUBMODULES_LIST = ['plot', 'pcsv']
###
# Functions
###
def _write(fobj, data):
""" Simple file write """
fobj.write(data)
def get_source_files(sdir):
    """Return the Python source file names in *sdir* worth measuring.

    Excludes ``__init__.py``, anything ending in ``conftest.py`` and the
    compat shim for the *other* interpreter (``compat3.py`` under Python 2,
    ``compat2.py`` under Python 3).
    """
    # Version number of the compat shim that does NOT apply to the
    # running interpreter (and must therefore be excluded).
    excluded_ver = 3 if sys.hexversion < 0x03000000 else 2
    # str.endswith accepts a tuple of suffixes; this replaces the original
    # any([...]) over a materialized list.
    excluded_suffixes = ('conftest.py', 'compat{0}.py'.format(excluded_ver))
    return [
        file_name
        for file_name in os.listdir(sdir)
        if file_name.endswith('.py')
        and file_name != '__init__.py'
        and not file_name.endswith(excluded_suffixes)
    ]
def main(argv):
    """Create (mode_flag == '1') or remove (any other mode_flag) the
    interpreter-specific .coveragerc and conftest.py files that drive
    coverage runs.

    argv layout: [env, mode_flag, interp, ...] where env is one of
    'tox', 'ci' or 'local' and the remaining positional arguments are
    environment-specific (see the unpacking branches below).
    """
    # pylint: disable=R0912,R0914,R0915,W0702
    debug = True
    env = argv[0].strip('"').strip("'")
    # Unpack command line arguments
    print('Coverage manager')
    print('Arguments received: {0}'.format(argv))
    if env == 'tox':
        print('Tox mode')
        if len(argv[1:]) == 4:
            # 4 args: the last one is the site-packages directory; cover
            # every sub-module.
            mode_flag, interp, _, site_pkg_dir, submodules, module = (
                argv[1:]+[SUBMODULES_LIST, '']
            )
            print(' mode_flag: {0}'.format(mode_flag))
            print(' interp: {0}'.format(interp))
            print(' site_pkg_dir: {0}'.format(site_pkg_dir))
            print(' submodules: {0}'.format(submodules))
            print(' module: {0}'.format(module))
        else:
            # 3 args: no site-packages directory given; site_pkg_dir,
            # submodules stay unset on this path (mode_flag must be '0').
            mode_flag, interp, _, module = argv[1:]+['']
            print(' mode_flag: {0}'.format(mode_flag))
            print(' interp: {0}'.format(interp))
            print(' module: {0}'.format(module))
    elif env == 'ci':
        print('Continuous integration mode')
        # Repository location comes from the CI environment, not argv.
        mode_flag, interp, _, site_pkg_dir, submodules, module = (
            argv[1],
            argv[2],
            os.environ['REPO_DIR'],
            argv[3],
            SUBMODULES_LIST,
            ''
        )
        print(' mode_flag: {0}'.format(mode_flag))
        print(' interp: {0}'.format(interp))
        print(' site_pkg_dir: {0}'.format(site_pkg_dir))
        print(' submodules: {0}'.format(submodules))
        print(' module: {0}'.format(module))
    elif env == 'local':
        print('Local mode')
        if len(argv[1:]) == 4:
            # 4 args: the last one names a single module/sub-module to cover.
            mode_flag, interp, _, site_pkg_dir, submodules, module = (
                argv[1],
                argv[2],
                argv[3],
                argv[3],
                [argv[4]],
                argv[4]
            )
        else:
            mode_flag, interp, _, site_pkg_dir, submodules, module = (
                argv[1],
                argv[2],
                argv[3],
                argv[3],
                [''],
                ''
            )
        print(' mode_flag: {0}'.format(mode_flag))
        print(' interp: {0}'.format(interp))
        print(' site_pkg_dir: {0}'.format(site_pkg_dir))
        print(' submodules: {0}'.format(submodules))
        print(' module: {0}'.format(module))
    # Generate .coveragerc file
    is_submodule = module in SUBMODULES_LIST
    source_dir = os.path.join(site_pkg_dir, 'putil')
    # Both generated file names embed env/interpreter so parallel runs
    # (e.g. tox envs) do not clobber each other.
    output_file_name = os.path.join(
        site_pkg_dir,
        'putil',
        '.coveragerc_{0}_{1}'.format(env, interp)
    )
    coverage_file_name = os.path.join(
        site_pkg_dir, 'putil', '.coverage_{0}'.format(interp)
    )
    conf_file = []
    conf_file.append(os.path.join(source_dir, 'conftest.py'))
    conf_file.append(os.path.join(source_dir, 'plot', 'conftest.py'))
    if mode_flag == '1':
        # Creation mode: assemble the .coveragerc contents line by line.
        lines = []
        lines.append(
            '# .coveragerc_{0} to control coverage.py during {1} runs'.format(
                env,
                env.capitalize()
            )
        )
        lines.append('[report]')
        lines.append('show_missing = True')
        lines.append('[run]')
        lines.append('branch = True')
        lines.append('data_file = {0}'.format(coverage_file_name))
        # start_flag tracks whether the next path is the first 'include ='
        # entry or a continuation line (aligned with 10 spaces).
        start_flag = True
        # Include modules
        source_files = get_source_files(os.path.join(site_pkg_dir, 'putil'))
        for file_name in [
                item
                for item in source_files
                if (env != 'local') or ((env == 'local') and
                (not is_submodule) and (item == '{0}.py'.format(module)))]:
            if file_name.endswith('version.py'):
                continue
            start_flag, prefix = (
                (False, 'include = ')
                if start_flag else
                (False, 10*' ')
            )
            lines.append(
                '{0}{1}'.format(prefix, os.path.join(
                    site_pkg_dir,
                    'putil',
                    file_name
                )))
        # Include sub-modules
        if (env != 'local') or ((env == 'local') and is_submodule):
            for submodule in submodules:
                for file_name in [
                        item
                        for item in get_source_files(os.path.join(
                            site_pkg_dir,
                            'putil',
                            submodule))]:
                    start_flag, prefix = (
                        (False, 'include = ')
                        if start_flag else
                        (False, 10*' ')
                    )
                    lines.append('{0}{1}'.format(prefix, os.path.join(
                        site_pkg_dir,
                        'putil',
                        submodule,
                        file_name
                    )))
        # Generate XML reports for continuous integration
        if env == 'ci':
            lines.append('[xml]')
            lines.append('output = {0}'.format(os.path.join(
                os.environ['RESULTS_DIR'],
                'codecoverage',
                'coverage.xml'
            )))
        # Write file
        with open(output_file_name, 'w') as fobj:
            _write(fobj, '\n'.join(lines))
        # Echo file
        if debug:
            print('File: {0}'.format(output_file_name))
            with open(output_file_name, 'r') as fobj:
                print(''.join(fobj.readlines()))
        # Generate conftest.py files to selectively
        # skip Python 2 or Python 3 files
        skip_file = (
            "# pylint: disable=E0012,C0103,C0111,C0411\n"
            "import sys\n"
            "import matplotlib\n"
            "matplotlib.rcParams['backend'] = 'Agg'\n"
            "collect_ignore = []\n"
            "if sys.hexversion < 0x03000000:\n"
            " collect_ignore.append('compat3.py')\n"
            "else:\n"
            " collect_ignore.append('compat2.py')\n"
        )
        with open(conf_file[0], 'w') as fobj:
            _write(fobj, skip_file)
    else:
        # Cleanup mode: best-effort removal of every generated file.
        del_files = conf_file
        del_files.append(output_file_name)
        del_files.append(coverage_file_name)
        try:
            for fname in del_files:
                print('Deleting file {0}'.format(fname))
                os.remove(fname)
        except:
            # NOTE(review): bare except deliberately swallows errors
            # (e.g. missing files) so cleanup never fails the run; it also
            # stops at the first failure, leaving later files in place.
            pass
if __name__ == '__main__':
    # Script entry point: the first CLI argument selects the environment
    # ('tox', 'ci' or 'local'); the rest are environment-specific.
    main(sys.argv[1:])
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
02819e4ca0fafdcbf588137cb8f2f08f7272563b | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/beta/financials_beta/azext_financials_beta/vendored_sdks/financials/operations/_financial_company_purchase_invoice_vendor_operations.py | 4c592ab1e83677392acd8cf0a20451bc68ba9288 | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,208 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, IO, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FinancialCompanyPurchaseInvoiceVendorOperations(object):
"""FinancialCompanyPurchaseInvoiceVendorOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~financials.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # msrest serializer/deserializer for request and response payloads.
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def get_currency(
    self,
    company_id,  # type: str
    purchase_invoice_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum166"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphCurrency"
    """Get currency from financials.

    Get currency from financials.

    :param company_id: key: id of company.
    :type company_id: str
    :param purchase_invoice_id: key: id of purchaseInvoice.
    :type purchase_invoice_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~financials.models.Enum166]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphCurrency, or the result of cls(response)
    :rtype: ~financials.models.MicrosoftGraphCurrency
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphCurrency"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL
    url = self.get_currency.metadata['url']  # type: ignore
    path_format_arguments = {
        'company-id': self._serialize.url("company_id", company_id, 'str'),
        'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    # FIX: the generated code assigned 'Accept' twice (serialized value,
    # then the identical literal); the redundant second assignment was
    # removed -- the resulting header value is unchanged.
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphCurrency', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_currency.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/currency'}  # type: ignore
def update_currency(
    self,
    company_id,  # type: str
    purchase_invoice_id,  # type: str
    id=None,  # type: Optional[str]
    amount_decimal_places=None,  # type: Optional[str]
    amount_rounding_precision=None,  # type: Optional[float]
    code=None,  # type: Optional[str]
    display_name=None,  # type: Optional[str]
    last_modified_date_time=None,  # type: Optional[datetime.datetime]
    symbol=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property currency in financials.

    Update the navigation property currency in financials.

    :param company_id: key: id of company.
    :type company_id: str
    :param purchase_invoice_id: key: id of purchaseInvoice.
    :type purchase_invoice_id: str
    :param id: Read-only.
    :type id: str
    :param amount_decimal_places:
    :type amount_decimal_places: str
    :param amount_rounding_precision:
    :type amount_rounding_precision: float
    :param code:
    :type code: str
    :param display_name:
    :type display_name: str
    :param last_modified_date_time:
    :type last_modified_date_time: ~datetime.datetime
    :param symbol:
    :type symbol: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Collapse the flattened keyword arguments into the request model.
    _body = models.MicrosoftGraphCurrency(id=id, amount_decimal_places=amount_decimal_places, amount_rounding_precision=amount_rounding_precision, code=code, display_name=display_name, last_modified_date_time=last_modified_date_time, symbol=symbol)
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_currency.metadata['url']  # type: ignore
    path_format_arguments = {
        'company-id': self._serialize.url("company_id", company_id, 'str'),
        'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(_body, 'MicrosoftGraphCurrency')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # PATCH success is 204 No Content; anything else maps to a typed error.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

update_currency.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/currency'}  # type: ignore
def delete_currency(
    self,
    company_id,  # type: str
    purchase_invoice_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property currency for financials.

    Delete navigation property currency for financials.

    :param company_id: key: id of company.
    :type company_id: str
    :param purchase_invoice_id: key: id of purchaseInvoice.
    :type purchase_invoice_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Fill the URL template from the method metadata.
    path_args = {
        'company-id': self._serialize.url("company_id", company_id, 'str'),
        'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
    }
    url = self._client.format_url(
        self.delete_currency.metadata['url'], **path_args)  # type: ignore

    # No query parameters for a delete; only conditional/accept headers.
    query_parameters = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete_currency.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/currency'}  # type: ignore
def get_payment_method(
    self,
    company_id,  # type: str
    purchase_invoice_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum167"]]]
    expand=None,  # type: Optional[List[str]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPaymentMethod"
    """Get paymentMethod from financials.

    Get paymentMethod from financials.

    :param company_id: key: id of company.
    :type company_id: str
    :param purchase_invoice_id: key: id of purchaseInvoice.
    :type purchase_invoice_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~financials.models.Enum167]
    :param expand: Expand related entities.
    :type expand: list[str]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPaymentMethod, or the result of cls(response)
    :rtype: ~financials.models.MicrosoftGraphPaymentMethod
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPaymentMethod"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL
    url = self.get_payment_method.metadata['url']  # type: ignore
    path_format_arguments = {
        'company-id': self._serialize.url("company_id", company_id, 'str'),
        'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    # FIX: the generated code assigned 'Accept' twice (serialized value,
    # then the identical literal); the redundant second assignment was
    # removed -- the resulting header value is unchanged.
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPaymentMethod', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_payment_method.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/paymentMethod'}  # type: ignore
def update_payment_method(
    self,
    company_id,  # type: str
    purchase_invoice_id,  # type: str
    id=None,  # type: Optional[str]
    code=None,  # type: Optional[str]
    display_name=None,  # type: Optional[str]
    last_modified_date_time=None,  # type: Optional[datetime.datetime]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property paymentMethod in financials.

    Update the navigation property paymentMethod in financials.

    :param company_id: key: id of company.
    :type company_id: str
    :param purchase_invoice_id: key: id of purchaseInvoice.
    :type purchase_invoice_id: str
    :param id: Read-only.
    :type id: str
    :param code:
    :type code: str
    :param display_name:
    :type display_name: str
    :param last_modified_date_time:
    :type last_modified_date_time: ~datetime.datetime
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Collapse the flattened keyword arguments into the request model.
    _body = models.MicrosoftGraphPaymentMethod(id=id, code=code, display_name=display_name, last_modified_date_time=last_modified_date_time)
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_payment_method.metadata['url']  # type: ignore
    path_format_arguments = {
        'company-id': self._serialize.url("company_id", company_id, 'str'),
        'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(_body, 'MicrosoftGraphPaymentMethod')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # PATCH success is 204 No Content; anything else maps to a typed error.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

update_payment_method.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/paymentMethod'}  # type: ignore
def delete_payment_method(
    self,
    company_id,  # type: str
    purchase_invoice_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property paymentMethod for financials.

    Delete navigation property paymentMethod for financials.

    :param company_id: key: id of company.
    :type company_id: str
    :param purchase_invoice_id: key: id of purchaseInvoice.
    :type purchase_invoice_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Fill the URL template from the method metadata.
    path_args = {
        'company-id': self._serialize.url("company_id", company_id, 'str'),
        'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
    }
    url = self._client.format_url(
        self.delete_payment_method.metadata['url'], **path_args)  # type: ignore

    # No query parameters for a delete; only conditional/accept headers.
    query_parameters = {}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    headers['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete_payment_method.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/paymentMethod'}  # type: ignore
def get_payment_term(
self,
company_id, # type: str
purchase_invoice_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum168"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPaymentTerm"
"""Get paymentTerm from financials.
Get paymentTerm from financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param select: Select properties to be returned.
:type select: list[str or ~financials.models.Enum168]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPaymentTerm, or the result of cls(response)
:rtype: ~financials.models.MicrosoftGraphPaymentTerm
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPaymentTerm"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_payment_term.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPaymentTerm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_payment_term.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/paymentTerm'} # type: ignore
def update_payment_term(
self,
company_id, # type: str
purchase_invoice_id, # type: str
id=None, # type: Optional[str]
calculate_discount_on_credit_memos=None, # type: Optional[bool]
code=None, # type: Optional[str]
discount_date_calculation=None, # type: Optional[str]
discount_percent=None, # type: Optional[float]
display_name=None, # type: Optional[str]
due_date_calculation=None, # type: Optional[str]
last_modified_date_time=None, # type: Optional[datetime.datetime]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property paymentTerm in financials.
Update the navigation property paymentTerm in financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param id: Read-only.
:type id: str
:param calculate_discount_on_credit_memos:
:type calculate_discount_on_credit_memos: bool
:param code:
:type code: str
:param discount_date_calculation:
:type discount_date_calculation: str
:param discount_percent:
:type discount_percent: float
:param display_name:
:type display_name: str
:param due_date_calculation:
:type due_date_calculation: str
:param last_modified_date_time:
:type last_modified_date_time: ~datetime.datetime
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphPaymentTerm(id=id, calculate_discount_on_credit_memos=calculate_discount_on_credit_memos, code=code, discount_date_calculation=discount_date_calculation, discount_percent=discount_percent, display_name=display_name, due_date_calculation=due_date_calculation, last_modified_date_time=last_modified_date_time)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_payment_term.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphPaymentTerm')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_payment_term.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/paymentTerm'} # type: ignore
def delete_payment_term(
self,
company_id, # type: str
purchase_invoice_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property paymentTerm for financials.
Delete navigation property paymentTerm for financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_payment_term.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_payment_term.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/paymentTerm'} # type: ignore
def list_picture(
self,
company_id, # type: str
purchase_invoice_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum169"]]]
select=None, # type: Optional[List[Union[str, "models.Enum170"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfPicture7"]
"""Get picture from financials.
Get picture from financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~financials.models.Enum169]
:param select: Select properties to be returned.
:type select: list[str or ~financials.models.Enum170]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfPicture7 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~financials.models.CollectionOfPicture7]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfPicture7"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_picture.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfPicture7', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_picture.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture'} # type: ignore
def create_picture(
self,
company_id, # type: str
purchase_invoice_id, # type: str
id=None, # type: Optional[str]
content=None, # type: Optional[bytes]
content_type_parameter=None, # type: Optional[str]
height=None, # type: Optional[int]
width=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPicture"
"""Create new navigation property to picture for financials.
Create new navigation property to picture for financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param id: Read-only.
:type id: str
:param content:
:type content: bytes
:param content_type_parameter:
:type content_type_parameter: str
:param height:
:type height: int
:param width:
:type width: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPicture, or the result of cls(response)
:rtype: ~financials.models.MicrosoftGraphPicture
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPicture"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphPicture(id=id, content=content, content_type=content_type_parameter, height=height, width=width)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_picture.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphPicture')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPicture', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_picture.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture'} # type: ignore
def get_picture(
self,
company_id, # type: str
purchase_invoice_id, # type: str
picture_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum171"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPicture"
"""Get picture from financials.
Get picture from financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param picture_id: key: id of picture.
:type picture_id: str
:param select: Select properties to be returned.
:type select: list[str or ~financials.models.Enum171]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPicture, or the result of cls(response)
:rtype: ~financials.models.MicrosoftGraphPicture
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPicture"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_picture.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
'picture-id': self._serialize.url("picture_id", picture_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPicture', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_picture.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture/{picture-id}'} # type: ignore
def update_picture(
self,
company_id, # type: str
purchase_invoice_id, # type: str
picture_id, # type: str
id=None, # type: Optional[str]
content=None, # type: Optional[bytes]
content_type_parameter=None, # type: Optional[str]
height=None, # type: Optional[int]
width=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property picture in financials.
Update the navigation property picture in financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param picture_id: key: id of picture.
:type picture_id: str
:param id: Read-only.
:type id: str
:param content:
:type content: bytes
:param content_type_parameter:
:type content_type_parameter: str
:param height:
:type height: int
:param width:
:type width: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.MicrosoftGraphPicture(id=id, content=content, content_type=content_type_parameter, height=height, width=width)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_picture.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
'picture-id': self._serialize.url("picture_id", picture_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'MicrosoftGraphPicture')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_picture.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture/{picture-id}'} # type: ignore
def delete_picture(
self,
company_id, # type: str
purchase_invoice_id, # type: str
picture_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property picture for financials.
Delete navigation property picture for financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param picture_id: key: id of picture.
:type picture_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_picture.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
'picture-id': self._serialize.url("picture_id", picture_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_picture.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture/{picture-id}'} # type: ignore
def get_picture_content(
self,
company_id, # type: str
purchase_invoice_id, # type: str
picture_id, # type: str
**kwargs # type: Any
):
# type: (...) -> IO
"""Get media content for the navigation property picture from financials.
Get media content for the navigation property picture from financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param picture_id: key: id of picture.
:type picture_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IO, or the result of cls(response)
:rtype: IO
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[IO]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/octet-stream, application/json"
# Construct URL
url = self.get_picture_content.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
'picture-id': self._serialize.url("picture_id", picture_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/octet-stream, application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = response.stream_download(self._client._pipeline)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_picture_content.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture/{picture-id}/content'} # type: ignore
def set_picture_content(
self,
company_id, # type: str
purchase_invoice_id, # type: str
picture_id, # type: str
data, # type: IO
**kwargs # type: Any
):
# type: (...) -> None
"""Update media content for the navigation property picture in financials.
Update media content for the navigation property picture in financials.
:param company_id: key: id of company.
:type company_id: str
:param purchase_invoice_id: key: id of purchaseInvoice.
:type purchase_invoice_id: str
:param picture_id: key: id of picture.
:type picture_id: str
:param data: New media content.
:type data: IO
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/octet-stream")
accept = "application/json"
# Construct URL
url = self.set_picture_content.metadata['url'] # type: ignore
path_format_arguments = {
'company-id': self._serialize.url("company_id", company_id, 'str'),
'purchaseInvoice-id': self._serialize.url("purchase_invoice_id", purchase_invoice_id, 'str'),
'picture-id': self._serialize.url("picture_id", picture_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content_kwargs['stream_content'] = data
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
set_picture_content.metadata = {'url': '/financials/companies/{company-id}/purchaseInvoices/{purchaseInvoice-id}/vendor/picture/{picture-id}/content'} # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
696ddcda78f4d5e20f42851f7bec6f166409b249 | c127946e261f7a1739f998bab5126825bb3c1399 | /osext/test/__main__.py | 6fa395430c40f7e4a7a30d3674dedfcae7be7c5b | [
"MIT"
] | permissive | Tatsh/osext | a9763aa87d357f90169a2595caff01616558d066 | d375990eee1b66fd2cd7bdde0d9313e2340eee3c | refs/heads/master | 2020-03-30T13:11:08.709090 | 2015-01-09T20:32:16 | 2015-01-09T20:32:16 | 28,982,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from osext.test import argparse_actions_test, pushdtest
import unittest
for test in (pushdtest, argparse_actions_test):
suite = unittest.TestLoader().loadTestsFromModule(test)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"audvare@gmail.com"
] | audvare@gmail.com |
08a264c9708f0dee374d65a90f0a2fc828e0b770 | 8afe87c4e26e08b1dc24090a39fbedd7fa84210a | /sdnmpi/topology.py | 61ce97e6272298ba52328eaa2955b933ae0546c1 | [] | no_license | keichi/sdn-mpi-router | ca1cc128fa4fff11b61851d34fae0b21ed4b65ab | b20be81d39363cc28a9a0a5826a4450f9946d9d9 | refs/heads/master | 2023-01-10T00:29:03.959435 | 2016-02-09T01:03:07 | 2016-02-09T01:03:07 | 56,743,429 | 1 | 0 | null | 2022-12-26T20:03:41 | 2016-04-21T04:50:56 | Python | UTF-8 | Python | false | false | 7,216 | py | from ryu.base import app_manager
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.controller.event import EventRequestBase, EventReplyBase
from ryu.topology import event, switches
from ryu.controller import ofp_event
from ryu.lib.mac import haddr_to_bin, BROADCAST_STR, BROADCAST
from ryu.lib.packet import packet, ethernet, udp
from util.topology_db import TopologyDB
class CurrentTopologyRequest(EventRequestBase):
def __init__(self):
super(CurrentTopologyRequest, self).__init__()
self.dst = "TopologyManager"
class CurrentTopologyReply(EventReplyBase):
def __init__(self, dst, topology):
super(CurrentTopologyReply, self).__init__(dst)
self.topology = topology
class FindRouteRequest(EventRequestBase):
def __init__(self, src_mac, dst_mac):
super(FindRouteRequest, self).__init__()
self.dst = "TopologyManager"
self.src_mac = src_mac
self.dst_mac = dst_mac
class FindRouteReply(EventReplyBase):
def __init__(self, dst, fdb):
super(FindRouteReply, self).__init__(dst)
self.fdb = fdb
class FindAllRoutesRequest(EventRequestBase):
def __init__(self, src_mac, dst_mac):
super(FindAllRoutesRequest, self).__init__()
self.dst = "TopologyManager"
self.src_mac = src_mac
self.dst_mac = dst_mac
class FindAllRoutesReply(EventReplyBase):
    """Reply to FindAllRoutesRequest carrying a list of forwarding DBs."""

    def __init__(self, dst, fdb):
        super(FindAllRoutesReply, self).__init__(dst)
        # BUG FIX: the original assigned the undefined name `fdbs`, which
        # raised NameError whenever a reply was instantiated.  Bind the
        # constructor parameter instead (attribute name kept as `fdbs`).
        self.fdbs = fdb


class BroadcastRequest(EventRequestBase):
    """Ask TopologyManager to flood `data` on all edge ports, excluding the
    ingress port identified by (src_dpid, src_in_port)."""

    def __init__(self, data, src_dpid, src_in_port):
        super(BroadcastRequest, self).__init__()
        # Route this request to the TopologyManager Ryu application.
        self.dst = "TopologyManager"
        self.data = data
        self.src_dpid = src_dpid
        self.src_in_port = src_in_port


class TopologyManager(app_manager.RyuApp):
    """Tracks the network topology and answers topology/route queries.

    The app also installs flows so broadcast frames are punted to the
    controller and re-flooded only on non-reserved edge (switch-to-host)
    ports, which avoids broadcast loops in the switch-to-switch core.
    """

    _CONTEXTS = {
        "switches": switches.Switches,
    }
    _EVENTS = [CurrentTopologyRequest, BroadcastRequest]

    def __init__(self, *args, **kwargs):
        super(TopologyManager, self).__init__(*args, **kwargs)
        # Central store for switches, links and hosts discovered so far.
        self.topologydb = TopologyDB()

    def _add_flow(self, datapath, in_port, dst, actions):
        """Install a flow matching (in_port, dl_dst) with the given actions."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(
            in_port=in_port, dl_dst=haddr_to_bin(dst))
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    def _install_multicast_drop(self, datapath, dst):
        """Install a top-priority flow dropping all packets sent to `dst`."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(dl_dst=haddr_to_bin(dst))
        # Install a flow to drop all packets sent to dst
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0xffff, actions=[])
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPStateChange, MAIN_DISPATCHER)
    def _state_change_handler(self, ev):
        """On datapath connect, punt all broadcast frames to the controller."""
        datapath = ev.datapath
        ofproto = datapath.ofproto
        ofproto_parser = datapath.ofproto_parser

        match = ofproto_parser.OFPMatch(dl_dst=BROADCAST)
        actions = [ofproto_parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]

        # Install a flow to send all broadcast packets to the controller;
        # priority is just below the multicast-drop flows (0xffff).
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0xfffe, actions=actions)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Re-flood controller-punted broadcast frames on edge ports."""
        msg = ev.msg
        datapath = msg.datapath

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)
        dst = eth.dst

        # Do not handle IPv6 multicast packets
        if dst.startswith("33:33"):
            self._install_multicast_drop(datapath, dst)
            return
        # Do not handle unicast packets
        elif dst != BROADCAST_STR:
            return

        # Do not handle announcement packets (UDP dst port 61000 is used by
        # the SDN-MPI announcement protocol -- see the rest of the project).
        udph = pkt.get_protocol(udp.udp)
        if udph and udph.dst_port == 61000:
            return

        self._do_broadcast(msg.data, datapath.id, msg.in_port)

    @set_ev_cls(CurrentTopologyRequest)
    def _current_topology_request_handler(self, req):
        """Reply with the current TopologyDB instance."""
        reply = CurrentTopologyReply(req.src, self.topologydb)
        self.reply_to_request(req, reply)

    @set_ev_cls(FindRouteRequest)
    def _find_route_request_handler(self, req):
        """Reply with one forwarding DB for src_mac -> dst_mac."""
        fdb = self.topologydb.find_route(req.src_mac, req.dst_mac)
        reply = FindRouteReply(req.src, fdb)
        self.reply_to_request(req, reply)

    @set_ev_cls(FindAllRoutesRequest)
    def _find_all_routes_request_handler(self, req):
        """Reply with every candidate forwarding DB for src_mac -> dst_mac."""
        fdbs = self.topologydb.find_route(req.src_mac, req.dst_mac, True)
        # BUG FIX: the original built a FindAllRoutesRequest here, so the
        # requester never received the FindAllRoutesReply it waits for.
        reply = FindAllRoutesReply(req.src, fdbs)
        self.reply_to_request(req, reply)

    def _is_edge_port(self, port):
        """Return True when `port` is not an endpoint of any inter-switch
        link, i.e. it presumably faces a host."""
        for dpid_to_link in self.topologydb.links.values():
            for link in dpid_to_link.values():
                if port == link.src or port == link.dst:
                    return False
        return True

    def _do_broadcast(self, data, dpid, in_port):
        """Flood `data` on every non-reserved edge port of every switch,
        skipping the ingress port (dpid, in_port) it arrived on."""
        for switch in self.topologydb.switches.values():
            datapath = switch.dp
            ofproto = datapath.ofproto
            ofproto_parser = datapath.ofproto_parser

            # Only broadcast to non-reserved switch-to-host ports
            ports = [p for p in switch.ports if self._is_edge_port(p)
                     and not p.is_reserved()]

            # Exclude ingress port
            if switch.dp.id == dpid:
                ports = [p for p in ports if p.port_no != in_port]

            actions = [ofproto_parser.OFPActionOutput(port.port_no)
                       for port in ports]
            out = ofproto_parser.OFPPacketOut(
                datapath=datapath, in_port=ofproto.OFPP_NONE,
                buffer_id=ofproto.OFP_NO_BUFFER, actions=actions,
                data=data)
            datapath.send_msg(out)

    @set_ev_cls(BroadcastRequest)
    def _broadcast_request_handler(self, req):
        """Serve a BroadcastRequest from another app, then acknowledge it."""
        self._do_broadcast(req.data, req.src_dpid, req.src_in_port)
        self.reply_to_request(req, EventReplyBase(req.src))

    @set_ev_cls(event.EventSwitchEnter)
    def _event_switch_enter_handler(self, ev):
        """Record a newly connected switch."""
        self.topologydb.add_switch(ev.switch)

    @set_ev_cls(event.EventSwitchLeave)
    def _event_switch_leave_handler(self, ev):
        """Forget a disconnected switch."""
        self.topologydb.delete_switch(ev.switch)

    @set_ev_cls(event.EventLinkAdd)
    def _event_link_add_handler(self, ev):
        """Record a newly discovered inter-switch link."""
        self.topologydb.add_link(ev.link)

    @set_ev_cls(event.EventLinkDelete)
    def _event_link_delete_handler(self, ev):
        """Forget a removed inter-switch link."""
        self.topologydb.delete_link(ev.link)

    @set_ev_cls(event.EventHostAdd)
    def _event_host_add_handler(self, ev):
        """Record a newly discovered host."""
        self.topologydb.add_host(ev.host)
| [
"keichi.t@me.com"
] | keichi.t@me.com |
609168d13020a0c809176ddcb3d7c7dc19e27ab8 | 5c6a8cd15955f7ca5f822b17b56c37c36ca4144d | /networks/cnn_pathnet.py | b180e502fad140c272e3f43dd85b7daf79977d15 | [] | no_license | xavoliva/CAT | 57e48eb958d10f17071797645f4836ed33ae74a7 | 5f32ada1eed4bf4de4488840bd3ae7163e9dd22b | refs/heads/main | 2023-01-22T16:06:40.200292 | 2020-12-08T17:38:30 | 2020-12-08T17:38:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,851 | py | import sys
import torch
import numpy as np
import utils
class Net(torch.nn.Module):
    """PathNet-style CNN for task-incremental continual learning.

    Each of the L=5 layers holds M candidate modules; a path (an LxN index
    matrix) selects N modules per layer whose activations are summed.  The
    best path found for each task is kept in `bestPath` and its modules are
    frozen for subsequent tasks (see unfreeze_path).
    """

    def __init__(self,inputsize,taskcla,nhid,args=0):
        super(Net,self).__init__()

        ncha,size,_=inputsize
        self.taskcla=taskcla
        self.ntasks = len(self.taskcla)

        """
        # Config of Sec 2.5 in the paper
        expand_factor = 0.231 # to match num params
        self.N = 5
        self.M = 20 # Large M numbers like this, given our architecture, produce no training
        #"""
        """
        # Config of Sec 2.4 in the paper
        expand_factor = 0.325 # match num params
        self.N = 3
        self.M = 10
        #"""
        #"""
        # Better config found by us
        expand_factor = 0.258 # match num params
        self.N = 3
        self.M = 16
        #"""
        self.L = 5 # our architecture has 5 layers

        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented drop-in replacement here.
        # We need to remember the best path between tasks (-1 == unset).
        self.bestPath = -1 * np.ones((self.ntasks,self.L,self.N),dtype=int)

        #init modules subnets
        self.conv1=torch.nn.ModuleList()
        self.sizec1 = int(expand_factor*64)
        self.conv2=torch.nn.ModuleList()
        self.sizec2 = int(expand_factor*128)
        self.conv3=torch.nn.ModuleList()
        self.sizec3 = int(expand_factor*256)
        self.fc1=torch.nn.ModuleList()
        self.sizefc1 = int(expand_factor*nhid)
        self.fc2=torch.nn.ModuleList()
        self.sizefc2 = int(expand_factor*nhid)
        self.last=torch.nn.ModuleList()

        self.maxpool=torch.nn.MaxPool2d(2)
        self.relu=torch.nn.ReLU()

        pdrop1 = args.pdrop1
        pdrop2 = args.pdrop2
        self.drop1=torch.nn.Dropout(pdrop1)
        self.drop2=torch.nn.Dropout(pdrop2)

        # Declare the M candidate modules for every layer.
        for j in range(self.M):
            self.conv1.append(torch.nn.Conv2d(ncha,self.sizec1,kernel_size=size//8))
            s=utils.compute_conv_output_size(size,size//8)
            s=s//2
            self.conv2.append(torch.nn.Conv2d(self.sizec1,self.sizec2,kernel_size=size//10))
            s=utils.compute_conv_output_size(s,size//10)
            s=s//2
            self.conv3.append(torch.nn.Conv2d(self.sizec2,self.sizec3,kernel_size=2))
            s=utils.compute_conv_output_size(s,2)
            s=s//2
            self.fc1.append(torch.nn.Linear(self.sizec3*s*s,self.sizefc1))
            self.fc2.append(torch.nn.Linear(self.sizefc1,self.sizefc2))

        # One classification head per task.
        for t,n in self.taskcla:
            self.last.append(torch.nn.Linear(self.sizefc2,n))

        print('CNN PathNet')
        print('pdrop1: ',pdrop1)
        print('pdrop2: ',pdrop2)

        return

    def forward(self,x,t,P=None):
        """Run input x through the path P (default: best path of task t).

        Returns a list with one output tensor per task head.
        """
        if P is None:
            P = self.bestPath[t]
        # P is the genotype path matrix shaped LxN(no.layers x no.permitted modules)
        h=self.maxpool(self.drop1(self.relu(self.conv1[P[0,0]](x))))
        for j in range(1,self.N):
            h = h + self.maxpool(self.drop1(self.relu(self.conv1[P[0,j]](x)))) #sum activations

        h_pre=self.maxpool(self.drop1(self.relu(self.conv2[P[1,0]](h))))
        for j in range(1,self.N):
            h_pre = h_pre + self.maxpool(self.drop1(self.relu(self.conv2[P[1,j]](h)))) #sum activations
        h = h_pre

        h_pre=self.maxpool(self.drop2(self.relu(self.conv3[P[2,0]](h))))
        for j in range(1,self.N):
            h_pre = h_pre + self.maxpool(self.drop2(self.relu(self.conv3[P[2,j]](h)))) #sum activations
        h=h_pre.view(x.size(0),-1)

        h_pre=self.drop2(self.relu(self.fc1[P[3,0]](h)))
        for j in range(1,self.N):
            h_pre = h_pre + self.drop2(self.relu(self.fc1[P[3,j]](h))) #sum activations
        h = h_pre

        h_pre=self.drop2(self.relu(self.fc2[P[4,0]](h)))
        for j in range(1,self.N):
            h_pre = h_pre + self.drop2(self.relu(self.fc2[P[4,j]](h))) #sum activations
        h = h_pre

        y=[]
        # Renamed the loop variable (was `t`) so it no longer shadows the
        # task-index parameter used above to pick the default path.
        for task,_ in self.taskcla:
            y.append(self.last[task](h))
        return y

    def unfreeze_path(self,t,Path):
        """Freeze modules not in Path and those locked by earlier tasks.

        Modules appearing in bestPath of tasks 0..t-1 stay frozen; modules in
        the candidate Path that are not yet locked become trainable.
        """
        for i in range(self.M):
            self.unfreeze_module(self.conv1,i,Path[0,:],self.bestPath[0:t,0,:])
            self.unfreeze_module(self.conv2,i,Path[1,:],self.bestPath[0:t,1,:])
            self.unfreeze_module(self.conv3,i,Path[2,:],self.bestPath[0:t,2,:])
            self.unfreeze_module(self.fc1,i,Path[3,:],self.bestPath[0:t,3,:])
            self.unfreeze_module(self.fc2,i,Path[4,:],self.bestPath[0:t,4,:])
        return

    def unfreeze_module(self,layer,i,Path,bestPath):
        """Enable gradients for layer[i] iff it is in Path but not locked."""
        if (i in Path) and (i not in bestPath): #if the current module is in the path and not in the bestPath
            utils.set_req_grad(layer[i],True)
        else:
            utils.set_req_grad(layer[i],False)
        return
| [
"15011700342Xuan"
] | 15011700342Xuan |
f8eb3d68f2d770a036a28684ef69c41aea31c054 | cd876d32aa66112892dc9550837ad843e3e03afd | /env_carzone/Lib/site-packages/django/core/management/commands/createcachetable.py | a12ceb3830b2b8047936d89d1ddde2574dd92d98 | [
"BSD-3-Clause"
] | permissive | viplavdube/Car-Yard-App | 7665b7e6e54f3b0e4a4da563151f85d65c225cef | 65381a50f828e80f31d25d4f35e497f51c2d224d | refs/heads/master | 2023-04-19T03:49:18.991604 | 2021-04-27T17:51:10 | 2021-04-27T17:51:10 | 349,094,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,591 | py | from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS,
connections,
models,
router,
transaction,
)
from django.db.utils import DatabaseError
class Command(BaseCommand):
    """Management command that creates the table(s) needed by Django's
    database cache backend (django.core.cache.backends.db)."""

    help = "Creates the tables needed to use the SQL cache backend."

    # Skip system checks so the command works on partially configured projects.
    requires_system_checks = False

    def add_arguments(self, parser):
        """Register optional table names plus --database and --dry-run."""
        parser.add_argument(
            "args",
            metavar="table_name",
            nargs="*",
            help="Optional table names. Otherwise, settings.CACHES is used to find cache tables.",
        )
        parser.add_argument(
            "--database",
            default=DEFAULT_DB_ALIAS,
            help="Nominates a database onto which the cache tables will be "
            'installed. Defaults to the "default" database.',
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="Does not create the table, just prints the SQL that would be run.",
        )

    def handle(self, *tablenames, **options):
        """Create the named tables, or one table per DB cache in CACHES."""
        db = options["database"]
        self.verbosity = options["verbosity"]
        dry_run = options["dry_run"]
        if tablenames:
            # Legacy behavior, tablename specified as argument
            for tablename in tablenames:
                self.create_table(db, tablename, dry_run)
        else:
            for cache_alias in settings.CACHES:
                cache = caches[cache_alias]
                if isinstance(cache, BaseDatabaseCache):
                    self.create_table(db, cache._table, dry_run)

    def create_table(self, database, tablename, dry_run):
        """Create one cache table on `database`.

        Skips the table when the router disallows migration for it or when it
        already exists; with dry_run, prints the SQL instead of executing it.
        """
        cache = BaseDatabaseCache(tablename, {})
        if not router.allow_migrate_model(database, cache.cache_model_class):
            return
        connection = connections[database]

        if tablename in connection.introspection.table_names():
            if self.verbosity > 0:
                self.stdout.write("Cache table '%s' already exists." % tablename)
            return

        fields = (
            # "key" is a reserved word in MySQL, so use "cache_key" instead.
            models.CharField(
                name="cache_key", max_length=255, unique=True, primary_key=True
            ),
            models.TextField(name="value"),
            models.DateTimeField(name="expires", db_index=True),
        )
        table_output = []
        index_output = []
        qn = connection.ops.quote_name
        # Hand-build the DDL: these fields belong to no real model, so the
        # schema editor cannot be used here.
        for f in fields:
            field_output = [
                qn(f.name),
                f.db_type(connection=connection),
                "%sNULL" % ("NOT " if not f.null else ""),
            ]
            if f.primary_key:
                field_output.append("PRIMARY KEY")
            elif f.unique:
                field_output.append("UNIQUE")
            if f.db_index:
                unique = "UNIQUE " if f.unique else ""
                index_output.append(
                    "CREATE %sINDEX %s ON %s (%s);"
                    % (
                        unique,
                        qn("%s_%s" % (tablename, f.name)),
                        qn(tablename),
                        qn(f.name),
                    )
                )
            table_output.append(" ".join(field_output))
        full_statement = ["CREATE TABLE %s (" % qn(tablename)]
        for i, line in enumerate(table_output):
            full_statement.append(
                " %s%s" % (line, "," if i < len(table_output) - 1 else "")
            )
        full_statement.append(");")
        full_statement = "\n".join(full_statement)

        if dry_run:
            self.stdout.write(full_statement)
            for statement in index_output:
                self.stdout.write(statement)
            return

        # Wrap DDL in a transaction only where the backend can roll it back.
        with transaction.atomic(
            using=database, savepoint=connection.features.can_rollback_ddl
        ):
            with connection.cursor() as curs:
                try:
                    curs.execute(full_statement)
                except DatabaseError as e:
                    raise CommandError(
                        "Cache table '%s' could not be created.\nThe error was: %s."
                        % (tablename, e)
                    )
                for statement in index_output:
                    curs.execute(statement)

        if self.verbosity > 1:
            self.stdout.write("Cache table '%s' created." % tablename)
| [
"viplav45@gmail.com"
] | viplav45@gmail.com |
91aac8630957a94f0230668552c2631f5d11b48e | c999562ec8e3ee6952e82a0b20626301f00a6e02 | /manage.py | ad701bdeffeac39576301824e2b17a2145b0f201 | [] | no_license | shobhit1215/Calorie_Tracker | 5e3a4e1c326b912588d48fc7be871634be7ee242 | 175788d2febe90c473c1a5321e02c2bbd8a3e4cb | refs/heads/main | 2023-06-03T06:20:16.955410 | 2021-06-16T04:39:25 | 2021-06-16T04:39:25 | 377,042,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Point Django at this project's settings and dispatch the CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'calorie_meter.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| [
"imshobhit.sb@gmail.com"
] | imshobhit.sb@gmail.com |
574e2bed1cd21db75ad93f26f6a4d3ef13c1fe29 | e04dbc32247accf073e3089ed4013427ad182c7c | /ABC170/ABC170E.py | 1c214969afcd03042c9fd6af3e054cba88882ed0 | [] | no_license | twobooks/atcoder_training | 9deb237aed7d9de573c1134a858e96243fb73ca0 | aa81799ec87cc9c9d76de85c55e99ad5fa7676b5 | refs/heads/master | 2021-10-28T06:33:19.459975 | 2021-10-20T14:16:57 | 2021-10-20T14:16:57 | 233,233,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # from math import factorial,sqrt,ceil #,gcd
# from itertools import permutations,combinations,combinations_with_replacement
# from collections import deque,Counter
# from bisect import bisect_left
# from heapq import heappush,heappop
# from numba import njit
# from functools import lru_cache # 簡単メモ化 @lru_cache(maxsize=1000)
# from fractions import gcd
# from decimal import Decimal, getcontext
# # getcontext().prec = 1000
# # eps = Decimal(10) ** (-100)
# import numpy as np # numpy.lcm()
# from scipy.sparse.csgraph import shortest_path, dijkstra, floyd_warshall, bellman_ford, johnson
# from scipy.sparse import csr_matrix
# from scipy.special import comb,perm #permはnPk
# import networkx as nx
# G = Graph()
# slist = "abcdefghijklmnopqrstuvwxyz"
MOD = 10**9 + 7
S = input()
N = int(input())
N,M = map(int,input().split())
lisA = list(map(int,input().split()))
# arrA = np.array(input().split(),dtype=np.int64)
print(ans)
# for row in board:
# print(*row,sep="") #unpackして間にスペース入れずに出力する
# print("{:.10f}".format(ans))
# print("{:0=10d}".format(ans))
| [
"twobookscom@gmail.com"
] | twobookscom@gmail.com |
87c35588bb28261faa867bd3a2eda366f4c81ac3 | 8dfa4c0626768e27fe474cfdfbdb9d7a1b14fa56 | /test.py | 2d9a37fe208e3a8a5c525017ebfec150893325ae | [] | no_license | MilanTagline2021/selenium | 7207a5c6e87b9b677120ac7289b1752df4d90d73 | 7f174592a1f99d0938b1bc52eaec4b79c2b2c5ab | refs/heads/master | 2023-08-27T19:31:04.752040 | 2021-10-12T04:16:37 | 2021-10-12T04:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,053 | py | import time
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver import ActionChains
def DeliveryOption(driver, send_to_someone):
    """Fill in the gift-card delivery form via Selenium.

    When `send_to_someone` is falsy, fills the buyer's own name/email/phone;
    otherwise enters the recipient's first/last name and email and selects
    the "send to someone else" option plus a dropdown choice.

    NOTE(review): callers pass a WebElement as `send_to_someone`, which is
    always truthy -- confirm the falsy branch is reachable as intended.
    """
    if not send_to_someone:
        time.sleep(2)
        driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[6]/div/button[2]').click()
        time.sleep(4)
        driver.find_element_by_name('name').send_keys('Milan Sonani')
        time.sleep(4)
        driver.find_element_by_name('email').send_keys('milans.tagline@gmail.com')
        time.sleep(4)
        driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[1]/div/div/div/div[1]/div[3]/div/div/div/input').send_keys('+917600837364')
        time.sleep(5)
    else:
        time.sleep(3)
        driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[1]/div/div/div/div[1]/div[2]/div[2]/div/div[1]/input').send_keys('Ravi')
        time.sleep(4)
        driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[1]/div/div/div/div[1]/div[2]/div[2]/div/div[2]/input').send_keys('Milan')
        time.sleep(4)
        driver.find_element_by_name('email').send_keys('ravik.tagline@gmail.com')
        time.sleep(4)
        send_to_someone=driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[1]/div/div/div/div[1]/div[3]/div/label')
        # Checkbox labels intercept direct clicks here, so click through JS.
        driver.execute_script("arguments[0].click();",send_to_someone)
        time.sleep(5)
        driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[1]/div/div/div/div[1]/div[4]/div[2]/div/div/ul/li[5]').click()
        time.sleep(5)
class Giftcardsby(unittest.TestCase):
    """Selenium UI smoke tests for the myglobal.app gift-card flows.

    NOTE(review): every test method below is currently commented out, so the
    suite only opens and closes a Chrome session; re-enable tests as needed.
    """

    def setUp(self):
        # Chromedriver path is hard-coded for a local macOS setup -- adjust
        # (or use webdriver-manager) on other machines.
        self.driver = webdriver.Chrome('/Users/mac/Downloads/chromedriver')

    # def test_in_sign_up_in_giftcards_by(self):
    #     driver = self.driver
    #     driver.get('https://myglobal.app/')
    #     driver.find_element_by_xpath("//*[@id='root']/div/div[2]/header/nav/div/div[2]/button").click()
    #     driver.find_element_by_name('companyName').send_keys("Tagline")
    #     driver.find_element_by_name('email').send_keys("hemal@yopmail.com")
    #     driver.find_element_by_name('password').send_keys("Tagline@123")
    #     driver.find_element_by_name('confirmPassword').send_keys("Tagline@123")
    #     driver.find_element_by_name('siteName').send_keys("hemal")
    #     driver.find_element_by_xpath("/html/body/div[3]/div/div/div[2]/div/div[2]/div/p/span/input").click()
    #     driver.find_element_by_xpath("/html/body/div[3]/div/div/div[2]/div/div[2]/div/button").click()
    #     assert "Sign up is not possible" not in driver.page_source

    # def test_contact_us(self):
    #     driver = self.driver
    #     driver.get('https://myglobal.app/contact')
    #     driver.find_element_by_name('name').send_keys('hemal')
    #     time.sleep(2)
    #     driver.find_element_by_name('role').send_keys("devloper")
    #     time.sleep(2)
    #     driver.find_element_by_name('email').send_keys('hemal.tagline@gmail.com')
    #     time.sleep(2)
    #     driver.find_element_by_name('message').send_keys('this is testing.')
    #     time.sleep(2)
    #     submit = driver.find_element_by_xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "btn-default", " " ))]')
    #     driver.execute_script("arguments[0].click();", submit)
    #     time.sleep(2)

    # def test_on_publish_data(self):
    #     driver = self.driver
    #     driver.get('https://hemal.myglobal.app/login')
    #     try:
    #         element = WebDriverWait(driver, 10).until(
    #             EC.presence_of_element_located((By.NAME, "password"))
    #         )
    #         element.send_keys('Tagline@123')
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div/div/div/div/div[2]/form/div[5]/button').click()
    #         dashboard = WebDriverWait(driver, 10).until(
    #             EC.presence_of_element_located((By.XPATH, "//*[@id='root']/div/div[2]/div[1]/div/div/div[1]/button"))
    #         )
    #         dashboard.click()
    #         time.sleep(5)
    #         dashboard.back()
    #         time.sleep(5)
    #         driver.find_element_by_xpath("//*[@id='root']/div/div[2]/div[1]/div/div/div[2]/div/button[1]").click() #click on publish button
    #         time.sleep(5)
    #         driver.find_element_by_xpath("/html/body/div[7]/div/div/div[2]/div/div[1]/p[1]/a").click() #click on privacy policy link
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="richtexteditor_1674121545_0_rte-edit-view"]/p').send_keys('a') #set value to privacy policy
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[2]/div/button[2]').click() #click on save button to publish
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/header/nav/div[1]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath("//*[@id='root']/div/div[2]/div[1]/div/div/div[2]/div/button[1]").click() #click on publish button
    #         time.sleep(5)
    #         driver.find_element_by_xpath("/html/body/div[7]/div/div/div[2]/div/div[1]/p[2]/a").click() #click on t&c link
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="richtexteditor_1872661978_0_rte-edit-view"]/p').send_keys('s') #set value in t&c
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[2]/div/button[2]').click() #save button
    #         time.sleep(5)

    #         """Automation testing of Contact Support
    #         """
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/header/nav/div[1]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[2]/div/button[1]').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/div/div[1]/p[3]/a').click() #contact setting link
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div/form/div[2]/div/button').click() #click on edit button
    #         time.sleep(5)
    #         driver.find_element_by_xpath("//*[@id='root']/div/div[2]/div[1]/div/div/div/form/div[1]/div/div/div[1]/input").send_keys('9898989898')
    #         time.sleep(5)
    #         driver.find_element_by_xpath("//*[@id='root']/div/div[2]/div[1]/div/div/div/form/div[3]/div/div/input").send_keys('milans.tagline@gmail.com')
    #         time.sleep(5)
    #         driver.find_element_by_name('location').send_keys('surat')
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div/form/div[5]/div/button[1]').click()
    #         time.sleep(5)

    #         """Automation testing of base currency
    #         """
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/header/nav/div[1]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[2]/div/button[1]').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/div/div[1]/p[4]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="left-tabs-example-tabpane-currency"]/div/div[2]/div/div[2]/div/ul/li[4]').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="left-tabs-example-tabpane-currency"]/div/div[2]/div/div[2]/div/button').click()
    #         time.sleep(5)

    #         """Automation testing of payment gateway
    #         """
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/header/nav/div[1]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[2]/div/button[1]').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/div/div[1]/p[5]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="formGridCountry"]/option[2]').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('/html/body/div[4]/div/div/div[2]/div/div/button[2]').click()
    #         time.sleep(5)
    #         production = Select(driver.find_element_by_name('production'))
    #         production.select_by_visible_text('Merit payment gateway')
    #         driver.find_element_by_xpath('/html/body/div[4]/div/div/div[2]/div/div/button[2]').click()
    #         time.sleep(5)

    #         """Automation testing of subscription
    #         """
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/header/nav/div[1]/a').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[2]/div/button[1]').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('/html/body/div[7]/div/div/div[2]/div/div[1]/p[6]/span').click()
    #         time.sleep(5)
    #         driver.find_element_by_xpath('//*[@id="pricing"]/div/div[2]/div/div/a').click()
    #         time.sleep(20)
    #         driver.find_elements_by_xpath('//*[@id="cb-body"]/div/div[2]/div[2]/button').click()
    #         time.sleep(5)
    #         driver.find_elements_by_xpath('//*[@id="cb-body"]/div/div[2]/div/button').click()
    #         time.sleep(10)
    #         driver.find_element_by_xpath('//*[@id="cb-body"]/div/div[2]/div/button/span').click()
    #         time.sleep(5)
    #     except:
    #         "You are not able to logged in"

    # def test_for_admin_page(self):
    #     driver = self.driver
    #     driver.get('https://hemal.myglobal.app/login')
    #     element = WebDriverWait(driver, 10).until(
    #         EC.presence_of_element_located((By.NAME, "password"))
    #     )
    #     element.send_keys('Tagline@123')
    #     time.sleep(2)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div/div/div/div/div[2]/form/div[5]/button').click()
    #     time.sleep(5)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[1]/div[1]/div/button').click()
    #     time.sleep(5)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[2]/div/div[1]/div[1]/div/select/option[3]').click()
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[2]/div/div[1]/div[2]/button').click()
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="inlineFormInputGroup_amount"]').send_keys('120')
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[2]/div/div[2]/div[1]/div/div[2]/div[1]/div/div[2]/span/button').click()
    #     time.sleep(5)
    #     driver.refresh()
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[4]/div/div/div[2]/div/div[1]').click()
    #     time.sleep(5)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[4]/div/div/div[2]/div/div[2]/ul/li[6]').click()
    #     time.sleep(5)
    #     send_to_someone=driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[6]/div/button[1]')
    #     DeliveryOption(driver, send_to_someone)

    # def test_into_env(self):
    #     driver = self.driver
    #     driver.get('https://hemal.myglobal.app/login')
    #     driver.maximize_window()
    #     element = WebDriverWait(driver, 10).until(
    #         EC.presence_of_element_located((By.NAME, "password"))
    #     )
    #     element.send_keys('Tagline@123')
    #     time.sleep(3)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div/div/div/div/div[2]/form/div[5]/button').click()
    #     time.sleep(10)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div/div[1]/button').click()
    #     time.sleep(10)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/header/nav/div[2]/div/div[3]/div[1]/div/select/option[2]').click()
    #     time.sleep(7)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[1]/div/div[3]/div/div/div').click()
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[4]/div/div/div[2]/div/div[2]/ul/li[4]').click()
    #     time.sleep(4)
    #     send_to_someone=driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[5]/div/div/div[2]/div/div[5]/div/button[1]')
    #     DeliveryOption(driver, send_to_someone)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[7]/div[2]/div/div/button').click()
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[5]/div/div[4]/div[1]/div/select/option[3]').click()
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[5]/div/div[4]/div[5]/button[2]').click()
    #     time.sleep(4)
    #     driver.find_element_by_name('name').send_keys('Milan')
    #     time.sleep(4)
    #     """Access using ActionChain
    #     """
    #     driver.find_element_by_id('cardNumber').click()
    #     ActionChains(driver).send_keys(4242424242424242).perform()
    #     time.sleep(4)
    #     driver.find_element_by_id('expiryDate').click()
    #     ActionChains(driver).send_keys(1025).perform()
    #     time.sleep(4)
    #     driver.find_element_by_id('cvv').click()
    #     ActionChains(driver).send_keys(100).perform()
    #     time.sleep(4)
    #     driver.find_element_by_xpath('//*[@id="root"]/div/div[5]/div/button').click()
    #     time.sleep(10)
    #     """Payment gateway javascript dynamically append so not get this dynamic values
    #     """
    #     # driver.find_element_by_class_name("form-field").send_keys('Checkout1!')
    #     driver.find_element_by_css_selector("input.form-field").send_keys('Checkout1!')#('//*[@id="password"]')
    #     # input_elmnt = WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'input.form-field')))
    #     # action = ActionChains(driver)
    #     # action.move_to_element(input_elmnt).send_keys('Checkout1!').perform()
    #     time.sleep(3)
    #     submit = driver.find_element_by_xpath("//*[@id='txtButton']")
    #     driver.execute_script("arguments[0].click();", submit)
    #     # driver.find_element_by_xpath('//*[@id="txtButton"]').click()
    #     time.sleep(10)

    def tearDown(self):
        # Always close the browser, even when a test fails.
        self.driver.quit()
if __name__ == '__main__':
    # Allow running the suite directly with `python test.py`.
    unittest.main()
| [
"milans.tagline@gmail.com"
] | milans.tagline@gmail.com |
9e6391d2d0d23d34224673b134a21aec53a8b1e5 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/atbash-cipher/732cc5e5db4b4586a3bb7cffc064fcb3.py | ba9e3ac694f7d1f30189a304637ff582b21d8087 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 580 | py | from string import (maketrans, translate, ascii_letters, ascii_lowercase,
punctuation)
# Translation table mapping upper- and lower-case ASCII letters to the
# reversed lowercase alphabet (Atbash: a<->z, b<->y, ...).
# NOTE(review): string.maketrans was removed in Python 3 -- this module-level
# table only builds under Python 2 as written.
atbash_cipher_trans = maketrans(ascii_letters, ascii_lowercase[::-1] * 2)
def encode(msg):
    """Atbash-encode *msg* and group the ciphertext into 5-character blocks.

    Whitespace and punctuation are dropped, letters are lower-cased and
    mapped to the reversed alphabet (a<->z, b<->y, ...), digits pass through.
    """
    # Build the table with str.maketrans so the function also works on
    # Python 3, where string.maketrans/translate were removed.  The third
    # argument lists characters to delete during translation.
    table = str.maketrans(ascii_letters, ascii_lowercase[::-1] * 2,
                          " " + punctuation)
    ciphered = msg.translate(table)
    # Formats the string into 5-blocks and returns
    return " ".join(ciphered[i:i + 5] for i in range(0, len(ciphered), 5))
def decode(msg):
    """Decode an Atbash ciphertext: strip separators and re-reverse letters.

    Atbash is its own inverse, so decoding applies the same mapping as
    encoding while deleting spaces and punctuation.
    """
    # str.maketrans keeps this Python 3 compatible (see encode()).
    table = str.maketrans(ascii_letters, ascii_lowercase[::-1] * 2,
                          " " + punctuation)
    return msg.translate(table)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
3c4e22716c2bd1accf2c11a6210fd7e12e7a2bcc | 64cd09628f599fe18bf38528309349f7ac0df71e | /Introduction/02_Introduction_numpy/10 Numpy functions/expand_dims.py | c146781e6b7b57ef3ce33ab609f0f9f00c00b100 | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | import numpy as np
a = np.array([1,2])
print a.shape # (2,)
b = np.expand_dims(a, axis=0)
print b.shape # (1, 2)
c = np.expand_dims(a, axis=1)
print c.shape # (2, 1) | [
"lakino@yonsei.ac.kr"
] | lakino@yonsei.ac.kr |
b341250e3493fa69cf8be8acb62e237338fb0222 | 687928e5bc8d5cf68d543005bb24c862460edcfc | /nssrc/com/citrix/netscaler/nitro/resource/config/appfw/appfwfieldtype.py | a7eeddfd3107010e03dfb92f96518c642a302ad5 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | mbs91/nitro | c6c81665d6abd04de8b9f09554e5e8e541f4a2b8 | be74e1e177f5c205c16126bc9b023f2348788409 | refs/heads/master | 2021-05-29T19:24:04.520762 | 2015-06-26T02:03:09 | 2015-06-26T02:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,384 | py | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwfieldtype(base_resource) :
""" Configuration for application firewall form field type resource. """
def __init__(self) :
self._name = ""
self._regex = ""
self._priority = 0
self._comment = ""
self._builtin = []
self.___count = 0
@property
def name(self) :
"""Name for the field type.
Must begin with a letter, number, or the underscore character \(_\), and must contain only letters, numbers, and the hyphen \(-\), period \(.\) pound \(\#\), space \( \), at \(\@\), equals \(=\), colon \(:\), and underscore characters. Cannot be changed after the field type is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks \(for example, "my field type" or 'my field type'\).<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the field type.
Must begin with a letter, number, or the underscore character \(_\), and must contain only letters, numbers, and the hyphen \(-\), period \(.\) pound \(\#\), space \( \), at \(\@\), equals \(=\), colon \(:\), and underscore characters. Cannot be changed after the field type is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks \(for example, "my field type" or 'my field type'\).<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def regex(self) :
"""PCRE - format regular expression defining the characters and length allowed for this field type.<br/>Minimum length = 1.
"""
try :
return self._regex
except Exception as e:
raise e
@regex.setter
def regex(self, regex) :
"""PCRE - format regular expression defining the characters and length allowed for this field type.<br/>Minimum length = 1
"""
try :
self._regex = regex
except Exception as e:
raise e
@property
def priority(self) :
"""Positive integer specifying the priority of the field type. A lower number specified a higher priority. Field types are checked in the order of their priority numbers.<br/>Maximum length = 64000.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Positive integer specifying the priority of the field type. A lower number specified a higher priority. Field types are checked in the order of their priority numbers.<br/>Maximum length = 64000
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def comment(self) :
"""Comment describing the type of field that this field type is intended to match.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
"""Comment describing the type of field that this field type is intended to match.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def builtin(self) :
"""Flag to determine if fieldtype is built-in or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwfieldtype_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwfieldtype
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add appfwfieldtype.
"""
try :
if type(resource) is not list :
addresource = appfwfieldtype()
addresource.name = resource.name
addresource.regex = resource.regex
addresource.priority = resource.priority
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ appfwfieldtype() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].regex = resource[i].regex
addresources[i].priority = resource[i].priority
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete appfwfieldtype.
"""
try :
if type(resource) is not list :
deleteresource = appfwfieldtype()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ appfwfieldtype() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ appfwfieldtype() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update appfwfieldtype.
"""
try :
if type(resource) is not list :
updateresource = appfwfieldtype()
updateresource.name = resource.name
updateresource.regex = resource.regex
updateresource.priority = resource.priority
updateresource.comment = resource.comment
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ appfwfieldtype() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].regex = resource[i].regex
updateresources[i].priority = resource[i].priority
updateresources[i].comment = resource[i].comment
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the appfwfieldtype resources that are configured on netscaler.
"""
try :
if not name :
obj = appfwfieldtype()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = appfwfieldtype()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [appfwfieldtype() for _ in range(len(name))]
obj = [appfwfieldtype() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = appfwfieldtype()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of appfwfieldtype resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appfwfieldtype()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the appfwfieldtype resources configured on NetScaler.
"""
try :
obj = appfwfieldtype()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of appfwfieldtype resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = appfwfieldtype()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class appfwfieldtype_response(base_response) :
def __init__(self, length=1) :
self.appfwfieldtype = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwfieldtype = [appfwfieldtype() for _ in range(length)]
| [
"bensassimaha@gmail.com"
] | bensassimaha@gmail.com |
1457d81c66584c121fee49988a618a9ad2e4f5dd | 22749c6a569661b2637233cc0aebdc1701033b26 | /src/python/pants/engine/target.py | e52a74a90bd5e2e1ca8ff6ba66c765ecffe03ec5 | [
"Apache-2.0"
] | permissive | akk5597/pants | 2eceb226c39b8ef7f603dfa96684b7522e1a9065 | 7ad295f71d2990eebbbe9c778bbf70f7d9e66584 | refs/heads/main | 2023-08-27T02:40:54.753545 | 2021-11-10T03:42:18 | 2021-11-10T03:42:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91,783 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import collections.abc
import itertools
import logging
import os.path
from abc import ABC, ABCMeta, abstractmethod
from collections import deque
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import (
Any,
ClassVar,
Dict,
Generic,
Iterable,
Iterator,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
get_type_hints,
)
from typing_extensions import final
from pants.base.deprecated import warn_or_error
from pants.engine.addresses import Address, UnparsedAddressInputs, assert_single_address
from pants.engine.collection import Collection, DeduplicatedCollection
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import (
GlobExpansionConjunction,
GlobMatchErrorBehavior,
PathGlobs,
Paths,
Snapshot,
)
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.global_options import FilesNotFoundBehavior
from pants.source.filespec import Filespec, matches_filespec
from pants.util.collections import ensure_list, ensure_str_list
from pants.util.dirutil import fast_relpath
from pants.util.docutil import doc_url
from pants.util.frozendict import FrozenDict
from pants.util.memo import memoized_classproperty, memoized_method, memoized_property
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------------------------
# Core Field abstractions
# -----------------------------------------------------------------------------------------------

# Type alias to express the intent that the type should be immutable and hashable. There's nothing
# to actually enforce this, outside of convention. Maybe we could develop a MyPy plugin?
# (Used below for `Field.default` and `Field.value`.)
ImmutableValue = Any
@frozen_after_init
class Field:
    """A Field.

    The majority of fields should use field templates like `BoolField`, `StringField`, and
    `StringSequenceField`. These subclasses will provide sensible type hints and validation
    automatically.

    If you are directly subclassing `Field`, you should likely override `compute_value()`
    to perform any custom hydration and/or validation, such as converting unhashable types to
    hashable types or checking for banned values. The returned value must be hashable
    (and should be immutable) so that this Field may be used by the engine. This means, for
    example, using tuples rather than lists and using `FrozenOrderedSet` rather than `set`.

    If you plan to use the engine to fully hydrate the value, you can also inherit
    `AsyncFieldMixin`, which will store an `address: Address` property on the `Field` instance.

    Subclasses should also override the type hints for `value` and `raw_value` to be more precise
    than `Any`. The type hint for `raw_value` is used to generate documentation, e.g. for
    `./pants help $target_type`.

    Set the `help` class property with a description, which will be used in `./pants help`. For the
    best rendering, use soft wrapping (e.g. implicit string concatenation) within paragraphs, but
    hard wrapping (`\n`) to separate distinct paragraphs and/or lists.

    Example:

        # NB: Really, this should subclass IntField. We only use Field as an example.
        class Timeout(Field):
            alias = "timeout"
            value: Optional[int]
            default = None
            help = "A timeout field.\n\nMore information."

            @classmethod
            def compute_value(cls, raw_value: Optional[int], address: Address) -> Optional[int]:
                value_or_default = super().compute_value(raw_value, address)
                if value_or_default is not None and not isinstance(value_or_default, int):
                    raise ValueError(
                        "The `timeout` field expects an integer, but was given"
                        f"{value_or_default} for target {address}."
                    )
                return value_or_default
    """

    # Subclasses must define these.
    alias: ClassVar[str]
    help: ClassVar[str]

    # Subclasses must define at least one of these two: a `default` used when the field is not
    # specified, or `required = True` to raise when the raw value is missing (see
    # `compute_value`).
    default: ClassVar[ImmutableValue]
    required: ClassVar[bool] = False

    # Subclasses may define these to deprecate the field. If `removal_version` is set,
    # `removal_hint` must also be set (enforced in `_check_deprecated`).
    removal_version: ClassVar[str | None] = None
    removal_hint: ClassVar[str | None] = None

    # `@final`: subclasses customize behavior by overriding `compute_value()`, not the
    # constructor.
    @final
    def __init__(self, raw_value: Optional[Any], address: Address) -> None:
        self._check_deprecated(raw_value, address)
        self.value: Optional[ImmutableValue] = self.compute_value(raw_value, address)

    @classmethod
    def compute_value(cls, raw_value: Optional[Any], address: Address) -> ImmutableValue:
        """Convert the `raw_value` into `self.value`.

        You should perform any optional validation and/or hydration here. For example, you may want
        to check that an integer is > 0 or convert an `Iterable[str]` to `List[str]`.

        The resulting value must be hashable (and should be immutable).
        """
        if raw_value is None:
            if cls.required:
                raise RequiredFieldMissingException(address, cls.alias)
            return cls.default
        return raw_value

    @classmethod
    def _check_deprecated(cls, raw_value: Optional[Any], address: Address) -> None:
        """Warn (or error) when a deprecated field is explicitly set.

        Skips generated targets and unset values (raw_value is None) — presumably to avoid
        duplicate warnings for a single explicit usage; confirm against the target-generation
        code paths.
        """
        if not cls.removal_version or address.is_generated_target or raw_value is None:
            return
        if not cls.removal_hint:
            raise ValueError(
                f"You specified `removal_version` for {cls}, but not the class property "
                "`removal_hint`."
            )
        warn_or_error(
            cls.removal_version,
            entity=f"the {repr(cls.alias)} field",
            hint=(f"Using the `{cls.alias}` field in the target {address}. " f"{cls.removal_hint}"),
        )

    def __repr__(self) -> str:
        return (
            f"{self.__class__}(alias={repr(self.alias)}, value={repr(self.value)}, "
            f"default={repr(self.default)})"
        )

    def __str__(self) -> str:
        return f"{self.alias}={self.value}"

    # Identity is (exact subclass, computed value): a field subclass never compares equal to its
    # parent type, even with an identical value.
    def __hash__(self) -> int:
        return hash((self.__class__, self.value))

    def __eq__(self, other: Union[Any, Field]) -> bool:
        if not isinstance(other, Field):
            return NotImplemented
        return (self.__class__, self.value) == (other.__class__, other.value)
# NB: By subclassing `Field`, MyPy understands our type hints, and it means it doesn't matter which
# order you use for inheriting the field template vs. the mixin.
class AsyncFieldMixin(Field):
    """A mixin to store the field's original `Address` for use during hydration by the engine.

    Typically, you should also create a dataclass representing the hydrated value and another for
    the request, then a rule to go from the request to the hydrated value. The request class should
    store the async field as a property.

    (Why use the request class as the rule input, rather than the field itself? It's a wrapper so
    that subclasses of the async field work properly, given that the engine uses exact type IDs.
    This is like WrappedTarget.)

    For example:

        class Sources(StringSequenceField, AsyncFieldMixin):
            alias = "sources"

            # Often, async fields will want to define entry points like this to allow subclasses to
            # change behavior.
            def validate_resolved_files(self, files: Sequence[str]) -> None:
                pass


        @dataclass(frozen=True)
        class HydrateSourcesRequest:
            field: Sources


        @dataclass(frozen=True)
        class HydratedSources:
            snapshot: Snapshot


        @rule
        async def hydrate_sources(request: HydrateSourcesRequest) -> HydratedSources:
            result = await Get(Snapshot, PathGlobs(request.field.value))
            request.field.validate_resolved_files(result.files)
            ...
            return HydratedSources(result)

    Then, call sites can `await Get` if they need to hydrate the field, even if they subclassed
    the original async field to have custom behavior:

        sources1 = await Get(HydratedSources, HydrateSourcesRequest(my_tgt.get(Sources)))
        sources2 = await Get(HydratedSources, HydrateSourcesRequest(custom_tgt.get(CustomSources)))
    """

    @final  # type: ignore[misc]
    def __init__(self, raw_value: Optional[Any], address: Address) -> None:
        super().__init__(raw_value, address)
        # We must temporarily unfreeze the field, but then we refreeze to continue avoiding
        # subclasses from adding arbitrary fields.
        self._unfreeze_instance()  # type: ignore[attr-defined]
        # N.B.: We store the address here and not in the Field base class, because the memory usage
        # of storing this value in every field was shown to be excessive / lead to performance
        # issues.
        self.address = address
        self._freeze_instance()  # type: ignore[attr-defined]

    def __repr__(self) -> str:
        return (
            f"{self.__class__}(alias={repr(self.alias)}, address={self.address}, "
            f"value={repr(self.value)}, default={repr(self.default)})"
        )

    # Unlike the base Field, the address participates in identity, since the field's hydrated
    # value depends on where it was declared.
    def __hash__(self) -> int:
        return hash((self.__class__, self.value, self.address))

    def __eq__(self, other: Union[Any, AsyncFieldMixin]) -> bool:
        if not isinstance(other, AsyncFieldMixin):
            return NotImplemented
        return (self.__class__, self.value, self.address) == (
            other.__class__,
            other.value,
            other.address,
        )
# -----------------------------------------------------------------------------------------------
# Core Target abstractions
# -----------------------------------------------------------------------------------------------

# NB: This TypeVar is what allows `Target.get()` to properly work with MyPy so that MyPy knows
# the precise Field returned. (Also used by `Target.__getitem__` and `Target.class_get_field`.)
_F = TypeVar("_F", bound=Field)
@frozen_after_init
class Target:
    """A Target represents an addressable set of metadata.

    Set the `help` class property with a description, which will be used in `./pants help`. For the
    best rendering, use soft wrapping (e.g. implicit string concatenation) within paragraphs, but
    hard wrapping (`\n`) to separate distinct paragraphs and/or lists.
    """

    # Subclasses must define these
    alias: ClassVar[str]
    core_fields: ClassVar[Tuple[Type[Field], ...]]
    help: ClassVar[str]

    # Subclasses may set these to deprecate the whole target type; `removal_hint` is required
    # whenever `removal_version` is set (enforced in the constructor).
    removal_version: ClassVar[str | None] = None
    removal_hint: ClassVar[str | None] = None

    # Subclasses may set these to deprecate an old alias for this target type. NOTE(review):
    # neither is referenced in this file — presumably consumed by BUILD-file parsing; confirm.
    deprecated_alias: ClassVar[str | None] = None
    deprecated_alias_removal_version: ClassVar[str | None] = None

    # These get calculated in the constructor
    address: Address
    plugin_fields: tuple[type[Field], ...]
    field_values: FrozenDict[type[Field], Field]
    residence_dir: str

    @final
    def __init__(
        self,
        unhydrated_values: dict[str, Any],
        address: Address,
        # NB: `union_membership` is only optional to facilitate tests. In production, we should
        # always provide this parameter. This should be safe to do because production code should
        # rarely directly instantiate Targets and should instead use the engine to request them.
        union_membership: UnionMembership | None = None,
        *,
        residence_dir: str | None = None,
    ) -> None:
        """Create a target.

        :param unhydrated_values: A mapping of field aliases to their raw values. Any left off
            fields will either use their default or error if required=True.
        :param address: How to uniquely identify this target.
        :param union_membership: Used to determine plugin fields. This must be set in production!
        :param residence_dir: Where this target "lives". If unspecified, will be the `spec_path`
            of the `address`, i.e. where the target was either explicitly defined or where its
            target generator was explicitly defined. Target generators can, however, set this to
            the directory where the generated target provides metadata for. For example, a
            file-based target like `python_source` should set this to the parent directory of
            its file. A directory-based target like `go_first_party_package` should set it to the
            directory. A subtree-based target might set it to the root of the subtree. A file-less
            target like `go_third_party_package` should keep the default of `address.spec_path`.
            This field impacts how command line specs work, so that globs like `dir:` know whether
            to match the target or not.

        :raises ValueError: if `removal_version` is set without `removal_hint`.
        :raises InvalidFieldException: if an alias in `unhydrated_values` is not a registered
            field of this target type.
        """
        # Deprecation warning for the target type itself. Generated targets are skipped so the
        # warning fires on the explicitly-written target, not each generated one.
        if self.removal_version and not address.is_generated_target:
            if not self.removal_hint:
                raise ValueError(
                    f"You specified `removal_version` for {self.__class__}, but not "
                    "the class property `removal_hint`."
                )
            warn_or_error(
                self.removal_version,
                entity=f"the {repr(self.alias)} target type",
                hint=(
                    f"Using the `{self.alias}` target type for {address}. " f"{self.removal_hint}"
                ),
            )

        self.address = address
        self.plugin_fields = self._find_plugin_fields(union_membership or UnionMembership({}))
        self.residence_dir = residence_dir if residence_dir is not None else address.spec_path

        # Instantiate every registered Field, erroring on unrecognized aliases.
        field_values = {}
        aliases_to_field_types = {field_type.alias: field_type for field_type in self.field_types}
        for alias, value in unhydrated_values.items():
            if alias not in aliases_to_field_types:
                raise InvalidFieldException(
                    f"Unrecognized field `{alias}={value}` in target {address}. Valid fields for "
                    f"the target type `{self.alias}`: {sorted(aliases_to_field_types.keys())}.",
                )
            field_type = aliases_to_field_types[alias]
            field_values[field_type] = field_type(value, address)
        # For undefined fields, mark the raw value as None.
        for field_type in set(self.field_types) - set(field_values.keys()):
            field_values[field_type] = field_type(None, address)
        # Sort by alias so that hashing/equality and repr output are deterministic.
        self.field_values = FrozenDict(
            sorted(
                field_values.items(),
                key=lambda field_type_to_val_pair: field_type_to_val_pair[0].alias,
            )
        )

        self.validate()

    @final
    @property
    def field_types(self) -> Tuple[Type[Field], ...]:
        # All registered fields on this instance: core fields plus plugin fields.
        return (*self.core_fields, *self.plugin_fields)

    @final
    @memoized_classproperty
    def _plugin_field_cls(cls) -> type:
        # NB: We ensure that each Target subtype has its own `PluginField` class so that
        # registering a plugin field doesn't leak across target types.
        @union
        class PluginField:
            pass

        return PluginField

    def __repr__(self) -> str:
        fields = ", ".join(str(field) for field in self.field_values.values())
        return (
            f"{self.__class__}("
            f"address={self.address}, "
            f"alias={repr(self.alias)}, "
            f"residence_dir={repr(self.residence_dir)}, "
            f"{fields})"
        )

    def __str__(self) -> str:
        # Renders similarly to a BUILD-file declaration, e.g. `my_alias(address="...", f=v)`.
        fields = ", ".join(str(field) for field in self.field_values.values())
        address = f"address=\"{self.address}\"{', ' if fields else ''}"
        return f"{self.alias}({address}{fields})"

    # Identity is (exact type, address, residence_dir, all field values).
    def __hash__(self) -> int:
        return hash((self.__class__, self.address, self.residence_dir, self.field_values))

    def __eq__(self, other: Union[Target, Any]) -> bool:
        if not isinstance(other, Target):
            return NotImplemented
        return (self.__class__, self.address, self.residence_dir, self.field_values) == (
            other.__class__,
            other.address,
            other.residence_dir,
            other.field_values,
        )

    @final
    @classmethod
    def _find_plugin_fields(cls, union_membership: UnionMembership) -> tuple[type[Field], ...]:
        """Return the Field types registered on this target type via `register_plugin_field`."""
        return cast(Tuple[Type[Field], ...], tuple(union_membership.get(cls._plugin_field_cls)))

    @final
    @classmethod
    def _find_registered_field_subclass(
        cls, requested_field: Type[_F], *, registered_fields: Iterable[Type[Field]]
    ) -> Optional[Type[_F]]:
        """Check if the Target has registered a subclass of the requested Field.

        This is necessary to allow targets to override the functionality of common fields. For
        example, you could subclass `Tags` to define `CustomTags` with a different default. At the
        same time, we still want to be able to call `tgt.get(Tags)`, in addition to
        `tgt.get(CustomTags)`.
        """
        subclass = next(
            (
                registered_field
                for registered_field in registered_fields
                if issubclass(registered_field, requested_field)
            ),
            None,
        )
        return cast(Optional[Type[_F]], subclass)

    @final
    def _maybe_get(self, field: Type[_F]) -> Optional[_F]:
        """Return this target's instance of `field` (or of a registered subclass), else None."""
        result = self.field_values.get(field, None)
        if result is not None:
            return cast(_F, result)
        field_subclass = self._find_registered_field_subclass(
            field, registered_fields=self.field_types
        )
        if field_subclass is not None:
            return cast(_F, self.field_values[field_subclass])
        return None

    @final
    def __getitem__(self, field: Type[_F]) -> _F:
        """Get the requested `Field` instance belonging to this target.

        If the `Field` is not registered on this `Target` type, this method will raise a
        `KeyError`. To avoid this, you should first call `tgt.has_field()` or `tgt.has_fields()`
        to ensure that the field is registered, or, alternatively, use `Target.get()`.

        See the docstring for `Target.get()` for how this method handles subclasses of the
        requested Field and for tips on how to use the returned value.
        """
        result = self._maybe_get(field)
        if result is not None:
            return result
        raise KeyError(
            f"The target `{self}` does not have a field `{field.__name__}`. Before calling "
            f"`my_tgt[{field.__name__}]`, call `my_tgt.has_field({field.__name__})` to "
            f"filter out any irrelevant Targets or call `my_tgt.get({field.__name__})` to use the "
            f"default Field value."
        )

    @final
    def get(self, field: Type[_F], *, default_raw_value: Optional[Any] = None) -> _F:
        """Get the requested `Field` instance belonging to this target.

        This will return an instance of the requested field type, e.g. an instance of
        `InterpreterConstraints`, `SourcesField`, `EntryPoint`, etc. Usually, you will want to
        grab the `Field`'s inner value, e.g. `tgt.get(Compatibility).value`. (For async fields like
        `SourcesField`, you may need to hydrate the value.).

        This works with subclasses of `Field`s. For example, if you subclass `Tags`
        to define a custom subclass `CustomTags`, both `tgt.get(Tags)` and
        `tgt.get(CustomTags)` will return the same `CustomTags` instance.

        If the `Field` is not registered on this `Target` type, this will return an instance of
        the requested Field by using `default_raw_value` to create the instance. Alternatively,
        first call `tgt.has_field()` or `tgt.has_fields()` to ensure that the field is registered,
        or, alternatively, use indexing (e.g. `tgt[Compatibility]`) to raise a KeyError when the
        field is not registered.
        """
        result = self._maybe_get(field)
        if result is not None:
            return result
        return field(default_raw_value, self.address)

    @final
    @classmethod
    def _has_fields(
        cls, fields: Iterable[Type[Field]], *, registered_fields: Iterable[Type[Field]]
    ) -> bool:
        """True if every requested field (or a registered subclass of it) is registered."""
        unrecognized_fields = [field for field in fields if field not in registered_fields]
        if not unrecognized_fields:
            return True
        for unrecognized_field in unrecognized_fields:
            maybe_subclass = cls._find_registered_field_subclass(
                unrecognized_field, registered_fields=registered_fields
            )
            if maybe_subclass is None:
                return False
        return True

    @final
    def has_field(self, field: Type[Field]) -> bool:
        """Check that this target has registered the requested field.

        This works with subclasses of `Field`s. For example, if you subclass `Tags` to define a
        custom subclass `CustomTags`, both `tgt.has_field(Tags)` and
        `python_tgt.has_field(CustomTags)` will return True.
        """
        return self.has_fields([field])

    @final
    def has_fields(self, fields: Iterable[Type[Field]]) -> bool:
        """Check that this target has registered all of the requested fields.

        This works with subclasses of `Field`s. For example, if you subclass `Tags` to define a
        custom subclass `CustomTags`, both `tgt.has_fields([Tags])` and
        `python_tgt.has_fields([CustomTags])` will return True.
        """
        return self._has_fields(fields, registered_fields=self.field_types)

    @final
    @classmethod
    def class_field_types(cls, union_membership: UnionMembership) -> Tuple[Type[Field], ...]:
        """Return all registered Fields belonging to this target type.

        You can also use the instance property `tgt.field_types` to avoid having to pass the
        parameter UnionMembership.
        """
        return (*cls.core_fields, *cls._find_plugin_fields(union_membership))

    @final
    @classmethod
    def class_has_field(cls, field: Type[Field], union_membership: UnionMembership) -> bool:
        """Behaves like `Target.has_field()`, but works as a classmethod rather than an instance
        method."""
        return cls.class_has_fields([field], union_membership)

    @final
    @classmethod
    def class_has_fields(
        cls, fields: Iterable[Type[Field]], union_membership: UnionMembership
    ) -> bool:
        """Behaves like `Target.has_fields()`, but works as a classmethod rather than an instance
        method."""
        return cls._has_fields(fields, registered_fields=cls.class_field_types(union_membership))

    @final
    @classmethod
    def class_get_field(cls, field: Type[_F], union_membership: UnionMembership) -> Type[_F]:
        """Get the requested Field type registered with this target type.

        This will error if the field is not registered, so you should call Target.class_has_field()
        first.
        """
        class_fields = cls.class_field_types(union_membership)
        result = next(
            (
                registered_field
                for registered_field in class_fields
                if issubclass(registered_field, field)
            ),
            None,
        )
        if result is None:
            raise KeyError(
                f"The target type `{cls.alias}` does not have a field `{field.__name__}`. Before "
                f"calling `TargetType.class_get_field({field.__name__})`, call "
                f"`TargetType.class_has_field({field.__name__})`."
            )
        return result

    @final
    @classmethod
    def register_plugin_field(cls, field: Type[Field]) -> UnionRule:
        """Register a new field on the target type.

        In the `rules()` register.py entry-point, include
        `MyTarget.register_plugin_field(NewField)`. This will register `NewField` as a first-class
        citizen. Plugins can use this new field like any other.
        """
        return UnionRule(cls._plugin_field_cls, field)

    def validate(self) -> None:
        """Validate the target, such as checking for mutually exclusive fields.

        N.B.: The validation should only be of properties intrinsic to the associated files in any
        context. If the validation only makes sense for certain goals acting on targets; those
        validations should be done in the associated rules.
        """
@dataclass(frozen=True)
class WrappedTarget:
    """A light wrapper to encapsulate all the distinct `Target` subclasses into a single type.

    This is necessary when using a single target in a rule because the engine expects exact types
    and does not work with subtypes.
    """

    # The wrapped Target instance (may be any Target subclass).
    target: Target
class Targets(Collection[Target]):
    """A heterogeneous collection of `Target` instances.

    Although every element is a `Target`, the concrete types may vary within one collection,
    e.g. a mix of `FileTarget` and `PythonTestTarget`. To narrow down to the targets relevant
    to your rule, filter on the fields they register:

        valid_tgts = [tgt for tgt in tgts if tgt.has_fields([Compatibility, PythonSources])]

    Avoid `isinstance` checks against concrete target classes, as those break custom target
    types; prefer `tgt.has_field(PythonTestsSourcesField)` over
    `isinstance(tgt, PythonTestsTarget)`.
    """

    def expect_single(self) -> Target:
        addresses = [tgt.address for tgt in self]
        assert_single_address(addresses)
        return self[0]
class UnexpandedTargets(Collection[Target]):
    """Like `Targets`, except target generators are kept as-is rather than being swapped for
    the targets they generate (e.g. a `python_sources` "BUILD target" is not expanded into
    `python_source` "file targets")."""

    def expect_single(self) -> Target:
        all_addresses = [tgt.address for tgt in self]
        assert_single_address(all_addresses)
        return self[0]
class CoarsenedTarget(EngineAwareParameter):
    def __init__(self, members: Iterable[Target], dependencies: Iterable[CoarsenedTarget]) -> None:
        """An indivisible group of Targets that reach one another cyclically.

        Instances form a structure-shared DAG, so the hash of the recursive portion is computed
        once up front and reused.

        :param members: The Targets participating in the cycle.
        :param dependencies: The deduplicated direct (non-transitive) dependencies of every
            Target in the cycle, excluding dependencies between members of the cycle itself.
        """
        self.members = FrozenOrderedSet(members)
        self.dependencies = FrozenOrderedSet(dependencies)
        self._hashcode = hash((self.members, self.dependencies))

    def debug_hint(self) -> str:
        return str(self)

    def metadata(self) -> Dict[str, Any]:
        specs = [member.address.spec for member in self.members]
        return {"addresses": specs}

    @property
    def representative(self) -> Target:
        """The first member, serving as a stable representative of the whole cycle."""
        return next(iter(self.members))

    def __hash__(self) -> int:
        # Precomputed in the constructor; see __init__.
        return self._hashcode

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, CoarsenedTarget):
            return NotImplemented
        # Cheap checks first: precomputed hash, then members.
        if self._hashcode != other._hashcode:
            return False
        if self.members != other.members:
            return False
        # TODO: Use a recursive memoized __eq__ if this ever shows up in profiles.
        return self.dependencies == other.dependencies

    def __str__(self) -> str:
        spec = self.representative.address.spec
        extra = len(self.members) - 1
        return f"{spec} (and {extra} more)" if extra else spec

    def __repr__(self) -> str:
        name = self.__class__.__name__
        return f"{name}({str(self)})"
class CoarsenedTargets(Collection[CoarsenedTarget]):
    """The CoarsenedTarget roots of a transitive graph walk for some addresses.

    To collect all reachable CoarsenedTarget members, use `def closure`.
    """

    def closure(self) -> Iterator[CoarsenedTarget]:
        """Walk breadth-first from these roots, yielding each reachable CoarsenedTarget once."""
        seen = set()
        pending = deque(self)
        while pending:
            current = pending.popleft()
            if current in seen:
                continue
            seen.add(current)
            pending.extend(current.dependencies)
            yield current
@dataclass(frozen=True)
class TransitiveTargets:
    """A set of Target roots, and their transitive, flattened, de-duped dependencies.

    If a target root is a dependency of another target root, then it will show up both in
    `roots` and in `dependencies`.
    """

    roots: Tuple[Target, ...]
    dependencies: FrozenOrderedSet[Target]

    @memoized_property
    def closure(self) -> FrozenOrderedSet[Target]:
        """The roots and the dependencies combined."""
        return FrozenOrderedSet(itertools.chain(self.roots, self.dependencies))
@frozen_after_init
@dataclass(unsafe_hash=True)
class TransitiveTargetsRequest:
    """A request to get the transitive dependencies of the input roots.

    Resolve the transitive targets with `await Get(TransitiveTargets,
    TransitiveTargetsRequest([addr1, addr2])`.
    """

    roots: Tuple[Address, ...]
    include_special_cased_deps: bool

    def __init__(
        self, roots: Iterable[Address], *, include_special_cased_deps: bool = False
    ) -> None:
        # Normalize to a tuple so the request is hashable (needed for `unsafe_hash=True`).
        self.roots = tuple(roots)
        self.include_special_cased_deps = include_special_cased_deps
@frozen_after_init
@dataclass(unsafe_hash=True)
class RegisteredTargetTypes:
    """An index of registered target types, keyed by alias (deprecated aliases included)."""

    aliases_to_types: FrozenDict[str, Type[Target]]

    def __init__(self, aliases_to_types: Mapping[str, Type[Target]]) -> None:
        self.aliases_to_types = FrozenDict(aliases_to_types)

    @classmethod
    def create(cls, target_types: Iterable[Type[Target]]) -> RegisteredTargetTypes:
        mapping: Dict[str, Type[Target]] = {}
        # Sort by alias so iteration order over the index is deterministic.
        for target_type in sorted(target_types, key=lambda tt: tt.alias):
            mapping[target_type.alias] = target_type
            if target_type.deprecated_alias is not None:
                mapping[target_type.deprecated_alias] = target_type
        return cls(mapping)

    @property
    def aliases(self) -> FrozenOrderedSet[str]:
        return FrozenOrderedSet(self.aliases_to_types)

    @property
    def types(self) -> FrozenOrderedSet[type[Target]]:
        return FrozenOrderedSet(self.aliases_to_types.values())
class AllTargets(Collection[Target]):
    """All targets in the project, but with target generators replaced by their generated
    targets, unlike `AllUnexpandedTargets`."""
class AllUnexpandedTargets(Collection[Target]):
    """All targets in the project, including generated targets.

    This should generally be avoided because it is relatively expensive to compute and is
    frequently invalidated, but it can be necessary for things like dependency inference to
    build a global mapping of imports to targets.
    """
@dataclass(frozen=True)
class AllTargetsRequest:
    """Find all targets in the project.

    Use with either `AllUnexpandedTargets` or `AllTargets`.
    """
# -----------------------------------------------------------------------------------------------
# Target generation
# -----------------------------------------------------------------------------------------------
# TypeVar bound to `Target`: parameterizes `GenerateTargetsRequest` by the concrete
# target-generator type it handles.
_Tgt = TypeVar("_Tgt", bound=Target)
@union
@dataclass(frozen=True)
class GenerateTargetsRequest(Generic[_Tgt]):
    """A union request for expanding a target generator into generated targets."""

    # The target generator type this request knows how to expand.
    generate_from: ClassVar[type[_Tgt]]
    # The target generator instance to expand.
    generator: _Tgt
class GeneratedTargets(FrozenDict[Address, Target]):
    """A mapping of the address of generated targets to the targets themselves."""

    def __init__(self, generator: Target, generated_targets: Iterable[Target]) -> None:
        # Every generated target must "belong" to its generator: same `spec_path`, same
        # `target_name`, and an address flagged as generated. Anything else indicates a bug
        # in the generating rule, so fail loudly with a fix suggestion.
        expected_spec_path = generator.address.spec_path
        expected_tgt_name = generator.address.target_name
        mapping = {}
        # Sort by address for a deterministic mapping order.
        for tgt in sorted(generated_targets, key=lambda t: t.address):
            if tgt.address.spec_path != expected_spec_path:
                raise InvalidGeneratedTargetException(
                    "All generated targets must have the same `Address.spec_path` as their "
                    f"target generator. Expected {generator.address.spec_path}, but got "
                    f"{tgt.address.spec_path} for target generated from {generator.address}: {tgt}"
                    "\n\nConsider using `request.generator.address.create_generated()`."
                )
            if tgt.address.target_name != expected_tgt_name:
                raise InvalidGeneratedTargetException(
                    "All generated targets must have the same `Address.target_name` as their "
                    f"target generator. Expected {generator.address.target_name}, but got "
                    f"{tgt.address.target_name} for target generated from {generator.address}: "
                    f"{tgt}\n\n"
                    "Consider using `request.generator.address.create_generated()`."
                )
            if not tgt.address.is_generated_target:
                raise InvalidGeneratedTargetException(
                    "All generated targets must set `Address.generator_name` or "
                    "`Address.relative_file_path`. Invalid for target generated from "
                    f"{generator.address}: {tgt}\n\n"
                    "Consider using `request.generator.address.create_generated()`."
                )
            mapping[tgt.address] = tgt
        super().__init__(mapping)
class TargetTypesToGenerateTargetsRequests(FrozenDict[Type[Target], Type[GenerateTargetsRequest]]):
    def is_generator(self, tgt: Target) -> bool:
        """Does this target type generate other targets?"""
        tgt_type = type(tgt)
        return tgt_type in self
def generate_file_level_targets(
    generated_target_cls: type[Target],
    generator: Target,
    paths: Sequence[str],
    # NB: Should only ever be set to `None` in tests.
    union_membership: UnionMembership | None,
    *,
    add_dependencies_on_all_siblings: bool,
    use_generated_address_syntax: bool = False,
    use_source_field: bool = True,
    overrides: dict[str, dict[str, Any]] | None = None,
) -> GeneratedTargets:
    """Generate one new target for each path, using the same fields as the generator target except
    for the `sources` field only referring to the path and using a new address.

    Set `add_dependencies_on_all_siblings` to True so that each file-level target depends on all
    other generated targets from the target generator. This is useful if both are true:

        a) file-level targets usually need their siblings to be present to work. Most target types
           (Python, Java, Shell, etc) meet this, except for `files` and `resources` which have no
           concept of "imports"
        b) dependency inference cannot infer dependencies on sibling files.

    Otherwise, set `add_dependencies_on_all_siblings` to `False` so that dependencies are
    finer-grained.

    `overrides` allows changing the fields for particular targets. It expects the full file path
    as the key.
    """
    # The generated targets copy the generator's `dependencies` and replace its `sources`,
    # so both fields must exist on the generator.
    if not generator.has_field(Dependencies) or not generator.has_field(SourcesField):
        raise AssertionError(
            f"The `{generator.alias}` target {generator.address.spec} does "
            "not have both a `dependencies` and `sources` field, and thus cannot generate a "
            f"`{generated_target_cls.alias}` target."
        )
    # One generated address per path, relativized to the generator's directory.
    all_generated_addresses = []
    for fp in paths:
        relativized_fp = fast_relpath(fp, generator.address.spec_path)
        all_generated_addresses.append(
            generator.address.create_generated(relativized_fp)
            if use_generated_address_syntax
            else Address(
                generator.address.spec_path,
                target_name=generator.address.target_name,
                relative_file_path=relativized_fp,
            )
        )
    # Pre-compute the sibling spec set once; it is only needed when wiring sibling deps.
    all_generated_address_specs = (
        FrozenOrderedSet(addr.spec for addr in all_generated_addresses)
        if add_dependencies_on_all_siblings
        else FrozenOrderedSet()
    )
    # Track which override keys were consumed so unused ones can be reported below.
    used_overrides = set()
    normalized_overrides = overrides or {}

    def gen_tgt(full_fp: str, address: Address) -> Target:
        # Build the field dict for one generated target from the generator's fields.
        generated_target_fields: dict[str, ImmutableValue] = {}
        for field in generator.field_values.values():
            value: ImmutableValue
            if isinstance(field, MultipleSourcesField):
                # The path must be covered by the generator's own `sources` globs.
                if not bool(matches_filespec(field.filespec, paths=[full_fp])):
                    raise AssertionError(
                        f"Target {generator.address.spec}'s `sources` field does not match a file "
                        f"{full_fp}."
                    )
                value = address._relative_file_path or address.generated_name
                if use_source_field:
                    generated_target_fields[SingleSourceField.alias] = value
                else:
                    generated_target_fields[MultipleSourcesField.alias] = (value,)
            elif add_dependencies_on_all_siblings and isinstance(field, Dependencies):
                # Append every sibling (except the target itself) to the explicit deps.
                generated_target_fields[Dependencies.alias] = (field.value or ()) + tuple(
                    all_generated_address_specs - {address.spec}
                )
            elif isinstance(field, OverridesField):
                # The generator's `overrides` field must not be copied onto generated targets.
                continue
            elif field.value != field.default:
                # Only copy fields that were explicitly set away from their default.
                generated_target_fields[field.alias] = field.value
        if full_fp in normalized_overrides:
            used_overrides.add(full_fp)
            generated_target_fields.update(normalized_overrides[full_fp])
        return generated_target_cls(
            generated_target_fields,
            address,
            union_membership,
            residence_dir=os.path.dirname(full_fp),
        )

    result = tuple(gen_tgt(fp, address) for fp, address in zip(paths, all_generated_addresses))
    # Any override key that never matched a generated path is almost certainly a typo.
    unused_overrides = set(normalized_overrides.keys()) - used_overrides
    if unused_overrides:
        unused_relative_paths = sorted(
            fast_relpath(fp, generator.address.spec_path) for fp in unused_overrides
        )
        all_valid_relative_paths = sorted(
            cast(str, tgt.address._relative_file_path or tgt.address.generated_name)
            for tgt in result
        )
        raise InvalidFieldException(
            f"Unused file paths in the `overrides` field for {generator.address}: "
            f"{sorted(unused_relative_paths)}"
            f"\n\nDid you mean one of these valid paths?\n\n"
            f"{all_valid_relative_paths}\n\n"
            f"Tip: if you want to override a value for all generated targets, set the ..."
        )
    return GeneratedTargets(generator, result)
# -----------------------------------------------------------------------------------------------
# FieldSet
# -----------------------------------------------------------------------------------------------
def _get_field_set_fields_from_target(
    field_set: Type[FieldSet], target: Target
) -> Dict[str, Field]:
    """For each `Field`-annotated attribute on the FieldSet subclass, pull the corresponding
    field instance off the target.

    Required fields use indexing (raising if absent); optional ones use `.get()` (falling back
    to the field's default).
    """
    hints = get_type_hints(field_set)
    expected_fields: Dict[str, Type[Field]] = {
        name: hint
        for name, hint in hints.items()
        if isinstance(hint, type) and issubclass(hint, Field)
    }
    result: Dict[str, Field] = {}
    for attr_name, field_cls in expected_fields.items():
        if field_cls in field_set.required_fields:
            result[attr_name] = target[field_cls]
        else:
            result[attr_name] = target.get(field_cls)
    return result
# TypeVar bound to `FieldSet`: lets classmethods like `FieldSet.create` return the subclass
# they were invoked on.
_FS = TypeVar("_FS", bound="FieldSet")
@dataclass(frozen=True)
class FieldSet(EngineAwareParameter, metaclass=ABCMeta):
    """An ad hoc set of fields from a target which are used by rules.

    Subclasses should declare all the fields they consume as dataclass attributes. They should
    also indicate which of these are required, rather than optional, through the class property
    `required_fields`. When a field is optional, the default constructor for the field will be
    used for any targets that do not have that field registered.

    Subclasses must set `@dataclass(frozen=True)` for their declared fields to be recognized.

    You can optionally set implement the classmethod `opt_out` so that targets have a
    mechanism to not match with the FieldSet even if they have the `required_fields` registered.

    For example:

        @dataclass(frozen=True)
        class FortranTestFieldSet(FieldSet):
            required_fields = (FortranSources,)

            sources: FortranSources
            fortran_version: FortranVersion

            @classmethod
            def opt_out(cls, tgt: Target) -> bool:
                return tgt.get(MaybeSkipFortranTestsField).value

    This field set may then created from a `Target` through the `is_applicable()` and `create()`
    class methods:

        field_sets = [
            FortranTestFieldSet.create(tgt) for tgt in targets
            if FortranTestFieldSet.is_applicable(tgt)
        ]

    FieldSets are consumed like any normal dataclass:

        print(field_set.address)
        print(field_set.sources)
    """

    required_fields: ClassVar[Tuple[Type[Field], ...]]

    address: Address

    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        """If `True`, the target will not match with the field set, even if it has the FieldSet's
        `required_fields`.

        Note: this method is not intended to categorically opt out a target type from a
        FieldSet, i.e. to always opt out based solely on the target type. While it is possible to
        do, some error messages will incorrectly suggest that that target is compatible with the
        FieldSet. Instead, if you need this feature, please ask us to implement it. See
        https://github.com/pantsbuild/pants/pull/12002 for discussion.
        """
        return False

    @final
    @classmethod
    def is_applicable(cls, tgt: Target) -> bool:
        # A target matches when it has every required field and does not opt out. Note the
        # short-circuit: `opt_out` is only consulted once the required fields are present.
        return tgt.has_fields(cls.required_fields) and not cls.opt_out(tgt)

    @final
    @classmethod
    def applicable_target_types(
        cls, target_types: Iterable[Type[Target]], union_membership: UnionMembership
    ) -> Tuple[Type[Target], ...]:
        # Filters target *types* (not instances) to those registering all required fields.
        return tuple(
            tgt_type
            for tgt_type in target_types
            if tgt_type.class_has_fields(cls.required_fields, union_membership)
        )

    @final
    @classmethod
    def create(cls: Type[_FS], tgt: Target) -> _FS:
        # Populate each declared dataclass attribute from the target's matching field.
        return cls(  # type: ignore[call-arg]
            address=tgt.address, **_get_field_set_fields_from_target(cls, tgt)
        )

    def debug_hint(self) -> str:
        return self.address.spec

    def metadata(self) -> Dict[str, Any]:
        return {"address": self.address.spec}

    def __repr__(self) -> str:
        # We use a short repr() because this often shows up in stack traces. We don't need any of
        # the field information because we can ask a user to send us their BUILD file.
        return f"{self.__class__.__name__}(address={self.address})"
@frozen_after_init
@dataclass(unsafe_hash=True)
class TargetRootsToFieldSets(Generic[_FS]):
    """A mapping from each target to the field sets that apply to it."""

    mapping: FrozenDict[Target, Tuple[_FS, ...]]

    def __init__(self, mapping: Mapping[Target, Iterable[_FS]]) -> None:
        self.mapping = FrozenDict(
            {tgt: tuple(field_sets) for tgt, field_sets in mapping.items()}
        )

    @memoized_property
    def field_sets(self) -> Tuple[_FS, ...]:
        """All field sets, flattened across targets."""
        return tuple(itertools.chain.from_iterable(self.mapping.values()))

    @memoized_property
    def targets(self) -> Tuple[Target, ...]:
        return tuple(self.mapping)
class NoApplicableTargetsBehavior(Enum):
    """How a `TargetRootsToFieldSetsRequest` reacts when no targets are applicable."""

    ignore = "ignore"
    warn = "warn"
    error = "error"
@frozen_after_init
@dataclass(unsafe_hash=True)
class TargetRootsToFieldSetsRequest(Generic[_FS]):
    """A request to compute which `field_set_superclass` field sets apply to the target roots.

    `goal_description` is used in user-facing messages, and `no_applicable_targets_behavior`
    controls what happens when no target matches.
    """

    field_set_superclass: Type[_FS]
    goal_description: str
    no_applicable_targets_behavior: NoApplicableTargetsBehavior
    expect_single_field_set: bool

    def __init__(
        self,
        field_set_superclass: Type[_FS],
        *,
        goal_description: str,
        no_applicable_targets_behavior: NoApplicableTargetsBehavior,
        expect_single_field_set: bool = False,
    ) -> None:
        self.field_set_superclass = field_set_superclass
        self.goal_description = goal_description
        self.no_applicable_targets_behavior = no_applicable_targets_behavior
        self.expect_single_field_set = expect_single_field_set
@frozen_after_init
@dataclass(unsafe_hash=True)
class FieldSetsPerTarget(Generic[_FS]):
    # One tuple of FieldSet instances per input target.
    collection: Tuple[Tuple[_FS, ...], ...]

    def __init__(self, collection: Iterable[Iterable[_FS]]):
        self.collection = tuple(map(tuple, collection))

    @memoized_property
    def field_sets(self) -> Tuple[_FS, ...]:
        """All field sets, flattened into a single tuple."""
        return tuple(itertools.chain.from_iterable(self.collection))
@frozen_after_init
@dataclass(unsafe_hash=True)
class FieldSetsPerTargetRequest(Generic[_FS]):
    """A request to compute, for each input target, the applicable `field_set_superclass` field
    sets (producing a `FieldSetsPerTarget`)."""

    field_set_superclass: Type[_FS]
    targets: Tuple[Target, ...]

    def __init__(self, field_set_superclass: Type[_FS], targets: Iterable[Target]):
        self.field_set_superclass = field_set_superclass
        # Normalize to a tuple so the request is hashable.
        self.targets = tuple(targets)
# -----------------------------------------------------------------------------------------------
# Exception messages
# -----------------------------------------------------------------------------------------------
class InvalidTargetException(Exception):
    """Use when there's an issue with the target, e.g. mutually exclusive fields set.

    Suggested template:

        f"The `{repr(alias)}` target {address} ..."
    """
class InvalidGeneratedTargetException(InvalidTargetException):
    # Raised by `GeneratedTargets.__init__` when a generated target's address does not
    # correspond to its target generator.
    pass
class InvalidFieldException(Exception):
    """Use when there's an issue with a particular field.

    Suggested template:

        f"The {repr(alias)} field in target {address} must ..., but ..."
    """
class InvalidFieldTypeException(InvalidFieldException):
    """Raised when a field's raw value does not conform with the expected type for the field,
    e.g. `a boolean` or `a string` or `an iterable of strings and integers`."""

    def __init__(
        self, address: Address, field_alias: str, raw_value: Optional[Any], *, expected_type: str
    ) -> None:
        message = (
            f"The {field_alias!r} field in target {address} must be {expected_type}, but was "
            f"`{raw_value!r}` with type `{type(raw_value).__name__}`."
        )
        super().__init__(message)
class RequiredFieldMissingException(InvalidFieldException):
    """Raised when a required field was not defined on a target."""

    def __init__(self, address: Address, field_alias: str) -> None:
        message = f"The {field_alias!r} field in target {address} must be defined."
        super().__init__(message)
class InvalidFieldChoiceException(InvalidFieldException):
    """Raised when a field's value is not one of its valid choices."""

    def __init__(
        self,
        address: Address,
        field_alias: str,
        raw_value: Optional[Any],
        *,
        valid_choices: Iterable[Any],
    ) -> None:
        super().__init__(
            f"The {field_alias!r} field in target {address} must be one of "
            f"{sorted(valid_choices)}, but was {raw_value!r}."
        )
class UnrecognizedTargetTypeException(Exception):
    """Raised when a target type alias is not among the registered target types."""

    def __init__(
        self,
        target_type: str,
        registered_target_types: RegisteredTargetTypes,
        address: Address | None = None,
    ) -> None:
        for_address = f" for address {address}" if address else ""
        message = (
            f"Target type {repr(target_type)} is not registered{for_address}.\n\nAll valid target "
            f"types: {sorted(registered_target_types.aliases)}\n\n(If {repr(target_type)} is a "
            "custom target type, refer to "
            "https://groups.google.com/forum/#!topic/pants-devel/WsRFODRLVZI for instructions on "
            "writing a light-weight Target API binding.)"
        )
        super().__init__(message)
# -----------------------------------------------------------------------------------------------
# Field templates
# -----------------------------------------------------------------------------------------------
# Generic scalar/element type used by the field templates below.
T = TypeVar("T")
class ScalarField(Generic[T], Field):
    """A field with a scalar value (vs. a compound value like a sequence or dict).

    Subclasses must define the class properties `expected_type` and `expected_type_description`.
    They should also override the type hints for the classmethod `compute_value` so that we use
    the correct type annotation in generated documentation.

        class Example(ScalarField):
            alias = "example"
            expected_type = MyPluginObject
            expected_type_description = "a `my_plugin` object"

            @classmethod
            def compute_value(
                cls, raw_value: Optional[MyPluginObject], *, address: Address
            ) -> Optional[MyPluginObject]:
                return super().compute_value(raw_value, address=address)
    """

    expected_type: ClassVar[Type[T]]
    expected_type_description: ClassVar[str]
    value: Optional[T]
    default: ClassVar[Optional[T]] = None

    @classmethod
    def compute_value(cls, raw_value: Optional[Any], address: Address) -> Optional[T]:
        computed = super().compute_value(raw_value, address)
        # `None` (i.e. the default) is always permitted; otherwise enforce the scalar type.
        if computed is None or isinstance(computed, cls.expected_type):
            return computed
        raise InvalidFieldTypeException(
            address,
            cls.alias,
            raw_value,
            expected_type=cls.expected_type_description,
        )
class BoolField(Field):
    """A field whose value is a boolean.

    Subclasses must either set `default: bool` or `required = True` so that the value is always
    defined.
    """

    value: bool
    default: ClassVar[bool]

    @classmethod
    def compute_value(cls, raw_value: bool, address: Address) -> bool:  # type: ignore[override]
        computed = super().compute_value(raw_value, address)
        # Unlike ScalarField, `None` is not acceptable here: the value must be a real bool.
        if isinstance(computed, bool):
            return computed
        raise InvalidFieldTypeException(address, cls.alias, raw_value, expected_type="a boolean")
class TriBoolField(ScalarField[bool]):
    """A field whose value is a boolean or None, which is meant to represent a tri-state."""

    expected_type = bool
    expected_type_description = "a boolean or None"

    @classmethod
    def compute_value(cls, raw_value: Optional[bool], address: Address) -> Optional[bool]:
        # Overridden only to narrow the type hints; validation happens in ScalarField.
        return super().compute_value(raw_value, address)
class IntField(ScalarField[int]):
    """A field whose value is an integer (or None)."""

    expected_type = int
    expected_type_description = "an integer"

    @classmethod
    def compute_value(cls, raw_value: Optional[int], address: Address) -> Optional[int]:
        # Overridden only to narrow the type hints; validation happens in ScalarField.
        return super().compute_value(raw_value, address)
class FloatField(ScalarField[float]):
    """A field whose value is a float (or None)."""

    expected_type = float
    expected_type_description = "a float"

    @classmethod
    def compute_value(cls, raw_value: Optional[float], address: Address) -> Optional[float]:
        # Overridden only to narrow the type hints; validation happens in ScalarField.
        return super().compute_value(raw_value, address)
class StringField(ScalarField[str]):
    """A field whose value is a string.

    If you expect the string to only be one of several values, set the class property
    `valid_choices`.
    """

    expected_type = str
    expected_type_description = "a string"
    valid_choices: ClassVar[Optional[Union[Type[Enum], Tuple[str, ...]]]] = None

    @classmethod
    def compute_value(cls, raw_value: Optional[str], address: Address) -> Optional[str]:
        computed = super().compute_value(raw_value, address)
        if computed is None or cls.valid_choices is None:
            return computed
        # `valid_choices` may be a tuple of strings or an Enum subclass.
        if isinstance(cls.valid_choices, tuple):
            allowed = set(cls.valid_choices)
        else:
            allowed = {choice.value for choice in cls.valid_choices}
        if computed not in allowed:
            raise InvalidFieldChoiceException(
                address, cls.alias, computed, valid_choices=allowed
            )
        return computed
class SequenceField(Generic[T], Field):
    """A field whose value is a homogeneous sequence.

    Subclasses must define the class properties `expected_element_type` and
    `expected_type_description`. They should also override the type hints for the classmethod
    `compute_value` so that we use the correct type annotation in generated documentation.

        class Example(SequenceField):
            alias = "example"
            expected_element_type = MyPluginObject
            expected_type_description = "an iterable of `my_plugin` objects"

            @classmethod
            def compute_value(
                cls, raw_value: Optional[Iterable[MyPluginObject]], *, address: Address
            ) -> Optional[Tuple[MyPluginObject, ...]]:
                return super().compute_value(raw_value, address=address)
    """

    expected_element_type: ClassVar[Type[T]]
    expected_type_description: ClassVar[str]
    value: Optional[Tuple[T, ...]]
    default: ClassVar[Optional[Tuple[T, ...]]] = None

    @classmethod
    def compute_value(
        cls, raw_value: Optional[Iterable[Any]], address: Address
    ) -> Optional[Tuple[T, ...]]:
        computed = super().compute_value(raw_value, address)
        if computed is None:
            return None
        try:
            # Validates both that the value is a list-like and that every element has the
            # expected type.
            ensure_list(computed, expected_type=cls.expected_element_type)
        except ValueError:
            raise InvalidFieldTypeException(
                address,
                cls.alias,
                raw_value,
                expected_type=cls.expected_type_description,
            )
        return tuple(computed)
class StringSequenceField(SequenceField[str]):
    """A field whose value is an iterable of strings, normalized to a tuple."""

    expected_element_type = str
    expected_type_description = "an iterable of strings (e.g. a list of strings)"

    @classmethod
    def compute_value(
        cls, raw_value: Optional[Iterable[str]], address: Address
    ) -> Optional[Tuple[str, ...]]:
        # Overridden only to narrow the type hints; validation happens in SequenceField.
        return super().compute_value(raw_value, address)
class DictStringToStringField(Field):
    """A field whose value is a mapping of strings to strings, frozen for hashability."""

    value: Optional[FrozenDict[str, str]]
    default: ClassVar[Optional[FrozenDict[str, str]]] = None

    @classmethod
    def compute_value(
        cls, raw_value: Optional[Dict[str, str]], address: Address
    ) -> Optional[FrozenDict[str, str]]:
        computed = super().compute_value(raw_value, address)
        if computed is None:
            return None
        # Must be a mapping, and every key and value must be a string.
        if not isinstance(computed, collections.abc.Mapping) or not all(
            isinstance(k, str) and isinstance(v, str) for k, v in computed.items()
        ):
            raise InvalidFieldTypeException(
                address, cls.alias, raw_value, expected_type="a dictionary of string -> string"
            )
        return FrozenDict(computed)
class NestedDictStringToStringField(Field):
    """A field whose value is a mapping of strings to string-to-string mappings, frozen at both
    levels for hashability."""

    value: Optional[FrozenDict[str, FrozenDict[str, str]]]
    default: ClassVar[Optional[FrozenDict[str, FrozenDict[str, str]]]] = None

    @classmethod
    def compute_value(
        cls, raw_value: Optional[Dict[str, Dict[str, str]]], address: Address
    ) -> Optional[FrozenDict[str, FrozenDict[str, str]]]:
        computed = super().compute_value(raw_value, address)
        if computed is None:
            return None
        bad_type = InvalidFieldTypeException(
            address,
            cls.alias,
            raw_value,
            expected_type="dict[str, dict[str, str]]",
        )
        if not isinstance(computed, collections.abc.Mapping):
            raise bad_type
        for outer_key, inner in computed.items():
            # Each outer key must be a string, and each inner value must itself be a
            # string-to-string mapping.
            if not isinstance(outer_key, str) or not isinstance(inner, collections.abc.Mapping):
                raise bad_type
            for k, v in inner.items():
                if not isinstance(k, str) or not isinstance(v, str):
                    raise bad_type
        return FrozenDict(
            {outer_key: FrozenDict(inner) for outer_key, inner in computed.items()}
        )
class DictStringToStringSequenceField(Field):
    """A field whose value is a mapping of strings to iterables of strings, normalized to a
    frozen mapping of strings to tuples."""

    value: Optional[FrozenDict[str, Tuple[str, ...]]]
    default: ClassVar[Optional[FrozenDict[str, Tuple[str, ...]]]] = None

    @classmethod
    def compute_value(
        cls, raw_value: Optional[Dict[str, Iterable[str]]], address: Address
    ) -> Optional[FrozenDict[str, Tuple[str, ...]]]:
        computed = super().compute_value(raw_value, address)
        if computed is None:
            return None
        bad_type = InvalidFieldTypeException(
            address,
            cls.alias,
            raw_value,
            expected_type="a dictionary of string -> an iterable of strings",
        )
        if not isinstance(computed, collections.abc.Mapping):
            raise bad_type
        frozen: Dict[str, Tuple[str, ...]] = {}
        for key, seq in computed.items():
            if not isinstance(key, str):
                raise bad_type
            try:
                # Validates that the value is a string or iterable of strings.
                frozen[key] = tuple(ensure_str_list(seq))
            except ValueError:
                raise bad_type
        return FrozenDict(frozen)
# -----------------------------------------------------------------------------------------------
# Sources and codegen
# -----------------------------------------------------------------------------------------------
class SourcesField(AsyncFieldMixin, Field):
    """A field for the sources that a target owns.

    When defining a new sources field, you should subclass `MultipleSourcesField` or
    `SingleSourceField`, which set up the field's `alias` and data type / parsing. However, you
    should use `tgt.get(SourcesField)` when you need to operate on all sources types, such as
    with `HydrateSourcesRequest`, so that both subclasses work.

    Subclasses may set the following class properties:

        - `expected_file_extensions` -- A tuple of strings containing the expected file
          extensions for source files. The default is no expected file extensions.
        - `expected_num_files` -- An integer or range stating the expected total number of
          source files. The default is no limit on the number of source files.
        - `uses_source_roots` -- Whether the concept of "source root" pertains to the source
          files referenced by this field.
    """

    expected_file_extensions: ClassVar[tuple[str, ...] | None] = None
    expected_num_files: ClassVar[int | range | None] = None
    uses_source_roots: ClassVar[bool] = True

    default: ClassVar[ImmutableValue] = None

    @property
    def globs(self) -> tuple[str, ...]:
        """The raw globs, relative to the BUILD file."""
        # NB: We give a default implementation because it's common to use
        # `tgt.get(SourcesField)`, and that must not error. But, subclasses need to
        # implement this for the field to be useful (they should subclass `MultipleSourcesField`
        # and `SingleSourceField`).
        return ()

    def validate_resolved_files(self, files: Sequence[str]) -> None:
        """Perform any additional validation on the resulting source files, e.g. ensuring that
        certain banned files are not used.

        To enforce that the resulting files end in certain extensions, such as `.py` or
        `.java`, set the class property `expected_file_extensions`.

        To enforce that there are only a certain number of resulting files, such as binary
        targets checking for only 0-1 sources, set the class property `expected_num_files`.
        """
        if self.expected_file_extensions is not None:
            bad_files = [
                fp for fp in files if not PurePath(fp).suffix in self.expected_file_extensions
            ]
            if bad_files:
                expected = (
                    f"one of {sorted(self.expected_file_extensions)}"
                    if len(self.expected_file_extensions) > 1
                    else repr(self.expected_file_extensions[0])
                )
                raise InvalidFieldException(
                    f"The {repr(self.alias)} field in target {self.address} can only contain "
                    f"files that end in {expected}, but it had these files: {sorted(bad_files)}."
                    "\n\nMaybe create a `resource`/`resources` or `file`/`files` target and "
                    "include it in the `dependencies` field?"
                )
        if self.expected_num_files is not None:
            num_files = len(files)
            # A `range` means "any count in this range"; an `int` means "exactly this count".
            is_bad_num_files = (
                num_files not in self.expected_num_files
                if isinstance(self.expected_num_files, range)
                else num_files != self.expected_num_files
            )
            if is_bad_num_files:
                if isinstance(self.expected_num_files, range):
                    if len(self.expected_num_files) == 2:
                        # A 2-element range reads better as "N or M files".
                        expected_str = (
                            " or ".join(str(n) for n in self.expected_num_files) + " files"
                        )
                    else:
                        expected_str = f"a number of files in the range `{self.expected_num_files}`"
                else:
                    expected_str = pluralize(self.expected_num_files, "file")
                raise InvalidFieldException(
                    f"The {repr(self.alias)} field in target {self.address} must have "
                    f"{expected_str}, but it had {pluralize(num_files, 'file')}."
                )

    @staticmethod
    def prefix_glob_with_dirpath(dirpath: str, glob: str) -> str:
        # A leading "!" marks an ignore glob; the "!" must stay in front of the joined path.
        if glob.startswith("!"):
            return f"!{os.path.join(dirpath, glob[1:])}"
        return os.path.join(dirpath, glob)

    @final
    def _prefix_glob_with_address(self, glob: str) -> str:
        return self.prefix_glob_with_dirpath(self.address.spec_path, glob)

    @final
    @classmethod
    def can_generate(
        cls, output_type: type[SourcesField], union_membership: UnionMembership
    ) -> bool:
        """Can this field be used to generate the output_type?

        Generally, this method does not need to be used. Most call sites can simply use the
        below, and the engine will generate the sources if possible or will return an instance
        of HydratedSources with an empty snapshot if not possible:

            await Get(
                HydratedSources,
                HydrateSourcesRequest(
                    sources_field,
                    for_sources_types=[FortranSources],
                    enable_codegen=True,
                )
            )

        This method is useful when you need to filter targets before hydrating them, such as
        how you may filter targets via `tgt.has_field(MyField)`.
        """
        generate_request_types = union_membership.get(GenerateSourcesRequest)
        return any(
            issubclass(cls, generate_request_type.input)
            and issubclass(generate_request_type.output, output_type)
            for generate_request_type in generate_request_types
        )

    @final
    def path_globs(self, files_not_found_behavior: FilesNotFoundBehavior) -> PathGlobs:
        if not self.globs:
            return PathGlobs([])
        error_behavior = files_not_found_behavior.to_glob_match_error_behavior()
        # If the globs were explicitly set by the user (they differ from the default), then
        # every glob must match; a defaulted value only needs any one glob to match.
        conjunction = (
            GlobExpansionConjunction.all_match
            if not self.default or (set(self.globs) != set(self.default))
            else GlobExpansionConjunction.any_match
        )
        return PathGlobs(
            (self._prefix_glob_with_address(glob) for glob in self.globs),
            conjunction=conjunction,
            glob_match_error_behavior=error_behavior,
            description_of_origin=(
                f"{self.address}'s `{self.alias}` field"
                if error_behavior != GlobMatchErrorBehavior.ignore
                else None
            ),
        )

    @property
    def filespec(self) -> Filespec:
        """The original globs, returned in the Filespec dict format.

        The globs will be relativized to the build root.
        """
        includes = []
        excludes = []
        for glob in self.globs:
            # "!"-prefixed globs are excludes; the prefix itself is stripped from the path.
            if glob.startswith("!"):
                excludes.append(os.path.join(self.address.spec_path, glob[1:]))
            else:
                includes.append(os.path.join(self.address.spec_path, glob))
        result: Filespec = {"includes": includes}
        if excludes:
            result["excludes"] = excludes
        return result
class MultipleSourcesField(SourcesField, StringSequenceField):
    """The `sources: list[str]` field.

    See the docstring for `SourcesField` for some class properties you can set, such as
    `expected_file_extensions`.

    When you need to get the sources for all targets, use `tgt.get(SourcesField)` rather than
    `tgt.get(MultipleSourcesField)`.
    """

    alias = "sources"
    help = (
        "A list of files and globs that belong to this target.\n\n"
        "Paths are relative to the BUILD file's directory. You can ignore files/globs by "
        "prefixing them with `!`.\n\n"
        "Example: `sources=['example.ext', 'test_*.ext', '!test_ignore.ext']`."
    )

    @property
    def globs(self) -> tuple[str, ...]:
        """The raw globs, falling back to an empty tuple when the field is unset."""
        if not self.value:
            return ()
        return self.value
class SingleSourceField(SourcesField, StringField):
    """The `source: str` field.
    See the docstring for `SourcesField` for some class properties you can set, such as
    `expected_file_extensions`.
    When you need to get the sources for all targets, use `tgt.get(SourcesField)` rather than
    `tgt.get(SingleSourceField)`.
    """
    alias = "source"
    help = (
        "A single file that belongs to this target.\n\n"
        "Path is relative to the BUILD file's directory, e.g. `source='example.ext'`."
    )
    required = True
    expected_num_files: ClassVar[int | range] = 1  # Can set to `range(0, 2)` for 0-1 files.
    @property
    def globs(self) -> tuple[str, ...]:
        """The single file wrapped in a 1-tuple, or an empty tuple when unset."""
        # Subclasses might override `required = False`, so `self.value` could be `None`.
        if self.value is None:
            return ()
        return (self.value,)
@frozen_after_init
@dataclass(unsafe_hash=True)
class HydrateSourcesRequest(EngineAwareParameter):
    """A request to resolve a `SourcesField` into a `HydratedSources` result."""
    field: SourcesField
    for_sources_types: tuple[type[SourcesField], ...]
    enable_codegen: bool
    def __init__(
        self,
        field: SourcesField,
        *,
        for_sources_types: Iterable[type[SourcesField]] = (SourcesField,),
        enable_codegen: bool = False,
    ) -> None:
        """Convert raw sources globs into an instance of HydratedSources.
        If you only want to handle certain SourcesFields, such as only PythonSources, set
        `for_sources_types`. Any invalid sources will return a `HydratedSources` instance with an
        empty snapshot and `sources_type = None`.
        If `enable_codegen` is set to `True`, any codegen sources will try to be converted to one
        of the `for_sources_types`.
        """
        self.field = field
        self.for_sources_types = tuple(for_sources_types)
        self.enable_codegen = enable_codegen
        # This custom `__init__` replaces the dataclass-generated one, which would have
        # called `__post_init__` automatically — so invoke the validation explicitly.
        self.__post_init__()
    def __post_init__(self) -> None:
        # Fail fast when codegen is enabled without naming the target language(s):
        # the default `(SourcesField,)` gives codegen nothing to aim for.
        if self.enable_codegen and self.for_sources_types == (SourcesField,):
            raise ValueError(
                "When setting `enable_codegen=True` on `HydrateSourcesRequest`, you must also "
                "explicitly set `for_source_types`. Why? `for_source_types` is used to "
                "determine which language(s) to try to generate. For example, "
                "`for_source_types=(PythonSources,)` will hydrate `PythonSources` like normal, "
                "and, if it encounters codegen sources that can be converted into Python, it will "
                "generate Python files."
            )
    def debug_hint(self) -> str:
        """Address shown in engine debug output for this request."""
        return self.field.address.spec
@dataclass(frozen=True)
class HydratedSources:
    """The result of hydrating a SourcesField.
    The `sources_type` will indicate which of the `HydrateSourcesRequest.for_sources_type` the
    result corresponds to, e.g. if the result comes from `FilesSources` vs. `PythonSources`. If this
    value is None, then the input `SourcesField` was not one of the expected types; or, when codegen
    was enabled in the request, there was no valid code generator to generate the requested language
    from the original input. This property allows for switching on the result, e.g. handling
    hydrated files() sources differently than hydrated Python sources.
    """
    # The digested files matched by the field's globs (possibly produced via codegen).
    snapshot: Snapshot
    # The original globs in `includes`/`excludes` dict form.
    filespec: Filespec
    # Which requested sources type this result satisfies; None when none matched.
    sources_type: type[SourcesField] | None
@union
@dataclass(frozen=True)
class GenerateSourcesRequest:
    """A request to go from protocol sources -> a particular language.
    This should be subclassed for each distinct codegen implementation. The subclasses must define
    the class properties `input` and `output`. The subclass must also be registered via
    `UnionRule(GenerateSourcesRequest, GenerateFortranFromAvroRequest)`, for example.
    The rule to actually implement the codegen should take the subclass as input, and it must
    return `GeneratedSources`.
    For example:
        class GenerateFortranFromAvroRequest(GenerateSourcesRequest):
            input = AvroSources
            output = FortranSources
        @rule
        def generate_fortran_from_avro(request: GenerateFortranFromAvroRequest) -> GeneratedSources:
            ...
        def rules():
            return [
                generate_fortran_from_avro,
                UnionRule(GenerateSourcesRequest, GenerateFortranFromAvroRequest),
            ]
    """
    # The sources (in the `input` protocol/language) to generate from.
    protocol_sources: Snapshot
    # The target that owns `protocol_sources`.
    protocol_target: Target
    # The SourcesField subclass this codegen implementation consumes.
    input: ClassVar[type[SourcesField]]
    # The SourcesField subclass this codegen implementation produces.
    output: ClassVar[type[SourcesField]]
@dataclass(frozen=True)
class GeneratedSources:
    """The files produced by a codegen rule, as a digested snapshot."""
    snapshot: Snapshot
class SourcesPaths(Paths):
    """The resolved file names of the `source`/`sources` field.
    Resolve via `Get(SourcesPaths, SourcesPathsRequest)`.
    This does not consider codegen, and only captures the files from the field.
    """
@dataclass(frozen=True)
class SourcesPathsRequest(EngineAwareParameter):
    """A request to resolve the file names of the `source`/`sources` field.
    Use via `Get(SourcesPaths, SourcesPathsRequest(tgt.get(SourcesField)))`.
    This is faster than `Get(HydratedSources, HydrateSourcesRequest)` because it does not snapshot
    the files and it only resolves the file names.
    This does not consider codegen, and only captures the files from the field. Use
    `HydrateSourcesRequest` to use codegen.
    """
    field: SourcesField
    def debug_hint(self) -> str:
        """Address shown in engine debug output for this request."""
        return self.field.address.spec
class SecondaryOwnerMixin(ABC):
    """Add to a Field for the target to work with file arguments and `--changed-since`, without it
    needing a `SourcesField`.
    Why use this? In a dependency inference world, multiple targets including the same file in the
    `sources` field causes issues due to ambiguity over which target to use. So, only one target
    should have "primary ownership" of the file. However, you may still want other targets to be
    used when that file is included in file arguments. For example, a `python_source` target
    being the primary owner of the `.py` file, but a `pex_binary` still working with file
    arguments for that file. Secondary ownership means that the target won't be used for things like
    dependency inference and hydrating sources, but file arguments will still work.
    There should be a primary owner of the file(s), e.g. the `python_source` in the above example.
    Typically, you will want to add a dependency injection rule to infer a dep on that primary
    owner.
    All associated files must live in the BUILD target's directory or a subdirectory to work
    properly, like the `sources` field.
    """
    @property
    @abstractmethod
    def filespec(self) -> Filespec:
        """A dictionary in the form {'includes': ['full/path/to/f.ext']} representing the field's
        associated files.
        Typically, users should use a file name/glob relative to the BUILD file, like the `sources`
        field. Then, you can use `os.path.join(self.address.spec_path, self.value)` to make the
        path relative to the build root.
        """
def targets_with_sources_types(
    sources_types: Iterable[type[SourcesField]],
    targets: Iterable[Target],
    union_membership: UnionMembership,
) -> tuple[Target, ...]:
    """Return all targets either with the specified sources subclass(es) or which can generate those
    sources."""

    def _is_relevant(tgt: Target) -> bool:
        # A target is relevant if it natively has one of the sources types, or if a
        # registered codegen implementation can produce that type from its sources.
        return any(
            tgt.has_field(sources_type)
            or tgt.get(SourcesField).can_generate(sources_type, union_membership)
            for sources_type in sources_types
        )

    return tuple(filter(_is_relevant, targets))
# -----------------------------------------------------------------------------------------------
# `Dependencies` field
# -----------------------------------------------------------------------------------------------
class Dependencies(StringSequenceField, AsyncFieldMixin):
    """The dependencies field.
    To resolve all dependencies—including the results of dependency injection and inference—use
    either `await Get(Addresses, DependenciesRequest(tgt[Dependencies])` or `await Get(Targets,
    DependenciesRequest(tgt[Dependencies])`.
    """
    alias = "dependencies"
    help = (
        "Addresses to other targets that this target depends on, e.g. ['helloworld/subdir:lib']."
        "\n\nAlternatively, you may include file names. Pants will find which target owns that "
        "file, and create a new target from that which only includes the file in its `sources` "
        "field. For files relative to the current BUILD file, prefix with `./`; otherwise, put the "
        "full path, e.g. ['./sibling.txt', 'resources/demo.json'].\n\nYou may exclude dependencies "
        "by prefixing with `!`, e.g. `['!helloworld/subdir:lib', '!./sibling.txt']`. Ignores are "
        "intended for false positives with dependency inference; otherwise, simply leave off the "
        "dependency from the BUILD file."
    )
    # Subclasses may opt in to `!!`-style transitive excludes by flipping this to True.
    supports_transitive_excludes = False
    @memoized_property
    def unevaluated_transitive_excludes(self) -> UnparsedAddressInputs:
        """The `!!`-prefixed entries, unparsed; empty unless the subclass opts in."""
        if not self.supports_transitive_excludes or not self.value:
            return UnparsedAddressInputs((), owning_address=self.address)
        # Strip the leading `!!` so each remaining entry parses as a plain address.
        return UnparsedAddressInputs(
            (v[2:] for v in self.value if v.startswith("!!")),
            owning_address=self.address,
        )
@dataclass(frozen=True)
class DependenciesRequest(EngineAwareParameter):
    """A request to resolve the addresses of a target's `dependencies` field, including
    the results of dependency injection and inference."""
    field: Dependencies
    # NOTE(review): presumably controls whether `SpecialCasedDependencies`-style fields
    # are included in the result — confirm in the rule that consumes this request.
    include_special_cased_deps: bool = False
    def debug_hint(self) -> str:
        """Address shown in engine debug output for this request."""
        return self.field.address.spec
@dataclass(frozen=True)
class ExplicitlyProvidedDependencies:
    """The literal addresses from a BUILD file `dependencies` field.
    Almost always, you should use `await Get(Addresses, DependenciesRequest)` instead, which will
    consider dependency injection and inference and apply ignores. However, this type can be
    useful particularly within inference/injection rules to see if a user already explicitly
    provided a dependency.
    Resolve using `await Get(ExplicitlyProvidedDependencies, DependenciesRequest)`.
    Note that the `includes` are not filtered based on the `ignores`: this type preserves exactly
    what was in the BUILD file.
    """
    # The address of the target whose `dependencies` field this came from.
    address: Address
    # Addresses listed without a `!`/`!!` prefix.
    includes: FrozenOrderedSet[Address]
    # Addresses listed with a `!` or `!!` prefix.
    ignores: FrozenOrderedSet[Address]
    @memoized_method
    def any_are_covered_by_includes(self, addresses: Iterable[Address]) -> bool:
        """Return True if any address is in the explicitly provided includes.
        Note that if the input addresses are generated targets, they will still be marked as covered
        if their original target generator is in the explicitly provided includes.
        """
        return any(
            addr in self.includes or addr.maybe_convert_to_target_generator() in self.includes
            for addr in addresses
        )
    @memoized_method
    def remaining_after_disambiguation(
        self, addresses: Iterable[Address], owners_must_be_ancestors: bool
    ) -> frozenset[Address]:
        """All addresses that remain after ineligible candidates are discarded.
        Candidates are removed if they appear as ignores (`!` and `!!`) in the `dependencies`
        field. Note that if the input addresses are generated targets, they will still be marked as
        covered if their original target generator is in the explicitly provided ignores.
        Candidates are also removed if `owners_must_be_ancestors` is True and the targets are not
        ancestors, e.g. `root2:tgt` is not a valid candidate for something defined in `root1`.
        """
        original_addr_path = PurePath(self.address.spec_path)
        def is_valid(addr: Address) -> bool:
            # Ignored either directly or via the generator that produced the target.
            is_ignored = (
                addr in self.ignores or addr.maybe_convert_to_target_generator() in self.ignores
            )
            if owners_must_be_ancestors is False:
                return not is_ignored
            # NB: `PurePath.is_relative_to()` was not added until Python 3.9. This emulates it.
            try:
                original_addr_path.relative_to(addr.spec_path)
                return not is_ignored
            except ValueError:
                # `addr` is not an ancestor of this target, so it can never be a valid owner.
                return False
        return frozenset(filter(is_valid, addresses))
    def maybe_warn_of_ambiguous_dependency_inference(
        self,
        ambiguous_addresses: Iterable[Address],
        original_address: Address,
        *,
        context: str,
        import_reference: str,
        owners_must_be_ancestors: bool = False,
    ) -> None:
        """If the module is ambiguous and the user did not disambiguate, warn that dependency
        inference will not be used.
        Disambiguation usually happens by using ignores in the `dependencies` field with `!` and
        `!!`. If `owners_must_be_ancestors` is True, any addresses which are not ancestors of the
        target in question will also be ignored.
        """
        # An explicit include resolves the ambiguity, so there is nothing to warn about.
        if not ambiguous_addresses or self.any_are_covered_by_includes(ambiguous_addresses):
            return
        remaining = self.remaining_after_disambiguation(
            ambiguous_addresses, owners_must_be_ancestors=owners_must_be_ancestors
        )
        # Zero or one candidate left means the user's ignores already disambiguated.
        if len(remaining) <= 1:
            return
        logger.warning(
            f"{context}, but Pants cannot safely infer a dependency because more than one target "
            f"owns this {import_reference}, so it is ambiguous which to use: "
            f"{sorted(addr.spec for addr in remaining)}."
            f"\n\nPlease explicitly include the dependency you want in the `dependencies` "
            f"field of {original_address}, or ignore the ones you do not want by prefixing "
            f"with `!` or `!!` so that one or no targets are left."
            f"\n\nAlternatively, you can remove the ambiguity by deleting/changing some of the "
            f"targets so that only 1 target owns this {import_reference}. Refer to "
            f"{doc_url('troubleshooting#import-errors-and-missing-dependencies')}."
        )
    def disambiguated(
        self, ambiguous_addresses: Iterable[Address], owners_must_be_ancestors: bool = False
    ) -> Address | None:
        """If exactly one of the input addresses remains after disambiguation, return it.
        Disambiguation usually happens by using ignores in the `dependencies` field with `!` and
        `!!`. If `owners_must_be_ancestors` is True, any addresses which are not ancestors of the
        target in question will also be ignored.
        """
        # Explicit includes mean the user already chose; do not second-guess them here.
        if not ambiguous_addresses or self.any_are_covered_by_includes(ambiguous_addresses):
            return None
        remaining_after_ignores = self.remaining_after_disambiguation(
            ambiguous_addresses, owners_must_be_ancestors=owners_must_be_ancestors
        )
        return list(remaining_after_ignores)[0] if len(remaining_after_ignores) == 1 else None
@union
@dataclass(frozen=True)
class InjectDependenciesRequest(EngineAwareParameter, ABC):
    """A request to inject dependencies, in addition to those explicitly provided.
    To set up a new injection, subclass this class. Set the class property `inject_for` to the
    type of `Dependencies` field you want to inject for, such as `FortranDependencies`. This will
    cause the class, and any subclass, to have the injection. Register this subclass with
    `UnionRule(InjectDependenciesRequest, InjectFortranDependencies)`, for example.
    Then, create a rule that takes the subclass as a parameter and returns `InjectedDependencies`.
    For example:
        class FortranDependencies(Dependencies):
            pass
        class InjectFortranDependencies(InjectDependenciesRequest):
            inject_for = FortranDependencies
        @rule
        async def inject_fortran_dependencies(
            request: InjectFortranDependencies
        ) -> InjectedDependencies:
            addresses = await Get(
                Addresses, UnparsedAddressInputs(["//:injected"], owning_address=None)
            )
            return InjectedDependencies(addresses)
        def rules():
            return [
                *collect_rules(),
                UnionRule(InjectDependenciesRequest, InjectFortranDependencies),
            ]
    """
    # The `Dependencies` field instance to inject additional addresses for.
    dependencies_field: Dependencies
    # The `Dependencies` subclass this injection applies to.
    inject_for: ClassVar[Type[Dependencies]]
    def debug_hint(self) -> str:
        """Address shown in engine debug output for this request."""
        return self.dependencies_field.address.spec
class InjectedDependencies(DeduplicatedCollection[Address]):
    """Addresses produced by a rule for an `InjectDependenciesRequest`."""
    sort_input = True
@union
@dataclass(frozen=True)
class InferDependenciesRequest(EngineAwareParameter):
    """A request to infer dependencies by analyzing source files.
    To set up a new inference implementation, subclass this class. Set the class property
    `infer_from` to the type of `SourcesField` you are able to infer from, such as
    `FortranSources`. This will cause the class, and any subclass, to use your inference
    implementation. Note that there cannot be more than one implementation for a particular
    `SourcesField` class. Register this subclass with
    `UnionRule(InferDependenciesRequest, InferFortranDependencies)`, for example.
    Then, create a rule that takes the subclass as a parameter and returns `InferredDependencies`.
    For example:
        class InferFortranDependencies(InferDependenciesRequest):
            infer_from = FortranSources
        @rule
        async def infer_fortran_dependencies(request: InferFortranDependencies) -> InferredDependencies:
            hydrated_sources = await Get(HydratedSources, HydrateSources(request.sources_field))
            ...
            return InferredDependencies(...)
        def rules():
            return [
                infer_fortran_dependencies,
                UnionRule(InferDependenciesRequest, InferFortranDependencies),
            ]
    """
    # The sources field of the target whose dependencies are being inferred.
    sources_field: SourcesField
    # The `SourcesField` subclass this implementation can infer from.
    infer_from: ClassVar[type[SourcesField]]
    def debug_hint(self) -> str:
        """Address shown in engine debug output for this request."""
        return self.sources_field.address.spec
@frozen_after_init
@dataclass(unsafe_hash=True)
class InferredDependencies:
    """The addresses discovered by a dependency inference rule."""
    dependencies: FrozenOrderedSet[Address]
    def __init__(self, dependencies: Iterable[Address]) -> None:
        """The result of inferring dependencies."""
        # Sort for a deterministic order regardless of how the rule discovered them.
        self.dependencies = FrozenOrderedSet(sorted(dependencies))
    def __bool__(self) -> bool:
        """True if any dependencies were inferred."""
        return bool(self.dependencies)
    def __iter__(self) -> Iterator[Address]:
        return iter(self.dependencies)
class SpecialCasedDependencies(StringSequenceField, AsyncFieldMixin):
    """Subclass this for fields that act similarly to the `dependencies` field, but are handled
    differently than normal dependencies.
    For example, you might have a field for package/binary dependencies, which you will call
    the equivalent of `./pants package` on. While you could put these in the normal
    `dependencies` field, it is often clearer to the user to call out this magic through a
    dedicated field.
    This type will ensure that the dependencies show up in project introspection,
    like `dependencies` and `dependees`, but not show up when you call `Get(TransitiveTargets,
    TransitiveTargetsRequest)` and `Get(Addresses, DependenciesRequest)`.
    To hydrate this field's dependencies, use `await Get(Addresses, UnparsedAddressInputs,
    tgt.get(MyField).to_unparsed_address_inputs()`.
    """
    def to_unparsed_address_inputs(self) -> UnparsedAddressInputs:
        """The raw address strings, ready to resolve via `Get(Addresses, UnparsedAddressInputs)`."""
        return UnparsedAddressInputs(self.value or (), owning_address=self.address)
# -----------------------------------------------------------------------------------------------
# Other common Fields used across most targets
# -----------------------------------------------------------------------------------------------
class Tags(StringSequenceField):
    """The `tags` field: arbitrary user-supplied labels used to filter targets."""
    alias = "tags"
    help = (
        "Arbitrary strings to describe a target.\n\nFor example, you may tag some test targets "
        "with 'integration_test' so that you could run `./pants --tag='integration_test' test ::` "
        "to only run on targets with that tag."
    )
class DescriptionField(StringField):
    """The `description` field: free-form, human-readable text about a target."""
    alias = "description"
    help = (
        "A human-readable description of the target.\n\nUse `./pants list --documented ::` to see "
        "all targets with descriptions."
    )
# Fields that nearly every target type should include (see `Tags` and `DescriptionField`).
COMMON_TARGET_FIELDS = (Tags, DescriptionField)
class OverridesField(AsyncFieldMixin, Field):
    """A mapping of keys (e.g. target names, source globs) to field names with their overridden
    values.
    This is meant for target generators to reduce boilerplate. It's up to the corresponding target
    generator rule to determine how to implement the field, such as how users specify the key. For
    example, `{"f.ext": {"tags": ['my_tag']}}`.
    """
    alias = "overrides"
    # Normalized form: every key is a tuple of strings, even if the user gave a bare string.
    value: dict[tuple[str, ...], dict[str, Any]] | None
    default: ClassVar[None] = None  # A default does not make sense for this field.
    @classmethod
    def compute_value(
        cls,
        raw_value: Optional[Dict[Union[str, Tuple[str, ...]], Dict[str, Any]]],
        address: Address,
    ) -> Optional[Dict[Tuple[str, ...], Dict[str, Any]]]:
        """Validate the raw BUILD-file value and normalize all keys to string tuples.
        Raises:
            InvalidFieldTypeException: if the value is not a mapping of
                `str | tuple[str, ...]` keys to `dict[str, Any]` values.
        """
        value_or_default = super().compute_value(raw_value, address)
        if value_or_default is None:
            return None
        # One shared exception so every validation failure reports the same expected type.
        invalid_type_exception = InvalidFieldTypeException(
            address,
            cls.alias,
            raw_value,
            expected_type="dict[str | tuple[str, ...], dict[str, Any]]",
        )
        if not isinstance(value_or_default, collections.abc.Mapping):
            raise invalid_type_exception
        result: dict[tuple[str, ...], dict[str, Any]] = {}
        for outer_key, nested_value in value_or_default.items():
            # Normalize a bare string key into a 1-tuple.
            if isinstance(outer_key, str):
                outer_key = (outer_key,)
            if not isinstance(outer_key, collections.abc.Sequence) or not all(
                isinstance(elem, str) for elem in outer_key
            ):
                raise invalid_type_exception
            if not isinstance(nested_value, collections.abc.Mapping):
                raise invalid_type_exception
            if not all(isinstance(inner_key, str) for inner_key in nested_value):
                raise invalid_type_exception
            result[tuple(outer_key)] = dict(nested_value)
        return result
    def __hash__(self) -> int:
        # The value might have unhashable elements like `list`, so we stringify it.
        return hash((self.__class__, repr(self.value)))
    def _relativize_globs(self, globs: tuple[str, ...]) -> tuple[str, ...]:
        """Prefix each glob with the address's spec path, preserving `!` ignore prefixes."""
        return tuple(
            f"!{os.path.join(self.address.spec_path, glob[1:])}"
            if glob.startswith("!")
            else os.path.join(self.address.spec_path, glob)
            for glob in globs
        )
    def to_path_globs(
        self, files_not_found_behavior: FilesNotFoundBehavior
    ) -> tuple[PathGlobs, ...]:
        """Create a `PathGlobs` for each key.
        This should only be used if the keys are file globs.
        """
        if not self.value:
            return ()
        return tuple(
            PathGlobs(
                self._relativize_globs(globs),
                glob_match_error_behavior=files_not_found_behavior.to_glob_match_error_behavior(),
                description_of_origin=f"the `overrides` field for {self.address}",
            )
            for globs in self.value
        )
    def flatten_paths(
        self, paths_to_overrides: Mapping[Paths, dict[str, Any]]
    ) -> dict[str, dict[str, Any]]:
        """Combine all overrides for each file into a single dictionary.
        Raises:
            InvalidFieldException: if two different keys set the same field for the same file.
        """
        result: dict[str, dict[str, Any]] = {}
        for paths, override in paths_to_overrides.items():
            for path in paths.files:
                for field, value in override.items():
                    if path not in result:
                        result[path] = {field: value}
                        continue
                    if field not in result[path]:
                        result[path][field] = value
                        continue
                    # Same field set twice for the same file: report the path relative to
                    # the BUILD file so the message matches what the user wrote.
                    relpath = fast_relpath(path, self.address.spec_path)
                    raise InvalidFieldException(
                        f"Conflicting overrides in the `{self.alias}` field of "
                        f"`{self.address}` for the relative path `{relpath}` for "
                        f"the field `{field}`. You cannot specify the same field name "
                        "multiple times for the same path.\n\n"
                        f"(One override sets the field to `{repr(result[path][field])}` "
                        f"but another sets to `{repr(value)}`.)"
                    )
        return result
def generate_file_based_overrides_field_help_message(
    generated_target_name: str, example: str
) -> str:
    """Render the standard help text for a target generator's `overrides` field.

    `generated_target_name` names the generated target type; `example` is a code
    sample shown verbatim inside a fenced block.
    """
    paragraphs = [
        f"Override the field values for generated `{generated_target_name}` targets.",
        "Expects a dictionary of relative file paths and globs to a dictionary for the "
        "overrides. You may either use a string for a single path / glob, "
        "or a string tuple for multiple paths / globs. Each override is a dictionary of "
        "field names to the overridden value.",
        f"For example:\n\n```\n{example}\n```",
        "File paths and globs are relative to the BUILD file's directory. Every overridden file is "
        "validated to belong to this target's `sources` field.",
        f"If you'd like to override a field's value for every `{generated_target_name}` target "
        "generated by this target, change the field directly on this target rather than using the "
        "`overrides` field.",
        "You can specify the same file name in multiple keys, so long as you don't override the "
        "same field more than one time for the file.",
    ]
    # Paragraphs are separated by blank lines, exactly as the original concatenation produced.
    return "\n\n".join(paragraphs)
| [
"noreply@github.com"
] | akk5597.noreply@github.com |
cb3f6f45cc319404055a4824bd21acb67168c260 | 6def5721d5c7c6a9cde32a19c58ec129d2c7c6b2 | /Week_3/socket.py | ef9afd70ca80bee32b0565b18c6528d74234e23e | [
"MIT"
] | permissive | Sid2697/Python-to-access-web-data | 9d054a7e5c6d2300ec3652f367f85e355a4702c5 | 74b61339cc9060263e3f2d22e9ed90281276ab50 | refs/heads/master | 2021-05-01T17:42:27.894592 | 2018-02-12T11:36:52 | 2018-02-12T11:36:52 | 120,995,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import socket
# Fetch a short text file over raw HTTP using a TCP socket and echo the full
# response (status line, headers, and body) to stdout.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(('data.pr4e.org', 80))
# A minimal HTTP/1.0 GET request; the blank line terminates the headers.
request = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\r\n\r\n'.encode()
conn.send(request)
while True:
    chunk = conn.recv(512)
    if len(chunk) < 1:
        # The server closed the connection: the response is complete.
        break
    print(chunk.decode(), end='')
conn.close()
| [
"noreply@github.com"
] | Sid2697.noreply@github.com |
d6e26c537e5b3b7cc83493da35a8217f0921a9d8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02995/s716079488.py | cb1f2ffe4fe50d37290c4cfff656f2583b7f8e62 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import math
def count_not_divisible(a: int, b: int, c: int, d: int) -> int:
    """Return how many integers in [a, b] are divisible by neither c nor d.

    Uses inclusion-exclusion: the total count, minus the multiples of c, minus
    the multiples of d, plus the multiples of lcm(c, d) (which the two
    subtractions removed twice).
    """
    def multiples_in_range(k: int) -> int:
        # Count of multiples of k within [a, b]: floor(b/k) - floor((a-1)/k).
        return b // k - (a - 1) // k

    lcm = c * d // math.gcd(c, d)
    return (b - a + 1) - multiples_in_range(c) - multiples_in_range(d) + multiples_in_range(lcm)


def main() -> None:
    """Read `a b c d` from stdin and print the answer."""
    a, b, c, d = map(int, input().split())
    print(count_not_divisible(a, b, c, d))


# Guard the entry point so importing this module does not block on stdin.
if __name__ == "__main__":
    main()
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d536705ebdb63f9f5fae564e726d40728978b813 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/tests/test_utils/test_assigner.py | 5520ef27b71306926c228341d365453d24b4dc28 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 23,656 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests the Assigner objects.
CommandLine:
pytest tests/test_utils/test_assigner.py
xdoctest tests/test_utils/test_assigner.py zero
"""
import pytest
import torch
from mmdet.core.bbox.assigners import (ApproxMaxIoUAssigner,
CenterRegionAssigner, HungarianAssigner,
MaskHungarianAssigner, MaxIoUAssigner,
PointAssigner, SimOTAAssigner,
TaskAlignedAssigner, UniformAssigner)
def test_max_iou_assigner():
    """Each box is matched to the gt with the highest IoU above the threshold."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4
    # gt indices are 1-based; 0 means background/unassigned.
    assert torch.all(assign_result.gt_inds == torch.LongTensor([1, 0, 2, 0]))
def test_max_iou_assigner_with_ignore():
    """Boxes overlapping an ignored gt region are assigned -1 (ignored)."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [30, 32, 40, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    ignored_regions = torch.Tensor([[30, 30, 40, 40]])
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=ignored_regions)
    assert torch.all(assign_result.gt_inds == torch.LongTensor([1, 0, 2, -1]))
def test_max_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    assign_result = assigner.assign(bboxes, torch.empty(0, 4))
    # With no gt, every box must be background (gt_ind 0).
    assert torch.all(assign_result.gt_inds == torch.LongTensor([0, 0, 0, 0]))
def test_max_iou_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([2, 3])
    # With gt_labels: an (empty) labels tensor is still produced.
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )
    # Without gt_labels: no labels at all.
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=None)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Test corner case where a network might predict no boxes and
    ignore_iof_thr is on."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    ignored_regions = torch.Tensor([[30, 30, 40, 40]])
    gt_labels = torch.LongTensor([2, 3])
    # With gt_labels: an (empty) labels tensor is still produced.
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_labels=gt_labels,
        gt_bboxes_ignore=ignored_regions)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )
    # Without gt_labels: no labels at all.
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=ignored_regions)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    assign_result = assigner.assign(torch.empty((0, 4)), torch.empty((0, 4)))
    assert len(assign_result.gt_inds) == 0
def test_point_assigner():
    """Points (x, y, stride) are assigned to the gt box that contains them."""
    assigner = PointAssigner()
    points = torch.FloatTensor([  # [x, y, stride]
        [0, 0, 1],
        [10, 10, 1],
        [5, 5, 1],
        [32, 32, 1],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    assign_result = assigner.assign(points, gt_bboxes)
    assert torch.all(assign_result.gt_inds == torch.LongTensor([1, 2, 1, 0]))
def test_point_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    assigner = PointAssigner()
    points = torch.FloatTensor([  # [x, y, stride]
        [0, 0, 1],
        [10, 10, 1],
        [5, 5, 1],
        [32, 32, 1],
    ])
    assign_result = assigner.assign(points, torch.FloatTensor([]))
    # With no gt, every point must be background (gt_ind 0).
    assert torch.all(assign_result.gt_inds == torch.LongTensor([0, 0, 0, 0]))
def test_point_assigner_with_empty_boxes_and_gt():
    """Test corner case where an image might predict no points and no gt."""
    assigner = PointAssigner()
    assign_result = assigner.assign(torch.FloatTensor([]), torch.FloatTensor([]))
    assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner():
    """With one approx per octave, results match plain max-IoU assignment."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    # Use the candidate boxes themselves as both approxs and squares.
    assign_result = assigner.assign(bboxes, bboxes, 1, gt_bboxes)
    assert torch.all(assign_result.gt_inds == torch.LongTensor([1, 0, 2, 0]))
def test_approx_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # Use the candidate boxes themselves as both approxs and squares.
    assign_result = assigner.assign(bboxes, bboxes, 1, torch.FloatTensor([]))
    assert torch.all(assign_result.gt_inds == torch.LongTensor([0, 0, 0, 0]))
def test_approx_iou_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    assign_result = assigner.assign(bboxes, bboxes, 1, gt_bboxes)
    assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner_with_empty_boxes_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    boxes = torch.empty((0, 4))
    assign_result = assigner.assign(boxes, boxes, 1, torch.empty((0, 4)))
    assert len(assign_result.gt_inds) == 0
def test_random_assign_result():
    """Test random instantiation of assign result to catch corner cases."""
    from mmdet.core.bbox.assigners.assign_result import AssignResult
    # Fully random sizes first, then a spread of gt/pred count combinations.
    AssignResult.random()
    for num_gts, num_preds in [
        (0, 0),
        (0, 3),
        (3, 3),
        (0, 3),
        (7, 7),
        (7, 64),
        (24, 3),
    ]:
        AssignResult.random(num_gts=num_gts, num_preds=num_preds)
def test_center_region_assigner():
    assigner = CenterRegionAssigner(pos_scale=0.3, neg_scale=1)
    boxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [8, 8, 9, 9],
    ])
    gt_boxes = torch.FloatTensor([
        [0, 0, 11, 11],  # overlaps boxes[0]
        [10, 10, 20, 20],  # overlaps boxes[1]
        [4.5, 4.5, 5.5, 5.5],  # overlaps boxes[0] but its area is too small
        [0, 0, 10, 10],  # overlaps boxes[0] with a smaller area than gt[0]
    ])
    gt_labels = torch.LongTensor([2, 3, 4, 5])
    result = assigner.assign(boxes, gt_boxes, gt_labels=gt_labels)
    assert len(result.gt_inds) == 3
    assert len(result.labels) == 3
    # boxes[0] -> gt 4 (1-based), boxes[1] -> gt 2, boxes[2] unassigned.
    assert torch.all(result.gt_inds == torch.LongTensor([4, 2, 0]))
    shadowed_labels = result.get_extra_property('shadowed_labels')
    # [8, 8, 9, 9] lies in the shadowed region of [0, 0, 11, 11] (label 2)
    assert torch.any(shadowed_labels == torch.LongTensor([[2, 2]]))
    # [8, 8, 9, 9] lies in the shadowed region of [0, 0, 10, 10] (label 5)
    assert torch.any(shadowed_labels == torch.LongTensor([[2, 5]]))
    # [0, 0, 10, 10] is already assigned to [4.5, 4.5, 5.5, 5.5], so
    # [0, 0, 11, 11] (label 2) is shadowed for it.
    assert torch.any(shadowed_labels == torch.LongTensor([[0, 2]]))
def test_center_region_assigner_with_ignore():
    assigner = CenterRegionAssigner(pos_scale=0.5, neg_scale=1)
    boxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
    ])
    gt_boxes = torch.FloatTensor([
        [0, 0, 10, 10],  # overlaps boxes[0]
        [10, 10, 20, 20],  # overlaps boxes[1]
    ])
    ignored_boxes = torch.FloatTensor([
        [0, 0, 10, 10],  # coincides with boxes[0]
    ])
    gt_labels = torch.LongTensor([1, 2])
    result = assigner.assign(
        boxes,
        gt_boxes,
        gt_bboxes_ignore=ignored_boxes,
        gt_labels=gt_labels)
    assert len(result.gt_inds) == 2
    assert len(result.labels) == 2
    # boxes[0] falls in an ignore region (-1); boxes[1] goes to gt 2.
    assert torch.all(result.gt_inds == torch.LongTensor([-1, 2]))
def test_center_region_assigner_with_empty_bboxes():
    assigner = CenterRegionAssigner(pos_scale=0.5, neg_scale=1)
    no_boxes = torch.empty((0, 4)).float()
    gt_boxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
    ])
    gt_labels = torch.LongTensor([1, 2])
    result = assigner.assign(no_boxes, gt_boxes, gt_labels=gt_labels)
    # With no predictions the result carries no (or empty) assignments.
    assert result.gt_inds is None or result.gt_inds.numel() == 0
    assert result.labels is None or result.labels.numel() == 0
def test_center_region_assigner_with_empty_gts():
    assigner = CenterRegionAssigner(pos_scale=0.5, neg_scale=1)
    boxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
    ])
    no_gt = torch.empty((0, 4)).float()
    no_labels = torch.empty((0, )).long()
    result = assigner.assign(boxes, no_gt, gt_labels=no_labels)
    assert len(result.gt_inds) == 2
    # Without ground truth every box stays negative (index 0).
    assert torch.all(result.gt_inds == torch.LongTensor([0, 0]))
def test_hungarian_match_assigner():
    assigner = HungarianAssigner()
    assert assigner.iou_cost.iou_mode == 'giou'
    bbox_pred = torch.rand((10, 4))
    cls_pred = torch.rand((10, 81))
    img_meta = dict(img_shape=(10, 8, 3))

    # Without ground truth every prediction is negative and unlabelled.
    empty_gt = torch.empty((0, 4)).float()
    empty_labels = torch.empty((0, )).long()
    result = assigner.assign(bbox_pred, cls_pred, empty_gt, empty_labels,
                             img_meta)
    assert torch.all(result.gt_inds == 0)
    assert torch.all(result.labels == -1)

    # With gt boxes each gt is matched to exactly one prediction.
    gt_bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
    gt_labels = torch.LongTensor([1, 20])
    result = assigner.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                             img_meta)
    assert torch.all(result.gt_inds > -1)
    assert (result.gt_inds > 0).sum() == gt_bboxes.size(0)
    assert (result.labels > -1).sum() == gt_bboxes.size(0)

    # Plain IoU cost behaves the same way.
    assigner = HungarianAssigner(
        iou_cost=dict(type='IoUCost', iou_mode='iou', weight=1.0))
    assert assigner.iou_cost.iou_mode == 'iou'
    result = assigner.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                             img_meta)
    assert torch.all(result.gt_inds > -1)
    assert (result.gt_inds > 0).sum() == gt_bboxes.size(0)
    assert (result.labels > -1).sum() == gt_bboxes.size(0)

    # Focal-loss classification cost behaves the same way.
    assigner = HungarianAssigner(
        iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0),
        cls_cost=dict(type='FocalLossCost', weight=1.))
    assert assigner.iou_cost.iou_mode == 'giou'
    result = assigner.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
                             img_meta)
    assert torch.all(result.gt_inds > -1)
    assert (result.gt_inds > 0).sum() == gt_bboxes.size(0)
    assert (result.labels > -1).sum() == gt_bboxes.size(0)
def test_uniform_assigner():
    assigner = UniformAssigner(0.15, 0.7, 1)
    pred_bbox = torch.FloatTensor([
        [1, 1, 12, 8],
        [4, 4, 20, 20],
        [1, 5, 15, 15],
        [30, 5, 32, 42],
    ])
    anchor = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    result = assigner.assign(
        pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
    assert len(result.gt_inds) == 4
    assert len(result.labels) == 4
    # Prediction 0 is ignored (-1); prediction 2 is matched to gt 2.
    assert torch.all(result.gt_inds == torch.LongTensor([-1, 0, 2, 0]))
def test_uniform_assigner_with_empty_gt():
    """Corner case: an image with no ground-truth boxes."""
    assigner = UniformAssigner(0.15, 0.7, 1)
    pred_bbox = torch.FloatTensor([
        [1, 1, 12, 8],
        [4, 4, 20, 20],
        [1, 5, 15, 15],
        [30, 5, 32, 42],
    ])
    anchor = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    result = assigner.assign(pred_bbox, anchor, torch.empty(0, 4))
    # Every prediction must stay negative.
    assert torch.all(result.gt_inds == torch.LongTensor([0, 0, 0, 0]))
def test_uniform_assigner_with_empty_boxes():
    """Corner case: a network might predict no boxes."""
    assigner = UniformAssigner(0.15, 0.7, 1)
    no_pred = torch.empty((0, 4))
    no_anchor = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    # With labels supplied an (empty) label tensor is still produced.
    result = assigner.assign(
        no_pred, no_anchor, gt_bboxes, gt_labels=gt_labels)
    assert len(result.gt_inds) == 0
    assert tuple(result.labels.shape) == (0, )
    # Without labels only the assignment indices matter.
    result = assigner.assign(no_pred, no_anchor, gt_bboxes, gt_labels=None)
    assert len(result.gt_inds) == 0
def test_sim_ota_assigner():
    assigner = SimOTAAssigner(
        center_radius=2.5, candidate_topk=1, iou_weight=3.0, cls_weight=1.0)
    pred_scores = torch.FloatTensor([[0.2], [0.8]])
    priors = torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]])
    decoded_bboxes = torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]])
    gt_bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
    gt_labels = torch.LongTensor([2])
    result = assigner.assign(pred_scores, priors, decoded_bboxes, gt_bboxes,
                             gt_labels)
    # Neither prior qualifies, so both stay negative.
    assert torch.all(result.gt_inds == torch.LongTensor([0, 0]))
def test_task_aligned_assigner():
    """Check TaskAlignedAssigner on a small example and its corner cases.

    Covers: rejection of a non-positive ``topk``, a normal assignment,
    and an image with no ground-truth boxes.
    """
    # topk must be a positive sample count.
    with pytest.raises(AssertionError):
        TaskAlignedAssigner(topk=0)

    assigner = TaskAlignedAssigner(topk=13)
    pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
                                    [0.4, 0.5]])
    pred_bbox = torch.FloatTensor([
        [1, 1, 12, 8],
        [4, 4, 20, 20],
        [1, 5, 15, 15],
        [30, 5, 32, 42],
    ])
    anchor = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([0, 1])
    assign_result = assigner.assign(
        pred_score,
        pred_bbox,
        anchor,
        gt_bboxes=gt_bboxes,
        gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    # Empty gt: every anchor must stay unassigned. (A dead, mis-shaped
    # (0, 2) gt_labels tensor that was never passed to assign() has been
    # removed.)
    gt_bboxes = torch.empty(0, 4)
    assign_result = assigner.assign(
        pred_score, pred_bbox, anchor, gt_bboxes=gt_bboxes)
    assert torch.all(assign_result.gt_inds == torch.LongTensor([0, 0, 0, 0]))
def test_mask_hungarian_match_assigner():
    """Exercise MaskHungarianAssigner over its supported cost settings."""
    cls_pred = torch.rand((10, 133))
    mask_pred = torch.rand((10, 50, 50))
    img_meta = None

    # With no gt masks everything must stay negative and unlabelled.
    base_cfg = dict(
        cls_cost=dict(type='ClassificationCost', weight=1.0),
        mask_cost=dict(type='FocalLossCost', weight=20.0, binary_input=True),
        dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True, eps=1.0))
    assigner = MaskHungarianAssigner(**base_cfg)
    empty_labels = torch.empty((0, )).long()
    empty_masks = torch.empty((0, 50, 50)).float()
    assign_result = assigner.assign(cls_pred, mask_pred, empty_labels,
                                    empty_masks, img_meta)
    assert torch.all(assign_result.gt_inds == 0)
    assert torch.all(assign_result.labels == -1)

    # Two complementary gt masks: rows [0, 25) belong to gt 0 and rows
    # [25, 50) to gt 1.
    gt_labels = torch.LongTensor([10, 100])
    gt_masks = torch.zeros((2, 50, 50)).long()
    gt_masks[0, :25] = 1
    # BUGFIX: this previously wrote gt_masks[0, 25:], which merely
    # re-filled mask 0 and left gt mask 1 entirely empty.
    gt_masks[1, 25:] = 1

    def _assert_unique_match(assigner_cfg):
        # Assign with the given costs; each gt must be matched to exactly
        # one prediction and receive a valid label.
        matcher = MaskHungarianAssigner(**assigner_cfg)
        result = matcher.assign(cls_pred, mask_pred, gt_labels, gt_masks,
                                img_meta)
        assert torch.all(result.gt_inds > -1)
        assert (result.gt_inds > 0).sum() == gt_labels.size(0)
        assert (result.labels > -1).sum() == gt_labels.size(0)

    # All three costs active (DiceCost defaults to naive_dice=True).
    _assert_unique_match(base_cfg)
    # Classification cost only.
    _assert_unique_match(
        dict(
            cls_cost=dict(type='ClassificationCost', weight=1.0),
            mask_cost=dict(
                type='FocalLossCost', weight=0.0, binary_input=True),
            dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True,
                           eps=1.0)))
    # Focal mask cost only.
    _assert_unique_match(
        dict(
            cls_cost=dict(type='ClassificationCost', weight=0.0),
            mask_cost=dict(
                type='FocalLossCost', weight=1.0, binary_input=True),
            dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True,
                           eps=1.0)))
    # Dice cost only, naive formulation.
    _assert_unique_match(
        dict(
            cls_cost=dict(type='ClassificationCost', weight=0.0),
            mask_cost=dict(
                type='FocalLossCost', weight=0.0, binary_input=True),
            dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True,
                           eps=1.0)))
    # Dice cost only, naive_dice=False.
    _assert_unique_match(
        dict(
            cls_cost=dict(type='ClassificationCost', weight=0.0),
            mask_cost=dict(
                type='FocalLossCost', weight=0.0, binary_input=True),
            dice_cost=dict(
                type='DiceCost',
                weight=1.0,
                pred_act=True,
                eps=1.0,
                naive_dice=False)))
    # Binary cross-entropy mask cost.
    _assert_unique_match(
        dict(
            cls_cost=dict(type='ClassificationCost', weight=0.0),
            mask_cost=dict(
                type='CrossEntropyLossCost', weight=1.0, use_sigmoid=True),
            dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True,
                           eps=1.0)))

    # Plain (non-sigmoid) CE mode is not supported yet and must be
    # rejected at construction time.
    with pytest.raises(AssertionError):
        MaskHungarianAssigner(
            cls_cost=dict(type='ClassificationCost', weight=0.0),
            mask_cost=dict(
                type='CrossEntropyLossCost', weight=1.0, use_sigmoid=False),
            dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True,
                           eps=1.0))
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
a8c3c9123e66db1f7cb29f5b7e0b69ccab0c0c28 | b0210f0320e47e3384c43c56e686844081374c6d | /tyk2_input/L31/31-46_wat/run.py | 03974c39662571c19c71e562bc7471b25406fdc3 | [] | no_license | taisung/MSU_Rutgers-GTI | 2531a9346e82131a38dfdef727380f1c100f5def | 3914a07a6be9af6d3d968288b9d4c3049fc10066 | refs/heads/master | 2021-09-14T05:45:38.906495 | 2018-05-08T17:42:16 | 2018-05-08T17:42:16 | 114,943,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import os
# Driver script: for each lambda window, create a fresh working directory,
# instantiate the equilibration/production/PBS templates (substituting the
# placeholder XXX with the lambda value via sed), copy the shared topology
# and restart files, and submit the job with qsub.
# NOTE(review): paths mention AMBER-style files (.prmtop/.rst) and
# 'ti_one-step', so this presumably drives a thermodynamic-integration
# free-energy run — confirm against the templates in files/.
# Relies on `import os` at the top of this file; every step is a shell
# side effect and there is no error checking on the os.system calls.
dir = '/mnt/scratch/songlin3/run/tyk2/L31/wat/ti_one-step/31_46/'
filesdir = dir + 'files/'
# Template inputs: equilibration, production, and the PBS job script.
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
# Twelve lambda windows, symmetric about 0.5.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate the per-lambda directory from scratch and work inside it.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    #equiin
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    #prodin
    prodin = workdir + "%6.5f_prod.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS
    pbs = workdir + "%6.5f.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #top
    os.system("cp ../31-46_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    #submit pbs
    os.system("qsub %s" %(pbs))
    # Return to the base directory before the next window.
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
f2c37c2304e5871aa5b2d791a0ac8b0d94018f0b | 4ae775089e78aa6362545ae1f3b1beeb9dbba748 | /offset-game/envs/base_env.py | b546f66d8ed51eec258014406aa613eb4ab30903 | [
"MIT"
] | permissive | JosephDistefano/offset-human-interface | 9a3da267becaf1a2c2fafa20fd48684726e97c2c | 88ba05554c289f2c966f663b11930f37fc938a83 | refs/heads/master | 2020-09-14T17:52:53.198045 | 2019-11-19T01:51:42 | 2019-11-19T01:51:42 | 223,205,679 | 0 | 0 | MIT | 2019-11-21T15:31:41 | 2019-11-21T15:31:40 | null | UTF-8 | Python | false | false | 2,448 | py | import math
import numpy as np
import pybullet as p
import pybullet_data
import pybullet_utils.bullet_client as bc
class BaseEnv(object):
    """Shared pybullet setup for the offset-game environments.

    Creates a bullet client (GUI or headless), configures gravity, the
    physics time step, and a scaled ground plane, and provides helpers to
    spawn the UGV/UAV agents on a 5-column grid.
    """

    def __init__(self, config):
        self.config = config
        # Headless mode runs the physics server without a GUI window.
        if config['simulation']['headless']:
            self.p = bc.BulletClient(connection_mode=p.DIRECT)
        else:
            self.p = bc.BulletClient(connection_mode=p.GUI)
            # Top-down camera over the arena.
            self.p.resetDebugVisualizerCamera(cameraDistance=150,
                                              cameraYaw=0,
                                              cameraPitch=-89.999,
                                              cameraTargetPosition=[0, 80, 0])
        self.p.setGravity(0, 0, -9.81)
        self.p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optional
        self.p.setPhysicsEngineParameter(
            fixedTimeStep=config['simulation']['time_step'], numSubSteps=1)
        # Fixed ground plane, rotated 90 degrees and scaled up 20x.
        ground = self.p.loadURDF("plane.urdf", [0, 0, 0],
                                 self.p.getQuaternionFromEuler(
                                     [0, 0, math.pi / 2]),
                                 useFixedBase=True,
                                 globalScaling=20)
        self.p.changeVisualShape(ground, -1)
        return None

    def get_initial_position(self, agent, n_agents):
        """Map an agent index to its [x, y] spawn cell on a 5-wide grid."""
        layout = np.arange(n_agents).reshape(n_agents // 5, 5)
        rows, cols = np.where(layout == agent)
        # 20-unit cell pitch; rows are offset by 10.
        return [rows[0] * 20 + 10, cols[0] * 20]

    def _initial_setup(self, UGV, UAV):
        """Instantiate every UGV and UAV at its grid spawn point."""
        self.n_ugv = self.config['simulation']['n_ugv']
        self.n_uav = self.config['simulation']['n_uav']
        start_quat = self.p.getQuaternionFromEuler([math.pi / 2, 0, 0])
        ugv = []
        for idx in range(self.n_ugv):
            gx, gy = self.get_initial_position(idx, self.n_ugv)
            spawn = [gx * 0.25 + 2.5, gy * 0.25, 5]
            ugv.append(UGV(spawn, start_quat, idx, self.config))
        uav = []
        for idx in range(self.n_uav):
            gx, gy = self.get_initial_position(idx, self.n_uav)
            # UAVs are shifted 1.5 units back from the UGV row.
            spawn = [gx * 0.25 + 2.5, gy * 0.25 - 1.5, 5]
            uav.append(UAV(spawn, start_quat, idx, self.config))
        return uav, ugv
| [
"hemanthm2277@gmail.com"
] | hemanthm2277@gmail.com |
d32ed17280ed2172a5ce234c61ad8de6e9779b1b | b0c8bdf5f8045ca5c0322d8e1ca685d3c0d4944a | /download_soundcloud_playlist_to_wav.py | 544d86183ba1a30ba6945c2895977c04a0630028 | [
"MIT"
] | permissive | 255BITS/DCGAN-tensorflow | 428525fbd59f90f4ff54482f43e981ba82b72629 | 684a24da17d1359606d05a36b97c82a33b3fa4da | refs/heads/master | 2020-12-13T23:30:32.728196 | 2016-03-31T06:42:17 | 2016-03-31T06:42:17 | 54,516,766 | 0 | 0 | null | 2016-03-22T23:50:15 | 2016-03-22T23:50:15 | null | UTF-8 | Python | false | false | 537 | py | import os
# Download a SoundCloud playlist with scdl (expects `import os` at the top
# of this file) and convert every fetched mp3 into a mono, low-bitrate wav
# under training/, for use as training data. Shells out to scdl and ffmpeg.
import sys
import glob
import time
def do(command):
    # Run a shell command, echoing the command and its exit status.
    print("Running " + command)
    print(os.system(command))
i = 0
if(len(sys.argv) > 1):
    # -c resumes partial downloads, -a downloads all tracks of the playlist.
    do("cd training/to_process && scdl -c -a -l "+sys.argv[1])
    for file in glob.glob('training/to_process/**/*.mp3'):
        # Unique output name: sequence number plus timestamp.
        wav_out = 'training/wav'+str(i)+'-'+str(time.time())+'.wav'
        do("ffmpeg -i \""+file+"\" -ac 1 -bufsize 4k -b:v 4k "+wav_out)
        #do("rm \""+file+"\"")
        i+=1
else:
    print("Usage: " + sys.argv[0]+" [link to soundcloud playlist]")
| [
"mikkel@255bits.com"
] | mikkel@255bits.com |
2ffd314ff2cf58180b30587ccf95ac157863664e | 618f7f381ef68cf6b4384ad2a544202f0f4d286e | /Plugins/ChatLikeCMD/ChatLikeCMD.py | bec65332609d49bcedccbb4344171b9b49370934 | [
"MIT"
] | permissive | Lao-Tzu-Taoism/EasierLife | bc7b4bed8cc79f9e348a34d13827b4e53d274ac8 | 1a6bb691f99c6075e92cf1e566529b9074f3edab | refs/heads/master | 2021-08-29T23:27:43.248207 | 2017-12-15T08:26:38 | 2017-12-15T08:26:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,974 | py | #coding=utf8
import thread, time, sys, os, platform
# Platform probe: pick a raw single-character reader (`getch`). Linux
# builds one on termios/tty; Windows uses msvcrt.getwch; anything else
# (Mac) is rejected outright.
try:
    import termios, tty
    # Attribute access doubles as a feature check; raises AttributeError
    # on platforms whose termios lacks these functions.
    termios.tcgetattr, termios.tcsetattr
    import threading
    OS = 'Linux'
except (ImportError, AttributeError):
    try:
        import msvcrt
        OS = 'Windows'
    except ImportError:
        raise Exception('Mac is currently not supported')
        OS = 'Mac'  # unreachable: the raise above always fires
    else:
        getch = msvcrt.getwch
else:
    def fn():
        # Switch the terminal to raw mode, read exactly one character,
        # and always restore the previous terminal settings.
        try:
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        except:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
            raise Exception
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
    getch = fn
# Maximum number of commands kept in the history ring.
CMD_HISTORY = 30
class ChatLikeCMD():
    """Chat-style command line (Python 2 only: print statement, unicode,
    raw_input, thread module).

    A printer thread and a key-reader thread run concurrently so that
    messages pushed in via print_line() can be displayed while the user
    is still typing; each completed input line is published on the out
    pipe returned by get_command_pip(). Up/down arrows browse a bounded
    command history.
    """
    def __init__(self, header = 'LittleCoder', symbol = '>', inPip = None, inputMaintain = False):
        self.strBuff = []     # characters typed on the current line
        self.cmdBuff = []     # previously submitted commands (history)
        self.historyCmd = -1  # history browse index; -1 = not browsing
        self.cursor = 0
        self.inPip = [] if inPip == None else inPip  # messages to display
        self.outPip = []                             # submitted commands
        self.isLaunch = False
        self.isPause = False
        self.header = header
        self.symbol = symbol
        self.inputMaintain = inputMaintain  # keep typed line on screen after Enter
    def reprint_input(self):
        # Redraw the prompt plus everything typed so far.
        sys.stdout.write(self.header + self.symbol)
        if self.strBuff:
            for i in self.strBuff: sys.stdout.write(i)
        sys.stdout.flush()
    def getch(self):
        # Read one raw key via the module-level getch; normalize CR to LF.
        c = getch()
        return c if c != '\r' else '\n'
    def get_history_command(self, direction):
        # Step the history index and return the command there, or None
        # when outside the recorded range ('' when stepping below 0).
        if direction == 'UP':
            if self.historyCmd < CMD_HISTORY - 1 and self.historyCmd < len(self.cmdBuff) - 1: self.historyCmd += 1
        else:
            if self.historyCmd == 0: return ''
            if self.historyCmd > 0: self.historyCmd -= 1
        if -1 < self.historyCmd < len(self.cmdBuff): return self.cmdBuff[self.historyCmd]
    def output_command(self, s):
        # Publish the finished line (decoded to unicode) and record it in
        # the history.
        # NOTE(review): `[::-1].pop()[::-1]` evaluates to the popped
        # *string* (the oldest command), not a trimmed list, so once the
        # history reaches CMD_HISTORY this rebinds cmdBuff to a str and
        # the append below will raise AttributeError — looks like a bug.
        self.outPip.append(s if isinstance(s, unicode) else s.decode(sys.stdin.encoding))
        if len(self.cmdBuff) >= CMD_HISTORY: self.cmdBuff = self.cmdBuff[::-1].pop()[::-1]
        self.cmdBuff.append(s)
    def print_thread(self):
        # Printer loop: wipe the input line, print one pending message,
        # then redraw the prompt. Polls inPip every 10 ms.
        while self.isLaunch:
            if self.inPip:
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                sys.stdout.flush()
                print self.inPip.pop()
                # linux special
                sys.stdout.write('\r')
                sys.stdout.flush()
                self.reprint_input()
            time.sleep(0.01)
    def fast_input_test(self):
        # Try to read a key within ~1 ms: a timer interrupt aborts the
        # blocking getch, letting escape sequences be told apart from a
        # bare Esc key press.
        timer = threading.Timer(0.001, thread.interrupt_main)
        c = None
        try:
            timer.start()
            c = getch()
        except:
            pass
        timer.cancel()
        return c
    def process_direction_char(self, c):
        # Normalize Windows arrow-key scan codes to the ANSI letters
        # A/B/C/D (up/down/right/left) handled below.
        if OS == 'Windows':
            if ord(c) == 72:
                c = 'A'
            elif ord(c) == 80:
                c = 'B'
            elif ord(c) == 77:
                c = 'C'
            elif ord(c) == 75:
                c = 'D'
        if ord(c) == 68: # LEFT
            # Left arrow is treated as backspace; the real cursor motion
            # below is disabled (dead code after return, kept as-is).
            self.process_char('\b')
            return
            # cursor bugs
            if self.cursor > 0:
                if OS == 'Windows':
                    sys.stdout.write(chr(224) + chr(75))
                else:
                    sys.stdout.write(chr(27) + '[C')
                self.cursor -= 1
        elif ord(c) == 67: # RIGHT
            # Right arrow is ignored; cursor motion below is disabled too.
            return
            # cursor bugs
            if self.cursor < len(self.strBuff):
                if OS == 'Windows':
                    sys.stdout.write(chr(224) + chr(77))
                else:
                    sys.stdout.write(chr(27) + '[D')
                self.cursor += 1
        elif ord(c) == 65: # UP
            # Replace the current input with the previous history entry.
            hc = self.get_history_command('UP')
            if not hc is None:
                self.strBuff = [i for i in hc]
                self.cursor = len(hc)
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                self.reprint_input()
        elif ord(c) == 66: # DOWN
            # Replace the current input with the next history entry.
            hc = self.get_history_command('DOWN')
            if not hc is None:
                self.strBuff = [i for i in hc]
                self.cursor = len(hc)
                sys.stdout.write('\r' + ' ' * 50 + '\r')
                self.reprint_input()
        else:
            raise Exception(c)
    def process_char(self, c):
        # Dispatch one key press: escape sequences, Ctrl+C, backspace,
        # Enter, Windows arrow prefix, or a plain printable character.
        if ord(c) == 27: # Esc
            # On Linux an arrow key arrives as ESC [ A..D; probe for the
            # two follow-up bytes before treating this as a bare Esc.
            if OS == 'Linux':
                fitc1 = self.fast_input_test()
                if ord(fitc1) == 91:
                    fitc2 = self.fast_input_test()
                    if 65 <= ord(fitc2) <= 68:
                        self.process_direction_char(fitc2)
                        return
            sys.stdout.write('\r' + ' ' * 50 + '\r')
            sys.stdout.flush()
            self.reprint_input()
            self.outPip.append(c)
            time.sleep(0.02)
            # NOTE(review): 'fitc1' in dir() checks this *scope's* names,
            # so any probed-but-unconsumed bytes are replayed as input.
            if 'fitc1' in dir():
                self.process_char(fitc1)
                self.cursor += 1
            if 'fitc2' in dir():
                self.process_char(fitc2)
                self.cursor += 1
        elif ord(c) == 3: # Ctrl+C
            # Pause the loops and offer an interactive exit prompt.
            self.stop()
            self.isPause = True
            if raw_input('Exit?(y) ') == 'y':
                sys.stdout.write('Command Line Exit')
            else:
                self.start()
                self.isPause = False
        elif ord(c) in (8, 127): # Backspace
            if self.strBuff:
                if ord(self.strBuff[-1]) < 128:
                    sys.stdout.write('\b \b')
                else:
                    # Wide (non-ASCII) character: erase two columns and,
                    # on Linux, drop its extra UTF-8 bytes from the buffer.
                    sys.stdout.write('\b\b \b')
                    if OS == 'Linux':
                        self.strBuff.pop()
                        self.strBuff.pop()
                self.strBuff.pop()
                self.cursor -= 1
        elif c == '\n':
            # Enter: publish the buffered line and reset for the next one.
            if self.strBuff:
                if self.inputMaintain:
                    sys.stdout.write(c)
                else:
                    sys.stdout.write('\r' + ' ' * 50 + '\r')
                sys.stdout.flush()
                self.reprint_input()
                self.output_command(''.join(self.strBuff))
                self.strBuff = []
                self.historyCmd = -1
        elif ord(c) == 224: # Windows direction
            # Windows arrow keys arrive as 0xE0 followed by a scan code.
            if OS == 'Windows':
                direction = self.getch()
                self.process_direction_char(direction)
        else:
            # Plain character: echo it and append to the line buffer.
            sys.stdout.write(c)
            sys.stdout.flush()
            self.strBuff.append(c)
            self.cursor += 1
    def command_thread(self):
        # Reader loop: block on one key at a time and process it.
        c = None
        while self.isLaunch:
            c = self.getch()
            self.process_char(c)
            time.sleep(0.01)
    def start(self):
        # Launch the printer and reader threads and show the prompt.
        self.isLaunch = True
        thread.start_new_thread(self.print_thread, ())
        self.reprint_input()
        thread.start_new_thread(self.command_thread, ())
    def stop(self):
        # Clear the input line and signal both threads to finish.
        sys.stdout.write('\r' + ' ' * 50 + '\r')
        sys.stdout.flush()
        self.isLaunch = False
    def print_line(self, msg = None):
        # Queue a message for the printer thread.
        self.inPip.append(msg)
    def clear(self):
        # Clear the terminal and redraw the prompt.
        os.system('cls' if platform.system() == 'Windows' else 'clear')
        self.reprint_input()
    def get_command_pip(self):
        # Queue on which submitted commands appear.
        return self.outPip
    def set_header(self, header):
        self.header = header
if __name__ == '__main__':
    # Demo: start the command line, feed it a message every 3 seconds
    # from a background thread, and echo back whatever the user submits.
    c = ChatLikeCMD()
    s = c.get_command_pip()
    c.start()
    def loopinput(c):
        while True:
            c.print_line('LOOP INPUT......')
            time.sleep(3)
    thread.start_new_thread(loopinput, (c,))
    while c.isLaunch or c.isPause:
        if s:
            c.print_line(s.pop())
        time.sleep(0.01)
| [
"i7meavnktqegm1b@qq.com"
] | i7meavnktqegm1b@qq.com |
e9d20af65be43f8bf9ec9fb4ea874002c82bf2e2 | 22b906ca2dab20d8b88e58a5bc862ddc15960f05 | /src/python/app/controllers/sample_controller.py | 951eda108863580add8f7a796efcc5e2b11da0e3 | [] | no_license | itsumura-h/speed_test | 3705d7e587362f14ed7cbc33e0e5b6463e3d94da | 57c999e7833cfc1e3abb48bf46e77df48732e1a1 | refs/heads/master | 2022-12-15T13:11:15.125585 | 2019-09-03T14:14:53 | 2019-09-03T14:14:53 | 204,280,512 | 0 | 0 | null | 2022-04-22T22:15:41 | 2019-08-25T10:42:30 | Python | UTF-8 | Python | false | false | 268 | py | from django.http.response import JsonResponse
from ..services.domain_services.sample_service import SampleService
class SampleController:
    """Controller exposing the Fibonacci sample endpoint."""

    def fib(self, num):
        """Return a JSON response with the Fibonacci result for ``num``."""
        result = SampleService().fib(int(num))
        return JsonResponse(result)
| [
"dumblepy@gmail.com"
] | dumblepy@gmail.com |
f9529e9350359629ea1dd8c1d73962ffbc7413c0 | d930edd227427e03931a9d4be2997bfaea8cb3a6 | /unit_10/talk.py | 6e153a52f84943baf0c4afb8752eed45e8b71b7f | [] | no_license | mikeselezniov/python-21v | 2d95c5a87c9d1e9371577127d4dfbc03a1b77d7f | 8c5753ccdc07492ea9cf46acac643c9e9674d4c7 | refs/heads/master | 2020-12-28T04:38:54.456159 | 2015-07-05T19:06:24 | 2015-07-05T19:06:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # -*- coding:utf-8 -*-
# In Python a function can be defined... inside another function!
def talk():
    # Inside the definition of "talk" we can define another one...
    def whisper(word="да"):
        return word.lower()+"...";
    # ...and use it right away!
    print whisper()
# Now, EVERY time "talk" is called, the function "whisper" is defined
# inside it and then invoked.
talk() # prints: "да..."
# But outside of "talk" there is NO function "whisper":
try:
    print whisper()
except NameError, e:
    print e
#выведет : "name 'whisper' is not defined" | [
"janusnic@gmail.com"
] | janusnic@gmail.com |
05a625ebd18925ce38954b5a657ed70269ef23fd | f7c82725ae83896385109ffa1476eb98e411b13e | /setup.py | cee2f8da328b2ae8f2a02d7dd08b28374400aa18 | [
"MIT"
] | permissive | myousefi2016/vtkInterface | f173fda29648ee28fb8e0ba39d3724a6d4ef6205 | 3c18f4a5fe25b67f918809fd0589b80bbf3bff1d | refs/heads/master | 2021-06-20T03:41:42.666862 | 2017-08-04T12:44:52 | 2017-08-04T12:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | """
Installation file for python vtkInterface module
"""
from setuptools import setup
import os
from io import open as io_open
package_name = 'vtkInterface'
# Read __version__ from vtkInterface/_version.py (the exec pattern is
# borrowed from the tqdm project, which the original comment referenced).
__version__ = None
version_file = os.path.join(os.path.dirname(__file__), package_name, '_version.py')
with io_open(version_file, mode='r') as fd:
    # execute file from raw string; this binds __version__ in this scope
    exec(fd.read())
# Actual setup
setup(
    name=package_name,
    packages = [package_name, 'vtkInterface.tests', 'vtkInterface.examples'],
    # Version
    version=__version__,
    description='Easier Pythonic interface to VTK',
    long_description=open('README.rst').read(),
    # long_description=open('pypiREADME.rst').read(),
    # Author details
    author='Alex Kaszynski',
    author_email='akascap@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        # Target audience
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Information Analysis',
        # MIT License
        'License :: OSI Approved :: MIT License',
        # Untested, but will probably work for other python versions
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    # Website
    url = 'https://github.com/akaszynski/vtkInterface',
    keywords='vtk numpy plotting mesh',
    # Example data files shipped inside the package.
    package_data={'vtkInterface.examples': ['airplane.ply', 'ant.ply',
                                            'hexbeam.vtk', 'sphere.ply']},
    install_requires=['numpy'],
)
"akascap@gmail.com"
] | akascap@gmail.com |
deda3cc678f53a878cd15528c706ad51a410a119 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/copyPaste/Indent7709.dst.py | 3080b8a3c10dc880fcbc82fe92a34df9b8dd2b6c | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 21 | py | a = 1
<caret>
b = 2
| [
"Ekaterina.Tuzova@jetbrains.com"
] | Ekaterina.Tuzova@jetbrains.com |
be70b94decd74015163cc6d0d9c11389e0116e44 | 0bb1d74bac2872b76fb7ae5bfb40e36ecac7cfa2 | /py/funcion.py | 5e4a47cda6a6e1d6f2f02f8dbe8ac97b457d41d4 | [] | no_license | antalcides/migit | 965349fa53f4e2c99419fc15ae2e3c2e6c9cc3cf | e9b611e1ba91a63d52b14efb9963eec7f4c20d75 | refs/heads/master | 2021-07-13T04:59:38.732109 | 2020-09-05T03:39:24 | 2020-09-05T03:39:24 | 74,794,023 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 16:09:00 2015
@author: antalcides
"""
from math import* # in main
def f(x):
e = exp(-0.1*x)
s = sin(6*pi*x)
return e*s # in main
x = 2
y = f(x)
print 'f(%g)=%g' % (x, y) | [
"antalcides@gmail.com"
] | antalcides@gmail.com |
7fa8c4b0e4c9540f36fbf566ad99d48e913d1c26 | 885feec0699da96fcfa1e118adffbc94b4f31fd1 | /src/tbm_utils/path.py | dc75a16cd2a6428dad91d7e6043bacbfaaea12e7 | [
"MIT"
] | permissive | ddboline/tbm-utils | d48bfdb98737a4a45def81143bf13fa11f00f6d3 | 7f77bc25651079bc8884de1cfcb45e28d672fb16 | refs/heads/master | 2020-08-29T11:43:49.571204 | 2019-10-16T20:05:55 | 2019-10-16T20:05:55 | 218,022,543 | 0 | 0 | MIT | 2019-10-28T10:34:34 | 2019-10-28T10:34:34 | null | UTF-8 | Python | false | false | 664 | py | __all__ = [
'UNIX_PATH_RE',
'convert_unix_path'
]
import re
from pathlib import Path
UNIX_PATH_RE = re.compile(r'(/(cygdrive/)?)(.*)')
"""Regex pattern matching UNIX-style filepaths."""


def convert_unix_path(filepath):
	"""Convert a Unix-style (or Cygwin ``/cygdrive``) filepath to Windows form.

	Parameters:
		filepath (str, os.PathLike, Path): A filepath string.

	Returns:
		Path: A Windows-style path object. Inputs not matching
		:data:`UNIX_PATH_RE` are returned with forward slashes replaced
		by backslashes.
	"""

	match = UNIX_PATH_RE.match(str(filepath))
	if not match:
		# Coerce to str so os.PathLike/Path inputs work here too
		# (Path.replace renames files and would break on this call).
		# Also, r'\\' is *two* literal backslashes; a single backslash
		# is the Windows separator.
		return Path(str(filepath).replace('/', '\\'))

	parts = match.group(3).split('/')
	# First component is the drive letter: 'c/Users/...' -> 'C:/Users/...'.
	parts[0] = f"{parts[0].upper()}:/"

	return Path(*parts)
"mail@thebigmunch.me"
] | mail@thebigmunch.me |
6bbd6c656d8161a8b8be3d02844a8220aa9d5b9f | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /onos_id_bug_fixed_ids_file_blackbox_mcs2/interreplay_10_l_3/replay_config.py | 82595f379760b10766728d353bd972790e96556f | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py |
# STS replay configuration: re-executes the recorded event trace for this
# interreplay iteration against a two-controller ONOS setup on a 2-switch
# mesh and checks the 'check_for_file' invariant.
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# Two identical ONOS controllers (c1/c2) on adjacent VM addresses; note
# restart_cmd is the stop script — presumably the replayer starts them
# again itself, TODO confirm against sts docs.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./start-onos.sh start', label='c1', address='192.168.56.11', cwd='/home/mininet/ONOS', controller_type='onos', kill_cmd='./start-onos.sh stop', restart_cmd='./start-onos.sh stop'), ControllerConfig(start_cmd='./start-onos.sh start', label='c2', address='192.168.56.12', cwd='/home/mininet/ONOS', controller_type='onos', kill_cmd='./start-onos.sh stop', restart_cmd='./start-onos.sh stop')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=2",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False,
                     ignore_interposition=True,
                     kill_controllers_on_exit=False)
# Replay the recorded trace with all interposition/permissiveness options
# disabled; the run reports the 'bug_file_detected' signature on failure.
control_flow = Replayer(simulation_config, "experiments/onos_id_bug_fixed_ids_file_blackbox_mcs2/interreplay_10_l_3/events.trace",
                        input_logger=InputLogger(),
                        wait_on_deterministic_values=False,
                        allow_unexpected_messages=False,
                        delay_flow_mods=False,
                        default_dp_permit=False,
                        pass_through_whitelisted_messages=False,
                        invariant_check_name='check_for_file',
                        bug_signature="bug_file_detected")
| [
"a.hassany@gmail.com"
] | a.hassany@gmail.com |
522ca045013c70ddf30198f05b93ecc0ea09d608 | 33524b5c049f934ce27fbf046db95799ac003385 | /Дистанционная_подготовка/Программирование_на_python/11_Арифметика/zadacha_G.py | dab1de67ae08f8d442d48ab597d13da15fe422b7 | [] | no_license | mgbo/My_Exercise | 07b5f696d383b3b160262c5978ad645b46244b70 | 53fb175836717493e2c813ecb45c5d5e9d28dd23 | refs/heads/master | 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py |
'''
Сумма двух квадратов — represent n as a sum of two squares.

Reads an integer n from stdin and prints every ordered pair (i, j) of
positive integers with i*i + j*j == n, one per line, or 'Imposible'
when no such pair exists.
'''
n = int(input())


def square(value):
    """Return value squared."""
    return value * value


# Collect all solutions first so the no-solution message is printed at
# most once. (The original printed 'Imposible' on every overshoot of the
# inner loop, i.e. once per outer iteration.) Bounding both loops by n
# instead of the hard-coded range(1, 20) generalizes beyond n <= 722.
solutions = []
for i in range(1, n + 1):
    first = square(i)
    if first > n:
        break
    for j in range(1, n + 1):
        total = first + square(j)
        if total == n:
            solutions.append((i, j))
        elif total > n:
            break

if solutions:
    for i, j in solutions:
        print(i, j)
else:
    # The original (misspelled) message is kept: it is the script's
    # user-visible output contract.
    print('Imposible')
"mgbo433@gmail.com"
] | mgbo433@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.