max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
mdtoc.py | jmossberg/mdtoc | 0 | 12766451 | <filename>mdtoc.py<gh_stars>0
import argparse
import re
import sys
# Header structure
# ================
#
# ### Header title<a name="header-title"></a>
# | | | |
# Pounds | |
# Header title| |
# Anchor tag
# Name attribute
class MdToc:
    """Add a table of contents and HTML anchor tags to a markdown document.

    Headers are recognised by a leading run of 1-6 '#' characters (ATX style);
    the generated TOC is inserted below the header whose title equals
    ``self.TOC_HEADER``.
    """
    def __init__(self):
        # Matches an ATX markdown header: 1-6 pounds followed by one space.
        self.regexp_header = re.compile(r"^#{1,6} ")
        # Matches a trailing anchor tag such as <a name="my-header"></a>.
        self.regexp_anchor_tag = re.compile(r"<a name=.{1,300}></a>$")
        # Indentation (spaces) per header level in the generated TOC.
        self.HEADER_LEVEL_SPACES_INDENT = 4
        self.ANCHOR_TAG_PREFIX = '<a name="'
        self.ANCHOR_TAG_POSTFIX = '"></a>'
        # Title of the header below which the TOC is inserted.
        self.TOC_HEADER = "Contents"
    def is_header(self, line):
        """Return True if *line* is a markdown ATX header."""
        result = self.regexp_header.match(line)
        if result is None:
            return False
        else:
            return True
    def compose_name_attribute(self, header_text):
        """Turn a header title into an anchor name (lowercase, hyphenated).

        Parentheses, dots, quotes and colons are dropped; slashes and spaces
        become hyphens; Swedish letters are ASCII-folded; '&' becomes 'and'.
        """
        name_attribute = header_text.lower()
        name_attribute = name_attribute.replace('(', '')
        name_attribute = name_attribute.replace(')', '')
        name_attribute = name_attribute.replace('.', '')
        name_attribute = name_attribute.replace("'", "")
        name_attribute = name_attribute.replace('/', '-')
        name_attribute = name_attribute.replace(' ', '-')
        name_attribute = name_attribute.replace('å', 'a')
        name_attribute = name_attribute.replace('ä', 'a')
        name_attribute = name_attribute.replace('ö', 'o')
        name_attribute = name_attribute.replace('&', 'and')
        name_attribute = name_attribute.replace(':', '')
        return name_attribute
    def compose_anchor_tag(self, anchor_name):
        """Wrap *anchor_name* in an HTML anchor tag."""
        return self.ANCHOR_TAG_PREFIX + anchor_name + self.ANCHOR_TAG_POSTFIX
    def parse_header_elements(self, line):
        """Split a header line into ``[pounds, title, anchor_tag]``.

        ``anchor_tag`` is the empty string when the line has no trailing
        anchor. *line* must already be a header (see is_header()).
        """
        header_elements = []
        match_pounds = self.regexp_header.search(line)
        match_tag = self.regexp_anchor_tag.search(line)
        pounds = match_pounds.group()
        pounds = pounds.rstrip(' ')
        title_start_pos = match_pounds.end()
        # Title runs up to the anchor tag, or to end of line when absent.
        title_end_pos = match_tag.start() if match_tag else len(line)
        header_title = line[title_start_pos:title_end_pos]
        anchor_tag = match_tag.group() if match_tag else ''
        header_elements.append(pounds)
        header_elements.append(header_title)
        header_elements.append(anchor_tag)
        return header_elements
    def parse_header_level(self, line):
        """Return the header level (number of '#') of *line*."""
        header_elements = self.parse_header_elements(line)
        pounds = header_elements[0]
        level = pounds.count('#')
        return level
    def parse_header_title(self, line):
        """Return the title text of header *line* (anchor tag excluded)."""
        header_elements = self.parse_header_elements(line)
        header_title = header_elements[1]
        return header_title
    def parse_anchor_tag_name(self, line):
        """Return the name attribute of the line's anchor tag, or None."""
        header_elements = self.parse_header_elements(line)
        anchor_tag = header_elements[2]
        if anchor_tag == '':
            return None
        # <a name="my-name"></a>  ->  split on '"' puts the name at index 1.
        anchor_tag_split = anchor_tag.split('"')
        anchor_tag_name = anchor_tag_split[1]
        return anchor_tag_name
    def parse_header(self, line, line_number):
        """Build a header dict: title, level, 1-based line number, tag name."""
        header = {'header': self.parse_header_title(line),
                  'level' : self.parse_header_level(line),
                  'line' : line_number,
                  'tag' : self.parse_anchor_tag_name(line)}
        return header
    def parse_headers(self, lines):
        """Return a list of header dicts for *lines*.

        Lines inside ``{% highlight %} ... {% endhighlight %}`` blocks are
        skipped so code comments starting with '#' are not mistaken for
        headers.
        """
        toc = []
        code_highlight_section = False
        for index, line in enumerate(lines):
            line_number = index + 1
            if line.startswith("{% highlight"):
                code_highlight_section = True
            if not code_highlight_section and self.is_header(line):
                header = self.parse_header(line, line_number)
                toc.append(header)
            if line.startswith("{% endhighlight %}"):
                code_highlight_section = False
        return toc
    def generate_non_duplicate_name_attribute(self, base_tag, tags):
        """Return *base_tag*, suffixed with -2, -3, ... until not in *tags*."""
        tag = base_tag
        counter = 2
        while tag in tags:
            tag = base_tag + '-' + str(counter)
            counter += 1
        return tag
    def generate_tags(self, headers):
        """Fill in a 'new_tag' for every header that lacks an anchor tag.

        Existing tags are kept ('new_tag' stays None); generated tags are
        made unique against all tags seen so far. Mutates and returns
        *headers*.
        """
        tags = []
        for header in headers:
            header['new_tag'] = None
            if header['tag'] is None:
                tag = self.compose_name_attribute(header['header'])
                tag = self.generate_non_duplicate_name_attribute(tag, tags)
                header['new_tag'] = tag
            else:
                tag = header['tag']
            tags.append(tag)
        return headers
    def header_level_min(self, headers):
        """Return the smallest header level present in *headers*."""
        header_level_min = headers[0]['level']
        for header in headers:
            if header['level'] < header_level_min:
                header_level_min = header['level']
        return header_level_min
    def generate_toc(self, headers, skip_headers=0):
        """Return TOC lines of the form '* [Title](#tag)'.

        :param skip_headers: number of leading headers to omit from the TOC.
        """
        toc = []
        headers_in_toc = headers[skip_headers:len(headers)]
        header_level_min = self.header_level_min(headers_in_toc)
        for header in headers_in_toc:
            toc_line = ""
            # Indent relative to the shallowest header included in the TOC.
            spaces = self.HEADER_LEVEL_SPACES_INDENT * (header['level'] - header_level_min)
            while len(toc_line) < spaces:
                toc_line += ' '
            toc_line += '*'
            toc_line += ' ['
            toc_line += header['header'].rstrip()
            toc_line += '](#'
            if header['tag'] is None:
                toc_line += header['new_tag'].rstrip()
            else:
                toc_line += header['tag'].rstrip()
            toc_line += ')'
            toc.append(toc_line)
        return toc
    def add_anchor_tags(self, lines, headers):
        """Append generated anchor tags to header lines lacking one.

        Mutates *lines* in place (output_lines aliases it) and returns it.
        """
        output_lines = lines
        for header in headers:
            output_line = output_lines[header['line']-1].rstrip()
            if header['tag'] is None:
                output_line += self.compose_anchor_tag(header['new_tag'])
            output_lines[header['line']-1] = output_line
        return output_lines
    def add_toc(self, lines, skip_headers=0):
        """Return *lines* with anchor tags added and the TOC inserted."""
        output_lines = []
        headers = self.parse_headers(lines)
        headers_with_tags = self.generate_tags(headers)
        toc = self.generate_toc(headers_with_tags, skip_headers)
        content_with_tags = self.add_anchor_tags(lines, headers_with_tags)
        output_lines = self.insert_toc(content_with_tags, toc)
        return output_lines
    def insert_toc(self, lines_with_tags, toc):
        """Insert *toc* below the Contents header, replacing its old body.

        Exits the program with status 1 when no Contents header exists.
        """
        output = []
        insert_toc = False
        insert_toc_done = False
        for line in lines_with_tags:
            if self.is_header(line):
                if self.TOC_HEADER == self.parse_header_title(line):
                    insert_toc = True
                    output.append(line)
                    output.append('')
                    output += toc
                    output.append('')
                    insert_toc_done = True
                else:
                    insert_toc = False
            # While insert_toc is True we are inside the old Contents
            # section, whose lines are dropped.
            if not insert_toc:
                output.append(line)
        if not insert_toc_done:
            print('ERROR: Document does not contain header with name Contents')
            sys.exit(1)
        return output
def parse_command_line_arguments(argv=None):
    """Parse the command line.

    :param argv: optional list of argument strings; when None (the default,
                 preserving the original no-argument call) argparse reads
                 sys.argv[1:].
    :return: (filename, skip_headers) tuple, skip_headers as int.
    """
    parser_help_text="""Add table of contents to markdown file
mdtoc will create a table of contents and insert below
the header named Contents. Any existing lines in the
Contents section will be removed. An error message
will be emitted if no Contents header is found in
the input file.
Example:
$ python3 mdtoc.py article.md
article.md before:
# Contents
# Header 1
Some text between header 1 and 2
## Header 2
Some text below header 2
article.md after:
# Contents<a name="contents"></a>
* [Contents](#contents)
* [Header 1](#header-1)
* [Header 2](#header-2)
# Header 1<a name="header-1"></a>
Some text between header 1 and 2
## Header 2<a name="header-2></a>
Some text below header 2
"""
    parser = argparse.ArgumentParser(description=parser_help_text,formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("filename", help="Markdown file to add table of contents to")
    # type=int lets argparse do the conversion (and report bad values),
    # replacing the manual int() cast of the original.
    parser.add_argument("--skip_headers",
                        type=int,
                        help="number of headers in the beginning of the file to not include in the toc (default: 0)",
                        default=0)
    args = parser.parse_args(argv)
    return args.filename, args.skip_headers
def main():
    """Read the markdown file, add a TOC, and rewrite the file in place."""
    mt = MdToc()
    filename, skip_headers = parse_command_line_arguments()
    # Context managers guarantee the file handles are closed even if
    # processing raises (the original leaked handles on exceptions).
    with open(filename, 'r') as f_in:
        # Strip trailing newlines so header parsing sees clean lines.
        input_lines = [line.rstrip('\n') for line in f_in]
    output_lines = mt.add_toc(input_lines, skip_headers)
    with open(filename, 'w') as f_out:
        for line in output_lines:
            f_out.write(line + '\n')
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| 2.734375 | 3 |
CIApi.py | jironghuang/Python-CityIndex-API | 3 | 12766452 | <filename>CIApi.py<gh_stars>1-10
"""
Author: <NAME>
License: Apache 2.0 License
Copyright 2015
"""
import json
from CITool import *
import pprint as pp
import requests
from Singleton import *
"""
Market Tag ID Lookup Table
=============================
Tag ID Description
80 FX
81 FX FX-Major
82 FX AUD-Crosses
83 FX CHF-Crosses
84 FX EUR-Crosses
85 FX GBP-Crosses
86 FX Scandies-Crosses
87 FX JPY-Crosses
88 FX EM-Europe
89 FX EM-Asia
90 Indices
91 Indices UK
92 Indices US
93 Indices Europe
94 Indices Asia
95 Indices Australia
96 Commodities
97 Commodities Energy
98 Commodities Grain
99 Commodities Soft
100 Commodities Other
101 Commodities Options
102 Equities
103 Equities UK
104 Equities US
105 Equities Europe
106 Equities Asia
107 Equities Austria
108 Equities Belgium
109 Equities Canada
110 Equities Denmark
111 Equities France
112 Equities Germany
113 Equities Ireland
114 Equities Italy
115 Equities Netherlands
116 Equities Norway
117 Equities Poland
118 Equities Portugal
119 Equities Spain
120 Equities Sweden
121 Equities Switzerland
122 Equities Finland
123 Sectors
124 Sectors UK
125 Metals
126 Bonds
127 Interest Rates
128 iShares
129 iShares UK
130 iShares US
131 iShares Asia
132 iShares Australia
133 iShares Emerging-Markets
134 Options
135 Options UK 100
136 Options Germany 30
137 Options US SP 500
138 Options Wall Street
139 Options Australia 200
140 Options US Crude Oil
141 Options GBP/USD
142 Options EUR/USD
143 Options AUD/USD
144 Options Gold
145 Equities Australia
146 Popular
147 Popular Spreads
150 Popular Australia
"""
class COrderList:
    """Thin wrapper around the list of order dicts returned by the API."""

    def __init__(self, orders):
        """Hold *orders*; an empty/None input becomes an empty list."""
        self.orders = orders if orders else []

    def select_orders_by_marketID(self, marketID):
        """Return every held order whose MarketId equals *marketID*."""
        return [entry for entry in self.orders if entry["MarketId"] == marketID]
class API(Singleton):
    """Summary of API
    This is a Cityindex API Class.
    Features:
    * Authentication
    - login
    - logout
    * Account Information
    -Get Client and Trading Account
    * Margin
    - Get Client Account Margin
    * Market
    - Full Search With Tags
    * Trades and Orders
    - send a Market Order
    - simulate Trade order
    - Modify Order
    * Price History
    """
    OP_BUY = "buy"
    OP_SELL = "sell"
    def __init__(self, uid, password, isLive=True):
        """
        :param uid: Username
        :param password: Password
        :param isLive: Live or Production
        :return:
        """
        self.uid = uid
        self.password = password
        self.trading_account_info = {}
        self.client_account_margin = {}
        if isLive:
            self.APIURL = 'https://ciapi.cityindex.com/TradingAPI'
        else:
            self.APIURL = "https://ciapipreprod.cityindextest9.co.uk/TradingApi/"
    """
    Authentication
    ===============================
    """
    def login(self):
        """
        Login to CityIndex
        :return: True on success, False on failure
        """
        # 'Password' was a redacted "<PASSWORD>" placeholder (a syntax
        # error); it must send the password supplied to __init__.
        data = {'Password': self.password,
                'AppVersion': '1',
                'AppComments': 'LoginFromPython',
                'Username': self.uid,
                'AppKey': 'cipythonAPP'}
        url = self.APIURL + '/session'
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, json.dumps(data), headers=headers)
        if response.status_code != 200:
            print("Failed Login " + str(response.status_code))
            return False
        self.login_resp = response.json()
        self.session = response.json()["Session"]
        return True
    def logout(self):
        """
        Logout of Cityindex
        :return: True on success, False on failure
        """
        data = {"Username": self.uid, "Session": self.session}
        url = self.APIURL + '/session/deleteSession?Username=' + self.uid + '&Session=' + self.session
        headers = {'Content-type': 'application/json'}
        # headers must be passed by keyword: the third positional parameter
        # of requests.post() is `json`, not `headers`.
        response = requests.post(url, json.dumps(data), headers=headers)
        if response.status_code != 200:
            print("Failed Logout " + str(response.status_code))
            return False
        return True
    """
    Account Information
    ===============================
    """
    def get_trading_account_info(self):
        """
        Get User's ClientAccountId and a list of their TradingAccounts
        :return: [dictionary] AccountInformationResponseDTO, or False on error
        """
        data = {"Username": self.uid, "Session": self.session}
        url = self.APIURL + "/useraccount/ClientAndTradingAccount"
        response = requests.get(url, data)
        if response.status_code != 200:
            # requests Response objects expose status_code (there is no
            # .status attribute; the original raised AttributeError here).
            print("Error retrieving Trading Acc info: " + str(response.status_code))
            return False
        self.trading_account_info = response.json()
        return self.trading_account_info
    """
    Margin
    ===============================
    """
    def get_client_account_margin(self):
        """
        Retrieves the current margin values for the logged-in client account.
        :return: ApiClientAccountMarginResponseDTO, or False on error
        """
        url = self.APIURL + '/margin/clientaccountmargin?Username=' + self.uid + '&Session=' + self.session
        response = requests.get(url)
        if response.status_code != 200:
            print("Error getting clientaccountmargin : " + str(response.status_code))
            return False
        self.client_account_margin = response.json()
        return self.client_account_margin
    """
    Market
    ===============================
    """
    def get_full_market_info(self, tagId="0"):
        """
        Return market information
        :param tagId: [string] market Tag IDs
        :return: [Dictionary] market_info keyed by UnderlyingRicCode, or False
        """
        url = self.APIURL + "/market/fullsearchwithtags?Username=" + self.uid + "&Session=" + self.session + "&maxResults=200&tagId=" + tagId
        response = requests.get(url)
        if response.status_code != 200:
            return False
        response = json.loads(response.text)
        self.market_info = {}
        for symbol in response["MarketInformation"]:
            self.market_info[symbol["UnderlyingRicCode"]] = symbol
        return self.market_info
    """
    Price History
    ===============================
    """
    def get_pricebar_history(self, symbol, interval="HOUR", span="1", pricebars="65535", priceType="BID"):
        """
        Get historic price bars for the specified market in OHLC (open, high, low, close) format,
        suitable for plotting in candlestick charts. Returns price bars in ascending order up to the current time.
        When there are no prices for a particular time period, no price bar is returned.
        Thus, it can appear that the array of price bars has "gaps", i.e.
        the gap between the date & time of each price bar might not be equal to interval x span.
        :param symbol: ricCode (not the marketTagID)
        :param interval: [string] (TICK, MINUTE, HOUR, DAY, WEEK)
        :param span: [string] (1, 2, 3, 5, 10, 15, 30 MINUTE) and (1, 2, 4, 8 HOUR) TICK, DAY and WEEK must be supplied with a span of 1
        :param pricebars: [string] number of pricebars in string
        :param priceType: [string] BID, MID, ASK
        :return: GetPriceBarResponseDTO, or False on error
        """
        # Requires a prior get_full_market_info() call to populate market_info.
        url = self.APIURL + "/market/" + str(
            self.market_info[symbol][
                "MarketId"]) + "/barhistory?Username=" + self.uid + "&Session=" + self.session + "&interval=" + interval + "&span=" + span + "&PriceBars=" + pricebars + "&PriceType=" + priceType
        response = requests.get(url)
        if response.status_code != 200:
            print("GetPriceBarHistory: HTTP Error " + str(response.status_code))
            return False
        return response.json()
    """
    Trades and Orders
    ===============================
    """
    def simulate_trade_order(self, symbol, cmd, qty, data):
        """
        Simulate a trade order without placing it.
        :param symbol: [string] ricCode
        :param cmd: [string] OP_BUY or OP_SELL
        :param qty: [number] quantity
        :param data: [dict] latest price tick with "Bid", "Offer", "AuditId"
        :return: response JSON, or False on error
        """
        url = self.APIURL + '/order/simulate/newtradeorder?Username=' + self.uid + "&Session=" + self.session
        # Build the request body under a separate name so the incoming
        # price `data` parameter is not shadowed.
        payload = {
            "OcoOrder": None,
            "Applicability": None,
            "Direction": cmd,
            "BidPrice": data["Bid"],
            "AuditId": data["AuditId"],
            "AutoRollover": False,
            "MarketId": self.market_info[symbol]["MarketId"],
            "isTrade": True,
            "OfferPrice": data["Offer"],
            "Quantity": qty,
            "QuoteId": None,
            "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
            "PositionMethodId": 1,
            "IfDone": []
        }
        pp.pprint(payload)
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, json.dumps(payload), headers=headers)
        if response.status_code != 200:
            print("Error SimulateTrade : " + str(response.status_code))
            print("Reason: " + response.reason)
            print("URL " + url)
            return False
        pp.pprint(response.json())
        return response.json()
    def orders_total(self):
        """Return the number of open positions, or False on error."""
        url = self.APIURL + '/order/openpositions'
        data = {"Username": self.uid, "Session": self.session,
                "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
                "maxResults": 10000}
        response = requests.get(url, data)
        if response.status_code != 200:
            print("ListOpenPositions Error: " + str(response.status_code))
            return False
        return len(response.json()["OpenPositions"])
    def get_orders(self):
        """Return open positions wrapped in a COrderList, or False on error."""
        url = self.APIURL + '/order/openpositions'
        data = {"Username": self.uid, "Session": self.session,
                "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
                "maxResults": 10000}
        response = requests.get(url, data)
        if response.status_code != 200:
            print("ListOpenPositions Error: " + str(response.status_code))
            return False
        orders = COrderList(response.json()["OpenPositions"])
        return orders
    def get_order_history(self):
        """Return trade history wrapped in a COrderList, or False on error."""
        url = self.APIURL + '/order/tradehistory'
        data = {"Username": self.uid, "Session": self.session,
                "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
                "maxResults": 10000}
        response = requests.get(url, data)
        if response.status_code != 200:
            # Message corrected: this endpoint is tradehistory, not
            # openpositions (copy-paste error in the original).
            print("TradeHistory Error: " + str(response.status_code))
            return False
        orders = COrderList(response.json()["TradeHistory"])
        return orders
    def get_order(self, orderId):
        """
        Fetch a single order by its id.
        :param orderId: order identifier
        :return: response JSON, or False on error
        """
        url = self.APIURL + "/order/" + str(orderId) + "?UserName=" + self.uid + "&Session=" + self.session
        print("Get Order URL : " + url)
        response = requests.get(url)
        if response.status_code != 200:
            print("Get Order Error: " + str(response.status_code))
            return False
        pp.pprint(response.json())
        return response.json()
    def close_order(self, symbol, order, data):
        """
        Close an existing position by placing the opposite trade against it.
        :param symbol: [string] ricCode
        :param order: [dict] the open order to close
        :param data: [dict] latest price tick with "Bid", "Offer", "AuditId"
        :return: response JSON, or False on error
        """
        orderID = order["OrderId"]
        cmd = order["Direction"]
        qty = order["Orders"][0]["Quantity"]
        # Closing requires trading in the opposite direction.
        if cmd == self.OP_BUY:
            oppcmd = self.OP_SELL
        else:
            oppcmd = self.OP_BUY
        payload = {
            "PositionMethodId": None,
            "BidPrice": data["Bid"],
            "OfferPrice": data["Offer"],
            "AuditId": data["AuditId"],
            "MarketId": self.market_info[symbol]["MarketId"],
            "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
            "Direction": oppcmd,  # must have!
            "Quantity": qty,
            "Close": [orderID]
        }
        url = self.APIURL + '/order/newtradeorder?Username=' + self.uid + "&Session=" + self.session
        headers = {'Content-type': 'application/json'}
        pp.pprint(payload)
        response = requests.post(url, json.dumps(payload), headers=headers)
        if response.status_code != 200:
            # Message corrected: this is the close path, not an update.
            print("Error Close Trade : " + str(response.status_code))
            print("Reason: " + response.reason)
            print("URL " + url)
            return False
        pp.pprint(response.json())
        return response.json()
    def modify_order(self, symbol, order, stoploss=0.0, takeprofit=0.0, Guaranteed=False):
        """
        Update the stop-loss and/or take-profit attached to an open order.
        A value of 0.0 leaves that leg untouched.
        :return: response JSON, or False on error
        """
        orderID = order["OrderId"]
        qty = order["Orders"][0]["Quantity"]
        IfDone = order["Orders"][0]["IfDone"]
        cmd = order["Direction"]
        if cmd == self.OP_BUY:
            oppcmd = self.OP_SELL
        else:
            oppcmd = self.OP_BUY
        # Round trigger prices to the market's quoted precision.
        stoploss = round(stoploss, self.market_info[symbol]["PriceDecimalPlaces"])
        takeprofit = round(takeprofit, self.market_info[symbol]["PriceDecimalPlaces"])
        data = {
            "MarketId": self.market_info[symbol]["MarketId"],
            "OrderId": orderID,
            "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
            "IfDone": [],
            "Direction": cmd  # must have!
        }
        if stoploss > 0.0:
            stopLossData = {"Stop": {
                "TriggerPrice": stoploss,
                # "OrderId" : order["Orders"][0]["IfDone"][0]["Stop"]["OrderId"],
                "Direction": oppcmd,
                "Quantity": qty
                # "ParentOrderId":order["Orders"][0]["IfDone"][]
            }}
            # Re-use the existing stop order's id so it is updated in place.
            for stoplimitorder in IfDone:
                if stoplimitorder["Stop"] is not None:
                    stopLossData["Stop"]["OrderId"] = stoplimitorder["Stop"]["OrderId"]
            data["IfDone"].append(stopLossData)
        if takeprofit > 0.0:
            limitData = {"Limit": {
                "TriggerPrice": takeprofit,
                "Direction": cmd,
                "Quantity": qty
                # "ParentOrderId":orderID
            }}
            for stoplimitorder in IfDone:
                if stoplimitorder["Limit"] is not None:
                    limitData["Limit"]["OrderId"] = stoplimitorder["Limit"]["OrderId"]
            data["IfDone"].append(limitData)
        url = self.APIURL + '/order/updatetradeorder?Username=' + self.uid + "&Session=" + self.session
        headers = {'Content-type': 'application/json'}
        pp.pprint(data)
        response = requests.post(url, json.dumps(data), headers=headers)
        if response.status_code != 200:
            print("Error Update Trade : " + str(response.status_code))
            print("Reason: " + response.reason)
            print("URL " + url)
            return False
        pp.pprint(response.json())
        return response.json()
    def send_market_order(self, symbol, cmd, qty, data, stoploss=0.0, takeprofit=0.0, Guaranteed=False):
        """
        Place a trade on a particular market using market price
        :param symbol: [string] ricCode
        :param cmd: [string] OP_BUY or OP_SELL
        :param qty: [integer] qty of contract to place
        :param data: [dict] latest price tick with "Bid", "Offer", "AuditId"
        :param stoploss: [double] stop loss price
        :param takeprofit: [double] take profit price
        :param Guaranteed: [Boolean] To be Implemented
        :return: False if failed, otherwise ApiTradeOrderResponseDTO
        """
        stoploss = round(stoploss, self.market_info[symbol]["PriceDecimalPlaces"])
        takeprofit = round(takeprofit, self.market_info[symbol]["PriceDecimalPlaces"])
        qty = round(qty, 0)
        # Validate quantity against the market's size limits before sending.
        if qty < self.market_info[symbol]['WebMinSize']:
            print("qty " + str(qty) + " < WebminSize[" + str(self.market_info[symbol]['WebMinSize']) + "]")
            return False
        if cmd == self.OP_BUY and qty > self.market_info[symbol]['MaxLongSize']:
            print("qty " + str(qty) + " > MaxLongSize[" + str(self.market_info[symbol]['MaxLongSize']) + "]")
            return False
        if cmd == self.OP_SELL and qty > self.market_info[symbol]['MaxShortSize']:
            print("qty " + str(qty) + " > MaxShortSize[" + str(self.market_info[symbol]['MaxShortSize']) + "]")
            return False
        url = self.APIURL + '/order/newtradeorder?Username=' + self.uid + "&Session=" + self.session
        payload = {
            "IfDone": [],
            "Direction": cmd,
            # "ExpiryDateTimeUTCDate":null,
            # "LastChangedDateTimeUTCDate":null,
            # "OcoOrder":null,
            # "Type":null,
            # "ExpiryDateTimeUTC":null,
            # "Applicability":null,
            # "TriggerPrice":null,
            "BidPrice": data["Bid"],
            "AuditId": data["AuditId"],
            "AutoRollover": True,
            "MarketId": self.market_info[symbol]["MarketId"],
            "OfferPrice": data["Offer"],
            # "OrderId":0,
            "Currency": self.market_info[symbol]["MarketSizesCurrencyCode"],
            "Quantity": qty,
            "QuoteId": None,
            # "LastChangedDateTimeUTC":None,
            "PositionMethodId": 2,
            "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
            # "MarketName":"Wall Street CFD",
            # "Status":null,
            "isTrade": True
            # "Reference":"PythonAPI"
        }
        # Stop/limit legs trade against the position's direction.
        if cmd == self.OP_BUY:
            oppcmd = self.OP_SELL
        else:
            oppcmd = self.OP_BUY
        if stoploss > 0.0:
            payload["IfDone"].append({"Stop": {
                # "Guaranteed" : Guaranteed,
                "TriggerPrice": stoploss,
                "Direction": oppcmd,
                "MarketId": self.market_info[symbol]["MarketId"],
                "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
                "Quantity": qty
                # "ExpiryDateTimeUTC" : None,
                # "Applicability" : "GTC",
                # "ParentOrderId" : 0
            }})
        if takeprofit > 0.0:
            payload["IfDone"].append({"Limit": {
                "TriggerPrice": takeprofit,
                "Direction": oppcmd,
                "MarketId": self.market_info[symbol]["MarketId"],
                "TradingAccountId": self.trading_account_info["TradingAccounts"][0]["TradingAccountId"],
                "Quantity": qty
                # "Guaranteed" : Guaranteed,
                # "TriggerPrice" : takeprofit,
                # "ExpiryDateTimeUTC" : None,
                # "Applicability" : "GTC",
                # "ParentOrderId" : 0
            }})
        headers = {'Content-type': 'application/json'}
        response = requests.post(url, json.dumps(payload), headers=headers)
        if response.status_code != 200:
            # Message corrected: this is the live order path, not the
            # simulation (copy-paste error in the original).
            print("Error NewTradeOrder : " + str(response.status_code))
            print("Reason: " + response.reason)
            print("URL " + url)
            return False
        jsonData = response.json()
        pp.pprint(jsonData)
        jsonData["Direction"] = cmd
        return jsonData
    def cross_rate(self, symbol, cmd, data):
        """
        Return the conversion rate from Trading Currency to Home Currency
        e.g. If you are trading US stock in USD and your home currency is SGD. The rate is to convert USD to SGD
        e.g. If you trading EURJPY currency in JPY and your home is SGD. The rate convert JPY to SGD
        Rate = Trading Currency / Home Currency
        (how much Trading Currency per $1 Home Currency)
        :param symbol: [string] ricCode
        :param cmd: [string] OP_BUY or OP_SELL
        :param data: [dict] latest price tick with "Bid", "Offer", "AuditId"
        :return: cross_rate, or False on error
        """
        qty = self.market_info[symbol]["WebMinSize"]
        simulatedOrder = self.simulate_trade_order(symbol, cmd, qty, data)
        if not simulatedOrder:
            return False
        if simulatedOrder["Status"] != 1:
            return False
        if simulatedOrder["StatusReason"] != 1:
            return False
        #Total Margin required after this trade
        SimulatedTotalMarginRequirement = simulatedOrder["SimulatedTotalMarginRequirement"]
        #Total current Margin
        actualTotalMargin = simulatedOrder["ActualTotalMarginRequirement"]
        # Margin attributable to just this trade.
        SimulatedTotalMarginRequirement = SimulatedTotalMarginRequirement - actualTotalMargin
        if SimulatedTotalMarginRequirement <= 0.0:
            return False
        marginUnit = self.market_info[symbol]["MarginFactorUnits"]
        marginFactor = self.market_info[symbol]["MarginFactor"]
        marginMultiplier = 1.0
        if marginUnit == 26:  # %
            marginMultiplier = marginFactor / 100.0
        elif marginUnit == 27:
            # The original compared marginFactor (a value) against the unit
            # code 27; the unit code lives in marginUnit, as in the branch
            # above.
            marginMultiplier = marginFactor
        qtyMultiplier = 1.0 / self.market_info[symbol]["BetPer"]
        qty = qty * qtyMultiplier
        if cmd == self.OP_BUY:
            totalTradingCash = qty * data["Offer"] * marginMultiplier
        else:
            totalTradingCash = qty * data["Bid"] * marginMultiplier
        crossRate = totalTradingCash / SimulatedTotalMarginRequirement
        return crossRate
class Context:
    """Holds streaming price/margin state for one market and feeds an EA.

    Price history is kept newest-first in parallel lists (index 0 is the
    current, still-forming bar). Relies on helpers from CITool
    (wcfDate2Sec, intervalUnitSec and, presumably, the `time` module --
    TODO confirm CITool re-exports it) and on the API singleton.
    """
    # Push server base URL for the Lightstreamer subscriptions.
    lightstreamer_url = 'https://push.cityindex.com'
    def __init__(self, symbol, ea_param, time_interval, time_span, handle_data, bars_count="65000"):
        # OHLC history, newest-first; index 0 is the partial (current) bar.
        self.high = []
        self.low = []
        self.close = []
        self.open = []
        self.time = []
        # Last received price tick (dict).
        self.data = {}
        # Indicator objects; each gets onCalculate() on every tick.
        self.indicators = []
        self.clientAccountMarginData = {}
        self.EAParam = ea_param
        self.symbol = symbol
        self.TimeInterval = time_interval
        self.TimeSpan = time_span
        self.BarsCount = bars_count
        # Callback invoked with (self, tick) after each price update.
        self.handle_data = handle_data
        # Lightstreamer price subscription: adapter, item id and field schema.
        self.LS_PRICE_DATA_ADAPTER = "PRICES"
        self.LS_PRICE_ID = "PRICE."
        self.LS_PRICE_SCHEMA = ["MarketId", "AuditId", "Bid",
                                "Offer", "Change", "Direction",
                                "High", "Low", "Price", "StatusSummary",
                                "TickDate"]
        # Lightstreamer margin subscription: adapter, item id and field schema.
        self.LS_MARGIN_DATA_ADAPTER = "CLIENTACCOUNTMARGIN"
        self.LS_MARGIN_ID = "CLIENTACCOUNTMARGIN"
        self.LS_MARGIN_SCHEMA = ["Cash",
                                 "CurrencyId",
                                 "CurrencyISO",
                                 "Margin",
                                 "MarginIndicator",
                                 "NetEquity",
                                 "OpenTradeEquity",
                                 "TradeableFunds",
                                 "PendingFunds",
                                 "TradingResource",
                                 "TotalMarginRequirement"]
    def init_data(self):
        """Seed the OHLC lists from the full bar history (newest-first)."""
        api = API()
        # Complete the Lightstreamer item id with the market's numeric id.
        self.LS_PRICE_ID = self.LS_PRICE_ID + str(api.market_info[self.symbol]["MarketId"])
        self.clientAccountMarginData = api.get_client_account_margin()
        priceBars = False
        # Retry until the history request succeeds (it returns False on error).
        while not priceBars:
            priceBars = api.get_pricebar_history(self.symbol,
                                                 self.TimeInterval,
                                                 self.TimeSpan,
                                                 self.BarsCount)
        # Bars arrive oldest-first; insert(0, ...) reverses to newest-first.
        for counter, price in enumerate(priceBars["PriceBars"]):
            self.high.insert(0, price["High"])
            self.low.insert(0, price["Low"])
            self.close.insert(0, price["Close"])
            self.open.insert(0, price["Open"])
            # wcfDate2Sec presumably converts a WCF /Date(ms)/ string to
            # epoch seconds (defined in CITool) -- TODO confirm.
            self.time.insert(0, wcfDate2Sec(price["BarDate"]))
        # The still-forming bar goes to index 0.
        partialBar = priceBars["PartialPriceBar"]
        self.high.insert(0, partialBar["High"])
        self.low.insert(0, partialBar["Low"])
        self.close.insert(0, partialBar["Close"])
        self.open.insert(0, partialBar["Open"])
        self.time.insert(0, wcfDate2Sec(partialBar["BarDate"]))
    def update_data(self):
        """Refresh the current bar and insert any newly completed bars."""
        api = API()
        last_time_sec = self.time[0];
        # Number of bar intervals elapsed since the last known bar time.
        time_diff = int((time.time() - last_time_sec) / intervalUnitSec(self))
        priceBars = False
        while not priceBars:
            # Fetch just enough bars to cover the gap (+2 for safety margin).
            priceBars = api.get_pricebar_history(self.symbol,
                                                 self.TimeInterval,
                                                 self.TimeSpan,
                                                 str(time_diff + 2))
        # Overwrite index 0 with the latest partial bar.
        partialBar = priceBars["PartialPriceBar"]
        curTime = wcfDate2Sec(partialBar["BarDate"])
        self.high[0] = partialBar["High"]
        self.low[0] = partialBar["Low"]
        self.open[0] = partialBar["Open"]
        self.close[0] = partialBar["Close"]
        self.time[0] = curTime
        # Insert completed bars that are newer than the last stored one
        # at index 1 (just behind the partial bar).
        for counter, price in enumerate(priceBars["PriceBars"]):
            time_sec = wcfDate2Sec(price["BarDate"])
            time_diff = int((time_sec - self.time[1]) / intervalUnitSec(self))
            if time_diff > 0:
                self.high.insert(1, price["High"])
                self.low.insert(1, price["Low"])
                self.close.insert(1, price["Close"])
                self.open.insert(1, price["Open"])
                self.time.insert(1, time_sec)
    def prepare_data(self, data):
        """Dispatch a Lightstreamer update to price or margin handling.

        _tableIdx_ 1 is a price tick; _tableIdx_ 2 is a margin update.
        Missing Bid/Offer fields are backfilled from the previous tick.
        """
        tableNo = data["_tableIdx_"]
        if tableNo == 1:
            data['TickDate'] = wcfDate2Sec(data['TickDate'])
            if data['Offer']:
                data['Offer'] = float(data['Offer'])
            else:
                # No new offer in this update: carry the last known one.
                if len(self.data) > 0:
                    if self.data["Offer"]:
                        data['Offer'] = self.data["Offer"]
            if data['Bid']:
                data['Bid'] = float(data['Bid'])
            else:
                if len(self.data) > 0:
                    if self.data["Bid"]:
                        data['Bid'] = self.data["Bid"]
            self.data = data
            self.update_data()
            # Recompute all indicators, then hand control to the EA callback.
            for counter, indicator in enumerate(self.indicators):
                indicator.onCalculate(self, len(self.time))
            self.handle_data(self, data)
        elif tableNo == 2:
            # Margin update: copy only the fields present in this delta.
            if data["Cash"]:
                self.clientAccountMarginData["Cash"] = data["Cash"]
            if data["CurrencyISO"]:
                self.clientAccountMarginData["CurrencyISO"] = data["CurrencyISO"]
            if data["CurrencyId"]:
                self.clientAccountMarginData["CurrencyId"] = data["CurrencyId"]
            if data["Margin"]:
                self.clientAccountMarginData["Margin"] = data["Margin"]
            if data["MarginIndicator"]:
                self.clientAccountMarginData["MarginIndicator"] = data["MarginIndicator"]
            if data["NetEquity"]:
                self.clientAccountMarginData["NetEquity"] = data["NetEquity"]
            if data["OpenTradeEquity"]:
                self.clientAccountMarginData["OpenTradeEquity"] = data["OpenTradeEquity"]
            if data["PendingFunds"]:
                self.clientAccountMarginData["PendingFunds"] = data["PendingFunds"]
            if data["TotalMarginRequirement"]:
                self.clientAccountMarginData["TotalMarginRequirement"] = data["TotalMarginRequirement"]
            if data["TradeableFunds"]:
                self.clientAccountMarginData["TradeableFunds"] = data["TradeableFunds"]
            if data["TradingResource"]:
                self.clientAccountMarginData["TradingResource"] = data["TradingResource"]
# Smoke test: exercise each API call in sequence, logging out on failure.
if __name__ == "__main__":
    # raw_input() is Python 2 only and raises NameError on Python 3
    # (which this file targets -- print() is used as a function throughout).
    accid = input("Enter City Index account ID: ")
    password = input("Enter City Index password: ")
    api = API(accid, password)
    print("Test Login...")
    if api.login():
        print("\tSuccess!\n\n")
    else:
        print("\tFailed\n\n")
        exit()
    print("Test Trading Account Information...")
    if api.get_trading_account_info():
        pp.pprint(api.trading_account_info)
        print("\tSuccess!")
    else:
        print("\tFailed\n\n")
        api.logout()
        exit()
    print("Test Margin Info...")
    if api.get_client_account_margin():
        pp.pprint(api.client_account_margin)
        print("\tSuccess!")
    else:
        print("\tFailed\n\n")
        api.logout()
        exit()
    print("Test get FX-major (81) market info...")
    if api.get_full_market_info("81"):
        pp.pprint(api.market_info)
        print("\tSuccess!")
    else:
        print("\tFailed\n\n")
        api.logout()
        exit()
    print("Test Get 3x EUR/USD 1 HOUR BID price Bar History...")
    # priceType was "3" (valid values are BID/MID/ASK); the test description
    # says BID. NOTE(review): "41" is assumed to be the market's
    # UnderlyingRicCode key in market_info -- confirm.
    price_bars = api.get_pricebar_history("41", "HOUR", "1", "3", "BID")
    if price_bars:
        pp.pprint(price_bars)
        print("\tSuccess!")
    else:
        print("\tFailed\n\n")
        api.logout()
        exit()
    print("Test Get Order History...")
    orders = api.get_order_history()
    if orders:
        pp.pprint(orders.orders)
        print("\tSuccess!")
    else:
        print("\tFailed\n\n")
        api.logout()
        exit()
    print("Test Logout...")
    if api.logout():
        print("\tSuccess!\n\n")
    else:
        print("\tFailed\n\n")
| 1.765625 | 2 |
adv/farren.py | dl-stuff/dl | 22 | 12766453 | <filename>adv/farren.py
from core.advbase import *
from conf import DEFAULT
class Farren(Adv):
    """Adventurer module for Farren."""

    def prerun(self):
        # Rebind a3_regen to a repeating 1-second timer so the periodic
        # gauge check runs automatically for the whole sim.
        self.a3_regen = Timer(self.a3_regen, 1.0, True).on()

    def s2_proc(self, e):
        # Only the default S2 variant heals and spends the unique gauge.
        if e.group != DEFAULT:
            return
        df = self.dragonform
        self.add_hp(140 * df.utp_gauge / df.max_utp_gauge)
        df.charge_utprep(e.name, -100)

    def a3_regen(self, t):
        # Periodic gauge charge, gated on team amp level >= 1.
        if self.amp_lvl(kind="team", key=3) >= 1:
            self.dragonform.charge_utprep("a3", 1.5)
variants = {None: Farren}
| 2.171875 | 2 |
exercicios/ex 011 a 020/ex013.py | CarlosWillian/python | 0 | 12766454 | print('Vamos te dar um aumento salarial meu garoto')
n1 = float(input('Digite o valor do seu salário atual: R$ '))
print('Seu novo salário com reajuste de 15% é: R$ {:.2f}'.format(n1*1.15))
| 3.59375 | 4 |
python/application.py | mrkem598/housing-insights | 0 | 12766455 | <filename>python/application.py
# -*- coding: utf-8 -*-
"""
flask api
~~~~~~~~~
This is a simple Flask applicationlication that creates SQL query endpoints.
TODO, as of 6/15/2017 none of these endpoints are SQL Injection ready
"""
from flask import Flask, request, Response, abort, json
import psycopg2
from sqlalchemy import create_engine
import logging
from flask_cors import CORS, cross_origin
import math
import sys
#Different json output methods.
# Currently looks like best pick is jsonify, but with the simplejson package pip-installed so that
# jsonify will uitilize simplejson's decimal conversion ability.
import json
import simplejson
from flask import jsonify
from flask.json import JSONEncoder
import calendar
from datetime import datetime, date
import dateutil.parser as dateparser
from flask_sqlalchemy import SQLAlchemy
from flask_restless import APIManager
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base
#######################
# Setup
#######################
logging.basicConfig(level=logging.DEBUG)
application = Flask(__name__)

#######################
# Flask Restless Setup
#######################

# Allow us to test locally if desired.
# Database target is chosen from the command-line arguments the app was
# started with; defaults to the Code for DC remote admin database.
if 'docker' in sys.argv:
    database_choice = 'docker_database'
elif 'remote' in sys.argv:
    database_choice = 'remote_database'
else:
    database_choice = 'codefordc_remote_admin'

# Connection strings live outside version control in secrets.json,
# keyed by the database choice above.
with open('housinginsights/secrets.json') as f:
    secrets = json.load(f)
    connect_str = secrets[database_choice]['connect_str']

logging.info("Connecting to database {}".format(database_choice))
application.config['SQLALCHEMY_DATABASE_URI'] = connect_str
application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(application)

# Reflect the existing database schema into mapped classes (automap);
# no models are declared by hand.
Base = automap_base()
metadata = MetaData(bind=db)
Base.prepare(db.engine, reflect=True)
db.session.commit()

# One mapped class per reflected table.
BuildingPermits = Base.classes.building_permits
Census = Base.classes.census
# This table is not importing correctly
# CensusMarginOfError = Base.classes.census_margin_of_error
Crime = Base.classes.crime
DcTax = Base.classes.dc_tax
Project = Base.classes.project
ReacScore = Base.classes.reac_score
RealProperty = Base.classes.real_property
Subsidy = Base.classes.subsidy
Topa = Base.classes.topa
WmataDist = Base.classes.wmata_dist
WmataInfo = Base.classes.wmata_info

# Models that get auto-generated REST endpoints (see Flask-Restless loop below).
models = [BuildingPermits, Census, Crime, DcTax, Project, ReacScore,
          RealProperty, Subsidy, Topa, WmataDist, WmataInfo
          ]

manager = APIManager(application, flask_sqlalchemy_db=db)
class CustomJSONEncoder(JSONEncoder):
    """JSON encoder that handles dates and arbitrary iterables.

    ``date``/``datetime`` values render as ``YYYY-MM-DD``; any iterable is
    serialized as a list; everything else defers to the base encoder.
    (datetime override pattern: http://flask.pocoo.org/snippets/119/)
    """

    def default(self, obj):
        # ``datetime`` is a subclass of ``date``, so a single check covers
        # both; the original formatted them identically anyway.
        if isinstance(obj, date):
            return datetime.strftime(obj, '%Y-%m-%d')
        try:
            iterator = iter(obj)
        except TypeError:
            # Not iterable either — let the base class raise/handle it.
            return JSONEncoder.default(self, obj)
        return list(iterator)
# apply the custom encoder to the app. All jsonify calls will use this method
application.json_encoder = CustomJSONEncoder

# Allow cross-origin requests. TODO should eventually lock down the permissions on this a bit more strictly, though only allowing GET requests is a good start.
CORS(application, resources={r"/api/*": {"origins": "*"}}, methods=['GET'])

# Should create a new connection each time a separate query is needed so that API can recover from bad queries
# Engine is used to create connections in the below methods
engine = create_engine(connect_str)

# Establish a list of tables so that we can validate queries before executing.
# The connection is opened once at import time and closed right after the
# table list is fetched.
conn = engine.connect()
q = "SELECT tablename FROM pg_catalog.pg_tables where schemaname = 'public'"
proxy = conn.execute(q)
results = proxy.fetchall()
tables = [x[0] for x in results]
application.logger.debug('Tables available: {}'.format(tables))
conn.close()
logging.info(tables)
##########################################
# API Endpoints
##########################################
#######################
# Test endpoints - prove things work
# Is the application running?
@application.route('/')
def hello():
    """Root endpoint: a trivial liveness check for the API."""
    return "The Housing Insights API Rules!"
# Can we access the housinginsights package folder?
import housinginsights.tools.test_util as test_util
@application.route('/housinginsights')
def test_housinginsights_package():
    # Smoke-test endpoint: returns a variable from the housinginsights
    # package to prove the package folder is importable from the app.
    return(test_util.api_demo_variable)
# Can we make blueprints with passed in arguments?
from api.demo_blueprint_constructor import construct_demo_blueprint
# Demo of a blueprint built from a constructor argument, mounted on the app.
created_blueprint = construct_demo_blueprint("This is my choice")
application.register_blueprint(created_blueprint)
# What urls are available (NOTE must have default params)?
from flask import url_for
def has_no_empty_params(rule):
    """Return True if every URL argument of *rule* has a default value.

    Used to filter the site map down to routes that can be visited
    without supplying parameters.
    """
    defaults = () if rule.defaults is None else rule.defaults
    arguments = () if rule.arguments is None else rule.arguments
    return len(arguments) <= len(defaults)
@application.route("/site-map")
def site_map():
    """Return a string listing of (url, endpoint) pairs for all routes
    that are reachable with GET and need no parameters."""
    links = [
        (url_for(rule.endpoint, **(rule.defaults or {})), rule.endpoint)
        for rule in application.url_map.iter_rules()
        # Filter out rules we can't navigate to in a browser
        # and rules that require parameters.
        if "GET" in rule.methods and has_no_empty_params(rule)
    ]
    return str(links)
#######################
#Register blueprints
#######################
from api.summarize_observations import construct_summarize_observations
from api.project_view_blueprint import construct_project_view_blueprint
from api.filter_blueprint import construct_filter_blueprint
from api.zone_facts_blueprint import construct_zone_facts_blueprint
from api.project_extended_constructor import construct_project_extended_blueprint
# Generate blueprints w/ any needed arguments (each constructor returns a
# Flask blueprint bound to the shared SQLAlchemy engine).
sum_obs_blue = construct_summarize_observations('sum_obs',engine)
project_view_blue = construct_project_view_blueprint('project_view',engine)
filter_blue = construct_filter_blueprint('filter', engine)
zone_facts = construct_zone_facts_blueprint('zone_facts',engine)
project_extended = construct_project_extended_blueprint('project_extended',engine, tables, models)

# Register all the blueprints
for blueprint in [sum_obs_blue, project_view_blue, filter_blue, zone_facts, project_extended]:
    application.register_blueprint(blueprint)

# Register Flask Restless blueprints: one read-only, paginated REST
# endpoint per reflected model, mounted under /api/raw.
for model in models:
    # https://github.com/jfinkels/flask-restless/pull/436
    model.__tablename__ = model.__table__.name
    blueprint = manager.create_api_blueprint(model, url_prefix = '/api/raw', results_per_page=100, max_results_per_page=10000, methods=['GET'])
    application.register_blueprint(blueprint)
#######################
#Real endpoints
#######################
@application.route('/api/meta', methods=['GET'])
@cross_origin()
def get_meta():
    '''
    Outputs the meta.json to the front end.

    Returns the raw ``meta`` column of the single row in the ``meta``
    table (already JSON text, served as-is).
    '''
    # Bug fix: the connection was previously never closed, leaking a
    # pooled connection on every request to this endpoint. Release it
    # even when the query raises.
    conn = engine.connect()
    try:
        result = conn.execute("SELECT meta FROM meta")
        row = result.fetchone()
        return row[0]
    finally:
        conn.close()
##########################################
# Start the app
##########################################
if __name__ == "__main__":
    # Bug fix: the original bare ``except: conn.close()`` silently
    # swallowed every error (including KeyboardInterrupt/SystemExit).
    # ``finally`` still releases the module-level connection (close() on
    # an already-closed SQLAlchemy connection is a no-op) but lets
    # errors surface normally.
    try:
        application.run(host="0.0.0.0", debug=True)
    finally:
        conn.close()
| 2.3125 | 2 |
main.py | shakilbd009/py-compute-func | 0 | 12766456 | <reponame>shakilbd009/py-compute-func<gh_stars>0
import base64,json,os,compute,bigquery,firestore,time
import googleapiclient.discovery as gcp
def compute_deployment(data: dict):
    """Create the Compute Engine VM described by *data* and wait for it.

    :param data: dict with keys 'project', 'zone', 'instance_name' and
        'machine_type' describing the instance to create.
    :returns: the compute.Compute_engine wrapper for the new instance.
    """
    engine = gcp.build(serviceName='compute', version='v1', cache_discovery=False)
    vm = compute.Compute_engine(compute=engine,
                                project=data['project'],
                                zone=data['zone'],
                                name=data['instance_name'],
                                svc_account=os.getenv("SVC_ACCOUNT"),
                                machine_type=data['machine_type'])
    # The original wrapped these calls in ``try: ... except: raise``,
    # which is a no-op — exceptions propagate unchanged either way.
    ops = vm.create_compute_engine()
    vm.wait_for_operation(operation=ops['name'])
    return vm
# def my_pubsub_compute_func(event):
def my_pubsub_compute_func(event, context):
    """Triggered from a message on a Cloud Pub/Sub topic.

    Decodes the base64 JSON payload, deploys the described VM, then
    records the deployment in Firestore and BigQuery.

    Args:
        event (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    payload = base64.b64decode(event['data']).decode('utf-8')
    request_data = json.loads(payload)
    vm = compute_deployment(request_data)
    print('vm deployed')
    # Record the deployment; any failure propagates (the original
    # ``except: raise`` re-raised unchanged).
    request_data = vm.get_details(request_data)
    print(firestore.Firestore(request_data).update_deployment())
    print(bigquery.Update_deployment(request_data))
    print({'status': "function executed successfully"})
    return {'status': "function executed successfully"}
| 2.28125 | 2 |
cogs/kill.py | feimaomiao/killbot | 4 | 12766457 | import asyncio
import datetime
import json
import logging
import os
from collections import Counter
from copy import deepcopy as dc
from io import BytesIO
from random import choice as randchoice
from random import randrange as rrange
from re import match, sub
from statistics import mean as avg
from time import time as timetime
import aiohttp
import requests
from discord import Embed as discordembed
from discord import File as discordfile
from lzl import lzfloat, lzint, lzlist
from PIL import Image as img
from PIL import ImageDraw as imgdraw
from PIL import ImageFont as imgfont
# Set color variables (RGB tuples used when compositing the kill image)
BACKGROUND = (10, 10, 10)
BLUE = (0, 255, 255)
RED = (255, 211, 0)
WHITE = (255, 255, 255)

# set font variables (TTF files expected relative to the working directory)
defaultfont = imgfont.truetype("fonts/death.ttf", 70)
namesfont = imgfont.truetype("fonts/lk.ttf", 70)
namesfont_small = imgfont.truetype("fonts/guild.ttf", 50)
systemfont = imgfont.truetype("fonts/systemtext.ttf", 35)
largesystemfont = imgfont.truetype("fonts/systemtext.ttf", 60)
infotext = imgfont.truetype("fonts/info.ttf", 35)

# set formatted items used for items_get function
# NOTE(review): this makes a blocking network request at import time;
# consider lazy-loading or caching to disk.
formatteditems = requests.get(
    "https://raw.githubusercontent.com/broderickhyman/ao-bin-dumps/master/formatted/items.json"
).json()

# get accurate plotting locations and names.
with open("data.json") as file:
    data = json.load(file)

# possibleprefixes is used for formatting the embed
with open("possibleprefixes.json") as file:
    prefixes = json.load(file)
# substituting useless items in the item name
def substitute(name):
    """Map an Albion item key or localized name to a short display name.

    Special-cases crystal league tokens, faction transport packs and HCE
    maps; otherwise strips tier/rarity prefixes and shortens
    "Partially Full" to "Half".
    """
    crystal = match(r"T4_TOKEN_CRYSTALLEAGUE_LVL_(\d{1,2})_S(\d{1,2})", name)
    if crystal:
        return f"S{crystal.group(2)} Crystal League Token (Lvl. {crystal.group(1)})"

    caravan = match(r"QUESTITEM_CARAVAN_TRADEPACK_([A-Z]{5,8})_(LIGHT|MEDIUM|HEAVY)",
                    name)
    if caravan:
        city = {
            "SWAMP": "Thetford",
            "FOREST": "Lymhurst",
            "STEPPE": "Bridgewatch",
            "HIGHLAND": "Martlock",
            "MOUNTAIN": "Fort Sterling"
        }[caravan.group(1)]
        pack_tier = {"LIGHT": 1, "MEDIUM": 2, "HEAVY": 3}[caravan.group(2)]
        return f"Tier {pack_tier} {city}'s Faction Transport"

    hce = match(r"QUESTITEM_EXP_TOKEN_D(\d{1,2})_T\d.+", name)
    if hce:
        return f"HCE Map (Lvl. {hce.group(1)})"

    # Strip tier/rarity prefixes; order preserved from the original list.
    for prefix in [
            "Beginner's ", "Novice's ", "Journeyman's ", "Adept's ", "Expert's ",
            "Master's ", "Grandmaster's ", "Elder's ", "Uncommon ", "Rare ",
            "Exceptional ", "Novice ", "Journeyman ", "Adept ", "Expert ",
            "Master ", "Grandmaster ", "Elder ", "Major ", "Minor ", "Danglemouth "
    ]:
        name = sub(prefix, "", name)
    return sub("Partially Full", "Half", name)
# get tier of items
def loadtier(i):
    """Return a '[tier.enchant]' prefix for an item key, or '' if the key
    carries no tier (e.g. quest items)."""
    tier_match = match(r"T([1-8]).*", i)
    if tier_match is None:
        return ""
    enchant_match = match(r".+@([1-3])", i)
    # No '@n' suffix means enchantment level 0.
    enchantment = enchant_match.group(1) if enchant_match else 0
    return f"[{tier_match.group(1)}.{enchantment}]"
# Return item name from given unique key
# Return item name from given unique key
def items_get(items, quality=1):
    """Build a display name like '[4.1]Bag(NM)' for an item unique key.

    Looks the key up in the module-level ``formatteditems`` dump for its
    EN-US localized name; ``quality`` 0-5 maps to a short suffix
    (none/NM/GD/OT/EX/MP). Falls back to ``substitute(str(items))`` on
    any failure (unknown key, missing locale, bad quality).
    """
    try:
        return loadtier(items) + substitute([
            i["LocalizedNames"]["EN-US"]
            for i in formatteditems
            if i["UniqueName"] == str(items)
        ][0]) + '{}'.format({
            0: "",
            1: "(NM)",
            2: "(GD)",
            3: "(OT)",
            4: "(EX)",
            5: "(MP)"
        }[quality])
    # Deliberately broad: any lookup problem degrades to the raw key.
    except Exception as e:
        return substitute(str(items))
# convert the transparent nodes into orange nodes like the background
# Parameters: imageobj, transparent color to be converted into.
def convert_to_transparent(imageobj, transparent):
    """Flatten (near-)fully-transparent pixels onto a solid colour.

    Converts *imageobj* to RGBA and replaces every pixel whose alpha is
    <= 10 with the *transparent* colour tuple, so the item render blends
    into the kill-image background.
    """
    rgba = imageobj.convert("RGBA")
    # index 3 of each pixel tuple is the alpha channel
    pixels = [transparent if px[3] <= 10 else px for px in rgba.getdata()]
    rgba.putdata(pixels)
    return rgba
# async function to check if itemid is a double handed weapon.
# parameter: name -> unique key of weapon
# async function to check if itemid is a double handed weapon.
# parameter: name -> unique key of weapon
async def is_two_main_hand(name):
    """Ask the Albion gameinfo API whether *name* is a two-handed weapon.

    Returns False for empty/None names and on any request/parse failure
    (network errors are logged by exception class name only).
    """
    if name is None or name == "":
        return False
    try:
        async with aiohttp.ClientSession(
                headers={"Connection": "close"}) as session:
            async with session.get(
                    f"http://gameinfo.albiononline.com/api/gameinfo/items/{name}/data"
            ) as resp:
                respond = await resp.json()
                return respond["twoHanded"]
    except Exception as e:
        # Print error type
        logging.warning(f"in is_two_main_hand, {e.__class__.__name__}")
        return False
# async function to get image from the given link
# async function to get image from the given link
async def get_image(link, item, session, quality=1, debugchannel=None, count=0):
    """Download an item render and return it as a 180x180 RGBA PIL image
    with fully-transparent pixels flattened to BACKGROUND.

    Retries recursively (1s sleep between attempts) and returns False
    after 10 failed attempts; failures are reported to *debugchannel*
    when one is supplied.
    """
    async with session.get(link + item + f".png?quality={quality}") as resp:
        '''
        use bytesio object to load the respond content from online
        -> use image module to load the image from bytesio object
        -> resize the image object to 180x180 size
        -> convert the image to transparent
        '''
        try:
            tobyte = BytesIO(await resp.content.read())
            tobyte.seek(0)
            return convert_to_transparent(
                img.open(tobyte).resize((180, 180), img.ANTIALIAS), BACKGROUND)
        except Exception as e:
            await asyncio.sleep(1)
            if debugchannel:
                await debugchannel.send(
                    debugchannel.guild.owner.mention +
                    f"{e.__class__.__name__} in get_image. Keys include {item}, {quality}"
                )
            # Give up after 10 attempts.
            if count == 10:
                return False
            return await get_image(link, item, session, quality, debugchannel,
                                   count + 1)
async def get_iw_json(items, session, count=0, sendchannel=None):
    """Fetch market price JSON for *items* from albion-online-data.

    On a JSON decode failure (the API's 404/error pages) it retries with
    a growing back-off; at the 5th retry it attempts one synchronous
    ``requests`` fetch before falling back to the async retry loop.
    """
    # Lambda function to return the api link
    getlink = lambda x: "https://www.albion-online-data.com/api/v2/stats/prices/" + x
    try:
        async with session.get(getlink(items)) as resp:
            return await resp.json(content_type=None)
    # happens when the returned item is 404 error
    except json.decoder.JSONDecodeError:
        # Warn (and notify the debug channel) only on the first failure.
        if count == 0:
            logging.warning("Gearworth Error {}".format(items))
            if sendchannel:
                await sendchannel.send(sendchannel.guild.owner.mention +
                                       f" Gearworth error for {items}")
        if count > 5:
            count = 5
        if count == 5:
            # Last resort: try a blocking requests call once, then keep
            # retrying asynchronously from count 1.
            try:
                return requests.get(getlink(items)).json()
            except Exception as e:
                return await get_iw_json(items,
                                         session,
                                         1,
                                         sendchannel=sendchannel)
        await asyncio.sleep(count)
        return await get_iw_json(items,
                                 session,
                                 count + 1,
                                 sendchannel=sendchannel)
# Function to get the average price from Lymhurst, Martlock, Bridgewatch, FortSterling and Thetford
def _getaverage(x, y):
fnl = []
for i in x:
if i['quality'] == y and i["sell_price_min"] != 0:
fnl.append(i["sell_price_min"])
if len(fnl) == 0:
fnl = [i["sell_price_min"] for i in x if i["sell_price_min"] != 0]
return 0 if len(fnl) == 0 else fnl[0] if len(fnl) == 1 else avg(
[i for i in fnl if i <= 3 * avg(sorted(fnl)[:-1])])
# determines gear worth
async def calculate_gearworth(person, session, debugchannel=None):
    """Estimate the total silver value of a player's equipped gear.

    Counts duplicate (item, quality) pairs and multiplies each unique
    pair's average market price by its count. KeyErrors from the price
    API response are logged and that item is skipped.
    """
    # initialise list of inventory and total value
    loi = []
    total = 0
    # unpack user items, get gear
    for position, gear in person["Equipment"].items():
        # Gear is sometimes None if the user did not use the value
        if gear is not None:
            loi.append((gear["Type"], gear['Quality']))
    # looping through items in counter
    for items, count in Counter(loi).items():
        try:
            total += _getaverage(
                await get_iw_json(items[0], session, sendchannel=debugchannel),
                items[1],
            ) * count
        except KeyError:
            logging.info("Time: {0:20} KeyError: Item {1}".format(
                datetime.datetime.now().strftime("%x %X:%f"), items[0]))
    return total
async def drawplayer(player,
                     kav,
                     totaldamage=0,
                     killer=True,
                     peoplegettingfame=0,
                     debugchannel=None):
    """Render one player's 600x1200 panel for the kill image.

    *kav* is the column heading ("Killer"/"Assist"/"Victim"). Returns the
    PIL image, or False when an item render could not be downloaded.
    """
    # Base image link to load the images
    _baseimagelink = "https://render.albiononline.com/v1/item/"
    # kav is used to determine if the player is killer, assist or victim
    # Create a new image
    playerimg = img.new("RGBA", (600, 1200), BACKGROUND)
    # set lambda functions to put text in the middle of width and height
    wmiddle = lambda x: (600 - x) / 2
    hmiddle = lambda x, y, z: x + (y - z) / 2
    # set drawing image for putting text
    drawimg = imgdraw.Draw(playerimg)
    # Get width and height of text
    width, height = drawimg.textsize(kav, font=defaultfont)
    # Set a text for the heading, padding of 10.
    drawimg.text((wmiddle(width), hmiddle(10, 50, height)),
                 text=kav,
                 font=defaultfont,
                 fill=RED)
    # height after this starts from 65.0
    width, height = drawimg.textsize(player["Name"], font=namesfont)
    drawimg.text((wmiddle(width), hmiddle(65, 50, height)),
                 text=player["Name"],
                 font=namesfont,
                 fill=WHITE)
    # After this line of the text will start at height 140
    # Get user guild name as shown in the game
    fullguildname = "{0}{1}".format(player["AllianceName"], player["GuildName"])
    # Get the width and height of the guild name in text
    width, height = drawimg.textsize(fullguildname, font=namesfont_small)
    drawimg.text((wmiddle(width), hmiddle(150, 25, height)),
                 text=fullguildname,
                 font=namesfont_small,
                 fill=BLUE)
    # set a variable for easy access
    equipments = player["Equipment"]
    """
    File structure for data.json:
    [itemname, photolocation, textspace]
    itemname: UNIQUE_NAME key for the using item, can be used as a key to look for the image online from the database
    photolocation: location data on the image, it is a 2 point tuple that helps determine the x and y value of the upper left corner of the photo
    textspace: location data for the count of the item. Usually only useful in potion slot and food slot, used to determine the count of the item.
    """
    async with aiohttp.ClientSession(
            headers={"Connection": "keep-alive"}) as session:
        # unpacks the data
        for item, imgspace, textspace in data:
            # check if the item exists
            if equipments[item]:
                # downloads image
                loadingimg = await get_image(_baseimagelink,
                                             equipments[item]["Type"], session,
                                             equipments[item]["Quality"],
                                             debugchannel)
                if loadingimg == False:
                    return False
                # puts the image on the background using the given data
                playerimg.paste(loadingimg, imgspace)
                # put the count on the pasted image using the given data
                drawimg.text(textspace,
                             text=str(equipments[item]["Count"]),
                             font=systemfont,
                             fill=WHITE)
    # Check if user is using a two-handed weapon
    try:
        twohand = await is_two_main_hand(equipments["MainHand"]["Type"])
    except (AttributeError, TypeError) as e:
        twohand = False
    if twohand and equipments["MainHand"]:
        # downloads the image again from the database
        # NOTE(review): ``Count`` is passed where get_image expects a
        # *quality* — probably should be equipments["MainHand"]["Quality"];
        # confirm before changing.
        async with aiohttp.ClientSession(
                headers={"Connection": "close"}) as session:
            content = await get_image(_baseimagelink,
                                      equipments["MainHand"]["Type"],
                                      session,
                                      equipments["MainHand"]["Count"],
                                      debugchannel=debugchannel)
        # make the image transparent
        content.putalpha(100)
        playerimg.paste(content, (400, 380))
        # provides the count
        drawimg.text((533, 490),
                     text=str(equipments["MainHand"]["Count"]),
                     font=systemfont,
                     fill=WHITE)
    # Calculate their gear worth
    async with aiohttp.ClientSession(
            headers={"Connection": "close"}) as session:
        gearworth = await calculate_gearworth(player, session, debugchannel)
    # Set IP
    width, height = drawimg.textsize("IP: {}".format(
        round(player["AverageItemPower"], 2)),
                                     font=largesystemfont)
    drawimg.text((wmiddle(width), 930),
                 "IP: {}".format(round(player["AverageItemPower"], 2)),
                 font=largesystemfont,
                 fill=WHITE)
    # Killers/assists show their damage share; victims show fame split.
    if killer:
        damageline = "Damage done:\n{}%[{}/{}]".format(
            lzfloat(player["DamageDone"] / totaldamage * 100).round_sf(4),
            round(int(player["DamageDone"])), totaldamage)
    else:
        damageline = "Death Fame: {} [{}]\n ={}/particiant".format(
            player["DeathFame"], peoplegettingfame,
            player["DeathFame"] // peoplegettingfame)
    width, height = drawimg.textsize(damageline, font=infotext)
    # Both death fame and the damage done are multiline texts.
    drawimg.multiline_text((wmiddle(width), hmiddle(1000, 70, height)),
                           damageline,
                           font=infotext,
                           fill=WHITE)
    # Convert the gear worth into integer and round the gear worth to 5 significant figures
    gearworthline = "Estimated Gear Worth: {:,}".format(
        lzint(gearworth).round_sf(5))
    width, height = drawimg.textsize(gearworthline, font=infotext)
    # Set gear worth (highlighted in RED for worth >= 1M silver)
    drawimg.text((wmiddle(width), hmiddle(1120, 40, height)),
                 gearworthline,
                 font=infotext,
                 fill=(RED if gearworth >= 1000000 else WHITE))
    return playerimg
class kill:
    """Wraps one kill event from the Albion killboard API: normalizes the
    payload in __init__, renders the kill image with draw(), and builds
    the Discord embed with create_embed()."""

    def __init__(self, kd, debugchannel=None):
        """
        Usage:
            variable = kill(kill json item)
        """
        self.debugchannel = debugchannel
        self.starttime = timetime()
        # Deep-copy so normalization below never mutates the caller's data.
        kd = dc(kd)
        self.kd = kd
        self.killer = kd["Killer"]
        # Track killer
        for i in kd["Participants"]:
            if i["Id"] == kd["Killer"]["Id"]:
                self.killer = dc(i)
                break
        # Ensure the killer always has a numeric DamageDone.
        try:
            if not self.killer["DamageDone"]:
                self.killer["DamageDone"] = 0
        except KeyError:
            self.killer["DamageDone"] = 0
        # track victim
        self.victim = kd["Victim"]
        # Get the people who did the most damage
        try:
            self.assist = sorted(
                [i for i in kd["Participants"] if i["DamageDone"] > 0],
                key=lambda x: x["DamageDone"],
                reverse=True)[0]
        # Happens when the amount of participants is less than 1(even though I don't know how did it happen)
        except IndexError:
            self.assist = dc(self.killer)
        # Set type of solo kill or group kill
        # Is used to show if 3 people is shown on the final kill or 2
        self.solokill = (self.killer["Id"] == self.assist["Id"])
        # Set alliance names to the one similar in game (bracketed).
        if self.killer["AllianceName"]:
            self.killer["AllianceName"] = "[{}]".format(
                self.killer["AllianceName"])
        if self.assist["AllianceName"]:
            self.assist["AllianceName"] = "[{}]".format(
                self.assist["AllianceName"])
        if self.victim["AllianceName"]:
            self.victim["AllianceName"] = "[{}]".format(
                self.victim["AllianceName"])
        # Set victim guild if victim does not have a guild
        if not self.killer["GuildName"]:
            self.killer["GuildName"] = "- - - - -"
        if not self.assist["GuildName"]:
            self.assist["GuildName"] = "- - - - -"
        if not self.victim["GuildName"]:
            self.victim["GuildName"] = "- - - - -"
        self.totaldamage = int(
            sum([i["DamageDone"] for i in kd["Participants"]]))
        self.peoplegettingfame = len(
            [i for i in kd["GroupMembers"] if i["KillFame"] > 0])
        # Get the list of participants that dealt damage
        self.participants = sorted(
            [i for i in kd["Participants"] if i["DamageDone"] != 0],
            key=lambda x: x["DamageDone"],
            reverse=True)
        # Bracket alliance names that were not already bracketed above.
        for i in self.participants:
            if i["AllianceName"] and not match(r"\[.*\]", i["AllianceName"]):
                i["AllianceName"] = "[{}]".format(i["AllianceName"])
        self.eventid = kd["EventId"]
        # Use regex and datetime module to get the time of killing in UTC
        dt = match(
            r"(\d{4})\-(\d{2})\-(\d{2})T(\d{2})\:(\d{2})\:(\d{2}\:*)\.(\d+)Z",
            kd["TimeStamp"])
        self.eventtime = datetime.datetime(int(dt.group(1)), int(dt.group(2)),
                                           int(dt.group(3)), int(dt.group(4)),
                                           int(dt.group(5)), int(dt.group(6)),
                                           int(dt.group(7)[:6]))
        # Fall back to safe non-zero denominators for the later divisions.
        if self.peoplegettingfame == 0:
            self.peoplegettingfame = len(kd["GroupMembers"])
            logging.warning("Peoplegetting fame error: {}".format(self.eventid))
        if self.totaldamage == 0:
            self.totaldamage = 100
            logging.warning("totaldamage error: {}".format(self.eventid))
        # Gear worth, filled in by draw()/inventory().
        self.gw = 0

    # Function to draw a whole set of gear on a blank template
    async def draw(self):
        """Render the full 1800x1200 kill image, save it under temp/, and
        populate self.gw (victim gear worth) and self.inv (inventory)."""
        background = img.new("RGBA", (1800, 1200), BACKGROUND)
        killer_pic = False
        victim_pic = False
        # Retry until both panels rendered (drawplayer returns False on
        # download failure).
        while int(bool(killer_pic)) + int(bool(victim_pic)) < 2:
            # load pictures for each player
            killer_pic = await drawplayer(self.killer,
                                          "Killer",
                                          totaldamage=self.totaldamage,
                                          killer=True,
                                          debugchannel=self.debugchannel)
            victim_pic = await drawplayer(
                self.victim,
                "Victim",
                killer=False,
                peoplegettingfame=self.peoplegettingfame,
                debugchannel=self.debugchannel)
        if self.solokill:
            background.paste(killer_pic, (150, 0))
            background.paste(victim_pic, (1050, 0))
        else:
            assist_pic = False
            while bool(assist_pic) != True:
                assist_pic = await drawplayer(self.assist,
                                              "Assist",
                                              killer=True,
                                              totaldamage=self.totaldamage,
                                              debugchannel=self.debugchannel)
            background.paste(killer_pic, (0, 0))
            background.paste(assist_pic, (600, 0))
            background.paste(victim_pic, (1200, 0))
        self.fileloc = f"temp/{self.eventid}.png"
        background.save(self.fileloc, "png")
        # returns gear worth
        async with aiohttp.ClientSession(
                headers={"Connection": "close"}) as session:
            self.gw = round(await calculate_gearworth(self.victim, session,
                                                      self.debugchannel))
        self.inv = await self.inventory()
        return background

    # returns a tuple of 3 values, [kill/assist] name, guild(alliance) and damage[percentage]
    @property
    def assists(self):
        # list of names for participants
        fn = [i["Name"] for i in self.participants]
        # List of guild names
        guild = [(i["AllianceName"] +
                  i["GuildName"] if i["GuildName"] else "- - - - -")
                 for i in self.participants]
        # list of damage/percent of total damage
        perc = [
            "{:4}[{}%]".format(
                round(i["DamageDone"]),
                round(i['DamageDone'] / self.totaldamage * 100, 2))
            for i in self.participants
        ]
        # return joins (newline-joined columns for the embed fields)
        return ("\n".join(fn), "\n".join(guild), "\n".join(perc))

    def gettype(self, iskiller=False, isvictim=False, isassist=False):
        """Pick embed flavour text/colour based on how the followed
        entities relate to this kill (friendly fire, solo, juicy, etc.).
        Returns (title, description, colour)."""
        if (iskiller or isassist) and isvictim:
            useitem = prefixes["ffire"]
            color = 0xae00ff
        elif self.solokill and len(self.participants) == 1 and iskiller:
            useitem = prefixes["solo"]
            color = 0x00ff00 if self.gw <= 2500000 else 0xfa77aa
        elif (iskiller or isassist) and self.gw > 2500000:
            useitem = prefixes["juicyk"]
            color = 0xfa77aa
        elif iskiller:
            useitem = prefixes["kill"]
            color = 0x00ff00
        elif isassist:
            useitem = prefixes["assist"]
            color = 0x00ff00
        elif isvictim and self.gw > 2500000:
            useitem = prefixes["juicyd"]
            color = 0x3131b2
        elif isvictim:
            useitem = prefixes["death"]
            color = 0xd42f2f
        else:
            useitem = prefixes["juicy"]
            color = 0x00ffff
        return (
            f"{self.victim['Name']} was killed by {self.killer['Name']} for {self.victim['DeathFame']} kill fame"
            if isvictim else
            f"{self.killer['Name']} killed {self.victim['Name']} for {self.victim['DeathFame']} kill fame. :{randchoice(useitem['emoji'])}:",
            f"{randchoice(useitem['choices'])}", color)

    async def inventory(self):
        """Price the victim's carried inventory, add it to self.gw, and
        return newline-joined (names, counts, worths) columns — split in
        two when a column would exceed Discord's 1024-char field limit."""
        stuff = []
        async with aiohttp.ClientSession(
                headers={"Connection": "close"}) as session:
            for i in [j for j in self.victim["Inventory"] if j is not None]:
                itemworth = _getaverage(
                    await get_iw_json(i["Type"],
                                      session,
                                      sendchannel=self.debugchannel),
                    i["Quality"])
                stuff.append(
                    (items_get(i["Type"],
                               i["Quality"]), int(i["Count"]), int(itemworth)))
        for i in stuff:
            self.gw += i[2] * i[1]
        sortedstuff = sorted(stuff, key=lambda x: x[2], reverse=True)
        rs = lambda x, y: "\n".join([str(i[int(x)]) for i in tuple(y)])
        # NOTE(review): the generator variable ``x`` is unused — this
        # checks column 0's length three times instead of columns 0-2;
        # likely meant ``rs(x, sortedstuff)``. Confirm before changing.
        if any(len(rs(0, sortedstuff)) > 1024 for x in range(0, 3)):
            s0, s1 = (lzlist(sortedstuff).split_to(2))
            return (rs(0, s0), rs(1, s0), rs(2, s0), rs(0, s1), rs(1, s1),
                    rs(2, s1), True)
        return (rs(0, sortedstuff), rs(1, sortedstuff), rs(2, sortedstuff), "",
                "", "", False)

    def create_embed(self, followinglists):
        """Build the Discord embed (and attached image file) for this
        kill; *followinglists* holds the player/guild ids the channel
        follows and decides the embed's flavour."""
        self.file = discordfile(self.fileloc, filename=f"{self.eventid}.png")
        # find kill type
        iskiller = self.killer["Id"] in followinglists or self.killer[
            "GuildId"] in followinglists
        isvictim = self.victim["Id"] in followinglists or self.victim[
            "GuildId"] in followinglists
        isassist = False
        for i in [i for i in self.kd["Participants"] if i["DamageDone"] > 0]:
            if i["Id"] in followinglists or i["GuildId"] in followinglists:
                isassist = True
        localtitle, localdescription, color = self.gettype(
            iskiller, isvictim, isassist)
        # Create discord embed object
        self.embed = None
        self.embed = discordembed(
            title=localtitle,
            url=f"https://albiononline.com/en/killboard/kill/{self.eventid}",
            description=localdescription + "!" * rrange(1, 3),
            color=color,
            timestamp=self.eventtime)
        # derives image link from eventid as uploads are done in draw() function
        self.embed.set_image(url=f"attachment://{self.eventid}.png")
        self.embed.set_footer(text="Local Kill time: ")
        # get an assist list
        self.assistlist = self.assists
        # This step may encounter an error where no one dealt damage
        # These two lines fixes the output and prevent httperror where value is None
        if self.assistlist == ("", "", ""):
            # Forcibly set assistlist to a tuple
            self.assistlist = (self.killer["Name"], self.killer["GuildName"],
                               "100[100%]")
        # Add in values for the embed
        self.embed.add_field(name="Killers", value=self.assistlist[0])
        self.embed.add_field(name="Guild",
                             value=self.assistlist[1],
                             inline=True)
        self.embed.add_field(name="Damage",
                             value=self.assistlist[2],
                             inline=True)
        # check if victim's inventory is empty
        if len([i for i in self.victim["Inventory"] if i is not None]) > 0:
            i0, c0, v0, i1, c1, v1, lis2 = self.inv
            # adds embed field for victim's inventory
            self.embed.add_field(name="Victim's Inventory:",
                                 value=i0,
                                 inline=True)
            self.embed.add_field(name="Amount", value=c0, inline=True)
            self.embed.add_field(name="Worth est.", value=v0, inline=True)
            # Second set of columns when the inventory was split in two.
            if lis2:
                self.embed.add_field(name="Inventory", value=i1, inline=True)
                self.embed.add_field(name="Amount", value=c1, inline=True)
                self.embed.add_field(name="Worth est.", value=v1, inline=True)
        # adds embed field for the total gear worth.
        self.embed.add_field(name="Estimated Victim's Total Worth:",
                             value="{:,}".format(self.gw),
                             inline=False)
        '''
        returns two items:
        self.embed: the embed file to be sent
        self.file: the file object that has to be sent together with the embed
        '''
        return (self.embed, self.file)
| 2.375 | 2 |
tests/test_codemelli.py | HanifBirgani/codemelli | 3 | 12766458 | <gh_stars>1-10
#!/usr/bin/env python
"""Tests for `codemelli` package."""
import pytest
from re import search
from codemelli import codemelli
# -- city_codes_data / validator ------------------------------------------
# NOTE(review): these assume 1493933957 is a checksum-valid national id
# and 1493933958 is not — confirm against codemelli.validator's algorithm.

def test_city_codes_data_returns_dict_type():
    assert isinstance(codemelli.city_codes_data(), dict)


def test_validator_valid_input_is_true():
    assert codemelli.validator(1493933957) is True


def test_validator_invalid_input_is_false():
    assert codemelli.validator(1493933958) is False


# validator must reject every non-int/str-of-10-digits input type.
def test_validator_raises_valueerror_on_bool_input():
    with pytest.raises(ValueError):
        codemelli.validator(True)


def test_validator_raises_valueerror_on_dict_input():
    with pytest.raises(ValueError):
        codemelli.validator({'test_key': 'test_value'})


def test_validator_raises_valueerror_on_list_input():
    with pytest.raises(ValueError):
        codemelli.validator([1, 2, 3])


def test_validator_raises_valueerror_on_tuple_input():
    with pytest.raises(ValueError):
        codemelli.validator((1, 2))


def test_validator_raises_valueerror_on_string_input():
    with pytest.raises(ValueError):
        codemelli.validator('abcde')


def test_validator_raises_valueerror_on_less_than_10_number_string_input():
    with pytest.raises(ValueError):
        codemelli.validator('12345678')


def test_validator_raises_valueerror_on_less_than_10_integer_input():
    with pytest.raises(ValueError):
        codemelli.validator(1234567)


def test_validator_raises_valueerror_on_less_more_10_integer_input():
    with pytest.raises(ValueError):
        codemelli.validator(12345671234565)


# strict mode additionally validates the 3-digit city-code prefix.
def test_validator_strict_raises_valueerror_on_invalid_city_code_input():
    with pytest.raises(ValueError):
        codemelli.validator(9995872448, strict=True)
# -- _get_remainder: accepts int/list, rejects str/tuple -------------------

def test_get_remainder_raises_typeerror_on_string_input():
    with pytest.raises(TypeError):
        codemelli._get_remainder('string')


def test_get_remainder_raises_typeerror_on_tuple_input():
    with pytest.raises(TypeError):
        codemelli._get_remainder((1, 2))


def test_get_remainder_does_not_raise_typeerror_on_int_input():
    try:
        codemelli._get_remainder(1234567890)
    except Exception:
        assert False


def test_get_remainder_does_not_raise_typeerror_on_list_input():
    try:
        codemelli._get_remainder([1, 2, 3])
    except Exception:
        assert False
# -- generator / lookup ----------------------------------------------------
# generator's optional city-code argument must be exactly 3 digits.

def test_generator_raises_a_valueerror_on_input_integer_less_than_3():
    with pytest.raises(ValueError):
        codemelli.generator(12)


def test_generator_raises_a_valueerror_on_input_integer_more_than_3():
    with pytest.raises(ValueError):
        codemelli.generator(1234)


def test_generator_raises_a_valueerror_on_input_string():
    with pytest.raises(ValueError):
        codemelli.generator('abc')


def test_generator_returns_a_10_character_string():
    assert search(r'^\d{10}$', codemelli.generator())


def test_generator_returns_a_10_character_string_with_city_code_input():
    assert search(r'^\d{10}$', codemelli.generator('123'))


def test_lookup_returns_dict_on_valid_input():
    assert isinstance(codemelli.lookup('123'), dict)


def test_lookup_returns_none_on_notfound_key():
    assert codemelli.lookup('999') is None
| 2.609375 | 3 |
lecture-6/game_of_life/conway_v3.py | evarga/parallel-computing-lectures | 3 | 12766459 | """
Variant of the base class with parallelized, pipelined, and vectorized operations.
The technique pertaining to convolution was reused from https://stackoverflow.com/a/36968434.
Try executing this program with (the other variants will crawl):
python3 conway_v3.py --board-size 160 --interval 20 --configuration patterns/garden-of-eden.cells 30 30
"""
import numpy as np
import dask.array as da
from scipy.ndimage import convolve
from conway_base import Cell, ConwayBase
class ConwayV3(ConwayBase):
    """Game-of-Life variant backed by dask arrays: neighbour counting is a
    chunked convolution, rule application runs per block."""

    def create_buffers(self):
        # Wrap the base-class board in a chunked dask array and build the
        # 3x3 neighbour mask (centre zeroed so a cell doesn't count itself).
        self.board = da.from_array(self.board, chunks=("auto", "auto"))
        self._mask = np.ones((3, 3))
        self._mask[1, 1] = 0

    def _process_cell(self, block, block_id=None):
        """Apply Conway's rules to one dask block of neighbour counts.

        *block* holds live-neighbour counts; *block_id* locates the block
        so the matching slice of the current board can be fetched.
        """
        rows, cols = block.shape
        start_row = block_id[0] * rows
        start_col = block_id[1] * cols
        # We presume that this slicing will fit into memory.
        board_slice = self.board[start_row:(start_row + rows), start_col:(start_col + cols)].compute()
        # Apply the rules of the game: <2 or >3 neighbours dies, exactly 3
        # is born, exactly 2 keeps its previous state.
        block[np.logical_or(block < 2, block > 3)] = Cell.DEAD
        block[block == 3] = Cell.LIVE
        block[block == 2] = board_slice[block == 2]
        return block

    def prepare_next_board(self, steps):
        """Advance the board *steps* generations and return the final
        (materialized) numpy board."""
        for _ in range(steps):
            # depth=1 overlap gives each chunk its border neighbours.
            num_live_neighbors = self.board.map_overlap(convolve, depth=1, boundary='none',
                                                        weights=self._mask, mode='constant', cval=0)
            # NOTE(review): ``np.int`` is removed in NumPy >= 1.24;
            # ``int`` or ``np.int_`` would be the modern spelling.
            next_board = num_live_neighbors.map_blocks(self._process_cell, dtype=np.int).compute()
            self.board = da.from_array(next_board, chunks=("auto", "auto"))
        return next_board
return next_board
if __name__ == '__main__':
    # Build the game from CLI arguments (see module docstring for an example
    # invocation) and run the interactive simulation.
    game = ConwayV3(ConwayV3.parse_command_line_args())
    game.simulate()
| 2.78125 | 3 |
logfetcher3000/logfetcher3000.py | am401/logfetcher3000 | 2 | 12766460 | <gh_stars>1-10
import argparse
import datetime
import json
import logging
import os
import re
import requests
import sys
def parse_args():
    """Parse the log fetcher's command line options and return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose',
                        help='Toggle verbose debug logging',
                        action='store_true')
    parser.add_argument('-f', '--file',
                        default='links.json',
                        help='Specify file to load in to application',
                        type=file_exists)
    return parser.parse_args()
def file_exists(filename):
    """argparse type-checker: return *filename* unchanged if it is an
    existing regular file, otherwise raise ArgumentTypeError."""
    if not os.path.isfile(filename):
        raise argparse.ArgumentTypeError(
            "The file {} does not exist".format(filename))
    return filename
def get_links_from_file(filename):
    """Read in JSON file containing list of links
    and convert all characters to lowercase.

    :param filename: path to a JSON file mapping log types to URL lists
    :return: dict with lower-cased keys and lower-cased URL strings
    """
    try:
        with open(filename) as json_file:
            url_json_object = json.load(json_file)
        # Normalise keys and URLs to lower case so later matching is
        # case-insensitive.
        url_json_object = {k.lower(): [i.lower() for i in v] for k, v in url_json_object.items()}
    except IOError as e:
        print("An error has occurred: {}".format(e))
        sys.exit()
    except ValueError as e:
        # Invalid JSON. Previously this branch fell through to the return
        # statement and crashed with UnboundLocalError; exit cleanly instead,
        # matching the IOError handling above.
        print("An error has occurred: {}".format(e))
        sys.exit()
    return url_json_object
def get_log(url, filename):
    """
    Download the log files and save them locally.
    Set User-Agent when accessing websites to LogFetcher3000.
    :param url: list
    :param filename: string
    """
    headers = {'User-Agent': 'LogFetcher3000'}
    try:
        response = requests.get(url, allow_redirects=True, headers=headers, timeout=5)
    except requests.exceptions.Timeout as e:
        logging.error("Connection timeout: {}".format(e))
        return
    if response.status_code != 200:
        logging.error("Unexpected HTTP response code received: {}".format(response.status_code))
        return
    with open(filename, 'wb') as log_file:
        log_file.write(response.content)
def access_links_loop(access_links):
    """Download every access log listed in *access_links*.

    Empty entries are skipped silently; entries without an http(s)://
    prefix are logged as errors and skipped.
    """
    for link in access_links:
        if not link:
            continue
        if not re.search('^https?://', link):
            logging.error("Incorrect protocol used: {}".format(link))
            continue  # Move on to the next iteration in the loop
        # NOTE(review): index 3 is the first path segment of the URL
        # (['http:', '', host, segment]); links with no path segment would
        # raise IndexError -- same as the original behaviour, confirm intended.
        log_filename = link.split('/')[3]
        get_log(link, '{}_access_{}.log'.format(log_filename, date_stamp))
def error_links_loop(error_links):
    """Download every error log listed in *error_links*.

    Empty entries are skipped silently; entries without an http(s)://
    prefix or without an ``account_name=`` query parameter are logged as
    errors and skipped.
    """
    for link in error_links:
        if not link:
            continue
        if not re.search('^https?://', link):
            logging.error("Incorrect protocol used: {}".format(link))
            continue  # Move on to the next iteration in the loop
        if not re.search('.*account_name=', link):
            logging.error("Incorrect link detected: {}".format(link))
            continue
        # The account name embedded in the query string names the local file.
        log_filename = re.findall('.*account_name=([^&]*)', link)[0]
        get_log(link, '{}_error_{}.log'.format(log_filename, date_stamp))
if __name__ == '__main__':
    args = parse_args()
    # Setup logging for error messages
    logger = logging.getLogger()
    logger.setLevel(logging.ERROR)
    output_file_handler = logging.FileHandler("debug.log")
    stdout_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s %(levelname)s:%(message)s')
    output_file_handler.setFormatter(formatter)
    stdout_handler.setFormatter(formatter)
    # Verbose mode mirrors the error log to stdout in addition to debug.log.
    if args.verbose:
        logger.addHandler(output_file_handler)
        logger.addHandler(stdout_handler)
    else:
        logger.addHandler(output_file_handler)
    links_filename = args.file
    # Timestamp keeps downloaded log filenames unique per run.
    date_stamp = datetime.datetime.today().strftime('%m%d%Y_%H%M%S')
    url_object = get_links_from_file(links_filename)
    access_links = url_object["access"]
    error_links = url_object["error"]
    access_links_loop(access_links)
    error_links_loop(error_links)
| 2.984375 | 3 |
tests/unit/oil/barrels/aws/test_autoscaling.py | kbougy/oil | 3 | 12766461 | import unittest
from unittest.mock import MagicMock, patch
from oil.barrels.aws import AutoScalingBarrel
class AutoScalingBarrelTestCase(unittest.TestCase):
    """Unit tests for AutoScalingBarrel using mocked boto3 clients/paginators."""

    def client_mock(self, fixture):
        """Build a mock boto3 client whose paginator yields *fixture* pages."""
        client = MagicMock()
        paginator = MagicMock()
        response_iterator = fixture
        paginator.paginate.return_value = response_iterator
        client.get_paginator.return_value = paginator
        return client

    def test_has_correct_supported_regions(self):
        # NOTE(review): the 'ap-southheast-*' spellings presumably mirror the
        # constants in AutoScalingBarrel itself -- confirm against the
        # production code before changing them here.
        supported_regions = set([
            'us-east-2',
            'us-east-1',
            'us-west-2',
            'us-west-1',
            'ap-northeast-1',
            'ap-northeast-2',
            'ap-south-1',
            'ap-southheast-1',
            'ap-southheast-2',
            'ca-central-1',
            'cn-north-1',
            'cn-northwest-1',
            'eu-central-1',
            'eu-west-1',
            'eu-west-2',
            'eu-west-3',
            'sa-east-1',
        ])
        barrel = AutoScalingBarrel({}, clients={})
        self.assertEqual(supported_regions, barrel.supported_regions)

    @patch("boto3.client")
    def test_default_clients(self, mock_client):
        # Without explicit clients, the barrel builds one per supported region.
        mock_client.return_value = MagicMock()
        barrel = AutoScalingBarrel({})
        for region, client in barrel.clients.items():
            self.assertIn(region, barrel.supported_regions)

    def test_tap_functions_with_describe_auto_scaling_groups(self):
        # tap('describe_auto_scaling_groups') must delegate to the method.
        fixture = [
            {
                'AutoScalingGroups': [
                    {
                        'AutoScalingGroupName': 'a_group',
                    }
                ]
            }
        ]
        clients = {
            'us-east-1': self.client_mock(fixture)
        }
        barrel = AutoScalingBarrel({}, clients=clients)
        tap_return = barrel.tap('describe_auto_scaling_groups')
        auto_scaling_return = barrel.describe_auto_scaling_groups()
        self.assertEqual(auto_scaling_return, tap_return)

    def test_tap_throws_error_with_unsupported_call(self):
        barrel = AutoScalingBarrel({})
        with self.assertRaises(RuntimeError):
            barrel.tap('unsupported_call')

    def test_auto_scaling_returns_groups_by_region(self):
        # Results are keyed by region, one client fixture per region.
        fixture_1 = [
            {
                'AutoScalingGroups': [
                    {
                        'AutoScalingGroupName': 'a_group',
                    }
                ]
            }
        ]
        fixture_2 = [
            {
                'AutoScalingGroups': [
                    {
                        'AutoScalingGroupName': 'a_group_2',
                    }
                ]
            }
        ]
        clients = {
            'us-east-1': self.client_mock(fixture_1),
            'us-east-2': self.client_mock(fixture_2),
        }
        barrel = AutoScalingBarrel({}, clients=clients)
        results = barrel.describe_auto_scaling_groups()
        expected = {
            'us-east-1': [
                {
                    'AutoScalingGroupName': 'a_group'
                },
            ],
            'us-east-2': [
                {
                    'AutoScalingGroupName': 'a_group_2'
                },
            ]
        }
        self.assertEqual(results, expected)

    def test_describe_auto_scaling_groups_empty(self):
        fixture = [  # Multiple pages of empty
            {
                'AutoScalingGroups': [
                ]
            }
        ]
        clients = {
            'us-east-1': self.client_mock(fixture)
        }
        barrel = AutoScalingBarrel({}, clients=clients)
        results = barrel.describe_auto_scaling_groups()
        expected = {
            'us-east-1': []
        }
        self.assertEqual(results, expected)
| 2.359375 | 2 |
utils.py | MarkEEaton/fictograph | 0 | 12766462 | <filename>utils.py
""" some tools to work with the data """
from random import uniform
import asks
import trio
import multio
from bs4 import BeautifulSoup
import key
def clean(data):
    """Sort works by date and jitter exact date collisions.

    Works are sorted chronologically; whenever a work shares its date with
    the *following* work, a random offset in [0, 1) is added to a copy of
    it so plotted points do not overlap exactly. Input dicts are never
    mutated; the final item is always appended unchanged.

    :param data: list of dicts with "title", "date", "rating" and "id" keys
    :return: new list of dicts, sorted by date
    """
    # sort the data by date
    sorted_data = sorted(data, key=lambda k: k["date"])
    cleaned = []
    last_index = len(sorted_data) - 1
    for i, item in enumerate(sorted_data):
        # Explicit bound instead of the previous IndexError-driven control
        # flow for the last element.
        if i < last_index and item["date"] == sorted_data[i + 1]["date"]:
            cleaned.append(
                {
                    "title": item["title"],
                    "date": item["date"] + uniform(0, 1),
                    "rating": item["rating"],
                    "id": item["id"],
                }
            )
        else:
            cleaned.append(item)
    return cleaned
def gather_books(soup):
    """Assemble book.show API URLs for every book in the parsed author page.

    Uses the book.show endpoint so the caller can read the original
    publication year of each book.
    """
    base = "https://www.goodreads.com/book/show.xml?key=" + key.token + "&id="
    return [base + book.id.string for book in soup.find_all("book")]
async def fetch(url: str, htmls: list):
    """ fetch an individual url """
    # Append the raw response body for later parsing; completion order
    # (and therefore list order) is not deterministic.
    response = await asks.get(url)
    htmls.append(response.content)
def run_asy(urls: list):
    """ set up trio """
    # Tell asks/multio which async library is driving the event loop.
    multio.init("trio")
    return trio.run(nurs, urls)
async def nurs(urls: list):
    """ run the trio loop """
    # Fetch all book pages concurrently; results accumulate in `htmls`.
    htmls = []
    async with trio.open_nursery() as nursery:
        for url in urls:
            nursery.start_soon(fetch, url, htmls, name=url)
    works = []
    for page in htmls:
        soup3 = BeautifulSoup(page, "xml")
        try:
            year = soup3.book.work.original_publication_year.string
            # truncate long titles
            if len(soup3.book.title.string) > 20:
                title = soup3.book.title.string[:20] + "..."
            else:
                title = soup3.book.title.string
            # Books without an original publication year are skipped.
            if year is not None:
                work = {
                    "title": title,
                    "date": int(year),
                    "rating": float(soup3.book.average_rating.string),
                    "id": soup3.book.id.string,
                }
                works.append(work)
            else:
                pass
        except Exception as exception:
            # NOTE(review): broad catch silently skips any page that fails to
            # parse; only the exception text is printed.
            print(exception)
    return works
| 3.3125 | 3 |
deployman/test_manifests.py | alexvnilsson/deployman | 0 | 12766463 | import manifests
# Load the deployment manifest and show which deployment it describes.
m = manifests.read("deploy.yml")
print(m.name)
src/compas_plotters/artists/circleartist.py | archimarkGit/compas | 0 | 12766464 | <gh_stars>0
from compas_plotters.artists import Artist
from matplotlib.patches import Circle as CirclePatch
# from matplotlib.transforms import ScaledTranslation
__all__ = ['CircleArtist']
class CircleArtist(Artist):
    """Plotter artist that renders a compas Circle as a matplotlib patch."""

    # Draw circles above most other artists.
    zorder = 1000

    def __init__(self, circle, **kwargs):
        super(CircleArtist, self).__init__()
        self._mpl_circle = None  # matplotlib patch, created on first draw()
        self.circle = circle
        self.facecolor = kwargs.get('facecolor', '#ffffff')
        self.edgecolor = kwargs.get('edgecolor', '#000000')
        self.fill = kwargs.get('fill', True)

    def draw(self):
        """Create the matplotlib patch and register it with the plotter axes."""
        circle = CirclePatch(
            self.circle.center[:2],
            radius=self.circle.radius,
            facecolor=self.facecolor,
            edgecolor=self.edgecolor,
            fill=self.fill,
            zorder=self.zorder
        )
        self._mpl_circle = self.plotter.axes.add_artist(circle)
        self.plotter.axes.update_datalim([self.circle.center[:2]])

    def redraw(self):
        """Push the current circle attributes to the existing patch.

        NOTE(review): only radius and colors are updated here -- the patch
        centre is never moved even though the data limits are extended with
        the (possibly new) centre; confirm whether that is intended.
        """
        self._mpl_circle.set_radius(self.circle.radius)
        self._mpl_circle.set_edgecolor(self.edgecolor)
        self._mpl_circle.set_facecolor(self.facecolor)
        self.plotter.axes.update_datalim([self.circle.center[:2]])
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':

    from compas.geometry import Circle
    from compas.geometry import Point
    from compas.geometry import Plane
    from compas.geometry import Vector
    from compas_plotters import GeometryPlotter

    # Demo: three concentric circles in the XY plane, one color each.
    plotter = GeometryPlotter()

    plane = Plane(Point(0, 0, 0), Vector(0, 0, 1))

    a = Circle(plane, 4.0)
    b = Circle(plane, 3.0)
    c = Circle(plane, 2.0)

    plotter.add(a, edgecolor='#ff0000')
    plotter.add(b, edgecolor='#00ff00')
    plotter.add(c, edgecolor='#0000ff')

    plotter.draw(pause=1.0)
    plotter.show()
| 2.703125 | 3 |
DMOJ/DMOPC/DMPG_17_S5_Bit_Matrix.py | Togohogo1/pg | 0 | 12766465 | from math import log2, ceil
def find(gen, arr):
    """Expand *arr* into a reflected (Gray-code style) list of bit strings.

    Each recursion level prefixes the existing entries (mutated in place)
    with "0" and a reversed copy with "1", doubling the list until *gen*
    levels have been applied.
    """
    if gen == 1:
        return arr
    reflected = ["1" + s for s in reversed(arr)]
    for index, value in enumerate(arr):
        arr[index] = "0" + value
    return find(gen - 1, arr + reflected)
# Read the matrix dimensions from stdin.
R, C = map(int, input().split())

# Reflected (Gray-code style) labels: enough bits to give each row/column
# a distinct binary prefix.
rows = find(ceil(log2(R+1)), ["0", "1"])
cols = find(ceil(log2(C+1)), ["0", "1"])

# Each cell value is the row label concatenated with the column label,
# interpreted as a binary number.
for i in range(R):
    for j in range(C):
        print(int(rows[i]+cols[j], 2), end=" ")
    print()
| 3.1875 | 3 |
websiteV2/utils/encrypt.py | meng950813/scholar_discovery_sys | 1 | 12766466 | """
created by chen
该模块用于密码加密
基本思路为 : 调用 hashlib 库中的 md5 加密方法,
将用户密码 与 专门的 key 进行 update 运算
返回一个 32 位 长的密码
"""
import hashlib
import sys
sys.path.append("..")
from config import SECRET_KEY
def encryption(pwd):
    """
    Hash a password for storage.

    The value is MD5-hashed, then the digest is updated with the two
    application secret keys, yielding a 32-character hex string.

    NOTE(review): MD5 is not a suitable password hash by modern standards;
    changing the algorithm would invalidate all stored hashes, so it is
    only flagged here.

    :param pwd: user password (any value; coerced to str)
    :return: 32-character hex string
    """
    pwd = str(pwd)
    password = hashlib.md5(pwd.encode())
    password.update(SECRET_KEY['KEY'])
    # Third hashing step: mix in the secondary secret key.
    password.update(SECRET_KEY['KEY2'])
    return password.hexdigest()
if __name__ == '__main__':
    # Ad-hoc smoke test: any input type is coerced to str before hashing.
    print(encryption("a"))
    print(encryption(123))
    print(encryption(None))
scripts/npc/goNinja.py | Snewmy/swordie | 9 | 12766467 | <gh_stars>1-10
# Palanquin (9110107) | Outside Ninja Castle (800040000)
# Map id of Mushroom Shrine; the palanquin offers a ride there.
mushroomShrine = 800000000
response = sm.sendAskYesNo("Would you like to go to #m" + str(mushroomShrine) + "m#?")
if response:
    sm.warp(mushroomShrine)
app/dao/email_branding_dao.py | alphagov/notify-notifications-api | 51 | 12766468 | from app import db
from app.dao.dao_utils import autocommit
from app.models import EmailBranding
def dao_get_email_branding_options():
    """Return every EmailBranding row."""
    return EmailBranding.query.all()
def dao_get_email_branding_by_id(email_branding_id):
    """Return the EmailBranding with this id.

    ``one()`` raises if no row (or more than one row) matches.
    """
    return EmailBranding.query.filter_by(id=email_branding_id).one()
def dao_get_email_branding_by_name(email_branding_name):
    """Return the EmailBranding with this name, or None if absent."""
    return EmailBranding.query.filter_by(name=email_branding_name).first()
@autocommit
def dao_create_email_branding(email_branding):
    """Add a new EmailBranding row; @autocommit commits the session."""
    db.session.add(email_branding)
@autocommit
def dao_update_email_branding(email_branding, **kwargs):
    """Update the given attributes on an EmailBranding row.

    NOTE(review): ``value or None`` maps *every* falsy value ('' as well as
    0 and False) to None -- presumably to blank out empty strings; confirm
    this is intended for all updatable fields.
    """
    for key, value in kwargs.items():
        setattr(email_branding, key, value or None)
    db.session.add(email_branding)
| 2.4375 | 2 |
src/player/player.py | philipholler/RocoCup_Soccer_P6 | 5 | 12766469 | <reponame>philipholler/RocoCup_Soccer_P6<gh_stars>1-10
import math
import configurations
from geometry import calculate_full_origin_angle_radians, is_angle_in_range, smallest_angle_difference, get_xy_vector, \
Vector2D, inverse_y_axis
from configurations import BALL_DECAY, KICKABLE_MARGIN
from player.world_objects import PrecariousData, Coordinate, Ball, ObservedPlayer
from utils import debug_msg
MAX_MOVE_DISTANCE_PER_TICK = 1.05
APPROA_GOAL_DISTANCE = 30
DEFAULT_MODE = "DEFAULT"
INTERCEPT_MODE = "INTERCEPTING"
DRIBBLING_MODE = "DRIBBLING"
CHASE_MODE = "CHASE"
POSSESSION_MODE = "POSSESSION"
PASSED_MODE = "PASSED"
CATCH_MODE = "CATCH"
class PlayerState:
    """Everything a single player agent knows: its own body, the observed
    world, and bookkeeping for the current objective/strategy."""

    def __init__(self):
        self.mode = DEFAULT_MODE
        # False while the ball is expected in view but has not been seen.
        self._ball_seen_since_missing = True
        self.power_rate = configurations.DASH_POWER_RATE
        self.team_name = ""
        self.num = -1
        self.player_type = None
        self.ball_collision_time = 0
        self.position: PrecariousData = PrecariousData.unknown()
        self.world_view = WorldView(0)
        self.body_angle: PrecariousData = PrecariousData(0, 0)
        self.action_history = ActionHistory()
        self.body_state = BodyState()
        self.players_close_behind = 0
        self.coach_commands: [PrecariousData] = []
        self.starting_position: Coordinate = None
        self.playing_position: Coordinate = None
        self.last_see_global_angle = 0
        self.current_objective = None
        self.face_dir = PrecariousData(0, 0)
        self.should_reset_to_start_position = False
        self.objective_behaviour = "idle"
        self.passchain_targets: [PrecariousData] = []
        self.is_generating_strategy = False
        self.strategy_result_list: [] = []
        self.dribble_or_pass_strat = PrecariousData.unknown()
        self.last_dribble_pass_strat = -9999
        self.goalie_position_dict = None
        self.goalie_position_strategy = None
        self.goalie_position_random_seed = 123456789
        self.goalie_position_strat_have_dribbled = False
        self.intercepting = False
        self.received_dribble_instruction = PrecariousData(False, 0)
        self.statistics = Statistics()
        super().__init__()

    def get_y_north_velocity_vector(self):
        """Return the body velocity as an x/y vector with the y axis inverted."""
        return Vector2D.velocity_to_xy(self.body_state.speed, inverse_y_axis(self.body_angle.get_value()))

    def __str__(self) -> str:
        return "side: {0}, team_name: {1}, player_num: {2}, position: {3}".format(self.world_view.side, self.team_name
                                                                                  , self.num, self.position)

    def needs_dribble_or_pass_strat(self):
        """True when a new dribble/pass strategy should be generated: the
        player is dribbling (or the ball is incoming), is not intercepting,
        a strategy was not generated within the last 4 ticks, and at least
        one nearby opponent and teammate are known."""
        if not (self.mode is DRIBBLING_MODE or self.ball_incoming()) or self.intercepting or self.now() - self.last_dribble_pass_strat <= 4:
            if self.is_test_player():
                debug_msg(str(self.now()) + " Not dribbling or recently generated strat", "DRIBBLE_PASS_MODEL")
            return False
        enough_opponents = len(self.world_view.get_opponents(self.team_name, 10)) > 0
        enough_teammates = len(self.world_view.get_teammates(self.team_name, 10, 5)) > 0
        if not enough_opponents:
            if self.is_test_player():
                debug_msg(str(self.now()) + " Not enough opponents: " + str(self.world_view.other_players)
                          , "DRIBBLE_PASS_MODEL")
            return False
        if not enough_teammates:
            if self.is_test_player():
                debug_msg(str(self.now()) + " Not enough teammates: " + str(
                    self.world_view.other_players), "DRIBBLE_PASS_MODEL")
            return False
        return True

    def is_inside_field(self):
        """True if the player's known position lies within the pitch bounds."""
        position: Coordinate = self.position.get_value()
        if -52.5 > position.pos_x or position.pos_x > 52.5:
            return False
        if -34 > position.pos_y or position.pos_y > 34:
            return False
        return True

    def is_approaching_goal(self):
        """True if the player is within APPROA_GOAL_DISTANCE of the opponent goal."""
        if self.position.is_value_known():
            pos: Coordinate = self.position.get_value()
            if self.world_view.side == "l" and pos.euclidean_distance_from(Coordinate(52.5, 0)) < APPROA_GOAL_DISTANCE:
                return True
            if self.world_view.side == "r" and pos.euclidean_distance_from(Coordinate(-52.5, 0)) < APPROA_GOAL_DISTANCE:
                return True
        return False

    def get_global_start_pos(self):
        """Convert the side-relative starting position to global coordinates."""
        if self.world_view.side == "l":
            return Coordinate(self.starting_position.pos_x, -self.starting_position.pos_y)
        else:
            return Coordinate(-self.starting_position.pos_x, self.starting_position.pos_y)

    def get_global_play_pos(self):
        """Convert the side-relative playing position to global coordinates."""
        if self.world_view.side == "l":
            return Coordinate(self.playing_position.pos_x, -self.playing_position.pos_y)
        else:
            return Coordinate(-self.playing_position.pos_x, self.playing_position.pos_y)

    def get_global_angle(self):
        """Return body angle plus neck angle as a PrecariousData, if known."""
        if self.body_angle.is_value_known():
            neck_angle = self.body_state.neck_angle
            return PrecariousData(self.body_angle.get_value() + neck_angle, self.body_angle.last_updated_time)
        else:
            return PrecariousData.unknown()

    def body_facing(self, coordinate, delta):
        """True if the body is oriented towards *coordinate* within *delta* degrees."""
        if not self.body_angle.is_value_known() or not self.position.is_value_known():
            # should this return unknown?(None?)
            return False
        expected_angle = math.degrees(calculate_full_origin_angle_radians(coordinate, self.position.get_value()))
        return abs(smallest_angle_difference(expected_angle, self.body_angle.get_value())) < delta

    def is_near(self, coordinate: Coordinate, allowed_delta=0.5):
        """True if the player is within *allowed_delta* of *coordinate*."""
        if not self.position.is_value_known():
            return False
        distance = coordinate.euclidean_distance_from(self.position.get_value())
        return distance <= allowed_delta

    def is_near_ball(self, delta=KICKABLE_MARGIN):
        """True if the ball was seen within the last 3 ticks at distance <= delta."""
        minimum_last_update_time = self.now() - 3
        ball_known = self.world_view.ball.is_value_known(minimum_last_update_time)
        if ball_known:
            return float(self.world_view.ball.get_value().distance) <= delta
        return False

    def is_near_goal(self, delta=10.0):
        """True if a seen opponent goal is within *delta*."""
        if self.world_view.goals[0] is not None and self.world_view.side != self.world_view.goals[0].goal_side:
            return float(self.world_view.goals[0].distance) <= delta
        if self.world_view.goals[1] is not None and self.world_view.side != self.world_view.goals[1].goal_side:
            return float(self.world_view.goals[1].distance) <= delta
        return False

    # True if looking towards last known ball position and not seeing the ball
    def is_ball_missing(self):
        if self.world_view.ball.get_value() is None or not self._ball_seen_since_missing:
            # print("ball missing!")
            self._ball_seen_since_missing = False
            return True

        ball_position = self.world_view.ball.get_value().coord
        ball_angle = math.degrees(calculate_full_origin_angle_radians(ball_position, self.position.get_value()))

        angle_difference = abs(self.last_see_global_angle - ball_angle)
        looking_towards_ball = angle_difference < self.body_state.fov * 0.25
        can_see_ball = self.world_view.ball.is_value_known(self.action_history.last_see_update)
        if looking_towards_ball and not can_see_ball:
            pass
            # print("ball missing!")
        return looking_towards_ball and not can_see_ball

    def now(self):
        """Current simulation time."""
        return self.world_view.sim_time

    def is_test_player(self):
        # Debug helper: player 2 on the left team emits extra debug messages.
        return self.num == 2 and self.world_view.side == 'l'

    def is_nearest_ball(self, degree=1):
        """True if fewer than *degree* known teammates are closer to the ball."""
        team_mates = self.world_view.get_teammates(self.team_name, 10)
        if len(team_mates) < degree:
            return True

        ball_position: Coordinate = self.world_view.ball.get_value().coord
        distances = map(lambda t: t.coord.euclidean_distance_from(ball_position), team_mates)
        sorted_distances = sorted(distances)

        return sorted_distances[degree - 1] > ball_position.euclidean_distance_from(self.position.get_value())

    def ball_interception(self):
        """Project the ball 10 ticks ahead and return the first (position,
        tick) pair the player can reach, or (None, None)."""
        wv = self.world_view
        ball = wv.ball.get_value()
        ball_known = wv.ball.is_value_known(self.now() - 4)
        if (not ball_known) or ball.absolute_velocity is None or ball.absolute_velocity.magnitude() < 0.2:
            return None, None
        if wv.ball.is_value_known(self.now() - 4):
            ball: Ball = wv.ball.get_value()
            tick_offset = self.now() - wv.ball.last_updated_time
            project_positions = ball.project_ball_position(10, tick_offset)
            if project_positions is None:
                return None, None
            all_ticks = range(1, 11)

            positions_and_ticks = list(zip(project_positions, all_ticks))
            printable_list = [(pt[0], pt[1]) for pt in positions_and_ticks]
            # positions_and_ticks = sorted(positions_and_ticks, key=lambda pos_and_t: pos_and_t[0].euclidean_distance_from(self.position.get_value()))

            for (position, tick) in positions_and_ticks:
                if self.can_player_reach(position, tick):
                    debug_msg(str(self.now()) + " | Found reachable position: " + str(ball.absolute_velocity)
                              , "INTERCEPTION")
                    return position, tick
            if self.is_test_player():
                debug_msg(str(self.now()) + " | Based on ball velocity : " + str(ball.absolute_velocity)
                          , "INTERCEPTION")
                debug_msg(str(self.now()) + " | Predictions : " + str(printable_list)
                          , "INTERCEPTION")
        return None, None

    def can_player_reach(self, position: Coordinate, ticks):
        """True if the player can rush to *position* within *ticks* (plus a
        small allowance for turning and braking)."""
        distance = position.euclidean_distance_from(self.position.get_value())
        extra_time = 1
        if distance <= KICKABLE_MARGIN:
            return True
        if not self.body_facing(position, delta=5):
            extra_time += 1
        if self.body_state.speed > 0.2:
            extra_time += 1
        return self.time_to_rush_distance(distance) <= ticks + extra_time

    def time_to_rush_distance(self, distance):
        """Estimate the number of ticks needed to dash *distance* from rest."""
        def distance_in_n_ticks(speed, ticks):
            if ticks == 0:
                return 0
            return speed + distance_in_n_ticks(speed * configurations.PLAYER_SPEED_DECAY, ticks - 1)

        projected_speed = 0
        ticks = 0
        # Accelerate tick-by-tick until the remaining distance is coverable
        # by coasting for 3 more ticks.
        while distance > distance_in_n_ticks(projected_speed, 3):
            ticks += 1
            projected_speed += configurations.DASH_POWER_RATE * 100
            distance -= projected_speed
            projected_speed *= configurations.PLAYER_SPEED_DECAY
        return ticks + 3

    def update_body_angle(self, new_angle, time):
        # If value is uninitialized, then accept new_angle as actual angle
        self.body_angle.set_value(new_angle, time)

    def update_position(self, new_position: Coordinate):
        """Record a newly-observed position and reset the projection."""
        self.position.set_value(new_position, self.now())
        # print("PARSED : ", time, " | Position: ", new_position)
        self.action_history.projected_position = new_position

    def update_face_dir(self, new_global_angle):
        """Record a newly-observed facing direction and reconcile it with any
        turn command that was in progress."""
        if self.action_history.turn_in_progress:
            history = self.action_history
            actual_angle_change = abs(new_global_angle - self.face_dir.get_value())
            if history.missed_turn_last_see:
                # Missed turn update in last see message, so it must have been included in this see update
                history.turn_in_progress = False
                history.missed_turn_last_see = False
                history.expected_body_angle = None
            elif actual_angle_change + 0.1 >= abs(history.expected_angle_change) / 2 or actual_angle_change > 2.0:
                # Turn registered
                history.turn_in_progress = False
                history.expected_body_angle = None
            else:
                # Turn not registered
                history.missed_turn_last_see = True

            # Reset expected angle
            history.expected_angle_change = 0

        self.last_see_global_angle = new_global_angle
        self.face_dir.set_value(new_global_angle, self.now())
        self.update_body_angle(new_global_angle - self.body_state.neck_angle, self.now())

    def on_see_update(self):
        """Housekeeping run after each 'see' message: prune stale player
        observations, roll the see-update timestamps, and track ball visibility."""
        # Delete old observations of players
        self.world_view.other_players = list(filter(lambda op: self.now() - op.last_updated_time < 20,
                                                    self.world_view.other_players))
        self.action_history.three_see_updates_ago = self.action_history.two_see_updates_ago
        self.action_history.two_see_updates_ago = self.action_history.last_see_update
        self.action_history.last_see_update = self.now()
        if self.current_objective is not None:
            self.current_objective.has_processed_see_update = False

        if self.world_view.ball.last_updated_time == self.action_history.last_see_update:
            # We've seen the ball this tick, so it is not missing
            self._ball_seen_since_missing = True
        elif self.is_ball_missing():
            # We're looking in the direction of the ball and not seeing it, so it must be missing
            self._ball_seen_since_missing = False

    def update_ball(self, new_ball: Ball, time):
        """Record a fresh ball observation."""
        self.world_view.ball.set_value(new_ball, time)
        self._ball_seen_since_missing = True

    def ball_incoming(self):
        """True if a recently-seen ball is moving towards this player."""
        if not self.world_view.ball.is_value_known(self.action_history.three_see_updates_ago):
            return False
        ball: Ball = self.world_view.ball.get_value()
        dist = ball.distance
        if dist >= 15:
            return False

        if ball.absolute_velocity is not None:
            ball_move_dir = ball.absolute_velocity.world_direction()
            ball_relative_dir = math.degrees(calculate_full_origin_angle_radians(ball.coord, self.position.get_value()))
            dif = abs(smallest_angle_difference((ball_move_dir + 180) % 360, ball_relative_dir))
            if self.is_test_player():
                debug_msg(str(self.now()) + " Ball movement dir: " + str(ball_move_dir)
                          + " Direction from player: " + str(ball_relative_dir)
                          + " Heading this way : " + str(dif <= 15), "DRIBBLE_PASS_MODEL")
            return dif <= 15

        # Fall back to an approximation when no absolute velocity is known.
        position, projected_direction, speed = ball.approximate_position_direction_speed(2)
        if projected_direction is None or speed < 0.25:
            return False
        ball_angle = math.degrees(calculate_full_origin_angle_radians(self.position.get_value(), ball.coord))
        if abs(ball_angle - projected_direction) < 15 and dist < 40:
            return True
        return False

    def is_inside_own_box(self) -> bool:
        """True if the player stands inside its own penalty box."""
        pos: Coordinate = self.position.get_value()
        result = True
        if self.world_view.side == "l":
            if pos.pos_x > -36 or (pos.pos_y < -20 or pos.pos_y > 20):
                result = False
        else:
            if pos.pos_x < 36 or (pos.pos_y < -20 or pos.pos_y > 20):
                result = False
        if self.is_test_player():
            debug_msg("is_inside_own_box={0}, Pos={1}".format(result, pos), "GOALIE")
        return result

    def is_ball_inside_own_box(self) -> bool:
        """True if the last-known ball position lies inside our penalty box."""
        pos: Coordinate = self.world_view.ball.get_value().coord
        result = True
        if self.world_view.side == "l":
            if pos.pos_x > -36 or (pos.pos_y < -20 or pos.pos_y > 20):
                result = False
        else:
            if pos.pos_x < 36 or (pos.pos_y < -20 or pos.pos_y > 20):
                result = False
        if self.is_test_player():
            debug_msg("is_inside_own_box={0}, Pos={1}".format(result, pos), "GOALIE")
        return result

    def get_closest_free_position(self, opt_coord: Coordinate, max_delta_from_org_coord=5, min_delta_from_opp=7):
        """Return the free coordinate nearest to *opt_coord*, or None.

        NOTE(review): only integer offsets in the positive x and y direction
        from opt_coord are searched (range starts at the optimum) -- confirm
        whether positions below/left of the optimum should be considered too.
        """
        init_x: int = int(opt_coord.pos_x)
        init_y: int = int(opt_coord.pos_y)
        free_cords: [Coordinate] = []
        for x in range(init_x, init_x + max_delta_from_org_coord):
            for y in range(init_y, init_y + max_delta_from_org_coord):
                c: Coordinate = Coordinate(x, y)
                if self.is_coord_free(c, min_delta_from_opp):
                    free_cords.append(c)
        debug_msg("Opt_coord={0}, free_coords={1}".format(opt_coord, free_cords), "FREE_POSITION")
        if len(free_cords) > 0:
            return sorted(free_cords, key=lambda c: c.euclidean_distance_from(opt_coord), reverse=False)[0]
        return None

    def is_coord_free(self, coord: Coordinate, min_delta_from_opp=1):
        """True if no recently-seen player stands within *min_delta_from_opp*."""
        players: [ObservedPlayer] = self.world_view.get_all_known_players(self.team_name, max_data_age=3)
        for p in players:
            dist = p.coord.euclidean_distance_from(coord)
            if dist <= min_delta_from_opp:
                return False
        return True

    def get_ball_possessor(self, max_data_age=2, poss_min_dist=2):
        """Return the observed player closest to the ball if within
        *poss_min_dist* of it, otherwise None."""
        if self.world_view.ball.is_value_known(max_data_age):
            other_players = self.world_view.get_all_known_players(self.team_name, max_data_age)
            if len(other_players) < 1:
                return None
            possessor: [ObservedPlayer] = list(sorted(other_players, key=lambda p: p.coord.euclidean_distance_from(
                self.world_view.ball.get_value().coord), reverse=False))[0]
            if possessor.coord.euclidean_distance_from(self.world_view.ball.get_value().coord) < poss_min_dist:
                return possessor
        return None

    def is_dribbling(self):
        """True while the player is in dribbling mode."""
        return self.mode == DRIBBLING_MODE

    def find_teammate_closest_to(self, coord: Coordinate, max_distance_delta=200):
        """Return the known teammate nearest to *coord*, or None if none is
        within *max_distance_delta*."""
        closest_teammate = None
        team_mates: [ObservedPlayer] = self.world_view.get_teammates(self.team_name, max_data_age=8)
        for tm in team_mates:
            tm : ObservedPlayer
            if closest_teammate is None or \
                    closest_teammate.coord.euclidean_distance_from(coord) > tm.coord.euclidean_distance_from(coord):
                closest_teammate = tm
        if closest_teammate is None or closest_teammate.coord.euclidean_distance_from(coord) > max_distance_delta:
            return None
        return closest_teammate
class Statistics(object):
    """Collects per-player run-time statistics about strategy generation."""

    def __init__(self) -> None:
        self.strategies_generated = 0
        self.current_missed_ticks = 0
        self.missed_ticks_history = []
        self.applied_possession_strategies = 0
        self.outdated_possession_strategies = 0

    def register_missed_tick(self):
        """Count one tick missed while a strategy was being generated."""
        self.current_missed_ticks = self.current_missed_ticks + 1

    def register_finished_strategy_generation(self):
        """Archive the missed-tick count of the finished generation run."""
        self.missed_ticks_history.append(self.current_missed_ticks)
        self.current_missed_ticks = 0

    def use_possession_strategy(self):
        """Record that a generated possession strategy was applied."""
        self.applied_possession_strategies = self.applied_possession_strategies + 1

    def discard_possession_strategy(self):
        """Record that a generated possession strategy had become outdated."""
        self.outdated_possession_strategies = self.outdated_possession_strategies + 1

    def print_to_file(self, player_num, player_team):
        pass

    def missed_ticks_text(self):
        """Return the history as comma-separated text without list brackets."""
        as_text = str(self.missed_ticks_history)
        return as_text.replace('[', '').replace(']', '')
class ActionHistory:
    """Bookkeeping about recently-issued commands and see-update timing."""

    def __init__(self) -> None:
        self.turn_history = ViewFrequency()
        self.ball_focus_actions = 0
        # Timestamps of the most recent see updates (newest first).
        self.last_see_update = 0
        self.last_catch = 0
        self.two_see_updates_ago = 0
        self.three_see_updates_ago = 0
        self.has_just_intercept_kicked = False
        # State used to detect whether an issued turn was registered
        # by the server (see PlayerState.update_face_dir).
        self.turn_in_progress = False
        self.missed_turn_last_see = False
        self.expected_speed = None
        self.projected_position = Coordinate(0, 0)
        self.has_looked_for_targets = False
        self.expected_angle_change = 0
        self.expected_body_angle = None
        self.last_look_for_pass_targets = 0
        self.last_stamina_strat_generated = 0
        self.dashes_last_stamina_strat = 0
        self.intercepting = False
class ViewFrequency:
    """Tracks how recently each angular 'slice' of the surroundings was seen.

    The full 360 degrees are divided into fixed-width slices; each slice
    stores the number of see-updates since it was last inside the field of
    view (capped at 20)."""

    SLICE_WIDTH = 15 # The amount of degrees between each view 'slice'
    SLICES = round(360 / SLICE_WIDTH)

    def __init__(self) -> None:
        self.last_update_time: [int] = [0] * self.SLICES

    def least_updated_angle(self, field_of_view, lower_bound=0, upper_bound=360):
        """Return the slice angle (within the bounds) whose visible
        neighbourhood has the highest accumulated age."""
        viewable_slices_to_each_side = self._get_viewable_slices_to_each_side(field_of_view)

        oldest_angle = 0
        best_angle_index = 0
        for i, update_time in enumerate(self.last_update_time):
            if not is_angle_in_range(i * self.SLICE_WIDTH, lower_bound, upper_bound):
                continue
            # Sum the age of every slice that would be visible when
            # looking at slice i (wrapping around 360 degrees).
            viewable_range = range(i - viewable_slices_to_each_side, i + viewable_slices_to_each_side + 1)
            total_age = 0
            for v in viewable_range:
                total_age += self.last_update_time[v % self.SLICES]
            if oldest_angle < total_age:
                oldest_angle = total_age
                best_angle_index = i

        return self.SLICE_WIDTH * best_angle_index

    def renew_angle(self, angle: int, field_of_view: int):
        """Mark the slices visible around *angle* as just seen; age the rest."""
        viewable_slices_to_each_side = self._get_viewable_slices_to_each_side(field_of_view)
        angle_index = round(angle / self.SLICE_WIDTH)
        view_range = range(angle_index - viewable_slices_to_each_side, angle_index + viewable_slices_to_each_side + 1)

        # Increment all timers
        for i in range(0, len(self.last_update_time)):
            self.last_update_time[i] = min(self.last_update_time[i] + 1, 20)

        # Reset now visible angles
        for i in view_range:
            self.last_update_time[i % self.SLICES] = 0

    def _get_viewable_slices_to_each_side(self, field_of_view) -> int:
        """Number of whole slices visible on each side of the view centre."""
        viewable_slices = math.floor(field_of_view / self.SLICE_WIDTH)
        # Force an odd count so the centre slice has equal neighbours.
        if viewable_slices % 2 == 0:
            viewable_slices -= 1
        return max(math.floor(viewable_slices / 2), 0)
class BodyState:
    """Raw body-sensor values reported by the server, plus dash presets.

    NOTE(review): field semantics mirror the server's sense_body message;
    units/ranges are not visible here -- confirm against the protocol docs.
    """

    def __init__(self):
        self.time = 0
        self.view_mode = ""
        self.stamina = 0
        self.effort = 0
        self.capacity = 0
        self.speed = 0
        self.direction_of_speed = 0
        self.neck_angle = 0
        self.arm_movable_cycles = 0
        self.dash_count = 0
        self.arm_expire_cycles = 0
        self.distance = 0
        self.direction = 0
        self.target = ""
        self.tackle_expire_cycles = 0
        self.collision = ""
        self.charged = 0
        self.card = ""
        self.fov = 90
        # Dash/kick power presets used by the behaviour code.
        self.max_dash_power = 100
        self.jog_dash_power = 100 * 0.6
        self.dribble_dash_power = 80
        self.dribble_kick_power = 100 * 0.3
class WorldView:
    """Aggregated, time-stamped view of the match as seen by one agent.

    The ball and other players are wrapped in ``PrecariousData`` so their
    freshness can be checked against the current ``sim_time``.
    """
    def __init__(self, sim_time):
        self.sim_time = sim_time
        self.other_players: [PrecariousData] = []
        self.ball: PrecariousData = PrecariousData.unknown()
        self.goals = []
        self.lines = []
        self.side = ""
        self.game_state = "before_kick_off"
    def __repr__(self) -> str:
        return super().__repr__()
    def is_marked(self, team, max_data_age, min_distance=3):
        """Return True if any player of *team* is within *min_distance*.

        NOTE(review): the local name says ``opponents`` but the lookup is
        ``get_teammates(team, ...)`` — callers presumably pass the opposing
        team's name here; confirm which is intended.
        """
        opponents: [ObservedPlayer] = self.get_teammates(team, max_data_age=max_data_age)
        for opponent in opponents:
            if opponent.distance < min_distance:
                return True
        return False
    def ticks_ago(self, ticks):
        """Return the simulation time *ticks* cycles before now."""
        return self.sim_time - ticks
    def team_has_ball(self, team, max_data_age, min_possession_distance=3):
        """True if the known player closest to the ball belongs to *team*
        and is within *min_possession_distance* of it.

        NOTE(review): the debug messages on the False paths hard-code
        "Team2" — looks like a copy-paste leftover; confirm.
        """
        if not self.ball.is_value_known():
            debug_msg("{0} has ball".format("Team2"), "HAS_BALL")
            return False
        all_players: [ObservedPlayer] = self.get_all_known_players(team, max_data_age)
        # Sort players by distance to ball
        sorted_list: [ObservedPlayer] = list(
            sorted(all_players, key=lambda p: p.coord.euclidean_distance_from(self.ball.get_value().coord),
                   reverse=False))
        if len(sorted_list) < 1:
            return False
        # If closest player to ball team is known and is our team, return True
        closest_player: ObservedPlayer = sorted_list[0]
        if closest_player.team is not None and closest_player.team == team and closest_player.coord.euclidean_distance_from(
                self.ball.get_value().coord) < min_possession_distance:
            debug_msg("{0} has ball | player: {1}".format(team, closest_player), "HAS_BALL")
            return True
        debug_msg("{0} has ball".format("Team2"), "HAS_BALL")
        return False
    def get_all_known_players(self, team, max_data_age):
        """Return every sufficiently fresh observed player (both teams)."""
        all_players: [ObservedPlayer] = []
        all_players.extend(self.get_teammates(team, max_data_age))
        all_players.extend(self.get_opponents(team, max_data_age))
        return all_players
    def get_free_forward_team_mates(self, team, side, my_coord: Coordinate, max_data_age, min_distance_free,
                                    min_dist_from_me=3):
        """Free teammates ahead of *my_coord* in the attacking direction.

        ``side == "l"`` means the team attacks towards positive x.
        """
        free_team_mates: [ObservedPlayer] = self.get_free_team_mates(team, max_data_age, min_distance_free)
        debug_msg("Free_team_mates={0}".format(free_team_mates), "OFFSIDE")
        if side == "l":
            free_forward_team_mates = list(filter(
                lambda p: p.coord.pos_x > my_coord.pos_x and p.coord.euclidean_distance_from(
                    my_coord) > min_dist_from_me, free_team_mates))
        else:
            free_forward_team_mates = list(filter(
                lambda p: p.coord.pos_x < my_coord.pos_x and p.coord.euclidean_distance_from(
                    my_coord) > min_dist_from_me, free_team_mates))
        return free_forward_team_mates
    def get_non_offside_forward_team_mates(self, team, side, my_coord: Coordinate, max_data_age, min_distance_free,
                                           min_dist_from_me=1):
        """Forward, free teammates that are not in an offside position.

        A player is onside if he is behind the last known opponent or still
        inside his own half (``pos_x`` on the defending side of 0).
        """
        free_forward_team_mates: [ObservedPlayer] = self.get_free_forward_team_mates(team, side, my_coord, max_data_age,
                                                                                     min_distance_free,
                                                                                     min_dist_from_me)
        opponents: [ObservedPlayer] = self.get_opponents(team, max_data_age)
        # If no opponents are seen, no one is offside
        if len(opponents) < 1:
            debug_msg("free_forward_team_mates={0}".format(free_forward_team_mates), "OFFSIDE")
            return free_forward_team_mates
        reverse = True if side == "l" else False
        # Opponent deepest in their own half marks the offside line.
        furthest_behind_opponent: ObservedPlayer = \
            list(sorted(opponents, key=lambda p: p.coord.pos_x, reverse=reverse))[0]
        furthest_opp_x_pos = furthest_behind_opponent.coord.pos_x
        if side == "l":
            non_offside_players = list(filter(lambda p: (p.coord.pos_x < furthest_opp_x_pos
                                                         and p.coord.euclidean_distance_from(
                        my_coord) > min_dist_from_me)
                                                        or p.coord.pos_x < 0, free_forward_team_mates))
        else:
            non_offside_players = list(filter(lambda p: (p.coord.pos_x > furthest_opp_x_pos
                                                         and p.coord.euclidean_distance_from(
                        my_coord) > min_dist_from_me)
                                                        or p.coord.pos_x > 0, free_forward_team_mates))
        debug_msg(
            "Further_opp_x_pos={0} | free_forward_team_mates={1} | furthest_behind_opponent={2} | non_offisde_players={3}".format(
                furthest_opp_x_pos, free_forward_team_mates, furthest_behind_opponent, non_offside_players), "OFFSIDE")
        return non_offside_players
    def get_free_behind_team_mates(self, team, side, my_coord: Coordinate, max_data_age, min_distance_free,
                                   min_dist_from_me=3):
        """Free teammates behind *my_coord* relative to the attacking direction.

        NOTE(review): the "l" branch additionally excludes player number 1
        (presumably the goalkeeper) but the "r" branch does not — confirm
        whether the asymmetry is intentional.
        """
        free_team_mates: [ObservedPlayer] = self.get_free_team_mates(team, max_data_age, min_distance_free)
        if side == "l":
            free_behind_team_mates = list(filter(lambda p: p.coord.pos_x < my_coord.pos_x
                                                           and p.coord.euclidean_distance_from(
                my_coord) > min_dist_from_me
                                                           and p.num != 1, free_team_mates))
        else:
            free_behind_team_mates = list(filter(
                lambda p: p.coord.pos_x > my_coord.pos_x and p.coord.euclidean_distance_from(
                    my_coord) > min_dist_from_me, free_team_mates))
        return free_behind_team_mates
    def get_teammates_precarious(self, team, max_data_age, min_dist=0):
        """Fresh ``PrecariousData`` entries for players on *team* at least
        *min_dist* away."""
        precarious_filtered = filter(lambda x: (x.is_value_known(self.sim_time - max_data_age)
                                                and x.get_value().team == team) and x.get_value().distance >= min_dist
                                     , self.other_players)
        return list(precarious_filtered)
    def get_opponents_precarious(self, team, max_data_age, min_dist=0):
        """Fresh ``PrecariousData`` entries for players NOT on *team*
        (includes players whose team is unknown/None)."""
        precarious_filtered = filter(lambda x: (x.is_value_known(self.sim_time - max_data_age)
                                                and x.get_value().team != team) and x.get_value().distance >= min_dist
                                     , self.other_players)
        return list(precarious_filtered)
    def get_teammates(self, team, max_data_age, min_dist=0):
        """Unwrapped ``ObservedPlayer`` values for fresh teammates."""
        return list(map(lambda x: x.get_value(), self.get_teammates_precarious(team, max_data_age, min_dist)))
    def get_opponents(self, team, max_data_age, min_dist=0):
        """Unwrapped ``ObservedPlayer`` values for fresh opponents."""
        return list(map(lambda x: x.get_value(), self.get_opponents_precarious(team, max_data_age, min_dist)))
    def get_free_team_mates(self, team, max_data_age, min_distance=2) -> [ObservedPlayer]:
        """Teammates considered "free" of opponent marking.

        NOTE(review): a teammate is appended as soon as *one* opponent is
        farther than *min_distance*, i.e. he counts as free if ANY opponent
        is far away rather than ALL of them — verify that this is the
        intended definition of "free".
        """
        team_mates: [ObservedPlayer] = self.get_teammates(team, max_data_age=max_data_age)
        opponents: [ObservedPlayer] = self.get_opponents(team, max_data_age=max_data_age)
        debug_msg(
            "Team_mates={0} | opponents={1} | other_players:{2}".format(team_mates, opponents, self.other_players),
            "OFFSIDE")
        free_team_mates = []
        if len(opponents) < 1:
            return team_mates
        for team_mate in team_mates:
            for opponent in opponents:
                tm: ObservedPlayer = team_mate
                op: ObservedPlayer = opponent
                if tm.coord.euclidean_distance_from(op.coord) > min_distance and tm not in free_team_mates:
                    free_team_mates.append(tm)
        return free_team_mates
    def update_player_view(self, observed_player: ObservedPlayer):
        """Merge a newly observed player into the known-players list, keyed
        by (number, team); appends a new entry if the player is unknown."""
        for i, data_point in enumerate(self.other_players):
            p = data_point.get_value()
            if p.num == observed_player.num and p.team == observed_player.team:
                self.other_players[i].set_value(observed_player, self.sim_time)
                return
        # Add new data point if player does not already exist in list
        self.other_players.append(PrecariousData(observed_player, self.sim_time))
    def ball_speed(self):
        """Estimate ball speed from its two most recent known positions;
        returns 0 when both observations share the same timestamp."""
        t1 = self.ball.get_value().last_position.last_updated_time
        t2 = self.ball.last_updated_time
        if t1 == t2:
            return 0
        delta_time = t2 - t1
        distance = self.ball.get_value().coord.euclidean_distance_from(self.ball.get_value().last_position.get_value())
        speed = distance / delta_time
        return speed
| 2.578125 | 3 |
backend/api/views.py | ankitgehlot123/TheDevCom_ProjectChatroom | 0 | 12766470 | from django.shortcuts import render
from django.utils.safestring import mark_safe
from django.http import HttpResponse
import json
# Process-local registry of aliases already claimed.
# NOTE(review): not shared between workers and reset on restart — confirm
# that is acceptable for the deployment model.
alias_set = set()
def home(request):
    """Render the landing page."""
    return render(request, 'home.html', {})
def room(request, room_name, user):
    """Render the chat room page with the user's alias exposed to the
    template as a JSON literal (``room_name`` is bound by the URL route)."""
    context = {
        'alias': mark_safe(json.dumps(user))
    }
    return render(request, 'room.html', context)
def check_alias(request, alias):
    """Reserve *alias* if it is not already taken.

    Returns HTTP 400 when the alias exists, otherwise registers it and
    returns HTTP 200.

    NOTE(review): ``alias_set`` is unsynchronised, so two concurrent
    requests could both claim the same alias — confirm this is acceptable.
    """
    # Fixed: removed a leftover debug print of the whole alias registry.
    if alias in alias_set:
        return HttpResponse(status=400)
    alias_set.add(alias)
    return HttpResponse(status=200)
scripts/python/s2-c2-extract-schema.py | ashutoshsingh0223/freebase-triples | 177 | 12766471 | <filename>scripts/python/s2-c2-extract-schema.py
#!/usr/bin/env python
"""
Run with:
$ python this-script.py [path_to_input_file]
"""
import argparse
import datetime
import subprocess
import time
# Globals
# Note: path to the query file has been hardcoded here
# queries.txt file has a schema of [slice_title],[query]
# Fixed: use a context manager so the file handle is closed promptly
# instead of leaking until interpreter shutdown.
with open('queries/queries-schema-for-domains-types-properties') as _query_file:
    queries = _query_file.readlines()
def main(input_file):
    """ Run the main shell commands
    :param input_file: the path to the RDF file you want sliced according to the queries

    For each query row, gawk splits the current input into a "schema" slice
    (lines matching the query) and a "rest" file; the rest file then becomes
    the input for the next query, so the dump shrinks with every pass.

    NOTE(review): relies on the ``gawk`` and ``gdate`` executables being on
    PATH (GNU-prefixed names, typically macOS with coreutils/gawk installed)
    — confirm for the target environment.
    """
    query_count = 0
    fname_input = input_file
    fname_output = "slices-new/fb-rdf-schema-"
    fname_rest = "fb-rdf-rest-"
    for query in queries:
        query = query.split(",")
        query_title = query[0].strip().replace(".", "-")
        query_raw = query[1].strip()
        query_count += 1
        fname_output += query_title # Add the 1st column from the queries data to the title
        fname_rest += str(query_count) # Increment up the filename for the remainder data
        # gdate +"%s%3N" gives a millisecond-resolution epoch timestamp.
        t0 = subprocess.check_output(['gdate','+"%s%3N"'])
        # The gawk program appends matching lines to fname and everything
        # else to fname_rest; query_raw is spliced in as the condition.
        p = subprocess.Popen(['gawk',
                              "{ fname" + '="'+fname_output+'";' + ' fname_rest="' +fname_rest +'"; ' +
                              'if(' + query_raw + ')' + " { print $0 >> fname; } else { print $0 >> fname_rest; } }",
                              fname_input])
        p.communicate()
        t1 = subprocess.check_output(['gdate','+"%s%3N"'])
        # Show the runtime stats: initial time, finished time
        print(query_title + "\t" + t0.decode('ascii').strip() + "\t" + t1.decode('ascii').strip())
        # Reset some of the file names for the next loop
        fname_input = fname_rest
        fname_rest = "fb-rdf-rest-"
        fname_output = "slices-new/fb-rdf-schema-"
if __name__ == '__main__':
    # Command-line entry point: the single positional argument is the RDF
    # dump to slice according to the queries file loaded above.
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file', help='Path to the input data file')
    args = parser.parse_args()
    main(args.input_file)
jetbrains/project_parser.py | MrMitch/ulauncher-jetbrains | 0 | 12766472 | """
Parses the Jetbrains based IDEs recent projects list
"""
import glob
import os
import re
import xml.etree.ElementTree as ET
class RecentProjectsParser():
    """ Processes the "Recent projects" file from Jetbrains IDEs """

    # Maximum number of projects returned by parse() (was a magic number).
    MAX_RESULTS = 8

    @staticmethod
    def parse(file_path, query):
        """
        Parses the recent projects file passed as argument and returns a list of projects

        Each result is a dict with ``name``, ``path`` and ``icon`` keys,
        capped at ``MAX_RESULTS`` entries.

        @param str file_path The path to the file which holds the recent open projects by the IDE
        @param str query Optional search query to filter the results
        """
        if not os.path.isfile(file_path):
            return []

        root = ET.parse(file_path).getroot()

        recent_projects = root.findall(  # recent projects in products version 2020.2 and below
            './/component[@name="RecentProjectsManager"][1]/option[@name="recentPaths"]/list/option'
        ) + root.findall(  # recent directory projects in products version 2020.2 and below
            './/component[@name="RecentDirectoryProjectsManager"][1]/option[@name="recentPaths"]/list/option'
        ) + root.findall(  # projects in groups in products version 2020.3+
            './/component[@name="RecentProjectsManager"][1]/option[@name="groups"]/list/ProjectGroup/option'
            '[@name="projects"]/list/option'
        ) + root.findall(  # recent projects in products version 2020.3+
            './/component[@name="RecentProjectsManager"][1]/option[@name="additionalInfo"]/map/entry'
        )

        home = os.path.expanduser('~')
        query = query.lower() if query else ''

        # extract all the words (delimited by " " or "/") from the query.
        # we will match them against the title and the path of the project.
        words = [word.lower() for word in re.split('[ /]+', query)]

        result = []
        already_matched = []

        for project in recent_projects:
            title = ''
            # Pre-2020.3 entries store the path in "value", newer ones in "key".
            path = (project.attrib['value' if 'value' in project.attrib else 'key']).replace('$USER_HOME$', home)
            title_file = path + '/.idea/.name'

            if os.path.exists(title_file):
                # Be explicit about the encoding so the result does not depend
                # on the platform locale (IDE files are UTF-8 — TODO confirm).
                with open(title_file, 'r', encoding='utf-8') as file:
                    title = file.read().replace('\n', '').lower()

            icons = glob.glob(os.path.join(path, '.idea', 'icon.*'))

            # match all words from the query to the path and the title of the project
            matched_words = [word for word in words if word in '{} {}'.format(title, path)]
            if query and len(matched_words) < len(words):
                continue

            # prevent duplicate results, because from version 2020.3, a project can appear more than once in the XML
            # (in the option[@name="groups"] section and in the option[@name="additionalInfo"] section)
            if path in already_matched:
                continue

            already_matched.append(path)
            result.append({
                'name': title or os.path.basename(path).lower(),
                'path': path,
                'icon': icons[0] if len(icons) > 0 else None
            })

        return result[:RecentProjectsParser.MAX_RESULTS]
run.py | beenje/aiolegomac | 1 | 12766473 | from aiolegomac.app import run
run()
| 1.054688 | 1 |
torch/nn/quantized/_reference/modules/linear.py | CompPsy/pytorch | 3 | 12766474 | <reponame>CompPsy/pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Dict, Any
from .utils import _quantize_and_dequantize_weight
from .utils import _save_weight_qparams
from .utils import _get_weight_qparam_keys
class Linear(nn.Linear):
    """ A reference quantized linear module that fits into the FX
    Graph Mode Quantization workflow
    activation will be floating point Tensor, we will store floating
    point weight as well in the module, but in forward we'll quantize
    and dequantize the weight before running the floating point functional
    linear operator.
    """
    def __init__(
            self,
            in_features: int,
            out_features: int,
            bias_: bool = True,
            device: Optional[torch.device] = None,
            dtype: Optional[torch.dtype] = None,
            weight_qparams: Optional[Dict[str, Any]] = None):
        # weight_qparams holds "qscheme", "dtype", "scale", "zero_point"
        # (and "axis" for per-channel schemes); defaults below describe a
        # per-tensor affine quint8 weight.
        super().__init__(in_features, out_features, bias_, device, dtype)
        if weight_qparams is None:
            weight_qparams = {
                "qscheme": torch.per_tensor_affine,
                "dtype": torch.quint8,
                "scale": 1.0,
                "zero_point": 0
            }
        self.weight_qscheme = weight_qparams["qscheme"]
        self.weight_dtype = weight_qparams["dtype"]
        # The Exception here is only used as the assertion message object.
        assert self.weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \
            Exception(f"qscheme: {self.weight_qscheme} is not support in reference quantized linear module")
        if self.weight_qscheme is not None:
            # Buffers (not Parameters): qparams are serialized with the
            # state dict but are not trained.
            self.register_buffer(
                "weight_scale",
                torch.tensor(weight_qparams["scale"], dtype=torch.float, device=device))
            self.register_buffer(
                "weight_zero_point",
                torch.tensor(
                    weight_qparams["zero_point"],
                    dtype=torch.int, device=device))
            if self.weight_qscheme == torch.per_channel_affine:
                self.register_buffer(
                    "weight_axis",
                    torch.tensor(weight_qparams["axis"], dtype=torch.int, device=device))
            else:
                # added for TorchScriptability, not used
                self.register_buffer(
                    "weight_axis",
                    torch.tensor(0, dtype=torch.int, device=device))
    def _get_name(self):
        return "QuantizedLinear(Reference)"
    def get_weight(self):
        """
        Fake quantize (quantize and dequantize) the weight with
        the quantization parameters for weight, this is used to
        simulate the numerics for the quantized weight in a quantized
        model
        """
        # supress mypy warning
        assert isinstance(self.weight, torch.Tensor)
        assert isinstance(self.weight_scale, torch.Tensor)
        assert isinstance(self.weight_zero_point, torch.Tensor)
        assert isinstance(self.weight_axis, torch.Tensor)
        return _quantize_and_dequantize_weight(
            self.weight, self.weight_qscheme, self.weight_dtype, self.weight_scale,
            self.weight_zero_point, self.weight_axis)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.linear ---

        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant --  *F.linear --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized linear
        """
        weight_dequant = self.get_weight()
        result = F.linear(x, weight_dequant, self.bias)
        return result
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Persist the weight qparams alongside the regular Linear state.
        super()._save_to_state_dict(destination, prefix, keep_vars)
        _save_weight_qparams(
            destination, prefix, self.weight_qscheme, self.weight_dtype,
            self.weight_scale, self.weight_zero_point, self.weight_axis)
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Pop the qparam keys first so the base-class loader (called with
        # strict=False) does not report them as unexpected.
        for key in _get_weight_qparam_keys(state_dict, prefix):
            setattr(self, key, state_dict[prefix + key])
            state_dict.pop(prefix + key)
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, False,
            missing_keys, unexpected_keys, error_msgs)
    @classmethod
    def from_float(cls, float_linear, weight_qparams):
        """Build a reference quantized Linear from a float nn.Linear plus
        the observed weight quantization parameters."""
        qref_linear = Linear(
            float_linear.in_features, float_linear.out_features,
            float_linear.bias is not None, device=float_linear.weight.device,
            dtype=float_linear.weight.dtype, weight_qparams=weight_qparams)
        qref_linear.weight = torch.nn.Parameter(float_linear.weight.detach())
        if float_linear.bias is not None:
            qref_linear.bias = torch.nn.Parameter(float_linear.bias.detach())
        return qref_linear
| 2.078125 | 2 |
tests/unit/test_gherkin_formatter.py | radish-bdd/radish2 | 0 | 12766475 | <filename>tests/unit/test_gherkin_formatter.py
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import re
import textwrap
import datetime
import colorful as cf
import pytest
from radish.formatters.gherkin import (
write_feature_footer,
write_feature_header,
write_rule_header,
write_scenario_footer,
write_scenario_header,
write_step,
write_step_running,
write_step_result,
write_tagline,
write_summary,
)
from radish.models import (
Background,
DefaultRule,
Feature,
Rule,
Scenario,
State,
Step,
Tag,
)
@pytest.fixture(name="disabled_colors", scope="function")
def disable_ansi_colors(world_default_config):
    """Fixture to disable ANSI colors"""
    # Remember colorful's mode so it can be restored after the test.
    orig_colormode = cf.colormode
    cf.disable()
    world_default_config.no_ansi = True
    yield
    # NOTE(review): only the colorful mode is restored here — the no_ansi
    # flag on the shared config stays set; confirm the config fixture is
    # recreated per test.
    cf.colormode = orig_colormode
def dedent_feature_file(contents):
    """Dedent *contents* and drop its first (empty) line, guaranteeing the
    result ends with exactly one trailing newline character."""
    body_lines = textwrap.dedent(contents).splitlines()[1:]
    return "{}\n".format("\n".join(body_lines))
def assert_output(capsys, expected_stdout):
    """Assert that the captured stdout matches *expected_stdout*.

    Every line of *expected_stdout* is treated as a regular expression that
    must fully match the corresponding captured line.

    NOTE(review): ``zip`` stops at the shorter sequence, so extra or missing
    trailing lines go undetected — preserved as-is to avoid breaking
    existing tests, but worth tightening.
    """
    actual_stdout = capsys.readouterr().out
    for actual_stdout_line, expected_stdout_line in zip(
        actual_stdout.splitlines(), expected_stdout.splitlines()
    ):
        # Fixed: the old failure message claimed "expected == actual" even
        # though it fires precisely when they do NOT match.
        assert re.match(
            "^" + expected_stdout_line + "$", actual_stdout_line
        ), "expected pattern {!r} does not match {!r}".format(
            expected_stdout_line, actual_stdout_line
        )
def test_gf_write_tag_after_an_at_sign(disabled_colors, capsys, mocker):
    """Test that the Gherkin Formatter writes a Tag after the @-sign on a single line"""
    # given
    tag = mocker.MagicMock(spec=Tag)
    tag.name = "tag-a"
    # when
    write_tagline(tag)
    # then: "@" prefix, tag name, trailing newline, no indentation
    stdout = capsys.readouterr().out
    assert stdout == "@tag-a\n"
def test_gf_write_tag_with_given_indentation(disabled_colors, capsys, mocker):
    """Test that the Gherkin Formatter writes a Tag with the given indentation"""
    # given
    tag = mocker.MagicMock(spec=Tag)
    tag.name = "tag-a"
    indentation = " " * 4
    # when
    write_tagline(tag, indentation)
    # then: the explicit indentation is prepended verbatim
    stdout = capsys.readouterr().out
    assert stdout == "    @tag-a\n"
def test_gf_write_feature_header_without_tags_without_description_without_background(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    with no Tags, no description and no Background
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = []
    feature.tags = []
    feature.background = None
    # when
    write_feature_header(feature)
    # then: only the single "keyword: short description" line is emitted
    stdout = capsys.readouterr().out
    assert stdout == "Feature: My Feature\n"
def test_gf_write_feature_header_with_tags_without_description_without_background(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    with Tags, but no description and no Background
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = []
    first_tag = mocker.MagicMock(spec=Tag)
    first_tag.name = "tag-a"
    second_tag = mocker.MagicMock(spec=Tag)
    second_tag.name = "tag-b"
    feature.tags = [first_tag, second_tag]
    feature.background = None
    # when
    write_feature_header(feature)
    # then: tags are written in declaration order, before the header line
    stdout = capsys.readouterr().out
    assert stdout == dedent_feature_file(
        """
        @tag-a
        @tag-b
        Feature: My Feature
        """
    )
def test_gf_write_feature_header_without_tags_with_description_without_background(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    without Tags and Background, but description
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = ["foo", "bar", "bla"]
    feature.tags = []
    feature.background = None
    # when
    write_feature_header(feature)
    # then: description lines are indented one level under the header
    stdout = capsys.readouterr().out
    assert stdout == dedent_feature_file(
        """
        Feature: My Feature
            foo
            bar
            bla
        """
    )
def test_gf_write_feature_header_without_description_with_empty_background_no_short_description(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    without Tags and Description, but with an empty Background with no short description
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = []
    feature.tags = []
    feature.background = mocker.MagicMock(spec=Background)
    feature.background.keyword = "Background"
    feature.background.short_description = None
    feature.background.steps = []
    # when
    write_feature_header(feature)
    # then: "[ ]" in the regex allows a trailing space after the colon
    assert_output(
        capsys,
        dedent_feature_file(
            """
            Feature: My Feature
            Background:[ ]
            """
        ),
    )
def test_gf_write_feature_header_with_description_with_empty_background_no_short_description(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    without Tags, but Description and an empty Background with no short description
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = ["foo", "bar", "bla"]
    feature.tags = []
    feature.background = mocker.MagicMock(spec=Background)
    feature.background.keyword = "Background"
    feature.background.short_description = None
    feature.background.steps = []
    # when
    write_feature_header(feature)
    # then: description precedes the Background keyword line
    assert_output(
        capsys,
        dedent_feature_file(
            """
            Feature: My Feature
                foo
                bar
                bla
            Background:[ ]
            """
        ),
    )
def test_gf_write_feature_header_empty_background_with_short_description(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    without Tags and Description but an empty Background with short description
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = []
    feature.tags = []
    feature.background = mocker.MagicMock(spec=Background)
    feature.background.keyword = "Background"
    feature.background.short_description = "My Background"
    feature.background.steps = []
    # when
    write_feature_header(feature)
    # then: the Background's short description follows its keyword
    assert_output(
        capsys,
        dedent_feature_file(
            """
            Feature: My Feature
            Background: My Background
            """
        ),
    )
def test_gf_write_feature_header_background_with_steps(disabled_colors, capsys, mocker):
    """
    Test that the Gherkin Formatter properly writes a Feature header
    without Tags and Description but a Background with Steps
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.keyword = "Feature"
    feature.short_description = "My Feature"
    feature.description = []
    feature.tags = []
    feature.background = mocker.MagicMock(spec=Background)
    feature.background.keyword = "Background"
    feature.background.short_description = "My Background"
    first_step = mocker.MagicMock(
        spec=Step,
        keyword="Given",
        used_keyword="Given",
        text="there is a Step",
        doc_string=None,
        data_table=None,
    )
    second_step = mocker.MagicMock(
        spec=Step,
        keyword="When",
        used_keyword="When",
        text="there is a Step",
        doc_string=None,
        data_table=None,
    )
    feature.background.steps = [first_step, second_step]
    # when
    write_feature_header(feature)
    # then: background steps are indented one level under the Background line
    assert_output(
        capsys,
        dedent_feature_file(
            """
            Feature: My Feature
            Background: My Background
                Given there is a Step
                When there is a Step
            """
        ),
    )
def test_gf_write_feature_footer_blank_line_if_no_description_and_no_rules(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter writes a blank line after a Feature
    without a description and Rules
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.description = []
    feature.rules = []
    # when
    write_feature_footer(feature)
    # then: exactly one blank line separates this Feature from the next
    stdout = capsys.readouterr().out
    assert stdout == "\n"
def test_gf_write_feature_footer_no_blank_line_if_description(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter writes no blank line after a Feature
    with a Description
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.description = ["foo"]
    feature.rules = []
    # when
    write_feature_footer(feature)
    # then: nothing at all is written
    stdout = capsys.readouterr().out
    assert stdout == ""
def test_gf_write_feature_footer_no_blank_line_if_rules(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter writes no blank line after a Feature
    with a Rule
    """
    # given
    feature = mocker.MagicMock(spec=Feature)
    feature.description = []
    feature.rules = ["foo"]
    # when
    write_feature_footer(feature)
    # then: nothing at all is written
    stdout = capsys.readouterr().out
    assert stdout == ""
def test_gf_write_rule_header(disabled_colors, capsys, mocker):
    """Test that the Gherkin Formatter properly writes a Rule"""
    # given
    rule = mocker.MagicMock(spec=Rule)
    rule.keyword = "Rule"
    rule.short_description = "My Rule"
    # when
    write_rule_header(rule)
    # then: the Rule line is indented one level (4 spaces) under the Feature
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>    )Rule: My Rule
            """
        ),
    )
def test_gf_write_rule_header_nothing_for_default_rule(disabled_colors, capsys, mocker):
    """Test that the Gherkin Formatter writes no Rule header for a DefaultRule"""
    # given
    rule = mocker.MagicMock(spec=DefaultRule)
    # when
    write_rule_header(rule)
    # then: the implicit DefaultRule must stay invisible in the output
    stdout = capsys.readouterr().out
    assert stdout == ""
@pytest.mark.parametrize(
    "given_rule_type, expected_indentation",
    [(DefaultRule, " " * 4), (Rule, " " * 8)],
    ids=["DefaultRule", "Rule"],
)
def test_gf_write_scenario_header_without_tags(
    given_rule_type, expected_indentation, disabled_colors, capsys, mocker
):
    """Test that the Gherkin Formatter properly formats a Scenario Header without Tags"""
    # given
    scenario = mocker.MagicMock(spec=Scenario)
    scenario.keyword = "Scenario"
    scenario.rule = mocker.MagicMock(spec=given_rule_type)
    scenario.short_description = "My Scenario"
    scenario.tags = []
    # when
    write_scenario_header(scenario)
    # then: Scenarios under an explicit Rule are indented one extra level
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>{indentation})Scenario: My Scenario
            """.format(
                indentation=expected_indentation
            )
        ),
    )
@pytest.mark.parametrize(
    "given_rule_type, expected_indentation",
    [(DefaultRule, " " * 4), (Rule, " " * 8)],
    ids=["DefaultRule", "Rule"],
)
def test_gf_write_scenario_header_with_tags(
    given_rule_type, expected_indentation, disabled_colors, capsys, mocker
):
    """Test that the Gherkin Formatter properly formats a Scenario Header with Tags"""
    # given
    scenario = mocker.MagicMock(spec=Scenario)
    scenario.keyword = "Scenario"
    scenario.rule = mocker.MagicMock(spec=given_rule_type)
    scenario.short_description = "My Scenario"
    first_tag = mocker.MagicMock(spec=Tag)
    first_tag.name = "tag-a"
    second_tag = mocker.MagicMock(spec=Tag)
    second_tag.name = "tag-b"
    scenario.tags = [first_tag, second_tag]
    # when
    write_scenario_header(scenario)
    # then: tag lines share the Scenario's indentation level
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>{indentation})@tag-a
            (?P<indentation>{indentation})@tag-b
            (?P<indentation>{indentation})Scenario: My Scenario
            """.format(
                indentation=expected_indentation
            )
        ),
    )
def test_gf_write_scenario_footer_always_a_blank_line(disabled_colors, capsys, mocker):
    """Test that the Gherkin Formatter always writes a blank line after a Scenario"""
    # given
    scenario = mocker.MagicMock(spec=Scenario)
    # when
    write_scenario_footer(scenario)
    # then: unconditionally a single blank separator line
    stdout = capsys.readouterr().out
    assert stdout == "\n"
@pytest.mark.parametrize(
    "given_rule_type, expected_indentation",
    [(DefaultRule, " " * 8), (Rule, " " * 12)],
    ids=["DefaultRule", "Rule"],
)
def test_gf_write_step_without_doc_string_with_data_table(
    given_rule_type, expected_indentation, disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly formats a Step with a data table and
    without a doc string
    """
    # given
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.doc_string = None
    step.data_table = [["foo", "bar"]]
    step.rule = mocker.MagicMock(spec=given_rule_type)
    # when
    write_step(step, step_color_func=lambda x: x)
    # then: the data table row is indented one level below the Step line
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>{indentation})Given there is a Step
            (?P<indentation>{indentation}    )| foo | bar |
            """.format(
                indentation=expected_indentation
            )
        ),
    )
@pytest.mark.parametrize(
    "given_rule_type, expected_indentation",
    [(DefaultRule, " " * 8), (Rule, " " * 12)],
    ids=["DefaultRule", "Rule"],
)
def test_gf_write_step_without_doc_string_without_data_table(
    given_rule_type, expected_indentation, disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly formats a Step without a doc string and data table
    """
    # given
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.doc_string = None
    step.data_table = None
    step.rule = mocker.MagicMock(spec=given_rule_type)
    # when
    write_step(step, step_color_func=lambda x: x)
    # then: only the single Step line is written
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>{indentation})Given there is a Step
            """.format(
                indentation=expected_indentation
            )
        ),
    )
def test_gf_write_step_explicit_indentation_without_doc_string_without_data_table(
    disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly formats a Step with an explicit indentation
    but without a doc string and data table
    """
    # given
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.doc_string = None
    step.data_table = None
    # when: the explicit indentation overrides the rule-based default
    write_step(step, step_color_func=lambda x: x, indentation=" ")
    # then
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation> )Given there is a Step
            """
        ),
    )
@pytest.mark.parametrize(
    "given_rule_type, expected_indentation",
    [(DefaultRule, " " * 8), (Rule, " " * 12)],
    ids=["DefaultRule", "Rule"],
)
def test_gf_write_step_with_doc_string_without_data_table(
    given_rule_type, expected_indentation, disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly formats a Step with a doc string
    but without a data table
    """
    # given
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.doc_string = """foo
    bar
    bla
    """
    step.data_table = None
    step.rule = mocker.MagicMock(spec=given_rule_type)
    # when
    write_step(step, step_color_func=lambda x: x)
    # then: the doc string is wrapped in triple quotes one level deeper
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>{indentation})Given there is a Step
            (?P<indentation>{indentation}    )\"\"\"
            (?P<indentation>{indentation}    )foo
            (?P<indentation>{indentation}    )bar
            (?P<indentation>{indentation}    )bla
            (?P<indentation>{indentation}    )\"\"\"
            """.format(
                indentation=expected_indentation
            )
        ),
    )
@pytest.mark.parametrize(
    "given_rule_type, expected_indentation",
    [(DefaultRule, " " * 8), (Rule, " " * 12)],
    ids=["DefaultRule", "Rule"],
)
def test_gf_write_step_with_doc_string_keep_indentation_without_data_table(
    given_rule_type, expected_indentation, disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter properly formats a Step with a doc string
    that has an indentation itself
    but without a data table
    """
    # given
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.doc_string = """foo
    bar
        meh
    bla
    """
    step.data_table = None
    step.rule = mocker.MagicMock(spec=given_rule_type)
    # when
    write_step(step, step_color_func=lambda x: x)
    # then: relative indentation inside the doc string is preserved
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation>{indentation})Given there is a Step
            (?P<indentation>{indentation}    )\"\"\"
            (?P<indentation>{indentation}    )foo
            (?P<indentation>{indentation}    ) bar
            (?P<indentation>{indentation}    ) meh
            (?P<indentation>{indentation}    )bla
            (?P<indentation>{indentation}    )\"\"\"
            """.format(
                indentation=expected_indentation
            )
        ),
    )
@pytest.mark.parametrize(
    "step_state, expected_color",
    [
        pytest.param(State.PASSED, cf.forestGreen, id="State.PASSED => cf.forestGreen"),
        pytest.param(State.FAILED, cf.firebrick, id="State.FAILED => cf.firebrick"),
        pytest.param(State.PENDING, cf.orange, id="State.PENDING => cf.orange"),
        pytest.param(
            State.UNTESTED, cf.deepSkyBlue3, id="State.UNTESTED => cf.deepSkyBlue3"
        ),
    ],
)
def test_gf_write_step_result_without_failure_report(
    step_state, expected_color, world_default_config, disabled_colors, capsys, mocker
):
    """Test that the Gherkin Formatter properly formats a Step result without a Failure Report"""
    # given
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.state = step_state
    step.failure_report = None
    # write_step is patched so only the state-to-color mapping is asserted
    write_step_mock = mocker.patch("radish.formatters.gherkin.write_step")
    # when
    write_step_result(step)
    # then
    write_step_mock.assert_called_once_with(step, expected_color)
def test_gf_write_and_as_keyword_if_not_first_step_of_keyword_context(
    disabled_colors, capsys, mocker
):
    """The Gherkin Formatter writes the ``And`` keyword instead of the Step's
    own keyword when the Step is not the first one of its keyword context."""

    # local factory so both Steps are configured identically except for the
    # used keyword and text
    def make_step(used_keyword, text):
        step = mocker.MagicMock(spec=Step)
        step.keyword = "Given"
        step.used_keyword = used_keyword
        step.text = text
        step.doc_string = None
        step.data_table = None
        step.rule = mocker.MagicMock(spec=DefaultRule)
        return step

    # given two consecutive Steps in the same ``Given`` context
    first_step = make_step("Given", "there is the first Step")
    second_step = make_step("And", "there is the second Step")

    # when both Steps are written
    write_step(first_step, step_color_func=lambda x: x)
    write_step(second_step, step_color_func=lambda x: x)

    # then the second Step is rendered with ``And``
    assert_output(
        capsys,
        dedent_feature_file(
            """
            (?P<indentation> )Given there is the first Step
            (?P<indentation> )And there is the second Step
            """
        ),
    )
def test_gf_write_summary(disabled_colors, capsys, mocker):
    """The Gherkin Formatter writes an aggregate summary (Feature, Scenario
    and Step counts plus the total run duration) for the given Features."""
    # given a single passed hierarchy, built bottom-up: Step -> Scenario -> Rule -> Feature
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.state = State.PASSED
    step.text = "there is a Step 1"

    scenario = mocker.MagicMock(spec=Scenario)
    scenario.keyword = "Scenario"
    scenario.state = State.PASSED
    scenario.short_description = "My Scenario"
    scenario.steps = [step]

    rule = mocker.MagicMock(spec=Rule)
    rule.short_description = "My Rule"
    rule.state = State.PASSED
    rule.scenarios = [scenario]

    feature = mocker.MagicMock(spec=Feature)
    feature.short_description = "My Feature"
    feature.keyword = "Feature"
    feature.state = State.PASSED
    feature.rules = [rule]

    # stub the duration aggregation so the summary always reports 0.0 seconds
    mocker.patch("radish.formatters.gherkin.sum", return_value=datetime.timedelta())

    # when the summary is written
    write_summary([feature])

    # then the counts and the run duration are reported
    captured_stdout = capsys.readouterr().out
    assert captured_stdout == dedent_feature_file(
        """
        1 Feature (1 passed)
        1 Scenario (1 passed)
        1 Step (1 passed)
        Run None finished within 0.0 seconds
        """
    )
@pytest.mark.parametrize(
    "step_state, expected_color",
    [
        pytest.param(State.PASSED, cf.forestGreen, id="State.PASSED => cf.forestGreen"),
        pytest.param(State.FAILED, cf.firebrick, id="State.FAILED => cf.firebrick"),
        pytest.param(State.PENDING, cf.orange, id="State.PENDING => cf.orange"),
        pytest.param(
            State.UNTESTED, cf.deepSkyBlue3, id="State.UNTESTED => cf.deepSkyBlue3"
        ),
    ],
)
def test_gf_write_step_running(
    step_state, expected_color, disabled_colors, capsys, mocker
):
    """
    Test that the Gherkin Formatter writes a running Step by delegating to
    ``write_step`` with the color function matching the Step's state.
    """
    # given a Step mock in the parametrized state, with neither a doc string
    # nor a data table attached
    step = mocker.MagicMock(spec=Step)
    step.keyword = "Given"
    step.used_keyword = "Given"
    step.text = "there is a Step"
    step.doc_string = None
    step.data_table = None
    step.state = step_state
    step.rule = mocker.MagicMock(spec=Rule)
    write_step_mock = mocker.patch("radish.formatters.gherkin.write_step")
    # when
    write_step_running(step)
    # then write_step was invoked once with the state's color function
    write_step_mock.assert_called_once_with(step, expected_color)
| 2.25 | 2 |
src/misc/gpu_running.py | Brunopaes/python-sandbox | 5 | 12766476 | <reponame>Brunopaes/python-sandbox<filename>src/misc/gpu_running.py
#!-*- coding: utf8 -*-
from numba import jit
import datetime
import numpy
def func(a):
    # Baseline: increment every element with a plain interpreted Python loop.
    # Kept as an explicit index loop so it mirrors the JIT-compiled variant
    # and the benchmark compares like-for-like.
    for i in range(len(a)):
        a[i] += 1
@jit
def func2(a):
    # Same loop as func, but compiled by Numba's @jit on first call.
    # NOTE(review): a bare @jit targets the CPU, not the GPU (GPU execution
    # would use @cuda.jit) -- the "with GPU" label printed by the caller is
    # misleading; confirm intent.
    for i in range(len(a)):
        a[i] += 1
if __name__ == "__main__":
    # 1e9 float64 elements ~= 8 GB of RAM; the array is large so the loop
    # runtime dominates the one-off JIT compilation overhead.
    n_ = numpy.ones(1000000000, dtype=numpy.float64)
    start = datetime.datetime.now()
    func(n_)
    # "without GPU" really means "without JIT": func runs as plain Python.
    print("without GPU:", datetime.datetime.now() - start)
    start = datetime.datetime.now()
    func2(n_)
    # First call includes Numba's compile time; @jit compiles for the CPU,
    # so "with GPU" is a misnomer here.
    print("with GPU:", datetime.datetime.now() - start)
| 2.96875 | 3 |
tests/test_http_asgi.py | jessewmc/azure-functions-python-library | 0 | 12766477 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
import azure.functions as func
from azure.functions._http_asgi import (
AsgiMiddleware
)
class MockAsgiApplication:
    """Minimal ASGI application double.

    Validates the scope and the received request message against the ASGI
    HTTP specification, then replies with the canned response configured via
    the class attributes below (tests override them per case).
    """
    # Canned response returned by __call__.
    response_code = 200
    response_body = b''
    response_headers = [
        [b"content-type", b"text/plain"],
    ]

    async def __call__(self, scope, receive, send):
        # Keep the scope so tests can inspect what the middleware passed in.
        self.received_scope = scope
        # Verify against ASGI specification
        assert scope['type'] == 'http'
        assert isinstance(scope['type'], str)
        assert scope['asgi.spec_version'] in ['2.0', '2.1']
        assert isinstance(scope['asgi.spec_version'], str)
        assert scope['asgi.version'] in ['2.0', '2.1', '2.2']
        assert isinstance(scope['asgi.version'], str)
        assert scope['http_version'] in ['1.0', '1.1', '2']
        assert isinstance(scope['http_version'], str)
        assert scope['method'] in ['POST', 'GET', 'PUT', 'DELETE', 'PATCH']
        assert isinstance(scope['method'], str)
        assert scope['scheme'] in ['http', 'https']
        assert isinstance(scope['scheme'], str)
        assert isinstance(scope['path'], str)
        assert isinstance(scope['raw_path'], bytes)
        assert isinstance(scope['query_string'], bytes)
        assert isinstance(scope['root_path'], str)
        assert hasattr(scope['headers'], '__iter__')
        # Header names and values must be byte strings.
        for k, v in scope['headers']:
            assert isinstance(k, bytes)
            assert isinstance(v, bytes)
        # client/server are optional (host, port) two-item iterables.
        assert scope['client'] is None or hasattr(scope['client'], '__iter__')
        if scope['client']:
            assert len(scope['client']) == 2
            assert isinstance(scope['client'][0], str)
            assert isinstance(scope['client'][1], int)
        assert scope['server'] is None or hasattr(scope['server'], '__iter__')
        if scope['server']:
            assert len(scope['server']) == 2
            assert isinstance(scope['server'][0], str)
            assert isinstance(scope['server'][1], int)

        # Pull the single http.request message and validate its shape.
        self.received_request = await receive()
        assert self.received_request['type'] == 'http.request'
        assert isinstance(self.received_request['body'], bytes)
        assert isinstance(self.received_request['more_body'], bool)

        # Send the canned response: start message, then one body message.
        await send(
            {
                "type": "http.response.start",
                "status": self.response_code,
                "headers": self.response_headers,
            }
        )
        await send(
            {
                "type": "http.response.body",
                "body": self.response_body,
            }
        )
class TestHttpAsgiMiddleware(unittest.TestCase):
    """Tests for AsgiMiddleware bridging Azure Functions HTTP triggers to an
    ASGI application."""

    def _generate_func_request(
        self,
        method="POST",
        url="https://function.azurewebsites.net/api/http?firstname=rt",
        headers=None,
        params=None,
        route_params=None,
        body=b'{ "lastname": "tsang" }'
    ) -> func.HttpRequest:
        """Build a func.HttpRequest with sensible test defaults.

        The dict arguments default to None and are materialized inside the
        method; the previous mutable-default-argument dicts were shared
        across calls (a classic Python pitfall).
        """
        if headers is None:
            headers = {
                "Content-Type": "application/json",
                "x-ms-site-restricted-token": "xmsrt"
            }
        if params is None:
            params = {
                "firstname": "roger"
            }
        if route_params is None:
            route_params = {}
        return func.HttpRequest(
            method=method,
            url=url,
            headers=headers,
            params=params,
            route_params=route_params,
            body=body
        )

    def _generate_func_context(
        self,
        invocation_id='123e4567-e89b-12d3-a456-426655440000',
        function_name='httptrigger',
        function_directory='/home/roger/wwwroot/httptrigger'
    ) -> func.Context:
        """Build a minimal func.Context implementation for the tests."""
        class MockContext(func.Context):
            def __init__(self, ii, fn, fd):
                self._invocation_id = ii
                self._function_name = fn
                self._function_directory = fd

            @property
            def invocation_id(self):
                return self._invocation_id

            @property
            def function_name(self):
                return self._function_name

            @property
            def function_directory(self):
                return self._function_directory

        return MockContext(invocation_id, function_name, function_directory)

    def test_middleware_calls_app(self):
        """handle(req) without a context forwards to the ASGI app."""
        app = MockAsgiApplication()
        test_body = b'Hello world!'
        app.response_body = test_body
        app.response_code = 200
        req = func.HttpRequest(method='get', url='/test', body=b'')
        response = AsgiMiddleware(app).handle(req)

        # Verify asserted
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get_body(), test_body)

    def test_middleware_calls_app_with_context(self):
        """Test if the middleware can be used by exposing the .handle method,
        specifically when the middleware is used as
        def main(req, context):
            return AsgiMiddleware(app).handle(req, context)
        """
        app = MockAsgiApplication()
        test_body = b'Hello world!'
        app.response_body = test_body
        app.response_code = 200
        req = self._generate_func_request()
        ctx = self._generate_func_context()
        response = AsgiMiddleware(app).handle(req, ctx)

        # Verify asserted
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get_body(), test_body)

    def test_middleware_wrapper(self):
        """Test if the middleware can be used by exposing the .main property,
        specifically when the middleware is used as
        main = AsgiMiddleware(app).main
        """
        app = MockAsgiApplication()
        test_body = b'Hello world!'
        app.response_body = test_body
        app.response_code = 200
        req = self._generate_func_request()
        ctx = self._generate_func_context()
        main = AsgiMiddleware(app).main
        response = main(req, ctx)

        # Verify asserted
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get_body(), test_body)
| 2.515625 | 3 |
src/backend/hacker_gif_poll/graphql_api/tenor/resolvers/hacker_gif.py | guligon90/hacker-gif-poll | 0 | 12766478 | <filename>src/backend/hacker_gif_poll/graphql_api/tenor/resolvers/hacker_gif.py
# Project imports
from api.tenor import get_hacker_gifs
from graphql_api.tenor.schemas.hacker_gif.result import Result
def map_graphql_args_to_api_filters(args):
    """
    Maps the GraphQL query arguments to the corresponding
    query string filters accepted by the Tenor API.

    Unknown argument names are passed through unchanged. (Previously a name
    missing from the mapping table was collapsed onto a single ``None`` key.)
    """
    # At the moment, only the 'query' parameter has to be renamed -- to 'q',
    # which is what the Tenor API accepts. The table is kept general so that
    # future renames only need a new entry here.
    filter_switcher = {
        'query': 'q',
        'limit': 'limit',
        'key': 'key',
    }

    # Fall back to the original name for arguments not in the table.
    return {filter_switcher.get(key, key): value for key, value in args.items()}
def resolve_hacker_gifs(root, info, **args):
    """GraphQL resolver: fetch hacker GIFs from the Tenor API.

    Returns a ``Result`` built from the API payload, or ``None`` when the
    API returned a falsy payload.
    """
    try:
        params = map_graphql_args_to_api_filters(args)
        result_from_api = get_hacker_gifs(params)

        if result_from_api:
            return Result.create_from_data(result_from_api)
    except Exception as exc:
        # Re-wrap as a plain Exception (GraphQL surfaces only the message),
        # but chain the original so the full traceback is preserved in logs.
        raise Exception(str(exc)) from exc
| 2.296875 | 2 |
find_a_supplier/migrations/0067_auto_20181012_1507.py | kaedroho/dit-directory-cms | 0 | 12766479 | <reponame>kaedroho/dit-directory-cms
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-10-12 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11.15).

    Redefines several CharFields on the find_a_supplier page models with
    explicit ``max_length`` constraints.

    NOTE: generated file -- should normally not be edited by hand.
    """

    dependencies = [
        ('find_a_supplier', '0066_auto_20180830_0632'),
    ]

    operations = [
        migrations.AlterField(
            model_name='industryarticlepage',
            name='breadcrumbs_label',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='industryarticlepage',
            name='call_to_action_text',
            field=models.CharField(max_length=500),
        ),
        migrations.AlterField(
            model_name='industryarticlepage',
            name='introduction_title',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='industrycontactpage',
            name='breadcrumbs_label',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='industrylandingpage',
            name='hero_title',
            field=models.CharField(max_length=500),
        ),
        migrations.AlterField(
            model_name='industrypage',
            name='breadcrumbs_label',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='industrypage',
            name='company_list_call_to_action_text',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='landingpage',
            name='breadcrumbs_label',
            field=models.CharField(max_length=50),
        ),
    ]
| 1.554688 | 2 |
asgard/workers/autoscaler/simple_decision_component.py | rockerbacon/asgard-api | 0 | 12766480 | <reponame>rockerbacon/asgard-api
from typing import List
from asgard.conf import settings
from asgard.workers.autoscaler.decision_component_interface import (
DecisionComponentInterface,
)
from asgard.workers.autoscaler.decision_events import DecisionEvents
from asgard.workers.models.decision import Decision
from asgard.workers.models.scalable_app import ScalableApp
from hollowman.log import logger as default_logger
class DecisionComponent(DecisionComponentInterface):
    """Decides CPU/memory scaling actions for apps based on usage thresholds.

    The CPU and memory branches were previously two near-identical ~40 line
    blocks; they are now one parameterized helper so the scaling rule lives
    in a single place.
    """

    def __init__(self, logger=default_logger):
        self.logger = logger

    def _decide_resource(
        self,
        app_id,
        usage,
        threshold,
        allocated,
        min_limit,
        max_limit,
        scale_up_event,
        scale_down_event,
        scale_none_event,
    ):
        """Compute the new allocation for a single resource (CPU or memory).

        Returns the new (clamped) allocation when usage deviates from the
        threshold by more than the configured margin, or ``None`` when no
        scaling is needed. Logs the outcome either way.
        """
        if abs(usage - threshold) <= settings.AUTOSCALER_MARGIN_THRESHOLD:
            self.logger.debug(
                {
                    "appname": app_id,
                    "event": scale_none_event,
                    "reason": "usage within accepted margin",
                    "usage": usage,
                    "threshold": threshold,
                    "accepted_margin": settings.AUTOSCALER_MARGIN_THRESHOLD,
                }
            )
            return None

        # Scale the allocation proportionally so projected usage lands on the
        # threshold, then clamp to the app's configured limits (same
        # min-before-max precedence as the original conditional chain).
        new_value = (usage * allocated) / threshold
        if new_value < min_limit:
            new_value = min_limit
        elif new_value > max_limit:
            new_value = max_limit

        event = scale_down_event if allocated > new_value else scale_up_event
        self.logger.info(
            {
                "appname": app_id,
                "event": event,
                "previous_value": allocated,
                "new_value": new_value,
            }
        )
        return new_value

    def decide_scaling_actions(self, apps: List[ScalableApp]) -> List[Decision]:
        """Return one Decision per app whose CPU and/or memory should change.

        Apps without stats are skipped; apps whose usage stays within the
        accepted margin of their thresholds produce no Decision.
        """
        decisions = []
        for app in apps:
            if not app.app_stats:
                continue

            decision = Decision(app.id)
            deploy_decision = False

            if app.is_set_to_scale_cpu():
                new_cpu = self._decide_resource(
                    app.id,
                    app.app_stats.cpu_usage / 100,
                    app.cpu_threshold,
                    app.cpu_allocated,
                    app.min_cpu_scale_limit,
                    app.max_cpu_scale_limit,
                    DecisionEvents.CPU_SCALE_UP,
                    DecisionEvents.CPU_SCALE_DOWN,
                    DecisionEvents.CPU_SCALE_NONE,
                )
                if new_cpu is not None:
                    decision.cpu = new_cpu
                    deploy_decision = True

            if app.is_set_to_scale_mem():
                new_mem = self._decide_resource(
                    app.id,
                    app.app_stats.mem_usage / 100,
                    app.mem_threshold,
                    app.mem_allocated,
                    app.min_mem_scale_limit,
                    app.max_mem_scale_limit,
                    DecisionEvents.MEM_SCALE_UP,
                    DecisionEvents.MEM_SCALE_DOWN,
                    DecisionEvents.MEM_SCALE_NONE,
                )
                if new_mem is not None:
                    decision.mem = new_mem
                    deploy_decision = True

            if deploy_decision:
                decisions.append(decision)

        return decisions
| 2.15625 | 2 |
python/sample_amazon/eduactive-crack-amazon/print_braces.py | FriendlyUser/code-algorithm-questions | 0 | 12766481 | import copy
def print_all_braces(n):
    """Return all combinations of ``n`` balanced pairs of braces.

    Each combination is a list of single-character strings, e.g. for n=1 the
    result is [["{", "}"]].
    """
    result = []
    print_all_braces_rec(n, 0, 0, [], result)
    return result


def print_all_braces_rec(n, left_count, right_count, output, result):
    """Backtracking helper for print_all_braces.

    ``left_count``/``right_count`` track how many opening/closing braces are
    already in ``output``; complete combinations are copied into ``result``.

    Fixes over the original: removed the duplicated stray
    ``result.append(...)`` under ``if left_count < n`` and repaired the
    ``output + = "}"`` syntax error.
    """
    if left_count >= n and right_count >= n:
        # All n pairs placed -- record a copy, since output keeps mutating.
        result.append(copy.copy(output))
    if left_count < n:
        output += "{"
        print_all_braces_rec(n, left_count + 1, right_count, output, result)
        output.pop()
    if right_count < left_count:
        output += "}"
        print_all_braces_rec(n, left_count, right_count + 1, output, result)
        output.pop()
| 3.59375 | 4 |
tools/graph_bag/scripts/test_rmse_utilities.py | limenutt/astrobee | 629 | 12766482 | <gh_stars>100-1000
#!/usr/bin/python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import unittest
import numpy as np
import poses
import rmse_utilities
def make_poses(times, xs, ys, zs):
    """Build a Poses object carrying the given timestamps and x/y/z positions."""
    result = poses.Poses("", "")
    result.times = times
    result.positions.xs = xs
    result.positions.ys = ys
    result.positions.zs = zs
    return result
class TestRMSESequence(unittest.TestCase):
    """Tests for rmse_utilities: timestamp-based pruning of pose pairs and
    RMSE computation between timestamped pose sequences.

    The identical 4-line ramp fixture (repeated in every test) and the
    4-assert position check (repeated in four tests) are factored into the
    two helpers below.
    """

    @staticmethod
    def _ramp_data(count=10):
        """Return (times, xs, ys, zs) reference ramps: times/xs are
        0..count-1, ys and zs are offset by +1 and +2 respectively so each
        axis is distinguishable."""
        times = np.arange(float(count))
        xs = np.arange(float(count))
        ys = np.arange(float(count)) + 1.0
        zs = np.arange(float(count)) + 2.0
        return times, xs, ys, zs

    def _assert_trimmed_matches(self, trimmed, expected_times):
        """Assert that a trimmed pose sequence carries exactly the expected
        timestamps and the corresponding ramp positions."""
        self.assertTrue(np.allclose(trimmed.times, expected_times, rtol=0))
        self.assertTrue(np.allclose(trimmed.positions.xs, expected_times, rtol=0))
        self.assertTrue(np.allclose(trimmed.positions.ys, expected_times + 1, rtol=0))
        self.assertTrue(np.allclose(trimmed.positions.zs, expected_times + 2, rtol=0))

    def test_prune_missing_timestamps_beginning_set(self):
        a_times, xs, ys, zs = self._ramp_data()
        b_times = np.arange(5.0)
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(b_times, xs, ys, zs)
        trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
        self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
        self.assertEqual(len(trimmed_a.times), 5)
        self._assert_trimmed_matches(trimmed_a, b_times)

    def test_prune_missing_timestamps_middle_set(self):
        a_times, xs, ys, zs = self._ramp_data()
        b_times = np.arange(3.0, 7.0)
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(b_times, xs, ys, zs)
        trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
        self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
        self.assertEqual(len(trimmed_a.times), 4)
        self._assert_trimmed_matches(trimmed_a, b_times)

    def test_prune_missing_timestamps_end_set(self):
        a_times, xs, ys, zs = self._ramp_data()
        b_times = np.arange(7.0, 10.0)
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(b_times, xs, ys, zs)
        trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
        self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
        self.assertEqual(len(trimmed_a.times), 3)
        self._assert_trimmed_matches(trimmed_a, b_times)

    def test_prune_missing_timestamps_scattered_set(self):
        a_times, xs, ys, zs = self._ramp_data()
        b_times = np.array([1.0, 5.0, 6.0, 9.0])
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(b_times, xs, ys, zs)
        trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
        self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
        self._assert_trimmed_matches(trimmed_a, b_times)

    def test_prune_missing_timestamps_disjoint_set(self):
        # No common timestamps at all -> both trimmed sequences are empty.
        a_times, xs, ys, zs = self._ramp_data()
        b_times = np.arange(11, 20)
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(b_times, xs, ys, zs)
        trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
        self.assertEqual(len(trimmed_a.times), 0)
        self.assertEqual(len(trimmed_b.times), 0)

    def test_prune_missing_timestamps_some_overlap(self):
        a_times, xs, ys, zs = self._ramp_data()
        b_times = np.arange(8.0, 20.0)
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(b_times, xs, ys, zs)
        expected_time_range = np.arange(8.0, 10.0)
        trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b)
        self.assertEqual(len(trimmed_a.times), len(trimmed_b.times))
        self.assertTrue(np.allclose(trimmed_a.times, trimmed_b.times, rtol=0))
        self._assert_trimmed_matches(trimmed_a, expected_time_range)

    def test_rmse_same_poses(self):
        # Identical pose sequences -> RMSE of exactly zero.
        a_times, xs, ys, zs = self._ramp_data()
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(a_times, xs, ys, zs)
        rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b)
        self.assertTrue(np.isclose(rmse, 0, rtol=0))

    def test_rmse_off_by_one(self):
        # A constant +1 offset on one axis -> RMSE of exactly 1.
        a_times, xs, ys, zs = self._ramp_data()
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(a_times, xs + 1, ys, zs)
        rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b)
        self.assertTrue(np.isclose(rmse, 1.0, rtol=0))

    def test_rmse_all_off_by_one(self):
        # A constant +1 offset on all three axes -> RMSE of sqrt(3).
        a_times, xs, ys, zs = self._ramp_data()
        poses_a = make_poses(a_times, xs, ys, zs)
        poses_b = make_poses(a_times, xs + 1, ys + 1, zs + 1)
        rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b)
        self.assertTrue(np.isclose(rmse, math.sqrt(3.0), rtol=0))
if __name__ == "__main__":
    # Allow running this test module directly: python test_rmse_utilities.py
    unittest.main()
| 2.03125 | 2 |
cogs/misc.py | DoggieLicc/ReminderFriend | 3 | 12766483 | <filename>cogs/misc.py<gh_stars>1-10
from datetime import timedelta
from discord.ext import commands
import discord
import time
import inspect
from io import StringIO
from classes import embed_create, CustomBot
class Misc(commands.Cog, name="Misc. Commands"):
    """Miscellaneous commands: custom prefixes, bot info, and source lookup."""

    def __init__(self, bot):
        self.bot: CustomBot = bot
        print("MiscCog init")

    def get_uptime(self):
        """Return the bot's uptime in whole seconds."""
        return round(time.time() - self.bot.start_time)

    @commands.command(aliases=['setprefix'])
    async def prefix(self, ctx, *, prefix):
        """Sets a custom prefix for this server!
        You can ping the bot to get the custom prefix."""
        # Reject absurdly long prefixes before storing them.
        if len(prefix) > 100:
            embed = embed_create(ctx.author,
                                 title='Prefix is too long!',
                                 description='Your prefix has to be less than 100 characters!',
                                 color=discord.Color.red())
            return await ctx.send(embed=embed)

        await self.bot.prefix.set_custom_prefix(ctx.guild, prefix)
        embed = embed_create(ctx.author,
                             title='Prefix successfully set!',
                             description=f'Prefix has been set to `{prefix}`')
        await ctx.send(embed=embed)

    @commands.command(aliases=["i", "ping"])
    async def info(self, ctx):
        """Shows information for the bot!"""
        embed = embed_create(ctx.author, title="Info for Reminder Friend!",
                            description="This bot sets reminders for you!")
        embed.add_field(name="Invite this bot!", value=
                        "[**Invite**]"
                        "(https://discord.com/api/oauth2/authorize?"
                        "client_id=812140712803827742&permissions=18432&scope=bot)",
                        inline=False)
        embed.add_field(name="Join support server!",
                        value="[**Support Server**](https://discord.gg/Uk6fg39cWn)",
                        inline=False)
        embed.add_field(name='Bot Creator:',
                        value='[Doggie](https://github.com/DoggieLicc)#1641',
                        inline=True)
        embed.add_field(name='Bot Uptime:',
                        value=str(timedelta(seconds=self.get_uptime())), inline=False)
        # BUG FIX: ``inline=False`` was previously passed to str.format (where
        # extra keyword arguments are silently ignored) instead of add_field.
        embed.add_field(name='Ping:',
                        value='{} ms'.format(round(1000 * self.bot.latency)),
                        inline=False)
        await ctx.send(embed=embed)

    @commands.command()
    async def source(self, ctx, *, command: str = None):
        """Look at the bots code"""
        if command is None:
            embed = embed_create(ctx.author, title='Source Code:',
                                 description='[Github for **Reminder Friend**]'
                                             '(https://github.com/DoggieLicc/ReminderFriend)')
            return await ctx.send(embed=embed)

        if command == 'help':
            # The help command is a class, not a regular command callback.
            src = type(self.bot.help_command)
            filename = inspect.getsourcefile(src)
        else:
            obj = self.bot.get_command(command.replace('.', ' '))
            if obj is None:
                # Consistency fix: pass ctx.author like every other
                # embed_create call in this cog (was ``ctx``).
                embed = embed_create(ctx.author, title='Command not found!',
                                     description='This command wasn\'t found in this bot.')
                return await ctx.send(embed=embed)
            src = obj.callback.__code__
            filename = src.co_filename

        lines, _ = inspect.getsourcelines(src)
        code = ''.join(lines)
        buffer = StringIO(code)
        file = discord.File(fp=buffer, filename=filename)
        await ctx.send(f"Here you go, {ctx.author.mention}. (You should view this on a PC)", file=file)
def setup(bot):
    # Extension entry point invoked when this cog module is loaded by the bot.
    bot.add_cog(Misc(bot))
| 2.71875 | 3 |
week1/mapper.py | JasonSanchez/w261 | 6 | 12766484 | <filename>week1/mapper.py<gh_stars>1-10
#!/usr/bin/python
import sys
import re
for line in sys.stdin:
    # Emit one record per alphabetic word: "Capital 1" for all-uppercase
    # words, "Lower 1" for everything else (str.isupper() is False for
    # mixed-case words, so they count as lower).
    for word in re.findall(r'[a-zA-Z]+', line):
        if word.isupper():
            print("Capital 1")
        else:
            print("Lower 1")
object_store_experiments/aiohttp_s3_put_get_redis.py | fadams/amqp-lambda-runtime | 0 | 12766485 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Run with:
# PYTHONPATH=.. python3 aiohttp_s3_put_get_redis.py
#
"""
This example is exactly the same as aiohttp_s3_put_get.py, but has the Redis
endpoint_url uncommented so it uses the Redis client facade instead of HTTP/S3.
This example illustrates using aiohttp/aiosonic for native asyncio s3.put_object
and s3.get_object. The approach taken is to create a "aioboto3lite" library
which re-implements the main boto3/aioboto3 S3 CRUD methods by directly invoking
the underlying HTTP requests using aiohttp or alternatively aiosonic clients.
This approach avoids much of the many levels of indirection that can slow down
boto3 invocations, though it can get quite complicated due to the header signing
that AWS APIs require hence wrapping all of those gory details in a library.
Another advantage of creating a "fake" aioboto3 is that it is basically possible
to do a "plug in replacement" where instead of doing:
import botocore, aioboto3
we can do:
import aioboto3lite as aioboto3
import aioboto3lite as botocore
and the actual application code can remain the same for both.
N.B. At the moment aioboto3lite is very much a proof of concept and is fairly
limited, only supporting a few of the basic S3 CRUD methods however the
performance difference is *significant*
Using a single instance Docker minio and the default overlayfs data directory
with 5KB objects the put_object rate seems to be around 1429 items/s and
the get_object rate seems to be around 3238 items/s using aiosonic - that's
around 2.65x the write performance of aioboto3 and 4.5x the read performance of
aioboto3.
"""
import sys
assert sys.version_info >= (3, 8) # Bomb out if not running Python3.8
import asyncio, os, time, uuid
import aioboto3lite as aioboto3
import aioboto3lite as aiobotocore
#import aiobotocore, aioboto3 # Uncomment this line to use the real aioboto3/aiobotocore
from utils.logger import init_logging
from s3_utils_asyncio import (
create_configured_session,
create_bucket,
purge_and_delete_bucket,
put_object,
get_object
)
async def aiohttp_launch_as_tasks_in_batches():
    """
    Benchmark put_object/get_object throughput, launching requests as asyncio
    tasks gathered in batches of MAX_CONNECTIONS.

    Creates aiohttp.Session() configured from environment variables or user's
    profile or minio profile. The easiest way to use with minio is to add the
    following to ~/.aws/credentials (setting the key_id and key used to set
    MINIO_ROOT_USER and MINIO_ROOT_PASSWORD).

    [minio]
    aws_access_key_id = <id>
    aws_secret_access_key = <key>
    """
    session = create_configured_session(aioboto3)
    config = aiobotocore.config.AioConfig(max_pool_connections=MAX_CONNECTIONS)
    #config.http_client = "aiohttp" # Defaults to "aiosonic"
    #client = session.client("s3", endpoint_url="http://localhost:9001", config=config)
    client = session.client("s3", endpoint_url="redis://localhost:6379", config=config)

    async with client as s3:  # In aioboto3 client and resource are context managers
        await create_bucket(s3, bucket_name)

        content = "x" * 5000
        print()
        print(__file__)
        print(f"Testing {ITERATIONS} iterations, with an item size of {len(content)}")

        #------------------------- Test writing objects ------------------------
        start = time.time()
        overall_start = start  # Used to time aggregate put then get time
        object_refs = []
        tasks = []
        for i in range(ITERATIONS):
            s3_uri = f"s3://{bucket_name}/{uuid.uuid4()}"
            #print(s3_uri)
            tasks.append(put_object(s3, s3_uri, content))
            # Launch puts in batches of MAX_CONNECTIONS concurrent tasks.
            if len(tasks) == MAX_CONNECTIONS:
                await asyncio.gather(*tasks)
                tasks = []
            object_refs.append(s3_uri)

        #print(len(tasks))
        await asyncio.gather(*tasks)  # await any outstanding tasks

        end = time.time()
        rate = ITERATIONS/(end - start)
        bandwidth = rate * len(content)/1024
        print(f"put_object: rate {rate} items/s, {bandwidth} KiB/s")

        #------------------------ Test reading objects -------------------------
        start = time.time()
        tasks = []
        for s3_uri in object_refs:
            tasks.append(get_object(s3, s3_uri))
            if len(tasks) == MAX_CONNECTIONS:
                results = await asyncio.gather(*tasks)
                tasks = []
        results = await asyncio.gather(*tasks)  # await any outstanding tasks
        #print(results)

        end = time.time()
        rate = ITERATIONS/(end - start)
        # BUG FIX: was /1000 while the label says KiB/s; use 1024 so the
        # figure is consistent with the put_object measurement above.
        bandwidth = rate * len(content)/1024
        print(f"get_object: rate {rate} items/s, {bandwidth} KiB/s")

        print()
        rate = ITERATIONS/(end - overall_start)
        # BUG FIX: same KB-vs-KiB divisor inconsistency as above.
        bandwidth = rate * len(content)/1024
        print(f"Overall put_object then get_object: rate {rate} items/s, {bandwidth} KiB/s")
        print()

        #------------------ Test writing then reading objects ------------------
        async def put_then_get(s3, s3_uri, body):  # put followed by get as a task
            await put_object(s3, s3_uri, body)
            return await get_object(s3, s3_uri)

        start = time.time()
        object_refs = []
        tasks = []
        for i in range(ITERATIONS):
            s3_uri = f"s3://{bucket_name}/{uuid.uuid4()}"
            #print(s3_uri)
            tasks.append(put_then_get(s3, s3_uri, content))
            # Each task issues two requests, so allow twice the batch size.
            if len(tasks) == MAX_CONNECTIONS * 2:
                await asyncio.gather(*tasks)
                tasks = []
            object_refs.append(s3_uri)

        #print(len(tasks))
        await asyncio.gather(*tasks)  # await any outstanding tasks

        end = time.time()
        rate = ITERATIONS/(end - start)
        bandwidth = rate * len(content)/1024
        print(f"put_then_get: rate {rate} items/s, {bandwidth} KiB/s")

        # Delete the objects we created then the bucket to tidy things up
        await purge_and_delete_bucket(s3, bucket_name)
if __name__ == '__main__':
    """
    Attempt to use uvloop libuv based event loop if available
    https://github.com/MagicStack/uvloop
    """
    try:
        import uvloop
        uvloop.install()
    except: # Fall back to standard library asyncio epoll event loop
        pass

    # Benchmark knobs: total objects written/read and max concurrent requests.
    ITERATIONS = 10000
    MAX_CONNECTIONS = 1000

    # Create bucket to use in this test
    bucket_name = "aiohttp-s3-put-get"

    # Initialise logger
    logger = init_logging(log_name=bucket_name)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(aiohttp_launch_as_tasks_in_batches())
| 1.84375 | 2 |
models/locomchatlogsresponse.py | jujinesy/Empier_PythonKakaoBot | 3 | 12766486 | <filename>models/locomchatlogsresponse.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from locoresponse import LocoResponse
from locochatlog import *
class LocoMChatLogsResponse(LocoResponse):
    # Response wrapper exposing the chat logs carried in the payload.

    def chat_logs(self):
        # Wrap each raw chat-log entry from the "chatLogs" payload field in a
        # LocoChatLog.
        # NOTE(review): under Python 3 ``map`` returns a lazy iterator, not a
        # list as under Python 2 (the implicit relative imports suggest this
        # module targets Python 2) -- confirm callers before porting.
        return map(LocoChatLog, self._data["chatLogs"])

    def eof(self):
        # Return the raw "eof" flag from the response payload.
        return self._data["eof"]
| 2.1875 | 2 |
lab4/src/enquirie_feature.py | derry95922/STV | 0 | 12766487 | <reponame>derry95922/STV<gh_stars>0
import unittest
from keywords import *
class test_suite(unittest.TestCase):
    """Selenium UI tests for the enquiry (contact) feature.

    NOTE(review): the method names do not start with ``test_``, so unittest
    discovery will silently skip them -- confirm whether that is intended.
    NOTE(review): several XPath/input strings contain redacted placeholders
    (``<EMAIL>``, ``<PASSWORD>``) and cannot match real page elements as-is.
    """

    def create_enquiry_on_the_contact_page(self):
        # Fill in and submit the public contact form, then log into the admin
        # UI and verify the enquiry appears with the submitted name.
        driver = webdriver.Chrome()
        driver.get("http://127.0.0.1:3000/")
        driver.maximize_window()
        wait_element_is_visible(driver, "//li//*[text()='Contact']").click()
        input_text(driver, "//*[<EMAIL>='<EMAIL>']", "testName")
        input_text(driver, "//*[@name='email']", "<EMAIL>")
        input_text(driver, "//*[@name='phone']", "0987654321")
        wait_element_is_visible(driver, "//*[@name='enquiryType']").click()
        wait_element_is_visible(driver, "//*[text()='Just leaving a message']").click()
        input_text(driver, "//*[@name='message']", "<PASSWORD>Message")
        actualName = driver.find_element(By.XPATH, "//*[@name='<EMAIL>']").get_attribute("value")
        wait_element_is_visible(driver, "//*[text()='Submit']").click()
        wait_element_is_visible(driver, "//*[text()='Success!']")
        assert "Success" in driver.find_element(By.XPATH, "//*[text()='Success!']").text
        driver.quit()
        # Cross-check the submission on the admin side.
        driver = login()
        wait_element_is_visible(driver, "//a[text()='Enquiries']").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        expectName = driver.find_element(By.XPATH, "//*[contains(@class,'ItemList__value--name')]").text
        assert actualName == expectName
        # Clean up so the next run starts from an empty enquiry list.
        delete_enquiry(driver)
        logout(driver)

    def delete_enquiry_on_admin_ui_page(self):
        # Seed one enquiry, then delete it through the admin UI and verify
        # the list reports no remaining enquiries.
        create_enquiry()
        driver = login()
        wait_element_is_visible(driver, "//a[text()='Enquiries']").click()
        wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
        wait_element_is_visible(driver, "//*[contains(@class,'ItemList__value--name')]/../preceding-sibling::*").click()
        wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
        wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
        wait_element_is_visible(driver, "//*[text()='No enquiries found...']")
        noEnquiries = driver.find_element(By.XPATH, "//*[text()='No enquiries found...']").text
        assert "No enquiries found..." == noEnquiries
        logout(driver)
def create_enquiry():
    """Helper: submit one enquiry through the public contact form.

    Opens a fresh Chrome session, fills the form with fixed test data and
    asserts the success banner before quitting the browser.
    """
    driver = webdriver.Chrome()
    driver.get("http://127.0.0.1:3000/")
    driver.maximize_window()
    wait_element_is_visible(driver, "//li//*[text()='Contact']").click()
    input_text(driver, "//*[@name='<EMAIL>.full']", "testName")
    input_text(driver, "//*[@name='email']", "<EMAIL>")
    input_text(driver, "//*[@name='phone']", "0987654321")
    wait_element_is_visible(driver, "//*[@name='enquiryType']").click()
    wait_element_is_visible(driver, "//*[text()='Just leaving a message']").click()
    input_text(driver, "//*[@name='message']", "<PASSWORD>")
    wait_element_is_visible(driver, "//*[text()='Submit']").click()
    wait_element_is_visible(driver, "//*[text()='Success!']")
    assert "Success" in driver.find_element(By.XPATH, "//*[text()='Success!']").text
    driver.quit()
def delete_enquiry(driver):
    """Helper: delete the first enquiry via the admin UI on an already
    logged-in *driver* and assert the list is empty afterwards."""
    wait_element_is_visible(driver, "//a[text()='Enquiries']").click()
    wait_element_is_visible(driver, "//*[@class='ItemList-wrapper']")
    # Tick the row checkbox, confirm the modal dialog.
    wait_element_is_visible(driver, "//*[contains(@class,'ItemList__value--name')]/../preceding-sibling::*").click()
    wait_element_is_visible(driver, "//*[@data-screen-id='modal-dialog']")
    wait_element_is_visible(driver, "//*[@data-button-type='confirm']").click()
    wait_element_is_visible(driver, "//*[text()='No enquiries found...']")
    noEnquiries = driver.find_element(By.XPATH, "//*[text()='No enquiries found...']").text
    assert "No enquiries found..." == noEnquiries
def suite():
    """Assemble the enquiry scenarios into a single unittest suite."""
    cases = [
        test_suite('create_enquiry_on_the_contact_page'),
        test_suite('delete_enquiry_on_admin_ui_page'),
    ]
    collected = unittest.TestSuite()
    collected.addTests(cases)
    return collected
if __name__ == '__main__':
    # Run the enquiry suite with verbose per-test output.
    unittest.TextTestRunner(verbosity=2).run(suite())
python/nehe/lesson6.py | acs/opengl-samples | 1 | 12766488 | <gh_stars>1-10
#!/usr/bin/env python3
from OpenGL.GL import *
from OpenGL.GLUT import *
from opengl_app import OpenGLApp
class Lesson6(OpenGLApp):
    """NeHe lesson 6: a rotating cube with one texture-mapped face.

    Draws four of the six faces inside a single GL_QUADS batch: the front
    face carries texture coordinates, the right/back/left faces use flat
    vertex colors.  Top and bottom faces are not emitted here.
    """
    # Named RGB color tuples used by the colored faces.
    red = (1.0, 0.0, 0.0)
    dark_green = (0.0, 0.4, 0.0)
    # NOTE(review): light_green has the same value as dark_green — possibly
    # intended to be brighter (e.g. (0.0, 1.0, 0.0)); confirm.
    light_green = (0.0, 0.4, 0.0)
    green = dark_green
    blue = (0.0, 0.0, 1.0)
    yellow = (1.0, 1.0, 0.0)
    cyan = (0.0, 1.0, 1.0)
    white = (1.0, 1.0, 1.0)
    def cube_square(self):
        """Emit vertices for four cube faces; must run between glBegin/glEnd."""
        # We have 8 vertex: 4 top square, 4 for bottom square
        top_front_left = (-1.0, 1.0, 1.0)
        top_front_right = (1.0, 1.0, 1.0)
        top_back_left = (-1.0, 1.0, -1.0)
        top_back_right = (1.0, 1.0, -1.0)
        bottom_front_left = (-1.0, -1.0, 1.0)
        bottom_front_right = (1.0, -1.0, 1.0)
        bottom_back_left = (-1.0, -1.0, -1.0)
        bottom_back_right = (1.0, -1.0, -1.0)
        # Front square — the only face with texture coordinates.
        glTexCoord2f(0.0, 0.0)
        glVertex3f(*top_front_left)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(*top_front_right)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(*bottom_front_right)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(*bottom_front_left)
        # Right square — blue on top edge, green on bottom edge.
        glColor3f(*self.blue)
        glVertex3f(*top_front_right)
        glColor3f(*self.blue)
        glVertex3f(*top_back_right)
        glColor3f(*self.green)
        glVertex3f(*bottom_back_right)
        glColor3f(*self.green)
        glVertex3f(*bottom_front_right)
        # Back square
        glColor3f(*self.blue)
        glVertex3f(*top_back_right)
        glColor3f(*self.blue)
        glVertex3f(*top_back_left)
        glColor3f(*self.green)
        glVertex3f(*bottom_back_left)
        glColor3f(*self.green)
        glVertex3f(*bottom_back_right)
        # Left square
        glColor3f(*self.blue)
        glVertex3f(*top_back_left)
        glColor3f(*self.blue)
        glVertex3f(*top_front_left)
        glColor3f(*self.green)
        glVertex3f(*bottom_front_left)
        glColor3f(*self.green)
        glVertex3f(*bottom_back_left)
    # The main drawing function.
    def draw_gl_scene(self):
        """Render one frame: clear, rotate the cube and draw it textured."""
        # Clear The Screen And The Depth Buffer
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity() # Reset The View to the center
        glTranslatef(0.0, 0.0, -5.0)
        # To show the pyramid and cube let's rotate it a bit based on y-axis
        glRotatef(self.rotation_triangle, 1.0, 1.0, 1.0)
        self.rotation_triangle += 1
        # Select texture to use
        # NOTE(review): texture id 1 is hard-coded; assumes the base class
        # loaded a texture under that id — confirm in OpenGLApp.
        self.texture_id = 1
        glBindTexture(GL_TEXTURE_2D, self.texture_id)
        # Draw
        glBegin(GL_QUADS) # Doing it as a triangle probably it is faster
        self.cube_square()
        glEnd()
        glutSwapBuffers()
def main():
    """Instantiate the lesson application and enter its main loop."""
    app = Lesson6()
    app.main()


if __name__ == '__main__':
    main()
| 3.046875 | 3 |
misc_scripts/file_to_bytes.py | jtara1/misc_scripts | 4 | 12766489 | <filename>misc_scripts/file_to_bytes.py
import click
@click.command()
@click.argument('file_name', type=click.Path())
@click.argument('output_file_name', default='file_bytes.txt', type=click.Path())
def file_to_bytes(file_name, output_file_name):
    """Write the Python repr of FILE_NAME's raw bytes into OUTPUT_FILE_NAME."""
    with open(file_name, 'rb') as infile, open(output_file_name, 'w') as outfile:
        raw = infile.read()
        outfile.write(str(bytes(raw)))


if __name__ == "__main__":
    file_to_bytes()
| 3.40625 | 3 |
crds/tests/test_submit.py | rendinam/crds | 7 | 12766490 | import os
import shutil
import tempfile
from nose.tools import assert_equals, assert_not_equals, assert_is, assert_true, raises
from crds.submit import Submission, NoFilesSelected
from crds.tests import test_config
import mock
# To run:
# nosetests -v unit_tests.py
TEMPFILES = ['ipppssoot_ccd.fits', 'opppssoot_bia.fits']
# Mocked urllib.request to .../redcat_description.yml:
FORM_DESCRIPTION_YML = '''\
- {help_text: 'Who are you?', key: deliverer, label: Name of deliverer, required: true,
type: CharField}
- {help_text: Comma-delimited list (optional), key: other_email, label: Other e-mail
adresses to send notifications, required: false, type: CharField}
- choices: [acs, cos, nicmos, stis, synphot, wfc3, wfpc2]
initial: acs
key: instrument
label: Instrument (All submitted files should match this instrument. This instrument
will be locked for your submission exclusively)
required: true
type: TypedChoiceField
- {key: file_type, label: 'Type of files (Bias, Dark, etc.)', required: true, type: CharField}
- choices: [false, true]
initial: false
key: history_updated
label: Has HISTORY section in the primary header been updated to describe in detail
the reason for delivery and how the files were created?
required: false
type: BooleanField
- choices: [false, true]
initial: false
key: pedigree_updated
label: Has PEDIGREE keyword been checked and updated as necessary?
required: false
type: BooleanField
- choices: [false, true]
initial: false
key: keywords_checked
label: Has COMMENT been checked?
required: false
type: BooleanField
- choices: [false, true]
initial: false
key: descrip_updated
label: Was the DESCRIP keyword updated with a summary of why the files were updated
or created?
required: false
type: BooleanField
- choices: [false, true]
initial: false
key: useafter_updated
label: Has the USEAFTER keyword been checked, and if necessary, updated?
required: false
type: BooleanField
- choices: [N/A, 'No', 'Yes']
help_text: N/A for ETC Files Only
initial: N/A
key: useafter_matches
label: If the reference files are replacing previous versions, do the new USEAFTER
dates exactly match the old ones?
required: true
type: TypedChoiceField
- choices: [N/A, 'No', 'Yes']
help_text: optional
initial: N/A
key: compliance_verified
label: Verification for compliance complete (fits, certify, etc. or N/A)
required: true
type: TypedChoiceField
- choices: [false, true]
initial: false
key: ingest_files
label: Should the affected files be reprocessed?
required: false
type: BooleanField
- choices: [false, true]
initial: false
key: etc_delivery
label: Should the files be submitted to the ETC?
required: false
type: BooleanField
- choices: [false, true]
initial: false
key: jwst_etc
label: Are these JWST ETC files?
required: false
type: BooleanField
- {key: calpipe_version, label: Files run through the current version of the calibration
software being used by the pipeline or PYSYNPHOT and ETC (yes/no and version number),
required: true, type: CharField}
- choices: [false, true]
initial: false
key: replacement_files
label: Are any files replacing old reference files (deliveries can be a mix of files
that are or are not replacing old files) (yes/no)
required: false
type: BooleanField
- {key: old_reference_files, label: 'If yes, list them here', required: false, type: CharField}
- choices: [N/A, 'No', 'Yes']
initial: N/A
key: replacing_badfiles
label: If the files being replaced are bad, and should not be used with any data,
please indicate this here
required: true
type: TypedChoiceField
- {help_text: Comma-delimited list (optional), key: jira_issue, label: Any JIRA issues
filed in regard to the references being delivered (e.g. "REDCAT-25"), required: false,
type: CharField}
- {key: table_rows_changed, label: 'If files are tables, please indicate exactly which
rows have changed', required: false, type: CharField}
- {key: modes_affected, label: 'Please indicate which modes (e.g. all the STIS, FUVMAMA,
E140L modes) are affected by the changes in the files', required: true, type: CharField}
- {key: correctness_testing, label: Description of how the files were tested for correctness,
required: true, type: CharField}
- {key: additional_considerations, label: Additional considerations, required: false,
type: CharField}
'''
def touch(path):
    """Create *path* if missing and bump its access/modification times."""
    handle = open(path, 'a')
    try:
        os.utime(path, None)
    finally:
        handle.close()
class TestSubmission(object):
    """Tests for crds.submit.Submission.

    Uses a mocked ``urllib.request.urlopen`` to serve the YAML form
    description and a scratch temporary directory for file operations.
    All tests share one Submission instance (``cls.s``).
    """
    @classmethod
    @mock.patch('crds.submit.rc_submit.urllib.request.urlopen', autospec=True)
    def setup_class(cls, urlopen):
        '''This method is run once for each class before any tests are run.'''
        cls.old_state = test_config.setup()
        # Create a temporary directory:
        cls.tmpdir = tempfile.mkdtemp(prefix='tmp_rc_submit_')
        # Create empty test files in the temporary directory:
        cls.tempfiles = [os.path.join(cls.tmpdir, x) for x in TEMPFILES]
        for filename in cls.tempfiles:
            touch(filename)
        # Create a file handle to use as a mockup of the urllib.request object:
        cls.mockup_form = os.path.join(cls.tmpdir, 'mocked_redcat_description.yml')
        with open(cls.mockup_form, 'w') as f:
            f.write(FORM_DESCRIPTION_YML)
        urlopen.return_value = open(cls.mockup_form)
        # Instantiate the Submission object used in these tests:
        cls.s = Submission('hst', 'dev', context='hst_0723.pmap')
    @classmethod
    def teardown_class(cls):
        '''This method is run once for each class after all tests are run.'''
        # Remove temporary directory and all files contained therein:
        shutil.rmtree(cls.tmpdir)
        test_config.cleanup(cls.old_state)
    @raises(KeyError)
    def test_badkey(self):
        # Unknown form keys must be rejected.
        self.s['bad_key'] = 'some value'
    def test_goodvalue_char(self):
        # CharField accepts a plain string.
        self.s['file_type'] = 'bias'
    def test_goodvalue_bool(self, key='history_updated'):
        # BooleanField round-trips both True and False.
        self.s[key] = True
        assert_is(self.s[key], True)
        self.s[key] = False
        assert_is(self.s[key], False)
    def test_goodvalue_trinary(self, key='compliance_verified'):
        # Yes/No/N-A fields accept booleans and case-insensitive strings.
        # Set with Booleans:
        self.s[key] = True
        assert_equals(self.s[key], 'Yes')
        self.s[key] = False
        assert_equals(self.s[key], 'No')
        # Set with strings:
        self.s[key] = 'Yes'
        assert_equals(self.s[key], 'Yes')
        self.s[key] = 'yes' # Handle different case
        assert_equals(self.s[key], 'Yes')
        self.s[key] = 'No'
        assert_equals(self.s[key], 'No')
        self.s[key] = 'n/a' # Handle different case
        assert_equals(self.s[key], 'N/A')
    @raises(ValueError)
    def test_badtype(self):
        self.s['calpipe_version'] = 123 # Expects a str
    @raises(ValueError)
    def test_badvalue_trinary(self, key='compliance_verified'):
        self.s[key] = 'bad value'
    @raises(ValueError)
    def test_badvalue_choices(self):
        self.s['change_level'] = 'bad choice'
    @raises(ValueError)
    def test_emptyvalue_char(self):
        self.s['file_type'] = ''
    @raises(ValueError)
    def test_emptyvalue_char(self, key='file_type'):
        # NOTE(review): this redefinition shadows the identically named test
        # directly above, so that first variant never runs — rename one.
        self.s[key] = '' # Required field
    def test_emptyvalue_optional(self):
        self.s['additional_considerations'] = '' # Optional field
    def test_resetfield(self, key='deliverer'):
        # Deleting a field resets it rather than raising.
        new_value = 'Wombat'
        self.s[key] = new_value
        assert_equals(self.s[key], new_value)
        del self.s[key]
        assert_not_equals(self.s[key], new_value) # Also assumes KeyError is not thrown!
    def test_addfiles(self):
        for filename in self.tempfiles:
            self.s.add_file(filename)
    @raises(FileNotFoundError)
    def test_addbadfile(self):
        self.s.add_file(os.path.join(self.tmpdir, 'missing_file.fits'))
    def test_rmfile(self):
        # Removing one of the added files shrinks the set by exactly one.
        for filename in self.tempfiles:
            self.s.add_file(filename)
        self.s.remove_file(list(self.tempfiles)[0])
        assert_equals( len(self.s.files), len(self.tempfiles)-1 )
    @raises(KeyError)
    def test_rmbadfile(self):
        for filename in self.tempfiles:
            self.s.add_file(filename)
        self.s.remove_file('bad_filename.fits')
    def test_yaml(self):
        assert_true(self.s.yaml)
    def test_help(self):
        self.s.help() # Prints stuff
    @raises(ValueError)
    def test_validate_emptykey(self, key='file_type'):
        del self.s[key] # Resets to empty str
        self.s.validate()
    @raises(NoFilesSelected)
    def test_validate_emptyfiles(self):
        # With all files removed, validate() must raise NoFilesSelected.
        for filename in self.s.files:
            self.s.remove_file(filename)
        # Do something here to pass field validation checks:
        self.s['file_type'] = 'value'
        self.s['correctness_testing'] = 'value'
        self.s['deliverer'] = 'value'
        self.s['description'] = 'value'
        self.s['calpipe_version'] = 'value'
        self.s['modes_affected'] = 'value'
        self.s['instrument'] = 'stis' # Only works for HST
        self.s.validate()
    def test_validate(self):
        # Happy path: one file plus all required fields set.
        self.s.add_file(list(self.tempfiles)[0])
        # Do something here to pass field validation checks:
        self.s['file_type'] = 'value'
        self.s['correctness_testing'] = 'value'
        self.s['deliverer'] = 'value'
        self.s['description'] = 'value'
        self.s['calpipe_version'] = 'value'
        self.s['modes_affected'] = 'value'
        self.s['instrument'] = 'stis' # Only works for HST
        self.s.validate()
| 2.109375 | 2 |
setup.py | dchud/demodocus | 0 | 12766491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Software License Agreement (Apache 2.0)
Copyright (c) 2020, The MITRE Corporation.
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This project was developed by The MITRE Corporation.
If this code is used in a deployment or embedded within another project,
it is requested that you send an email to <EMAIL> in order to
let us know where this software is being used.
"""
# this setup.py was created from this template below. See it for more features.
# https://github.com/kennethreitz/setup.py/blob/master/setup.py
import io
import os
#from shutil import rmtree
from setuptools import find_packages, setup
# Package meta-data.
NAME = 'demodocusfw'
DESCRIPTION = 'demodocusfw generates a full state graph for a web site'
URL = 'https://gitlab.mitre.org/demodocus/demodocus-framework'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.8.0'
VERSION = '0.1.0'
here = os.path.abspath(os.path.dirname(__file__))
# loading required packages from 'requirements.txt'
with open(os.path.join(here, 'requirements.txt')) as f:
    required = f.read().splitlines()
# What packages are required for this module to be executed?
REQUIRED = required
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# Where the magic happens:
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    #package_dir={'demodocusfw': 'demodocusfw'},
    packages=find_packages(),
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    # NOTE(review): the file header declares Apache 2.0, but license and the
    # Trove classifier below say MIT — reconcile these.
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
)
Eval.py | sk-aravind/3D-Bounding-Boxes-From-Monocular-Images | 38 | 12766492 | """
This script utilises the ground truth label's 2D bounding box to
crop out the the points of interest and feed it into the model so that
it can predict a 3D bounding box for each of the 2D detections
The script will plot the results of the 3D bounding box onto the image
and display them alongside the groundtruth image and it's 3D bounding box.
This is to help with qualitative assesment.
Images to be evaluated should be placed in eval/image_2 folder
Eval Results for each file in the eval/image_2 folder will be saved to "eval/eval-results/"
FLAGS:
--show-single
Show 3D BoundingBox detections one at a time
--hide-imgs
Hides Display of ground truth and bounding box
"""
import os
import cv2
import errno
import argparse
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torchvision.models as models
from lib.DataUtils import *
from lib.Utils import *
from tqdm import tqdm
from lib import Model, ClassAverages
def main(exp_no=34):
    """Evaluate every saved epoch of experiment *exp_no* on the validation set.

    For each ``.pkl`` checkpoint under ``weights/exp_<exp_no>/`` the model is
    loaded and run over every labelled object in ``Kitti/validation``.  Two
    artifacts are written per image: a KITTI-format label file with the
    predicted 3D boxes, and a vertical stack of (raw image, ground-truth
    boxes, predicted boxes) for visual comparison.

    Args:
        exp_no: experiment number to evaluate (generalized from the
            previously hard-coded value 34; the default preserves the old
            behavior).
    """
    print("Generating evaluation results for experiment No. ", exp_no)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    base_dir = os.path.abspath(os.path.dirname(__file__))
    weights_path = os.path.join(base_dir, 'weights', 'exp_' + str(exp_no))
    weight_list = [x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')]
    # Pre-create the output folders for predicted labels and rendered images.
    for epoch_idx in range(len(weight_list)):
        check_and_make_dir('Kitti/results/validation/labels/exp_' + str(exp_no)
                           + "/epoch_%s/" % str(epoch_idx + 1))
    check_and_make_dir('Kitti/results/validation/pred_imgs/exp_' + str(exp_no))
    if not weight_list:
        print('We could not find any model weights to load, please train the model first!')
        return
    for model_weight in weight_list:
        # Checkpoint files are named ..._<epoch>.pkl; recover the epoch number.
        epoch_no = model_weight.split(".")[0].split('_')[-1]
        print("Evaluating for Epoch: ", epoch_no)
        print('Loading model with %s' % model_weight)
        my_vgg = models.vgg19_bn(pretrained=True)
        model = Model.Model(features=my_vgg.features, bins=2)
        # BUGFIX: both branches of the previous if/else called torch.load
        # identically; GPU-trained weights need map_location on CPU-only hosts.
        checkpoint = torch.load(os.path.join(weights_path, model_weight),
                                map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        # BUGFIX: the model (and each input tensor below) is now actually
        # moved to the selected device; the old ``input_tensor.cuda()`` call
        # discarded its result and had no effect.
        model = model.to(device)
        model.eval()
        # BUGFIX: the validation path used to be concatenated without a
        # separator (dirname(__file__) has no trailing slash), producing a
        # path like '.../Eval.py' + 'Kitti/validation' fused together.
        dataset = Dataset(os.path.join(base_dir, 'Kitti', 'validation'))
        all_images = dataset.all_objects()
        print("Length of eval data", len(all_images))
        averages = ClassAverages.ClassAverages()
        all_images = dataset.all_objects()
        print("Model is commencing predictions.....")
        for key in tqdm(sorted(all_images.keys())):
            data = all_images[key]
            truth_img = data['Image']
            img = np.copy(truth_img)    # canvas for predicted boxes
            imgGT = np.copy(truth_img)  # canvas for ground-truth boxes
            objects = data['Objects']
            cam_to_img = data['Calib']
            filename = ("Kitti/results/validation/labels/exp_" + str(exp_no)
                        + '/epoch_' + str(epoch_no) + "/" + str(key) + ".txt")
            check_and_make_dir(filename)
            # ``with`` guarantees the label file is closed even on error.
            with open(filename, "w") as label_file:
                for obj in objects:
                    label = obj.label
                    theta_ray = obj.theta_ray
                    input_img = obj.img
                    input_tensor = torch.zeros([1, 3, 224, 224])
                    input_tensor[0, :, :, :] = input_img
                    input_tensor = input_tensor.to(device)
                    [orient, conf, dim] = model(input_tensor)
                    orient = orient.cpu().data.numpy()[0, :, :]
                    conf = conf.cpu().data.numpy()[0, :]
                    dim = dim.cpu().data.numpy()[0, :]
                    # The network regresses dimension residuals; add the
                    # per-class average back in.
                    dim += averages.get_item(label['Class'])
                    argmax = np.argmax(conf)
                    orient = orient[argmax, :]
                    cos = orient[0]
                    sin = orient[1]
                    alpha = np.arctan2(sin, cos)
                    alpha += dataset.angle_bins[argmax]
                    alpha -= np.pi
                    # Draw predicted and ground-truth boxes (side effect on
                    # img/imgGT); the returned 3D location goes to the label.
                    location = plot_regressed_3d_bbox_2(
                        img, truth_img, cam_to_img, label['Box_2D'], dim,
                        alpha, theta_ray)
                    locationGT = plot_regressed_3d_bbox_2(
                        imgGT, truth_img, cam_to_img, label['Box_2D'],
                        label['Dimensions'], label['Alpha'], theta_ray)
                    label_file.write(
                        # Class label
                        str(label['Class']) + " -1 -1 " +
                        # Alpha
                        str(round(alpha, 2)) + " " +
                        # 2D Bounding box coordinates
                        str(label['Box_2D'][0][0]) + " " + str(label['Box_2D'][0][1]) + " " +
                        str(label['Box_2D'][1][0]) + " " + str(label['Box_2D'][1][1]) + " " +
                        # 3D Box Dimensions
                        str(' '.join(str(round(e, 2)) for e in dim)) + " " +
                        # 3D Box Location
                        str(' '.join(str(round(e, 2)) for e in location)) + " 0.0 " +
                        # Ry
                        str(round(theta_ray + alpha, 2)) + " " +
                        # Confidence
                        str(round(max(softmax(conf)), 2)) + "\n"
                    )
            # Stack (raw, ground truth, prediction) vertically for review.
            numpy_vertical = np.concatenate((truth_img, imgGT, img), axis=0)
            image_name = ('Kitti/results/validation/pred_imgs/exp_' + str(exp_no)
                          + '/' + str(key) + "/epoch_" + epoch_no + '_' + str(key) + '.jpg')
            check_and_make_dir(image_name)
            cv2.imwrite(image_name, numpy_vertical)
    print("Finished.")
if __name__ == '__main__':
    # NOTE(review): FLAGS is parsed but never consulted by main(), so
    # --show-single and --hide-imgs currently have no effect.
    parser = argparse.ArgumentParser()
    parser.add_argument("--show-single", action="store_true",
                        help="Show 3D BoundingBox detecions one at a time")
    parser.add_argument("--hide-imgs", action="store_true",
                        help="Hide display of visual results")
    FLAGS = parser.parse_args()
    main()
test/gdsctools/test_readers.py | Donnyvdm/gdsctools | 28 | 12766493 | from gdsctools.readers import GenomicFeatures, IC50, DrugDecode
from gdsctools.readers import Reader, drug_name_to_int
from easydev import TempFile
from gdsctools import ic50_test, gdsctools_data
import pandas as pd
from gdsctools.datasets import testing
def test_readers():
    """Reader() must build with no argument and reject invalid inputs.

    BUGFIX: the previous ``try: ...; assert False / except: assert True``
    pattern caught its own AssertionError through the bare ``except``, so
    the test could never fail even if Reader accepted bad input.
    """
    Reader()
    for bad_input in ('stupido', 1):
        try:
            Reader(bad_input)
        except Exception:
            pass  # expected: invalid constructor argument rejected
        else:
            raise AssertionError('Reader(%r) should have raised' % (bad_input,))
def test_read_ic50():
    """Exercise IC50 construction, plotting, export and header variants."""
    # -------------------------------- functionalities
    r = IC50(ic50_test)
    # we can also instanciate from a valid dataframe
    r = IC50(r)
    # repr and print statement should not raise
    r
    print(r)
    # the copy method
    assert r == r.copy()
    r.hist()
    r.plot_ic50_count()
    r.cosmicIds
    f = TempFile()
    r.to_csv(f.name)
    f.delete()
    # columns may be duplicated
    r = IC50(ic50_test)
    df = pd.concat([r.df, r.df[999]], axis=1)
    # Creating an instance from duplicated columns must raise.
    # BUGFIX: the previous try/assert False/except pattern swallowed its own
    # AssertionError via the bare except, so a non-raising IC50(df) went
    # unnoticed.
    try:
        IC50(df)
    except Exception:
        pass  # expected: duplicated columns are rejected
    else:
        raise AssertionError("IC50 should reject duplicated columns")
    # ---------------------------------------- different IC50 formats
    # test all files available
    for key in testing.keys():
        filename = testing[key].location
        if filename.startswith('ic50_test'):
            ic = IC50(filename)
    # some specific checks:
    ic = IC50(testing['ic50_test_header_no_drug_prefix'].location)
    assert ic.drugIds == [1, 2]
    ic = IC50(testing['ic50_test_header_drug_prefix_only'].location)
    assert ic.drugIds == [1, 2]
    ic = IC50(testing['ic50_test_header_mixed_drug_prefix'].location)
    assert ic.drugIds == [1, 2]
def test_read_gf():
    """GenomicFeatures builds from defaults, instances and dataframes."""
    # Default construction, then round-trip through an instance and its df.
    gf = GenomicFeatures()
    gf = GenomicFeatures(gf)
    gf = GenomicFeatures(gf.df)
    # repr/str must not raise
    gf
    print(gf)
    # attribute and plotting smoke checks
    gf.features
    gf.tissues
    gf.plot()
    # tissue filtering
    gf.drop_tissue_in('breast')
    gf.drop_tissue_in(['skin', 'bone'])
    gf.keep_tissue_in(['cervix', 'lung'])
    assert gf.shift == 2
    assert len(gf.unique_tissues) == 2
    # equality after aligning cosmic ids and feature sets
    default_gf = GenomicFeatures()
    csv_gf = GenomicFeatures(testing.genomic_features_csv)
    extra = [idx for idx in default_gf.df.index if idx not in csv_gf.df.index]
    default_gf.drop_cosmic(extra)
    default_gf.features = csv_gf.features
    assert csv_gf == default_gf
    # the bare CSV variant reports shift == 1
    bare = GenomicFeatures(testing.genomic_features_bare_csv)
    assert bare.shift == 1
    bare.get_TCGA()
def test_gf_compress():
    """Smoke-test feature compression on the default genomic-feature set."""
    features = GenomicFeatures()
    features.compress_identical_features()
def test_drugs():
    """DrugDecode behaves identically for CSV and TSV inputs and exposes
    company/name lookups.

    BUGFIX: the final ``get_name`` check was a bare comparison whose result
    was discarded, so it never asserted anything; it is now a real assert.
    """
    r1 = DrugDecode(testing.drug_test_csv)
    r1.drugIds
    r2 = DrugDecode(testing.drug_test_tsv)
    r2.drugIds
    assert r1 == r2
    # r1.get_info() this example fails because all webrelease are NAN
    assert len(r1) == 11
    dd = DrugDecode(gdsctools_data("test_drug_decode_comp.csv"))
    assert dd.companies == ["ME"]
    assert dd.is_public(5) == 'Y'
    dd.check()
    assert dd.get_info()['N_prop'] == 1
    # repr and print must not raise
    print(dd)
    dd
    # merging a decoder with itself is a no-op
    assert dd + dd == dd
    assert len(dd.get_public_and_one_company("ME")) == 10
    dd = DrugDecode(testing.drug_test_csv)
    assert dd.get_name("Drug_1047_IC50") == "Nutlin-3a"
def test_readers_tabs():
    """A .csv file whose content is actually tab-separated must be rejected.

    BUGFIX: the previous ``assert False`` lived inside the try block, so its
    AssertionError was swallowed by the bare except and the test was vacuous.
    """
    try:
        IC50(gdsctools_data("test_IC50_tabs.csv"))
    except Exception:
        pass  # expected: the malformed file is refused
    else:
        raise AssertionError("IC50 accepted a tab-separated .csv file")
def test_reader_long_strings():
    """drug_name_to_int passes ints through and parses very long digit strings."""
    cases = [
        (10, 10),
        ("1234567890123456789", 1234567890123456789),
        (str(2 ** 63), 9223372036854775808),
    ]
    for raw, expected in cases:
        assert drug_name_to_int(raw) == expected
| 2.5 | 2 |
setup.py | drawks/txstatsd | 0 | 12766494 | <gh_stars>0
# Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from distutils.command.install import install
from glob import glob
import os
from txstatsd import version
# If setuptools is present, use it to find_packages()
extra_setup_args = {}
try:
    import setuptools
    from setuptools import find_packages
    from setuptools import setup
except ImportError:
    # Fall back to distutils and a hand-rolled package walker when
    # setuptools is unavailable.
    from distutils.core import setup
    def find_packages():
        """
        Compatibility wrapper.
        Taken from storm setup.py.
        """
        # Any directory under txstatsd/ containing __init__.py is a package.
        packages = []
        for directory, subdirectories, files in os.walk("txstatsd"):
            if '__init__.py' in files:
                packages.append(directory.replace(os.sep, '.'))
        return packages
long_description = """
Twisted-based implementation of a statsd-compatible server and client.
"""
class TxPluginInstaller(install):
    """``install`` command that regenerates the Twisted plugin cache
    after the normal installation step."""
    def run(self):
        install.run(self)
        # Make sure we refresh the plugin list when installing, so we know
        # we have enough write permissions.
        # see http://twistedmatrix.com/documents/current/core/howto/plugin.html
        # "when installing or removing software which provides Twisted plugins,
        # the site administrator should be sure the cache is regenerated"
        # Enumerating all plugins forces Twisted to rewrite dropin.cache.
        from twisted.plugin import IPlugin, getPlugins
        list(getPlugins(IPlugin))
# Package definition; cmdclass hooks in the plugin-cache refresh above.
setup(
    cmdclass = {'install': TxPluginInstaller},
    name="txStatsD",
    version=version.txstatsd,
    description="A network daemon for aggregating statistics",
    author="txStatsD Developers",
    url="https://launchpad.net/txstatsd",
    license="MIT",
    # twisted.plugins must be listed explicitly: it has no __init__.py.
    packages=find_packages() + ["twisted.plugins"],
    scripts=glob("./bin/*"),
    long_description=long_description,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Intended Audience :: Information Technology",
        "Programming Language :: Python",
        "Topic :: Database",
        "Topic :: Internet :: WWW/HTTP",
        "License :: OSI Approved :: MIT License",
    ],
    **extra_setup_args
)
| 1.570313 | 2 |
day03/python/quajak/three.py | AlexisTM/aoc-2021 | 11 | 12766495 | f = open("./three.txt", "r")
lines = [x.strip() for x in f.readlines()]
gamma = ""
for index in range(len(lines[0])):
bits = [line[index] for line in lines]
zeros = list(bits).count('0')
ones = list(bits).count('1')
if zeros > ones:
gamma += "0"
else:
gamma += "1"
epsilon = ""
for index in range(len(lines[0])):
bits = [line[index] for line in lines]
zeros = list(bits).count('0')
ones = list(bits).count('1')
if zeros < ones:
epsilon += "0"
else:
epsilon += "1"
print("Star 1", int(epsilon, 2) * int(gamma, 2))
oxygen_valid = list(lines)
index = 0
while len(oxygen_valid) > 1:
bits = [line[index] for line in oxygen_valid]
zeros = list(bits).count('0')
ones = list(bits).count('1')
if zeros > ones:
oxygen_valid = [line for line in oxygen_valid if line[index] == "0"]
else:
oxygen_valid = [line for line in oxygen_valid if line[index] == "1"]
index += 1
co = list(lines)
index = 0
while len(co) > 1:
bits = [line[index] for line in co]
zeros = list(bits).count('0')
ones = list(bits).count('1')
if zeros <= ones:
co = [line for line in co if line[index] == "0"]
else:
co = [line for line in co if line[index] == "1"]
index += 1
print("Star 2", int(oxygen_valid[0], 2) * int(co[0], 2)) | 3.109375 | 3 |
simpletickets/views.py | monobot/simple-tickets | 6 | 12766496 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic.list import ListView
from .models import Ticket # noqa
from .forms import TicketFormUser, TicketFormStaff
from .helpers import monitor, monitorfile
from .settings import (BASE_TEMPLATE, ST_FL_MNTR_OWNER, ST_FL_MNTR_STAFF,
ST_SETT_NUMBERS_STAFF, ST_SETT_NUMBERS_OWNER, ST_SETT_TIMES_STAFF,
ST_SETT_TIMES_OWNER, ST_SETT_MAIN_TASKBAR, ST_STAFF_GNAME, ST_ADMIN_GNAME
)
# MIXINS
class ContextMixin(SuccessMessageMixin, View):
    """Shared context builder for the ticket views.

    Injects the page title, base template and — depending on whether the
    current user is staff — the configured statistic blocks into the
    template context.
    """
    def get_context_data(self, **kwargs):
        """Extend the default context with titles, settings and statistics.

        BUGFIX: ``n_solved`` used to be computed only inside the ``stt_numb``
        branch but was read in the ``stt_times`` branch, raising NameError
        whenever only the "times" statistics were enabled; it is now
        computed once up front.  The fragile ``cond and a or b`` idiom
        (wrong whenever ``a`` is falsy) is replaced by conditional
        expressions.
        """
        context = super(ContextMixin, self).get_context_data(**kwargs)
        context['title'] = self.title
        context['base_template'] = BASE_TEMPLATE
        is_staff = self.request.user.is_staff
        context['ST_MNTR'] = ST_FL_MNTR_STAFF if is_staff else ST_FL_MNTR_OWNER
        stt_numb = ST_SETT_NUMBERS_STAFF if is_staff else ST_SETT_NUMBERS_OWNER
        stt_times = ST_SETT_TIMES_STAFF if is_staff else ST_SETT_TIMES_OWNER
        context['ST_SETT_MAIN_TASKBAR'] = ST_SETT_MAIN_TASKBAR
        # Needed by both statistic blocks below.
        n_solved = Ticket.objects.n_solved(self.request.user)
        if stt_numb:
            context['stt_numb'] = stt_numb
            n_total = Ticket.objects.n_total(self.request.user)
            if n_solved and n_total:
                context['porc_solved'] = n_solved * 100 / n_total
                context['porc_pending'] = 100 - context['porc_solved']
            elif n_total:
                # Tickets exist but none are solved yet.
                context['porc_solved'] = 0
                context['porc_pending'] = 100
            else:
                # No tickets at all: report everything as solved.
                context['porc_solved'] = 100
                context['porc_pending'] = 0
        if stt_times:
            context['statistic_times'] = stt_times
            all_tickets = Ticket.objects.all()
            finished = all_tickets.filter(state__gt=7)
            if finished:
                # Fastest resolution among tickets with state > 7.
                context['fastest'] = finished.order_by(
                    'resolution_delta')[0].humanized_delta()
            if n_solved:
                # Mean resolution time over all finished tickets.
                context['media'] = timedelta(
                    seconds=sum(
                        t.resolution_delta for t in finished
                    ) / n_solved
                )
            else:
                context['media'] = 'N/A'
        return context
class Login_required_mixin(View):
    """View mixin wrapping the resolved view callable in ``login_required``."""
    @classmethod
    def as_view(cls, **kwargs):
        # Idiom fix: the first argument of a classmethod is the class, so it
        # is named ``cls`` (it was misleadingly named ``self``).
        return login_required(
            super(Login_required_mixin, cls).as_view(**kwargs)
        )
class TicketMixin(object):
    """Restricts the ticket queryset according to the requesting user's
    role: ticket admin, ticket manager (staff group) or plain owner."""

    # NOTE(review): these run a DB query (get_or_create) at import time,
    # when the class body is evaluated; this can fail for management
    # commands run before migrations -- consider moving into get_queryset.
    staff_group = Group.objects.get_or_create(name=ST_STAFF_GNAME)[0]
    admin_group = Group.objects.get_or_create(name=ST_ADMIN_GNAME)[0]

    def get_queryset(self):
        """Return only the tickets the current user is allowed to see."""
        user = self.request.user
        is_ticket_manager = user.groups.filter(
            name=self.staff_group.name
        ).exists()
        is_ticket_admin = user.groups.filter(
            name=self.admin_group.name
        ).exists()
        if is_ticket_admin:
            # Admins: unassigned (state=1) or any still-open ticket.
            # NOTE(review): Q(state=1) is subsumed by Q(state__lt=9); kept
            # as-is to mirror the manager filter below -- confirm intent.
            return Ticket.objects.filter(
                Q(state=1) |
                Q(state__lt=9)
            )
        elif is_ticket_manager:
            # Managers: unassigned tickets plus open tickets assigned to them.
            return Ticket.objects.filter(
                Q(state=1) |
                Q(staff=user, state__lt=9)
            )
        # Everyone else only ever sees their own tickets.
        return Ticket.objects.filter(user=user)

    def get_object(self):
        # Resolve the ticket inside the permitted queryset so users cannot
        # fetch other people's tickets by guessing ids.
        return self.get_queryset().get(id=self.kwargs['ST_id'])
# END MIXINS
class TicketCreate(ContextMixin, Login_required_mixin, CreateView):
    """Form view that lets an authenticated user open a new ticket."""

    # Fix: the title read _('Edit ticket') -- a copy/paste from
    # TicketUpdate; this view creates tickets (see success_message).
    title = _('Create ticket')
    model = Ticket
    fields = ['ticket_type', 'severity', 'description', 'attachment', ]
    success_message = _('Ticket was successfully created')
    error_message = _('Please check the failures bellow')
    success_url = reverse_lazy('ticketList')

    def form_valid(self, form):
        """Attach the requesting user as the ticket owner before saving."""
        form.instance.user = self.request.user
        return super(TicketCreate, self).form_valid(form)
class TicketDelete(ContextMixin, Login_required_mixin, TicketMixin,
                   DeleteView):
    """Confirmation view that deletes a ticket the user may access."""

    title = _('Delete ticket')
    model = Ticket
    success_message = _('Ticket was successfully deleted')
    # Fix: a dead `success_url = reverse_lazy('home')` assignment used to
    # precede this one; only the last value ever took effect.
    success_url = reverse_lazy('ticketList')
class TicketUpdate(ContextMixin, Login_required_mixin, TicketMixin,
                   UpdateView):
    """Edit view for an existing ticket.

    Staff members get ``TicketFormStaff``; ticket owners get
    ``TicketFormUser``.  Saving also appends a header line to the
    ticket's monitor file and keeps assignment/state bookkeeping
    consistent.
    """

    title = _('Edit ticket')
    model = Ticket
    success_message = _('Ticket was successfully updated')
    # Fix: a dead `success_url = reverse_lazy('home')` assignment used to
    # precede this one; only the last value ever took effect.
    success_url = reverse_lazy('ticketList')

    def getHeader(self, date, user):
        """Return the monitor-log header line for this change."""
        header_msg = _('{date} [user: {user}] ').format(date=date, user=user)
        return header_msg

    def get_form_class(self):
        """Pick the form class by role: staff get the full form."""
        if self.request.user.is_staff:
            return TicketFormStaff
        return TicketFormUser

    def form_valid(self, form):
        """Apply role-dependent side effects, then save the ticket."""
        ticket = form.instance
        header_msg = self.getHeader(
            ticket.creation_date,
            self.request.user.username
        )
        # First staff member to touch an unassigned ticket takes it over.
        if not ticket.staff and self.request.user.is_staff:
            ticket.staff = self.request.user
        # A non-staff edit of a state-8 ("solved" -- TODO confirm state
        # codes) ticket reopens it and clears the resolution bookkeeping.
        if not self.request.user.is_staff and ticket.state == 8:
            ticket.state = 2
            ticket.resolution_date = None
            ticket.resolution_delta = None
        monitor(monitorfile(ticket), header_msg)
        return super(TicketUpdate, self).form_valid(form)
class TicketList(ContextMixin, Login_required_mixin, TicketMixin, ListView):
    """List view of the tickets visible to the requesting user
    (queryset restricted by TicketMixin)."""
    model = Ticket
    title = _('Ticket list')
| 1.820313 | 2 |
applications/MultilevelMonteCarloApplication/external_libraries/PyCOMPSs/exaqute/ExaquteParameter.py | ma6yu/Kratos | 0 | 12766497 | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Parameter-direction constants; all defined as None in this stub.
IN = None
INOUT = None
FILE_IN = None
FILE_OUT = None
FILE_INOUT = None
COLLECTION_IN = None
COLLECTION_INOUT = None
COLLECTION_OUT = None
# Aliases for parameter definition as dictionary
Type = 'type'  # parameter type
Direction = 'direction'  # parameter direction (comment fixed; was "type")
StdIOStream = 'stream'  # parameter stream
Prefix = 'prefix'  # parameter prefix
Depth = 'depth'  # collection recursive depth
block_count = 'block_count'
block_length = 'block_length'
stride = 'stride'
| 1.757813 | 2 |
examples/callbacks.py | progmacattack/mpyg321 | 0 | 12766498 | <gh_stars>0
"""
MPyG321 callbacks example
Playing and pausing some music, triggering callbacks
You need to add a "sample.mp3" file in the working directory
"""
from mpyg321.mpyg321 import MPyg321Player
from time import sleep
class MyPlayer(MPyg321Player):
    """We create a class extending the basic player to implement callbacks"""
    # Each on* method overrides an MPyg321Player hook; here they only print.

    def onAnyStop(self):
        """Callback when the music stops for any reason"""
        print("The music has stopped")

    def onUserPause(self):
        """Callback when user pauses the music"""
        print("The music has paused")

    def onUserResume(self):
        """Callback when user resumes the music"""
        print("The music has resumed")

    def onUserStop(self):
        """Callback when user stops music"""
        print("The music has stopped (by user)")

    def onMusicEnd(self):
        """Callback when music ends"""
        print("The music has ended")
def do_some_play_pause(player):
    """Does some play and pause.

    Plays sample.mp3, pauses after 5s, resumes 3s later, then stops and
    quits (blocks for roughly 13 seconds in total).
    """
    player.play_song("sample.mp3")
    sleep(5)
    player.pause()
    sleep(3)
    player.resume()
    sleep(5)
    player.stop()
    player.quit()
def main():
    """Do the magic"""
    # Instantiate the callback-enabled player and run the demo sequence.
    player = MyPlayer()
    do_some_play_pause(player)


if __name__ == "__main__":
    main()
| 3.53125 | 4 |
gyoseki/admin.py | yamaken1343/gyoseki-archive | 1 | 12766499 | from django.contrib import admin
from .models import Author, Recode, Tag, Division, Language
# Register your models here.
# Expose the gyoseki models in the Django admin with default options.
# Fix: stray dataset score text fused onto the last register() call has
# been removed (it made the module unparsable).
admin.site.register(Author)
admin.site.register(Recode)
admin.site.register(Tag)
admin.site.register(Division)
admin.site.register(Language)
books/migrations/0008_auto_20210621_0054.py | Aki-qiu/DATA130039.01-MyBookDB | 3 | 12766500 | # Generated by Django 3.2.3 on 2021-06-20 16:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds verbose names to the Books
    fields and an index on `title`.  Do not hand-edit field definitions;
    generate a new migration instead."""

    dependencies = [
        ('publishers', '0002_auto_20210608_2151'),
        ('writers', '0004_auto_20210621_0054'),
        ('books', '0007_alter_books_title'),
    ]

    operations = [
        migrations.AlterField(
            model_name='books',
            name='classification',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.classification', verbose_name='Category'),
        ),
        migrations.AlterField(
            model_name='books',
            name='edition',
            field=models.TextField(null=True, verbose_name='Edition'),
        ),
        migrations.AlterField(
            model_name='books',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Price'),
        ),
        migrations.AlterField(
            model_name='books',
            name='price_vip',
            field=models.DecimalField(decimal_places=2, max_digits=6, verbose_name='VIP Price'),
        ),
        migrations.AlterField(
            model_name='books',
            name='publish_date',
            field=models.DateField(verbose_name='Publish Date'),
        ),
        migrations.AlterField(
            model_name='books',
            name='publishers',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='publishers.publishers', verbose_name='Publisher'),
        ),
        migrations.AlterField(
            model_name='books',
            name='storage',
            field=models.PositiveIntegerField(default=0, verbose_name='Storage'),
        ),
        migrations.AlterField(
            model_name='books',
            name='sub_classification',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.classificationsub', verbose_name='Sub-Category'),
        ),
        migrations.AlterField(
            model_name='books',
            name='title',
            field=models.CharField(max_length=64, verbose_name='Title'),
        ),
        migrations.AlterField(
            model_name='books',
            name='writers',
            field=models.ManyToManyField(to='writers.Writers', verbose_name='Author/Translator'),
        ),
        migrations.AddIndex(
            model_name='books',
            index=models.Index(fields=['title'], name='books_books_title_5d5dc9_idx'),
        ),
    ]
| 1.570313 | 2 |
syncfin/utils/positions.py | gitvipin/syncfin | 1 | 12766501 |
import collections
import datetime
import logging
import os
import syncfin.db.model as mydb
import syncfin.utils.common as common
import syncfin.core.config as config
from prettytable import PrettyTable
log = logging.getLogger(__name__)
class Positions(object):
    """Imports ETF holdings CSV files into the Positions DB and reports
    holdings for requested tickers."""

    def parse_and_save(self, fpath):
        """
        Parses the file and saves to Positions DB.

        Expects CSV rows of: date, fund, company, ticker, <ignored>,
        shares, market value, weight.  The first line (header) is
        skipped; duplicate (date, fund, ticker) entries are ignored.
        """
        data = []
        with open(fpath, 'r') as fp:
            data = fp.readlines()
        data = [_.strip() for _ in data]
        with mydb.PositionsDB() as _db:
            _db.table = _db.TABLE
            for line in data[1:]:
                try:
                    date, fund, company, ticker, _, shares, mvalue, weight = line.split(',')
                    # TODO: Convert date to standard format.
                    # Currently just reverses the '/'-separated fields and
                    # joins with '-'; presumably MM/DD/YYYY -> YYYY-DD-MM --
                    # confirm the source date layout.
                    date = '-'.join(date.split('/')[::-1])
                    if _db.read(date=date, fund=fund, ticker=ticker):
                        # If entry is already present for a ticker in a fund on a given date,
                        # do not create duplicate entry and ignore it.
                        continue
                    _db.write(date=date,
                              fund=fund,
                              company=company,
                              ticker = ticker,
                              shares = shares,
                              mvalue = mvalue,
                              weight = weight
                              )
                except Exception as err:
                    # Malformed rows are logged and skipped, not fatal.
                    log.info("Skipped line : %s", line)
                    log.error('%r' % err)

    def _update_from_file(self, fund_file):
        # Best-effort import of a single fund file.
        try:
            self.parse_and_save(fund_file)
        except Exception as _:
            # NOTE(review): the caught exception is discarded; the log
            # line below never includes the error details.
            log.error("Error in updating - %s", fund_file)

    def _update_from_files(self):
        # Import every file listed (';'-separated) in SYNCFIN_FUND_FILES.
        fund_files = config.get_param('SYNCFIN_FUND_FILES')
        if not fund_files:
            return
        fund_files = [x for x in fund_files.split(';') if x]
        for fund_file in fund_files:
            self._update_from_file(fund_file)

    def _update_from_dirs(self):
        # Import every non-hidden file under each directory listed
        # (';'-separated) in SYNCFIN_FUND_DIRS, recursively.
        funds_dirs = config.get_param('SYNCFIN_FUND_DIRS')
        if not funds_dirs:
            return
        funds_dirs = [x for x in funds_dirs.split(';') if x]
        # Add info from all the files in directory.
        for funds_dir in funds_dirs:
            for root, _, files in os.walk(funds_dir):
                for fpath in files:
                    if fpath.startswith('.'):
                        continue
                    self._update_from_file(os.path.join(root, fpath))

    def update(self):
        """
        Update database from CSV files of respective ETFs.
        ETFs must be in ./data/sample_etf.csv format.
        """
        self._update_from_files()
        self._update_from_dirs()

    def report(self, tickers):
        """Print a pretty table of all stored holdings for `tickers`."""
        results = collections.defaultdict(list)
        with mydb.PositionsDB() as _db:
            _db.table = _db.TABLE
            for ticker in tickers:
                records = _db.read(ticker=ticker)
                results[ticker].extend(sorted(records))
        t = PrettyTable(['Date','Fund','Company','Ticker','Shares',
                         'Market value($) of holding', 'Weight(%)', 'Note'])
        print ("=" * 70)
        print (" " * 30, " Holdings (in ETFs) ")
        print ("=" * 70)
        for ticker in sorted(results):
            for holding in results[ticker]:
                holding = list(holding)
                # Column 5 is the market value; format it as currency.
                holding[5] = common.format_currency(holding[5])
                t.add_row(holding)
        print(t)
| 2.4375 | 2 |
src/commandline.py | tiktikimelbo7/framebot | 3 | 12766502 | import argparse
import os
import sys
from src import config
def process_arguments():
    """Parse command-line arguments into the global `config` module.

    Validates directories and album options, applies defaults
    (count = remaining frames, delay = 120 s) and, with --verbose,
    prints a configuration summary.  Exits the process on invalid input.

    Fixes:
    - `--count 0` / `--delay 0` are now honoured (previously truthiness
      checks silently replaced explicit zeros with the defaults).
    - The "--calbum-id without --cdir" validation used to run only when
      --verbose was given; it now always runs.
    """
    # Take arguments from commandline and parse them
    parser = argparse.ArgumentParser()
    parser.add_argument("--page-id", required=True, help="your facebook page-id")
    parser.add_argument("--pdir", required=True, help="directory of frames for main posts")
    parser.add_argument("--cdir", help="directory of frames to post as comments under main posts")
    parser.add_argument("--palbum-id", help="album-id to post frames from --pdir")
    parser.add_argument("--calbum-id", help="album-id to post frames from --cdir")
    parser.add_argument("--token", required=True, help="your facebook page access-token")
    parser.add_argument("--start", type=int, required=True, help="starting number of the frame to post")
    parser.add_argument("--count", type=int, help="how many frames to post starting from --start")
    parser.add_argument("--delay", type=int, help="delay between two frame-posts in seconds")
    parser.add_argument("--use-timestamp", action="store_true", help="parse timestamp from filename")
    parser.add_argument("-v", "--verbose", action="store_true", help="turns on verbosity")
    parser.add_argument("-n", "--dry-run", action="store_true", help="offline testing, no web request made")
    args = parser.parse_args()

    # Store the values from commandline into the shared config module.
    config.page_id = args.page_id
    config.pdir = args.pdir
    config.cdir = args.cdir
    config.palbum_id = args.palbum_id
    config.calbum_id = args.calbum_id
    config.token = args.token
    config.start = args.start
    config.count = args.count
    config.delay = args.delay
    config.use_timestamp = args.use_timestamp
    config.verbose = args.verbose
    config.dry_run = args.dry_run

    if config.dry_run:
        # Dry runs are always verbose so the user can see what would happen.
        config.verbose = True
        print("DRY RUN MODE")
        print("No web request will be made, a dummy response will be returned for offline app testing.\n")

    if not os.path.isdir(config.pdir):
        print("Photo-frames directory is not valid.")
        sys.exit(1)

    if config.cdir and not os.path.isdir(config.cdir):
        print("Comment-frames directory is not valid.")
        sys.exit(1)

    # Comment album without comment frames makes no sense; this check now
    # runs regardless of verbosity.
    if config.calbum_id and not config.cdir:
        print("ERROR: Comment-frames directory not provided, not possible to post in album.")
        sys.exit(1)

    if config.count is None:
        # If --count is not provided, post all remaining frames starting
        # from --start.  (`is None`, not truthiness, so an explicit
        # `--count 0` is respected.)
        config.count = len(os.listdir(config.pdir)) - config.start + 1
        # If count is less than 0, the start-number exceeds the frame count.
        if config.count < 0:
            print(f"Invalid start-number. There are less than {config.start} frames in your directory.")
            sys.exit(1)

    if config.delay is None:
        config.delay = 120  # Default delay is 120 seconds or 2 minutes

    if config.verbose:
        print(f"Page-id: {config.page_id}")
        print(f"Access-token: {config.token}")
        print(f"Photo-frames directory: {config.pdir}")
        if config.cdir:
            print(f"Comment-frames directory: {config.cdir}")
        else:
            print("Warning: Comment-frames directory is not provided, nothing will be posted in comments.")
        if config.palbum_id:
            print(f"Album-id for photo-frames: {config.palbum_id}")
        else:
            print("Warning: album-id for photo-frames is not provided, photo-frames will not be added to album")
        if config.calbum_id:
            print(f"Album-id for comment-frames: {config.calbum_id}")
        else:
            print("Warning: Album-id for comment-frames is not provided, comment-frames will not be added to album")
        print(f"Starting frame-number: {config.start}")
        print(f"Number of frames to post: {config.count}")
        print(f"Delay: {config.delay} seconds")
| 3.015625 | 3 |
competitive-programming/codeforces/edu/binary-search/step1b.py | sanchopanca/coding-for-pleasure | 0 | 12766503 | <gh_stars>0
def bin_search_lower(a, x):
    """Return the index of the last element of sorted sequence `a` that
    is <= `x`, or -1 when every element is greater than `x`."""
    # Invariant: a[lo] <= x < a[hi], with lo == -1 and hi == len(a)
    # acting as virtual sentinels.
    lo, hi = -1, len(a)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if a[mid] > x:
            hi = mid
        else:
            lo = mid
    return lo
# Read n (array size) and k (query count), the sorted array, then the k
# queries; for each query x print how many elements are <= x
# (index of the last such element, +1 to convert to a count).
n, k = map(int, input().split())
array = list(map(int, input().split()))
for x in map(int, input().split()):
    print(bin_search_lower(array, x) + 1)
| 3.359375 | 3 |
msgraph/graph/implementations/graph_filter.py | SWB-Dev/microsoft-graph-api | 0 | 12766504 | <reponame>SWB-Dev/microsoft-graph-api
from __future__ import annotations
class GraphFilter:
    """Implements the IGraphFilter protocol.

    Represents a single ``$name=value`` query option which may carry
    nested sub-filters, rendered inside one pair of parentheses by
    :meth:`compose`.

    Fix: stray dataset score text fused onto the final ``return`` made
    the module unparsable; it has been removed.
    """

    def __init__(self, filter: str, value: str):
        self.filter = filter
        self.value = value
        self.subfilters: list[GraphFilter] = []

    def add_subfilter(self, filter: GraphFilter):
        """Append a nested filter to this one."""
        self.subfilters.append(filter)

    def compose(self) -> str:
        """Return ``$filter=value``; any sub-filters are concatenated
        (in insertion order) inside a single pair of parentheses."""
        result = f"${self.filter}={self.value}"
        if not self.subfilters:
            return result
        result += "("
        for subfilter in self.subfilters:
            result += subfilter.compose()
        result += ")"
        return result
nally/core/layers/transport/tcp/tcp_control_bits.py | FreibergVlad/port-scanner | 0 | 12766505 | from nally.core.utils.bit_flags import BitFlags
from nally.core.utils.utils import Utils
class TcpControlBits(BitFlags):
    """
    Represents 9 TCP control flags, can be used for storing, setting or
    retrieving flags
    """

    NS = 256
    """Bit mask used to check or set NS flag """
    CWR = 128
    """Bit mask used to check or set CWR flag """
    ECE = 64
    """Bit mask used to check or set ECE flag """
    URG = 32
    """Bit mask used to check or set URG flag """
    ACK = 16
    """Bit mask used to check or set ACK flag """
    PSH = 8
    """Bit mask used to check or set PSH flag """
    RST = 4
    """Bit mask used to check or set RST flag """
    SYN = 2
    """Bit mask used to check or set SYN flag """
    FIN = 1
    """Bit mask used to check or set FIN flag """

    def __init__(
            self,
            ns=False,
            cwr=False,
            ece=False,
            urg=False,
            ack=False,
            psh=False,
            rst=False,
            syn=False,
            fin=False
    ):
        """
        Initialises TCPControlBits instance

        :param bool ns: ECN-nonce - concealment protection
        :param bool cwr: Congestion window reduced (CWR) flag is set by the
            sending host to indicate that it received a TCP segment with the
            ECE flag set and had responded in congestion control mechanism
        :param bool ece: ECN-Echo has a dual role, depending on the value of
            the SYN flag. It indicates:
              * If the SYN flag is set (1), that the TCP peer is ECN capable.
              * If the SYN flag is clear (0), that a packet with Congestion
                Experienced flag set (ECN=11) in the IP header was received
                during normal transmission. This serves as an indication of
                network congestion (or impending congestion) to the TCP sender.
        :param bool urg: indicates that the Urgent pointer field is significant
        :param bool ack: indicates that the Acknowledgment field is
            significant. All packets after the initial SYN packet sent by the
            client should have this flag set
        :param bool psh: push function. Asks to push the buffered data to the
            receiving application
        :param bool rst: reset the connection
        :param bool syn: synchronize sequence numbers. Only the first packet
            sent from each end should have this flag set. Some other flags and
            fields change meaning based on this flag, and some are only valid
            when it is set, and others when it is clear
        :param bool fin: means that current packet is the last packet
            from sender
        """
        super().__init__()
        self.set_flag(self.NS, ns)
        self.set_flag(self.CWR, cwr)
        self.set_flag(self.ECE, ece)
        self.set_flag(self.URG, urg)
        self.set_flag(self.ACK, ack)
        self.set_flag(self.PSH, psh)
        self.set_flag(self.RST, rst)
        self.set_flag(self.SYN, syn)
        self.set_flag(self.FIN, fin)

    @staticmethod
    def from_int(bits: int):
        """
        Creates TcpControlBits instance from integer

        :param int bits: integer which represents bit flags
        :return: TcpControlBits instance
        """
        is_flag_set: callable = Utils.is_bit_set
        return TcpControlBits(
            is_flag_set(bits, TcpControlBits.NS),
            is_flag_set(bits, TcpControlBits.CWR),
            is_flag_set(bits, TcpControlBits.ECE),
            is_flag_set(bits, TcpControlBits.URG),
            is_flag_set(bits, TcpControlBits.ACK),
            is_flag_set(bits, TcpControlBits.PSH),
            is_flag_set(bits, TcpControlBits.RST),
            is_flag_set(bits, TcpControlBits.SYN),
            is_flag_set(bits, TcpControlBits.FIN)
        )

    @property
    def ns(self) -> bool:
        return self.is_flag_set(self.NS)

    @property
    def cwr(self) -> bool:
        return self.is_flag_set(self.CWR)

    @property
    def ece(self) -> bool:
        return self.is_flag_set(self.ECE)

    @property
    def urg(self) -> bool:
        return self.is_flag_set(self.URG)

    @property
    def ack(self) -> bool:
        return self.is_flag_set(self.ACK)

    @property
    def psh(self) -> bool:
        return self.is_flag_set(self.PSH)

    @property
    def rst(self) -> bool:
        return self.is_flag_set(self.RST)

    @property
    def syn(self) -> bool:
        return self.is_flag_set(self.SYN)

    @property
    def fin(self) -> bool:
        return self.is_flag_set(self.FIN)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, TcpControlBits):
            return self.flags == other.flags
        # Fix: previously returned None implicitly for foreign types;
        # NotImplemented lets Python try the reflected comparison and
        # fall back to identity, so `flags == 5` is cleanly False.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable -- confirm instances are never used as dict/set keys.
        return NotImplemented

    def __str__(self) -> str:
        # Render the set flags as a lower-case, space-separated list in
        # conventional order (NS first, FIN last) -- same output as the
        # previous concatenate-then-strip implementation.
        flag_names = (
            (self.ns, "ns"), (self.cwr, "cwr"), (self.ece, "ece"),
            (self.urg, "urg"), (self.ack, "ack"), (self.psh, "psh"),
            (self.rst, "rst"), (self.syn, "syn"), (self.fin, "fin"),
        )
        return " ".join(name for is_set, name in flag_names if is_set)
| 2.640625 | 3 |
progress2.py | hrhee8/DL | 0 | 12766506 | #!/usr/bin/env python
# coding: utf-8
# In[9]:
#setup for implementing gpu brought from given file
import numpy as np
import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import h5py
import time
# In[3]:
# Data augmentation for training: random 32x32 crop (4px padding) and a
# horizontal flip, then normalize each RGB channel to roughly [-1, 1].
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Test-time pipeline: normalization only (no augmentation).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])


# In[5]:


# CIFAR-10 datasets and loaders (batch size 100); training data is
# downloaded on first run.
train_data = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform_train)
test_data = torchvision.datasets.CIFAR10(root='./data', train=False,
                                        download=False, transform=transform)
train_loader = torch.utils.data.DataLoader(train_data, batch_size= 100,
                                          shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=100,
                                         shuffle=False, num_workers=2)
# In[19]:
class CNN(nn.Module):
    """Three-stage CNN for 32x32 RGB (CIFAR-10) inputs.

    Each stage: two 4x4 convs (stride 1, padding 2, each growing the
    spatial size by 1) with ReLU + BatchNorm, a 2x2 max-pool and 50%
    dropout.  Spatial size evolves 32 -> 17 -> 9 -> 5, which is why the
    classifier head expects 256*5*5 features.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Stage 1: 3 -> 64 channels, 32x32 -> 17x17.
        self.convlayer1 = nn.Sequential(
            nn.Conv2d(3,64,4,1,2),
            nn.ReLU(inplace = True),
            nn.BatchNorm2d(64),
            nn.Conv2d(64,64,4,1,2),
            nn.ReLU(inplace = True),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(2,2),
            nn.Dropout(p=0.5)
        )
        # Stage 2: 64 -> 128 channels, 17x17 -> 9x9.
        self.convlayer2 = nn.Sequential(
            nn.Conv2d(64,128,4,1,2),
            nn.ReLU(inplace = True),
            nn.BatchNorm2d(128),
            nn.Conv2d(128,128,4,1,2),
            nn.ReLU(inplace = True),
            nn.BatchNorm2d(128),
            nn.MaxPool2d(2,2),
            nn.Dropout(p=0.5)
        )
        # Stage 3: 128 -> 256 channels, 9x9 -> 5x5.
        self.convlayer3 = nn.Sequential(
            nn.Conv2d(128,256,4,1,2),
            nn.ReLU(inplace = True),
            nn.BatchNorm2d(256),
            nn.Conv2d(256,256,4,1,2),
            nn.ReLU(inplace = True),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(2,2),
            nn.Dropout(p=0.5)
        )
        # Classifier head: flattened 256x5x5 -> 500 -> 10 logits.
        self.fulllayer = nn.Sequential(
            nn.Linear(256*5*5,500),
            nn.ReLU(inplace = True),
            nn.Linear(500,10)
        )

    def forward(self,x):
        x = self.convlayer1(x)
        x = self.convlayer2(x)
        x = self.convlayer3(x)
        # Flatten per-sample feature maps for the linear layers.
        x = x.view(-1,256*5*5)
        x = self.fulllayer(x)
        return x
# In[20]:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
model = CNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.0005)
# LR decays x0.1 at epochs 60/120/160 -- note only 9 epochs run below,
# so the schedule never actually triggers.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones = [60,120,160],gamma=0.1, last_epoch=-1)


# In[ ]:


epoch = 9  # NOTE(review): immediately shadowed as the loop variable below
time1 = time.time()
for epoch in range(epoch):
    # NOTE(review): scheduler.step() before the optimizer steps is the
    # legacy (pre-1.1) PyTorch ordering -- confirm this is intended.
    scheduler.step()
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 100 == 99:
            # Average loss over the last window (divisor 1000 is the
            # original's choice, not the window size of 100).
            print ("Epoch", epoch+1, "%s minibatches"%i, "loss: %.4f"%(running_loss/1000.))
            running_loss = 0.0
print('Complete')
time2 = time.time()
print(time2-time1)


# In[22]:


# Evaluation: top-1 accuracy over the test set, gradients disabled.
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print("Test accuracy:", (100 * correct / total))
# In[ ]:
| 2.71875 | 3 |
celery_app/plugins/pluginnormal/phpmyadmin_weak_password.py | tiaotiaolong/piu | 2 | 12766507 | import requests
import re
from celery_app.utils.utils import insert_vuln_db
from celery_app.config.config import web_port_long
#通达 OA 系统 SQL 注入漏洞
plugin_id=73
default_port_list=web_port_long
def get_token(url):
    """Fetch `url` and return the hidden `token` form value (phpMyAdmin
    CSRF token), or False when no token input is found."""
    result = re.findall('<input type="hidden" name="token" value="(\w+)" />', requests.get(url, timeout=10).text)
    if result:
        return result[0]
    return False
def is_phpmyadmin(url):
    # Cheap fingerprint: the login page embeds the literal 'phpMyAdmin'.
    return 'phpMyAdmin' in requests.get(url, timeout=10).text
def check(host, port=80):
    """Try weak username/password combinations against a phpMyAdmin
    login at `host:port` (and its /phpmyadmin/index.php path).

    On success the finding is stored via insert_vuln_db and a
    (True, host, target, output) tuple is returned; otherwise False.

    Fixes: the POST data contained an unquoted upstream redaction marker
    (`<PASSWORD>`) instead of the loop variable `pwd`, which was a syntax
    error; the bare `except:` has been narrowed to `except Exception`.
    """
    scheme = 'https' if '443' in str(port) else 'http'
    target = '{}://{}:{}'.format(scheme, host, port)
    urls = [target, '{}/phpmyadmin/index.php'.format(target)]
    try:
        for url in urls:
            if not is_phpmyadmin(url):
                continue
            # NOTE(review): several password literals below were redacted
            # upstream to '<PASSWORD>'; restore a real wordlist before use.
            simple_passwords = ['', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '!@#', '<PASSWORD>', '111', '666', '1314']
            simple_users = ['', 'root', 'test', 'admin', 'server', 'password', 'mysql', 'ceshi', 'mima',
                            host.split('.')[0]]
            # Candidate passwords are user+password concatenations.
            passwords = ['{}{}'.format(user, password) for user in simple_users for password in simple_passwords]
            for user in ['root', 'test', 'server', 'ceshi']:
                for pwd in passwords:
                    # A fresh token is required for every login attempt.
                    token = get_token(url)
                    if not token:
                        return False
                    data = {
                        "pma_username": user,
                        "pma_password": pwd,
                        "server": 1,
                        "token": token
                    }
                    requests.packages.urllib3.disable_warnings()
                    response = requests.post(url, data, timeout=7, headers={'Cookie': "pma_lang=zh_CN"})
                    if 'login_form' in response.text:
                        # Still on the login page: wrong credentials.
                        continue
                    elif response.status_code == 200 and 'db_structure.php' in response.text:
                        output = "用户名:{}\t 密码:{}".format(user, pwd)
                        target = url
                        insert_vuln_db(host, target, output, plugin_id)
                        return True, host, target, output
    except Exception:
        # Any network/parse failure means "not vulnerable as far as we
        # can tell"; KeyboardInterrupt/SystemExit are no longer swallowed.
        return False
    return False
| 2.5625 | 3 |
basic/simple.py | chaniotakismg/quantum-experiments | 0 | 12766508 | import numpy as np
from qiskit import (
QuantumCircuit,
execute,
Aer)
from qiskit.visualization import plot_histogram
# Use Aer's qasm_simulator
simulator = Aer.get_backend('qasm_simulator')

# Create a Quantum Circuit acting on the q register.
# Note: 3 qubits / 3 classical bits are declared but only the first two
# are used, so counts keys carry a leading '0' (e.g. '000'/'011').
circuit = QuantumCircuit(3, 3)

# Add a H gate on qubit 0
circuit.h(0)

# Add a CX (CNOT) gate on control qubit 0 and target qubit 1
circuit.cx(0, 1)

# Map the quantum measurement to the classical bits
circuit.measure([0, 1], [0, 1])

# Execute the circuit on the qasm simulator
job = execute(circuit, simulator, shots=1000)

# Grab results from the job
result = job.result()

# Returns counts
counts = result.get_counts(circuit)
print("\nTotal count for 00 and 11 are:", counts)

# Draw the circuit
circuit.draw()

# Fix: stray dataset score text fused onto this call has been removed
# (it made the module unparsable).
plot_histogram(counts)
handlers/__init__.py | dragondjf/cqssl | 0 | 12766509 | <reponame>dragondjf/cqssl<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .task import task
from .mainhandler import MainHandler
from .websockerhandler import WebSocketManagerHandler
| 1.101563 | 1 |
source_code/1-1-urllib.py | VickyMin1994/easy-scraping-tutorial | 708 | 12766510 | from urllib.request import urlopen
# if has Chinese, apply decode()
html = urlopen("https://mofanpy.com/static/scraping/basic-structure.html").read().decode('utf-8')
print(html)
import re
res = re.findall(r"<title>(.+?)</title>", html)
print("\nPage title is: ", res[0])
# Page title is: Scraping tutorial 1 | 莫烦Python
res = re.findall(r"<p>(.*?)</p>", html, flags=re.DOTALL) # re.DOTALL if multi line
print("\nPage paragraph is: ", res[0])
# Page paragraph is:
# 这是一个在 <a href="https://mofanpy.com/">莫烦Python</a>
# <a href="https://mofanpy.com/tutorials/scraping">爬虫教程</a> 中的简单测试.
res = re.findall(r'href="(.*?)"', html)
print("\nAll links: ", res)
# All links: ['https://mofanpy.com/static/img/description/tab_icon.png', 'https://mofanpy.com/', 'https://mofanpy.com/tutorials/scraping'] | 3.234375 | 3 |
src/data/52.py | NULLCT/LOMC | 0 | 12766511 | from collections import deque
# Tree with n towns and n-1 roads, plus q queries.  BFS from town 0
# assigns each town a parity (depth mod 2); a query pair at even
# distance shares a parity ("Town"), odd distance prints "Road".
n, q = map(int, input().split())
ab = [[] for i in range(n)]
for i in range(n - 1):
    a, b = map(int, input().split())
    ab[a - 1].append(b - 1)
    ab[b - 1].append(a - 1)
cd = [list(map(int, input().split())) for i in range(q)]
s = set()
qu = deque()
s.add(0)
qu.append(0)
hugo = [0] * n  # parity (BFS depth mod 2) of each town
while qu:
    here = qu.popleft()
    for i in ab[here]:
        if i not in s:
            s.add(i)
            qu.append(i)
            hugo[i] = (hugo[here] + 1) % 2
for i in cd:
    if hugo[i[0] - 1] == hugo[i[1] - 1]:
        print("Town")
    else:
        print("Road")
| 3 | 3 |
mandarin_twitter_bot/settings.py | matthew-li/mandarin_twitter_bot | 0 | 12766512 | <reponame>matthew-li/mandarin_twitter_bot<filename>mandarin_twitter_bot/settings.py
import configparser
import os
"""This module contains settings referenced by the application."""
# The format in which dates are stored.
DATE_FORMAT = "%Y-%m-%d"

# Read configuration from a file.
# NOTE(review): raises KeyError at import time when
# TWITTER_BOT_SETTINGS_MODULE is unset -- presumably intentional fail-fast.
CONFIG_FILE = os.environ["TWITTER_BOT_SETTINGS_MODULE"].strip()
config = configparser.ConfigParser(os.environ)
config.read(CONFIG_FILE)

# AWS-specific settings.
section = "aws"
AWS_DYNAMODB_ENDPOINT_URL = config.get(section, "endpoint_url")
# A blank/whitespace endpoint means "use the default AWS endpoint".
if not AWS_DYNAMODB_ENDPOINT_URL.strip():
    AWS_DYNAMODB_ENDPOINT_URL = None

# Twitter-specific settings.
section = "twitter"
TWITTER_ACCESS_TOKEN = config.get(section, "twitter_access_token")
TWITTER_ACCESS_TOKEN_SECRET = config.get(
    section, "twitter_access_token_secret")
TWITTER_BEARER_TOKEN = config.get(section, "twitter_bearer_token")
TWITTER_CONSUMER_KEY = config.get(section, "twitter_consumer_key")
TWITTER_CONSUMER_SECRET = config.get(section, "twitter_consumer_secret")
TWITTER_SIGNATURE_METHOD = "HMAC-SHA1"
TWITTER_USER_USERNAME = config.get(section, "twitter_user_username")
| 2.828125 | 3 |
app/common/whats_new/views.py | domdomegg/coronavirus-dashboard-frontend-server | 0 | 12766513 | #!/usr/bin python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
import re
from json import loads
from datetime import datetime
# 3rd party:
# Internal:
from app.storage import AsyncStorageClient
from app.caching import from_cache_or_func
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'get_whats_new_banners'
]
BANNER_DATA = dict(
container="publicdata",
path="assets/cms/changeLog.json"
)
special_chars_pattern = re.compile(r"[\"')]")
to_underscore_pattern = re.compile(r"[\s.(&,]+")
async def _get_whats_new_banners(timestamp: str):
    """Download the CMS change log and return the banner entries for the
    date part of `timestamp` that are flagged `displayBanner`.

    Each returned entry is enriched with an `anchor` slug (derived from
    its headline) and a human-readable `formatted_date`.
    """
    async with AsyncStorageClient(**BANNER_DATA) as client:
        data_io = await client.download()
        raw_data = await data_io.readall()

    full_data = loads(raw_data.decode())

    if full_data is None:
        full_data = dict()

    data = full_data.get("changeLog", list())

    # `timestamp` is ISO 8601; keep only the YYYY-MM-DD date part.
    datestamp = timestamp.split("T")[0]

    filtered_data = filter(
        lambda b: b["date"] == datestamp and b.get("displayBanner", False),
        data
    )

    results = list()

    for banner in filtered_data:
        # Slugify the headline for use as an in-page anchor: strip
        # quotes/parens, collapse separators to underscores.
        banner['anchor'] = special_chars_pattern.sub("", banner["headline"].lower())
        banner['anchor'] = to_underscore_pattern.sub("_", banner["anchor"])
        banner['formatted_date'] = f"{datetime.strptime(banner['date'], '%Y-%m-%d'):%-d %B %Y}"
        results.append(banner)

    return results
async def get_whats_new_banners(request, timestamp):
    """Cached wrapper around `_get_whats_new_banners` (15-minute TTL)."""
    response = from_cache_or_func(
        request=request,
        func=_get_whats_new_banners,
        prefix="FRONTEND::CL::",
        expire=60 * 15,
        timestamp=timestamp
    )

    return await response
| 2.140625 | 2 |
9_lab_3_draw_bbox_json.py | zgle-fork/synthetic-images | 0 | 12766514 | <reponame>zgle-fork/synthetic-images
import os, math
from PIL import Image, ImageDraw
import json
def draw_bbs(img_fp, annotations):
    """Draw each annotation's box (centre x/y plus width/height) in red
    on the image and save the result under Out/<img_fp>."""
    img=Image.open(img_fp)
    for a in annotations:
        c=a['coordinates']
        # Coordinates are box centres; convert to corner points.
        x, y, w, h =c['x'], c['y'],c['width'],c['height']
        label=a['label']  # NOTE(review): label is read but never drawn
        shape = [(x-w/2, y-h/2), (x +w/2, y+h/2)]
        # create rectangle image
        img1 = ImageDraw.Draw(img)
        img1.rectangle(shape, outline ="red")
    # img.show()
    img.save(os.path.join('Out',img_fp))
def draw_bb(img_fp, x,y,w,h):
    """Draw a single red box (centre x/y, width w, height h) on the
    image and save the result under Out/<img_fp>."""
    img=Image.open(img_fp)
    shape = [(x-w/2, y-h/2), (x +w/2, y+h/2)]  # two corner points: [(x0, y0), (x1, y1)]
    # create rectangle image
    img1 = ImageDraw.Draw(img)
    img1.rectangle(shape, outline ="red")
    # img.show()
    img.save(os.path.join('Out',img_fp))
# img_fp='TrainingImages/1.png'
# x, y, w, h =1405, 681, 90, 150
# draw_bb(img_fp, x, y, w, h)

# Draw the annotated boxes for every image listed in annotations.json;
# annotated copies are written under the Out/ directory.
json_fp='annotations.json'
data=json.load(open(json_fp))
for x in data:
    print(x)
    img_fp=x['path']
    draw_bbs(img_fp, x['annotations'])
    # for a in x['annotations']:
    #     c=a['coordinates']
    #     x, y, w, h =c['x'], c['y'],c['width'],c['height']
    #     label=a['label']
    #     # draw_bb(img_fp, x, y, w, h)
| 2.578125 | 3 |
moarri_profile_iacs/utils/enumeratings.py | moarri/moarri_profile_iacs | 0 | 12766515 | # -*- coding: utf-8 -*-
# Copyright 2018 Moarri Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
from enum import Enum
class AutoName(Enum):
    """Enum base whose `auto()` values are the member names themselves."""
    def _generate_next_value_(name, start, count, last_values):
        # Hook called by enum.auto(); returns the member name as its value.
        return name
class CodeableEnum(Enum):
    """Enum base whose members carry an extra ``code`` attribute and can
    be looked up by it via :meth:`value_of`."""

    def __init__(self, code):
        self.code = code

    @classmethod
    def value_of(cls, code):
        """Return the member whose ``code`` matches, or None."""
        return next((member for member in cls if member.code == code), None)
| 3.1875 | 3 |
access/views.py | pepeul1191/tutorial-flask | 0 | 12766516 | <reponame>pepeul1191/tutorial-flask
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, session, request, redirect
from datetime import datetime
from main.filters import if_session_active_go_home, if_session_not_active_go_login
view = Blueprint('access_bludprint', __name__)
@view.route('/login', methods=['GET'])
@if_session_active_go_home()
def login():
    """Render the login form.

    Users with an active session are redirected home by the
    ``if_session_active_go_home`` decorator before this body runs.
    """
    locals = {
        'message': '',  # empty message: no error to display on first load
    }
    return render_template(
        'login.html',
        locals=locals
    ), 200
@view.route('/login', methods=['POST'])
def login_access():
    """Validate submitted credentials and create a session on success.

    On success the browser is redirected to '/'; on failure the login form
    is re-rendered with an error message.

    NOTE(review): credentials are hard-coded and a 500 status is returned
    for a failed login where 401 would be more conventional -- confirm
    before changing, existing clients may rely on it.
    """
    user = request.form['user']
    password = request.form['password']
    if user == 'root' and password == '<PASSWORD>':
        # Create the session for the authenticated user.
        session['status'] = 'active'
        session['user'] = 'root'
        session['time'] = datetime.now()
        return redirect('/')
    else:
        locals = {
            'message': 'El usuario y/o no existen',
        }
        return render_template(
            'login.html',
            locals=locals
        ), 500
@view.route('/logout', methods=['GET'])
def logout():
    """Destroy the current session and send the user back to the login page.

    NOTE(review): the original built a ``locals`` dict with the message
    'Su sesión ha sido destruida' that was never passed anywhere; flash
    messaging would be needed to actually surface it after the redirect.
    """
    session.clear()
    return redirect('/login')
@view.route('/', methods=['GET'])
def home():
    """Render the landing page; the template needs no context beyond an
    empty ``locals`` mapping."""
    locals = { }
    return render_template(
        'home.html',
        locals=locals
    ), 200
@view.route('/admin', methods=['GET'])
@if_session_not_active_go_login(param='pepe')  # NOTE(review): 'pepe' looks like a placeholder value -- confirm its purpose
def admin():
    """Render the admin page with its CSS/JS asset lists.

    Anonymous users are bounced to the login page by the
    ``if_session_not_active_go_login`` decorator.
    """
    locals = {
        'csss': ['assets/css/demo', 'assets/css/demo2'],
        'jss': ['assets/js/lib', 'assets/js/demo'],
    }
    return render_template(
        'admin.html',
        locals=locals
    ), 200
Paso2(Andrea).py | andmansim/Ping-Pong | 0 | 12766517 | <gh_stars>0
# Paso 2: Clase de PelotaPong
#Importación de módulos
import random
#Importación de módulos
import pygame
#Importación de módulos
import random
from pygame.locals import QUIT
#Definición de constantes
# Constantes para la inicialización de la superficie de dibujo
VENTANA_HORI = 800 # Ancho de la ventana
VENTANA_VERT = 600 # Alto de la ventana
FPS = 60 # Fotogramas por segundo
BLANCO = (255, 255, 255) # Color del fondo de la ventana (RGB)
#Clase PelotaPong
class PelotaPong:
    """Pong ball sprite: holds its image, position, and movement direction."""

    def __init__(self, fichero_imagen):
        # --- Class attributes ---
        # Ball image (surface loaded from the given file, alpha-converted)
        self.imagen = pygame.image.load(fichero_imagen).convert_alpha()
        # Ball dimensions
        self.ancho, self.alto = self.imagen.get_size()
        # Ball position: start centred in the window
        self.x = VENTANA_HORI / 2 - self.ancho / 2
        self.y = VENTANA_VERT / 2 - self.alto / 2
        # Movement direction: random sign, 5 px per frame on each axis
        self.dir_x = random.choice([-5, 5])
        self.dir_y = random.choice([-5, 5])

    def mover(self):
        """Advance the ball one step along its current direction."""
        self.x += self.dir_x
        self.y += self.dir_y

    def rebotar(self):
        """Bounce off the top/bottom edges; leaving through the left or
        right edge re-centres the ball via reiniciar()."""
        if self.x <= -self.ancho:
            self.reiniciar()
        if self.x >= VENTANA_HORI:
            self.reiniciar()
        if self.y <= 0:
            self.dir_y = -self.dir_y
        if self.y + self.alto >= VENTANA_VERT:
            self.dir_y = -self.dir_y

    def reiniciar(self):
        """Re-centre the ball, reverse its horizontal direction and pick a
        new random vertical direction."""
        self.x = VENTANA_HORI / 2 - self.ancho / 2
        self.y = VENTANA_VERT / 2 - self.alto / 2
        self.dir_x = -self.dir_x
        self.dir_y = random.choice([-5, 5])
#Inicialización
def main():
    """Initialise pygame, create the ball, and run the render/event loop at
    FPS frames per second until the window is closed."""
    pygame.init()
    # Set up the drawing surface (display surface)
    ventana = pygame.display.set_mode((VENTANA_HORI, VENTANA_VERT))
    pygame.display.set_caption("Pong 2")
    # Create the ball
    pelota = PelotaPong("bola_roja.png")
    # Main loop
    jugando = True
    while jugando:
        # Move the ball and handle wall bounces / off-screen resets
        pelota.mover()
        pelota.rebotar()
        ventana.fill(BLANCO)
        ventana.blit(pelota.imagen, (pelota.x, pelota.y))
        for event in pygame.event.get():
            if event.type == QUIT:
                jugando = False
        pygame.display.flip()
        pygame.time.Clock().tick(FPS)
    pygame.quit()
scanpy/tools/dbscan.py | gioelelm/scanpy | 0 | 12766518 | <filename>scanpy/tools/dbscan.py
# Author: <NAME> (http://falexwolf.de)
"""Cluster using DBSCAN
Using the scikit-learn implementation.
"""
import numpy as np
from .. import settings as sett
from .. import logging as logg
def dbscan(adata, basis='tsne', n_comps=2, eps=None, min_samples=None, n_jobs=None, copy=False):
    """Cluster cells using DBSCAN

    This wraps sklearn.cluster.DBSCAN and shares most of the parameters.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix; must already contain 'X_tsne' or 'X_pca'
        in adata.smp, matching `basis`.
    basis : {'tsne', 'pca'}, optional
        Which embedding to cluster on.
    n_comps : int, optional
        Number of components of the embedding to use.
    eps : float or None, optional
        The maximum distance between samples for being considered as in the same
        neighborhood. Clusters are "grown" from samples that have more than
        min_samples points in their neighborhood. Increasing eps therefore
        allows clusters to spread over wider regions.
    min_samples : int or None, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    n_jobs : int (default: None)
        Number of threads to use. Defaults to sett.n_jobs.
    copy : bool (default: False)
        Operate on a copy of adata and return it instead of writing in place.

    Returns
    -------
    AnnData or None
        The annotated copy when ``copy`` is True, otherwise None (adata is
        modified in place: 'dbscan_groups' in .smp, 'dbscan_groups_order'
        in .add).

    References
    ----------
    Ester et al. (1996), "A Density-Based Algorithm for Discovering Clusters in
    Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231.

    Pedregosa et al. (2011) ...
    """
    logg.m('starting DBSCAN', r=True)
    adata = adata.copy() if copy else adata
    if basis not in {'tsne', 'pca'}:
        raise ValueError('`basis` needs to be "tsne" or "pca"')
    if 'X_tsne' in adata.smp and basis == 'tsne':
        X = adata.smp['X_tsne'][:, :n_comps]
    elif 'X_pca' in adata.smp and basis == 'pca':
        X = adata.smp['X_pca'][:, :n_comps]
    else:
        raise ValueError('Run {} first.'.format(basis))
    n_jobs = sett.n_jobs if n_jobs is None else n_jobs
    range_1 = np.max(X[:, 0]) - np.min(X[:, 0])
    range_2 = np.max(X[:, 1]) - np.min(X[:, 1])
    if eps is None:
        if n_comps == 2:
            # Heuristic: scale eps with the average 2D area per point.
            avg_area_per_point = (range_1 * range_2 / X.shape[0])
            logg.m('... the "drawing range" is', range_1, '×', range_2,
                   'with the average area per point', avg_area_per_point)
            eps = 1.7 * np.sqrt(avg_area_per_point)
        else:
            eps = 5
    if min_samples is None: min_samples = 30
    logg.m('... using eps =', eps, end=', ')
    logg.m('min_samples =', min_samples, end=', ')
    logg.m('basis =', basis, end=', ')
    # BUG FIX: previously logged `basis` here instead of `n_comps`.
    logg.m('n_comps =', n_comps, end=', ')
    logg.m('n_jobs =', n_jobs) #, end=', ')
    logg.m('increase `min_samples` if you find too many clusters', v='hint')
    logg.m('reduce eps if "everything is connected"', v='hint')
    from sklearn.cluster import DBSCAN
    from sklearn.neighbors import NearestNeighbors
    # Precompute the sparse distance graph within radius eps, then run
    # DBSCAN on the precomputed metric.
    nn = NearestNeighbors(n_neighbors=min_samples, n_jobs=n_jobs)
    nn.fit(X)
    D = nn.radius_neighbors_graph(radius=eps, mode='distance')
    db = DBSCAN(eps=eps, min_samples=min_samples,
                n_jobs=n_jobs, metric='precomputed').fit(D)
    labels = db.labels_
    # Noise points (label -1) are relabelled '?' in the string labels.
    dont_know = labels == -1
    labels = labels.astype(str)
    labels[dont_know] = '?'
    # loop_over_labels = (label for label in np.unique(labels) if label >= 0)
    adata.smp['dbscan_groups'] = labels
    from natsort import natsorted
    adata.add['dbscan_groups_order'] = np.array(natsorted(np.unique(labels)))[:-1]
    logg.m('    finished', t=True, end=' ')
    logg.m('and found', len(np.unique(labels))-1, 'clusters, added\n'
           '    "dbscan_groups", the cluster labels (adata.smp)\n'
           '    "dbscan_groups_order", the unique cluster labels (adata.add)')
    return adata if copy else None
| 2.75 | 3 |
nau_bb_reporting/reports/hardlinks.py | dikaiosune/nau-bb-learn-reporting | 0 | 12766519 | <reponame>dikaiosune/nau-bb-learn-reporting<gh_stars>0
"""
This report looks at all of the course content HTML and tries to find
links that point back to Bb Learn but are not managed by the LMS or CMS
(i.e. they are copypasta from a user who was smart but not smart enough).
In a given term, we get all of the course content HTML, and then begin
ingesting it into BeautifulSoup and grabbing all of the links. The links
are then checked against a bunch of patterns (developed partially in the
abstract but also iteratively over dozens of previous reports) to see
if they're likely candidates.
If "greedy" mode is enabled, we'll also go after all of the deployed
HTML files in a course, as those very frequently contain bad links.
"""
__author__ = 'adam'
import logging
from string import ascii_uppercase
import pandas as pd
from bs4 import BeautifulSoup
log = logging.getLogger('nau_bb_reporting.reports.hardlinks')
# this only gets us all of the HTML with links in a term
# we have to parse and check the HTML locally
lazy_query = """
select
content_items.course_id,
cc.main_data
from
(SELECT DISTINCT
cm.course_id,
cc.title,
cc.pk1
FROM bblearn.course_main cm, bblearn.course_contents cc
WHERE
cc.main_data LIKE '%<a%'
AND cc.crsmain_pk1 = cm.pk1
AND cm.course_id LIKE :course_id_like
) content_items
JOIN
bblearn.course_contents cc
ON cc.pk1 = content_items.pk1
"""
# this only gets us the deployed HTML files in a term
# this could be extended to actually fetch the files, but that
# would require a special reporting account with read access
# to all BbL files, and is not a security hole we want to introduce
greedy_query = """
select DISTINCT
cm.course_id
from bblearn.course_main cm, bblearn.course_contents cc, bblearn.course_contents_files ccf, bblearn.files f
WHERE
NOT REGEXP_LIKE(f.link_name,'(DVD|VT|T)[0-9]{2,5}_(.+).html','i')
AND f.link_name LIKE '%.htm%'
AND f.pk1 = ccf.files_pk1
AND ccf.course_contents_pk1 = cc.pk1
AND cc.available_ind = 'Y'
AND cc.cnthndlr_handle = 'resource/x-bb-file'
AND cc.crsmain_pk1 = cm.pk1
AND cm.course_id LIKE :course_id_like
"""
def run(term, connection, out_file_path, greedy=False):
    """Generate the hardlinks report for *term* and write it as an Excel file.

    Parameters
    ----------
    term : str
        Term code used to build course-id LIKE patterns.
    connection
        An open DB connection exposing cursor()/prepare()/execute()
        (Oracle-style named binds).
    out_file_path : str
        Destination .xlsx path.
    greedy : bool
        When True, also list every course with a deployed HTML file.
    """
    log.info("Running hardlinks report for %s.", term)
    course_ids = set()          # courses already reported from the content scan
    found_course_ids = []       # (course_id, offending link) rows for the report
    # splitting this query seems to improve performance, as temp tables are kept small
    # and we avoid hitting swap (at least that's my guess as to why)
    course_id_patterns = [term + '-NAU00-' + letter + '%' for letter in ascii_uppercase]
    # first get all the deployed HTML files if needed
    if greedy:
        log.info('Retrieving a list of %s courses with deployed HTML files...', term)
        greedy_cur = connection.cursor()
        greedy_cur.prepare(greedy_query)
        for pattern in course_id_patterns:
            greedy_cur.execute(None, course_id_like=pattern)
            for row in greedy_cur:
                found_course_ids.append((row[0], 'HTML FILE'))
    main_cur = connection.cursor()
    main_cur.prepare(lazy_query)
    log.info('Checking all %s courses for content items with bad links...', term)
    for pattern in course_id_patterns:
        main_cur.execute(None, course_id_like=pattern)
        for row in main_cur:
            course_id = row[0]
            html = row[1]
            # now we check the HTML for bad links...
            found_link = get_first_hardlink(html)
            # report each course at most once, and only if a bad link was found
            if course_id not in course_ids and found_link is not None:
                found_course_ids.append((course_id, found_link))
                course_ids.add(course_id)
    log.info('Found all courses, writing to report file.')
    header = ['course id', 'link found']
    # (was: pd.DataFrame([x for x in found_course_ids], ...) -- the identity
    # comprehension added nothing)
    df = pd.DataFrame(found_course_ids, columns=header)
    df.to_excel(out_file_path, encoding='UTF-8', columns=header, index=False)
    log.info('Wrote report to %s', out_file_path)
def get_first_hardlink(html_content):
    """Return the first problematic URL found in *html_content*, or None.

    A "hardlink" here is an <a href> or <img src> that points back into
    Bb Learn (or is a bare relative link) without going through the
    LMS/CMS-managed link tools, per the patterns described in the module
    docstring.
    """
    soup = BeautifulSoup(html_content)
    # get all of the link urls and the image sources
    urls = [link.get('href') for link in soup.find_all('a')]
    urls.extend([image.get('src') for image in soup.find_all('img')])
    for link in urls:
        # now begin the long fall-through logic
        # if we get to the bottom, the link isn't a problem
        # first we want to make sure we don't want cycles checking empty text
        if link is None or len(link) == 0:
            continue
        trimmed = link.replace('%20', ' ').strip()
        if len(trimmed) == 0:
            continue
        # prep the url for easier conditionals
        url = trimmed.replace(' ', '%20')
        url = url.replace('@X@EmbeddedFile.requestUrlStub@X@', 'https://bblearn.nau.edu/')
        url = url.lower()
        # these don't reference bblearn, but they suck for students
        if 'iris.nau.edu/owa/redir.aspx' in url:
            return url
        elif 'about:blank' == url:
            continue
        # if it's an xid in WebDAV then it's probably OK
        elif 'xid' in url and 'bbcswebdav' in url:
            continue
        # if it points outside of bblearn, it's not really our problem (with the exception of OWA links)
        elif (url.startswith('http://') or url.startswith('https://')) and 'bblearn' not in url:
            continue
        # these are placed by the content editor's smiley tool
        elif '/images/ci/' in url:
            continue
        # if it is definitely pointing to Bb Learn, but isn't of the many tools that legitimately
        # insert links in content items, it's BAD
        elif ('courses' in url or 'webapps' in url or 'bbcswebdav' in url or 'webct' in url or 'vista' in url) \
                and '/institution/' not in url \
                and '/execute/viewdocumentation?' not in url \
                and '/wvms-bb-bblearn' not in url \
                and '/bb-collaborate-bblearn' not in url \
                and '/vtbe-tinymce/tiny_mce' not in url \
                and 'webapps/login' not in url \
                and 'webapps/portal' not in url \
                and 'bbgs-nbc-content-integration-bblearn' not in url \
                and 'bb-selfpeer-bblearn' not in url:
            return url
        # if it doesn't points outside of bblearn, and it doesn't specifically point to bblearn
        # then it's probably a relative link, which we should also burn with fire
        elif not url.startswith('https://') and \
                not url.startswith('http://') and \
                not url.startswith('www') and \
                not url.startswith('javascript:') and \
                not url.startswith('mailto:') and \
                not url.startswith('#') and \
                not url.startswith('data:image/') and \
                'webapps' not in url and \
                '.com' not in url and \
                '.net' not in url and \
                '.edu' not in url and \
                '.org' not in url and \
                'http://cdn.slidesharecdn.com/' not in url:
            return url
    # if we've made it this far, we've checked all urls and found them wanting
    return None
| 2.59375 | 3 |
client/states/base.py | AndrewIjano/distributed-tic-tac-toe | 0 | 12766520 | class State:
def __init__(self, client) -> None:
self.client = client
def get_input_command(self):
return input(f"JogoDaVelha> ").strip().split() or [""]
def handle_input_command(self, *input_command):
pass
def handle_opponent_command(self, *command):
pass
def handle_new_connection_command(self, *command):
pass
def _handle_skip(self):
pass
def _handle_default(self, *args):
print("Invalid command!")
def _handle_exit(self):
self.client.stop()
| 2.75 | 3 |
attic/gui/mainwindow/mainwindow.py | awacha/cct | 1 | 12766521 | import logging
import traceback
import pkg_resources
from gi.repository import Gtk, GdkPixbuf
from .devicestatusbar import DeviceStatusBar
from .logtreeview import LogTreeView
from ..accounting import UserManager, ProjectManager
from ..core import ToolWindow, error_message, ToolFrame, question_message
from ..devices import Motors, GeniX, TPG201, HaakePhoenix, Pilatus, DeviceConnections
from ..diagnostics import ResourceUsage
from ..measurement import ScanMeasurement, SingleExposure, TransmissionMeasurement, ScriptMeasurement, CommandHelpDialog
from ..setup import EditConfig, SampleEdit, DefineGeometry, Calibration
from ..toolframes import ResourceUsageFrame, NextFSN, ShutterBeamstop, AccountingFrame
from ..tools import ExposureViewer, CapillaryMeasurement, ScanViewer, MaskEditor, DataReduction, OptimizeGeometry
from ...core.commands.command import CommandError
from ...core.instrument.instrument import Instrument
from ...core.services.interpreter import Interpreter
# initialize the logger for the main window level.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class CollectingHandler(logging.Handler):
    """Singleton logging handler that buffers records in memory so they can
    be replayed later (e.g. once the main window's log view exists)."""

    instance = None  # the one allowed instance, set on first construction

    def __init__(self):
        self.collected = []
        if type(self).instance is not None:
            raise RuntimeError('This is a singleton class!')
        super().__init__()
        type(self).instance = self

    @classmethod
    def get_default(cls):
        """Return the singleton instance (None if never constructed)."""
        return cls.instance

    def emit(self, record):
        # Just buffer the record; no formatting or output happens here.
        self.collected.append(record)
class MainWindow(object):
    """Top-level GTK window of the CCT control tool.

    Builds the UI from a Glade file, installs a log handler that mirrors all
    log records into the window, wires up the instrument/interpreter signals,
    and opens tool dialogs on demand from ``toolwindow_registry``.
    """

    # Registry of openable tool windows:
    # (menu/tool item suffix, ToolWindow subclass, toplevel widget name,
    #  glade file, {signal name: MainWindow handler method name}).
    toolwindow_registry = [
        ('sampleeditor', SampleEdit, 'samplesetup', 'setup_sampleedit.glade', {}),
        ('definegeometry', DefineGeometry, 'definegeometry', 'setup_definegeometry.glade', {}),
        ('editconfig', EditConfig, 'editconfig', 'setup_editconfig.glade', {}),
        ('calibration', Calibration, 'calibration', 'setup_calibration.glade', {}),
        ('xraysource', GeniX, 'genix', 'devices_genix.glade', {}),
        ('detector', Pilatus, 'pilatus', 'devices_pilatus.glade', {}),
        ('motors', Motors, 'motoroverview', 'devices_motors.glade', {}),
        ('vacgauge', TPG201, 'vacgauge', 'devices_tpg201.glade', {}),
        ('temperaturestage', HaakePhoenix, 'haakephoenix', 'devices_haakephoenix.glade', {}),
        ('connections', DeviceConnections, 'deviceconnections', 'devices_connection.glade', {}),
        ('scanmeasurement', ScanMeasurement, 'scan', 'measurement_scan.glade', {}),
        ('singleexposure', SingleExposure, 'singleexposure', 'measurement_singleexposure.glade', {}),
        ('transmission', TransmissionMeasurement, 'measuretransmission',
         'measurement_transmission.glade', {}),
        ('scriptmeasurement', ScriptMeasurement, 'script', 'measurement_script.glade', {}),
        ('maskeditor', MaskEditor, 'maskeditor', 'tools_maskeditor.glade', {}),
        ('imgviewer', ExposureViewer, 'calibration', 'setup_calibration.glade', {}),
        ('viewscans', ScanViewer, 'scanviewer', 'tools_scanviewer.glade', {}),
        ('capillarymeasurement', CapillaryMeasurement, 'capillarymeasurement',
         'tools_capillarymeasurement.glade', {}),
        ('datareduction', DataReduction, 'datareduction', 'tools_datareduction.glade', {}),
        ('resourceusage', ResourceUsage, 'resourceusagewindow', 'diagnostics_resourceusage.glade',
         {}),
        ('commandhelp', CommandHelpDialog, 'commandhelpbrowser', 'help_commandhelpbrowser.glade',
         {'insert': 'on_insert_command'}),
        ('users', UserManager, 'usermanager', 'accounting_usermanager.glade', {}),
        ('projects', ProjectManager, 'projectmanager', 'accounting_projectmanager.glade', {}),
        ('optimizegeometry', OptimizeGeometry, 'optimizegeometry', 'tools_optimizegeometry.glade', {}),
    ]

    class LogHandler(logging.Handler):
        """Logging handler that forwards formatted records into the main
        window's log views."""

        def __init__(self, mainwindow):
            super().__init__()
            self.mw = mainwindow

        def emit(self, record):
            message = self.format(record)
            # GLib.idle_add(lambda msg=message, rec=record: self.mw.writelogline(msg, rec) and False)
            self.mw.writelogline(message, record)

    def __init__(self, instrument: Instrument):
        """Build the window from Glade, install logging, connect devices and
        interpreter signals, and show the UI."""
        # initialize the main window
        self.builder = Gtk.Builder.new_from_file(
            pkg_resources.resource_filename('cct', 'resource/glade/mainwindow.glade'))
        self.builder.set_application(Gtk.Application.get_default())
        self.widget = self.builder.get_object('mainwindow')
        self.builder.connect_signals(self)
        self.widget.set_show_menubar(True)
        self.widget.connect('delete-event', self.on_delete_event)
        self.widget.set_default_icon_list([GdkPixbuf.Pixbuf.new_from_file_at_size(
            pkg_resources.resource_filename('cct', 'resource/icons/scalable/cctlogo.svg'),
            sz, sz) for sz in [16, 32, 48, 64, 128, 256]])
        self.widget.show_all()
        # Initialize the log textbuffer
        self._logtags = self.builder.get_object('log_texttags')
        self._logbuffer = self.builder.get_object('logbuffer')
        self._logbuffer.create_mark(
            'log_end', self._logbuffer.get_end_iter(), False)
        self._logview = self.builder.get_object('logtext')
        self._logview2 = LogTreeView()
        self.builder.get_object('logviewer_stack').add_titled(self._logview2.widget, 'treelogviewer', 'Log tree')
        # initialize custom log handler for the root logger. This is responsible for printing
        # all log records in the main window.
        self._loghandler = self.LogHandler(self)
        self._loghandler.setLevel(logging.DEBUG)
        logging.root.addHandler(self._loghandler)
        self._loghandler.setFormatter(logging.Formatter(
            '%(asctime)s: %(levelname)s: %(message)s (Origin: %(name)s:%(lineno)d)'))
        # Replay records buffered by CollectingHandler before the window
        # existed, then retire that handler.
        ch = CollectingHandler.get_default()
        for record in ch.collected:
            self._loghandler.emit(record)
        logging.root.removeHandler(ch)
        del ch.collected
        self._toolwindows = {}
        self._toolwindow_connections = {}
        self.instrument = instrument
        self._instrumentconnections = [
            self.instrument.connect('shutdown', self.on_instrument_shutdown),
            self.instrument.connect('device-connected', lambda i, d: self.set_menu_sensitivity()),
            self.instrument.connect('device-disconnected', lambda i, d, b: self.set_menu_sensitivity()),
        ]
        if self.instrument.online:
            self.instrument.connect_devices()
            logger.debug('Mainwindow: devices connected.')
        self._devicestatus = DeviceStatusBar(self.instrument)
        logger.debug('DeviceStatusBar initialized')
        self.builder.get_object('devicestatus_box').pack_start(self._devicestatus, True, True, 0)
        self._toolframes = {}
        # Tool frames are best-effort: a failing frame is logged and skipped.
        for framename, cls, gladefile, mainwidget in [
                ('resourceusage', ResourceUsageFrame, 'toolframe_telemetry.glade', 'telemetryframe'),
                ('nextfsn', NextFSN, 'toolframe_nextfsn.glade', 'nextfsnframe'),
                ('shutterbeamstop', ShutterBeamstop, 'toolframe_shutter.glade', 'shutterframe'),
                ('accounting', AccountingFrame, 'toolframe_accounting.glade', 'accountingframe')
        ]:
            try:
                self._toolframes[framename] = cls(gladefile, mainwidget, self.instrument)
                self.builder.get_object('toolbox').pack_end(self._toolframes[framename].widget, False, True, 0)
            except Exception:
                logger.error('Cannot open toolframe ' + framename)
        logger.debug('Initializing toolframes done.')
        self.widget.show_all()
        self.widget.set_title('Credo Control Tool v{}'.format(pkg_resources.get_distribution('cct').version))
        logger.debug('Connecting to interpreter')
        interpreter = self.instrument.services['interpreter']
        self._interpreterconnections = [
            interpreter.connect('cmd-return', self.on_interpreter_cmd_return),
            interpreter.connect('cmd-fail', self.on_interpreter_cmd_fail),
            interpreter.connect('pulse', self.on_interpreter_cmd_pulse),
            interpreter.connect('progress', self.on_interpreter_cmd_progress),
            interpreter.connect('cmd-message', self.on_interpreter_cmd_message),
            interpreter.connect('idle-changed', self.on_interpreter_idle_changed),
        ]
        self._commandhistory = []
        self._historyindex = None
        self.on_change_logviewer(self.builder.get_object('menuitem_advancedlogviewer'))
        self.set_menu_sensitivity()

    def on_change_logviewer(self, checkmenuitem: Gtk.CheckMenuItem):
        """Toggle between the tree-style and plain-text log views."""
        if checkmenuitem.get_active():
            self.builder.get_object('logviewer_stack').set_visible_child_name('treelogviewer')
        else:
            self.builder.get_object('logviewer_stack').set_visible_child_name('textlogviewer')

    def on_command_entry_keyevent(self, entry: Gtk.Entry, event):
        """Navigate the command history with the cursor up/down keys.

        NOTE(review): hardware keycodes 111/116 are X11 Up/Down -- confirm on
        other display servers.
        """
        if event.hardware_keycode == 111:
            # cursor up key
            if self._commandhistory:
                if self._historyindex is None:
                    self._historyindex = len(self._commandhistory)
                self._historyindex = max(0, self._historyindex - 1)
                entry.set_text(self._commandhistory[self._historyindex])
            return True  # inhibit further processing of this key event
        elif event.hardware_keycode == 116:
            # cursor down key
            if self._commandhistory:
                if self._historyindex is None:
                    self._historyindex = -1
                self._historyindex = min(self._historyindex + 1, len(self._commandhistory) - 1)
                entry.set_text(self._commandhistory[self._historyindex])
            return True  # inhibit further processing of this key event
        return False

    def on_interpreter_idle_changed(self, interpreter: Instrument, idle: bool):
        """Enable/disable the command entry and Execute button with the
        interpreter's idle state."""
        if not idle:
            self.builder.get_object('command_entry').set_sensitive(idle)
            if self.builder.get_object('execute_button').get_label() == 'Execute':
                self.builder.get_object('execute_button').set_sensitive(idle)
        if idle:
            self.builder.get_object('command_entry').set_sensitive(idle)
            self.builder.get_object('execute_button').set_sensitive(idle)

    def on_command_execute(self, button: Gtk.Button):
        """Execute the typed command, or kill the running one, depending on
        the button's current label ('Execute' / 'Stop')."""
        if button.get_label() == 'Execute':
            cmd = self.builder.get_object('command_entry').get_text()
            try:
                self.instrument.services['interpreter'].execute_command(cmd)
            except CommandError as ce:
                error_message(self.widget, 'Cannot execute command', str(ce))
            else:
                button.set_label('Stop')
                # avoid consecutive duplicates in the history
                if (not self._commandhistory) or (self._commandhistory and self._commandhistory[-1] != cmd):
                    self._commandhistory.append(self.builder.get_object('command_entry').get_text())
        elif button.get_label() == 'Stop':
            self.instrument.services['interpreter'].kill()
        else:
            raise ValueError(button.get_label())

    # noinspection PyUnusedLocal
    def on_interpreter_cmd_return(self, interpreter: Interpreter, commandname: str, returnvalue: object):
        # Command finished: reset the entry, button and status bar.
        self.builder.get_object('command_entry').set_sensitive(True)
        self.builder.get_object('command_entry').set_progress_fraction(0)
        self.builder.get_object('command_entry').set_text('')
        self.builder.get_object('command_entry').grab_focus()
        self.builder.get_object('execute_button').set_label('Execute')
        self._historyindex = None
        self.builder.get_object('statusbar').pop(1)

    # noinspection PyUnusedLocal,PyMethodMayBeStatic
    def on_interpreter_cmd_fail(self, interpreter, commandname, exc, tb):
        logger.error('Command {} failed: {} {}'.format(commandname, str(exc), tb))

    # noinspection PyUnusedLocal
    def on_interpreter_cmd_message(self, interpreter, commandname, message):
        self.builder.get_object('statusbar').pop(1)
        self.builder.get_object('statusbar').push(1, message)
        logger.info('Command {} :: {}'.format(commandname, message))

    # noinspection PyUnusedLocal
    def on_interpreter_cmd_pulse(self, interpreter, commandname, message):
        self.builder.get_object('command_entry').progress_pulse()
        self.builder.get_object('statusbar').pop(1)
        self.builder.get_object('statusbar').push(1, message)

    # noinspection PyUnusedLocal
    def on_interpreter_cmd_progress(self, interpreter, commandname, message, fraction):
        self.builder.get_object('command_entry').set_progress_fraction(fraction)
        self.builder.get_object('statusbar').pop(1)
        self.builder.get_object('statusbar').push(1, message)

    def on_delete_event(self, window, event):
        # Window close button behaves exactly like File/Quit.
        return self.on_quit()

    def writelogline(self, message: str, record: logging.LogRecord):
        """Append a formatted log line to both log views, tagged by severity,
        and mirror INFO+ messages to the status bar."""
        assert hasattr(record, 'message')
        if record.levelno >= logging.CRITICAL:
            tag = self._logtags.lookup('critical')
        elif record.levelno >= logging.ERROR:
            tag = self._logtags.lookup('error')
        elif record.levelno >= logging.WARNING:
            tag = self._logtags.lookup('warning')
        else:
            tag = self._logtags.lookup('normal')
        enditer = self._logbuffer.get_end_iter()
        self._logbuffer.insert_with_tags(enditer, message + '\n', tag)
        self._logview.scroll_to_mark(
            self._logbuffer.get_mark('log_end'), 0.1, False, 0, 0)
        if record.levelno >= logging.INFO:
            self.builder.get_object('statusbar').pop(0)
            self.builder.get_object('statusbar').push(0, record.message.split('\n')[0])
        self._logview2.add_logentry(record)
        return False

    def construct_and_run_dialog(self, windowclass, toplevelname, gladefile, windowtitle, connections):
        """Lazily construct (and cache) a ToolWindow, connect its signals,
        and present it. Failed construction shows an error dialog instead."""
        assert issubclass(windowclass, ToolWindow)
        key = str(windowclass) + str(toplevelname)
        logger.debug('Construct & run dialog: ' + gladefile)
        if key not in self._toolwindows:
            logger.debug('Constructing needed for dialog ' + gladefile)
            try:
                self._toolwindows[key] = windowclass(gladefile, toplevelname, self.instrument, windowtitle)
            except ToolFrame.DeviceException as ex:
                error_message(self.widget, 'Could not open window {}'.format(windowtitle),
                              'Missing required device: {}'.format(ex.args[0]))
                return
            except Exception as exc:
                error_message(self.widget, 'Could not open window {}'.format(windowtitle),
                              '{}\n{}'.format(str(exc), traceback.format_exc()))
                return
            # if self._toolwindows[key].widget.destroyed():
            #     logger.error('Error while constructing dialog ' + gladefile)
            #     del self._toolwindows[key]
            logger.debug('Successful construction of dialog ' + gladefile)
            assert key not in self._toolwindow_connections
            logger.debug('Connecting signals for dialog ' + gladefile)
            try:
                self._toolwindow_connections[key] = [
                    self._toolwindows[key].connect('destroy', self.on_toolwindow_destroyed, key)]
                for signal in connections:
                    self._toolwindow_connections[key].append(
                        self._toolwindows[key].connect(signal, getattr(self, connections[signal])))
            except Exception as exc:
                # roll back: disconnect whatever got connected and drop the window
                logger.error('Error connecting signals to dialog ' + gladefile)
                try:
                    for c in self._toolwindow_connections[key]:
                        self._toolwindows[key].disconnect(c)
                    self._toolwindows[key].destroy()
                    raise
                finally:
                    del self._toolwindow_connections[key]
                    del self._toolwindows[key]
            logger.debug('Dialog should be up and running: ' + gladefile)
        logger.debug('Presenting dialog ' + gladefile)
        return self._toolwindows[key].widget.present()

    def on_toolwindow_destroyed(self, toolwindow: ToolWindow, key):
        """Forget a cached ToolWindow once it destroys itself."""
        logger.debug('Dialog destroyed: ' + toolwindow.gladefile)
        assert key in self._toolwindow_connections
        for c in self._toolwindow_connections[key]:
            toolwindow.disconnect(c)
        del self._toolwindow_connections[key]
        del self._toolwindows[key]
        logger.debug('Mainwindow keeps no reference for dialog ' + toolwindow.gladefile)

    def on_quit(self):
        """Ask for confirmation when busy, then save state and start the
        instrument shutdown. Always returns True (inhibits default close)."""
        if self.instrument.is_busy():
            if not question_message(self.widget, 'Confirm quit', 'The instrument is busy. Do you still want to quit?'):
                return True
        logger.info('Shutdown requested.')
        self.instrument.save_state()
        self.instrument.shutdown()
        return True

    def on_instrument_shutdown(self, instrument):
        """Tear down signal connections, remove the log handler, destroy the
        window and quit the GTK application."""
        logger.info('Instrument shutdown finished.')
        for c in self._instrumentconnections:
            instrument.disconnect(c)
        self._instrumentconnections = []
        logging.root.removeHandler(self._loghandler)
        self.widget.destroy()
        Gtk.Application.get_default().quit()

    def on_menu(self, menuitem: Gtk.MenuItem):
        """Dispatch menu/toolbar activations: quit, save settings, about, or
        open the matching tool window from ``toolwindow_registry``."""
        name = menuitem.get_name()
        if not (name.startswith('menuitem') or name.startswith('toolitem')):
            raise ValueError('Invalid menu item name: {}'.format(name))
        name = name.split('_', 1)[1]
        if name == 'quit':
            return self.on_quit()
        elif name == 'savesettings':
            self.instrument.save_state()
        elif name == 'about':
            builder = Gtk.Builder.new_from_file(
                pkg_resources.resource_filename('cct', 'resource/glade/help_about.glade'))
            ad = builder.get_object('aboutdialog')
            ad.set_version(pkg_resources.get_distribution('cct').version)
            ad.set_logo(GdkPixbuf.Pixbuf.new_from_file_at_size(
                pkg_resources.resource_filename('cct', 'resource/icons/scalable/cctlogo.svg'), 256, 256))
            ad.run()
            ad.destroy()
            del ad
        else:
            for nm, cls, toplevelname, gladefile, connections in self.toolwindow_registry:
                if nm != name:
                    continue
                self.construct_and_run_dialog(cls, toplevelname, gladefile, menuitem.get_label().replace('_', ''),
                                              connections)
                return False
            raise ValueError(name)

    def on_insert_command(self, commandhelpdialog: CommandHelpDialog, command: str):
        # Command-help browser asked us to paste a command into the entry.
        self.builder.get_object('command_entry').set_text(command)

    def on_toolbar(self, toolbutton):
        # Toolbar buttons share the menu dispatch logic.
        return self.on_menu(toolbutton)

    def set_menu_sensitivity(self):
        """Enable each registered tool window's menu/tool items only when its
        device requirements are currently met."""
        for nm, cls, toplevelname, gladefile, connections in self.toolwindow_registry:
            requirementsmet = cls.requirements_met(self.instrument)
            for what in ['menuitem', 'toolitem']:
                try:
                    self.builder.get_object(what + '_' + nm).set_sensitive(requirementsmet)
                except AttributeError:
                    pass
| 1.882813 | 2 |
client/src/client/motion.py | andrhahn/pi-spy | 1 | 12766522 | <reponame>andrhahn/pi-spy
import io
import logging
import os
import threading
import uuid
import picamera
from PIL import Image
from PIL import ImageChops
from PIL import ImageDraw
from PIL import ImageOps
import config_service
import s3_service
log_level = config_service.get_config("log_level")
logging.basicConfig(level=getattr(logging, log_level))
images_path = config_service.get_config("images_path")
videos_path = config_service.get_config("videos_path")
logs_path = config_service.get_config("logs_path")
prior_image = None
captured_image = None
captured_image_file_names = []
rect_coords = None
def create_dirs():
    """Ensure the configured images, videos and logs directories exist
    (paths come from module-level config values)."""
    create_dir(images_path)
    create_dir(videos_path)
    create_dir(logs_path)
def create_dir(path):
    """Create *path* (including parents) if it does not already exist.

    Idempotent: an existing directory is fine. If the path exists but is
    not a directory, OSError (FileExistsError) propagates -- same as the
    original try/except-and-re-raise implementation.
    """
    os.makedirs(path, exist_ok=True)
def save_image(file_name):
    # Persist the most recent motion-annotated frame (module-level global
    # ``captured_image``) under the configured images directory.
    captured_image.save(images_path + '/' + file_name)
def process_recording(captured_image_file_names, video_guid):
    """Upload captured screenshots and the before/after videos to S3, then
    send a notification e-mail linking to everything.

    Parameters
    ----------
    captured_image_file_names : list of str
        File names (relative to ``images_path``) of the motion screenshots.
    video_guid : str
        GUID identifying the before_/after_ .h264 recordings on disk.
    """
    s3_host_name = 'http://s3.amazonaws.com'
    s3_bucket_name = config_service.get_config('s3_bucket_name')
    captured_image_urls = []
    for captured_image_file_name in captured_image_file_names:
        key = 'images/' + captured_image_file_name
        # upload image to s3
        s3_service.upload_file(s3_bucket_name, open(images_path + '/' + captured_image_file_name, 'rb'), key,
                               'image/jpeg')
        captured_image_url = s3_host_name + '/' + s3_bucket_name + '/' + key
        captured_image_urls.append(captured_image_url)
    # upload videos to s3
    captured_video_before_file_name = 'before_' + video_guid + '.h264'
    captured_video_after_file_name = 'after_' + video_guid + '.h264'
    captured_before_video_key = 'videos/' + captured_video_before_file_name
    captured_after_video_key = 'videos/' + captured_video_after_file_name
    s3_service.upload_file(s3_bucket_name, open(videos_path + '/' + captured_video_before_file_name, 'rb'),
                           captured_before_video_key, 'video/h264')
    s3_service.upload_file(s3_bucket_name, open(videos_path + '/' + captured_video_after_file_name, 'rb'),
                           captured_after_video_key, 'video/h264')
    captured_video_before_url = s3_host_name + '/' + s3_bucket_name + '/' + captured_before_video_key
    captured_video_after_url = s3_host_name + '/' + s3_bucket_name + '/' + captured_after_video_key
    # send ses email with inline screenshots and links to both video files
    body = 'Screenshots:<br>'
    for captured_image_url in captured_image_urls:
        body += '<img src="' + captured_image_url + '"/><br>'
    body += '<br>'
    body += '<a href="' + captured_video_before_url + '">' + captured_video_before_url + '</a><br><br>'
    body += '<a href="' + captured_video_after_url + '">' + captured_video_after_url + '</a><br><br><br>'
    body += '<a href="https://github.com/andrhahn/pi-spy">Captured with pi-spy</a><br>'
    to_emails = ['<EMAIL>']
    s3_service.send_email('pispy motion detected', body, to_emails)
    logging.info('Image and Video processing complete.')
    # (removed leftover debug line: logging.debug('SHOULDNT SEE ME!!'))
def detect_motion(camera):
    """Capture a frame and compare it against the previous one.

    Returns True when the grayscale difference between the current and
    prior frame has a non-empty bounding box (i.e. something changed).
    Side effects: updates the module-level prior_image / captured_image /
    rect_coords, saves an annotated screenshot asynchronously, and records
    its file name (capped at 5 per recording).
    """
    global prior_image
    global captured_image
    global captured_image_file_names
    global rect_coords
    stream = io.BytesIO()
    camera.capture(stream, format='jpeg', use_video_port=True)
    stream.seek(0)
    if prior_image is None:
        # First frame ever captured: nothing to diff against yet.
        prior_image = Image.open(stream)
        return False
    else:
        current_image = Image.open(stream)
        # Posterize the grayscale diff down to 1 bit so only significant
        # pixel changes survive into the bounding-box computation.
        diff_image = ImageOps.posterize(ImageOps.grayscale(ImageChops.difference(prior_image, current_image)), 1)
        rect_coords = diff_image.getbbox()
        # BUG FIX (idiom): compare against None with 'is not', not '!='.
        if rect_coords is not None:
            captured_image = current_image.copy()
            # draw box around the detected motion region
            ImageDraw.Draw(captured_image).rectangle(rect_coords, outline="yellow", fill=None)
            image_guid = str(uuid.uuid4())
            captured_image_file_name = image_guid + '.jpg'
            # save file to file system without blocking the capture loop
            save_image_thread = threading.Thread(target=save_image, args=(captured_image_file_name,))
            save_image_thread.start()
            if len(captured_image_file_names) < 5:
                captured_image_file_names.append(captured_image_file_name)
            prior_image = current_image
            return True
        else:
            return False
def write_video(stream, video_guid):
    """Dump the circular 'before' buffer to disk, starting at the first
    SPS header frame, then empty the buffer for reuse."""
    target_path = videos_path + '/before_' + video_guid + '.h264'
    with io.open(target_path, 'wb') as clip:
        # Rewind the circular buffer to the first SPS header frame.
        for frame in stream.frames:
            if frame.frame_type == picamera.PiVideoFrameType.sps_header:
                stream.seek(frame.position)
                break
        # Copy everything from that point onwards into the output file.
        while True:
            chunk = stream.read1()
            if not chunk:
                break
            clip.write(chunk)
    # Reset the buffer so it can collect footage for the next event.
    stream.seek(0)
    stream.truncate()
# --- main capture loop ---------------------------------------------------
# NOTE: Python 2 syntax (print statements); run under python2.
with picamera.PiCamera() as camera:
    create_dirs()
    print 'Started pi-cam'
    camera.resolution = (1280, 720)
    # Flip both axes (presumably due to camera mounting -- confirm).
    camera.vflip = True
    camera.hflip = True
    # creates a "before" stream which only holds 10 total seconds of data
    stream = picamera.PiCameraCircularIO(camera, seconds=10)
    # start recording to "before" stream
    camera.start_recording(stream, format='h264')
    try:
        while True:
            print 'Polling for motion...'
            camera.wait_recording(1)
            if detect_motion(camera):
                print 'Recording motion - started'
                captured_image_file_names = []
                video_guid = str(uuid.uuid4())
                # once motion is detected, start recording "after" video data directly to disk
                # this recording of "after" video will continue to record to disk until motion is no longer detected
                camera.split_recording(videos_path + '/after_' + video_guid + '.h264')
                # write "before" stream data (the last 10 seconds) to disk
                write_video(stream, video_guid)
                # keep recording to "after" stream until motion stops
                while detect_motion(camera):
                    camera.wait_recording(1)
                print 'Recording motion - completed'
                # once motion is done, start recording to "before" stream again
                camera.split_recording(stream)
                # process images in a separate thread
                process_images_thread = threading.Thread(target=process_recording,
                                                         args=(list(captured_image_file_names), video_guid,))
                process_images_thread.start()
                print 'About to sleep camera for 10 seconds...'
                camera.wait_recording(10)
    finally:
        # Always release the camera recording, even on Ctrl-C.
        camera.stop_recording()
| 1.921875 | 2 |
get_proxy.py | pighui/myproxy | 6 | 12766523 | #! /usr/bin/env python
# -*-coding:UTF-8-*-
# __author__ : pighui
# __time__ : 2019-11-2 上午11:51
import requests
def get_proxies(params=None):
    """Fetch one or more proxies from the local proxy pool service.

    :param params: optional query parameters, e.g. ``count``, ``anonymity``,
        ``protocol``; defaults to no filtering
    :return: list of dicts mapping protocol -> "http://ip:port", or None on
        connection failure / non-200 response
    """
    # BUG FIX: a mutable default argument ({}) is shared across calls;
    # use None and create a fresh dict per call.
    if params is None:
        params = {}
    try:
        response = requests.get('http://127.0.0.1:8888/ip/', params=params)
        if response.status_code == 200:
            result = response.json()
            return [{d['protocol']: 'http://' + d['ip'] + ':' + d['port']} for d in result]
        return None
    # BUG FIX: requests' ConnectionError does not inherit from the builtin
    # ConnectionError, so the old handler never matched.
    except requests.exceptions.ConnectionError:
        return None
def random_proxy():
    """Fetch a single random proxy from the local proxy pool service.

    :return: dict describing one proxy, or None on connection failure /
        non-200 response
    """
    try:
        response = requests.get('http://127.0.0.1:8888/ip/random/')
        if response.status_code == 200:
            result = response.json()
            return result
        return None
    # BUG FIX: catch requests' ConnectionError; the builtin ConnectionError
    # never matched the exception actually raised by requests.
    except requests.exceptions.ConnectionError:
        return None
if __name__ == '__main__':
    # Fetch a single proxy
    ip1 = get_proxies()
    print(ip1)
    # Fetch several proxies
    ip2 = get_proxies({'count': 3})
    print(ip2)
    # Fetch anonymous proxies
    ip3 = get_proxies({'anonymity': 1})
    print(ip3)
    # Fetch https proxies
    ip4 = get_proxies({'protocol': 'https'})
    print(ip4)
    # Fetch several anonymous https proxies
    ip5 = get_proxies({'count': 3, 'anonymity': 1, 'protocol': 'https'})
    print(ip5)
    # Fetch one random proxy
    ip6 = random_proxy()
    print(ip6)
autocnet/camera/camera.py | readthedocs-assistant/autocnet | 17 | 12766524 | import numpy as np
from autocnet.camera.utils import crossform
from cv2 import triangulatePoints
def compute_epipoles(f):
    """Compute the epipole and its cross-product (epipolar prime) matrix.

    Parameters
    ----------
    f : ndarray
        (3, 3) fundamental matrix or autocnet Fundamental Matrix object

    Returns
    -------
    e : ndarray
        (3,) epipole (last left-singular vector of f)
    e1 : ndarray
        (3, 3) epipolar prime (cross-form) matrix
    """
    left_singular, _, _ = np.linalg.svd(f)
    epipole = left_singular[:, -1]
    epipole_prime = crossform(epipole)
    return epipole, epipole_prime
def idealized_camera():
    """Create an idealized camera transformation matrix.

    Returns
    -------
    : ndarray
        (3, 4) matrix with ones on the diagonal.  ``np.eye(3, 4)`` already
        leaves the fourth (translation) column zero, so the explicit
        zeroing in the previous implementation was redundant.
    """
    return np.eye(3, 4)
def camera_from_f(F):
    """Estimate a camera matrix from a fundamental matrix.

    Parameters
    ----------
    F : ndarray
        (3, 3) fundamental matrix or autocnet Fundamental Matrix object

    Returns
    -------
    p1 : ndarray
        (3, 4) estimated camera matrix
    """
    epipole, epipole_cross = compute_epipoles(F)
    # Left 3x3 block: -[e]_x F; fourth column: the epipole itself.
    left_block = -epipole_cross.dot(F)
    return np.hstack((left_block, epipole.reshape(3, 1)))
def triangulate(pt, pt1, p, p1):
    """Triangulate 3D coordinates from two sets of implicitly ordered
    homogeneous image correspondences and two camera matrices.

    References
    ----------
    [Hartley2003]_

    Parameters
    ----------
    pt : ndarray
        (n, 3) array of homogeneous correspondences
    pt1 : ndarray
        (n, 3) array of homogeneous correspondences
    p : ndarray
        (3, 4) camera matrix
    p1 : ndarray
        (3, 4) camera matrix

    Returns
    -------
    coords : ndarray
        (4, n) homogenized projection matrix
    """
    pts_a = np.asarray(pt)
    pts_b = np.asarray(pt1)
    # OpenCV expects coordinates as columns (3 x n); transpose if needed.
    if pts_a.shape[0] != 3:
        pts_a = pts_a.T
    if pts_b.shape[0] != 3:
        pts_b = pts_b.T
    coords = triangulatePoints(p, p1, pts_a[:2], pts_b[:2])
    # Homogenize: scale each column so its fourth component is 1.
    return coords / coords[3]
def projection_error(p1, p, pt, pt1):
    """
    Based on Hartley and Zisserman p.285, this function triangulates
    image correspondences and computes the reprojection error
    by back-projecting the points into both images.

    This is the classic cost function (minimization problem) of
    the gold standard method for fundamental matrix estimation.

    Parameters
    -----------
    p1 : ndarray
        (3,4) camera matrix (may arrive flattened from a solver)
    p : ndarray
        (3,4) idealized camera matrix in the form np.eye(3,4)
    pt : dataframe or ndarray
        of homogeneous coordinates in the form (x_{i}, y_{i}, 1)
    pt1 : dataframe or ndarray
        of homogeneous coordinates in the form (x_{i}, y_{i}, 1)

    Returns
    -------
    reproj_error : ndarray
        (n, 1) vector of reprojection errors
    """
    # SciPy least squares solver needs a vector, so reshape back to a 3x4
    # camera matrix at each iteration
    if p1.shape != (3,4):
        p1 = p1.reshape(3,4)
    # Triangulate the correspondences into homogeneous 3D points.
    xhat = triangulate(pt, pt1, p, p1)
    # Back-project through the idealized camera: normalize by the third
    # coordinate to recover image coordinates.
    xhat1 = xhat[:3] / xhat[2]
    # Back-project through the estimated camera and normalize likewise.
    xhat2 = p1.dot(xhat)
    xhat2 /= xhat2[2]
    # Symmetric squared residual in both images, reduced per-point.
    cost = (pt - xhat1)**2 + (pt1 - xhat2)**2
    cost = np.sqrt(np.sum(cost, axis=0))
    return cost
| 2.921875 | 3 |
web/controllers/__init__.py | DXDSpirits/tatinspiration | 0 | 12766525 | <reponame>DXDSpirits/tatinspiration
# -*- coding: utf-8 -*-
import frontend
import api
import search_api
| 0.933594 | 1 |
tests/test_decorator.py | spulec/django-file-keeper | 1 | 12766526 | from mock import mock_open, patch
import sure
from keeper.core import get_bucket, use_file
@patch("keeper.core.get_bucket")
def test_basic_decorator(get_bucket):
    # Happy path: the decorated handler receives an S3-backed file object
    # and can iterate it line by line.
    class FakeKey(object):
        def __init__(self, keyname):
            self.keyname = keyname

        def get_contents_as_string(self):
            return "test1\ntest2"

    fake_key = FakeKey('other.csv')

    class FakeBucket(object):
        vals = {'foobar.csv': fake_key}

        def get_key(self, keyname):
            return self.vals.get(keyname)

    fake_bucket = FakeBucket()
    get_bucket.return_value = fake_bucket

    @use_file('foobar.csv')
    def a_handle(keeper_file, *args, **options):
        lines = []
        for line in keeper_file:
            lines.append(line)
        return lines

    # sure-style assertion: the handler yields the S3 payload's lines.
    a_handle.when.called_with().should.return_value(["test1", "test2"])
@patch("keeper.core.get_bucket")
def test_result_csv(get_bucket):
    # The .csv accessor should parse the S3 payload into row sequences.
    class FakeKey(object):
        def __init__(self, keyname):
            self.keyname = keyname

        def get_contents_as_string(self):
            return "test1,test2\nblue,green\n"

    fake_key = FakeKey('other.csv')

    class FakeBucket(object):
        vals = {'foobar.csv': fake_key}

        def get_key(self, keyname):
            return self.vals.get(keyname)

    fake_bucket = FakeBucket()
    get_bucket.return_value = fake_bucket

    @use_file('foobar.csv')
    def a_handle(keeper_file, *args, **options):
        lines = []
        for line in keeper_file.csv:
            lines.append(tuple(line))
        return lines

    a_handle.when.called_with().should.return_value([("test1", "test2"), ("blue", "green")])
@patch("keeper.core.get_bucket")
def test_result_json(get_bucket):
    # The .json accessor should deserialize the S3 payload into a dict.
    class FakeKey(object):
        def __init__(self, keyname):
            self.keyname = keyname

        def get_contents_as_string(self):
            return '{"test1": "test2",\n"blue":{\n"a":"green"}}'

    fake_key = FakeKey('other.json')

    class FakeBucket(object):
        vals = {'foobar.json': fake_key}

        def get_key(self, keyname):
            return self.vals.get(keyname)

    fake_bucket = FakeBucket()
    get_bucket.return_value = fake_bucket

    @use_file('foobar.json')
    def a_handle(keeper_file, *args, **options):
        return keeper_file.json

    a_handle.when.called_with().should.return_value({
        'test1': 'test2',
        'blue': {
            'a': 'green'
        }
    })
@patch("keeper.core.get_bucket")
def test_key_doesnt_exist(mock_get_bucket):
    """A missing S3 key should raise an informative IOError."""
    # Simulate a bucket lookup that finds no matching key.
    mock_get_bucket.return_value.get_key.return_value = None

    @use_file('foobar.csv')
    def handler(keeper_file, *args, **options):
        pass

    handler.when.called_with().should.throw(
        IOError, 'The file foobar.csv cannot be found on S3.')
@patch("keeper.core.settings")
@patch("keeper.core.boto.connect_s3")
def test_get_bucket(mock_connect_s3, mock_settings):
    """get_bucket should return whatever the boto connection yields."""
    mock_connect_s3.return_value.get_bucket.return_value = "foobar"
    get_bucket.when.called_with().should.return_value("foobar")
@patch('keeper.core.open', mock_open(read_data='local contents\nand some more'), create=True)
@patch("keeper.core.get_bucket")
def test_local_result(get_bucket):
    # With local=True and no S3 key present, the decorator falls back to
    # reading a local file (mocked via mock_open above).
    get_bucket.return_value.get_key.return_value = None

    @use_file('foobar.csv')
    def a_handle(keeper_file, *args, **options):
        lines = []
        for line in keeper_file:
            lines.append(line)
        return lines

    a_handle.when.called_with(local=True).should.return_value(['local contents', 'and some more'])
| 2.640625 | 3 |
erpnext_telegram_integration/extra_notifications/doctype/date_notification/date_notification.py | chiajunshen/shrdc_erpnext_telegram | 60 | 12766527 | <reponame>chiajunshen/shrdc_erpnext_telegram
# -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import nowdate, add_to_date
from jinja2 import TemplateError
class DateNotification(Document):
    """Doctype that creates Extra Notification Log entries when a date
    field on target documents falls a configured number of days
    before/after today."""

    def validate(self):
        self.validate_condition()

    def validate_condition(self):
        # Evaluate the condition against an empty target document so an
        # invalid expression is rejected at save time.
        temp_doc = frappe.new_doc(self.doctype_name)
        if self.condition:
            try:
                frappe.safe_eval(self.condition, None, get_context(temp_doc))
            except Exception:
                frappe.throw(_("The Condition '{0}' is invalid").format(self.condition))

    def get_documents_for_today(self):
        '''get list of documents that will be triggered today'''
        docs = []
        if not self.enable:
            return docs
        for row_date_field in self.date_fields:
            if not int(row_date_field.enable):
                continue
            # "Days Before" shifts the reference date into the future,
            # "Days After" into the past.
            diff_days = int(row_date_field.days)
            if row_date_field.days_before_or_after=="Days After":
                diff_days = -diff_days
            reference_date = add_to_date(nowdate(), days=diff_days)
            # Match any datetime falling on the reference day.
            reference_date_start = reference_date + ' 00:00:00.000000'
            reference_date_end = reference_date + ' 23:59:59.000000'
            if not row_date_field.doctype_name:
                continue
            doc_list = frappe.get_all(row_date_field.doctype_name,
                fields='name',
                filters=[
                    { row_date_field.fieldname: ('>=', reference_date_start) },
                    { row_date_field.fieldname: ('<=', reference_date_end) },
                ])
            child_doctype_name = ""
            for d in doc_list:
                if int(row_date_field.is_child_field) == 0:
                    doc = frappe.get_doc(row_date_field.doctype_name, d.name)
                    date_value = str(getattr(doc,row_date_field.fieldname))
                else :
                    # Child-table field: the notification is raised on the
                    # parent document, not the child row.
                    child_doc = frappe.get_doc(row_date_field.doctype_name, d.name)
                    date_value = str(getattr(child_doc,row_date_field.fieldname))
                    child_doctype_name = str(child_doc.doctype)
                    doc = frappe.get_doc(child_doc.parenttype, child_doc.parent)
                if self.condition and not frappe.safe_eval(self.condition, None, get_context(doc)):
                    continue
                # Stash trigger metadata on the document for the log entry.
                doc.child_doctype_name = child_doctype_name
                doc.date_value = date_value
                doc.date_notification = {
                    "label": row_date_field.label,
                    "fieldname": row_date_field.fieldname,
                    "days_before_or_after": row_date_field.days_before_or_after,
                    "days" : row_date_field.days
                }
                docs.append(doc)
        return docs

    def creat_extra_notification_log(self, doc):
        # NOTE: method name keeps the original 'creat' typo because callers
        # (evaluate_alert) reference it by this name.
        child_doctype_name = _(doc.child_doctype_name)+ " "
        date_value = doc.date_value
        enl_doc = frappe.new_doc('Extra Notification Log')
        enl_doc.subject = _(doc.doctype) +" "+_(doc.name) +" "+ child_doctype_name + _(doc.date_notification["label"]) +" "+ date_value
        enl_doc.doctype_name = doc.doctype
        enl_doc.doc_name = doc.name
        enl_doc.status = "Open"
        enl_doc.type = "Date"
        # (doc_name is assigned twice in the original; harmless duplicate)
        enl_doc.doc_name = doc.name
        enl_doc.message = child_doctype_name + _(doc.date_notification["label"]) + " " + date_value
        enl_doc.insert(ignore_permissions=True)
def get_context(doc):
    """Build the namespace used when evaluating notification conditions."""
    context = {"doc": doc}
    context["nowdate"] = nowdate
    context["frappe.utils"] = frappe.utils
    return context
@frappe.whitelist()
def get_date_fields(doctype_name):
    """Return Date/Datetime fields of *doctype_name*, including those on
    its child tables (the latter flagged with is_child_field)."""
    fields = frappe.get_meta(doctype_name).fields
    filed_list = []
    for d in fields:
        if d.fieldtype == "Date" or d.fieldtype == "Datetime":
            field = {
                "label":d.label,
                "fieldname": d.fieldname,
                "fieldtype" : d.fieldtype,
                "doctype_name": doctype_name,
            }
            filed_list.append(field)
        if d.fieldtype == "Table":
            # One level of recursion into child tables; d.options is the
            # child doctype's name.
            child_fields = frappe.get_meta(d.options).fields
            for c in child_fields:
                if c.fieldtype == "Date" or c.fieldtype == "Datetime":
                    field = {
                        "label":c.label,
                        "fieldname": c.fieldname,
                        "fieldtype" : c.fieldtype,
                        "is_child_field": 1,
                        "doctype_name": d.options,
                        # "child_doctype_name": c.name,
                        # NOTE(review): the raw docfield object below looks
                        # like a debug leftover -- confirm clients use it
                        # before removing.
                        "c":c,
                    }
                    filed_list.append(field)
    return filed_list
@frappe.whitelist()
def get_documents_for_today(notification):
    """Return the names of documents the given Date Notification would
    trigger today (read permission required)."""
    notification_doc = frappe.get_doc('Date Notification', notification)
    notification_doc.check_permission('read')
    return [matched.name for matched in notification_doc.get_documents_for_today()]
@frappe.whitelist()
def trigger_daily_alerts():
    """Evaluate every enabled Date Notification against today's documents
    and commit the resulting log entries."""
    # Skip notification processing while data is being synced or patched.
    if frappe.flags.in_import or frappe.flags.in_patch:
        return
    enabled_notifications = frappe.get_all('Date Notification',
        filters={
            'enable': 1
        })
    for row in enabled_notifications:
        alert = frappe.get_doc('Date Notification', row.name)
        for matched_doc in alert.get_documents_for_today():
            evaluate_alert(matched_doc, alert)
    frappe.db.commit()
def evaluate_alert(doc, alert):
    """Evaluate *alert*'s condition against *doc* and, when it holds,
    create an Extra Notification Log entry.  Template errors and any other
    failure are surfaced to the user via frappe.throw."""
    try:
        context = get_context(doc)
        if alert.condition:
            if not frappe.safe_eval(alert.condition, None, context):
                return
        alert.creat_extra_notification_log(doc)
    except TemplateError:
        frappe.throw(_("Error while evaluating Notification {0}. Please fix your template.").format(alert))
    except Exception as e:
        error_log = frappe.log_error(message=frappe.get_traceback(), title=str(e))
        # BUG FIX: translate the template first, then interpolate -- the old
        # code formatted before calling _(), so the translated string lookup
        # could never match.
        frappe.throw(_("Error in Notification: {0}").format(
            frappe.utils.get_link_to_form('Error Log', error_log.name)))
2 - data2graph/evaluateData/readData.py | Tocha4/HSM-Solubility | 0 | 12766528 | <filename>2 - data2graph/evaluateData/readData.py
import Anton as aen
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats import linregress as lin
def gcfunction(files, temp):
    """Return only the files whose temperature suffix equals *temp*.

    File names are expected to end in ``..._<temperature>.npy``; the text
    after the last underscore (minus the 4-character extension) is parsed
    as a float and compared against *temp*.
    """
    matching = []
    for path in files:
        name = os.path.basename(path)
        # Temperature is encoded after the last '_', before the '.npy'
        # suffix; the old version built this via an unused os.path.split
        # result and a convoluted [-1:][0] slice.
        temperature = float(name.rsplit('_', 1)[-1][:-4])
        if temperature == temp:
            matching.append(path)
    return matching
def entry():
    """Read fit windows from stdin: 'a,b;c,d' -> [[a, b], [c, d]]."""
    raw = input('>>> ')
    windows = []
    for pair in raw.split(';'):
        windows.append([float(value) for value in pair.split(',')])
    return windows
def seerawdata(files,temp):
    """Plot the raw curves of all files at *temp* in a 4x3 subplot grid,
    save the figure as '<last matching file name>.png', and return the
    matching file list.

    NOTE(review): `title` comes from the last loop iteration, so this
    raises NameError when no file matches *temp* -- confirm callers always
    pass a populated selection.
    """
    forty = gcfunction(files,temp)
    j = 1
    for i in forty:
        _,title = os.path.split(i)
        data = np.load(i)
        plt.subplot(4,3,j)
        # plot data row 1 against data row 0
        plt.plot(data[0,:],data[1,:], '-ob')
        plt.grid()
        plt.title(title)
        j += 1
    figure = plt.gcf()
    figure.set_size_inches(20,17)
    plt.savefig(('%s.png'% title), dpi=200)
    plt.close(figure)
    return forty
def createSlopes(tempfiles, times):
    """Fit a line to each normalized curve segment and collect the slopes.

    For file i, times[i] = [t_start, t_end] selects the fit window; a
    window with t_start == t_end skips that file.  The slope array is
    saved to a file named from the first file's underscore-separated name
    parts plus the parsed temperature.
    """
    slopes = np.array([])
    j = 0
    for i in tempfiles:
        if times[j][0] != times[j][1]:
            path,title = os.path.split(i)
            data = np.load(i)
            # indices of the first/last samples inside the fit window
            start = np.where(data[0] >= times[j][0])[0][0]
            ende = np.where(data[0] <= times[j][1])[0][-1]
            x = data[0,start:ende]
            # print(np.max(data[1,start]))
            # normalize the signal to its value at the window start
            y = data[1,start:ende]/data[1,start]
            slope, intercept, r_value, p_value, slope_std_error = lin(x, y)
            print(slope)
            fx = intercept+ slope*x
            lab = ('%s %s - %s'% (title, str(data[0][start]),str(data[0][ende])))
            plt.plot(x,fx, label=lab)
            plt.legend()
            j += 1
            slopes = np.append(slopes,slope)
        else: j += 1
    # Build the output file name from the first file's name parts plus the
    # temperature parsed from its '_<temp>.npy' suffix.
    name = os.path.split(tempfiles[0])[1].split('_')
    temp = ( '%.1f' % float(name[-1].split('.npy')[0]))
    name = ('%s_%s_%s_%s_Celsius' %(name[1],name[2],name[3],temp))
    file = os.path.join(os.path.split(os.path.split(tempfiles[0])[0])[0],name)
    plt.title('%s'% times)
    plt.show()
    np.save(file, slopes)
    return slopes
#%%
# Interactive analysis cell: select the .npy runs at one temperature,
# inspect the raw curves, type in fit windows by hand, then fit slopes.
path = r'Z:\2_Projekt__Permeabilitätsbeeinflussung\02_Löslichkeitsuntersuchungen\HS Microscope\Experiments\Final_results\data\1 - RIMR+KE60'
files = aen.searchfiles(path, '.npy')
files.sort()
#data = np.load(files[0])
temp = 42.5
tempfiles = seerawdata(files, temp)
# Windows are entered as 'start,end;start,end;...' (one pair per file).
times = entry()
slopes = createSlopes(tempfiles,times)
| 2.75 | 3 |
src/ptpip/data_object/storage_info.py | DethCount/ptpip-d5300 | 0 | 12766529 | <reponame>DethCount/ptpip-d5300
from ptpip.constants.storage.access_capability import AccessCapability
from ptpip.constants.storage.type import StorageType
from ptpip.constants.storage.filesystem_type import FilesystemType
from ptpip.packet.stream_reader import StreamReader
class StorageInfo():
    """Storage information parsed from a PTP StorageInfo data block."""

    def __init__(self, packet, data):
        """Parse *data* into storage attributes.

        :param packet: originating packet (kept for reference only)
        :param data: raw payload bytes; when None, nothing is parsed and
            no storage attributes are set
        """
        super(StorageInfo, self).__init__()
        self.packet = packet
        if data is not None:
            reader = StreamReader(data)
            self.typeId = reader.readUint16()
            # Map each raw id to its enum member, or None when unknown.
            self.type = StorageType(self.typeId) \
                if self.typeId in StorageType._value2member_map_ \
                else None
            self.filesystemTypeId = reader.readUint16()
            self.filesystemType = FilesystemType(self.filesystemTypeId) \
                if self.filesystemTypeId in FilesystemType._value2member_map_ \
                else None
            self.accessCapabilityId = reader.readUint16()
            self.accessCapability = AccessCapability(self.accessCapabilityId) \
                if self.accessCapabilityId in AccessCapability._value2member_map_ \
                else None
            self.maxCapacity = reader.readUint64()
            self.freeSpaceInBytes = reader.readUint64()
            self.freeSpaceInImages = reader.readUint32()
            self.description = reader.readUint8()
            self.volumeLabel = reader.readString()

    # FIX: was a plain function in the class body; mark it @staticmethod so
    # both StorageInfo.sizeof(x) and instance access behave correctly.
    @staticmethod
    def sizeof(num, suffix="B"):
        """Format *num* bytes as a human-readable string (binary units)."""
        for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
            if abs(num) < 1024.0:
                return f"{num:3.1f}{unit}{suffix}"
            num /= 1024.0
        return f"{num:.1f}Yi{suffix}"

    @staticmethod
    def _enum_name(member):
        """Name of an enum member, or '' when the member is None."""
        return member.name if member is not None else ''

    def __str__(self):
        # Build the report line-by-line instead of one giant concatenation;
        # the output is byte-identical to the previous implementation.
        lines = [
            'StorageInfo: ',
            '\ttypeId: ' + str(self.typeId),
            '\ttype: ' + StorageInfo._enum_name(self.type),
            '\tfilesystemTypeId: ' + str(self.filesystemTypeId),
            '\tfilesystemType: ' + StorageInfo._enum_name(self.filesystemType),
            '\taccessCapabilityId: ' + str(self.accessCapabilityId),
            '\taccessCapability: ' + StorageInfo._enum_name(self.accessCapability),
            '\tmaxCapacity: ' + StorageInfo.sizeof(self.maxCapacity),
            '\tfreeSpaceInBytes: ' + StorageInfo.sizeof(self.freeSpaceInBytes),
            '\tfreeSpaceInImages: ' + str(self.freeSpaceInImages),
            '\tdescription: ' + str(self.description),
            '\tvolumeLabel: ' + str(self.volumeLabel),
        ]
        return '\n'.join(lines) + '\n'
| 2.296875 | 2 |
grindone.py | racumen/python-binance | 0 | 12766530 | <filename>grindone.py
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 6 22:49:00 2021
@author: f.romano
"""
# from datetime import datetime
from binance.client import Client
import pandas as pd
import statistics
import matplotlib.pyplot as plt
import sklearn
from joblib import dump,load
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import grindfunc
# Feature-window definitions: per group, the sampling stride in minutes
# (n_samples) and the number of points taken (n_values).
variables_definition=[
    {"name":"minutes","n_samples":1,"n_values":60},
    {"name":"15minutes","n_samples":15,"n_values":28},
    {"name":"hours","n_samples":60,"n_values":0},
    {"name":"6hours","n_samples":360,"n_values":0},
    ]
# Target window: n_samples points between samples_from and samples_to.
target_definition={"samples_from":5, "samples_to":120, "n_samples":10}

# Accumulate per-group offsets plus the total sample/value counts.
n_samples=0
n_values=0
for v in variables_definition:
    v["offset_values"]=n_values
    v["offset_samples"]=n_samples
    n_samples=n_samples+v["n_samples"]*v["n_values"]
    n_values=n_values+v["n_values"]
values=list(range(n_values))

# Load minute candles starting 2021-01-01 and report what was imported.
df=grindfunc.import_samples("Binance_BTCUSDT_minute3.csv","2021-01-01")
h_samples=df.values.tolist()
print("Importati ",len(h_samples),"valori da",h_samples[-1],"a",h_samples[0])
print("Variabili",n_values)
print("Giorni",grindfunc.twodec(n_samples/60/24))
print("Vampioni",n_samples)

# NOTE(review): inpt is hard-coded and both guards below are 'if 1 or ...',
# i.e. always true -- the interactive prompts appear deliberately disabled
# for batch runs; confirm before re-enabling them.
inpt=1
#inpt=input("Prepare to train model? y/n")
if 1 or inpt =="y":
    datefrom="2021-01-01"
    dateto="2021-03-01"
    train_X, train_y = grindfunc.preparetrain(df,datefrom,dateto,variables_definition,target_definition)
    np.savetxt("train_y4.csv", train_y, delimiter=",")
    np.savetxt("train_X4.csv", train_X, delimiter=",")

#inpt=input("Train model? y/n")
if 1 or inpt =="y":
    # Instantiate model with 50 decision trees
    rf = RandomForestRegressor(n_estimators = 50, random_state = 42)
    # Train the model on training data, then persist it
    rf.fit(train_X, train_y);
    dump(rf,"rf4.joblib")
| 2.234375 | 2 |
das/migrations/0005_auto_20180415_1947.py | knowwinter/monkey | 0 | 12766531 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-15 11:47
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter Category.parent and Comment.parent to be
    optional (blank/null) self-references with related_name='children'."""

    dependencies = [
        ('das', '0004_auto_20180415_1926'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                                    related_name='children', to='das.Category'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                                    related_name='children', to='das.Comment'),
        ),
    ]
| 1.492188 | 1 |
openstates/metadata/data/sc.py | washabstract/openstates-core | 9 | 12766532 | from ..models import State, Chamber, simple_numbered_districts
# Jurisdiction metadata for South Carolina: bicameral legislature with a
# 124-seat House and a 46-seat Senate, both using simple numbered districts.
SC = State(
    name="South Carolina",
    abbr="SC",
    capital="Columbia",
    capital_tz="America/New_York",
    fips="45",
    unicameral=False,
    legislature_name="South Carolina Legislature",
    legislature_organization_id="ocd-organization/57920b9f-163e-4cc3-9642-e616579b47ee",
    executive_name="Office of the Governor",
    executive_organization_id="ocd-organization/852b2773-ace9-5d16-b69b-ecc11f74b548",
    division_id="ocd-division/country:us/state:sc",
    jurisdiction_id="ocd-jurisdiction/country:us/state:sc/government",
    url="http://www.scstatehouse.gov/",
    lower=Chamber(
        chamber_type="lower",
        name="House",
        organization_id="ocd-organization/11144d16-6b61-4e2a-94fe-6f9c24e36193",
        num_seats=124,
        title="Representative",
        districts=simple_numbered_districts(
            "ocd-division/country:us/state:sc", "lower", 124
        ),
    ),
    upper=Chamber(
        chamber_type="upper",
        name="Senate",
        organization_id="ocd-organization/705ec497-4b18-4a39-a0e0-ce114a2f3c78",
        num_seats=46,
        title="Senator",
        districts=simple_numbered_districts(
            "ocd-division/country:us/state:sc", "upper", 46
        ),
    ),
)
| 1.84375 | 2 |
data/quantum/ts/g-bQOOHIsom/TS.py | goldmanm/butanol_paper_data | 0 | 12766533 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): MolproLog, GaussianLog, HinderedRotor and ScanLog are not
# imported here -- presumably this file is executed inside an Arkane/
# CanTherm-style input namespace that provides them; confirm before
# running standalone.
spinMultiplicity = 2

# Single-point energy taken from the Molpro F12 log.
energy = {
    'CCSD(T)-F12/cc-pVTZ-F12': MolproLog('TSN09_f12.out'),
}

# Harmonic frequencies from the Gaussian frequency job.
frequencies = GaussianLog('ts3-freq.log')

# Hindered-rotor scans: pivot/top atom indices and symmetry numbers.
rotors = [HinderedRotor(scanLog=ScanLog('scan_0.log'), pivots=[4,10], top=[10,11,12,13,14], symmetry=1, fit='best'),
          HinderedRotor(scanLog=ScanLog('scan_1.log'), pivots=[10,13], top=[13,14], symmetry=1, fit='best'),
          HinderedRotor(scanLog=ScanLog('scan_2.log'), pivots=[4,6], top=[6,7,8,9], symmetry=3, fit='best'),]
| 1.867188 | 2 |
fancy/config/base_config.py | susautw/fancy-config | 1 | 12766534 | <filename>fancy/config/base_config.py
import inspect
from abc import ABC
from typing import TYPE_CHECKING, Dict, List, Optional
from . import ConfigStructure, ConfigContext, exc
from . import Option
if TYPE_CHECKING:
from ..config import BaseConfigLoader
class BaseConfig(ConfigStructure, ConfigContext, ABC):  # TODO more accurate error msg
    """Base class for configuration objects populated by a BaseConfigLoader.

    Options are declared as class-level ``Option`` descriptors; item access
    (``config[name]``) resolves through the option-name -> attribute-name
    mapping built lazily per class.
    """
    _name_mapping: Dict[str, str]
    _all_options: Dict[str, Option]
    _all_required_options: List[Option]
    _loader: Optional['BaseConfigLoader'] = None

    def __init__(self, loader: Optional['BaseConfigLoader'] = None):
        if loader is not None:
            self.load(loader)

    def load(self, loader: 'BaseConfigLoader') -> None:
        """Populate this config from *loader* and verify that every
        required option received a value; then invoke post_load()."""
        self._loader = loader
        loader.load(self)
        for option in self.get_all_required_options():
            if not hasattr(self, option.__name__):
                raise ValueError(f'the missing option {option.name} is required.')
        self.post_load()

    def load_by_context(self, context: ConfigContext, val):
        self.load(context.get_loader().get_sub_loader(val))

    def __getitem__(self, item):
        # BUG FIX: the guard was inverted ('if isinstance(item, str): raise'),
        # which rejected every valid string key; mirror __setitem__'s check.
        if not isinstance(item, str):
            raise TypeError(f'index must be str, not {type(item)}')
        try:
            return self.__getattribute__(self.get_name_mapping()[item])
        except AttributeError:
            raise KeyError(f'not contains the config named {item}')

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError(f'index must be str, not {type(key)}')
        if key not in self.get_name_mapping().keys():
            raise KeyError(f'not contains the config named {key}, value: {repr(value)}')
        key = self.get_name_mapping()[key]
        self.__setattr__(key, value)

    def get_loader(self) -> 'BaseConfigLoader':
        if self._loader is None:
            raise exc.ContextNotLoadedError(self)
        return self._loader

    @property
    def loaded(self) -> bool:
        # A config counts as loaded once a loader has been attached.
        return self._loader is not None

    def post_load(self):
        """Hook for subclasses; called after a successful load()."""
        pass

    @classmethod
    def get_all_options(cls) -> Dict[str, Option]:
        # Cached per class; vars(cls) is checked (not hasattr) so a subclass
        # does not inherit its parent's cache.
        if "_all_options" not in vars(cls):
            cls._all_options = {name: option for name, option in inspect.getmembers(cls) if isinstance(option, Option)}
        return cls._all_options

    @classmethod
    def get_all_required_options(cls) -> List[Option]:
        if "_all_required_options" not in vars(cls):
            cls._all_required_options = [option for option in cls.get_all_options().values() if option.required]
        return cls._all_required_options

    @classmethod
    def get_name_mapping(cls) -> Dict[str, str]:
        # Maps each option's public name to the attribute name it lives on.
        if "_name_mapping" not in vars(cls):
            cls._name_mapping = {option.name: attr_name for attr_name, option in cls.get_all_options().items()}
        return cls._name_mapping

    def to_dict(self) -> dict:
        """Return a dict of all assigned option values keyed by option name."""
        return {
            option.name: getattr(self, option.__name__)
            for option in self.get_all_options().values()
            if option.is_assigned(self)
        }

    def __repr__(self):
        return str(self.to_dict())

    def __str__(self):
        return self.__repr__()
| 2.421875 | 2 |
tests/test_pipelines.py | bow/pytest-pipeline | 20 | 12766535 | # -*- coding: utf-8 -*-
"""
plugin tests
~~~~~~~~~~~~
"""
# (c) 2014-2020 <NAME> <<EMAIL>>
import glob
import os
import sys
import pytest
pytest_plugins = "pytester"
MOCK_PIPELINE = """
#!/usr/bin/env python
if __name__ == "__main__":
import os
import sys
OUT_DIR = "output_dir"
if len(sys.argv) > 1:
sys.exit(1)
sys.stdout.write("stdout stream")
sys.stderr.write("stderr stream")
with open("log.txt", "w") as log:
log.write("not really\\n")
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
with open(os.path.join(OUT_DIR, "results.txt"), "w") as result:
result.write("42\\n")
"""
@pytest.fixture(scope="function")
def mockpipe(request, testdir):
    """Write the mock pipeline script into the test directory."""
    return testdir.makefile("", pipeline=MOCK_PIPELINE)
# Pipeline test module whose before_run hook copies the mock pipeline
# into the run directory before execution.
TEST_OK = f"""
import os, shutil, unittest

import pytest
from pytest_pipeline import PipelineRun, mark


class MyRun(PipelineRun):

    @mark.before_run
    def prep_executable(self):
        shutil.copy2("../pipeline", "pipeline")
        assert os.path.exists("pipeline")


run = MyRun.make_fixture("class", "{sys.executable} pipeline")


@pytest.mark.usefixtures("run")
class TestMyPipeline(unittest.TestCase):

    def test_exit_code(self):
        assert self.run_fixture.exit_code == 0
"""


def test_pipeline_basic(mockpipe, testdir):
    """Test for basic run"""
    test = testdir.makepyfile(TEST_OK)
    result = testdir.inline_run(
        "-v",
        f"--base-pipeline-dir={test.dirname}",
        test
    )
    # Exactly one inner test ran and it passed.
    passed, skipped, failed = result.listoutcomes()
    assert len(passed) == 1
    assert len(skipped) == 0
    assert len(failed) == 0
# Same scenario as TEST_OK, but built via the class_fixture() shorthand.
TEST_OK_CLASS_FIXTURE = f"""
import os, shutil, unittest

import pytest
from pytest_pipeline import PipelineRun, mark


class MyRun(PipelineRun):

    @mark.before_run
    def prep_executable(self):
        shutil.copy2("../pipeline", "pipeline")
        assert os.path.exists("pipeline")


run = MyRun.class_fixture("{sys.executable} pipeline")


@pytest.mark.usefixtures("run")
class TestMyPipelineAgain(unittest.TestCase):

    def test_exit_code(self):
        assert self.run_fixture.exit_code == 0
"""


def test_pipeline_class_fixture(mockpipe, testdir):
    """Test for basic run using the class_fixture helper"""
    test = testdir.makepyfile(TEST_OK_CLASS_FIXTURE)
    result = testdir.inline_run(
        "-v",
        f"--base-pipeline-dir={test.dirname}",
        test
    )
    passed, skipped, failed = result.listoutcomes()
    assert len(passed) == 1
    assert len(skipped) == 0
    assert len(failed) == 0
# Pipeline run with stdout/stderr redirected into named files.
TEST_REDIRECTION = f"""
import os, shutil, unittest

import pytest
from pytest_pipeline import PipelineRun, mark


class MyRun(PipelineRun):

    @mark.before_run
    def prep_executable(self):
        shutil.copy2("../pipeline", "pipeline")
        assert os.path.exists("pipeline")


run = MyRun.make_fixture(
    "class",
    cmd="{sys.executable} pipeline",
    stdout="stream.out",
    stderr="stream.err",
)


@pytest.mark.usefixtures("run")
class TestMyPipeline(unittest.TestCase):

    def test_exit_code(self):
        assert self.run_fixture.exit_code == 0
"""


def test_pipeline_redirection(mockpipe, testdir):
    # stdout/stderr given as file names must be captured into the run dir.
    test = testdir.makepyfile(TEST_REDIRECTION)
    result = testdir.inline_run(
        "-v",
        f"--base-pipeline-dir={test.dirname}",
        test
    )
    passed, skipped, failed = result.listoutcomes()
    assert len(passed) == 1
    assert len(skipped) == 0
    assert len(failed) == 0
    # One run directory is created, named after the PipelineRun class.
    testdir_matches = glob.glob(os.path.join(test.dirname, "MyRun*"))
    assert len(testdir_matches) == 1
    testdir_pipeline = testdir_matches[0]
    stdout = os.path.join(testdir_pipeline, "stream.out")
    assert os.path.exists(stdout)
    assert open(stdout).read() == "stdout stream"
    stderr = os.path.join(testdir_pipeline, "stream.err")
    assert os.path.exists(stderr)
    assert open(stderr).read() == "stderr stream"
# Pipeline run keeping stdout/stderr in memory (stdout=True/stderr=True).
TEST_REDIRECTION_MEM = f"""
import os, shutil, unittest

import pytest
from pytest_pipeline import PipelineRun, mark


class MyRun(PipelineRun):

    @mark.before_run
    def prep_executable(self):
        shutil.copy2("../pipeline", "pipeline")
        assert os.path.exists("pipeline")


run = MyRun.make_fixture(
    "class",
    cmd="{sys.executable} pipeline",
    stdout=True,
    stderr=True,
)


@pytest.mark.usefixtures("run")
class TestMyPipeline(unittest.TestCase):

    def test_exit_code(self):
        assert self.run_fixture.exit_code == 0

    def test_stdout(self):
        assert self.run_fixture.stdout == b"stdout stream"

    def test_stderr(self):
        assert self.run_fixture.stderr == b"stderr stream"
"""


def test_pipeline_redirection_mem(mockpipe, testdir):
    # All three inner tests (exit code, stdout, stderr) must pass.
    test = testdir.makepyfile(TEST_REDIRECTION_MEM)
    result = testdir.inline_run(
        "-v",
        f"--base-pipeline-dir={test.dirname}",
        test
    )
    passed, skipped, failed = result.listoutcomes()
    assert len(passed) == 3
    assert len(skipped) == 0
    assert len(failed) == 0
    testdir_matches = glob.glob(os.path.join(test.dirname, "MyRun*"))
    assert len(testdir_matches) == 1
# The run fixture can also be consumed by a plain (non-class) test.
TEST_AS_NONCLASS_FIXTURE = f"""
import os, shutil, unittest

import pytest
from pytest_pipeline import PipelineRun, mark


class MyRun(PipelineRun):

    @mark.before_run
    def prep_executable(self):
        shutil.copy2("../pipeline", "pipeline")
        assert os.path.exists("pipeline")


run = MyRun.make_fixture("module", "{sys.executable} pipeline")


def test_exit_code(run):
    assert run.exit_code == 0
"""


def test_pipeline_as_nonclass_fixture(mockpipe, testdir):
    """Test for PipelineTest classes without run attribute"""
    test = testdir.makepyfile(TEST_AS_NONCLASS_FIXTURE)
    result = testdir.inline_run(
        "-v",
        f"--base-pipeline-dir={test.dirname}",
        test
    )
    passed, skipped, failed = result.listoutcomes()
    assert len(passed) == 1
    assert len(skipped) == 0
    assert len(failed) == 0
TEST_OK_GRANULAR = f"""
import os, shutil, unittest
import pytest
from pytest_pipeline import PipelineRun, mark
class MyRun(PipelineRun):
@mark.before_run(order=2)
def prep_executable(self):
shutil.copy2("../pipeline", "pipeline")
assert os.path.exists("pipeline")
@mark.before_run(order=1)
def check_init_condition(self):
assert not os.path.exists("pipeline")
run = MyRun.make_fixture("class", cmd="{sys.executable} pipeline")
@pytest.mark.usefixtures("run")
class TestMyPipeline(unittest.TestCase):
def test_exit_code(self):
assert self.run_fixture.exit_code == 0
def test_output_file(self):
assert os.path.exists(os.path.join("output_dir", "results.txt"))
"""
def test_pipeline_granular(mockpipe, testdir):
    """before_run hooks honour their 'order' argument."""
    testfile = testdir.makepyfile(TEST_OK_GRANULAR)
    outcome = testdir.inline_run("-v", f"--base-pipeline-dir={testfile.dirname}", testfile)
    n_passed, n_skipped, n_failed = (len(o) for o in outcome.listoutcomes())
    assert (n_passed, n_skipped, n_failed) == (2, 0, 0)
MOCK_PIPELINE_TIMEOUT = """
#!/usr/bin/env python
if __name__ == "__main__":
import time
time.sleep(10)
"""
TEST_TIMEOUT = f"""
import os, shutil, unittest
import pytest
from pytest_pipeline import PipelineRun, mark
class MyRun(PipelineRun):
@mark.before_run
def test_and_prep_executable(self):
shutil.copy2("../pipeline", "pipeline")
assert os.path.exists("pipeline")
run = PipelineRun.make_fixture(
"class",
cmd="{sys.executable} pipeline",
timeout=0.01,
)
@pytest.mark.usefixtures("run")
class TestMyPipeline(unittest.TestCase):
def test_exit_code(self):
assert self.run_fixture.exit_code != 0
"""
@pytest.fixture(scope="function")
def mockpipe_timeout(request, testdir):
    """Write the sleeping mock pipeline executable into the test directory."""
    return testdir.makefile("", pipeline=MOCK_PIPELINE_TIMEOUT)
def test_pipeline_timeout(mockpipe_timeout, testdir):
    """A run exceeding its timeout reports a nonzero exit code (inner test fails)."""
    testfile = testdir.makepyfile(TEST_TIMEOUT)
    outcome = testdir.inline_run("-v", f"--base-pipeline-dir={testfile.dirname}", testfile)
    n_passed, n_skipped, n_failed = (len(o) for o in outcome.listoutcomes())
    assert (n_passed, n_skipped, n_failed) == (0, 0, 1)
MOCK_PIPELINE_FMT = """
#!/usr/bin/env python
import sys
if __name__ == "__main__":
print(sys.argv[1])
"""
TEST_FMT = f"""
import os, shutil, unittest
import pytest
from pytest_pipeline import PipelineRun, mark
class MyRun(PipelineRun):
@mark.before_run
def prep_executable(self):
shutil.copy2("../pipeline", "pipeline")
assert os.path.exists("pipeline")
run = MyRun.make_fixture(
"class",
"{sys.executable} pipeline {{run_dir}}",
stdout=True,
)
@pytest.mark.usefixtures("run")
class TestMyPipeline(unittest.TestCase):
def test_exit_code(self):
assert self.run_fixture.exit_code == 0
def test_stdout(self):
stdout = self.run_fixture.stdout.decode("utf-8").strip()
assert self.run_fixture.run_dir == stdout
"""
@pytest.fixture(scope="function")
def mockpipe_fmt(request, testdir):
    """Mock pipeline script that echoes its first argument (templated-command test)."""
    # Previous docstring said "with timeout" — a copy-paste from mockpipe_timeout;
    # this fixture writes MOCK_PIPELINE_FMT, the argv-echoing script.
    mp = testdir.makefile("", pipeline=MOCK_PIPELINE_FMT)
    return mp
def test_pipeline_fmt(mockpipe_fmt, testdir):
    """The {run_dir} placeholder in the command template is substituted correctly."""
    testfile = testdir.makepyfile(TEST_FMT)
    outcome = testdir.inline_run("-v", f"--base-pipeline-dir={testfile.dirname}", testfile)
    n_passed, n_skipped, n_failed = (len(o) for o in outcome.listoutcomes())
    assert (n_passed, n_skipped, n_failed) == (2, 0, 0)
| 2.21875 | 2 |
Python/Python20/15.py | sapieninja/AdventOfCode | 0 | 12766536 | import aoc_utils
import datetime
si = aoc_utils.read()
starting_nos = [int(tok) for tok in si.split(',')]


def _memory_game(starting, targets):
    """Play the Van Eck "memory game" (AoC 2020 day 15).

    Each turn the number spoken is 0 if the previous number was new,
    otherwise the gap between its last two occurrences.

    :param starting: list of starting numbers, spoken on turns 1..len(starting)
    :param targets: iterable of turn numbers whose spoken value is wanted
    :return: dict mapping each target turn to the number spoken on it

    Improvements over the original inline loop:
    - uses a dict of last-seen turns instead of a preallocated
      30,000,000-element list (far less memory, no fixed upper bound);
    - the original 'first' flag assumed all starting numbers are distinct;
      the dict handles duplicated starting numbers correctly as well;
    - any set of target turns can be queried, not just 2020 / 30,000,000.
    """
    limit = max(targets)
    wanted = set(targets)
    # Turn on which each number (except the most recent one) was last spoken.
    last_seen = {num: turn for turn, num in enumerate(starting[:-1], start=1)}
    current = starting[-1]
    spoken_at = {}
    if len(starting) in wanted:
        spoken_at[len(starting)] = current
    for turn in range(len(starting), limit):
        # Number spoken on turn+1: gap since 'current' was previously said, or 0.
        nxt = turn - last_seen[current] if current in last_seen else 0
        last_seen[current] = turn
        current = nxt
        if turn + 1 in wanted:
            spoken_at[turn + 1] = current
    return spoken_at


# Same observable output as before: part 1 (turn 2020), then part 2
# (turn 30,000,000), each on its own line.
answers = _memory_game(starting_nos, (2020, 30000000))
print(answers[2020])
print(answers[30000000])
| 2.5 | 2 |
camera_calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/main_fpn.py | tamaslevente/trai | 0 | 12766537 | <gh_stars>0
import torch
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from constants import *
from model_fpn import I2D
import argparse
import time
# from utils.net_utils import adjust_learning_rate
from torch.autograd import Variable
# from dataset.dataloader import DepthDataset
# from dataset.nyuv2_dataset import NYUv2Dataset
from torchvision.utils import save_image
from dataset.nyuv2_dataset import MyCustomDataset
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.utils.data.sampler import Sampler
from collections import Counter
import matplotlib
import cv2
import open3d as o3d
matplotlib.use('Agg')
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class DDDDepthDiff(nn.Module):
    """3-D point-cloud loss between a predicted and a ground-truth depth map.

    Both depth maps are back-projected to point clouds with fixed camera
    intrinsics; the loss combines a log-RMSE on depth with per-axis RMSE
    terms ("loss17" in the original experiment numbering).  The many
    commented-out experimental loss variants of the original file have been
    removed; see version control history for them.
    """

    def __init__(self):
        super(DDDDepthDiff, self).__init__()

    def forward(self, fake, real, epoch, show_image):
        """Return ``(delta, loss)``.

        :param fake: predicted depth batch (B, C, H, W)
        :param real: ground-truth depth batch, same layout
        :param epoch: kept for interface compatibility (was used by the
            removed debug-visualisation code)
        :param show_image: kept for interface compatibility (unused)
        :return: ``delta = [RMSE_log, lossX, lossY, lossZ]`` and the combined
            scalar loss
        """
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        eps = 1e-7
        batch_size = real.shape[0]
        real1 = real.clone()
        fake1 = fake.clone()
        # Accumulate the point clouds of every image in the batch.
        all_real_pcd = self.point_cloud(real1[0]).clone() * 1000.0
        all_fake_pcd = self.point_cloud(fake1[0]).clone() * 1000.0
        for nr_img in range(1, batch_size):
            real_pcd = self.point_cloud(real1[nr_img]).clone() * 1000.0
            fake_pcd = self.point_cloud(fake1[nr_img]).clone() * 1000.0
            # BUG FIX: torch.cat takes a *sequence* of tensors; the previous
            # torch.cat(all_real_pcd, real_pcd) raised a TypeError whenever
            # batch_size > 1.
            all_real_pcd = torch.cat((all_real_pcd, real_pcd), dim=0)
            all_fake_pcd = torch.cat((all_fake_pcd, fake_pcd), dim=0)
        # Avoid exact zeros (they would break the log terms below).
        all_real_pcd[all_real_pcd == 0] = eps
        all_fake_pcd[all_fake_pcd == 0] = eps
        # Drop points whose coordinate is NaN (invalid depth pixels) in the
        # real cloud first, then in the fake cloud, per axis — preserving the
        # original two-stage filtering order.
        nan_z_real = all_real_pcd[:, 2].clone()
        temp_z_real = nan_z_real[~torch.isnan(nan_z_real)]
        nan_z_fake = all_fake_pcd[:, 2].clone()
        temp_z_fake = nan_z_fake[~torch.isnan(nan_z_real)]
        nan_x_real = all_real_pcd[:, 0].clone()
        temp_x_real = nan_x_real[~torch.isnan(nan_x_real)]
        nan_x_fake = all_fake_pcd[:, 0].clone()
        temp_x_fake = nan_x_fake[~torch.isnan(nan_x_real)]
        nan_y_real = all_real_pcd[:, 1].clone()
        temp_y_real = nan_y_real[~torch.isnan(nan_y_real)]
        nan_y_fake = all_fake_pcd[:, 1].clone()
        temp_y_fake = nan_y_fake[~torch.isnan(nan_y_real)]
        z_real = temp_z_real[~torch.isnan(temp_z_fake)]
        z_fake = temp_z_fake[~torch.isnan(temp_z_fake)]
        x_real = temp_x_real[~torch.isnan(temp_x_fake)]
        x_fake = temp_x_fake[~torch.isnan(temp_x_fake)]
        y_real = temp_y_real[~torch.isnan(temp_y_fake)]
        y_fake = temp_y_fake[~torch.isnan(temp_y_fake)]
        # Per-axis RMSE terms plus a log-RMSE on depth.
        lossX = torch.sqrt(torch.mean(torch.abs(x_real - x_fake) ** 2))
        lossZ = torch.sqrt(torch.mean(torch.abs(z_real - z_fake) ** 2))
        lossY = torch.sqrt(torch.mean(torch.abs(y_real - y_fake) ** 2))
        RMSE_log = torch.sqrt(torch.mean(
            torch.abs(torch.log(torch.abs(z_real)) - torch.log(torch.abs(z_fake))) ** 2))
        delta = [RMSE_log, lossX, lossY, lossZ]
        loss17 = 100 * RMSE_log * torch.abs(
            10 * (3 - torch.exp(1 * lossX) - torch.exp(1 * lossY) - torch.exp(1 * lossZ)))
        return delta, loss17

    def l2_norm(self, v):
        """Row-wise Euclidean norm of an (N, 3) numpy array."""
        norm_v = np.sqrt(np.sum(np.square(v), axis=1))
        return norm_v

    def point_cloud(self, depth1):
        """Back-project a depth image to an (H*W, 3) point tensor in metres.

        Uses fixed pinhole intrinsics (cx, cy, fx, fy).  Pixels with invalid
        depth (<= 0 or >= 65535) get NaN coordinates.
        NOTE(review): the device is hard-coded to 'cuda'.
        """
        cx = 334.081
        cy = 169.808
        fx = 460.585
        fy = 460.268
        # 3-channel inputs carry depth in channel 2, otherwise channel 0.
        if depth1.shape[0] == 3:
            depth = depth1[2].clone()
        else:
            depth = depth1[0].clone()
        rows, cols = depth.shape
        # (The original also built a cols x cols meshgrid here whose result
        # was immediately overwritten — removed as dead work.)
        c = torch.meshgrid(torch.arange(cols))
        new_c = c[0].reshape([1, cols]).to('cuda')
        r = torch.meshgrid(torch.arange(rows))
        new_r = r[0].unsqueeze(-1).to('cuda')
        valid = (depth > 0) & (depth < 65535)
        nan_number = torch.tensor(np.nan).to('cuda')
        z = torch.where(valid, depth / 1000.0, nan_number)  # mm -> m
        x = torch.where(valid, z * (new_c - cx) / fx, nan_number)
        y = torch.where(valid, z * (new_r - cy) / fy, nan_number)
        dimension = rows * cols
        z_ok = z.reshape(dimension)
        x_ok = x.reshape(dimension)
        y_ok = y.reshape(dimension)
        return torch.stack((x_ok, y_ok, z_ok), dim=1)

    def image_from_cloud(self, point_cloud):
        """Re-project an (N, 3) numpy point cloud onto a 360x640 depth image.

        NOTE(review): z is multiplied by 1000 twice (once at extraction, once
        inside np.where), which only cancels out if the cloud is already in
        metres at a particular scale — confirm the expected units before
        relying on the absolute values; this path was used for debugging.
        """
        cx = 334.081
        cy = 169.808
        fx = 460.585
        fy = 460.268
        np_image = np.tile(0, (360, 640))
        z = point_cloud[:, 2] * 1000.0
        x = point_cloud[:, 0]
        y = point_cloud[:, 1]
        valid = ~(np.isnan(z) | (z == 0))
        z = np.where(valid, z * 1000.0, 0)
        valid_x = ~(np.isnan(x) | np.isnan(z) | (z == 0))
        pos_x = np.where(valid_x, (x * 1000.0 * fx) / z + cx, 0)
        pos_x = pos_x.astype(np.int32)
        valid_y = ~(np.isnan(y) | np.isnan(z) | (z == 0))
        pos_y = np.where(valid_y, (y * 1000.0 * fy) / z + cy, 0)
        pos_y = pos_y.astype(np.int32)
        # Clamp projected pixel coordinates to the image bounds.
        pos_x[pos_x > 639] = 639
        pos_x[pos_x < 0] = 0
        pos_y[pos_y > 359] = 359
        pos_y[pos_y < 0] = 0
        pos_x = pos_x.reshape(360, 640)
        pos_y = pos_y.reshape(360, 640)
        z = z.reshape(360, 640)
        np_image[pos_y, pos_x] = z
        return np_image
class NormalsDiff(nn.Module):
    """Mean angular difference (degrees) between surface normals estimated
    from the predicted and ground-truth depth maps via open3d.

    NOTE(review): this uses the legacy open3d (<0.10) free-function API
    (o3d.geometry.estimate_normals / create_point_cloud_from_depth_image);
    it will not run against modern open3d without porting.
    NOTE(review): only the first element of the batch is evaluated
    (real[0] / fake[0]).
    """
    def __init__(self):
        super(NormalsDiff, self).__init__()
    def forward(self, fake, real):
        """Return the mean unoriented normal angle error, as a numpy float."""
        # Upsample the prediction to the target resolution if needed.
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        eps = 1e-7
        # First batch element only; moved to CPU numpy for open3d.
        real = real[0].cpu().detach().numpy()
        fake = fake[0].cpu().detach().numpy()
        # Replace exact zeros so the depth back-projection stays valid.
        real[real==0] = eps
        fake[fake==0] = eps
        real_pcd = self.point_cloud(real)
        fake_pcd = self.point_cloud(fake)
        # 3 cm radius / 30-neighbour hybrid KD-tree normal estimation.
        o3d.geometry.estimate_normals(real_pcd,search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.03,max_nn=30))
        o3d.geometry.estimate_normals(fake_pcd,search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.03,max_nn=30))
        real_normals = np.array(real_pcd.normals)
        fake_normals = np.array(fake_pcd.normals)
        # Normalise both normal sets to unit length.
        normal_gt_norm = self.l2_norm(real_normals)
        normal_results_norm = self.l2_norm(fake_normals)
        normals_results = np.divide(fake_normals, np.tile(np.expand_dims(normal_results_norm, axis=1), [1, 3]))
        normals_gt = np.divide(real_normals, np.tile(np.expand_dims(normal_gt_norm, axis=1), [1, 3]))
        # Not oriented rms
        # NOTE(review): local 'nn' shadows the torch.nn module inside this scope.
        nn = np.sum(np.multiply(normals_gt, normals_results), axis=1)
        # Clamp cosines into [-1, 1] before arccos to avoid NaNs.
        nn[nn > 1] = 1
        nn[nn < -1] = -1
        # abs() makes the angle orientation-independent (0..90 degrees).
        angle = np.rad2deg(np.arccos(np.abs(nn)))
        # inner_product = (fake_normals * real_normals).sum(1)
        # fake_norm = fake_normals.pow(2).sum(1).pow(0.5)
        # real_norm = real_normals.pow(2).sum(1).pow(0.5)
        # cos = inner_product / (2 * fake_norm * real_norm)
        # angle = torch.acos(cos)
        # eps=1e-7
        # # gt2 = gt.clone()
        # # pred2 = pred.clone()
        # gt2[gt2==0] = eps
        # pred2[pred2==0] = eps
        # if method == 0:
        # o3d.visualization.draw_geometries([real_pcd])
        loss = np.mean(angle)
        return loss
    def l2_norm(self,v):
        """Row-wise Euclidean norm of an (N, 3) numpy array."""
        norm_v = np.sqrt(np.sum(np.square(v), axis=1))
        return norm_v
    # def theta(v, w): return arccos(v.dot(w)/(norm(v)*norm(w)))
    def point_cloud(self, depth):
        """Back-project a (1, H, W) numpy depth image (mm) to an open3d cloud.

        Uses fixed pinhole intrinsics for a 640x360 image; depth is divided
        by 1000 (mm -> m) before projection.
        """
        # depth is of shape (1,480,640)
        cx = 334.081
        cy = 169.808
        fx = 460.585
        fy = 460.268
        open3d_img = o3d.geometry.Image(depth[0]/1000.0)
        intrinsics = o3d.camera.PinholeCameraIntrinsic(640,360,fx,fy,cx,cy)
        pcd = o3d.geometry.create_point_cloud_from_depth_image(open3d_img,intrinsic=intrinsics)
        return pcd #np.dstack((x, y, z))
class RMSE_log(nn.Module):
    """Root-mean-squared error in log space, robust to zero-valued pixels."""

    def __init__(self):
        super(RMSE_log, self).__init__()

    def forward(self, fake, real):
        # Upsample the prediction to the target resolution if needed.
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        eps = 1e-7
        safe_real = real.clone()
        safe_fake = fake.clone()
        # Replace exact zeros so log() stays finite.
        safe_real[safe_real == 0] = eps
        safe_fake[safe_fake == 0] = eps
        log_diff = torch.abs(torch.log(safe_real) - torch.log(safe_fake))
        return torch.sqrt((log_diff ** 2).mean())
class L1(nn.Module):
    """Mean absolute error between depths, scaled by a factor of 10."""

    def __init__(self):
        super(L1, self).__init__()

    def forward(self, fake, real):
        # Upsample the prediction to the target resolution if needed.
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        diff = 10. * real - 10. * fake
        return diff.abs().mean()
class L1_log(nn.Module):
    """Mean absolute error in log space.

    Zero-valued pixels are clamped to a small epsilon before taking the log
    (the same guard RMSE_log uses); previously zeros produced -inf/NaN
    losses that poisoned training.
    """

    def __init__(self):
        super(L1_log, self).__init__()

    def forward(self, fake, real):
        # Upsample the prediction to the target resolution if needed.
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        eps = 1e-7
        real2 = real.clone()
        fake2 = fake.clone()
        # Guard against log(0), consistent with RMSE_log.
        real2[real2 == 0] = eps
        fake2[fake2 == 0] = eps
        loss = torch.mean(torch.abs(torch.log(real2) - torch.log(fake2)))
        return loss
class BerHu(nn.Module):
    """Reverse Huber (berHu) loss.

    L1 below the adaptive threshold ``delta`` and quadratic above it, where
    ``delta = threshold * max|real - fake|``.  Pixels with ``real <= 0`` are
    masked out of the prediction before the difference is taken.
    NOTE(review): when prediction equals target, ``delta`` is 0 and ``part2``
    divides by zero — identical to the original formulation.
    """

    def __init__(self, threshold=0.2):
        super(BerHu, self).__init__()
        self.threshold = threshold

    def forward(self, fake, real):
        """Return the summed berHu loss over all pixels.

        BUG FIX: the original signature was ``forward(real, fake)`` without
        ``self``, so the module could not be called and ``self.threshold``
        was unreachable; the argument order now matches the other losses
        in this file (``fake, real``).
        """
        mask = real > 0
        # Upsample the prediction to the target resolution if needed.
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        fake = fake * mask
        diff = torch.abs(real - fake)
        # BUG FIX: `.data.cpu().numpy()[0]` fails on a 0-d tensor;
        # `.item()` is the supported scalar extraction.
        delta = self.threshold * torch.max(diff).item()
        # L1 part for diff < delta (zero elsewhere).
        part1 = -F.threshold(-diff, -delta, 0.)
        # Quadratic part for diff > delta, shifted to be continuous at delta.
        part2 = F.threshold(diff ** 2 - delta ** 2, 0., -delta ** 2.) + delta ** 2
        part2 = part2 / (2. * delta)
        loss = part1 + part2
        loss = torch.sum(loss)
        return loss
class RMSE(nn.Module):
    """Root-mean-squared error between depths, scaled by a factor of 10."""

    def __init__(self):
        super(RMSE, self).__init__()

    def forward(self, fake, real):
        # Upsample the prediction to the target resolution if needed.
        if not fake.shape == real.shape:
            _, _, H, W = real.shape
            fake = F.interpolate(fake, size=(H, W), mode='bilinear')
        sq_err = torch.abs(10. * real - 10. * fake) ** 2
        return torch.sqrt(sq_err.mean())
class GradLoss(nn.Module):
    """L1 distance between image gradients."""

    def __init__(self):
        super(GradLoss, self).__init__()

    def forward(self, grad_fake, grad_real):
        # torch.sum of a 0-d mean is the mean itself; kept for parity with
        # the original formulation.
        return torch.sum((grad_real - grad_fake).abs().mean())
class NormalLoss(nn.Module):
    """1 minus the mean cosine similarity between gradient/normal vectors.

    Inputs are (B, N, 3) vector fields; zero-length vectors are guarded
    against division by zero with a small epsilon.
    """

    def __init__(self):
        super(NormalLoss, self).__init__()

    def forward(self, grad_fake, grad_real):
        # Batched per-vector dot product via (1x3) @ (3x1) matmuls.
        inner = (grad_fake[:, :, None, :] @
                 grad_real[:, :, :, None]).squeeze(-1).squeeze(-1)
        norm_fake = torch.sqrt(torch.sum(grad_fake ** 2, dim=-1))
        norm_real = torch.sqrt(torch.sum(grad_real ** 2, dim=-1))
        eps = 1e-7
        # Clone before in-place masking to keep autograd happy.
        safe_real = norm_real.clone()
        safe_fake = norm_fake.clone()
        safe_real[safe_real == 0] = eps
        safe_fake[safe_fake == 0] = eps
        return 1 - torch.mean(inner / (safe_fake * safe_real))
# def get_acc(output, target):
# # takes in two tensors to compute accuracy
# pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
# correct = pred.eq(target.data.view_as(pred)).cpu().sum()
# print("Target: ", Counter(target.data.cpu().numpy()))
# print("Pred: ", Counter(pred.cpu().numpy().flatten().tolist()))
# return float(correct)*100 / target.size(0)
def adjust_learning_rate(optimizer, decay=0.1):
    """Multiply the learning rate of every param group by ``decay``."""
    for group in optimizer.param_groups:
        group['lr'] = decay * group['lr']
def parse_args():
    """Build and parse the command-line arguments for depth training."""
    p = argparse.ArgumentParser(description='Single image depth estimation')
    # data / schedule
    p.add_argument('--dataset', dest='dataset', help='training dataset', default='nyuv2', type=str)
    p.add_argument('--epochs', dest='max_epochs', help='number of epochs to train', default=10, type=int)
    p.add_argument('--cuda', dest='cuda', help='whether use CUDA', action='store_true')
    p.add_argument('--bs', dest='bs', help='batch_size', default=1, type=int)
    p.add_argument('--num_workers', dest='num_workers', help='num_workers', default=1, type=int)
    p.add_argument('--disp_interval', dest='disp_interval', help='display interval', default=10, type=int)
    p.add_argument('--output_dir', dest='output_dir', help='output directory', default='saved_models', type=str)
    # config optimization
    p.add_argument('--o', dest='optimizer', help='training optimizer', default="adam", type=str)
    p.add_argument('--lr', dest='lr', help='starting learning rate', default=1e-3, type=float)
    p.add_argument('--lr_decay_step', dest='lr_decay_step',
                   help='step to do learning rate decay, unit is epoch', default=5, type=int)
    p.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
                   help='learning rate decay ratio', default=0.1, type=float)
    # training session bookkeeping
    p.add_argument('--s', dest='session', help='training session', default=1, type=int)
    p.add_argument('--eval_epoch', dest='eval_epoch', help='number of epoch to evaluate', default=2, type=int)
    # resume a trained model
    # NOTE(review): type=bool parses any non-empty string as True
    # (bool("False") is True) — behaviour preserved from the original.
    p.add_argument('--r', dest='resume', help='resume checkpoint or not', default=False, type=bool)
    p.add_argument('--start_at', dest='start_epoch', help='epoch to start with', default=0, type=int)
    p.add_argument('--checksession', dest='checksession', help='checksession to load model', default=1, type=int)
    p.add_argument('--checkepoch', dest='checkepoch', help='checkepoch to load model', default=1, type=int)
    p.add_argument('--checkpoint', dest='checkpoint', help='checkpoint to load model', default=0, type=int)
    # loss weights
    p.add_argument('--gamma_sup', dest='gamma_sup', help='factor of supervised loss', default=1., type=float)
    p.add_argument('--gamma_unsup', dest='gamma_unsup', help='factor of unsupervised loss', default=1., type=float)
    p.add_argument('--gamma_reg', dest='gamma_reg', help='factor of regularization loss', default=10., type=float)
    return p.parse_args()
def get_coords(b, h, w):
    """Return a (b, h, w, 2) grid of (x, y) coordinates normalised to [-1, 1)."""
    ys = Variable(torch.arange(0, h).view(1, h, 1).expand(b, 1, h, w))  # [B, 1, H, W]
    xs = Variable(torch.arange(0, w).view(1, 1, w).expand(b, 1, h, w))  # [B, 1, H, W]
    grid = torch.cat((xs, ys), dim=1)
    scale = Variable(torch.Tensor([w, h]).view(1, 2, 1, 1))
    grid = grid * 2. / scale - 1.
    # Channels-last layout expected by grid_sample.
    return grid.permute(0, 2, 3, 1)
def resize_tensor(img, coords):
    """Bilinearly sample ``img`` at the normalised grid ``coords`` (zero padding)."""
    return F.grid_sample(img, coords, mode='bilinear', padding_mode='zeros')
def imgrad(img):
    """Sobel gradients of ``img``: returns ``(grad_y, grad_x)``, each (N, 1, H, W).

    The input is first collapsed to a single channel by averaging.
    """
    gray = torch.mean(img, 1, True)

    sobel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
    conv_x = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
    wx = torch.from_numpy(sobel_x).float().unsqueeze(0).unsqueeze(0)
    if img.is_cuda:
        wx = wx.cuda()
    conv_x.weight = nn.Parameter(wx)
    grad_x = conv_x(gray)

    sobel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    conv_y = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
    wy = torch.from_numpy(sobel_y).float().unsqueeze(0).unsqueeze(0)
    if img.is_cuda:
        wy = wy.cuda()
    conv_y.weight = nn.Parameter(wy)
    grad_y = conv_y(gray)

    return grad_y, grad_x
def imgrad_yx(img):
    """Stack the flattened y- and x-Sobel gradients into one (N, 2C, H*W) tensor."""
    N, C, _, _ = img.size()
    gy, gx = imgrad(img)
    return torch.cat((gy.view(N, C, -1), gx.view(N, C, -1)), dim=1)
def reg_scalor(grad_yx):
    """Map gradient magnitudes to (0, 1] weights: exp(-|g| / 255)."""
    return torch.exp(-grad_yx.abs() / 255.)
class sampler(Sampler):
    """Yield a random permutation of indices with batch-contiguous order.

    Whole batches are shuffled as units; a trailing partial batch (when
    ``train_size`` is not divisible by ``batch_size``) is always appended
    last, in order.
    """

    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = int(train_size / batch_size)
        self.batch_size = batch_size
        # Per-batch index offsets 0..batch_size-1, reused every __iter__.
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        self.leftover_flag = train_size % batch_size != 0
        if self.leftover_flag:
            self.leftover = torch.arange(
                self.num_per_batch * batch_size, train_size).long()

    def __iter__(self):
        # Shuffle batch start offsets, then expand each to a full batch.
        starts = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        self.rand_num = starts.expand(self.num_per_batch, self.batch_size) + self.range
        self.rand_num_view = self.rand_num.view(-1)
        if self.leftover_flag:
            self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
        return iter(self.rand_num_view)

    def __len__(self):
        return self.num_data
def collate_fn(data):
    """Pad variable-size (image, depth) pairs into fixed 376x1242 batches.

    Each sample is copied into the bottom-left corner of a ones-filled
    canvas: rows are bottom-aligned, columns are left-aligned. Returns a
    (B, 3, 376, 1242) image batch and a (B, 1, 376, 1242) depth batch.
    """
    images, depth_maps = zip(*data)
    batch_size = len(images)
    image_batch = torch.ones((batch_size, 3, 376, 1242))
    depth_batch = torch.ones((batch_size, 1, 376, 1242))
    for idx, (image, depth) in enumerate(zip(images, depth_maps)):
        image_batch[idx, :, -image.shape[1]:, :image.shape[2]] = image
        depth_batch[idx, :, -depth.shape[1]:, :depth.shape[2]] = depth
    return image_batch, depth_batch
if __name__ == '__main__':
args = parse_args()
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You might want to run with --cuda")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
train_dataset = MyCustomDataset()
train_size = len(train_dataset)
eval_dataset = MyCustomDataset(train=False)
eval_size = len(eval_dataset)
print(train_size)
print(eval_size)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.bs,
shuffle=True, num_workers=args.num_workers)
# nr_of_pixels = len(train_dataset)*640*480*3
# finding max depth and ir values
# max_ir_value = 0
# max_d_value = 0
# min_ir_value = 100000
# min_d_value = 100000
# for batch in train_dataloader:
# # batch[0][0][0][batch[0][0][0]!=0].min()
# # max depth value
# if batch[0][0][1].max() > max_d_value:
# max_d_value = batch[0][0][1].max()
# # max ir value
# if batch[0][0][0].max() > max_ir_value:
# max_ir_value = batch[0][0][0].max()
# # min depth value
# if batch[0][0][1][batch[0][0][1] != 0].min() < min_d_value:
# min_d_value = batch[0][0][1][batch[0][0][1] != 0].min()
# # min ir value
# if batch[0][0][0][batch[0][0][0] != 0].min() < min_ir_value:
# min_ir_value = batch[0][0][0][batch[0][0][0] != 0].min()
# print("max_d_value: ", max_d_value, "min_d_value",min_d_value)
# print("max_ir_value: ", max_ir_value, "min_ir_value",min_ir_value)
# depth_test = cv2.imread("/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/training_data/combined_ir_d_d_debug/train/image.png",-1)
# min_d_value = 100000
# max_d_value = 0
# mean = total_sum / nr_of_pixels
# sum_of_squared_error = 0
# for batch in train_dataloader:
# sum_of_squared_error += ((batch[0] - mean).pow(2)).sum()
# std = torch.sqrt(sum_of_squared_error / nr_of_pixels)
eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.bs,
shuffle=True, num_workers=args.num_workers) #maybe trying with False for shuffle, here?
# same as above but on validation set
# max_ir_value = 0
# max_d_value = 0
# min_ir_value = 100000
# min_d_value = 100000
# for batch in eval_dataloader:
# # batch[0][0][0][batch[0][0][0]!=0].min()
# # max depth value
# if batch[0][0][1].max() > max_d_value:
# max_d_value = batch[0][0][1].max()
# # max ir value
# if batch[0][0][0].max() > max_ir_value:
# max_ir_value = batch[0][0][0].max()
# # min depth value
# if batch[0][0][1][batch[0][0][1] != 0].min() < min_d_value:
# min_d_value = batch[0][0][1][batch[0][0][1] != 0].min()
# # min ir value
# if batch[0][0][0][batch[0][0][0] != 0].min() < min_ir_value:
# min_ir_value = batch[0][0][0][batch[0][0][0] != 0].min()
# print("evaluation dataset")
# print("max_d_value: ", max_d_value, "min_d_value",min_d_value)
# print("max_ir_value: ", max_ir_value, "min_ir_value",min_ir_value)
# network initialization
print('Initializing model...')
i2d = I2D(fixed_feature_weights=False)
torch.cuda.empty_cache()
if args.cuda:
i2d = i2d.cuda()
print('Done!')
# hyperparams
lr = args.lr
bs = args.bs
lr_decay_step = args.lr_decay_step
lr_decay_gamma = args.lr_decay_gamma
# params
params = []
for key, value in dict(i2d.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
DOUBLE_BIAS = 0
WEIGHT_DECAY = 4e-5
params += [{'params': [value], 'lr':lr*(DOUBLE_BIAS + 1),
'weight_decay': 4e-5 and WEIGHT_DECAY or 0}]
else:
params += [{'params': [value], 'lr':lr, 'weight_decay': 4e-5}]
# optimizer
if args.optimizer == "adam":
optimizer = torch.optim.Adam(params, lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=4e-5)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, lr=lr, momentum=0.9)
rmse = RMSE()
depth_criterion = RMSE_log()
dddDepth_criterion = DDDDepthDiff()
l1_crit = L1()
normals_diff = NormalsDiff()
grad_criterion = GradLoss()
normal_criterion = NormalLoss()
eval_metric = RMSE_log()
# resume
if args.resume:
load_name = os.path.join(args.output_dir,
'i2d_1_{}.pth'.format(args.checkepoch))
print("loading checkpoint %s" % (load_name))
state = i2d.state_dict()
checkpoint = torch.load(load_name)
args.start_epoch = checkpoint['epoch']
checkpoint = {k: v for k,
v in checkpoint['model'].items() if k in state}
state.update(checkpoint)
i2d.load_state_dict(state)
# optimizer.load_state_dict(checkpoint['optimizer'])
# lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
del checkpoint
torch.cuda.empty_cache()
# constants
iters_per_epoch = int(train_size / args.bs)
grad_factor = 10.
normal_factor = 1.
# max_depth = 6571
max_depth = 11000
#for visualizing the train and validation loss
train_loss_arr = []
val_loss_arr = []
for epoch in range(args.start_epoch, args.max_epochs):
train_loss = 0
val_loss = 0
# setting to train mode
i2d.train()
start = time.time()
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
img = Variable(torch.FloatTensor(1))
z = Variable(torch.FloatTensor(1))
if args.cuda:
img = img.cuda()
z = z.cuda()
train_data_iter = iter(train_dataloader)
show_image = True
# saving results in a txt file
save_dir = '/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/multiP_training_data/main_multiP/training_process/'
# with torch.profiler.profile(
# schedule=torch.profiler.schedule(
# wait=2,
# warmup=2,
# active=6,
# repeat=1),
# on_trace_ready=tensorboard_trace_handler,
# with_stack=True
# ) as profiler:
for step in range(iters_per_epoch):
start = time.time()
data = train_data_iter.next()
img.resize_(data[0].size()).copy_(data[0])#*max_depth)
z.resize_(data[1].size()).copy_(data[1])#*max_depth)
# max_depth = data[1].max()
optimizer.zero_grad()
z_fake = i2d(img)#*max_depth # * 6000 #z.max()
# depth_loss = depth_criterion(z_fake, z)
delta, dddDepth_loss = dddDepth_criterion(z_fake,z,epoch,show_image)#*max_depth,z*max_depth)
# dddDepth_loss = dddDepth_criterion(z_fake,z)
# grad_real, grad_fake = imgrad_yx(z), imgrad_yx(z_fake)
# if epoch > 3:
# grad_loss = grad_criterion(grad_fake, grad_real) * grad_factor * (epoch > 3)
# else:
# grad_loss = 0
# if epoch > 7:
# normals_diff_loss = normals_diff(z_fake*max_depth,z*max_depth) * (epoch > 7)
# # normal_loss = normal_criterion(grad_fake, grad_real) * normal_factor * (epoch > 7)
# else:
# normals_diff_loss = 0
# # normal_loss = 0
# loss = 10*(depth_loss + 0.01*grad_loss) + normals_diff_loss #+ normal_loss
# loss = depth_loss + grad_loss + normal_loss
# depth_loss_arr.append(depth_loss)
# dddDepth_loss_arr.append(dddDepth_loss)
torch.autograd.set_detect_anomaly(True)
# if delta > 0.193 and epoch > 6:
# loss = (10*dddDepth_loss)**2
# else:
# loss = 10*dddDepth_loss #depth_loss + 10*dddDepth_loss - depth_loss #+ normal_loss
loss = 1*dddDepth_loss
# loss *= 10
loss.backward()
optimizer.step()
writer.add_scalar("Loss/train",loss,step)
train_loss += loss.item()
end = time.time()
if show_image:
# for i in range(img.shape[0]):
# plt.imshow(np.transpose(imgs[i], (1, 2, 0)))
# plt.show()
# save_image(img[0], save_dir+'depthirPIL_'+str(epoch)+'.png')
# plt.imshow(img[0].cpu().numpy().transpose((1,2,0)))
# plt.savefig(save_dir +'depthir_'+str(epoch)+'.png',bbox_inches='tight')
# plt.close()
# rgbArray = np.zeros((len(img[0][1]),len(img[0][1][1]),3), 'uint16')
o3d_pcd = o3d.geometry.PointCloud()
##############################
#####save input cloud#########
input_img = img[0][2].cpu().numpy()
rgbArray = input_img*max_depth # np.array(img[0].cpu()*max_depth,np.uint16).transpose((1,2,0))
plt.imshow(rgbArray, vmin=0, vmax=max_depth)
plt.colorbar()
plt.savefig(save_dir+'depthirCV_'+str(epoch)+'.png', bbox_inches='tight')
plt.close()
# cv2.imwrite(save_dir+'depthirCV_'+str(epoch)+'.png',rgbArray)
input_depth = img.clone()
input_pcd = dddDepth_criterion.point_cloud(input_depth[0]).cpu().detach().numpy()
o3d_pcd.points = o3d.utility.Vector3dVector(input_pcd*max_depth)
o3d.io.write_point_cloud(save_dir+"input_cloud"+str(epoch)+".pcd", o3d_pcd)
# a = cv2.imread(save_dir+'depthirCV_'+str(epoch)+'.png', cv2.IMREAD_UNCHANGED)
# vmin, vmax = 0, 10000/65536.
####################
#depth ground truth#
plt.imshow(z[0].cpu().numpy().transpose((1,2,0))*max_depth, vmin=0, vmax=max_depth)
plt.colorbar()
plt.savefig(save_dir +'gt_'+str(epoch)+'.png',bbox_inches='tight')
plt.close()
# plt.imshow(z[0].cpu().numpy().transpose((1,2,0)))#, vmin=vmin, vmax=vmax)
# plt.colorbar()
# plt.savefig(save_dir +'unscaled_gt_'+str(epoch)+'.png',bbox_inches='tight')
# plt.close()
z_pcd = dddDepth_criterion.point_cloud(z[0]).cpu().detach().numpy()
o3d_pcd.points = o3d.utility.Vector3dVector(z_pcd*max_depth)
o3d.io.write_point_cloud(save_dir+"gt_cloud"+str(epoch)+".pcd", o3d_pcd)
##################
#depth prediction#
plt.imshow(z_fake[0].cpu().detach().numpy().transpose((1,2,0))*max_depth, vmin=0, vmax=max_depth)
plt.colorbar()
plt.savefig(save_dir +'pred_'+str(epoch)+'.png',bbox_inches='tight')
plt.close()
# plt.imshow(z_fake[0].cpu().detach().numpy().transpose((1,2,0)))#, vmin=vmin, vmax=vmax)
# plt.colorbar()
# plt.savefig(save_dir +'unscaled_pred_'+str(epoch)+'.png',bbox_inches='tight')
# plt.close()
z_fake_pcd = dddDepth_criterion.point_cloud(z_fake[0]).cpu().detach().numpy()
o3d_pcd.points = o3d.utility.Vector3dVector(z_fake_pcd*max_depth)
o3d.io.write_point_cloud(save_dir+"pred_cloud"+str(epoch)+".pcd", o3d_pcd)
##############
# txt images #
# img_file = open(save_dir+'gt_'+str(epoch)+'.txt',"w")
# for row in z[0].cpu().numpy():
# np.savetxt(img_file,row)
# img_file.close()
# # save_image(z_fake[0], save_dir+'predPIL_'+str(epoch)+'.png')
# img_file = open(save_dir+'pred_'+str(epoch)+'.txt',"w")
# for row in z_fake[0].cpu().detach().numpy():
# np.savetxt(img_file,row)
# img_file.close()
#### save difference ####
plt.imshow(np.abs(z[0].cpu().numpy().transpose((1,2,0)) - z_fake[0].cpu().detach().numpy().transpose((1,2,0)))*max_depth)
plt.colorbar()
plt.savefig(save_dir+'diff_'+str(epoch)+'.png', bbox_inches='tight')
plt.close()
pred_img = z_fake[0][0].cpu().detach().numpy()
plt.imshow(np.abs(input_img - pred_img)*max_depth)
plt.colorbar()
plt.savefig(save_dir+'input_pred_diff_'+str(epoch)+'.png', bbox_inches='tight')
plt.close()
# z_diff = dddDepth_criterion.point_cloud(torch.abs(z[0]-z_fake[0])).cpu().detach().numpy()
# o3d_pcd.points = o3d.utility.Vector3dVector(z_diff*max_depth)
# o3d.io.write_point_cloud(save_dir+"diff_cloud"+str(epoch)+".pcd", o3d_pcd)
show_image=False
# info
if step % args.disp_interval == 0:
# file_object = open("/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/results.txt", 'a')
print("[epoch %2d][iter %4d] loss: %.4f 3DDepthLoss: %.4f RMSE: %.4f lossX: %.4f lossY: %.4f lossZ: %.4f"#RMSElog: %.4f Grad: %.4f Normals diff: %.4f"
% (epoch, step, loss, dddDepth_loss, delta[0], delta[1], delta[2], delta[3]))#depth_loss, grad_loss, normals_diff_loss))
# print("[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f Grad: %.4f Normals loss: %.4f"
# % (epoch, step, loss, depth_loss, grad_loss, normal_loss))
# print("[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f"
# % (epoch, step, loss, depth_loss))
# file_object.write("\n[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f" #grad_loss: %.4f" # normal_loss: %.4f"
# % (epoch, step, loss, depth_loss))#, grad_loss))#, normal_loss))
# file_object.close()
# print("[epoch %2d][iter %4d] loss: %.4f iRMSE: %.4f" \
# % (epoch, step, loss, metric))
# save model
# plt.plot(depth_loss_arr,'g',dddDepth_loss_arr,'r')
# plt.savefig(save_dir +'train_loss_'+str(epoch)+'.png',bbox_inches='tight')
# plt.close()
if epoch%4 == 0:
save_name = os.path.join(args.output_dir, 'i2d_{}_{}.pth'.format(args.session, epoch))
torch.save({'epoch': epoch+1,
'model': i2d.state_dict(),
# 'optimizer': optimizer.state_dict(),
},
save_name)
print('save model: {}'.format(save_name))
print('time elapsed: %fs' % (end - start))
# if epoch % 1 == 0:
with torch.no_grad():
# setting to eval mode
i2d.eval()
# img = Variable(torch.FloatTensor(1), volatile=True)
# img = Variable(torch.FloatTensor(1),requires_grad=False)
img = Variable(torch.FloatTensor(1))
# z = Variable(torch.FloatTensor(1), volatile=True)
# z = Variable(torch.FloatTensor(1), requires_grad=False)
z = Variable(torch.FloatTensor(1))
if args.cuda:
img = img.cuda()
z = z.cuda()
print('evaluating...')
rmse_accum = 0
count = 0
eval_data_iter = iter(eval_dataloader)
for i, data_eval in enumerate(eval_data_iter):
print(i, '/', len(eval_data_iter)-1)
img.resize_(data_eval[0].size()).copy_(data_eval[0])
z.resize_(data_eval[1].size()).copy_(data_eval[1])
z_fake = i2d(img)
depth_loss_eval = depth_criterion(z_fake,z)
grad_real, grad_fake = imgrad_yx(z), imgrad_yx(z_fake)
delta_val, dddDepth_loss_eval = dddDepth_criterion(z_fake,z,epoch,show_image)
# dddDepth_loss_eval = dddDepth_criterion(z_fake,z)
# if epoch > 3:
# grad_loss_eval = grad_criterion(grad_fake, grad_real) * grad_factor #* (epoch > 3)
# else:
# grad_loss_eval = 0
# if epoch > 7:
# normals_diff_loss_eval = normals_diff(z_fake*max_depth,z*max_depth) #* (epoch > 7)
# # normal_loss_eval = normal_criterion(grad_fake, grad_real) * normal_factor * (epoch > 7)
# else:
# normals_diff_loss_eval = 0
# normal_loss_eval = 0
# loss_val = 10*(depth_loss_eval + 0.01*grad_loss_eval) + normals_diff_loss_eval
# loss_val = depth_loss_eval + grad_loss_eval + normal_loss_eval
# loss_val *= 10
# if delta_val > 0.193 and epoch > 7:
# loss = (10*dddDepth_loss_eval)**2
# else:
# loss_val = 10*dddDepth_loss_eval #depth_loss_eval + 10*dddDepth_loss_eval - depth_loss_eval
loss_val = 1*dddDepth_loss_eval
writer.add_scalar("Loss/validation",loss_val,i)
val_loss += loss_val.item()
# print("Loss on test_data: ",loss_eval)
if i==337:
plt.imshow(z[0].cpu().numpy().transpose((1,2,0))*max_depth)#, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.savefig('/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/multiP_training_data/main_multiP/val_vis_images/gt_'+str(epoch)+'.png',bbox_inches='tight')
plt.close()
plt.imshow(z_fake[0].cpu().detach().numpy().transpose((1,2,0))*max_depth)#, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.savefig('/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/multiP_training_data/main_multiP/val_vis_images/pred_'+str(epoch)+'.png',bbox_inches='tight')
plt.close()
# save_image(z_fake[0],'/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/dataset/training_data/training_data/vis_images/depth_pred_'+str(epoch)+'_'+'.png')
# depth_loss = float(img.size(0)) * rmse(z_fake, z)**2
# eval_loss += depth_loss
# rmse_accum += float(img.size(0)) * eval_metric(z_fake, z)**2
# count += float(img.size(0))
train_loss = train_loss/iters_per_epoch #len(train_dataloader)
val_loss = val_loss/len(eval_dataloader)
train_loss_arr.append(train_loss)
val_loss_arr.append(val_loss)
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch, train_loss, val_loss))
file_object = open("/home/marian/calibration_ws/monodepth-FPN/MonoDepth-FPN-PyTorch/results.txt", 'a')
# print("[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f"# grad_loss: %.4f"# normal_loss: %.4f"
# % (epoch, step, loss, depth_loss))#, grad_loss))#, normal_loss))
# print("[epoch %2d][iter %4d] loss: %.4f RMSElog: %.4f"
# % (epoch, step, loss, depth_loss))
file_object.write('\nEpoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch, train_loss, val_loss)) #grad_loss: %.4f" # normal_loss: %.4f"
# % (epoch, step, loss, depth_loss))#, grad_loss))#, normal_loss))
file_object.close()
writer.flush()
# print("[epoch %2d] RMSE_log: %.4f RMSE: %.4f"
# % (epoch, torch.sqrt(eval_loss/count), torch.sqrt(rmse_accum/count)))
# with open('val.txt', 'a') as f:
# f.write("[epoch %2d] RMSE_log: %.4f RMSE: %.4f\n"
# % (epoch, torch.sqrt(eval_loss/count), torch.sqrt(rmse_accum/count)))
# plt.plot(train_loss_arr,'g',val_loss_arr,'r')
# plt.legend((train_loss_arr, val_loss_arr),('training loss', 'validation loss'))
# plt.savefig(save_dir +'t75losses'+'.png',bbox_inches='tight')
# plt.close()
writer.close()
epochs = range(args.start_epoch, args.max_epochs)
plt.plot(epochs, train_loss_arr, '-g', label='Training loss')
plt.plot(epochs, val_loss_arr, 'b', label='Validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig(save_dir+"t95_losses.png")
plt.close() | 1.953125 | 2 |
ita/__init__.py | Tezar/Assigment-generator | 0 | 12766538 | import os
MODULE_PATH = os.path.dirname(__file__)
VERBOSE = True
from ita import loader_file
from ita import parser
from ita import generator
# Re-export the package's main classes at the top level so callers can write
# e.g. `ita.Generator` instead of `ita.generator.Generator`.
FileLoader = loader_file.FileLoader
Loader = loader_file.FileLoader  # alias kept for backward compatibility
Parser = parser.Parser
Generator = generator.Generator
GeneratorException = generator.GeneratorException
__all__ = ["VERBOSE", "MODULE_PATH", "web", "cli", "Generator", "Parser", "Loader", "FileLoader", "GeneratorException"] | 1.851563 | 2 |
cisco_exec_by_ssh.py | sweetcolor/cisco-exec-by-ssh | 1 | 12766539 | import paramiko
import sys
import os
import time
import re
class CiscoExecBySSH:
    """Run a batch of Cisco shell commands over SSH.

    Reads the input file named by the first CLI argument: line 1 holds
    "host,user,password"; every following line is a command to send to the
    device's interactive shell. Device output is echoed to stdout.
    """
    def __init__(self):
        # The input file supplies both the credentials (first line) and the
        # command list (remaining lines, consumed later by exec_commands).
        self.input_file = self.get_input_file()
        self.login_params = {'host': '', 'user': '', 'password': '', 'port': 22}
        self.get_login_info()
        self.client = self.login()
    def get_login_info(self):
        """Parse "host,user,password" from the first line of the input file."""
        host, user, password = self.input_file.readline().strip().split(',')
        self.login_params['host'] = host
        self.login_params['user'] = user
        self.login_params['password'] = password
    def login(self):
        """Open and return a paramiko SSH connection using the parsed credentials."""
        client = paramiko.SSHClient()
        # Auto-accept unknown host keys: convenient for lab devices, but
        # insecure for production use (no host authenticity check).
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=self.login_params['host'], username=self.login_params['user'],
                       password=self.login_params['password'], port=self.login_params['port'],
                       look_for_keys=False, allow_agent=False)
        return client
    def exec_commands(self):
        """Send each remaining input-file line to an interactive shell.

        Performs an in-shell login first, then sends commands one at a time
        with a fixed delay, auto-answering "y" to (y/n) confirmation prompts
        and echoing all device output. Closes the channel and client at the end.
        """
        buffer = 2048  # max bytes read per recv() call
        delay = 1  # seconds to wait for the device to produce output
        channel = self.client.invoke_shell()
        self._print_channel_output(channel.recv(buffer))
        # The device's shell prompts for credentials again after invoke_shell.
        channel.send(self.login_params['user'] + '\n')
        channel.send(self.login_params['password'] + '\n')
        self._print_channel_output(channel.recv(buffer))
        for command in self.input_file:
            channel.send(command)
            time.sleep(delay)
            output = channel.recv(buffer)
            # Auto-confirm interactive (y/n) prompts, then read the follow-up
            # output produced after the confirmation.
            if re.search('\(y/n\)', output.decode(), re.IGNORECASE):
                channel.send('y')
                time.sleep(delay)
                self._print_channel_output(output)
                output = channel.recv(buffer)
            self._print_channel_output(output)
            time.sleep(delay)
        channel.close()
        self.client.close()
    @staticmethod
    def _print_channel_output(output):
        """Decode raw channel bytes and print them without adding a newline."""
        print(output.decode(), end='')
    @staticmethod
    def get_input_file():
        """Return the open input file named by sys.argv[1]; exit(1) on error."""
        try:
            input_file_path = sys.argv[1]
            if not os.path.isfile(input_file_path):
                print('No such file {}'.format(input_file_path))
                sys.exit(1)
            # NOTE(review): the file handle is never explicitly closed; it
            # lives for the duration of the process.
            return open(input_file_path)
        except IndexError:
            print('Please enter input file')
            print('python3 <scrip name>.py <input file>')
            sys.exit(1)
| 2.5 | 2 |
mastermind_django_files/front_end/admin.py | chodges7/mastermind-capstone | 0 | 12766540 | <filename>mastermind_django_files/front_end/admin.py
from django.contrib import admin
from .models import Games, Stats
# Expose the game and statistics models in the Django admin site so they
# can be browsed and edited through the admin UI.
admin.site.register(Games)
admin.site.register(Stats)
| 1.320313 | 1 |
tests/integration/workflows/nodejs_npm_esbuild/test_nodejs_npm_with_esbuild.py | awslabs/aws-lambda-builders | 180 | 12766541 | <filename>tests/integration/workflows/nodejs_npm_esbuild/test_nodejs_npm_with_esbuild.py<gh_stars>100-1000
import os
import shutil
import tempfile
from unittest import TestCase
from aws_lambda_builders.builder import LambdaBuilder
from aws_lambda_builders.exceptions import WorkflowFailedError
from aws_lambda_builders.workflows.nodejs_npm.npm import SubprocessNpm
from aws_lambda_builders.workflows.nodejs_npm.utils import OSUtils
from aws_lambda_builders.workflows.nodejs_npm_esbuild.esbuild import EsbuildExecutionError
from aws_lambda_builders.workflows.nodejs_npm_esbuild.utils import EXPERIMENTAL_FLAG_ESBUILD
from parameterized import parameterized
class TestNodejsNpmWorkflowWithEsbuild(TestCase):
"""
Verifies that `nodejs_npm` workflow works by building a Lambda using NPM
"""
TEST_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "testdata")
def setUp(self):
self.artifacts_dir = tempfile.mkdtemp()
self.scratch_dir = tempfile.mkdtemp()
self.dependencies_dir = tempfile.mkdtemp()
self.no_deps = os.path.join(self.TEST_DATA_FOLDER, "no-deps-esbuild")
self.builder = LambdaBuilder(language="nodejs", dependency_manager="npm-esbuild", application_framework=None)
def tearDown(self):
shutil.rmtree(self.artifacts_dir)
shutil.rmtree(self.scratch_dir)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_doesnt_build_without_feature_flag(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild")
with self.assertRaises(EsbuildExecutionError) as context:
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
)
self.assertEqual(str(context.exception), "Esbuild Failed: Feature flag must be enabled to use this workflow")
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_dependencies(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild")
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_multiple_entrypoints(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild-multiple-entrypoints")
options = {"entry_points": ["included.js", "included2.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map", "included2.js", "included2.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_typescript_projects(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild-typescript")
options = {"entry_points": ["included.ts"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_with_external_esbuild(self, runtime):
osutils = OSUtils()
npm = SubprocessNpm(osutils)
source_dir = os.path.join(self.TEST_DATA_FOLDER, "no-deps-esbuild")
esbuild_dir = os.path.join(self.TEST_DATA_FOLDER, "esbuild-binary")
npm.run(["ci"], cwd=esbuild_dir)
binpath = npm.run(["bin"], cwd=esbuild_dir)
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
executable_search_paths=[binpath],
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_no_options_passed_to_esbuild(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild")
with self.assertRaises(WorkflowFailedError) as context:
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
self.assertEqual(str(context.exception), "NodejsNpmEsbuildBuilder:EsbuildBundle - entry_points not set ({})")
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_bundle_with_implicit_file_types(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "implicit-file-types")
options = {"entry_points": ["included", "implicit"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js.map", "implicit.js.map", "implicit.js", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_bundles_project_without_dependencies(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "no-package-esbuild")
options = {"entry_points": ["included"]}
osutils = OSUtils()
npm = SubprocessNpm(osutils)
esbuild_dir = os.path.join(self.TEST_DATA_FOLDER, "esbuild-binary")
npm.run(["ci"], cwd=esbuild_dir)
binpath = npm.run(["bin"], cwd=esbuild_dir)
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
executable_search_paths=[binpath],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_with_remote_dependencies_without_download_dependencies_with_dependencies_dir(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
options = {"entry_points": ["included.js"]}
osutils = OSUtils()
npm = SubprocessNpm(osutils)
esbuild_dir = os.path.join(self.TEST_DATA_FOLDER, "esbuild-binary")
npm.run(["ci"], cwd=esbuild_dir)
binpath = npm.run(["bin"], cwd=esbuild_dir)
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
options=options,
runtime=runtime,
dependencies_dir=self.dependencies_dir,
download_dependencies=False,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
executable_search_paths=[binpath],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_with_remote_dependencies_with_download_dependencies_and_dependencies_dir(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
dependencies_dir=self.dependencies_dir,
download_dependencies=True,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
expected_modules = "minimal-request-promise"
output_modules = set(os.listdir(os.path.join(self.dependencies_dir, "node_modules")))
self.assertIn(expected_modules, output_modules)
expected_dependencies_files = {"node_modules"}
output_dependencies_files = set(os.listdir(os.path.join(self.dependencies_dir)))
self.assertNotIn(expected_dependencies_files, output_dependencies_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_with_remote_dependencies_without_download_dependencies_without_dependencies_dir(
self, runtime
):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
with self.assertRaises(EsbuildExecutionError) as context:
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
dependencies_dir=None,
download_dependencies=False,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
self.assertEqual(str(context.exception), "Esbuild Failed: Lambda Builders encountered and invalid workflow")
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_project_without_combine_dependencies(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-no-node_modules")
options = {"entry_points": ["included.js"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
dependencies_dir=self.dependencies_dir,
download_dependencies=True,
combine_dependencies=False,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js.map", "included.js"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
expected_modules = "minimal-request-promise"
output_modules = set(os.listdir(os.path.join(self.dependencies_dir, "node_modules")))
self.assertIn(expected_modules, output_modules)
expected_dependencies_files = {"node_modules"}
output_dependencies_files = set(os.listdir(os.path.join(self.dependencies_dir)))
self.assertNotIn(expected_dependencies_files, output_dependencies_files)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_external(self, runtime):
source_dir = os.path.join(self.TEST_DATA_FOLDER, "with-deps-esbuild-externals")
options = {"entry_points": ["included.js"], "external": ["minimal-request-promise"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
with open(str(os.path.join(self.artifacts_dir, "included.js"))) as f:
js_file = f.read()
# Check that the module has been require() instead of bundled
self.assertIn('require("minimal-request-promise")', js_file)
@parameterized.expand([("nodejs12.x",), ("nodejs14.x",), ("nodejs16.x",)])
def test_builds_javascript_project_with_loader(self, runtime):
osutils = OSUtils()
source_dir = os.path.join(self.TEST_DATA_FOLDER, "no-deps-esbuild-loader")
options = {"entry_points": ["included.js"], "loader": [".reference=json"]}
self.builder.build(
source_dir,
self.artifacts_dir,
self.scratch_dir,
os.path.join(source_dir, "package.json"),
runtime=runtime,
options=options,
experimental_flags=[EXPERIMENTAL_FLAG_ESBUILD],
)
expected_files = {"included.js", "included.js.map"}
output_files = set(os.listdir(self.artifacts_dir))
self.assertEqual(expected_files, output_files)
included_js_path = os.path.join(self.artifacts_dir, "included.js")
# check that the .reference file is correctly bundled as code by running the result
self.assertEqual(
osutils.check_output(included_js_path),
str.encode(
"===\n"
"The Muses\n"
"===\n"
"\n"
"\tcalliope: eloquence and heroic poetry\n"
"\terato: lyric or erotic poetry\n"
"\tmelpomene: tragedy\n"
"\tpolymnia: sacred poetry\n"
"\tterpsichore: dance\n"
"\tthalia: comedy\n"
"\turania: astronomy and astrology"
),
)
| 2.15625 | 2 |
converter.py | josuemartinezsv/joitek-video2text-converter | 0 | 12766542 | """
Copyright 2021 <NAME>
"""
from pathlib import Path
from tempfile import TemporaryDirectory
from textwrap import TextWrapper
from time import sleep
from typing import Optional
import typer
from moviepy.video.io.VideoFileClip import VideoFileClip
from speech_recognition import Recognizer, AudioFile, UnknownValueError, RequestError
def clear_console():
    """Clear the terminal screen (thin wrapper around typer.clear)."""
    typer.clear()
def video_to_text_converter(path_of_video: Optional[Path], retries: Optional[int],
                            transcription_file_dir_path: Optional[Path],
                            transcription_file_name: Optional[str], skip_bad_chunks: Optional[bool],
                            abort_on_bad_chunk: Optional[bool], language: Optional[str]) -> None:
    """Transcribe the audio track of a video into a text file.

    The audio is split into chunks of at most 60 seconds, each chunk is written
    to a temporary WAV file and sent to the Google Web Speech API (via the
    ``speech_recognition`` package), and the per-chunk texts are joined and
    written to ``transcription_file_dir_path/transcription_file_name``.

    Args:
        path_of_video: Path of the video file to transcribe.
        retries: Maximum number of recognition attempts per chunk.
        transcription_file_dir_path: Directory where the transcript is saved.
        transcription_file_name: File name of the transcript.
        skip_bad_chunks: When True, replace unrecognizable chunks with a
            placeholder line and keep converting.
        abort_on_bad_chunk: When True, stop converting at the first chunk that
            cannot be recognized.
        language: Language tag passed to the recognizer (e.g. "en-US").
    """
    with TemporaryDirectory(
            prefix="Joitek_Video_To_Text_Of_Video_",
            suffix=f"{path_of_video.stem.capitalize()}") as temp_folder_path:
        clear_console()
        typer.echo("Determining the total minutes that video has...")
        with VideoFileClip(str(path_of_video)) as original_videoFC:
            video_seconds_duration = int(float(original_videoFC.duration))
            # Chunk boundaries in seconds: a short video is one single chunk,
            # longer videos are split in 60-second steps.
            if video_seconds_duration <= 60:
                minutes_on_seconds_range = list(range(0, video_seconds_duration + 1, video_seconds_duration))
            else:
                minutes_on_seconds_range = list(range(0, video_seconds_duration + 1, 60))
            length_of_list = len(minutes_on_seconds_range)
            sleep(2)
            clear_console()
            extracted_words_container = {}
            text_wrapper = TextWrapper()
            text_wrapper.width = 80
            try:
                for i in range(length_of_list - 1):
                    typer.echo(f"Video will be divided in: {length_of_list - 1} chunks...")
                    typer.echo("Original video is not affected...")
                    typer.echo()
                    # Start one second early (except for the first chunk) so a
                    # word is not cut exactly on a boundary.
                    temp_audio_start_time = (minutes_on_seconds_range[i] - 1 * (minutes_on_seconds_range[i] != 0))
                    temp_audio_end_time = minutes_on_seconds_range[i + 1]
                    name_temp_audio = f"tempA-{i + 1}.wav"
                    dic_key = f"text-{i + 1}"
                    temp_audio_abs_path = str(Path(temp_folder_path, name_temp_audio))
                    audio_sub = original_videoFC.audio.subclip(temp_audio_start_time, temp_audio_end_time)
                    audio_sub.write_audiofile(temp_audio_abs_path, logger=None)
                    attempt_number = 1
                    exit_loop = False
                    chunk_recognized = False
                    text_to_save = ""
                    while not exit_loop:
                        typer.echo(f"Chunk {(i + 1)} of {length_of_list - 1}")
                        typer.echo()
                        typer.echo(f"Attempt {attempt_number} of {retries}")
                        typer.echo("Please wait...")
                        r = Recognizer()
                        try:
                            audio = AudioFile(temp_audio_abs_path)
                            with audio as source:
                                r.adjust_for_ambient_noise(source)
                                audio_file = r.record(source)
                            result = r.recognize_google(audio_file, language=language)
                            typer.echo("Extraction success...")
                            text_to_save = text_wrapper.fill(result)
                            chunk_recognized = True
                            exit_loop = True
                        except UnknownValueError:
                            typer.echo("Can't understand this chunk...")
                        except RequestError:
                            typer.echo("Text extractor failure ")
                            typer.echo("or there is no active internet connection...")
                        typer.echo()
                        if exit_loop:
                            typer.echo("Saving results...")
                            # BUG FIX: recognized text was never stored, so the
                            # final transcript held only failure placeholders.
                            extracted_words_container[dic_key] = text_to_save
                            clear_console()
                        elif abort_on_bad_chunk:
                            typer.echo("Bad chunk found.\n Aborting conversion...")
                            extracted_words_container[dic_key] = text_to_save
                            exit_loop = True
                        elif skip_bad_chunks:
                            typer.echo("Bad fragment...")
                            typer.echo("Ignoring...")
                            actual_seconds_range = f"[{temp_audio_start_time}-{temp_audio_end_time}]"
                            extracted_words_container[dic_key] = f"Bad fragment {actual_seconds_range}"
                            exit_loop = True
                        elif attempt_number < retries:
                            # BUG FIX: the old `<=` comparison allowed one extra
                            # attempt and displayed "Attempt N+1 of N".
                            attempt_number += 1
                        else:
                            typer.echo("Retry limit reached...")
                            actual_seconds_range = f"[{temp_audio_start_time}-{temp_audio_end_time}]"
                            extracted_words_container[dic_key] = f"Retry limit reached for chunk on {actual_seconds_range}"
                            exit_loop = True
                    # BUG FIX: abort the remaining chunks only when this chunk
                    # actually failed, not after every chunk.
                    if abort_on_bad_chunk and not chunk_recognized:
                        break
                typer.echo("Retrieving previously saved results...")
                separator = "\n\n========\n\n"
                text = separator.join(extracted_words_container.values())
                clear_console()
                typer.echo("Saving...")
                file = Path(transcription_file_dir_path, transcription_file_name)
                # Explicit encoding so the transcript saves correctly regardless
                # of the platform's default codec.
                file.write_text(text, encoding="utf-8")
                typer.echo()
                typer.echo("Text extraction complete!")
                typer.echo()
                input("Press ENTER to exit...")
            except Exception as ex:  # CLI boundary: report the error and exit gracefully
                typer.echo('\n')
                typer.echo("A very serious error occurred\n"
                           "Can't continue...")
                typer.echo(ex)
                typer.echo(ex.__cause__)
| 2.734375 | 3 |
account/models.py | AhteshamSid/College_school_management_system | 0 | 12766543 | from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class UserProfile(models.Model):
    """Extra per-user data attached one-to-one to the Django auth user."""

    # One profile per user; deleting the user also deletes the profile.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    name = models.CharField(max_length=45, blank=True)
    # Profile picture; falls back to the bundled "man.png" placeholder.
    photo = models.ImageField(default="man.png", upload_to='admin/', null=True, blank=True)
    # (stored value, human-readable label) pairs for the gender field.
    gender_select = (
        ('male', 'Male'),
        ('female', 'Female')
    )
    gender = models.CharField(choices=gender_select, max_length=6, blank=True)
    # Role of the account within the school management system.
    employee_select = (
        ('admin', 'Admin'),
        ('professor', 'Professor'),
        ('teacher', 'Teacher'),
        ('register', 'Register'),
        ('student', 'Student'),
    )
    employee_type = models.CharField(choices=employee_select, max_length=15, blank=True)

    def __str__(self):
        """Display the profile as its owning user's username."""
        return str(self.user)
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
    """Keep a UserProfile in sync with every User.

    Creates the profile on the first save of a new User; on every User save
    (new or existing) the related profile is re-saved as well.
    """
    if created:
        UserProfile.objects.create(user=instance)
    instance.userprofile.save()
samples/python/Wilson/image-net_backup/work.py | yptheangel/Gymmy | 0 | 12766544 | #Author : <NAME>
#Last Edited : 14 June 2017
import cv2
import os
from shutil import copyfile
import numpy as np
max_num = 1800
img_folder = "pos_test/"
perfect_img = "pos_test/99.jpg"
def convert_images():
    """Resize every positive sample to 100x100 grayscale, in place.

    Images that cannot be read or resized are deleted and replaced with a
    copy of the known-good reference image (perfect_img).
    """
    for pic_num in range(1, max_num + 1):
        image_path = img_folder + str(pic_num) + ".jpg"
        try:
            img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
            resized_image = cv2.resize(img, (100, 100))
            cv2.imwrite(image_path, resized_image)
            # BUG FIX: the old code incremented pic_num by hand (the for loop
            # already advances it) so the log reported the *next* file name,
            # and it used the Python 2 print statement, which is a syntax
            # error under Python 3.
            print("%d.jpg was processed!" % pic_num)
        except Exception as e:
            # Unreadable/corrupt image: replace it with the reference image.
            os.remove(image_path)
            copyfile(perfect_img, image_path)
            print(str(e))
def find_uglies():
    """Replace known-bad ('ugly') images found in the sample folders.

    Every image in neg_test is compared pixel-for-pixel against every image
    in the uglies folder; exact matches are deleted and replaced with the
    reference image.
    """
    for file_type in ['neg_test']:
        for img in os.listdir(file_type):
            # BUG FIX (shadowing): the loop variable `ugly` was immediately
            # overwritten with the loaded image array; keep name and array apart.
            for ugly_name in os.listdir('uglies'):
                try:
                    current_image_path = str(file_type) + '/' + str(img)
                    ugly_image = cv2.imread('uglies/' + str(ugly_name))
                    question = cv2.imread(current_image_path)
                    # Identical shape and zero XOR difference => identical pixels.
                    if ugly_image.shape == question.shape and not (np.bitwise_xor(ugly_image, question).any()):
                        print('Deleting ugly picture! ')
                        print(current_image_path)
                        os.remove(current_image_path)
                        copyfile(perfect_img, current_image_path)
                except Exception as e:
                    print(str(e))
def create_pos_n_neg():
    """Write the OpenCV cascade-training description files.

    Positive samples go to info.dat (one "path 1 0 0 50 50" line each) and
    negative samples to bg.txt (one path per line). Files are opened in
    append mode, once per directory instead of once per image.
    """
    for file_type in ['neg_test']:
        if file_type == 'pos_test':
            with open('info.dat', 'a') as f:
                for img in os.listdir(file_type):
                    f.write(file_type + '/' + img + ' 1 0 0 50 50\n')
        elif file_type == 'neg_test':
            with open('bg.txt', 'a') as f:
                for img in os.listdir(file_type):
                    f.write(file_type + '/' + img + '\n')
def make_baby():
    """Fill image slots 919..1799 with copies of the reference image.

    Used to pad the positive sample set up to the expected count.
    """
    for pic_num in range(919, 1800):
        try:
            # BUG FIX: removed the no-op `pic_num+1` expression that followed
            # this call; the for loop already advances pic_num.
            copyfile(perfect_img, img_folder + str(pic_num) + ".jpg")
        except Exception as e:
            print(str(e))
# Script entry: pad the positive set with reference-image copies, then
# (re)build the negative background listing. Uncomment the other steps
# (resizing / ugly-image cleanup) as needed.
make_baby()
#convert_images()
#find_uglies()
create_pos_n_neg()
| 3.046875 | 3 |
draw_benchmark.py | dragonly/pingcap_interview | 25 | 12766545 | import matplotlib.pyplot as plt
with open('benchmark_local.txt', 'rt') as fd:
lines = fd.readlines()
data = {}
for line in lines:
line = line.strip()
if not line.endswith('ns/op'):
continue
split = line.split()
benchmark_name, record_len = split[0].split('-')[:2]
benchmark_name = benchmark_name.split('/')[1]
ns_per_op = int(split[2])
if benchmark_name not in data:
data[benchmark_name] = {
'record_len': [],
'ns_per_op': [],
}
data[benchmark_name]['record_len'].append(record_len)
data[benchmark_name]['ns_per_op'].append(ns_per_op)
benchmarks = sorted(list(data.keys()))
print(benchmarks)
for benchmark in benchmarks:
value = data[benchmark]
record_len = [l[:-3] + 'k' for l in value['record_len']]
ns_per_op = value['ns_per_op']
plt.plot(record_len, ns_per_op, '-o', label=benchmark)
plt.grid(True)
plt.xlabel('records number')
plt.ylabel('ns/op')
plt.yscale('log')
plt.legend()
plt.title('In-memory TopN Benchmark')
plt.show()
| 2.890625 | 3 |
wb/main/jobs/export_project/export_project_job.py | apaniukov/workbench | 23 | 12766546 | """
OpenVINO DL Workbench
Class for exporting the whole project
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import tarfile
import tempfile
from contextlib import closing
import yaml
from sqlalchemy import desc
from sqlalchemy.orm import Session
from openvino.tools.pot.version import get_version as get_pot_version
import openvino.tools.accuracy_checker.__init__ as accuracy_checker_info
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.accuracy_utils.accuracy_utils import construct_accuracy_tool_config
from wb.main.calibration_abstractions.utils import construct_calibration_tool_config
from wb.main.enumerates import JobTypesEnum, ModelSourceEnum, StatusEnum, AccuracyReportTypeEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.jobs.interfaces.job_observers import ExportProjectDBObserver
from wb.main.models import (ExportProjectJobModel, JobsModel, SingleInferenceInfoModel, DownloadableArtifactsModel,
ProjectsModel, WBInfoModel, AccuracyReportModel)
from wb.main.shared.enumerates import TaskEnum
class ExportProjectJob(IJob):
    """Celery job that packs a DL Workbench project (model, dataset and tool
    configurations) into a downloadable .tar.gz archive."""

    job_type = JobTypesEnum.export_project_type
    _job_model_class = ExportProjectJobModel
    # Archive extension and config file names used throughout the export.
    extension = '.tar.gz'
    calibration_config_name = 'calibration_config.json'
    accuracy_config_name = 'accuracy_config.yml'

    def __init__(self, job_id: int, **unused_kwargs):
        """Attach the DB and default socket observers that report export progress."""
        super().__init__(job_id=job_id)
        export_project_db_observer = ExportProjectDBObserver(job_id=self._job_id)
        self._job_state_subject.attach(export_project_db_observer)
        self._attach_default_db_and_socket_observers()

    def run(self):
        """Collect the requested project components and build the archive.

        Progress is reported in steps (0/10/20/30/40 ... 100) through the job
        state subject; the finished archive is registered on the job's shared
        artifact record.
        """
        self._job_state_subject.update_state(status=StatusEnum.running, progress=0)
        session = get_db_session_for_celery()
        with closing(session):
            export_project_model: ExportProjectJobModel = self.get_job_model(session)
            # Flags selecting which components the user asked to export.
            include_model = export_project_model.include_model
            include_dataset = export_project_model.include_dataset
            include_accuracy_config = export_project_model.include_accuracy_config
            include_calibration_config = export_project_model.include_calibration_config
            # Maps archive component name -> filesystem path to pack from.
            components_paths = dict()
            project = export_project_model.project
            if include_model:
                components_paths['model'] = project.topology.path
            self._job_state_subject.update_state(status=StatusEnum.running, progress=10)
            with tempfile.TemporaryDirectory() as temp_directory:
                # The human-readable description file is always included.
                components_paths['description'] = self._generate_description(session, project, temp_directory)
                if include_dataset:
                    components_paths['dataset'] = self._pack_dataset(project.dataset.path,
                                                                     temp_directory,
                                                                     project.dataset.name)
                self._job_state_subject.update_state(status=StatusEnum.running, progress=20)
                configs_folder = os.path.join(temp_directory, 'configs')
                os.mkdir(configs_folder)
                components_paths['configs'] = configs_folder
                if include_accuracy_config:
                    accuracy_config_path = os.path.join(configs_folder, self.accuracy_config_name)
                    self._generate_accuracy_config(accuracy_config_path, export_project_model)
                self._job_state_subject.update_state(status=StatusEnum.running, progress=30)
                if include_calibration_config:
                    calibration_config_path = os.path.join(configs_folder, self.calibration_config_name)
                    self._generate_calibration_config(calibration_config_path, project)
                self._job_state_subject.update_state(status=StatusEnum.running, progress=40)
                artifact = export_project_model.shared_artifact
                archive_path = artifact.build_full_artifact_path()
                self._pack_project(archive_path, components_paths)
                # Archive name pattern: "<model>[_INT8]_<dataset>".
                is_int8 = '_INT8' if project.topology.analysis_job.is_int8 else ''
                package_name = project.topology.name + is_int8 + '_' + project.dataset.name
                artifact.name = package_name
                artifact.update(archive_path)
                artifact.write_record(session)
        self._job_state_subject.update_state(status=StatusEnum.ready, progress=100)
        self._job_state_subject.detach_all_observers()

    @staticmethod
    def _generate_description(session: Session, project: ProjectsModel, directory: str):
        """Write a human-readable Description.txt into *directory* and return its path.

        Picks the finished inference run with the best throughput and the
        best dataset-annotation accuracy report (if any) to enrich the summary.
        """
        best_inference_job = session.query(SingleInferenceInfoModel).filter(
            JobsModel.job_type == JobTypesEnum.single_inference_type,
            JobsModel.project_id == project.id,
            JobsModel.progress == 100).order_by(desc(SingleInferenceInfoModel.throughput)).first()
        accuracy_report: AccuracyReportModel = (
            session
            .query(AccuracyReportModel)
            .filter_by(project_id=project.id, report_type=AccuracyReportTypeEnum.dataset_annotations)
            .order_by(AccuracyReportModel.accuracy_result.desc())
            .first()
        )
        workbench_info = session.query(WBInfoModel).first()
        description = {
            'Model': project.topology.name,
            'Dataset': project.dataset.name,
            'Device': ' | '.join((project.device.device_name, project.device.product_name)),
            'Target': ' | '.join((project.target.target_type.value, project.target.host, project.target.name)),
            'Optimized with INT8 Calibration': 'Yes' if project.topology.analysis_job.is_int8 else 'No',
        }
        # Performance and accuracy lines are added only when results exist.
        if best_inference_job:
            description['Corresponding latency'] = best_inference_job.latency
            description['Best result FPS'] = best_inference_job.throughput
            description['Best result batch configuration'] = best_inference_job.batch
            description['Best result stream configuration'] = best_inference_job.nireq
        if accuracy_report:
            description['Accuracy'] = accuracy_report.accuracy_result
        description['DL Workbench version'] = workbench_info.get_version_from_file()
        description['Accuracy Checker version'] = accuracy_checker_info.__version__
        description['Post-training Optimisation Tool version'] = get_pot_version()
        description_path = os.path.join(directory, 'Description.txt')
        with open(description_path, 'w') as description_file:
            # One "key: value" line per entry, in insertion order.
            for parameter in description:
                new_line = ': '.join((parameter, str(description[parameter])))
                description_file.write(new_line + '\n')
        return description_path

    @staticmethod
    def _generate_accuracy_config(accuracy_config_path: str, export_project_model: ExportProjectJobModel):
        """Dump the project's accuracy-checker configuration as YAML.

        Uses the raw user-provided configuration when present; otherwise a
        configuration is constructed for OMZ models or models with a known
        task type. Writes nothing when no configuration can be produced.
        """
        accuracy_config_dict = None
        project = export_project_model.project
        topology = export_project_model.project.topology
        if project.accuracy:
            accuracy_config = project.accuracy.raw_configuration
            accuracy_config_dict = json.loads(accuracy_config)
        elif topology.source == ModelSourceEnum.omz or topology.meta.task_type != TaskEnum.generic:
            accuracy_config = construct_accuracy_tool_config(topology, project.dataset, project.device)
            accuracy_config_dict = accuracy_config.to_dict()
        if accuracy_config_dict:
            with open(accuracy_config_path, 'w') as outfile:
                # sort_keys=False preserves the tool's expected key ordering.
                yaml.dump(accuracy_config_dict, outfile, sort_keys=False)

    @staticmethod
    def _pack_dataset(dataset_path: str, temp_directory: str, dataset_name: str) -> str:
        """Tar+gzip the dataset into <temp_directory>/dataset and return that
        folder's path (the folder, not the archive file, is packed later)."""
        packed_dataset_folder = os.path.join(temp_directory, 'dataset')
        os.mkdir(packed_dataset_folder)
        dataset_full_name = f'{dataset_name}{ExportProjectJob.extension}'
        packed_dataset_path = os.path.join(packed_dataset_folder, dataset_full_name)
        with tarfile.open(packed_dataset_path, 'w:gz') as tar:
            for file in os.listdir(dataset_path):
                tar.add(os.path.join(dataset_path, file), arcname=file)
        return packed_dataset_folder

    @staticmethod
    def _generate_calibration_config(calibration_config_path: str, project: ProjectsModel):
        """Dump the INT8 calibration configuration as JSON, reusing the stored
        config when available and reconstructing it otherwise."""
        if project.topology.int8_job.calibration_config:
            calibration_config = json.loads(project.topology.int8_job.calibration_config)
        else:
            calibration_config = construct_calibration_tool_config(project.topology, project.topology.int8_job).json()
        with open(calibration_config_path, 'w') as out_file:
            json.dump(calibration_config, out_file, indent=3)

    def _pack_project(self, output_filename: str, components_paths: dict):
        """Assemble the final .tar.gz from the collected component paths,
        advancing progress from 40 towards 90 as components are added."""
        with tarfile.open(output_filename, 'w:gz') as tar:
            progress_step = int(50 / len(components_paths))
            progress = 40
            for component in components_paths:
                progress += progress_step
                if component == 'description':
                    # Single file; keep its original extension in the archive.
                    tar.add(components_paths[component],
                            arcname=component + os.path.splitext(components_paths[component])[1])
                    continue
                if component == 'model':
                    # Only the IR files (.xml/.bin) are exported for the model.
                    for file in os.listdir(components_paths[component]):
                        if os.path.splitext(file)[1] in ('.xml', '.bin'):
                            tar.add(os.path.join(components_paths[component], file),
                                    arcname=(os.path.join(component, file)))
                    continue
                for file in os.listdir(components_paths[component]):
                    tar.add(os.path.join(components_paths[component], file), arcname=(os.path.join(component, file)))
                # NOTE(review): the `continue` statements above skip this progress
                # update for the description/model components — confirm intended.
                self._job_state_subject.update_state(status=StatusEnum.running, progress=progress)

    def on_failure(self, exception: Exception):
        """Remove a partially written archive before the default failure handling."""
        with closing(get_db_session_for_celery()) as session:
            export_project_model: ExportProjectJobModel = self.get_job_model(session)
            file_path = export_project_model.shared_artifact.build_full_artifact_path()
            if file_path and os.path.isfile(file_path):
                os.remove(file_path)
        super().on_failure(exception)
| 1.71875 | 2 |
plugins/trivia/questions.py | wuhoodude/Bappybot | 7 | 12766547 | <gh_stars>1-10
class QuestionGenerator:
    """Produces trivia question/answer pairs."""

    def makeQuestion(self):
        """Return a dict with the answer ('a') and question ('q') text."""
        answer = '42'
        question = 'What is the meaning of life?'
        return {'a': answer, 'q': question}
pyaniml/core/result.py | FAIRChemistry/pyAnIML | 0 | 12766548 | from dataclasses import dataclass
from typing import List, Union, Optional
from pyaniml.utility.utils import SchemaBase, elements, attribute
from pyaniml.core.series import SeriesSet
from pyaniml.core.parameter import Category
@dataclass
class Result(SchemaBase):
    """Container for experiment results"""

    # Result entries; serialized as either <SeriesSet> or <Category> elements.
    results: List[object] = elements(
        choices=(
            {"name": "SeriesSet", "type": SeriesSet},
            {"name": "Category", "type": Category},
        ),
        default=list,
    )

    def add_result(self, result: Union[SeriesSet, Category]) -> None:
        """Adds a measurement result to the container. Must be of any low-level AnIML type.

        Args:
            result (Union[SeriesSet, Category]): The quantitative measurement results.
        """
        self.results.append(result)
| 2.640625 | 3 |
pets/serializers.py | fabrilopez/django_backend_irobot | 1 | 12766549 | <filename>pets/serializers.py
from rest_framework import serializers
from .models import Pet
class PetSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public fields of the Pet model."""

    class Meta:
        model = Pet
        # Fields included in the API representation.
        fields = ('id',
                  'name',
                  'age',
                  'exact_age')
kinoml/features/__init__.py | t-kimber/kinoml | 36 | 12766550 | <filename>kinoml/features/__init__.py<gh_stars>10-100
"""
Featurizers always output arrays,
but internally they rely on structure-oriented
methods to compute them.
"""
| 1.234375 | 1 |